init
This commit is contained in:
7
Dockerfile
Normal file
7
Dockerfile
Normal file
@@ -0,0 +1,7 @@
|
||||
# Runtime image for the PropFirms FastAPI service (main.py).
# Python 3.12 slim keeps the image small; no build tools are needed
# because every dependency in requirements.txt ships wheels.
FROM python:3.12-slim
WORKDIR /app
# Copy and install requirements before the application code so the
# pip layer is cached across code-only changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
COPY main.py .
# uvicorn below binds to this same port.
EXPOSE 3000
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "3000"]
|
||||
88
main.py
Normal file
88
main.py
Normal file
@@ -0,0 +1,88 @@
|
||||
# Standard library
from datetime import datetime, timezone
import json as jsonlib
import platform
import urllib.request

# Third-party
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
|
||||
|
||||
# FastAPI application instance; version string mirrors the deployed release.
app = FastAPI(title="PropFirms API", version="1.2.0")

# CORS is wide open: any origin, method, or header is accepted.
# NOTE(review): acceptable for an internal service, but confirm this API is
# never exposed publicly before relying on "*" origins.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)
|
||||
|
||||
# Base URL of the Ollama inference server on the internal network.
OLLAMA_URL = "http://10.0.1.1:11435"

# System prompt (French, by design — the service summarizes for French-speaking
# traders). Runtime string sent verbatim to Ollama; do not translate or reflow.
SYSTEM_PROMPT = """Tu es un assistant spécialisé dans le trading et les prop firms.
Tu résumes des transcripts de vidéos YouTube en français pour des traders.
Format de réponse :
- Contexte marché (1-2 phrases)
- Points clés (3-5 bullet points)
- Takeaways pour un trader prop firm (1-2 phrases)
Sois concis et direct."""
|
||||
|
||||
@app.get("/")
def root():
    """Service banner: status, human-readable service name, and API version."""
    banner = {
        "status": "ok",
        "service": "PropFirms DGX API",
        "version": "1.2.0",
    }
    return banner
|
||||
|
||||
@app.get("/health")
def health():
    """Health probe: liveness flag, current UTC time, host name and model tag.

    Fix: ``datetime.utcnow()`` is deprecated since Python 3.12 (the Docker
    base image) and returns a *naive* datetime; use a timezone-aware UTC
    timestamp instead.
    """
    return {
        "status": "healthy",
        # Aware UTC timestamp (ISO 8601 with +00:00 offset).
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "host": platform.node(),
        # Tag of the Ollama model this service proxies to.
        "ollama": "qwen3:14b",
    }
|
||||
|
||||
@app.get("/ping")
def ping():
    """Minimal liveness check for external monitors; always answers pong."""
    return dict(pong=True)
|
||||
|
||||
class SummarizeRequest(BaseModel):
    """Request body for POST /summarize."""

    # Full YouTube transcript text; must be non-empty (validated in the handler).
    transcript: str
    # Optional video title, injected into the prompt for extra context.
    video_title: str = ""
|
||||
|
||||
@app.post("/summarize")
def summarize(req: SummarizeRequest):
    """Summarize a YouTube transcript in French via the Ollama backend.

    Declared as a plain (sync) ``def`` on purpose: the urllib call below is
    blocking, and FastAPI runs sync endpoints in its worker thread pool. The
    previous ``async def`` version blocked the whole event loop for up to the
    120-second timeout on every request.

    Raises:
        HTTPException: 400 when the transcript is empty/whitespace;
            502 for any failure talking to or parsing the Ollama response.
    """
    if not req.transcript.strip():
        raise HTTPException(status_code=400, detail="Transcript vide")

    # Cap the transcript at 4000 chars to keep the prompt within model context.
    prompt = f"Titre de la vidéo : {req.video_title}\n\nTranscript :\n{req.transcript[:4000]}"

    payload = jsonlib.dumps({
        "model": "qwen3:14b",
        "prompt": prompt,
        "system": SYSTEM_PROMPT,
        "stream": False,  # single JSON document, not a token stream
    }).encode("utf-8")

    try:
        request = urllib.request.Request(
            f"{OLLAMA_URL}/api/generate",
            data=payload,
            headers={"Content-Type": "application/json"},
            method="POST",
        )
        # Generous timeout: a 14B model can take a while on long transcripts.
        with urllib.request.urlopen(request, timeout=120) as resp:
            result = jsonlib.loads(resp.read())
    except Exception as e:
        # Surface any transport/decode failure as a 502, keeping the cause chained.
        raise HTTPException(status_code=502, detail=f"Erreur Ollama : {str(e)}") from e

    return {
        "summary": result.get("response", ""),
        "video_title": req.video_title,
        "model": "qwen3:14b",
        # Aware UTC timestamp (utcnow() is deprecated in Python 3.12).
        "timestamp": datetime.now(timezone.utc).isoformat(),
    }
|
||||
|
||||
class OCRRequest(BaseModel):
    """Request body for POST /ocr (endpoint currently disabled, see ocr())."""

    # Screenshot of a trading platform, base64-encoded.
    image_base64: str
    # Platform hint; "auto" lets the server detect it.
    platform: str = "auto"
|
||||
|
||||
@app.post("/ocr")
def ocr(req: OCRRequest):
    """Placeholder endpoint: OCR is not yet enabled, so every call gets a 503."""
    message = "OCR non encore activé — disponible à l'étape 3"
    raise HTTPException(status_code=503, detail=message)
|
||||
5
requirements.txt
Normal file
5
requirements.txt
Normal file
@@ -0,0 +1,5 @@
|
||||
fastapi==0.115.0
|
||||
uvicorn[standard]==0.30.6
|
||||
pydantic==2.9.0
|
||||
python-multipart==0.0.12
|
||||
httpx==0.27.2
|
||||
Reference in New Issue
Block a user