fix(ai-service): gestion erreurs, health check, limite taille, run_in_executor
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -1,4 +1,4 @@
|
|||||||
# Pinned to an exact patch release so builds are reproducible.
FROM python:3.11.11-slim

WORKDIR /app

# Copy requirements alone first: dependency installation stays cached
# in its own layer independently of application-code changes.
COPY requirements.txt .
|
|||||||
@@ -1,14 +1,16 @@
|
|||||||
|
import asyncio
|
||||||
import io
|
import io
|
||||||
import os
|
import os
|
||||||
from typing import List
|
from typing import List
|
||||||
|
|
||||||
from fastapi import FastAPI, File, UploadFile
|
from fastapi import FastAPI, File, HTTPException, UploadFile
|
||||||
from PIL import Image
|
from PIL import Image, UnidentifiedImageError
|
||||||
|
|
||||||
# FastAPI application exposing the plant-detection endpoints.
app = FastAPI(title="AI Plant Detection Service")

# Lazily-loaded YOLO model; stays None until get_model() first runs.
_model = None
# Where model weights are cached; overridable via the environment.
MODEL_CACHE_DIR = os.environ.get("MODEL_CACHE_DIR", "/models")
# Maximum accepted upload size for /detect.
MAX_FILE_SIZE = 10 * 1024 * 1024  # 10 MB
||||||
def get_model():
    """Return the YOLO plant-detection model, loading it on first call.

    The loaded model is memoized in the module-level ``_model`` so later
    calls are free.

    Raises:
        RuntimeError: if the YOLO model cannot be loaded.
    """
    # Without this declaration the assignment below would make ``_model``
    # function-local and the ``is None`` check would fail at runtime.
    global _model
    if _model is None:
        # Imported lazily so the service can start (and /health respond)
        # before the heavy ultralytics dependency is touched.
        from ultralytics import YOLO

        os.makedirs(MODEL_CACHE_DIR, exist_ok=True)
        try:
            _model = YOLO("foduucom/plant-leaf-detection-and-classification")
        except Exception as e:
            # Typed error that the /detect handler maps to HTTP 503.
            raise RuntimeError(f"Impossible de charger le modèle YOLO: {e}") from e
    return _model
|
||||||
|
|
||||||
|
|
||||||
@app.get("/health")
|
def _run_inference(img: Image.Image) -> list:
|
||||||
def health():
|
|
||||||
return {"status": "ok"}
|
|
||||||
|
|
||||||
|
|
||||||
@app.post("/detect")
|
|
||||||
async def detect(file: UploadFile = File(...)):
|
|
||||||
data = await file.read()
|
|
||||||
img = Image.open(io.BytesIO(data)).convert("RGB")
|
|
||||||
|
|
||||||
model = get_model()
|
model = get_model()
|
||||||
results = model.predict(img, conf=0.25, iou=0.45, verbose=False)
|
results = model.predict(img, conf=0.25, iou=0.45, verbose=False)
|
||||||
|
|
||||||
detections = []
|
detections = []
|
||||||
if results and results[0].boxes:
|
if results and results[0].boxes:
|
||||||
boxes = results[0].boxes
|
boxes = results[0].boxes
|
||||||
@@ -45,3 +40,29 @@ async def detect(file: UploadFile = File(...)):
|
|||||||
"confidence": round(conf, 3),
|
"confidence": round(conf, 3),
|
||||||
})
|
})
|
||||||
return detections
|
return detections
|
||||||
|
|
||||||
|
|
||||||
|
@app.get("/health")
def health():
    """Liveness probe; also reports whether the YOLO model is in memory."""
    model_loaded = _model is not None
    return {"status": "ok", "model_loaded": model_loaded}
||||||
|
|
||||||
|
|
||||||
|
@app.post("/detect")
async def detect(file: UploadFile = File(...)):
    """Run plant detection on an uploaded image.

    Returns the list of detections produced by ``_run_inference``.

    Raises:
        HTTPException: 413 if the upload exceeds MAX_FILE_SIZE,
            400 if the payload is not a decodable image,
            503 if the model could not be loaded.
    """
    data = await file.read()

    # Reject oversized uploads before doing any decoding work.
    if len(data) > MAX_FILE_SIZE:
        raise HTTPException(status_code=413, detail="Fichier trop volumineux (max 10 MB)")

    try:
        img = Image.open(io.BytesIO(data)).convert("RGB")
    except (UnidentifiedImageError, OSError) as e:
        raise HTTPException(status_code=400, detail=f"Image invalide: {e}") from e

    try:
        # get_running_loop() is the non-deprecated form inside a coroutine;
        # get_event_loop() emits a DeprecationWarning here since 3.10.
        # Inference runs in the default thread pool so the event loop stays
        # responsive during the blocking model call.
        loop = asyncio.get_running_loop()
        detections = await loop.run_in_executor(None, _run_inference, img)
    except RuntimeError as e:
        # RuntimeError is raised by get_model() when the model fails to load.
        raise HTTPException(status_code=503, detail=str(e)) from e

    return detections
|
||||||
|
|||||||
Reference in New Issue
Block a user