Backend:
- Add /debug/tables endpoint to dump SQLite tables
- Add /debug/logs endpoint to read scrap.log

Frontend:
- Add react-router-dom for navigation
- Create HomePage and DebugPage components
- Add navigation bar with Debug link
- Style debug page with Gruvbox theme
- Fix package.json dependencies versions

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
116 lines
3.7 KiB
Python
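"""Debug-only endpoints: dump the SQLite tables and read the scrape log."""
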
from __future__ import annotations

import json
from pathlib import Path
from typing import Any

from fastapi import APIRouter, Depends, Query
from sqlalchemy.orm import Session

from backend.app.api.deps import get_db
from backend.app.db.models import Product, ProductSnapshot, ScrapeRun

router = APIRouter(prefix="/debug", tags=["debug"])

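# Directory expected to contain scrap.log (three levels up from this module).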
LOGS_DIR = Path(__file__).resolve().parent.parent.parent / "logs"


@router.get("/tables")
def get_tables(
    limit: int = Query(default=50, le=200),
    db: Session = Depends(get_db),
) -> dict[str, Any]:
    """Dump the SQLite tables for debugging (products, snapshots, scrape_runs)."""
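    # Newest rows first from each table, capped by the limit query parameter.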
    products = db.query(Product).order_by(Product.id.desc()).limit(limit).all()
    snapshots = db.query(ProductSnapshot).order_by(ProductSnapshot.id.desc()).limit(limit).all()
    scrape_runs = db.query(ScrapeRun).order_by(ScrapeRun.id.desc()).limit(limit).all()

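    # Serialize ORM rows into plain dicts; datetime fields become ISO 8601 strings.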
    return {
        "products": [
            {
                "id": p.id,
                "boutique": p.boutique,
                "url": p.url,
                "asin": p.asin,
                "titre": p.titre,
                "url_image": p.url_image,
                "categorie": p.categorie,
                "type": p.type,
                "actif": p.actif,
                "cree_le": p.cree_le.isoformat() if p.cree_le else None,
                "modifie_le": p.modifie_le.isoformat() if p.modifie_le else None,
            }
            for p in products
        ],
        "snapshots": [
            {
                "id": s.id,
                "produit_id": s.produit_id,
                "run_scrap_id": s.run_scrap_id,
                "scrape_le": s.scrape_le.isoformat() if s.scrape_le else None,
                "prix_actuel": s.prix_actuel,
                "prix_conseille": s.prix_conseille,
                "prix_min_30j": s.prix_min_30j,
                "etat_stock": s.etat_stock,
                "en_stock": s.en_stock,
                "note": s.note,
                "nombre_avis": s.nombre_avis,
                "prime": s.prime,
                "choix_amazon": s.choix_amazon,
                "offre_limitee": s.offre_limitee,
                "exclusivite_amazon": s.exclusivite_amazon,
                "statut_scrap": s.statut_scrap,
                "message_erreur": s.message_erreur,
            }
            for s in snapshots
        ],
        "scrape_runs": [
            {
                "id": r.id,
                "demarre_le": r.demarre_le.isoformat() if r.demarre_le else None,
                "termine_le": r.termine_le.isoformat() if r.termine_le else None,
                "statut": r.statut,
                "nb_total": r.nb_total,
                "nb_ok": r.nb_ok,
                "nb_echec": r.nb_echec,
                "chemin_log": r.chemin_log,
            }
            for r in scrape_runs
        ],
    }


@router.get("/logs")
def get_logs(
    lines: int = Query(default=100, le=1000),
) -> dict[str, Any]:
    """Read the last lines of the scrape log file."""
    log_file = LOGS_DIR / "scrap.log"

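    # A missing log file is reported as exists=False rather than as an HTTP error.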
    if not log_file.exists():
        return {"logs": [], "file": str(log_file), "exists": False}

    # Read the whole file and keep only the last `lines` entries.
    with open(log_file, encoding="utf-8") as f:
        all_lines = f.readlines()

    recent_lines = all_lines[-lines:]

    # Parse each line as JSON when possible; otherwise keep it as raw text.
    parsed_logs = []
    for line in recent_lines:
        line = line.strip()
        if not line:
            continue
        try:
            parsed_logs.append(json.loads(line))
        except json.JSONDecodeError:
            parsed_logs.append({"raw": line})

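    # total_lines counts the whole file, not just the slice returned in "logs".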
    return {
        "logs": parsed_logs,
        "file": str(log_file),
        "exists": True,
        "total_lines": len(all_lines),
    }