This commit is contained in:
2026-01-25 14:48:26 +01:00
parent 5c3e6b84a4
commit c56a4632a2
958 changed files with 1149102 additions and 123 deletions

View File

@@ -44,4 +44,4 @@ class BackendConfig(BaseModel):
@lru_cache(maxsize=1)
def load_config() -> BackendConfig:
    """Load and validate the backend configuration from CONFIG_PATH.

    Cached (single slot) so the config file is read and parsed once per
    process instead of on every request.
    """
    # pydantic v2: parse_file() is gone — read the file explicitly and
    # validate the raw JSON payload. (The diff had left the old v1
    # parse_file return in place, making this line unreachable.)
    return BackendConfig.model_validate_json(CONFIG_PATH.read_text(encoding="utf-8"))

View File

@@ -1,5 +1,7 @@
from __future__ import annotations
from datetime import datetime, timedelta
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.interval import IntervalTrigger
from loguru import logger
@@ -17,12 +19,49 @@ def start_scheduler() -> None:
config = load_config()
interval = config.scrape.interval_minutes
# Premier run après l'intervalle défini (pas immédiatement au démarrage)
first_run = datetime.now() + timedelta(minutes=interval)
scheduler.add_job(
scrape_all,
trigger=IntervalTrigger(minutes=interval),
id="scheduled-scrape-all",
replace_existing=True,
next_run_time=None,
next_run_time=first_run,
)
scheduler.start()
logger.info("Scheduler démarré avec un intervalle de %s minutes", interval)
logger.info("Scheduler démarré avec un intervalle de {} minutes (prochain run: {})", interval, first_run.strftime("%H:%M:%S"))
def get_scheduler_status() -> dict:
    """Report the scheduler's current state.

    Returns a dict with the running flag, the configured scrape interval,
    whether the scrape job is registered, and — when a run is scheduled —
    its next fire time (ISO 8601) plus the minutes remaining until then.
    """
    current_job = scheduler.get_job("scheduled-scrape-all")
    cfg = load_config()
    result = {
        "running": scheduler.running,
        "interval_minutes": cfg.scrape.interval_minutes,
        "job_exists": current_job is not None,
        "next_run_time": None,
        "next_run_in_minutes": None,
    }
    next_run = current_job.next_run_time if current_job else None
    if next_run:
        result["next_run_time"] = next_run.isoformat()
        # Compute the remaining time in the job's own timezone so we never
        # subtract a naive datetime from an aware one.
        remaining = next_run - datetime.now(next_run.tzinfo)
        result["next_run_in_minutes"] = round(remaining.total_seconds() / 60, 1)
    return result
def trigger_next_run() -> dict:
    """Force the scheduled scrape job to fire immediately.

    Returns a success dict, or an error dict when the scrape job is not
    registered with the scheduler.
    """
    if scheduler.get_job("scheduled-scrape-all") is None:
        return {"success": False, "error": "Job non trouvé"}
    # Reschedule the existing job so its next fire time is right now;
    # the interval trigger then resumes its normal cadence afterwards.
    scheduler.modify_job("scheduled-scrape-all", next_run_time=datetime.now())
    logger.info("Prochain scrape programmé pour maintenant")
    return {"success": True, "message": "Scrape programmé pour exécution immédiate"}