ipwatch
This commit is contained in:
1
backend/app/__init__.py
Executable file
1
backend/app/__init__.py
Executable file
@@ -0,0 +1 @@
|
||||
# IPWatch Backend Application
|
||||
1
backend/app/core/__init__.py
Executable file
1
backend/app/core/__init__.py
Executable file
@@ -0,0 +1 @@
|
||||
# Core configuration modules
|
||||
167
backend/app/core/config.py
Executable file
167
backend/app/core/config.py
Executable file
@@ -0,0 +1,167 @@
|
||||
"""
|
||||
Configuration management pour IPWatch
|
||||
Charge et valide le fichier config.yaml
|
||||
"""
|
||||
import yaml
|
||||
from pathlib import Path
|
||||
from typing import Dict, Any, List, Optional
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
class AppConfig(BaseModel):
    """Application-level settings (name/version exposed by the API)."""
    name: str = "IPWatch"
    version: str = "1.0.0"
    debug: bool = False  # verbose/debug behavior — consumed elsewhere; confirm usage
|
||||
|
||||
|
||||
class NetworkConfig(BaseModel):
    """Network settings for the scanned segment."""
    cidr: str  # required — presumably CIDR notation (e.g. "192.168.1.0/24"); confirm against config.yaml
    gateway: Optional[str] = None
    dns: Optional[List[str]] = None
|
||||
|
||||
|
||||
class ScanConfig(BaseModel):
    """Scan scheduling and tuning parameters."""
    ping_interval: int = 60  # seconds
    ping_count: int = 1  # number of pings per IP
    port_scan_interval: int = 300  # seconds
    parallel_pings: int = 50  # presumably the concurrency limit for pings — confirm in scanner
    timeout: float = 1.0  # per-probe timeout, seconds — assumed; confirm in scanner
    force_vendor_update: bool = False
|
||||
|
||||
|
||||
class PortsConfig(BaseModel):
    """Ports to probe during port scans."""
    # Mutable default is safe here: pydantic deep-copies field defaults per instance.
    ranges: List[str] = ["22", "80", "443", "3389", "8080"]
    protocols: Optional[Dict[int, str]] = None  # mapping port -> protocol label
|
||||
|
||||
|
||||
class HistoryConfig(BaseModel):
    """History retention settings (used by the hourly cleanup job)."""
    retention_hours: int = 24
|
||||
|
||||
|
||||
class UIConfig(BaseModel):
    """Frontend rendering settings (grid cells, fonts, details panel)."""
    offline_transparency: float = 0.5  # presumably the alpha applied to offline cells — confirm in frontend
    show_mac: bool = True
    show_vendor: bool = True
    cell_size: int = 30
    font_size: int = 10
    cell_gap: float = 2
    details_font_size: int = 13
    details_spacing: int = 2
    architecture_title_font_size: int = 18
|
||||
|
||||
|
||||
class LinksConfig(BaseModel):
    """External link settings."""
    hardware_bench_url: Optional[str] = None  # URL target for hardware-bench hosts — confirm consumer
|
||||
|
||||
|
||||
class ColorsConfig(BaseModel):
    """Hex color per IP-cell state (defaults follow a Monokai-like palette)."""
    free: str = "#75715E"
    online_known: str = "#A6E22E"
    online_unknown: str = "#66D9EF"
    offline_known: str = "#F92672"
    offline_unknown: str = "#AE81FF"
    mac_changed: str = "#FD971F"
    network_device: str = "#1E3A8A"
|
||||
|
||||
|
||||
class OPNsenseConfig(BaseModel):
    """OPNsense API connection settings (integration disabled by default)."""
    enabled: bool = False
    host: str = ""
    api_key: str = ""
    api_secret: str = ""
    verify_ssl: bool = False  # NOTE(review): TLS verification off by default — acceptable for LAN use only
    protocol: str = "http"  # "http" or "https"
|
||||
|
||||
|
||||
class DatabaseConfig(BaseModel):
    """Database settings (path to the SQLite file)."""
    path: str = "./data/db.sqlite"
|
||||
|
||||
|
||||
class SubnetConfig(BaseModel):
    """One named subnet entry; all fields are required."""
    name: str
    cidr: str
    start: str  # first address of the range — assumed dotted-quad string; confirm
    end: str    # last address of the range
    description: str
|
||||
|
||||
|
||||
class HostConfig(BaseModel):
    """A known host and its physical location."""
    name: str
    location: str
    ip: Optional[str] = None
    ip_parent: Optional[str] = None        # parent host's IP, per the field name — confirm semantics
    ip_enfant: Optional[List[str]] = None  # child hosts' IPs ("enfant" = child)
|
||||
|
||||
|
||||
class IPWatchConfig(BaseModel):
    """Top-level IPWatch configuration (mirrors config.yaml).

    Only ``network`` is required; every other section falls back to its
    model defaults when absent from the YAML file.
    """
    # pydantic v2 style configuration dict
    model_config = {"arbitrary_types_allowed": True}

    app: AppConfig = Field(default_factory=AppConfig)
    network: NetworkConfig  # required: the segment to scan
    subnets: List[SubnetConfig] = Field(default_factory=list)
    ip_classes: Dict[str, Any] = Field(default_factory=dict)  # free-form; schema not visible here — confirm
    scan: ScanConfig = Field(default_factory=ScanConfig)
    ports: PortsConfig = Field(default_factory=PortsConfig)
    locations: List[str] = Field(default_factory=list)
    hosts: List[HostConfig] = Field(default_factory=list)
    history: HistoryConfig = Field(default_factory=HistoryConfig)
    ui: UIConfig = Field(default_factory=UIConfig)
    links: LinksConfig = Field(default_factory=LinksConfig)
    colors: ColorsConfig = Field(default_factory=ColorsConfig)
    database: DatabaseConfig = Field(default_factory=DatabaseConfig)
    opnsense: OPNsenseConfig = Field(default_factory=OPNsenseConfig)
|
||||
|
||||
|
||||
class ConfigManager:
    """Process-wide configuration singleton.

    Loads ``config.yaml`` into an :class:`IPWatchConfig` and caches it on
    the class so every caller sees the same configuration object.
    """

    _instance: Optional['ConfigManager'] = None
    # Quoted annotation: behavior-neutral, but lets the class be defined
    # without evaluating the name eagerly.
    _config: Optional["IPWatchConfig"] = None

    def __new__(cls):
        # Classic singleton: every instantiation returns the same object.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def load_config(self, config_path: str = "./config.yaml") -> "IPWatchConfig":
        """Load and validate the YAML configuration file.

        Raises:
            FileNotFoundError: if *config_path* does not exist.
        """
        path = Path(config_path)

        if not path.exists():
            raise FileNotFoundError(f"Fichier de configuration non trouvé: {config_path}")

        with open(path, 'r', encoding='utf-8') as f:
            yaml_data = yaml.safe_load(f)

        # Fix: yaml.safe_load returns None for an empty document, which made
        # IPWatchConfig(**yaml_data) crash with a TypeError. Fall back to {}
        # so pydantic reports a proper validation error / applies defaults.
        self._config = IPWatchConfig(**(yaml_data or {}))
        self._config_path = config_path
        return self._config

    def reload_config(self) -> "IPWatchConfig":
        """Re-read the configuration from the last loaded path."""
        if not hasattr(self, '_config_path'):
            self._config_path = "./config.yaml"
        return self.load_config(self._config_path)

    @property
    def config(self) -> "IPWatchConfig":
        """Return the current configuration; requires load_config() first."""
        if self._config is None:
            raise RuntimeError("Configuration non chargée. Appelez load_config() d'abord.")
        return self._config


# Global singleton instance shared by the whole backend
config_manager = ConfigManager()
|
||||
101
backend/app/core/database.py
Executable file
101
backend/app/core/database.py
Executable file
@@ -0,0 +1,101 @@
|
||||
"""
|
||||
Configuration de la base de données SQLAlchemy
|
||||
"""
|
||||
from sqlalchemy import create_engine
|
||||
from sqlalchemy.ext.declarative import declarative_base
|
||||
from sqlalchemy.orm import sessionmaker
|
||||
from pathlib import Path
|
||||
|
||||
# Declarative base for the main application's ORM models
Base = declarative_base()
# Separate declarative base for the architecture models (distinct SQLite file)
ArchBase = declarative_base()

# Engine / session factories, populated by init_database() and
# init_architecture_database(); None until initialization.
engine = None
SessionLocal = None
arch_engine = None
ArchSessionLocal = None
|
||||
|
||||
|
||||
def init_database(db_path: str = "./data/db.sqlite"):
    """Open (and create if needed) the main SQLite database.

    Builds the module-level engine and session factory, creates all
    declared tables, then applies the ad-hoc column migrations.
    Returns the engine.
    """
    global engine, SessionLocal

    # Make sure the parent directory exists before SQLite touches the file.
    Path(db_path).parent.mkdir(parents=True, exist_ok=True)

    # check_same_thread=False: sessions may be used from worker threads.
    engine = create_engine(
        f"sqlite:///{db_path}",
        connect_args={"check_same_thread": False},
        echo=False,
    )

    SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)

    # Create missing tables, then patch older schemas in place.
    Base.metadata.create_all(bind=engine)
    _run_migrations(engine)

    return engine
|
||||
|
||||
|
||||
def _run_migrations(eng):
    """Patch pre-existing tables with columns added after their creation."""
    import sqlalchemy

    inspector = sqlalchemy.inspect(eng)

    # Only the 'ip' table needs patching today (dhcp_synced column).
    if 'ip' in inspector.get_table_names():
        present = {column['name'] for column in inspector.get_columns('ip')}
        with eng.connect() as conn:
            if 'dhcp_synced' not in present:
                conn.execute(sqlalchemy.text("ALTER TABLE ip ADD COLUMN dhcp_synced BOOLEAN DEFAULT 0"))
                conn.commit()
                print("✓ Migration: colonne dhcp_synced ajoutée à la table ip")
|
||||
|
||||
|
||||
def init_architecture_database(db_path: str = "./architecture/database/architecture.sqlite"):
    """Open (and create if needed) the dedicated architecture database."""
    global arch_engine, ArchSessionLocal

    # Ensure the containing directory exists first.
    Path(db_path).parent.mkdir(parents=True, exist_ok=True)

    arch_engine = create_engine(
        f"sqlite:///{db_path}",
        connect_args={"check_same_thread": False},
        echo=False,
    )

    ArchSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=arch_engine)

    # Create the architecture tables when missing.
    ArchBase.metadata.create_all(bind=arch_engine)

    return arch_engine
|
||||
|
||||
|
||||
def get_db():
    """FastAPI dependency yielding a main-database session.

    Lazily initializes the database with the default path when
    init_database() has not been called yet.
    """
    # Consistency/robustness fix: previously this raised a cryptic
    # "'NoneType' object is not callable" when the module was not yet
    # initialized; behave like the sibling get_arch_db() instead.
    if SessionLocal is None:
        init_database()
    db = SessionLocal()
    try:
        yield db
    finally:
        # Always return the connection to the pool, even on error.
        db.close()
|
||||
|
||||
|
||||
def get_arch_db():
    """FastAPI dependency yielding an architecture-database session."""
    if ArchSessionLocal is None:
        # First use: open the architecture DB with its default path.
        init_architecture_database()
    session = ArchSessionLocal()
    try:
        yield session
    finally:
        session.close()
|
||||
221
backend/app/main.py
Executable file
221
backend/app/main.py
Executable file
@@ -0,0 +1,221 @@
|
||||
"""
|
||||
Application FastAPI principale pour IPWatch
|
||||
Point d'entrée du backend
|
||||
"""
|
||||
from fastapi import FastAPI
|
||||
from fastapi.middleware.cors import CORSMiddleware
|
||||
from fastapi.staticfiles import StaticFiles
|
||||
from fastapi.responses import FileResponse
|
||||
from contextlib import asynccontextmanager
|
||||
from pathlib import Path
|
||||
|
||||
from backend.app.core.config import config_manager
|
||||
from backend.app.core.database import init_database, get_db
|
||||
from backend.app.routers import ips_router, scan_router, websocket_router
|
||||
from backend.app.routers import architecture as architecture_router
|
||||
from backend.app.routers import config as config_router
|
||||
from backend.app.routers import system as system_router
|
||||
from backend.app.routers import tracking as tracking_router
|
||||
from backend.app.routers import opnsense as opnsense_router
|
||||
from backend.app.services.scheduler import scan_scheduler
|
||||
from backend.app.routers.scan import perform_scan
|
||||
|
||||
|
||||
@asynccontextmanager
async def lifespan(app: FastAPI):
    """
    Application lifespan manager.

    Startup: load config.yaml, initialize the SQLite database, then start
    the scan scheduler with its periodic jobs. Shutdown: stop the scheduler.
    Config/DB failures are fatal; a scheduler failure only logs.
    """
    # Startup
    print("=== Démarrage IPWatch ===")

    # 1. Load the configuration (fatal on failure)
    try:
        config = config_manager.load_config("./config.yaml")
        print(f"✓ Configuration chargée: {config.network.cidr}")
    except Exception as e:
        print(f"✗ Erreur chargement config: {e}")
        raise

    # 2. Initialize the database (fatal on failure)
    try:
        init_database(config.database.path)
        print(f"✓ Base de données initialisée: {config.database.path}")
    except Exception as e:
        print(f"✗ Erreur initialisation DB: {e}")
        raise

    # 3. Start the scheduler (non-fatal: the API still serves without it)
    try:
        scan_scheduler.start()

        # Imported here so SessionLocal is the post-init_database value.
        from backend.app.core.database import SessionLocal

        async def scheduled_scan():
            """Run one scan with its own short-lived DB session."""
            db = SessionLocal()
            try:
                await perform_scan(db)
            finally:
                db.close()

        # Periodic jobs: ping sweep and port scan share the same wrapper,
        # only the interval differs.
        scan_scheduler.add_ping_scan_job(
            scheduled_scan,
            interval_seconds=config.scan.ping_interval
        )

        scan_scheduler.add_port_scan_job(
            scheduled_scan,
            interval_seconds=config.scan.port_scan_interval
        )

        # History cleanup task
        async def cleanup_history():
            """Delete IP history entries older than the retention window."""
            from backend.app.models.ip import IPHistory
            from datetime import datetime, timedelta

            db = SessionLocal()
            try:
                # NOTE(review): utcnow() is naive (and deprecated since 3.12);
                # assumes IPHistory.timestamp is stored naive-UTC — confirm.
                cutoff = datetime.utcnow() - timedelta(hours=config.history.retention_hours)
                deleted = db.query(IPHistory).filter(IPHistory.timestamp < cutoff).delete()
                db.commit()
                print(f"Nettoyage historique: {deleted} entrées supprimées")
            finally:
                db.close()

        scan_scheduler.add_cleanup_job(cleanup_history, interval_hours=1)

        print("✓ Scheduler démarré")
    except Exception as e:
        print(f"✗ Erreur démarrage scheduler: {e}")

    print("=== IPWatch prêt ===\n")

    yield

    # Shutdown
    print("\n=== Arrêt IPWatch ===")
    scan_scheduler.stop()
    print("✓ Scheduler arrêté")
|
||||
|
||||
|
||||
# Create the FastAPI application
app = FastAPI(
    title="IPWatch API",
    description="API backend pour IPWatch - Scanner réseau temps réel",
    version="1.0.0",
    lifespan=lifespan
)

# CORS configuration for the frontend
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # TODO: restrict origins in production
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Register the API routers
app.include_router(ips_router)
app.include_router(scan_router)
app.include_router(websocket_router)
app.include_router(config_router.router)
app.include_router(system_router.router)
app.include_router(tracking_router.router)
app.include_router(architecture_router.router)
app.include_router(opnsense_router.router)

# Serve architecture assets (directory is created if missing so the
# StaticFiles mount does not fail on a fresh install)
architecture_dir = Path("./architecture")
architecture_dir.mkdir(parents=True, exist_ok=True)
app.mount("/architecture", StaticFiles(directory=str(architecture_dir)), name="architecture")
|
||||
|
||||
|
||||
@app.get("/health")
async def health_check():
    """Liveness endpoint reporting the scheduler state."""
    payload = {"status": "healthy"}
    payload["scheduler"] = scan_scheduler.is_running
    return payload
|
||||
|
||||
|
||||
# Serve the built frontend's static files
frontend_dist = Path(__file__).parent.parent.parent / "frontend" / "dist"

if frontend_dist.exists():
    # Mount the built JS/CSS assets
    app.mount("/assets", StaticFiles(directory=str(frontend_dist / "assets")), name="assets")

    # Mount the shared device icons (directory created if missing)
    icons_dir = Path("./data/icons")
    icons_dir.mkdir(parents=True, exist_ok=True)
    app.mount("/icons", StaticFiles(directory=str(icons_dir)), name="icons")

    # Root-level static files (favicon, manifest, etc.)
    @app.get("/favicon.ico")
    async def serve_favicon():
        favicon_path = frontend_dist / "favicon.ico"
        if favicon_path.exists():
            return FileResponse(favicon_path)
        return {"error": "Favicon non trouvée"}

    # Root route serving index.html
    @app.get("/")
    async def serve_frontend():
        """Serve the built Vue frontend."""
        index_file = frontend_dist / "index.html"
        if index_file.exists():
            return FileResponse(index_file)
        return {
            "name": "IPWatch API",
            "version": "1.0.0",
            "status": "running",
            "error": "Frontend non trouvé"
        }

    # Catch-all for Vue Router (SPA) — must stay registered after all other routes
    @app.get("/{full_path:path}")
    async def catch_all(full_path: str):
        """Resolve client-side routes to the SPA entry point."""
        # Never intercept API or websocket routes
        if full_path.startswith("api/") or full_path.startswith("ws"):
            return {"error": "Not found"}

        # Serve a matching static file when one exists; the parents check
        # keeps the resolved path inside frontend_dist (path-traversal guard).
        if ".." not in full_path:
            candidate = (frontend_dist / full_path).resolve()
            if frontend_dist in candidate.parents and candidate.is_file():
                return FileResponse(candidate)

        # Fall back to index.html for every other route
        index_file = frontend_dist / "index.html"
        if index_file.exists():
            return FileResponse(index_file)
        return {"error": "Frontend non trouvé"}
else:
    @app.get("/")
    async def root():
        """Root endpoint (development mode, frontend not built)."""
        return {
            "name": "IPWatch API",
            "version": "1.0.0",
            "status": "running",
            "note": "Frontend non buildé - utilisez le mode dev"
        }
|
||||
|
||||
|
||||
if __name__ == "__main__":
    import uvicorn

    # Development entry point: auto-reload, all interfaces, port 8080.
    uvicorn.run(
        "backend.app.main:app",
        host="0.0.0.0",
        port=8080,
        reload=True
    )
|
||||
3
backend/app/migrations/__init__.py
Normal file
3
backend/app/migrations/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
||||
"""
|
||||
Module de migrations pour la base de données IPWatch
|
||||
"""
|
||||
57
backend/app/migrations/add_architecture_node_table.py
Normal file
57
backend/app/migrations/add_architecture_node_table.py
Normal file
@@ -0,0 +1,57 @@
|
||||
"""
|
||||
Script de migration pour ajouter la table architecture_node
|
||||
Exécuter avec: python -m backend.app.migrations.add_architecture_node_table
|
||||
"""
|
||||
from sqlalchemy import text, create_engine
|
||||
import sys
|
||||
import os
|
||||
|
||||
|
||||
def migrate():
    """Create the 'architecture_node' table if it is absent (idempotent)."""
    try:
        db_path = os.getenv('DB_PATH', './data/db.sqlite')
        engine = create_engine(f"sqlite:///{db_path}", echo=False)

        with engine.connect() as conn:
            # Re-run safety: probe sqlite_master for the table first.
            probe = conn.execute(text("SELECT name FROM sqlite_master WHERE type='table' AND name='architecture_node'"))
            if probe.fetchone():
                print("✓ Table 'architecture_node' existe déjà")
                return

            print("→ Création de la table 'architecture_node'...")
            conn.execute(text("""
                CREATE TABLE architecture_node (
                    id TEXT PRIMARY KEY,
                    type TEXT NOT NULL,
                    x INTEGER NOT NULL,
                    y INTEGER NOT NULL,
                    width INTEGER NOT NULL,
                    height INTEGER NOT NULL,
                    rotation INTEGER NOT NULL,
                    payload TEXT NOT NULL,
                    created_at DATETIME NOT NULL
                )
            """))
            conn.execute(text("CREATE INDEX IF NOT EXISTS idx_arch_node_created_at ON architecture_node(created_at)"))
            conn.commit()
            print("✓ Table 'architecture_node' créée")

    except Exception as e:
        print(f"✗ Erreur migration architecture_node: {str(e)}", file=sys.stderr)
        sys.exit(1)
|
||||
|
||||
|
||||
def rollback():
    """Rollback placeholder: SQLite cannot drop the table safely here."""
    try:
        print("⚠ Rollback non implémenté pour SQLite")
    except Exception as e:
        print(f"✗ Erreur rollback: {str(e)}", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    # "rollback" as the first CLI argument selects the rollback path.
    action = sys.argv[1] if len(sys.argv) > 1 else ""
    if action == "rollback":
        rollback()
    else:
        migrate()
|
||||
62
backend/app/migrations/add_hardware_bench_field.py
Normal file
62
backend/app/migrations/add_hardware_bench_field.py
Normal file
@@ -0,0 +1,62 @@
|
||||
"""
|
||||
Script de migration pour ajouter le champ 'hardware_bench' à la table IP
|
||||
Exécuter avec: python -m backend.app.migrations.add_hardware_bench_field
|
||||
"""
|
||||
from sqlalchemy import text, create_engine
|
||||
import sys
|
||||
import os
|
||||
|
||||
|
||||
def migrate():
    """Add the 'hardware_bench' column and its index to the IP table."""
    try:
        db_path = os.getenv('DB_PATH', './data/db.sqlite')
        engine = create_engine(f"sqlite:///{db_path}", echo=False)

        with engine.connect() as conn:
            # Re-run safety: inspect the current columns first.
            info = conn.execute(text("PRAGMA table_info(ip)"))
            existing = [row[1] for row in info]

            if 'hardware_bench' in existing:
                print("✓ La colonne 'hardware_bench' existe déjà dans la table IP")
                return

            print("→ Ajout de la colonne 'hardware_bench' à la table IP...")
            conn.execute(text("ALTER TABLE ip ADD COLUMN hardware_bench BOOLEAN DEFAULT 0"))

            print("→ Création de l'index sur 'hardware_bench'...")
            conn.execute(text("CREATE INDEX IF NOT EXISTS idx_ip_hardware_bench ON ip(hardware_bench)"))

            conn.commit()
            print("✓ Migration terminée avec succès!")
            print(" - Colonne 'hardware_bench' ajoutée")
            print(" - Index 'idx_ip_hardware_bench' créé")

    except Exception as e:
        print(f"✗ Erreur lors de la migration: {str(e)}", file=sys.stderr)
        sys.exit(1)
|
||||
|
||||
|
||||
def rollback():
    """Rollback placeholder: SQLite cannot DROP COLUMN in place."""
    try:
        db_path = os.getenv('DB_PATH', './data/db.sqlite')
        engine = create_engine(f"sqlite:///{db_path}", echo=False)

        # The connection mirrors migrate()'s structure; dropping a column
        # would require rebuilding the table, so we only inform the user.
        with engine.connect() as conn:
            print("⚠ Rollback non implémenté pour SQLite")
            print(" Pour annuler, restaurez une sauvegarde de la base de données")

    except Exception as e:
        print(f"✗ Erreur lors du rollback: {str(e)}", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    action = sys.argv[1] if len(sys.argv) > 1 else ""
    if action == "rollback":
        rollback()
    else:
        migrate()
|
||||
54
backend/app/migrations/add_icon_fields.py
Normal file
54
backend/app/migrations/add_icon_fields.py
Normal file
@@ -0,0 +1,54 @@
|
||||
"""
|
||||
Script de migration pour ajouter les champs 'icon_filename' et 'icon_url' à la table IP
|
||||
Exécuter avec: python -m backend.app.migrations.add_icon_fields
|
||||
"""
|
||||
from sqlalchemy import text, create_engine
|
||||
import sys
|
||||
import os
|
||||
|
||||
|
||||
def migrate():
    """Add the 'icon_filename' and 'icon_url' columns (with indexes) to the IP table."""
    try:
        db_path = os.getenv('DB_PATH', './data/db.sqlite')
        engine = create_engine(f"sqlite:///{db_path}", echo=False)

        with engine.connect() as conn:
            info = conn.execute(text("PRAGMA table_info(ip)"))
            existing = [row[1] for row in info]

            # Both columns follow the same add-column + add-index recipe.
            for col in ('icon_filename', 'icon_url'):
                if col in existing:
                    print(f"✓ Colonne '{col}' déjà présente")
                    continue
                print(f"→ Ajout de la colonne '{col}'...")
                conn.execute(text(f"ALTER TABLE ip ADD COLUMN {col} TEXT"))
                conn.execute(text(f"CREATE INDEX IF NOT EXISTS idx_ip_{col} ON ip({col})"))

            conn.commit()
            print("✓ Migration terminée avec succès")

    except Exception as e:
        print(f"✗ Erreur lors de la migration: {str(e)}", file=sys.stderr)
        sys.exit(1)
|
||||
|
||||
|
||||
def rollback():
    """Rollback placeholder: SQLite cannot DROP COLUMN in place."""
    try:
        print("⚠ Rollback non implémenté pour SQLite")
    except Exception as e:
        print(f"✗ Erreur lors du rollback: {str(e)}", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    action = sys.argv[1] if len(sys.argv) > 1 else ""
    if action == "rollback":
        rollback()
    else:
        migrate()
|
||||
52
backend/app/migrations/add_network_device_field.py
Normal file
52
backend/app/migrations/add_network_device_field.py
Normal file
@@ -0,0 +1,52 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Migration: Ajouter le champ network_device à la table ip
|
||||
"""
|
||||
import os
|
||||
import sys
|
||||
from sqlalchemy import text, create_engine
|
||||
|
||||
def main():
    """Add the 'network_device' column (plus index) to the ip table."""
    # DB path comes from the environment, with the project default.
    db_path = os.getenv('DB_PATH', './data/db.sqlite')
    engine = create_engine(f"sqlite:///{db_path}", echo=False)

    print("📦 Migration: Ajout du champ network_device")
    print(f"🗄️ Base de données: {db_path}")

    try:
        with engine.connect() as conn:
            # Re-run safety: bail out when the column already exists.
            info = conn.execute(text("PRAGMA table_info(ip)"))
            if 'network_device' in [row[1] for row in info]:
                print("⚠️ La colonne 'network_device' existe déjà. Migration ignorée.")
                return

            print("➕ Ajout de la colonne 'network_device'...")
            conn.execute(text("""
                ALTER TABLE ip
                ADD COLUMN network_device BOOLEAN DEFAULT 0
            """))

            print("🔍 Création de l'index sur 'network_device'...")
            conn.execute(text("""
                CREATE INDEX IF NOT EXISTS idx_ip_network_device ON ip(network_device)
            """))

            conn.commit()
            print("✅ Migration réussie!")

    except Exception as e:
        print(f"❌ Erreur lors de la migration: {e}")
        sys.exit(1)


if __name__ == "__main__":
    main()
|
||||
54
backend/app/migrations/add_scan_log_table.py
Normal file
54
backend/app/migrations/add_scan_log_table.py
Normal file
@@ -0,0 +1,54 @@
|
||||
"""
|
||||
Script de migration pour ajouter la table scan_log
|
||||
Exécuter avec: python -m backend.app.migrations.add_scan_log_table
|
||||
"""
|
||||
from sqlalchemy import text, create_engine
|
||||
import sys
|
||||
import os
|
||||
|
||||
|
||||
def migrate():
    """Create the 'scan_log' table and its indexes if absent (idempotent)."""
    try:
        db_path = os.getenv('DB_PATH', './data/db.sqlite')
        engine = create_engine(f"sqlite:///{db_path}", echo=False)

        with engine.connect() as conn:
            # Re-run safety: probe sqlite_master for the table first.
            probe = conn.execute(text("SELECT name FROM sqlite_master WHERE type='table' AND name='scan_log'"))
            if probe.fetchone():
                print("✓ Table 'scan_log' existe déjà")
                return

            print("→ Création de la table 'scan_log'...")
            conn.execute(text("""
                CREATE TABLE scan_log (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    ip TEXT,
                    status TEXT,
                    message TEXT NOT NULL,
                    created_at DATETIME NOT NULL
                )
            """))
            # Index the timestamp (pruning) and the IP (per-host lookups).
            conn.execute(text("CREATE INDEX IF NOT EXISTS idx_scan_log_created_at ON scan_log(created_at)"))
            conn.execute(text("CREATE INDEX IF NOT EXISTS idx_scan_log_ip ON scan_log(ip)"))
            conn.commit()
            print("✓ Table 'scan_log' créée")

    except Exception as e:
        print(f"✗ Erreur migration scan_log: {str(e)}", file=sys.stderr)
        sys.exit(1)
|
||||
|
||||
|
||||
def rollback():
    """Rollback placeholder: SQLite cannot drop the table safely here."""
    try:
        print("⚠ Rollback non implémenté pour SQLite")
    except Exception as e:
        print(f"✗ Erreur rollback: {str(e)}", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    action = sys.argv[1] if len(sys.argv) > 1 else ""
    if action == "rollback":
        rollback()
    else:
        migrate()
|
||||
70
backend/app/migrations/add_tracked_field.py
Normal file
70
backend/app/migrations/add_tracked_field.py
Normal file
@@ -0,0 +1,70 @@
|
||||
"""
|
||||
Script de migration pour ajouter le champ 'tracked' à la table IP
|
||||
Exécuter avec: python -m backend.app.migrations.add_tracked_field
|
||||
"""
|
||||
from sqlalchemy import text, create_engine
|
||||
import sys
|
||||
import os
|
||||
|
||||
|
||||
def migrate():
    """Add the 'tracked' column and its index to the IP table."""
    try:
        # DB path comes from the environment, with the project default.
        db_path = os.getenv('DB_PATH', './data/db.sqlite')
        engine = create_engine(f"sqlite:///{db_path}", echo=False)

        with engine.connect() as conn:
            # Re-run safety: inspect the current columns first.
            info = conn.execute(text("PRAGMA table_info(ip)"))
            existing = [row[1] for row in info]

            if 'tracked' in existing:
                print("✓ La colonne 'tracked' existe déjà dans la table IP")
                return

            print("→ Ajout de la colonne 'tracked' à la table IP...")
            conn.execute(text("ALTER TABLE ip ADD COLUMN tracked BOOLEAN DEFAULT 0"))

            print("→ Création de l'index sur 'tracked'...")
            conn.execute(text("CREATE INDEX IF NOT EXISTS idx_ip_tracked ON ip(tracked)"))

            conn.commit()
            print("✓ Migration terminée avec succès!")
            print(" - Colonne 'tracked' ajoutée")
            print(" - Index 'idx_ip_tracked' créé")

    except Exception as e:
        print(f"✗ Erreur lors de la migration: {str(e)}", file=sys.stderr)
        sys.exit(1)
|
||||
|
||||
|
||||
def rollback():
    """Rollback placeholder: SQLite cannot DROP COLUMN in place."""
    try:
        db_path = os.getenv('DB_PATH', './data/db.sqlite')
        engine = create_engine(f"sqlite:///{db_path}", echo=False)

        # Dropping a column would require rebuilding the whole table,
        # so we only inform the user.
        with engine.connect() as conn:
            print("⚠ Rollback non implémenté pour SQLite")
            print(" Pour annuler, restaurez une sauvegarde de la base de données")

    except Exception as e:
        print(f"✗ Erreur lors du rollback: {str(e)}", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    action = sys.argv[1] if len(sys.argv) > 1 else ""
    if action == "rollback":
        rollback()
    else:
        migrate()
|
||||
70
backend/app/migrations/add_vm_field.py
Normal file
70
backend/app/migrations/add_vm_field.py
Normal file
@@ -0,0 +1,70 @@
|
||||
"""
|
||||
Script de migration pour ajouter le champ 'vm' à la table IP
|
||||
Exécuter avec: python -m backend.app.migrations.add_vm_field
|
||||
"""
|
||||
from sqlalchemy import text, create_engine
|
||||
import sys
|
||||
import os
|
||||
|
||||
|
||||
def migrate():
    """Add the 'vm' column and its index to the IP table."""
    try:
        # DB path comes from the environment, with the project default.
        db_path = os.getenv('DB_PATH', './data/db.sqlite')
        engine = create_engine(f"sqlite:///{db_path}", echo=False)

        with engine.connect() as conn:
            # Re-run safety: inspect the current columns first.
            info = conn.execute(text("PRAGMA table_info(ip)"))
            existing = [row[1] for row in info]

            if 'vm' in existing:
                print("✓ La colonne 'vm' existe déjà dans la table IP")
                return

            print("→ Ajout de la colonne 'vm' à la table IP...")
            conn.execute(text("ALTER TABLE ip ADD COLUMN vm BOOLEAN DEFAULT 0"))

            print("→ Création de l'index sur 'vm'...")
            conn.execute(text("CREATE INDEX IF NOT EXISTS idx_ip_vm ON ip(vm)"))

            conn.commit()
            print("✓ Migration terminée avec succès!")
            print(" - Colonne 'vm' ajoutée")
            print(" - Index 'idx_ip_vm' créé")

    except Exception as e:
        print(f"✗ Erreur lors de la migration: {str(e)}", file=sys.stderr)
        sys.exit(1)
|
||||
|
||||
|
||||
def rollback():
    """Rollback of the 'vm' migration — not supported on SQLite."""
    try:
        database_file = os.getenv('DB_PATH', './data/db.sqlite')
        engine = create_engine(f"sqlite:///{database_file}", echo=False)

        with engine.connect() as conn:
            # SQLite cannot DROP COLUMN in-place; the table would have to be rebuilt.
            print("⚠ Rollback non implémenté pour SQLite")
            print(" Pour annuler, restaurez une sauvegarde de la base de données")
    except Exception as e:
        print(f"✗ Erreur lors du rollback: {str(e)}", file=sys.stderr)
        sys.exit(1)
|
||||
|
||||
|
||||
# CLI entry point: `python -m backend.app.migrations.add_vm_field rollback`
# undoes the migration; any other invocation applies it.
if __name__ == "__main__":
    if len(sys.argv) > 1 and sys.argv[1] == "rollback":
        rollback()
    else:
        migrate()
|
||||
60
backend/app/migrations/create_architecture_db.py
Normal file
60
backend/app/migrations/create_architecture_db.py
Normal file
@@ -0,0 +1,60 @@
|
||||
"""
|
||||
Script pour créer la base SQLite dédiée à l'architecture
|
||||
Exécuter avec: python -m backend.app.migrations.create_architecture_db
|
||||
"""
|
||||
from sqlalchemy import text, create_engine
|
||||
import sys
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
def migrate():
    """Create the dedicated SQLite database and table for the architecture editor."""
    try:
        # Database file from ARCH_DB_PATH env var, parent directories created as needed.
        db_file = Path(os.getenv("ARCH_DB_PATH", "./architecture/database/architecture.sqlite"))
        db_file.parent.mkdir(parents=True, exist_ok=True)
        engine = create_engine(f"sqlite:///{db_file}", echo=False)

        with engine.connect() as conn:
            # Idempotence: bail out if the table already exists.
            probe = conn.execute(text("SELECT name FROM sqlite_master WHERE type='table' AND name='architecture_node'"))
            if probe.fetchone():
                print("✓ Table 'architecture_node' existe déjà")
                return

            print("→ Création de la table 'architecture_node'...")
            conn.execute(text("""
                CREATE TABLE architecture_node (
                    id TEXT PRIMARY KEY,
                    type TEXT NOT NULL,
                    x INTEGER NOT NULL,
                    y INTEGER NOT NULL,
                    width INTEGER NOT NULL,
                    height INTEGER NOT NULL,
                    rotation INTEGER NOT NULL,
                    payload TEXT NOT NULL,
                    created_at DATETIME NOT NULL
                )
            """))
            conn.execute(text("CREATE INDEX IF NOT EXISTS idx_arch_node_created_at ON architecture_node(created_at)"))
            conn.commit()
            print(f"✓ Base architecture créée: {db_file}")
    except Exception as e:
        print(f"✗ Erreur création base architecture: {str(e)}", file=sys.stderr)
        sys.exit(1)
|
||||
|
||||
|
||||
def rollback():
    """Rollback of the architecture DB creation — not supported on SQLite.

    The original wrapped a single `print` in try/except with `sys.exit(1)`;
    since the body cannot raise, the handler was unreachable dead code and
    has been removed.
    """
    print("⚠ Rollback non implémenté pour SQLite")
|
||||
|
||||
|
||||
# CLI entry point: `python -m backend.app.migrations.create_architecture_db rollback`
# undoes the migration; any other invocation applies it.
if __name__ == "__main__":
    if len(sys.argv) > 1 and sys.argv[1] == "rollback":
        rollback()
    else:
        migrate()
|
||||
8
backend/app/models/__init__.py
Executable file
8
backend/app/models/__init__.py
Executable file
@@ -0,0 +1,8 @@
|
||||
"""
|
||||
Modèles SQLAlchemy pour IPWatch
|
||||
"""
|
||||
from .ip import IP, IPHistory
|
||||
from .scan_log import ScanLog
|
||||
from .architecture import ArchitectureNode
|
||||
|
||||
__all__ = ["IP", "IPHistory", "ScanLog", "ArchitectureNode"]
|
||||
22
backend/app/models/architecture.py
Normal file
22
backend/app/models/architecture.py
Normal file
@@ -0,0 +1,22 @@
|
||||
"""
|
||||
Modèles SQLAlchemy pour l'éditeur d'architecture
|
||||
"""
|
||||
from datetime import datetime
|
||||
from sqlalchemy import Column, String, Integer, DateTime, Text
|
||||
|
||||
from backend.app.core.database import ArchBase
|
||||
|
||||
|
||||
class ArchitectureNode(ArchBase):
    """Persisted node of the architecture-editor canvas (one row per node)."""
    __tablename__ = "architecture_node"

    id = Column(String, primary_key=True, index=True)  # client- or server-generated UUID
    type = Column(String, nullable=False)  # node kind
    x = Column(Integer, nullable=False, default=0)  # canvas position
    y = Column(Integer, nullable=False, default=0)
    width = Column(Integer, nullable=False, default=50)
    height = Column(Integer, nullable=False, default=50)
    rotation = Column(Integer, nullable=False, default=0)  # presumably degrees — TODO confirm
    payload = Column(Text, nullable=False, default="{}")  # arbitrary JSON serialized as text
    created_at = Column(DateTime, default=datetime.utcnow)  # naive UTC timestamp
|
||||
95
backend/app/models/ip.py
Executable file
95
backend/app/models/ip.py
Executable file
@@ -0,0 +1,95 @@
|
||||
"""
|
||||
Modèles de données pour les adresses IP et leur historique
|
||||
Basé sur modele-donnees.md
|
||||
"""
|
||||
from sqlalchemy import Column, String, Boolean, DateTime, Integer, ForeignKey, Index, JSON
|
||||
from sqlalchemy.orm import relationship
|
||||
from datetime import datetime
|
||||
from backend.app.core.database import Base
|
||||
|
||||
|
||||
class IP(Base):
    """
    Main table of IP addresses.

    Stores the current state and user metadata for every IP seen on the
    network — one row per address. Time-series data lives in IPHistory.
    """
    __tablename__ = "ip"

    # Primary key: the address string itself (no surrogate id).
    ip = Column(String, primary_key=True, index=True)

    # User metadata
    name = Column(String, nullable=True)  # user-assigned display name
    known = Column(Boolean, default=False, index=True)  # flagged as a known device
    tracked = Column(Boolean, default=False, index=True)  # monitored for state changes
    vm = Column(Boolean, default=False, index=True)  # virtual machine
    network_device = Column(Boolean, default=False, index=True)  # switch / router / WiFi AP
    hardware_bench = Column(Boolean, default=False, index=True)  # hardware-bench link available
    location = Column(String, nullable=True)  # e.g. "Bureau", "Serveur"
    host = Column(String, nullable=True)  # host type, e.g. "PC", "Imprimante"
    ip_parent = Column(String, nullable=True)  # logical parent IP (plain string, not a FK)
    ip_enfant = Column(JSON, default=list)  # list of child IPs (JSON array)

    # Timestamps — NOTE(review): datetime.now is naive local time; confirm TZ policy.
    first_seen = Column(DateTime, default=datetime.now)  # first detection
    last_seen = Column(DateTime, default=datetime.now, onupdate=datetime.now)  # last sighting

    # Network state: "online", "offline", "unknown"
    last_status = Column(String, index=True)

    # Network information
    mac = Column(String, nullable=True)  # MAC address
    vendor = Column(String, nullable=True)  # manufacturer resolved from the MAC OUI
    hostname = Column(String, nullable=True)  # network hostname
    link = Column(String, nullable=True)  # custom URL attached to the device
    mac_changed = Column(Boolean, default=False)  # MAC differs from the expected one
    icon_filename = Column(String, nullable=True)  # icon file under /data/icons
    icon_url = Column(String, nullable=True)  # link attached to the icon

    # Open ports, stored as a JSON list
    open_ports = Column(JSON, default=list)

    # OPNsense/Kea DHCP reservation synced
    dhcp_synced = Column(Boolean, default=False)

    # History rows are deleted along with the IP (delete-orphan cascade).
    history = relationship("IPHistory", back_populates="ip_ref", cascade="all, delete-orphan")

    def __repr__(self):
        return f"<IP {self.ip} - {self.last_status} - {self.name or 'unnamed'}>"
|
||||
|
||||
|
||||
class IPHistory(Base):
    """
    Time-series of IP states (default retention: 24h).

    One row per recorded sample of an IP's status and open ports.
    """
    __tablename__ = "ip_history"

    # Auto-incremented surrogate key.
    id = Column(Integer, primary_key=True, autoincrement=True)

    # FK to the parent IP; rows removed with the IP (ondelete CASCADE).
    ip = Column(String, ForeignKey("ip.ip", ondelete="CASCADE"), nullable=False, index=True)

    # When the sample was recorded (naive local time).
    timestamp = Column(DateTime, default=datetime.now, index=True, nullable=False)

    # Status at that moment: "online" / "offline"
    status = Column(String, nullable=False)

    # Open ports observed at that moment (JSON list)
    open_ports = Column(JSON, default=list)

    # Back-reference to the owning IP row.
    ip_ref = relationship("IP", back_populates="history")

    def __repr__(self):
        return f"<IPHistory {self.ip} - {self.timestamp} - {self.status}>"
|
||||
|
||||
|
||||
# Explicitly named indexes.
# NOTE(review): IP.last_status, IPHistory.timestamp and IPHistory.ip already
# declare index=True on their Column definitions, so these statements create
# duplicate indexes under different names — confirm which side is intended
# before removing either.
Index('idx_ip_last_status', IP.last_status)
Index('idx_ip_history_timestamp', IPHistory.timestamp)
Index('idx_ip_history_ip', IPHistory.ip)
|
||||
25
backend/app/models/scan_log.py
Normal file
25
backend/app/models/scan_log.py
Normal file
@@ -0,0 +1,25 @@
|
||||
"""
|
||||
Historique détaillé des scans (logs par IP)
|
||||
"""
|
||||
from sqlalchemy import Column, Integer, String, DateTime, Index
|
||||
from datetime import datetime
|
||||
from backend.app.core.database import Base
|
||||
|
||||
|
||||
class ScanLog(Base):
    """
    Per-IP log lines produced by network scans.
    """
    __tablename__ = "scan_log"

    id = Column(Integer, primary_key=True, autoincrement=True)
    ip = Column(String, index=True, nullable=True)  # None for global (non per-IP) entries
    status = Column(String, nullable=True)  # scan outcome — exact value set not visible here; TODO confirm
    message = Column(String, nullable=False)  # human-readable log text
    created_at = Column(DateTime, default=datetime.now, index=True, nullable=False)  # naive local time

    def __repr__(self):
        return f"<ScanLog {self.id} {self.ip} {self.status}>"
|
||||
|
||||
|
||||
# NOTE(review): created_at already has index=True on the column; this named
# index duplicates it — confirm before removing either.
Index('idx_scan_log_created_at', ScanLog.created_at)
|
||||
8
backend/app/routers/__init__.py
Executable file
8
backend/app/routers/__init__.py
Executable file
@@ -0,0 +1,8 @@
|
||||
"""
|
||||
Routers API pour IPWatch
|
||||
"""
|
||||
from .ips import router as ips_router
|
||||
from .scan import router as scan_router
|
||||
from .websocket import router as websocket_router
|
||||
|
||||
__all__ = ["ips_router", "scan_router", "websocket_router"]
|
||||
132
backend/app/routers/architecture.py
Normal file
132
backend/app/routers/architecture.py
Normal file
@@ -0,0 +1,132 @@
|
||||
"""
|
||||
Endpoints API pour l'éditeur d'architecture
|
||||
"""
|
||||
from fastapi import APIRouter, Depends, HTTPException
|
||||
from sqlalchemy.orm import Session
|
||||
from pydantic import BaseModel
|
||||
from typing import List, Optional, Dict, Any
|
||||
from uuid import uuid4
|
||||
from datetime import datetime
|
||||
import json
|
||||
from pathlib import Path
|
||||
|
||||
from backend.app.core.database import get_arch_db
|
||||
from backend.app.models.architecture import ArchitectureNode
|
||||
|
||||
router = APIRouter(prefix="/api/architecture", tags=["Architecture"])
|
||||
DATA_DIR = Path(__file__).resolve().parents[3] / "data"
|
||||
WORLD_FILE = DATA_DIR / "architecture.json"
|
||||
|
||||
|
||||
class ArchitectureNodeCreate(BaseModel):
    """Request body for node creation; id is generated server-side when omitted."""
    id: Optional[str] = None
    type: str
    x: int
    y: int
    width: int
    height: int
    rotation: int = 0
    payload: Dict[str, Any]  # free-form node data, serialized to the JSON text column
|
||||
|
||||
|
||||
class ArchitectureNodeResponse(BaseModel):
    """Node as returned by the API; payload is the decoded JSON dict."""
    id: str
    type: str
    x: int
    y: int
    width: int
    height: int
    rotation: int
    payload: Dict[str, Any]
    created_at: datetime

    class Config:
        # Allow building the model from ORM attributes.
        from_attributes = True
|
||||
|
||||
|
||||
class ArchitectureWorldPayload(BaseModel):
    """Full canvas state persisted to architecture.json."""
    items: List[Dict[str, Any]]
    splines: Optional[List[Dict[str, Any]]] = None  # None is treated as an empty list on save
|
||||
|
||||
|
||||
@router.get("/nodes", response_model=List[ArchitectureNodeResponse])
async def list_nodes(db: Session = Depends(get_arch_db)):
    """Return every architecture node, oldest first; corrupt payload rows yield {}."""
    def _decode(raw):
        # payload is stored as JSON text; tolerate corrupt rows.
        try:
            return json.loads(raw or "{}")
        except json.JSONDecodeError:
            return {}

    rows = db.query(ArchitectureNode).order_by(ArchitectureNode.created_at.asc()).all()
    return [
        ArchitectureNodeResponse(
            id=row.id,
            type=row.type,
            x=row.x,
            y=row.y,
            width=row.width,
            height=row.height,
            rotation=row.rotation,
            payload=_decode(row.payload),
            created_at=row.created_at,
        )
        for row in rows
    ]
|
||||
|
||||
|
||||
@router.post("/nodes", response_model=ArchitectureNodeResponse)
async def create_node(payload: ArchitectureNodeCreate, db: Session = Depends(get_arch_db)):
    """Persist a new architecture node; a UUID is generated when no id is supplied."""
    node = ArchitectureNode(
        id=payload.id or str(uuid4()),
        type=payload.type,
        x=payload.x,
        y=payload.y,
        width=payload.width,
        height=payload.height,
        rotation=payload.rotation,
        payload=json.dumps(payload.payload),
    )
    db.add(node)
    db.commit()
    db.refresh(node)

    # Echo back the request's dict form rather than re-parsing the stored JSON text.
    return ArchitectureNodeResponse(
        id=node.id,
        type=node.type,
        x=node.x,
        y=node.y,
        width=node.width,
        height=node.height,
        rotation=node.rotation,
        payload=payload.payload,
        created_at=node.created_at,
    )
|
||||
|
||||
|
||||
def ensure_world_file() -> None:
    """Create the data directory and an empty architecture.json when missing."""
    DATA_DIR.mkdir(parents=True, exist_ok=True)
    if WORLD_FILE.exists():
        return
    empty_world = {"items": [], "splines": []}
    WORLD_FILE.write_text(json.dumps(empty_world, indent=2), encoding="utf-8")
|
||||
|
||||
|
||||
@router.get("/world")
async def get_world():
    """Load architecture.json (created on demand); corrupt JSON yields an empty world."""
    ensure_world_file()
    try:
        return json.loads(WORLD_FILE.read_text(encoding="utf-8"))
    except json.JSONDecodeError:
        return {"items": [], "splines": []}
|
||||
|
||||
|
||||
@router.post("/world")
async def save_world(payload: ArchitectureWorldPayload):
    """Persist the world's items and splines to architecture.json."""
    ensure_world_file()
    spline_list = payload.splines if payload.splines is not None else []
    document = {"items": payload.items, "splines": spline_list}
    WORLD_FILE.write_text(json.dumps(document, indent=2), encoding="utf-8")
    return {"status": "ok", "count": len(payload.items), "splines": len(spline_list)}
|
||||
73
backend/app/routers/config.py
Executable file
73
backend/app/routers/config.py
Executable file
@@ -0,0 +1,73 @@
|
||||
"""
|
||||
Routes pour la configuration
|
||||
"""
|
||||
from fastapi import APIRouter, HTTPException
|
||||
from pydantic import BaseModel
|
||||
import yaml
|
||||
from backend.app.core.config import config_manager
|
||||
|
||||
router = APIRouter(prefix="/api/config", tags=["config"])
|
||||
|
||||
@router.get("/ui")
async def get_ui_config():
    """Expose the UI section of the currently loaded configuration."""
    ui = config_manager.config.ui
    return {
        "cell_size": ui.cell_size,
        "font_size": ui.font_size,
        "cell_gap": ui.cell_gap,
        "offline_transparency": ui.offline_transparency,
        "show_mac": ui.show_mac,
        "show_vendor": ui.show_vendor,
        "architecture_title_font_size": ui.architecture_title_font_size
    }
|
||||
|
||||
@router.post("/reload")
async def reload_config():
    """Re-read config.yaml and return the resulting UI settings (500 on failure)."""
    try:
        refreshed = config_manager.reload_config()
        ui = refreshed.ui
        return {
            "success": True,
            "message": "Configuration rechargée avec succès",
            "ui": {
                "cell_size": ui.cell_size,
                "font_size": ui.font_size,
                "cell_gap": ui.cell_gap,
                "offline_transparency": ui.offline_transparency,
                "show_mac": ui.show_mac,
                "show_vendor": ui.show_vendor,
                "architecture_title_font_size": ui.architecture_title_font_size
            }
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Erreur rechargement config: {str(e)}")
|
||||
|
||||
|
||||
class UIConfigUpdate(BaseModel):
    """Request body for the /api/config/ui endpoint."""
    architecture_title_font_size: int  # written back to config.yaml under ui:
|
||||
|
||||
|
||||
@router.post("/ui")
async def update_ui_config(payload: UIConfigUpdate):
    """Persist the architecture title font size into config.yaml, then reload."""
    config_path = "./config.yaml"
    try:
        with open(config_path, "r", encoding="utf-8") as fh:
            document = yaml.safe_load(fh) or {}

        # Ensure the ui: section exists before writing the key.
        ui_section = document.get("ui") or {}
        ui_section["architecture_title_font_size"] = int(payload.architecture_title_font_size)
        document["ui"] = ui_section

        with open(config_path, "w", encoding="utf-8") as fh:
            yaml.safe_dump(document, fh, allow_unicode=True, sort_keys=False)

        refreshed = config_manager.reload_config()
        return {
            "message": "Configuration UI mise à jour",
            "architecture_title_font_size": refreshed.ui.architecture_title_font_size
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Erreur mise à jour config UI: {str(e)}")
|
||||
665
backend/app/routers/ips.py
Executable file
665
backend/app/routers/ips.py
Executable file
@@ -0,0 +1,665 @@
|
||||
"""
|
||||
Endpoints API pour la gestion des IPs
|
||||
"""
|
||||
from fastapi import APIRouter, Depends, HTTPException, UploadFile, File
|
||||
from sqlalchemy.orm import Session
|
||||
from sqlalchemy import desc
|
||||
from typing import List, Optional
|
||||
from datetime import datetime, timedelta
|
||||
import xml.etree.ElementTree as ET
|
||||
import yaml
|
||||
from pathlib import Path
|
||||
import re
|
||||
import time
|
||||
import urllib.request
|
||||
|
||||
from backend.app.core.database import get_db
|
||||
from backend.app.models.ip import IP, IPHistory
|
||||
from backend.app.core.config import config_manager
|
||||
from pydantic import BaseModel
|
||||
|
||||
router = APIRouter(prefix="/api/ips", tags=["IPs"])
|
||||
|
||||
ICONS_DIR = Path("./data/icons")
|
||||
ALLOWED_ICON_EXTENSIONS = {".png", ".jpg", ".jpeg", ".webp", ".svg"}
|
||||
OUI_URL = "https://standards-oui.ieee.org/oui/oui.txt"
|
||||
OUI_PATH = Path("./data/oui/oui.txt")
|
||||
|
||||
|
||||
def _sanitize_filename(filename: str) -> str:
    """Reduce an uploaded filename to a safe basename.

    Strips any directory part, replaces runs of unsafe characters with '_',
    falls back to a timestamped PNG name when nothing usable remains, and
    appends '.png' when the name carries no extension.
    """
    base = re.sub(r"[^A-Za-z0-9._-]+", "_", Path(filename).name)
    if not base or base in {".", ".."}:
        return f"icon_{int(time.time())}.png"
    return base if "." in base else f"{base}.png"
|
||||
|
||||
|
||||
@router.get("/oui/status")
async def oui_status():
    """Report whether the local IEEE OUI file exists, and its last-modified time."""
    if not OUI_PATH.exists():
        return {"exists": False, "updated_at": None}
    mtime = datetime.fromtimestamp(OUI_PATH.stat().st_mtime)
    return {"exists": True, "updated_at": mtime.isoformat()}
|
||||
|
||||
|
||||
@router.post("/oui/update")
async def update_oui(db: Session = Depends(get_db)):
    """Download the IEEE OUI list, then fill in unknown vendors from MAC prefixes."""
    OUI_PATH.parent.mkdir(parents=True, exist_ok=True)
    try:
        req = urllib.request.Request(
            OUI_URL,
            headers={
                "User-Agent": "IPWatch/1.0 (+https://ipwatch.local)"
            }
        )
        with urllib.request.urlopen(req) as resp:
            OUI_PATH.write_bytes(resp.read())
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Erreur téléchargement OUI: {str(e)}")

    # Imported here, not at module level, mirroring the original (presumably
    # to avoid an import cycle with the services package — TODO confirm).
    from backend.app.services.network import OuiLookup

    refreshed = 0
    for row in db.query(IP).filter(IP.mac.isnot(None)).all():
        # Only fill in vendors that are missing or explicitly unknown.
        if row.vendor and row.vendor not in {"Unknown", ""}:
            continue
        found = OuiLookup.lookup(row.mac)
        if found:
            row.vendor = found
            refreshed += 1
    db.commit()

    return {"message": "Liste OUI mise à jour", "updated_vendors": refreshed}
|
||||
|
||||
|
||||
@router.get("/icons")
async def list_icons():
    """List allowed image files in the shared icons directory, sorted by name."""
    ICONS_DIR.mkdir(parents=True, exist_ok=True)
    names = [
        entry.name
        for entry in ICONS_DIR.iterdir()
        if entry.is_file() and entry.suffix.lower() in ALLOWED_ICON_EXTENSIONS
    ]
    return {"icons": sorted(names)}
|
||||
|
||||
|
||||
@router.post("/icons/upload")
async def upload_icon(file: UploadFile = File(...)):
    """Store an uploaded icon in the shared directory after sanitizing its name."""
    ICONS_DIR.mkdir(parents=True, exist_ok=True)
    safe_name = _sanitize_filename(file.filename or "")
    if Path(safe_name).suffix.lower() not in ALLOWED_ICON_EXTENSIONS:
        raise HTTPException(status_code=400, detail="Format d'image non supporté")

    destination = ICONS_DIR / safe_name

    try:
        destination.write_bytes(await file.read())
        return {
            "filename": destination.name,
            "url": f"/icons/{destination.name}"
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Erreur upload: {str(e)}")
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
# Schémas Pydantic pour validation
|
||||
class IPUpdate(BaseModel):
    """Partial-update schema for an IP.

    Every field is optional; the PUT endpoint applies only fields explicitly
    present in the request body (exclude_unset).
    """
    name: Optional[str] = None
    known: Optional[bool] = None
    tracked: Optional[bool] = None
    vm: Optional[bool] = None
    hardware_bench: Optional[bool] = None
    network_device: Optional[bool] = None
    location: Optional[str] = None
    host: Optional[str] = None
    link: Optional[str] = None
    last_status: Optional[str] = None
    mac: Optional[str] = None
    vendor: Optional[str] = None
    hostname: Optional[str] = None
    mac_changed: Optional[bool] = None
    open_ports: Optional[List[int]] = None
    first_seen: Optional[datetime] = None
    last_seen: Optional[datetime] = None
    icon_filename: Optional[str] = None
    icon_url: Optional[str] = None
    ip_parent: Optional[str] = None
    ip_enfant: Optional[List[str]] = None
    dhcp_synced: Optional[bool] = None
|
||||
|
||||
|
||||
class IPResponse(BaseModel):
    """IP row as returned by the API (mirrors the ORM model's columns)."""
    ip: str
    name: Optional[str]
    known: bool
    tracked: Optional[bool] = False
    vm: Optional[bool] = False
    hardware_bench: Optional[bool] = False
    network_device: Optional[bool] = False
    location: Optional[str]
    host: Optional[str]
    first_seen: Optional[datetime]
    last_seen: Optional[datetime]
    last_status: Optional[str]
    mac: Optional[str]
    vendor: Optional[str]
    hostname: Optional[str]
    link: Optional[str]
    mac_changed: Optional[bool] = False
    open_ports: List[int]
    icon_filename: Optional[str]
    icon_url: Optional[str]
    ip_parent: Optional[str]
    ip_enfant: List[str] = []
    dhcp_synced: Optional[bool] = False

    class Config:
        # Allow building the model directly from ORM attributes.
        from_attributes = True
|
||||
|
||||
|
||||
class IPHistoryResponse(BaseModel):
    """History entry as returned by the API."""
    id: int
    ip: str
    timestamp: datetime
    status: str
    open_ports: List[int]

    class Config:
        # Allow building the model directly from ORM attributes.
        from_attributes = True
|
||||
|
||||
|
||||
@router.get("/", response_model=List[IPResponse])
|
||||
async def get_all_ips(
|
||||
status: Optional[str] = None,
|
||||
known: Optional[bool] = None,
|
||||
db: Session = Depends(get_db)
|
||||
):
|
||||
"""
|
||||
Récupère toutes les IPs avec filtres optionnels
|
||||
|
||||
Args:
|
||||
status: Filtrer par statut (online/offline)
|
||||
known: Filtrer par IPs connues/inconnues
|
||||
db: Session de base de données
|
||||
|
||||
Returns:
|
||||
Liste des IPs
|
||||
"""
|
||||
query = db.query(IP)
|
||||
|
||||
if status:
|
||||
query = query.filter(IP.last_status == status)
|
||||
|
||||
if known is not None:
|
||||
query = query.filter(IP.known == known)
|
||||
|
||||
ips = query.all()
|
||||
return ips
|
||||
|
||||
|
||||
@router.get("/{ip_address}", response_model=IPResponse)
async def get_ip(ip_address: str, db: Session = Depends(get_db)):
    """Return the stored record for a single IP address.

    Raises:
        HTTPException 404: when the address is unknown to the database.
    """
    record = db.query(IP).filter(IP.ip == ip_address).first()
    if record is None:
        raise HTTPException(status_code=404, detail="IP non trouvée")
    return record
|
||||
|
||||
|
||||
@router.put("/{ip_address}", response_model=IPResponse)
async def update_ip(
    ip_address: str,
    ip_update: IPUpdate,
    db: Session = Depends(get_db)
):
    """
    Update an IP's metadata.

    Only fields explicitly present in the request body are applied
    (exclude_unset). When ip_parent changes, the children lists of the old
    and new parent rows are kept in sync.

    Raises:
        HTTPException 404: when the IP does not exist.
    """
    ip = db.query(IP).filter(IP.ip == ip_address).first()

    if not ip:
        raise HTTPException(status_code=404, detail="IP non trouvée")

    # Apply only the fields the client actually sent.
    update_data = ip_update.dict(exclude_unset=True)
    # Capture the parent BEFORE the setattr loop below overwrites it.
    old_parent = ip.ip_parent
    new_parent = update_data.get("ip_parent", old_parent)
    for field, value in update_data.items():
        setattr(ip, field, value)

    # Changing host drives the network_device flag ('Network' <=> True).
    if 'host' in update_data:
        ip.network_device = (update_data['host'] == 'Network')

    # Reassign the JSON column explicitly so SQLAlchemy registers the change.
    if "ip_enfant" in update_data and update_data["ip_enfant"] is not None:
        ip.ip_enfant = update_data["ip_enfant"]

    # On re-parenting: remove this IP from the old parent's children,
    # add it to the new parent's children (rebuilding the lists so the
    # JSON column change is detected).
    if new_parent != old_parent:
        if old_parent:
            parent = db.query(IP).filter(IP.ip == old_parent).first()
            if parent and parent.ip_enfant:
                parent.ip_enfant = [child for child in parent.ip_enfant if child != ip.ip]
        if new_parent:
            parent = db.query(IP).filter(IP.ip == new_parent).first()
            if parent:
                current_children = parent.ip_enfant or []
                if ip.ip not in current_children:
                    parent.ip_enfant = current_children + [ip.ip]

    db.commit()
    db.refresh(ip)

    return ip
|
||||
|
||||
|
||||
@router.delete("/{ip_address}")
async def delete_ip(ip_address: str, db: Session = Depends(get_db)):
    """Delete an IP row; its history rows follow via the model's cascade.

    Raises:
        HTTPException 404: when the IP does not exist.
    """
    record = db.query(IP).filter(IP.ip == ip_address).first()
    if record is None:
        raise HTTPException(status_code=404, detail="IP non trouvée")

    db.delete(record)
    db.commit()
    return {"message": f"IP {ip_address} supprimée"}
|
||||
|
||||
|
||||
@router.get("/{ip_address}/history", response_model=List[IPHistoryResponse])
async def get_ip_history(
    ip_address: str,
    hours: int = 24,
    db: Session = Depends(get_db)
):
    """Return the IP's history entries from the last `hours` hours, newest first.

    Raises:
        HTTPException 404: when the IP does not exist.
    """
    if db.query(IP).filter(IP.ip == ip_address).first() is None:
        raise HTTPException(status_code=404, detail="IP non trouvée")

    # Only entries newer than this cutoff are returned.
    cutoff = datetime.now() - timedelta(hours=hours)
    return (
        db.query(IPHistory)
        .filter(IPHistory.ip == ip_address, IPHistory.timestamp >= cutoff)
        .order_by(desc(IPHistory.timestamp))
        .all()
    )
|
||||
|
||||
|
||||
@router.delete("/{ip_address}/history")
async def delete_ip_history(ip_address: str, db: Session = Depends(get_db)):
    """Purge the history of one IP without deleting the IP row itself.

    Raises:
        HTTPException 404: when the IP does not exist.
    """
    if db.query(IP).filter(IP.ip == ip_address).first() is None:
        raise HTTPException(status_code=404, detail="IP non trouvée")

    removed = db.query(IPHistory).filter(IPHistory.ip == ip_address).delete()
    db.commit()
    return {"message": f"Historique de {ip_address} supprimé", "deleted_count": removed}
|
||||
|
||||
|
||||
@router.get("/stats/summary")
async def get_stats(db: Session = Depends(get_db)):
    """Global network counters: total / online / offline / known / unknown."""
    def _count(*criteria):
        # One COUNT query per counter, mirroring the original behavior.
        query = db.query(IP)
        return query.filter(*criteria).count() if criteria else query.count()

    return {
        "total": _count(),
        "online": _count(IP.last_status == "online"),
        "offline": _count(IP.last_status == "offline"),
        "known": _count(IP.known == True),
        "unknown": _count(IP.known == False)
    }
|
||||
|
||||
|
||||
@router.get("/config/options")
async def get_config_options():
    """
    Return configuration options used by the frontend: locations, hosts,
    port protocol names, subnets, app version and feature flags.
    """
    config = config_manager.config

    # Port -> protocol-name mapping, when the config declares one.
    port_protocols = {}
    if hasattr(config.ports, 'protocols') and config.ports.protocols:
        port_protocols = config.ports.protocols

    # Declared subnets (optional config section; hasattr guards older configs).
    subnets = []
    if hasattr(config, 'subnets') and config.subnets:
        subnets = [
            {
                "name": s.name,
                "cidr": s.cidr,
                "start": s.start,
                "end": s.end,
                "description": s.description
            }
            for s in config.subnets
        ]

    return {
        "locations": config.locations,
        "hosts": [{"name": h.name, "location": h.location} for h in config.hosts],
        "port_protocols": port_protocols,
        "subnets": subnets,
        "version": config.app.version,
        # getattr defaults keep the endpoint working when these sections are absent.
        "hardware_bench_url": getattr(config.links, "hardware_bench_url", None),
        "force_vendor_update": getattr(config.scan, "force_vendor_update", False)
    }
|
||||
|
||||
|
||||
class HardwareBenchConfig(BaseModel):
    """Request body for updating the hardware-bench URL (None/empty clears it)."""
    url: Optional[str] = None
|
||||
|
||||
|
||||
class ForceVendorConfig(BaseModel):
    """Request body toggling the forced vendor-update mode."""
    enabled: bool = False
|
||||
|
||||
|
||||
@router.post("/config/hardware-bench")
async def update_hardware_bench(config_update: HardwareBenchConfig):
    """Write the hardware-bench URL into config.yaml and reload the configuration."""
    config_path = "./config.yaml"

    try:
        with open(config_path, "r", encoding="utf-8") as fh:
            document = yaml.safe_load(fh) or {}

        # Ensure the links: section exists before writing the key.
        links_section = document.get("links") or {}
        trimmed = (config_update.url or "").strip()
        links_section["hardware_bench_url"] = trimmed if trimmed else None  # empty -> cleared
        document["links"] = links_section

        with open(config_path, "w", encoding="utf-8") as fh:
            yaml.safe_dump(document, fh, allow_unicode=True, sort_keys=False)

        config_manager.reload_config()
        return {"message": "Lien hardware bench mis à jour"}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Erreur mise à jour config: {str(e)}")
|
||||
|
||||
|
||||
@router.post("/config/force-vendor")
async def update_force_vendor(config_update: ForceVendorConfig):
    """Enable or disable the forced vendor-refresh mode in config.yaml.

    Rewrites ``scan.force_vendor_update`` on disk and reloads the
    configuration so the change takes effect immediately.

    Returns:
        A confirmation message.

    Raises:
        HTTPException: 500 when the file cannot be read or written.
    """
    config_path = "./config.yaml"

    try:
        with open(config_path, "r", encoding="utf-8") as f:
            yaml_data = yaml.safe_load(f) or {}

        # Ensure the "scan" section exists before assigning into it.
        if yaml_data.get("scan") is None:
            yaml_data["scan"] = {}

        yaml_data["scan"]["force_vendor_update"] = bool(config_update.enabled)

        with open(config_path, "w", encoding="utf-8") as f:
            yaml.safe_dump(yaml_data, f, allow_unicode=True, sort_keys=False)

        config_manager.reload_config()
        return {"message": "Mode force fabricant mis à jour"}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Erreur mise à jour config: {str(e)}")
|
||||
|
||||
|
||||
@router.get("/config/content")
async def get_config_content():
    """Return the raw text of config.yaml.

    Returns:
        Dict with a single ``content`` key holding the file body.

    Raises:
        HTTPException: 500 when the file cannot be read.
    """
    try:
        with open("./config.yaml", "r", encoding="utf-8") as f:
            return {"content": f.read()}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Erreur lecture config: {str(e)}")
|
||||
|
||||
|
||||
@router.post("/config/reload")
async def reload_config():
    """Reload the in-memory configuration from config.yaml.

    Returns:
        A confirmation message.

    Raises:
        HTTPException: 500 when reloading fails.
    """
    try:
        config_manager.reload_config()
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Erreur rechargement config: {str(e)}")
    return {"message": "Configuration rechargée avec succès"}
|
||||
|
||||
|
||||
@router.post("/import/ipscan")
async def import_ipscan(file: UploadFile = File(...), db: Session = Depends(get_db)):
    """
    Import hosts from an Angry IP Scanner XML export.

    Existing IPs are enriched (hostname/name/MAC only when currently missing;
    vendor and open ports always refreshed, as the IPScan data is considered
    more complete); unknown IPs are created as new "online" entries.

    Args:
        file: Uploaded XML file.
        db: Database session.

    Returns:
        Import statistics: imported/updated counts plus up to 10 error messages.

    Raises:
        HTTPException: 400 for a non-XML or unparseable file, 500 otherwise.
    """
    if not file.filename.endswith('.xml'):
        raise HTTPException(status_code=400, detail="Le fichier doit être un XML")

    try:
        # Read the whole upload into memory.
        content = await file.read()

        # Parse the XML; on failure, retry once after stripping invalid
        # control characters (some exports contain them).
        try:
            root = ET.fromstring(content)
        except ET.ParseError as e:
            content_str = content.decode('utf-8', errors='ignore')
            # Drop control characters, keeping tab / CR / LF.
            content_str = ''.join(char for char in content_str
                                  if ord(char) >= 32 or char in '\t\r\n')
            try:
                root = ET.fromstring(content_str.encode('utf-8'))
            except ET.ParseError:
                raise HTTPException(status_code=400, detail=f"Fichier XML invalide même après nettoyage: {str(e)}")

        imported = 0
        updated = 0
        errors = []

        # Process every <host> element in the export.
        for host in root.findall('.//host'):
            # Bind the address before the per-host try so the error handler
            # below can always reference it.
            ip_address = host.get('address')
            if not ip_address:
                continue

            try:
                hostname = None
                mac = None
                vendor = None
                ports = []

                for result in host.findall('result'):
                    name = result.get('name')
                    value = result.text.strip() if result.text else ""

                    # "[n/a]" is Angry IP Scanner's placeholder for "no data".
                    if value == "[n/a]":
                        value = None

                    if name == "Nom d'hôte" and value:
                        hostname = value
                    elif name == "Adresse MAC" and value:
                        mac = value
                    elif name == "Constructeur MAC" and value:
                        vendor = value
                    elif name == "Ports" and value:
                        # Ports are exported as a comma-separated list ("22,80,443").
                        try:
                            ports = [int(p.strip()) for p in value.split(',') if p.strip().isdigit()]
                        except Exception:
                            ports = []

                existing_ip = db.query(IP).filter(IP.ip == ip_address).first()

                if existing_ip:
                    # Fill hostname/name/MAC only when currently missing.
                    if hostname:
                        if not existing_ip.hostname:
                            existing_ip.hostname = hostname
                        if not existing_ip.name:
                            existing_ip.name = hostname
                    if mac and not existing_ip.mac:
                        existing_ip.mac = mac
                    # Vendor and ports are always refreshed from IPScan data.
                    if vendor:
                        existing_ip.vendor = vendor
                    if ports:
                        existing_ip.open_ports = ports
                    existing_ip.last_status = "online"
                    existing_ip.last_seen = datetime.now()
                    updated += 1
                else:
                    # Unknown address: create a fresh record, marked unknown.
                    new_ip = IP(
                        ip=ip_address,
                        name=hostname,
                        hostname=hostname,
                        mac=mac,
                        vendor=vendor,
                        open_ports=ports or [],
                        last_status="online",
                        known=False,
                        first_seen=datetime.now(),
                        last_seen=datetime.now()
                    )
                    db.add(new_ip)
                    imported += 1

            except Exception as e:
                # Collect per-host failures but keep importing the rest.
                errors.append(f"Erreur pour {ip_address}: {str(e)}")
                continue

        db.commit()

        return {
            "message": "Import terminé",
            "imported": imported,
            "updated": updated,
            "errors": errors[:10]  # cap the error list in the response
        }

    except HTTPException:
        # Bug fix: without this clause the 400 raised after a failed XML
        # cleanup was caught by the generic handler and re-wrapped as a 500.
        raise
    except ET.ParseError as e:
        raise HTTPException(status_code=400, detail=f"Fichier XML invalide: {str(e)}")
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Erreur import: {str(e)}")
|
||||
164
backend/app/routers/opnsense.py
Normal file
164
backend/app/routers/opnsense.py
Normal file
@@ -0,0 +1,164 @@
|
||||
"""
|
||||
Endpoints API pour l'intégration OPNsense (Kea DHCP)
|
||||
"""
|
||||
import traceback
|
||||
from fastapi import APIRouter, Depends, HTTPException
|
||||
from sqlalchemy.orm import Session
|
||||
from pydantic import BaseModel
|
||||
from typing import Optional
|
||||
|
||||
from backend.app.core.database import get_db
|
||||
from backend.app.core.config import config_manager
|
||||
from backend.app.models.ip import IP
|
||||
from backend.app.services.opnsense_client import OPNsenseClient, OPNsenseAPIError
|
||||
|
||||
router = APIRouter(prefix="/api/opnsense", tags=["OPNsense"])
|
||||
|
||||
|
||||
class DHCPReservationRequest(BaseModel):
    """Payload for creating or updating a Kea DHCP reservation.

    Attributes:
        ip_address: IPv4 address to reserve.
        hw_address: MAC address of the device.
        hostname: Optional hostname for the lease.
        description: Free-text note stored with the reservation.
    """
    ip_address: str
    hw_address: str
    hostname: str = ""
    description: str = "Ajouté par IPWatch"
|
||||
|
||||
|
||||
def get_opnsense_client() -> OPNsenseClient:
    """Return a configured OPNsense API client.

    Raises:
        HTTPException: 503 when the integration is disabled, or when the
            host / API key configuration is incomplete.
    """
    cfg = config_manager.config.opnsense
    # Debug trace: only the API key's last 8 characters are shown.
    print(f"[OPNsense Router] Config: enabled={cfg.enabled}, host={cfg.host}, api_key={'***' + cfg.api_key[-8:] if cfg.api_key else 'VIDE'}")

    if not cfg.enabled:
        raise HTTPException(status_code=503, detail="Intégration OPNsense désactivée")
    if not (cfg.host and cfg.api_key):
        raise HTTPException(status_code=503, detail="Configuration OPNsense incomplète")

    return OPNsenseClient()
|
||||
|
||||
|
||||
@router.get("/status")
async def opnsense_status():
    """Probe the OPNsense API and report connectivity.

    Raises:
        HTTPException: 502 when the connection test fails.
    """
    client = get_opnsense_client()
    try:
        data = await client.test_connection()
    except Exception as e:
        print(f"[OPNsense Router] Erreur status: {type(e).__name__}: {e}")
        traceback.print_exc()
        raise HTTPException(status_code=502, detail=f"Connexion OPNsense échouée: {type(e).__name__}: {str(e)}")
    return {"status": "connected", "data": data}
|
||||
|
||||
|
||||
@router.get("/dhcp/reservations")
async def list_reservations():
    """Return every Kea DHCP reservation known to OPNsense.

    Raises:
        HTTPException: 502 when the OPNsense API call fails.
    """
    client = get_opnsense_client()
    try:
        return await client.search_reservations()
    except Exception as e:
        print(f"[OPNsense Router] Erreur list_reservations: {type(e).__name__}: {e}")
        traceback.print_exc()
        raise HTTPException(status_code=502, detail=f"Erreur récupération réservations: {type(e).__name__}: {str(e)}")
|
||||
|
||||
|
||||
@router.get("/dhcp/reservation/{ip_address}")
async def get_reservation_by_ip(ip_address: str):
    """Look up a Kea DHCP reservation by IP address.

    Returns:
        ``{"found": bool, "reservation": dict | None}``.

    Raises:
        HTTPException: 502 when the OPNsense API call fails.
    """
    client = get_opnsense_client()
    try:
        reservation = await client.find_reservation_by_ip(ip_address)
    except Exception as e:
        print(f"[OPNsense Router] Erreur get_reservation_by_ip: {type(e).__name__}: {e}")
        traceback.print_exc()
        raise HTTPException(status_code=502, detail=f"Erreur recherche réservation: {type(e).__name__}: {str(e)}")

    if reservation:
        return {"found": True, "reservation": reservation}
    return {"found": False, "reservation": None}
|
||||
|
||||
|
||||
@router.post("/dhcp/reservation")
async def upsert_reservation(
    request: DHCPReservationRequest,
    db: Session = Depends(get_db)
):
    """
    Create or update a Kea DHCP reservation for an IP.

    Steps: resolve the Kea subnet UUID for the IP, create or update the
    reservation, reconfigure the Kea service, then flag the matching DB row
    with dhcp_synced=True.

    Args:
        request: Reservation payload (IP, MAC, hostname, description).
        db: Database session.

    Returns:
        Dict with the action performed ("created"/"updated") and the raw
        OPNsense API result.

    Raises:
        HTTPException: 400 when no Kea subnet matches the IP, 422 on an
            OPNsense validation error, 502 on any other API failure.
    """
    print(f"[OPNsense Router] === UPSERT RESERVATION ===")
    print(f"[OPNsense Router] IP: {request.ip_address}, MAC: {request.hw_address}, Hostname: {request.hostname}")

    client = get_opnsense_client()

    try:
        # Step 0: resolve the Kea subnet UUID that contains this IP.
        print(f"[OPNsense Router] Étape 0: Résolution du subnet pour {request.ip_address}...")
        subnet_uuid = await client.find_subnet_for_ip(request.ip_address)
        if not subnet_uuid:
            raise HTTPException(status_code=400, detail=f"Aucun subnet Kea trouvé pour l'IP {request.ip_address}")

        reservation_data = {
            "subnet": subnet_uuid,
            "ip_address": request.ip_address,
            "hw_address": request.hw_address,
            "hostname": request.hostname,
            "description": request.description
        }
        print(f"[OPNsense Router] Données réservation: {reservation_data}")

        # Step 1: check whether a reservation already exists for this IP.
        print(f"[OPNsense Router] Étape 1: Recherche réservation existante...")
        existing = await client.find_reservation_by_ip(request.ip_address)

        if existing:
            # Step 2a: update the existing reservation in place.
            uuid = existing.get("uuid")
            print(f"[OPNsense Router] Étape 2: Mise à jour réservation existante uuid={uuid}")
            if not uuid:
                raise HTTPException(status_code=500, detail="UUID de réservation introuvable")
            result = await client.set_reservation(uuid, reservation_data)
            action = "updated"
        else:
            # Step 2b: create a brand-new reservation.
            print(f"[OPNsense Router] Étape 2: Création nouvelle réservation")
            result = await client.add_reservation(reservation_data)
            action = "created"

        print(f"[OPNsense Router] Étape 2 terminée: action={action}, result={result}")

        # Step 3: apply the change by reconfiguring the Kea service.
        print(f"[OPNsense Router] Étape 3: Reconfiguration Kea...")
        await client.reconfigure_kea()
        print(f"[OPNsense Router] Étape 3 terminée: Kea reconfiguré")

        # Step 4: mark the IP as DHCP-synced in the local database.
        print(f"[OPNsense Router] Étape 4: Mise à jour BDD dhcp_synced=True")
        ip_record = db.query(IP).filter(IP.ip == request.ip_address).first()
        if ip_record:
            ip_record.dhcp_synced = True
            db.commit()
            db.refresh(ip_record)
            print(f"[OPNsense Router] Étape 4 terminée: BDD mise à jour")
        else:
            # Not fatal: the reservation was made, only the local flag is missing.
            print(f"[OPNsense Router] ATTENTION: IP {request.ip_address} non trouvée en BDD")

        print(f"[OPNsense Router] === SUCCÈS: {action} ===")
        return {
            "status": "success",
            "action": action,
            "ip_address": request.ip_address,
            "result": result
        }

    except HTTPException:
        # Re-raise deliberate HTTP errors (400/500 above) untouched.
        raise
    except OPNsenseAPIError as e:
        # Validation failure reported by OPNsense itself -> 422.
        print(f"[OPNsense Router] === ERREUR VALIDATION ===")
        print(f"[OPNsense Router] Message: {str(e)}")
        print(f"[OPNsense Router] Validations: {e.validations}")
        raise HTTPException(status_code=422, detail=str(e))
    except Exception as e:
        # Any other failure is treated as an upstream (gateway) error.
        print(f"[OPNsense Router] === ERREUR ===")
        print(f"[OPNsense Router] Type: {type(e).__name__}")
        print(f"[OPNsense Router] Message: {str(e)}")
        traceback.print_exc()
        raise HTTPException(status_code=502, detail=f"Erreur OPNsense: {type(e).__name__}: {str(e)}")
|
||||
362
backend/app/routers/scan.py
Executable file
362
backend/app/routers/scan.py
Executable file
@@ -0,0 +1,362 @@
|
||||
"""
|
||||
Endpoints API pour le contrôle des scans réseau
|
||||
"""
|
||||
from fastapi import APIRouter, Depends, BackgroundTasks
|
||||
from sqlalchemy.orm import Session
|
||||
from datetime import datetime, timedelta
|
||||
from typing import Dict, Any, Optional, List
|
||||
from pydantic import BaseModel
|
||||
|
||||
from backend.app.core.database import get_db
|
||||
from backend.app.core.config import config_manager
|
||||
from backend.app.models.ip import IP, IPHistory
|
||||
from backend.app.models.scan_log import ScanLog
|
||||
from backend.app.services.network import NetworkScanner, OuiLookup
|
||||
from backend.app.services.websocket import ws_manager
|
||||
|
||||
router = APIRouter(prefix="/api/scan", tags=["Scan"])
|
||||
|
||||
|
||||
class ScanLogResponse(BaseModel):
    """Serialized scan-log entry returned by the /logs endpoint."""
    id: int
    ip: Optional[str]
    status: Optional[str]
    message: str
    created_at: datetime

    class Config:
        # Allow building the model directly from ORM rows.
        from_attributes = True
|
||||
|
||||
|
||||
async def perform_scan(db: Session):
    """
    Run a full network scan and persist the results.

    Designed to run as a background task: pings the configured CIDR, scans
    the configured port ranges, updates/creates IP rows, appends history and
    scan-log entries, and streams progress over WebSocket. All WebSocket
    failures are swallowed so they never abort the scan.

    Args:
        db: Database session.
    """
    try:
        async def scan_log(message: str):
            # Log to stdout and mirror to WebSocket clients (best-effort).
            print(message)
            try:
                await ws_manager.broadcast_scan_log(message)
            except Exception:
                pass

        await scan_log(f"[{datetime.now()}] Début du scan réseau...")

        # Notify clients that a scan is starting (best-effort).
        try:
            await ws_manager.broadcast_scan_start()
        except Exception as e:
            print(f"Erreur broadcast start (ignorée): {e}")

        # Load the current configuration.
        config = config_manager.config
        await scan_log(f"[{datetime.now()}] Config chargée: {config.network.cidr}")

        # Initialise the network scanner.
        scanner = NetworkScanner(
            cidr=config.network.cidr,
            timeout=config.scan.timeout,
            ping_count=config.scan.ping_count
        )

        # Expand configured port ranges ("8000-8010") and single ports
        # into a flat list of integers.
        port_list = []
        for port_range in config.ports.ranges:
            if '-' in port_range:
                start, end = map(int, port_range.split('-'))
                port_list.extend(range(start, end + 1))
            else:
                port_list.append(int(port_range))

        await scan_log(f"[{datetime.now()}] Ports à scanner: {len(port_list)}")

        # Known IPs from the configuration (name/location/host metadata).
        known_ips = config.ip_classes
        await scan_log(f"[{datetime.now()}] IPs connues: {len(known_ips)}")

        # Progress callback forwarded to WebSocket clients; errors are
        # ignored so they cannot block the scan.
        async def progress_callback(current: int, total: int, current_ip: str, status: str, ping_ok: bool):
            try:
                ping_label = "ok" if ping_ok else "fail"
                await ws_manager.broadcast_scan_progress({
                    "current": current,
                    "total": total,
                    "ip": current_ip
                })
                await ws_manager.broadcast_scan_log(
                    f"[{current}/{total}] {current_ip} -> ping:{ping_label} ({status})"
                )
            except Exception:
                pass

        # Launch the actual scan.
        await scan_log(f"[{datetime.now()}] Lancement du scan (parallélisme: {config.scan.parallel_pings})...")
        scan_results = await scanner.full_scan(
            known_ips=known_ips,
            port_list=port_list,
            max_concurrent=config.scan.parallel_pings,
            progress_callback=progress_callback
        )
        await scan_log(f"[{datetime.now()}] Scan terminé: {len(scan_results)} IPs trouvées")

        # Aggregate counters for the final broadcast.
        stats = {
            "total": 0,
            "online": 0,
            "offline": 0,
            "new": 0,
            "updated": 0
        }

        for ip_address, ip_data in scan_results.items():
            stats["total"] += 1

            if ip_data["last_status"] == "online":
                stats["online"] += 1
            else:
                stats["offline"] += 1

            # Per-IP scan-log entry (scan history).
            ping_label = "ok" if ip_data["last_status"] == "online" else "fail"
            log_message = f"Scan {ip_address} -> ping:{ping_label} ({ip_data['last_status']})"
            db.add(ScanLog(
                ip=ip_address,
                status=ip_data["last_status"],
                message=log_message
            ))

            # Does this IP already exist in the database?
            existing_ip = db.query(IP).filter(IP.ip == ip_address).first()

            if existing_ip:
                # Update the existing row.
                old_status = existing_ip.last_status

                # An unknown IP coming back online counts as a "new
                # detection": reset first_seen so it reappears in the
                # "Nouvelles Détections" view.
                if (old_status == "offline" and ip_data["last_status"] == "online" and not existing_ip.known):
                    existing_ip.first_seen = datetime.now()

                # Detect a MAC address change (possible device swap/spoof).
                new_mac = ip_data.get("mac")
                if new_mac and existing_ip.mac and new_mac != existing_ip.mac:
                    existing_ip.mac_changed = True
                    print(f"[ALERTE] MAC changée pour {ip_address}: {existing_ip.mac} -> {new_mac}")
                else:
                    # No change, or no previous MAC recorded.
                    existing_ip.mac_changed = False

                existing_ip.last_status = ip_data["last_status"]
                if ip_data["last_seen"]:
                    existing_ip.last_seen = ip_data["last_seen"]
                existing_ip.mac = ip_data.get("mac") or existing_ip.mac

                # Vendor resolution: fall back to the OUI table when the
                # scan reported nothing usable.
                vendor = ip_data.get("vendor")
                if (not vendor or vendor == "Unknown") and existing_ip.mac:
                    vendor = OuiLookup.lookup(existing_ip.mac) or vendor
                if config.scan.force_vendor_update:
                    # Forced mode: overwrite with any usable vendor.
                    if vendor and vendor != "Unknown":
                        existing_ip.vendor = vendor
                else:
                    # Default mode: only fill in a missing/Unknown vendor.
                    if (not existing_ip.vendor or existing_ip.vendor == "Unknown") and vendor and vendor != "Unknown":
                        existing_ip.vendor = vendor
                existing_ip.hostname = ip_data.get("hostname") or existing_ip.hostname
                existing_ip.open_ports = ip_data.get("open_ports", [])

                # Update host only when present in ip_data (config-driven).
                if "host" in ip_data:
                    existing_ip.host = ip_data["host"]

                # network_device flag is derived from host == "Network";
                # fall back to the stored host when ip_data has none.
                current_host = ip_data.get("host") or existing_ip.host
                existing_ip.network_device = (current_host == "Network")

                # Notify clients when the online/offline state flipped.
                if old_status != ip_data["last_status"]:
                    await ws_manager.broadcast_ip_update({
                        "ip": ip_address,
                        "old_status": old_status,
                        "new_status": ip_data["last_status"]
                    })

                stats["updated"] += 1

            else:
                # Create a brand-new IP row.
                vendor = ip_data.get("vendor")
                if (not vendor or vendor == "Unknown") and ip_data.get("mac"):
                    vendor = OuiLookup.lookup(ip_data.get("mac")) or vendor
                new_ip = IP(
                    ip=ip_address,
                    name=ip_data.get("name"),
                    known=ip_data.get("known", False),
                    network_device=ip_data.get("host") == "Network",
                    location=ip_data.get("location"),
                    host=ip_data.get("host"),
                    first_seen=datetime.now(),
                    last_seen=ip_data.get("last_seen") or datetime.now(),
                    last_status=ip_data["last_status"],
                    mac=ip_data.get("mac"),
                    vendor=vendor,
                    hostname=ip_data.get("hostname"),
                    open_ports=ip_data.get("open_ports", [])
                )
                db.add(new_ip)

                # Notify clients of the newly discovered IP.
                await ws_manager.broadcast_new_ip({
                    "ip": ip_address,
                    "status": ip_data["last_status"],
                    "known": ip_data.get("known", False)
                })

                stats["new"] += 1

            # Append a history sample for this IP.
            history_entry = IPHistory(
                ip=ip_address,
                timestamp=datetime.now(),
                status=ip_data["last_status"],
                open_ports=ip_data.get("open_ports", [])
            )
            db.add(history_entry)

        # Persist everything in one transaction.
        db.commit()

        # Notify clients that the scan finished, with the stats.
        await ws_manager.broadcast_scan_complete(stats)

        print(f"[{datetime.now()}] Scan terminé: {stats}")

    except Exception as e:
        # Top-level guard for a background task: log and roll back so the
        # session is left in a usable state.
        print(f"Erreur lors du scan: {e}")
        db.rollback()
|
||||
|
||||
|
||||
@router.post("/start")
async def start_scan(background_tasks: BackgroundTasks, db: Session = Depends(get_db)):
    """
    Trigger an immediate network scan.

    The scan runs as a FastAPI background task; the response returns at once.

    Returns:
        Confirmation message and the launch timestamp.
    """
    # NOTE(review): the request-scoped session is handed to a background
    # task; depending on the FastAPI version, the get_db generator teardown
    # may close it before the task finishes — confirm against get_db's
    # lifecycle, or have perform_scan open its own session.
    background_tasks.add_task(perform_scan, db)

    return {
        "message": "Scan réseau démarré",
        "timestamp": datetime.now()
    }
|
||||
|
||||
|
||||
@router.get("/logs", response_model=List[ScanLogResponse])
async def get_scan_logs(limit: int = 200, db: Session = Depends(get_db)):
    """Return the most recent scan-log entries, oldest first.

    Args:
        limit: Maximum number of entries to return (default 200).
        db: Database session.
    """
    newest_first = (
        db.query(ScanLog)
        .order_by(ScanLog.created_at.desc())
        .limit(limit)
        .all()
    )
    # The query fetches newest-first; present them in chronological order.
    return newest_first[::-1]
|
||||
|
||||
|
||||
@router.post("/ports/{ip_address}")
async def scan_ip_ports(ip_address: str, db: Session = Depends(get_db)):
    """Run a port scan against a single IP and persist the result.

    Args:
        ip_address: Target IP address.
        db: Database session.

    Returns:
        Dict with the open ports found; on failure, an error message with an
        empty port list (this endpoint never raises).
    """
    try:
        config = config_manager.config

        # Expand configured port ranges ("8000-8010") and single ports into
        # a flat list of integers.
        port_list = []
        for entry in config.ports.ranges:
            if '-' in entry:
                low, high = map(int, entry.split('-'))
                port_list.extend(range(low, high + 1))
            else:
                port_list.append(int(entry))

        scanner = NetworkScanner(
            cidr=config.network.cidr,
            timeout=config.scan.timeout,
            ping_count=config.scan.ping_count
        )

        print(f"[{datetime.now()}] Scan ports pour {ip_address}...")
        open_ports = await scanner.scan_ports(ip_address, port_list)
        print(f"[{datetime.now()}] Ports ouverts pour {ip_address}: {open_ports}")

        # Persist the result when the IP is known in the database.
        ip_record = db.query(IP).filter(IP.ip == ip_address).first()
        if ip_record:
            ip_record.open_ports = open_ports
            ip_record.last_seen = datetime.now()
            db.commit()

        # Push the update to connected WebSocket clients.
        await ws_manager.broadcast_ip_update({
            "ip": ip_address,
            "open_ports": open_ports
        })

        return {
            "message": "Scan de ports terminé",
            "ip": ip_address,
            "open_ports": open_ports,
            "timestamp": datetime.now()
        }

    except Exception as e:
        print(f"Erreur scan ports {ip_address}: {e}")
        return {
            "message": f"Erreur: {str(e)}",
            "ip": ip_address,
            "open_ports": [],
            "timestamp": datetime.now()
        }
|
||||
|
||||
|
||||
@router.post("/cleanup-history")
async def cleanup_history(hours: int = 24, db: Session = Depends(get_db)):
    """
    Delete IP history entries older than the given retention window.

    Args:
        hours: Number of hours to keep (default: 24).
        db: Database session.

    Returns:
        Number of deleted entries and the retention window used.
    """
    cutoff_date = datetime.now() - timedelta(hours=hours)

    # Bulk delete; synchronize_session=False skips per-object session
    # bookkeeping, which is safe here because no IPHistory instances are
    # held in this request's session.
    deleted = db.query(IPHistory).filter(
        IPHistory.timestamp < cutoff_date
    ).delete(synchronize_session=False)

    db.commit()

    return {
        # Fixed: the original used an f-string with no placeholders.
        "message": "Historique nettoyé",
        "deleted_entries": deleted,
        "older_than_hours": hours
    }
|
||||
73
backend/app/routers/system.py
Executable file
73
backend/app/routers/system.py
Executable file
@@ -0,0 +1,73 @@
|
||||
"""
|
||||
Router pour les statistiques système
|
||||
Fournit les métriques RAM et CPU du serveur IPWatch
|
||||
"""
|
||||
from fastapi import APIRouter
|
||||
import psutil
|
||||
from datetime import datetime
|
||||
|
||||
router = APIRouter(prefix="/api/system", tags=["system"])
|
||||
|
||||
|
||||
@router.get("/stats")
async def get_system_stats():
    """
    Collect RAM and CPU statistics for the IPWatch server.

    Returns:
        dict: System metrics:
            - ram_percent: Percentage of RAM in use
            - ram_used / ram_total / ram_available: RAM figures in MB
            - cpu_percent: System CPU utilisation percentage
            - cpu_count: Number of CPU cores
            - process_ram_mb / process_cpu_percent: IPWatch process metrics
            - timestamp: ISO timestamp of the measurement
    """
    # Local import: only needed to off-load blocking psutil sampling.
    import asyncio

    # System memory snapshot (non-blocking).
    memory = psutil.virtual_memory()

    # Bug fix: psutil.cpu_percent(interval=1) and process.cpu_percent(0.1)
    # sleep synchronously, which froze the whole asyncio event loop for
    # ~1.1 s per request inside this async endpoint. Run the sampling in a
    # worker thread instead; the returned values are identical.
    cpu_percent = await asyncio.to_thread(psutil.cpu_percent, 1)

    # IPWatch process metrics.
    process = psutil.Process()
    process_memory = process.memory_info()
    process_cpu_percent = await asyncio.to_thread(process.cpu_percent, 0.1)

    return {
        # System RAM
        "ram_percent": round(memory.percent, 1),
        "ram_used": round(memory.used / (1024 * 1024), 1),  # MB
        "ram_total": round(memory.total / (1024 * 1024), 1),  # MB
        "ram_available": round(memory.available / (1024 * 1024), 1),  # MB

        # System CPU
        "cpu_percent": round(cpu_percent, 1),
        "cpu_count": psutil.cpu_count(),

        # IPWatch process
        "process_ram_mb": round(process_memory.rss / (1024 * 1024), 1),  # MB
        "process_cpu_percent": round(process_cpu_percent, 1),

        # Timestamp
        "timestamp": datetime.now().isoformat()
    }
|
||||
|
||||
|
||||
@router.get("/uptime")
async def get_uptime():
    """Report how long the host system has been up.

    Returns:
        dict: Uptime in seconds and hours, plus the ISO boot timestamp.
    """
    import time

    booted_at = psutil.boot_time()
    elapsed = time.time() - booted_at

    return {
        "uptime_seconds": int(elapsed),
        "uptime_hours": round(elapsed / 3600, 1),
        "boot_time": datetime.fromtimestamp(booted_at).isoformat()
    }
|
||||
227
backend/app/routers/tracking.py
Normal file
227
backend/app/routers/tracking.py
Normal file
@@ -0,0 +1,227 @@
|
||||
"""
|
||||
Endpoints API pour le suivi d'équipements (Wake-on-LAN, shutdown, etc.)
|
||||
"""
|
||||
from fastapi import APIRouter, Depends, HTTPException
|
||||
from sqlalchemy.orm import Session
|
||||
from typing import List, Optional
|
||||
from datetime import datetime
|
||||
from pydantic import BaseModel
|
||||
|
||||
from backend.app.core.database import get_db
|
||||
from backend.app.models.ip import IP
|
||||
|
||||
router = APIRouter(prefix="/api/tracking", tags=["Tracking"])
|
||||
|
||||
|
||||
# Schémas Pydantic
|
||||
class IPTrackingResponse(BaseModel):
    """Serialized view of a tracked IP and its current state."""
    ip: str
    name: Optional[str]
    known: bool
    tracked: bool
    location: Optional[str]
    host: Optional[str]
    last_status: Optional[str]
    mac: Optional[str]
    vendor: Optional[str]
    hostname: Optional[str]
    link: Optional[str]
    last_seen: Optional[datetime]
    open_ports: List[int]

    class Config:
        # Allow building the model directly from ORM rows.
        from_attributes = True
|
||||
|
||||
|
||||
class WOLResponse(BaseModel):
    """Response returned after a Wake-on-LAN attempt."""
    message: str
    ip: str
    mac: str
    success: bool
|
||||
|
||||
|
||||
class ShutdownResponse(BaseModel):
    """Response returned after a shutdown/reboot command."""
    message: str
    ip: str
    success: bool
|
||||
|
||||
|
||||
@router.get("/", response_model=List[IPTrackingResponse])
async def get_tracked_ips(db: Session = Depends(get_db)):
    """List every IP flagged as tracked, with its current state.

    Results are sorted by name, then by address.
    """
    query = (
        db.query(IP)
        .filter(IP.tracked == True)  # noqa: E712 — SQLAlchemy needs the comparison
        .order_by(IP.name, IP.ip)
    )
    return query.all()
|
||||
|
||||
|
||||
@router.post("/wol/{ip_address}", response_model=WOLResponse)
async def wake_on_lan(ip_address: str, db: Session = Depends(get_db)):
    """Send a Wake-on-LAN magic packet to the device at *ip_address*.

    The device must exist in the database and have a recorded MAC address.

    Raises:
        HTTPException: 404 unknown IP, 400 missing MAC, 500 send failure or
            missing 'wakeonlan' dependency.
    """
    device = db.query(IP).filter(IP.ip == ip_address).first()

    if device is None:
        raise HTTPException(
            status_code=404,
            detail=f"IP {ip_address} non trouvée dans la base de données"
        )

    if not device.mac:
        raise HTTPException(
            status_code=400,
            detail=f"Adresse MAC manquante pour {ip_address}. Impossible d'envoyer le paquet WOL."
        )

    try:
        # Imported lazily so the endpoint degrades gracefully when the
        # optional dependency is absent.
        from wakeonlan import send_magic_packet

        send_magic_packet(device.mac)
    except ImportError:
        raise HTTPException(
            status_code=500,
            detail="La bibliothèque 'wakeonlan' n'est pas installée. Exécutez: pip install wakeonlan"
        )
    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=f"Erreur lors de l'envoi du paquet WOL: {str(e)}"
        )

    return WOLResponse(
        message=f"Paquet Wake-on-LAN envoyé avec succès",
        ip=ip_address,
        mac=device.mac,
        success=True
    )
|
||||
|
||||
|
||||
@router.post("/shutdown/{ip_address}", response_model=ShutdownResponse)
async def shutdown_device(ip_address: str, db: Session = Depends(get_db)):
    """
    Send a shutdown command to the device over MQTT.

    Args:
        ip_address: Target device IP.
        db: Database session.

    Raises:
        HTTPException: 404 unknown IP, 400 device already offline,
            500 MQTT failure or missing MQTT service.
    """
    # Look up the device in the database.
    ip_obj = db.query(IP).filter(IP.ip == ip_address).first()

    if not ip_obj:
        raise HTTPException(
            status_code=404,
            detail=f"IP {ip_address} non trouvée dans la base de données"
        )

    if ip_obj.last_status != "online":
        raise HTTPException(
            status_code=400,
            detail=f"L'équipement {ip_address} est déjà hors ligne"
        )

    try:
        # Imported lazily so the endpoint degrades gracefully when the
        # MQTT service is not configured.
        from backend.app.services.mqtt_client import send_mqtt_command

        # Send the shutdown command over MQTT.
        success = send_mqtt_command(ip_address, "shutdown")

        if success:
            return ShutdownResponse(
                message=f"Commande shutdown envoyée à {ip_address} via MQTT",
                ip=ip_address,
                success=True
            )
        else:
            raise HTTPException(
                status_code=500,
                detail="Échec de l'envoi de la commande MQTT"
            )

    except HTTPException:
        # Bug fix: without this clause the 500 raised just above was caught
        # by the generic handler below and re-wrapped with a different
        # detail message.
        raise
    except ImportError:
        raise HTTPException(
            status_code=500,
            detail="Le service MQTT n'est pas configuré. Consultez mqtt/docs/README.md"
        )
    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=f"Erreur lors de l'envoi de la commande: {str(e)}"
        )
|
||||
|
||||
|
||||
@router.post("/reboot/{ip_address}", response_model=ShutdownResponse)
|
||||
async def reboot_device(ip_address: str, db: Session = Depends(get_db)):
|
||||
"""
|
||||
Envoie une commande reboot via MQTT à l'équipement
|
||||
"""
|
||||
# Récupérer l'IP depuis la base
|
||||
ip_obj = db.query(IP).filter(IP.ip == ip_address).first()
|
||||
|
||||
if not ip_obj:
|
||||
raise HTTPException(
|
||||
status_code=404,
|
||||
detail=f"IP {ip_address} non trouvée"
|
||||
)
|
||||
|
||||
if ip_obj.last_status != "online":
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail=f"L'équipement {ip_address} est hors ligne"
|
||||
)
|
||||
|
||||
try:
|
||||
from backend.app.services.mqtt_client import send_mqtt_command
|
||||
|
||||
# Envoyer commande reboot via MQTT
|
||||
success = send_mqtt_command(ip_address, "reboot")
|
||||
|
||||
if success:
|
||||
return ShutdownResponse(
|
||||
message=f"Commande reboot envoyée à {ip_address} via MQTT",
|
||||
ip=ip_address,
|
||||
success=True
|
||||
)
|
||||
else:
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail="Échec de l'envoi de la commande MQTT"
|
||||
)
|
||||
|
||||
except ImportError:
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail="Le service MQTT n'est pas configuré"
|
||||
)
|
||||
except Exception as e:
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail=f"Erreur: {str(e)}"
|
||||
)
|
||||
|
||||
|
||||
@router.patch("/{ip_address}/toggle", response_model=IPTrackingResponse)
|
||||
async def toggle_tracking(ip_address: str, db: Session = Depends(get_db)):
|
||||
"""
|
||||
Bascule l'état de suivi d'une IP (tracked true/false)
|
||||
"""
|
||||
ip_obj = db.query(IP).filter(IP.ip == ip_address).first()
|
||||
|
||||
if not ip_obj:
|
||||
raise HTTPException(
|
||||
status_code=404,
|
||||
detail=f"IP {ip_address} non trouvée"
|
||||
)
|
||||
|
||||
# Inverser l'état tracked
|
||||
ip_obj.tracked = not ip_obj.tracked
|
||||
db.commit()
|
||||
db.refresh(ip_obj)
|
||||
|
||||
return ip_obj
|
||||
35
backend/app/routers/websocket.py
Executable file
35
backend/app/routers/websocket.py
Executable file
@@ -0,0 +1,35 @@
|
||||
"""
|
||||
Endpoint WebSocket pour notifications temps réel
|
||||
"""
|
||||
from fastapi import APIRouter, WebSocket, WebSocketDisconnect
|
||||
from backend.app.services.websocket import ws_manager
|
||||
|
||||
router = APIRouter(tags=["WebSocket"])
|
||||
|
||||
|
||||
@router.websocket("/ws")
|
||||
async def websocket_endpoint(websocket: WebSocket):
|
||||
"""
|
||||
Endpoint WebSocket pour notifications temps réel
|
||||
|
||||
Args:
|
||||
websocket: Connexion WebSocket
|
||||
"""
|
||||
await ws_manager.connect(websocket)
|
||||
|
||||
try:
|
||||
# Boucle de réception (keep-alive)
|
||||
while True:
|
||||
# Recevoir des messages du client (heartbeat)
|
||||
data = await websocket.receive_text()
|
||||
|
||||
# On peut gérer des commandes du client ici si nécessaire
|
||||
# Pour l'instant, on fait juste un echo pour keep-alive
|
||||
if data == "ping":
|
||||
await ws_manager.send_personal_message("pong", websocket)
|
||||
|
||||
except WebSocketDisconnect:
|
||||
ws_manager.disconnect(websocket)
|
||||
except Exception as e:
|
||||
print(f"Erreur WebSocket: {e}")
|
||||
ws_manager.disconnect(websocket)
|
||||
56
backend/app/scripts/check_network_device.py
Normal file
56
backend/app/scripts/check_network_device.py
Normal file
@@ -0,0 +1,56 @@
|
||||
#!/usr/bin/env python3
"""
Check and force-update the network_device flag on stored IPs.

Flat maintenance script: connects to the SQLite database, prints every
IP that has a host assigned, and syncs network_device with the
condition (host == "Network").
"""
import os
import sys

# Make the project root importable when running the script directly.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../..')))

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from backend.app.models.ip import IP

# Database connection; DB_PATH overrides the default local SQLite file.
db_path = os.getenv('DB_PATH', './data/db.sqlite')
db_url = f"sqlite:///{db_path}"
engine = create_engine(db_url, echo=False)
SessionLocal = sessionmaker(bind=engine)

db = SessionLocal()

try:
    # Load every IP row.
    ips = db.query(IP).all()

    print(f"\n📊 Total IPs: {len(ips)}\n")

    updated = 0
    for ip in ips:
        # Only rows with a host assigned are displayed and checked.
        if ip.host:
            status_icon = "🟢" if ip.last_status == "online" else "🔴"
            network_icon = "🔷" if ip.network_device else " "

            print(f"{status_icon} {network_icon} {ip.ip:15s} | Host: {ip.host:15s} | Network: {ip.network_device} | Status: {ip.last_status}")

            # network_device must mirror host == "Network".
            should_be_network = (ip.host == "Network")
            if ip.network_device != should_be_network:
                ip.network_device = should_be_network
                updated += 1
                print(f"  ✓ Flag network_device mis à jour pour {ip.ip}: {should_be_network}")

    # Commit only when something actually changed.
    if updated > 0:
        db.commit()
        print(f"\n✅ {updated} IP(s) mise(s) à jour!")
    else:
        print(f"\n✓ Tous les flags network_device sont déjà à jour")

except Exception as e:
    print(f"❌ Erreur: {e}")
    import traceback
    traceback.print_exc()
finally:
    db.close()
|
||||
118
backend/app/scripts/rebuild_ip_relations.py
Normal file
118
backend/app/scripts/rebuild_ip_relations.py
Normal file
@@ -0,0 +1,118 @@
|
||||
"""
|
||||
Reconstruit ip_parent depuis config.yaml, puis recalcule ip_enfant depuis ip_parent.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import sqlite3
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
import yaml
|
||||
|
||||
|
||||
CONFIG_PATH = Path(__file__).resolve().parents[3] / "config.yaml"
|
||||
|
||||
|
||||
def load_config() -> Dict[str, Any]:
    """Read config.yaml and return its contents (empty dict if blank)."""
    raw = CONFIG_PATH.read_text(encoding="utf-8")
    return yaml.safe_load(raw) or {}
|
||||
|
||||
|
||||
def normalize_children(value: Any) -> Optional[List[str]]:
    """Coerce a config 'ip_enfant' value to a list of strings.

    None stays None (meaning "not specified"); a non-empty string becomes
    a one-element list; a list keeps its truthy items as strings; any
    other type yields an empty list.
    """
    if value is None:
        return None
    if isinstance(value, str):
        if value:
            return [value]
        return []
    if isinstance(value, list):
        children: List[str] = []
        for item in value:
            if item:
                children.append(str(item))
        return children
    return []
|
||||
|
||||
|
||||
def ensure_columns(conn: sqlite3.Connection) -> None:
    """Add the ip_parent/ip_enfant TEXT columns to table `ip` if missing."""
    existing = {row[1] for row in conn.execute("PRAGMA table_info(ip)")}
    for column in ("ip_parent", "ip_enfant"):
        if column not in existing:
            conn.execute(f"ALTER TABLE ip ADD COLUMN {column} TEXT")
    conn.commit()
|
||||
|
||||
|
||||
def collect_parent_mapping(config: Dict[str, Any]) -> Dict[str, Dict[str, Any]]:
    """Gather ip_parent/ip_enfant declarations from the config.

    Scans the `ip_classes` mapping first, then the `hosts` list; values
    found first win (`setdefault` keeps the ip_classes entries).
    """
    mapping: Dict[str, Dict[str, Any]] = {}

    for ip_address, data in (config.get("ip_classes", {}) or {}).items():
        if isinstance(data, dict) and ("ip_parent" in data or "ip_enfant" in data):
            mapping[ip_address] = {
                "ip_parent": data.get("ip_parent"),
                "ip_enfant": normalize_children(data.get("ip_enfant")),
            }

    for host in config.get("hosts", []) or []:
        if not isinstance(host, dict):
            continue
        ip_address = host.get("ip")
        if not ip_address:
            continue
        if "ip_parent" not in host and "ip_enfant" not in host:
            continue
        entry = mapping.setdefault(ip_address, {})
        entry.setdefault("ip_parent", host.get("ip_parent"))
        entry.setdefault("ip_enfant", normalize_children(host.get("ip_enfant")))

    return mapping
|
||||
|
||||
|
||||
def main() -> None:
    """Rebuild ip_parent from config.yaml, then recompute ip_enfant.

    Children lists are fully recomputed from the ip_parent column in a
    second pass, so any stale ip_enfant values are overwritten.
    """
    config = load_config()
    db_path = Path(config.get("database", {}).get("path", "./data/db.sqlite"))
    mapping = collect_parent_mapping(config)

    if not db_path.exists():
        raise FileNotFoundError(f"Base de données introuvable: {db_path}")

    conn = sqlite3.connect(db_path)
    try:
        ensure_columns(conn)

        # Pass 1: write the parent/children values declared in the config.
        if mapping:
            for ip_address, values in mapping.items():
                ip_parent = values.get("ip_parent")
                ip_enfant = values.get("ip_enfant")
                if ip_enfant is not None:
                    conn.execute(
                        "UPDATE ip SET ip_parent = ?, ip_enfant = ? WHERE ip = ?",
                        (ip_parent, json.dumps(ip_enfant), ip_address)
                    )
                else:
                    conn.execute(
                        "UPDATE ip SET ip_parent = ? WHERE ip = ?",
                        (ip_parent, ip_address)
                    )

        # Pass 2: derive children lists by inverting the ip_parent links.
        cursor = conn.execute("SELECT ip, ip_parent FROM ip")
        rows = cursor.fetchall()
        parent_children: Dict[str, List[str]] = {}

        for ip_address, ip_parent in rows:
            if ip_parent:
                parent_children.setdefault(ip_parent, []).append(ip_address)

        # Every row gets a JSON list (possibly empty) in ip_enfant.
        for ip_address, _ in rows:
            children = parent_children.get(ip_address, [])
            conn.execute(
                "UPDATE ip SET ip_enfant = ? WHERE ip = ?",
                (json.dumps(children), ip_address)
            )

        conn.commit()
        print("Reconstruction terminée.")
    finally:
        conn.close()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
98
backend/app/scripts/rebuild_ip_relations_from_hosts.py
Normal file
98
backend/app/scripts/rebuild_ip_relations_from_hosts.py
Normal file
@@ -0,0 +1,98 @@
|
||||
"""
|
||||
Reconstruit ip_parent/ip_enfant en utilisant le champ host et config.yaml.
|
||||
1) Pour chaque IP avec host, retrouve l'IP du host dans config.yaml et met ip_parent.
|
||||
2) Recalcule ip_enfant depuis ip_parent.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import sqlite3
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict
|
||||
|
||||
import yaml
|
||||
|
||||
|
||||
CONFIG_PATH = Path(__file__).resolve().parents[3] / "config.yaml"
|
||||
|
||||
|
||||
def load_config() -> Dict[str, Any]:
    """Load config.yaml; an empty or blank file yields an empty dict."""
    with CONFIG_PATH.open("r", encoding="utf-8") as handle:
        data = yaml.safe_load(handle)
    return data or {}
|
||||
|
||||
|
||||
def ensure_columns(conn: sqlite3.Connection) -> None:
    """Make sure table `ip` has the ip_parent and ip_enfant TEXT columns."""
    rows = conn.execute("PRAGMA table_info(ip)").fetchall()
    present = {row[1] for row in rows}
    missing = [c for c in ("ip_parent", "ip_enfant") if c not in present]
    for column in missing:
        conn.execute(f"ALTER TABLE ip ADD COLUMN {column} TEXT")
    conn.commit()
|
||||
|
||||
|
||||
def host_ip_map(config: Dict[str, Any]) -> Dict[str, str]:
    """Build a lowercase host-name -> IP mapping from the config `hosts` list."""
    result: Dict[str, str] = {}
    for entry in config.get("hosts", []) or []:
        if not isinstance(entry, dict):
            continue
        name = (entry.get("name") or "").strip()
        ip = (entry.get("ip") or "").strip()
        # Both a name and an IP are required; names are lowercased so
        # lookups are case-insensitive.
        if name and ip:
            result[name.lower()] = ip
    return result
|
||||
|
||||
|
||||
def main() -> None:
    """Set ip_parent from the host column via config.yaml, then rebuild ip_enfant."""
    config = load_config()
    db_path = Path(config.get("database", {}).get("path", "./data/db.sqlite"))

    if not db_path.exists():
        raise FileNotFoundError(f"Base de données introuvable: {db_path}")

    host_map = host_ip_map(config)
    conn = sqlite3.connect(db_path)
    try:
        ensure_columns(conn)

        cursor = conn.execute("SELECT ip, host FROM ip")
        rows = cursor.fetchall()

        # Pass 1: resolve each row's host name (case-insensitive) to the
        # parent IP declared in config.yaml and store it in ip_parent.
        updated = 0
        skipped = 0
        for ip_address, host in rows:
            if not host:
                skipped += 1
                continue
            parent_ip = host_map.get(str(host).lower())
            if not parent_ip:
                print(f"[WARN] host sans IP config: {host} (ip {ip_address})")
                skipped += 1
                continue
            conn.execute("UPDATE ip SET ip_parent = ? WHERE ip = ?", (parent_ip, ip_address))
            updated += 1

        print(f"[INFO] ip_parent mis à jour: {updated} | ignorés: {skipped}")

        # Pass 2: rebuild ip_enfant for the config-declared parent IPs.
        # NOTE(review): this pass compares the host COLUMN (a name) against
        # config IP addresses, so rows only count as children when their
        # host field literally stores the parent's IP — unlike pass 1,
        # which matched by lowercase host name. Confirm this asymmetry
        # is intended.
        config_by_ip = {ip for ip in host_map.values()}
        parent_children: Dict[str, list[str]] = {}
        for ip_address, host in rows:
            host_value = (host or "").strip()
            if host_value in config_by_ip:
                parent_children.setdefault(host_value, []).append(ip_address)

        for parent_ip in config_by_ip:
            children = parent_children.get(parent_ip, [])
            conn.execute(
                "UPDATE ip SET ip_enfant = ? WHERE ip = ?",
                (json.dumps(children), parent_ip)
            )

        conn.commit()
        print("[INFO] ip_enfant recalculé.")
    finally:
        conn.close()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
135
backend/app/scripts/update_ip_relations.py
Normal file
135
backend/app/scripts/update_ip_relations.py
Normal file
@@ -0,0 +1,135 @@
|
||||
"""
|
||||
Met à jour la base IP avec les champs ip_parent/ip_enfant depuis config.yaml.
|
||||
Ajoute les colonnes si nécessaire et synchronise les relations.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import sqlite3
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
import yaml
|
||||
|
||||
|
||||
CONFIG_PATH = Path(__file__).resolve().parents[3] / "config.yaml"
|
||||
|
||||
|
||||
def load_config() -> Dict[str, Any]:
    """Parse config.yaml into a dict (empty dict for a blank file)."""
    with CONFIG_PATH.open("r", encoding="utf-8") as fh:
        parsed = yaml.safe_load(fh)
    return parsed if parsed else {}
|
||||
|
||||
|
||||
def normalize_children(value: Any) -> Optional[List[str]]:
    """Normalize an 'ip_enfant' config value.

    Returns None when unspecified, a singleton list for a non-empty
    string, the stringified truthy items for a list, and an empty list
    for anything else.
    """
    if value is None:
        return None
    if isinstance(value, str):
        return [value] if value else []
    if not isinstance(value, list):
        return []
    return [str(item) for item in value if item]
|
||||
|
||||
|
||||
def ensure_columns(conn: sqlite3.Connection) -> None:
    """Idempotently add the ip_parent/ip_enfant TEXT columns to table `ip`."""
    info = conn.execute("PRAGMA table_info(ip)").fetchall()
    names = {column_row[1] for column_row in info}
    if "ip_parent" not in names:
        conn.execute("ALTER TABLE ip ADD COLUMN ip_parent TEXT")
    if "ip_enfant" not in names:
        conn.execute("ALTER TABLE ip ADD COLUMN ip_enfant TEXT")
    conn.commit()
|
||||
|
||||
|
||||
def collect_config_mapping(config: Dict[str, Any]) -> Dict[str, Dict[str, Any]]:
    """Collect ip_parent/ip_enfant overrides from `ip_classes` and `hosts`.

    ip_classes entries take precedence; hosts entries only fill fields
    that are still unset.
    """
    mapping: Dict[str, Dict[str, Any]] = {}

    ip_classes = config.get("ip_classes", {}) or {}
    for ip_address, data in ip_classes.items():
        if not isinstance(data, dict):
            continue
        if "ip_parent" not in data and "ip_enfant" not in data:
            continue
        mapping[ip_address] = {
            "ip_parent": data.get("ip_parent"),
            "ip_enfant": normalize_children(data.get("ip_enfant")),
        }

    for host in config.get("hosts", []) or []:
        if not isinstance(host, dict):
            continue
        ip_address = host.get("ip")
        has_relation = "ip_parent" in host or "ip_enfant" in host
        if ip_address and has_relation:
            entry = mapping.setdefault(ip_address, {})
            entry.setdefault("ip_parent", host.get("ip_parent"))
            entry.setdefault("ip_enfant", normalize_children(host.get("ip_enfant")))

    return mapping
|
||||
|
||||
|
||||
def parse_json_list(value: Optional[str]) -> List[str]:
    """Decode a JSON-encoded list column; anything invalid yields []."""
    if not value:
        return []
    try:
        decoded = json.loads(value)
    except json.JSONDecodeError:
        return []
    if not isinstance(decoded, list):
        return []
    return [str(item) for item in decoded if item]
|
||||
|
||||
|
||||
def main() -> None:
    """Sync ip_parent/ip_enfant columns with config.yaml relations.

    Unlike the rebuild scripts, this one MERGES: COALESCE keeps existing
    ip_parent values when the config does not override them, and the
    final children lists merge existing DB entries with the ones derived
    from ip_parent (duplicates removed, order preserved).
    """
    config = load_config()
    db_path = Path(config.get("database", {}).get("path", "./data/db.sqlite"))
    mapping = collect_config_mapping(config)

    if not db_path.exists():
        raise FileNotFoundError(f"Base de données introuvable: {db_path}")

    conn = sqlite3.connect(db_path)
    try:
        ensure_columns(conn)

        # Pass 1: apply config-declared relations without clobbering
        # existing ip_parent values (COALESCE keeps them when NULL is given).
        if mapping:
            for ip_address, values in mapping.items():
                ip_parent = values.get("ip_parent")
                ip_enfant = values.get("ip_enfant")
                if ip_parent is None and ip_enfant is None:
                    continue
                if ip_enfant is not None:
                    ip_enfant_json = json.dumps(ip_enfant)
                    conn.execute(
                        "UPDATE ip SET ip_parent = COALESCE(?, ip_parent), ip_enfant = ? WHERE ip = ?",
                        (ip_parent, ip_enfant_json, ip_address)
                    )
                else:
                    conn.execute(
                        "UPDATE ip SET ip_parent = COALESCE(?, ip_parent) WHERE ip = ?",
                        (ip_parent, ip_address)
                    )

        # Pass 2: invert ip_parent links into per-parent children lists.
        cursor = conn.execute("SELECT ip, ip_parent, ip_enfant FROM ip")
        rows = cursor.fetchall()
        parent_children: Dict[str, List[str]] = {}

        for ip_address, ip_parent, _ in rows:
            if ip_parent:
                parent_children.setdefault(ip_parent, []).append(ip_address)

        # Merge existing ip_enfant with the derived children;
        # dict.fromkeys deduplicates while preserving order.
        for ip_address, _, ip_enfant_raw in rows:
            existing = parse_json_list(ip_enfant_raw)
            merged = list(dict.fromkeys(existing + parent_children.get(ip_address, [])))
            conn.execute(
                "UPDATE ip SET ip_enfant = ? WHERE ip = ?",
                (json.dumps(merged), ip_address)
            )

        conn.commit()
        print("Mise à jour terminée.")
    finally:
        conn.close()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
7
backend/app/services/__init__.py
Executable file
7
backend/app/services/__init__.py
Executable file
@@ -0,0 +1,7 @@
|
||||
"""
|
||||
Services réseau pour IPWatch
|
||||
"""
|
||||
from .network import NetworkScanner
|
||||
from .scheduler import ScanScheduler
|
||||
|
||||
__all__ = ["NetworkScanner", "ScanScheduler"]
|
||||
80
backend/app/services/mqtt_client.py
Normal file
80
backend/app/services/mqtt_client.py
Normal file
@@ -0,0 +1,80 @@
|
||||
"""
|
||||
Service MQTT pour IPWatch Backend
|
||||
Envoie des commandes MQTT aux agents installés sur les machines
|
||||
"""
|
||||
import json
import logging
import os
from datetime import datetime
from typing import Optional

import paho.mqtt.client as mqtt
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Configuration MQTT (à charger depuis config.yaml ou variables d'environnement)
|
||||
MQTT_BROKER = os.getenv('MQTT_BROKER', 'localhost')
|
||||
MQTT_PORT = int(os.getenv('MQTT_PORT', '1883'))
|
||||
MQTT_USERNAME = os.getenv('MQTT_USERNAME', None)
|
||||
MQTT_PASSWORD = os.getenv('MQTT_PASSWORD', None)
|
||||
|
||||
|
||||
def send_mqtt_command(ip_address: str, command: str) -> bool:
    """
    Send a command to a device over MQTT.

    Args:
        ip_address: target device IP address
        command: command to send (shutdown, reboot, status)

    Returns:
        bool: True if the message was published successfully
    """
    client = None
    try:
        # Create the MQTT client (pid in the id keeps workers distinct).
        client = mqtt.Client(client_id=f"ipwatch-backend-{os.getpid()}")

        # Optional authentication.
        if MQTT_USERNAME and MQTT_PASSWORD:
            client.username_pw_set(MQTT_USERNAME, MQTT_PASSWORD)

        # Connect to the broker.
        client.connect(MQTT_BROKER, MQTT_PORT, keepalive=10)
        # Bug fix: a running network loop is required for a QoS 1 publish
        # to complete — without it, wait_for_publish() can block forever.
        client.loop_start()

        # Per-device command topic.
        topic = f"ipwatch/device/{ip_address}/command"

        # JSON payload (datetime imported at module level instead of the
        # previous __import__('datetime') hack).
        payload = json.dumps({
            "command": command,
            "timestamp": datetime.now().isoformat()
        })

        # Publish with QoS 1 and wait for the broker acknowledgement.
        result = client.publish(topic, payload, qos=1)
        result.wait_for_publish(timeout=5)

        logger.info(f"✓ Commande '{command}' envoyée à {ip_address} via MQTT")
        return result.is_published()

    except Exception as e:
        logger.error(f"✗ Erreur envoi commande MQTT à {ip_address}: {e}")
        return False
    finally:
        # Bug fix: always stop the loop and disconnect, even on error —
        # the previous version leaked the connection when publish raised.
        if client is not None:
            client.loop_stop()
            client.disconnect()
|
||||
|
||||
|
||||
def get_device_status(ip_address: str) -> Optional[dict]:
    """
    Fetch a device's status over MQTT (not implemented yet).

    Args:
        ip_address: target device IP address

    Returns:
        dict: device status, or None while unimplemented
    """
    # TODO: implement status retrieval — requires subscribing to the
    # device's status topic and awaiting its reply.
    return None
|
||||
365
backend/app/services/network.py
Executable file
365
backend/app/services/network.py
Executable file
@@ -0,0 +1,365 @@
|
||||
"""
|
||||
Modules réseau pour scan d'IP, ping, ARP et port scan
|
||||
Implémente le workflow de scan selon workflow-scan.md
|
||||
"""
|
||||
import asyncio
|
||||
import ipaddress
|
||||
import platform
|
||||
import subprocess
|
||||
import socket
|
||||
from typing import List, Dict, Optional, Tuple
|
||||
from datetime import datetime
|
||||
import re
|
||||
from pathlib import Path
|
||||
|
||||
# Scapy pour ARP
|
||||
try:
|
||||
from scapy.all import ARP, Ether, srp
|
||||
SCAPY_AVAILABLE = True
|
||||
except ImportError:
|
||||
SCAPY_AVAILABLE = False
|
||||
|
||||
|
||||
class NetworkScanner:
    """Main network scanner: ping sweep, ARP/vendor lookup, TCP port scan."""

    def __init__(self, cidr: str, timeout: float = 1.0, ping_count: int = 1):
        """
        Initialize the network scanner.

        Args:
            cidr: network in CIDR notation (e.g. "192.168.1.0/24")
            timeout: timeout for pings and TCP connections (seconds)
            ping_count: number of ping probes per IP (clamped to >= 1)
        """
        self.cidr = cidr
        self.timeout = timeout
        self.ping_count = max(1, int(ping_count))
        self.network = ipaddress.ip_network(cidr, strict=False)

    def generate_ip_list(self) -> List[str]:
        """
        Generate the full list of host addresses of the CIDR.

        Returns:
            List of IP addresses as strings
        """
        return [str(ip) for ip in self.network.hosts()]

    async def ping(self, ip: str) -> bool:
        """
        Ping one IP address asynchronously.

        Args:
            ip: IP address to ping

        Returns:
            True if the host replied, False otherwise
        """
        if platform.system().lower() == 'windows':
            # Windows: -n <count>, -w <timeout in milliseconds>
            command = [
                'ping', '-n', str(self.ping_count),
                '-w', str(int(self.timeout * 1000)),
                ip,
            ]
        else:
            # Unix: -c <count>, -W <timeout in whole seconds>. Clamp to >= 1
            # because the previous int(self.timeout) produced "-W 0" for
            # sub-second timeouts, which disables the timeout on Linux.
            command = [
                'ping', '-c', str(self.ping_count),
                '-W', str(max(1, int(self.timeout))),
                ip,
            ]

        process = None
        try:
            process = await asyncio.create_subprocess_exec(
                *command,
                stdout=asyncio.subprocess.DEVNULL,
                stderr=asyncio.subprocess.DEVNULL
            )
            await asyncio.wait_for(process.wait(), timeout=self.timeout + 1)
            return process.returncode == 0
        except asyncio.TimeoutError:
            # Bug fix: reap the hung child instead of leaking it.
            if process is not None:
                process.kill()
                await process.wait()
            return False
        except Exception:
            return False

    async def ping_parallel(self, ip_list: List[str], max_concurrent: int = 50) -> Dict[str, bool]:
        """
        Ping several IPs concurrently.

        Args:
            ip_list: IPs to ping
            max_concurrent: maximum simultaneous pings

        Returns:
            Mapping {ip: online_status}
        """
        results: Dict[str, bool] = {}
        semaphore = asyncio.Semaphore(max_concurrent)

        async def ping_with_semaphore(ip: str):
            async with semaphore:
                results[ip] = await self.ping(ip)

        # Launch all pings, bounded by the semaphore.
        await asyncio.gather(*[ping_with_semaphore(ip) for ip in ip_list])
        return results

    def get_arp_table(self) -> Dict[str, Tuple[str, str]]:
        """
        Read the ARP table (active Scapy scan if available, otherwise the
        system `arp` command output).

        Returns:
            Mapping {ip: (mac, vendor)}
        """
        arp_data: Dict[str, Tuple[str, str]] = {}

        if SCAPY_AVAILABLE:
            try:
                # Broadcast ARP who-has over the whole CIDR.
                answered, _ = srp(
                    Ether(dst="ff:ff:ff:ff:ff:ff") / ARP(pdst=self.cidr),
                    timeout=2,
                    verbose=False
                )
                for _sent, received in answered:
                    ip = received.psrc
                    mac = received.hwsrc
                    arp_data[ip] = (mac, self._get_mac_vendor(mac))
            except Exception as e:
                print(f"Erreur ARP scan avec Scapy: {e}")
        else:
            # Fallback: parse the OS ARP cache.
            try:
                if platform.system().lower() == 'windows':
                    output = subprocess.check_output(['arp', '-a'], text=True)
                    pattern = r'(\d+\.\d+\.\d+\.\d+)\s+([0-9a-fA-F-:]+)'
                else:
                    output = subprocess.check_output(['arp', '-n'], text=True)
                    pattern = r'(\d+\.\d+\.\d+\.\d+)\s+\w+\s+([0-9a-fA-F:]+)'

                # Perf fix: build the host set once instead of rebuilding
                # the whole list for every ARP entry (was O(n^2)).
                host_set = {str(h) for h in self.network.hosts()}
                for ip, mac in re.findall(pattern, output):
                    if ip in host_set:
                        arp_data[ip] = (mac, self._get_mac_vendor(mac))
            except Exception as e:
                print(f"Erreur lecture table ARP: {e}")

        return arp_data

    def _get_mac_vendor(self, mac: str) -> str:
        """
        Resolve the vendor of a MAC address.

        Tries the local OUI database first, then a small built-in fallback
        table of common prefixes.

        Args:
            mac: MAC address

        Returns:
            Vendor name, or "Unknown"
        """
        mac_norm = re.sub(r"[^0-9A-Fa-f]", "", mac).upper()
        if not mac_norm:
            return "Unknown"

        # Full OUI lookup when the local oui.txt file is available.
        vendor = OuiLookup.lookup(mac_norm)
        if vendor:
            return vendor

        # Small fallback table of common vendors.
        vendors = {
            "00:0C:29": "VMware",
            "00:50:56": "VMware",
            "08:00:27": "VirtualBox",
            "DC:A6:32": "Raspberry Pi",
            "B8:27:EB": "Raspberry Pi",
        }
        for prefix, name in vendors.items():
            if mac_norm.startswith(prefix.replace(":", "").upper()):
                return name

        return "Unknown"

    async def scan_ports(self, ip: str, ports: List[int]) -> List[int]:
        """
        TCP-connect scan of the given ports.

        Args:
            ip: target IP address
            ports: ports to probe

        Returns:
            List of open ports
        """
        async def check_port(port: int) -> Optional[int]:
            try:
                reader, writer = await asyncio.wait_for(
                    asyncio.open_connection(ip, port),
                    timeout=self.timeout
                )
                writer.close()
                await writer.wait_closed()
                return port
            except (asyncio.TimeoutError, OSError):
                # Closed/filtered port or unreachable host; the previous
                # bare `except:` also swallowed KeyboardInterrupt.
                return None

        # Probe all ports concurrently.
        results = await asyncio.gather(*[check_port(p) for p in ports])
        return [p for p in results if p is not None]

    def get_hostname(self, ip: str) -> Optional[str]:
        """
        Reverse-DNS lookup of an IP address.

        Args:
            ip: IP address

        Returns:
            Hostname, or None when unresolvable
        """
        try:
            hostname, _, _ = socket.gethostbyaddr(ip)
            return hostname
        except (socket.herror, socket.gaierror, OSError):
            # Narrowed from a bare `except:` to the actual DNS failures.
            return None

    def classify_ip_status(self, is_online: bool, is_known: bool) -> str:
        """
        Classify the state of an IP.

        Args:
            is_online: IP answered the ping
            is_known: IP is declared in the config (currently unused;
                kept for interface stability)

        Returns:
            "online" or "offline"
        """
        return "online" if is_online else "offline"

    async def full_scan(self, known_ips: Dict[str, Dict], port_list: List[int], max_concurrent: int = 50, progress_callback=None) -> Dict[str, Dict]:
        """
        Full network scan following workflow-scan.md.

        Args:
            known_ips: known IPs from the config
            port_list: ports to scan
            max_concurrent: maximum simultaneous pings
            progress_callback: optional async callable
                (index, total, ip, status, is_online) reporting progress

        Returns:
            Per-IP scan result dictionaries
        """
        results = {}

        # 1. Expand the CIDR into individual addresses.
        ip_list = self.generate_ip_list()
        total_ips = len(ip_list)

        # 2. Parallel ping sweep.
        ping_results = await self.ping_parallel(ip_list, max_concurrent)

        # 3. ARP table + MAC vendor resolution.
        arp_table = self.get_arp_table()

        # 4. Assemble per-IP results.
        for index, ip in enumerate(ip_list, start=1):
            is_online = ping_results.get(ip, False)
            is_known = ip in known_ips

            ip_data = {
                "ip": ip,
                "known": is_known,
                "last_status": self.classify_ip_status(is_online, is_known),
                "last_seen": datetime.now() if is_online else None,
                "mac": None,
                "vendor": None,
                "hostname": None,
                "open_ports": [],
            }

            # Merge config-declared attributes of known IPs.
            if is_known:
                ip_data.update(known_ips[ip])

            # ARP-derived MAC/vendor.
            if ip in arp_table:
                mac, vendor = arp_table[ip]
                ip_data["mac"] = mac
                ip_data["vendor"] = vendor

            # Reverse DNS only for live hosts.
            if is_online:
                hostname = self.get_hostname(ip)
                if hostname:
                    ip_data["hostname"] = hostname

            # 5. Port scan only for hosts that answered.
            if is_online and port_list:
                ip_data["open_ports"] = await self.scan_ports(ip, port_list)

            results[ip] = ip_data

            # Report progress to the caller, if requested.
            if progress_callback:
                await progress_callback(index, total_ips, ip, ip_data["last_status"], is_online)

        return results
|
||||
|
||||
|
||||
class OuiLookup:
    """OUI vendor lookup backed by a local file (oui.txt)."""
    # Parsed prefix -> vendor table, shared process-wide.
    _cache = {}
    # mtime of the file at last parse, used to detect on-disk changes.
    _mtime = None
    _path = Path("./data/oui/oui.txt")

    @classmethod
    def _load(cls):
        """(Re)parse oui.txt when not cached yet or modified on disk."""
        if not cls._path.exists():
            cls._cache = {}
            cls._mtime = None
            return

        mtime = cls._path.stat().st_mtime
        if cls._cache and cls._mtime == mtime:
            return  # cache is current

        table = {}
        try:
            with cls._path.open("r", encoding="utf-8", errors="ignore") as handle:
                for line in handle:
                    stripped = line.strip()
                    if "(hex)" not in stripped:
                        continue
                    # Lines look like "B8-27-EB   (hex)  Raspberry Pi Foundation".
                    left, right = stripped.split("(hex)", 1)
                    prefix = re.sub(r"[^0-9A-Fa-f]", "", left).upper()[:6]
                    vendor = right.strip()
                    if len(prefix) == 6 and vendor:
                        table[prefix] = vendor
        except Exception:
            table = {}

        cls._cache = table
        cls._mtime = mtime
        print(f"[OUI] Base chargée: {len(cls._cache)} entrées depuis {cls._path}")

    @classmethod
    def lookup(cls, mac: str) -> Optional[str]:
        """Return the vendor for a MAC address, or None when unknown."""
        if not mac:
            return None
        cls._load()
        if not cls._cache:
            return None
        prefix = re.sub(r"[^0-9A-Fa-f]", "", mac).upper()[:6]
        if len(prefix) != 6:
            return None
        return cls._cache.get(prefix)
|
||||
194
backend/app/services/opnsense_client.py
Normal file
194
backend/app/services/opnsense_client.py
Normal file
@@ -0,0 +1,194 @@
|
||||
"""
|
||||
Client API OPNsense pour IPWatch
|
||||
Gère les communications avec l'API REST OPNsense (Kea DHCP)
|
||||
"""
|
||||
import httpx
|
||||
import ipaddress
|
||||
from typing import Optional, Dict, Any, List
|
||||
from backend.app.core.config import config_manager
|
||||
|
||||
|
||||
class OPNsenseAPIError(Exception):
    """Error returned by the OPNsense API (validation failure, etc.).

    Attributes:
        validations: field-level validation messages from the API
            response; always a dict (empty when none were provided).
    """
    # Annotation fix: the default is None, so the parameter type is
    # Optional[dict], not dict.
    def __init__(self, message: str, validations: Optional[dict] = None):
        # Normalize to a dict so callers can iterate without None checks.
        self.validations = validations or {}
        super().__init__(message)
|
||||
|
||||
|
||||
class OPNsenseClient:
    """Async client for the OPNsense REST API using HTTP Basic auth
    (api_key:api_secret), focused on Kea DHCPv4 reservations."""

    def __init__(self):
        # Snapshot connection settings from the app config at construction
        # time; later config changes require a new client instance.
        config = config_manager.config.opnsense
        self.base_url = f"{config.protocol}://{config.host}"
        self.auth = (config.api_key, config.api_secret)
        self.verify_ssl = config.verify_ssl
        self.enabled = config.enabled
        print(f"[OPNsense] Client initialisé: {self.base_url} (ssl_verify={self.verify_ssl})")

    def _get_client(self) -> httpx.AsyncClient:
        """Build a configured async HTTP client.

        A fresh client is created per request method and closed by the
        ``async with`` block at each call site.
        """
        return httpx.AsyncClient(
            base_url=self.base_url,
            auth=self.auth,
            verify=self.verify_ssl,
            timeout=30.0
        )

    def _check_result(self, data: Dict[str, Any], action: str):
        """Raise OPNsenseAPIError when an API response reports 'failed'.

        Args:
            data: Parsed JSON body of the OPNsense response.
            action: Human-readable label used in the error message.

        Raises:
            OPNsenseAPIError: with any per-field validation messages attached.
        """
        if data.get("result") == "failed":
            validations = data.get("validations", {})
            msg = f"{action} échoué"
            if validations:
                details = "; ".join(f"{k}: {v}" for k, v in validations.items())
                msg = f"{action} échoué: {details}"
            print(f"[OPNsense] VALIDATION ERREUR: {msg}")
            raise OPNsenseAPIError(msg, validations)

    async def test_connection(self) -> Dict[str, Any]:
        """Check connectivity by querying the firmware status endpoint.

        Returns:
            Parsed JSON status payload.

        Raises:
            httpx.HTTPStatusError: on any non-2xx response.
        """
        print(f"[OPNsense] Test connexion: GET {self.base_url}/api/core/firmware/status")
        async with self._get_client() as client:
            response = await client.get("/api/core/firmware/status")
            print(f"[OPNsense] Réponse test: {response.status_code}")
            response.raise_for_status()
            return response.json()

    async def search_subnets(self) -> Dict[str, Any]:
        """List Kea DHCPv4 subnets.

        Returns:
            The raw search payload; subnets are under the ``rows`` key.
        """
        print(f"[OPNsense] Recherche subnets: GET {self.base_url}/api/kea/dhcpv4/search_subnet")
        async with self._get_client() as client:
            response = await client.get("/api/kea/dhcpv4/search_subnet")
            print(f"[OPNsense] Réponse search_subnet: {response.status_code}")
            if response.status_code != 200:
                # Log a truncated body before raising, for diagnostics.
                print(f"[OPNsense] Corps réponse erreur: {response.text[:500]}")
            response.raise_for_status()
            data = response.json()
            rows = data.get("rows", [])
            print(f"[OPNsense] {len(rows)} subnet(s) trouvé(s)")
            for row in rows:
                print(f"[OPNsense]   - {row.get('subnet')}: uuid={row.get('uuid')}")
            return data

    async def find_subnet_for_ip(self, ip_address: str) -> Optional[str]:
        """Find the UUID of the Kea subnet containing *ip_address*.

        Args:
            ip_address: IPv4 address as a string.

        Returns:
            The subnet UUID, or None when no configured subnet contains it.
        """
        print(f"[OPNsense] Recherche subnet pour IP {ip_address}")
        ip_obj = ipaddress.ip_address(ip_address)
        data = await self.search_subnets()
        rows = data.get("rows", [])
        for row in rows:
            subnet_cidr = row.get("subnet", "")
            try:
                # strict=False tolerates host bits set in the CIDR notation.
                network = ipaddress.ip_network(subnet_cidr, strict=False)
                if ip_obj in network:
                    uuid = row.get("uuid")
                    print(f"[OPNsense] Subnet trouvé: {subnet_cidr} -> uuid={uuid}")
                    return uuid
            except ValueError:
                # Malformed CIDR from the API: skip this row.
                continue
        print(f"[OPNsense] Aucun subnet trouvé pour {ip_address}")
        return None

    async def search_reservations(self) -> Dict[str, Any]:
        """List all Kea DHCP reservations.

        Returns:
            The raw search payload; reservations are under the ``rows`` key.
        """
        print(f"[OPNsense] Recherche réservations: GET {self.base_url}/api/kea/dhcpv4/search_reservation")
        async with self._get_client() as client:
            response = await client.get("/api/kea/dhcpv4/search_reservation")
            print(f"[OPNsense] Réponse search_reservation: {response.status_code}")
            if response.status_code != 200:
                print(f"[OPNsense] Corps réponse erreur: {response.text[:500]}")
            response.raise_for_status()
            data = response.json()
            rows = data.get("rows", [])
            print(f"[OPNsense] {len(rows)} réservation(s) trouvée(s)")
            return data

    async def get_reservation(self, uuid: str) -> Dict[str, Any]:
        """Fetch a single reservation by UUID."""
        print(f"[OPNsense] Get réservation: {uuid}")
        async with self._get_client() as client:
            response = await client.get(f"/api/kea/dhcpv4/get_reservation/{uuid}")
            print(f"[OPNsense] Réponse get_reservation: {response.status_code}")
            response.raise_for_status()
            return response.json()

    async def add_reservation(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """Create a new Kea DHCP reservation.

        Args:
            data: Reservation fields; wrapped under a ``reservation`` key as
                required by the OPNsense API.

        Raises:
            OPNsenseAPIError: if OPNsense reports a validation failure.
        """
        payload = {"reservation": data}
        print(f"[OPNsense] Ajout réservation: POST {self.base_url}/api/kea/dhcpv4/add_reservation")
        print(f"[OPNsense] Payload: {payload}")
        async with self._get_client() as client:
            response = await client.post(
                "/api/kea/dhcpv4/add_reservation",
                json=payload
            )
            print(f"[OPNsense] Réponse add_reservation: {response.status_code}")
            print(f"[OPNsense] Corps réponse: {response.text[:500]}")
            response.raise_for_status()
            result = response.json()
            self._check_result(result, "Ajout réservation")
            return result

    async def set_reservation(self, uuid: str, data: Dict[str, Any]) -> Dict[str, Any]:
        """Update an existing reservation identified by *uuid*.

        Raises:
            OPNsenseAPIError: if OPNsense reports a validation failure.
        """
        payload = {"reservation": data}
        print(f"[OPNsense] Mise à jour réservation {uuid}: POST {self.base_url}/api/kea/dhcpv4/set_reservation/{uuid}")
        print(f"[OPNsense] Payload: {payload}")
        async with self._get_client() as client:
            response = await client.post(
                f"/api/kea/dhcpv4/set_reservation/{uuid}",
                json=payload
            )
            print(f"[OPNsense] Réponse set_reservation: {response.status_code}")
            print(f"[OPNsense] Corps réponse: {response.text[:500]}")
            response.raise_for_status()
            result = response.json()
            self._check_result(result, "Mise à jour réservation")
            return result

    async def del_reservation(self, uuid: str) -> Dict[str, Any]:
        """Delete a reservation by UUID."""
        print(f"[OPNsense] Suppression réservation: {uuid}")
        async with self._get_client() as client:
            response = await client.post(f"/api/kea/dhcpv4/del_reservation/{uuid}")
            print(f"[OPNsense] Réponse del_reservation: {response.status_code}")
            response.raise_for_status()
            return response.json()

    async def reconfigure_kea(self) -> Dict[str, Any]:
        """Apply pending Kea changes by reconfiguring the service."""
        print(f"[OPNsense] Reconfiguration Kea: POST {self.base_url}/api/kea/service/reconfigure")
        async with self._get_client() as client:
            response = await client.post("/api/kea/service/reconfigure")
            print(f"[OPNsense] Réponse reconfigure: {response.status_code}")
            if response.status_code != 200:
                print(f"[OPNsense] Corps réponse erreur: {response.text[:500]}")
            response.raise_for_status()
            return response.json()

    async def find_reservation_by_ip(self, ip_address: str) -> Optional[Dict[str, Any]]:
        """Look up an existing reservation by exact IP-address match.

        Returns:
            The matching reservation row, or None.
        """
        print(f"[OPNsense] Recherche réservation par IP: {ip_address}")
        result = await self.search_reservations()
        rows = result.get("rows", [])
        for row in rows:
            if row.get("ip_address") == ip_address:
                print(f"[OPNsense] Réservation trouvée: uuid={row.get('uuid')}")
                return row
        print(f"[OPNsense] Aucune réservation existante pour {ip_address}")
        return None

    async def find_reservation_by_mac(self, mac_address: str) -> Optional[Dict[str, Any]]:
        """Look up an existing reservation by MAC address.

        Both sides are normalized to lowercase colon notation before
        comparison, so dash-separated input also matches.

        Returns:
            The matching reservation row, or None.
        """
        mac_normalized = mac_address.lower().replace("-", ":")
        print(f"[OPNsense] Recherche réservation par MAC: {mac_normalized}")
        result = await self.search_reservations()
        rows = result.get("rows", [])
        for row in rows:
            row_mac = (row.get("hw_address") or "").lower().replace("-", ":")
            if row_mac == mac_normalized:
                print(f"[OPNsense] Réservation trouvée par MAC: uuid={row.get('uuid')}")
                return row
        print(f"[OPNsense] Aucune réservation pour MAC {mac_normalized}")
        return None
|
||||
103
backend/app/services/scheduler.py
Executable file
103
backend/app/services/scheduler.py
Executable file
@@ -0,0 +1,103 @@
|
||||
"""
|
||||
Scheduler APScheduler pour les scans réseau périodiques
|
||||
"""
|
||||
from apscheduler.schedulers.asyncio import AsyncIOScheduler
|
||||
from apscheduler.triggers.interval import IntervalTrigger
|
||||
from datetime import datetime, timedelta
|
||||
from typing import Optional, Callable
|
||||
import asyncio
|
||||
|
||||
|
||||
class ScanScheduler:
    """Periodic-task manager built on APScheduler's asyncio scheduler."""

    def __init__(self):
        """Create the underlying AsyncIOScheduler (not started yet)."""
        self.scheduler = AsyncIOScheduler()
        self.is_running = False

    def start(self):
        """Start the scheduler; no-op when already running."""
        if self.is_running:
            return
        self.scheduler.start()
        self.is_running = True
        print(f"[{datetime.now()}] Scheduler démarré")

    def stop(self):
        """Shut the scheduler down; no-op when not running."""
        if not self.is_running:
            return
        self.scheduler.shutdown()
        self.is_running = False
        print(f"[{datetime.now()}] Scheduler arrêté")

    def add_ping_scan_job(self, scan_function: Callable, interval_seconds: int = 60):
        """Register (or replace) the periodic ping-scan job.

        Args:
            scan_function: Async callable executed on each tick.
            interval_seconds: Period between runs, in seconds.
        """
        trigger = IntervalTrigger(seconds=interval_seconds)
        self.scheduler.add_job(
            scan_function,
            trigger=trigger,
            id='ping_scan',
            name='Scan Ping périodique',
            replace_existing=True,
        )
        print(f"Tâche ping_scan configurée: toutes les {interval_seconds}s")

    def add_port_scan_job(self, scan_function: Callable, interval_seconds: int = 300):
        """Register (or replace) the periodic port-scan job.

        Args:
            scan_function: Async callable executed on each tick.
            interval_seconds: Period between runs, in seconds.
        """
        trigger = IntervalTrigger(seconds=interval_seconds)
        self.scheduler.add_job(
            scan_function,
            trigger=trigger,
            id='port_scan',
            name='Scan ports périodique',
            replace_existing=True,
        )
        print(f"Tâche port_scan configurée: toutes les {interval_seconds}s")

    def add_cleanup_job(self, cleanup_function: Callable, interval_hours: int = 1):
        """Register (or replace) the history-cleanup job.

        Args:
            cleanup_function: Async callable that prunes old history rows.
            interval_hours: Period between runs, in hours.
        """
        trigger = IntervalTrigger(hours=interval_hours)
        self.scheduler.add_job(
            cleanup_function,
            trigger=trigger,
            id='history_cleanup',
            name='Nettoyage historique',
            replace_existing=True,
        )
        print(f"Tâche cleanup configurée: toutes les {interval_hours}h")

    def remove_job(self, job_id: str):
        """Remove a scheduled job by id; logs instead of raising on failure.

        Args:
            job_id: Identifier of the job to remove.
        """
        try:
            self.scheduler.remove_job(job_id)
            print(f"Tâche {job_id} supprimée")
        except Exception as e:
            print(f"Erreur suppression tâche {job_id}: {e}")

    def get_jobs(self):
        """Return the list of currently scheduled jobs."""
        return self.scheduler.get_jobs()
|
||||
|
||||
|
||||
# Global scheduler instance shared across the application
scan_scheduler = ScanScheduler()
|
||||
146
backend/app/services/websocket.py
Executable file
146
backend/app/services/websocket.py
Executable file
@@ -0,0 +1,146 @@
|
||||
"""
|
||||
Gestionnaire WebSocket pour notifications temps réel
|
||||
"""
|
||||
from fastapi import WebSocket
|
||||
from typing import List, Dict, Any
|
||||
import json
|
||||
from datetime import datetime
|
||||
|
||||
|
||||
class WebSocketManager:
    """Manages active WebSocket connections and real-time broadcasts."""

    def __init__(self):
        """Initialize with an empty connection list."""
        self.active_connections: List[WebSocket] = []

    async def connect(self, websocket: WebSocket):
        """Accept a new WebSocket connection and track it.

        Args:
            websocket: WebSocket instance to accept.
        """
        await websocket.accept()
        self.active_connections.append(websocket)
        print(f"[{datetime.now()}] Nouvelle connexion WebSocket. Total: {len(self.active_connections)}")

    def disconnect(self, websocket: WebSocket):
        """Stop tracking a WebSocket client (idempotent).

        Args:
            websocket: WebSocket instance to drop.
        """
        if websocket in self.active_connections:
            self.active_connections.remove(websocket)
            print(f"[{datetime.now()}] Déconnexion WebSocket. Total: {len(self.active_connections)}")

    async def send_personal_message(self, message: str, websocket: WebSocket):
        """Send a message to one specific client; failures are logged only.

        Args:
            message: Text to send.
            websocket: Target client.
        """
        try:
            await websocket.send_text(message)
        except Exception as e:
            # Best-effort delivery: a dead client must not crash the caller.
            print(f"Erreur envoi message personnel: {e}")

    async def broadcast(self, message: Dict[str, Any]):
        """Broadcast a JSON message to every connected client.

        Args:
            message: Message dictionary (serialized to JSON). A ``timestamp``
                field is added to the outgoing payload.
        """
        # Fix: build a copy instead of mutating the caller's dict in place.
        payload = {**message, "timestamp": datetime.now().isoformat()}

        json_message = json.dumps(payload)

        # Collect dead connections during the send pass, then prune them.
        disconnected = []

        for connection in self.active_connections:
            try:
                await connection.send_text(json_message)
            except Exception as e:
                print(f"Erreur broadcast: {e}")
                disconnected.append(connection)

        for conn in disconnected:
            self.disconnect(conn)

    async def broadcast_scan_start(self):
        """Notify all clients that a scan has started."""
        await self.broadcast({
            "type": "scan_start",
            "message": "Scan réseau démarré"
        })

    async def broadcast_scan_complete(self, stats: Dict[str, int]):
        """Notify all clients that a scan finished, with statistics.

        Args:
            stats: Scan statistics (total, online, offline, etc.).
        """
        await self.broadcast({
            "type": "scan_complete",
            "message": "Scan réseau terminé",
            "stats": stats
        })

    async def broadcast_ip_update(self, ip_data: Dict[str, Any]):
        """Notify all clients of an IP state change.

        Args:
            ip_data: Updated IP record.
        """
        await self.broadcast({
            "type": "ip_update",
            "data": ip_data
        })

    async def broadcast_new_ip(self, ip_data: Dict[str, Any]):
        """Notify all clients that a new IP was detected.

        Args:
            ip_data: New IP record; its ``ip`` key is used in the message.
        """
        await self.broadcast({
            "type": "new_ip",
            "data": ip_data,
            "message": f"Nouvelle IP détectée: {ip_data.get('ip')}"
        })

    async def broadcast_scan_progress(self, progress_data: Dict[str, Any]):
        """Notify all clients of in-progress scan status.

        Args:
            progress_data: Progress fields (``current``, ``total``, ``ip``).
        """
        await self.broadcast({
            "type": "scan_progress",
            "current": progress_data.get("current"),
            "total": progress_data.get("total"),
            "ip": progress_data.get("ip")
        })

    async def broadcast_scan_log(self, message: str):
        """Broadcast one scan log line to all clients."""
        await self.broadcast({
            "type": "scan_log",
            "message": message
        })
|
||||
|
||||
|
||||
# Global WebSocket manager instance shared across the application
ws_manager = WebSocketManager()
|
||||
19
backend/requirements.txt
Executable file
19
backend/requirements.txt
Executable file
@@ -0,0 +1,19 @@
|
||||
fastapi==0.109.0
|
||||
uvicorn[standard]==0.27.0
|
||||
sqlalchemy==2.0.25
|
||||
pydantic==2.5.3
|
||||
pydantic-settings==2.1.0
|
||||
python-multipart==0.0.6
|
||||
websockets==12.0
|
||||
apscheduler==3.10.4
|
||||
pyyaml==6.0.1
|
||||
# NOTE: asyncio is part of the Python standard library. The PyPI package
# "asyncio==3.4.3" is an obsolete Python 3.3 backport that shadows the
# stdlib module and must not be installed on modern Python:
# asyncio==3.4.3
|
||||
aiosqlite==0.19.0
|
||||
python-nmap==0.7.1
|
||||
scapy==2.5.0
|
||||
pytest==7.4.4
|
||||
pytest-asyncio==0.23.3
|
||||
httpx==0.26.0
|
||||
psutil==5.9.8
|
||||
wakeonlan==3.1.0
|
||||
paho-mqtt==1.6.1
|
||||
Reference in New Issue
Block a user