ipwatch
38
.claude/settings.local.json
Executable file
@@ -0,0 +1,38 @@
|
||||
{
|
||||
"permissions": {
|
||||
"allow": [
|
||||
"Bash(find:*)",
|
||||
"Bash(chmod:*)",
|
||||
"Bash(tree:*)",
|
||||
"Bash(sudo lsof:*)",
|
||||
"Bash(ss:*)",
|
||||
"Bash(kill:*)",
|
||||
"Bash(docker compose:*)",
|
||||
"Bash(curl:*)",
|
||||
"Bash(docker logs:*)",
|
||||
"Bash(ip addr:*)",
|
||||
"Bash(docker exec:*)",
|
||||
"Bash(for:*)",
|
||||
"Bash(do echo \"=== Tentative $i ===\")",
|
||||
"Bash(done)",
|
||||
"Bash(python3:*)",
|
||||
"Bash(npm install:*)",
|
||||
"Bash(docker-compose build:*)",
|
||||
"Bash(sqlite3:*)",
|
||||
"WebFetch(domain:docs.opnsense.org)",
|
||||
"WebFetch(domain:vueflow.dev)",
|
||||
"WebFetch(domain:deepwiki.com)",
|
||||
"WebFetch(domain:homenetworkguy.com)",
|
||||
"WebFetch(domain:github.com)",
|
||||
"Bash(docker ps:*)",
|
||||
"Bash(git init:*)",
|
||||
"Bash(git remote add:*)",
|
||||
"Bash(git -C /home/gilles/docker/ipwatch fetch origin)",
|
||||
"Bash(git -C /home/gilles/docker/ipwatch branch -m master main)",
|
||||
"Bash(git -C /home/gilles/docker/ipwatch reset --soft origin/main)",
|
||||
"Bash(git -C /home/gilles/docker/ipwatch branch:*)"
|
||||
],
|
||||
"deny": [],
|
||||
"ask": []
|
||||
}
|
||||
}
|
||||
36
.dockerignore
Executable file
@@ -0,0 +1,36 @@
|
||||
# Node modules
|
||||
frontend/node_modules
|
||||
frontend/dist
|
||||
|
||||
# Python
|
||||
backend/__pycache__
|
||||
backend/**/__pycache__
|
||||
backend/**/*.pyc
|
||||
backend/**/*.pyo
|
||||
backend/**/*.pyd
|
||||
backend/.pytest_cache
|
||||
backend/**/.pytest_cache
|
||||
|
||||
# Données et logs
|
||||
data/
|
||||
logs/
|
||||
*.sqlite
|
||||
*.db
|
||||
|
||||
# IDE
|
||||
.vscode/
|
||||
.idea/
|
||||
*.swp
|
||||
*.swo
|
||||
|
||||
# Git
|
||||
.git/
|
||||
.gitignore
|
||||
|
||||
# Documentation
|
||||
*.md
|
||||
!README.md
|
||||
|
||||
# Divers
|
||||
.env
|
||||
.DS_Store
|
||||
37
.gitignore
vendored
Executable file
@@ -0,0 +1,37 @@
|
||||
# Python
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*$py.class
|
||||
*.so
|
||||
.Python
|
||||
backend/**/__pycache__/
|
||||
.pytest_cache/
|
||||
|
||||
# Node
|
||||
frontend/node_modules/
|
||||
frontend/dist/
|
||||
frontend/.vite/
|
||||
|
||||
# Environnement
|
||||
.env
|
||||
.venv
|
||||
env/
|
||||
venv/
|
||||
|
||||
# Données
|
||||
data/
|
||||
logs/
|
||||
*.sqlite
|
||||
*.db
|
||||
|
||||
# IDE
|
||||
.vscode/
|
||||
.idea/
|
||||
*.swp
|
||||
*.swo
|
||||
.DS_Store
|
||||
|
||||
# Build
|
||||
build/
|
||||
dist/
|
||||
*.egg-info/
|
||||
122
CLAUDE.md
Executable file
@@ -0,0 +1,122 @@
|
||||
# CLAUDE.md
|
||||
|
||||
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
||||
|
||||
## Project Overview
|
||||
|
||||
IPWatch is a network scanner web application that visualizes IP addresses, their states (online/offline), open ports, and historical data. The project consists of:
|
||||
|
||||
- **Backend**: FastAPI + SQLAlchemy + APScheduler for network scanning
|
||||
- **Frontend**: Vue 3 + Vite + Tailwind with Monokai dark theme
|
||||
- **Deployment**: Docker containerization with volumes for config and database
|
||||
|
||||
## Key Specification Files
|
||||
Speak in French and comment in French.
|
||||
The project has detailed specifications that MUST be followed when implementing features:
|
||||
|
||||
- [prompt-claude-code.md](prompt-claude-code.md) - Overall project objectives and deliverables
|
||||
- [architecture-technique.md](architecture-technique.md) - Technical architecture (backend modules, frontend structure, Docker setup)
|
||||
- [modele-donnees.md](modele-donnees.md) - SQLite database schema (ip and ip_history tables with required indexes)
|
||||
- [workflow-scan.md](workflow-scan.md) - 10-step scan pipeline from YAML config to WebSocket push
|
||||
- [consigne-parametrage.md](consigne-parametrage.md) - Complete YAML configuration structure with all sections (app, network, ip_classes, scan, ports, locations, hosts, history, ui, colors, network_advanced, filters, database)
|
||||
- [consigne-design_webui.md](consigne-design_webui.md) - UI layout (3-column design), interaction patterns, visual states
|
||||
- [guidelines-css.md](guidelines-css.md) - Monokai color palette, IP cell styling rules (solid border for online, dashed for offline, animated halo for ping)
|
||||
- [tests-backend.md](tests-backend.md) - Required unit and integration tests
|
||||
|
||||
## Architecture Principles
|
||||
|
||||
### Backend Structure
|
||||
- FastAPI application with separate modules for network operations (ping, ARP, port scanning)
|
||||
- SQLAlchemy models matching the schema in [modele-donnees.md](modele-donnees.md)
|
||||
- APScheduler for periodic network scans
|
||||
- WebSocket endpoint for real-time push notifications
|
||||
- REST APIs for: IP management, scan operations, configuration, historical data
|
||||
|
||||
### Frontend Structure
|
||||
- Vue 3 with Composition API
|
||||
- Pinia for global state management
|
||||
- WebSocket client for real-time updates
|
||||
- 3-column layout: left (IP details), center (IP grid + legend), right (new detections)
|
||||
- Monokai dark theme with specific color codes from [guidelines-css.md](guidelines-css.md)
|
||||
|
||||
### Data Flow
|
||||
1. YAML configuration loads network CIDR and scan parameters
|
||||
2. Scheduled scan generates IP list, performs ping (parallel), ARP lookup, port scanning
|
||||
3. Results classified and stored in SQLite
|
||||
4. New/changed IPs trigger WebSocket push to frontend
|
||||
5. UI updates grid with appropriate visual states
|
||||
|
||||
## Database Schema
|
||||
|
||||
### ip table (PRIMARY)
|
||||
- `ip` (PK): IP address
|
||||
- `name`, `known` (bool), `location`, `host`: metadata
|
||||
- `first_seen`, `last_seen`: timestamps
|
||||
- `last_status`: current online/offline state
|
||||
- `mac`, `vendor`, `hostname`: network info
|
||||
- `open_ports`: JSON array
|
||||
|
||||
### ip_history table
|
||||
- `id` (PK)
|
||||
- `ip` (FK to ip.ip)
|
||||
- `timestamp`, `status`, `open_ports` (JSON)
|
||||
- **Required index**: timestamp for efficient historical queries
|
||||
|
||||
### Important Indexes
|
||||
- Index on `ip.last_status` for filtering
|
||||
- Index on `ip_history.timestamp` for 24h history retrieval
|
||||
|
||||
## Visual Design Rules
|
||||
|
||||
### IP Cell States
|
||||
- **Online + Known**: Green (#A6E22E) with solid border
|
||||
- **Online + Unknown**: Cyan (#66D9EF) with solid border
|
||||
- **Offline**: Dashed border + configurable transparency
|
||||
- **Ping in progress**: Animated halo using CSS keyframes
|
||||
- **Free IP**: Distinct color from occupied states
|
||||
|
||||
### Theme Colors (Monokai)
|
||||
- Background: `#272822`
|
||||
- Text: `#F8F8F2`
|
||||
- Accents: `#A6E22E` (green), `#F92672` (pink), `#66D9EF` (cyan)
|
||||
|
||||
## Configuration System
|
||||
|
||||
The application is driven by a YAML configuration file ([consigne-parametrage.md](consigne-parametrage.md)) with these sections:
|
||||
- `network`: CIDR, gateway, DNS
|
||||
- `ip_classes`: Define known IPs with metadata
|
||||
- `scan`: Intervals, parallelization settings
|
||||
- `ports`: Port scan ranges
|
||||
- `locations`, `hosts`: Categorical data
|
||||
- `history`: Retention period
|
||||
- `ui`: Display preferences, transparency
|
||||
- `colors`: Custom color mapping
|
||||
- `network_advanced`: ARP, timeout settings
|
||||
- `filters`: Default filter states
|
||||
- `database`: SQLite path
|
||||
|
||||
## Testing Requirements
|
||||
|
||||
When implementing backend features, ensure tests cover ([tests-backend.md](tests-backend.md)):
|
||||
- Network module unit tests: `test_ping()`, `test_port_scan()`, `test_classification()`
|
||||
- SQLAlchemy models: `test_sqlalchemy_models()`
|
||||
- API endpoints: `test_api_get_ip()`, `test_api_update_ip()`
|
||||
- Scheduler: `test_scheduler()`
|
||||
- Integration: Full network scan simulation, WebSocket notification flow
|
||||
|
||||
## Docker Setup
|
||||
|
||||
The application should run as a single Docker service:
|
||||
- Combined backend + frontend container
|
||||
- Volume mount for `config.yaml`
|
||||
- Volume mount for `db.sqlite`
|
||||
- Exposed ports for web access and WebSocket
|
||||
|
||||
## Implementation Notes
|
||||
|
||||
- **Parallelization**: Ping operations must be parallelized for performance
|
||||
- **Real-time updates**: WebSocket is critical for live UI updates during scans
|
||||
- **MAC vendor lookup**: Use ARP data to populate vendor information
|
||||
- **Port scanning**: Respect intervals defined in YAML to avoid network overload
|
||||
- **Classification logic**: Follow the 10-step workflow in [workflow-scan.md](workflow-scan.md)
|
||||
- **Responsive design**: Grid layout must be fluid with collapsible columns
|
||||
BIN
Capture d’écran du 2025-12-06 04-55-12.png
Executable file
|
After Width: | Height: | Size: 164 KiB |
269
DEPLOIEMENT_REUSSI.md
Normal file
@@ -0,0 +1,269 @@
|
||||
# ✅ Déploiement de la fonctionnalité de suivi - RÉUSSI
|
||||
|
||||
**Date**: 23 décembre 2025
|
||||
**Version**: IPWatch 1.0.1
|
||||
**Fonctionnalité**: Suivi d'équipements avec Wake-on-LAN
|
||||
|
||||
---
|
||||
|
||||
## 📋 Résumé de l'implémentation
|
||||
|
||||
La fonctionnalité de **suivi d'équipements** a été déployée avec succès dans IPWatch.
|
||||
|
||||
### ✅ Modifications backend (FastAPI)
|
||||
|
||||
- [x] Modèle `IP` modifié : ajout du champ `tracked` (boolean) avec index
|
||||
- [x] Schémas Pydantic mis à jour : `IPUpdate` et `IPResponse` incluent `tracked`
|
||||
- [x] Nouveau router `/api/tracking/` créé avec 3 endpoints :
|
||||
- `GET /api/tracking/` - Liste des IPs suivies
|
||||
- `POST /api/tracking/wol/{ip}` - Wake-on-LAN
|
||||
- `POST /api/tracking/shutdown/{ip}` - Éteindre (à configurer)
|
||||
- [x] Migration de base de données exécutée avec succès
|
||||
- [x] Dépendance `wakeonlan==3.1.0` ajoutée et installée
|
||||
|
||||
### ✅ Modifications frontend (Vue 3 + Router)
|
||||
|
||||
- [x] Vue Router installé (`vue-router@4.2.5`)
|
||||
- [x] Configuration du routing créée (`/` et `/tracking`)
|
||||
- [x] Page `MainView.vue` créée (page principale)
|
||||
- [x] Page `TrackingView.vue` créée (page de suivi)
|
||||
- [x] Composant `IPDetails.vue` modifié : checkbox "IP suivie" ajoutée
|
||||
- [x] Composant `AppHeader.vue` modifié : bouton "Suivi" ajouté (jaune/orange)
|
||||
- [x] `App.vue` transformé en router-view
|
||||
- [x] `main.js` mis à jour avec le router
|
||||
|
||||
---
|
||||
|
||||
## 🎯 Utilisation
|
||||
|
||||
### 1. Marquer une IP comme suivie
|
||||
|
||||
1. Cliquez sur une cellule IP dans la grille principale
|
||||
2. Dans le panneau de gauche, cochez **"IP suivie"**
|
||||
3. Cliquez sur **"Enregistrer"**
|
||||
|
||||
### 2. Accéder à la page de suivi
|
||||
|
||||
1. Cliquez sur le bouton **"⭐ Suivi"** dans le header (jaune/orange)
|
||||
2. Vous êtes redirigé vers la page `/tracking`
|
||||
|
||||
### 3. Actions disponibles sur la page de suivi
|
||||
|
||||
Pour chaque équipement suivi :
|
||||
|
||||
- **Bouton WOL (vert)** : Envoie un paquet Magic Packet Wake-on-LAN
|
||||
- Nécessite une adresse MAC enregistrée
|
||||
- Désactivé si l'équipement est déjà en ligne
|
||||
|
||||
- **Bouton Éteindre (rose)** : Envoie une commande d'arrêt
|
||||
- ⚠️ Non implémenté (retourne HTTP 501)
|
||||
- Nécessite configuration selon votre infrastructure
|
||||
|
||||
- **Bouton Détails (violet)** : Retourne à la page principale avec l'IP sélectionnée
|
||||
|
||||
- **Bouton Rafraîchir** : Actualise la liste des équipements suivis
|
||||
|
||||
---
|
||||
|
||||
## 🛠️ Configuration Wake-on-LAN
|
||||
|
||||
### Prérequis matériels
|
||||
|
||||
Pour que WOL fonctionne, l'équipement cible doit :
|
||||
|
||||
1. Avoir **Wake-on-LAN activé dans le BIOS/UEFI**
|
||||
2. Avoir le support WOL activé sur la carte réseau
|
||||
3. Être branché à l'alimentation (ATX)
|
||||
|
||||
### Test Wake-on-LAN
|
||||
|
||||
```bash
|
||||
# Depuis le conteneur Docker
|
||||
docker exec ipwatch python -c "
|
||||
from wakeonlan import send_magic_packet
|
||||
send_magic_packet('AA:BB:CC:DD:EE:FF')
|
||||
print('Paquet WOL envoyé !')
|
||||
"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 🔍 Vérification du déploiement
|
||||
|
||||
### 1. Vérifier la migration de base de données
|
||||
|
||||
```bash
|
||||
docker exec ipwatch sqlite3 ./data/db.sqlite "PRAGMA table_info(ip);" | grep tracked
|
||||
```
|
||||
|
||||
**Sortie attendue** :
|
||||
```
|
||||
14|tracked|BOOLEAN|0|0|0
|
||||
```
|
||||
|
||||
### 1b. Migration VM (nouveau champ)
|
||||
|
||||
```bash
|
||||
docker exec ipwatch python -m backend.app.migrations.add_vm_field
|
||||
docker exec ipwatch sqlite3 ./data/db.sqlite "PRAGMA table_info(ip);" | grep vm
|
||||
```
|
||||
|
||||
**Sortie attendue** :
|
||||
```
|
||||
15|vm|BOOLEAN|0|0|0
|
||||
```
|
||||
|
||||
### 2. Vérifier les endpoints API
|
||||
|
||||
```bash
|
||||
# Liste des IPs suivies
|
||||
curl http://localhost:8080/api/tracking/
|
||||
|
||||
# Health check
|
||||
curl http://localhost:8080/health
|
||||
```
|
||||
|
||||
### 3. Vérifier le frontend
|
||||
|
||||
Ouvrez votre navigateur sur `http://localhost:8080` :
|
||||
|
||||
✅ Le bouton **"⭐ Suivi"** est visible dans le header
|
||||
✅ Cliquer dessus charge la page `/tracking`
|
||||
✅ La checkbox "IP suivie" est présente dans le panneau de gauche
|
||||
|
||||
---
|
||||
|
||||
## 📊 Statut du déploiement
|
||||
|
||||
```
|
||||
=== Démarrage IPWatch ===
|
||||
✓ Configuration chargée: 10.0.0.0/22
|
||||
✓ Base de données initialisée: ./data/db.sqlite
|
||||
✓ Scheduler démarré
|
||||
=== IPWatch prêt ===
|
||||
|
||||
Migration de base de données:
|
||||
✓ Colonne 'tracked' ajoutée
|
||||
✓ Index 'idx_ip_tracked' créé
|
||||
|
||||
Serveur:
|
||||
✓ Uvicorn running on http://0.0.0.0:8080
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 📂 Fichiers modifiés/créés
|
||||
|
||||
### Backend
|
||||
```
|
||||
backend/app/models/ip.py [MODIFIÉ]
|
||||
backend/app/routers/ips.py [MODIFIÉ]
|
||||
backend/app/routers/tracking.py [CRÉÉ]
|
||||
backend/app/migrations/add_tracked_field.py [CRÉÉ]
|
||||
backend/app/migrations/__init__.py [CRÉÉ]
|
||||
backend/app/main.py [MODIFIÉ]
|
||||
backend/requirements.txt [MODIFIÉ]
|
||||
```
|
||||
|
||||
### Frontend
|
||||
```
|
||||
frontend/src/router/index.js [CRÉÉ]
|
||||
frontend/src/views/MainView.vue [CRÉÉ]
|
||||
frontend/src/views/TrackingView.vue [CRÉÉ]
|
||||
frontend/src/components/AppHeader.vue [MODIFIÉ]
|
||||
frontend/src/components/IPDetails.vue [MODIFIÉ]
|
||||
frontend/src/App.vue [MODIFIÉ]
|
||||
frontend/src/main.js [MODIFIÉ]
|
||||
frontend/package.json [MODIFIÉ]
|
||||
```
|
||||
|
||||
### Documentation
|
||||
```
|
||||
SUIVI_EQUIPEMENTS.md [CRÉÉ]
|
||||
DEPLOIEMENT_REUSSI.md [CE FICHIER]
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 🚀 Prochaines étapes
|
||||
|
||||
### Configuration optionnelle : Shutdown
|
||||
|
||||
Pour activer la fonctionnalité d'arrêt, éditez `backend/app/routers/tracking.py` :
|
||||
|
||||
#### Option 1 : SSH (Linux)
|
||||
```python
|
||||
# pip install paramiko
|
||||
|
||||
import paramiko
|
||||
|
||||
def shutdown_via_ssh(ip_address, username, password):
|
||||
ssh = paramiko.SSHClient()
|
||||
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
|
||||
ssh.connect(ip_address, username=username, password=password)
|
||||
ssh.exec_command("sudo shutdown -h now")
|
||||
ssh.close()
|
||||
```
|
||||
|
||||
#### Option 2 : WMI (Windows)
|
||||
```python
|
||||
# pip install wmi-client-wrapper
|
||||
|
||||
import wmi
|
||||
|
||||
def shutdown_via_wmi(ip_address, username, password):
|
||||
c = wmi.WMI(computer=ip_address, user=username, password=password)
|
||||
os = c.Win32_OperatingSystem(Primary=1)[0]
|
||||
os.Shutdown()
|
||||
```
|
||||
|
||||
⚠️ **Sécurité** : Stockez les credentials dans des variables d'environnement, jamais en clair.
|
||||
|
||||
---
|
||||
|
||||
## 📞 Support
|
||||
|
||||
### Logs du conteneur
|
||||
```bash
|
||||
docker logs ipwatch -f
|
||||
```
|
||||
|
||||
### Logs de la base de données
|
||||
```bash
|
||||
docker exec ipwatch sqlite3 ./data/db.sqlite "SELECT * FROM ip WHERE tracked = 1;"
|
||||
```
|
||||
|
||||
### Redémarrage
|
||||
```bash
|
||||
docker compose restart ipwatch
|
||||
```
|
||||
|
||||
### Rebuild complet
|
||||
```bash
|
||||
docker compose build --no-cache
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## ✅ Checklist de validation
|
||||
|
||||
- [x] Migration de base de données exécutée
|
||||
- [x] Colonne `tracked` présente dans la table `ip`
|
||||
- [x] Index `idx_ip_tracked` créé
|
||||
- [x] Conteneur démarré sans erreurs
|
||||
- [x] Endpoints API `/api/tracking/` accessibles
|
||||
- [x] Frontend construit et déployé
|
||||
- [x] Bouton "Suivi" visible dans le header
|
||||
- [x] Checkbox "IP suivie" présente dans IPDetails
|
||||
- [x] Navigation vers `/tracking` fonctionnelle
|
||||
- [x] Page de suivi affiche correctement les équipements
|
||||
- [x] Boutons WOL, Éteindre, Détails présents
|
||||
- [x] Dépendance `wakeonlan` installée
|
||||
|
||||
---
|
||||
|
||||
**🎉 DÉPLOIEMENT RÉUSSI !**
|
||||
|
||||
Votre fonctionnalité de suivi d'équipements est maintenant opérationnelle.
|
||||
Consultez [SUIVI_EQUIPEMENTS.md](SUIVI_EQUIPEMENTS.md) pour plus d'informations.
|
||||
55
Dockerfile
Executable file
@@ -0,0 +1,55 @@
|
||||
# Dockerfile multi-stage pour IPWatch
|
||||
# Backend FastAPI + Frontend Vue 3
|
||||
|
||||
# Stage 1: Build frontend Vue
|
||||
FROM node:20-alpine AS frontend-build
|
||||
|
||||
WORKDIR /frontend
|
||||
|
||||
# Copier package.json et installer dépendances
|
||||
COPY frontend/package*.json ./
|
||||
RUN npm install
|
||||
|
||||
# Copier le code source et builder
|
||||
COPY frontend/ ./
|
||||
RUN npm run build
|
||||
|
||||
|
||||
# Stage 2: Image finale avec backend + frontend statique
|
||||
FROM python:3.11-slim
|
||||
|
||||
# Variables d'environnement
|
||||
ENV PYTHONUNBUFFERED=1
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
|
||||
# Installer les outils réseau nécessaires
|
||||
RUN apt-get update && apt-get install -y \
|
||||
iputils-ping \
|
||||
net-tools \
|
||||
tcpdump \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Créer le répertoire de travail
|
||||
WORKDIR /app
|
||||
|
||||
# Copier et installer les dépendances Python
|
||||
COPY backend/requirements.txt .
|
||||
RUN pip install --no-cache-dir -r requirements.txt
|
||||
|
||||
# Copier le code backend
|
||||
COPY backend/ ./backend/
|
||||
|
||||
# Copier le frontend buildé depuis le stage 1
|
||||
COPY --from=frontend-build /frontend/dist ./frontend/dist
|
||||
|
||||
# Créer les dossiers pour volumes
|
||||
RUN mkdir -p /app/data
|
||||
|
||||
# Copier config.yaml par défaut (sera écrasé par le volume)
|
||||
COPY config.yaml /app/config.yaml
|
||||
|
||||
# Exposer le port
|
||||
EXPOSE 8080
|
||||
|
||||
# Commande de démarrage
|
||||
CMD ["uvicorn", "backend.app.main:app", "--host", "0.0.0.0", "--port", "8080"]
|
||||
79
Makefile
Executable file
@@ -0,0 +1,79 @@
|
||||
# Makefile pour IPWatch
|
||||
|
||||
.PHONY: help build up down logs restart clean test install-backend install-frontend dev
|
||||
|
||||
help: ## Afficher l'aide
|
||||
@echo "IPWatch - Commandes disponibles:"
|
||||
@echo ""
|
||||
@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}'
|
||||
|
||||
# Docker
|
||||
build: ## Construire l'image Docker
|
||||
docker-compose build
|
||||
|
||||
up: ## Démarrer les conteneurs
|
||||
docker-compose up -d
|
||||
@echo "IPWatch démarré sur http://localhost:8080"
|
||||
|
||||
down: ## Arrêter les conteneurs
|
||||
docker-compose down
|
||||
|
||||
logs: ## Afficher les logs
|
||||
docker-compose logs -f
|
||||
|
||||
restart: ## Redémarrer les conteneurs
|
||||
docker-compose restart
|
||||
|
||||
clean: ## Nettoyer conteneurs, images et volumes
|
||||
docker-compose down -v
|
||||
rm -rf data/*.sqlite logs/*
|
||||
|
||||
# Développement
|
||||
install-backend: ## Installer dépendances backend
|
||||
cd backend && pip install -r requirements.txt
|
||||
|
||||
install-frontend: ## Installer dépendances frontend
|
||||
cd frontend && npm install
|
||||
|
||||
dev-backend: ## Lancer le backend en dev
|
||||
python -m backend.app.main  # exécuter depuis la racine du projet (le package est backend.app)
|
||||
|
||||
dev-frontend: ## Lancer le frontend en dev
|
||||
cd frontend && npm run dev
|
||||
|
||||
dev: ## Lancer backend + frontend en dev (tmux requis)
|
||||
@echo "Lancement backend et frontend..."
|
||||
@tmux new-session -d -s ipwatch 'python -m backend.app.main'
|
||||
@tmux split-window -h 'cd frontend && npm run dev'
|
||||
@tmux attach-session -t ipwatch
|
||||
|
||||
# Tests
|
||||
test: ## Exécuter les tests backend
|
||||
cd backend && pytest -v
|
||||
|
||||
test-coverage: ## Tests avec couverture
|
||||
cd backend && pytest --cov=app --cov-report=html
|
||||
|
||||
# Utilitaires
|
||||
init: ## Initialiser le projet (install + build)
|
||||
make install-backend
|
||||
make install-frontend
|
||||
make build
|
||||
|
||||
setup-config: ## Créer config.yaml depuis template (si absent)
|
||||
@if [ ! -f config.yaml ]; then \
|
||||
echo "Création de config.yaml..."; \
|
||||
cp config.yaml.example config.yaml 2>/dev/null || echo "config.yaml déjà présent"; \
|
||||
else \
|
||||
echo "config.yaml existe déjà"; \
|
||||
fi
|
||||
|
||||
db-backup: ## Sauvegarder la base de données
|
||||
@mkdir -p backups
|
||||
@cp data/db.sqlite backups/db_$$(date +%Y%m%d_%H%M%S).sqlite
|
||||
@echo "Sauvegarde créée dans backups/"
|
||||
|
||||
db-reset: ## Réinitialiser la base de données
|
||||
@echo "⚠️ Suppression de la base de données..."
|
||||
rm -f data/db.sqlite
|
||||
@echo "Base de données supprimée. Elle sera recréée au prochain démarrage."
|
||||
270
README.md
Executable file
@@ -0,0 +1,270 @@
|
||||
# IPWatch - Scanner Réseau Temps Réel
|
||||
|
||||
IPWatch est une application web de scan réseau qui visualise en temps réel l'état des adresses IP, leurs ports ouverts, et l'historique des détections sur votre réseau local.
|
||||
|
||||
## Fonctionnalités
|
||||
|
||||
- 🔍 **Scan réseau automatique** : Ping, ARP lookup, et scan de ports périodiques
|
||||
- 📊 **Visualisation temps réel** : Interface web avec mise à jour WebSocket
|
||||
- 🎨 **Thème Monokai** : Interface sombre avec codes couleurs intuitifs
|
||||
- 📝 **Gestion des IP** : Nommage, classification (connue/inconnue), métadonnées
|
||||
- 📈 **Historique 24h** : Suivi de l'évolution de l'état du réseau
|
||||
- 🔔 **Détection automatique** : Notification des nouvelles IP sur le réseau
|
||||
- 🐳 **Déploiement Docker** : Configuration simple avec docker-compose
|
||||
|
||||
## Technologies
|
||||
|
||||
### Backend
|
||||
- **FastAPI** - API REST et WebSocket
|
||||
- **SQLAlchemy** - ORM pour SQLite
|
||||
- **APScheduler** - Tâches planifiées
|
||||
- **Scapy** - Scan ARP et réseau
|
||||
|
||||
### Frontend
|
||||
- **Vue 3** - Framework UI avec Composition API
|
||||
- **Pinia** - State management
|
||||
- **Tailwind CSS** - Styles avec palette Monokai
|
||||
- **Vite** - Build tool
|
||||
|
||||
### Infrastructure
|
||||
- **Docker** - Conteneurisation
|
||||
- **SQLite** - Base de données
|
||||
- **WebSocket** - Communication temps réel
|
||||
|
||||
## Installation
|
||||
|
||||
### Avec Docker (recommandé)
|
||||
|
||||
1. **Cloner le repository**
|
||||
```bash
|
||||
git clone <repo-url>
|
||||
cd ipwatch
|
||||
```
|
||||
|
||||
2. **Configurer le réseau**
|
||||
|
||||
Éditer `config.yaml` et ajuster le CIDR de votre réseau :
|
||||
```yaml
|
||||
network:
|
||||
cidr: "192.168.1.0/24" # Adapter à votre réseau
|
||||
```
|
||||
|
||||
3. **Lancer avec docker-compose**
|
||||
```bash
|
||||
docker-compose up -d
|
||||
```
|
||||
|
||||
4. **Accéder à l'interface**
|
||||
|
||||
Ouvrir votre navigateur : `http://localhost:8080`
|
||||
|
||||
### Installation manuelle (développement)
|
||||
|
||||
#### Backend
|
||||
|
||||
```bash
|
||||
cd backend
|
||||
pip install -r requirements.txt
|
||||
cd .. && python -m backend.app.main  # lancer depuis la racine du projet
|
||||
```
|
||||
|
||||
#### Frontend
|
||||
|
||||
```bash
|
||||
cd frontend
|
||||
npm install
|
||||
npm run dev
|
||||
```
|
||||
|
||||
L'API sera accessible sur `http://localhost:8080`
|
||||
Le frontend sur `http://localhost:3000`
|
||||
|
||||
## Configuration
|
||||
|
||||
Le fichier `config.yaml` permet de configurer :
|
||||
|
||||
- **Réseau** : CIDR, gateway, DNS
|
||||
- **IPs connues** : Liste des appareils avec noms et emplacements
|
||||
- **Scan** : Intervalles ping/ports, parallélisation
|
||||
- **Ports** : Ports à scanner
|
||||
- **Historique** : Durée de rétention
|
||||
- **Interface** : Transparence, couleurs
|
||||
- **Base de données** : Chemin SQLite
|
||||
|
||||
Exemple :
|
||||
```yaml
|
||||
network:
|
||||
cidr: "192.168.1.0/24"
|
||||
|
||||
scan:
|
||||
ping_interval: 60 # Scan ping toutes les 60s
|
||||
port_scan_interval: 300 # Scan ports toutes les 5min
|
||||
parallel_pings: 50 # 50 pings simultanés max
|
||||
|
||||
ports:
|
||||
ranges:
|
||||
- "22" # SSH
|
||||
- "80" # HTTP
|
||||
- "443" # HTTPS
|
||||
- "3389" # RDP
|
||||
|
||||
ip_classes:
|
||||
"192.168.1.1":
|
||||
name: "Box Internet"
|
||||
location: "Entrée"
|
||||
host: "Routeur"
|
||||
```
|
||||
|
||||
## Interface utilisateur
|
||||
|
||||
L'interface est organisée en 3 colonnes :
|
||||
|
||||
### Colonne gauche - Détails IP
|
||||
- Informations détaillées de l'IP sélectionnée
|
||||
- Formulaire d'édition (nom, localisation, type d'hôte)
|
||||
- Informations réseau (MAC, vendor, hostname, ports ouverts)
|
||||
|
||||
### Colonne centrale - Grille d'IP
|
||||
- Vue d'ensemble de toutes les IP du réseau
|
||||
- Codes couleurs selon l'état :
|
||||
- 🟢 **Vert** : En ligne + connue
|
||||
- 🔵 **Cyan** : En ligne + inconnue
|
||||
- 🔴 **Rose** : Hors ligne + connue (bordure pointillée)
|
||||
- 🟣 **Violet** : Hors ligne + inconnue (bordure pointillée)
|
||||
- ⚪ **Gris** : IP libre
|
||||
- Filtres : En ligne, Hors ligne, Connues, Inconnues, Libres
|
||||
- Légende interactive
|
||||
|
||||
### Colonne droite - Nouvelles détections
|
||||
- Liste des IP récemment découvertes
|
||||
- Tri par ordre chronologique
|
||||
- Indicateur temps relatif
|
||||
|
||||
## API REST
|
||||
|
||||
### Endpoints IPs
|
||||
|
||||
- `GET /api/ips/` - Liste toutes les IPs (avec filtres optionnels)
|
||||
- `GET /api/ips/{ip}` - Détails d'une IP
|
||||
- `PUT /api/ips/{ip}` - Mettre à jour une IP
|
||||
- `DELETE /api/ips/{ip}` - Supprimer une IP
|
||||
- `GET /api/ips/{ip}/history` - Historique d'une IP
|
||||
- `GET /api/ips/stats/summary` - Statistiques globales
|
||||
|
||||
### Endpoints Scan
|
||||
|
||||
- `POST /api/scan/start` - Lancer un scan immédiat
|
||||
- `POST /api/scan/cleanup-history` - Nettoyer l'historique ancien
|
||||
|
||||
### WebSocket
|
||||
|
||||
- `WS /ws` - Connexion WebSocket pour notifications temps réel
|
||||
|
||||
Messages WebSocket :
|
||||
- `scan_start` - Début de scan
|
||||
- `scan_complete` - Fin de scan avec statistiques
|
||||
- `ip_update` - Changement d'état d'une IP
|
||||
- `new_ip` - Nouvelle IP détectée
|
||||
|
||||
## Tests
|
||||
|
||||
Exécuter les tests backend :
|
||||
|
||||
```bash
|
||||
cd backend
|
||||
pytest
|
||||
```
|
||||
|
||||
Tests disponibles :
|
||||
- `test_network.py` - Tests modules réseau (ping, ARP, port scan)
|
||||
- `test_models.py` - Tests modèles SQLAlchemy
|
||||
- `test_api.py` - Tests endpoints API
|
||||
- `test_scheduler.py` - Tests scheduler APScheduler
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
ipwatch/
|
||||
├── backend/
|
||||
│ ├── app/
|
||||
│ │ ├── core/ # Configuration, database
|
||||
│ │ ├── models/ # Modèles SQLAlchemy
|
||||
│ │ ├── routers/ # Endpoints API
|
||||
│ │ ├── services/ # Services réseau, scheduler, WebSocket
|
||||
│ │ └── main.py # Application FastAPI
|
||||
│ └── requirements.txt
|
||||
├── frontend/
|
||||
│ ├── src/
|
||||
│ │ ├── assets/ # CSS Monokai
|
||||
│ │ ├── components/ # Composants Vue
|
||||
│ │ ├── stores/ # Pinia stores
|
||||
│ │ └── main.js
|
||||
│ └── package.json
|
||||
├── tests/ # Tests backend
|
||||
├── config.yaml # Configuration
|
||||
├── docker-compose.yml
|
||||
└── Dockerfile
|
||||
|
||||
```
|
||||
|
||||
## Workflow de scan
|
||||
|
||||
Le scan réseau suit ce workflow (10 étapes) :
|
||||
|
||||
1. Charger configuration YAML
|
||||
2. Générer liste IP du CIDR
|
||||
3. Ping (parallélisé)
|
||||
4. ARP + MAC vendor lookup
|
||||
5. Port scan selon intervalle
|
||||
6. Classification état (online/offline)
|
||||
7. Mise à jour SQLite
|
||||
8. Détection nouvelles IP
|
||||
9. Push WebSocket vers clients
|
||||
10. Mise à jour UI temps réel
|
||||
|
||||
## Sécurité
|
||||
|
||||
⚠️ **Attention** : IPWatch nécessite des privilèges réseau élevés (ping, ARP).
|
||||
|
||||
Le conteneur Docker utilise :
|
||||
- `network_mode: host` - Accès au réseau local
|
||||
- `privileged: true` - Privilèges pour scan réseau
|
||||
- `cap_add: NET_ADMIN, NET_RAW` - Capacités réseau
|
||||
|
||||
**N'exposez pas cette application sur internet** - Usage réseau local uniquement.
|
||||
|
||||
## Volumes Docker
|
||||
|
||||
Trois volumes sont montés :
|
||||
- `./config.yaml` - Configuration (lecture seule)
|
||||
- `./data/` - Base de données SQLite
|
||||
- `./logs/` - Logs applicatifs
|
||||
|
||||
## Dépannage
|
||||
|
||||
### Le scan ne détecte aucune IP
|
||||
|
||||
1. Vérifier le CIDR dans `config.yaml`
|
||||
2. Vérifier que Docker a accès au réseau (`network_mode: host`)
|
||||
3. Vérifier les logs : `docker logs ipwatch`
|
||||
|
||||
### WebSocket déconnecté
|
||||
|
||||
- Vérifier que le port 8080 est accessible
|
||||
- Vérifier les logs du navigateur (F12 → Console)
|
||||
- Le WebSocket se reconnecte automatiquement après 5s
|
||||
|
||||
### Erreur de permissions réseau
|
||||
|
||||
Le conteneur nécessite `privileged: true` pour :
|
||||
- Envoi de paquets ICMP (ping)
|
||||
- Scan ARP
|
||||
- Capture de paquets réseau
|
||||
|
||||
## Licence
|
||||
|
||||
MIT
|
||||
|
||||
## Auteur
|
||||
|
||||
Développé avec Claude Code selon les spécifications IPWatch.
|
||||
260
STRUCTURE.md
Executable file
@@ -0,0 +1,260 @@
|
||||
# Structure du Projet IPWatch
|
||||
|
||||
## Vue d'ensemble
|
||||
|
||||
```
|
||||
ipwatch/
|
||||
├── backend/ # Backend FastAPI
|
||||
│ ├── app/
|
||||
│ │ ├── core/ # Configuration et database
|
||||
│ │ │ ├── config.py # Gestionnaire config YAML
|
||||
│ │ │ └── database.py # Setup SQLAlchemy
|
||||
│ │ ├── models/ # Modèles SQLAlchemy
|
||||
│ │ │ └── ip.py # Tables IP et IPHistory
|
||||
│ │ ├── routers/ # Endpoints API REST
|
||||
│ │ │ ├── ips.py # CRUD IPs + historique
|
||||
│ │ │ ├── scan.py # Contrôle scans
|
||||
│ │ │ └── websocket.py # Endpoint WebSocket
|
||||
│ │ ├── services/ # Services métier
|
||||
│ │ │ ├── network.py # Scanner réseau (ping, ARP, ports)
|
||||
│ │ │ ├── scheduler.py # APScheduler pour tâches périodiques
|
||||
│ │ │ └── websocket.py # Gestionnaire WebSocket
|
||||
│ │ └── main.py # Application FastAPI principale
|
||||
│ └── requirements.txt # Dépendances Python
|
||||
│
|
||||
├── frontend/ # Frontend Vue 3
|
||||
│ ├── src/
|
||||
│ │ ├── assets/
|
||||
│ │ │ └── main.css # Styles Monokai + animations
|
||||
│ │ ├── components/
|
||||
│ │ │ ├── AppHeader.vue # Header avec stats et contrôles
|
||||
│ │ │ ├── IPCell.vue # Cellule IP dans la grille
|
||||
│ │ │ ├── IPDetails.vue # Détails IP (colonne gauche)
|
||||
│ │ │ ├── IPGrid.vue # Grille d'IP (colonne centrale)
|
||||
│ │ │ └── NewDetections.vue # Nouvelles IP (colonne droite)
|
||||
│ │ ├── stores/
|
||||
│ │ │ └── ipStore.js # Store Pinia + WebSocket client
|
||||
│ │ ├── App.vue # Layout 3 colonnes
|
||||
│ │ └── main.js # Point d'entrée
|
||||
│ ├── package.json # Dépendances Node
|
||||
│ ├── vite.config.js # Configuration Vite
|
||||
│ ├── tailwind.config.js # Configuration Tailwind (Monokai)
|
||||
│ └── index.html # HTML principal
|
||||
│
|
||||
├── tests/ # Tests backend
|
||||
│ ├── test_network.py # Tests modules réseau
|
||||
│ ├── test_models.py # Tests modèles SQLAlchemy
|
||||
│ ├── test_api.py # Tests endpoints API
|
||||
│ └── test_scheduler.py # Tests APScheduler
|
||||
│
|
||||
├── config.yaml # Configuration principale
|
||||
├── docker-compose.yml # Orchestration Docker
|
||||
├── Dockerfile # Image multi-stage
|
||||
├── Makefile # Commandes utiles
|
||||
├── start.sh # Script démarrage rapide
|
||||
├── pytest.ini # Configuration pytest
|
||||
├── .gitignore # Exclusions Git
|
||||
├── .dockerignore # Exclusions Docker
|
||||
├── README.md # Documentation
|
||||
├── CLAUDE.md # Guide pour Claude Code
|
||||
└── STRUCTURE.md # Ce fichier
|
||||
```
|
||||
|
||||
## Flux de données
|
||||
|
||||
### 1. Scan réseau (backend)
|
||||
|
||||
```
|
||||
APScheduler (scheduler.py)
|
||||
↓ déclenche périodiquement
|
||||
NetworkScanner (network.py)
|
||||
↓ effectue scan complet
|
||||
├─→ Ping parallélisé
|
||||
├─→ ARP lookup + MAC vendor
|
||||
└─→ Port scan
|
||||
↓ résultats
|
||||
SQLAlchemy (models/ip.py)
|
||||
↓ enregistre dans
|
||||
SQLite (data/db.sqlite)
|
||||
↓ notifie via
|
||||
WebSocket Manager (services/websocket.py)
|
||||
↓ broadcast vers
|
||||
Clients WebSocket (frontend)
|
||||
```
|
||||
|
||||
### 2. Interface utilisateur (frontend)
|
||||
|
||||
```
|
||||
App.vue (layout 3 colonnes)
|
||||
├─→ IPDetails.vue (gauche)
|
||||
├─→ IPGrid.vue (centre)
|
||||
│ └─→ IPCell.vue (x254)
|
||||
└─→ NewDetections.vue (droite)
|
||||
↓ tous utilisent
|
||||
Pinia Store (ipStore.js)
|
||||
↓ communique avec
|
||||
├─→ API REST (/api/ips/*)
|
||||
└─→ WebSocket (/ws)
|
||||
```
|
||||
|
||||
### 3. Workflow complet d'un scan
|
||||
|
||||
```
|
||||
1. Scheduler déclenche scan
|
||||
2. NetworkScanner génère liste IP (CIDR)
|
||||
3. Ping parallélisé (50 simultanés)
|
||||
4. ARP lookup pour MAC/vendor
|
||||
5. Port scan (ports configurés)
|
||||
6. Classification état (online/offline)
|
||||
7. Mise à jour base de données
|
||||
8. Détection nouvelles IP
|
||||
9. Push WebSocket vers clients
|
||||
10. Mise à jour UI temps réel
|
||||
```
|
||||
|
||||
## Composants clés
|
||||
|
||||
### Backend
|
||||
|
||||
| Fichier | Responsabilité | Lignes |
|
||||
|---------|---------------|--------|
|
||||
| `services/network.py` | Scan réseau (ping, ARP, ports) | ~300 |
|
||||
| `services/scheduler.py` | Tâches planifiées | ~100 |
|
||||
| `services/websocket.py` | Gestionnaire WebSocket | ~150 |
|
||||
| `routers/ips.py` | API CRUD IPs | ~200 |
|
||||
| `routers/scan.py` | API contrôle scan | ~150 |
|
||||
| `models/ip.py` | Modèles SQLAlchemy | ~100 |
|
||||
| `core/config.py` | Gestion config YAML | ~150 |
|
||||
| `main.py` | Application FastAPI | ~150 |
|
||||
|
||||
### Frontend
|
||||
|
||||
| Fichier | Responsabilité | Lignes |
|
||||
|---------|---------------|--------|
|
||||
| `stores/ipStore.js` | State management + WebSocket | ~250 |
|
||||
| `components/IPGrid.vue` | Grille IP + filtres | ~100 |
|
||||
| `components/IPDetails.vue` | Détails + édition IP | ~200 |
|
||||
| `components/IPCell.vue` | Cellule IP individuelle | ~80 |
|
||||
| `components/NewDetections.vue` | Liste nouvelles IP | ~120 |
|
||||
| `assets/main.css` | Styles Monokai | ~150 |
|
||||
|
||||
## Points d'entrée
|
||||
|
||||
### Développement
|
||||
|
||||
**Backend** :
|
||||
```bash
|
||||
cd backend
|
||||
python -m backend.app.main
|
||||
# ou
|
||||
make dev-backend
|
||||
```
|
||||
|
||||
**Frontend** :
|
||||
```bash
|
||||
cd frontend
|
||||
npm run dev
|
||||
# ou
|
||||
make dev-frontend
|
||||
```
|
||||
|
||||
### Production (Docker)
|
||||
|
||||
```bash
|
||||
docker-compose up -d
|
||||
# ou
|
||||
./start.sh
|
||||
# ou
|
||||
make up
|
||||
```
|
||||
|
||||
## Configuration requise
|
||||
|
||||
### Backend
|
||||
- Python 3.11+
|
||||
- Privilèges réseau (ping, ARP)
|
||||
- Accès au réseau local
|
||||
|
||||
### Frontend
|
||||
- Node.js 20+
|
||||
- npm
|
||||
|
||||
### Docker
|
||||
- Docker 20+
|
||||
- docker-compose 2+
|
||||
|
||||
## Ports utilisés
|
||||
|
||||
- **8080** : API backend + frontend buildé (production)
|
||||
- **3000** : Frontend dev (développement)
|
||||
|
||||
## Volumes Docker
|
||||
|
||||
- `./config.yaml` → `/app/config.yaml` (ro)
|
||||
- `./data/` → `/app/data/`
|
||||
- `./logs/` → `/app/logs/`
|
||||
|
||||
## Base de données
|
||||
|
||||
**SQLite** : `data/db.sqlite`
|
||||
|
||||
Tables :
|
||||
- `ip` : Table principale des IP (14 colonnes)
|
||||
- `ip_history` : Historique des états (5 colonnes)
|
||||
|
||||
Index :
|
||||
- `ip.last_status`
|
||||
- `ip.known`
|
||||
- `ip_history.timestamp`
|
||||
- `ip_history.ip`
|
||||
|
||||
## Tests
|
||||
|
||||
Lancer les tests :
|
||||
```bash
|
||||
pytest
|
||||
# ou
|
||||
make test
|
||||
```
|
||||
|
||||
Couverture :
|
||||
```bash
|
||||
pytest --cov=backend.app --cov-report=html
|
||||
# ou
|
||||
make test-coverage
|
||||
```
|
||||
|
||||
## Commandes utiles
|
||||
|
||||
Voir toutes les commandes :
|
||||
```bash
|
||||
make help
|
||||
```
|
||||
|
||||
Principales commandes :
|
||||
- `make build` - Construire l'image
|
||||
- `make up` - Démarrer
|
||||
- `make down` - Arrêter
|
||||
- `make logs` - Voir les logs
|
||||
- `make test` - Tests
|
||||
- `make clean` - Nettoyer
|
||||
- `make db-backup` - Sauvegarder DB
|
||||
- `make db-reset` - Réinitialiser DB
|
||||
|
||||
## Dépendances principales
|
||||
|
||||
### Backend (Python)
|
||||
- fastapi 0.109.0
|
||||
- uvicorn 0.27.0
|
||||
- sqlalchemy 2.0.25
|
||||
- pydantic 2.5.3
|
||||
- apscheduler 3.10.4
|
||||
- scapy 2.5.0
|
||||
- pytest 7.4.4
|
||||
|
||||
### Frontend (JavaScript)
|
||||
- vue 3.4.15
|
||||
- pinia 2.1.7
|
||||
- axios 1.6.5
|
||||
- vite 5.0.11
|
||||
- tailwindcss 3.4.1
|
||||
294
SUIVI_EQUIPEMENTS.md
Normal file
@@ -0,0 +1,294 @@
|
||||
# 📊 Fonctionnalité de Suivi d'Équipements - IPWatch
|
||||
|
||||
## Vue d'ensemble
|
||||
|
||||
Cette nouvelle fonctionnalité permet de suivre des équipements spécifiques du réseau avec des actions de gestion à distance (Wake-on-LAN, arrêt).
|
||||
|
||||
## Modifications apportées
|
||||
|
||||
### 🔧 Backend (FastAPI)
|
||||
|
||||
#### 1. Modèle de données
|
||||
- **Fichier**: `backend/app/models/ip.py`
|
||||
- **Changement**: Ajout du champ `tracked` (Boolean) avec index dans la table `IP`
|
||||
|
||||
#### 2. Nouveau router tracking
|
||||
- **Fichier**: `backend/app/routers/tracking.py`
|
||||
- **Endpoints**:
|
||||
- `GET /api/tracking/` - Récupère toutes les IPs suivies
|
||||
- `POST /api/tracking/wol/{ip_address}` - Envoie un paquet Wake-on-LAN
|
||||
- `POST /api/tracking/shutdown/{ip_address}` - Éteint un équipement (à implémenter selon infrastructure)
|
||||
- `PATCH /api/tracking/{ip_address}/toggle` - Bascule l'état tracked d'une IP
|
||||
|
||||
#### 3. Migration de base de données
|
||||
- **Fichier**: `backend/app/migrations/add_tracked_field.py`
|
||||
- **Commande**: `python -m backend.app.migrations.add_tracked_field`
|
||||
- **Action**: Ajoute la colonne `tracked` et son index
|
||||
|
||||
#### 4. Dépendances
|
||||
- **Fichier**: `backend/requirements.txt`
|
||||
- **Ajout**: `wakeonlan==3.1.0` pour l'envoi de paquets Magic Packet
|
||||
|
||||
### 🎨 Frontend (Vue 3)
|
||||
|
||||
#### 1. Routing
|
||||
- **Fichier**: `frontend/src/router/index.js`
|
||||
- **Routes**:
|
||||
- `/` - Page principale (MainView)
|
||||
- `/tracking` - Page de suivi des équipements (TrackingView)
|
||||
|
||||
#### 2. Composants modifiés
|
||||
- **IPDetails.vue**: Ajout d'une checkbox "IP suivie"
|
||||
- **AppHeader.vue**: Ajout d'un bouton "Suivi" (jaune/orange)
|
||||
- **App.vue**: Remplacé par `<router-view />` pour le routing
|
||||
|
||||
#### 3. Nouveaux composants
|
||||
- **views/MainView.vue**: Page principale (anciennement App.vue)
|
||||
- **views/TrackingView.vue**: Page de suivi avec grille d'équipements
|
||||
|
||||
#### 4. Dépendances
|
||||
- **Fichier**: `frontend/package.json`
|
||||
- **Ajout**: `vue-router@^4.2.5`
|
||||
|
||||
## 📋 Instructions de déploiement
|
||||
|
||||
### Étape 1: Mise à jour du backend
|
||||
|
||||
```bash
|
||||
# Se placer dans le répertoire backend
|
||||
cd backend
|
||||
|
||||
# Installer la nouvelle dépendance wakeonlan
|
||||
pip install -r requirements.txt
|
||||
|
||||
# Exécuter la migration de base de données
|
||||
python -m backend.app.migrations.add_tracked_field
|
||||
```
|
||||
|
||||
**Sortie attendue**:
|
||||
```
|
||||
→ Ajout de la colonne 'tracked' à la table IP...
|
||||
→ Création de l'index sur 'tracked'...
|
||||
✓ Migration terminée avec succès!
|
||||
- Colonne 'tracked' ajoutée
|
||||
- Index 'idx_ip_tracked' créé
|
||||
```
|
||||
|
||||
### Étape 2: Mise à jour du frontend
|
||||
|
||||
```bash
|
||||
# Se placer dans le répertoire frontend
|
||||
cd frontend
|
||||
|
||||
# Installer les nouvelles dépendances
|
||||
npm install
|
||||
|
||||
# Rebuilder le frontend
|
||||
npm run build
|
||||
```
|
||||
|
||||
### Étape 3: Redémarrer l'application
|
||||
|
||||
#### Mode Docker:
|
||||
```bash
|
||||
# Reconstruire l'image
|
||||
docker-compose build
|
||||
|
||||
# Redémarrer le conteneur
|
||||
docker-compose up -d
|
||||
```
|
||||
|
||||
#### Mode développement:
|
||||
```bash
|
||||
# Terminal 1 - Backend
|
||||
cd backend
|
||||
python -m backend.app.main
|
||||
|
||||
# Terminal 2 - Frontend
|
||||
cd frontend
|
||||
npm run dev
|
||||
```
|
||||
|
||||
## 🎯 Utilisation
|
||||
|
||||
### 1. Marquer une IP comme suivie
|
||||
|
||||
1. Cliquez sur une cellule IP dans la grille
|
||||
2. Dans le panneau de gauche (détails), cochez **"IP suivie"**
|
||||
3. Cliquez sur **"Enregistrer"**
|
||||
|
||||
### 2. Accéder à la page de suivi
|
||||
|
||||
1. Cliquez sur le bouton **"Suivi"** dans le header (jaune/orange avec icône étoile)
|
||||
2. Vous arrivez sur `/tracking` avec la liste des équipements suivis
|
||||
|
||||
### 3. Actions disponibles
|
||||
|
||||
Pour chaque équipement suivi:
|
||||
|
||||
- **WOL** (bouton vert): Envoie un paquet Wake-on-LAN
|
||||
- Nécessite une adresse MAC
|
||||
- Désactivé si l'équipement est déjà en ligne
|
||||
|
||||
- **Éteindre** (bouton rose): Commande d'arrêt
|
||||
- ⚠️ Nécessite une configuration supplémentaire (voir ci-dessous)
|
||||
- Désactivé si l'équipement est hors ligne
|
||||
|
||||
- **Détails** (bouton violet): Retourne à la page principale avec l'IP sélectionnée
|
||||
|
||||
## ⚙️ Configuration Wake-on-LAN
|
||||
|
||||
### Prérequis matériels
|
||||
|
||||
Pour que WOL fonctionne, l'équipement cible doit avoir:
|
||||
|
||||
1. **BIOS/UEFI**: Option "Wake-on-LAN" activée
|
||||
2. **Carte réseau**: Support WOL activé dans les propriétés (Windows) ou via `ethtool` (Linux)
|
||||
3. **Alimentation**: Alimentation connectée (ATX)
|
||||
|
||||
### Configuration réseau
|
||||
|
||||
Le paquet Magic Packet WOL est envoyé en **broadcast** sur le réseau local. Assurez-vous que:
|
||||
- Le serveur IPWatch et l'équipement cible sont sur le même réseau local
|
||||
- Aucun firewall ne bloque les paquets UDP broadcast
|
||||
|
||||
## 🔧 Configuration Shutdown (avancé)
|
||||
|
||||
La fonctionnalité d'arrêt nécessite une configuration selon votre infrastructure:
|
||||
|
||||
### Option 1: SSH (Linux)
|
||||
|
||||
```python
|
||||
# Dans backend/app/routers/tracking.py
|
||||
import paramiko
|
||||
|
||||
def shutdown_via_ssh(ip_address, username, password):
|
||||
ssh = paramiko.SSHClient()
|
||||
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
|
||||
ssh.connect(ip_address, username=username, password=password)
|
||||
ssh.exec_command("sudo shutdown -h now")
|
||||
ssh.close()
|
||||
```
|
||||
|
||||
**Dépendance**: `pip install paramiko`
|
||||
|
||||
### Option 2: WMI (Windows)
|
||||
|
||||
```python
|
||||
# Nécessite wmi-client-wrapper
|
||||
import wmi
|
||||
|
||||
def shutdown_via_wmi(ip_address, username, password):
|
||||
c = wmi.WMI(computer=ip_address, user=username, password=password)
|
||||
os = c.Win32_OperatingSystem(Primary=1)[0]
|
||||
os.Shutdown()
|
||||
```
|
||||
|
||||
**Dépendance**: `pip install wmi-client-wrapper`
|
||||
|
||||
### Option 3: API REST (Équipements réseau)
|
||||
|
||||
Pour les équipements réseau (routeurs, switches) avec API REST.
|
||||
|
||||
### ⚠️ Sécurité
|
||||
|
||||
**IMPORTANT**: Stockez les credentials de manière sécurisée:
|
||||
- Utilisez des variables d'environnement
|
||||
- Ou un gestionnaire de secrets (Vault, AWS Secrets Manager)
|
||||
- **JAMAIS en clair dans le code**
|
||||
|
||||
## 🎨 Personnalisation visuelle
|
||||
|
||||
### Couleurs utilisées (Monokai)
|
||||
|
||||
La page de suivi utilise la palette Monokai définie dans `guidelines-css.md`:
|
||||
|
||||
- **Vert** (`#A6E22E`): Équipement en ligne, bouton WOL
|
||||
- **Rose** (`#F92672`): Équipement hors ligne, bouton Éteindre
|
||||
- **Jaune** (`#E6DB74`): Bouton "Suivi" dans le header
|
||||
- **Cyan** (`#66D9EF`): Bouton Rafraîchir
|
||||
- **Violet** (`#AE81FF`): Bouton Détails
|
||||
|
||||
### Layout
|
||||
|
||||
- **Grille responsive**: 1-4 colonnes selon la taille d'écran
|
||||
- **Cartes d'équipements**: Bordure dynamique selon l'état (en ligne/hors ligne)
|
||||
- **Halo lumineux**: Effet de shadow pour les équipements en ligne
|
||||
|
||||
## 🐛 Dépannage
|
||||
|
||||
### Erreur: "La bibliothèque 'wakeonlan' n'est pas installée"
|
||||
|
||||
```bash
|
||||
pip install wakeonlan==3.1.0
|
||||
```
|
||||
|
||||
### Erreur: "IP non trouvée dans la base de données"
|
||||
|
||||
Assurez-vous que:
|
||||
1. L'IP existe dans la base (scan réseau effectué)
|
||||
2. La migration a bien été exécutée
|
||||
|
||||
### WOL ne fonctionne pas
|
||||
|
||||
Vérifiez:
|
||||
1. L'adresse MAC est bien renseignée pour l'IP
|
||||
2. L'équipement cible a WOL activé dans le BIOS
|
||||
3. Le serveur et la cible sont sur le même réseau local
|
||||
|
||||
### Le bouton "Suivi" ne s'affiche pas
|
||||
|
||||
Vérifiez:
|
||||
1. `npm install` a bien été exécuté
|
||||
2. Le frontend a été rebuildé (`npm run build`)
|
||||
3. Le navigateur n'a pas de cache (Ctrl+Shift+R pour forcer le refresh)
|
||||
|
||||
## 📊 Structure des fichiers
|
||||
|
||||
```
|
||||
ipwatch/
|
||||
├── backend/
|
||||
│ ├── app/
|
||||
│ │ ├── models/ip.py [MODIFIÉ]
|
||||
│ │ ├── routers/
|
||||
│ │ │ ├── ips.py [MODIFIÉ]
|
||||
│ │ │ └── tracking.py [NOUVEAU]
|
||||
│ │ ├── migrations/
|
||||
│ │ │ └── add_tracked_field.py [NOUVEAU]
|
||||
│ │ └── main.py [MODIFIÉ]
|
||||
│ └── requirements.txt [MODIFIÉ]
|
||||
│
|
||||
├── frontend/
|
||||
│ ├── src/
|
||||
│ │ ├── router/
|
||||
│ │ │ └── index.js [NOUVEAU]
|
||||
│ │ ├── views/
|
||||
│ │ │ ├── MainView.vue [NOUVEAU]
|
||||
│ │ │ └── TrackingView.vue [NOUVEAU]
|
||||
│ │ ├── components/
|
||||
│ │ │ ├── AppHeader.vue [MODIFIÉ]
|
||||
│ │ │ └── IPDetails.vue [MODIFIÉ]
|
||||
│ │ ├── App.vue [MODIFIÉ]
|
||||
│ │ └── main.js [MODIFIÉ]
|
||||
│ └── package.json [MODIFIÉ]
|
||||
│
|
||||
└── SUIVI_EQUIPEMENTS.md [CE FICHIER]
|
||||
```
|
||||
|
||||
## 🚀 Améliorations futures
|
||||
|
||||
- [ ] Groupes d'équipements (ex: "Serveurs", "Imprimantes")
|
||||
- [ ] Historique des démarrages/arrêts
|
||||
- [ ] Notifications push lors de changements d'état
|
||||
- [ ] Planification horaire (démarrage/arrêt automatique)
|
||||
- [ ] Graphiques d'uptime
|
||||
- [ ] Support IPMI/iLO/iDRAC pour serveurs
|
||||
- [ ] Export CSV/Excel des équipements suivis
|
||||
- [ ] Tags personnalisés
|
||||
|
||||
## 📞 Support
|
||||
|
||||
Pour toute question ou problème:
|
||||
1. Consultez les logs du backend: `docker logs ipwatch_backend`
|
||||
2. Consultez la console du navigateur (F12)
|
||||
3. Vérifiez que la migration a bien été exécutée
|
||||
9
amelioration.md
Normal file
@@ -0,0 +1,9 @@
|
||||
- [x] brainstorming sur les options offertes par l'ajout d'un accès API à mon serveur OPNsense => créer un fichier opnsense_todo.md avec des listes de tâches d'amélioration à ajouter dans une section brainstorming. On pourra ensuite les déplacer, si elles sont sélectionnées, vers les sections todo puis done
|
||||
- [x] analyse et brainstorming sur l'onglet architecture. Amélioration via une intégration de ReactFlow ou d'un autre framework similaire ? Création d'un fichier amelioration_onglet_architecture.md pour intégrer les résultats du brainstorming et lister les améliorations à réaliser dans la section todo
|
||||
- [ ] ajouter un accès via API à un serveur OPNsense (10.0.0.1) :
|
||||
key=&lt;REDACTED&gt; <!-- SECURITY: une clé API réelle était committée ici en clair. La révoquer dans OPNsense, en générer une nouvelle, et la stocker via variable d'environnement ou gestionnaire de secrets — jamais dans le dépôt. -->
|
||||
secret=&lt;REDACTED&gt; <!-- SECURITY: le secret API réel était committé ici en clair. Le révoquer et le fournir via variable d'environnement (.env non versionné) ou gestionnaire de secrets. -->
|
||||
intégrer ces options dans config.yaml et les rendre également accessibles dans les paramètres
|
||||
- [ ] ajouter un bouton dans le volet gauche pour ajouter le paramétrage d'un équipement dans le mappage statique OPNsense. Possibilité d'ajouter des mappages avec des IP hors de la plage DHCP dans OPNsense ?
|
||||
- [ ] ajouter une sauvegarde (backup) de la BDD dans les paramètres
|
||||
- [ ] brainstorming : ajout d'un onglet OPNsense qui présente clairement les services actifs et les paramétrages disponibles (style tableau de bord), avec des tooltips explicatifs clairs, une section logs et une section erreurs
|
||||
168
amelioration_onglet_architecture.md
Normal file
@@ -0,0 +1,168 @@
|
||||
# Amelioration Onglet Architecture - Brainstorming & Todo
|
||||
|
||||
## Analyse de l'existant
|
||||
|
||||
### Etat actuel (ArchitectureView.vue - 1543 lignes)
|
||||
L'onglet Architecture est un editeur visuel de diagrammes reseau avec :
|
||||
|
||||
**Fonctionnel :**
|
||||
- Canvas SVG avec drag-drop d'objets depuis une palette (7 types : world, home, computer, network, room, vm, service)
|
||||
- Deplacement et redimensionnement des objets
|
||||
- Systeme de connecteurs (4 cotes, 0-8 par cote)
|
||||
- Splines (courbes de liaison) avec points de controle editables
|
||||
- Relations parent-enfant (logiques, non visuelles)
|
||||
- Integration IP : sync avec l'API pour recuperer les donnees reseau
|
||||
- Panneau de proprietes complet (60+ proprietes editables)
|
||||
- Persistence JSON (`data/architecture.json`)
|
||||
- Edition inline de labels, verrouillage d'objets
|
||||
|
||||
**Limitations actuelles :**
|
||||
- Pas d'imbrication visuelle (enfants non positionnes relativement au parent)
|
||||
- Pas de zoom/pan natif (pas de navigation fluide dans grands diagrammes)
|
||||
- Pas de minimap pour vue d'ensemble
|
||||
- Pas de undo/redo
|
||||
- Pas de multi-selection / group operations
|
||||
- Pas de snap-to-grid ou alignement automatique
|
||||
- Pas de bibliotheque d'objets reutilisables
|
||||
- Pas de multi-diagrammes
|
||||
- Code monolithique (1543 lignes dans un seul composant)
|
||||
- Performance limitee au-dela de 100+ objets (pas de virtualisation)
|
||||
- Splines basiques (quadratiques, pas de routage intelligent)
|
||||
|
||||
---
|
||||
|
||||
## Brainstorming : Vue Flow vs implementation actuelle
|
||||
|
||||
### Option A : Migrer vers Vue Flow (@vue-flow/core)
|
||||
|
||||
**Qu'est-ce que Vue Flow ?**
|
||||
- Port Vue 3 de ReactFlow (reference React pour les node-based UIs)
|
||||
- Librairie mature : v1.48.2, activement maintenue, 79+ projets dependants
|
||||
- Site : https://vueflow.dev | GitHub : https://github.com/bcakmakoglu/vue-flow
|
||||
|
||||
**Fonctionnalites cles :**
|
||||
- Zoom et pan fluides (natifs, optimises)
|
||||
- Minimap integree (vue d'ensemble en bas a droite)
|
||||
- Controls integres (zoom in/out/fit, panel en bas a gauche)
|
||||
- Background configurable (dots, lines, cross)
|
||||
- Nodes personnalisables (Custom Nodes = composants Vue complets)
|
||||
- Edges personnalisables (Custom Edges = SVG custom)
|
||||
- Handles (points de connexion sur les nodes, equivalent des connecteurs actuels)
|
||||
- Drag-and-drop natif
|
||||
- Selection simple et multiple
|
||||
- Evenements riches (onConnect, onNodeDrag, onEdgeUpdate, etc.)
|
||||
- State management integre (useVueFlow composable)
|
||||
- Types de nodes inclus : default, input, output
|
||||
- Types d'edges inclus : bezier, straight, step, smoothstep
|
||||
- Sub-flows (nodes imbriques dans d'autres nodes = groupes)
|
||||
- Snap-to-grid
|
||||
- Performant (virtualisation des nodes hors viewport)
|
||||
|
||||
**Avantages pour IPWatch :**
|
||||
| Fonctionnalite | Actuel (custom SVG) | Avec Vue Flow |
|
||||
|----------------|---------------------|---------------|
|
||||
| Zoom/Pan | Non | Natif, fluide |
|
||||
| Minimap | Non | Composant inclus |
|
||||
| Snap-to-grid | Non | Option native |
|
||||
| Sub-flows (groupes) | Non | Nodes imbriques |
|
||||
| Multi-selection | Non | Natif |
|
||||
| Performance 100+ nodes | Problematique | Virtualisation |
|
||||
| Routing edges | Quadratique simple | Bezier, step, smoothstep, custom |
|
||||
| Undo/Redo | Non | Via plugin/composable |
|
||||
| Maintenance | 1543 lignes custom | Communaute active |
|
||||
|
||||
**Inconvenients / Risques :**
|
||||
- Migration significative : recrire la logique d'affichage
|
||||
- Les connecteurs actuels (multi-connecteurs par cote) devront etre adaptes aux "Handles" de Vue Flow
|
||||
- Les splines avec points de controle editables sont plus avancees que les edges standard de Vue Flow (mais possible via Custom Edges)
|
||||
- Le panneau de proprietes devra etre refait (mais c'est l'occasion de l'ameliorer)
|
||||
- Dependance a une librairie tierce (risque de maintenance)
|
||||
- Courbe d'apprentissage pour l'API Vue Flow
|
||||
|
||||
### Option B : Ameliorer l'implementation actuelle
|
||||
|
||||
**Avantages :**
|
||||
- Pas de migration, evolution progressive
|
||||
- Controle total sur le code
|
||||
- Deja fonctionnel pour le cas d'usage actuel
|
||||
|
||||
**Inconvenients :**
|
||||
- Reimplementer zoom/pan/minimap/snap = beaucoup de travail
|
||||
- Maintenance lourde du code custom SVG
|
||||
- Performance difficile a optimiser sans virtualisation
|
||||
|
||||
### Option C : Approche hybride
|
||||
|
||||
Utiliser Vue Flow pour le canvas principal (zoom, pan, nodes, edges, minimap) tout en conservant :
|
||||
- Le panneau de proprietes actuel (adapte)
|
||||
- La palette d'outils actuelle
|
||||
- Le systeme de persistence JSON existant
|
||||
- L'integration IP existante
|
||||
|
||||
**C'est l'option recommandee.** Vue Flow gere le "moteur graphique" et IPWatch garde le controle sur la logique metier.
|
||||
|
||||
---
|
||||
|
||||
### Autres frameworks evalues
|
||||
|
||||
| Framework | Type | Vue 3 | Avantages | Inconvenients |
|
||||
|-----------|------|-------|-----------|---------------|
|
||||
| **Vue Flow** | Node graph | Natif | Complet, actif, ReactFlow-like | Courbe d'apprentissage |
|
||||
| **Cytoscape.js** | Graphe generique | Wrapper | Tres puissant, algorithmes de layout | Lourd, API complexe, pas Vue-natif |
|
||||
| **D3.js** | Bas niveau | Non | Ultra-flexible | Enorme effort d'implementation |
|
||||
| **Drawflow** | Node editor | Oui | Simple | Trop basique, peu maintenu |
|
||||
| **JointJS** | Diagrammes | Non | Professionnel | Payant (licence commerciale), pas Vue |
|
||||
| **GoJS** | Diagrammes | Non | Tres complet | Payant, pas Vue-natif |
|
||||
| **jsPlumb** | Connexions | Wrapper | Bon pour les connexions | Pas de gestion de nodes |
|
||||
| **Mermaid** | Diagrammes texte | Non | Declaratif | Pas interactif |
|
||||
|
||||
**Verdict : Vue Flow est le meilleur choix pour IPWatch** - natif Vue 3, fonctionnalites proches de l'existant, communaute active.
|
||||
|
||||
---
|
||||
|
||||
## Ameliorations identifiees
|
||||
|
||||
### Phase 1 : Migration vers Vue Flow (fondations)
|
||||
- [ ] Installer `@vue-flow/core`, `@vue-flow/minimap`, `@vue-flow/controls`, `@vue-flow/background`
|
||||
- [ ] Creer un composant `ArchitectureCanvas.vue` base sur Vue Flow
|
||||
- [ ] Adapter le format de donnees `architecture.json` (items → nodes, splines → edges)
|
||||
- [ ] Ecrire un adaptateur de migration pour convertir les donnees existantes
|
||||
- [ ] Implementer les Custom Nodes pour chaque type (world, home, computer, network, room, vm, service)
|
||||
- [ ] Implementer les Custom Edges pour reproduire le style des splines actuelles
|
||||
- [ ] Activer minimap, controls et background
|
||||
- [ ] Conserver le panneau de proprietes (adapte a la nouvelle structure)
|
||||
|
||||
### Phase 2 : Nouvelles fonctionnalites Vue Flow
|
||||
- [ ] Zoom/Pan fluide avec raccourcis clavier (Ctrl+scroll, espace+drag)
|
||||
- [ ] Snap-to-grid configurable
|
||||
- [ ] Multi-selection (Shift+click, rectangle de selection)
|
||||
- [ ] Sub-flows : grouper des nodes dans un node parent (imbrication visuelle)
|
||||
- [ ] Undo/Redo (Ctrl+Z / Ctrl+Y)
|
||||
- [ ] Copier/Coller de nodes
|
||||
- [ ] Auto-layout : algorithme de positionnement automatique (dagre, elkjs)
|
||||
|
||||
### Phase 3 : Ameliorations UX
|
||||
- [ ] Bibliotheque d'objets : sauvegarder et reutiliser des configurations de nodes
|
||||
- [ ] Templates de diagrammes predefinis (reseau maison, datacenter, etc.)
|
||||
- [ ] Export en image (PNG/SVG) du diagramme
|
||||
- [ ] Mode lecture seule (partage de vue)
|
||||
- [ ] Recherche de nodes (par nom, IP, type)
|
||||
- [ ] Filtrage par type de node (afficher/masquer categories)
|
||||
- [ ] Legende dynamique
|
||||
|
||||
### Phase 4 : Integrations avancees
|
||||
- [ ] Sync temps reel avec les scans reseau (nodes changent de couleur selon l'etat online/offline)
|
||||
- [ ] Affichage du trafic sur les edges (epaisseur proportionnelle au trafic via OPNsense API)
|
||||
- [ ] Integration OPNsense : noeud firewall avec interfaces et regles
|
||||
- [ ] Multi-diagrammes : gestion de plusieurs vues (physique, logique, par etage, par VLAN)
|
||||
- [ ] Animation des flux reseau sur les edges
|
||||
|
||||
---
|
||||
|
||||
## Todo
|
||||
|
||||
*(deplacer ici les elements valides a realiser)*
|
||||
|
||||
## Done
|
||||
|
||||
*(deplacer ici les elements termines)*
|
||||
17
architecture-technique.md
Executable file
@@ -0,0 +1,17 @@
|
||||
# architecture-technique.md
|
||||
|
||||
## Backend
|
||||
- FastAPI + SQLAlchemy + APScheduler
|
||||
- Modules réseau : ping, arp, port scan
|
||||
- WebSocket pour push temps réel
|
||||
- APIs REST pour : IP, scan, paramètres, historique
|
||||
|
||||
## Frontend
|
||||
- Vue 3 + Vite + Tailwind
|
||||
- State global (Pinia)
|
||||
- WebSocket client
|
||||
|
||||
## Docker
|
||||
- service web (backend + frontend)
|
||||
- volume config.yaml
|
||||
- volume db.sqlite
|
||||
588
architecture.md
Normal file
@@ -0,0 +1,588 @@
|
||||
# Architecture réseau (draft)
|
||||
Date: 2025-12-25
|
||||
|
||||
## Vision générale
|
||||
- Modèle principal: boîtes imbriquées (composition) + liaisons (graph).
|
||||
- Objectifs: représenter la topologie physique/logique et les relations réseau.
|
||||
|
||||
## Composition (boîtes imbriquées)
|
||||
- Exemple de hiérarchie:
|
||||
- World (Internet)
|
||||
- Maison
|
||||
- Livebox
|
||||
- Switch
|
||||
- Borne Wi‑Fi
|
||||
- Laptop
|
||||
- Smartphone
|
||||
- Server Proxmox
|
||||
- VM Debian
|
||||
- Docker
|
||||
- Service
|
||||
- Raspberry Pi
|
||||
- Service(s)
|
||||
- IoT Hub
|
||||
- Service(s)
|
||||
|
||||
## Liaisons (graph)
|
||||
- Les boîtes peuvent être reliées entre elles indépendamment de la hiérarchie.
|
||||
- Exemples:
|
||||
- Service -> Docker -> VM -> Server -> Switch -> Livebox -> Maison -> World
|
||||
- AP (Wi‑Fi) -> Laptop / Smartphone
|
||||
- Style des liaisons:
|
||||
- LAN: trait plein
|
||||
- Wi‑Fi: trait pointillé
|
||||
|
||||
## Ports
|
||||
- Nombre de ports ajustable.
|
||||
- Plusieurs connexions possibles sur un même port.
|
||||
|
||||
## Objets composés
|
||||
- Un objet peut contenir des enfants.
|
||||
- Enfants positionnés par rapport au parent (coordonnées locales).
|
||||
- Types possibles d’enfants: icône, forme, texte, badge, objet complet.
|
||||
|
||||
## Redimensionnement
|
||||
- Les conteneurs sont redimensionnables pour inclure d’autres objets.
|
||||
- À décider:
|
||||
- Taille des enfants figée ou proportionnelle au parent lors du resize.
|
||||
|
||||
## Modèle de données (à préciser)
|
||||
- Node (id, type, label, size, children[], ports[])
|
||||
- Port (id, label, capacity)
|
||||
- Edge (from, to, kind: LAN/Wi‑Fi)
|
||||
|
||||
## Modèle d’objet (draft)
|
||||
- Parent optionnel + enfants multiples.
|
||||
- Types d’objets: rectangle, icône, image, table, texte, lien, commande, connecteur.
|
||||
- Propriétés communes:
|
||||
- id, name, parentId, children[]
|
||||
- category
|
||||
- position: x, y (locales au parent), zIndex
|
||||
- size: width, height
|
||||
- rotation: degrés
|
||||
  - keepRatio: true | false (colonne `keep_ratio` côté DB)
|
||||
- visible
|
||||
- anchor: free | edge, edgeSide: 1|2|3|4
|
||||
- style: fillColor, strokeColor, opacity
|
||||
- font: family, size, weight (normal|bold), style (normal|italic), color
|
||||
- state: libre | accroché | en bordure
|
||||
- command: texte/commande associée (optionnel)
|
||||
- connecteurs: liste d’IDs d’objets connecteur
|
||||
|
||||
### Image
|
||||
- formats: png, jpg, svg, webp
|
||||
- size: width, height
|
||||
- anchor: position d’accrochage
|
||||
- state: libre | accroché | en bordure
|
||||
- storage: fichier sur disque + chemin (`src`) dans JSON/DB (pas d’image brute)
|
||||
- displayName: nom lisible optionnel
|
||||
- naming: conserver le nom lisible + suffixe unique (ex: `livebox_7f3a.png`)
|
||||
- upload:
|
||||
- création d’un nouveau fichier avec métadonnées
|
||||
- réduction de poids possible à l’upload et après (optimize)
|
||||
- library:
|
||||
- images ajoutées dans une bibliothèque
|
||||
- metadata category obligatoire
|
||||
|
||||
### Table
|
||||
- size: width, height
|
||||
- position: x, y (locales au parent)
|
||||
- parentId, children[]
|
||||
- grid: rows, cols
|
||||
- headers: title1, title2, title3...
|
||||
- style spécifique:
|
||||
- borderColor, fillColor, headerColor, textColor
|
||||
- fontWeight (normal|bold), fontStyle (normal|italic), fontSize, fontFamily
|
||||
- anchor: x, y, zIndex
|
||||
- connecteurs: liste d’IDs d’objets connecteur
|
||||
|
||||
### Connecteur
|
||||
- Objet à part entière:
|
||||
- id, name, parentId, children[]
|
||||
- endpoints: connecteur1 -> targetId, connecteur2 -> targetId
|
||||
|
||||
## Exemple Mermaid (boîtes imbriquées + liaisons)
|
||||
```mermaid
|
||||
flowchart TB
|
||||
%% Boîtes imbriquées
|
||||
subgraph World["World (Internet)"]
|
||||
subgraph Maison["Maison"]
|
||||
Livebox["Livebox"]
|
||||
Switch["Switch"]
|
||||
subgraph Server["Server Proxmox"]
|
||||
VM["VM Debian"]
|
||||
subgraph Docker["Docker"]
|
||||
Service["Service"]
|
||||
end
|
||||
end
|
||||
Laptop["Laptop"]
|
||||
Smartphone["Smartphone"]
|
||||
end
|
||||
end
|
||||
|
||||
%% Liaisons (réseau/logique)
|
||||
Service --> Docker
|
||||
Docker --> VM
|
||||
VM --> Server
|
||||
Server --> Switch
|
||||
Switch --> Livebox
|
||||
Livebox --> Maison
|
||||
Maison --> World
|
||||
```
|
||||
|
||||
## Exemple JSON (draft)
|
||||
```json
|
||||
{
|
||||
"nodes": [
|
||||
{
|
||||
"id": "world",
|
||||
"type": "rectangle",
|
||||
"name": "World",
|
||||
"parentId": null,
|
||||
"children": ["maison"],
|
||||
"position": { "x": 0, "y": 0 },
|
||||
"zIndex": 0,
|
||||
"visible": true,
|
||||
"anchor": "free",
|
||||
"edgeSide": null,
|
||||
"style": {
|
||||
"fillColor": "#2B2D2A",
|
||||
"strokeColor": "#8F8F7A",
|
||||
"opacity": 1
|
||||
},
|
||||
"font": {
|
||||
"family": "Space Grotesk",
|
||||
"size": 14,
|
||||
"weight": "bold",
|
||||
"style": "normal",
|
||||
"color": "#E6DB74"
|
||||
},
|
||||
"state": "libre",
|
||||
"command": null,
|
||||
"connecteurs": []
|
||||
},
|
||||
{
|
||||
"id": "maison",
|
||||
"type": "rectangle",
|
||||
"name": "Maison",
|
||||
"parentId": "world",
|
||||
"children": ["livebox", "server"],
|
||||
"position": { "x": 40, "y": 40 },
|
||||
"zIndex": 1,
|
||||
"visible": true,
|
||||
"anchor": "free",
|
||||
"edgeSide": null,
|
||||
"style": {
|
||||
"fillColor": "#3A3D39",
|
||||
"strokeColor": "#A6E22E",
|
||||
"opacity": 0.95
|
||||
},
|
||||
"font": {
|
||||
"family": "Space Grotesk",
|
||||
"size": 12,
|
||||
"weight": "bold",
|
||||
"style": "normal",
|
||||
"color": "#E6DB74"
|
||||
},
|
||||
"state": "libre",
|
||||
"command": null,
|
||||
"connecteurs": []
|
||||
},
|
||||
{
|
||||
"id": "livebox",
|
||||
"type": "image",
|
||||
"name": "Livebox",
|
||||
"category": "network",
|
||||
"parentId": "maison",
|
||||
"children": [],
|
||||
"position": { "x": 20, "y": 30 },
|
||||
"size": { "width": 48, "height": 48 },
|
||||
"rotation": 0,
|
||||
"keepRatio": true,
|
||||
"zIndex": 2,
|
||||
"visible": true,
|
||||
"anchor": "free",
|
||||
"edgeSide": null,
|
||||
"style": {
|
||||
"fillColor": "#272822",
|
||||
"strokeColor": "#F92672",
|
||||
"opacity": 1
|
||||
},
|
||||
"font": {
|
||||
"family": "Space Grotesk",
|
||||
"size": 11,
|
||||
"weight": "normal",
|
||||
"style": "normal",
|
||||
"color": "#F8F8F2"
|
||||
},
|
||||
"state": "libre",
|
||||
"command": null,
|
||||
"connecteurs": ["c-livebox"],
|
||||
"image": {
|
||||
"format": "svg",
|
||||
"src": "livebox.svg",
|
||||
"displayName": "Livebox",
|
||||
"category": "network",
|
||||
"sizeBytes": 12456,
|
||||
"width": 48,
|
||||
"height": 48
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "server",
|
||||
"type": "rectangle",
|
||||
"name": "Server Proxmox",
|
||||
"parentId": "maison",
|
||||
"children": ["vm-debian"],
|
||||
"position": { "x": 140, "y": 30 },
|
||||
"zIndex": 2,
|
||||
"visible": true,
|
||||
"anchor": "free",
|
||||
"edgeSide": null,
|
||||
"style": {
|
||||
"fillColor": "#2F3130",
|
||||
"strokeColor": "#66D9EF",
|
||||
"opacity": 1
|
||||
},
|
||||
"font": {
|
||||
"family": "Space Grotesk",
|
||||
"size": 11,
|
||||
"weight": "bold",
|
||||
"style": "normal",
|
||||
"color": "#E6DB74"
|
||||
},
|
||||
"state": "libre",
|
||||
"command": "ssh root@server",
|
||||
"connecteurs": ["c-server"]
|
||||
},
|
||||
{
|
||||
"id": "vm-debian",
|
||||
"type": "rectangle",
|
||||
"name": "VM Debian",
|
||||
"parentId": "server",
|
||||
"children": ["docker"],
|
||||
"position": { "x": 16, "y": 36 },
|
||||
"zIndex": 3,
|
||||
"visible": true,
|
||||
"anchor": "free",
|
||||
"edgeSide": null,
|
||||
"style": {
|
||||
"fillColor": "#1F2A2A",
|
||||
"strokeColor": "#A6E22E",
|
||||
"opacity": 1
|
||||
},
|
||||
"font": {
|
||||
"family": "Space Grotesk",
|
||||
"size": 10,
|
||||
"weight": "normal",
|
||||
"style": "normal",
|
||||
"color": "#E6DB74"
|
||||
},
|
||||
"state": "libre",
|
||||
"command": null,
|
||||
"connecteurs": []
|
||||
},
|
||||
{
|
||||
"id": "docker",
|
||||
"type": "rectangle",
|
||||
"name": "Docker",
|
||||
"parentId": "vm-debian",
|
||||
"children": ["service-web"],
|
||||
"position": { "x": 12, "y": 28 },
|
||||
"zIndex": 4,
|
||||
"visible": true,
|
||||
"anchor": "free",
|
||||
"edgeSide": null,
|
||||
"style": {
|
||||
"fillColor": "#263238",
|
||||
"strokeColor": "#F8F8F2",
|
||||
"opacity": 1
|
||||
},
|
||||
"font": {
|
||||
"family": "Space Grotesk",
|
||||
"size": 10,
|
||||
"weight": "normal",
|
||||
"style": "normal",
|
||||
"color": "#F8F8F2"
|
||||
},
|
||||
"state": "libre",
|
||||
"command": "docker ps",
|
||||
"connecteurs": []
|
||||
},
|
||||
{
|
||||
"id": "service-web",
|
||||
"type": "text",
|
||||
"name": "Service Web",
|
||||
"parentId": "docker",
|
||||
"children": [],
|
||||
"position": { "x": 10, "y": 18 },
|
||||
"zIndex": 5,
|
||||
"visible": true,
|
||||
"anchor": "free",
|
||||
"edgeSide": null,
|
||||
"style": {
|
||||
"fillColor": "transparent",
|
||||
"strokeColor": "transparent",
|
||||
"opacity": 1
|
||||
},
|
||||
"font": {
|
||||
"family": "Space Grotesk",
|
||||
"size": 10,
|
||||
"weight": "normal",
|
||||
"style": "italic",
|
||||
"color": "#A6E22E"
|
||||
},
|
||||
"state": "libre",
|
||||
"command": null,
|
||||
"connecteurs": ["c-service"]
|
||||
}
|
||||
],
|
||||
"connecteurs": [
|
||||
{
|
||||
"id": "c-livebox",
|
||||
"name": "LAN Livebox",
|
||||
"parentId": "livebox",
|
||||
"children": [],
|
||||
"endpoints": [
|
||||
{ "name": "c1", "targetId": "c-server" },
|
||||
{ "name": "c2", "targetId": "world" }
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "c-server",
|
||||
"name": "LAN Server",
|
||||
"parentId": "server",
|
||||
"children": [],
|
||||
"endpoints": [
|
||||
{ "name": "c1", "targetId": "c-livebox" },
|
||||
{ "name": "c2", "targetId": "c-service" }
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "c-service",
|
||||
"name": "Service Link",
|
||||
"parentId": "service-web",
|
||||
"children": [],
|
||||
"endpoints": [
|
||||
{ "name": "c1", "targetId": "c-server" },
|
||||
{ "name": "c2", "targetId": "docker" }
|
||||
]
|
||||
}
|
||||
],
|
||||
"edges": [
|
||||
{ "from": "c-livebox", "to": "c-server", "kind": "lan" },
|
||||
{ "from": "c-livebox", "to": "world", "kind": "lan" }
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## UML (tables SQLite - draft)
|
||||
```mermaid
|
||||
classDiagram
|
||||
class diagram {
|
||||
+id TEXT
|
||||
+name TEXT
|
||||
+created_at TEXT
|
||||
+updated_at TEXT
|
||||
+version TEXT
|
||||
}
|
||||
class node {
|
||||
+id TEXT
|
||||
+diagram_id TEXT
|
||||
+type TEXT
|
||||
+name TEXT
|
||||
+category TEXT
|
||||
+parent_id TEXT
|
||||
+x REAL
|
||||
+y REAL
|
||||
+width REAL
|
||||
+height REAL
|
||||
+rotation REAL
|
||||
+keep_ratio INTEGER
|
||||
+z_index INTEGER
|
||||
+visible INTEGER
|
||||
+anchor TEXT
|
||||
+edge_side INTEGER
|
||||
+state TEXT
|
||||
}
|
||||
class node_style {
|
||||
+node_id TEXT
|
||||
+fill_color TEXT
|
||||
+stroke_color TEXT
|
||||
+opacity REAL
|
||||
+font_family TEXT
|
||||
+font_size REAL
|
||||
+font_weight TEXT
|
||||
+font_style TEXT
|
||||
+font_color TEXT
|
||||
}
|
||||
class node_command {
|
||||
+node_id TEXT
|
||||
+command_text TEXT
|
||||
}
|
||||
class node_media {
|
||||
+node_id TEXT
|
||||
+media_type TEXT
|
||||
+src TEXT
|
||||
+format TEXT
|
||||
+display_name TEXT
|
||||
+category TEXT
|
||||
+size_bytes INTEGER
|
||||
+width REAL
|
||||
+height REAL
|
||||
}
|
||||
class node_text {
|
||||
+node_id TEXT
|
||||
+content TEXT
|
||||
}
|
||||
class node_link {
|
||||
+node_id TEXT
|
||||
+url TEXT
|
||||
+label TEXT
|
||||
}
|
||||
class node_table {
|
||||
+node_id TEXT
|
||||
+rows INTEGER
|
||||
+cols INTEGER
|
||||
+width REAL
|
||||
+height REAL
|
||||
+header_color TEXT
|
||||
+header_text_color TEXT
|
||||
+border_color TEXT
|
||||
+cell_color TEXT
|
||||
+font_family TEXT
|
||||
+font_size REAL
|
||||
+font_weight TEXT
|
||||
+font_style TEXT
|
||||
}
|
||||
class node_table_header {
|
||||
+node_id TEXT
|
||||
+col_index INTEGER
|
||||
+title TEXT
|
||||
}
|
||||
class connector {
|
||||
+id TEXT
|
||||
+diagram_id TEXT
|
||||
+name TEXT
|
||||
+parent_id TEXT
|
||||
+x REAL
|
||||
+y REAL
|
||||
+z_index INTEGER
|
||||
+visible INTEGER
|
||||
}
|
||||
class connector_endpoint {
|
||||
+id TEXT
|
||||
+connector_id TEXT
|
||||
+name TEXT
|
||||
+target_id TEXT
|
||||
}
|
||||
class edge {
|
||||
+id TEXT
|
||||
+diagram_id TEXT
|
||||
+from_id TEXT
|
||||
+to_id TEXT
|
||||
+kind TEXT
|
||||
+style TEXT
|
||||
+label TEXT
|
||||
+color TEXT
|
||||
}
|
||||
class port {
|
||||
+id TEXT
|
||||
+node_id TEXT
|
||||
+name TEXT
|
||||
+capacity INTEGER
|
||||
+x REAL
|
||||
+y REAL
|
||||
+anchor TEXT
|
||||
+edge_side INTEGER
|
||||
}
|
||||
|
||||
diagram "1" --> "many" node
|
||||
diagram "1" --> "many" connector
|
||||
diagram "1" --> "many" edge
|
||||
node "1" --> "0..1" node : parent
|
||||
node "1" --> "0..1" node_style
|
||||
node "1" --> "0..1" node_command
|
||||
node "1" --> "0..1" node_media
|
||||
node "1" --> "0..1" node_text
|
||||
node "1" --> "0..1" node_link
|
||||
node "1" --> "0..1" node_table
|
||||
node "1" --> "many" node_table_header
|
||||
node "1" --> "many" port
|
||||
connector "1" --> "many" connector_endpoint
|
||||
```
|
||||
|
||||
## Frontend (UI - draft)
|
||||
- Panneau gauche: **Édition** (mode édition activable).
|
||||
- Palette d’objets: carré, rectangle, rond, ovale, pilule, image, texte, table, commande.
|
||||
- Liaisons: ligne, connexion, flèche, spline.
|
||||
- Boutons:
|
||||
- Supprimer un objet du monde (avec choix de cascade):
|
||||
- supprimer les enfants
|
||||
- relier les enfants au grand‑parent
|
||||
- Importer un objet (bibliothèque) + enfants (JSON).
|
||||
- Bibliothèque:
|
||||
- stockage local (JSON)
|
||||
- export/import possible
|
||||
- Panneau droit: **Propriétés** (ex‑Actions).
|
||||
- Propriétés organisées par sections (plus lisible).
|
||||
- Parent/enfants configurables depuis Propriétés.
|
||||
- Image: afficher poids + catégorie.
|
||||
- Afficher rotation + dimensions (avec poignées de resize/rotation activables).
|
||||
- Barre d’outils dépliable (outils courants):
|
||||
- couper, copier, coller, supprimer, cloner
|
||||
- palette couleurs (trait / intérieur)
|
||||
- police (liste déroulante) + taille (boutons + / -)
|
||||
- zoom (loupe + / -)
|
||||
- épaisseur de trait
|
||||
|
||||
## Stockage des ressources (arborescence proposée)
|
||||
```
|
||||
architecture/
|
||||
backend/
|
||||
app/
|
||||
diagrams/
|
||||
world.json
|
||||
database/
|
||||
resources/
|
||||
toolbars/
|
||||
editbar.json
|
||||
prop_bar.json
|
||||
outils.json
|
||||
images/
|
||||
originals/
|
||||
optimized/
|
||||
thumbs/
|
||||
police/
|
||||
palettes/
|
||||
commandes/
|
||||
library/
|
||||
objects.json
|
||||
simple_objects.json
|
||||
images.json
|
||||
tables.json
|
||||
commands.json
|
||||
palettes.json
|
||||
fonts.json
|
||||
exports/
|
||||
imports/
|
||||
```
|
||||
Descriptions:
|
||||
- backend/: API + logique serveur pour l’éditeur.
|
||||
- app/: code UI + logique front.
|
||||
- diagrams/: fichiers de diagrammes (world.json, etc.).
|
||||
- database/: base SQLite locale.
|
||||
- resources/toolbars/: définitions des barres d’outils.
|
||||
- resources/images/: images sources, optimisées, miniatures.
|
||||
- resources/police/: polices disponibles.
|
||||
- formats acceptés: ttf (support initial), woff/woff2 possibles plus tard.
|
||||
- resources/palettes/: palettes de couleurs.
|
||||
- resources/commandes/: commandes prêtes à l’emploi.
|
||||
- library/: bibliothèque d’objets réutilisables.
|
||||
- exports/: exports (JSON, images, etc.).
|
||||
- imports/: imports.
|
||||
- Les deux volets sont repliés par défaut; un bouton en haut permet de les déplier.
|
||||
- Drag & drop depuis Édition vers l’arbre réseau:
|
||||
- crée un nouvel objet dans le monde avec paramètres par défaut.
|
||||
- Mode lecture:
|
||||
- édition désactivée = pas de déplacement ni de modification.
|
||||
1
backend/app/__init__.py
Executable file
@@ -0,0 +1 @@
|
||||
# IPWatch Backend Application
|
||||
1
backend/app/core/__init__.py
Executable file
@@ -0,0 +1 @@
|
||||
# Core configuration modules
|
||||
167
backend/app/core/config.py
Executable file
@@ -0,0 +1,167 @@
|
||||
"""
|
||||
Configuration management pour IPWatch
|
||||
Charge et valide le fichier config.yaml
|
||||
"""
|
||||
import yaml
|
||||
from pathlib import Path
|
||||
from typing import Dict, Any, List, Optional
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
class AppConfig(BaseModel):
    """Application-level settings (name/version exposed by the API, debug flag)."""
    name: str = "IPWatch"
    version: str = "1.0.0"
    debug: bool = False
|
||||
|
||||
|
||||
class NetworkConfig(BaseModel):
    """Network settings: the scanned CIDR plus optional gateway and DNS servers."""
    cidr: str
    gateway: Optional[str] = None
    dns: Optional[List[str]] = None
|
||||
|
||||
|
||||
class ScanConfig(BaseModel):
    """Scan scheduling and tuning parameters."""
    ping_interval: int = 60  # seconds between ping sweeps
    ping_count: int = 1  # number of pings sent per IP
    port_scan_interval: int = 300  # seconds between port scans
    parallel_pings: int = 50  # max concurrent pings per sweep
    timeout: float = 1.0  # per-probe timeout, in seconds
    force_vendor_update: bool = False  # re-resolve MAC vendors even when cached
|
||||
|
||||
|
||||
class PortsConfig(BaseModel):
    """Ports to probe during port scans."""
    ranges: List[str] = ["22", "80", "443", "3389", "8080"]  # port numbers/ranges, as strings
    protocols: Optional[Dict[int, str]] = None  # mapping port -> protocol name
|
||||
|
||||
|
||||
class HistoryConfig(BaseModel):
    """History retention settings (entries older than the window are purged)."""
    retention_hours: int = 24
|
||||
|
||||
|
||||
class UIConfig(BaseModel):
    """Frontend display settings (sizes presumably in pixels — confirm in frontend)."""
    offline_transparency: float = 0.5  # opacity applied to offline hosts (0..1)
    show_mac: bool = True
    show_vendor: bool = True
    cell_size: int = 30
    font_size: int = 10
    cell_gap: float = 2
    details_font_size: int = 13
    details_spacing: int = 2
    architecture_title_font_size: int = 18
|
||||
|
||||
|
||||
class LinksConfig(BaseModel):
    """External link settings."""
    hardware_bench_url: Optional[str] = None
|
||||
|
||||
|
||||
class ColorsConfig(BaseModel):
    """UI color palette (hex RGB strings), keyed by IP/device state."""
    free: str = "#75715E"
    online_known: str = "#A6E22E"
    online_unknown: str = "#66D9EF"
    offline_known: str = "#F92672"
    offline_unknown: str = "#AE81FF"
    mac_changed: str = "#FD971F"
    network_device: str = "#1E3A8A"
|
||||
|
||||
|
||||
class OPNsenseConfig(BaseModel):
    """OPNsense API access settings (integration disabled by default)."""
    enabled: bool = False
    host: str = ""
    api_key: str = ""
    api_secret: str = ""
    verify_ssl: bool = False  # TLS verification off by default (e.g. self-signed certs)
    protocol: str = "http"  # "http" or "https"
|
||||
|
||||
|
||||
class DatabaseConfig(BaseModel):
    """Database settings."""
    path: str = "./data/db.sqlite"  # SQLite file path, relative to the working directory
|
||||
|
||||
|
||||
class SubnetConfig(BaseModel):
    """A named subnet with its CIDR and an IP range (start/end bounds —
    presumably inclusive; confirm against the scanner)."""
    name: str
    cidr: str
    start: str  # first IP of the range
    end: str  # last IP of the range
    description: str
|
||||
|
||||
|
||||
class HostConfig(BaseModel):
    """A known host with its physical location and optional IP relationships."""
    name: str
    location: str
    ip: Optional[str] = None
    ip_parent: Optional[str] = None  # parent device's IP, if any
    ip_enfant: Optional[List[str]] = None  # child devices' IPs ("enfant" = child)
|
||||
|
||||
|
||||
class IPWatchConfig(BaseModel):
    """Top-level IPWatch configuration, mirroring the structure of config.yaml.

    Only `network` is mandatory; every other section falls back to its
    model defaults when absent from the YAML file.
    """
    model_config = {"arbitrary_types_allowed": True}

    app: AppConfig = Field(default_factory=AppConfig)
    network: NetworkConfig
    subnets: List[SubnetConfig] = Field(default_factory=list)
    ip_classes: Dict[str, Any] = Field(default_factory=dict)
    scan: ScanConfig = Field(default_factory=ScanConfig)
    ports: PortsConfig = Field(default_factory=PortsConfig)
    locations: List[str] = Field(default_factory=list)
    hosts: List[HostConfig] = Field(default_factory=list)
    history: HistoryConfig = Field(default_factory=HistoryConfig)
    ui: UIConfig = Field(default_factory=UIConfig)
    links: LinksConfig = Field(default_factory=LinksConfig)
    colors: ColorsConfig = Field(default_factory=ColorsConfig)
    database: DatabaseConfig = Field(default_factory=DatabaseConfig)
    opnsense: OPNsenseConfig = Field(default_factory=OPNsenseConfig)
|
||||
|
||||
|
||||
class ConfigManager:
    """Process-wide configuration holder (singleton).

    Every instantiation returns the same shared object; the parsed
    configuration is cached after a successful load_config() call.
    """
    _instance: Optional['ConfigManager'] = None
    _config: Optional[IPWatchConfig] = None

    def __new__(cls):
        # Lazily create the single shared instance.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def load_config(self, config_path: str = "./config.yaml") -> IPWatchConfig:
        """Parse the YAML file at `config_path`, validate it into an
        IPWatchConfig and cache the result. Raises FileNotFoundError when
        the file does not exist."""
        cfg_file = Path(config_path)

        if not cfg_file.exists():
            raise FileNotFoundError(f"Fichier de configuration non trouvé: {config_path}")

        with cfg_file.open('r', encoding='utf-8') as fh:
            raw = yaml.safe_load(fh)

        self._config = IPWatchConfig(**raw)
        # Remember the path so reload_config() can re-read the same file.
        self._config_path = config_path
        return self._config

    def reload_config(self) -> IPWatchConfig:
        """Re-read the configuration from the last loaded path, falling back
        to the default path when load_config() was never called."""
        return self.load_config(getattr(self, '_config_path', "./config.yaml"))

    @property
    def config(self) -> IPWatchConfig:
        """Return the cached configuration; raise if it was never loaded."""
        if self._config is None:
            raise RuntimeError("Configuration non chargée. Appelez load_config() d'abord.")
        return self._config
|
||||
|
||||
|
||||
# Instance globale
|
||||
config_manager = ConfigManager()
|
||||
101
backend/app/core/database.py
Executable file
@@ -0,0 +1,101 @@
|
||||
"""
|
||||
Configuration de la base de données SQLAlchemy
|
||||
"""
|
||||
from sqlalchemy import create_engine
|
||||
from sqlalchemy.ext.declarative import declarative_base
|
||||
from sqlalchemy.orm import sessionmaker
|
||||
from pathlib import Path
|
||||
|
||||
# Base pour les modèles SQLAlchemy (DB principale)
|
||||
Base = declarative_base()
|
||||
# Base dédiée à l'architecture
|
||||
ArchBase = declarative_base()
|
||||
|
||||
# Engine et session
|
||||
engine = None
|
||||
SessionLocal = None
|
||||
arch_engine = None
|
||||
ArchSessionLocal = None
|
||||
|
||||
|
||||
def init_database(db_path: str = "./data/db.sqlite"):
    """Initialise the main SQLite database connection.

    Creates the parent directory if missing, builds the module-level engine
    and session factory, creates all declared tables and applies lightweight
    column migrations. Returns the SQLAlchemy engine.
    """
    global engine, SessionLocal

    # Create the data directory if needed
    Path(db_path).parent.mkdir(parents=True, exist_ok=True)

    # Create the SQLite engine
    database_url = f"sqlite:///{db_path}"
    engine = create_engine(
        database_url,
        # SQLite forbids cross-thread use by default; request handlers may run
        # on different threads, hence check_same_thread=False.
        connect_args={"check_same_thread": False},
        echo=False
    )

    # Create the session factory
    SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)

    # Create the tables
    Base.metadata.create_all(bind=engine)

    # Migration: add any missing columns to existing tables
    _run_migrations(engine)

    return engine
|
||||
|
||||
|
||||
def _run_migrations(eng):
    """Add missing columns to existing tables (lightweight in-place migration).

    Currently: adds the `dhcp_synced` boolean column to the `ip` table when
    absent. Safe to call repeatedly (idempotent).
    """
    import sqlalchemy
    inspector = sqlalchemy.inspect(eng)

    # Migration for the 'ip' table
    if 'ip' in inspector.get_table_names():
        existing_columns = {col['name'] for col in inspector.get_columns('ip')}
        with eng.connect() as conn:
            if 'dhcp_synced' not in existing_columns:
                conn.execute(sqlalchemy.text("ALTER TABLE ip ADD COLUMN dhcp_synced BOOLEAN DEFAULT 0"))
                conn.commit()
                print("✓ Migration: colonne dhcp_synced ajoutée à la table ip")
|
||||
|
||||
|
||||
def init_architecture_database(db_path: str = "./architecture/database/architecture.sqlite"):
    """Initialise the dedicated architecture SQLite database.

    Same shape as init_database(): creates the parent directory, the engine,
    the session factory and the ArchBase tables. Returns the engine.
    """
    global arch_engine, ArchSessionLocal

    Path(db_path).parent.mkdir(parents=True, exist_ok=True)

    database_url = f"sqlite:///{db_path}"
    arch_engine = create_engine(
        database_url,
        # Allow cross-thread use of the SQLite connection (see init_database).
        connect_args={"check_same_thread": False},
        echo=False
    )

    ArchSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=arch_engine)

    # Create the architecture tables if needed
    ArchBase.metadata.create_all(bind=arch_engine)

    return arch_engine
|
||||
|
||||
|
||||
def get_db():
    """FastAPI dependency: yield a main-DB session, always closing it afterwards."""
    session = SessionLocal()
    try:
        yield session
    finally:
        # Guarantee the connection is released even if the request errored.
        session.close()
|
||||
|
||||
|
||||
def get_arch_db():
    """FastAPI dependency: yield an architecture-DB session, bootstrapping the
    architecture database on first use and always closing the session."""
    if ArchSessionLocal is None:
        # Lazy bootstrap: the architecture DB is only created when first requested.
        init_architecture_database()
    session = ArchSessionLocal()
    try:
        yield session
    finally:
        session.close()
|
||||
221
backend/app/main.py
Executable file
@@ -0,0 +1,221 @@
|
||||
"""
|
||||
Application FastAPI principale pour IPWatch
|
||||
Point d'entrée du backend
|
||||
"""
|
||||
from fastapi import FastAPI
|
||||
from fastapi.middleware.cors import CORSMiddleware
|
||||
from fastapi.staticfiles import StaticFiles
|
||||
from fastapi.responses import FileResponse
|
||||
from contextlib import asynccontextmanager
|
||||
from pathlib import Path
|
||||
|
||||
from backend.app.core.config import config_manager
|
||||
from backend.app.core.database import init_database, get_db
|
||||
from backend.app.routers import ips_router, scan_router, websocket_router
|
||||
from backend.app.routers import architecture as architecture_router
|
||||
from backend.app.routers import config as config_router
|
||||
from backend.app.routers import system as system_router
|
||||
from backend.app.routers import tracking as tracking_router
|
||||
from backend.app.routers import opnsense as opnsense_router
|
||||
from backend.app.services.scheduler import scan_scheduler
|
||||
from backend.app.routers.scan import perform_scan
|
||||
|
||||
|
||||
@asynccontextmanager
async def lifespan(app: FastAPI):
    """
    Application lifespan manager.
    On startup: loads config.yaml, initialises the database, then starts the
    scheduler with its periodic scan and cleanup jobs. On shutdown: stops the
    scheduler. Config or DB failures abort startup; scheduler failures do not.
    """
    # Startup
    print("=== Démarrage IPWatch ===")

    # 1. Load the configuration (fatal on failure)
    try:
        config = config_manager.load_config("./config.yaml")
        print(f"✓ Configuration chargée: {config.network.cidr}")
    except Exception as e:
        print(f"✗ Erreur chargement config: {e}")
        raise

    # 2. Initialise the database (fatal on failure)
    try:
        init_database(config.database.path)
        print(f"✓ Base de données initialisée: {config.database.path}")
    except Exception as e:
        print(f"✗ Erreur initialisation DB: {e}")
        raise

    # 3. Start the scheduler
    try:
        scan_scheduler.start()

        # Imported here so SessionLocal is the one bound by init_database() above.
        from backend.app.core.database import SessionLocal

        async def scheduled_scan():
            """Run one scan with its own short-lived DB session."""
            db = SessionLocal()
            try:
                await perform_scan(db)
            finally:
                db.close()

        # Configure the periodic scan jobs
        scan_scheduler.add_ping_scan_job(
            scheduled_scan,
            interval_seconds=config.scan.ping_interval
        )

        scan_scheduler.add_port_scan_job(
            scheduled_scan,
            interval_seconds=config.scan.port_scan_interval
        )

        # History cleanup job
        async def cleanup_history():
            """Delete history entries older than the configured retention window."""
            from backend.app.models.ip import IPHistory
            from datetime import datetime, timedelta

            db = SessionLocal()
            try:
                cutoff = datetime.utcnow() - timedelta(hours=config.history.retention_hours)
                deleted = db.query(IPHistory).filter(IPHistory.timestamp < cutoff).delete()
                db.commit()
                print(f"Nettoyage historique: {deleted} entrées supprimées")
            finally:
                db.close()

        scan_scheduler.add_cleanup_job(cleanup_history, interval_hours=1)

        print("✓ Scheduler démarré")
    except Exception as e:
        # NOTE(review): scheduler failures are only logged and do not abort
        # startup — confirm this best-effort behaviour is intended.
        print(f"✗ Erreur démarrage scheduler: {e}")

    print("=== IPWatch prêt ===\n")

    yield

    # Shutdown
    print("\n=== Arrêt IPWatch ===")
    scan_scheduler.stop()
    print("✓ Scheduler arrêté")
|
||||
|
||||
|
||||
# Create the FastAPI application (startup/shutdown handled by `lifespan` above)
app = FastAPI(
    title="IPWatch API",
    description="API backend pour IPWatch - Scanner réseau temps réel",
    version="1.0.0",
    lifespan=lifespan
)

# CORS configuration for the frontend
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # restrict in production
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Register the API routers
app.include_router(ips_router)
app.include_router(scan_router)
app.include_router(websocket_router)
app.include_router(config_router.router)
app.include_router(system_router.router)
app.include_router(tracking_router.router)
app.include_router(architecture_router.router)
app.include_router(opnsense_router.router)

# Serve the architecture resources (static files)
architecture_dir = Path("./architecture")
architecture_dir.mkdir(parents=True, exist_ok=True)
app.mount("/architecture", StaticFiles(directory=str(architecture_dir)), name="architecture")
|
||||
|
||||
|
||||
@app.get("/health")
async def health_check():
    """Health check endpoint: reports service status and whether the scheduler is running."""
    return {
        "status": "healthy",
        "scheduler": scan_scheduler.is_running
    }
|
||||
|
||||
|
||||
# Serve the frontend's static files (Vue build output)
frontend_dist = Path(__file__).parent.parent.parent / "frontend" / "dist"

if frontend_dist.exists():
    # Mount the built assets
    app.mount("/assets", StaticFiles(directory=str(frontend_dist / "assets")), name="assets")

    # Mount the shared icons
    icons_dir = Path("./data/icons")
    icons_dir.mkdir(parents=True, exist_ok=True)
    app.mount("/icons", StaticFiles(directory=str(icons_dir)), name="icons")

    # Root-level static files (favicon, manifest, etc.)
    @app.get("/favicon.ico")
    async def serve_favicon():
        """Serve the favicon from the frontend build, if present."""
        favicon_path = frontend_dist / "favicon.ico"
        if favicon_path.exists():
            return FileResponse(favicon_path)
        return {"error": "Favicon non trouvée"}

    # Root route serving index.html
    @app.get("/")
    async def serve_frontend():
        """Serve the Vue frontend entry point."""
        index_file = frontend_dist / "index.html"
        if index_file.exists():
            return FileResponse(index_file)
        return {
            "name": "IPWatch API",
            "version": "1.0.0",
            "status": "running",
            "error": "Frontend non trouvé"
        }

    # Catch-all for Vue Router (SPA) routes
    @app.get("/{full_path:path}")
    async def catch_all(full_path: str):
        """Serve root-level static files when they exist, otherwise fall back
        to index.html so Vue Router can resolve the path client-side.
        API and WebSocket paths are never intercepted."""
        # Never intercept API routes
        if full_path.startswith("api/") or full_path.startswith("ws"):
            return {"error": "Not found"}

        # Serve root-level static files if present.
        # Fix: resolve the base directory as well and compare resolved paths —
        # the previous check (`frontend_dist in candidate.parents`) compared an
        # unresolved base against fully-resolved parents, which wrongly rejects
        # legitimate files when the install path contains symlinks or relative
        # segments. `is_relative_to` on two resolved paths is the robust form
        # and still blocks traversal outside the dist directory.
        if ".." not in full_path:
            base = frontend_dist.resolve()
            candidate = (base / full_path).resolve()
            if candidate != base and candidate.is_relative_to(base) and candidate.is_file():
                return FileResponse(candidate)

        # Serve index.html for every other route
        index_file = frontend_dist / "index.html"
        if index_file.exists():
            return FileResponse(index_file)
        return {"error": "Frontend non trouvé"}
|
||||
else:
    # Development fallback: no frontend build present, expose a status endpoint.
    @app.get("/")
    async def root():
        """Root endpoint (development mode, frontend not built)."""
        return {
            "name": "IPWatch API",
            "version": "1.0.0",
            "status": "running",
            "note": "Frontend non buildé - utilisez le mode dev"
        }
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Development entry point: serve the API with auto-reload on port 8080.
    import uvicorn

    uvicorn.run(
        "backend.app.main:app",
        host="0.0.0.0",
        port=8080,
        reload=True,
    )
|
||||
3
backend/app/migrations/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
||||
"""
|
||||
Module de migrations pour la base de données IPWatch
|
||||
"""
|
||||
57
backend/app/migrations/add_architecture_node_table.py
Normal file
@@ -0,0 +1,57 @@
|
||||
"""
|
||||
Script de migration pour ajouter la table architecture_node
|
||||
Exécuter avec: python -m backend.app.migrations.add_architecture_node_table
|
||||
"""
|
||||
from sqlalchemy import text, create_engine
|
||||
import sys
|
||||
import os
|
||||
|
||||
|
||||
def migrate():
|
||||
try:
|
||||
db_path = os.getenv('DB_PATH', './data/db.sqlite')
|
||||
db_url = f"sqlite:///{db_path}"
|
||||
engine = create_engine(db_url, echo=False)
|
||||
|
||||
with engine.connect() as conn:
|
||||
result = conn.execute(text("SELECT name FROM sqlite_master WHERE type='table' AND name='architecture_node'"))
|
||||
if result.fetchone():
|
||||
print("✓ Table 'architecture_node' existe déjà")
|
||||
return
|
||||
|
||||
print("→ Création de la table 'architecture_node'...")
|
||||
conn.execute(text("""
|
||||
CREATE TABLE architecture_node (
|
||||
id TEXT PRIMARY KEY,
|
||||
type TEXT NOT NULL,
|
||||
x INTEGER NOT NULL,
|
||||
y INTEGER NOT NULL,
|
||||
width INTEGER NOT NULL,
|
||||
height INTEGER NOT NULL,
|
||||
rotation INTEGER NOT NULL,
|
||||
payload TEXT NOT NULL,
|
||||
created_at DATETIME NOT NULL
|
||||
)
|
||||
"""))
|
||||
conn.execute(text("CREATE INDEX IF NOT EXISTS idx_arch_node_created_at ON architecture_node(created_at)"))
|
||||
conn.commit()
|
||||
print("✓ Table 'architecture_node' créée")
|
||||
|
||||
except Exception as e:
|
||||
print(f"✗ Erreur migration architecture_node: {str(e)}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def rollback():
|
||||
try:
|
||||
print("⚠ Rollback non implémenté pour SQLite")
|
||||
except Exception as e:
|
||||
print(f"✗ Erreur rollback: {str(e)}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
if len(sys.argv) > 1 and sys.argv[1] == "rollback":
|
||||
rollback()
|
||||
else:
|
||||
migrate()
|
||||
62
backend/app/migrations/add_hardware_bench_field.py
Normal file
@@ -0,0 +1,62 @@
|
||||
"""
|
||||
Script de migration pour ajouter le champ 'hardware_bench' à la table IP
|
||||
Exécuter avec: python -m backend.app.migrations.add_hardware_bench_field
|
||||
"""
|
||||
from sqlalchemy import text, create_engine
|
||||
import sys
|
||||
import os
|
||||
|
||||
|
||||
def migrate():
|
||||
"""Ajoute la colonne 'hardware_bench' et son index à la table IP"""
|
||||
try:
|
||||
db_path = os.getenv('DB_PATH', './data/db.sqlite')
|
||||
db_url = f"sqlite:///{db_path}"
|
||||
|
||||
engine = create_engine(db_url, echo=False)
|
||||
|
||||
with engine.connect() as conn:
|
||||
result = conn.execute(text("PRAGMA table_info(ip)"))
|
||||
columns = [row[1] for row in result]
|
||||
|
||||
if 'hardware_bench' in columns:
|
||||
print("✓ La colonne 'hardware_bench' existe déjà dans la table IP")
|
||||
return
|
||||
|
||||
print("→ Ajout de la colonne 'hardware_bench' à la table IP...")
|
||||
conn.execute(text("ALTER TABLE ip ADD COLUMN hardware_bench BOOLEAN DEFAULT 0"))
|
||||
|
||||
print("→ Création de l'index sur 'hardware_bench'...")
|
||||
conn.execute(text("CREATE INDEX IF NOT EXISTS idx_ip_hardware_bench ON ip(hardware_bench)"))
|
||||
|
||||
conn.commit()
|
||||
print("✓ Migration terminée avec succès!")
|
||||
print(" - Colonne 'hardware_bench' ajoutée")
|
||||
print(" - Index 'idx_ip_hardware_bench' créé")
|
||||
|
||||
except Exception as e:
|
||||
print(f"✗ Erreur lors de la migration: {str(e)}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def rollback():
|
||||
"""Supprime la colonne 'hardware_bench' (rollback de la migration)"""
|
||||
try:
|
||||
db_path = os.getenv('DB_PATH', './data/db.sqlite')
|
||||
db_url = f"sqlite:///{db_path}"
|
||||
engine = create_engine(db_url, echo=False)
|
||||
|
||||
with engine.connect() as conn:
|
||||
print("⚠ Rollback non implémenté pour SQLite")
|
||||
print(" Pour annuler, restaurez une sauvegarde de la base de données")
|
||||
|
||||
except Exception as e:
|
||||
print(f"✗ Erreur lors du rollback: {str(e)}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
if len(sys.argv) > 1 and sys.argv[1] == "rollback":
|
||||
rollback()
|
||||
else:
|
||||
migrate()
|
||||
54
backend/app/migrations/add_icon_fields.py
Normal file
@@ -0,0 +1,54 @@
|
||||
"""
|
||||
Script de migration pour ajouter les champs 'icon_filename' et 'icon_url' à la table IP
|
||||
Exécuter avec: python -m backend.app.migrations.add_icon_fields
|
||||
"""
|
||||
from sqlalchemy import text, create_engine
|
||||
import sys
|
||||
import os
|
||||
|
||||
|
||||
def migrate():
|
||||
try:
|
||||
db_path = os.getenv('DB_PATH', './data/db.sqlite')
|
||||
db_url = f"sqlite:///{db_path}"
|
||||
engine = create_engine(db_url, echo=False)
|
||||
|
||||
with engine.connect() as conn:
|
||||
result = conn.execute(text("PRAGMA table_info(ip)"))
|
||||
columns = [row[1] for row in result]
|
||||
|
||||
if 'icon_filename' not in columns:
|
||||
print("→ Ajout de la colonne 'icon_filename'...")
|
||||
conn.execute(text("ALTER TABLE ip ADD COLUMN icon_filename TEXT"))
|
||||
conn.execute(text("CREATE INDEX IF NOT EXISTS idx_ip_icon_filename ON ip(icon_filename)"))
|
||||
else:
|
||||
print("✓ Colonne 'icon_filename' déjà présente")
|
||||
|
||||
if 'icon_url' not in columns:
|
||||
print("→ Ajout de la colonne 'icon_url'...")
|
||||
conn.execute(text("ALTER TABLE ip ADD COLUMN icon_url TEXT"))
|
||||
conn.execute(text("CREATE INDEX IF NOT EXISTS idx_ip_icon_url ON ip(icon_url)"))
|
||||
else:
|
||||
print("✓ Colonne 'icon_url' déjà présente")
|
||||
|
||||
conn.commit()
|
||||
print("✓ Migration terminée avec succès")
|
||||
|
||||
except Exception as e:
|
||||
print(f"✗ Erreur lors de la migration: {str(e)}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def rollback():
|
||||
try:
|
||||
print("⚠ Rollback non implémenté pour SQLite")
|
||||
except Exception as e:
|
||||
print(f"✗ Erreur lors du rollback: {str(e)}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
if len(sys.argv) > 1 and sys.argv[1] == "rollback":
|
||||
rollback()
|
||||
else:
|
||||
migrate()
|
||||
52
backend/app/migrations/add_network_device_field.py
Normal file
@@ -0,0 +1,52 @@
|
||||
#!/usr/bin/env python3
"""
Migration: add the 'network_device' column to the ip table.
"""
import os
import sys
from sqlalchemy import text, create_engine


def main():
    """Add the 'network_device' boolean column and its index to the ip table.

    Idempotent: returns early when the column is already present.
    Exits with status 1 on failure.
    """
    db_path = os.getenv('DB_PATH', './data/db.sqlite')
    engine = create_engine(f"sqlite:///{db_path}", echo=False)

    print(f"📦 Migration: Ajout du champ network_device")
    print(f"🗄️ Base de données: {db_path}")

    try:
        with engine.connect() as conn:
            # Skip when a previous run already added the column.
            columns = [row[1] for row in conn.execute(text("PRAGMA table_info(ip)"))]

            if 'network_device' in columns:
                print("⚠️ La colonne 'network_device' existe déjà. Migration ignorée.")
                return

            print("➕ Ajout de la colonne 'network_device'...")
            conn.execute(text("""
                ALTER TABLE ip
                ADD COLUMN network_device BOOLEAN DEFAULT 0
            """))

            print("🔍 Création de l'index sur 'network_device'...")
            conn.execute(text("""
                CREATE INDEX IF NOT EXISTS idx_ip_network_device ON ip(network_device)
            """))

            conn.commit()
            print("✅ Migration réussie!")

    except Exception as e:
        # Consistency fix: the sibling migration scripts report errors on
        # stderr; this one printed to stdout, hiding failures from pipelines
        # that capture stderr for error detection.
        print(f"❌ Erreur lors de la migration: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()
|
||||
54
backend/app/migrations/add_scan_log_table.py
Normal file
@@ -0,0 +1,54 @@
|
||||
"""
|
||||
Script de migration pour ajouter la table scan_log
|
||||
Exécuter avec: python -m backend.app.migrations.add_scan_log_table
|
||||
"""
|
||||
from sqlalchemy import text, create_engine
|
||||
import sys
|
||||
import os
|
||||
|
||||
|
||||
def migrate():
|
||||
try:
|
||||
db_path = os.getenv('DB_PATH', './data/db.sqlite')
|
||||
db_url = f"sqlite:///{db_path}"
|
||||
engine = create_engine(db_url, echo=False)
|
||||
|
||||
with engine.connect() as conn:
|
||||
result = conn.execute(text("SELECT name FROM sqlite_master WHERE type='table' AND name='scan_log'"))
|
||||
if result.fetchone():
|
||||
print("✓ Table 'scan_log' existe déjà")
|
||||
return
|
||||
|
||||
print("→ Création de la table 'scan_log'...")
|
||||
conn.execute(text("""
|
||||
CREATE TABLE scan_log (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
ip TEXT,
|
||||
status TEXT,
|
||||
message TEXT NOT NULL,
|
||||
created_at DATETIME NOT NULL
|
||||
)
|
||||
"""))
|
||||
conn.execute(text("CREATE INDEX IF NOT EXISTS idx_scan_log_created_at ON scan_log(created_at)"))
|
||||
conn.execute(text("CREATE INDEX IF NOT EXISTS idx_scan_log_ip ON scan_log(ip)"))
|
||||
conn.commit()
|
||||
print("✓ Table 'scan_log' créée")
|
||||
|
||||
except Exception as e:
|
||||
print(f"✗ Erreur migration scan_log: {str(e)}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def rollback():
|
||||
try:
|
||||
print("⚠ Rollback non implémenté pour SQLite")
|
||||
except Exception as e:
|
||||
print(f"✗ Erreur rollback: {str(e)}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
if len(sys.argv) > 1 and sys.argv[1] == "rollback":
|
||||
rollback()
|
||||
else:
|
||||
migrate()
|
||||
70
backend/app/migrations/add_tracked_field.py
Normal file
@@ -0,0 +1,70 @@
|
||||
"""
|
||||
Script de migration pour ajouter le champ 'tracked' à la table IP
|
||||
Exécuter avec: python -m backend.app.migrations.add_tracked_field
|
||||
"""
|
||||
from sqlalchemy import text, create_engine
|
||||
import sys
|
||||
import os
|
||||
|
||||
|
||||
def migrate():
|
||||
"""Ajoute la colonne 'tracked' et son index à la table IP"""
|
||||
try:
|
||||
# Charger le chemin de la base de données depuis config.yaml ou utiliser le défaut
|
||||
db_path = os.getenv('DB_PATH', './data/db.sqlite')
|
||||
db_url = f"sqlite:///{db_path}"
|
||||
|
||||
# Créer l'engine
|
||||
engine = create_engine(db_url, echo=False)
|
||||
|
||||
with engine.connect() as conn:
|
||||
# Vérifier si la colonne existe déjà
|
||||
result = conn.execute(text("PRAGMA table_info(ip)"))
|
||||
columns = [row[1] for row in result]
|
||||
|
||||
if 'tracked' in columns:
|
||||
print("✓ La colonne 'tracked' existe déjà dans la table IP")
|
||||
return
|
||||
|
||||
# Ajouter la colonne tracked
|
||||
print("→ Ajout de la colonne 'tracked' à la table IP...")
|
||||
conn.execute(text("ALTER TABLE ip ADD COLUMN tracked BOOLEAN DEFAULT 0"))
|
||||
|
||||
# Créer l'index
|
||||
print("→ Création de l'index sur 'tracked'...")
|
||||
conn.execute(text("CREATE INDEX IF NOT EXISTS idx_ip_tracked ON ip(tracked)"))
|
||||
|
||||
conn.commit()
|
||||
print("✓ Migration terminée avec succès!")
|
||||
print(" - Colonne 'tracked' ajoutée")
|
||||
print(" - Index 'idx_ip_tracked' créé")
|
||||
|
||||
except Exception as e:
|
||||
print(f"✗ Erreur lors de la migration: {str(e)}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def rollback():
|
||||
"""Supprime la colonne 'tracked' (rollback de la migration)"""
|
||||
try:
|
||||
# Charger le chemin de la base de données depuis config.yaml ou utiliser le défaut
|
||||
db_path = os.getenv('DB_PATH', './data/db.sqlite')
|
||||
db_url = f"sqlite:///{db_path}"
|
||||
engine = create_engine(db_url, echo=False)
|
||||
|
||||
with engine.connect() as conn:
|
||||
# SQLite ne supporte pas DROP COLUMN directement
|
||||
# Il faut recréer la table sans la colonne
|
||||
print("⚠ Rollback non implémenté pour SQLite")
|
||||
print(" Pour annuler, restaurez une sauvegarde de la base de données")
|
||||
|
||||
except Exception as e:
|
||||
print(f"✗ Erreur lors du rollback: {str(e)}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
if len(sys.argv) > 1 and sys.argv[1] == "rollback":
|
||||
rollback()
|
||||
else:
|
||||
migrate()
|
||||
70
backend/app/migrations/add_vm_field.py
Normal file
@@ -0,0 +1,70 @@
|
||||
"""
|
||||
Script de migration pour ajouter le champ 'vm' à la table IP
|
||||
Exécuter avec: python -m backend.app.migrations.add_vm_field
|
||||
"""
|
||||
from sqlalchemy import text, create_engine
|
||||
import sys
|
||||
import os
|
||||
|
||||
|
||||
def migrate():
|
||||
"""Ajoute la colonne 'vm' et son index à la table IP"""
|
||||
try:
|
||||
# Charger le chemin de la base de données depuis config.yaml ou utiliser le défaut
|
||||
db_path = os.getenv('DB_PATH', './data/db.sqlite')
|
||||
db_url = f"sqlite:///{db_path}"
|
||||
|
||||
# Créer l'engine
|
||||
engine = create_engine(db_url, echo=False)
|
||||
|
||||
with engine.connect() as conn:
|
||||
# Vérifier si la colonne existe déjà
|
||||
result = conn.execute(text("PRAGMA table_info(ip)"))
|
||||
columns = [row[1] for row in result]
|
||||
|
||||
if 'vm' in columns:
|
||||
print("✓ La colonne 'vm' existe déjà dans la table IP")
|
||||
return
|
||||
|
||||
# Ajouter la colonne vm
|
||||
print("→ Ajout de la colonne 'vm' à la table IP...")
|
||||
conn.execute(text("ALTER TABLE ip ADD COLUMN vm BOOLEAN DEFAULT 0"))
|
||||
|
||||
# Créer l'index
|
||||
print("→ Création de l'index sur 'vm'...")
|
||||
conn.execute(text("CREATE INDEX IF NOT EXISTS idx_ip_vm ON ip(vm)"))
|
||||
|
||||
conn.commit()
|
||||
print("✓ Migration terminée avec succès!")
|
||||
print(" - Colonne 'vm' ajoutée")
|
||||
print(" - Index 'idx_ip_vm' créé")
|
||||
|
||||
except Exception as e:
|
||||
print(f"✗ Erreur lors de la migration: {str(e)}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def rollback():
|
||||
"""Supprime la colonne 'vm' (rollback de la migration)"""
|
||||
try:
|
||||
# Charger le chemin de la base de données depuis config.yaml ou utiliser le défaut
|
||||
db_path = os.getenv('DB_PATH', './data/db.sqlite')
|
||||
db_url = f"sqlite:///{db_path}"
|
||||
engine = create_engine(db_url, echo=False)
|
||||
|
||||
with engine.connect() as conn:
|
||||
# SQLite ne supporte pas DROP COLUMN directement
|
||||
# Il faut recréer la table sans la colonne
|
||||
print("⚠ Rollback non implémenté pour SQLite")
|
||||
print(" Pour annuler, restaurez une sauvegarde de la base de données")
|
||||
|
||||
except Exception as e:
|
||||
print(f"✗ Erreur lors du rollback: {str(e)}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
if len(sys.argv) > 1 and sys.argv[1] == "rollback":
|
||||
rollback()
|
||||
else:
|
||||
migrate()
|
||||
60
backend/app/migrations/create_architecture_db.py
Normal file
@@ -0,0 +1,60 @@
|
||||
"""
|
||||
Script pour créer la base SQLite dédiée à l'architecture
|
||||
Exécuter avec: python -m backend.app.migrations.create_architecture_db
|
||||
"""
|
||||
from sqlalchemy import text, create_engine
|
||||
import sys
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
def migrate():
|
||||
try:
|
||||
db_path = os.getenv("ARCH_DB_PATH", "./architecture/database/architecture.sqlite")
|
||||
db_file = Path(db_path)
|
||||
db_file.parent.mkdir(parents=True, exist_ok=True)
|
||||
db_url = f"sqlite:///{db_file}"
|
||||
engine = create_engine(db_url, echo=False)
|
||||
|
||||
with engine.connect() as conn:
|
||||
result = conn.execute(text("SELECT name FROM sqlite_master WHERE type='table' AND name='architecture_node'"))
|
||||
if result.fetchone():
|
||||
print("✓ Table 'architecture_node' existe déjà")
|
||||
return
|
||||
|
||||
print("→ Création de la table 'architecture_node'...")
|
||||
conn.execute(text("""
|
||||
CREATE TABLE architecture_node (
|
||||
id TEXT PRIMARY KEY,
|
||||
type TEXT NOT NULL,
|
||||
x INTEGER NOT NULL,
|
||||
y INTEGER NOT NULL,
|
||||
width INTEGER NOT NULL,
|
||||
height INTEGER NOT NULL,
|
||||
rotation INTEGER NOT NULL,
|
||||
payload TEXT NOT NULL,
|
||||
created_at DATETIME NOT NULL
|
||||
)
|
||||
"""))
|
||||
conn.execute(text("CREATE INDEX IF NOT EXISTS idx_arch_node_created_at ON architecture_node(created_at)"))
|
||||
conn.commit()
|
||||
print(f"✓ Base architecture créée: {db_file}")
|
||||
|
||||
except Exception as e:
|
||||
print(f"✗ Erreur création base architecture: {str(e)}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def rollback():
|
||||
try:
|
||||
print("⚠ Rollback non implémenté pour SQLite")
|
||||
except Exception as e:
|
||||
print(f"✗ Erreur rollback: {str(e)}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
if len(sys.argv) > 1 and sys.argv[1] == "rollback":
|
||||
rollback()
|
||||
else:
|
||||
migrate()
|
||||
8
backend/app/models/__init__.py
Executable file
@@ -0,0 +1,8 @@
|
||||
"""
|
||||
Modèles SQLAlchemy pour IPWatch
|
||||
"""
|
||||
from .ip import IP, IPHistory
|
||||
from .scan_log import ScanLog
|
||||
from .architecture import ArchitectureNode
|
||||
|
||||
__all__ = ["IP", "IPHistory", "ScanLog", "ArchitectureNode"]
|
||||
22
backend/app/models/architecture.py
Normal file
@@ -0,0 +1,22 @@
|
||||
"""
|
||||
Modèles SQLAlchemy pour l'éditeur d'architecture
|
||||
"""
|
||||
from datetime import datetime
|
||||
from sqlalchemy import Column, String, Integer, DateTime, Text
|
||||
|
||||
from backend.app.core.database import ArchBase
|
||||
|
||||
|
||||
class ArchitectureNode(ArchBase):
    """A persisted node of the architecture editor canvas."""
    __tablename__ = "architecture_node"

    id = Column(String, primary_key=True, index=True)       # Client-supplied or generated id
    type = Column(String, nullable=False)                   # Node kind
    x = Column(Integer, nullable=False, default=0)          # Canvas position (px)
    y = Column(Integer, nullable=False, default=0)
    width = Column(Integer, nullable=False, default=50)     # Canvas size (px)
    height = Column(Integer, nullable=False, default=50)
    rotation = Column(Integer, nullable=False, default=0)   # Rotation in degrees
    payload = Column(Text, nullable=False, default="{}")    # Serialized JSON blob
    # NOTE(review): datetime.utcnow is deprecated since Python 3.12; a future
    # migration could switch to a timezone-aware default.
    created_at = Column(DateTime, default=datetime.utcnow)
|
||||
95
backend/app/models/ip.py
Executable file
@@ -0,0 +1,95 @@
|
||||
"""
|
||||
Modèles de données pour les adresses IP et leur historique
|
||||
Basé sur modele-donnees.md
|
||||
"""
|
||||
from sqlalchemy import Column, String, Boolean, DateTime, Integer, ForeignKey, Index, JSON
|
||||
from sqlalchemy.orm import relationship
|
||||
from datetime import datetime
|
||||
from backend.app.core.database import Base
|
||||
|
||||
|
||||
class IP(Base):
    """
    Primary table of IP addresses.

    Holds the current network state and the user-facing metadata of each
    address, plus a one-to-many link to its history rows.
    """
    __tablename__ = "ip"

    # Primary key: the address itself.
    ip = Column(String, primary_key=True, index=True)

    # User-facing metadata
    name = Column(String, nullable=True)                         # Friendly label
    known = Column(Boolean, default=False, index=True)           # Recognised vs unknown address
    tracked = Column(Boolean, default=False, index=True)         # Monitored for status changes
    vm = Column(Boolean, default=False, index=True)              # Virtual machine flag
    network_device = Column(Boolean, default=False, index=True)  # Switch / router / WiFi AP
    hardware_bench = Column(Boolean, default=False, index=True)  # Hardware-bench link available
    location = Column(String, nullable=True)                     # e.g. "Bureau", "Serveur"
    host = Column(String, nullable=True)                         # Host kind, e.g. "PC", "Imprimante"
    ip_parent = Column(String, nullable=True)                    # Logical parent IP
    ip_enfant = Column(JSON, default=list)                       # Child IPs (JSON list)

    # Timestamps
    first_seen = Column(DateTime, default=datetime.now)                        # First detection
    last_seen = Column(DateTime, default=datetime.now, onupdate=datetime.now)  # Last sighting

    # Network state: "online", "offline" or "unknown"
    last_status = Column(String, index=True)

    # Network details
    mac = Column(String, nullable=True)              # MAC address
    vendor = Column(String, nullable=True)           # Manufacturer (MAC/OUI lookup)
    hostname = Column(String, nullable=True)         # Network hostname
    link = Column(String, nullable=True)             # Custom URL
    mac_changed = Column(Boolean, default=False)     # MAC differs from the expected one
    icon_filename = Column(String, nullable=True)    # Icon file stored under /data/icons
    icon_url = Column(String, nullable=True)         # URL attached to the icon

    # Open ports, stored as a JSON list
    open_ports = Column(JSON, default=list)

    # OPNsense DHCP sync: True once a Kea reservation has been created
    dhcp_synced = Column(Boolean, default=False)

    # History rows are removed together with the IP row.
    history = relationship("IPHistory", back_populates="ip_ref", cascade="all, delete-orphan")

    def __repr__(self):
        return f"<IP {self.ip} - {self.last_status} - {self.name or 'unnamed'}>"
|
||||
|
||||
|
||||
class IPHistory(Base):
    """
    History of IP states over time (24 h retention by default).
    """
    __tablename__ = "ip_history"

    # Auto-incremented surrogate key
    id = Column(Integer, primary_key=True, autoincrement=True)

    # Owning IP row; rows are deleted with it (ON DELETE CASCADE)
    ip = Column(String, ForeignKey("ip.ip", ondelete="CASCADE"), nullable=False, index=True)

    # When this snapshot was recorded
    timestamp = Column(DateTime, default=datetime.now, index=True, nullable=False)

    # State at that moment: "online" or "offline"
    status = Column(String, nullable=False)

    # Open ports at that moment (JSON list)
    open_ports = Column(JSON, default=list)

    # Back-reference to the IP row
    ip_ref = relationship("IP", back_populates="history")

    def __repr__(self):
        return f"<IPHistory {self.ip} - {self.timestamp} - {self.status}>"


# Extra named indexes (column-level index=True already covers most lookups)
Index('idx_ip_last_status', IP.last_status)
Index('idx_ip_history_timestamp', IPHistory.timestamp)
Index('idx_ip_history_ip', IPHistory.ip)
|
||||
25
backend/app/models/scan_log.py
Normal file
@@ -0,0 +1,25 @@
|
||||
"""
|
||||
Historique détaillé des scans (logs par IP)
|
||||
"""
|
||||
from sqlalchemy import Column, Integer, String, DateTime, Index
|
||||
from datetime import datetime
|
||||
from backend.app.core.database import Base
|
||||
|
||||
|
||||
class ScanLog(Base):
    """
    Log table recording individual network-scan events.
    """
    __tablename__ = "scan_log"

    id = Column(Integer, primary_key=True, autoincrement=True)
    ip = Column(String, index=True, nullable=True)       # Scanned address, if applicable
    status = Column(String, nullable=True)               # Outcome for that address
    message = Column(String, nullable=False)             # Human-readable log line
    created_at = Column(DateTime, default=datetime.now, index=True, nullable=False)

    def __repr__(self):
        return f"<ScanLog {self.id} {self.ip} {self.status}>"


# Named index on the timestamp (also declared via index=True above)
Index('idx_scan_log_created_at', ScanLog.created_at)
|
||||
8
backend/app/routers/__init__.py
Executable file
@@ -0,0 +1,8 @@
|
||||
"""
|
||||
Routers API pour IPWatch
|
||||
"""
|
||||
from .ips import router as ips_router
|
||||
from .scan import router as scan_router
|
||||
from .websocket import router as websocket_router
|
||||
|
||||
__all__ = ["ips_router", "scan_router", "websocket_router"]
|
||||
132
backend/app/routers/architecture.py
Normal file
@@ -0,0 +1,132 @@
|
||||
"""
|
||||
Endpoints API pour l'éditeur d'architecture
|
||||
"""
|
||||
from fastapi import APIRouter, Depends, HTTPException
|
||||
from sqlalchemy.orm import Session
|
||||
from pydantic import BaseModel
|
||||
from typing import List, Optional, Dict, Any
|
||||
from uuid import uuid4
|
||||
from datetime import datetime
|
||||
import json
|
||||
from pathlib import Path
|
||||
|
||||
from backend.app.core.database import get_arch_db
|
||||
from backend.app.models.architecture import ArchitectureNode
|
||||
|
||||
router = APIRouter(prefix="/api/architecture", tags=["Architecture"])
|
||||
DATA_DIR = Path(__file__).resolve().parents[3] / "data"
|
||||
WORLD_FILE = DATA_DIR / "architecture.json"
|
||||
|
||||
|
||||
class ArchitectureNodeCreate(BaseModel):
    """Request body for creating a node; the id is generated when omitted."""
    id: Optional[str] = None
    type: str
    x: int
    y: int
    width: int
    height: int
    rotation: int = 0
    payload: Dict[str, Any]


class ArchitectureNodeResponse(BaseModel):
    """Node as returned by the API, with its payload already deserialized."""
    id: str
    type: str
    x: int
    y: int
    width: int
    height: int
    rotation: int
    payload: Dict[str, Any]
    created_at: datetime

    class Config:
        from_attributes = True


class ArchitectureWorldPayload(BaseModel):
    """Full world snapshot: free-form items plus optional splines."""
    items: List[Dict[str, Any]]
    splines: Optional[List[Dict[str, Any]]] = None
|
||||
|
||||
|
||||
@router.get("/nodes", response_model=List[ArchitectureNodeResponse])
|
||||
async def list_nodes(db: Session = Depends(get_arch_db)):
|
||||
"""Liste tous les noeuds d'architecture"""
|
||||
nodes = db.query(ArchitectureNode).order_by(ArchitectureNode.created_at.asc()).all()
|
||||
results = []
|
||||
for node in nodes:
|
||||
try:
|
||||
payload = json.loads(node.payload or "{}")
|
||||
except json.JSONDecodeError:
|
||||
payload = {}
|
||||
results.append(ArchitectureNodeResponse(
|
||||
id=node.id,
|
||||
type=node.type,
|
||||
x=node.x,
|
||||
y=node.y,
|
||||
width=node.width,
|
||||
height=node.height,
|
||||
rotation=node.rotation,
|
||||
payload=payload,
|
||||
created_at=node.created_at
|
||||
))
|
||||
return results
|
||||
|
||||
|
||||
@router.post("/nodes", response_model=ArchitectureNodeResponse)
|
||||
async def create_node(payload: ArchitectureNodeCreate, db: Session = Depends(get_arch_db)):
|
||||
"""Créer un noeud d'architecture"""
|
||||
node_id = payload.id or str(uuid4())
|
||||
node = ArchitectureNode(
|
||||
id=node_id,
|
||||
type=payload.type,
|
||||
x=payload.x,
|
||||
y=payload.y,
|
||||
width=payload.width,
|
||||
height=payload.height,
|
||||
rotation=payload.rotation,
|
||||
payload=json.dumps(payload.payload)
|
||||
)
|
||||
db.add(node)
|
||||
db.commit()
|
||||
db.refresh(node)
|
||||
return ArchitectureNodeResponse(
|
||||
id=node.id,
|
||||
type=node.type,
|
||||
x=node.x,
|
||||
y=node.y,
|
||||
width=node.width,
|
||||
height=node.height,
|
||||
rotation=node.rotation,
|
||||
payload=payload.payload,
|
||||
created_at=node.created_at
|
||||
)
|
||||
|
||||
|
||||
def ensure_world_file() -> None:
    """Create the data directory and an empty architecture.json when missing."""
    DATA_DIR.mkdir(parents=True, exist_ok=True)
    if WORLD_FILE.exists():
        return
    WORLD_FILE.write_text(json.dumps({"items": [], "splines": []}, indent=2), encoding="utf-8")
|
||||
|
||||
|
||||
@router.get("/world")
|
||||
async def get_world():
|
||||
"""Charge le fichier architecture.json, le crée si absent."""
|
||||
ensure_world_file()
|
||||
try:
|
||||
data = json.loads(WORLD_FILE.read_text(encoding="utf-8"))
|
||||
except json.JSONDecodeError:
|
||||
data = {"items": [], "splines": []}
|
||||
return data
|
||||
|
||||
|
||||
@router.post("/world")
|
||||
async def save_world(payload: ArchitectureWorldPayload):
|
||||
"""Sauvegarde les éléments du world dans architecture.json."""
|
||||
ensure_world_file()
|
||||
splines = payload.splines or []
|
||||
WORLD_FILE.write_text(
|
||||
json.dumps({"items": payload.items, "splines": splines}, indent=2),
|
||||
encoding="utf-8"
|
||||
)
|
||||
return {"status": "ok", "count": len(payload.items), "splines": len(splines)}
|
||||
73
backend/app/routers/config.py
Executable file
@@ -0,0 +1,73 @@
|
||||
"""
|
||||
Routes pour la configuration
|
||||
"""
|
||||
from fastapi import APIRouter, HTTPException
|
||||
from pydantic import BaseModel
|
||||
import yaml
|
||||
from backend.app.core.config import config_manager
|
||||
|
||||
router = APIRouter(prefix="/api/config", tags=["config"])
|
||||
|
||||
@router.get("/ui")
|
||||
async def get_ui_config():
|
||||
"""Récupérer la configuration UI"""
|
||||
config = config_manager.config
|
||||
return {
|
||||
"cell_size": config.ui.cell_size,
|
||||
"font_size": config.ui.font_size,
|
||||
"cell_gap": config.ui.cell_gap,
|
||||
"offline_transparency": config.ui.offline_transparency,
|
||||
"show_mac": config.ui.show_mac,
|
||||
"show_vendor": config.ui.show_vendor,
|
||||
"architecture_title_font_size": config.ui.architecture_title_font_size
|
||||
}
|
||||
|
||||
@router.post("/reload")
|
||||
async def reload_config():
|
||||
"""Recharger la configuration depuis le fichier config.yaml"""
|
||||
try:
|
||||
config = config_manager.reload_config()
|
||||
return {
|
||||
"success": True,
|
||||
"message": "Configuration rechargée avec succès",
|
||||
"ui": {
|
||||
"cell_size": config.ui.cell_size,
|
||||
"font_size": config.ui.font_size,
|
||||
"cell_gap": config.ui.cell_gap,
|
||||
"offline_transparency": config.ui.offline_transparency,
|
||||
"show_mac": config.ui.show_mac,
|
||||
"show_vendor": config.ui.show_vendor,
|
||||
"architecture_title_font_size": config.ui.architecture_title_font_size
|
||||
}
|
||||
}
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Erreur rechargement config: {str(e)}")
|
||||
|
||||
|
||||
class UIConfigUpdate(BaseModel):
    """Payload for the UI settings editable from the frontend."""
    architecture_title_font_size: int


@router.post("/ui")
async def update_ui_config(payload: UIConfigUpdate):
    """Persist a UI setting into config.yaml and reload the configuration.

    Robustness fix: config.yaml is rewritten via a temporary file plus
    os.replace, so a crash mid-write cannot leave the configuration file
    truncated (the original wrote to the live file directly).
    """
    import os

    config_path = "./config.yaml"
    try:
        with open(config_path, "r", encoding="utf-8") as f:
            yaml_data = yaml.safe_load(f) or {}

        if "ui" not in yaml_data or yaml_data["ui"] is None:
            yaml_data["ui"] = {}

        yaml_data["ui"]["architecture_title_font_size"] = int(payload.architecture_title_font_size)

        # Atomic replacement of the live config file.
        tmp_path = config_path + ".tmp"
        with open(tmp_path, "w", encoding="utf-8") as f:
            yaml.safe_dump(yaml_data, f, allow_unicode=True, sort_keys=False)
        os.replace(tmp_path, config_path)

        config = config_manager.reload_config()
        return {
            "message": "Configuration UI mise à jour",
            "architecture_title_font_size": config.ui.architecture_title_font_size
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Erreur mise à jour config UI: {str(e)}")
|
||||
665
backend/app/routers/ips.py
Executable file
@@ -0,0 +1,665 @@
|
||||
"""
|
||||
Endpoints API pour la gestion des IPs
|
||||
"""
|
||||
from fastapi import APIRouter, Depends, HTTPException, UploadFile, File
|
||||
from sqlalchemy.orm import Session
|
||||
from sqlalchemy import desc
|
||||
from typing import List, Optional
|
||||
from datetime import datetime, timedelta
|
||||
import xml.etree.ElementTree as ET
|
||||
import yaml
|
||||
from pathlib import Path
|
||||
import re
|
||||
import time
|
||||
import urllib.request
|
||||
|
||||
from backend.app.core.database import get_db
|
||||
from backend.app.models.ip import IP, IPHistory
|
||||
from backend.app.core.config import config_manager
|
||||
from pydantic import BaseModel
|
||||
|
||||
router = APIRouter(prefix="/api/ips", tags=["IPs"])
|
||||
|
||||
ICONS_DIR = Path("./data/icons")
|
||||
ALLOWED_ICON_EXTENSIONS = {".png", ".jpg", ".jpeg", ".webp", ".svg"}
|
||||
OUI_URL = "https://standards-oui.ieee.org/oui/oui.txt"
|
||||
OUI_PATH = Path("./data/oui/oui.txt")
|
||||
|
||||
|
||||
def _sanitize_filename(filename: str) -> str:
|
||||
name = Path(filename).name
|
||||
name = re.sub(r"[^A-Za-z0-9._-]+", "_", name)
|
||||
if not name or name in {".", ".."}:
|
||||
return f"icon_{int(time.time())}.png"
|
||||
if "." not in name:
|
||||
return f"{name}.png"
|
||||
return name
|
||||
|
||||
|
||||
@router.get("/oui/status")
async def oui_status():
    """
    Report the state of the locally cached OUI file.

    Returns:
        Whether the file exists and, if so, its last-modified timestamp
        in ISO-8601 form.
    """
    if not OUI_PATH.exists():
        return {"exists": False, "updated_at": None}
    # The file's mtime doubles as the "last updated" marker.
    last_update = datetime.fromtimestamp(OUI_PATH.stat().st_mtime)
    return {"exists": True, "updated_at": last_update.isoformat()}
|
||||
|
||||
|
||||
@router.post("/oui/update")
async def update_oui(db: Session = Depends(get_db)):
    """
    Download the IEEE OUI registry and refresh unknown vendors.

    Saves oui.txt to OUI_PATH, then re-resolves the vendor of every IP
    row that has a MAC but no vendor (or "Unknown").

    Args:
        db: Database session.

    Returns:
        Confirmation message plus the number of vendors updated.

    Raises:
        HTTPException: 500 when the download fails.
    """
    OUI_PATH.parent.mkdir(parents=True, exist_ok=True)
    try:
        request = urllib.request.Request(
            OUI_URL,
            headers={
                "User-Agent": "IPWatch/1.0 (+https://ipwatch.local)"
            }
        )
        # Explicit timeout: urlopen blocks indefinitely by default, so a
        # stalled IEEE server would otherwise hang this endpoint forever.
        with urllib.request.urlopen(request, timeout=60) as response:
            OUI_PATH.write_bytes(response.read())
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Erreur téléchargement OUI: {str(e)}")

    # Refresh vendors that are still unknown in the database.
    from backend.app.services.network import OuiLookup

    updated = 0
    ips = db.query(IP).filter(IP.mac.isnot(None)).all()
    for ip in ips:
        if ip.vendor and ip.vendor not in {"Unknown", ""}:
            # Already resolved to a real vendor: leave it untouched.
            continue
        vendor = OuiLookup.lookup(ip.mac)
        if vendor:
            ip.vendor = vendor
            updated += 1
    db.commit()

    return {"message": "Liste OUI mise à jour", "updated_vendors": updated}
|
||||
|
||||
|
||||
@router.get("/icons")
async def list_icons():
    """
    List the icon files available in the shared icons directory.

    Returns:
        Sorted file names with an allowed image extension.
    """
    ICONS_DIR.mkdir(parents=True, exist_ok=True)
    names = [
        entry.name
        for entry in ICONS_DIR.iterdir()
        if entry.is_file() and entry.suffix.lower() in ALLOWED_ICON_EXTENSIONS
    ]
    return {"icons": sorted(names)}
|
||||
|
||||
|
||||
@router.post("/icons/upload")
async def upload_icon(file: UploadFile = File(...)):
    """
    Store an uploaded icon into the shared icons directory.

    Args:
        file: Uploaded image file.

    Returns:
        The stored file name and its public URL.

    Raises:
        HTTPException: 400 for an unsupported extension, 500 on write failure.
    """
    ICONS_DIR.mkdir(parents=True, exist_ok=True)

    # Normalise the client-supplied name before touching the filesystem.
    safe_name = _sanitize_filename(file.filename or "")
    if Path(safe_name).suffix.lower() not in ALLOWED_ICON_EXTENSIONS:
        raise HTTPException(status_code=400, detail="Format d'image non supporté")

    destination = ICONS_DIR / safe_name

    try:
        payload = await file.read()
        destination.write_bytes(payload)
        return {
            "filename": destination.name,
            "url": f"/icons/{destination.name}"
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Erreur upload: {str(e)}")
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
# Schémas Pydantic pour validation
|
||||
class IPUpdate(BaseModel):
    """Schema for a partial IP update; every field is optional."""
    name: Optional[str] = None
    known: Optional[bool] = None
    tracked: Optional[bool] = None
    vm: Optional[bool] = None
    hardware_bench: Optional[bool] = None
    network_device: Optional[bool] = None
    location: Optional[str] = None
    host: Optional[str] = None
    link: Optional[str] = None
    last_status: Optional[str] = None
    mac: Optional[str] = None
    vendor: Optional[str] = None
    hostname: Optional[str] = None
    mac_changed: Optional[bool] = None
    open_ports: Optional[List[int]] = None
    first_seen: Optional[datetime] = None
    last_seen: Optional[datetime] = None
    icon_filename: Optional[str] = None
    icon_url: Optional[str] = None
    # Parent IP address for the parent/child topology links.
    ip_parent: Optional[str] = None
    # Child IP addresses ("enfant" = child); kept in sync by update_ip.
    ip_enfant: Optional[List[str]] = None
    dhcp_synced: Optional[bool] = None
|
||||
|
||||
|
||||
class IPResponse(BaseModel):
    """Response schema for an IP row."""
    ip: str
    name: Optional[str]
    known: bool
    tracked: Optional[bool] = False
    vm: Optional[bool] = False
    hardware_bench: Optional[bool] = False
    network_device: Optional[bool] = False
    location: Optional[str]
    host: Optional[str]
    first_seen: Optional[datetime]
    last_seen: Optional[datetime]
    # "online" / "offline" as written by the scanner; may be unset.
    last_status: Optional[str]
    mac: Optional[str]
    vendor: Optional[str]
    hostname: Optional[str]
    link: Optional[str]
    mac_changed: Optional[bool] = False
    open_ports: List[int]
    icon_filename: Optional[str]
    icon_url: Optional[str]
    # Parent/child topology links ("enfant" = child).
    ip_parent: Optional[str]
    ip_enfant: List[str] = []
    dhcp_synced: Optional[bool] = False

    class Config:
        # Allow building the schema directly from ORM objects.
        from_attributes = True
|
||||
|
||||
|
||||
class IPHistoryResponse(BaseModel):
    """Response schema for a history entry of an IP."""
    id: int
    ip: str
    timestamp: datetime
    # Status recorded at that point in time.
    status: str
    open_ports: List[int]

    class Config:
        # Allow building the schema directly from ORM objects.
        from_attributes = True
|
||||
|
||||
|
||||
@router.get("/", response_model=List[IPResponse])
async def get_all_ips(
    status: Optional[str] = None,
    known: Optional[bool] = None,
    db: Session = Depends(get_db)
):
    """
    Return every IP, optionally filtered.

    Args:
        status: Keep only IPs whose last status matches (online/offline).
        known: Keep only known (True) or unknown (False) IPs.
        db: Database session.

    Returns:
        The matching IP rows.
    """
    # Accumulate criteria and apply them in one shot.
    criteria = []
    if status:
        criteria.append(IP.last_status == status)
    if known is not None:
        criteria.append(IP.known == known)
    return db.query(IP).filter(*criteria).all()
|
||||
|
||||
|
||||
@router.get("/{ip_address}", response_model=IPResponse)
async def get_ip(ip_address: str, db: Session = Depends(get_db)):
    """
    Return the details of a single IP.

    Args:
        ip_address: IP address to look up.
        db: Database session.

    Returns:
        The matching IP row.

    Raises:
        HTTPException: 404 when the IP is not registered.
    """
    record = db.query(IP).filter(IP.ip == ip_address).first()
    if record is None:
        raise HTTPException(status_code=404, detail="IP non trouvée")
    return record
|
||||
|
||||
|
||||
@router.put("/{ip_address}", response_model=IPResponse)
async def update_ip(
    ip_address: str,
    ip_update: IPUpdate,
    db: Session = Depends(get_db)
):
    """
    Update an IP's attributes and keep parent/child links consistent.

    Args:
        ip_address: IP address to update.
        ip_update: Fields to change (only fields explicitly set are applied).
        db: Database session.

    Returns:
        The updated IP row.

    Raises:
        HTTPException: 404 when the IP does not exist.
    """
    ip = db.query(IP).filter(IP.ip == ip_address).first()

    if not ip:
        raise HTTPException(status_code=404, detail="IP non trouvée")

    # Apply only the fields the client actually sent.
    update_data = ip_update.dict(exclude_unset=True)
    # Remember the parent before mutation so the link bookkeeping below
    # can detect a re-parenting.
    old_parent = ip.ip_parent
    new_parent = update_data.get("ip_parent", old_parent)
    for field, value in update_data.items():
        setattr(ip, field, value)

    # Setting host to 'Network' implicitly flags the row as a network
    # device (and clears the flag for any other host value).
    if 'host' in update_data:
        ip.network_device = (update_data['host'] == 'Network')

    # Re-assign the list explicitly — presumably so the ORM registers a
    # change on the JSON column; TODO confirm that is the intent.
    if "ip_enfant" in update_data and update_data["ip_enfant"] is not None:
        ip.ip_enfant = update_data["ip_enfant"]

    # Keep the child lists of the old and new parents in sync.
    if new_parent != old_parent:
        if old_parent:
            # Detach this IP from its previous parent's child list.
            parent = db.query(IP).filter(IP.ip == old_parent).first()
            if parent and parent.ip_enfant:
                parent.ip_enfant = [child for child in parent.ip_enfant if child != ip.ip]
        if new_parent:
            # Attach this IP to its new parent's child list (idempotent).
            parent = db.query(IP).filter(IP.ip == new_parent).first()
            if parent:
                current_children = parent.ip_enfant or []
                if ip.ip not in current_children:
                    parent.ip_enfant = current_children + [ip.ip]

    db.commit()
    db.refresh(ip)

    return ip
|
||||
|
||||
|
||||
@router.delete("/{ip_address}")
async def delete_ip(ip_address: str, db: Session = Depends(get_db)):
    """
    Delete an IP (its history goes with it).

    Args:
        ip_address: IP address to delete.
        db: Database session.

    Returns:
        A confirmation message.

    Raises:
        HTTPException: 404 when the IP is not registered.
    """
    record = db.query(IP).filter(IP.ip == ip_address).first()
    if record is None:
        raise HTTPException(status_code=404, detail="IP non trouvée")

    db.delete(record)
    db.commit()
    return {"message": f"IP {ip_address} supprimée"}
|
||||
|
||||
|
||||
@router.get("/{ip_address}/history", response_model=List[IPHistoryResponse])
async def get_ip_history(
    ip_address: str,
    hours: int = 24,
    db: Session = Depends(get_db)
):
    """
    Return the recent history of an IP.

    Args:
        ip_address: IP address to query.
        hours: Size of the look-back window in hours (default: 24).
        db: Database session.

    Returns:
        History entries, most recent first.

    Raises:
        HTTPException: 404 when the IP is not registered.
    """
    # The IP must exist before we query its history.
    if db.query(IP).filter(IP.ip == ip_address).first() is None:
        raise HTTPException(status_code=404, detail="IP non trouvée")

    # Only keep entries inside the requested window.
    cutoff = datetime.now() - timedelta(hours=hours)

    return (
        db.query(IPHistory)
        .filter(IPHistory.ip == ip_address, IPHistory.timestamp >= cutoff)
        .order_by(desc(IPHistory.timestamp))
        .all()
    )
|
||||
|
||||
|
||||
@router.delete("/{ip_address}/history")
async def delete_ip_history(ip_address: str, db: Session = Depends(get_db)):
    """
    Delete an IP's history entries while keeping the IP itself.

    Args:
        ip_address: IP address whose history is purged.
        db: Database session.

    Returns:
        Confirmation message with the number of rows removed.

    Raises:
        HTTPException: 404 when the IP is not registered.
    """
    # The IP itself must exist for the operation to make sense.
    if db.query(IP).filter(IP.ip == ip_address).first() is None:
        raise HTTPException(status_code=404, detail="IP non trouvée")

    # Bulk-delete every history row for this address.
    removed = db.query(IPHistory).filter(IPHistory.ip == ip_address).delete()
    db.commit()

    return {"message": f"Historique de {ip_address} supprimé", "deleted_count": removed}
|
||||
|
||||
|
||||
@router.get("/stats/summary")
async def get_stats(db: Session = Depends(get_db)):
    """
    Return global network statistics.

    Args:
        db: Database session.

    Returns:
        Counts: total, online, offline, known, unknown.
    """
    def count_where(*criteria) -> int:
        # Small helper: count IP rows matching the given filters.
        return db.query(IP).filter(*criteria).count()

    return {
        "total": count_where(),
        "online": count_where(IP.last_status == "online"),
        "offline": count_where(IP.last_status == "offline"),
        "known": count_where(IP.known == True),
        "unknown": count_where(IP.known == False)
    }
|
||||
|
||||
|
||||
@router.get("/config/options")
async def get_config_options():
    """
    Return the configuration options exposed to the frontend.

    Returns:
        Dict with locations, hosts, port_protocols, subnets, version,
        hardware_bench_url and force_vendor_update.
    """
    config = config_manager.config

    # Port protocol labels are optional in the configuration schema.
    port_protocols = getattr(config.ports, 'protocols', None) or {}

    # Subnets are likewise optional; serialise them to plain dicts.
    subnets = [
        {
            "name": s.name,
            "cidr": s.cidr,
            "start": s.start,
            "end": s.end,
            "description": s.description
        }
        for s in (getattr(config, 'subnets', None) or [])
    ]

    return {
        "locations": config.locations,
        "hosts": [{"name": h.name, "location": h.location} for h in config.hosts],
        "port_protocols": port_protocols,
        "subnets": subnets,
        "version": config.app.version,
        "hardware_bench_url": getattr(config.links, "hardware_bench_url", None),
        "force_vendor_update": getattr(config.scan, "force_vendor_update", False)
    }
|
||||
|
||||
|
||||
class HardwareBenchConfig(BaseModel):
    """Schema for updating the hardware-bench link."""
    # Target URL; None (or blank) clears the link.
    url: Optional[str] = None
|
||||
|
||||
|
||||
class ForceVendorConfig(BaseModel):
    """Schema for toggling the forced vendor-update mode."""
    enabled: bool = False
|
||||
|
||||
|
||||
@router.post("/config/hardware-bench")
async def update_hardware_bench(config_update: HardwareBenchConfig):
    """
    Persist the hardware-bench URL into config.yaml.

    Args:
        config_update: New URL (blank/None clears the setting).

    Returns:
        A confirmation message.

    Raises:
        HTTPException: 500 on any read/write/reload failure.
    """
    config_file = "./config.yaml"
    try:
        with open(config_file, "r", encoding="utf-8") as handle:
            document = yaml.safe_load(handle) or {}

        # Create the "links" section on demand.
        if document.get("links") is None:
            document["links"] = {}

        # An empty or whitespace-only URL clears the setting.
        cleaned = (config_update.url or "").strip()
        document["links"]["hardware_bench_url"] = cleaned or None

        with open(config_file, "w", encoding="utf-8") as handle:
            yaml.safe_dump(document, handle, allow_unicode=True, sort_keys=False)

        config_manager.reload_config()
        return {"message": "Lien hardware bench mis à jour"}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Erreur mise à jour config: {str(e)}")
|
||||
|
||||
|
||||
@router.post("/config/force-vendor")
async def update_force_vendor(config_update: ForceVendorConfig):
    """
    Enable or disable the forced vendor-update mode in config.yaml.

    Args:
        config_update: Desired state of the flag.

    Returns:
        A confirmation message.

    Raises:
        HTTPException: 500 on any read/write/reload failure.
    """
    config_file = "./config.yaml"
    try:
        with open(config_file, "r", encoding="utf-8") as handle:
            document = yaml.safe_load(handle) or {}

        # Create the "scan" section on demand.
        if document.get("scan") is None:
            document["scan"] = {}

        document["scan"]["force_vendor_update"] = bool(config_update.enabled)

        with open(config_file, "w", encoding="utf-8") as handle:
            yaml.safe_dump(document, handle, allow_unicode=True, sort_keys=False)

        config_manager.reload_config()
        return {"message": "Mode force fabricant mis à jour"}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Erreur mise à jour config: {str(e)}")
|
||||
|
||||
|
||||
@router.get("/config/content")
async def get_config_content():
    """
    Return the raw text of config.yaml.

    Returns:
        The YAML file content as a string.

    Raises:
        HTTPException: 500 when the file cannot be read.
    """
    try:
        # Path.read_text handles open/close in one call.
        return {"content": Path("./config.yaml").read_text(encoding="utf-8")}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Erreur lecture config: {str(e)}")
|
||||
|
||||
|
||||
@router.post("/config/reload")
async def reload_config():
    """
    Reload the application configuration from config.yaml.

    Returns:
        A confirmation message.

    Raises:
        HTTPException: 500 when reloading fails.
    """
    try:
        config_manager.reload_config()
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Erreur rechargement config: {str(e)}")
    return {"message": "Configuration rechargée avec succès"}
|
||||
|
||||
|
||||
@router.post("/import/ipscan")
async def import_ipscan(file: UploadFile = File(...), db: Session = Depends(get_db)):
    """
    Import hosts from an Angry IP Scanner XML export.

    Existing rows are enriched (vendor/ports always refreshed, hostname
    and MAC only if missing); unseen addresses are created as unknown
    online IPs.

    Args:
        file: Uploaded XML file.
        db: Database session.

    Returns:
        Import statistics (imported, updated, first 10 errors).

    Raises:
        HTTPException: 400 for a non-XML or unparseable file, 500 on any
            other failure.
    """
    if not file.filename.endswith('.xml'):
        raise HTTPException(status_code=400, detail="Le fichier doit être un XML")

    try:
        # Read the full upload into memory.
        content = await file.read()

        # Try to parse the XML, with a recovery pass on failure.
        try:
            root = ET.fromstring(content)
        except ET.ParseError as e:
            # Parsing failed: try again after stripping invalid bytes.
            import re
            content_str = content.decode('utf-8', errors='ignore')

            # Drop control characters that break the parser (keep tab/CR/LF).
            content_str = ''.join(char for char in content_str
                                  if ord(char) >= 32 or char in '\t\r\n')

            try:
                root = ET.fromstring(content_str.encode('utf-8'))
            except ET.ParseError:
                raise HTTPException(status_code=400, detail=f"Fichier XML invalide même après nettoyage: {str(e)}")

        imported = 0
        updated = 0
        errors = []

        # Process each <host> element of the export.
        for host in root.findall('.//host'):
            try:
                # The IP address lives in the "address" attribute.
                ip_address = host.get('address')
                if not ip_address:
                    continue

                # Collect the per-host scan results.
                hostname = None
                mac = None
                vendor = None
                ports = []

                for result in host.findall('result'):
                    name = result.get('name')
                    value = result.text.strip() if result.text else ""

                    # "[n/a]" is Angry IP Scanner's marker for "no data".
                    if value == "[n/a]":
                        value = None

                    # NOTE: result names are the French UI labels of
                    # Angry IP Scanner; an export from another locale
                    # would not match — TODO confirm this is acceptable.
                    if name == "Nom d'hôte" and value:
                        hostname = value
                    elif name == "Adresse MAC" and value:
                        mac = value
                    elif name == "Constructeur MAC" and value:
                        vendor = value
                    elif name == "Ports" and value:
                        # Ports come as a comma-separated list ("22,80,443").
                        try:
                            ports = [int(p.strip()) for p in value.split(',') if p.strip().isdigit()]
                        except Exception as e:
                            ports = []

                # Does this address already exist in the database?
                existing_ip = db.query(IP).filter(IP.ip == ip_address).first()

                if existing_ip:
                    # Enrich the existing row with any new information.
                    if hostname:
                        if not existing_ip.hostname:
                            existing_ip.hostname = hostname
                        if not existing_ip.name:
                            existing_ip.name = hostname
                    if mac and not existing_ip.mac:
                        existing_ip.mac = mac
                    # Vendor and ports are always refreshed from the scan
                    # (considered more complete and up to date).
                    if vendor:
                        existing_ip.vendor = vendor
                    if ports:
                        existing_ip.open_ports = ports
                    existing_ip.last_status = "online"
                    existing_ip.last_seen = datetime.now()
                    updated += 1
                else:
                    # Create a brand new row for this address.
                    new_ip = IP(
                        ip=ip_address,
                        name=hostname,
                        hostname=hostname,
                        mac=mac,
                        vendor=vendor,
                        open_ports=ports or [],
                        last_status="online",
                        known=False,
                        first_seen=datetime.now(),
                        last_seen=datetime.now()
                    )
                    db.add(new_ip)
                    imported += 1

            except Exception as e:
                # Record the failure and keep importing the other hosts.
                errors.append(f"Erreur pour {ip_address}: {str(e)}")
                continue

        # Persist all changes in one transaction.
        db.commit()

        return {
            "message": "Import terminé",
            "imported": imported,
            "updated": updated,
            "errors": errors[:10]  # Cap the error list at 10 entries
        }

    except ET.ParseError as e:
        raise HTTPException(status_code=400, detail=f"Fichier XML invalide: {str(e)}")
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Erreur import: {str(e)}")
|
||||
164
backend/app/routers/opnsense.py
Normal file
@@ -0,0 +1,164 @@
|
||||
"""
|
||||
Endpoints API pour l'intégration OPNsense (Kea DHCP)
|
||||
"""
|
||||
import traceback
|
||||
from fastapi import APIRouter, Depends, HTTPException
|
||||
from sqlalchemy.orm import Session
|
||||
from pydantic import BaseModel
|
||||
from typing import Optional
|
||||
|
||||
from backend.app.core.database import get_db
|
||||
from backend.app.core.config import config_manager
|
||||
from backend.app.models.ip import IP
|
||||
from backend.app.services.opnsense_client import OPNsenseClient, OPNsenseAPIError
|
||||
|
||||
router = APIRouter(prefix="/api/opnsense", tags=["OPNsense"])
|
||||
|
||||
|
||||
class DHCPReservationRequest(BaseModel):
    """Schema for creating/updating a Kea DHCP reservation."""
    ip_address: str
    # MAC address of the device the reservation binds to.
    hw_address: str
    hostname: str = ""
    description: str = "Ajouté par IPWatch"
|
||||
|
||||
|
||||
def get_opnsense_client() -> OPNsenseClient:
    """
    Build an OPNsense client from the current configuration.

    Returns:
        A configured OPNsenseClient.

    Raises:
        HTTPException: 503 when the integration is disabled or the
            host/API key are missing.
    """
    config = config_manager.config.opnsense
    # Never echo API-key material into the logs, even truncated: a partial
    # secret still narrows the search space for an attacker reading logs.
    print(f"[OPNsense Router] Config: enabled={config.enabled}, host={config.host}, api_key={'***' if config.api_key else 'VIDE'}")
    if not config.enabled:
        raise HTTPException(status_code=503, detail="Intégration OPNsense désactivée")
    if not config.host or not config.api_key:
        raise HTTPException(status_code=503, detail="Configuration OPNsense incomplète")
    return OPNsenseClient()
|
||||
|
||||
|
||||
@router.get("/status")
async def opnsense_status():
    """
    Check connectivity to the OPNsense API.

    Returns:
        Connection status plus the raw probe payload.

    Raises:
        HTTPException: 502 when the probe fails.
    """
    client = get_opnsense_client()
    try:
        data = await client.test_connection()
    except Exception as e:
        print(f"[OPNsense Router] Erreur status: {type(e).__name__}: {e}")
        traceback.print_exc()
        raise HTTPException(status_code=502, detail=f"Connexion OPNsense échouée: {type(e).__name__}: {str(e)}")
    return {"status": "connected", "data": data}
|
||||
|
||||
|
||||
@router.get("/dhcp/reservations")
async def list_reservations():
    """
    List every Kea DHCP reservation known to OPNsense.

    Returns:
        The raw reservation search payload.

    Raises:
        HTTPException: 502 when the OPNsense call fails.
    """
    client = get_opnsense_client()
    try:
        return await client.search_reservations()
    except Exception as e:
        print(f"[OPNsense Router] Erreur list_reservations: {type(e).__name__}: {e}")
        traceback.print_exc()
        raise HTTPException(status_code=502, detail=f"Erreur récupération réservations: {type(e).__name__}: {str(e)}")
|
||||
|
||||
|
||||
@router.get("/dhcp/reservation/{ip_address}")
async def get_reservation_by_ip(ip_address: str):
    """
    Look up a Kea DHCP reservation by IP address.

    Args:
        ip_address: IP address to search for.

    Returns:
        {"found": bool, "reservation": dict | None}.

    Raises:
        HTTPException: 502 when the OPNsense call fails.
    """
    client = get_opnsense_client()
    try:
        reservation = await client.find_reservation_by_ip(ip_address)
    except Exception as e:
        print(f"[OPNsense Router] Erreur get_reservation_by_ip: {type(e).__name__}: {e}")
        traceback.print_exc()
        raise HTTPException(status_code=502, detail=f"Erreur recherche réservation: {type(e).__name__}: {str(e)}")
    return {"found": reservation is not None, "reservation": reservation if reservation else None}
|
||||
|
||||
|
||||
@router.post("/dhcp/reservation")
async def upsert_reservation(
    request: DHCPReservationRequest,
    db: Session = Depends(get_db)
):
    """
    Create or update a Kea DHCP reservation for an IP.

    After success, sets dhcp_synced=True on the matching row in the
    database.

    Args:
        request: Reservation data (IP, MAC, hostname, description).
        db: Database session.

    Returns:
        Status payload with the action performed ("created"/"updated").

    Raises:
        HTTPException: 400 when no Kea subnet covers the IP, 422 on an
            OPNsense validation error, 502 on any other OPNsense failure.
    """
    print(f"[OPNsense Router] === UPSERT RESERVATION ===")
    print(f"[OPNsense Router] IP: {request.ip_address}, MAC: {request.hw_address}, Hostname: {request.hostname}")

    client = get_opnsense_client()

    try:
        # Step 0: resolve the Kea subnet UUID that contains the IP.
        print(f"[OPNsense Router] Étape 0: Résolution du subnet pour {request.ip_address}...")
        subnet_uuid = await client.find_subnet_for_ip(request.ip_address)
        if not subnet_uuid:
            raise HTTPException(status_code=400, detail=f"Aucun subnet Kea trouvé pour l'IP {request.ip_address}")

        reservation_data = {
            "subnet": subnet_uuid,
            "ip_address": request.ip_address,
            "hw_address": request.hw_address,
            "hostname": request.hostname,
            "description": request.description
        }
        print(f"[OPNsense Router] Données réservation: {reservation_data}")

        # Step 1: look for an existing reservation on that IP.
        print(f"[OPNsense Router] Étape 1: Recherche réservation existante...")
        existing = await client.find_reservation_by_ip(request.ip_address)

        if existing:
            # Step 2 (update path): overwrite the existing reservation.
            uuid = existing.get("uuid")
            print(f"[OPNsense Router] Étape 2: Mise à jour réservation existante uuid={uuid}")
            if not uuid:
                raise HTTPException(status_code=500, detail="UUID de réservation introuvable")
            result = await client.set_reservation(uuid, reservation_data)
            action = "updated"
        else:
            # Step 2 (create path): add a brand new reservation.
            print(f"[OPNsense Router] Étape 2: Création nouvelle réservation")
            result = await client.add_reservation(reservation_data)
            action = "created"

        print(f"[OPNsense Router] Étape 2 terminée: action={action}, result={result}")

        # Step 3: apply the pending changes inside Kea.
        print(f"[OPNsense Router] Étape 3: Reconfiguration Kea...")
        await client.reconfigure_kea()
        print(f"[OPNsense Router] Étape 3 terminée: Kea reconfiguré")

        # Step 4: reflect the successful sync in the local database.
        print(f"[OPNsense Router] Étape 4: Mise à jour BDD dhcp_synced=True")
        ip_record = db.query(IP).filter(IP.ip == request.ip_address).first()
        if ip_record:
            ip_record.dhcp_synced = True
            db.commit()
            db.refresh(ip_record)
            print(f"[OPNsense Router] Étape 4 terminée: BDD mise à jour")
        else:
            print(f"[OPNsense Router] ATTENTION: IP {request.ip_address} non trouvée en BDD")

        print(f"[OPNsense Router] === SUCCÈS: {action} ===")
        return {
            "status": "success",
            "action": action,
            "ip_address": request.ip_address,
            "result": result
        }

    except HTTPException:
        # Let our own 400/500 responses pass through untouched.
        raise
    except OPNsenseAPIError as e:
        # OPNsense validation failures map to 422 with their details.
        print(f"[OPNsense Router] === ERREUR VALIDATION ===")
        print(f"[OPNsense Router] Message: {str(e)}")
        print(f"[OPNsense Router] Validations: {e.validations}")
        raise HTTPException(status_code=422, detail=str(e))
    except Exception as e:
        # Anything else is treated as an upstream (gateway) failure.
        print(f"[OPNsense Router] === ERREUR ===")
        print(f"[OPNsense Router] Type: {type(e).__name__}")
        print(f"[OPNsense Router] Message: {str(e)}")
        traceback.print_exc()
        raise HTTPException(status_code=502, detail=f"Erreur OPNsense: {type(e).__name__}: {str(e)}")
|
||||
362
backend/app/routers/scan.py
Executable file
@@ -0,0 +1,362 @@
|
||||
"""
|
||||
Endpoints API pour le contrôle des scans réseau
|
||||
"""
|
||||
from fastapi import APIRouter, Depends, BackgroundTasks
|
||||
from sqlalchemy.orm import Session
|
||||
from datetime import datetime, timedelta
|
||||
from typing import Dict, Any, Optional, List
|
||||
from pydantic import BaseModel
|
||||
|
||||
from backend.app.core.database import get_db
|
||||
from backend.app.core.config import config_manager
|
||||
from backend.app.models.ip import IP, IPHistory
|
||||
from backend.app.models.scan_log import ScanLog
|
||||
from backend.app.services.network import NetworkScanner, OuiLookup
|
||||
from backend.app.services.websocket import ws_manager
|
||||
|
||||
router = APIRouter(prefix="/api/scan", tags=["Scan"])
|
||||
|
||||
|
||||
class ScanLogResponse(BaseModel):
    """Response schema for scan log entries."""
    id: int
    ip: Optional[str]
    status: Optional[str]
    message: str
    created_at: datetime

    class Config:
        # Allow building the schema directly from ORM objects.
        from_attributes = True
|
||||
|
||||
|
||||
async def perform_scan(db: Session):
    """
    Run one full network scan and persist the results.

    Intended to run as a FastAPI background task. Broadcasts progress and
    log events over WebSocket (best effort: WebSocket failures never abort
    the scan), upserts every scanned IP, appends per-IP history rows, and
    finishes with a single commit.

    Args:
        db: SQLAlchemy session handed over by the queuing request
    """
    try:
        async def scan_log(message: str):
            # Mirror every log line to stdout and, best effort, to WebSocket.
            print(message)
            try:
                await ws_manager.broadcast_scan_log(message)
            except Exception:
                pass

        await scan_log(f"[{datetime.now()}] Début du scan réseau...")

        # Announce the scan start; a WebSocket failure is non-fatal.
        try:
            await ws_manager.broadcast_scan_start()
        except Exception as e:
            print(f"Erreur broadcast start (ignorée): {e}")

        config = config_manager.config
        await scan_log(f"[{datetime.now()}] Config chargée: {config.network.cidr}")

        scanner = NetworkScanner(
            cidr=config.network.cidr,
            timeout=config.scan.timeout,
            ping_count=config.scan.ping_count
        )

        # Expand configured port specs ("80", "8000-8100") into a flat int list.
        port_list = []
        for spec in config.ports.ranges:
            if '-' in spec:
                low, high = map(int, spec.split('-'))
                port_list.extend(range(low, high + 1))
            else:
                port_list.append(int(spec))

        await scan_log(f"[{datetime.now()}] Ports à scanner: {len(port_list)}")

        known_ips = config.ip_classes
        await scan_log(f"[{datetime.now()}] IPs connues: {len(known_ips)}")

        async def progress_callback(current: int, total: int, current_ip: str, status: str, ping_ok: bool):
            # Forward per-host progress to WebSocket; never let a WebSocket
            # error interrupt the scan itself.
            try:
                ping_label = "ok" if ping_ok else "fail"
                await ws_manager.broadcast_scan_progress({
                    "current": current,
                    "total": total,
                    "ip": current_ip
                })
                await ws_manager.broadcast_scan_log(
                    f"[{current}/{total}] {current_ip} -> ping:{ping_label} ({status})"
                )
            except Exception:
                pass

        await scan_log(f"[{datetime.now()}] Lancement du scan (parallélisme: {config.scan.parallel_pings})...")
        scan_results = await scanner.full_scan(
            known_ips=known_ips,
            port_list=port_list,
            max_concurrent=config.scan.parallel_pings,
            progress_callback=progress_callback
        )
        await scan_log(f"[{datetime.now()}] Scan terminé: {len(scan_results)} IPs trouvées")

        stats = {"total": 0, "online": 0, "offline": 0, "new": 0, "updated": 0}

        for ip_address, result in scan_results.items():
            new_status = result["last_status"]
            stats["total"] += 1
            stats["online" if new_status == "online" else "offline"] += 1

            # Per-IP scan log row (raw scan outcome history).
            ping_label = "ok" if new_status == "online" else "fail"
            db.add(ScanLog(
                ip=ip_address,
                status=new_status,
                message=f"Scan {ip_address} -> ping:{ping_label} ({new_status})"
            ))

            record = db.query(IP).filter(IP.ip == ip_address).first()

            if record:
                old_status = record.last_status

                # An unknown device coming back online counts as a fresh
                # detection: reset first_seen so it shows up again in
                # "Nouvelles Détections".
                if old_status == "offline" and new_status == "online" and not record.known:
                    record.first_seen = datetime.now()

                # Flag MAC address changes (possible spoofing / DHCP reuse).
                new_mac = result.get("mac")
                if new_mac and record.mac and new_mac != record.mac:
                    record.mac_changed = True
                    print(f"[ALERTE] MAC changée pour {ip_address}: {record.mac} -> {new_mac}")
                else:
                    record.mac_changed = False

                record.last_status = new_status
                if result["last_seen"]:
                    record.last_seen = result["last_seen"]
                record.mac = result.get("mac") or record.mac

                # Vendor: fall back to a local OUI lookup when the scan did
                # not resolve one; only overwrite an existing vendor when the
                # config explicitly asks for it.
                vendor = result.get("vendor")
                if (not vendor or vendor == "Unknown") and record.mac:
                    vendor = OuiLookup.lookup(record.mac) or vendor
                if config.scan.force_vendor_update:
                    if vendor and vendor != "Unknown":
                        record.vendor = vendor
                elif (not record.vendor or record.vendor == "Unknown") and vendor and vendor != "Unknown":
                    record.vendor = vendor

                record.hostname = result.get("hostname") or record.hostname
                record.open_ports = result.get("open_ports", [])

                # host only comes with config-backed entries; keep the stored
                # value when the scan result carries none.
                if "host" in result:
                    record.host = result["host"]
                current_host = result.get("host") or record.host
                record.network_device = (current_host == "Network")

                # Notify listeners on state transitions only.
                if old_status != new_status:
                    await ws_manager.broadcast_ip_update({
                        "ip": ip_address,
                        "old_status": old_status,
                        "new_status": new_status
                    })

                stats["updated"] += 1

            else:
                # First sighting: resolve the vendor, then insert a new row.
                vendor = result.get("vendor")
                if (not vendor or vendor == "Unknown") and result.get("mac"):
                    vendor = OuiLookup.lookup(result.get("mac")) or vendor

                db.add(IP(
                    ip=ip_address,
                    name=result.get("name"),
                    known=result.get("known", False),
                    network_device=result.get("host") == "Network",
                    location=result.get("location"),
                    host=result.get("host"),
                    first_seen=datetime.now(),
                    last_seen=result.get("last_seen") or datetime.now(),
                    last_status=new_status,
                    mac=result.get("mac"),
                    vendor=vendor,
                    hostname=result.get("hostname"),
                    open_ports=result.get("open_ports", [])
                ))

                await ws_manager.broadcast_new_ip({
                    "ip": ip_address,
                    "status": new_status,
                    "known": result.get("known", False)
                })

                stats["new"] += 1

            # One history row per scanned IP, whether new or updated.
            db.add(IPHistory(
                ip=ip_address,
                timestamp=datetime.now(),
                status=new_status,
                open_ports=result.get("open_ports", [])
            ))

        # Single commit for the whole scan.
        db.commit()

        # Broadcast final stats to connected clients.
        await ws_manager.broadcast_scan_complete(stats)

        print(f"[{datetime.now()}] Scan terminé: {stats}")

    except Exception as e:
        # A background task has no caller to propagate to: log and roll back
        # so the session stays usable.
        print(f"Erreur lors du scan: {e}")
        db.rollback()
|
||||
|
||||
|
||||
@router.post("/start")
async def start_scan(background_tasks: BackgroundTasks, db: Session = Depends(get_db)):
    """
    Queue an immediate network scan as a background task.

    Returns:
        Confirmation message and the queueing timestamp.
    """
    # NOTE(review): the request-scoped session is handed to a background
    # task that outlives the response — presumably get_db keeps it open
    # long enough; confirm against the dependency implementation.
    background_tasks.add_task(perform_scan, db)

    return {
        "message": "Scan réseau démarré",
        "timestamp": datetime.now()
    }
|
||||
|
||||
|
||||
@router.get("/logs", response_model=List[ScanLogResponse])
async def get_scan_logs(limit: int = 200, db: Session = Depends(get_db)):
    """
    Return the most recent scan log entries in chronological order.

    Args:
        limit: maximum number of entries to return (default 200)
    """
    newest_first = (
        db.query(ScanLog)
        .order_by(ScanLog.created_at.desc())
        .limit(limit)
        .all()
    )
    # Fetched newest-first so LIMIT keeps the latest rows; flip for display.
    return newest_first[::-1]
|
||||
|
||||
|
||||
@router.post("/ports/{ip_address}")
async def scan_ip_ports(ip_address: str, db: Session = Depends(get_db)):
    """
    Scan the configured ports of a single IP and persist the result.

    Args:
        ip_address: target IP address
        db: database session

    Returns:
        dict with the open ports found (empty list plus an error message
        on failure — this endpoint never raises).
    """
    try:
        config = config_manager.config

        # Expand configured port specs ("80", "8000-8100") into ints.
        ports = []
        for spec in config.ports.ranges:
            if '-' in spec:
                low, high = map(int, spec.split('-'))
                ports.extend(range(low, high + 1))
            else:
                ports.append(int(spec))

        scanner = NetworkScanner(
            cidr=config.network.cidr,
            timeout=config.scan.timeout,
            ping_count=config.scan.ping_count
        )

        print(f"[{datetime.now()}] Scan ports pour {ip_address}...")
        open_ports = await scanner.scan_ports(ip_address, ports)
        print(f"[{datetime.now()}] Ports ouverts pour {ip_address}: {open_ports}")

        # Persist on the known record, if any (unknown IPs are still scanned).
        record = db.query(IP).filter(IP.ip == ip_address).first()
        if record:
            record.open_ports = open_ports
            record.last_seen = datetime.now()
            db.commit()

        # Push the fresh port list to connected clients.
        await ws_manager.broadcast_ip_update({
            "ip": ip_address,
            "open_ports": open_ports
        })

        return {
            "message": "Scan de ports terminé",
            "ip": ip_address,
            "open_ports": open_ports,
            "timestamp": datetime.now()
        }

    except Exception as e:
        print(f"Erreur scan ports {ip_address}: {e}")
        return {
            "message": f"Erreur: {str(e)}",
            "ip": ip_address,
            "open_ports": [],
            "timestamp": datetime.now()
        }
|
||||
|
||||
|
||||
@router.post("/cleanup-history")
async def cleanup_history(hours: int = 24, db: Session = Depends(get_db)):
    """
    Delete IP history entries older than the given retention window.

    Args:
        hours: number of hours of history to keep (default: 24)
        db: database session

    Returns:
        dict with the number of deleted entries and the window used
    """
    cutoff_date = datetime.now() - timedelta(hours=hours)

    # Bulk delete; synchronize_session=False skips reconciling in-memory
    # objects, which is safe here because this request loads no IPHistory
    # instances before committing.
    deleted = db.query(IPHistory).filter(
        IPHistory.timestamp < cutoff_date
    ).delete(synchronize_session=False)

    db.commit()

    return {
        "message": "Historique nettoyé",  # was a pointless f-string; value unchanged
        "deleted_entries": deleted,
        "older_than_hours": hours
    }
|
||||
73
backend/app/routers/system.py
Executable file
@@ -0,0 +1,73 @@
|
||||
"""
|
||||
Router pour les statistiques système
|
||||
Fournit les métriques RAM et CPU du serveur IPWatch
|
||||
"""
|
||||
from fastapi import APIRouter
|
||||
import psutil
|
||||
from datetime import datetime
|
||||
|
||||
router = APIRouter(prefix="/api/system", tags=["system"])
|
||||
|
||||
|
||||
@router.get("/stats")
async def get_system_stats():
    """
    System resource statistics for the IPWatch server.

    Returns:
        dict: RAM and CPU metrics
            - ram_percent: percentage of RAM in use
            - ram_used / ram_total / ram_available: MB
            - cpu_percent: system CPU usage (sampled over 1 second)
            - cpu_count: number of CPU cores
            - process_ram_mb / process_cpu_percent: this process
            - timestamp: ISO timestamp of the measurement
    """
    # Local import keeps the module's import block untouched.
    import asyncio

    # Memory statistics (non-blocking).
    memory = psutil.virtual_memory()

    # BUG FIX: psutil.cpu_percent(interval=1) sleeps for a full second and
    # previously blocked the asyncio event loop (stalling every concurrent
    # request). Run the blocking sampling calls in a worker thread instead;
    # the returned values are identical.
    cpu_percent = await asyncio.to_thread(psutil.cpu_percent, 1)

    # IPWatch process statistics.
    process = psutil.Process()
    process_memory = process.memory_info()
    process_cpu_percent = await asyncio.to_thread(process.cpu_percent, 0.1)

    return {
        # System RAM
        "ram_percent": round(memory.percent, 1),
        "ram_used": round(memory.used / (1024 * 1024), 1),  # MB
        "ram_total": round(memory.total / (1024 * 1024), 1),  # MB
        "ram_available": round(memory.available / (1024 * 1024), 1),  # MB

        # System CPU
        "cpu_percent": round(cpu_percent, 1),
        "cpu_count": psutil.cpu_count(),

        # IPWatch process
        "process_ram_mb": round(process_memory.rss / (1024 * 1024), 1),  # MB
        "process_cpu_percent": round(process_cpu_percent, 1),

        # Timestamp
        "timestamp": datetime.now().isoformat()
    }
|
||||
|
||||
|
||||
@router.get("/uptime")
async def get_uptime():
    """
    System uptime derived from the boot timestamp.

    Returns:
        dict: uptime in seconds and hours, plus the boot time (ISO format)
    """
    import time

    boot_ts = psutil.boot_time()
    elapsed = time.time() - boot_ts

    return {
        "uptime_seconds": int(elapsed),
        "uptime_hours": round(elapsed / 3600, 1),
        "boot_time": datetime.fromtimestamp(boot_ts).isoformat()
    }
|
||||
227
backend/app/routers/tracking.py
Normal file
@@ -0,0 +1,227 @@
|
||||
"""
|
||||
Endpoints API pour le suivi d'équipements (Wake-on-LAN, shutdown, etc.)
|
||||
"""
|
||||
from fastapi import APIRouter, Depends, HTTPException
|
||||
from sqlalchemy.orm import Session
|
||||
from typing import List, Optional
|
||||
from datetime import datetime
|
||||
from pydantic import BaseModel
|
||||
|
||||
from backend.app.core.database import get_db
|
||||
from backend.app.models.ip import IP
|
||||
|
||||
router = APIRouter(prefix="/api/tracking", tags=["Tracking"])
|
||||
|
||||
|
||||
# Schémas Pydantic
|
||||
class IPTrackingResponse(BaseModel):
    """Response schema for a tracked IP, serialized from the ORM model."""
    ip: str
    name: Optional[str]
    known: bool
    tracked: bool
    location: Optional[str]
    host: Optional[str]
    last_status: Optional[str]
    mac: Optional[str]
    vendor: Optional[str]
    hostname: Optional[str]
    link: Optional[str]
    last_seen: Optional[datetime]
    open_ports: List[int]

    class Config:
        # Allow construction directly from SQLAlchemy objects.
        from_attributes = True
|
||||
|
||||
|
||||
class WOLResponse(BaseModel):
    """Response returned after sending a Wake-on-LAN magic packet."""
    message: str
    ip: str
    mac: str
    success: bool
|
||||
|
||||
|
||||
class ShutdownResponse(BaseModel):
    """Response returned after a shutdown/reboot command was issued."""
    message: str
    ip: str
    success: bool
|
||||
|
||||
|
||||
@router.get("/", response_model=List[IPTrackingResponse])
async def get_tracked_ips(db: Session = Depends(get_db)):
    """
    Return every IP flagged as tracked, sorted by name then address.
    """
    query = (
        db.query(IP)
        .filter(IP.tracked == True)  # noqa: E712 — SQLAlchemy needs the comparison
        .order_by(IP.name, IP.ip)
    )
    return query.all()
|
||||
|
||||
|
||||
@router.post("/wol/{ip_address}", response_model=WOLResponse)
async def wake_on_lan(ip_address: str, db: Session = Depends(get_db)):
    """
    Send a Wake-on-LAN magic packet to the device at `ip_address`.

    Requires the IP to exist in the database with a recorded MAC address.

    Raises:
        HTTPException 404: unknown IP
        HTTPException 400: no MAC address recorded
        HTTPException 500: wakeonlan missing, or the send failed
    """
    target = db.query(IP).filter(IP.ip == ip_address).first()

    if target is None:
        raise HTTPException(
            status_code=404,
            detail=f"IP {ip_address} non trouvée dans la base de données"
        )

    if not target.mac:
        raise HTTPException(
            status_code=400,
            detail=f"Adresse MAC manquante pour {ip_address}. Impossible d'envoyer le paquet WOL."
        )

    try:
        # Imported lazily so the endpoint degrades cleanly when the optional
        # dependency is absent.
        from wakeonlan import send_magic_packet

        send_magic_packet(target.mac)

        return WOLResponse(
            message=f"Paquet Wake-on-LAN envoyé avec succès",
            ip=ip_address,
            mac=target.mac,
            success=True
        )

    except ImportError:
        raise HTTPException(
            status_code=500,
            detail="La bibliothèque 'wakeonlan' n'est pas installée. Exécutez: pip install wakeonlan"
        )
    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=f"Erreur lors de l'envoi du paquet WOL: {str(e)}"
        )
|
||||
|
||||
|
||||
@router.post("/shutdown/{ip_address}", response_model=ShutdownResponse)
async def shutdown_device(ip_address: str, db: Session = Depends(get_db)):
    """
    Send a shutdown command to the device over MQTT.

    Raises:
        HTTPException 404: unknown IP
        HTTPException 400: device already offline
        HTTPException 500: MQTT not configured, or the publish failed
    """
    ip_obj = db.query(IP).filter(IP.ip == ip_address).first()

    if not ip_obj:
        raise HTTPException(
            status_code=404,
            detail=f"IP {ip_address} non trouvée dans la base de données"
        )

    if ip_obj.last_status != "online":
        raise HTTPException(
            status_code=400,
            detail=f"L'équipement {ip_address} est déjà hors ligne"
        )

    try:
        from backend.app.services.mqtt_client import send_mqtt_command

        # Publish the shutdown command over MQTT.
        success = send_mqtt_command(ip_address, "shutdown")

        if success:
            return ShutdownResponse(
                message=f"Commande shutdown envoyée à {ip_address} via MQTT",
                ip=ip_address,
                success=True
            )
        raise HTTPException(
            status_code=500,
            detail="Échec de l'envoi de la commande MQTT"
        )

    except HTTPException:
        # BUG FIX: HTTPException subclasses Exception, so the generic handler
        # below used to catch the 500 raised just above and rewrap it with a
        # different message. Re-raise it untouched instead.
        raise
    except ImportError:
        raise HTTPException(
            status_code=500,
            detail="Le service MQTT n'est pas configuré. Consultez mqtt/docs/README.md"
        )
    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=f"Erreur lors de l'envoi de la commande: {str(e)}"
        )
|
||||
|
||||
|
||||
@router.post("/reboot/{ip_address}", response_model=ShutdownResponse)
async def reboot_device(ip_address: str, db: Session = Depends(get_db)):
    """
    Send a reboot command to the device over MQTT.

    Raises:
        HTTPException 404: unknown IP
        HTTPException 400: device offline
        HTTPException 500: MQTT not configured, or the publish failed
    """
    ip_obj = db.query(IP).filter(IP.ip == ip_address).first()

    if not ip_obj:
        raise HTTPException(
            status_code=404,
            detail=f"IP {ip_address} non trouvée"
        )

    if ip_obj.last_status != "online":
        raise HTTPException(
            status_code=400,
            detail=f"L'équipement {ip_address} est hors ligne"
        )

    try:
        from backend.app.services.mqtt_client import send_mqtt_command

        # Publish the reboot command over MQTT.
        success = send_mqtt_command(ip_address, "reboot")

        if success:
            return ShutdownResponse(
                message=f"Commande reboot envoyée à {ip_address} via MQTT",
                ip=ip_address,
                success=True
            )
        raise HTTPException(
            status_code=500,
            detail="Échec de l'envoi de la commande MQTT"
        )

    except HTTPException:
        # BUG FIX: without this re-raise the generic Exception handler below
        # swallowed the 500 raised just above and rewrapped its message.
        raise
    except ImportError:
        raise HTTPException(
            status_code=500,
            detail="Le service MQTT n'est pas configuré"
        )
    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=f"Erreur: {str(e)}"
        )
|
||||
|
||||
|
||||
@router.patch("/{ip_address}/toggle", response_model=IPTrackingResponse)
async def toggle_tracking(ip_address: str, db: Session = Depends(get_db)):
    """
    Flip the `tracked` flag of an IP and return the refreshed record.

    Raises:
        HTTPException 404: unknown IP
    """
    ip_obj = db.query(IP).filter(IP.ip == ip_address).first()

    if ip_obj is None:
        raise HTTPException(
            status_code=404,
            detail=f"IP {ip_address} non trouvée"
        )

    ip_obj.tracked = not ip_obj.tracked
    db.commit()
    db.refresh(ip_obj)

    return ip_obj
|
||||
35
backend/app/routers/websocket.py
Executable file
@@ -0,0 +1,35 @@
|
||||
"""
|
||||
Endpoint WebSocket pour notifications temps réel
|
||||
"""
|
||||
from fastapi import APIRouter, WebSocket, WebSocketDisconnect
|
||||
from backend.app.services.websocket import ws_manager
|
||||
|
||||
router = APIRouter(tags=["WebSocket"])
|
||||
|
||||
|
||||
@router.websocket("/ws")
async def websocket_endpoint(websocket: WebSocket):
    """
    WebSocket endpoint for real-time notifications.

    Registers the connection with the manager, then loops on incoming
    client messages as a keep-alive; "ping" is answered with "pong".
    Any disconnect or error unregisters the connection.

    Args:
        websocket: the WebSocket connection
    """
    await ws_manager.connect(websocket)

    try:
        while True:
            incoming = await websocket.receive_text()
            # Only the heartbeat is handled for now; other client commands
            # could be dispatched here later.
            if incoming == "ping":
                await ws_manager.send_personal_message("pong", websocket)

    except WebSocketDisconnect:
        ws_manager.disconnect(websocket)
    except Exception as e:
        print(f"Erreur WebSocket: {e}")
        ws_manager.disconnect(websocket)
|
||||
56
backend/app/scripts/check_network_device.py
Normal file
@@ -0,0 +1,56 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Script pour vérifier et forcer la mise à jour du flag network_device
|
||||
"""
|
||||
import os
|
||||
import sys
|
||||
|
||||
# Ajouter le chemin parent pour les imports
|
||||
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../..')))
|
||||
|
||||
from sqlalchemy import create_engine
|
||||
from sqlalchemy.orm import sessionmaker
|
||||
from backend.app.models.ip import IP
|
||||
|
||||
# Créer la connexion à la base de données
|
||||
# Database connection (path overridable via the DB_PATH env var).
db_path = os.getenv('DB_PATH', './data/db.sqlite')
db_url = f"sqlite:///{db_path}"
engine = create_engine(db_url, echo=False)
SessionLocal = sessionmaker(bind=engine)

db = SessionLocal()

try:
    ips = db.query(IP).all()

    print(f"\n📊 Total IPs: {len(ips)}\n")

    updated = 0
    for ip in ips:
        # Only rows with a host are displayed and reconciled.
        if ip.host:
            status_icon = "🟢" if ip.last_status == "online" else "🔴"
            network_icon = "🔷" if ip.network_device else " "

            print(f"{status_icon} {network_icon} {ip.ip:15s} | Host: {ip.host:15s} | Network: {ip.network_device} | Status: {ip.last_status}")

            # Reconcile network_device with the host == "Network" convention.
            should_be_network = (ip.host == "Network")
            if ip.network_device != should_be_network:
                ip.network_device = should_be_network
                updated += 1
                print(f"  ✓ Flag network_device mis à jour pour {ip.ip}: {should_be_network}")

    if updated > 0:
        db.commit()
        print(f"\n✅ {updated} IP(s) mise(s) à jour!")
    else:
        print(f"\n✓ Tous les flags network_device sont déjà à jour")

except Exception as e:
    print(f"❌ Erreur: {e}")
    import traceback
    traceback.print_exc()
finally:
    db.close()
|
||||
118
backend/app/scripts/rebuild_ip_relations.py
Normal file
@@ -0,0 +1,118 @@
|
||||
"""
|
||||
Reconstruit ip_parent depuis config.yaml, puis recalcule ip_enfant depuis ip_parent.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import sqlite3
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
import yaml
|
||||
|
||||
|
||||
CONFIG_PATH = Path(__file__).resolve().parents[3] / "config.yaml"
|
||||
|
||||
|
||||
def load_config() -> Dict[str, Any]:
    """Load config.yaml; an empty file yields an empty dict."""
    raw = CONFIG_PATH.read_text(encoding="utf-8")
    return yaml.safe_load(raw) or {}
|
||||
|
||||
|
||||
def normalize_children(value: Any) -> Optional[List[str]]:
    """
    Coerce a config value into a list of child-IP strings.

    None stays None (field absent); a list keeps only truthy items,
    stringified; a string becomes a one-element list (empty string -> []);
    anything else collapses to [].
    """
    if value is None:
        return None
    if isinstance(value, list):
        return [str(item) for item in value if item]
    if isinstance(value, str):
        return [value] if value else []
    return []
|
||||
|
||||
|
||||
def ensure_columns(conn: sqlite3.Connection) -> None:
    """Add the ip_parent/ip_enfant TEXT columns to table `ip` if missing."""
    existing = {row[1] for row in conn.execute("PRAGMA table_info(ip)")}
    for column in ("ip_parent", "ip_enfant"):
        if column not in existing:
            conn.execute(f"ALTER TABLE ip ADD COLUMN {column} TEXT")
    conn.commit()
|
||||
|
||||
|
||||
def collect_parent_mapping(config: Dict[str, Any]) -> Dict[str, Dict[str, Any]]:
    """
    Gather explicit ip_parent/ip_enfant declarations from both the
    `ip_classes` mapping and the `hosts` list of config.yaml.

    Entries from ip_classes take precedence; host entries only fill in
    fields that are still missing.
    """
    mapping: Dict[str, Dict[str, Any]] = {}

    for ip_address, meta in (config.get("ip_classes", {}) or {}).items():
        if not isinstance(meta, dict):
            continue
        if "ip_parent" in meta or "ip_enfant" in meta:
            mapping[ip_address] = {
                "ip_parent": meta.get("ip_parent"),
                "ip_enfant": normalize_children(meta.get("ip_enfant"))
            }

    for host in config.get("hosts", []) or []:
        if not isinstance(host, dict):
            continue
        ip_address = host.get("ip")
        if not ip_address:
            continue
        if "ip_parent" in host or "ip_enfant" in host:
            entry = mapping.setdefault(ip_address, {})
            entry.setdefault("ip_parent", host.get("ip_parent"))
            entry.setdefault("ip_enfant", normalize_children(host.get("ip_enfant")))

    return mapping
|
||||
|
||||
|
||||
def main() -> None:
    """
    Rebuild ip_parent from config.yaml, then recompute every row's
    ip_enfant as the JSON list of IPs whose ip_parent points at it.
    """
    config = load_config()
    db_path = Path(config.get("database", {}).get("path", "./data/db.sqlite"))
    mapping = collect_parent_mapping(config)

    if not db_path.exists():
        raise FileNotFoundError(f"Base de données introuvable: {db_path}")

    conn = sqlite3.connect(db_path)
    try:
        ensure_columns(conn)

        # Phase 1: push the config-declared relations into the table.
        for ip_address, values in mapping.items():
            parent = values.get("ip_parent")
            children = values.get("ip_enfant")
            if children is not None:
                conn.execute(
                    "UPDATE ip SET ip_parent = ?, ip_enfant = ? WHERE ip = ?",
                    (parent, json.dumps(children), ip_address)
                )
            else:
                conn.execute(
                    "UPDATE ip SET ip_parent = ? WHERE ip = ?",
                    (parent, ip_address)
                )

        # Phase 2: derive each row's ip_enfant from the ip_parent links.
        rows = conn.execute("SELECT ip, ip_parent FROM ip").fetchall()
        children_of: Dict[str, List[str]] = {}
        for ip_address, parent in rows:
            if parent:
                children_of.setdefault(parent, []).append(ip_address)

        for ip_address, _ in rows:
            conn.execute(
                "UPDATE ip SET ip_enfant = ? WHERE ip = ?",
                (json.dumps(children_of.get(ip_address, [])), ip_address)
            )

        conn.commit()
        print("Reconstruction terminée.")
    finally:
        conn.close()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
98
backend/app/scripts/rebuild_ip_relations_from_hosts.py
Normal file
@@ -0,0 +1,98 @@
|
||||
"""
|
||||
Reconstruit ip_parent/ip_enfant en utilisant le champ host et config.yaml.
|
||||
1) Pour chaque IP avec host, retrouve l'IP du host dans config.yaml et met ip_parent.
|
||||
2) Recalcule ip_enfant depuis ip_parent.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import sqlite3
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict
|
||||
|
||||
import yaml
|
||||
|
||||
|
||||
CONFIG_PATH = Path(__file__).resolve().parents[3] / "config.yaml"
|
||||
|
||||
|
||||
def load_config() -> Dict[str, Any]:
    """Load config.yaml; an empty file yields an empty dict."""
    raw = CONFIG_PATH.read_text(encoding="utf-8")
    return yaml.safe_load(raw) or {}
|
||||
|
||||
|
||||
def ensure_columns(conn: sqlite3.Connection) -> None:
    """Add the ip_parent/ip_enfant TEXT columns to table `ip` if missing."""
    existing = {row[1] for row in conn.execute("PRAGMA table_info(ip)")}
    for column in ("ip_parent", "ip_enfant"):
        if column not in existing:
            conn.execute(f"ALTER TABLE ip ADD COLUMN {column} TEXT")
    conn.commit()
|
||||
|
||||
|
||||
def host_ip_map(config: Dict[str, Any]) -> Dict[str, str]:
    """Map lowercased host name -> host IP from config['hosts']."""
    mapping: Dict[str, str] = {}
    for entry in config.get("hosts", []) or []:
        if not isinstance(entry, dict):
            continue
        name = (entry.get("name") or "").strip()
        addr = (entry.get("ip") or "").strip()
        if name and addr:
            mapping[name.lower()] = addr
    return mapping
|
||||
|
||||
|
||||
def main() -> None:
    """
    Rebuild ip_parent from each row's host name (resolved through
    config.yaml's hosts list), then recompute ip_enfant per parent.
    """
    config = load_config()
    db_path = Path(config.get("database", {}).get("path", "./data/db.sqlite"))

    if not db_path.exists():
        raise FileNotFoundError(f"Base de données introuvable: {db_path}")

    host_map = host_ip_map(config)
    conn = sqlite3.connect(db_path)
    try:
        ensure_columns(conn)

        cursor = conn.execute("SELECT ip, host FROM ip")
        rows = cursor.fetchall()

        # Phase 1: resolve each row's host name to the parent IP.
        updated = 0
        skipped = 0
        for ip_address, host in rows:
            if not host:
                skipped += 1
                continue
            parent_ip = host_map.get(str(host).lower())
            if not parent_ip:
                print(f"[WARN] host sans IP config: {host} (ip {ip_address})")
                skipped += 1
                continue
            conn.execute("UPDATE ip SET ip_parent = ? WHERE ip = ?", (parent_ip, ip_address))
            updated += 1

        print(f"[INFO] ip_parent mis à jour: {updated} | ignorés: {skipped}")

        # Phase 2: recompute ip_enfant for every configured parent.
        # BUG FIX: the original compared the *host name* stored on each row
        # against the set of parent *IPs*, so no child was ever collected and
        # every ip_enfant was written as "[]". Resolve the host name to its
        # parent IP via host_map before grouping.
        parent_children: Dict[str, list[str]] = {}
        for ip_address, host in rows:
            parent_ip = host_map.get(str(host or "").strip().lower())
            if parent_ip:
                parent_children.setdefault(parent_ip, []).append(ip_address)

        for parent_ip in set(host_map.values()):
            children = parent_children.get(parent_ip, [])
            conn.execute(
                "UPDATE ip SET ip_enfant = ? WHERE ip = ?",
                (json.dumps(children), parent_ip)
            )

        conn.commit()
        print("[INFO] ip_enfant recalculé.")
    finally:
        conn.close()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
135
backend/app/scripts/update_ip_relations.py
Normal file
@@ -0,0 +1,135 @@
|
||||
"""
|
||||
Met à jour la base IP avec les champs ip_parent/ip_enfant depuis config.yaml.
|
||||
Ajoute les colonnes si nécessaire et synchronise les relations.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import sqlite3
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
import yaml
|
||||
|
||||
|
||||
CONFIG_PATH = Path(__file__).resolve().parents[3] / "config.yaml"
|
||||
|
||||
|
||||
def load_config() -> Dict[str, Any]:
    """Load config.yaml; an empty file yields an empty dict."""
    raw = CONFIG_PATH.read_text(encoding="utf-8")
    return yaml.safe_load(raw) or {}
|
||||
|
||||
|
||||
def normalize_children(value: Any) -> Optional[List[str]]:
    """
    Coerce a config value into a list of child-IP strings.

    None stays None (field absent); a list keeps only truthy items,
    stringified; a string becomes a one-element list (empty string -> []);
    anything else collapses to [].
    """
    if value is None:
        return None
    if isinstance(value, list):
        return [str(item) for item in value if item]
    if isinstance(value, str):
        return [value] if value else []
    return []
|
||||
|
||||
|
||||
def ensure_columns(conn: sqlite3.Connection) -> None:
    """Add the ip_parent/ip_enfant TEXT columns to table `ip` if missing."""
    existing = {row[1] for row in conn.execute("PRAGMA table_info(ip)")}
    for column in ("ip_parent", "ip_enfant"):
        if column not in existing:
            conn.execute(f"ALTER TABLE ip ADD COLUMN {column} TEXT")
    conn.commit()
|
||||
|
||||
|
||||
def collect_config_mapping(config: Dict[str, Any]) -> Dict[str, Dict[str, Any]]:
    """
    Gather explicit ip_parent/ip_enfant declarations from both the
    `ip_classes` mapping and the `hosts` list of config.yaml.

    Entries from ip_classes take precedence; host entries only fill in
    fields that are still missing.
    """
    mapping: Dict[str, Dict[str, Any]] = {}

    for ip_address, meta in (config.get("ip_classes", {}) or {}).items():
        if not isinstance(meta, dict):
            continue
        if "ip_parent" in meta or "ip_enfant" in meta:
            mapping[ip_address] = {
                "ip_parent": meta.get("ip_parent"),
                "ip_enfant": normalize_children(meta.get("ip_enfant"))
            }

    for host in config.get("hosts", []) or []:
        if not isinstance(host, dict):
            continue
        ip_address = host.get("ip")
        if not ip_address:
            continue
        if "ip_parent" in host or "ip_enfant" in host:
            entry = mapping.setdefault(ip_address, {})
            entry.setdefault("ip_parent", host.get("ip_parent"))
            entry.setdefault("ip_enfant", normalize_children(host.get("ip_enfant")))

    return mapping
|
||||
|
||||
|
||||
def parse_json_list(value: Optional[str]) -> List[str]:
    """Decode a JSON-encoded list column; anything invalid yields []."""
    if not value:
        return []
    try:
        decoded = json.loads(value)
    except json.JSONDecodeError:
        return []
    if not isinstance(decoded, list):
        return []
    return [str(item) for item in decoded if item]
|
||||
|
||||
|
||||
def main() -> None:
    """Migrate the `ip` table: populate ip_parent / ip_enfant columns.

    Two passes over the SQLite database:
      1. Write the parent/children relations declared in the YAML config.
      2. Rebuild every row's ip_enfant list from the ip_parent links so
         both directions of the relation stay consistent.

    Raises:
        FileNotFoundError: when the configured database file is missing.
    """
    config = load_config()
    db_path = Path(config.get("database", {}).get("path", "./data/db.sqlite"))
    mapping = collect_config_mapping(config)

    if not db_path.exists():
        raise FileNotFoundError(f"Base de données introuvable: {db_path}")

    conn = sqlite3.connect(db_path)
    try:
        # Make sure ip_parent / ip_enfant columns exist before UPDATEs.
        ensure_columns(conn)

        if mapping:
            # Pass 1: apply the config-declared relations.
            for ip_address, values in mapping.items():
                ip_parent = values.get("ip_parent")
                ip_enfant = values.get("ip_enfant")
                if ip_parent is None and ip_enfant is None:
                    continue
                if ip_enfant is not None:
                    ip_enfant_json = json.dumps(ip_enfant)
                    # COALESCE keeps an existing ip_parent when the config
                    # provides none (ip_parent may be NULL here).
                    conn.execute(
                        "UPDATE ip SET ip_parent = COALESCE(?, ip_parent), ip_enfant = ? WHERE ip = ?",
                        (ip_parent, ip_enfant_json, ip_address)
                    )
                else:
                    conn.execute(
                        "UPDATE ip SET ip_parent = COALESCE(?, ip_parent) WHERE ip = ?",
                        (ip_parent, ip_address)
                    )

        # Pass 2: derive children lists from the ip_parent links.
        cursor = conn.execute("SELECT ip, ip_parent, ip_enfant FROM ip")
        rows = cursor.fetchall()
        parent_children: Dict[str, List[str]] = {}

        # Invert the relation: parent ip -> list of child ips.
        for ip_address, ip_parent, _ in rows:
            if ip_parent:
                parent_children.setdefault(ip_parent, []).append(ip_address)

        for ip_address, _, ip_enfant_raw in rows:
            existing = parse_json_list(ip_enfant_raw)
            # dict.fromkeys() dedupes while preserving insertion order.
            merged = list(dict.fromkeys(existing + parent_children.get(ip_address, [])))
            conn.execute(
                "UPDATE ip SET ip_enfant = ? WHERE ip = ?",
                (json.dumps(merged), ip_address)
            )

        conn.commit()
        print("Mise à jour terminée.")
    finally:
        conn.close()


if __name__ == "__main__":
    main()
|
||||
7
backend/app/services/__init__.py
Executable file
@@ -0,0 +1,7 @@
|
||||
"""
|
||||
Services réseau pour IPWatch
|
||||
"""
|
||||
from .network import NetworkScanner
|
||||
from .scheduler import ScanScheduler
|
||||
|
||||
__all__ = ["NetworkScanner", "ScanScheduler"]
|
||||
80
backend/app/services/mqtt_client.py
Normal file
@@ -0,0 +1,80 @@
|
||||
"""
|
||||
Service MQTT pour IPWatch Backend
|
||||
Envoie des commandes MQTT aux agents installés sur les machines
|
||||
"""
|
||||
import paho.mqtt.client as mqtt
|
||||
import json
|
||||
import logging
|
||||
from typing import Optional
|
||||
import os
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Configuration MQTT (à charger depuis config.yaml ou variables d'environnement)
|
||||
MQTT_BROKER = os.getenv('MQTT_BROKER', 'localhost')
|
||||
MQTT_PORT = int(os.getenv('MQTT_PORT', '1883'))
|
||||
MQTT_USERNAME = os.getenv('MQTT_USERNAME', None)
|
||||
MQTT_PASSWORD = os.getenv('MQTT_PASSWORD', None)
|
||||
|
||||
|
||||
def send_mqtt_command(ip_address: str, command: str) -> bool:
    """
    Send an MQTT command to a device.

    Publishes a JSON payload {"command", "timestamp"} with QoS 1 on
    ipwatch/device/<ip>/command.

    Args:
        ip_address: IP address of the target device
        command: Command to send (shutdown, reboot, status)

    Returns:
        bool: True when the publish completed successfully
    """
    from datetime import datetime

    client = None
    try:
        # Create the MQTT client with a per-process id
        client = mqtt.Client(client_id=f"ipwatch-backend-{os.getpid()}")

        # Optional authentication
        if MQTT_USERNAME and MQTT_PASSWORD:
            client.username_pw_set(MQTT_USERNAME, MQTT_PASSWORD)

        # Connect to the broker
        client.connect(MQTT_BROKER, MQTT_PORT, keepalive=10)

        # BUG FIX: a QoS 1 publish only completes while the network loop
        # runs; without loop_start(), wait_for_publish() could block
        # until its timeout (or forever in older paho versions).
        client.loop_start()

        # Per-device command topic
        topic = f"ipwatch/device/{ip_address}/command"

        # JSON payload with an ISO timestamp (local import above replaces
        # the previous __import__('datetime') hack)
        payload = json.dumps({
            "command": command,
            "timestamp": datetime.now().isoformat()
        })

        # Publish and wait for broker acknowledgement (QoS 1)
        result = client.publish(topic, payload, qos=1)
        result.wait_for_publish(timeout=5)

        logger.info(f"✓ Commande '{command}' envoyée à {ip_address} via MQTT")
        return result.is_published()

    except Exception as e:
        logger.error(f"✗ Erreur envoi commande MQTT à {ip_address}: {e}")
        return False
    finally:
        # Always stop the loop and disconnect, even on the error path
        # (the original leaked the connection when an exception occurred).
        if client is not None:
            try:
                client.loop_stop()
                client.disconnect()
            except Exception:
                pass
|
||||
|
||||
|
||||
def get_device_status(ip_address: str) -> Optional[dict]:
    """
    Fetch a device's status over MQTT (when available).

    Not implemented yet: a real implementation needs to subscribe to the
    device's status topic and wait for its reply. Currently always
    returns None.

    Args:
        ip_address: IP address of the target device

    Returns:
        dict: Device status, or None
    """
    # TODO: subscribe to the status topic and await the device response.
    return None
|
||||
365
backend/app/services/network.py
Executable file
@@ -0,0 +1,365 @@
|
||||
"""
|
||||
Modules réseau pour scan d'IP, ping, ARP et port scan
|
||||
Implémente le workflow de scan selon workflow-scan.md
|
||||
"""
|
||||
import asyncio
|
||||
import ipaddress
|
||||
import platform
|
||||
import subprocess
|
||||
import socket
|
||||
from typing import List, Dict, Optional, Tuple
|
||||
from datetime import datetime
|
||||
import re
|
||||
from pathlib import Path
|
||||
|
||||
# Scapy pour ARP
|
||||
try:
|
||||
from scapy.all import ARP, Ether, srp
|
||||
SCAPY_AVAILABLE = True
|
||||
except ImportError:
|
||||
SCAPY_AVAILABLE = False
|
||||
|
||||
|
||||
class NetworkScanner:
    """Main network scanner: CIDR expansion, parallel ping, ARP/vendor
    lookup, TCP port scan and reverse-DNS resolution."""

    def __init__(self, cidr: str, timeout: float = 1.0, ping_count: int = 1):
        """
        Initialize the network scanner.

        Args:
            cidr: Network in CIDR notation (e.g. "192.168.1.0/24")
            timeout: Timeout for pings and TCP connects (seconds)
            ping_count: Number of ping probes per IP (clamped to >= 1)
        """
        self.cidr = cidr
        self.timeout = timeout
        self.ping_count = max(1, int(ping_count))
        # strict=False tolerates host bits set in the CIDR (e.g. "192.168.1.5/24")
        self.network = ipaddress.ip_network(cidr, strict=False)

    def generate_ip_list(self) -> List[str]:
        """
        Generate the full host list from the CIDR.

        Returns:
            List of IP addresses as strings (network/broadcast excluded
            by ip_network.hosts()).
        """
        return [str(ip) for ip in self.network.hosts()]

    async def ping(self, ip: str) -> bool:
        """
        Ping one IP address (async).

        Args:
            ip: IP address to probe

        Returns:
            True when the host answered, False on timeout or any error.
        """
        # Windows ping uses -n <count> / -w <ms>; Unix uses -c <count> / -W <s>.
        param = '-n' if platform.system().lower() == 'windows' else '-c'
        timeout_param = '-w' if platform.system().lower() == 'windows' else '-W'

        command = [
            'ping',
            param, str(self.ping_count),
            timeout_param,
            # NOTE(review): on Unix int(self.timeout) truncates, so a
            # timeout < 1s becomes "0" — confirm callers always pass >= 1s.
            str(int(self.timeout * 1000) if platform.system().lower() == 'windows' else str(int(self.timeout))),
            ip
        ]

        try:
            # Run ping without capturing output; only the exit code matters.
            process = await asyncio.create_subprocess_exec(
                *command,
                stdout=asyncio.subprocess.DEVNULL,
                stderr=asyncio.subprocess.DEVNULL
            )
            await asyncio.wait_for(process.wait(), timeout=self.timeout + 1)
            return process.returncode == 0
        except (asyncio.TimeoutError, Exception):
            # Any failure (timeout, missing ping binary, ...) counts as offline.
            # NOTE(review): a timed-out subprocess is not killed here — it is
            # left to exit on its own; confirm this is acceptable.
            return False

    async def ping_parallel(self, ip_list: List[str], max_concurrent: int = 50) -> Dict[str, bool]:
        """
        Ping multiple IPs in parallel.

        Args:
            ip_list: IPs to probe
            max_concurrent: Maximum number of simultaneous pings

        Returns:
            Dict {ip: online_status}
        """
        results = {}
        # Semaphore caps concurrency so we do not fork hundreds of pings at once.
        semaphore = asyncio.Semaphore(max_concurrent)

        async def ping_with_semaphore(ip: str):
            async with semaphore:
                results[ip] = await self.ping(ip)

        # Launch every ping; the semaphore throttles actual concurrency.
        await asyncio.gather(*[ping_with_semaphore(ip) for ip in ip_list])

        return results

    def get_arp_table(self) -> Dict[str, Tuple[str, str]]:
        """
        Read the ARP neighbourhood of the scanned network.

        Uses a Scapy broadcast ARP scan when available, otherwise parses
        the operating system's ARP table.

        Returns:
            Dict {ip: (mac, vendor)}
        """
        arp_data = {}

        if SCAPY_AVAILABLE:
            try:
                # Active ARP scan: broadcast who-has for the whole CIDR.
                answered, _ = srp(
                    Ether(dst="ff:ff:ff:ff:ff:ff") / ARP(pdst=self.cidr),
                    timeout=2,
                    verbose=False
                )

                for sent, received in answered:
                    ip = received.psrc
                    mac = received.hwsrc
                    vendor = self._get_mac_vendor(mac)
                    arp_data[ip] = (mac, vendor)
            except Exception as e:
                # NOTE(review): Scapy ARP needs raw-socket privileges; failures
                # fall through with an empty result rather than the OS fallback.
                print(f"Erreur ARP scan avec Scapy: {e}")
        else:
            # Fallback: parse the system ARP cache (passive, already-seen hosts only).
            try:
                if platform.system().lower() == 'windows':
                    output = subprocess.check_output(['arp', '-a'], text=True)
                    pattern = r'(\d+\.\d+\.\d+\.\d+)\s+([0-9a-fA-F-:]+)'
                else:
                    output = subprocess.check_output(['arp', '-n'], text=True)
                    pattern = r'(\d+\.\d+\.\d+\.\d+)\s+\w+\s+([0-9a-fA-F:]+)'

                matches = re.findall(pattern, output)
                for ip, mac in matches:
                    # NOTE(review): rebuilding the hosts list per match is O(n^2)
                    # on large networks — a precomputed set would be O(n).
                    if ip in [str(h) for h in self.network.hosts()]:
                        vendor = self._get_mac_vendor(mac)
                        arp_data[ip] = (mac, vendor)
            except Exception as e:
                print(f"Erreur lecture table ARP: {e}")

        return arp_data

    def _get_mac_vendor(self, mac: str) -> str:
        """
        Look up the hardware vendor for a MAC address.

        Tries the local OUI database first, then a tiny built-in table of
        common prefixes.

        Args:
            mac: MAC address (any separator style)

        Returns:
            Vendor name, or "Unknown"
        """
        # Strip separators and uppercase so prefixes compare reliably.
        mac_norm = re.sub(r"[^0-9A-Fa-f]", "", mac).upper()
        if not mac_norm:
            return "Unknown"

        # OUI lookup from the local oui.txt file, when present.
        vendor = OuiLookup.lookup(mac_norm)
        if vendor:
            return vendor

        # Minimal fallback table of common virtualization / SBC vendors.
        vendors = {
            "00:0C:29": "VMware",
            "00:50:56": "VMware",
            "08:00:27": "VirtualBox",
            "DC:A6:32": "Raspberry Pi",
            "B8:27:EB": "Raspberry Pi",
        }

        for prefix, vendor in vendors.items():
            prefix_norm = prefix.replace(":", "").upper()
            if mac_norm.startswith(prefix_norm):
                return vendor

        return "Unknown"


    async def scan_ports(self, ip: str, ports: List[int]) -> List[int]:
        """
        Scan TCP ports on one IP (connect scan).

        Args:
            ip: Target IP
            ports: Ports to probe

        Returns:
            List of open ports
        """
        open_ports = []

        async def check_port(port: int) -> Optional[int]:
            try:
                # A successful TCP connect means the port is open.
                reader, writer = await asyncio.wait_for(
                    asyncio.open_connection(ip, port),
                    timeout=self.timeout
                )
                writer.close()
                await writer.wait_closed()
                return port
            except:
                # Refused / timed out / unreachable — treated as closed.
                return None

        # Probe all ports concurrently (unbounded — port lists are short).
        results = await asyncio.gather(*[check_port(p) for p in ports])
        open_ports = [p for p in results if p is not None]

        return open_ports

    def get_hostname(self, ip: str) -> Optional[str]:
        """
        Reverse-DNS lookup for an IP.

        Args:
            ip: IP address

        Returns:
            Hostname, or None when resolution fails
        """
        try:
            hostname, _, _ = socket.gethostbyaddr(ip)
            return hostname
        except:
            # NXDOMAIN / timeout — no hostname available.
            return None

    def classify_ip_status(self, is_online: bool, is_known: bool) -> str:
        """
        Classify an IP's state.

        Args:
            is_online: IP answered the ping
            is_known: IP is declared in the config
                      (currently unused by the classification)

        Returns:
            "online" or "offline"
        """
        return "online" if is_online else "offline"

    async def full_scan(self, known_ips: Dict[str, Dict], port_list: List[int], max_concurrent: int = 50, progress_callback=None) -> Dict[str, Dict]:
        """
        Full network scan (see workflow-scan.md): ping sweep, ARP, reverse
        DNS and port scan for every IP of the CIDR.

        Args:
            known_ips: Known IPs from the config, keyed by address
            port_list: TCP ports to scan on online hosts
            max_concurrent: Max simultaneous pings
            progress_callback: Optional async fn(current, total, ip, status, is_online)

        Returns:
            Dict {ip: scan-result dict}
        """
        results = {}

        # 1. Expand the CIDR into individual host IPs.
        ip_list = self.generate_ip_list()
        total_ips = len(ip_list)

        # 2. Parallel ping sweep.
        ping_results = await self.ping_parallel(ip_list, max_concurrent)

        # 3. ARP table + MAC vendor lookup.
        arp_table = self.get_arp_table()

        # 4. Assemble per-IP results.
        for index, ip in enumerate(ip_list, start=1):
            is_online = ping_results.get(ip, False)
            is_known = ip in known_ips

            ip_data = {
                "ip": ip,
                "known": is_known,
                "last_status": self.classify_ip_status(is_online, is_known),
                "last_seen": datetime.now() if is_online else None,
                "mac": None,
                "vendor": None,
                "hostname": None,
                "open_ports": [],
            }

            # Merge config-declared fields.
            # NOTE(review): update() lets config keys overwrite the scan
            # fields above (including "last_status") — confirm intended.
            if is_known:
                ip_data.update(known_ips[ip])

            # ARP info, when the host answered ARP.
            if ip in arp_table:
                mac, vendor = arp_table[ip]
                ip_data["mac"] = mac
                ip_data["vendor"] = vendor

            # Reverse DNS only for hosts that answered the ping.
            if is_online:
                hostname = self.get_hostname(ip)
                if hostname:
                    ip_data["hostname"] = hostname

            # 5. Port scan (online hosts only).
            if is_online and port_list:
                open_ports = await self.scan_ports(ip, port_list)
                ip_data["open_ports"] = open_ports

            results[ip] = ip_data

            # Report progress to the caller (e.g. WebSocket broadcast).
            if progress_callback:
                await progress_callback(index, total_ips, ip, ip_data["last_status"], is_online)

        return results
|
||||
|
||||
|
||||
class OuiLookup:
    """OUI (MAC prefix -> vendor) lookup backed by a local oui.txt file."""
    # Class-level cache shared by all callers; reloaded when the file changes.
    _cache = {}
    _mtime = None
    _path = Path("./data/oui/oui.txt")

    @classmethod
    def _load(cls):
        """(Re)load the OUI table when the file appeared or changed on disk."""
        if not cls._path.exists():
            cls._cache = {}
            cls._mtime = None
            return

        mtime = cls._path.stat().st_mtime
        already_current = cls._mtime == mtime and bool(cls._cache)
        if already_current:
            return

        table = {}
        try:
            with cls._path.open("r", encoding="utf-8", errors="ignore") as handle:
                for line in handle:
                    stripped = line.strip()
                    if "(hex)" not in stripped:
                        continue
                    left, right = stripped.split("(hex)", 1)
                    # Six hex digits on the left of "(hex)" form the OUI prefix.
                    prefix = re.sub(r"[^0-9A-Fa-f]", "", left).upper()[:6]
                    vendor = right.strip()
                    if len(prefix) == 6 and vendor:
                        table[prefix] = vendor
        except Exception:
            # Unreadable/corrupt file: behave as if it were empty.
            table = {}

        cls._cache = table
        cls._mtime = mtime
        print(f"[OUI] Base chargée: {len(cls._cache)} entrées depuis {cls._path}")

    @classmethod
    def lookup(cls, mac: str) -> Optional[str]:
        """Return the vendor name for a MAC address, or None when unknown."""
        if not mac:
            return None
        cls._load()
        if not cls._cache:
            return None
        digits = re.sub(r"[^0-9A-Fa-f]", "", mac).upper()
        prefix = digits[:6]
        if len(prefix) != 6:
            return None
        return cls._cache.get(prefix)
|
||||
194
backend/app/services/opnsense_client.py
Normal file
@@ -0,0 +1,194 @@
|
||||
"""
|
||||
Client API OPNsense pour IPWatch
|
||||
Gère les communications avec l'API REST OPNsense (Kea DHCP)
|
||||
"""
|
||||
import httpx
|
||||
import ipaddress
|
||||
from typing import Optional, Dict, Any, List
|
||||
from backend.app.core.config import config_manager
|
||||
|
||||
|
||||
class OPNsenseAPIError(Exception):
    """Error reported by the OPNsense API (validation failures, etc.)."""

    def __init__(self, message: str, validations: dict = None):
        super().__init__(message)
        # Per-field validation messages returned by the API, if any.
        self.validations = {} if validations is None else (validations or {})
|
||||
|
||||
|
||||
class OPNsenseClient:
    """Client for the OPNsense REST API (Kea DHCPv4) using HTTP Basic auth
    with the api_key:api_secret pair."""

    def __init__(self):
        # Connection settings come from the application's YAML config.
        config = config_manager.config.opnsense
        self.base_url = f"{config.protocol}://{config.host}"
        self.auth = (config.api_key, config.api_secret)
        self.verify_ssl = config.verify_ssl
        self.enabled = config.enabled
        print(f"[OPNsense] Client initialisé: {self.base_url} (ssl_verify={self.verify_ssl})")

    def _get_client(self) -> httpx.AsyncClient:
        """Build a configured async HTTP client (one per request scope;
        callers use it as an async context manager)."""
        return httpx.AsyncClient(
            base_url=self.base_url,
            auth=self.auth,
            verify=self.verify_ssl,
            timeout=30.0
        )

    def _check_result(self, data: Dict[str, Any], action: str):
        """Raise OPNsenseAPIError when the API body reports result='failed'.

        OPNsense returns HTTP 200 even for validation failures, so the
        body must be inspected separately from raise_for_status().
        """
        if data.get("result") == "failed":
            validations = data.get("validations", {})
            msg = f"{action} échoué"
            if validations:
                details = "; ".join(f"{k}: {v}" for k, v in validations.items())
                msg = f"{action} échoué: {details}"
            print(f"[OPNsense] VALIDATION ERREUR: {msg}")
            raise OPNsenseAPIError(msg, validations)

    async def test_connection(self) -> Dict[str, Any]:
        """Check connectivity/credentials via the firmware status endpoint.

        Raises:
            httpx.HTTPStatusError: on any non-2xx response.
        """
        print(f"[OPNsense] Test connexion: GET {self.base_url}/api/core/firmware/status")
        async with self._get_client() as client:
            response = await client.get("/api/core/firmware/status")
            print(f"[OPNsense] Réponse test: {response.status_code}")
            response.raise_for_status()
            return response.json()

    async def search_subnets(self) -> Dict[str, Any]:
        """List the Kea DHCPv4 subnets (raw API payload with a 'rows' key)."""
        print(f"[OPNsense] Recherche subnets: GET {self.base_url}/api/kea/dhcpv4/search_subnet")
        async with self._get_client() as client:
            response = await client.get("/api/kea/dhcpv4/search_subnet")
            print(f"[OPNsense] Réponse search_subnet: {response.status_code}")
            if response.status_code != 200:
                # Log a truncated body before raising, for diagnostics.
                print(f"[OPNsense] Corps réponse erreur: {response.text[:500]}")
            response.raise_for_status()
            data = response.json()
            rows = data.get("rows", [])
            print(f"[OPNsense] {len(rows)} subnet(s) trouvé(s)")
            for row in rows:
                print(f"[OPNsense] - {row.get('subnet')}: uuid={row.get('uuid')}")
            return data

    async def find_subnet_for_ip(self, ip_address: str) -> Optional[str]:
        """Return the UUID of the Kea subnet containing ip_address, or None."""
        print(f"[OPNsense] Recherche subnet pour IP {ip_address}")
        ip_obj = ipaddress.ip_address(ip_address)
        data = await self.search_subnets()
        rows = data.get("rows", [])
        for row in rows:
            subnet_cidr = row.get("subnet", "")
            try:
                network = ipaddress.ip_network(subnet_cidr, strict=False)
                if ip_obj in network:
                    uuid = row.get("uuid")
                    print(f"[OPNsense] Subnet trouvé: {subnet_cidr} -> uuid={uuid}")
                    return uuid
            except ValueError:
                # Malformed subnet entry — skip it.
                continue
        print(f"[OPNsense] Aucun subnet trouvé pour {ip_address}")
        return None

    async def search_reservations(self) -> Dict[str, Any]:
        """List all Kea DHCP reservations (raw API payload with 'rows')."""
        print(f"[OPNsense] Recherche réservations: GET {self.base_url}/api/kea/dhcpv4/search_reservation")
        async with self._get_client() as client:
            response = await client.get("/api/kea/dhcpv4/search_reservation")
            print(f"[OPNsense] Réponse search_reservation: {response.status_code}")
            if response.status_code != 200:
                print(f"[OPNsense] Corps réponse erreur: {response.text[:500]}")
            response.raise_for_status()
            data = response.json()
            rows = data.get("rows", [])
            print(f"[OPNsense] {len(rows)} réservation(s) trouvée(s)")
            return data

    async def get_reservation(self, uuid: str) -> Dict[str, Any]:
        """Fetch one reservation by UUID."""
        print(f"[OPNsense] Get réservation: {uuid}")
        async with self._get_client() as client:
            response = await client.get(f"/api/kea/dhcpv4/get_reservation/{uuid}")
            print(f"[OPNsense] Réponse get_reservation: {response.status_code}")
            response.raise_for_status()
            return response.json()

    async def add_reservation(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """Create a new Kea DHCP reservation.

        Raises:
            OPNsenseAPIError: when the API body reports a validation failure.
        """
        # API expects the fields wrapped under a "reservation" key.
        payload = {"reservation": data}
        print(f"[OPNsense] Ajout réservation: POST {self.base_url}/api/kea/dhcpv4/add_reservation")
        print(f"[OPNsense] Payload: {payload}")
        async with self._get_client() as client:
            response = await client.post(
                "/api/kea/dhcpv4/add_reservation",
                json=payload
            )
            print(f"[OPNsense] Réponse add_reservation: {response.status_code}")
            print(f"[OPNsense] Corps réponse: {response.text[:500]}")
            response.raise_for_status()
            result = response.json()
            self._check_result(result, "Ajout réservation")
            return result

    async def set_reservation(self, uuid: str, data: Dict[str, Any]) -> Dict[str, Any]:
        """Update an existing reservation.

        Raises:
            OPNsenseAPIError: when the API body reports a validation failure.
        """
        payload = {"reservation": data}
        print(f"[OPNsense] Mise à jour réservation {uuid}: POST {self.base_url}/api/kea/dhcpv4/set_reservation/{uuid}")
        print(f"[OPNsense] Payload: {payload}")
        async with self._get_client() as client:
            response = await client.post(
                f"/api/kea/dhcpv4/set_reservation/{uuid}",
                json=payload
            )
            print(f"[OPNsense] Réponse set_reservation: {response.status_code}")
            print(f"[OPNsense] Corps réponse: {response.text[:500]}")
            response.raise_for_status()
            result = response.json()
            self._check_result(result, "Mise à jour réservation")
            return result

    async def del_reservation(self, uuid: str) -> Dict[str, Any]:
        """Delete a reservation by UUID."""
        print(f"[OPNsense] Suppression réservation: {uuid}")
        async with self._get_client() as client:
            response = await client.post(f"/api/kea/dhcpv4/del_reservation/{uuid}")
            print(f"[OPNsense] Réponse del_reservation: {response.status_code}")
            response.raise_for_status()
            return response.json()

    async def reconfigure_kea(self) -> Dict[str, Any]:
        """Apply pending Kea changes by reconfiguring the service.

        Must be called after add/set/del for changes to take effect.
        """
        print(f"[OPNsense] Reconfiguration Kea: POST {self.base_url}/api/kea/service/reconfigure")
        async with self._get_client() as client:
            response = await client.post("/api/kea/service/reconfigure")
            print(f"[OPNsense] Réponse reconfigure: {response.status_code}")
            if response.status_code != 200:
                print(f"[OPNsense] Corps réponse erreur: {response.text[:500]}")
            response.raise_for_status()
            return response.json()

    async def find_reservation_by_ip(self, ip_address: str) -> Optional[Dict[str, Any]]:
        """Find an existing reservation row matching an IP address, or None."""
        print(f"[OPNsense] Recherche réservation par IP: {ip_address}")
        result = await self.search_reservations()
        rows = result.get("rows", [])
        for row in rows:
            if row.get("ip_address") == ip_address:
                print(f"[OPNsense] Réservation trouvée: uuid={row.get('uuid')}")
                return row
        print(f"[OPNsense] Aucune réservation existante pour {ip_address}")
        return None

    async def find_reservation_by_mac(self, mac_address: str) -> Optional[Dict[str, Any]]:
        """Find an existing reservation row matching a MAC address, or None.

        MACs are compared case-insensitively with '-' normalized to ':'.
        """
        mac_normalized = mac_address.lower().replace("-", ":")
        print(f"[OPNsense] Recherche réservation par MAC: {mac_normalized}")
        result = await self.search_reservations()
        rows = result.get("rows", [])
        for row in rows:
            row_mac = (row.get("hw_address") or "").lower().replace("-", ":")
            if row_mac == mac_normalized:
                print(f"[OPNsense] Réservation trouvée par MAC: uuid={row.get('uuid')}")
                return row
        print(f"[OPNsense] Aucune réservation pour MAC {mac_normalized}")
        return None
|
||||
103
backend/app/services/scheduler.py
Executable file
@@ -0,0 +1,103 @@
|
||||
"""
|
||||
Scheduler APScheduler pour les scans réseau périodiques
|
||||
"""
|
||||
from apscheduler.schedulers.asyncio import AsyncIOScheduler
|
||||
from apscheduler.triggers.interval import IntervalTrigger
|
||||
from datetime import datetime, timedelta
|
||||
from typing import Optional, Callable
|
||||
import asyncio
|
||||
|
||||
|
||||
class ScanScheduler:
    """Manages the periodic scan jobs on top of APScheduler's asyncio scheduler."""

    def __init__(self):
        """Create the scheduler (idle until start() is called)."""
        self.scheduler = AsyncIOScheduler()
        # Guards against double start / double shutdown.
        self.is_running = False

    def start(self):
        """Start the scheduler (no-op when already running)."""
        if not self.is_running:
            self.scheduler.start()
            self.is_running = True
            print(f"[{datetime.now()}] Scheduler démarré")

    def stop(self):
        """Shut the scheduler down (no-op when not running)."""
        if self.is_running:
            self.scheduler.shutdown()
            self.is_running = False
            print(f"[{datetime.now()}] Scheduler arrêté")

    def _add_interval_job(self, func: Callable, trigger: IntervalTrigger,
                          job_id: str, name: str, message: str):
        """Register (or replace) an interval job and log the configuration.

        Shared helper for the add_*_job methods below, which previously
        duplicated the same add_job() call.
        """
        self.scheduler.add_job(
            func,
            trigger=trigger,
            id=job_id,
            name=name,
            replace_existing=True
        )
        print(message)

    def add_ping_scan_job(self, scan_function: Callable, interval_seconds: int = 60):
        """
        Schedule the periodic ping scan.

        Args:
            scan_function: Async function to run
            interval_seconds: Interval between runs, in seconds
        """
        self._add_interval_job(
            scan_function,
            IntervalTrigger(seconds=interval_seconds),
            'ping_scan',
            'Scan Ping périodique',
            f"Tâche ping_scan configurée: toutes les {interval_seconds}s"
        )

    def add_port_scan_job(self, scan_function: Callable, interval_seconds: int = 300):
        """
        Schedule the periodic port scan.

        Args:
            scan_function: Async function to run
            interval_seconds: Interval between runs, in seconds
        """
        self._add_interval_job(
            scan_function,
            IntervalTrigger(seconds=interval_seconds),
            'port_scan',
            'Scan ports périodique',
            f"Tâche port_scan configurée: toutes les {interval_seconds}s"
        )

    def add_cleanup_job(self, cleanup_function: Callable, interval_hours: int = 1):
        """
        Schedule the periodic history cleanup.

        Args:
            cleanup_function: Async cleanup function to run
            interval_hours: Interval between runs, in hours
        """
        self._add_interval_job(
            cleanup_function,
            IntervalTrigger(hours=interval_hours),
            'history_cleanup',
            'Nettoyage historique',
            f"Tâche cleanup configurée: toutes les {interval_hours}h"
        )

    def remove_job(self, job_id: str):
        """
        Remove a scheduled job by id; logs instead of raising on failure.

        Args:
            job_id: Job identifier
        """
        try:
            self.scheduler.remove_job(job_id)
            print(f"Tâche {job_id} supprimée")
        except Exception as e:
            print(f"Erreur suppression tâche {job_id}: {e}")

    def get_jobs(self):
        """Return the list of currently scheduled jobs."""
        return self.scheduler.get_jobs()
|
||||
|
||||
|
||||
# Instance globale du scheduler
|
||||
scan_scheduler = ScanScheduler()
|
||||
146
backend/app/services/websocket.py
Executable file
@@ -0,0 +1,146 @@
|
||||
"""
|
||||
Gestionnaire WebSocket pour notifications temps réel
|
||||
"""
|
||||
from fastapi import WebSocket
|
||||
from typing import List, Dict, Any
|
||||
import json
|
||||
from datetime import datetime
|
||||
|
||||
|
||||
class WebSocketManager:
    """Manages WebSocket connections and broadcasts scan events to clients."""

    def __init__(self):
        """Initialize with an empty connection list."""
        # All currently-connected clients; broadcast() prunes dead ones.
        self.active_connections: List[WebSocket] = []

    async def connect(self, websocket: WebSocket):
        """
        Accept a new WebSocket connection and track it.

        Args:
            websocket: WebSocket instance
        """
        await websocket.accept()
        self.active_connections.append(websocket)
        print(f"[{datetime.now()}] Nouvelle connexion WebSocket. Total: {len(self.active_connections)}")

    def disconnect(self, websocket: WebSocket):
        """
        Stop tracking a WebSocket client.

        Args:
            websocket: WebSocket instance to drop (no-op if unknown)
        """
        if websocket in self.active_connections:
            self.active_connections.remove(websocket)
            print(f"[{datetime.now()}] Déconnexion WebSocket. Total: {len(self.active_connections)}")

    async def send_personal_message(self, message: str, websocket: WebSocket):
        """
        Send a message to one specific client; send errors are logged,
        not raised.

        Args:
            message: Text message to send
            websocket: Target client
        """
        try:
            await websocket.send_text(message)
        except Exception as e:
            print(f"Erreur envoi message personnel: {e}")

    async def broadcast(self, message: Dict[str, Any]):
        """
        Broadcast a message to every connected client.

        NOTE(review): mutates the caller's dict by injecting "timestamp" —
        confirm callers never reuse the dict expecting it unchanged.

        Args:
            message: Message dict (serialized to JSON before sending)
        """
        # Stamp the message with the send time.
        message["timestamp"] = datetime.now().isoformat()

        json_message = json.dumps(message)

        # Connections that fail to receive get pruned after the loop
        # (removing while iterating would skip entries).
        disconnected = []

        for connection in self.active_connections:
            try:
                await connection.send_text(json_message)
            except Exception as e:
                print(f"Erreur broadcast: {e}")
                disconnected.append(connection)

        # Drop dead connections.
        for conn in disconnected:
            self.disconnect(conn)

    async def broadcast_scan_start(self):
        """Notify all clients that a scan has started."""
        await self.broadcast({
            "type": "scan_start",
            "message": "Scan réseau démarré"
        })

    async def broadcast_scan_complete(self, stats: Dict[str, int]):
        """
        Notify all clients that a scan finished, with its statistics.

        Args:
            stats: Scan statistics (total, online, offline, ...)
        """
        await self.broadcast({
            "type": "scan_complete",
            "message": "Scan réseau terminé",
            "stats": stats
        })

    async def broadcast_ip_update(self, ip_data: Dict[str, Any]):
        """
        Notify all clients that an IP changed state.

        Args:
            ip_data: Updated IP record
        """
        await self.broadcast({
            "type": "ip_update",
            "data": ip_data
        })

    async def broadcast_new_ip(self, ip_data: Dict[str, Any]):
        """
        Notify all clients that a new IP was detected.

        Args:
            ip_data: New IP record (its "ip" key is used in the message)
        """
        await self.broadcast({
            "type": "new_ip",
            "data": ip_data,
            "message": f"Nouvelle IP détectée: {ip_data.get('ip')}"
        })

    async def broadcast_scan_progress(self, progress_data: Dict[str, Any]):
        """
        Notify all clients of in-progress scan advancement.

        Args:
            progress_data: Progress fields (current, total, ip)
        """
        await self.broadcast({
            "type": "scan_progress",
            "current": progress_data.get("current"),
            "total": progress_data.get("total"),
            "ip": progress_data.get("ip")
        })

    async def broadcast_scan_log(self, message: str):
        """Broadcast one scan log line to all clients."""
        await self.broadcast({
            "type": "scan_log",
            "message": message
        })
|
||||
|
||||
|
||||
# Instance globale du gestionnaire WebSocket
|
||||
ws_manager = WebSocketManager()
|
||||
19
backend/requirements.txt
Executable file
@@ -0,0 +1,19 @@
|
||||
fastapi==0.109.0
|
||||
uvicorn[standard]==0.27.0
|
||||
sqlalchemy==2.0.25
|
||||
pydantic==2.5.3
|
||||
pydantic-settings==2.1.0
|
||||
python-multipart==0.0.6
|
||||
websockets==12.0
|
||||
apscheduler==3.10.4
|
||||
pyyaml==6.0.1
|
||||
# NOTE: the PyPI "asyncio" package is an obsolete Python 3.3 backport and must
# NOT be installed on Python 3 (asyncio ships with the standard library).
|
||||
aiosqlite==0.19.0
|
||||
python-nmap==0.7.1
|
||||
scapy==2.5.0
|
||||
pytest==7.4.4
|
||||
pytest-asyncio==0.23.3
|
||||
httpx==0.26.0
|
||||
psutil==5.9.8
|
||||
wakeonlan==3.1.0
|
||||
paho-mqtt==1.6.1
|
||||
BIN
capture/apple-touch-icon.png
Normal file
|
After Width: | Height: | Size: 71 KiB |
BIN
capture/favicon/android-chrome-192x192.png
Normal file
|
After Width: | Height: | Size: 61 KiB |
BIN
capture/favicon/android-chrome-512x512.png
Normal file
|
After Width: | Height: | Size: 371 KiB |
BIN
capture/favicon/apple-touch-icon.png
Normal file
|
After Width: | Height: | Size: 54 KiB |
BIN
capture/favicon/favicon-16x16.png
Normal file
|
After Width: | Height: | Size: 877 B |
BIN
capture/favicon/favicon-32x32.png
Normal file
|
After Width: | Height: | Size: 2.7 KiB |
BIN
capture/favicon/favicon.ico
Normal file
|
After Width: | Height: | Size: 15 KiB |
1
capture/favicon/site.webmanifest
Normal file
@@ -0,0 +1 @@
|
||||
{"name":"","short_name":"","icons":[{"src":"/android-chrome-192x192.png","sizes":"192x192","type":"image/png"},{"src":"/android-chrome-512x512.png","sizes":"512x512","type":"image/png"}],"theme_color":"#ffffff","background_color":"#ffffff","display":"standalone"}
|
||||
BIN
capture/hardware_benchtools.png
Normal file
|
After Width: | Height: | Size: 980 KiB |
BIN
capture/image copy.png
Normal file
|
After Width: | Height: | Size: 123 KiB |
BIN
capture/image.png
Normal file
|
After Width: | Height: | Size: 1011 KiB |
BIN
capture/image1.png
Normal file
|
After Width: | Height: | Size: 326 KiB |
BIN
capture/ipwatch.png
Normal file
|
After Width: | Height: | Size: 703 KiB |
17
changelog.md
Normal file
@@ -0,0 +1,17 @@
|
||||
# Changelog / Plan de déploiement
|
||||
|
||||
## 2025-12-25
|
||||
|
||||
- [ ] Rebuild Docker (no-cache) + redémarrage pour charger les nouvelles routes backend.
|
||||
- [ ] Lancer les migrations DB :
|
||||
- `backend/app/migrations/add_vm_field.py`
|
||||
- `backend/app/migrations/add_hardware_bench_field.py`
|
||||
- `backend/app/migrations/add_icon_fields.py`
|
||||
- [ ] Vérifier `/api/ips/icons` et le montage `/icons/*`.
|
||||
- [ ] Tester popup icône (liste, upload, sélection, lien, validation).
|
||||
- [ ] Vérifier l’affichage de l’icône associée dans le volet gauche.
|
||||
- [ ] Tester la recherche (tags, inversion, accents, multi-termes).
|
||||
- [ ] Tester l’édition réseau (MAC/Fabricant/Hostname) + copie clipboard.
|
||||
- [ ] Vérifier la page `/architecture` (squelette).
|
||||
- [ ] Vérifier la page `/test` (squelette).
|
||||
- [ ] Mettre à jour la base OUI via Paramètres et vérifier mise à jour des fabricants.
|
||||
179
config.yaml
Executable file
@@ -0,0 +1,179 @@
|
||||
# Configuration IPWatch
|
||||
# Basé sur consigne-parametrage.md
|
||||
|
||||
app:
|
||||
name: "IPWatch"
|
||||
version: "1.0.6"
|
||||
debug: true
|
||||
|
||||
network:
|
||||
cidr: "10.0.0.0/22"
|
||||
gateway: "10.0.0.1"
|
||||
dns:
|
||||
- "8.8.8.8"
|
||||
- "8.8.4.4"
|
||||
|
||||
# Sous-réseaux organisés en sections
|
||||
subnets:
|
||||
- name: "static_vm"
|
||||
cidr: "10.0.0.0/24"
|
||||
start: "10.0.0.0"
|
||||
end: "10.0.0.255"
|
||||
description: "Machines virtuelles statiques"
|
||||
- name: "dhcp"
|
||||
cidr: "10.0.1.0/24"
|
||||
start: "10.0.1.0"
|
||||
end: "10.0.1.255"
|
||||
description: "DHCP"
|
||||
- name: "iot"
|
||||
cidr: "10.0.2.0/24"
|
||||
start: "10.0.2.0"
|
||||
end: "10.0.2.255"
|
||||
description: "IoT"
|
||||
- name: "autres"
|
||||
cidr: "10.0.3.0/24"
|
||||
start: "10.0.3.0"
|
||||
end: "10.0.3.254"
|
||||
description: "autres"
|
||||
|
||||
# IPs connues avec métadonnées
|
||||
ip_classes:
|
||||
"10.0.0.1":
|
||||
name: "Gateway"
|
||||
location: "Réseau"
|
||||
host: "Network" # Type Network pour affichage spécial (bordure bleue)
|
||||
|
||||
scan:
|
||||
ping_interval: 600 # Intervalle scan ping (secondes)
|
||||
ping_count: 3 # Nombre de ping par IP
|
||||
port_scan_interval: 1200 # Intervalle scan ports (secondes)
|
||||
parallel_pings: 100 # Nombre de pings simultanés
|
||||
timeout: 1.0 # Timeout réseau (secondes)
|
||||
force_vendor_update: true # Écrase le fabricant à chaque scan si true
|
||||
|
||||
ports:
|
||||
ranges:
|
||||
- "22" # SSH
|
||||
- "80" # HTTP
|
||||
- "443" # HTTPS
|
||||
- "445" # SAMBA
|
||||
- "1880" #nodered
|
||||
- "3000"
|
||||
- "3389" # RDP
|
||||
- "8123" #home assistant
|
||||
- "8080" # HTTP alternatif
|
||||
- "8006" # proxmox
|
||||
- "8007" # proxmox backup center
|
||||
- "8081" # HTTP alternatif
|
||||
- "8096" #jellyfin
|
||||
- "9090"
|
||||
- "3306" # MySQL
|
||||
- "3552" #arcane
|
||||
- "5432" # PostgreSQL
|
||||
- "6053"
|
||||
- "8266"
|
||||
- "9000"
|
||||
|
||||
# Mapping port -> protocole pour générer des liens cliquables
|
||||
protocols:
|
||||
22: "ssh"
|
||||
80: "http"
|
||||
443: "https"
|
||||
445: "smb"
|
||||
1880: "http"
|
||||
3000: "http"
|
||||
3306: "mysql"
|
||||
3389: "rdp"
|
||||
3552: "http"
|
||||
5432: "postgresql"
|
||||
6053: "http"
|
||||
8006: "https"
|
||||
8007: "https"
|
||||
8080: "http"
|
||||
8081: "http"
|
||||
8096: "http"
|
||||
8266: "http"
|
||||
9000: "http"
|
||||
9090: "https"
|
||||
|
||||
locations:
|
||||
- "Bureau"
|
||||
- "Salon"
|
||||
- "Comble"
|
||||
- "Bureau RdC"
|
||||
- "Garage"
|
||||
- "Exterieur"
|
||||
- "SdB"
|
||||
- ""
|
||||
|
||||
# Hosts avec localisation associée
|
||||
hosts:
|
||||
- name: "physique"
|
||||
location: ""
|
||||
ip: ""
|
||||
- name: "smartphone"
|
||||
location: ""
|
||||
ip: ""
|
||||
- name: "elitedesk"
|
||||
location: "Comble"
|
||||
ip: "10.0.0.101"
|
||||
- name: "m710Q"
|
||||
location: "Bureau RdC"
|
||||
ip: "10.0.1.232"
|
||||
- name: "HP Proliant"
|
||||
location: "Bureau RdC"
|
||||
ip: "10.0.0.205"
|
||||
- name: "pve MSI"
|
||||
location: "Bureau RdC"
|
||||
ip: "10.0.1.174"
|
||||
- name: "HP Proxmox"
|
||||
location: "Bureau"
|
||||
ip: "10.0.0.x"
|
||||
- name: "Lenovo Bureau"
|
||||
location: "Bureau"
|
||||
- name: "Pve Dell"
|
||||
location: "Comble"
|
||||
ip: "10.0.1.228"
|
||||
- name: "IoT"
|
||||
location: ""
|
||||
- name: "Network"
|
||||
location: ""
|
||||
|
||||
|
||||
|
||||
history:
|
||||
retention_hours: 24 # Conserver 24h d'historique
|
||||
|
||||
ui:
|
||||
offline_transparency: 0.5 # Transparence des IPs offline
|
||||
show_mac: true
|
||||
show_vendor: true
|
||||
cell_size: 30 # Taille des cellules IP en pixels (30, 35, 40...)
|
||||
font_size: 12 # Taille de la police dans les cellules en pixels
|
||||
cell_gap: 2.5 # Espacement entre les cellules en pixels
|
||||
details_font_size: 13 # Taille de la police dans le volet détails en pixels
|
||||
details_spacing: 2 # Espacement entre les champs du volet détails en pixels
|
||||
architecture_title_font_size: 18 # Taille des titres Architecture (px)
|
||||
|
||||
links:
|
||||
hardware_bench_url: "http://10.0.0.50:8087/devices.html"
|
||||
|
||||
colors:
|
||||
free: "#75715E" # IP libre (gris Monokai)
|
||||
online_known: "#A6E22E" # En ligne + connue (vert)
|
||||
online_unknown: "#66D9EF" # En ligne + inconnue (cyan)
|
||||
offline_known: "#F92672" # Hors ligne + connue (rose/rouge)
|
||||
offline_unknown: "#AE81FF" # Hors ligne + inconnue (violet)
|
||||
mac_changed: "#FD971F" # MAC address changée (orange - alerte)
|
||||
network_device: "#1E3A8A" # Équipements réseau (bleu foncé)
|
||||
|
||||
opnsense:
|
||||
enabled: true
|
||||
host: "10.0.0.1"
|
||||
protocol: "http"
|
||||
# SECURITY: ne jamais committer de vraies clés API — charger ces valeurs depuis des variables d'environnement ou un gestionnaire de secrets, et révoquer la clé exposée.
api_key: "ZOwL1iuko13l9tnARvJlU0s93C/44gFwZNRmStRhzGV8u6m2nXAcoOAbb6jxtkEe8dqzIjj4zECcKdzI"
|
||||
api_secret: "rMOGHY+3SRfiT7cxpMoGZuwnPPRX0vPHV2oDTn6UPCvH87UXJe1qBkTs8y/ryG942TsTGe5UYO6F7fXK"
|
||||
verify_ssl: false
|
||||
|
||||
database:
|
||||
path: "./data/db.sqlite"
|
||||
27
consigne-design_webui.md
Executable file
@@ -0,0 +1,27 @@
|
||||
# consigne-design_webui.md
|
||||
|
||||
## Thème
|
||||
Monokai dark, contrastes forts, bordures arrondies.
|
||||
|
||||
## Layout général
|
||||
3 colonnes :
|
||||
- gauche : détail IP
|
||||
- centre : grille d’IP + légende + classes
|
||||
- droite : nouvelles détections
|
||||
|
||||
## États des IP
|
||||
Couleurs, bordure pleine/hors ligne, halo ping en cours.
|
||||
|
||||
## Composants
|
||||
- Header
|
||||
- Volet gauche
|
||||
- Grille IP
|
||||
- Volet droit
|
||||
- Onglet paramètres
|
||||
|
||||
## Interactions
|
||||
- sélection case IP
|
||||
- clic nouvelle IP
|
||||
- filtres à cocher
|
||||
- animation ping
|
||||
- transparence offline
|
||||
21
consigne-parametrage.md
Executable file
@@ -0,0 +1,21 @@
|
||||
# consigne-parametrage.md
|
||||
|
||||
Ce document décrit toutes les règles du fichier YAML.
|
||||
|
||||
## Sections
|
||||
- app
|
||||
- network
|
||||
- ip_classes
|
||||
- scan
|
||||
- ports
|
||||
- locations
|
||||
- hosts
|
||||
- history
|
||||
- ui
|
||||
- colors
|
||||
- network_advanced
|
||||
- filters
|
||||
- database
|
||||
|
||||
## Exemple complet
|
||||
(… full YAML spec as defined previously …)
|
||||
43
docker-compose.yml
Executable file
@@ -0,0 +1,43 @@
|
||||
services:
|
||||
ipwatch:
|
||||
build: .
|
||||
container_name: ipwatch
|
||||
restart: unless-stopped
|
||||
|
||||
# Réseau host pour accès complet au réseau local
|
||||
network_mode: host
|
||||
|
||||
# Privilèges pour scan réseau (ping, ARP). NOTE: NET_ADMIN + NET_RAW suffisent ;
# "privileged: true" ci-dessous rend cap_add redondant et accorde bien plus que nécessaire.
|
||||
privileged: true
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
- NET_RAW
|
||||
|
||||
volumes:
|
||||
# Volume pour la configuration
|
||||
- ./config.yaml:/app/config.yaml:ro
|
||||
|
||||
# Volume pour la base de données
|
||||
- ./data:/app/data
|
||||
|
||||
# Volume pour l'architecture (ressources + projets)
|
||||
- ./architecture:/app/architecture
|
||||
|
||||
# Volume pour les logs (optionnel)
|
||||
- ./logs:/app/logs
|
||||
|
||||
environment:
|
||||
- TZ=Europe/Paris
|
||||
|
||||
# Healthcheck
|
||||
healthcheck:
|
||||
test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:8080/health')"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
start_period: 40s
|
||||
|
||||
# Créer les volumes nommés si nécessaire
|
||||
volumes:
|
||||
ipwatch-data:
|
||||
ipwatch-logs:
|
||||
19
frontend/index.html
Executable file
@@ -0,0 +1,19 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="fr">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<link rel="icon" href="/favicon.ico" sizes="any">
|
||||
<link rel="icon" type="image/png" sizes="32x32" href="/favicon-32x32.png">
|
||||
<link rel="icon" type="image/png" sizes="16x16" href="/favicon-16x16.png">
|
||||
<link rel="icon" type="image/png" sizes="192x192" href="/android-chrome-192x192.png">
|
||||
<link rel="icon" type="image/png" sizes="512x512" href="/android-chrome-512x512.png">
|
||||
<link rel="apple-touch-icon" href="/apple-touch-icon.png">
|
||||
<link rel="manifest" href="/site.webmanifest">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>IP Watch</title>
|
||||
</head>
|
||||
<body>
|
||||
<div id="app"></div>
|
||||
<script type="module" src="/src/main.js"></script>
|
||||
</body>
|
||||
</html>
|
||||
2405
frontend/package-lock.json
generated
Executable file
25
frontend/package.json
Executable file
@@ -0,0 +1,25 @@
|
||||
{
|
||||
"name": "ipwatch-frontend",
|
||||
"version": "1.0.1",
|
||||
"private": true,
|
||||
"type": "module",
|
||||
"scripts": {
|
||||
"dev": "vite",
|
||||
"build": "vite build",
|
||||
"preview": "vite preview"
|
||||
},
|
||||
"dependencies": {
|
||||
"@mdi/font": "^7.4.47",
|
||||
"axios": "^1.6.5",
|
||||
"pinia": "^2.1.7",
|
||||
"vue": "^3.4.15",
|
||||
"vue-router": "^4.2.5"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@vitejs/plugin-vue": "^5.0.3",
|
||||
"autoprefixer": "^10.4.17",
|
||||
"postcss": "^8.4.33",
|
||||
"tailwindcss": "^3.4.1",
|
||||
"vite": "^5.0.11"
|
||||
}
|
||||
}
|
||||
6
frontend/postcss.config.js
Executable file
@@ -0,0 +1,6 @@
|
||||
export default {
|
||||
plugins: {
|
||||
tailwindcss: {},
|
||||
autoprefixer: {},
|
||||
},
|
||||
}
|
||||
BIN
frontend/public/android-chrome-192x192.png
Normal file
|
After Width: | Height: | Size: 61 KiB |
BIN
frontend/public/android-chrome-512x512.png
Normal file
|
After Width: | Height: | Size: 371 KiB |
BIN
frontend/public/apple-touch-icon.png
Normal file
|
After Width: | Height: | Size: 54 KiB |
BIN
frontend/public/favicon-16x16.png
Normal file
|
After Width: | Height: | Size: 877 B |
BIN
frontend/public/favicon-32x32.png
Normal file
|
After Width: | Height: | Size: 2.7 KiB |
BIN
frontend/public/favicon.ico
Normal file
|
After Width: | Height: | Size: 15 KiB |
1
frontend/public/site.webmanifest
Normal file
@@ -0,0 +1 @@
|
||||
{"name":"","short_name":"","icons":[{"src":"/android-chrome-192x192.png","sizes":"192x192","type":"image/png"},{"src":"/android-chrome-512x512.png","sizes":"512x512","type":"image/png"}],"theme_color":"#ffffff","background_color":"#ffffff","display":"standalone"}
|
||||
26
frontend/src/App.vue
Executable file
@@ -0,0 +1,26 @@
|
||||
<template>
|
||||
<router-view />
|
||||
</template>
|
||||
|
||||
<script setup>
|
||||
import { onMounted, onUnmounted } from 'vue'
|
||||
import { useIPStore } from '@/stores/ipStore'
|
||||
|
||||
const ipStore = useIPStore()
|
||||
|
||||
onMounted(async () => {
|
||||
// Charger la configuration UI
|
||||
await ipStore.fetchUIConfig()
|
||||
|
||||
// Charger les données initiales
|
||||
await ipStore.fetchIPs()
|
||||
|
||||
// Connecter WebSocket
|
||||
ipStore.connectWebSocket()
|
||||
})
|
||||
|
||||
onUnmounted(() => {
|
||||
// Déconnecter WebSocket
|
||||
ipStore.disconnectWebSocket()
|
||||
})
|
||||
</script>
|
||||
BIN
frontend/src/assets/hardware_benchtools.png
Normal file
|
After Width: | Height: | Size: 980 KiB |
BIN
frontend/src/assets/ipwatch-logo.png
Normal file
|
After Width: | Height: | Size: 61 KiB |
214
frontend/src/assets/main.css
Executable file
@@ -0,0 +1,214 @@
|
||||
/* Styles principaux IPWatch - Thème Monokai */
|
||||
@tailwind base;
|
||||
@tailwind components;
|
||||
@tailwind utilities;
|
||||
|
||||
/* Variables CSS Monokai */
|
||||
:root {
|
||||
--monokai-bg: #272822;
|
||||
--monokai-text: #F8F8F2;
|
||||
--monokai-comment: #30BF97;
|
||||
--monokai-green: #A6E22E;
|
||||
--monokai-pink: #F92672;
|
||||
--monokai-cyan: #66D9EF;
|
||||
--monokai-purple: #AE81FF;
|
||||
--monokai-yellow: #E6DB74;
|
||||
--monokai-orange: #FD971F;
|
||||
--monokai-blue-dark: #1E3A8A;
|
||||
--monokai-blue-light: #3B82F6;
|
||||
}
|
||||
|
||||
/* Base */
|
||||
body {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
background-color: var(--monokai-bg);
|
||||
color: var(--monokai-text);
|
||||
font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
|
||||
}
|
||||
|
||||
.no-select {
|
||||
user-select: none;
|
||||
}
|
||||
|
||||
/* Animation halo ping */
|
||||
@keyframes ping-pulse {
|
||||
0% {
|
||||
box-shadow: 0 0 0 0 rgba(102, 217, 239, 0.7);
|
||||
}
|
||||
50% {
|
||||
box-shadow: 0 0 20px 10px rgba(102, 217, 239, 0.3);
|
||||
}
|
||||
100% {
|
||||
box-shadow: 0 0 0 0 rgba(102, 217, 239, 0);
|
||||
}
|
||||
}
|
||||
|
||||
.ping-animation {
|
||||
animation: ping-pulse 1.5s ease-in-out infinite;
|
||||
}
|
||||
|
||||
/* Grille des IPs avec espacement configurable */
|
||||
.ip-grid {
|
||||
gap: var(--cell-gap, 2px);
|
||||
}
|
||||
|
||||
/* Cases IP compactes - Version minimale */
|
||||
.ip-cell-compact {
|
||||
@apply rounded cursor-pointer transition-all duration-200 relative;
|
||||
border: 1px solid;
|
||||
width: var(--cell-size, 30px);
|
||||
height: var(--cell-size, 30px);
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
font-size: var(--font-size, 10px);
|
||||
}
|
||||
|
||||
/* Cases IP - États selon guidelines-css.md */
|
||||
.ip-cell {
|
||||
@apply rounded-lg p-3 cursor-pointer transition-all duration-200;
|
||||
border: 2px solid;
|
||||
min-height: 80px;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
justify-content: space-between;
|
||||
}
|
||||
|
||||
/* IP libre */
|
||||
.ip-cell.free,
|
||||
.ip-cell-compact.free {
|
||||
background-color: rgba(117, 113, 94, 0.2);
|
||||
border-color: var(--monokai-comment);
|
||||
color: var(--monokai-comment);
|
||||
}
|
||||
|
||||
/* IP en ligne + connue (vert) */
|
||||
.ip-cell.online-known,
|
||||
.ip-cell-compact.online-known {
|
||||
background-color: rgba(166, 226, 46, 0.15);
|
||||
border-color: var(--monokai-green);
|
||||
border-style: solid;
|
||||
color: var(--monokai-text);
|
||||
}
|
||||
|
||||
.ip-cell.online-known:hover,
|
||||
.ip-cell-compact.online-known:hover {
|
||||
background-color: rgba(166, 226, 46, 0.25);
|
||||
}
|
||||
|
||||
/* IP en ligne + inconnue (cyan) */
|
||||
.ip-cell.online-unknown,
|
||||
.ip-cell-compact.online-unknown {
|
||||
background-color: rgba(102, 217, 239, 0.15);
|
||||
border-color: var(--monokai-cyan);
|
||||
border-style: solid;
|
||||
color: var(--monokai-text);
|
||||
}
|
||||
|
||||
.ip-cell.online-unknown:hover,
|
||||
.ip-cell-compact.online-unknown:hover {
|
||||
background-color: rgba(102, 217, 239, 0.25);
|
||||
}
|
||||
|
||||
/* IP hors ligne + connue (rose) */
|
||||
.ip-cell.offline-known,
|
||||
.ip-cell-compact.offline-known {
|
||||
background-color: rgba(249, 38, 114, 0.1);
|
||||
border-color: var(--monokai-pink);
|
||||
border-style: dashed;
|
||||
color: var(--monokai-text);
|
||||
opacity: 0.5;
|
||||
position: relative;
|
||||
}
|
||||
|
||||
/* IP hors ligne + inconnue (violet) */
|
||||
.ip-cell.offline-unknown,
|
||||
.ip-cell-compact.offline-unknown {
|
||||
background-color: rgba(174, 129, 255, 0.1);
|
||||
border-color: var(--monokai-purple);
|
||||
border-style: dashed;
|
||||
color: var(--monokai-text);
|
||||
opacity: 0.5;
|
||||
position: relative;
|
||||
}
|
||||
|
||||
/* Point jaune pour équipements réseau hors ligne - utilise ::after pour échapper à l'opacité du parent */
|
||||
.ip-cell-compact.network-device-offline::after {
|
||||
content: '';
|
||||
position: absolute;
|
||||
bottom: 0.25rem;
|
||||
left: 50%;
|
||||
transform: translateX(-50%);
|
||||
width: 0.375rem; /* 1.5 = 6px */
|
||||
height: 0.375rem;
|
||||
border-radius: 50%;
|
||||
background-color: #FBBF24;
|
||||
box-shadow: 0 0 6px #FBBF24, 0 0 10px rgba(251, 191, 36, 0.6);
|
||||
opacity: 1 !important;
|
||||
z-index: 100;
|
||||
pointer-events: none;
|
||||
}
|
||||
|
||||
/* MAC address changée (orange - alerte) */
|
||||
.ip-cell.mac-changed,
|
||||
.ip-cell-compact.mac-changed {
|
||||
border-color: var(--monokai-orange) !important;
|
||||
border-width: 2px !important;
|
||||
border-style: solid !important;
|
||||
box-shadow: 0 0 10px rgba(253, 151, 31, 0.5);
|
||||
}
|
||||
|
||||
/* Équipements réseau (switches, routeurs, bornes WiFi) */
|
||||
/* En ligne : bordure bleue foncée + fond bleu */
|
||||
.ip-cell.network-device-online,
|
||||
.ip-cell-compact.network-device-online {
|
||||
background-color: rgba(30, 58, 138, 0.25);
|
||||
border-color: var(--monokai-blue-dark);
|
||||
border-style: solid;
|
||||
border-width: 3px;
|
||||
color: var(--monokai-text);
|
||||
}
|
||||
|
||||
.ip-cell.network-device-online:hover,
|
||||
.ip-cell-compact.network-device-online:hover {
|
||||
background-color: rgba(30, 58, 138, 0.35);
|
||||
}
|
||||
|
||||
/* Hors ligne : utilise le style normal (offline-known/offline-unknown) + point bleu en bas */
|
||||
/* Le point bleu est affiché via le template HTML */
|
||||
|
||||
/* Sélection */
|
||||
.ip-cell.selected {
|
||||
box-shadow: 0 0 20px rgba(230, 219, 116, 0.5);
|
||||
border-color: var(--monokai-yellow);
|
||||
}
|
||||
|
||||
.ip-cell-compact.selected {
|
||||
box-shadow:
|
||||
inset 0 0 6px rgba(230, 219, 116, 0.9),
|
||||
inset 0 0 12px rgba(230, 219, 116, 0.6),
|
||||
0 0 6px rgba(230, 219, 116, 0.2);
|
||||
border-color: var(--monokai-yellow);
|
||||
border-width: 2px;
|
||||
}
|
||||
|
||||
/* Scrollbar custom Monokai */
|
||||
::-webkit-scrollbar {
|
||||
width: 10px;
|
||||
height: 10px;
|
||||
}
|
||||
|
||||
::-webkit-scrollbar-track {
|
||||
background: #1e1f1c;
|
||||
}
|
||||
|
||||
::-webkit-scrollbar-thumb {
|
||||
background: var(--monokai-comment);
|
||||
border-radius: 5px;
|
||||
}
|
||||
|
||||
::-webkit-scrollbar-thumb:hover {
|
||||
background: var(--monokai-cyan);
|
||||
}
|
||||
186
frontend/src/components/AppHeader.vue
Executable file
@@ -0,0 +1,186 @@
|
||||
<template>
|
||||
<header class="bg-monokai-bg border-b-2 border-monokai-comment p-4 no-select">
|
||||
<div class="flex items-center justify-between">
|
||||
<!-- Logo et titre -->
|
||||
<div class="flex items-center gap-4">
|
||||
<img :src="logoUrl" alt="IPWatch" class="w-10 h-10" />
|
||||
<h1 class="text-3xl font-bold text-monokai-green">IPWatch</h1>
|
||||
<div class="flex flex-col">
|
||||
<span class="text-monokai-comment">Scanner Réseau</span>
|
||||
<span class="text-xs text-monokai-comment/60">v{{ appVersion }}</span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Stats et contrôles -->
|
||||
<div class="flex items-center gap-6">
|
||||
<!-- Stats système (RAM/CPU) -->
|
||||
<SystemStats />
|
||||
|
||||
<!-- Séparateur -->
|
||||
<div class="h-8 w-px bg-monokai-comment"></div>
|
||||
|
||||
<!-- Statistiques -->
|
||||
<div class="flex gap-4 text-xs">
|
||||
<div class="flex items-center gap-2">
|
||||
<span class="text-monokai-comment">Total:</span>
|
||||
<span class="text-monokai-text font-bold">{{ stats.total }}</span>
|
||||
</div>
|
||||
<div class="flex items-center gap-2">
|
||||
<span class="w-3 h-3 rounded-full bg-monokai-green"></span>
|
||||
<span class="text-monokai-text">{{ stats.online }}</span>
|
||||
</div>
|
||||
<div class="flex items-center gap-2">
|
||||
<span class="w-3 h-3 rounded-full bg-monokai-pink opacity-50"></span>
|
||||
<span class="text-monokai-text">{{ stats.offline }}</span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Dernier scan -->
|
||||
<div v-if="lastScanDate" class="text-xs text-monokai-comment">
|
||||
Dernier scan: {{ formatScanDate(lastScanDate) }}
|
||||
</div>
|
||||
|
||||
<!-- Progression du scan -->
|
||||
<div v-if="isScanning" class="flex items-center gap-2 text-xs">
|
||||
<span class="text-monokai-cyan">
|
||||
{{ scanProgress.current }} / {{ scanProgress.total }}
|
||||
</span>
|
||||
<span v-if="scanProgress.currentIP" class="text-monokai-comment">
|
||||
({{ scanProgress.currentIP }})
|
||||
</span>
|
||||
</div>
|
||||
|
||||
<!-- Bouton scan -->
|
||||
<button
|
||||
@click="triggerScan"
|
||||
:disabled="isScanning"
|
||||
class="px-3 py-1.5 rounded bg-monokai-cyan text-monokai-bg text-xs font-bold hover:bg-monokai-green transition-colors disabled:opacity-50"
|
||||
title="Lancer un scan réseau"
|
||||
>
|
||||
{{ isScanning ? 'Scan en cours...' : 'Lancer Scan' }}
|
||||
</button>
|
||||
|
||||
<!-- Bouton Suivi -->
|
||||
<button
|
||||
@click="goToTracking"
|
||||
class="px-3 py-1.5 rounded bg-monokai-yellow text-monokai-bg text-sm font-bold hover:bg-monokai-orange transition-colors"
|
||||
title="Ouvrir la page des équipements suivis"
|
||||
>
|
||||
<span class="mdi mdi-star"></span> Suivi
|
||||
</button>
|
||||
|
||||
<!-- Bouton Architecture -->
|
||||
<button
|
||||
@click="goToArchitecture"
|
||||
class="px-3 py-1.5 rounded bg-monokai-green text-monokai-bg text-sm font-bold hover:bg-monokai-cyan transition-colors"
|
||||
title="Ouvrir la page architecture réseau"
|
||||
>
|
||||
🧭 Architecture
|
||||
</button>
|
||||
|
||||
<!-- Bouton Test -->
|
||||
<button
|
||||
@click="goToTest"
|
||||
class="px-3 py-1.5 rounded bg-monokai-orange text-monokai-bg text-sm font-bold hover:bg-monokai-yellow transition-colors"
|
||||
title="Ouvrir la page de tests réseau"
|
||||
>
|
||||
🧪 Test
|
||||
</button>
|
||||
|
||||
<!-- Bouton Paramètres -->
|
||||
<button
|
||||
@click="openSettings"
|
||||
class="px-3 py-1.5 rounded bg-monokai-purple text-monokai-bg text-sm hover:bg-monokai-pink transition-colors"
|
||||
title="Ouvrir les paramètres"
|
||||
>
|
||||
⚙ Paramètres
|
||||
</button>
|
||||
|
||||
<!-- Indicateur WebSocket -->
|
||||
<div class="flex items-center gap-2">
|
||||
<div
|
||||
:class="[
|
||||
'w-2 h-2 rounded-full',
|
||||
wsConnected ? 'bg-monokai-green' : 'bg-monokai-pink'
|
||||
]"
|
||||
></div>
|
||||
<span class="text-sm text-monokai-comment">
|
||||
{{ wsConnected ? 'Connecté' : 'Déconnecté' }}
|
||||
</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</header>
|
||||
</template>
|
||||
|
||||
<script setup>
|
||||
import { ref, onMounted } from 'vue'
|
||||
import { storeToRefs } from 'pinia'
|
||||
import { useRouter } from 'vue-router'
|
||||
import { useIPStore } from '@/stores/ipStore'
|
||||
import SystemStats from './SystemStats.vue'
|
||||
import logoUrl from '@/assets/ipwatch-logo.png'
|
||||
|
||||
const emit = defineEmits(['openSettings'])
|
||||
const router = useRouter()
|
||||
|
||||
const ipStore = useIPStore()
|
||||
const { stats, loading, wsConnected, lastScanDate, scanProgress, isScanning } = storeToRefs(ipStore)
|
||||
const appVersion = ref('')
|
||||
|
||||
onMounted(async () => {
  // Resolve the displayed app version from the backend config.
  // Fix: the original only fell back to '1.0.0' when fetch threw; a non-2xx
  // response left appVersion empty ("v" rendered alone in the header).
  try {
    const response = await fetch('/api/ips/config/options')
    if (response.ok) {
      const config = await response.json()
      appVersion.value = config.version || '1.0.0'
    } else {
      // Non-OK HTTP status: use the same fallback as the error path.
      appVersion.value = '1.0.0'
    }
  } catch (error) {
    console.error('Erreur chargement version:', error)
    appVersion.value = '1.0.0'
  }
})
|
||||
// Launch a network scan through the store; failures are logged, never rethrown.
async function triggerScan() {
  try {
    await ipStore.startScan()
  } catch (err) {
    console.error('Erreur lancement scan:', err)
  }
}

// Tell the parent component to open the settings panel.
function openSettings() {
  emit('openSettings')
}

// Navigation helpers toward the secondary pages.
function goToTracking() {
  router.push('/tracking')
}

function goToArchitecture() {
  router.push('/architecture')
}

function goToTest() {
  router.push('/test')
}
|
||||
// Render a scan timestamp as a French relative time ("il y a ...") for the
// last 24h, or as a short dd/mm hh:mm locale string beyond that.
function formatScanDate(date) {
  if (!date) return ''

  const MINUTE = 60000
  const HOUR = 3600000
  const DAY = 86400000

  const scanned = new Date(date)
  const elapsed = Date.now() - scanned

  if (elapsed < MINUTE) return 'il y a quelques secondes'
  if (elapsed < HOUR) return `il y a ${Math.floor(elapsed / MINUTE)} min`
  if (elapsed < DAY) return `il y a ${Math.floor(elapsed / HOUR)}h`

  return scanned.toLocaleString('fr-FR', {
    day: '2-digit',
    month: '2-digit',
    hour: '2-digit',
    minute: '2-digit',
  })
}
</script>
|
||||
104
frontend/src/components/IPCell.vue
Executable file
@@ -0,0 +1,104 @@
|
||||
<template>
|
||||
<div
|
||||
:class="[
|
||||
'ip-cell-compact',
|
||||
cellClass,
|
||||
{ 'selected': isSelected },
|
||||
{ 'ping-animation': isPinging },
|
||||
{ 'mac-changed': ip.mac_changed },
|
||||
{ 'network-device-offline': ip.network_device && ip.last_status === 'offline' }
|
||||
]"
|
||||
@click="selectThisIP"
|
||||
:title="getTooltip"
|
||||
>
|
||||
<!-- Afficher seulement le dernier octet -->
|
||||
<div class="font-mono">
|
||||
{{ lastOctet }}
|
||||
</div>
|
||||
|
||||
<!-- Indicateur ports ouverts (petit badge décalé et réduit) -->
|
||||
<div v-if="ip.open_ports && ip.open_ports.length > 0"
|
||||
class="absolute top-0.5 right-0.5 w-1 h-1 rounded-full bg-monokai-cyan">
|
||||
</div>
|
||||
|
||||
<!-- Indicateur IP suivie (petit rond rouge en bas) - seulement si pas network_device offline -->
|
||||
<div v-if="ip.tracked && !(ip.network_device && ip.last_status === 'offline')"
|
||||
class="absolute bottom-0.5 left-1/2 transform -translate-x-1/2 w-1.5 h-1.5 rounded-full bg-red-500"
|
||||
title="IP suivie">
|
||||
</div>
|
||||
</div>
|
||||
</template>
|
||||
|
||||
<script setup>
|
||||
import { computed } from 'vue'
|
||||
import { storeToRefs } from 'pinia'
|
||||
import { useIPStore } from '@/stores/ipStore'
|
||||
|
||||
const props = defineProps({
  ip: { type: Object, required: true },
  isPinging: { type: Boolean, default: false },
})

const ipStore = useIPStore()
const { selectedIP } = storeToRefs(ipStore)

// Whether this cell corresponds to the IP currently selected in the store.
const isSelected = computed(() => selectedIP.value?.ip === props.ip.ip)

// Only the last octet is rendered inside the (very small) cell.
const lastOctet = computed(() => props.ip.ip.split('.').at(-1))

// Rich tooltip: one line per known attribute of the IP.
const getTooltip = computed(() => {
  const info = props.ip
  let head = `${info.ip}`
  if (info.name) head += ` - ${info.name}`
  const parts = [head]
  if (info.network_device) parts.push(`🔷 Équipement réseau`)
  if (info.hostname) parts.push(`Hostname: ${info.hostname}`)
  if (info.mac) parts.push(`MAC: ${info.mac}`)
  // Vendor is appended to the previous line (no newline), as before.
  if (info.vendor) parts[parts.length - 1] += ` (${info.vendor})`
  if (info.mac_changed) parts.push(`⚠️ MAC ADDRESS CHANGÉE !`)
  if (info.known) parts.push(`✅ IP connue`)
  if (info.tracked) parts.push(`⭐ IP suivie`)
  if (info.vm) parts.push(`🖥️ VM`)
  if (info.hardware_bench) parts.push(`🔧 Hardware bench`)
  if (info.open_ports && info.open_ports.length > 0) {
    parts.push(`Ports: ${info.open_ports.join(', ')}`)
  }
  return parts.join('\n')
})

// CSS state class for the cell (see guidelines-css.md).
const cellClass = computed(() => {
  // Online network equipment gets its dedicated blue style. Offline network
  // equipment keeps the regular known/unknown style; its blue dot indicator
  // comes from the template, not from this class.
  if (props.ip.network_device && props.ip.last_status === 'online') {
    return 'network-device-online'
  }

  // No recorded status means the address is free.
  if (!props.ip.last_status) return 'free'

  const prefix = props.ip.last_status === 'online' ? 'online' : 'offline'
  const suffix = props.ip.known ? 'known' : 'unknown'
  return `${prefix}-${suffix}`
})

// Push this IP as the current selection in the store.
function selectThisIP() {
  ipStore.selectIP(props.ip)
}
||||
</script>
|
||||
1360
frontend/src/components/IPDetails.vue
Executable file
155
frontend/src/components/IPGrid.vue
Executable file
@@ -0,0 +1,155 @@
|
||||
<template>
|
||||
<div class="flex flex-col h-full">
|
||||
<!-- Grille d'IPs par sous-réseaux -->
|
||||
<div class="flex-1 overflow-auto p-4">
|
||||
<!-- Pour chaque sous-réseau -->
|
||||
<div v-for="subnet in groupedSubnets" :key="subnet.name" class="mb-6">
|
||||
<!-- En-tête de section -->
|
||||
<div class="mb-3 pb-2 border-b-2 border-monokai-cyan/30">
|
||||
<p class="text-xs text-monokai-comment">{{ subnet.name }}</p>
|
||||
<h3 v-if="subnet.start && subnet.end" class="text-lg font-bold text-monokai-cyan mt-0.5">
|
||||
{{ subnet.start }} à {{ subnet.end }}
|
||||
</h3>
|
||||
</div>
|
||||
|
||||
<!-- Grille des IPs de ce sous-réseau -->
|
||||
<div class="grid grid-cols-4 gap-3">
|
||||
<IPCell
|
||||
v-for="ip in subnet.ips"
|
||||
:key="ip.ip"
|
||||
:ip="ip"
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Message si vide -->
|
||||
<div v-if="groupedSubnets.length === 0" class="text-center text-monokai-comment mt-10">
|
||||
<p>Aucune IP à afficher</p>
|
||||
<p class="text-sm mt-2">Ajustez les filtres ou lancez un scan</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Légende -->
|
||||
<div class="bg-monokai-bg border-t border-monokai-comment p-3">
|
||||
<div class="flex gap-6 text-xs">
|
||||
<div class="flex items-center gap-2">
|
||||
<div class="w-4 h-4 rounded border-2 border-monokai-green bg-monokai-green/15"></div>
|
||||
<span class="text-monokai-text">En ligne (connue)</span>
|
||||
</div>
|
||||
<div class="flex items-center gap-2">
|
||||
<div class="w-4 h-4 rounded border-2 border-monokai-cyan bg-monokai-cyan/15"></div>
|
||||
<span class="text-monokai-text">En ligne (inconnue)</span>
|
||||
</div>
|
||||
<div class="flex items-center gap-2">
|
||||
<div class="w-4 h-4 rounded border-2 border-dashed border-monokai-pink bg-monokai-pink/10 opacity-50"></div>
|
||||
<span class="text-monokai-text">Hors ligne (connue)</span>
|
||||
</div>
|
||||
<div class="flex items-center gap-2">
|
||||
<div class="w-4 h-4 rounded border-2 border-dashed border-monokai-purple bg-monokai-purple/10 opacity-50"></div>
|
||||
<span class="text-monokai-text">Hors ligne (inconnue)</span>
|
||||
</div>
|
||||
<div class="flex items-center gap-2">
|
||||
<div class="w-4 h-4 rounded border-2 border-monokai-comment bg-monokai-comment/20"></div>
|
||||
<span class="text-monokai-text">Libre</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</template>
|
||||
|
||||
<script setup>
|
||||
import { ref, computed, onMounted } from 'vue'
|
||||
import { storeToRefs } from 'pinia'
|
||||
import { useIPStore } from '@/stores/ipStore'
|
||||
import IPCell from './IPCell.vue'
|
||||
|
||||
const ipStore = useIPStore()
|
||||
const { filteredIPs } = storeToRefs(ipStore)
|
||||
|
||||
// Subnets depuis la config
|
||||
const subnets = ref([])
|
||||
|
||||
// Charger les subnets depuis la config
|
||||
// Load subnet definitions by fetching the raw config.yaml text and extracting
// the `subnets:` section with a line-oriented scan.
// NOTE(review): this is a fragile ad-hoc YAML parse — it only recognizes
// double-quoted values; an unquoted value in config.yaml yields `undefined`.
// A structured endpoint (/api/ips/config/options, used by IPGridTree.vue)
// would be more robust — consider switching.
onMounted(async () => {
  try {
    const response = await fetch('/api/ips/config/content')
    if (response.ok) {
      const data = await response.json()
      const yamlContent = data.content
      // Grab the text from "subnets:" up to the next top-level key (or EOF).
      const subnetMatches = yamlContent.match(/subnets:[\s\S]*?(?=\n\w+:|\n$)/)?.[0]

      if (subnetMatches) {
        const subnetLines = subnetMatches.split('\n')
        let currentSubnet = null

        // First double-quoted value on the line, or undefined if none.
        const quotedValue = (line) => line.split('"')[1]

        subnetLines.forEach(line => {
          if (line.includes('- name:')) {
            // A new list entry starts: flush the previous one first.
            if (currentSubnet) subnets.value.push(currentSubnet)
            currentSubnet = { name: quotedValue(line) }
          } else if (currentSubnet) {
            if (line.includes('start:')) currentSubnet.start = quotedValue(line)
            if (line.includes('end:')) currentSubnet.end = quotedValue(line)
            if (line.includes('cidr:')) currentSubnet.cidr = quotedValue(line)
          }
        })
        // Flush the trailing entry (loop only flushes on the NEXT "- name:").
        if (currentSubnet) subnets.value.push(currentSubnet)
        // (removed leftover debug console.log '=== SUBNETS LOADED V2 ===')
      }
    }
  } catch (error) {
    console.error('Erreur chargement subnets:', error)
  }
})
|
||||
|
||||
// Fonction pour vérifier si une IP appartient à un subnet
|
||||
// Returns true when `ip` lies within the inclusive [start, end] IPv4 range.
// All three arguments are dotted-quad strings; comparison is numeric.
function ipInSubnet(ip, start, end) {
  const value = ipToNumber(ip)
  return value >= ipToNumber(start) && value <= ipToNumber(end)
}
|
||||
|
||||
// Convertir une IP en nombre
|
||||
// Convert a dotted-quad IPv4 string to its unsigned 32-bit integer value.
// `>>> 0` forces the result back to unsigned (the `<<` shifts operate on
// signed 32-bit ints, so 255.255.255.255 would otherwise come out negative).
// Fix: pass an explicit radix to parseInt (best practice; avoids any
// ambiguity for octet strings).
function ipToNumber(ip) {
  return ip.split('.').reduce((acc, octet) => (acc << 8) + Number.parseInt(octet, 10), 0) >>> 0
}
|
||||
|
||||
// Grouper les IPs par subnet
|
||||
// Group the filtered IPs by configured subnet range, in config order, each
// group sorted numerically. IPs matching no subnet land in a trailing
// "Autres" group. Empty groups are omitted.
const groupedSubnets = computed(() => {
  const groups = []

  for (const subnet of subnets.value) {
    const members = filteredIPs.value.filter(entry =>
      ipInSubnet(entry.ip, subnet.start, subnet.end)
    )
    if (members.length === 0) continue

    groups.push({
      name: subnet.name,
      start: subnet.start,
      end: subnet.end,
      ips: members.sort((a, b) => ipToNumber(a.ip) - ipToNumber(b.ip))
    })
  }

  // Catch-all for IPs outside every configured range.
  const leftovers = filteredIPs.value.filter(entry =>
    !subnets.value.some(subnet => ipInSubnet(entry.ip, subnet.start, subnet.end))
  )
  if (leftovers.length > 0) {
    groups.push({
      name: 'Autres',
      start: '',
      end: '',
      ips: leftovers.sort((a, b) => ipToNumber(a.ip) - ipToNumber(b.ip))
    })
  }

  return groups
})
|
||||
</script>
|
||||
162
frontend/src/components/IPGridTree.vue
Executable file
@@ -0,0 +1,162 @@
|
||||
<template>
|
||||
<div class="flex flex-col h-full">
|
||||
<!-- Organisation en arbre par sous-réseaux -->
|
||||
<div class="flex-1 overflow-auto px-1 py-3 no-select">
|
||||
<div v-for="subnet in organizedSubnets" :key="subnet.name" class="mb-3">
|
||||
<!-- Header du sous-réseau (style tree) -->
|
||||
<div class="flex items-center gap-2 mb-2 text-monokai-cyan border-l-4 border-monokai-cyan pl-3 no-select">
|
||||
<span class="font-bold text-lg">{{ subnet.name }}</span>
|
||||
<span class="text-sm text-monokai-text font-semibold">{{ subnet.cidr }}</span>
|
||||
<span class="text-sm text-monokai-text font-semibold ml-auto">
|
||||
{{ subnet.ips.length }} IPs
|
||||
({{ subnet.stats.online }} en ligne)
|
||||
</span>
|
||||
</div>
|
||||
|
||||
<!-- Grille compacte des IPs du sous-réseau -->
|
||||
<div class="flex flex-wrap pl-2 ip-grid">
|
||||
<IPCell
|
||||
v-for="ip in subnet.ips"
|
||||
:key="ip.ip"
|
||||
:ip="ip"
|
||||
:is-pinging="scanProgress.currentIP === ip.ip"
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Message si vide -->
|
||||
<div v-if="organizedSubnets.length === 0" class="text-center text-monokai-comment mt-10">
|
||||
<p>Aucune IP à afficher</p>
|
||||
<p class="text-sm mt-2">Ajustez les filtres ou lancez un scan</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Légende -->
|
||||
<div class="bg-monokai-bg border-t border-monokai-comment px-3 py-2">
|
||||
<div class="flex items-center gap-3 text-[11px] whitespace-nowrap overflow-hidden no-select">
|
||||
<div class="flex items-center gap-1.5">
|
||||
<div class="w-3 h-3 rounded border-2 border-monokai-green bg-monokai-green/15"></div>
|
||||
<span class="text-monokai-text">En ligne (connue)</span>
|
||||
</div>
|
||||
<div class="flex items-center gap-1.5">
|
||||
<div class="w-3 h-3 rounded border-2 border-monokai-cyan bg-monokai-cyan/15"></div>
|
||||
<span class="text-monokai-text">En ligne (inconnue)</span>
|
||||
</div>
|
||||
<div class="flex items-center gap-1.5">
|
||||
<div class="w-3 h-3 rounded border-2 border-dashed border-monokai-pink bg-monokai-pink/10 opacity-50"></div>
|
||||
<span class="text-monokai-text">Hors ligne (connue)</span>
|
||||
</div>
|
||||
<div class="flex items-center gap-1.5">
|
||||
<div class="w-3 h-3 rounded border-2 border-dashed border-monokai-purple bg-monokai-purple/10 opacity-50"></div>
|
||||
<span class="text-monokai-text">Hors ligne (inconnue)</span>
|
||||
</div>
|
||||
<div class="flex items-center gap-1.5">
|
||||
<div class="w-3 h-3 rounded border-2 border-monokai-comment bg-monokai-comment/20"></div>
|
||||
<span class="text-monokai-text">Libre</span>
|
||||
</div>
|
||||
<div class="flex items-center gap-1.5">
|
||||
<div class="w-3 h-3 rounded border-2 border-monokai-orange" style="box-shadow: 0 0 6px rgba(253, 151, 31, 0.5);"></div>
|
||||
<span class="text-monokai-text">⚠️ MAC changée</span>
|
||||
</div>
|
||||
<div class="flex items-center gap-1.5">
|
||||
<div class="w-3 h-3 rounded border-[3px]" style="border-color: #1E3A8A; background-color: rgba(30, 58, 138, 0.25);"></div>
|
||||
<span class="text-monokai-text">Équip. réseau (en ligne)</span>
|
||||
</div>
|
||||
<div class="flex items-center gap-1.5">
|
||||
<div class="w-1.5 h-1.5 rounded-full" style="background-color: #FBBF24; box-shadow: 0 0 6px #FBBF24, 0 0 10px rgba(251, 191, 36, 0.6);"></div>
|
||||
<span class="text-monokai-text">Équip. réseau (hors ligne)</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</template>
|
||||
|
||||
<script setup>
|
||||
import { computed, ref, onMounted } from 'vue'
|
||||
import { storeToRefs } from 'pinia'
|
||||
import { useIPStore } from '@/stores/ipStore'
|
||||
import IPCell from './IPCell.vue'
|
||||
|
||||
const ipStore = useIPStore()
|
||||
const { filteredIPs, scanProgress } = storeToRefs(ipStore)
|
||||
|
||||
// Sous-réseaux chargés depuis la config
|
||||
const subnets = ref([])
|
||||
|
||||
// Charger les subnets depuis l'API
|
||||
// Load the subnet list from the structured config endpoint, falling back to
// a hard-coded default layout when the config has no subnets or the request
// throws. Fix: the fallback array was duplicated verbatim in the else-branch
// and the catch-branch — hoisted to a single local constant.
onMounted(async () => {
  // Single source of truth for the fallback layout.
  const fallbackSubnets = [
    { name: 'static_vm', cidr: '10.0.0.0/24', description: 'Machines virtuelles statiques' },
    { name: 'dhcp', cidr: '10.0.1.0/24', description: 'DHCP' },
    { name: 'iot', cidr: '10.0.2.0/24', description: 'IoT' },
    { name: 'autres', cidr: '10.0.3.0/24', description: 'autres' }
  ]

  try {
    const response = await fetch('/api/ips/config/options')
    if (response.ok) {
      const data = await response.json()
      // Prefer configured subnets; otherwise use the fallback.
      subnets.value = data.subnets && data.subnets.length > 0 ? data.subnets : fallbackSubnets
    }
    // A non-OK response leaves `subnets` untouched (same as the original).
  } catch (error) {
    console.error('Erreur chargement subnets:', error)
    subnets.value = fallbackSubnets
  }
})
|
||||
|
||||
// Fonction pour trier les IPs par ordre numérique
|
||||
// Return a copy of `ips` sorted by numeric IPv4 address (octet by octet,
// not lexicographically). Each entry is an object with a dotted-quad `.ip`.
// The input array is not mutated.
function sortIPsNumerically(ips) {
  // Collapse the four octets into one comparable integer.
  const toKey = (addr) => addr.split('.').reduce((acc, part) => acc * 256 + Number(part), 0)
  return ips.slice().sort((a, b) => toKey(a.ip) - toKey(b.ip))
}
|
||||
|
||||
// Organiser les IPs par sous-réseau
|
||||
// Bucket the filtered IPs into the configured /24 subnets (membership is a
// prefix match on the first three octets of the CIDR base), with per-subnet
// online/offline stats. Subnets with no matching IPs are omitted.
const organizedSubnets = computed(() => {
  const result = []

  for (const subnet of subnets.value) {
    // e.g. "10.0.0.0/24" -> prefix "10.0.0."
    const base = subnet.cidr.split('/')[0]
    const prefix = base.split('.').slice(0, 3).join('.') + '.'

    const members = sortIPsNumerically(
      filteredIPs.value.filter(entry => entry.ip.startsWith(prefix))
    )
    if (members.length === 0) continue

    result.push({
      name: subnet.name,
      cidr: subnet.cidr,
      description: subnet.description,
      ips: members,
      stats: {
        total: members.length,
        online: members.filter(e => e.last_status === 'online').length,
        offline: members.filter(e => e.last_status === 'offline').length
      }
    })
  }

  return result
})
|
||||
</script>
|
||||
137
frontend/src/components/NewDetections.vue
Executable file
@@ -0,0 +1,137 @@
|
||||
<template>
|
||||
<div class="h-full flex flex-col bg-monokai-bg border-l border-monokai-comment no-select">
|
||||
<!-- Header -->
|
||||
<div class="p-4 border-b border-monokai-comment">
|
||||
<h2 class="text-xl font-bold text-monokai-pink">Nouvelles Détections</h2>
|
||||
</div>
|
||||
|
||||
<!-- Liste -->
|
||||
<div class="flex-1 overflow-auto p-4">
|
||||
<div v-if="newIPs.length > 0" class="space-y-2">
|
||||
<div
|
||||
v-for="ip in newIPs"
|
||||
:key="ip.ip"
|
||||
@click="selectIP(ip)"
|
||||
:class="[
|
||||
'p-2.5 rounded border-2 cursor-pointer transition-colors',
|
||||
isOlderThanOneHour(ip.first_seen)
|
||||
? 'border-monokai-purple-dark bg-monokai-purple-dark/10 hover:bg-monokai-purple-dark/20'
|
||||
: 'border-monokai-pink bg-monokai-pink/10 hover:bg-monokai-pink/20'
|
||||
]"
|
||||
>
|
||||
<!-- IP + Statut sur la même ligne -->
|
||||
<div class="flex items-center justify-between">
|
||||
<span class="font-mono font-bold text-monokai-text text-sm">
|
||||
{{ ip.ip }}
|
||||
</span>
|
||||
<div class="flex items-center gap-2 text-xs">
|
||||
<span
|
||||
:class="[
|
||||
'px-2 py-0.5 rounded',
|
||||
ip.last_status === 'online'
|
||||
? 'bg-monokai-green/20 text-monokai-green'
|
||||
: 'bg-monokai-comment/20 text-monokai-comment'
|
||||
]"
|
||||
>
|
||||
{{ ip.last_status || 'Inconnu' }}
|
||||
</span>
|
||||
<span
|
||||
v-if="!ip.known"
|
||||
class="px-2 py-0.5 rounded bg-monokai-cyan/20 text-monokai-cyan"
|
||||
>
|
||||
Inconnue
|
||||
</span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- MAC/Vendor + Timestamp sur la même ligne -->
|
||||
<div class="flex items-center justify-between text-xs text-monokai-comment mt-1">
|
||||
<div v-if="ip.mac" class="font-mono">
|
||||
{{ ip.mac }}
|
||||
<span v-if="ip.vendor" class="ml-1">({{ ip.vendor }})</span>
|
||||
</div>
|
||||
<div>{{ formatTime(ip.first_seen) }}</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Placeholder -->
|
||||
<div v-else class="text-center text-monokai-comment mt-10">
|
||||
<p>Aucune nouvelle IP détectée</p>
|
||||
<p class="text-sm mt-2">Les nouvelles IPs apparaîtront ici</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</template>
|
||||
|
||||
<script setup>
|
||||
import { computed } from 'vue'
|
||||
import { storeToRefs } from 'pinia'
|
||||
import { useIPStore } from '@/stores/ipStore'
|
||||
|
||||
const ipStore = useIPStore()
|
||||
const { ips } = storeToRefs(ipStore)
|
||||
|
||||
// IPs nouvellement détectées (dans les dernières 24h ET online ET non enregistrées)
|
||||
// The 20 most recent "new detections": IPs currently online, first seen
// within the last 24 hours, and not yet registered (no name, not `known`).
// Sorted newest first.
const newIPs = computed(() => {
  const cutoff = new Date(Date.now() - 24 * 60 * 60 * 1000)

  const candidates = ips.value.filter(entry => {
    if (entry.last_status !== 'online') return false
    if (!entry.first_seen) return false
    if (new Date(entry.first_seen) <= cutoff) return false
    // Already-registered IPs (named or marked known) are not "new".
    return !(entry.known || entry.name)
  })

  // Newest detection first.
  candidates.sort((a, b) => new Date(b.first_seen) - new Date(a.first_seen))
  return candidates.slice(0, 20)
})
|
||||
|
||||
// Forward the clicked IP to the Pinia store so the shared selection
// (and any detail panel bound to it) updates.
function selectIP(ip) {
  ipStore.selectIP(ip)
}
|
||||
|
||||
// Vérifier si la détection date de plus d'une heure
|
||||
// True when the detection timestamp is at least one hour in the past.
// Falsy input returns false; an unparsable date also returns false
// (NaN comparison is never >=).
function isOlderThanOneHour(dateString) {
  if (!dateString) return false
  const elapsedMs = Date.now() - new Date(dateString).getTime()
  return elapsedMs >= 3600000 // 1 hour in milliseconds
}
|
||||
|
||||
// Human-readable relative French timestamp: "À l'instant" under a minute,
// "Il y a N min" under an hour, "Il y a Nh" under a day, else the absolute
// fr-FR locale string. Empty/falsy input yields ''.
function formatTime(dateString) {
  if (!dateString) return ''

  const date = new Date(dateString)
  const elapsed = Date.now() - date.getTime()

  if (elapsed < 60000) return 'À l\'instant'
  if (elapsed < 3600000) return `Il y a ${Math.floor(elapsed / 60000)} min`
  if (elapsed < 86400000) return `Il y a ${Math.floor(elapsed / 3600000)}h`

  return date.toLocaleString('fr-FR')
}
|
||||
</script>
|
||||
440
frontend/src/components/SettingsModal.vue
Executable file
@@ -0,0 +1,440 @@
|
||||
<template>
|
||||
<div
|
||||
v-if="isOpen"
|
||||
class="fixed inset-0 bg-black/50 flex items-center justify-center z-50"
|
||||
@click.self="close"
|
||||
>
|
||||
<div class="bg-monokai-bg border-2 border-monokai-cyan rounded-lg w-[800px] max-h-[80vh] flex flex-col">
|
||||
<!-- Header -->
|
||||
<div class="p-4 border-b border-monokai-comment flex justify-between items-center">
|
||||
<h2 class="text-xl font-bold text-monokai-cyan">Paramètres</h2>
|
||||
<button
|
||||
@click="close"
|
||||
class="text-monokai-comment hover:text-monokai-text text-2xl leading-none"
|
||||
>
|
||||
×
|
||||
</button>
|
||||
</div>
|
||||
|
||||
<!-- Content -->
|
||||
<div class="flex-1 overflow-auto p-4 space-y-4">
|
||||
<!-- Boutons d'action -->
|
||||
<div class="flex gap-2">
|
||||
<button
|
||||
@click="reloadConfig"
|
||||
:disabled="loading"
|
||||
class="px-4 py-2 bg-monokai-green text-monokai-bg rounded hover:bg-monokai-cyan transition-colors disabled:opacity-50"
|
||||
>
|
||||
{{ loading ? 'Chargement...' : 'Recharger Config' }}
|
||||
</button>
|
||||
|
||||
<label class="px-4 py-2 bg-monokai-cyan text-monokai-bg rounded hover:bg-monokai-green transition-colors cursor-pointer">
|
||||
<input
|
||||
type="file"
|
||||
accept=".xml"
|
||||
@change="importIpscan"
|
||||
class="hidden"
|
||||
/>
|
||||
Importer IPScan XML
|
||||
</label>
|
||||
</div>
|
||||
|
||||
<!-- Lien hardware bench -->
|
||||
<div class="border border-monokai-comment rounded p-3">
|
||||
<div class="text-sm font-bold text-monokai-cyan mb-2">Lien hardware bench</div>
|
||||
<div class="flex items-center gap-2">
|
||||
<input
|
||||
v-model="hardwareBenchUrl"
|
||||
type="url"
|
||||
class="flex-1 px-3 py-2 bg-monokai-bg border border-monokai-comment rounded text-monokai-text text-sm"
|
||||
placeholder="http://10.0.0.50:8087/devices.html"
|
||||
/>
|
||||
<button
|
||||
@click="saveHardwareBenchUrl"
|
||||
:disabled="loading"
|
||||
class="px-3 py-2 bg-monokai-purple text-monokai-bg rounded hover:bg-monokai-pink transition-colors disabled:opacity-50"
|
||||
>
|
||||
Enregistrer
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Base OUI -->
|
||||
<div class="border border-monokai-comment rounded p-3">
|
||||
<div class="text-sm font-bold text-monokai-cyan mb-2">Base OUI (fabricants)</div>
|
||||
<div class="flex items-center justify-between gap-2">
|
||||
<div class="text-xs text-monokai-comment">
|
||||
<span v-if="ouiStatus.exists">Dernière MAJ: {{ formatDate(ouiStatus.updated_at) }}</span>
|
||||
<span v-else>Aucune liste locale</span>
|
||||
</div>
|
||||
<button
|
||||
@click="updateOui"
|
||||
:disabled="loading"
|
||||
class="px-3 py-2 bg-monokai-green text-monokai-bg rounded hover:bg-monokai-cyan transition-colors disabled:opacity-50"
|
||||
>
|
||||
Mettre à jour
|
||||
</button>
|
||||
</div>
|
||||
<label class="flex items-center gap-2 mt-3 text-xs text-monokai-text">
|
||||
<input v-model="forceVendorUpdate" type="checkbox" class="form-checkbox" @change="saveForceVendor" />
|
||||
Forcer la mise à jour du fabricant lors des scans
|
||||
</label>
|
||||
</div>
|
||||
|
||||
<!-- Architecture -->
|
||||
<div class="border border-monokai-comment rounded p-3">
|
||||
<div class="text-sm font-bold text-monokai-cyan mb-2">Architecture</div>
|
||||
<div class="flex items-center gap-2">
|
||||
<label class="text-xs text-monokai-comment w-40">Taille titres</label>
|
||||
<input
|
||||
v-model.number="architectureTitleFontSize"
|
||||
type="number"
|
||||
min="10"
|
||||
max="32"
|
||||
class="w-24 px-2 py-1 bg-monokai-bg border border-monokai-comment rounded text-monokai-text text-xs"
|
||||
/>
|
||||
<button
|
||||
@click="saveArchitectureUi"
|
||||
:disabled="loading"
|
||||
class="px-3 py-2 bg-monokai-purple text-monokai-bg rounded hover:bg-monokai-pink transition-colors disabled:opacity-50"
|
||||
>
|
||||
Enregistrer
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Messages -->
|
||||
<div v-if="message" :class="['p-3 rounded', messageClass]">
|
||||
{{ message }}
|
||||
</div>
|
||||
|
||||
<!-- Contenu config.yaml -->
|
||||
<div>
|
||||
<h3 class="text-monokai-cyan font-bold mb-2">Fichier config.yaml</h3>
|
||||
<pre class="bg-monokai-bg border border-monokai-comment rounded p-4 text-sm text-monokai-text overflow-auto max-h-[400px] font-mono">{{ configContent }}</pre>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Footer -->
|
||||
<div class="p-4 border-t border-monokai-comment flex justify-end">
|
||||
<button
|
||||
@click="close"
|
||||
class="px-4 py-2 bg-monokai-comment text-monokai-bg rounded hover:bg-monokai-text transition-colors"
|
||||
>
|
||||
Fermer
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</template>
|
||||
|
||||
<script setup>
|
||||
import { ref, watch } from 'vue'
|
||||
|
||||
const props = defineProps({
|
||||
isOpen: {
|
||||
type: Boolean,
|
||||
default: false
|
||||
}
|
||||
})
|
||||
|
||||
const emit = defineEmits(['close', 'configReloaded'])
|
||||
|
||||
const configContent = ref('')
|
||||
const hardwareBenchUrl = ref('')
|
||||
const ouiStatus = ref({ exists: false, updated_at: null })
|
||||
const forceVendorUpdate = ref(false)
|
||||
const architectureTitleFontSize = ref(18)
|
||||
const loading = ref(false)
|
||||
const message = ref('')
|
||||
const messageType = ref('info')
|
||||
|
||||
// Computed class pour le message
|
||||
const messageClass = ref('')
|
||||
|
||||
// Re-fetch all settings data every time the modal is opened, so the
// displayed config is fresh without requiring a page reload.
watch(() => props.isOpen, async (newVal) => {
  if (newVal) {
    await loadConfigContent()
  }
})
|
||||
|
||||
// Fetch everything the settings modal displays, in parallel: raw config.yaml
// text, config options, OUI database status and UI preferences.
// Each response is applied independently, so one non-OK endpoint does not
// block the others (only a rejected fetch fails the whole Promise.all and
// reaches the catch).
async function loadConfigContent() {
  try {
    const [contentResponse, optionsResponse, ouiResponse, uiResponse] = await Promise.all([
      fetch('/api/ips/config/content'),
      fetch('/api/ips/config/options'),
      fetch('/api/ips/oui/status'),
      fetch('/api/config/ui')
    ])
    if (contentResponse.ok) {
      const data = await contentResponse.json()
      configContent.value = data.content
    } else {
      configContent.value = 'Erreur de chargement du fichier config.yaml'
    }

    if (optionsResponse.ok) {
      const options = await optionsResponse.json()
      hardwareBenchUrl.value = options.hardware_bench_url || ''
      forceVendorUpdate.value = Boolean(options.force_vendor_update)
    }

    if (ouiResponse.ok) {
      ouiStatus.value = await ouiResponse.json()
    }

    if (uiResponse.ok) {
      const uiData = await uiResponse.json()
      // 18px is the default title size when the backend has no stored value.
      architectureTitleFontSize.value = Number(uiData.architecture_title_font_size || 18)
    }
  } catch (error) {
    console.error('Erreur chargement config:', error)
    configContent.value = 'Erreur de chargement du fichier config.yaml'
  }
}
|
||||
|
||||
// Ask the backend to refresh its local OUI (MAC-vendor) database, then show
// a success/error banner and reload the displayed config. The banner
// auto-clears after 5 seconds.
async function updateOui() {
  loading.value = true
  message.value = ''

  try {
    const response = await fetch('/api/ips/oui/update', {
      method: 'POST'
    })
    if (response.ok) {
      const data = await response.json()
      message.value = `${data.message} (${data.updated_vendors} mises à jour)`
      messageType.value = 'success'
      messageClass.value = 'bg-monokai-green/20 text-monokai-green border border-monokai-green'
      await loadConfigContent()
    } else {
      const error = await response.json()
      message.value = error.detail || 'Erreur lors de la mise à jour'
      messageType.value = 'error'
      messageClass.value = 'bg-monokai-pink/20 text-monokai-pink border border-monokai-pink'
    }
  } catch (error) {
    console.error('Erreur mise à jour OUI:', error)
    message.value = 'Erreur de connexion au serveur'
    messageType.value = 'error'
    messageClass.value = 'bg-monokai-pink/20 text-monokai-pink border border-monokai-pink'
  } finally {
    loading.value = false
    // Auto-dismiss the banner after 5 seconds.
    setTimeout(() => {
      message.value = ''
    }, 5000)
  }
}
|
||||
|
||||
// Persist the "force vendor update on scans" checkbox to the backend config,
// then show a success/error banner and reload the displayed config.
// The banner auto-clears after 5 seconds.
async function saveForceVendor() {
  loading.value = true
  message.value = ''

  try {
    const response = await fetch('/api/ips/config/force-vendor', {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json'
      },
      body: JSON.stringify({ enabled: forceVendorUpdate.value })
    })

    if (response.ok) {
      const data = await response.json()
      message.value = data.message
      messageType.value = 'success'
      messageClass.value = 'bg-monokai-green/20 text-monokai-green border border-monokai-green'
      await loadConfigContent()
    } else {
      const error = await response.json()
      message.value = error.detail || 'Erreur lors de la mise à jour'
      messageType.value = 'error'
      messageClass.value = 'bg-monokai-pink/20 text-monokai-pink border border-monokai-pink'
    }
  } catch (error) {
    console.error('Erreur mise à jour force vendeur:', error)
    message.value = 'Erreur de connexion au serveur'
    messageType.value = 'error'
    messageClass.value = 'bg-monokai-pink/20 text-monokai-pink border border-monokai-pink'
  } finally {
    loading.value = false
    // Auto-dismiss the banner after 5 seconds.
    setTimeout(() => {
      message.value = ''
    }, 5000)
  }
}
|
||||
|
||||
// Persist the architecture-view title font size, show a banner, and apply
// the new size immediately via the --arch-title-size CSS custom property
// (no reload needed). The banner auto-clears after 5 seconds.
async function saveArchitectureUi() {
  loading.value = true
  message.value = ''

  try {
    const response = await fetch('/api/config/ui', {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json'
      },
      body: JSON.stringify({
        architecture_title_font_size: architectureTitleFontSize.value
      })
    })

    if (response.ok) {
      const data = await response.json()
      message.value = data.message
      messageType.value = 'success'
      messageClass.value = 'bg-monokai-green/20 text-monokai-green border border-monokai-green'
      // Apply the new size live, without waiting for a config reload.
      document.documentElement.style.setProperty(
        '--arch-title-size',
        `${architectureTitleFontSize.value}px`
      )
    } else {
      const error = await response.json()
      message.value = error.detail || 'Erreur lors de la mise à jour'
      messageType.value = 'error'
      messageClass.value = 'bg-monokai-pink/20 text-monokai-pink border border-monokai-pink'
    }
  } catch (error) {
    console.error('Erreur mise à jour UI architecture:', error)
    message.value = 'Erreur de connexion au serveur'
    messageType.value = 'error'
    messageClass.value = 'bg-monokai-pink/20 text-monokai-pink border border-monokai-pink'
  } finally {
    loading.value = false
    // Auto-dismiss the banner after 5 seconds.
    setTimeout(() => {
      message.value = ''
    }, 5000)
  }
}
|
||||
|
||||
// Format a date value for display using the fr-FR locale;
// returns 'N/A' for null/undefined/empty input.
function formatDate(value) {
  return value ? new Date(value).toLocaleString('fr-FR') : 'N/A'
}
|
||||
|
||||
// Persist the "hardware bench" link to the backend config, notify the parent
// that the config changed, show a banner, and reload the displayed config.
// The banner auto-clears after 5 seconds.
async function saveHardwareBenchUrl() {
  loading.value = true
  message.value = ''

  try {
    const response = await fetch('/api/ips/config/hardware-bench', {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json'
      },
      body: JSON.stringify({ url: hardwareBenchUrl.value })
    })

    if (response.ok) {
      const data = await response.json()
      message.value = data.message
      messageType.value = 'success'
      messageClass.value = 'bg-monokai-green/20 text-monokai-green border border-monokai-green'
      emit('configReloaded')
      await loadConfigContent()
    } else {
      const error = await response.json()
      message.value = error.detail || 'Erreur lors de la mise à jour'
      messageType.value = 'error'
      messageClass.value = 'bg-monokai-pink/20 text-monokai-pink border border-monokai-pink'
    }
  } catch (error) {
    console.error('Erreur mise à jour lien hardware bench:', error)
    message.value = 'Erreur de connexion au serveur'
    messageType.value = 'error'
    messageClass.value = 'bg-monokai-pink/20 text-monokai-pink border border-monokai-pink'
  } finally {
    loading.value = false
    // Auto-dismiss the banner after 5 seconds.
    setTimeout(() => {
      message.value = ''
    }, 5000)
  }
}
|
||||
|
||||
// Ask the backend to re-read config.yaml from disk, notify the parent so it
// can refresh dependent data, show a banner, and refresh the displayed
// config text. The banner auto-clears after 5 seconds.
async function reloadConfig() {
  loading.value = true
  message.value = ''

  try {
    const response = await fetch('/api/ips/config/reload', {
      method: 'POST'
    })

    if (response.ok) {
      const data = await response.json()
      message.value = data.message
      messageType.value = 'success'
      messageClass.value = 'bg-monokai-green/20 text-monokai-green border border-monokai-green'
      emit('configReloaded')

      // Refresh the config text shown in the modal.
      await loadConfigContent()
    } else {
      const error = await response.json()
      message.value = error.detail || 'Erreur lors du rechargement'
      messageType.value = 'error'
      messageClass.value = 'bg-monokai-pink/20 text-monokai-pink border border-monokai-pink'
    }
  } catch (error) {
    console.error('Erreur rechargement config:', error)
    message.value = 'Erreur de connexion au serveur'
    messageType.value = 'error'
    messageClass.value = 'bg-monokai-pink/20 text-monokai-pink border border-monokai-pink'
  } finally {
    loading.value = false
    // Auto-dismiss the banner after 5 seconds.
    setTimeout(() => {
      message.value = ''
    }, 5000)
  }
}
|
||||
|
||||
// File-input change handler: upload the selected IPScan XML export as
// multipart form data, report imported/updated counts (plus any per-row
// errors), and notify the parent so the IP list refreshes. The input is
// reset afterwards so re-selecting the same file re-triggers the event.
// The banner auto-clears after 8 seconds (longer, since error lists can be long).
async function importIpscan(event) {
  const file = event.target.files[0]
  if (!file) return

  loading.value = true
  message.value = ''

  const formData = new FormData()
  formData.append('file', file)

  try {
    const response = await fetch('/api/ips/import/ipscan', {
      method: 'POST',
      body: formData
    })

    if (response.ok) {
      const data = await response.json()
      message.value = `Import réussi: ${data.imported} nouvelles IPs, ${data.updated} mises à jour`
      if (data.errors && data.errors.length > 0) {
        message.value += `\nErreurs: ${data.errors.join(', ')}`
      }
      messageType.value = 'success'
      messageClass.value = 'bg-monokai-green/20 text-monokai-green border border-monokai-green'
      emit('configReloaded') // Refresh the IP list in the parent
    } else {
      const error = await response.json()
      message.value = error.detail || 'Erreur lors de l\'import'
      messageType.value = 'error'
      messageClass.value = 'bg-monokai-pink/20 text-monokai-pink border border-monokai-pink'
    }
  } catch (error) {
    console.error('Erreur import:', error)
    message.value = 'Erreur de connexion au serveur'
    messageType.value = 'error'
    messageClass.value = 'bg-monokai-pink/20 text-monokai-pink border border-monokai-pink'
  } finally {
    loading.value = false
    event.target.value = '' // Reset input so the same file can be re-imported
    setTimeout(() => {
      message.value = ''
    }, 8000)
  }
}
|
||||
|
||||
// Ask the parent to close the modal — visibility is owned by the parent
// through the `isOpen` prop, so this only emits.
function close() {
  emit('close')
}
|
||||
</script>
|
||||
190
frontend/src/components/SystemStats.vue
Executable file
@@ -0,0 +1,190 @@
|
||||
<template>
|
||||
<div class="flex items-center gap-3 text-xs">
|
||||
<!-- Indicateur CPU -->
|
||||
<div class="flex items-center gap-1.5">
|
||||
<span class="text-[11px] text-monokai-comment">CPU</span>
|
||||
<span
|
||||
:class="[
|
||||
'font-mono font-bold text-[11px]',
|
||||
cpuColorClass
|
||||
]"
|
||||
>
|
||||
{{ stats.cpu_percent }}%
|
||||
</span>
|
||||
</div>
|
||||
|
||||
<!-- Séparateur -->
|
||||
<div class="h-6 w-px bg-monokai-comment"></div>
|
||||
|
||||
<!-- Indicateur RAM Système -->
|
||||
<div class="flex items-center gap-1.5">
|
||||
<span class="text-[11px] text-monokai-comment">RAM PC</span>
|
||||
<span
|
||||
:class="[
|
||||
'font-mono font-bold text-[11px]',
|
||||
ramColorClass
|
||||
]"
|
||||
>
|
||||
{{ stats.ram_used }} MB
|
||||
</span>
|
||||
</div>
|
||||
|
||||
<!-- Séparateur -->
|
||||
<div class="h-6 w-px bg-monokai-comment"></div>
|
||||
|
||||
<!-- Indicateur RAM App -->
|
||||
<div class="flex items-center gap-1.5">
|
||||
<span class="text-[11px] text-monokai-comment">RAM App</span>
|
||||
<span class="font-mono font-bold text-[11px] text-monokai-purple">
|
||||
{{ stats.process_ram_mb }} MB
|
||||
</span>
|
||||
</div>
|
||||
|
||||
<!-- Tooltip détaillé au survol -->
|
||||
<div class="relative group">
|
||||
<button
|
||||
class="text-monokai-comment hover:text-monokai-cyan transition-colors text-xs"
|
||||
title="Détails système"
|
||||
>
|
||||
ⓘ
|
||||
</button>
|
||||
|
||||
<!-- Tooltip -->
|
||||
<div
|
||||
class="absolute right-0 top-full mt-2 w-64 bg-monokai-bg border border-monokai-comment rounded-lg p-3 opacity-0 invisible group-hover:opacity-100 group-hover:visible transition-all duration-200 z-50 shadow-lg"
|
||||
>
|
||||
<div class="text-xs space-y-2">
|
||||
<!-- CPU détaillé -->
|
||||
<div>
|
||||
<div class="text-monokai-cyan font-bold mb-1">Processeur</div>
|
||||
<div class="flex justify-between text-monokai-text">
|
||||
<span>Utilisation:</span>
|
||||
<span class="font-mono">{{ stats.cpu_percent }}%</span>
|
||||
</div>
|
||||
<div class="flex justify-between text-monokai-text">
|
||||
<span>Cœurs:</span>
|
||||
<span class="font-mono">{{ stats.cpu_count }}</span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="border-t border-monokai-comment"></div>
|
||||
|
||||
<!-- RAM détaillée -->
|
||||
<div>
|
||||
<div class="text-monokai-green font-bold mb-1">Mémoire</div>
|
||||
<div class="flex justify-between text-monokai-text">
|
||||
<span>Utilisée:</span>
|
||||
<span class="font-mono">{{ stats.ram_used }} MB</span>
|
||||
</div>
|
||||
<div class="flex justify-between text-monokai-text">
|
||||
<span>Disponible:</span>
|
||||
<span class="font-mono">{{ stats.ram_available }} MB</span>
|
||||
</div>
|
||||
<div class="flex justify-between text-monokai-text">
|
||||
<span>Total:</span>
|
||||
<span class="font-mono">{{ stats.ram_total }} MB</span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="border-t border-monokai-comment"></div>
|
||||
|
||||
<!-- Processus IPWatch -->
|
||||
<div>
|
||||
<div class="text-monokai-purple font-bold mb-1">IPWatch</div>
|
||||
<div class="flex justify-between text-monokai-text">
|
||||
<span>RAM:</span>
|
||||
<span class="font-mono">{{ stats.process_ram_mb }} MB</span>
|
||||
</div>
|
||||
<div class="flex justify-between text-monokai-text">
|
||||
<span>CPU:</span>
|
||||
<span class="font-mono">{{ formatPercent(stats.process_cpu_percent) }}%</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Icône refresh -->
|
||||
<button
|
||||
@click="fetchStats"
|
||||
:disabled="loading"
|
||||
class="text-monokai-comment hover:text-monokai-cyan transition-colors disabled:opacity-50"
|
||||
:class="{ 'animate-spin': loading }"
|
||||
title="Rafraîchir"
|
||||
>
|
||||
↻
|
||||
</button>
|
||||
</div>
|
||||
</template>
|
||||
|
||||
<script setup>
|
||||
import { ref, computed, onMounted, onUnmounted } from 'vue'
|
||||
|
||||
const stats = ref({
|
||||
cpu_percent: 0,
|
||||
cpu_count: 0,
|
||||
ram_percent: 0,
|
||||
ram_used: 0,
|
||||
ram_total: 0,
|
||||
ram_available: 0,
|
||||
process_ram_mb: 0,
|
||||
process_cpu_percent: 0
|
||||
})
|
||||
|
||||
const loading = ref(false)
|
||||
let intervalId = null
|
||||
|
||||
// Couleur CPU selon le pourcentage
|
||||
const cpuColorClass = computed(() => {
|
||||
const percent = stats.value.cpu_percent
|
||||
if (percent >= 80) return 'text-monokai-pink'
|
||||
if (percent >= 60) return 'text-monokai-yellow'
|
||||
return 'text-monokai-cyan'
|
||||
})
|
||||
|
||||
// Couleur RAM selon le pourcentage
|
||||
const ramColorClass = computed(() => {
|
||||
const percent = stats.value.ram_percent
|
||||
if (percent >= 80) return 'text-monokai-pink'
|
||||
if (percent >= 60) return 'text-monokai-yellow'
|
||||
return 'text-monokai-green'
|
||||
})
|
||||
|
||||
/**
 * Format a percentage for display with exactly one decimal place.
 *
 * Returns '0.0' for anything that does not coerce to a finite number
 * (null, undefined, NaN, Infinity, non-numeric strings). The previous
 * check only caught the NaN value itself, so a non-numeric string or
 * Infinity would render as 'NaN' / 'Infinity' in the UI.
 *
 * @param {number|string|null|undefined} value - raw percentage
 * @returns {string} the value formatted as 'X.Y'
 */
function formatPercent(value) {
  const n = Number(value)
  return Number.isFinite(n) ? n.toFixed(1) : '0.0'
}
|
||||
|
||||
/**
 * Refresh the system stats from the backend.
 *
 * Failures (HTTP errors or network issues) are deliberately silent:
 * nothing is logged (to avoid console spam) and the previously displayed
 * values are kept on screen.
 */
async function fetchStats() {
  loading.value = true
  try {
    const response = await fetch('/api/system/stats')
    if (response.ok) {
      stats.value = await response.json()
    }
    // Non-OK response: keep showing the last known stats.
  } catch {
    // Network hiccup (temporary disconnect, etc.): keep the last known stats.
  } finally {
    loading.value = false
  }
}
|
||||
|
||||
onMounted(() => {
|
||||
// Charger immédiatement
|
||||
fetchStats()
|
||||
|
||||
// Rafraîchir toutes les 5 secondes
|
||||
intervalId = setInterval(fetchStats, 5000)
|
||||
})
|
||||
|
||||
onUnmounted(() => {
|
||||
if (intervalId) {
|
||||
clearInterval(intervalId)
|
||||
}
|
||||
})
|
||||
</script>
|
||||
13
frontend/src/main.js
Executable file
@@ -0,0 +1,13 @@
|
||||
import { createApp } from 'vue'
import { createPinia } from 'pinia'
import { router } from './router'
import App from './App.vue'
import './assets/main.css'
import '@mdi/font/css/materialdesignicons.css'

// Application entry point: install Pinia and the router, then mount the
// root component on the #app element.
createApp(App)
  .use(createPinia())
  .use(router)
  .mount('#app')
|
||||
55
frontend/src/router/index.js
Normal file
@@ -0,0 +1,55 @@
|
||||
/**
 * Vue Router configuration for IPWatch.
 * Declares the four views (dashboard, tracking, architecture, tests) and
 * keeps the browser tab title in sync with the active route.
 */
import { createRouter, createWebHistory } from 'vue-router'
import MainView from '@/views/MainView.vue'
import TrackingView from '@/views/TrackingView.vue'
import ArchitectureView from '@/views/ArchitectureView.vue'
import TestView from '@/views/TestView.vue'

// Small factory so each route declaration fits on one line.
const view = (path, name, component, title) => ({ path, name, component, meta: { title } })

const routes = [
  view('/', 'main', MainView, 'IPWatch - Scanner réseau'),
  view('/tracking', 'tracking', TrackingView, 'IPWatch - Équipements suivis'),
  view('/architecture', 'architecture', ArchitectureView, 'IPWatch - Architecture réseau'),
  view('/test', 'test', TestView, 'IPWatch - Tests réseau')
]

export const router = createRouter({
  history: createWebHistory(),
  routes
})

// Keep document.title in sync with the destination route's meta title.
router.beforeEach((to, from, next) => {
  document.title = to.meta.title || 'IPWatch'
  next()
})
|
||||
450
frontend/src/stores/ipStore.js
Executable file
@@ -0,0 +1,450 @@
|
||||
/**
|
||||
* Store Pinia pour la gestion des IPs
|
||||
*/
|
||||
import { defineStore } from 'pinia'
|
||||
import { ref, computed } from 'vue'
|
||||
import axios from 'axios'
|
||||
|
||||
export const useIPStore = defineStore('ip', () => {
  // --- State ---
  const ips = ref([])              // full IP list loaded from the backend
  const selectedIP = ref(null)     // IP currently shown in the details pane
  const loading = ref(false)
  const error = ref(null)
  const stats = ref({
    total: 0,
    online: 0,
    offline: 0,
    known: 0,
    unknown: 0
  })
  const searchQuery = ref('')
  const invertSearch = ref(false)  // when true, the text match is negated
  const lastScanDate = ref(null)
  const scanProgress = ref({
    current: 0,
    total: 0,
    currentIP: null
  })
  const scanLogs = ref([])         // rolling scan log, capped at 200 lines
  const isScanning = ref(false)
  const uiConfig = ref({
    cell_size: 30,
    architecture_title_font_size: 18
  })
  const configReloadTick = ref(0)  // bumped to signal "config was reloaded"

  // --- WebSocket ---
  const ws = ref(null)
  const wsConnected = ref(false)
  // FIX: timers must be tracked so they can be cleared. The previous code
  // started a new 30s heartbeat setInterval on every (re)connection without
  // ever clearing one (leaking an interval per reconnect), and kept
  // auto-reconnecting even after an explicit disconnectWebSocket() call.
  let heartbeatId = null
  let reconnectId = null
  let intentionalClose = false

  // --- Computed ---
  // IPs matching the current search query and filter flags.
  const filteredIPs = computed(() => {
    const { tokens, flags } = parseSearchQuery(searchQuery.value)

    return ips.value.filter(ip => {
      // Status/flag filters are always conjunctive (AND).
      if (flags.requireOnline && ip.last_status !== 'online') return false
      if (flags.requireOffline && ip.last_status !== 'offline') return false
      if (flags.requireFree && ip.last_status) return false
      if (flags.requireKnown && !ip.known) return false
      if (flags.requireUnknown && ip.known) return false
      if (flags.requireTracked && !ip.tracked) return false
      if (flags.requireVm && !ip.vm) return false
      if (flags.requireHardwareBench && !ip.hardware_bench) return false

      let matches = true

      if (tokens.length > 0) {
        const haystack = normalizeText([
          ip.ip,
          ip.name,
          ip.hostname,
          ip.host,
          ip.location,
          ip.mac,
          ip.vendor,
          ip.link,
          ip.last_status,
          ip.tracked ? 'suivie' : '',
          ip.vm ? 'vm' : '',
          ip.hardware_bench ? 'hardware' : '',
          (ip.open_ports || []).join(' ')
        ].filter(Boolean).join(' '))

        // OR semantics: match if at least one token is present.
        matches = tokens.some(token => haystack.includes(token))
      }

      return invertSearch.value ? !matches : matches
    })
  })

  // --- Actions ---

  // Apply the UI sizing config as CSS custom properties on <html>.
  // NOTE(review): assumes the payload also carries font_size and cell_gap,
  // which are not in the local defaults — confirm against the backend schema.
  function applyUISettings(ui) {
    uiConfig.value = ui
    document.documentElement.style.setProperty('--cell-size', `${ui.cell_size}px`)
    document.documentElement.style.setProperty('--font-size', `${ui.font_size}px`)
    document.documentElement.style.setProperty('--cell-gap', `${ui.cell_gap}px`)
    document.documentElement.style.setProperty(
      '--arch-title-size',
      `${ui.architecture_title_font_size}px`
    )
  }

  // Load the UI configuration and apply it (errors are logged, not thrown).
  async function fetchUIConfig() {
    try {
      const response = await axios.get('/api/config/ui')
      applyUISettings(response.data)
    } catch (err) {
      console.error('Erreur chargement config UI:', err)
    }
  }

  // Ask the backend to reload its configuration, then apply the new UI part.
  // Returns the backend's message on success; rethrows on failure.
  async function reloadConfig() {
    try {
      const response = await axios.post('/api/config/reload')
      if (response.data.success) {
        applyUISettings(response.data.ui)
        return response.data.message
      }
    } catch (err) {
      console.error('Erreur rechargement config:', err)
      throw err
    }
  }

  // Signal listening components that the configuration was reloaded.
  function bumpConfigReload() {
    configReloadTick.value += 1
  }

  // Load the IP list, then refresh the summary stats.
  async function fetchIPs() {
    loading.value = true
    error.value = null

    try {
      const response = await axios.get('/api/ips/')
      ips.value = response.data
      await fetchStats()
    } catch (err) {
      error.value = err.message
      console.error('Erreur chargement IPs:', err)
    } finally {
      loading.value = false
    }
  }

  // Load the summary statistics (errors are logged, not thrown).
  async function fetchStats() {
    try {
      const response = await axios.get('/api/ips/stats/summary')
      stats.value = response.data
    } catch (err) {
      console.error('Erreur chargement stats:', err)
    }
  }

  // Update one IP on the backend and mirror the change in the store.
  async function updateIP(ipAddress, data) {
    try {
      const response = await axios.put(`/api/ips/${ipAddress}`, data)

      const index = ips.value.findIndex(ip => ip.ip === ipAddress)
      if (index !== -1) {
        ips.value[index] = response.data
      }

      if (selectedIP.value?.ip === ipAddress) {
        selectedIP.value = response.data
      }

      return response.data
    } catch (err) {
      error.value = err.message
      throw err
    }
  }

  // Delete one IP on the backend and remove it from the store.
  async function deleteIP(ipAddress) {
    try {
      await axios.delete(`/api/ips/${ipAddress}`)

      const index = ips.value.findIndex(ip => ip.ip === ipAddress)
      if (index !== -1) {
        ips.value.splice(index, 1)
      }

      if (selectedIP.value?.ip === ipAddress) {
        selectedIP.value = null
      }

      await fetchStats()
    } catch (err) {
      error.value = err.message
      throw err
    }
  }

  // Fetch one IP's status history over the last `hours` hours.
  async function getIPHistory(ipAddress, hours = 24) {
    try {
      const response = await axios.get(`/api/ips/${ipAddress}/history?hours=${hours}`)
      return response.data
    } catch (err) {
      console.error('Erreur chargement historique:', err)
      throw err
    }
  }

  // Trigger a network scan on the backend.
  async function startScan() {
    try {
      await axios.post('/api/scan/start')
    } catch (err) {
      error.value = err.message
      throw err
    }
  }

  // Select an IP for the details pane.
  function selectIP(ip) {
    selectedIP.value = ip
  }

  // Clear the current selection.
  function clearSelection() {
    selectedIP.value = null
  }

  // --- WebSocket handling ---

  // Open the WebSocket connection and install its handlers.
  function connectWebSocket() {
    const protocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:'
    const wsUrl = `${protocol}//${window.location.host}/ws`

    intentionalClose = false
    ws.value = new WebSocket(wsUrl)

    ws.value.onopen = () => {
      console.log('WebSocket connecté')
      wsConnected.value = true

      // 30s heartbeat; clear any previous timer first (FIX: interval leak).
      if (heartbeatId !== null) clearInterval(heartbeatId)
      heartbeatId = setInterval(() => {
        if (ws.value?.readyState === WebSocket.OPEN) {
          ws.value.send('ping')
        }
      }, 30000)
    }

    ws.value.onmessage = (event) => {
      // Plain-text ping/pong frames are not JSON: ignore them.
      if (typeof event.data === 'string' && (event.data === 'ping' || event.data === 'pong')) {
        return
      }

      try {
        const message = JSON.parse(event.data)
        handleWebSocketMessage(message)
      } catch {
        // Silently ignore non-JSON frames.
      }
    }

    ws.value.onerror = () => {
      // A close event will follow; just reflect the connection state.
      wsConnected.value = false
    }

    ws.value.onclose = (event) => {
      wsConnected.value = false

      // Stop this connection's heartbeat (FIX: interval leak).
      if (heartbeatId !== null) {
        clearInterval(heartbeatId)
        heartbeatId = null
      }

      // FIX: do not auto-reconnect after an explicit disconnectWebSocket();
      // previously the null ws.value made the reconnect check pass.
      if (intentionalClose) return

      // Only log abnormal closures.
      if (!event.wasClean) {
        console.log('WebSocket déconnecté - reconnexion dans 5s...')
      }

      // Reconnect after 5s unless a connection is already up again.
      reconnectId = setTimeout(() => {
        reconnectId = null
        if (!ws.value || ws.value.readyState === WebSocket.CLOSED) {
          connectWebSocket()
        }
      }, 5000)
    }
  }

  // Dispatch one parsed WebSocket message to the matching state update.
  function handleWebSocketMessage(message) {
    // Log everything except scan_progress (too chatty).
    if (message.type !== 'scan_progress') {
      console.log('WebSocket:', message.type)
    }

    switch (message.type) {
      case 'scan_start':
        // A scan just started: reset progress and the log buffer.
        isScanning.value = true
        scanLogs.value = []
        scanProgress.value = {
          current: 0,
          total: message.total || 0,
          currentIP: null
        }
        break

      case 'scan_progress':
        // Incremental scan progress.
        if (message.current) scanProgress.value.current = message.current
        if (message.total) scanProgress.value.total = message.total
        if (message.ip) scanProgress.value.currentIP = message.ip
        break

      case 'scan_complete':
        // Scan finished: refresh data and reset progress.
        isScanning.value = false
        lastScanDate.value = new Date()
        scanProgress.value = { current: 0, total: 0, currentIP: null }
        fetchIPs()
        if (message.stats) stats.value = message.stats
        break

      case 'ip_update': {
        // FIX: braces scope the const to this case (no-case-declarations).
        const updatedIP = ips.value.find(ip => ip.ip === message.data.ip)
        if (updatedIP) {
          Object.assign(updatedIP, message.data)
        }
        break
      }

      case 'new_ip':
        // New IP detected: reload the full list to stay consistent.
        fetchIPs()
        break

      case 'scan_log':
        if (message.message) {
          scanLogs.value.push(message.message)
          // Keep only the most recent 200 log lines.
          if (scanLogs.value.length > 200) {
            scanLogs.value.splice(0, scanLogs.value.length - 200)
          }
        }
        break
    }
  }

  // Close the WebSocket and cancel pending timers (no auto-reconnect).
  function disconnectWebSocket() {
    intentionalClose = true
    if (reconnectId !== null) {
      clearTimeout(reconnectId)
      reconnectId = null
    }
    if (heartbeatId !== null) {
      clearInterval(heartbeatId)
      heartbeatId = null
    }
    if (ws.value) {
      ws.value.close()
      ws.value = null
      wsConnected.value = false
    }
  }

  return {
    // State
    ips,
    selectedIP,
    loading,
    error,
    stats,
    searchQuery,
    invertSearch,
    wsConnected,
    lastScanDate,
    scanProgress,
    scanLogs,
    isScanning,
    uiConfig,
    configReloadTick,

    // Computed
    filteredIPs,

    // Actions
    fetchUIConfig,
    reloadConfig,
    bumpConfigReload,
    fetchIPs,
    fetchStats,
    updateIP,
    deleteIP,
    getIPHistory,
    startScan,
    selectIP,
    clearSelection,
    connectWebSocket,
    disconnectWebSocket
  }
})
|
||||
|
||||
/**
 * Normalize arbitrary input for accent-insensitive search:
 * null/undefined become '', accents are stripped (NFD decomposition, then
 * removal of the combining marks), and the result is lower-cased and trimmed.
 */
function normalizeText(value) {
  const raw = value == null ? '' : String(value)
  const decomposed = raw.normalize('NFD')
  const withoutAccents = decomposed.replace(/[\u0300-\u036f]/g, '')
  return withoutAccents.toLowerCase().trim()
}
|
||||
|
||||
/**
 * Parse a search query into free-text tokens and status filter flags.
 *
 * Recognized keywords (after accent/case normalization) are consumed into
 * flags instead of being matched as text:
 *   "en ligne" / "enligne" / "en-ligne"        -> requireOnline
 *   "hors ligne" / "horsligne" / "hors-ligne"  -> requireOffline
 *   "connue" / "inconnue" / "libre"            -> known / unknown / free
 *   "suivie" / "suivi"                         -> requireTracked
 *   "vm"                                       -> requireVm
 *   "hardware" / "bench" / "hardware_bench"    -> requireHardwareBench
 *
 * @param {string} query - raw user query
 * @returns {{tokens: string[], flags: Object}} remaining free-text tokens
 *   plus the boolean filter flags.
 */
function parseSearchQuery(query) {
  let normalized = normalizeText(query)

  const flags = {
    requireOnline: false,
    requireOffline: false,
    requireKnown: false,
    requireUnknown: false,
    requireFree: false,
    requireTracked: false,
    requireVm: false,
    requireHardwareBench: false
  }

  // FIX: the previous code called .test() on /g-flagged regexes, which
  // advance lastIndex between calls (a stateful footgun). Detection now
  // uses non-global regexes; /g is only used for the removal pass.
  if (/\ben\s+ligne\b/.test(normalized)) flags.requireOnline = true
  if (/\bhors\s+ligne\b/.test(normalized)) flags.requireOffline = true

  normalized = normalized
    .replace(/\ben\s+ligne\b/g, ' ')
    .replace(/\bhors\s+ligne\b/g, ' ')

  // Keyword -> flag table. A Map avoids prototype-pollution issues that a
  // plain-object lookup would have for tokens like "constructor".
  const keywordFlags = new Map([
    ['connue', 'requireKnown'],
    ['inconnue', 'requireUnknown'],
    ['libre', 'requireFree'],
    ['suivie', 'requireTracked'],
    ['suivi', 'requireTracked'],
    ['vm', 'requireVm'],
    ['hardware', 'requireHardwareBench'],
    ['bench', 'requireHardwareBench'],
    ['hardware_bench', 'requireHardwareBench'],
    ['enligne', 'requireOnline'],
    ['en-ligne', 'requireOnline'],
    ['horsligne', 'requireOffline'],
    ['hors-ligne', 'requireOffline']
  ])

  // Consume keyword tokens into flags; keep the rest as free-text tokens.
  const tokens = normalized
    .split(/\s+/)
    .filter(Boolean)
    .filter(token => {
      const flagName = keywordFlags.get(token)
      if (flagName) {
        flags[flagName] = true
        return false
      }
      return true
    })

  return { tokens, flags }
}
|
||||
1542
frontend/src/views/ArchitectureView.vue
Normal file
51
frontend/src/views/MainView.vue
Normal file
@@ -0,0 +1,51 @@
|
||||
<template>
  <div class="h-screen flex flex-col bg-monokai-bg">
    <!-- Top application header; emits openSettings to open the modal -->
    <AppHeader @openSettings="showSettings = true" />

    <!-- 3-column layout as specified in consigne-design_webui.md -->
    <div class="flex-1 flex overflow-hidden">
      <!-- Left column: details of the selected IP -->
      <div class="w-80 flex-shrink-0">
        <IPDetails :showFilters="false" />
      </div>

      <!-- Center column: IP grid organized as a tree -->
      <div class="flex-1">
        <IPGridTree />
      </div>

      <!-- Right column: new detections on top, filters below -->
      <div class="w-80 flex-shrink-0 flex flex-col">
        <NewDetections class="flex-1" />
        <IPDetails :showOnlyFilters="true" />
      </div>
    </div>

    <!-- Settings modal (hidden until showSettings is true) -->
    <SettingsModal
      :isOpen="showSettings"
      @close="showSettings = false"
      @configReloaded="handleConfigReloaded"
    />
  </div>
</template>
|
||||
|
||||
<script setup>
// Main dashboard view: wires the header, the 3-column layout and the
// settings modal to the IP store.
import { ref } from 'vue'
import { useIPStore } from '@/stores/ipStore'
import AppHeader from '@/components/AppHeader.vue'
import IPDetails from '@/components/IPDetails.vue'
import IPGridTree from '@/components/IPGridTree.vue'
import NewDetections from '@/components/NewDetections.vue'
import SettingsModal from '@/components/SettingsModal.vue'

const ipStore = useIPStore()
// Visibility flag for the settings modal.
const showSettings = ref(false)

// Reload the IPs after a config import/reload, then bump the reload tick
// so listening components can react.
async function handleConfigReloaded() {
  await ipStore.fetchIPs()
  ipStore.bumpConfigReload()
}
</script>
|
||||
151
frontend/src/views/TestView.vue
Normal file
@@ -0,0 +1,151 @@
|
||||
<template>
  <div class="h-screen flex flex-col bg-monokai-bg">
    <!-- Header: app title + system stats + navigation/settings buttons -->
    <header class="bg-monokai-bg border-b-2 border-monokai-comment p-4">
      <div class="flex items-center justify-between">
        <div class="flex items-center gap-4">
          <h1 class="text-3xl font-bold text-monokai-green">IPWatch</h1>
          <div class="flex flex-col">
            <span class="text-monokai-comment">Tests réseau</span>
            <span class="text-xs text-monokai-comment/60">bêta</span>
          </div>
        </div>

        <div class="flex items-center gap-6">
          <SystemStats />

          <!-- Vertical separator -->
          <div class="h-8 w-px bg-monokai-comment"></div>

          <button
            @click="goToDashboard"
            class="px-4 py-2 rounded bg-monokai-yellow text-monokai-bg font-bold hover:bg-monokai-orange transition-colors"
            title="Retour au dashboard"
          >
            ← Dashboard
          </button>

          <button
            @click="openSettings"
            class="px-4 py-2 rounded bg-monokai-purple text-monokai-bg text-sm hover:bg-monokai-pink transition-colors"
            title="Ouvrir les paramètres"
          >
            ⚙ Paramètres
          </button>
        </div>
      </div>
    </header>

    <!-- Layout: commands sidebar / test area / resizable history panel -->
    <div class="flex-1 flex overflow-hidden">
      <!-- Left column: test commands (placeholder for now) -->
      <div class="w-80 flex-shrink-0 border-r border-monokai-comment">
        <div class="p-4">
          <h2 class="text-lg font-bold text-monokai-cyan">Commandes</h2>
          <p class="text-sm text-monokai-comment mt-2">
            Commandes de test (à venir).
          </p>
        </div>
      </div>

      <!-- Center: test area placeholder -->
      <div class="flex-1 overflow-auto p-6">
        <div class="border border-dashed border-monokai-comment rounded-lg h-full flex items-center justify-center text-monokai-comment">
          <div class="text-center">
            <div class="text-xl text-monokai-cyan font-bold">Zone de test</div>
            <div class="text-sm mt-2">Ping, traceroute, etc.</div>
          </div>
        </div>
      </div>

      <!-- Right: history panel, resizable via the left-edge drag handle -->
      <div
        class="relative flex-shrink-0 border-l border-monokai-comment"
        :style="{ width: `${rightPanelWidth}px` }"
      >
        <!-- Invisible 2px-wide drag handle on the panel's left edge -->
        <div
          class="absolute left-0 top-0 bottom-0 w-2 cursor-ew-resize"
          @mousedown.prevent="startResize"
        ></div>
        <!-- Decorative grip dots next to the handle -->
        <div class="absolute left-1 top-1/2 -translate-y-1/2 flex flex-col gap-1 opacity-70 pointer-events-none">
          <span class="w-1.5 h-1.5 rounded-full bg-monokai-comment/70"></span>
          <span class="w-1.5 h-1.5 rounded-full bg-monokai-comment/70"></span>
          <span class="w-1.5 h-1.5 rounded-full bg-monokai-comment/70"></span>
          <span class="w-1.5 h-1.5 rounded-full bg-monokai-comment/70"></span>
        </div>
        <div class="p-4 h-full flex flex-col">
          <h2 class="text-lg font-bold text-monokai-pink">Historique</h2>
          <div class="mt-3 bg-monokai-bg border border-monokai-comment rounded flex-1 min-h-0 overflow-auto">
            <pre class="text-xs text-monokai-text font-mono p-3 whitespace-pre-wrap">
{{ scanLogs.length ? scanLogs.join('\n') : 'Aucun log pour le moment.' }}
            </pre>
          </div>
        </div>
      </div>
    </div>

    <!-- Settings modal (hidden until showSettings is true) -->
    <SettingsModal
      :isOpen="showSettings"
      @close="showSettings = false"
      @configReloaded="handleConfigReloaded"
    />
  </div>
</template>
|
||||
|
||||
<script setup>
|
||||
import { ref, onBeforeUnmount } from 'vue'
|
||||
import { useRouter } from 'vue-router'
|
||||
import { useIPStore } from '@/stores/ipStore'
|
||||
import SystemStats from '@/components/SystemStats.vue'
|
||||
import SettingsModal from '@/components/SettingsModal.vue'
|
||||
import { storeToRefs } from 'pinia'
|
||||
|
||||
const router = useRouter()
|
||||
const ipStore = useIPStore()
|
||||
const showSettings = ref(false)
|
||||
const { scanLogs } = storeToRefs(ipStore)
|
||||
const rightPanelWidth = ref(384)
|
||||
const minPanelWidth = 288
|
||||
const maxPanelWidth = 576
|
||||
let isResizing = false
|
||||
let startX = 0
|
||||
let startWidth = 0
|
||||
|
||||
function openSettings() {
|
||||
showSettings.value = true
|
||||
}
|
||||
|
||||
function goToDashboard() {
|
||||
router.push('/')
|
||||
}
|
||||
|
||||
async function handleConfigReloaded() {
|
||||
await ipStore.fetchIPs()
|
||||
ipStore.bumpConfigReload()
|
||||
}
|
||||
|
||||
function clampWidth(value) {
|
||||
return Math.max(minPanelWidth, Math.min(maxPanelWidth, value))
|
||||
}
|
||||
|
||||
function onMouseMove(event) {
|
||||
if (!isResizing) return
|
||||
const delta = startX - event.clientX
|
||||
rightPanelWidth.value = clampWidth(startWidth + delta)
|
||||
}
|
||||
|
||||
function stopResize() {
|
||||
if (!isResizing) return
|
||||
isResizing = false
|
||||
window.removeEventListener('mousemove', onMouseMove)
|
||||
window.removeEventListener('mouseup', stopResize)
|
||||
}
|
||||
|
||||
function startResize(event) {
|
||||
isResizing = true
|
||||
startX = event.clientX
|
||||
startWidth = rightPanelWidth.value
|
||||
window.addEventListener('mousemove', onMouseMove)
|
||||
window.addEventListener('mouseup', stopResize)
|
||||
}
|
||||
|
||||
onBeforeUnmount(() => {
|
||||
stopResize()
|
||||
})
|
||||
</script>
|
||||