add go bench client

This commit is contained in:
Gilles Soulier
2026-01-11 23:41:30 +01:00
parent c67befc549
commit 6abc70cdfe
80 changed files with 13311 additions and 61 deletions

View File

@@ -70,7 +70,7 @@ class HardwareSnapshot(Base):
display_server = Column(String(50), nullable=True)
session_type = Column(String(50), nullable=True)
last_boot_time = Column(String(50), nullable=True)
uptime_seconds = Column(Integer, nullable=True)
uptime_seconds = Column(Float, nullable=True)
battery_percentage = Column(Float, nullable=True)
battery_status = Column(String(50), nullable=True)
battery_health = Column(String(50), nullable=True)

View File

@@ -28,8 +28,8 @@ class DiskResults(BaseModel):
"""Disk benchmark results"""
read_mb_s: Optional[float] = Field(None, ge=0)
write_mb_s: Optional[float] = Field(None, ge=0)
iops_read: Optional[int] = Field(None, ge=0)
iops_write: Optional[int] = Field(None, ge=0)
iops_read: Optional[float] = Field(None, ge=0)
iops_write: Optional[float] = Field(None, ge=0)
latency_ms: Optional[float] = Field(None, ge=0)
score: Optional[float] = Field(None, ge=0, le=50000)

View File

@@ -133,7 +133,7 @@ class OSInfo(BaseModel):
display_server: Optional[str] = None
screen_resolution: Optional[str] = None
last_boot_time: Optional[str] = None
uptime_seconds: Optional[int] = None
uptime_seconds: Optional[float] = None
battery_percentage: Optional[float] = None
battery_status: Optional[str] = None
battery_health: Optional[str] = None
@@ -233,7 +233,7 @@ class HardwareSnapshotResponse(BaseModel):
display_server: Optional[str] = None
session_type: Optional[str] = None
last_boot_time: Optional[str] = None
uptime_seconds: Optional[int] = None
uptime_seconds: Optional[float] = None
battery_percentage: Optional[float] = None
battery_status: Optional[str] = None
battery_health: Optional[str] = None

View File

@@ -0,0 +1,157 @@
"""
File Organizer - Organize uploads by hostname
"""
import os
import re
from pathlib import Path
from typing import Tuple
def sanitize_hostname(hostname: str) -> str:
    """
    Sanitize hostname for use as a directory name.

    Args:
        hostname: The hostname to sanitize

    Returns:
        Sanitized hostname safe for use as a directory name
        ("unknown" when nothing usable remains)
    """
    # Keep only word characters, hyphens and dots; everything else becomes '_'
    sanitized = re.sub(r'[^\w\-.]', '_', hostname)
    # Collapse runs of underscores produced by the substitution
    sanitized = re.sub(r'_+', '_', sanitized)
    # Drop leading/trailing dots and underscores
    sanitized = sanitized.strip('._')
    # Cap directory-name length, then strip again: the cut itself can expose
    # a trailing '.' or '_' (the original code skipped this second strip)
    sanitized = sanitized[:100].strip('._')
    # Stable default when the input sanitizes away entirely
    return sanitized or 'unknown'
def get_device_upload_paths(base_upload_dir: str, hostname: str) -> Tuple[str, str]:
    """
    Build the organized upload paths for one device.

    Args:
        base_upload_dir: Base upload directory (e.g., "./uploads")
        hostname: Device hostname

    Returns:
        Tuple of (images_path, files_path)
    """
    device_root = os.path.join(base_upload_dir, sanitize_hostname(hostname))
    return (
        os.path.join(device_root, "images"),
        os.path.join(device_root, "files"),
    )
def ensure_device_directories(base_upload_dir: str, hostname: str) -> Tuple[str, str]:
    """
    Create (if needed) and return a device's upload directories.

    Args:
        base_upload_dir: Base upload directory
        hostname: Device hostname

    Returns:
        Tuple of (images_path, files_path)
    """
    device_paths = get_device_upload_paths(base_upload_dir, hostname)
    # Idempotent: parents created as needed, existing dirs left untouched
    for directory in device_paths:
        Path(directory).mkdir(parents=True, exist_ok=True)
    return device_paths
def get_upload_path(base_upload_dir: str, hostname: str, is_image: bool, filename: str) -> str:
    """
    Compute the storage path for an uploaded file, creating directories.

    Args:
        base_upload_dir: Base upload directory
        hostname: Device hostname
        is_image: True to store under images/, False to store under files/
        filename: The filename to store

    Returns:
        Full path where the file should be stored
    """
    images_dir, files_dir = ensure_device_directories(base_upload_dir, hostname)
    destination = images_dir if is_image else files_dir
    return os.path.join(destination, filename)
def is_image_file(filename: str, mime_type: str = None) -> bool:
    """
    Check whether a file is an image, by extension and/or MIME type.

    Args:
        filename: The filename
        mime_type: Optional MIME type

    Returns:
        True if the file looks like an image
    """
    known_extensions = ('.jpg', '.jpeg', '.png', '.gif', '.webp', '.bmp', '.svg')
    extension = os.path.splitext(filename)[1].lower()
    # Either signal is enough: a recognized extension or an image/* MIME type
    has_image_extension = extension in known_extensions
    has_image_mime = bool(mime_type) and mime_type.startswith('image/')
    return has_image_extension or has_image_mime
def migrate_existing_files(base_upload_dir: str, hostname: str, file_list: list) -> dict:
    """
    Plan the move of legacy flat uploads into the per-device layout.

    Note: this only computes the old->new path mapping (and creates the
    target directories); it does not move any file itself.

    Args:
        base_upload_dir: Base upload directory
        hostname: Device hostname
        file_list: List of tuples (filename, is_image)

    Returns:
        Dictionary mapping old paths to new paths
    """
    images_dir, files_dir = ensure_device_directories(base_upload_dir, hostname)
    return {
        os.path.join(base_upload_dir, name):
            os.path.join(images_dir if image_flag else files_dir, name)
        for name, image_flag in file_list
    }
def get_relative_path(full_path: str, base_upload_dir: str) -> str:
    """
    Express a full path relative to the base upload directory.

    Args:
        full_path: Full file path
        base_upload_dir: Base upload directory

    Returns:
        Relative path from the base directory
    """
    relative = os.path.relpath(full_path, start=base_upload_dir)
    return relative

View File

@@ -0,0 +1,381 @@
"""
lspci output parser for PCI device detection and extraction.
Parses output from 'lspci -v' and extracts individual device information.
"""
import re
from typing import List, Dict, Any, Optional, Tuple
def extract_brand_model(vendor_name: str, device_name: str, device_class: str) -> Tuple[str, str]:
    """
    Derive a commercial brand and model from lspci vendor/device strings.

    Args:
        vendor_name: Vendor name (e.g., "NVIDIA Corporation", "Micron/Crucial Technology")
        device_name: Device name (e.g., "GA106 [GeForce RTX 3060]")
        device_class: Device class for context (e.g., "VGA compatible controller")

    Returns:
        Tuple of (brand, model)

    Examples:
        ("NVIDIA Corporation", "GA106 [GeForce RTX 3060 Lite Hash Rate]", "VGA")
        -> ("NVIDIA", "GeForce RTX 3060 Lite Hash Rate")
    """
    # Brand: first word of the vendor; combined vendors like "Micron/Crucial"
    # keep only the part before the slash
    brand = ""
    if vendor_name:
        brand = vendor_name.split()[0].split('/')[0]

    model = device_name
    bracketed = re.search(r'\[([^\]]+)\]', device_name)
    if bracketed:
        lowered_class = device_class.lower()
        gpu_markers = ('vga', 'graphics', '3d', 'display')
        storage_markers = ('nvme', 'non-volatile', 'sata', 'storage')
        if any(marker in lowered_class for marker in gpu_markers):
            # GPUs: the bracketed text is the commercial name
            model = bracketed.group(1)
        elif any(marker in lowered_class for marker in storage_markers):
            # Storage: drop bracketed aliases, then normalize slashes/spaces
            # e.g. "P2 [Nick P2] / P3 Plus NVMe" -> "P2/P3 Plus NVMe"
            normalized = re.sub(r'\[[^\]]*\]', '', device_name)
            normalized = re.sub(r'\s*/\s*', '/', normalized)
            normalized = re.sub(r'\s+', ' ', normalized)
            normalized = re.sub(r'/+', '/', normalized)
            model = normalized.strip('/ ')
    return brand, model.strip()
def _split_vendor_device(description: str) -> Tuple[str, str]:
"""
Split description into vendor name and device name.
Args:
description: Full device description from lspci
Returns:
Tuple of (vendor_name, device_name)
Examples:
"NVIDIA Corporation GA106 [GeForce RTX 3060]"
-> ("NVIDIA Corporation", "GA106 [GeForce RTX 3060]")
"Micron/Crucial Technology P2 NVMe PCIe SSD"
-> ("Micron/Crucial Technology", "P2 NVMe PCIe SSD")
"Realtek Semiconductor Co., Ltd. RTL8111/8168"
-> ("Realtek Semiconductor Co., Ltd.", "RTL8111/8168")
"""
# Vendor suffix patterns (ordered by priority)
vendor_suffixes = [
# Multi-word patterns (must come first)
r'\bCo\.,?\s*Ltd\.?',
r'\bCo\.,?\s*Inc\.?',
r'\bInc\.,?\s*Ltd\.?',
r'\bTechnology\s+Co\.,?\s*Ltd\.?',
r'\bSemiconductor\s+Co\.,?\s*Ltd\.?',
# Single word patterns
r'\bCorporation\b',
r'\bTechnology\b',
r'\bSemiconductor\b',
r'\bInc\.?\b',
r'\bLtd\.?\b',
r'\bGmbH\b',
r'\bAG\b',
]
# Try each pattern
for pattern in vendor_suffixes:
match = re.search(pattern, description, re.IGNORECASE)
if match:
# Split at the end of the vendor suffix
split_pos = match.end()
vendor_name = description[:split_pos].strip()
device_name = description[split_pos:].strip()
return vendor_name, device_name
# No suffix found - fallback to first word
parts = description.split(' ', 1)
if len(parts) >= 2:
return parts[0], parts[1]
return description, ""
def detect_pci_devices(lspci_output: str, exclude_system_devices: bool = True) -> List[Dict[str, str]]:
    """
    Detect all PCI devices from 'lspci -v' output.

    Args:
        lspci_output: Raw output from the 'lspci -v' command
        exclude_system_devices: If True (default), skip system infrastructure
            devices (Host/PCI/ISA bridges, SMBus, IOMMU, RAM memory, etc.)

    Returns:
        List of dicts with keys: slot, device_class, description

    Example:
        [{"slot": "04:00.0",
          "device_class": "Ethernet controller",
          "description": "Realtek Semiconductor Co., Ltd. RTL8111/8168..."}]
    """
    # Infrastructure classes filtered out by default (matched case-insensitively)
    system_classes = (
        "host bridge",
        "pci bridge",
        "isa bridge",
        "smbus",
        "iommu",
        "signal processing controller",
        "system peripheral",
        "ram memory",
        "non-essential instrumentation",
    )
    # Slot lines look like: "04:00.0 Ethernet controller: Realtek ..."
    slot_line = re.compile(
        r'^([0-9a-fA-F]{2}:[0-9a-fA-F]{2}\.[0-9a-fA-F])\s+([^:]+):\s+(.+)$'
    )
    found = []
    for raw_line in lspci_output.strip().split('\n'):
        parsed = slot_line.match(raw_line.strip())
        if not parsed:
            continue
        klass = parsed.group(2).strip()
        if exclude_system_devices and any(
            name in klass.lower() for name in system_classes
        ):
            continue
        found.append({
            "slot": parsed.group(1),
            "device_class": klass,
            "description": parsed.group(3).strip(),
        })
    return found
def extract_device_section(lspci_output: str, slot: str) -> Optional[str]:
    """
    Extract the complete 'lspci -v' section for a specific device.

    Args:
        lspci_output: Raw output from the 'lspci -v' command
        slot: PCI slot (e.g., "04:00.0")

    Returns:
        The section from the device's slot line up to (not including) the
        next slot line, or None when the slot does not appear.
    """
    starts_target = re.compile(rf'^{re.escape(slot)}\s+')
    # Any line beginning with "hh:hh.h " marks the start of some device
    starts_any = re.compile(r'^[0-9a-fA-F]{2}:[0-9a-fA-F]{2}\.[0-9a-fA-F]\s+')
    collected = []
    capturing = False
    for current in lspci_output.strip().split('\n'):
        if starts_target.match(current):
            capturing = True
            collected.append(current)
        elif capturing:
            if starts_any.match(current):
                break  # reached the next device's section
            collected.append(current)
    return '\n'.join(collected) if collected else None
def parse_device_info(device_section: str) -> Dict[str, Any]:
    """
    Parse detailed information from a PCI device section.

    Args:
        device_section: The complete lspci output for a single device
            (slot line first, followed by indented detail lines)

    Returns:
        Dictionary with parsed device information. Keys not found in the
        section keep their defaults (None or empty list).
    """
    # Pre-seed every key so callers can rely on the full shape of the dict
    result = {
        "slot": None,
        "device_class": None,
        "vendor_name": None,
        "device_name": None,
        "subsystem": None,
        "subsystem_vendor": None,
        "subsystem_device": None,
        "driver": None,
        "modules": [],
        "vendor_device_id": None,  # Will be extracted from other sources or databases
        "revision": None,
        "prog_if": None,
        "flags": [],
        "irq": None,
        "iommu_group": None,
        "memory_addresses": [],
        "io_ports": [],
        "capabilities": []
    }
    lines = device_section.split('\n')
    # Parse the first line (slot line)
    # Format: "04:00.0 Ethernet controller: Realtek Semiconductor Co., Ltd. RTL8111/8168/8211/8411..."
    first_line = lines[0] if lines else ""
    slot_match = re.match(r'^([0-9a-fA-F]{2}:[0-9a-fA-F]{2}\.[0-9a-fA-F])\s+([^:]+):\s+(.+)$', first_line)
    if slot_match:
        result["slot"] = slot_match.group(1)
        result["device_class"] = slot_match.group(2).strip()
        description = slot_match.group(3).strip()
        # Try to extract vendor and device name from description
        # Common formats:
        #   "NVIDIA Corporation GA106 [GeForce RTX 3060 Lite Hash Rate]"
        #   "Micron/Crucial Technology P2 [Nick P2] / P3 / P3 Plus NVMe PCIe SSD"
        #   "Realtek Semiconductor Co., Ltd. RTL8111/8168/8211/8411"
        #   "Intel Corporation Device 1234"
        # Strategy: find vendor suffix markers (Corporation, Technology,
        # Co., Ltd., etc.); everything after the suffix is the device name
        vendor_name, device_name = _split_vendor_device(description)
        result["vendor_name"] = vendor_name
        result["device_name"] = device_name
        # Extract revision if present
        rev_match = re.search(r'\(rev\s+([0-9a-fA-F]+)\)', description)
        if rev_match:
            result["revision"] = rev_match.group(1)
            # Clean revision from device_name
            result["device_name"] = re.sub(r'\s*\(rev\s+[0-9a-fA-F]+\)', '', result["device_name"])
        # Extract prog-if if present
        progif_match = re.search(r'\(prog-if\s+([0-9a-fA-F]+)\s*\[([^\]]+)\]\)', description)
        if progif_match:
            result["prog_if"] = progif_match.group(1)
            # Clean prog-if from device_name
            result["device_name"] = re.sub(r'\s*\(prog-if\s+[0-9a-fA-F]+\s*\[[^\]]+\]\)', '', result["device_name"])
    # Parse detailed fields from the lines after the slot line
    for line in lines[1:]:
        line_stripped = line.strip()
        # Subsystem
        subsystem_match = re.match(r'^Subsystem:\s+(.+)$', line_stripped)
        if subsystem_match:
            result["subsystem"] = subsystem_match.group(1).strip()
        # DeviceName (sometimes present; only used when the slot line gave none)
        devicename_match = re.match(r'^DeviceName:\s+(.+)$', line_stripped)
        if devicename_match:
            if not result["device_name"]:
                result["device_name"] = devicename_match.group(1).strip()
        # Flags line; it also carries the IRQ and the IOMMU group
        flags_match = re.match(r'^Flags:\s+(.+)$', line_stripped)
        if flags_match:
            flags_str = flags_match.group(1).strip()
            # Extract IOMMU group
            iommu_match = re.search(r'IOMMU group\s+(\d+)', flags_str)
            if iommu_match:
                result["iommu_group"] = iommu_match.group(1)
            # Extract IRQ
            irq_match = re.search(r'IRQ\s+(\d+)', flags_str)
            if irq_match:
                result["irq"] = irq_match.group(1)
            # Parse flags (comma-separated list)
            result["flags"] = [f.strip() for f in flags_str.split(',')]
        # Memory addresses
        memory_match = re.match(r'^Memory at\s+([0-9a-fA-F]+)\s+\((.+?)\)\s+\[(.+?)\]', line_stripped)
        if memory_match:
            result["memory_addresses"].append({
                "address": memory_match.group(1),
                "type": memory_match.group(2),
                "info": memory_match.group(3)
            })
        # I/O ports
        io_match = re.match(r'^I/O ports at\s+([0-9a-fA-F]+)\s+\[size=(\d+)\]', line_stripped)
        if io_match:
            result["io_ports"].append({
                "address": io_match.group(1),
                "size": io_match.group(2)
            })
        # Kernel driver in use
        driver_match = re.match(r'^Kernel driver in use:\s+(.+)$', line_stripped)
        if driver_match:
            result["driver"] = driver_match.group(1).strip()
        # Kernel modules
        modules_match = re.match(r'^Kernel modules:\s+(.+)$', line_stripped)
        if modules_match:
            modules_str = modules_match.group(1).strip()
            result["modules"] = [m.strip() for m in modules_str.split(',')]
        # Capabilities (just capture the type for classification)
        cap_match = re.match(r'^Capabilities:\s+\[([0-9a-fA-F]+)\]\s+(.+)$', line_stripped)
        if cap_match:
            result["capabilities"].append({
                "offset": cap_match.group(1),
                "type": cap_match.group(2).strip()
            })
    return result
def get_pci_vendor_device_id(slot: str) -> Optional[str]:
    """
    Get the vendor:device ID for a PCI slot using 'lspci -n'.

    Previously a placeholder that always returned None; now implements the
    behavior its own docstring described.

    Args:
        slot: PCI slot (e.g., "04:00.0")

    Returns:
        Vendor:Device ID string (e.g., "10ec:8168"), or None when lspci is
        unavailable, fails, times out, or produces no parseable line.
    """
    # Local import: keeps this module's top-level imports unchanged
    import subprocess
    try:
        completed = subprocess.run(
            ['lspci', '-n', '-s', slot],
            capture_output=True,
            text=True,
            timeout=10,
        )
    except (OSError, subprocess.SubprocessError):
        # Missing binary, exec failure, or timeout
        return None
    if completed.returncode != 0:
        return None
    # Output format: "04:00.0 0200: 10ec:8168 (rev 16)"
    match = re.search(
        r'\s[0-9a-fA-F]+:\s+([0-9a-fA-F]{4}:[0-9a-fA-F]{4})',
        completed.stdout,
    )
    return match.group(1).lower() if match else None

View File

@@ -0,0 +1,252 @@
"""
PCI Device Classifier
Classifies PCI devices based on lspci output and device class information.
"""
import re
from typing import Tuple, Optional, Dict, Any
class PCIClassifier:
    """
    Classifier for PCI devices based on device class and characteristics.

    All methods are static. Classification runs three strategies in order:
    direct class mapping, keyword detection, then vendor heuristics.
    """
    # PCI device class mappings to (type_principal, sous_type)
    CLASS_MAPPINGS = {
        # Storage devices
        "SATA controller": ("PCI", "Contrôleur SATA"),
        "NVMe": ("PCI", "SSD NVMe"),
        "Non-Volatile memory controller": ("PCI", "SSD NVMe"),
        "RAID bus controller": ("PCI", "Contrôleur RAID"),
        "IDE interface": ("PCI", "Contrôleur IDE"),
        "SCSI storage controller": ("PCI", "Contrôleur SCSI"),
        # Network devices
        "Ethernet controller": ("PCI", "Carte réseau Ethernet"),
        "Network controller": ("PCI", "Carte réseau"),
        "Wireless controller": ("PCI", "Carte WiFi"),
        # Graphics
        "VGA compatible controller": ("PCI", "Carte graphique"),
        "3D controller": ("PCI", "Carte graphique"),
        "Display controller": ("PCI", "Carte graphique"),
        # Audio
        "Audio device": ("PCI", "Carte son"),
        "Multimedia audio controller": ("PCI", "Carte son"),
        # USB
        "USB controller": ("PCI", "Contrôleur USB"),
        # System infrastructure
        "Host bridge": ("PCI", "Pont système"),
        "PCI bridge": ("PCI", "Pont PCI"),
        "ISA bridge": ("PCI", "Pont ISA"),
        "SMBus": ("PCI", "Contrôleur SMBus"),
        "IOMMU": ("PCI", "Contrôleur IOMMU"),
        # Security
        "Encryption controller": ("PCI", "Contrôleur de chiffrement"),
        # Other
        "Serial controller": ("PCI", "Contrôleur série"),
        "Communication controller": ("PCI", "Contrôleur de communication"),
        "Signal processing controller": ("PCI", "Contrôleur de traitement du signal"),
    }

    @staticmethod
    def classify_device(
        device_section: str,
        device_info: Optional[Dict[str, Any]] = None
    ) -> Tuple[str, str]:
        """
        Classify a PCI device based on lspci output.

        Args:
            device_section: Full lspci -v output for a single device
            device_info: Optional pre-parsed device information

        Returns:
            Tuple of (type_principal, sous_type)
        """
        if not device_info:
            from app.utils.lspci_parser import parse_device_info
            device_info = parse_device_info(device_section)
        # BUGFIX: parse_device_info pre-seeds these keys with None, so
        # .get(key, "") still returned None and .lower() crashed with
        # AttributeError; coerce explicit None to "" instead.
        device_class = device_info.get("device_class") or ""
        description = device_info.get("device_name") or ""
        vendor_name = device_info.get("vendor_name") or ""
        # Strategy 1: Direct class mapping
        for class_key, (type_principal, sous_type) in PCIClassifier.CLASS_MAPPINGS.items():
            if class_key.lower() in device_class.lower():
                # Refine generic network devices into WiFi/Ethernet
                if sous_type == "Carte réseau":
                    refined = PCIClassifier.refine_network_type(device_section, description)
                    if refined:
                        return ("PCI", refined)
                return (type_principal, sous_type)
        # Strategy 2: Keyword detection in description
        keyword_result = PCIClassifier.detect_from_keywords(device_section, description)
        if keyword_result:
            return ("PCI", keyword_result)
        # Strategy 3: Vendor-specific detection
        vendor_result = PCIClassifier.detect_from_vendor(vendor_name, description)
        if vendor_result:
            return ("PCI", vendor_result)
        # Default: Generic PCI device
        return ("PCI", "Autre")

    @staticmethod
    def refine_network_type(content: str, description: str) -> Optional[str]:
        """
        Refine network device classification (WiFi vs Ethernet).

        Args:
            content: Full device section
            description: Device description

        Returns:
            Refined sous_type or None when neither family is recognized
        """
        normalized = content.lower() + " " + description.lower()
        # WiFi patterns are checked first: a wireless hit wins outright
        wifi_patterns = [
            r"wi[-]?fi", r"wireless", r"802\.11[a-z]", r"wlan",
            r"wireless\s+adapter", r"wireless\s+network",
            r"atheros", r"qualcomm.*wireless", r"broadcom.*wireless",
            r"intel.*wireless", r"realtek.*wireless"
        ]
        for pattern in wifi_patterns:
            if re.search(pattern, normalized, re.IGNORECASE):
                return "Carte WiFi"
        # Ethernet patterns (chipset names included, e.g. rtl81xx, e1000)
        ethernet_patterns = [
            r"ethernet", r"gigabit", r"10/100", r"1000base",
            r"rtl81\d+", r"e1000", r"bnx2", r"tg3"
        ]
        for pattern in ethernet_patterns:
            if re.search(pattern, normalized, re.IGNORECASE):
                return "Carte réseau Ethernet"
        return None

    @staticmethod
    def detect_from_keywords(content: str, description: str) -> Optional[str]:
        """
        Detect device type from keywords in content and description.

        Args:
            content: Full device section
            description: Device description

        Returns:
            Detected sous_type or None
        """
        normalized = content.lower() + " " + description.lower()
        # Ordered: first matching pattern wins
        keyword_mappings = [
            # Storage
            (r"nvme|ssd.*pcie|non-volatile.*memory", "SSD NVMe"),
            (r"sata|ahci", "Contrôleur SATA"),
            # Network
            (r"wi[-]?fi|wireless|802\.11", "Carte WiFi"),
            (r"ethernet|gigabit|network", "Carte réseau Ethernet"),
            # Graphics
            (r"nvidia|geforce|quadro|rtx|gtx", "Carte graphique"),
            (r"amd.*radeon|rx\s*\d+", "Carte graphique"),
            (r"intel.*graphics|intel.*hd", "Carte graphique"),
            (r"vga|display|graphics", "Carte graphique"),
            # Audio
            (r"audio|sound|hda|ac97", "Carte son"),
            # USB
            (r"xhci|ehci|ohci|uhci|usb.*host", "Contrôleur USB"),
        ]
        for pattern, sous_type in keyword_mappings:
            if re.search(pattern, normalized, re.IGNORECASE):
                return sous_type
        return None

    @staticmethod
    def detect_from_vendor(vendor_name: str, description: str) -> Optional[str]:
        """
        Detect device type from vendor name and description.

        Args:
            vendor_name: Vendor name
            description: Device description

        Returns:
            Detected sous_type or None
        """
        if not vendor_name:
            return None
        vendor_lower = vendor_name.lower()
        # GPU vendors
        if any(v in vendor_lower for v in ["nvidia", "amd", "intel", "ati"]):
            if any(k in description.lower() for k in ["geforce", "radeon", "quadro", "graphics", "vga"]):
                return "Carte graphique"
        # Network vendors (wireless markers take priority over ethernet)
        if any(v in vendor_lower for v in ["realtek", "intel", "broadcom", "qualcomm", "atheros"]):
            if any(k in description.lower() for k in ["ethernet", "network", "wireless", "wifi", "802.11"]):
                if any(k in description.lower() for k in ["wireless", "wifi", "802.11"]):
                    return "Carte WiFi"
                return "Carte réseau Ethernet"
        # Storage vendors
        if any(v in vendor_lower for v in ["samsung", "crucial", "micron", "western digital", "seagate"]):
            if "nvme" in description.lower():
                return "SSD NVMe"
        return None

    @staticmethod
    def extract_technical_specs(device_info: Dict[str, Any]) -> Dict[str, Any]:
        """
        Extract technical specifications for the caracteristiques_specifiques field.

        Args:
            device_info: Parsed device information

        Returns:
            Dictionary with technical specifications (None values removed)
        """
        specs = {
            "slot": device_info.get("slot"),
            "device_class": device_info.get("device_class"),
            "vendor_name": device_info.get("vendor_name"),
            "subsystem": device_info.get("subsystem"),
            "driver": device_info.get("driver"),
            "iommu_group": device_info.get("iommu_group"),
        }
        # Optional fields only added when present
        if device_info.get("vendor_device_id"):
            specs["pci_device_id"] = device_info.get("vendor_device_id")
        if device_info.get("revision"):
            specs["revision"] = device_info.get("revision")
        if device_info.get("modules"):
            specs["modules"] = ", ".join(device_info.get("modules", []))
        # Clean None values
        return {k: v for k, v in specs.items() if v is not None}

View File

@@ -0,0 +1,79 @@
"""
PCI Information Parser
Combines lspci -v and lspci -n outputs to get complete device information.
"""
import re
import subprocess
from typing import Dict, Any, Optional
def get_pci_ids_from_lspci_n(lspci_n_output: str) -> Dict[str, str]:
    """
    Map PCI slots to vendor:device IDs from 'lspci -n' output.

    Args:
        lspci_n_output: Output from the 'lspci -n' command

    Returns:
        Dictionary mapping slot -> lowercase "vendor:device" ID
        Example: {"04:00.0": "10ec:8168", "08:00.0": "10de:2504"}
    """
    # Line formats:
    #   "04:00.0 0200: 10ec:8168 (rev 16)"
    #   "00:00.0 0600: 1022:1480"
    line_re = re.compile(
        r'^([0-9a-fA-F]{2}:[0-9a-fA-F]{2}\.[0-9a-fA-F])'
        r'\s+[0-9a-fA-F]+:\s+([0-9a-fA-F]{4}):([0-9a-fA-F]{4})'
    )
    mapping = {}
    for row in lspci_n_output.strip().split('\n'):
        hit = line_re.match(row)
        if hit is not None:
            mapping[hit.group(1)] = f"{hit.group(2).lower()}:{hit.group(3).lower()}"
    return mapping
def enrich_device_info_with_ids(device_info: Dict[str, Any], pci_ids: Dict[str, str]) -> Dict[str, Any]:
    """
    Enrich device info with the vendor:device ID from 'lspci -n' output.

    Args:
        device_info: Parsed device information from lspci -v
        pci_ids: Mapping from slot to vendor:device ID

    Returns:
        The same dict, with pci_device_id (and vendor_id / device_id)
        added when the slot is known.
    """
    slot = device_info.get("slot")
    if slot and slot in pci_ids:
        combined_id = pci_ids[slot]
        device_info["pci_device_id"] = combined_id
        # Also expose the two halves as hex-prefixed strings
        pieces = combined_id.split(':')
        if len(pieces) == 2:
            vendor_part, device_part = pieces
            device_info["vendor_id"] = f"0x{vendor_part}"
            device_info["device_id"] = f"0x{device_part}"
    return device_info
def run_lspci_n() -> Optional[str]:
    """
    Run the 'lspci -n' command and return its output.

    Returns:
        stdout from 'lspci -n', or None when the binary is missing, the
        command fails, or it times out.
    """
    try:
        result = subprocess.run(
            ['lspci', '-n'],
            capture_output=True,
            text=True,
            timeout=10
        )
    # Narrowed from a bare 'except Exception' (which hid real bugs) to the
    # failures this call can actually raise: missing binary / exec error
    # (OSError) and timeout or other subprocess failures (SubprocessError)
    except (OSError, subprocess.SubprocessError):
        return None
    return result.stdout if result.returncode == 0 else None

View File

@@ -0,0 +1,44 @@
#!/usr/bin/env python3
"""
Apply migration 012: Add pci_device_id field
"""
import sqlite3
import os

# Resolve the database relative to this script, matching migrations
# 013-015. Previously this was a hardcoded absolute path under a
# developer's home directory, which broke the script on any other machine.
DB_PATH = os.path.join(os.path.dirname(__file__), "data", "peripherals.db")


def apply_migration():
    """Add the pci_device_id column to peripherals; return True on success."""
    if not DB_PATH or not os.path.exists(DB_PATH):
        print(f"❌ Database not found: {DB_PATH}")
        return False
    conn = sqlite3.connect(DB_PATH)
    cursor = conn.cursor()
    try:
        # Idempotence: skip when the column is already present
        cursor.execute("PRAGMA table_info(peripherals)")
        columns = [col[1] for col in cursor.fetchall()]
        if "pci_device_id" in columns:
            print("✅ Column pci_device_id already exists, skipping migration")
            return True
        # Add the column
        print("📝 Adding pci_device_id column...")
        cursor.execute("ALTER TABLE peripherals ADD COLUMN pci_device_id VARCHAR(20)")
        conn.commit()
        print("✅ Migration 012 applied successfully")
        return True
    except Exception as e:
        print(f"❌ Error applying migration: {e}")
        conn.rollback()
        return False
    finally:
        conn.close()


if __name__ == "__main__":
    apply_migration()

49
backend/apply_migration_013.py Executable file
View File

@@ -0,0 +1,49 @@
#!/usr/bin/env python3
"""Apply migration 013: Add device_id field"""
import sqlite3
import os

# Paths resolved relative to this script's directory
DB_PATH = os.path.join(os.path.dirname(__file__), "data", "peripherals.db")
MIGRATION_FILE = os.path.join(os.path.dirname(__file__), "migrations", "013_add_device_id.sql")


def apply_migration():
    """Run the 013 migration script and verify the device_id column."""
    print("Applying migration 013: Add device_id field...")
    with open(MIGRATION_FILE, 'r') as f:
        migration_sql = f.read()
    conn = sqlite3.connect(DB_PATH)
    cursor = conn.cursor()
    try:
        cursor.executescript(migration_sql)
        conn.commit()
        print("✅ Migration 013 applied successfully")
        # Confirm the new column is really there
        cursor.execute("PRAGMA table_info(peripherals)")
        matching = [row for row in cursor.fetchall() if row[1] == 'device_id']
        if matching:
            print(f"✅ Column 'device_id' added: {matching[0]}")
        else:
            print("⚠️ Warning: Column 'device_id' not found after migration")
    except sqlite3.Error as e:
        # A duplicate-column error means the migration already ran
        if "duplicate column name" in str(e).lower():
            print(" Migration already applied (column exists)")
        else:
            print(f"❌ Error applying migration: {e}")
            conn.rollback()
            raise
    finally:
        conn.close()


if __name__ == "__main__":
    apply_migration()

49
backend/apply_migration_014.py Executable file
View File

@@ -0,0 +1,49 @@
#!/usr/bin/env python3
"""Apply migration 014: Add pci_slot field"""
import sqlite3
import os

# Paths resolved relative to this script's directory
DB_PATH = os.path.join(os.path.dirname(__file__), "data", "peripherals.db")
MIGRATION_FILE = os.path.join(os.path.dirname(__file__), "migrations", "014_add_pci_slot.sql")


def apply_migration():
    """Run the 014 migration script and verify the pci_slot column."""
    print("Applying migration 014: Add pci_slot field...")
    with open(MIGRATION_FILE, 'r') as f:
        migration_sql = f.read()
    conn = sqlite3.connect(DB_PATH)
    cursor = conn.cursor()
    try:
        cursor.executescript(migration_sql)
        conn.commit()
        print("✅ Migration 014 applied successfully")
        # Confirm the new column is really there
        cursor.execute("PRAGMA table_info(peripherals)")
        matching = [row for row in cursor.fetchall() if row[1] == 'pci_slot']
        if matching:
            print(f"✅ Column 'pci_slot' added: {matching[0]}")
        else:
            print("⚠️ Warning: Column 'pci_slot' not found after migration")
    except sqlite3.Error as e:
        # A duplicate-column error means the migration already ran
        if "duplicate column name" in str(e).lower():
            print(" Migration already applied (column exists)")
        else:
            print(f"❌ Error applying migration: {e}")
            conn.rollback()
            raise
    finally:
        conn.close()


if __name__ == "__main__":
    apply_migration()

49
backend/apply_migration_015.py Executable file
View File

@@ -0,0 +1,49 @@
#!/usr/bin/env python3
"""Apply migration 015: Add utilisation field"""
import sqlite3
import os

# Paths resolved relative to this script's directory
DB_PATH = os.path.join(os.path.dirname(__file__), "data", "peripherals.db")
MIGRATION_FILE = os.path.join(os.path.dirname(__file__), "migrations", "015_add_utilisation.sql")


def apply_migration():
    """Run the 015 migration script and verify the utilisation column."""
    print("Applying migration 015: Add utilisation field...")
    with open(MIGRATION_FILE, 'r') as f:
        migration_sql = f.read()
    conn = sqlite3.connect(DB_PATH)
    cursor = conn.cursor()
    try:
        cursor.executescript(migration_sql)
        conn.commit()
        print("✅ Migration 015 applied successfully")
        # Confirm the new column is really there
        cursor.execute("PRAGMA table_info(peripherals)")
        matching = [row for row in cursor.fetchall() if row[1] == 'utilisation']
        if matching:
            print(f"✅ Column 'utilisation' added: {matching[0]}")
        else:
            print("⚠️ Warning: Column 'utilisation' not found after migration")
    except sqlite3.Error as e:
        # A duplicate-column error means the migration already ran
        if "duplicate column name" in str(e).lower():
            print(" Migration already applied (column exists)")
        else:
            print(f"❌ Error applying migration: {e}")
            conn.rollback()
            raise
    finally:
        conn.close()


if __name__ == "__main__":
    apply_migration()

59
backend/apply_migration_016.py Executable file
View File

@@ -0,0 +1,59 @@
#!/usr/bin/env python3
"""
Migration 016: Ajout du champ ram_max_capacity_mb
"""
import sqlite3
import sys
from pathlib import Path

# Paths resolved relative to this script
DB_PATH = Path(__file__).parent / "data" / "data.db"
MIGRATION_FILE = Path(__file__).parent / "migrations" / "016_add_ram_max_capacity.sql"


def main():
    """Apply migration 016, skipping when the column already exists."""
    if not DB_PATH.exists():
        print(f"❌ Base de données non trouvée: {DB_PATH}")
        sys.exit(1)
    # Load the migration SQL up front
    sql = MIGRATION_FILE.read_text()
    conn = sqlite3.connect(DB_PATH)
    cursor = conn.cursor()
    try:
        # Already applied? Then nothing to do.
        cursor.execute("PRAGMA table_info(hardware_snapshots)")
        existing_columns = [row[1] for row in cursor.fetchall()]
        if 'ram_max_capacity_mb' in existing_columns:
            print("✅ La colonne ram_max_capacity_mb existe déjà")
            return
        print("🔧 Application de la migration 016...")
        cursor.executescript(sql)
        conn.commit()
        print("✅ Migration 016 appliquée avec succès")
        # Verify the column actually landed
        cursor.execute("PRAGMA table_info(hardware_snapshots)")
        if 'ram_max_capacity_mb' in [row[1] for row in cursor.fetchall()]:
            print("✅ Colonne ram_max_capacity_mb ajoutée")
        else:
            print("❌ Erreur: colonne non ajoutée")
            sys.exit(1)
    except Exception as e:
        print(f"❌ Erreur lors de la migration: {e}")
        conn.rollback()
        sys.exit(1)
    finally:
        conn.close()


if __name__ == "__main__":
    main()

74
backend/apply_migration_017.py Executable file
View File

@@ -0,0 +1,74 @@
#!/usr/bin/env python3
"""
Migration 017: Ajout des champs Proxmox
"""
import sqlite3
import sys
from pathlib import Path
# Configuration
DB_PATH = Path(__file__).parent / "data" / "data.db"
MIGRATION_FILE = Path(__file__).parent / "migrations" / "017_add_proxmox_fields.sql"
def main():
    """Apply migration 017: add the Proxmox detection columns to hardware_snapshots.

    Adds is_proxmox_host, is_proxmox_guest and proxmox_version. Exits with
    status 1 when the database or migration file is missing, or when the
    migration fails. Skipped entirely if all three columns already exist.
    """
    required = ('is_proxmox_host', 'is_proxmox_guest', 'proxmox_version')

    if not DB_PATH.exists():
        print(f"❌ Base de données non trouvée: {DB_PATH}")
        sys.exit(1)

    # Fail early with a clear message instead of an open() traceback.
    if not MIGRATION_FILE.exists():
        print(f"❌ Fichier de migration non trouvé: {MIGRATION_FILE}")
        sys.exit(1)

    sql = MIGRATION_FILE.read_text()

    conn = sqlite3.connect(DB_PATH)
    cursor = conn.cursor()

    def table_columns():
        """Return the current column names of hardware_snapshots."""
        cursor.execute("PRAGMA table_info(hardware_snapshots)")
        return [col[1] for col in cursor.fetchall()]

    try:
        columns = table_columns()
        existing = [col for col in required if col in columns]
        if len(existing) == len(required):
            print("✅ Toutes les colonnes Proxmox existent déjà")
            return
        elif existing:
            # NOTE(review): with some columns already present, the SQL script's
            # unconditional ALTER statements will fail on those columns —
            # confirm the partial-application scenario is handled upstream.
            print(f"⚠️ Colonnes existantes: {', '.join(existing)}")

        print("🔧 Application de la migration 017...")
        cursor.executescript(sql)
        conn.commit()
        print("✅ Migration 017 appliquée avec succès")

        # Post-check: confirm each expected column really was added.
        columns_after = table_columns()
        success = True
        for col in required:
            if col in columns_after:
                print(f"✅ Colonne {col} ajoutée")
            else:
                print(f"❌ Erreur: colonne {col} non ajoutée")
                success = False
        if not success:
            sys.exit(1)
    except Exception as e:
        # SystemExit is not an Exception subclass, so the exits above pass through.
        print(f"❌ Erreur lors de la migration: {e}")
        conn.rollback()
        sys.exit(1)
    finally:
        conn.close()


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,179 @@
#!/usr/bin/env python3
"""
Migrate existing uploads to organized structure
Moves files from uploads/ to uploads/{hostname}/images or uploads/{hostname}/files
"""
import os
import shutil
import sys
from pathlib import Path
# Add parent directory to path
sys.path.insert(0, str(Path(__file__).parent))
from sqlalchemy.orm import Session
from app.db.session import SessionLocal
from app.core.config import settings
from app.models.device import Device
from app.models.document import Document
from app.utils.file_organizer import (
sanitize_hostname,
is_image_file,
ensure_device_directories
)
def migrate_files(dry_run: bool = True) -> None:
    """
    Migrate existing files to organized structure

    Walks every Document row, computes its target path
    uploads/{sanitized hostname}/{images|files}/{basename}, and — unless
    dry_run is set — moves the file there and updates Document.stored_path.
    All DB changes are committed in a single commit after the loop.

    Args:
        dry_run: If True, only print what would be done
    """
    db: Session = SessionLocal()
    try:
        # Get all documents
        documents = db.query(Document).all()
        print(f"Found {len(documents)} documents to migrate")
        print(f"Mode: {'DRY RUN' if dry_run else 'ACTUAL MIGRATION'}")
        print("-" * 80)
        migrated_count = 0
        error_count = 0
        skipped_count = 0
        for doc in documents:
            # Get device — its hostname decides the target directory
            device = db.query(Device).filter(Device.id == doc.device_id).first()
            if not device:
                print(f"❌ Document {doc.id}: Device {doc.device_id} not found - SKIPPING")
                error_count += 1
                continue
            # Check if file exists on disk before attempting a move
            if not os.path.exists(doc.stored_path):
                print(f"⚠️ Document {doc.id}: File not found at {doc.stored_path} - SKIPPING")
                skipped_count += 1
                continue
            # Determine if image (chooses the images/ vs files/ subdirectory)
            is_image = is_image_file(doc.filename, doc.mime_type)
            file_type = "image" if is_image else "file"
            # Get new path under the organized layout
            sanitized_hostname = sanitize_hostname(device.hostname)
            subdir = "images" if is_image else "files"
            filename = os.path.basename(doc.stored_path)
            new_path = os.path.join(
                settings.UPLOAD_DIR,
                sanitized_hostname,
                subdir,
                filename
            )
            # Check if already in correct location (exact string compare —
            # paths differing only in normalization are treated as distinct)
            if doc.stored_path == new_path:
                print(f"✓ Document {doc.id}: Already in correct location")
                skipped_count += 1
                continue
            print(f"📄 Document {doc.id} ({file_type}):")
            print(f" Device: {device.hostname} (ID: {device.id})")
            print(f" From: {doc.stored_path}")
            print(f" To: {new_path}")
            if not dry_run:
                try:
                    # Create target directory
                    os.makedirs(os.path.dirname(new_path), exist_ok=True)
                    # Move file
                    shutil.move(doc.stored_path, new_path)
                    # Update database (committed once, after the loop)
                    doc.stored_path = new_path
                    db.add(doc)
                    print(f" ✅ Migrated successfully")
                    migrated_count += 1
                except Exception as e:
                    # Best-effort: record the failure and keep migrating the rest
                    print(f" ❌ Error: {e}")
                    error_count += 1
            else:
                print(f" [DRY RUN - would migrate]")
                migrated_count += 1
            print()
        if not dry_run:
            db.commit()
            print("Database updated")
        print("-" * 80)
        print(f"Summary:")
        print(f" Migrated: {migrated_count}")
        print(f" Skipped: {skipped_count}")
        print(f" Errors: {error_count}")
        print(f" Total: {len(documents)}")
        if dry_run:
            print()
            print("This was a DRY RUN. To actually migrate files, run:")
            print(" python backend/migrate_file_organization.py --execute")
    finally:
        db.close()
def cleanup_empty_directories(base_dir: str):
    """Prune empty subdirectories beneath base_dir.

    Walks bottom-up so that a parent emptied by removing its children is
    itself removed in the same pass. base_dir itself is never removed.
    Failures are reported and skipped (best-effort cleanup).
    """
    for parent, subdirs, _files in os.walk(base_dir, topdown=False):
        for name in subdirs:
            candidate = os.path.join(parent, name)
            try:
                if os.listdir(candidate):
                    continue  # still has content — leave it alone
                os.rmdir(candidate)
                print(f"Removed empty directory: {candidate}")
            except Exception as exc:
                print(f"Could not remove {candidate}: {exc}")
if __name__ == "__main__":
    import argparse

    def _banner(title: str) -> None:
        # Print a title framed by separator rules.
        print("=" * 80)
        print(title)
        print("=" * 80)

    cli = argparse.ArgumentParser(description="Migrate uploads to organized structure")
    cli.add_argument(
        "--execute",
        action="store_true",
        help="Actually perform the migration (default is dry-run)"
    )
    cli.add_argument(
        "--cleanup",
        action="store_true",
        help="Clean up empty directories after migration"
    )
    opts = cli.parse_args()

    _banner("File Organization Migration")
    print()
    # Dry-run unless --execute was given.
    migrate_files(dry_run=not opts.execute)

    # Directory cleanup only makes sense after a real migration.
    if opts.execute and opts.cleanup:
        print()
        _banner("Cleaning up empty directories")
        cleanup_empty_directories(settings.UPLOAD_DIR)

    print()
    print("Done!")

View File

@@ -0,0 +1,5 @@
-- Migration 012: Add pci_device_id field to peripherals table
-- Date: 2026-01-05
-- Description: Add PCI device ID field (vendor:device format, e.g., 10ec:8168)
-- New column is NULL for existing rows (no default supplied).
ALTER TABLE peripherals ADD COLUMN pci_device_id VARCHAR(20);

View File

@@ -0,0 +1,10 @@
-- Migration 013: Add generic device_id field
-- This field stores the physical identifier of the device:
-- - For PCI devices: the slot (e.g., "08:00.0")
-- - For USB devices: the bus-device (e.g., "001-004")
-- - For other devices: any relevant identifier
ALTER TABLE peripherals ADD COLUMN device_id VARCHAR(50);
-- Add index for faster lookups
-- NOTE(review): not idempotent — CREATE INDEX fails if the index already
-- exists; a re-run relies on the applier's column-existence guard.
CREATE INDEX idx_peripherals_device_id ON peripherals(device_id);

View File

@@ -0,0 +1,7 @@
-- Migration 014: Add pci_slot field
-- This field stores the PCI slot identifier (e.g., "08:00.0")
ALTER TABLE peripherals ADD COLUMN pci_slot VARCHAR(20);
-- Add index for faster lookups
-- NOTE(review): not idempotent — CREATE INDEX fails if the index already
-- exists; a re-run relies on the applier's column-existence guard.
CREATE INDEX idx_peripherals_pci_slot ON peripherals(pci_slot);

View File

@@ -0,0 +1,8 @@
-- Migration 015: Add utilisation field
-- This field stores the host/device where the peripheral is used
-- Can be a reference to a host in host.yaml or "non-utilisé"
ALTER TABLE peripherals ADD COLUMN utilisation VARCHAR(255);
-- Add index for faster lookups
-- NOTE(review): not idempotent — CREATE INDEX fails if the index already
-- exists; a re-run relies on the applier's column-existence guard.
CREATE INDEX idx_peripherals_utilisation ON peripherals(utilisation);

View File

@@ -0,0 +1,7 @@
-- Migration 016: Add the ram_max_capacity_mb field
-- Date: 2026-01-10
-- Description: Adds the maximum RAM capacity supported by the motherboard
ALTER TABLE hardware_snapshots ADD COLUMN ram_max_capacity_mb INTEGER;
-- Note: may be NULL for existing snapshots (no default supplied)

View File

@@ -0,0 +1,9 @@
-- Migration 017: Add the Proxmox fields
-- Date: 2026-01-10
-- Description: Adds fields to detect Proxmox environments (host and guest)
-- In SQLite, ADD COLUMN with a DEFAULT backfills existing rows with that
-- default, so the two booleans read FALSE for pre-existing snapshots.
ALTER TABLE hardware_snapshots ADD COLUMN is_proxmox_host BOOLEAN DEFAULT FALSE;
ALTER TABLE hardware_snapshots ADD COLUMN is_proxmox_guest BOOLEAN DEFAULT FALSE;
ALTER TABLE hardware_snapshots ADD COLUMN proxmox_version TEXT;
-- Note: proxmox_version may be NULL for existing snapshots

View File

@@ -0,0 +1,2 @@
-- Migration 018: Add IP URL field to devices
-- NOTE(review): presumably a browsable URL built from the device's IP —
-- verify the producer/consumer before relying on the format.
ALTER TABLE devices ADD COLUMN ip_url VARCHAR(512);

View File

@@ -0,0 +1,2 @@
-- Migration 019: Add audio inventory columns to hardware_snapshots.
-- NOTE(review): names suggest serialized JSON payloads (hardware vs software
-- audio info) — confirm against the snapshot producer before relying on it.
ALTER TABLE hardware_snapshots ADD COLUMN audio_hardware_json TEXT;
ALTER TABLE hardware_snapshots ADD COLUMN audio_software_json TEXT;

View File

@@ -0,0 +1,15 @@
-- Migration 020: Store uptime_seconds as REAL for fractional values
-- Date: 2026-01-11
-- Description: Change hardware_snapshots.uptime_seconds from INTEGER to REAL
-- NOTE(review): ALTER TABLE ... DROP COLUMN requires SQLite >= 3.35 and
-- RENAME COLUMN requires >= 3.25 — confirm the deployed SQLite version.
BEGIN TRANSACTION;
-- Add the replacement column, copy the old integer values into it,
-- then drop the old column and take over its name.
ALTER TABLE hardware_snapshots ADD COLUMN uptime_seconds_real REAL;
UPDATE hardware_snapshots
SET uptime_seconds_real = uptime_seconds
WHERE uptime_seconds IS NOT NULL;
ALTER TABLE hardware_snapshots DROP COLUMN uptime_seconds;
ALTER TABLE hardware_snapshots RENAME COLUMN uptime_seconds_real TO uptime_seconds;
COMMIT;