Files
serv_benchmark/backend/app/schemas/benchmark.py
gilles soulier c6a8e8e83d feat: Complete MVP implementation of Linux BenchTools
 Features:
- Backend FastAPI complete (25 Python files)
  - 5 SQLAlchemy models (Device, HardwareSnapshot, Benchmark, Link, Document)
  - Pydantic schemas for validation
  - 4 API routers (benchmark, devices, links, docs)
  - Authentication with Bearer token
  - Automatic score calculation
  - File upload support

- Frontend web interface (13 files)
  - 4 HTML pages (Dashboard, Devices, Device Detail, Settings)
  - 7 JavaScript modules
  - Monokai dark theme CSS
  - Responsive design
  - Complete CRUD operations

- Client benchmark script (500+ lines Bash)
  - Hardware auto-detection
  - CPU, RAM, Disk, Network benchmarks
  - JSON payload generation
  - Robust error handling

- Docker deployment
  - Optimized Dockerfile
  - docker-compose with 2 services
  - Persistent volumes
  - Environment variables

- Documentation & Installation
  - Automated install.sh script
  - README, QUICKSTART, DEPLOYMENT guides
  - Complete API documentation
  - Project structure documentation

📊 Stats:
- ~60 files created
- ~5000 lines of code
- Full MVP feature set implemented

🚀 Ready for production deployment!

🤖 Generated with Claude Code
Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
2025-12-07 14:46:10 +01:00

110 lines
2.9 KiB
Python

"""
Linux BenchTools - Benchmark Schemas
"""
from typing import Optional

from pydantic import BaseModel, ConfigDict, Field

from app.schemas.hardware import HardwareData
class CPUResults(BaseModel):
    """CPU benchmark results reported by the client script.

    Every field is optional: a run may skip the CPU benchmark entirely.
    """

    # Event throughput measured by the CPU test (events per second).
    events_per_sec: Optional[float] = None
    # Wall-clock duration of the CPU test, in seconds.
    duration_s: Optional[float] = None
    # Normalized CPU sub-score used in the global score calculation.
    score: Optional[float] = None
class MemoryResults(BaseModel):
    """Memory (RAM) benchmark results.

    Fields are optional so a partial run can still be submitted.
    """

    # Measured memory throughput in MiB/s.
    throughput_mib_s: Optional[float] = None
    # Normalized memory sub-score.
    score: Optional[float] = None
class DiskResults(BaseModel):
    """Disk benchmark results.

    Sequential throughput, random IOPS, and latency; all optional.
    """

    # Sequential read / write throughput in MB/s.
    read_mb_s: Optional[float] = None
    write_mb_s: Optional[float] = None
    # Random read / write operations per second.
    iops_read: Optional[int] = None
    iops_write: Optional[int] = None
    # Average access latency in milliseconds.
    latency_ms: Optional[float] = None
    # Normalized disk sub-score.
    score: Optional[float] = None
class NetworkResults(BaseModel):
    """Network benchmark results.

    Bandwidth, latency, jitter and loss figures; all optional.
    """

    # Upstream / downstream bandwidth in Mbit/s.
    upload_mbps: Optional[float] = None
    download_mbps: Optional[float] = None
    # Round-trip latency and its variation, in milliseconds.
    ping_ms: Optional[float] = None
    jitter_ms: Optional[float] = None
    # Packet loss as a percentage (0-100).
    packet_loss_percent: Optional[float] = None
    # Normalized network sub-score.
    score: Optional[float] = None
class GPUResults(BaseModel):
    """GPU benchmark results.

    Optional: many headless servers have no usable GPU.
    """

    # Raw score reported by glmark2.
    glmark2_score: Optional[int] = None
    # Normalized GPU sub-score.
    score: Optional[float] = None
class BenchmarkResults(BaseModel):
    """Aggregated results for one complete benchmark run.

    Each category block is optional (a client may skip any test);
    only the global score is required and must lie in [0, 100].
    """

    cpu: Optional[CPUResults] = None
    memory: Optional[MemoryResults] = None
    disk: Optional[DiskResults] = None
    network: Optional[NetworkResults] = None
    gpu: Optional[GPUResults] = None
    # Mandatory overall score, constrained to the 0-100 range.
    global_score: float = Field(..., ge=0, le=100, description="Global score (0-100)")
class BenchmarkPayload(BaseModel):
    """Top-level payload POSTed by the client benchmark script.

    Bundles the device identity, the script version, the detected
    hardware snapshot and the benchmark results.
    """

    # Unique identifier chosen by the client (hostname, serial, ...).
    device_identifier: str = Field(..., min_length=1, max_length=255)
    # Version string of the bash benchmark script that produced this payload.
    bench_script_version: str = Field(..., min_length=1, max_length=50)
    # Hardware auto-detection data collected on the client.
    hardware: HardwareData
    # Results of the individual benchmark categories plus global score.
    results: BenchmarkResults
class BenchmarkResponse(BaseModel):
    """API response returned after a benchmark submission succeeds."""

    # Submission status; defaults to the literal "ok".
    status: str = "ok"
    # Database ids assigned to the device and the stored benchmark run.
    device_id: int
    benchmark_id: int
    # Optional human-readable note for the client.
    message: Optional[str] = None
class BenchmarkDetail(BaseModel):
    """Detailed information for a single benchmark run.

    Built directly from ORM objects (``from_attributes=True``), mirroring
    the Benchmark model's columns plus the parsed details JSON.
    """

    # Pydantic v2 idiom: the inner ``class Config`` is deprecated in v2,
    # which this file already targets (``from_attributes`` is v2-only naming).
    model_config = ConfigDict(from_attributes=True)

    id: int
    device_id: int
    hardware_snapshot_id: int
    # Run timestamp serialized as a string — presumably ISO-8601; confirm
    # against the Benchmark model's column type.
    run_at: str
    bench_script_version: str
    global_score: float
    # Per-category sub-scores; None when a category was not benchmarked.
    cpu_score: Optional[float] = None
    memory_score: Optional[float] = None
    disk_score: Optional[float] = None
    network_score: Optional[float] = None
    gpu_score: Optional[float] = None
    # details_json column parsed into a dict of per-category raw results.
    details: dict
class BenchmarkSummary(BaseModel):
    """Compact benchmark information for list endpoints.

    Built directly from ORM objects (``from_attributes=True``); omits the
    raw details payload carried by BenchmarkDetail.
    """

    # Pydantic v2 idiom: the inner ``class Config`` is deprecated in v2,
    # which this file already targets (``from_attributes`` is v2-only naming).
    model_config = ConfigDict(from_attributes=True)

    id: int
    # Run timestamp serialized as a string — presumably ISO-8601; confirm
    # against the Benchmark model's column type.
    run_at: str
    global_score: float
    # Per-category sub-scores; None when a category was not benchmarked.
    cpu_score: Optional[float] = None
    memory_score: Optional[float] = None
    disk_score: Optional[float] = None
    network_score: Optional[float] = None
    gpu_score: Optional[float] = None
    bench_script_version: Optional[str] = None