"""
Linux BenchTools - Benchmark Schemas

Pydantic models describing per-category benchmark results, the payload
submitted by the client bench script, and the API response/read models.
"""

from pydantic import BaseModel, ConfigDict, Field
from typing import Optional

from app.schemas.hardware import HardwareData


class CPUResults(BaseModel):
    """CPU benchmark results"""

    events_per_sec: Optional[float] = None
    duration_s: Optional[float] = None
    score: Optional[float] = None


class MemoryResults(BaseModel):
    """Memory benchmark results"""

    throughput_mib_s: Optional[float] = None
    score: Optional[float] = None


class DiskResults(BaseModel):
    """Disk benchmark results"""

    read_mb_s: Optional[float] = None
    write_mb_s: Optional[float] = None
    iops_read: Optional[int] = None
    iops_write: Optional[int] = None
    latency_ms: Optional[float] = None
    score: Optional[float] = None


class NetworkResults(BaseModel):
    """Network benchmark results"""

    upload_mbps: Optional[float] = None
    download_mbps: Optional[float] = None
    ping_ms: Optional[float] = None
    jitter_ms: Optional[float] = None
    packet_loss_percent: Optional[float] = None
    score: Optional[float] = None


class GPUResults(BaseModel):
    """GPU benchmark results"""

    glmark2_score: Optional[int] = None
    score: Optional[float] = None


class BenchmarkResults(BaseModel):
    """Complete benchmark results"""

    # Each category is optional: a run may skip benchmarks (e.g. no GPU).
    cpu: Optional[CPUResults] = None
    memory: Optional[MemoryResults] = None
    disk: Optional[DiskResults] = None
    network: Optional[NetworkResults] = None
    gpu: Optional[GPUResults] = None
    global_score: float = Field(..., ge=0, le=100, description="Global score (0-100)")


class BenchmarkPayload(BaseModel):
    """Complete benchmark payload from client script"""

    device_identifier: str = Field(..., min_length=1, max_length=255)
    bench_script_version: str = Field(..., min_length=1, max_length=50)
    hardware: HardwareData
    results: BenchmarkResults


class BenchmarkResponse(BaseModel):
    """Response after successful benchmark submission"""

    status: str = "ok"
    device_id: int
    benchmark_id: int
    message: Optional[str] = None


class BenchmarkDetail(BaseModel):
    """Detailed benchmark information"""

    # ConfigDict replaces the deprecated pydantic-v1-style `class Config`;
    # from_attributes enables construction from ORM objects.
    model_config = ConfigDict(from_attributes=True)

    id: int
    device_id: int
    hardware_snapshot_id: int
    run_at: str
    bench_script_version: str
    global_score: float
    cpu_score: Optional[float] = None
    memory_score: Optional[float] = None
    disk_score: Optional[float] = None
    network_score: Optional[float] = None
    gpu_score: Optional[float] = None
    details: dict  # details_json parsed — presumably the raw per-category results; verify against the ORM model


class BenchmarkSummary(BaseModel):
    """Summary benchmark information for lists"""

    model_config = ConfigDict(from_attributes=True)

    id: int
    run_at: str
    global_score: float
    cpu_score: Optional[float] = None
    memory_score: Optional[float] = None
    disk_score: Optional[float] = None
    network_score: Optional[float] = None
    gpu_score: Optional[float] = None
    bench_script_version: Optional[str] = None