2026-01-11 23:40:18 +01:00
parent 29a99c55e7
commit 6452144fc0
5 changed files with 32278 additions and 16300 deletions

Binary file not shown.

File diff suppressed because one or more lines are too long

debug.log (30285 changed lines)
File diff suppressed because one or more lines are too long

debug.txt (executable file, 18018 changed lines)
File diff suppressed because one or more lines are too long

main.go (143 changed lines)

@@ -8,6 +8,7 @@ import (
 	"flag"
 	"fmt"
 	"io"
+	"math"
 	"net/http"
 	"os"
 	"os/exec"
@@ -375,6 +376,11 @@ type DirectDiskMetrics struct {
 	ReadMBs float64
 }
+
+type CacheSummary struct {
+	Sizes  map[int]int
+	Counts map[int]int
+}
 
 type NetResult struct {
 	Upload float64 `json:"upload_mbps"`
 	Download float64 `json:"download_mbps"`
@@ -657,22 +663,32 @@ func collectCPUInfo(ctx context.Context) CPUDetails {
 			info.CacheL3KB = l3
 		}
 	}
-	if info.CacheL1KB == 0 && info.CacheL2KB == 0 && info.CacheL3KB == 0 {
-		info.CacheL1KB, info.CacheL2KB, info.CacheL3KB = collectCacheSizes(ctx, info.Cores)
+	cacheSummary := collectCacheSizes(ctx, info.Cores)
+	if info.CacheL1KB == 0 && cacheSummary.Sizes[1] > 0 {
+		info.CacheL1KB = cacheSummary.Sizes[1]
 	}
-	applyCpuidCacheInfo(&info)
+	if info.CacheL2KB == 0 && cacheSummary.Sizes[2] > 0 {
+		info.CacheL2KB = cacheSummary.Sizes[2]
+	}
+	if info.CacheL3KB == 0 && cacheSummary.Sizes[3] > 0 {
+		info.CacheL3KB = cacheSummary.Sizes[3]
+	}
+	applyCpuidCacheInfo(&info, cacheSummary)
 	info.Microarchitecture = detectMicroarchitecture(first.ModelName, first.Family)
 	info.BaseFreqGHz, info.MaxFreqGHz = determineCPUFreqs(ctx, first)
 	return info
 }
 
-func collectCacheSizes(ctx context.Context, physicalCores int) (l1, l2, l3 int) {
+func collectCacheSizes(ctx context.Context, physicalCores int) CacheSummary {
 	cacheDir := "/sys/devices/system/cpu/cpu0/cache"
 	entries, err := os.ReadDir(cacheDir)
-	if err != nil {
-		return 0, 0, 0
+	summary := CacheSummary{
+		Sizes:  map[int]int{},
+		Counts: map[int]int{},
 	}
+	if err != nil {
+		return summary
+	}
-	perLevel := map[int]int{}
 	for _, entry := range entries {
 		if !strings.HasPrefix(entry.Name(), "index") {
 			continue
@@ -685,16 +701,14 @@ func collectCacheSizes(ctx context.Context, physicalCores int)
 		sizePath := filepath.Join(cacheDir, entry.Name(), "size")
 		sizeKB := parseCacheSize(readStringFromFile(sizePath))
 		if sizeKB > 0 {
-			perLevel[level] += sizeKB
+			summary.Sizes[level] += sizeKB
+			summary.Counts[level]++
 		}
 	}
-	l1 = perLevel[1]
-	l2 = perLevel[2]
-	l3 = perLevel[3]
-	return l1, l2, l3
+	return summary
 }
 
-func applyCpuidCacheInfo(info *CPUDetails) {
+func applyCpuidCacheInfo(info *CPUDetails, summary CacheSummary) {
 	cache := cpuid.CPU.Cache
 	if cache.L1I < 0 && cache.L1D < 0 && cache.L2 < 0 && cache.L3 < 0 {
 		return
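To illustrate the new CacheSummary aggregation above: a hypothetical cpu0 exposing four sysfs cache entries (L1d 32K, L1i 32K, L2 512K, L3 16384K) would be summed per level as in this standalone sketch. The entry values are invented; only the accumulation mirrors collectCacheSizes.

package main

import "fmt"

// CacheSummary mirrors the type added in this commit: per-level cache size in KB
// plus how many cpu0 cache entries were seen at that level.
type CacheSummary struct {
	Sizes  map[int]int
	Counts map[int]int
}

func main() {
	// Invented cpu0 entries: index0 L1d 32K, index1 L1i 32K, index2 L2 512K, index3 L3 16384K.
	entries := []struct{ level, sizeKB int }{{1, 32}, {1, 32}, {2, 512}, {3, 16384}}

	summary := CacheSummary{Sizes: map[int]int{}, Counts: map[int]int{}}
	for _, e := range entries {
		summary.Sizes[e.level] += e.sizeKB
		summary.Counts[e.level]++
	}
	fmt.Println(summary.Sizes)  // map[1:64 2:512 3:16384]
	fmt.Println(summary.Counts) // map[1:2 2:1 3:1]
}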
@@ -706,6 +720,9 @@ func applyCpuidCacheInfo(info *CPUDetails) {
 			cores = 1
 		}
 	}
+	if summary.Counts == nil {
+		summary.Counts = map[int]int{}
+	}
 	l1Bytes := 0
 	if cache.L1I > 0 {
 		l1Bytes += cache.L1I
@@ -717,10 +734,18 @@ func applyCpuidCacheInfo(info *CPUDetails) {
 		info.CacheL1KB = int(int64(l1Bytes) * int64(cores) / 1024)
 	}
 	if cache.L2 > 0 && info.CacheL2KB == 0 {
-		info.CacheL2KB = int(int64(cache.L2) * int64(cores) / 1024)
+		l2Slices := summary.Counts[2]
+		if l2Slices <= 0 {
+			l2Slices = cores
+		}
+		info.CacheL2KB = maxInt(info.CacheL2KB, int((int64(cache.L2)*int64(l2Slices))/1024))
 	}
 	if cache.L3 > 0 && info.CacheL3KB == 0 {
-		info.CacheL3KB = int(cache.L3 / 1024)
+		l3Slices := summary.Counts[3]
+		if l3Slices <= 0 {
+			l3Slices = 1
+		}
+		info.CacheL3KB = maxInt(info.CacheL3KB, int((int64(cache.L3)*int64(l3Slices))/1024))
 	}
 }
 
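The slice-count scaling above assumes the cpuid package reports per-core cache sizes in bytes (the klauspost/cpuid convention, -1 when unknown); that convention is my assumption, not stated in the diff. Under it, a hypothetical 512 KiB per-core L2 counted across 8 slices works out as follows.

package main

import "fmt"

// Same helper this commit adds further down in the diff.
func maxInt(a, b int) int {
	if b > a {
		return b
	}
	return a
}

func main() {
	// Assumption: cpuid reports per-core cache sizes in bytes.
	cacheL2 := 524288 // 512 KiB per core, invented
	l2Slices := 8     // L2 entries counted from sysfs, invented
	currentKB := 0    // nothing detected from /proc or sysfs yet

	totalKB := maxInt(currentKB, int((int64(cacheL2)*int64(l2Slices))/1024))
	fmt.Println(totalKB) // 4096 KB total across 8 slices
}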
@@ -797,7 +822,7 @@ func determineCPUFreqs(ctx context.Context, info cpu.InfoStat) (float64, float64
 			max = maxSpeed / 1000
 		}
 	}
-	return base, max
+	return round2(base), round2(max)
 }
 
 func parseDMIDecodeSpeed(output string, re *regexp.Regexp) float64 {
@@ -888,6 +913,16 @@ func collectRAMInfo(ctx context.Context) RAMDetails {
 	}
 	slotsTotal, slotsUsed, maxCapacity, ecc, layout := collectSMBIOSMemory()
+	if len(layout) > 0 {
+		correctSlots := len(layout)
+		if slotsTotal == 0 || slotsTotal < correctSlots || slotsTotal > correctSlots*4 {
+			debugf("Correction RAM slots_total=%d => %d (layout=%d)", slotsTotal, correctSlots, len(layout))
+			slotsTotal = correctSlots
+		}
+		if slotsUsed == 0 {
+			slotsUsed = len(layout)
+		}
+	}
 	if slotsTotal > 0 {
 		res.SlotsTotal = slotsTotal
 	}
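A worked example of the new plausibility check, with invented numbers: SMBIOS claiming 64 slots while only 2 memory modules appear in layout exceeds the 4-slots-per-module bound, so slots_total is corrected down to 2.

package main

import "fmt"

func main() {
	// Invented inputs: SMBIOS claims 64 slots, but only 2 memory-device entries were parsed.
	slotsTotal, modules := 64, 2

	// Same bound as above: accept at most 4 reported slots per detected module.
	if slotsTotal == 0 || slotsTotal < modules || slotsTotal > modules*4 {
		slotsTotal = modules
	}
	fmt.Println(slotsTotal) // 2
}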
@@ -901,14 +936,18 @@ func collectRAMInfo(ctx context.Context) RAMDetails {
 	res.Layout = layout
 	if res.MaxCapacityMB == 0 {
+		debugf("RAM fallback max capacity: SMBIOS=0 => using total=%dMB", res.TotalMB)
 		res.MaxCapacityMB = res.TotalMB
 	}
 	if res.SlotsTotal == 0 && len(layout) > 0 {
+		debugf("RAM fallback slots total: SMBIOS=0 => %d modules detected", len(layout))
 		res.SlotsTotal = len(layout)
 	}
 	if res.SlotsUsed == 0 && len(layout) > 0 {
+		debugf("RAM fallback slots used: SMBIOS=0 => %d modules detected", len(layout))
 		res.SlotsUsed = len(layout)
 	}
+	debugf("RAM result: max=%dMB slotsTotal=%d slotsUsed=%d layout=%d modules", res.MaxCapacityMB, res.SlotsTotal, res.SlotsUsed, len(layout))
 	return res
 }
@@ -964,19 +1003,20 @@ func parseSMBIOSPhysicalMemoryArray(info *smbios.Info) (slotsTotal int, ecc bool
 		if table.Type != smbiosTableTypePhysicalMemoryArray {
 			continue
 		}
-		if val, err := table.GetByteAt(11); err == nil && val > 0 {
+		if val, err := table.GetByteAt(13); err == nil && val > 0 {
+			debugf("[SMBIOS RAW] Handle=%s slots_byte=0x%X (%d)", table.Handle, val, val)
 			slotsTotal = int(val)
 		}
 		if val, err := table.GetByteAt(6); err == nil {
 			ecc = val != 0 && val != 3
 		}
-		if val, err := table.GetWordAt(7); err == nil {
-			if val >= 0x8000 && table.Len() >= 0x10 {
+		if dw, err := table.GetDWordAt(7); err == nil {
+			if dw == 0x80000000 && table.Len() >= 0x10 {
 				if ext, err := table.GetDWordAt(12); err == nil && ext > 0 {
 					maxCapacityMB = int(ext / (1024 * 1024))
 				}
-			} else if val > 0 {
-				maxCapacityMB = int(val)
+			} else if dw > 0 {
+				maxCapacityMB = int(dw / 1024)
 			}
 		}
 		break
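The unit handling above follows my reading of the SMBIOS Type 16 (Physical Memory Array) layout: Maximum Capacity is a 32-bit value in kilobytes, and the sentinel 0x80000000 redirects to the Extended Maximum Capacity field, which is in bytes; hence the divisions by 1024 and 1024*1024 to reach MB. Sample conversions with invented readings:

package main

import "fmt"

func main() {
	// Maximum Capacity DWORD, in KB per my reading of the spec (invented value: 32 GiB).
	dw := uint32(0x02000000)
	fmt.Println(int(dw / 1024)) // 32768 MB

	// Extended Maximum Capacity, in bytes (invented value: 64 GiB).
	ext := uint64(64) * 1024 * 1024 * 1024
	fmt.Println(int(ext / (1024 * 1024))) // 65536 MB
}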
@@ -2140,12 +2180,12 @@ func runCPUBench(ctx context.Context) CPUResult {
 	singleEvents, singleDuration := runSysbenchCPU(ctx, "1")
 	multiThreads := resolveThreadCount(ctx)
 	multiEvents, multiDuration := runSysbenchCPU(ctx, multiThreads)
-	res.EventsPerSecSingle = singleEvents
-	res.EventsPerSecMulti = multiEvents
-	res.EventsPerSec = averagePositive(singleEvents, multiEvents)
-	res.DurationSec = singleDuration + multiDuration
-	res.ScoreSingle = singleEvents
-	res.ScoreMulti = multiEvents
+	res.EventsPerSecSingle = round2(singleEvents)
+	res.EventsPerSecMulti = round2(multiEvents)
+	res.EventsPerSec = round2(averagePositive(singleEvents, multiEvents))
+	res.DurationSec = round2(singleDuration + multiDuration)
+	res.ScoreSingle = res.EventsPerSecSingle
+	res.ScoreMulti = res.EventsPerSecMulti
 	res.Score = res.EventsPerSec
 	return res
 }
@@ -2211,13 +2251,13 @@ func runMemBench(ctx context.Context) MemResult {
 	if out, err := safeRun(ctx, "sysbench", args...); err == nil {
 		if matches := regexp.MustCompile(`([\d\.]+)\s+MiB/sec`).FindStringSubmatch(out); len(matches) > 1 {
 			if val, err := strconv.ParseFloat(matches[1], 64); err == nil {
-				res.Throughput = val
-				res.Score = val
+				res.Throughput = round2(val)
+				res.Score = res.Throughput
 			}
 		} else if matches := regexp.MustCompile(`transferred:\s+([\d\.]+)\s+MiB`).FindStringSubmatch(out); len(matches) > 1 {
 			if val, err := strconv.ParseFloat(matches[1], 64); err == nil {
-				res.Throughput = val
-				res.Score = val
+				res.Throughput = round2(val)
+				res.Score = res.Throughput
 			}
 		}
 	}
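For reference, the first regex above targets the throughput figure in sysbench memory output, which, in the versions I have seen, looks roughly like the invented sample below; the second regex is a fallback for output that only exposes the transferred total.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Invented sample of the sysbench memory summary line.
	out := "102400.00 MiB transferred (20480.51 MiB/sec)"

	re := regexp.MustCompile(`([\d\.]+)\s+MiB/sec`)
	if m := re.FindStringSubmatch(out); len(m) > 1 {
		fmt.Println(m[1]) // 20480.51
	}
}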
@@ -2266,7 +2306,12 @@ func runDiskBench(ctx context.Context) DiskResult {
 			res.IOPSWrite = convertFloat(write["iops"])
 		}
 	}
-	res.Score = averagePositive(res.ReadMBs, res.WriteMBs)
+	res.Score = round2(averagePositive(res.ReadMBs, res.WriteMBs))
+	res.ReadMBs = round2(res.ReadMBs)
+	res.WriteMBs = round2(res.WriteMBs)
+	res.LatencyMs = round2(res.LatencyMs)
+	res.IOPSRead = round2(res.IOPSRead)
+	res.IOPSWrite = round2(res.IOPSWrite)
 	debugf("[3/6] Final disk bench -> read=%.2f MB/s write=%.2f MB/s score=%.2f", res.ReadMBs, res.WriteMBs, res.Score)
 	return res
 }
@@ -2283,6 +2328,13 @@ func convertBW(value interface{}) float64 {
 	return 0
 }
+
+func maxInt(a, b int) int {
+	if b > a {
+		return b
+	}
+	return a
+}
 
 func runDirectDiskTests(ctx context.Context, benchDir string, mountInfo mountEntry) DirectDiskMetrics {
 	if benchDir == "" {
 		return DirectDiskMetrics{}
@@ -2334,8 +2386,8 @@ func runDirectDiskTests(ctx context.Context, benchDir string, mountInfo mountEnt
 	cleanupPaths(cleanup)
 	metrics := DirectDiskMetrics{
-		WriteMBs: safeThroughput(totalWriteMB, totalWriteDur),
-		ReadMBs: safeThroughput(totalReadMB, totalReadDur),
+		WriteMBs: round2(safeThroughput(totalWriteMB, totalWriteDur)),
+		ReadMBs: round2(safeThroughput(totalReadMB, totalReadDur)),
 	}
 	debugf("[3/6] Direct disk bench summary -> write=%.2f MB/s read=%.2f MB/s", metrics.WriteMBs, metrics.ReadMBs)
 	return metrics
@@ -2430,6 +2482,13 @@ func fillBuffer(buf []byte) {
 		buf[i] = byte(i % 256)
 	}
 }
+
+func round2(v float64) float64 {
+	if math.IsNaN(v) || math.IsInf(v, 0) {
+		return v
+	}
+	return math.Round(v*100) / 100
+}
 
 func convertFloat(value interface{}) float64 {
 	if v, ok := value.(float64); ok {
 		return v
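round2, added above, rounds to two decimals with math.Round (half away from zero) and passes NaN and infinities through unchanged. A quick usage sketch:

package main

import (
	"fmt"
	"math"
)

func round2(v float64) float64 {
	if math.IsNaN(v) || math.IsInf(v, 0) {
		return v
	}
	return math.Round(v*100) / 100
}

func main() {
	fmt.Println(round2(1234.5678))   // 1234.57
	fmt.Println(round2(math.Inf(1))) // +Inf is passed through untouched
}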
@@ -2472,23 +2531,27 @@ func runNetBench(ctx context.Context) NetResult {
 	}
 	if end, ok := iperfJSON["end"].(map[string]interface{}); ok {
 		if sumSent, ok := end["sum_sent"].(map[string]interface{}); ok {
-			res.Upload = convertFloat(sumSent["bits_per_second"]) / 1000 / 1000
+			if val := convertFloat(sumSent["bits_per_second"]); val > 0 {
+				res.Upload = round2(val / 1000 / 1000)
+			}
 			if val := convertFloat(sumSent["jitter_ms"]); val > 0 {
-				res.JitterMs = floatPtr(val)
+				res.JitterMs = floatPtr(round2(val))
 			}
 			if val := convertFloat(sumSent["lost_percent"]); val > 0 {
-				res.PacketLossPct = floatPtr(val)
+				res.PacketLossPct = floatPtr(round2(val))
 			}
 		}
 		if sumRecv, ok := end["sum_received"].(map[string]interface{}); ok {
-			res.Download = convertFloat(sumRecv["bits_per_second"]) / 1000 / 1000
+			if val := convertFloat(sumRecv["bits_per_second"]); val > 0 {
+				res.Download = round2(val / 1000 / 1000)
+			}
 		}
 	}
-	res.Score = averagePositive(res.Upload, res.Download)
+	res.Score = round2(averagePositive(res.Upload, res.Download))
 	// Ping test
 	if cfg.Benchmarks.Network.Server != "" {
 		if ping := measurePing(ctx, cfg.Benchmarks.Network.Server); ping > 0 {
-			res.PingMs = ping
+			res.PingMs = round2(ping)
 		}
 	}
 	return res
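For context on the parsing above: the field names end.sum_sent.bits_per_second and end.sum_received.bits_per_second match standard iperf3 --json output, and the code converts them to Mbps by dividing by 1000*1000. A minimal sketch with an abbreviated, invented report:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Heavily abbreviated iperf3 --json report with invented numbers.
	raw := []byte(`{"end":{"sum_sent":{"bits_per_second":948234567},"sum_received":{"bits_per_second":941112333}}}`)

	var report map[string]interface{}
	if err := json.Unmarshal(raw, &report); err != nil {
		panic(err)
	}
	end := report["end"].(map[string]interface{})
	sent := end["sum_sent"].(map[string]interface{})
	recv := end["sum_received"].(map[string]interface{})

	// Same Mbps conversion as runNetBench: bits per second divided by 1000*1000.
	fmt.Printf("upload=%.2f Mbps download=%.2f Mbps\n",
		sent["bits_per_second"].(float64)/1000/1000,
		recv["bits_per_second"].(float64)/1000/1000)
}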