NETWORK_SETUP.md (new file, 203 lines)
@@ -0,0 +1,203 @@
# Network Setup - Linux BenchTools

Server deployed at: **10.0.1.97**

## 🌐 Service access

| Service | URL | Status |
|---------|-----|--------|
| Dashboard | http://10.0.1.97:8087 | ✅ Accessible |
| Backend API | http://10.0.1.97:8007 | ✅ Accessible |
| API Docs | http://10.0.1.97:8007/docs | ✅ Accessible |
| bench.sh script | http://10.0.1.97:8087/scripts/bench.sh | ✅ Accessible |

## 🔑 API token

```
29855796dacf5cfe75ff9b02d6adf3dd0f9c52db5b53e7abfb4c0df7ece1be0a
```
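
The bench.sh client passes this token to the backend as a `Bearer` header. A quick manual check (a sketch only: the empty payload will most likely be rejected by the API's validation, but a `401` specifically means the token is wrong, as described under Troubleshooting below):

```bash
TOKEN="29855796dacf5cfe75ff9b02d6adf3dd0f9c52db5b53e7abfb4c0df7ece1be0a"
# Any response other than 401 means the token was accepted; the empty body
# itself is expected to fail validation.
curl -i -X POST http://10.0.1.97:8007/api/benchmark \
  -H "Authorization: Bearer $TOKEN" \
  -H "Content-Type: application/json" \
  -d '{}'
```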

## 🚀 Benchmark commands

### Standard benchmark

```bash
curl -s http://10.0.1.97:8087/scripts/bench.sh | bash -s -- \
  --server http://10.0.1.97:8007/api/benchmark \
  --token "29855796dacf5cfe75ff9b02d6adf3dd0f9c52db5b53e7abfb4c0df7ece1be0a"
```
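
If you would rather not pipe a remote script straight into `bash`, the equivalent two-step form (download, review, then run; same options as the one-liner above) is:

```bash
# Fetch the script once, inspect it, then execute it locally
curl -sO http://10.0.1.97:8087/scripts/bench.sh
chmod +x bench.sh
./bench.sh \
  --server http://10.0.1.97:8007/api/benchmark \
  --token "29855796dacf5cfe75ff9b02d6adf3dd0f9c52db5b53e7abfb4c0df7ece1be0a"
```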

### Quick benchmark (recommended for testing)

```bash
curl -s http://10.0.1.97:8087/scripts/bench.sh | bash -s -- \
  --server http://10.0.1.97:8007/api/benchmark \
  --token "29855796dacf5cfe75ff9b02d6adf3dd0f9c52db5b53e7abfb4c0df7ece1be0a" \
  --short
```

### With network tests (iperf3)

```bash
curl -s http://10.0.1.97:8087/scripts/bench.sh | bash -s -- \
  --server http://10.0.1.97:8007/api/benchmark \
  --token "29855796dacf5cfe75ff9b02d6adf3dd0f9c52db5b53e7abfb4c0df7ece1be0a" \
  --iperf-server 10.0.1.97
```

### With a custom device name

```bash
curl -s http://10.0.1.97:8087/scripts/bench.sh | bash -s -- \
  --server http://10.0.1.97:8007/api/benchmark \
  --token "29855796dacf5cfe75ff9b02d6adf3dd0f9c52db5b53e7abfb4c0df7ece1be0a" \
  --device "elitedesk-800g3"
```

## 📋 Usage examples

### Testing on the local server

```bash
curl -s http://10.0.1.97:8087/scripts/bench.sh | bash -s -- \
  --server http://10.0.1.97:8007/api/benchmark \
  --token "29855796dacf5cfe75ff9b02d6adf3dd0f9c52db5b53e7abfb4c0df7ece1be0a" \
  --short
```

### Testing on a remote machine

```bash
# SSH into the machine
ssh user@192.168.1.100

# Run the benchmark
curl -s http://10.0.1.97:8087/scripts/bench.sh | bash -s -- \
  --server http://10.0.1.97:8007/api/benchmark \
  --token "29855796dacf5cfe75ff9b02d6adf3dd0f9c52db5b53e7abfb4c0df7ece1be0a" \
  --device "pc-bureau"
```

### Benchmark on a Raspberry Pi

```bash
# SSH into the Pi
ssh pi@raspberrypi.local

# Short benchmark (recommended for a Raspberry Pi)
curl -s http://10.0.1.97:8087/scripts/bench.sh | bash -s -- \
  --server http://10.0.1.97:8007/api/benchmark \
  --token "29855796dacf5cfe75ff9b02d6adf3dd0f9c52db5b53e7abfb4c0df7ece1be0a" \
  --device "raspberry-pi-4" \
  --short
```

## 🔧 Advanced options

### Skipping specific tests

```bash
# Skip GPU and network
curl -s http://10.0.1.97:8087/scripts/bench.sh | bash -s -- \
  --server http://10.0.1.97:8007/api/benchmark \
  --token "29855796dacf5cfe75ff9b02d6adf3dd0f9c52db5b53e7abfb4c0df7ece1be0a" \
  --skip-gpu \
  --skip-network
```

### Running only specific tests

```bash
# CPU and RAM only
curl -s http://10.0.1.97:8087/scripts/bench.sh | bash -s -- \
  --server http://10.0.1.97:8007/api/benchmark \
  --token "29855796dacf5cfe75ff9b02d6adf3dd0f9c52db5b53e7abfb4c0df7ece1be0a" \
  --skip-disk \
  --skip-network \
  --skip-gpu
```

## 🏠 iperf3 setup

To enable the network tests, start an iperf3 server:

```bash
# On the benchmarking server (10.0.1.97)
docker run -d --name iperf3-server -p 5201:5201 \
  --network host \
  networkstatic/iperf3 -s

# Then pass --iperf-server in your benchmark runs
```
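
If Docker is not available on the machine acting as the iperf3 endpoint, a plain iperf3 server works just as well (assuming the `iperf3` package is installed):

```bash
# Start iperf3 in server mode as a background daemon (default port 5201)
iperf3 -s -D
```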

## 📊 Checking the results

1. **Via the Dashboard**: http://10.0.1.97:8087
2. **Via the API**:
```bash
# List devices
curl http://10.0.1.97:8007/api/devices | jq .

# Global stats
curl http://10.0.1.97:8007/api/stats | jq .
```
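
A small convenience while benchmarks are running (assuming `watch` is installed) is to re-poll the stats endpoint periodically:

```bash
# Refresh the global stats every 10 seconds
watch -n 10 'curl -s http://10.0.1.97:8007/api/stats | jq .'
```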

## 🔒 Security

### Firewall (recommended)

```bash
# Allow only the local network
sudo ufw allow from 10.0.1.0/24 to any port 8007
sudo ufw allow from 10.0.1.0/24 to any port 8087
```
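
If the iperf3 server from the section above runs on the same host, the same local-network restriction can be applied to its port (5201 by default):

```bash
sudo ufw allow from 10.0.1.0/24 to any port 5201
```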

### Changing the token

```bash
# Generate a new token
NEW_TOKEN=$(openssl rand -hex 32)

# Update .env
sed -i "s/API_TOKEN=.*/API_TOKEN=$NEW_TOKEN/" .env

# Restart
docker compose restart backend
```

## 🐛 Troubleshooting

### The script does not download

```bash
# Check that the URL is reachable
curl -I http://10.0.1.97:8087/scripts/bench.sh

# Should return HTTP 200
```

### 401 error (Unauthorized)

Make sure you are passing the correct token in the command.
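
To see which token the backend currently expects, read it from the `.env` file on the server (this assumes the command is run from the project directory where `.env` lives):

```bash
grep '^API_TOKEN=' .env
```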

### Connection errors

```bash
# Check that the backend is reachable
curl http://10.0.1.97:8007/api/health

# Check the containers
docker compose ps
```

## 📝 Notes

- The API token is stored in `.env`
- Data is stored in `backend/data/data.db`
- Uploaded documents live in `uploads/`
- Logs are available via `docker compose logs -f`

---

**Server operational at 10.0.1.97** ✅
Last updated: 7 December 2025
@@ -1,4 +1,4 @@
version: "3.9"
#version: "3.9"

services:
  backend:
@@ -24,7 +24,6 @@ services:
      - "${FRONTEND_PORT:-8087}:80"
    volumes:
      - ./frontend:/usr/share/nginx/html:ro
      - ./scripts:/usr/share/nginx/html/scripts:ro
    restart: unless-stopped
    networks:
      - benchtools

frontend/scripts/bench.sh (new executable file, 485 lines)
@@ -0,0 +1,485 @@
#!/usr/bin/env bash

#
# Linux BenchTools - Client Benchmark Script
# Version: 1.0.0
#
# This script collects hardware information and runs benchmarks on Linux machines,
# then sends the results to the BenchTools backend API.
#

set -e

# Script version
BENCH_SCRIPT_VERSION="1.0.0"

# Default values
SERVER_URL=""
API_TOKEN=""
DEVICE_IDENTIFIER=""
IPERF_SERVER=""
SHORT_MODE=false
SKIP_CPU=false
SKIP_MEMORY=false
SKIP_DISK=false
SKIP_NETWORK=false
SKIP_GPU=false

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Logging functions
log_info() {
    echo -e "${GREEN}[INFO]${NC} $1"
}

log_warn() {
    echo -e "${YELLOW}[WARN]${NC} $1"
}

log_error() {
    echo -e "${RED}[ERROR]${NC} $1"
}

# Usage information
show_usage() {
    cat <<EOF
Linux BenchTools - Client Benchmark Script v${BENCH_SCRIPT_VERSION}

Usage: $0 --server <URL> --token <TOKEN> [OPTIONS]

Required:
  --server <URL>         Backend API URL (e.g., http://server:8007/api/benchmark)
  --token <TOKEN>        API authentication token

Optional:
  --device <NAME>        Device identifier (default: hostname)
  --iperf-server <HOST>  iperf3 server for network tests
  --short                Run quick tests (reduced duration)
  --skip-cpu             Skip CPU benchmark
  --skip-memory          Skip memory benchmark
  --skip-disk            Skip disk benchmark
  --skip-network         Skip network benchmark
  --skip-gpu             Skip GPU benchmark
  --help                 Show this help message

Example:
  $0 --server http://192.168.1.100:8007/api/benchmark \\
     --token YOUR_TOKEN \\
     --iperf-server 192.168.1.100

EOF
}

# Parse command line arguments
parse_args() {
    while [[ $# -gt 0 ]]; do
        case $1 in
            --server)
                SERVER_URL="$2"
                shift 2
                ;;
            --token)
                API_TOKEN="$2"
                shift 2
                ;;
            --device)
                DEVICE_IDENTIFIER="$2"
                shift 2
                ;;
            --iperf-server)
                IPERF_SERVER="$2"
                shift 2
                ;;
            --short)
                SHORT_MODE=true
                shift
                ;;
            --skip-cpu)
                SKIP_CPU=true
                shift
                ;;
            --skip-memory)
                SKIP_MEMORY=true
                shift
                ;;
            --skip-disk)
                SKIP_DISK=true
                shift
                ;;
            --skip-network)
                SKIP_NETWORK=true
                shift
                ;;
            --skip-gpu)
                SKIP_GPU=true
                shift
                ;;
            --help)
                show_usage
                exit 0
                ;;
            *)
                log_error "Unknown option: $1"
                show_usage
                exit 1
                ;;
        esac
    done

    # Validate required parameters
    if [[ -z "$SERVER_URL" || -z "$API_TOKEN" ]]; then
        log_error "Missing required parameters: --server and --token"
        show_usage
        exit 1
    fi

    # Set device identifier to hostname if not provided
    if [[ -z "$DEVICE_IDENTIFIER" ]]; then
        DEVICE_IDENTIFIER=$(hostname)
    fi
}

# Check dependencies (no automatic installation)
check_dependencies() {
    log_info "Checking dependencies..."

    local missing_essential=()
    local missing_optional=()

    # Essential tools (required for hardware detection)
    for tool in curl jq; do
        if ! command -v $tool &> /dev/null; then
            missing_essential+=($tool)
        fi
    done

    # Optional hardware detection tools
    for tool in lscpu free dmidecode lsblk; do
        if ! command -v $tool &> /dev/null; then
            missing_optional+=($tool)
        fi
    done

    # Optional benchmark tools
    if [[ "$SKIP_CPU" == false ]] && ! command -v sysbench &> /dev/null; then
        missing_optional+=(sysbench)
        SKIP_CPU=true
        log_warn "sysbench not found - CPU benchmark will be skipped"
    fi

    if [[ "$SKIP_DISK" == false ]] && ! command -v fio &> /dev/null; then
        missing_optional+=(fio)
        SKIP_DISK=true
        log_warn "fio not found - Disk benchmark will be skipped"
    fi

    if [[ "$SKIP_NETWORK" == false && -n "$IPERF_SERVER" ]] && ! command -v iperf3 &> /dev/null; then
        missing_optional+=(iperf3)
        SKIP_NETWORK=true
        log_warn "iperf3 not found - Network benchmark will be skipped"
    fi

    # Check essential dependencies
    if [[ ${#missing_essential[@]} -gt 0 ]]; then
        log_error "Missing essential dependencies: ${missing_essential[*]}"
        log_error "Please install them manually. Example (Debian/Ubuntu):"
        log_error "  sudo apt-get install ${missing_essential[*]}"
        exit 1
    fi

    # Info about optional dependencies
    if [[ ${#missing_optional[@]} -gt 0 ]]; then
        log_warn "Missing optional tools: ${missing_optional[*]}"
        log_info "Some hardware info or benchmarks may be limited"
        log_info "To install (Debian/Ubuntu): sudo apt-get install ${missing_optional[*]}"
    fi

    log_info "Dependency check completed"
}

# Collect CPU information
collect_cpu_info() {
    local cpu_json="{}"

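    # Note: lscpu's "CPU(s)" field counts logical CPUs (the same thing nproc
    # reports), so "cores" and "threads" usually end up with the same value.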
    cpu_json=$(jq -n \
        --arg vendor "$(lscpu | grep 'Vendor ID' | awk '{print $3}' || echo 'Unknown')" \
        --arg model "$(lscpu | grep 'Model name' | sed 's/Model name: *//')" \
        --argjson cores "$(lscpu | grep '^CPU(s):' | awk '{print $2}')" \
        --argjson threads "$(nproc)" \
        '{
            vendor: $vendor,
            model: $model,
            cores: $cores,
            threads: $threads
        }'
    )

    echo "$cpu_json"
}

# Collect RAM information
collect_ram_info() {
    local ram_total_mb=$(free -m | grep '^Mem:' | awk '{print $2}')

    local ram_json=$(jq -n \
        --argjson total_mb "$ram_total_mb" \
        '{
            total_mb: $total_mb
        }'
    )

    echo "$ram_json"
}

# Collect OS information
collect_os_info() {
    local os_name=$(grep '^ID=' /etc/os-release | cut -d= -f2 | tr -d '"')
    local os_version=$(grep '^VERSION=' /etc/os-release | cut -d= -f2 | tr -d '"')
    local kernel=$(uname -r)
    local arch=$(uname -m)

    local os_json=$(jq -n \
        --arg name "$os_name" \
        --arg version "$os_version" \
        --arg kernel_version "$kernel" \
        --arg architecture "$arch" \
        '{
            name: $name,
            version: $version,
            kernel_version: $kernel_version,
            architecture: $architecture
        }'
    )

    echo "$os_json"
}

# Run CPU benchmark
run_cpu_benchmark() {
    if [[ "$SKIP_CPU" == true ]]; then
        echo "null"
        return
    fi

    log_info "Running CPU benchmark..."

    local prime=10000
    [[ "$SHORT_MODE" == false ]] && prime=20000

    local result=$(sysbench cpu --cpu-max-prime=$prime --threads=$(nproc) run 2>&1)

    local events_per_sec=$(echo "$result" | grep 'events per second' | awk '{print $4}')
    local score=$(echo "scale=2; $events_per_sec / 100" | bc)

    local cpu_result=$(jq -n \
        --argjson events_per_sec "${events_per_sec:-0}" \
        --argjson score "${score:-0}" \
        '{
            events_per_sec: $events_per_sec,
            score: $score
        }'
    )

    echo "$cpu_result"
}

# Run memory benchmark
run_memory_benchmark() {
    if [[ "$SKIP_MEMORY" == true ]]; then
        echo "null"
        return
    fi

    log_info "Running memory benchmark..."

    local size="512M"
    [[ "$SHORT_MODE" == false ]] && size="2G"

    local result=$(sysbench memory --memory-total-size=$size --memory-oper=write run 2>&1)

    # sysbench prints "... MiB transferred (NNN.NN MiB/sec)"; strip the "(" so the rate parses as a number
    local throughput=$(echo "$result" | grep 'transferred' | awk '{print $4}' | tr -d '(')
    local score=$(echo "scale=2; $throughput / 200" | bc)

    local mem_result=$(jq -n \
        --argjson throughput_mib_s "${throughput:-0}" \
        --argjson score "${score:-0}" \
        '{
            throughput_mib_s: $throughput_mib_s,
            score: $score
        }'
    )

    echo "$mem_result"
}

# Run disk benchmark
run_disk_benchmark() {
    if [[ "$SKIP_DISK" == true ]]; then
        echo "null"
        return
    fi

    log_info "Running disk benchmark..."

    local size="256M"
    [[ "$SHORT_MODE" == false ]] && size="1G"

    local tmpfile="/tmp/fio_benchfile_$$"

    fio --name=bench --rw=readwrite --bs=1M --size=$size --numjobs=1 \
        --iodepth=16 --filename=$tmpfile --direct=1 --group_reporting \
        --output-format=json > /tmp/fio_result_$$.json 2>/dev/null  # keep stderr out of the JSON output

    local read_mb_s=$(jq -r '.jobs[0].read.bw_bytes / 1048576' /tmp/fio_result_$$.json 2>/dev/null || echo 0)
    local write_mb_s=$(jq -r '.jobs[0].write.bw_bytes / 1048576' /tmp/fio_result_$$.json 2>/dev/null || echo 0)
    local score=$(echo "scale=2; ($read_mb_s + $write_mb_s) / 20" | bc)

    rm -f $tmpfile /tmp/fio_result_$$.json

    local disk_result=$(jq -n \
        --argjson read_mb_s "${read_mb_s:-0}" \
        --argjson write_mb_s "${write_mb_s:-0}" \
        --argjson score "${score:-0}" \
        '{
            read_mb_s: $read_mb_s,
            write_mb_s: $write_mb_s,
            score: $score
        }'
    )

    echo "$disk_result"
}

# Run network benchmark
run_network_benchmark() {
    if [[ "$SKIP_NETWORK" == true || -z "$IPERF_SERVER" ]]; then
        echo "null"
        return
    fi

    log_info "Running network benchmark..."

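    # iperf3 -R reverses the test direction (server sends to this client), which
    # measures download; the second run without -R measures upload. -J requests
    # JSON output so the rates can be extracted with jq.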
    local download=$(iperf3 -c "$IPERF_SERVER" -R -J 2>/dev/null | jq -r '.end.sum_received.bits_per_second / 1000000' 2>/dev/null || echo 0)
    local upload=$(iperf3 -c "$IPERF_SERVER" -J 2>/dev/null | jq -r '.end.sum_sent.bits_per_second / 1000000' 2>/dev/null || echo 0)

    local score=$(echo "scale=2; ($download + $upload) / 20" | bc)

    local net_result=$(jq -n \
        --argjson download_mbps "${download:-0}" \
        --argjson upload_mbps "${upload:-0}" \
        --argjson score "${score:-0}" \
        '{
            download_mbps: $download_mbps,
            upload_mbps: $upload_mbps,
            score: $score
        }'
    )

    echo "$net_result"
}

# Calculate global score
calculate_global_score() {
    local cpu_score=$1
    local mem_score=$2
    local disk_score=$3
    local net_score=$4

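    # Weighted sum of the sub-scores: CPU 30%, memory 20%, disk 25%, network 15%.
    # GPU is not scored by this script, so the weights do not add up to 1.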
    local global=$(echo "scale=2; ($cpu_score * 0.3) + ($mem_score * 0.2) + ($disk_score * 0.25) + ($net_score * 0.15)" | bc)

    echo "$global"
}

# Build and send JSON payload
send_benchmark() {
    log_info "Collecting hardware information..."

    local cpu_info=$(collect_cpu_info)
    local ram_info=$(collect_ram_info)
    local os_info=$(collect_os_info)

    log_info "Running benchmarks..."

    local cpu_result=$(run_cpu_benchmark)
    local memory_result=$(run_memory_benchmark)
    local disk_result=$(run_disk_benchmark)
    local network_result=$(run_network_benchmark)

    # Extract scores
    local cpu_score=$(echo "$cpu_result" | jq -r '.score // 0' 2>/dev/null || echo 0)
    local mem_score=$(echo "$memory_result" | jq -r '.score // 0' 2>/dev/null || echo 0)
    local disk_score=$(echo "$disk_result" | jq -r '.score // 0' 2>/dev/null || echo 0)
    local net_score=$(echo "$network_result" | jq -r '.score // 0' 2>/dev/null || echo 0)

    # Calculate global score
    local global_score=$(calculate_global_score "$cpu_score" "$mem_score" "$disk_score" "$net_score")

    log_info "Building JSON payload..."

    local payload=$(jq -n \
        --arg device_identifier "$DEVICE_IDENTIFIER" \
        --arg bench_script_version "$BENCH_SCRIPT_VERSION" \
        --argjson cpu "$cpu_info" \
        --argjson ram "$ram_info" \
        --argjson os "$os_info" \
        --argjson cpu_result "$cpu_result" \
        --argjson memory_result "$memory_result" \
        --argjson disk_result "$disk_result" \
        --argjson network_result "$network_result" \
        --argjson global_score "$global_score" \
        '{
            device_identifier: $device_identifier,
            bench_script_version: $bench_script_version,
            hardware: {
                cpu: $cpu,
                ram: $ram,
                os: $os
            },
            results: {
                cpu: $cpu_result,
                memory: $memory_result,
                disk: $disk_result,
                network: $network_result,
                gpu: null,
                global_score: $global_score
            }
        }'
    )

    log_info "Sending results to server..."

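    # -w appends the HTTP status code on a line of its own; it is split off from
    # the response body below to decide whether the submission succeeded.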
    local response=$(curl -s -w "\n%{http_code}" \
        -X POST "$SERVER_URL" \
        -H "Content-Type: application/json" \
        -H "Authorization: Bearer $API_TOKEN" \
        -d "$payload")

    local http_code=$(echo "$response" | tail -n1)
    local body=$(echo "$response" | head -n-1)

    if [[ "$http_code" == "200" ]]; then
        log_info "Benchmark submitted successfully!"
        log_info "Response: $body"
    else
        log_error "Failed to submit benchmark (HTTP $http_code)"
        log_error "Response: $body"
        exit 1
    fi
}

# Main execution
main() {
    log_info "Linux BenchTools Client v${BENCH_SCRIPT_VERSION}"

    parse_args "$@"
    check_dependencies
    send_benchmark

    log_info "Benchmark completed!"
}

main "$@"
@@ -143,47 +143,62 @@ parse_args() {
    fi
}

# Check and install required tools
# Check dependencies (no automatic installation)
check_dependencies() {
    log_info "Checking dependencies..."

    local missing_deps=()
    local missing_essential=()
    local missing_optional=()

    # Essential tools
    for tool in curl jq lscpu free dmidecode lsblk; do
    # Essential tools (required for hardware detection)
    for tool in curl jq; do
        if ! command -v $tool &> /dev/null; then
            missing_deps+=($tool)
            missing_essential+=($tool)
        fi
    done

    # Benchmark tools
    # Optional hardware detection tools
    for tool in lscpu free dmidecode lsblk; do
        if ! command -v $tool &> /dev/null; then
            missing_optional+=($tool)
        fi
    done

    # Optional benchmark tools
    if [[ "$SKIP_CPU" == false ]] && ! command -v sysbench &> /dev/null; then
        missing_deps+=(sysbench)
        missing_optional+=(sysbench)
        SKIP_CPU=true
        log_warn "sysbench not found - CPU benchmark will be skipped"
    fi

    if [[ "$SKIP_DISK" == false ]] && ! command -v fio &> /dev/null; then
        missing_deps+=(fio)
        missing_optional+=(fio)
        SKIP_DISK=true
        log_warn "fio not found - Disk benchmark will be skipped"
    fi

    if [[ "$SKIP_NETWORK" == false && -n "$IPERF_SERVER" ]] && ! command -v iperf3 &> /dev/null; then
        missing_deps+=(iperf3)
        missing_optional+=(iperf3)
        SKIP_NETWORK=true
        log_warn "iperf3 not found - Network benchmark will be skipped"
    fi

    # Try to install missing dependencies
    if [[ ${#missing_deps[@]} -gt 0 ]]; then
        log_warn "Missing dependencies: ${missing_deps[*]}"

        if [[ -f /etc/debian_version ]]; then
            log_info "Attempting to install dependencies (requires sudo)..."
            sudo apt-get update -qq
            sudo apt-get install -y ${missing_deps[@]}
        else
            log_error "Unable to install dependencies automatically. Please install: ${missing_deps[*]}"
            exit 1
        fi
    # Check essential dependencies
    if [[ ${#missing_essential[@]} -gt 0 ]]; then
        log_error "Missing essential dependencies: ${missing_essential[*]}"
        log_error "Please install them manually. Example (Debian/Ubuntu):"
        log_error "  sudo apt-get install ${missing_essential[*]}"
        exit 1
    fi

    log_info "All dependencies satisfied"
    # Info about optional dependencies
    if [[ ${#missing_optional[@]} -gt 0 ]]; then
        log_warn "Missing optional tools: ${missing_optional[*]}"
        log_info "Some hardware info or benchmarks may be limited"
        log_info "To install (Debian/Ubuntu): sudo apt-get install ${missing_optional[*]}"
    fi

    log_info "Dependency check completed"
}

# Collect CPU information