Compare commits
44 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| c3b2eb2b4f | |||
| e4c40f7e80 | |||
| 6cc9ff7fc5 | |||
| 0aea6b96ca | |||
| afbf1450c2 | |||
| 6a278bc2cf | |||
| 9d1ce790d0 | |||
| fb5d4818b0 | |||
| 3a06920354 | |||
| dd8a6757d1 | |||
| d433a6a54e | |||
| c365988a52 | |||
| 6a1a985306 | |||
| 02996d6288 | |||
| 3d2671650e | |||
| 28658790c8 | |||
| 18f10a9295 | |||
| 67b7a08e4a | |||
| a014337167 | |||
| 3a5ee0a762 | |||
| 625a0244e2 | |||
| a269ba57df | |||
| 6a76b5aa26 | |||
| 939d40eb20 | |||
| ad738508e5 | |||
| 971249ba3f | |||
| 73417ca653 | |||
| a6d092983d | |||
| 1988b101e1 | |||
| 746ae76cfc | |||
| 3380023ad0 | |||
| 6362512406 | |||
| c6323fb7ce | |||
| 349c7d4def | |||
| 19ac712b78 | |||
| c95b272485 | |||
| 43231d7ec3 | |||
| 3f6537e94c | |||
| b021797919 | |||
| 7ad997cc0e | |||
| a3000fd6b0 | |||
| af59f2639c | |||
| b0ff0b3a48 | |||
| 56056b2d6a |
@@ -0,0 +1,25 @@
|
|||||||
|
services:
|
||||||
|
app:
|
||||||
|
image: mcr.microsoft.com/devcontainers/base:ubuntu-22.04
|
||||||
|
volumes:
|
||||||
|
- ..:/workspaces/scrutiny:cached
|
||||||
|
command: sleep infinity
|
||||||
|
network_mode: service:influxdb
|
||||||
|
|
||||||
|
influxdb:
|
||||||
|
image: influxdb:2.8
|
||||||
|
restart: unless-stopped
|
||||||
|
ports:
|
||||||
|
- "8086:8086"
|
||||||
|
environment:
|
||||||
|
- DOCKER_INFLUXDB_INIT_MODE=setup
|
||||||
|
- DOCKER_INFLUXDB_INIT_USERNAME=admin
|
||||||
|
- DOCKER_INFLUXDB_INIT_PASSWORD=password12345
|
||||||
|
- DOCKER_INFLUXDB_INIT_ORG=scrutiny
|
||||||
|
- DOCKER_INFLUXDB_INIT_BUCKET=metrics
|
||||||
|
- DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=my-super-secret-auth-token
|
||||||
|
volumes:
|
||||||
|
- scrutiny-influxdb-data:/var/lib/influxdb2
|
||||||
|
|
||||||
|
volumes:
|
||||||
|
scrutiny-influxdb-data:
|
||||||
@@ -0,0 +1,30 @@
|
|||||||
|
{
|
||||||
|
"name": "Scrutiny Dev (rootless docker)",
|
||||||
|
"dockerComposeFile": "../docker-compose.yml",
|
||||||
|
"service": "app",
|
||||||
|
"workspaceFolder": "/workspaces/scrutiny",
|
||||||
|
|
||||||
|
"features": {
|
||||||
|
"ghcr.io/devcontainers/features/go:1": "1.25",
|
||||||
|
"ghcr.io/devcontainers/features/node:1": "lts"
|
||||||
|
},
|
||||||
|
|
||||||
|
"onCreateCommand": "sudo apt-get update && sudo apt-get install -y smartmontools iputils-ping chromium-browser",
|
||||||
|
|
||||||
|
"customizations": {
|
||||||
|
"vscode": {
|
||||||
|
"extensions": [
|
||||||
|
"golang.go",
|
||||||
|
"dbaeumer.vscode-eslint",
|
||||||
|
"esbenp.prettier-vscode"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
|
||||||
|
"forwardPorts": [8080, 8086],
|
||||||
|
|
||||||
|
"postCreateCommand": "bash .devcontainer/setup.sh",
|
||||||
|
"remoteUser": "root",
|
||||||
|
"containerUser": "root",
|
||||||
|
"updateRemoteUserUID": false
|
||||||
|
}
|
||||||
@@ -0,0 +1,28 @@
|
|||||||
|
{
|
||||||
|
"name": "Scrutiny Dev (docker)",
|
||||||
|
"dockerComposeFile": "../docker-compose.yml",
|
||||||
|
"service": "app",
|
||||||
|
"workspaceFolder": "/workspaces/scrutiny",
|
||||||
|
|
||||||
|
"features": {
|
||||||
|
"ghcr.io/devcontainers/features/go:1": "1.25",
|
||||||
|
"ghcr.io/devcontainers/features/node:1": "lts"
|
||||||
|
},
|
||||||
|
|
||||||
|
"onCreateCommand": "sudo apt-get update && sudo apt-get install -y smartmontools iputils-ping chromium-browser",
|
||||||
|
|
||||||
|
"customizations": {
|
||||||
|
"vscode": {
|
||||||
|
"extensions": [
|
||||||
|
"golang.go",
|
||||||
|
"dbaeumer.vscode-eslint",
|
||||||
|
"esbenp.prettier-vscode"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
|
||||||
|
"forwardPorts": [8080, 8086],
|
||||||
|
|
||||||
|
"postCreateCommand": "bash .devcontainer/setup.sh",
|
||||||
|
"remoteUser": "vscode"
|
||||||
|
}
|
||||||
@@ -0,0 +1,32 @@
|
|||||||
|
{
|
||||||
|
"name": "Scrutiny Dev (podman)",
|
||||||
|
"dockerComposeFile": "../docker-compose.yml",
|
||||||
|
"service": "app",
|
||||||
|
"workspaceFolder": "/workspaces/scrutiny",
|
||||||
|
|
||||||
|
"features": {
|
||||||
|
"ghcr.io/devcontainers/features/go:1": "1.25",
|
||||||
|
"ghcr.io/devcontainers/features/node:1": "lts"
|
||||||
|
},
|
||||||
|
|
||||||
|
"onCreateCommand": "sudo apt-get update && sudo apt-get install -y smartmontools iputils-ping chromium-browser",
|
||||||
|
|
||||||
|
"customizations": {
|
||||||
|
"vscode": {
|
||||||
|
"extensions": [
|
||||||
|
"golang.go",
|
||||||
|
"dbaeumer.vscode-eslint",
|
||||||
|
"esbenp.prettier-vscode"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
|
||||||
|
"forwardPorts": [8080, 8086],
|
||||||
|
|
||||||
|
"postCreateCommand": "bash .devcontainer/setup.sh",
|
||||||
|
"remoteEnv": {
|
||||||
|
"PODMAN_USERNS": "keep-id"
|
||||||
|
},
|
||||||
|
"containerUser": "vscode",
|
||||||
|
"updateRemoteUserUID": true
|
||||||
|
}
|
||||||
Executable
+40
@@ -0,0 +1,40 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
echo "Starting Scrutiny Setup..."
|
||||||
|
|
||||||
|
if [ ! -f "scrutiny.yaml" ]; then
|
||||||
|
echo "Creating scrutiny.yaml from template..."
|
||||||
|
cat <<EOF > scrutiny.yaml
|
||||||
|
version: 1
|
||||||
|
web:
|
||||||
|
listen:
|
||||||
|
port: 8080
|
||||||
|
host: 0.0.0.0
|
||||||
|
database:
|
||||||
|
location: ./scrutiny.db
|
||||||
|
src:
|
||||||
|
frontend:
|
||||||
|
path: ./dist
|
||||||
|
influxdb:
|
||||||
|
retention_policy: false
|
||||||
|
token: "my-super-secret-auth-token"
|
||||||
|
org: "scrutiny"
|
||||||
|
bucket: "metrics"
|
||||||
|
host: "localhost"
|
||||||
|
port: 8086
|
||||||
|
log:
|
||||||
|
file: 'web.log'
|
||||||
|
level: DEBUG
|
||||||
|
EOF
|
||||||
|
else
|
||||||
|
echo "scrutiny.yaml already exists."
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "Vendoring Go modules..."
|
||||||
|
go mod vendor
|
||||||
|
|
||||||
|
echo "Installing Node modules..."
|
||||||
|
cd webapp/frontend
|
||||||
|
npm install
|
||||||
|
|
||||||
|
echo "Setup Complete! Ready to code."
|
||||||
@@ -0,0 +1,163 @@
|
|||||||
|
labels: ["needs-confirmation"]
|
||||||
|
body:
|
||||||
|
- type: markdown
|
||||||
|
attributes:
|
||||||
|
value: |
|
||||||
|
> [!IMPORTANT]
|
||||||
|
> Please read through [the Discussion rules](https://github.com/AnalogJ/scrutiny/discussions/876), review [the docs](https://github.com/AnalogJ/scrutiny/tree/master/docs), and check for both existing [Discussions](https://github.com/AnalogJ/scrutiny/discussions?discussions_q=) and [Issues](https://github.com/AnalogJ/scrutiny/issues?q=sort%3Areactions-desc) prior to opening a new Discussion.
|
||||||
|
- type: markdown
|
||||||
|
attributes:
|
||||||
|
value: "# Issue Details"
|
||||||
|
- type: textarea
|
||||||
|
attributes:
|
||||||
|
label: Issue Description
|
||||||
|
description: |
|
||||||
|
Provide a detailed description of the issue. Include relevant information, such as:
|
||||||
|
- The feature or configuration option you encounter the issue with.
|
||||||
|
- Screenshots, screen recordings, or other supporting media (as needed).
|
||||||
|
- If this is a regression of an existing issue that was closed or resolved, please include the previous item reference (Discussion, Issue, PR, commit) in your description.
|
||||||
|
placeholder: |
|
||||||
|
Temperature data is missing from the plots.
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
- type: textarea
|
||||||
|
attributes:
|
||||||
|
label: Expected Behavior
|
||||||
|
description: |
|
||||||
|
Describe how you expect scrutiny to behave in this situation. Include any relevant documentation links.
|
||||||
|
placeholder: |
|
||||||
|
All temperature data uploaded by collectors should make it into the plots.
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
- type: textarea
|
||||||
|
attributes:
|
||||||
|
label: Actual Behavior
|
||||||
|
description: |
|
||||||
|
Describe how scrutiny actually behaves in this situation. If it is not immediately obvious how the actual behavior differs from the expected behavior described above, please be sure to mention the deviation specifically.
|
||||||
|
placeholder: |
|
||||||
|
Only half the points appear.
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
- type: textarea
|
||||||
|
attributes:
|
||||||
|
label: Reproduction Steps
|
||||||
|
description: |
|
||||||
|
Provide a detailed set of step-by-step instructions for reproducing this issue. If you can't, describe what you were doing when the issue occurred.
|
||||||
|
placeholder: |
|
||||||
|
1. Set up the omnibus docker image
|
||||||
|
2. Launch the web dashboard
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
- type: textarea
|
||||||
|
attributes:
|
||||||
|
label: scrutiny debug logs
|
||||||
|
description: |
|
||||||
|
Provide any captured scrutiny logs or panic dumps during your issue reproduction in this field.
|
||||||
|
Make sure to turn on debug logging with the environment variable DEBUG=true
|
||||||
|
render: text
|
||||||
|
- type: input
|
||||||
|
attributes:
|
||||||
|
label: Scrutiny Version
|
||||||
|
description: The version of scrutiny you are using
|
||||||
|
placeholder: v0.8.2
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
- type: input
|
||||||
|
attributes:
|
||||||
|
label: Smartmontools Version
|
||||||
|
description: The version of smartmontools you are using (or "docker", if you're using the docker image)
|
||||||
|
placeholder: "7.2"
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
- type: input
|
||||||
|
attributes:
|
||||||
|
label: OS Version Information
|
||||||
|
description: |
|
||||||
|
Please tell us what operating system (name and version) you are using.
|
||||||
|
placeholder: Ubuntu 24.04.1 (Noble Numbat)
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
- type: dropdown
|
||||||
|
attributes:
|
||||||
|
label: Component
|
||||||
|
description: Which component of scrutiny has a problem?
|
||||||
|
options:
|
||||||
|
- web
|
||||||
|
- collector
|
||||||
|
- omnibus (docker only)
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
- type: checkboxes
|
||||||
|
attributes:
|
||||||
|
label: Deployment Method
|
||||||
|
description: How are you running scrutiny?
|
||||||
|
options:
|
||||||
|
- label: docker
|
||||||
|
- label: binaries
|
||||||
|
- label: systemd
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
- type: input
|
||||||
|
attributes:
|
||||||
|
label: Hard Drive Information
|
||||||
|
description: |
|
||||||
|
If the problem is related to a specific hard drive, what are the make and model?
|
||||||
|
placeholder: Seagate ST8000DM004-2CX188
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
|
- type: textarea
|
||||||
|
attributes:
|
||||||
|
label: smartctl output
|
||||||
|
description: |
|
||||||
|
What is the output of smartctl --xall --json <drive>?
|
||||||
|
render: json
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
|
- type: textarea
|
||||||
|
attributes:
|
||||||
|
label: docker-compose.yml
|
||||||
|
description: |
|
||||||
|
If using docker, please provide your full docker-compose.yml file.
|
||||||
|
render: yaml
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
|
- type: textarea
|
||||||
|
attributes:
|
||||||
|
label: scrutiny.yaml
|
||||||
|
description: |
|
||||||
|
Please provide your full scrutiny.yaml file.
|
||||||
|
render: yaml
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
|
- type: textarea
|
||||||
|
attributes:
|
||||||
|
label: collector.yaml
|
||||||
|
description: |
|
||||||
|
Please provide your full collector.yaml file.
|
||||||
|
render: yaml
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
|
- type: textarea
|
||||||
|
attributes:
|
||||||
|
label: Additional relevant configuration
|
||||||
|
description: |
|
||||||
|
Please any additional relevant configuration (e.g. systemd service definitions, OS configuration)
|
||||||
|
render: text
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
|
- type: markdown
|
||||||
|
attributes:
|
||||||
|
value: |
|
||||||
|
# User Acknowledgements
|
||||||
|
> [!TIP]
|
||||||
|
> Use these links to review the existing scrutiny [Discussions](https://github.com/AnalogJ/scrutiny/discussions?discussions_q=) and [Issues](https://github.com/AnalogJ/scrutiny/issues?q=sort%3Areactions-desc).
|
||||||
|
- type: checkboxes
|
||||||
|
attributes:
|
||||||
|
label: "I acknowledge that:"
|
||||||
|
options:
|
||||||
|
- label: I have reviewed the FAQ and confirm that my issue is NOT among them.
|
||||||
|
required: true
|
||||||
|
- label: I have searched the scrutiny repository (both open and closed Discussions and Issues) and confirm this is not a duplicate of an existing issue or discussion.
|
||||||
|
required: true
|
||||||
|
- label: I have checked the "Preview" tab on all text fields to ensure that everything looks right, and have wrapped all configuration and code in code blocks with a group of three backticks (` ``` `) on separate lines.
|
||||||
|
required: true
|
||||||
@@ -1,42 +0,0 @@
|
|||||||
---
|
|
||||||
name: Bug report
|
|
||||||
about: Create a report to help us improve
|
|
||||||
title: "[BUG]"
|
|
||||||
labels: bug
|
|
||||||
assignees: ''
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
**Describe the bug**
|
|
||||||
A clear and concise description of what the bug is.
|
|
||||||
|
|
||||||
**Expected behavior**
|
|
||||||
A clear and concise description of what you expected to happen.
|
|
||||||
|
|
||||||
**Screenshots**
|
|
||||||
If applicable, add screenshots to help explain your problem.
|
|
||||||
|
|
||||||
**Log Files**
|
|
||||||
If related to missing devices or SMART data, please run the `collector` in DEBUG mode, and attach the log file.
|
|
||||||
See [/docs/TROUBLESHOOTING_DEVICE_COLLECTOR.md](docs/TROUBLESHOOTING_DEVICE_COLLECTOR.md) for other troubleshooting tips.
|
|
||||||
|
|
||||||
```
|
|
||||||
docker run -it --rm -p 8080:8080 \
|
|
||||||
-v `pwd`/config:/opt/scrutiny/config \
|
|
||||||
-v /run/udev:/run/udev:ro \
|
|
||||||
--cap-add SYS_RAWIO \
|
|
||||||
--device=/dev/sda \
|
|
||||||
--device=/dev/sdb \
|
|
||||||
-e DEBUG=true \
|
|
||||||
-e COLLECTOR_LOG_FILE=/opt/scrutiny/config/collector.log \
|
|
||||||
-e SCRUTINY_LOG_FILE=/opt/scrutiny/config/web.log \
|
|
||||||
--name scrutiny \
|
|
||||||
ghcr.io/analogj/scrutiny:master-omnibus
|
|
||||||
|
|
||||||
# in another terminal trigger the collector
|
|
||||||
docker exec scrutiny scrutiny-collector-metrics run
|
|
||||||
```
|
|
||||||
|
|
||||||
The log files will be available on your host in the `config` directory. Please attach them to this issue.
|
|
||||||
|
|
||||||
Please also provide the output of `docker info`
|
|
||||||
@@ -0,0 +1,5 @@
|
|||||||
|
blank_issues_enabled: false
|
||||||
|
contact_links:
|
||||||
|
- name: Features, Bug Reports, Questions
|
||||||
|
url: https://github.com/AnalogJ/scrutiny/discussions/new/choose
|
||||||
|
about: Our preferred starting point if you have any questions or suggestions about configuration, features or behavior.
|
||||||
@@ -1,17 +0,0 @@
|
|||||||
---
|
|
||||||
name: Feature request
|
|
||||||
about: Suggest an idea for this project
|
|
||||||
title: "[FEAT]"
|
|
||||||
labels: ''
|
|
||||||
assignees: ''
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
**Is your feature request related to a problem? Please describe.**
|
|
||||||
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
|
|
||||||
|
|
||||||
**Describe the solution you'd like**
|
|
||||||
A clear and concise description of what you want to happen.
|
|
||||||
|
|
||||||
**Additional context**
|
|
||||||
Add any other context or screenshots about the feature request here.
|
|
||||||
@@ -0,0 +1,9 @@
|
|||||||
|
---
|
||||||
|
name: Pre-Discussed and Approved Topics
|
||||||
|
about: |-
|
||||||
|
Only for topics already discussed and approved in the GitHub Discussions section.
|
||||||
|
---
|
||||||
|
|
||||||
|
**DO NOT OPEN A NEW ISSUE. PLEASE USE THE DISCUSSIONS SECTION.**
|
||||||
|
|
||||||
|
**I DIDN'T READ THE ABOVE LINE. PLEASE CLOSE THIS ISSUE.**
|
||||||
+53
-10
@@ -1,6 +1,13 @@
|
|||||||
name: CI
|
name: CI
|
||||||
# This workflow is triggered on pushes & pull requests
|
# This workflow is triggered on pushes & pull requests
|
||||||
on: [pull_request]
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- master
|
||||||
|
pull_request:
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
test-frontend:
|
test-frontend:
|
||||||
@@ -21,11 +28,10 @@ jobs:
|
|||||||
test-backend:
|
test-backend:
|
||||||
name: Test Backend
|
name: Test Backend
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
container: ghcr.io/packagrio/packagr:latest-golang
|
|
||||||
# Service containers to run with `build` (Required for end-to-end testing)
|
# Service containers to run with `build` (Required for end-to-end testing)
|
||||||
services:
|
services:
|
||||||
influxdb:
|
influxdb:
|
||||||
image: influxdb:2.2
|
image: influxdb:2.8
|
||||||
env:
|
env:
|
||||||
DOCKER_INFLUXDB_INIT_MODE: setup
|
DOCKER_INFLUXDB_INIT_MODE: setup
|
||||||
DOCKER_INFLUXDB_INIT_USERNAME: admin
|
DOCKER_INFLUXDB_INIT_USERNAME: admin
|
||||||
@@ -38,13 +44,10 @@ jobs:
|
|||||||
env:
|
env:
|
||||||
STATIC: true
|
STATIC: true
|
||||||
steps:
|
steps:
|
||||||
- name: Git
|
- name: Add influxdb to hosts
|
||||||
run: |
|
run: echo "127.0.0.1 influxdb" | sudo tee -a /etc/hosts
|
||||||
apt-get update && apt-get install -y software-properties-common
|
|
||||||
add-apt-repository ppa:git-core/ppa && apt-get update && apt-get install -y git
|
|
||||||
git --version
|
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v6
|
||||||
- name: Test Backend
|
- name: Test Backend
|
||||||
run: |
|
run: |
|
||||||
make binary-clean binary-test-coverage
|
make binary-clean binary-test-coverage
|
||||||
@@ -76,6 +79,19 @@ jobs:
|
|||||||
fail_ci_if_error: true
|
fail_ci_if_error: true
|
||||||
verbose: true
|
verbose: true
|
||||||
|
|
||||||
|
golangci:
|
||||||
|
name: lint
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v6
|
||||||
|
- uses: actions/setup-go@v6
|
||||||
|
with:
|
||||||
|
go-version: 1.25
|
||||||
|
- name: golangci-lint
|
||||||
|
uses: golangci/golangci-lint-action@v9
|
||||||
|
with:
|
||||||
|
args: --issues-exit-code=0
|
||||||
|
|
||||||
build:
|
build:
|
||||||
name: Build ${{ matrix.cfg.goos }}/${{ matrix.cfg.goarch }}
|
name: Build ${{ matrix.cfg.goos }}/${{ matrix.cfg.goarch }}
|
||||||
runs-on: ${{ matrix.cfg.on }}
|
runs-on: ${{ matrix.cfg.on }}
|
||||||
@@ -102,7 +118,7 @@ jobs:
|
|||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v2
|
||||||
- uses: actions/setup-go@v3
|
- uses: actions/setup-go@v3
|
||||||
with:
|
with:
|
||||||
go-version: '^1.20.1'
|
go-version: '^1.25'
|
||||||
- name: Build Binaries
|
- name: Build Binaries
|
||||||
run: |
|
run: |
|
||||||
make binary-clean binary-all
|
make binary-clean binary-all
|
||||||
@@ -113,3 +129,30 @@ jobs:
|
|||||||
path: |
|
path: |
|
||||||
scrutiny-web-*
|
scrutiny-web-*
|
||||||
scrutiny-collector-metrics-*
|
scrutiny-collector-metrics-*
|
||||||
|
|
||||||
|
makefile-docker-omnibus:
|
||||||
|
name: Build Docker Omnibus From Makefile
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v6
|
||||||
|
- name: Build
|
||||||
|
run: make docker-omnibus
|
||||||
|
|
||||||
|
makefile-docker-web:
|
||||||
|
name: Build Docker Web From Makefile
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v6
|
||||||
|
- name: Build
|
||||||
|
run: make docker-web
|
||||||
|
|
||||||
|
makefile-docker-collector:
|
||||||
|
name: Build Docker Collector From Makefile
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v6
|
||||||
|
- name: Build
|
||||||
|
run: make docker-collector
|
||||||
|
|||||||
@@ -1,7 +1,6 @@
|
|||||||
name: Docker
|
name: Docker
|
||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
branches: [ master, beta ]
|
|
||||||
# Publish semver tags as releases.
|
# Publish semver tags as releases.
|
||||||
tags: [ 'v*.*.*' ]
|
tags: [ 'v*.*.*' ]
|
||||||
|
|
||||||
@@ -18,20 +17,17 @@ jobs:
|
|||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout repository
|
- name: Checkout repository
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v6
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
- name: Set up QEMU
|
- name: Set up QEMU
|
||||||
uses: docker/setup-qemu-action@v2
|
uses: docker/setup-qemu-action@v3
|
||||||
with:
|
with:
|
||||||
platforms: 'arm64,arm'
|
platforms: 'arm64,arm'
|
||||||
- name: Set up Docker Buildx
|
- name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v2
|
uses: docker/setup-buildx-action@v3
|
||||||
# Login against a Docker registry except on PR
|
# Login against a Docker registry except on PR
|
||||||
# https://github.com/docker/login-action
|
# https://github.com/docker/login-action
|
||||||
- name: Log into registry ${{ env.REGISTRY }}
|
- name: Log into registry ${{ env.REGISTRY }}
|
||||||
if: github.event_name != 'pull_request'
|
uses: docker/login-action@v3
|
||||||
uses: docker/login-action@v2
|
|
||||||
with:
|
with:
|
||||||
registry: ${{ env.REGISTRY }}
|
registry: ${{ env.REGISTRY }}
|
||||||
username: ${{ github.actor }}
|
username: ${{ github.actor }}
|
||||||
@@ -40,30 +36,30 @@ jobs:
|
|||||||
# https://github.com/docker/metadata-action
|
# https://github.com/docker/metadata-action
|
||||||
- name: Extract Docker metadata
|
- name: Extract Docker metadata
|
||||||
id: meta
|
id: meta
|
||||||
uses: docker/metadata-action@v4
|
uses: docker/metadata-action@v5
|
||||||
with:
|
with:
|
||||||
flavor: |
|
flavor: |
|
||||||
latest=false
|
latest=true
|
||||||
|
suffix=-collector,onlatest=true
|
||||||
tags: |
|
tags: |
|
||||||
type=ref,enable=true,event=branch,suffix=-collector
|
type=semver,pattern=v{{major}}.{{minor}}.{{patch}}
|
||||||
type=semver,pattern=v{{major}}.{{minor}}.{{patch}},suffix=-collector
|
type=semver,pattern=v{{major}}.{{minor}}
|
||||||
type=semver,pattern=v{{major}}.{{minor}},suffix=-collector
|
type=semver,pattern=v{{major}}
|
||||||
type=semver,pattern=v{{major}},suffix=-collector
|
|
||||||
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
||||||
|
|
||||||
# Build and push Docker image with Buildx (don't push on PR)
|
# Build and push Docker image with Buildx
|
||||||
# https://github.com/docker/build-push-action
|
# https://github.com/docker/build-push-action
|
||||||
- name: Build and push Docker image
|
- name: Build and push Docker image
|
||||||
uses: docker/build-push-action@v3
|
uses: docker/build-push-action@v6
|
||||||
with:
|
with:
|
||||||
platforms: linux/amd64,linux/arm64,linux/arm/v7
|
platforms: linux/amd64,linux/arm64,linux/arm/v7
|
||||||
context: .
|
context: .
|
||||||
file: docker/Dockerfile.collector
|
file: docker/Dockerfile.collector
|
||||||
push: ${{ github.event_name != 'pull_request' }}
|
push: true
|
||||||
tags: ${{ steps.meta.outputs.tags }}
|
tags: ${{ steps.meta.outputs.tags }}
|
||||||
labels: ${{ steps.meta.outputs.labels }}
|
labels: ${{ steps.meta.outputs.labels }}
|
||||||
# cache-from: type=gha
|
cache-from: type=gha
|
||||||
# cache-to: type=gha,mode=max
|
cache-to: type=gha,mode=max
|
||||||
|
|
||||||
web:
|
web:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
@@ -73,20 +69,19 @@ jobs:
|
|||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout repository
|
- name: Checkout repository
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v6
|
||||||
- name: "Populate frontend version information"
|
- name: "Populate frontend version information"
|
||||||
run: "cd webapp/frontend && ./git.version.sh"
|
run: "cd webapp/frontend && ./git.version.sh"
|
||||||
- name: Set up QEMU
|
- name: Set up QEMU
|
||||||
uses: docker/setup-qemu-action@v2
|
uses: docker/setup-qemu-action@v3
|
||||||
with:
|
with:
|
||||||
platforms: 'arm64,arm'
|
platforms: 'arm64,arm'
|
||||||
- name: Set up Docker Buildx
|
- name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v2
|
uses: docker/setup-buildx-action@v3
|
||||||
# Login against a Docker registry except on PR
|
# Login against a Docker registry except on PR
|
||||||
# https://github.com/docker/login-action
|
# https://github.com/docker/login-action
|
||||||
- name: Log into registry ${{ env.REGISTRY }}
|
- name: Log into registry ${{ env.REGISTRY }}
|
||||||
if: github.event_name != 'pull_request'
|
uses: docker/login-action@v3
|
||||||
uses: docker/login-action@v2
|
|
||||||
with:
|
with:
|
||||||
registry: ${{ env.REGISTRY }}
|
registry: ${{ env.REGISTRY }}
|
||||||
username: ${{ github.actor }}
|
username: ${{ github.actor }}
|
||||||
@@ -95,29 +90,31 @@ jobs:
|
|||||||
# https://github.com/docker/metadata-action
|
# https://github.com/docker/metadata-action
|
||||||
- name: Extract Docker metadata
|
- name: Extract Docker metadata
|
||||||
id: meta
|
id: meta
|
||||||
uses: docker/metadata-action@v4
|
uses: docker/metadata-action@v5
|
||||||
with:
|
with:
|
||||||
flavor: |
|
flavor: |
|
||||||
latest=false
|
latest=true
|
||||||
|
suffix=-web,onlatest=true
|
||||||
tags: |
|
tags: |
|
||||||
type=ref,enable=true,event=branch,suffix=-web
|
type=semver,pattern=v{{major}}.{{minor}}.{{patch}}
|
||||||
type=semver,pattern=v{{major}}.{{minor}}.{{patch}},suffix=-web
|
type=semver,pattern=v{{major}}.{{minor}}
|
||||||
type=semver,pattern=v{{major}}.{{minor}},suffix=-web
|
type=semver,pattern=v{{major}}
|
||||||
type=semver,pattern=v{{major}},suffix=-web
|
|
||||||
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
||||||
# Build and push Docker image with Buildx (don't push on PR)
|
|
||||||
|
# Build and push Docker image with Buildx
|
||||||
# https://github.com/docker/build-push-action
|
# https://github.com/docker/build-push-action
|
||||||
- name: Build and push Docker image
|
- name: Build and push Docker image
|
||||||
uses: docker/build-push-action@v3
|
uses: docker/build-push-action@v6
|
||||||
with:
|
with:
|
||||||
platforms: linux/amd64,linux/arm64,linux/arm/v7
|
platforms: linux/amd64,linux/arm64,linux/arm/v7
|
||||||
context: .
|
context: .
|
||||||
file: docker/Dockerfile.web
|
file: docker/Dockerfile.web
|
||||||
push: ${{ github.event_name != 'pull_request' }}
|
push: true
|
||||||
tags: ${{ steps.meta.outputs.tags }}
|
tags: ${{ steps.meta.outputs.tags }}
|
||||||
labels: ${{ steps.meta.outputs.labels }}
|
labels: ${{ steps.meta.outputs.labels }}
|
||||||
# cache-from: type=gha
|
cache-from: type=gha
|
||||||
# cache-to: type=gha,mode=max
|
cache-to: type=gha,mode=max
|
||||||
|
|
||||||
omnibus:
|
omnibus:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
permissions:
|
permissions:
|
||||||
@@ -126,20 +123,19 @@ jobs:
|
|||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout repository
|
- name: Checkout repository
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v6
|
||||||
- name: "Populate frontend version information"
|
- name: "Populate frontend version information"
|
||||||
run: "cd webapp/frontend && ./git.version.sh"
|
run: "cd webapp/frontend && ./git.version.sh"
|
||||||
- name: Set up QEMU
|
- name: Set up QEMU
|
||||||
uses: docker/setup-qemu-action@v2
|
uses: docker/setup-qemu-action@v3
|
||||||
with:
|
with:
|
||||||
platforms: 'arm64,arm'
|
platforms: 'arm64,arm'
|
||||||
- name: Set up Docker Buildx
|
- name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v2
|
uses: docker/setup-buildx-action@v3
|
||||||
# Login against a Docker registry except on PR
|
# Login against a Docker registry except on PR
|
||||||
# https://github.com/docker/login-action
|
# https://github.com/docker/login-action
|
||||||
- name: Log into registry ${{ env.REGISTRY }}
|
- name: Log into registry ${{ env.REGISTRY }}
|
||||||
if: github.event_name != 'pull_request'
|
uses: docker/login-action@v3
|
||||||
uses: docker/login-action@v2
|
|
||||||
with:
|
with:
|
||||||
registry: ${{ env.REGISTRY }}
|
registry: ${{ env.REGISTRY }}
|
||||||
username: ${{ github.actor }}
|
username: ${{ github.actor }}
|
||||||
@@ -148,24 +144,29 @@ jobs:
|
|||||||
# https://github.com/docker/metadata-action
|
# https://github.com/docker/metadata-action
|
||||||
- name: Extract Docker metadata
|
- name: Extract Docker metadata
|
||||||
id: meta
|
id: meta
|
||||||
uses: docker/metadata-action@v4
|
uses: docker/metadata-action@v5
|
||||||
|
# tag latest and latest-omnibus
|
||||||
with:
|
with:
|
||||||
|
flavor: |
|
||||||
|
latest=true
|
||||||
|
suffix=-omnibus,onlatest=false
|
||||||
tags: |
|
tags: |
|
||||||
type=ref,enable=true,event=branch,suffix=-omnibus
|
type=raw,value=latest
|
||||||
type=semver,pattern=v{{major}}.{{minor}}.{{patch}},suffix=-omnibus
|
type=semver,pattern=v{{major}}.{{minor}}.{{patch}}
|
||||||
type=semver,pattern=v{{major}}.{{minor}},suffix=-omnibus
|
type=semver,pattern=v{{major}}.{{minor}}
|
||||||
type=semver,pattern=v{{major}},suffix=-omnibus
|
type=semver,pattern=v{{major}}
|
||||||
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
||||||
# Build and push Docker image with Buildx (don't push on PR)
|
|
||||||
|
# Build and push Docker image with Buildx
|
||||||
# https://github.com/docker/build-push-action
|
# https://github.com/docker/build-push-action
|
||||||
- name: Build and push Docker image
|
- name: Build and push Docker image
|
||||||
uses: docker/build-push-action@v3
|
uses: docker/build-push-action@v6
|
||||||
with:
|
with:
|
||||||
platforms: linux/amd64,linux/arm64
|
platforms: linux/amd64,linux/arm64
|
||||||
context: .
|
context: .
|
||||||
file: docker/Dockerfile
|
file: docker/Dockerfile
|
||||||
push: ${{ github.event_name != 'pull_request' }}
|
push: true
|
||||||
tags: ${{ steps.meta.outputs.tags }}
|
tags: ${{ steps.meta.outputs.tags }}
|
||||||
labels: ${{ steps.meta.outputs.labels }}
|
labels: ${{ steps.meta.outputs.labels }}
|
||||||
# cache-from: type=gha
|
cache-from: type=gha
|
||||||
# cache-to: type=gha,mode=max
|
cache-to: type=gha,mode=max
|
||||||
|
|||||||
@@ -1,5 +1,7 @@
|
|||||||
name: Docker - Nightly
|
name: Docker - Nightly
|
||||||
on:
|
on:
|
||||||
|
workflow_dispatch:
|
||||||
|
# Note: this only runs on the default branch
|
||||||
schedule:
|
schedule:
|
||||||
- cron: '36 12 * * *'
|
- cron: '36 12 * * *'
|
||||||
|
|
||||||
@@ -8,7 +10,7 @@ env:
|
|||||||
IMAGE_NAME: ${{ github.repository }}
|
IMAGE_NAME: ${{ github.repository }}
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
omnibus:
|
build_nightlies:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
permissions:
|
permissions:
|
||||||
contents: read
|
contents: read
|
||||||
@@ -16,44 +18,86 @@ jobs:
|
|||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout repository
|
- name: Checkout repository
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v6
|
||||||
- name: "Populate frontend version information"
|
- name: "Populate frontend version information"
|
||||||
run: "cd webapp/frontend && ./git.version.sh"
|
run: "cd webapp/frontend && ./git.version.sh"
|
||||||
- name: Set up QEMU
|
- name: Set up QEMU
|
||||||
uses: docker/setup-qemu-action@v2
|
uses: docker/setup-qemu-action@v3
|
||||||
with:
|
with:
|
||||||
platforms: 'arm64,arm'
|
platforms: 'arm64'
|
||||||
- name: Set up Docker Buildx
|
- name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v2
|
uses: docker/setup-buildx-action@v3
|
||||||
# Login against a Docker registry except on PR
|
# Login against a Docker registry except on PR
|
||||||
# https://github.com/docker/login-action
|
# https://github.com/docker/login-action
|
||||||
- name: Log into registry ${{ env.REGISTRY }}
|
- name: Log into registry ${{ env.REGISTRY }}
|
||||||
if: github.event_name != 'pull_request'
|
uses: docker/login-action@v3
|
||||||
uses: docker/login-action@v2
|
|
||||||
with:
|
with:
|
||||||
registry: ${{ env.REGISTRY }}
|
registry: ${{ env.REGISTRY }}
|
||||||
username: ${{ github.actor }}
|
username: ${{ github.actor }}
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
# Extract metadata (tags, labels) for Docker
|
# Extract metadata (tags, labels) for Docker
|
||||||
# https://github.com/docker/metadata-action
|
# https://github.com/docker/metadata-action
|
||||||
- name: Extract Docker metadata
|
- name: Extract Docker metadata for omnibus
|
||||||
id: meta
|
id: meta_omnibus
|
||||||
uses: docker/metadata-action@v4
|
uses: docker/metadata-action@v5
|
||||||
with:
|
with:
|
||||||
tags: |
|
tags: |
|
||||||
type=ref,enable=true,event=branch,suffix=-omnibus-nightly
|
type=raw,enable=true,value=nightly,suffix=-omnibus
|
||||||
type=ref,enable=true,event=tag,suffix=-omnibus-nightly
|
|
||||||
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
||||||
# Build and push Docker image with Buildx (don't push on PR)
|
# Build and push Docker image with Buildx (don't push on PR)
|
||||||
# https://github.com/docker/build-push-action
|
# https://github.com/docker/build-push-action
|
||||||
- name: Build and push Docker image
|
- name: Build and push omnibus Docker image
|
||||||
uses: docker/build-push-action@v3
|
uses: docker/build-push-action@v6
|
||||||
with:
|
with:
|
||||||
platforms: linux/amd64,linux/arm64
|
platforms: linux/amd64,linux/arm64
|
||||||
context: .
|
context: .
|
||||||
file: docker/Dockerfile
|
file: docker/Dockerfile
|
||||||
push: false
|
push: true
|
||||||
tags: ${{ steps.meta.outputs.tags }}
|
tags: ${{ steps.meta_omnibus.outputs.tags }}
|
||||||
labels: ${{ steps.meta.outputs.labels }}
|
labels: ${{ steps.meta_omnibus.outputs.labels }}
|
||||||
# cache-from: type=gha
|
cache-from: type=gha
|
||||||
# cache-to: type=gha,mode=max
|
cache-to: type=gha,mode=max
|
||||||
|
# Extract metadata (tags, labels) for Docker
|
||||||
|
# https://github.com/docker/metadata-action
|
||||||
|
- name: Extract Docker metadata for collector
|
||||||
|
id: meta_collector
|
||||||
|
uses: docker/metadata-action@v5
|
||||||
|
with:
|
||||||
|
tags: |
|
||||||
|
type=raw,enable=true,value=nightly,suffix=-collector
|
||||||
|
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
||||||
|
# Build and push Docker image with Buildx (don't push on PR)
|
||||||
|
# https://github.com/docker/build-push-action
|
||||||
|
- name: Build and push collector Docker image
|
||||||
|
uses: docker/build-push-action@v6
|
||||||
|
with:
|
||||||
|
platforms: linux/amd64,linux/arm64
|
||||||
|
context: .
|
||||||
|
file: docker/Dockerfile.collector
|
||||||
|
push: true
|
||||||
|
tags: ${{ steps.meta_collector.outputs.tags }}
|
||||||
|
labels: ${{ steps.meta_collector.outputs.labels }}
|
||||||
|
cache-from: type=gha
|
||||||
|
cache-to: type=gha,mode=max
|
||||||
|
# Extract metadata (tags, labels) for Docker
|
||||||
|
# https://github.com/docker/metadata-action
|
||||||
|
- name: Extract Docker metadata for web
|
||||||
|
id: meta_web
|
||||||
|
uses: docker/metadata-action@v5
|
||||||
|
with:
|
||||||
|
tags: |
|
||||||
|
type=raw,enable=true,value=nightly,suffix=-web
|
||||||
|
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
||||||
|
# Build and push Docker image with Buildx (don't push on PR)
|
||||||
|
# https://github.com/docker/build-push-action
|
||||||
|
- name: Build and push web Docker image
|
||||||
|
uses: docker/build-push-action@v6
|
||||||
|
with:
|
||||||
|
platforms: linux/amd64,linux/arm64
|
||||||
|
context: .
|
||||||
|
file: docker/Dockerfile.web
|
||||||
|
push: true
|
||||||
|
tags: ${{ steps.meta_web.outputs.tags }}
|
||||||
|
labels: ${{ steps.meta_web.outputs.labels }}
|
||||||
|
cache-from: type=gha
|
||||||
|
cache-to: type=gha,mode=max
|
||||||
|
|||||||
@@ -20,7 +20,7 @@ jobs:
|
|||||||
# Service containers to run with `build` (Required for end-to-end testing)
|
# Service containers to run with `build` (Required for end-to-end testing)
|
||||||
services:
|
services:
|
||||||
influxdb:
|
influxdb:
|
||||||
image: influxdb:2.2
|
image: influxdb:2.8
|
||||||
env:
|
env:
|
||||||
DOCKER_INFLUXDB_INIT_MODE: setup
|
DOCKER_INFLUXDB_INIT_MODE: setup
|
||||||
DOCKER_INFLUXDB_INIT_USERNAME: admin
|
DOCKER_INFLUXDB_INIT_USERNAME: admin
|
||||||
@@ -64,6 +64,7 @@ jobs:
|
|||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: workspace
|
name: workspace
|
||||||
|
include-hidden-files: true
|
||||||
path: ${{ github.workspace }}/**/*
|
path: ${{ github.workspace }}/**/*
|
||||||
retention-days: 1
|
retention-days: 1
|
||||||
|
|
||||||
@@ -96,7 +97,7 @@ jobs:
|
|||||||
name: workspace
|
name: workspace
|
||||||
- uses: actions/setup-go@v6
|
- uses: actions/setup-go@v6
|
||||||
with:
|
with:
|
||||||
go-version: '1.20.1' # The Go version to download (if necessary) and use.
|
go-version: '1.25' # The Go version to download (if necessary) and use.
|
||||||
- name: Build Binaries
|
- name: Build Binaries
|
||||||
run: |
|
run: |
|
||||||
make binary-clean binary-all
|
make binary-clean binary-all
|
||||||
@@ -141,7 +142,7 @@ jobs:
|
|||||||
- name: Download workspace
|
- name: Download workspace
|
||||||
uses: actions/download-artifact@v7
|
uses: actions/download-artifact@v7
|
||||||
with:
|
with:
|
||||||
name: ./
|
name: workspace
|
||||||
- name: Download binaries
|
- name: Download binaries
|
||||||
uses: actions/download-artifact@v7
|
uses: actions/download-artifact@v7
|
||||||
with:
|
with:
|
||||||
|
|||||||
@@ -0,0 +1,11 @@
|
|||||||
|
version: "2"
|
||||||
|
formatters:
|
||||||
|
enable:
|
||||||
|
- gofmt
|
||||||
|
- goimports
|
||||||
|
linters:
|
||||||
|
enable:
|
||||||
|
- bodyclose
|
||||||
|
settings:
|
||||||
|
errcheck:
|
||||||
|
check-blank: true
|
||||||
Vendored
+37
@@ -0,0 +1,37 @@
|
|||||||
|
{
|
||||||
|
"version": "0.2.0",
|
||||||
|
"configurations": [
|
||||||
|
{
|
||||||
|
"name": "Run Scrutiny",
|
||||||
|
"type": "go",
|
||||||
|
"request": "launch",
|
||||||
|
"mode": "auto",
|
||||||
|
"program": "${workspaceFolder}/webapp/backend/cmd/scrutiny/scrutiny.go",
|
||||||
|
"args": ["start", "--config", "./scrutiny.yaml"],
|
||||||
|
"cwd": "${workspaceFolder}",
|
||||||
|
"env": {
|
||||||
|
"DEBUG": "true"
|
||||||
|
},
|
||||||
|
"console": "integratedTerminal",
|
||||||
|
"preLaunchTask": "Build Frontend",
|
||||||
|
"serverReadyAction": {
|
||||||
|
"action": "openExternally",
|
||||||
|
"pattern": "Listening and serving HTTP on",
|
||||||
|
"uriFormat": "http://localhost:8080/web/"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "Run Collector",
|
||||||
|
"type": "go",
|
||||||
|
"request": "launch",
|
||||||
|
"mode": "auto",
|
||||||
|
"program": "${workspaceFolder}/collector/cmd/collector-metrics/collector-metrics.go",
|
||||||
|
"args": ["run", "--debug"],
|
||||||
|
"cwd": "${workspaceFolder}",
|
||||||
|
"env": {
|
||||||
|
"COLLECTOR_DEBUG": "true"
|
||||||
|
},
|
||||||
|
"console": "integratedTerminal"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
Vendored
+10
@@ -0,0 +1,10 @@
|
|||||||
|
{
|
||||||
|
"version": "2.0.0",
|
||||||
|
"tasks": [
|
||||||
|
{
|
||||||
|
"label": "Build Frontend",
|
||||||
|
"type": "shell",
|
||||||
|
"command": "cd webapp/frontend && npm run build:prod -- --output-path=../../dist"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
+150
-7
@@ -1,6 +1,144 @@
|
|||||||
# Contributing
|
# Contributing to scrutiny
|
||||||
|
|
||||||
**Please see our [AI policy](./AI_POLICY.md).**
|
This document describes the process of contributing to scrutiny. It is intended
|
||||||
|
for anyone considering opening an **issue**, **discussion** or **pull request**.
|
||||||
|
|
||||||
|
> [!NOTE]
|
||||||
|
>
|
||||||
|
> The intention of these policies is not to be difficult, and
|
||||||
|
> contributions are greatly appreciated. The goal is to streamline
|
||||||
|
> and simplify the efforts of both contributers and maintainers.
|
||||||
|
|
||||||
|
## AI Usage
|
||||||
|
|
||||||
|
scrutiny has strict rules for AI usage. Please see
|
||||||
|
the [AI Usage Policy](AI_POLICY.md). **This is very important.**
|
||||||
|
|
||||||
|
## Quick Guide
|
||||||
|
|
||||||
|
### I'd like to contribute
|
||||||
|
|
||||||
|
[All issues are actionable](#issues-are-actionable). Pick one and start
|
||||||
|
working on it. Thank you. If you need help or guidance, comment on the issue.
|
||||||
|
Issues that are extra friendly to new contributors are tagged with
|
||||||
|
["contributor friendly"].
|
||||||
|
|
||||||
|
["contributor friendly"]: https://github.com/AnalogJ/scrutiny/issues?q=is%3Aissue%20is%3Aopen%20label%3A%22contributor%20friendly%22
|
||||||
|
|
||||||
|
### I have a bug! / Something isn't working
|
||||||
|
|
||||||
|
First, search the issue tracker and discussions for similar issues. Tip: also
|
||||||
|
search for [closed issues] and [discussions] — your issue might have already
|
||||||
|
been fixed!
|
||||||
|
|
||||||
|
> [!NOTE]
|
||||||
|
>
|
||||||
|
> If there is an _open_ issue or discussion that matches your problem,
|
||||||
|
> **please do not comment on it unless you have valuable insight to add**.
|
||||||
|
>
|
||||||
|
> GitHub has a very _noisy_ set of default notification settings which
|
||||||
|
> sends an email to _every participant_ in an issue/discussion every time
|
||||||
|
> someone adds a comment. Instead, use the handy upvote button for discussions,
|
||||||
|
> and/or emoji reactions on both discussions and issues, which are a visible
|
||||||
|
> yet non-disruptive way to show your support.
|
||||||
|
|
||||||
|
If your issue hasn't been reported already, open an ["Issue Triage"] discussion
|
||||||
|
and make sure to fill in the template **completely**. They are vital for
|
||||||
|
maintainers to figure out important details about your setup.
|
||||||
|
|
||||||
|
> [!WARNING]
|
||||||
|
>
|
||||||
|
> A _very_ common mistake is to file a bug report either as a Q&A or a Feature
|
||||||
|
> Request. **Please don't do this.** Otherwise, maintainers would have to ask
|
||||||
|
> for your system information again manually, and sometimes they will even ask
|
||||||
|
> you to create a new discussion because of how few detailed information is
|
||||||
|
> required for other discussion types compared to Issue Triage.
|
||||||
|
>
|
||||||
|
> Because of this, please make sure that you _only_ use the "Issue Triage"
|
||||||
|
> category for reporting bugs — thank you!
|
||||||
|
|
||||||
|
[closed issues]: https://github.com/AnalogJ/scrutiny/issues?q=is%3Aissue%20state%3Aclosed
|
||||||
|
[discussions]: https://github.com/AnalogJ/scrutiny/discussions?discussions_q=is%3Aclosed
|
||||||
|
["Issue Triage"]: https://github.com/AnalogJ/scrutiny/discussions/new?category=issue-triage
|
||||||
|
|
||||||
|
### I have an idea for a feature
|
||||||
|
|
||||||
|
Like bug reports, first search through both issues and discussions and try to
|
||||||
|
find if your feature has already been requested. Otherwise, open a discussion
|
||||||
|
in the ["Feature Requests, Ideas"] category.
|
||||||
|
|
||||||
|
["Feature Requests, Ideas"]: https://github.com/AnalogJ/scrutiny/discussions/new?category=feature-requests-ideas
|
||||||
|
|
||||||
|
### I've implemented a feature
|
||||||
|
|
||||||
|
1. If there is an issue for the feature, open a pull request straight away.
|
||||||
|
2. If there is no issue, open a discussion and link to your branch.
|
||||||
|
3. If you want to live dangerously, open a pull request and
|
||||||
|
[hope for the best](#pull-requests-implement-an-issue).
|
||||||
|
|
||||||
|
### I have a question which is neither a bug report nor a feature request
|
||||||
|
|
||||||
|
Open a [Q&A discussion].
|
||||||
|
|
||||||
|
> [!NOTE]
|
||||||
|
> If your question is about a missing feature, please open a discussion under
|
||||||
|
> the ["Feature Requests, Ideas"] category. If scrutiny is behaving
|
||||||
|
> unexpectedly, use the ["Issue Triage"] category.
|
||||||
|
>
|
||||||
|
> The "Q&A" category is strictly for other kinds of discussions and do not
|
||||||
|
> require detailed information unlike the two other categories, meaning that
|
||||||
|
> maintainers would have to spend the extra effort to ask for basic information
|
||||||
|
> if you submit a bug report under this category.
|
||||||
|
>
|
||||||
|
> Therefore, please **pay attention to the category** before opening
|
||||||
|
> discussions to save us all some time and energy. Thank you!
|
||||||
|
|
||||||
|
[Q&A discussion]: https://github.com/AnalogJ/scrutiny/discussions/new?category=q-a
|
||||||
|
|
||||||
|
## General Patterns
|
||||||
|
|
||||||
|
### Issues are Actionable
|
||||||
|
|
||||||
|
The scrutiny [issue tracker](https://github.com/AnalogJ/scrutiny/issues)
|
||||||
|
is for _actionable items_.
|
||||||
|
|
||||||
|
Unlike some other projects, scrutiny **does not use the issue tracker for
|
||||||
|
discussion or feature requests**. Instead, we use GitHub
|
||||||
|
[discussions](https://github.com/AnalogJ/scrutiny/discussions) for that.
|
||||||
|
Once a discussion reaches a point where a well-understood, actionable
|
||||||
|
item is identified, it is moved to the issue tracker. **This pattern
|
||||||
|
makes it easier for maintainers or contributors to find issues to work on
|
||||||
|
since _every issue_ is ready to be worked on.**
|
||||||
|
|
||||||
|
If you are experiencing a bug and have clear steps to reproduce it, please
|
||||||
|
open an issue. If you are experiencing a bug but you are not sure how to
|
||||||
|
reproduce it or aren't sure if it's a bug, please open a discussion.
|
||||||
|
If you have an idea for a feature, please open a discussion.
|
||||||
|
|
||||||
|
### Pull Requests Implement an Issue
|
||||||
|
|
||||||
|
Pull requests should be associated with a previously accepted issue.
|
||||||
|
**If you open a pull request for something that wasn't previously discussed,**
|
||||||
|
it may be closed or remain stale for an indefinite period of time. I'm not
|
||||||
|
saying it will never be accepted, but the odds are stacked against you.
|
||||||
|
|
||||||
|
Issues tagged with "feature" represent accepted, well-scoped feature requests.
|
||||||
|
If you implement an issue tagged with feature as described in the issue, your
|
||||||
|
pull request will be accepted with a high degree of certainty.
|
||||||
|
|
||||||
|
> [!NOTE]
|
||||||
|
>
|
||||||
|
> **Pull requests are NOT a place to discuss feature design.** Please do
|
||||||
|
> not open a WIP pull request to discuss a feature. Instead, use a discussion
|
||||||
|
> and link to your branch.
|
||||||
|
|
||||||
|
# Developer Guide
|
||||||
|
|
||||||
|
> [!NOTE]
|
||||||
|
>
|
||||||
|
> **The remainder of this file is dedicated to developers actively
|
||||||
|
> working on scrutiny.** If you're a user reporting an issue, you can
|
||||||
|
> ignore the rest of this document.
|
||||||
|
|
||||||
The Scrutiny repository is a [monorepo](https://en.wikipedia.org/wiki/Monorepo) containing source code for:
|
The Scrutiny repository is a [monorepo](https://en.wikipedia.org/wiki/Monorepo) containing source code for:
|
||||||
- Scrutiny Backend Server (API)
|
- Scrutiny Backend Server (API)
|
||||||
@@ -9,9 +147,14 @@ The Scrutiny repository is a [monorepo](https://en.wikipedia.org/wiki/Monorepo)
|
|||||||
|
|
||||||
Depending on the functionality you are adding, you may need to setup a development environment for 1 or more projects.
|
Depending on the functionality you are adding, you may need to setup a development environment for 1 or more projects.
|
||||||
|
|
||||||
|
# Devcontainer
|
||||||
|
Devcontainer configurations are available to build and run Scrutiny (WebUI and Collector) in a fully isolated environment.
|
||||||
|
When opening the project with vscode, choose "Reopen in Container". Three configurations are available depending on your
|
||||||
|
container runtime and setup: docker, docker-rootless, and podman.
|
||||||
|
|
||||||
# Modifying the Scrutiny Backend Server (API)
|
# Modifying the Scrutiny Backend Server (API)
|
||||||
|
|
||||||
1. install the [Go runtime](https://go.dev/doc/install) (v1.20+)
|
1. install the [Go runtime](https://go.dev/doc/install) (v1.25)
|
||||||
2. download the `scrutiny-web-frontend.tar.gz` for
|
2. download the `scrutiny-web-frontend.tar.gz` for
|
||||||
the [latest release](https://github.com/AnalogJ/scrutiny/releases/latest). Extract to a folder named `dist`
|
the [latest release](https://github.com/AnalogJ/scrutiny/releases/latest). Extract to a folder named `dist`
|
||||||
3. create a `scrutiny.yaml` config file
|
3. create a `scrutiny.yaml` config file
|
||||||
@@ -39,7 +182,7 @@ Depending on the functionality you are adding, you may need to setup a developme
|
|||||||
```
|
```
|
||||||
4. start a InfluxDB docker container.
|
4. start a InfluxDB docker container.
|
||||||
```bash
|
```bash
|
||||||
docker run -p 8086:8086 --rm influxdb:2.2
|
docker run -p 8086:8086 --rm influxdb:2.8
|
||||||
```
|
```
|
||||||
5. start the scrutiny web server
|
5. start the scrutiny web server
|
||||||
```bash
|
```bash
|
||||||
@@ -92,7 +235,7 @@ you'll need to follow the steps below:
|
|||||||
```
|
```
|
||||||
4. start a InfluxDB docker container.
|
4. start a InfluxDB docker container.
|
||||||
```bash
|
```bash
|
||||||
docker run -p 8086:8086 --rm influxdb:2.2
|
docker run -p 8086:8086 --rm influxdb:2.8
|
||||||
```
|
```
|
||||||
5. build the Angular Frontend Application
|
5. build the Angular Frontend Application
|
||||||
```bash
|
```bash
|
||||||
@@ -116,7 +259,7 @@ If you'd like to populate the database with some test data, you can run the fol
|
|||||||
> This is done automatically by the `webapp/backend/pkg/models/testdata/helper.go` script
|
> This is done automatically by the `webapp/backend/pkg/models/testdata/helper.go` script
|
||||||
|
|
||||||
```
|
```
|
||||||
docker run -p 8086:8086 --rm influxdb:2.2
|
docker run -p 8086:8086 --rm influxdb:2.8
|
||||||
|
|
||||||
|
|
||||||
# curl -X POST -H "Content-Type: application/json" -d @webapp/backend/pkg/web/testdata/register-devices-req.json localhost:8080/api/devices/register
|
# curl -X POST -H "Content-Type: application/json" -d @webapp/backend/pkg/web/testdata/register-devices-req.json localhost:8080/api/devices/register
|
||||||
@@ -184,7 +327,7 @@ docker run -p 8086:8086 -d --rm \
|
|||||||
-e DOCKER_INFLUXDB_INIT_ORG=scrutiny \
|
-e DOCKER_INFLUXDB_INIT_ORG=scrutiny \
|
||||||
-e DOCKER_INFLUXDB_INIT_BUCKET=metrics \
|
-e DOCKER_INFLUXDB_INIT_BUCKET=metrics \
|
||||||
-e DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=my-super-secret-auth-token \
|
-e DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=my-super-secret-auth-token \
|
||||||
influxdb:2.2
|
influxdb:2.8
|
||||||
go test ./...
|
go test ./...
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|||||||
@@ -1,5 +1,6 @@
|
|||||||
.ONESHELL: # Applies to every targets in the file! .ONESHELL instructs make to invoke a single instance of the shell and provide it with the entire recipe, regardless of how many lines it contains.
|
.ONESHELL: # Applies to every targets in the file! .ONESHELL instructs make to invoke a single instance of the shell and provide it with the entire recipe, regardless of how many lines it contains.
|
||||||
.SHELLFLAGS = -ec
|
.SHELLFLAGS = -ec
|
||||||
|
export GOTOOLCHAIN=go1.25.5
|
||||||
|
|
||||||
########################################################################################################################
|
########################################################################################################################
|
||||||
# Global Env Settings
|
# Global Env Settings
|
||||||
@@ -66,6 +67,11 @@ binary-dep:
|
|||||||
binary-test: binary-dep
|
binary-test: binary-dep
|
||||||
go test -v $(STATIC_TAGS) ./...
|
go test -v $(STATIC_TAGS) ./...
|
||||||
|
|
||||||
|
.PHONY: lint
|
||||||
|
lint:
|
||||||
|
GOTOOLCHAIN=go1.25.5 go install github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.8.0
|
||||||
|
golangci-lint run ./...
|
||||||
|
|
||||||
.PHONY: binary-test-coverage
|
.PHONY: binary-test-coverage
|
||||||
binary-test-coverage: binary-dep
|
binary-test-coverage: binary-dep
|
||||||
go test -coverprofile=coverage.txt -covermode=atomic -v $(STATIC_TAGS) ./...
|
go test -coverprofile=coverage.txt -covermode=atomic -v $(STATIC_TAGS) ./...
|
||||||
@@ -115,19 +121,18 @@ binary-frontend-test-coverage:
|
|||||||
########################################################################################################################
|
########################################################################################################################
|
||||||
# Docker
|
# Docker
|
||||||
# NOTE: these docker make targets are only used for local development (not used by Github Actions/CI)
|
# NOTE: these docker make targets are only used for local development (not used by Github Actions/CI)
|
||||||
# NOTE: docker-web and docker-omnibus require `make binary-frontend` or frontend.tar.gz content in /dist before executing.
|
|
||||||
########################################################################################################################
|
########################################################################################################################
|
||||||
.PHONY: docker-collector
|
.PHONY: docker-collector
|
||||||
docker-collector:
|
docker-collector:
|
||||||
@echo "building collector docker image"
|
@echo "building collector docker image"
|
||||||
docker build $(DOCKER_TARGETARCH_BUILD_ARG) -f docker/Dockerfile.collector -t analogj/scrutiny-dev:collector .
|
docker build $(DOCKER_TARGETARCH_BUILD_ARG) -f docker/Dockerfile.collector -t ghcr.io/analogj/scrutiny-dev:collector .
|
||||||
|
|
||||||
.PHONY: docker-web
|
.PHONY: docker-web
|
||||||
docker-web:
|
docker-web:
|
||||||
@echo "building web docker image"
|
@echo "building web docker image"
|
||||||
docker build $(DOCKER_TARGETARCH_BUILD_ARG) -f docker/Dockerfile.web -t analogj/scrutiny-dev:web .
|
docker build $(DOCKER_TARGETARCH_BUILD_ARG) -f docker/Dockerfile.web -t ghcr.io/analogj/scrutiny-dev:web .
|
||||||
|
|
||||||
.PHONY: docker-omnibus
|
.PHONY: docker-omnibus
|
||||||
docker-omnibus:
|
docker-omnibus:
|
||||||
@echo "building omnibus docker image"
|
@echo "building omnibus docker image"
|
||||||
docker build $(DOCKER_TARGETARCH_BUILD_ARG) -f docker/Dockerfile -t analogj/scrutiny-dev:omnibus .
|
docker build $(DOCKER_TARGETARCH_BUILD_ARG) -f docker/Dockerfile -t ghcr.io/analogj/scrutiny-dev:omnibus .
|
||||||
|
|||||||
@@ -7,7 +7,7 @@
|
|||||||
|
|
||||||
# scrutiny
|
# scrutiny
|
||||||
|
|
||||||
[](https://github.com/AnalogJ/scrutiny/actions?query=workflow%3ACI)
|
[](https://github.com/AnalogJ/scrutiny/actions/workflows/ci.yaml)
|
||||||
[](https://codecov.io/gh/AnalogJ/scrutiny)
|
[](https://codecov.io/gh/AnalogJ/scrutiny)
|
||||||
[](https://github.com/AnalogJ/scrutiny/blob/master/LICENSE)
|
[](https://github.com/AnalogJ/scrutiny/blob/master/LICENSE)
|
||||||
[](https://godoc.org/github.com/analogj/scrutiny)
|
[](https://godoc.org/github.com/analogj/scrutiny)
|
||||||
@@ -67,6 +67,11 @@ See [docs/TROUBLESHOOTING_DEVICE_COLLECTOR.md](./docs/TROUBLESHOOTING_DEVICE_COL
|
|||||||
|
|
||||||
## Docker
|
## Docker
|
||||||
|
|
||||||
|
> [!IMPORTANT]
|
||||||
|
> Using `latest-` tags is dangerous as it can update your image without warning. It is a best practice to pin a specific version. scrutiny pushes releases with semver tags,
|
||||||
|
> so you can use tags like `v0.8.2-omnibus`, `v0.8-web`, `v0-collector`, etc. For a list of all image tags see
|
||||||
|
> [scrutiny package versions](https://github.com/AnalogJ/scrutiny/pkgs/container/scrutiny/versions?filters%5Bversion_type%5D=tagged)
|
||||||
|
|
||||||
If you're using Docker, getting started is as simple as running the following command:
|
If you're using Docker, getting started is as simple as running the following command:
|
||||||
|
|
||||||
> See [docker/example.omnibus.docker-compose.yml](https://github.com/AnalogJ/scrutiny/blob/master/docker/example.omnibus.docker-compose.yml) for a docker-compose file.
|
> See [docker/example.omnibus.docker-compose.yml](https://github.com/AnalogJ/scrutiny/blob/master/docker/example.omnibus.docker-compose.yml) for a docker-compose file.
|
||||||
@@ -80,24 +85,24 @@ docker run -p 8080:8080 -p 8086:8086 --restart unless-stopped \
|
|||||||
--device=/dev/sda \
|
--device=/dev/sda \
|
||||||
--device=/dev/sdb \
|
--device=/dev/sdb \
|
||||||
--name scrutiny \
|
--name scrutiny \
|
||||||
ghcr.io/analogj/scrutiny:master-omnibus
|
ghcr.io/analogj/scrutiny:latest-omnibus
|
||||||
```
|
```
|
||||||
|
|
||||||
- `/run/udev` is necessary to provide the Scrutiny collector with access to your device metadata
|
- `/run/udev` is necessary to provide the Scrutiny collector with access to your device metadata
|
||||||
- `--cap-add SYS_RAWIO` is necessary to allow `smartctl` permission to query your device SMART data
|
- `--cap-add SYS_RAWIO` is necessary to allow `smartctl` permission to query your device SMART data
|
||||||
- NOTE: If you have **NVMe** drives, you must add `--cap-add SYS_ADMIN` as well. See issue [#26](https://github.com/AnalogJ/scrutiny/issues/26#issuecomment-696817130)
|
- NOTE: If you have **NVMe** drives, you must add `--cap-add SYS_ADMIN` as well. See issue [#26](https://github.com/AnalogJ/scrutiny/issues/26#issuecomment-696817130)
|
||||||
- `--device` entries are required to ensure that your hard disk devices are accessible within the container.
|
- `--device` entries are required to ensure that your hard disk devices are accessible within the container.
|
||||||
- `ghcr.io/analogj/scrutiny:master-omnibus` is a omnibus image, containing both the webapp server (frontend & api) as well as the S.M.A.R.T metric collector. (see below)
|
- `ghcr.io/analogj/scrutiny:latest-omnibus` is a omnibus image, containing both the webapp server (frontend & api) as well as the S.M.A.R.T metric collector. (see below)
|
||||||
|
|
||||||
### Hub/Spoke Deployment
|
### Hub/Spoke Deployment
|
||||||
|
|
||||||
In addition to the Omnibus image (available under the `latest` tag) you can deploy in Hub/Spoke mode, which requires 3
|
In addition to the Omnibus image (available under the `latest` tag) you can deploy in Hub/Spoke mode, which requires 3
|
||||||
other Docker images:
|
other Docker images:
|
||||||
|
|
||||||
- `ghcr.io/analogj/scrutiny:master-collector` - Contains the Scrutiny data collector, `smartctl` binary and cron-like
|
- `ghcr.io/analogj/scrutiny:latest-collector` - Contains the Scrutiny data collector, `smartctl` binary and cron-like
|
||||||
scheduler. You can run one collector on each server.
|
scheduler. You can run one collector on each server.
|
||||||
- `ghcr.io/analogj/scrutiny:master-web` - Contains the Web UI and API. Only one container necessary
|
- `ghcr.io/analogj/scrutiny:latest-web` - Contains the Web UI and API. Only one container necessary
|
||||||
- `influxdb:2.2` - InfluxDB image, used by the Web container to persist SMART data. Only one container necessary
|
- `influxdb:2.8` - InfluxDB image, used by the Web container to persist SMART data. Only one container necessary
|
||||||
See [docs/TROUBLESHOOTING_INFLUXDB.md](./docs/TROUBLESHOOTING_INFLUXDB.md)
|
See [docs/TROUBLESHOOTING_INFLUXDB.md](./docs/TROUBLESHOOTING_INFLUXDB.md)
|
||||||
|
|
||||||
> See [docker/example.hubspoke.docker-compose.yml](https://github.com/AnalogJ/scrutiny/blob/master/docker/example.hubspoke.docker-compose.yml) for a docker-compose file.
|
> See [docker/example.hubspoke.docker-compose.yml](https://github.com/AnalogJ/scrutiny/blob/master/docker/example.hubspoke.docker-compose.yml) for a docker-compose file.
|
||||||
@@ -106,12 +111,12 @@ other Docker images:
|
|||||||
docker run -p 8086:8086 --restart unless-stopped \
|
docker run -p 8086:8086 --restart unless-stopped \
|
||||||
-v `pwd`/influxdb2:/var/lib/influxdb2 \
|
-v `pwd`/influxdb2:/var/lib/influxdb2 \
|
||||||
--name scrutiny-influxdb \
|
--name scrutiny-influxdb \
|
||||||
influxdb:2.2
|
influxdb:2.8
|
||||||
|
|
||||||
docker run -p 8080:8080 --restart unless-stopped \
|
docker run -p 8080:8080 --restart unless-stopped \
|
||||||
-v `pwd`/scrutiny:/opt/scrutiny/config \
|
-v `pwd`/scrutiny:/opt/scrutiny/config \
|
||||||
--name scrutiny-web \
|
--name scrutiny-web \
|
||||||
ghcr.io/analogj/scrutiny:master-web
|
ghcr.io/analogj/scrutiny:latest-web
|
||||||
|
|
||||||
docker run --restart unless-stopped \
|
docker run --restart unless-stopped \
|
||||||
-v /run/udev:/run/udev:ro \
|
-v /run/udev:/run/udev:ro \
|
||||||
@@ -120,9 +125,13 @@ docker run --restart unless-stopped \
|
|||||||
--device=/dev/sdb \
|
--device=/dev/sdb \
|
||||||
-e COLLECTOR_API_ENDPOINT=http://SCRUTINY_WEB_IPADDRESS:8080 \
|
-e COLLECTOR_API_ENDPOINT=http://SCRUTINY_WEB_IPADDRESS:8080 \
|
||||||
--name scrutiny-collector \
|
--name scrutiny-collector \
|
||||||
ghcr.io/analogj/scrutiny:master-collector
|
ghcr.io/analogj/scrutiny:latest-collector
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### Hub rootless installation using Podman Quadlets
|
||||||
|
|
||||||
|
See [docs/INSTALL_ROOTLESS_PODMAN.md](docs/INSTALL_ROOTLESS_PODMAN.md) for instructions.
|
||||||
|
|
||||||
## Manual Installation (without-Docker)
|
## Manual Installation (without-Docker)
|
||||||
|
|
||||||
While the easiest way to get started with [Scrutiny is using Docker](https://github.com/AnalogJ/scrutiny#docker),
|
While the easiest way to get started with [Scrutiny is using Docker](https://github.com/AnalogJ/scrutiny#docker),
|
||||||
@@ -157,7 +166,7 @@ Neither file is required, however if provided, it allows you to configure how Sc
|
|||||||
|
|
||||||
## Cron Schedule
|
## Cron Schedule
|
||||||
Unfortunately the Cron schedule cannot be configured via the `collector.yaml` (as the collector binary needs to be trigged by a scheduler/cron).
|
Unfortunately the Cron schedule cannot be configured via the `collector.yaml` (as the collector binary needs to be trigged by a scheduler/cron).
|
||||||
However, if you are using the official `ghcr.io/analogj/scrutiny:master-collector` or `ghcr.io/analogj/scrutiny:master-omnibus` docker images,
|
However, if you are using the official `ghcr.io/analogj/scrutiny:latest-collector` or `ghcr.io/analogj/scrutiny:latest-omnibus` docker images,
|
||||||
you can use the `COLLECTOR_CRON_SCHEDULE` environmental variable to override the default cron schedule (daily @ midnight - `0 0 * * *`).
|
you can use the `COLLECTOR_CRON_SCHEDULE` environmental variable to override the default cron schedule (daily @ midnight - `0 0 * * *`).
|
||||||
|
|
||||||
`docker run -e COLLECTOR_CRON_SCHEDULE="0 0 * * *" ...`
|
`docker run -e COLLECTOR_CRON_SCHEDULE="0 0 * * *" ...`
|
||||||
|
|||||||
@@ -3,17 +3,18 @@ package main
|
|||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/analogj/scrutiny/collector/pkg/collector"
|
|
||||||
"github.com/analogj/scrutiny/collector/pkg/config"
|
|
||||||
"github.com/analogj/scrutiny/collector/pkg/errors"
|
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/version"
|
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
"io"
|
"io"
|
||||||
"log"
|
"log"
|
||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/analogj/scrutiny/collector/pkg/collector"
|
||||||
|
"github.com/analogj/scrutiny/collector/pkg/config"
|
||||||
|
"github.com/analogj/scrutiny/collector/pkg/errors"
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg/version"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
|
||||||
utils "github.com/analogj/go-util/utils"
|
utils "github.com/analogj/go-util/utils"
|
||||||
"github.com/fatih/color"
|
"github.com/fatih/color"
|
||||||
"github.com/urfave/cli/v2"
|
"github.com/urfave/cli/v2"
|
||||||
@@ -37,8 +38,8 @@ func main() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
//we're going to load the config file manually, since we need to validate it.
|
//we're going to load the config file manually, since we need to validate it.
|
||||||
err = config.ReadConfig(configFilePath) // Find and read the config file
|
err = config.ReadConfig(configFilePath) // Find and read the config file
|
||||||
if _, ok := err.(errors.ConfigFileMissingError); ok { // Handle errors reading the config file
|
if _, ok := err.(errors.ConfigFileMissingError); ok { // Handle errors reading the config file
|
||||||
//ignore "could not find config file"
|
//ignore "could not find config file"
|
||||||
} else if err != nil {
|
} else if err != nil {
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
@@ -81,7 +82,7 @@ OPTIONS:
|
|||||||
|
|
||||||
subtitle := collectorMetrics + utils.LeftPad2Len(versionInfo, " ", 65-len(collectorMetrics))
|
subtitle := collectorMetrics + utils.LeftPad2Len(versionInfo, " ", 65-len(collectorMetrics))
|
||||||
|
|
||||||
color.New(color.FgGreen).Fprintf(c.App.Writer, fmt.Sprintf(utils.StripIndent(
|
color.New(color.FgGreen).Fprintf(c.App.Writer, utils.StripIndent(
|
||||||
`
|
`
|
||||||
___ ___ ____ __ __ ____ ____ _ _ _ _
|
___ ___ ____ __ __ ____ ____ _ _ _ _
|
||||||
/ __) / __)( _ \( )( )(_ _)(_ _)( \( )( \/ )
|
/ __) / __)( _ \( )( )(_ _)(_ _)( \( )( \/ )
|
||||||
@@ -89,7 +90,7 @@ OPTIONS:
|
|||||||
(___/ \___)(_)\_)(______) (__) (____)(_)\_) (__)
|
(___/ \___)(_)\_)(______) (__) (____)(_)\_) (__)
|
||||||
%s
|
%s
|
||||||
|
|
||||||
`), subtitle))
|
`), subtitle)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -2,14 +2,15 @@ package main
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/analogj/scrutiny/collector/pkg/collector"
|
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/version"
|
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
"io"
|
"io"
|
||||||
"log"
|
"log"
|
||||||
"os"
|
"os"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/analogj/scrutiny/collector/pkg/collector"
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg/version"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
|
||||||
utils "github.com/analogj/go-util/utils"
|
utils "github.com/analogj/go-util/utils"
|
||||||
"github.com/fatih/color"
|
"github.com/fatih/color"
|
||||||
"github.com/urfave/cli/v2"
|
"github.com/urfave/cli/v2"
|
||||||
@@ -57,7 +58,7 @@ OPTIONS:
|
|||||||
|
|
||||||
subtitle := collectorSelfTest + utils.LeftPad2Len(versionInfo, " ", 65-len(collectorSelfTest))
|
subtitle := collectorSelfTest + utils.LeftPad2Len(versionInfo, " ", 65-len(collectorSelfTest))
|
||||||
|
|
||||||
color.New(color.FgGreen).Fprintf(c.App.Writer, fmt.Sprintf(utils.StripIndent(
|
color.New(color.FgGreen).Fprintf(c.App.Writer, utils.StripIndent(
|
||||||
`
|
`
|
||||||
___ ___ ____ __ __ ____ ____ _ _ _ _
|
___ ___ ____ __ __ ____ ____ _ _ _ _
|
||||||
/ __) / __)( _ \( )( )(_ _)(_ _)( \( )( \/ )
|
/ __) / __)( _ \( )( )(_ _)(_ _)( \( )( \/ )
|
||||||
@@ -65,7 +66,7 @@ OPTIONS:
|
|||||||
(___/ \___)(_)\_)(______) (__) (____)(_)\_) (__)
|
(___/ \___)(_)\_)(______) (__) (____)(_)\_) (__)
|
||||||
%s
|
%s
|
||||||
|
|
||||||
`), subtitle))
|
`), subtitle)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -3,9 +3,10 @@ package collector
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
|
|
||||||
var httpClient = &http.Client{Timeout: 60 * time.Second}
|
var httpClient = &http.Client{Timeout: 60 * time.Second}
|
||||||
@@ -14,17 +15,6 @@ type BaseCollector struct {
|
|||||||
logger *logrus.Entry
|
logger *logrus.Entry
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *BaseCollector) getJson(url string, target interface{}) error {
|
|
||||||
|
|
||||||
r, err := httpClient.Get(url)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer r.Body.Close()
|
|
||||||
|
|
||||||
return json.NewDecoder(r.Body).Decode(target)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *BaseCollector) postJson(url string, body interface{}, target interface{}) error {
|
func (c *BaseCollector) postJson(url string, body interface{}, target interface{}) error {
|
||||||
requestBody, err := json.Marshal(body)
|
requestBody, err := json.Marshal(body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|||||||
@@ -15,6 +15,7 @@ import (
|
|||||||
"github.com/analogj/scrutiny/collector/pkg/detect"
|
"github.com/analogj/scrutiny/collector/pkg/detect"
|
||||||
"github.com/analogj/scrutiny/collector/pkg/errors"
|
"github.com/analogj/scrutiny/collector/pkg/errors"
|
||||||
"github.com/analogj/scrutiny/collector/pkg/models"
|
"github.com/analogj/scrutiny/collector/pkg/models"
|
||||||
|
"github.com/gofrs/uuid/v5"
|
||||||
"github.com/samber/lo"
|
"github.com/samber/lo"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
@@ -64,9 +65,9 @@ func (mc *MetricsCollector) Run() error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
//filter any device with empty wwn (they are invalid)
|
// Remove any device without a scrutiny UUID, but this should never happen...
|
||||||
detectedStorageDevices := lo.Filter[models.Device](rawDetectedStorageDevices, func(dev models.Device, _ int) bool {
|
detectedStorageDevices := lo.Filter(rawDetectedStorageDevices, func(device models.Device, _ int) bool {
|
||||||
return len(dev.WWN) > 0
|
return device.ScrutinyUUID.IsNil()
|
||||||
})
|
})
|
||||||
|
|
||||||
mc.logger.Infoln("Sending detected devices to API, for filtering & validation")
|
mc.logger.Infoln("Sending detected devices to API, for filtering & validation")
|
||||||
@@ -90,7 +91,7 @@ func (mc *MetricsCollector) Run() error {
|
|||||||
// execute collection in parallel go-routines
|
// execute collection in parallel go-routines
|
||||||
//wg.Add(1)
|
//wg.Add(1)
|
||||||
//go mc.Collect(&wg, device.WWN, device.DeviceName, device.DeviceType)
|
//go mc.Collect(&wg, device.WWN, device.DeviceName, device.DeviceType)
|
||||||
mc.Collect(device.WWN, device.DeviceName, device.DeviceType)
|
mc.Collect(device.ScrutinyUUID, device.DeviceName, device.DeviceType)
|
||||||
|
|
||||||
if mc.config.GetInt("commands.metrics_smartctl_wait") > 0 {
|
if mc.config.GetInt("commands.metrics_smartctl_wait") > 0 {
|
||||||
time.Sleep(time.Duration(mc.config.GetInt("commands.metrics_smartctl_wait")) * time.Second)
|
time.Sleep(time.Duration(mc.config.GetInt("commands.metrics_smartctl_wait")) * time.Second)
|
||||||
@@ -117,10 +118,10 @@ func (mc *MetricsCollector) Validate() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// func (mc *MetricsCollector) Collect(wg *sync.WaitGroup, deviceWWN string, deviceName string, deviceType string) {
|
// func (mc *MetricsCollector) Collect(wg *sync.WaitGroup, deviceWWN string, deviceName string, deviceType string) {
|
||||||
func (mc *MetricsCollector) Collect(deviceWWN string, deviceName string, deviceType string) {
|
func (mc *MetricsCollector) Collect(scrutiny_uuid uuid.UUID, deviceName string, deviceType string) {
|
||||||
//defer wg.Done()
|
//defer wg.Done()
|
||||||
if len(deviceWWN) == 0 {
|
if scrutiny_uuid.IsNil() {
|
||||||
mc.logger.Errorf("no device WWN detected for %s. Skipping collection for this device (no data association possible).\n", deviceName)
|
mc.logger.Errorf("no scrutiny UUID was created for %s. Skipping collection for this device (no data association possible).\n", deviceName)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
mc.logger.Infof("Collecting smartctl results for %s\n", deviceName)
|
mc.logger.Infof("Collecting smartctl results for %s\n", deviceName)
|
||||||
@@ -140,7 +141,7 @@ func (mc *MetricsCollector) Collect(deviceWWN string, deviceName string, deviceT
|
|||||||
// smartctl command exited with an error, we should still push the data to the API server
|
// smartctl command exited with an error, we should still push the data to the API server
|
||||||
mc.logger.Errorf("smartctl returned an error code (%d) while processing %s\n", exitError.ExitCode(), deviceName)
|
mc.logger.Errorf("smartctl returned an error code (%d) while processing %s\n", exitError.ExitCode(), deviceName)
|
||||||
mc.LogSmartctlExitCode(exitError.ExitCode())
|
mc.LogSmartctlExitCode(exitError.ExitCode())
|
||||||
mc.Publish(deviceWWN, resultBytes)
|
mc.Publish(scrutiny_uuid, resultBytes)
|
||||||
} else {
|
} else {
|
||||||
mc.logger.Errorf("error while attempting to execute smartctl: %s\n", deviceName)
|
mc.logger.Errorf("error while attempting to execute smartctl: %s\n", deviceName)
|
||||||
mc.logger.Errorf("ERROR MESSAGE: %v", err)
|
mc.logger.Errorf("ERROR MESSAGE: %v", err)
|
||||||
@@ -149,19 +150,19 @@ func (mc *MetricsCollector) Collect(deviceWWN string, deviceName string, deviceT
|
|||||||
return
|
return
|
||||||
} else {
|
} else {
|
||||||
//successful run, pass the results directly to webapp backend for parsing and processing.
|
//successful run, pass the results directly to webapp backend for parsing and processing.
|
||||||
mc.Publish(deviceWWN, resultBytes)
|
mc.Publish(scrutiny_uuid, resultBytes)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mc *MetricsCollector) Publish(deviceWWN string, payload []byte) error {
|
func (mc *MetricsCollector) Publish(scrutinyUuid uuid.UUID, payload []byte) error {
|
||||||
mc.logger.Infof("Publishing smartctl results for %s\n", deviceWWN)
|
mc.logger.Infof("Publishing smartctl results for %s\n", scrutinyUuid)
|
||||||
|
|
||||||
apiEndpoint, _ := url.Parse(mc.apiEndpoint.String())
|
apiEndpoint, _ := url.Parse(mc.apiEndpoint.String())
|
||||||
apiEndpoint, _ = apiEndpoint.Parse(fmt.Sprintf("api/device/%s/smart", strings.ToLower(deviceWWN)))
|
apiEndpoint, _ = apiEndpoint.Parse(fmt.Sprintf("api/device/%s/smart", scrutinyUuid.String()))
|
||||||
|
|
||||||
resp, err := httpClient.Post(apiEndpoint.String(), "application/json", bytes.NewBuffer(payload))
|
resp, err := httpClient.Post(apiEndpoint.String(), "application/json", bytes.NewBuffer(payload))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
mc.logger.Errorf("An error occurred while publishing SMART data for device (%s): %v", deviceWWN, err)
|
mc.logger.Errorf("An error occurred while publishing SMART data for device (%s): %v", scrutinyUuid, err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer resp.Body.Close()
|
defer resp.Body.Close()
|
||||||
|
|||||||
@@ -3,11 +3,12 @@ package shell
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"errors"
|
"errors"
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
"io"
|
"io"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"path"
|
"path"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
|
|
||||||
type localShell struct{}
|
type localShell struct{}
|
||||||
@@ -36,7 +37,7 @@ func (s *localShell) Command(logger *logrus.Entry, cmdName string, cmdArgs []str
|
|||||||
if workingDir != "" && path.IsAbs(workingDir) {
|
if workingDir != "" && path.IsAbs(workingDir) {
|
||||||
cmd.Dir = workingDir
|
cmd.Dir = workingDir
|
||||||
} else if workingDir != "" {
|
} else if workingDir != "" {
|
||||||
return "", errors.New("Working Directory must be an absolute path")
|
return "", errors.New("working directory must be an absolute path")
|
||||||
}
|
}
|
||||||
|
|
||||||
err := cmd.Run()
|
err := cmd.Run()
|
||||||
|
|||||||
@@ -7,8 +7,8 @@ package mock_shell
|
|||||||
import (
|
import (
|
||||||
reflect "reflect"
|
reflect "reflect"
|
||||||
|
|
||||||
gomock "github.com/golang/mock/gomock"
|
|
||||||
logrus "github.com/sirupsen/logrus"
|
logrus "github.com/sirupsen/logrus"
|
||||||
|
gomock "go.uber.org/mock/gomock"
|
||||||
)
|
)
|
||||||
|
|
||||||
// MockInterface is a mock of Interface interface.
|
// MockInterface is a mock of Interface interface.
|
||||||
|
|||||||
@@ -2,15 +2,16 @@ package config
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/analogj/go-util/utils"
|
|
||||||
"github.com/analogj/scrutiny/collector/pkg/errors"
|
|
||||||
"github.com/analogj/scrutiny/collector/pkg/models"
|
|
||||||
"github.com/mitchellh/mapstructure"
|
|
||||||
"github.com/spf13/viper"
|
|
||||||
"log"
|
"log"
|
||||||
"os"
|
"os"
|
||||||
"sort"
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/analogj/go-util/utils"
|
||||||
|
"github.com/analogj/scrutiny/collector/pkg/errors"
|
||||||
|
"github.com/analogj/scrutiny/collector/pkg/models"
|
||||||
|
"github.com/go-viper/mapstructure/v2"
|
||||||
|
"github.com/spf13/viper"
|
||||||
)
|
)
|
||||||
|
|
||||||
// When initializing this class the following methods must be called:
|
// When initializing this class the following methods must be called:
|
||||||
@@ -20,7 +21,7 @@ import (
|
|||||||
type configuration struct {
|
type configuration struct {
|
||||||
*viper.Viper
|
*viper.Viper
|
||||||
|
|
||||||
deviceOverrides []models.ScanOverride
|
deviceOverrides []models.ScanOverride
|
||||||
}
|
}
|
||||||
|
|
||||||
//Viper uses the following precedence order. Each item takes precedence over the item below it:
|
//Viper uses the following precedence order. Each item takes precedence over the item below it:
|
||||||
@@ -53,7 +54,7 @@ func (c *configuration) Init() error {
|
|||||||
c.SetEnvPrefix("COLLECTOR")
|
c.SetEnvPrefix("COLLECTOR")
|
||||||
c.SetEnvKeyReplacer(strings.NewReplacer("-", "_", ".", "_"))
|
c.SetEnvKeyReplacer(strings.NewReplacer("-", "_", ".", "_"))
|
||||||
c.AutomaticEnv()
|
c.AutomaticEnv()
|
||||||
|
|
||||||
//c.SetDefault("collect.short.command", "-a -o on -S on")
|
//c.SetDefault("collect.short.command", "-a -o on -S on")
|
||||||
|
|
||||||
c.SetDefault("allow_listed_devices", []string{})
|
c.SetDefault("allow_listed_devices", []string{})
|
||||||
@@ -167,7 +168,7 @@ func (c *configuration) GetCommandMetricsInfoArgs(deviceName string) string {
|
|||||||
overrides := c.GetDeviceOverrides()
|
overrides := c.GetDeviceOverrides()
|
||||||
|
|
||||||
for _, deviceOverrides := range overrides {
|
for _, deviceOverrides := range overrides {
|
||||||
if strings.ToLower(deviceName) == strings.ToLower(deviceOverrides.Device) {
|
if strings.EqualFold(deviceName, deviceOverrides.Device) {
|
||||||
//found matching device
|
//found matching device
|
||||||
if len(deviceOverrides.Commands.MetricsInfoArgs) > 0 {
|
if len(deviceOverrides.Commands.MetricsInfoArgs) > 0 {
|
||||||
return deviceOverrides.Commands.MetricsInfoArgs
|
return deviceOverrides.Commands.MetricsInfoArgs
|
||||||
@@ -183,7 +184,7 @@ func (c *configuration) GetCommandMetricsSmartArgs(deviceName string) string {
|
|||||||
overrides := c.GetDeviceOverrides()
|
overrides := c.GetDeviceOverrides()
|
||||||
|
|
||||||
for _, deviceOverrides := range overrides {
|
for _, deviceOverrides := range overrides {
|
||||||
if strings.ToLower(deviceName) == strings.ToLower(deviceOverrides.Device) {
|
if strings.EqualFold(deviceName, deviceOverrides.Device) {
|
||||||
//found matching device
|
//found matching device
|
||||||
if len(deviceOverrides.Commands.MetricsSmartArgs) > 0 {
|
if len(deviceOverrides.Commands.MetricsSmartArgs) > 0 {
|
||||||
return deviceOverrides.Commands.MetricsSmartArgs
|
return deviceOverrides.Commands.MetricsSmartArgs
|
||||||
|
|||||||
@@ -8,8 +8,8 @@ import (
|
|||||||
reflect "reflect"
|
reflect "reflect"
|
||||||
|
|
||||||
models "github.com/analogj/scrutiny/collector/pkg/models"
|
models "github.com/analogj/scrutiny/collector/pkg/models"
|
||||||
gomock "github.com/golang/mock/gomock"
|
|
||||||
viper "github.com/spf13/viper"
|
viper "github.com/spf13/viper"
|
||||||
|
gomock "go.uber.org/mock/gomock"
|
||||||
)
|
)
|
||||||
|
|
||||||
// MockInterface is a mock of Interface interface.
|
// MockInterface is a mock of Interface interface.
|
||||||
|
|||||||
@@ -101,15 +101,11 @@ func (d *Detect) SmartCtlInfo(device *models.Device) error {
|
|||||||
device.WWN = strings.ToLower(wwn.ToString())
|
device.WWN = strings.ToLower(wwn.ToString())
|
||||||
d.Logger.Debugf("NAA: %d OUI: %d Id: %d => WWN: %s", wwn.Naa, wwn.Oui, wwn.Id, device.WWN)
|
d.Logger.Debugf("NAA: %d OUI: %d Id: %d => WWN: %s", wwn.Naa, wwn.Oui, wwn.Id, device.WWN)
|
||||||
} else {
|
} else {
|
||||||
d.Logger.Info("Using WWN Fallback")
|
d.Logger.Debug("Using WWN Fallback")
|
||||||
d.wwnFallback(device)
|
d.wwnFallback(device)
|
||||||
}
|
}
|
||||||
if len(device.WWN) == 0 {
|
|
||||||
// no WWN populated after WWN lookup and fallback. we need to throw an error
|
device.ScrutinyUUID = GenerateScrutinyUUID(device.ModelName, device.SerialNumber, device.WWN)
|
||||||
errMsg := fmt.Sprintf("no WWN (or fallback) populated for device: %s. Device will be registered, but no data will be published for this device. ", device.DeviceName)
|
|
||||||
d.Logger.Errorf(errMsg)
|
|
||||||
return fmt.Errorf(errMsg)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -9,16 +9,15 @@ import (
|
|||||||
mock_config "github.com/analogj/scrutiny/collector/pkg/config/mock"
|
mock_config "github.com/analogj/scrutiny/collector/pkg/config/mock"
|
||||||
"github.com/analogj/scrutiny/collector/pkg/detect"
|
"github.com/analogj/scrutiny/collector/pkg/detect"
|
||||||
"github.com/analogj/scrutiny/collector/pkg/models"
|
"github.com/analogj/scrutiny/collector/pkg/models"
|
||||||
"github.com/golang/mock/gomock"
|
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
"go.uber.org/mock/gomock"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestDetect_SmartctlScan(t *testing.T) {
|
func TestDetect_SmartctlScan(t *testing.T) {
|
||||||
// setup
|
// setup
|
||||||
mockCtrl := gomock.NewController(t)
|
mockCtrl := gomock.NewController(t)
|
||||||
defer mockCtrl.Finish()
|
|
||||||
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
||||||
fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
|
fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
|
||||||
fakeConfig.EXPECT().GetDeviceOverrides().AnyTimes().Return([]models.ScanOverride{})
|
fakeConfig.EXPECT().GetDeviceOverrides().AnyTimes().Return([]models.ScanOverride{})
|
||||||
@@ -48,7 +47,6 @@ func TestDetect_SmartctlScan(t *testing.T) {
|
|||||||
func TestDetect_SmartctlScan_Megaraid(t *testing.T) {
|
func TestDetect_SmartctlScan_Megaraid(t *testing.T) {
|
||||||
// setup
|
// setup
|
||||||
mockCtrl := gomock.NewController(t)
|
mockCtrl := gomock.NewController(t)
|
||||||
defer mockCtrl.Finish()
|
|
||||||
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
||||||
fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
|
fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
|
||||||
fakeConfig.EXPECT().GetDeviceOverrides().AnyTimes().Return([]models.ScanOverride{})
|
fakeConfig.EXPECT().GetDeviceOverrides().AnyTimes().Return([]models.ScanOverride{})
|
||||||
@@ -81,7 +79,6 @@ func TestDetect_SmartctlScan_Megaraid(t *testing.T) {
|
|||||||
func TestDetect_SmartctlScan_Nvme(t *testing.T) {
|
func TestDetect_SmartctlScan_Nvme(t *testing.T) {
|
||||||
// setup
|
// setup
|
||||||
mockCtrl := gomock.NewController(t)
|
mockCtrl := gomock.NewController(t)
|
||||||
defer mockCtrl.Finish()
|
|
||||||
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
||||||
fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
|
fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
|
||||||
fakeConfig.EXPECT().GetDeviceOverrides().AnyTimes().Return([]models.ScanOverride{})
|
fakeConfig.EXPECT().GetDeviceOverrides().AnyTimes().Return([]models.ScanOverride{})
|
||||||
@@ -113,7 +110,6 @@ func TestDetect_SmartctlScan_Nvme(t *testing.T) {
|
|||||||
func TestDetect_TransformDetectedDevices_Empty(t *testing.T) {
|
func TestDetect_TransformDetectedDevices_Empty(t *testing.T) {
|
||||||
// setup
|
// setup
|
||||||
mockCtrl := gomock.NewController(t)
|
mockCtrl := gomock.NewController(t)
|
||||||
defer mockCtrl.Finish()
|
|
||||||
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
||||||
fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
|
fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
|
||||||
fakeConfig.EXPECT().GetDeviceOverrides().AnyTimes().Return([]models.ScanOverride{})
|
fakeConfig.EXPECT().GetDeviceOverrides().AnyTimes().Return([]models.ScanOverride{})
|
||||||
@@ -147,7 +143,6 @@ func TestDetect_TransformDetectedDevices_Empty(t *testing.T) {
|
|||||||
func TestDetect_TransformDetectedDevices_Ignore(t *testing.T) {
|
func TestDetect_TransformDetectedDevices_Ignore(t *testing.T) {
|
||||||
// setup
|
// setup
|
||||||
mockCtrl := gomock.NewController(t)
|
mockCtrl := gomock.NewController(t)
|
||||||
defer mockCtrl.Finish()
|
|
||||||
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
||||||
fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
|
fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
|
||||||
fakeConfig.EXPECT().GetDeviceOverrides().AnyTimes().Return([]models.ScanOverride{{Device: "/dev/sda", DeviceType: nil, Ignore: true}})
|
fakeConfig.EXPECT().GetDeviceOverrides().AnyTimes().Return([]models.ScanOverride{{Device: "/dev/sda", DeviceType: nil, Ignore: true}})
|
||||||
@@ -180,7 +175,6 @@ func TestDetect_TransformDetectedDevices_Ignore(t *testing.T) {
|
|||||||
func TestDetect_TransformDetectedDevices_Raid(t *testing.T) {
|
func TestDetect_TransformDetectedDevices_Raid(t *testing.T) {
|
||||||
// setup
|
// setup
|
||||||
mockCtrl := gomock.NewController(t)
|
mockCtrl := gomock.NewController(t)
|
||||||
defer mockCtrl.Finish()
|
|
||||||
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
||||||
fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
|
fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
|
||||||
fakeConfig.EXPECT().GetString("commands.metrics_smartctl_bin").AnyTimes().Return("smartctl")
|
fakeConfig.EXPECT().GetString("commands.metrics_smartctl_bin").AnyTimes().Return("smartctl")
|
||||||
@@ -223,7 +217,6 @@ func TestDetect_TransformDetectedDevices_Raid(t *testing.T) {
|
|||||||
func TestDetect_TransformDetectedDevices_Simple(t *testing.T) {
|
func TestDetect_TransformDetectedDevices_Simple(t *testing.T) {
|
||||||
// setup
|
// setup
|
||||||
mockCtrl := gomock.NewController(t)
|
mockCtrl := gomock.NewController(t)
|
||||||
defer mockCtrl.Finish()
|
|
||||||
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
||||||
fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
|
fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
|
||||||
fakeConfig.EXPECT().GetString("commands.metrics_smartctl_bin").AnyTimes().Return("smartctl")
|
fakeConfig.EXPECT().GetString("commands.metrics_smartctl_bin").AnyTimes().Return("smartctl")
|
||||||
@@ -257,7 +250,6 @@ func TestDetect_TransformDetectedDevices_Simple(t *testing.T) {
|
|||||||
func TestDetect_TransformDetectedDevices_WithoutDeviceTypeOverride(t *testing.T) {
|
func TestDetect_TransformDetectedDevices_WithoutDeviceTypeOverride(t *testing.T) {
|
||||||
// setup
|
// setup
|
||||||
mockCtrl := gomock.NewController(t)
|
mockCtrl := gomock.NewController(t)
|
||||||
defer mockCtrl.Finish()
|
|
||||||
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
||||||
fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
|
fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
|
||||||
fakeConfig.EXPECT().GetString("commands.metrics_smartctl_bin").AnyTimes().Return("smartctl")
|
fakeConfig.EXPECT().GetString("commands.metrics_smartctl_bin").AnyTimes().Return("smartctl")
|
||||||
@@ -290,7 +282,6 @@ func TestDetect_TransformDetectedDevices_WithoutDeviceTypeOverride(t *testing.T)
|
|||||||
func TestDetect_TransformDetectedDevices_WhenDeviceNotDetected(t *testing.T) {
|
func TestDetect_TransformDetectedDevices_WhenDeviceNotDetected(t *testing.T) {
|
||||||
// setup
|
// setup
|
||||||
mockCtrl := gomock.NewController(t)
|
mockCtrl := gomock.NewController(t)
|
||||||
defer mockCtrl.Finish()
|
|
||||||
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
||||||
fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
|
fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
|
||||||
fakeConfig.EXPECT().GetString("commands.metrics_smartctl_bin").AnyTimes().Return("smartctl")
|
fakeConfig.EXPECT().GetString("commands.metrics_smartctl_bin").AnyTimes().Return("smartctl")
|
||||||
@@ -312,7 +303,6 @@ func TestDetect_TransformDetectedDevices_WhenDeviceNotDetected(t *testing.T) {
|
|||||||
|
|
||||||
func TestDetect_TransformDetectedDevices_AllowListFilters(t *testing.T) {
|
func TestDetect_TransformDetectedDevices_AllowListFilters(t *testing.T) {
|
||||||
mockCtrl := gomock.NewController(t)
|
mockCtrl := gomock.NewController(t)
|
||||||
defer mockCtrl.Finish()
|
|
||||||
|
|
||||||
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
||||||
fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
|
fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
|
||||||
@@ -353,7 +343,6 @@ func TestDetect_TransformDetectedDevices_AllowListFilters(t *testing.T) {
|
|||||||
func TestDetect_SmartCtlInfo(t *testing.T) {
|
func TestDetect_SmartCtlInfo(t *testing.T) {
|
||||||
t.Run("should report nvme info", func(t *testing.T) {
|
t.Run("should report nvme info", func(t *testing.T) {
|
||||||
ctrl := gomock.NewController(t)
|
ctrl := gomock.NewController(t)
|
||||||
defer ctrl.Finish()
|
|
||||||
|
|
||||||
const (
|
const (
|
||||||
someArgs = "--info --json"
|
someArgs = "--info --json"
|
||||||
|
|||||||
@@ -1,10 +1,11 @@
|
|||||||
package detect
|
package detect
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"strings"
|
||||||
|
|
||||||
"github.com/analogj/scrutiny/collector/pkg/common/shell"
|
"github.com/analogj/scrutiny/collector/pkg/common/shell"
|
||||||
"github.com/analogj/scrutiny/collector/pkg/models"
|
"github.com/analogj/scrutiny/collector/pkg/models"
|
||||||
"github.com/jaypipes/ghw"
|
"github.com/jaypipes/ghw"
|
||||||
"strings"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func DevicePrefix() string {
|
func DevicePrefix() string {
|
||||||
@@ -89,7 +90,7 @@ func (d *Detect) findMissingDevices(detectedDevices []models.Device) ([]models.D
|
|||||||
return missingDevices, nil
|
return missingDevices, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
//WWN values NVMe and SCSI
|
// WWN values NVMe and SCSI
|
||||||
func (d *Detect) wwnFallback(detectedDevice *models.Device) {
|
func (d *Detect) wwnFallback(detectedDevice *models.Device) {
|
||||||
block, err := ghw.Block()
|
block, err := ghw.Block()
|
||||||
if err == nil {
|
if err == nil {
|
||||||
@@ -102,12 +103,6 @@ func (d *Detect) wwnFallback(detectedDevice *models.Device) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
//no WWN found, or could not open Block devices. Either way, fallback to serial number
|
|
||||||
if len(detectedDevice.WWN) == 0 {
|
|
||||||
d.Logger.Debugf("WWN is empty, falling back to serial number: %s", detectedDevice.SerialNumber)
|
|
||||||
detectedDevice.WWN = detectedDevice.SerialNumber
|
|
||||||
}
|
|
||||||
|
|
||||||
//wwn must always be lowercase.
|
//wwn must always be lowercase.
|
||||||
detectedDevice.WWN = strings.ToLower(detectedDevice.WWN)
|
detectedDevice.WWN = strings.ToLower(detectedDevice.WWN)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,10 +1,11 @@
|
|||||||
package detect
|
package detect
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"strings"
|
||||||
|
|
||||||
"github.com/analogj/scrutiny/collector/pkg/common/shell"
|
"github.com/analogj/scrutiny/collector/pkg/common/shell"
|
||||||
"github.com/analogj/scrutiny/collector/pkg/models"
|
"github.com/analogj/scrutiny/collector/pkg/models"
|
||||||
"github.com/jaypipes/ghw"
|
"github.com/jaypipes/ghw"
|
||||||
"strings"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func DevicePrefix() string {
|
func DevicePrefix() string {
|
||||||
@@ -27,7 +28,7 @@ func (d *Detect) Start() ([]models.Device, error) {
|
|||||||
return detectedDevices, nil
|
return detectedDevices, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
//WWN values NVMe and SCSI
|
// WWN values NVMe and SCSI
|
||||||
func (d *Detect) wwnFallback(detectedDevice *models.Device) {
|
func (d *Detect) wwnFallback(detectedDevice *models.Device) {
|
||||||
block, err := ghw.Block()
|
block, err := ghw.Block()
|
||||||
if err == nil {
|
if err == nil {
|
||||||
@@ -40,12 +41,6 @@ func (d *Detect) wwnFallback(detectedDevice *models.Device) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
//no WWN found, or could not open Block devices. Either way, fallback to serial number
|
|
||||||
if len(detectedDevice.WWN) == 0 {
|
|
||||||
d.Logger.Debugf("WWN is empty, falling back to serial number: %s", detectedDevice.SerialNumber)
|
|
||||||
detectedDevice.WWN = detectedDevice.SerialNumber
|
|
||||||
}
|
|
||||||
|
|
||||||
//wwn must always be lowercase.
|
//wwn must always be lowercase.
|
||||||
detectedDevice.WWN = strings.ToLower(detectedDevice.WWN)
|
detectedDevice.WWN = strings.ToLower(detectedDevice.WWN)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,12 +2,13 @@ package detect
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
|
||||||
"github.com/analogj/scrutiny/collector/pkg/common/shell"
|
"github.com/analogj/scrutiny/collector/pkg/common/shell"
|
||||||
"github.com/analogj/scrutiny/collector/pkg/models"
|
"github.com/analogj/scrutiny/collector/pkg/models"
|
||||||
"github.com/jaypipes/ghw"
|
"github.com/jaypipes/ghw"
|
||||||
"io/ioutil"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func DevicePrefix() string {
|
func DevicePrefix() string {
|
||||||
@@ -23,15 +24,15 @@ func (d *Detect) Start() ([]models.Device, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
//inflate device info for detected devices.
|
//inflate device info for detected devices.
|
||||||
for ndx, _ := range detectedDevices {
|
for ndx := range detectedDevices {
|
||||||
d.SmartCtlInfo(&detectedDevices[ndx]) //ignore errors.
|
d.SmartCtlInfo(&detectedDevices[ndx]) //ignore errors.
|
||||||
populateUdevInfo(&detectedDevices[ndx]) //ignore errors.
|
populateUdevInfo(&detectedDevices[ndx]) //ignore errors.
|
||||||
}
|
}
|
||||||
|
|
||||||
return detectedDevices, nil
|
return detectedDevices, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
//WWN values NVMe and SCSI
|
// WWN values NVMe and SCSI
|
||||||
func (d *Detect) wwnFallback(detectedDevice *models.Device) {
|
func (d *Detect) wwnFallback(detectedDevice *models.Device) {
|
||||||
block, err := ghw.Block()
|
block, err := ghw.Block()
|
||||||
if err == nil {
|
if err == nil {
|
||||||
@@ -44,12 +45,6 @@ func (d *Detect) wwnFallback(detectedDevice *models.Device) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
//no WWN found, or could not open Block devices. Either way, fallback to serial number
|
|
||||||
if len(detectedDevice.WWN) == 0 {
|
|
||||||
d.Logger.Debugf("WWN is empty, falling back to serial number: %s", detectedDevice.SerialNumber)
|
|
||||||
detectedDevice.WWN = detectedDevice.SerialNumber
|
|
||||||
}
|
|
||||||
|
|
||||||
//wwn must always be lowercase.
|
//wwn must always be lowercase.
|
||||||
detectedDevice.WWN = strings.ToLower(detectedDevice.WWN)
|
detectedDevice.WWN = strings.ToLower(detectedDevice.WWN)
|
||||||
}
|
}
|
||||||
@@ -61,7 +56,7 @@ func (d *Detect) wwnFallback(detectedDevice *models.Device) {
|
|||||||
func populateUdevInfo(detectedDevice *models.Device) error {
|
func populateUdevInfo(detectedDevice *models.Device) error {
|
||||||
// Get device major:minor numbers
|
// Get device major:minor numbers
|
||||||
// `cat /sys/class/block/sda/dev`
|
// `cat /sys/class/block/sda/dev`
|
||||||
devNo, err := ioutil.ReadFile(filepath.Join("/sys/class/block/", detectedDevice.DeviceName, "dev"))
|
devNo, err := os.ReadFile(filepath.Join("/sys/class/block/", detectedDevice.DeviceName, "dev"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -69,7 +64,7 @@ func populateUdevInfo(detectedDevice *models.Device) error {
|
|||||||
// Look up block device in udev runtime database
|
// Look up block device in udev runtime database
|
||||||
// `cat /run/udev/data/b8:0`
|
// `cat /run/udev/data/b8:0`
|
||||||
udevID := "b" + strings.TrimSpace(string(devNo))
|
udevID := "b" + strings.TrimSpace(string(devNo))
|
||||||
udevBytes, err := ioutil.ReadFile(filepath.Join("/run/udev/data/", udevID))
|
udevBytes, err := os.ReadFile(filepath.Join("/run/udev/data/", udevID))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -97,7 +92,5 @@ func populateUdevInfo(detectedDevice *models.Device) error {
|
|||||||
detectedDevice.DeviceSerialID = fmt.Sprintf("%s-%s", udevInfo["ID_BUS"], deviceSerialID)
|
detectedDevice.DeviceSerialID = fmt.Sprintf("%s-%s", udevInfo["ID_BUS"], deviceSerialID)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -3,7 +3,6 @@ package detect
|
|||||||
import (
|
import (
|
||||||
"github.com/analogj/scrutiny/collector/pkg/common/shell"
|
"github.com/analogj/scrutiny/collector/pkg/common/shell"
|
||||||
"github.com/analogj/scrutiny/collector/pkg/models"
|
"github.com/analogj/scrutiny/collector/pkg/models"
|
||||||
"strings"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func DevicePrefix() string {
|
func DevicePrefix() string {
|
||||||
@@ -26,14 +25,7 @@ func (d *Detect) Start() ([]models.Device, error) {
|
|||||||
return detectedDevices, nil
|
return detectedDevices, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
//WWN values NVMe and SCSI
|
// WWN values NVMe and SCSI
|
||||||
func (d *Detect) wwnFallback(detectedDevice *models.Device) {
|
func (d *Detect) wwnFallback(detectedDevice *models.Device) {
|
||||||
|
// No fallback on windows
|
||||||
//fallback to serial number
|
|
||||||
if len(detectedDevice.WWN) == 0 {
|
|
||||||
detectedDevice.WWN = detectedDevice.SerialNumber
|
|
||||||
}
|
|
||||||
|
|
||||||
//wwn must always be lowercase.
|
|
||||||
detectedDevice.WWN = strings.ToLower(detectedDevice.WWN)
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -0,0 +1,16 @@
|
|||||||
|
package detect
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/gofrs/uuid/v5"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Randomly generated UUID v4 namespace for Scrutiny
|
||||||
|
var ScrutinyNamespaceUUID = uuid.Must(uuid.FromString("3ea22b35-682b-49fb-a655-abffed108e48"))
|
||||||
|
|
||||||
|
// WWN's are not actually unique so we use Model Name and Serial Number
|
||||||
|
// to hopefully create something that is actually unique despite
|
||||||
|
// manufacturer laziness
|
||||||
|
func GenerateScrutinyUUID(modelName string, serialNumber string, wwn string) uuid.UUID {
|
||||||
|
name := modelName + serialNumber + wwn
|
||||||
|
return uuid.NewV5(ScrutinyNamespaceUUID, name)
|
||||||
|
}
|
||||||
@@ -0,0 +1,67 @@
|
|||||||
|
package detect
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/analogj/scrutiny/collector/pkg/models"
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
|
||||||
|
"github.com/gofrs/uuid/v5"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestGenerateScrutinyUUID(t *testing.T) {
|
||||||
|
t.Run("NVMe device from test data", func(t *testing.T) {
|
||||||
|
testData, err := os.ReadFile("testdata/smartctl_info_nvme.json")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
var smartInfo collector.SmartInfo
|
||||||
|
err = json.Unmarshal(testData, &smartInfo)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
device := &models.Device{
|
||||||
|
ModelName: smartInfo.ModelName,
|
||||||
|
SerialNumber: smartInfo.SerialNumber,
|
||||||
|
}
|
||||||
|
// NVMe drives don't have a WWN
|
||||||
|
// so scrutiny falls back to serial number
|
||||||
|
device.WWN = device.SerialNumber
|
||||||
|
|
||||||
|
uuid := GenerateScrutinyUUID(device.ModelName, device.SerialNumber, device.WWN)
|
||||||
|
|
||||||
|
require.NotEmpty(t, uuid.String(), "Generated UUID should not be empty")
|
||||||
|
require.Equal(t, uint8(5), uuid.Version(), "Expected UUID version 5")
|
||||||
|
|
||||||
|
uuid2 := GenerateScrutinyUUID(device.ModelName, device.SerialNumber, device.WWN)
|
||||||
|
require.True(t, bytes.Equal(uuid.Bytes(), uuid2.Bytes()), "UUID generation should be deterministic for the same input")
|
||||||
|
})
|
||||||
|
|
||||||
|
// Test with different device data to ensure uniqueness
|
||||||
|
t.Run("different devices produce different UUIDs", func(t *testing.T) {
|
||||||
|
device1 := models.Device{
|
||||||
|
ModelName: "Samsung SSD 860 EVO 1TB",
|
||||||
|
SerialNumber: "S3ZANX0K123456A",
|
||||||
|
WWN: "5002538e40a22954",
|
||||||
|
}
|
||||||
|
|
||||||
|
device2 := device1
|
||||||
|
device2.SerialNumber = "S3ZANX0K123456B"
|
||||||
|
|
||||||
|
uuid1 := GenerateScrutinyUUID(device1.ModelName, device1.SerialNumber, device1.WWN)
|
||||||
|
uuid2 := GenerateScrutinyUUID(device2.ModelName, device2.SerialNumber, device2.WWN)
|
||||||
|
|
||||||
|
require.False(t, bytes.Equal(uuid1.Bytes(), uuid2.Bytes()), "Different devices should produce different UUIDs")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestScrutinyNamespaceUUID(t *testing.T) {
|
||||||
|
// Make sure no one changes the namespace
|
||||||
|
expectedNamespace, err := uuid.FromString("3ea22b35-682b-49fb-a655-abffed108e48")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to parse expected namespace UUID: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
require.True(t, bytes.Equal(ScrutinyNamespaceUUID.Bytes(), expectedNamespace.Bytes()), "Scrutiny Namespace UUID should never change")
|
||||||
|
}
|
||||||
@@ -1,10 +1,10 @@
|
|||||||
package detect_test
|
package detect_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"testing"
|
||||||
|
|
||||||
"github.com/analogj/scrutiny/collector/pkg/detect"
|
"github.com/analogj/scrutiny/collector/pkg/detect"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
"testing"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestWwn_FromStringTable(t *testing.T) {
|
func TestWwn_FromStringTable(t *testing.T) {
|
||||||
@@ -25,8 +25,7 @@ func TestWwn_FromStringTable(t *testing.T) {
|
|||||||
}
|
}
|
||||||
//test
|
//test
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
testname := fmt.Sprintf("%s", tt.wwnStr)
|
t.Run(tt.wwnStr, func(t *testing.T) {
|
||||||
t.Run(testname, func(t *testing.T) {
|
|
||||||
str := tt.wwn.ToString()
|
str := tt.wwn.ToString()
|
||||||
require.Equal(t, tt.wwnStr, str)
|
require.Equal(t, tt.wwnStr, str)
|
||||||
})
|
})
|
||||||
|
|||||||
@@ -1,12 +1,17 @@
|
|||||||
package models
|
package models
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/gofrs/uuid/v5"
|
||||||
|
)
|
||||||
|
|
||||||
type Device struct {
|
type Device struct {
|
||||||
WWN string `json:"wwn"`
|
ScrutinyUUID uuid.UUID `json:"scrutiny_uuid"`
|
||||||
|
WWN string `json:"wwn"`
|
||||||
|
|
||||||
DeviceName string `json:"device_name"`
|
DeviceName string `json:"device_name"`
|
||||||
DeviceUUID string `json:"device_uuid"`
|
DeviceUUID string `json:"device_uuid"`
|
||||||
DeviceSerialID string `json:"device_serial_id"`
|
DeviceSerialID string `json:"device_serial_id"`
|
||||||
DeviceLabel string `json:"device_label"`
|
DeviceLabel string `json:"device_label"`
|
||||||
|
|
||||||
Manufacturer string `json:"manufacturer"`
|
Manufacturer string `json:"manufacturer"`
|
||||||
ModelName string `json:"model_name"`
|
ModelName string `json:"model_name"`
|
||||||
|
|||||||
+28
-5
@@ -6,16 +6,20 @@
|
|||||||
######## Build the frontend
|
######## Build the frontend
|
||||||
FROM --platform=${BUILDPLATFORM} node AS frontendbuild
|
FROM --platform=${BUILDPLATFORM} node AS frontendbuild
|
||||||
WORKDIR /go/src/github.com/analogj/scrutiny
|
WORKDIR /go/src/github.com/analogj/scrutiny
|
||||||
COPY --link . /go/src/github.com/analogj/scrutiny
|
COPY --link Makefile /go/src/github.com/analogj/scrutiny/
|
||||||
|
COPY --link webapp/frontend /go/src/github.com/analogj/scrutiny/webapp/frontend
|
||||||
|
|
||||||
RUN make binary-frontend
|
RUN make binary-frontend
|
||||||
|
|
||||||
|
|
||||||
######## Build the backend
|
######## Build the backend
|
||||||
FROM golang:1.20-bookworm as backendbuild
|
FROM golang:1.25-trixie as backendbuild
|
||||||
|
|
||||||
WORKDIR /go/src/github.com/analogj/scrutiny
|
WORKDIR /go/src/github.com/analogj/scrutiny
|
||||||
COPY --link . /go/src/github.com/analogj/scrutiny
|
COPY --link Makefile /go/src/github.com/analogj/scrutiny/
|
||||||
|
COPY --link go.mod go.sum /go/src/github.com/analogj/scrutiny/
|
||||||
|
COPY --link collector /go/src/github.com/analogj/scrutiny/collector
|
||||||
|
COPY --link webapp/backend /go/src/github.com/analogj/scrutiny/webapp/backend
|
||||||
RUN apt-get update && DEBIAN_FRONTEND=noninteractive \
|
RUN apt-get update && DEBIAN_FRONTEND=noninteractive \
|
||||||
apt-get install -y --no-install-recommends \
|
apt-get install -y --no-install-recommends \
|
||||||
file \
|
file \
|
||||||
@@ -23,8 +27,25 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive \
|
|||||||
RUN make binary-clean binary-all WEB_BINARY_NAME=scrutiny
|
RUN make binary-clean binary-all WEB_BINARY_NAME=scrutiny
|
||||||
|
|
||||||
|
|
||||||
|
######## Build smartmontools from source
|
||||||
|
FROM debian:trixie-slim AS smartmontoolsbuild
|
||||||
|
ARG SMARTMONTOOLS_VER=7.5
|
||||||
|
RUN apt-get update && DEBIAN_FRONTEND=noninteractive \
|
||||||
|
apt-get install -y --no-install-recommends \
|
||||||
|
ca-certificates curl gcc g++ gnupg make \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
RUN curl -L "https://github.com/smartmontools/smartmontools/releases/download/RELEASE_$(echo ${SMARTMONTOOLS_VER} | tr '.' '_')/smartmontools-${SMARTMONTOOLS_VER}.tar.gz" -o /tmp/smartmontools.tar.gz \
|
||||||
|
&& tar -xzf /tmp/smartmontools.tar.gz -C /tmp \
|
||||||
|
&& cd /tmp/smartmontools-${SMARTMONTOOLS_VER} \
|
||||||
|
&& ./configure --prefix=/usr LDFLAGS='-static' --without-libcap-ng --without-libsystemd \
|
||||||
|
&& make -j"$(nproc)" \
|
||||||
|
&& make install \
|
||||||
|
&& /usr/sbin/update-smart-drivedb \
|
||||||
|
&& rm -rf /tmp/smartmontools*
|
||||||
|
|
||||||
|
|
||||||
######## Combine build artifacts in runtime image
|
######## Combine build artifacts in runtime image
|
||||||
FROM debian:bookworm-slim as runtime
|
FROM debian:trixie-slim AS runtime
|
||||||
ARG TARGETARCH
|
ARG TARGETARCH
|
||||||
EXPOSE 8080
|
EXPOSE 8080
|
||||||
WORKDIR /opt/scrutiny
|
WORKDIR /opt/scrutiny
|
||||||
@@ -40,7 +61,6 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive \
|
|||||||
ca-certificates \
|
ca-certificates \
|
||||||
cron \
|
cron \
|
||||||
curl \
|
curl \
|
||||||
smartmontools \
|
|
||||||
tzdata \
|
tzdata \
|
||||||
procps \
|
procps \
|
||||||
xz-utils \
|
xz-utils \
|
||||||
@@ -62,6 +82,9 @@ RUN curl -L https://dl.influxdata.com/influxdb/releases/influxdb2-${INFLUXVER}-$
|
|||||||
|
|
||||||
COPY /rootfs /
|
COPY /rootfs /
|
||||||
|
|
||||||
|
COPY --from=smartmontoolsbuild /usr/sbin/smartctl /usr/sbin/smartctl
|
||||||
|
COPY --from=smartmontoolsbuild /usr/share/smartmontools/ /usr/share/smartmontools/
|
||||||
|
|
||||||
COPY --link --from=backendbuild --chmod=755 /go/src/github.com/analogj/scrutiny/scrutiny /opt/scrutiny/bin/
|
COPY --link --from=backendbuild --chmod=755 /go/src/github.com/analogj/scrutiny/scrutiny /opt/scrutiny/bin/
|
||||||
COPY --link --from=backendbuild --chmod=755 /go/src/github.com/analogj/scrutiny/scrutiny-collector-metrics /opt/scrutiny/bin/
|
COPY --link --from=backendbuild --chmod=755 /go/src/github.com/analogj/scrutiny/scrutiny-collector-metrics /opt/scrutiny/bin/
|
||||||
COPY --link --from=frontendbuild --chmod=644 /go/src/github.com/analogj/scrutiny/dist /opt/scrutiny/web
|
COPY --link --from=frontendbuild --chmod=644 /go/src/github.com/analogj/scrutiny/dist /opt/scrutiny/web
|
||||||
|
|||||||
@@ -4,21 +4,43 @@
|
|||||||
|
|
||||||
|
|
||||||
########
|
########
|
||||||
FROM golang:1.20-bookworm as backendbuild
|
FROM golang:1.25-trixie AS backendbuild
|
||||||
|
|
||||||
WORKDIR /go/src/github.com/analogj/scrutiny
|
WORKDIR /go/src/github.com/analogj/scrutiny
|
||||||
|
|
||||||
COPY . /go/src/github.com/analogj/scrutiny
|
COPY --link Makefile /go/src/github.com/analogj/scrutiny/
|
||||||
|
COPY --link go.mod go.sum /go/src/github.com/analogj/scrutiny/
|
||||||
|
COPY --link collector /go/src/github.com/analogj/scrutiny/collector
|
||||||
|
COPY --link webapp/backend /go/src/github.com/analogj/scrutiny/webapp/backend
|
||||||
|
|
||||||
RUN apt-get update && apt-get install -y file && rm -rf /var/lib/apt/lists/*
|
RUN apt-get update && apt-get install -y file && rm -rf /var/lib/apt/lists/*
|
||||||
RUN make binary-clean binary-collector
|
RUN make binary-clean binary-collector
|
||||||
|
|
||||||
|
######## Build smartmontools from source
|
||||||
|
FROM debian:trixie-slim AS smartmontoolsbuild
|
||||||
|
ARG SMARTMONTOOLS_VER=7.5
|
||||||
|
RUN apt-get update && DEBIAN_FRONTEND=noninteractive \
|
||||||
|
apt-get install -y --no-install-recommends \
|
||||||
|
ca-certificates curl gcc g++ gnupg make \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
RUN curl -L "https://github.com/smartmontools/smartmontools/releases/download/RELEASE_$(echo ${SMARTMONTOOLS_VER} | tr '.' '_')/smartmontools-${SMARTMONTOOLS_VER}.tar.gz" -o /tmp/smartmontools.tar.gz \
|
||||||
|
&& tar -xzf /tmp/smartmontools.tar.gz -C /tmp \
|
||||||
|
&& cd /tmp/smartmontools-${SMARTMONTOOLS_VER} \
|
||||||
|
&& ./configure --prefix=/usr LDFLAGS='-static' --without-libcap-ng --without-libsystemd \
|
||||||
|
&& make -j"$(nproc)" \
|
||||||
|
&& make install \
|
||||||
|
&& /usr/sbin/update-smart-drivedb \
|
||||||
|
&& rm -rf /tmp/smartmontools*
|
||||||
|
|
||||||
########
|
########
|
||||||
FROM debian:bookworm-slim as runtime
|
FROM debian:trixie-slim AS runtime
|
||||||
WORKDIR /opt/scrutiny
|
WORKDIR /opt/scrutiny
|
||||||
ENV PATH="/opt/scrutiny/bin:${PATH}"
|
ENV PATH="/opt/scrutiny/bin:${PATH}"
|
||||||
|
|
||||||
RUN apt-get update && apt-get install -y cron smartmontools ca-certificates tzdata && rm -rf /var/lib/apt/lists/* && update-ca-certificates
|
RUN apt-get update && apt-get install -y cron ca-certificates tzdata && rm -rf /var/lib/apt/lists/* && update-ca-certificates
|
||||||
|
|
||||||
|
COPY --from=smartmontoolsbuild /usr/sbin/smartctl /usr/sbin/smartctl
|
||||||
|
COPY --from=smartmontoolsbuild /usr/share/smartmontools/ /usr/share/smartmontools/
|
||||||
|
|
||||||
COPY /docker/entrypoint-collector.sh /entrypoint-collector.sh
|
COPY /docker/entrypoint-collector.sh /entrypoint-collector.sh
|
||||||
COPY /rootfs/etc/cron.d/scrutiny /etc/cron.d/scrutiny
|
COPY /rootfs/etc/cron.d/scrutiny /etc/cron.d/scrutiny
|
||||||
|
|||||||
@@ -0,0 +1,20 @@
|
|||||||
|
########################################################################################################################
|
||||||
|
# Smartmontools Builder
|
||||||
|
# - Builds smartctl from source as a static binary.
|
||||||
|
# - Updates the drive database to include the latest drive models since it can change between releases.
|
||||||
|
# - Used as a shared build stage by Dockerfile and Dockerfile.collector.
|
||||||
|
########################################################################################################################
|
||||||
|
FROM debian:trixie-slim
|
||||||
|
ARG SMARTMONTOOLS_VER=7.5
|
||||||
|
RUN apt-get update && DEBIAN_FRONTEND=noninteractive \
|
||||||
|
apt-get install -y --no-install-recommends \
|
||||||
|
ca-certificates curl gcc g++ gnupg make \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
RUN curl -L "https://github.com/smartmontools/smartmontools/releases/download/RELEASE_$(echo ${SMARTMONTOOLS_VER} | tr '.' '_')/smartmontools-${SMARTMONTOOLS_VER}.tar.gz" -o /tmp/smartmontools.tar.gz \
|
||||||
|
&& tar -xzf /tmp/smartmontools.tar.gz -C /tmp \
|
||||||
|
&& cd /tmp/smartmontools-${SMARTMONTOOLS_VER} \
|
||||||
|
&& ./configure --prefix=/usr LDFLAGS='-static' --without-libcap-ng --without-libsystemd \
|
||||||
|
&& make -j"$(nproc)" \
|
||||||
|
&& make install \
|
||||||
|
&& /usr/sbin/update-smart-drivedb \
|
||||||
|
&& rm -rf /tmp/smartmontools*
|
||||||
@@ -6,22 +6,26 @@
|
|||||||
######## Build the frontend
|
######## Build the frontend
|
||||||
FROM --platform=${BUILDPLATFORM} node AS frontendbuild
|
FROM --platform=${BUILDPLATFORM} node AS frontendbuild
|
||||||
WORKDIR /go/src/github.com/analogj/scrutiny
|
WORKDIR /go/src/github.com/analogj/scrutiny
|
||||||
COPY --link . /go/src/github.com/analogj/scrutiny
|
COPY --link Makefile /go/src/github.com/analogj/scrutiny/
|
||||||
|
COPY --link webapp/frontend /go/src/github.com/analogj/scrutiny/webapp/frontend
|
||||||
|
|
||||||
RUN make binary-frontend
|
RUN make binary-frontend
|
||||||
|
|
||||||
######## Build the backend
|
######## Build the backend
|
||||||
FROM golang:1.20-bookworm as backendbuild
|
FROM golang:1.25-trixie as backendbuild
|
||||||
|
|
||||||
WORKDIR /go/src/github.com/analogj/scrutiny
|
WORKDIR /go/src/github.com/analogj/scrutiny
|
||||||
COPY --link . /go/src/github.com/analogj/scrutiny
|
COPY --link Makefile /go/src/github.com/analogj/scrutiny/
|
||||||
|
COPY --link go.mod go.sum /go/src/github.com/analogj/scrutiny/
|
||||||
|
COPY --link collector /go/src/github.com/analogj/scrutiny/collector
|
||||||
|
COPY --link webapp/backend /go/src/github.com/analogj/scrutiny/webapp/backend
|
||||||
|
|
||||||
RUN apt-get update && apt-get install -y file && rm -rf /var/lib/apt/lists/*
|
RUN apt-get update && apt-get install -y file && rm -rf /var/lib/apt/lists/*
|
||||||
RUN make binary-clean binary-all WEB_BINARY_NAME=scrutiny
|
RUN make binary-clean binary-all WEB_BINARY_NAME=scrutiny
|
||||||
|
|
||||||
|
|
||||||
######## Combine build artifacts in runtime image
|
######## Combine build artifacts in runtime image
|
||||||
FROM debian:bookworm-slim as runtime
|
FROM debian:trixie-slim as runtime
|
||||||
EXPOSE 8080
|
EXPOSE 8080
|
||||||
WORKDIR /opt/scrutiny
|
WORKDIR /opt/scrutiny
|
||||||
ENV PATH="/opt/scrutiny/bin:${PATH}"
|
ENV PATH="/opt/scrutiny/bin:${PATH}"
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ version: '2.4'
|
|||||||
services:
|
services:
|
||||||
influxdb:
|
influxdb:
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
image: influxdb:2.2
|
image: influxdb:2.8
|
||||||
ports:
|
ports:
|
||||||
- '8086:8086'
|
- '8086:8086'
|
||||||
volumes:
|
volumes:
|
||||||
|
|||||||
+15
-15
@@ -49,19 +49,15 @@ contains the connection and notification details but I always find it easier to
|
|||||||
docker-compose.
|
docker-compose.
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
version: "3.4"
|
|
||||||
|
|
||||||
networks:
|
networks:
|
||||||
monitoring: # A common network for all monitoring services to communicate into
|
monitoring: # A common network for all monitoring services to communicate into
|
||||||
external: true
|
|
||||||
notifications: # To Gotify or another Notification service
|
notifications: # To Gotify or another Notification service
|
||||||
external: true
|
|
||||||
|
|
||||||
services:
|
services:
|
||||||
influxdb:
|
influxdb:
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
container_name: influxdb
|
container_name: influxdb
|
||||||
image: influxdb:2.1-alpine
|
image: influxdb:2.8
|
||||||
ports:
|
ports:
|
||||||
- 8086:8086
|
- 8086:8086
|
||||||
volumes:
|
volumes:
|
||||||
@@ -73,28 +69,33 @@ services:
|
|||||||
- DOCKER_INFLUXDB_INIT_PASSWORD=${PASSWORD}
|
- DOCKER_INFLUXDB_INIT_PASSWORD=${PASSWORD}
|
||||||
- DOCKER_INFLUXDB_INIT_ORG=homelab
|
- DOCKER_INFLUXDB_INIT_ORG=homelab
|
||||||
- DOCKER_INFLUXDB_INIT_BUCKET=scrutiny
|
- DOCKER_INFLUXDB_INIT_BUCKET=scrutiny
|
||||||
- DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=your-very-secret-token
|
- DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=SUPER-SECRET-TOKEN
|
||||||
|
- TZ=Europe/Stockholm
|
||||||
networks:
|
networks:
|
||||||
- monitoring
|
- monitoring
|
||||||
|
|
||||||
scrutiny:
|
scrutiny:
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
container_name: scrutiny
|
container_name: scrutiny
|
||||||
image: ghcr.io/analogj/scrutiny:master-web
|
# best practice: pin to a specific release instead of latest
|
||||||
|
image: ghcr.io/analogj/scrutiny:latest-web
|
||||||
ports:
|
ports:
|
||||||
- 8080:8080
|
- 8080:8080
|
||||||
volumes:
|
volumes:
|
||||||
- ${DIR_CONFIG}/scrutiny/config:/opt/scrutiny/config
|
- ${DIR_CONFIG}/config:/opt/scrutiny/config
|
||||||
environment:
|
environment:
|
||||||
- SCRUTINY_WEB_INFLUXDB_HOST=influxdb
|
- SCRUTINY_WEB_INFLUXDB_HOST=influxdb
|
||||||
- SCRUTINY_WEB_INFLUXDB_PORT=8086
|
- SCRUTINY_WEB_INFLUXDB_PORT=8086
|
||||||
- SCRUTINY_WEB_INFLUXDB_TOKEN=your-very-secret-token
|
- SCRUTINY_WEB_INFLUXDB_TOKEN=SUPER-SECRET-TOKEN
|
||||||
- SCRUTINY_WEB_INFLUXDB_ORG=homelab
|
- SCRUTINY_WEB_INFLUXDB_ORG=homelab
|
||||||
- SCRUTINY_WEB_INFLUXDB_BUCKET=scrutiny
|
- SCRUTINY_WEB_INFLUXDB_BUCKET=scrutiny
|
||||||
# Optional but highly recommended to notify you in case of a problem
|
# Optional but highly recommended to notify you in case of a problem; space-separated list of shoutrrr uri's
|
||||||
- SCRUTINY_NOTIFY_URLS=["http://gotify:80/message?token=a-gotify-token"]
|
# https://github.com/AnalogJ/scrutiny/blob/master/docs/TROUBLESHOOTING_NOTIFICATIONS.md
|
||||||
|
- SCRUTINY_NOTIFY_URLS=http://gotify:80/message?token=a-gotify-token ntfy://username:password@host:port/topic
|
||||||
|
- TZ=Europe/Stockholm
|
||||||
depends_on:
|
depends_on:
|
||||||
- influxdb
|
influxdb:
|
||||||
|
condition: service_healthy
|
||||||
networks:
|
networks:
|
||||||
- notifications
|
- notifications
|
||||||
- monitoring
|
- monitoring
|
||||||
@@ -163,13 +164,12 @@ Also all drives that you wish to monitor need to be presented to the container u
|
|||||||
The image handles the periodic scanning of the drives.
|
The image handles the periodic scanning of the drives.
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
version: "3.4"
|
|
||||||
|
|
||||||
services:
|
services:
|
||||||
|
|
||||||
collector:
|
collector:
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
image: 'ghcr.io/analogj/scrutiny:master-collector'
|
# best practice: pin to a specific release instead of latest
|
||||||
|
image: 'ghcr.io/analogj/scrutiny:latest-collector'
|
||||||
cap_add:
|
cap_add:
|
||||||
- SYS_RAWIO
|
- SYS_RAWIO
|
||||||
volumes:
|
volumes:
|
||||||
|
|||||||
+315
-13
@@ -10,9 +10,9 @@ Scrutiny is made up of three components: an influxdb Database, a collector and a
|
|||||||
|
|
||||||
## InfluxDB
|
## InfluxDB
|
||||||
|
|
||||||
Please follow the official InfluxDB installation guide. Note, you'll need to install v2.2.0+.
|
Please follow the official InfluxDB installation guide. Note, you'll need to install v2.8.0+.
|
||||||
|
|
||||||
https://docs.influxdata.com/influxdb/v2.2/install/
|
https://docs.influxdata.com/influxdb/v2/install/
|
||||||
|
|
||||||
## Webapp/API
|
## Webapp/API
|
||||||
|
|
||||||
@@ -122,6 +122,11 @@ So you'll need to install the v7+ version using one of the following commands:
|
|||||||
- `dnf install smartmontools`
|
- `dnf install smartmontools`
|
||||||
- **FreeBSD:** `pkg install smartmontools`
|
- **FreeBSD:** `pkg install smartmontools`
|
||||||
|
|
||||||
|
The following additional dependencies are needed if you want to run the collector as an unprivileged user:
|
||||||
|
|
||||||
|
- systemd version > 235
|
||||||
|
- a restricted user account
|
||||||
|
|
||||||
### Directory Structure
|
### Directory Structure
|
||||||
|
|
||||||
Now let's create a directory structure to contain the Scrutiny collector binary.
|
Now let's create a directory structure to contain the Scrutiny collector binary.
|
||||||
@@ -133,40 +138,337 @@ mkdir -p /opt/scrutiny/bin
|
|||||||
|
|
||||||
### Download Files
|
### Download Files
|
||||||
|
|
||||||
Next, we'll download the Scrutiny collector binary from the [latest Github release](https://github.com/analogj/scrutiny/releases).
|
Next, we'll download the Scrutiny collector binary from the [latest Github release](https://github.com/analogj/scrutiny/releases). You are looking for the one titled **scrutiny-collector-metrics-linux-amd64** unless you know you are on arm.
|
||||||
The file you need to download is named:
|
|
||||||
|
|
||||||
- **scrutiny-collector-metrics-linux-amd64** - save this file to `/opt/scrutiny/bin`
|
```sh
|
||||||
|
wget -O /tmp/scrutiny-collector-metrics https://github.com/AnalogJ/scrutiny/releases/latest/download/scrutiny-collector-metrics-linux-amd64
|
||||||
|
```
|
||||||
|
|
||||||
|
Optional, but recommended: Before continuing it's recommended you compare the sha from the release page with the downloaded file to ensure it's the same file and not corrupted/tampered with. The command to do this is:
|
||||||
|
|
||||||
|
`echo "SHA_GOES_HERE /tmp/scrutiny-collector-metrics" | sha256sum -c`
|
||||||
|
|
||||||
|
example for the v0.8.6 release:
|
||||||
|
|
||||||
|
`echo "4c163645ce24e5487f4684a25ec73485d77a82a57f084808ff5aad0c11499ad2 /tmp/scrutiny-collector-metrics" | sha256sum -c`
|
||||||
|
|
||||||
|
followed by:
|
||||||
|
|
||||||
|
`sudo mv /tmp/scrutiny-collector-metrics /opt/scrutiny/bin/`
|
||||||
|
|
||||||
|
to move the binary to its final resting place
|
||||||
|
|
||||||
|
|
||||||
### Prepare Scrutiny
|
### Prepare Scrutiny
|
||||||
|
|
||||||
Now that we have downloaded the required files, let's prepare the filesystem.
|
Now that we have downloaded the required files, let's prepare the filesystem.
|
||||||
|
|
||||||
```
|
```sh
|
||||||
# Let's make sure the Scrutiny collector is executable.
|
# Let's make sure the Scrutiny collector is executable.
|
||||||
chmod +x /opt/scrutiny/bin/scrutiny-collector-metrics-linux-amd64
|
chmod +x /opt/scrutiny/bin/scrutiny-collector-metrics
|
||||||
```
|
```
|
||||||
|
|
||||||
|
if you are using SELinux, you may need to also do the following:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
# tell SELinux to allow these binaries
|
||||||
|
sudo semanage fcontext -a -t bin_t "/opt/scrutiny/bin(/.*)?"
|
||||||
|
# update labels
|
||||||
|
sudo restorecon -Rv /opt/scrutiny/bin
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
### Start Scrutiny Collector, Populate Webapp
|
### Start Scrutiny Collector, Populate Webapp
|
||||||
|
|
||||||
Next, we will manually trigger the collector, to populate the Scrutiny dashboard:
|
Next, we will manually trigger the collector, to populate the Scrutiny dashboard:
|
||||||
|
|
||||||
> NOTE: if you need to pass a config file to the scrutiny collector, you can provide it using the `--config` flag.
|
> NOTE: if you need to pass a config file to the scrutiny collector, you can provide it using the `--config` flag.
|
||||||
|
|
||||||
```
|
```sh
|
||||||
/opt/scrutiny/bin/scrutiny-collector-metrics-linux-amd64 run --api-endpoint "http://localhost:8080"
|
/opt/scrutiny/bin/scrutiny-collector-metrics run --api-endpoint "http://localhost:8080"
|
||||||
```
|
```
|
||||||
|
|
||||||
### Schedule Collector with Cron
|
### Schedule Collector with (root) Cron
|
||||||
|
|
||||||
Finally you need to schedule the collector to run periodically.
|
Finally you need to schedule the collector to run periodically.
|
||||||
This may be different depending on your OS/environment, but it may look something like this:
|
This may be different depending on your OS/environment, but it may look something like this:
|
||||||
|
|
||||||
```
|
```sh
|
||||||
# open crontab
|
# open crontab
|
||||||
crontab -e
|
sudo crontab -e
|
||||||
|
|
||||||
# add a line for Scrutiny
|
# add a line for Scrutiny
|
||||||
*/15 * * * * . /etc/profile; /opt/scrutiny/bin/scrutiny-collector-metrics-linux-amd64 run --api-endpoint "http://localhost:8080"
|
*/15 * * * * . /etc/profile; /opt/scrutiny/bin/scrutiny-collector-metrics run --api-endpoint "http://localhost:8080"
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### Schedule Collector with Systemd (rootless)
|
||||||
|
|
||||||
|
Alternatively you can run `scrutiny-collector-metrics` as non-root so long as the relevant capabilities and permissions are granted.
|
||||||
|
|
||||||
|
|
||||||
|
#### Creating a Restricted Service Account
|
||||||
|
|
||||||
|
This is the account that will run `scrutiny-collector-metrics`. Note this isn't strictly needed for all setups, but is useful from a logging/auditing perspective.
|
||||||
|
|
||||||
|
- Debian-based distros:
|
||||||
|
- `sudo adduser --system scrutiny-svc --group --home /opt/scrutiny-svc`
|
||||||
|
- RHEL-based distros:
|
||||||
|
- `sudo useradd --system --home-dir /opt/scrutiny-svc --shell /sbin/nologin scrutiny-svc`
|
||||||
|
|
||||||
|
Next, add the user to the `disk` group:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
sudo usermod -aG disk scrutiny-svc
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
#### Creating a Restricted Systemd Service using AmbientCapabilities (easier)
|
||||||
|
|
||||||
|
This is the simpler setup, which allows you to run scrutiny rootless, but depending on what you want, may require granting more permissions to scrutiny than you would like to.
|
||||||
|
|
||||||
|
1. go to `/etc/systemd/system`
|
||||||
|
2. create scrutiny-collector.service with the following contents:
|
||||||
|
|
||||||
|
|
||||||
|
```ini
|
||||||
|
[Unit]
|
||||||
|
Description=Daily Restricted Scrutiny Collector
|
||||||
|
After=network.target
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
[Unit]
|
||||||
|
Description=Daily Restricted Scrutiny Collector
|
||||||
|
After=network.target
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Type=oneshot
|
||||||
|
User=scrutiny-svc
|
||||||
|
Group=disk
|
||||||
|
ExecStart=/opt/scrutiny/bin/scrutiny-collector-metrics run --api-endpoint "http://localhost:8080"
|
||||||
|
|
||||||
|
# --- PRIVILEGE LOCKDOWN ---
|
||||||
|
## CAP_SYS_RAWIO is needed for SATA drives
|
||||||
|
AmbientCapabilities=CAP_SYS_RAWIO
|
||||||
|
CapabilityBoundingSet=CAP_SYS_RAWIO
|
||||||
|
## unfortunately nvme drives require CAP_SYS_ADMIN
|
||||||
|
## if you want nvme drives you must do the following:
|
||||||
|
#AmbientCapabilities=CAP_SYS_RAWIO CAP_SYS_ADMIN
|
||||||
|
#CapabilityBoundingSet=
|
||||||
|
|
||||||
|
NoNewPrivileges=yes
|
||||||
|
|
||||||
|
# Security/sandboxing settings
|
||||||
|
KeyringMode=private
|
||||||
|
LockPersonality=yes
|
||||||
|
MemoryDenyWriteExecute=yes
|
||||||
|
ProtectSystem=strict
|
||||||
|
ProtectHome=yes
|
||||||
|
PrivateDevices=no
|
||||||
|
## you can restrict devices using:
|
||||||
|
#DevicePolicy=closed
|
||||||
|
#DeviceAllow=/dev/sda r
|
||||||
|
#DeviceAllow=/dev/nvme0 r
|
||||||
|
ProtectKernelModules=yes
|
||||||
|
ProtectKernelTunables=yes
|
||||||
|
ProtectControlGroups=yes
|
||||||
|
ProtectClock=yes
|
||||||
|
ProtectHostname=yes
|
||||||
|
ProtectKernelLogs=yes
|
||||||
|
RemoveIPC=yes
|
||||||
|
RestrictSUIDSGID=true
|
||||||
|
|
||||||
|
|
||||||
|
# --- NETWORK LOCKDOWN
|
||||||
|
## use these to restrict what scrutiny can talk to over the network
|
||||||
|
## if using a hub on a different host you will need to change the values accordingly
|
||||||
|
RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX
|
||||||
|
IPAddressDeny=any
|
||||||
|
IPAddressAllow=localhost
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=multi-user.target
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
Additionally, for nvme drives you may need to create a udev rule on many systems, as /dev/nvme* is often owned only by root:
|
||||||
|
|
||||||
|
##### add udev rule `/etc/udev/rules.d/99-nvme.rules` with contents:
|
||||||
|
|
||||||
|
```
|
||||||
|
KERNEL=="nvme[0-9]*", GROUP="disk", MODE="0640"
|
||||||
|
```
|
||||||
|
|
||||||
|
then run the following commands to load the udev rule:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
sudo udevadm control --reload-rules
|
||||||
|
sudo udevadm trigger --subsystem-match=nvme --action=add
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
##### Pros:
|
||||||
|
|
||||||
|
- easy to maintain
|
||||||
|
- much better than running as root (especially if you don't need nvme drives)
|
||||||
|
- there are no privilege escalations needed
|
||||||
|
|
||||||
|
|
||||||
|
##### Cons:
|
||||||
|
|
||||||
|
NOTE: These cons basically only apply if a major supply-chain attack happens against scrutiny, and reflect a worst-case scenario that is unlikely to ever occur:
|
||||||
|
|
||||||
|
- CAP_SYS_RAWIO allows for data exfiltration/modification from SATA drives (ssh keys, /etc/shadow, etc)
|
||||||
|
- CAP_SYS_ADMIN would theoretically allow for significant system compromise
|
||||||
|
- nvme drives requires a udev rule for reliable access
|
||||||
|
|
||||||
|
|
||||||
|
If you are happy with that, you can jump to [Create a Systemd Timer to run scrutiny-collector.service](#create-a-systemd-timer-to-run-scrutiny-collectorservice)
|
||||||
|
|
||||||
|
|
||||||
|
#### Creating a Restricted Systemd Service using sudo and Shim Script
|
||||||
|
|
||||||
|
If granting scrutiny `CAP_SYS_RAWIO` and/or `CAP_SYS_ADMIN` exceeds your risk appetite, you have another option, though one more complicated and with its own set of pros/cons
|
||||||
|
|
||||||
|
1. run `sudo mkdir -p /opt/smartctl-shim/bin`
|
||||||
|
2. edit `/opt/smartctl-shim/bin/smartctl` with the following content:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
#!/bin/bash
|
||||||
|
# Shim for accounts to use smartctl without being root
|
||||||
|
# for automation requires the account be in sudoers
|
||||||
|
exec /usr/bin/sudo /usr/sbin/smartctl "$@"
|
||||||
|
```
|
||||||
|
|
||||||
|
3. create a new `scrutiny-collector` file in `/etc/sudoers.d/`
|
||||||
|
4. inside `/etc/sudoers.d/scrutiny-collector` add the following:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
scrutiny-svc ALL=(root) NOPASSWD: /usr/sbin/smartctl *
|
||||||
|
```
|
||||||
|
|
||||||
|
5. go to `/etc/systemd/system`
|
||||||
|
6. create scrutiny-collector.service with the following contents:
|
||||||
|
|
||||||
|
|
||||||
|
```ini
|
||||||
|
[Unit]
|
||||||
|
Description=Daily Restricted Scrutiny Collector
|
||||||
|
After=network.target
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Type=oneshot
|
||||||
|
User=scrutiny-svc
|
||||||
|
Environment="PATH=/opt/smartctl-shim/bin:/usr/bin:/bin"
|
||||||
|
ExecStart=/opt/scrutiny/bin/scrutiny-collector-metrics run --api-endpoint "http://localhost:8080"
|
||||||
|
|
||||||
|
# --- PRIVILEGE LOCKDOWN ---
|
||||||
|
## we use sudo to elevate privileges for smartctl only, so no Ambient Capabilities are needed
|
||||||
|
AmbientCapabilities=
|
||||||
|
## CAP_SYS_RAWIO is needed for SATA drives
|
||||||
|
CapabilityBoundingSet=CAP_SETUID CAP_SETGID CAP_AUDIT_WRITE CAP_SYS_RAWIO CAP_SYS_RESOURCE
|
||||||
|
## unfortunately nvme drives require CAP_SYS_ADMIN
|
||||||
|
## if you want nvme drives you must do the following:
|
||||||
|
# CapabilityBoundingSet=CAP_SETUID CAP_SETGID CAP_AUDIT_WRITE CAP_SYS_RAWIO CAP_SYS_ADMIN CAP_SYS_RESOURCE
|
||||||
|
|
||||||
|
## since sudo needs to be used to elevate permissions in this setup, we need to allow new privileges
|
||||||
|
NoNewPrivileges=no
|
||||||
|
|
||||||
|
# Security/sandboxing settings
|
||||||
|
KeyringMode=private
|
||||||
|
LockPersonality=yes
|
||||||
|
MemoryDenyWriteExecute=yes
|
||||||
|
ProtectSystem=strict
|
||||||
|
ProtectHome=yes
|
||||||
|
PrivateDevices=no
|
||||||
|
ProtectKernelModules=yes
|
||||||
|
ProtectKernelTunables=yes
|
||||||
|
ProtectControlGroups=yes
|
||||||
|
ProtectClock=yes
|
||||||
|
ProtectHostname=yes
|
||||||
|
ProtectKernelLogs=yes
|
||||||
|
RemoveIPC=yes
|
||||||
|
RestrictSUIDSGID=true
|
||||||
|
|
||||||
|
|
||||||
|
# --- NETWORK LOCKDOWN
|
||||||
|
## use these to restrict what scrutiny can talk to over the network
|
||||||
|
## if using a hub on a different host you will need to change the values accordingly
|
||||||
|
RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX
|
||||||
|
IPAddressDeny=any
|
||||||
|
IPAddressAllow=localhost
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=multi-user.target
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
##### Pros:
|
||||||
|
|
||||||
|
- the scrutiny binary itself will not have permissions like CAP_SYS_ADMIN
|
||||||
|
- much better than running as root (especially if you don't need nvme drives)
|
||||||
|
- `sudo` restricts privilege escalation to just `smartctl`
|
||||||
|
- no udev rule needed
|
||||||
|
|
||||||
|
|
||||||
|
##### Cons:
|
||||||
|
|
||||||
|
NOTE: These cons basically only apply if a major supply-chain attack happens against scrutiny, and reflect a worst-case scenario that is unlikely to ever occur:
|
||||||
|
|
||||||
|
- Any sort of privilege escalation attack in sudo could theoretically allow a compromised scrutiny to gain additional privileges, since the process has permission to escalate privileges in general
|
||||||
|
- Even though sudo only allows `smartctl`, it still has `CAP_SYS_RAWIO` and `CAP_SYS_ADMIN` so in theory the same attacks from the first method are possible, though now only with an exploit using smartctl instead of scrutiny directly
|
||||||
|
- even though you don't need a udev rule, this adds a lot of additional administrative overhead
|
||||||
|
- while the scrutiny binary itself isn't elevated, it has a sub-process that is running as root (systemctl)
|
||||||
|
|
||||||
|
#### Create a Systemd Timer to run scrutiny-collector.service
|
||||||
|
|
||||||
|
First, let's test our service. It doesn't matter which method you used above, as either way you need to load and run it.
|
||||||
|
|
||||||
|
```sh
|
||||||
|
# reload changes for systemd services
|
||||||
|
sudo systemctl daemon-reload
|
||||||
|
|
||||||
|
# enable the service
|
||||||
|
sudo systemctl enable scrutiny-collector.service
|
||||||
|
|
||||||
|
# now run the service
|
||||||
|
sudo systemctl start scrutiny-collector.service
|
||||||
|
```
|
||||||
|
|
||||||
|
You should see the data in your hub instance of scrutiny now. If you run into issues, I recommend turning on debug logging for scrutiny and checking your system logs using journalctl. It may be that a permission is missing or wrong.
|
||||||
|
|
||||||
|
Now that things have been validated, let's create the systemd timer to run the service for us on a schedule:
|
||||||
|
|
||||||
|
1. if you are not still there, go to `/etc/systemd/system`
|
||||||
|
2. create scrutiny-collector.timer with the following contents:
|
||||||
|
|
||||||
|
```ini
|
||||||
|
[Unit]
|
||||||
|
Description=Run Scrutiny Collector daily at 2am
|
||||||
|
|
||||||
|
[Timer]
|
||||||
|
# Standard calendar trigger
|
||||||
|
OnCalendar=*-*-* 02:00:00
|
||||||
|
# Ensures the job runs if the computer was off at 2am
|
||||||
|
Persistent=true
|
||||||
|
# Minimizes I/O spikes by staggering start time
|
||||||
|
RandomizedDelaySec=30
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=timers.target
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
Update the schedule as you see fit for your needs
|
||||||
|
|
||||||
|
Once you are satisfied with your timer, you'll need to load and enable it:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
# reload changes for systemd services
|
||||||
|
sudo systemctl daemon-reload
|
||||||
|
|
||||||
|
# now enable the timer
|
||||||
|
sudo systemctl enable --now scrutiny-collector.timer
|
||||||
|
```
|
||||||
|
|
||||||
|
That's it! You're done. You can check the status of the timer using `sudo systemctl status scrutiny-collector.timer`
|
||||||
@@ -0,0 +1,170 @@
|
|||||||
|
# Rootless Podman Quadlet Install
|
||||||
|
|
||||||
|
Note: These instructions are written with Podman 4.9 in mind, as that's what's available on Ubuntu 24.04. Podman 5+ can simplify the process using a .pod file to run both the hub and influxdb instance in the same pod, sharing localhost. This is a fairly trivial change should anyone want to add the documentation for it. While this document isn't Ubuntu-specific, this is being purposefully done to allow it to apply to the vast majority of Podman users, regardless of what Linux distro they use.
|
||||||
|
|
||||||
|
|
||||||
|
### Dependencies
|
||||||
|
|
||||||
|
- Podman >= 4.9
|
||||||
|
- Systemd > 250 (for quadlet support)
|
||||||
|
- a restricted service account
|
||||||
|
|
||||||
|
|
||||||
|
### Creating a Service Account
|
||||||
|
|
||||||
|
See [Creating a Restricted Service Account](INSTALL_MANUAL.md#creating-a-restricted-service-account) for instructions.
|
||||||
|
|
||||||
|
While you do not need to use the same account as the collector, this guide will assume you will be for all its examples.
|
||||||
|
|
||||||
|
In addition to those steps, you will need to create sub ids and enable lingering for the user:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
# add sub-uids and sub-gids, you may need to adjust numbers if you have other rootless quadlets running for other users already
|
||||||
|
# it is not recommended to go below 100000
|
||||||
|
# we choose to start at 500000 in the event you have some other podman accounts
|
||||||
|
sudo usermod --add-subuids 500000-565535 scrutiny-svc
|
||||||
|
sudo usermod --add-subgids 500000-565535 scrutiny-svc
|
||||||
|
|
||||||
|
# We want the quadlets to stay running even if the user isn't logged in
|
||||||
|
sudo loginctl enable-linger scrutiny-svc
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
### Directory Structure
|
||||||
|
|
||||||
|
Once the account is created, you will need to grab its id to create a few directories for the data files and rootless quadlet files:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
# create folders for config and influxdb
|
||||||
|
sudo mkdir -p /opt/scrutiny-svc/scrutiny/{config,influxdb}
|
||||||
|
|
||||||
|
# get the config file for scrutiny hub
|
||||||
|
sudo wget -O /opt/scrutiny-svc/scrutiny/config/scrutiny.yaml https://raw.githubusercontent.com/AnalogJ/scrutiny/refs/heads/master/example.scrutiny.yaml
|
||||||
|
|
||||||
|
# set permissions on everything
|
||||||
|
sudo chown -R scrutiny-svc:scrutiny-svc /opt/scrutiny-svc
|
||||||
|
|
||||||
|
# Get the ID of scrutiny-svc so you know it for your own record-keeping
|
||||||
|
id -u scrutiny-svc
|
||||||
|
|
||||||
|
# create a directory
|
||||||
|
sudo mkdir -p /etc/containers/systemd/users/$(id -u scrutiny-svc)
|
||||||
|
|
||||||
|
## go into the directory you just created for the rest of the guide
|
||||||
|
cd /etc/containers/systemd/users/$(id -u scrutiny-svc)
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
### Quadlet Files
|
||||||
|
|
||||||
|
Now that everything is set up and configured for the account to run quadlets, we just need to create a few quadlet files.
|
||||||
|
|
||||||
|
All remaining system actions will take place in `/etc/containers/systemd/users/$(id -u scrutiny-svc)` which is why we had you cd into it.
|
||||||
|
|
||||||
|
|
||||||
|
#### Networking
|
||||||
|
|
||||||
|
We need the hub and influxdb instances to be able to talk to each other, and in the case of Podman 4.9, they will run separately not sharing a localhost, and as such we need to configure a network for them to share. The file is pretty simple:
|
||||||
|
|
||||||
|
|
||||||
|
##### scrutiny-net.network
|
||||||
|
|
||||||
|
```ini
|
||||||
|
[Network]
|
||||||
|
NetworkName=scrutiny-net
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
#### Containers
|
||||||
|
|
||||||
|
Now we're ready for creating the containers
|
||||||
|
|
||||||
|
|
||||||
|
##### influxdb.container
|
||||||
|
|
||||||
|
```ini
|
||||||
|
[Unit]
|
||||||
|
Description=influxdb
|
||||||
|
|
||||||
|
[Container]
|
||||||
|
ContainerName=influxdb
|
||||||
|
Image=docker.io/library/influxdb:2.8
|
||||||
|
AutoUpdate=registry
|
||||||
|
Timezone=local
|
||||||
|
## not strictly necessary, but keeps file permission sane for influxdb
|
||||||
|
PodmanArgs=--group-add keep-groups
|
||||||
|
## versions of podman after 5.1 should do the below instead
|
||||||
|
#GroupAdd=keep-groups
|
||||||
|
Volume=/opt/scrutiny-svc/scrutiny/influxdb:/var/lib/influxdb2:Z
|
||||||
|
Network=scrutiny-net
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Restart=on-failure
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
# Start by default on boot
|
||||||
|
WantedBy=default.target
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
##### scrutiny-web.container
|
||||||
|
|
||||||
|
```ini
|
||||||
|
[Unit]
|
||||||
|
Description=scrutiny-web
|
||||||
|
After=influxdb.service
|
||||||
|
Requires=influxdb.service
|
||||||
|
|
||||||
|
[Container]
|
||||||
|
ContainerName=scrutiny-web
|
||||||
|
Image=ghcr.io/analogj/scrutiny:latest-web
|
||||||
|
AutoUpdate=registry
|
||||||
|
Timezone=local
|
||||||
|
Volume=/opt/scrutiny-svc/scrutiny/config:/opt/scrutiny/config:Z
|
||||||
|
Network=scrutiny-net
|
||||||
|
PublishPort=8080:8080/tcp
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Restart=on-failure
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
# Start by default on boot
|
||||||
|
WantedBy=default.target
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Update scrutiny config
|
||||||
|
|
||||||
|
Since our containers are running separately, we need to update `/opt/scrutiny-svc/scrutiny/config/scrutiny.yaml` to the new influxdb host:
|
||||||
|
|
||||||
|
1. edit `/opt/scrutiny-svc/scrutiny/config/scrutiny.yaml`
|
||||||
|
2. under `influxdb` section, change `host: 0.0.0.0` to `host: influxdb` -- remember that yaml is whitespace-sensitive! so be mindful of the indents
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
influxdb:
|
||||||
|
# scheme: 'http'
|
||||||
|
host: influxdb
|
||||||
|
port: 8086
|
||||||
|
```
|
||||||
|
|
||||||
|
# Running the Hub and InfluxDB services
|
||||||
|
|
||||||
|
With that done, we're now ready to start up the services:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
# reload all the systemd user files for scrutiny-svc
|
||||||
|
sudo systemctl --user -M scrutiny-svc@ daemon-reload
|
||||||
|
|
||||||
|
# start the scrutiny-net network:
|
||||||
|
sudo systemctl --user -M scrutiny-svc@ start scrutiny-net-network.service
|
||||||
|
|
||||||
|
# start influxdb first and wait for it to come up
|
||||||
|
sudo systemctl --user -M scrutiny-svc@ start influxdb.service
|
||||||
|
|
||||||
|
# check if it's fully up
|
||||||
|
sudo systemctl --user -M scrutiny-svc@ status influxdb.service
|
||||||
|
|
||||||
|
# now start scrutiny
|
||||||
|
sudo systemctl --user -M scrutiny-svc@ start scrutiny-web.service
|
||||||
|
```
|
||||||
|
|
||||||
|
You are now ready to run the collector, if you would like to run that rootless as well, see the guide at [Schedule Collector with Systemd (rootless)](INSTALL_MANUAL.md#schedule-collector-with-systemd-rootless)
|
||||||
@@ -1,22 +1,18 @@
|
|||||||
# Docker Images `master-omnibus` vs `latest`
|
# Docker Images `latest` vs `nightly`
|
||||||
|
|
||||||
> TL;DR; The `master-omnibus` and `latest` tags are almost semantically identical, as I follow a `golden master`
|
> TL;DR; The `latest-omnibus`, `latest-collector`, and `latest-web` tags point to the most recent release. (`latest` points to `latest-omnibus`)
|
||||||
development process. However if you want to ensure you're only using the latest release, you can change to `latest`
|
> The `nightly-omnibus`, `nightly-collector`, and `nightly-web` tags point to builds that are generated every night from the latest commit on the `master` branch.
|
||||||
|
|
||||||
The CI script used to orchestrate the docker image builds can be found here: https://github.com/AnalogJ/scrutiny/blob/master/.github/workflows/docker-build.yaml#L166-L184
|
The CD scripts used to orchestrate the docker image builds can be found here:
|
||||||
|
* https://github.com/AnalogJ/scrutiny/blob/master/.github/workflows/docker-build.yaml
|
||||||
|
* https://github.com/AnalogJ/scrutiny/blob/master/.github/workflows/docker-nightly.yaml
|
||||||
|
|
||||||
In general Scrutiny follows a `golden master` development process, which means that the `master` branch is not directly updated (unless its for documentation changes),
|
In general scrutiny follows a feature branch development process, which means that the `master` branch should ideally always be free of bugs
|
||||||
instead development is done in a feature branch, or committed to the `beta` branch.
|
This is driven by the requirement that every PR be reviewed and pass all tests. Unfortunately, bugs do make it through, especially because of the
|
||||||
|
enormous number of hard drives that scrutiny must support..
|
||||||
|
|
||||||
As development progresses, and we're satisfied that a feature is complete, and the quality is acceptable,
|
This means that while the nightly builds should have the latest features and bug fixes, there may be things that sneak through. Unless you need a particular
|
||||||
I merge the changes to `master` and trigger the creation of a new release -- ie, when master is updated, a new release
|
feature or bug fix, we recommend sticking to releases. Also note that using `latest` tags is generally considered a bad practice; pin a specific version instead.
|
||||||
is almost immediately created (and tagged with `latest`)
|
|
||||||
|
|
||||||
So changing from `master-omnibus -> latest` will be the same thing for all intents and purposes.
|
|
||||||
|
|
||||||
> NOTE: Previously, there was a `automated cron build` that ran on the `master` and `beta` branches.
|
|
||||||
They used to trigger a `nightly` build, even if nothing has changed on the branch. This has a couple of benefits, but one is to
|
|
||||||
ensure that there's no broken external dependencies in our (unchanged) code. This `nightly` build no longer updates the `master-omnibus` tag.
|
|
||||||
|
|
||||||
# Running Docker `rootless`
|
# Running Docker `rootless`
|
||||||
|
|
||||||
|
|||||||
@@ -41,14 +41,14 @@ The growth rate is pretty unintuitive -- see https://github.com/AnalogJ/scrutiny
|
|||||||
|
|
||||||
InfluxDB is a required dependency for Scrutiny v0.4.0+.
|
InfluxDB is a required dependency for Scrutiny v0.4.0+.
|
||||||
|
|
||||||
https://docs.influxdata.com/influxdb/v2.2/install/
|
https://docs.influxdata.com/influxdb/v2/install/
|
||||||
|
|
||||||
## Persistence
|
## Persistence
|
||||||
|
|
||||||
To ensure that all data is correctly stored, you must also persist the InfluxDB database directory
|
To ensure that all data is correctly stored, you must also persist the InfluxDB database directory
|
||||||
|
|
||||||
- If you're using the Official Scrutiny Omnibus image (`ghcr.io/analogj/scrutiny:master-omnibus`), the path is `/opt/scrutiny/influxdb`
|
- If you're using the Official Scrutiny Omnibus image (`ghcr.io/analogj/scrutiny:master-omnibus`), the path is `/opt/scrutiny/influxdb`
|
||||||
- If you're deploying in Hub/Spoke mode with the InfluxDB maintained image (`influxdb:2.2`), the path is `/var/lib/influxdb2`
|
- If you're deploying in Hub/Spoke mode with the InfluxDB maintained image (`influxdb:2.8`), the path is `/var/lib/influxdb2`
|
||||||
|
|
||||||
If you attempt to restart Scrutiny but you forgot to persist the InfluxDB directory, you will get an error message like follows:
|
If you attempt to restart Scrutiny but you forgot to persist the InfluxDB directory, you will get an error message like follows:
|
||||||
|
|
||||||
|
|||||||
@@ -3,8 +3,8 @@
|
|||||||
As documented in [example.scrutiny.yaml](https://github.com/AnalogJ/scrutiny/blob/master/example.scrutiny.yaml#L59-L75)
|
As documented in [example.scrutiny.yaml](https://github.com/AnalogJ/scrutiny/blob/master/example.scrutiny.yaml#L59-L75)
|
||||||
there are multiple ways to configure notifications for Scrutiny.
|
there are multiple ways to configure notifications for Scrutiny.
|
||||||
|
|
||||||
Under the hood we use a library called [Shoutrrr](https://github.com/containrrr/shoutrrr) to send our notifications, and you should use their documentation if you run into
|
Under the hood we use a library called [Shoutrrr](https://github.com/nicholas-fedor/shoutrrr) to send our notifications, and you should use their documentation if you run into
|
||||||
any issues: https://containrrr.dev/shoutrrr/services/overview/
|
any issues: https://shoutrrr.nickfedor.com/services/overview/
|
||||||
|
|
||||||
|
|
||||||
# Script Notifications
|
# Script Notifications
|
||||||
|
|||||||
+24
-39
@@ -59,7 +59,7 @@ log:
|
|||||||
|
|
||||||
|
|
||||||
# Notification "urls" look like the following. For more information about service specific configuration see
|
# Notification "urls" look like the following. For more information about service specific configuration see
|
||||||
# Shoutrrr's documentation: https://containrrr.dev/shoutrrr/services/overview/
|
# Shoutrrr's documentation: https://shoutrrr.nickfedor.com/services/overview/
|
||||||
#
|
#
|
||||||
# note, usernames and passwords containing special characters will need to be urlencoded.
|
# note, usernames and passwords containing special characters will need to be urlencoded.
|
||||||
# if your username is: "myname@example.com" and your password is "124@34$1"
|
# if your username is: "myname@example.com" and your password is "124@34$1"
|
||||||
@@ -67,41 +67,26 @@ log:
|
|||||||
|
|
||||||
#notify:
|
#notify:
|
||||||
# urls:
|
# urls:
|
||||||
# - "discord://token@webhookid"
|
# - discord://token@id[?thread_id=threadid]
|
||||||
# - "telegram://token@telegram?channels=channel-1[,channel-2,...]"
|
# - googlechat://chat.googleapis.com/v1/spaces/FOO/messages?key=bar&token=baz
|
||||||
# - "pushover://shoutrrr:apiToken@userKey/?priority=1&devices=device1[,device2, ...]"
|
# - hangouts://chat.googleapis.com/v1/spaces/FOO/messages?key=bar&token=baz
|
||||||
# - "slack://[botname@]token-a/token-b/token-c"
|
# - lark://host/token?secret=secret&title=title&link=url
|
||||||
# - "smtp://username:password@host:port/?fromAddress=fromAddress&toAddresses=recipient1[,recipient2,...]"
|
# - matrix://username:password@host:port/[?rooms=!roomID1[,roomAlias2]]
|
||||||
# - "teams://token-a/token-b/token-c"
|
# - mattermost://[username@]mattermost-host/token[/channel]
|
||||||
# - "gotify://gotify-host/token"
|
# - rocketchat://[username@]rocketchat-host/token[/channel|@recipient]
|
||||||
# - "pushbullet://api-token[/device/#channel/email]"
|
# - signal://[user[:password]@]host[:port]/source_phone/recipient1[,recipient2,...]
|
||||||
# - "ifttt://key/?events=event1[,event2,...]&value1=value1&value2=value2&value3=value3"
|
# - slack://[botname@]token-a/token-b/token-c
|
||||||
# - "mattermost://[username@]mattermost-host/token[/channel]"
|
# - teams://group@tenant/altId/groupOwner?host=organization.webhook.office.com
|
||||||
# - "ntfy://username:password@host:port/topic"
|
# - telegram://token@telegram?chats=@channel-1[,chat-id-1,chat-id-2:message-thread-id,...]
|
||||||
# - "hangouts://chat.googleapis.com/v1/spaces/FOO/messages?key=bar&token=baz"
|
# - wecom://key
|
||||||
# - "zulip://bot-mail:bot-key@zulip-domain/?stream=name-or-id&topic=name"
|
# - zulip://bot-mail:bot-key@zulip-domain/?stream=name-or-id&topic=name
|
||||||
# - "join://shoutrrr:api-key@join/?devices=device1[,device2, ...][&icon=icon][&title=title]"
|
# - bark://devicekey@host
|
||||||
# - "script:///file/path/on/disk"
|
# - gotify://gotify-host/token
|
||||||
# - "https://www.example.com/path"
|
# - ifttt://key/?events=event1[,event2,...]&value1=value1&value2=value2&value3=value3
|
||||||
|
# - join://shoutrrr:api-key@join/?devices=device1[,device2, ...][&icon=icon][&title=title]
|
||||||
########################################################################################################################
|
# - ntfy://username:password@ntfy.sh/topic
|
||||||
# FEATURES COMING SOON
|
# - pushbullet://api-token[/device/#channel/email]
|
||||||
#
|
# - pushover://shoutrrr:apiToken@userKey/?devices=device1[,device2, ...]
|
||||||
# The following commented out sections are a preview of additional configuration options that will be available soon.
|
# - opsgenie://host/token?responders=responder1[,responder2]
|
||||||
#
|
# - pagerduty://[host[:port]]/integration-key[?query-parameters]
|
||||||
########################################################################################################################
|
# - smtp://username:password@host:port/?fromaddress=fromAddress&toaddresses=recipient1[,recipient2,...][&additional_params]
|
||||||
|
|
||||||
#limits:
|
|
||||||
# ata:
|
|
||||||
# critical:
|
|
||||||
# error: 10
|
|
||||||
# standard:
|
|
||||||
# error: 20
|
|
||||||
# warn: 10
|
|
||||||
# scsi:
|
|
||||||
# critical: true
|
|
||||||
# standard: true
|
|
||||||
# nvme:
|
|
||||||
# critical: true
|
|
||||||
# standard: true
|
|
||||||
|
|
||||||
|
|||||||
@@ -1,81 +1,93 @@
|
|||||||
module github.com/analogj/scrutiny
|
module github.com/analogj/scrutiny
|
||||||
|
|
||||||
go 1.20
|
go 1.25
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/analogj/go-util v0.0.0-20190301173314-5295e364eb14
|
github.com/analogj/go-util v0.0.0-20210417161720-39b497cca03b
|
||||||
github.com/containrrr/shoutrrr v0.8.0
|
github.com/fatih/color v1.18.0
|
||||||
github.com/fatih/color v1.15.0
|
github.com/gin-gonic/gin v1.11.0
|
||||||
github.com/gin-gonic/gin v1.6.3
|
github.com/glebarez/sqlite v1.11.0
|
||||||
github.com/glebarez/sqlite v1.4.5
|
github.com/go-gormigrate/gormigrate/v2 v2.1.5
|
||||||
github.com/go-gormigrate/gormigrate/v2 v2.0.0
|
github.com/go-viper/mapstructure/v2 v2.5.0
|
||||||
github.com/golang/mock v1.6.0
|
github.com/gofrs/uuid/v5 v5.4.0
|
||||||
github.com/influxdata/influxdb-client-go/v2 v2.9.0
|
github.com/influxdata/influxdb-client-go/v2 v2.14.0
|
||||||
github.com/jaypipes/ghw v0.6.1
|
github.com/jaypipes/ghw v0.21.2
|
||||||
github.com/mitchellh/mapstructure v1.5.0
|
github.com/nicholas-fedor/shoutrrr v0.13.2
|
||||||
github.com/samber/lo v1.25.0
|
github.com/samber/lo v1.52.0
|
||||||
github.com/sirupsen/logrus v1.6.0
|
github.com/sirupsen/logrus v1.9.4
|
||||||
github.com/spf13/viper v1.15.0
|
github.com/spf13/viper v1.21.0
|
||||||
github.com/stretchr/testify v1.8.1
|
github.com/stretchr/testify v1.11.1
|
||||||
github.com/urfave/cli/v2 v2.2.0
|
github.com/urfave/cli/v2 v2.27.7
|
||||||
golang.org/x/sync v0.1.0
|
go.uber.org/mock v0.6.0
|
||||||
gorm.io/gorm v1.23.5
|
golang.org/x/sync v0.19.0
|
||||||
|
gorm.io/gorm v1.31.1
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect
|
github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
|
github.com/bytedance/gopkg v0.1.3 // indirect
|
||||||
|
github.com/bytedance/sonic v1.15.0 // indirect
|
||||||
|
github.com/bytedance/sonic/loader v0.5.0 // indirect
|
||||||
|
github.com/cloudwego/base64x v0.1.6 // indirect
|
||||||
|
github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
|
||||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||||
github.com/deepmap/oapi-codegen v1.8.2 // indirect
|
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||||
github.com/fsnotify/fsnotify v1.6.0 // indirect
|
github.com/fsnotify/fsnotify v1.9.0 // indirect
|
||||||
github.com/ghodss/yaml v1.0.0 // indirect
|
github.com/gabriel-vasile/mimetype v1.4.13 // indirect
|
||||||
github.com/gin-contrib/sse v0.1.0 // indirect
|
github.com/gin-contrib/sse v1.1.0 // indirect
|
||||||
github.com/glebarez/go-sqlite v1.17.2 // indirect
|
github.com/glebarez/go-sqlite v1.22.0 // indirect
|
||||||
github.com/go-ole/go-ole v1.2.4 // indirect
|
github.com/go-ole/go-ole v1.3.0 // indirect
|
||||||
github.com/go-playground/locales v0.13.0 // indirect
|
github.com/go-playground/locales v0.14.1 // indirect
|
||||||
github.com/go-playground/universal-translator v0.17.0 // indirect
|
github.com/go-playground/universal-translator v0.18.1 // indirect
|
||||||
github.com/go-playground/validator/v10 v10.2.0 // indirect
|
github.com/go-playground/validator/v10 v10.30.1 // indirect
|
||||||
github.com/golang/protobuf v1.5.3 // indirect
|
github.com/goccy/go-json v0.10.5 // indirect
|
||||||
github.com/google/uuid v1.3.0 // indirect
|
github.com/goccy/go-yaml v1.19.2 // indirect
|
||||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
github.com/google/uuid v1.6.0 // indirect
|
||||||
github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect
|
github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf // indirect
|
||||||
github.com/jaypipes/pcidb v0.5.0 // indirect
|
github.com/jaypipes/pcidb v1.1.1 // indirect
|
||||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||||
github.com/jinzhu/now v1.1.4 // indirect
|
github.com/jinzhu/now v1.1.5 // indirect
|
||||||
github.com/json-iterator/go v1.1.12 // indirect
|
github.com/json-iterator/go v1.1.12 // indirect
|
||||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3 // indirect
|
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
|
||||||
github.com/kvz/logstreamer v0.0.0-20201023134116-02d20f4338f5 // indirect
|
github.com/kvz/logstreamer v0.0.0-20221024075423-bf5cfbd32e39 // indirect
|
||||||
github.com/leodido/go-urn v1.2.0 // indirect
|
github.com/leodido/go-urn v1.4.0 // indirect
|
||||||
github.com/magiconair/properties v1.8.7 // indirect
|
github.com/mattn/go-colorable v0.1.14 // indirect
|
||||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||||
github.com/mattn/go-isatty v0.0.18 // indirect
|
|
||||||
github.com/mitchellh/go-homedir v1.1.0 // indirect
|
github.com/mitchellh/go-homedir v1.1.0 // indirect
|
||||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||||
github.com/pelletier/go-toml/v2 v2.0.6 // indirect
|
github.com/ncruces/go-strftime v1.0.0 // indirect
|
||||||
|
github.com/oapi-codegen/runtime v1.1.2 // indirect
|
||||||
|
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
|
||||||
github.com/pkg/errors v0.9.1 // indirect
|
github.com/pkg/errors v0.9.1 // indirect
|
||||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||||
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
|
github.com/quic-go/qpack v0.6.0 // indirect
|
||||||
|
github.com/quic-go/quic-go v0.59.0 // indirect
|
||||||
|
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
|
||||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||||
github.com/spf13/afero v1.9.3 // indirect
|
github.com/sagikazarmark/locafero v0.12.0 // indirect
|
||||||
github.com/spf13/cast v1.5.0 // indirect
|
github.com/spf13/afero v1.15.0 // indirect
|
||||||
github.com/spf13/jwalterweatherman v1.1.0 // indirect
|
github.com/spf13/cast v1.10.0 // indirect
|
||||||
github.com/spf13/pflag v1.0.5 // indirect
|
github.com/spf13/pflag v1.0.10 // indirect
|
||||||
github.com/subosito/gotenv v1.4.2 // indirect
|
github.com/subosito/gotenv v1.6.0 // indirect
|
||||||
github.com/ugorji/go/codec v1.1.7 // indirect
|
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
|
||||||
golang.org/x/crypto v0.1.0 // indirect
|
github.com/ugorji/go/codec v1.3.1 // indirect
|
||||||
golang.org/x/exp v0.0.0-20220303212507-bbda1eaf7a17 // indirect
|
github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342 // indirect
|
||||||
golang.org/x/net v0.8.0 // indirect
|
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||||
golang.org/x/sys v0.7.0 // indirect
|
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
||||||
golang.org/x/term v0.6.0 // indirect
|
golang.org/x/arch v0.23.0 // indirect
|
||||||
golang.org/x/text v0.8.0 // indirect
|
golang.org/x/crypto v0.47.0 // indirect
|
||||||
google.golang.org/protobuf v1.28.1 // indirect
|
golang.org/x/exp v0.0.0-20260112195511-716be5621a96 // indirect
|
||||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
golang.org/x/net v0.49.0 // indirect
|
||||||
|
golang.org/x/sys v0.40.0 // indirect
|
||||||
|
golang.org/x/term v0.39.0 // indirect
|
||||||
|
golang.org/x/text v0.33.0 // indirect
|
||||||
|
google.golang.org/protobuf v1.36.11 // indirect
|
||||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||||
howett.net/plist v0.0.0-20181124034731-591f970eefbb // indirect
|
howett.net/plist v1.0.2-0.20250314012144-ee69052608d9 // indirect
|
||||||
modernc.org/libc v1.16.8 // indirect
|
modernc.org/libc v1.67.7 // indirect
|
||||||
modernc.org/mathutil v1.4.1 // indirect
|
modernc.org/mathutil v1.7.1 // indirect
|
||||||
modernc.org/memory v1.1.1 // indirect
|
modernc.org/memory v1.11.0 // indirect
|
||||||
modernc.org/sqlite v1.17.2 // indirect
|
modernc.org/sqlite v1.44.3 // indirect
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -3,15 +3,16 @@ package main
|
|||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/config"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/config"
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/errors"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/errors"
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/version"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/version"
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/web"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/web"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
"io"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
utils "github.com/analogj/go-util/utils"
|
utils "github.com/analogj/go-util/utils"
|
||||||
"github.com/fatih/color"
|
"github.com/fatih/color"
|
||||||
@@ -36,8 +37,8 @@ func main() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
//we're going to load the config file manually, since we need to validate it.
|
//we're going to load the config file manually, since we need to validate it.
|
||||||
err = config.ReadConfig(configFilePath) // Find and read the config file
|
err = config.ReadConfig(configFilePath) // Find and read the config file
|
||||||
if _, ok := err.(errors.ConfigFileMissingError); ok { // Handle errors reading the config file
|
if _, ok := err.(errors.ConfigFileMissingError); ok { // Handle errors reading the config file
|
||||||
//ignore "could not find config file"
|
//ignore "could not find config file"
|
||||||
} else if err != nil {
|
} else if err != nil {
|
||||||
log.Print(color.HiRedString("CONFIG ERROR: %v", err))
|
log.Print(color.HiRedString("CONFIG ERROR: %v", err))
|
||||||
@@ -81,7 +82,7 @@ OPTIONS:
|
|||||||
|
|
||||||
subtitle := scrutiny + utils.LeftPad2Len(versionInfo, " ", 65-len(scrutiny))
|
subtitle := scrutiny + utils.LeftPad2Len(versionInfo, " ", 65-len(scrutiny))
|
||||||
|
|
||||||
color.New(color.FgGreen).Fprintf(c.App.Writer, fmt.Sprintf(utils.StripIndent(
|
color.New(color.FgGreen).Fprintf(c.App.Writer, utils.StripIndent(
|
||||||
`
|
`
|
||||||
___ ___ ____ __ __ ____ ____ _ _ _ _
|
___ ___ ____ __ __ ____ ____ _ _ _ _
|
||||||
/ __) / __)( _ \( )( )(_ _)(_ _)( \( )( \/ )
|
/ __) / __)( _ \( )( )(_ _)(_ _)( \( )( \/ )
|
||||||
@@ -89,7 +90,7 @@ OPTIONS:
|
|||||||
(___/ \___)(_)\_)(______) (__) (____)(_)\_) (__)
|
(___/ \___)(_)\_)(______) (__) (____)(_)\_) (__)
|
||||||
%s
|
%s
|
||||||
|
|
||||||
`), subtitle))
|
`), subtitle)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -8,8 +8,8 @@ import (
|
|||||||
reflect "reflect"
|
reflect "reflect"
|
||||||
|
|
||||||
config "github.com/analogj/scrutiny/webapp/backend/pkg/config"
|
config "github.com/analogj/scrutiny/webapp/backend/pkg/config"
|
||||||
gomock "github.com/golang/mock/gomock"
|
|
||||||
viper "github.com/spf13/viper"
|
viper "github.com/spf13/viper"
|
||||||
|
gomock "go.uber.org/mock/gomock"
|
||||||
)
|
)
|
||||||
|
|
||||||
// MockInterface is a mock of Interface interface.
|
// MockInterface is a mock of Interface interface.
|
||||||
|
|||||||
@@ -4,8 +4,9 @@ const DeviceProtocolAta = "ATA"
|
|||||||
const DeviceProtocolScsi = "SCSI"
|
const DeviceProtocolScsi = "SCSI"
|
||||||
const DeviceProtocolNvme = "NVMe"
|
const DeviceProtocolNvme = "NVMe"
|
||||||
|
|
||||||
//go:generate stringer -type=AttributeStatus
|
|
||||||
// AttributeStatus bitwise flag, 1,2,4,8,16,32,etc
|
// AttributeStatus bitwise flag, 1,2,4,8,16,32,etc
|
||||||
|
//
|
||||||
|
//go:generate stringer -type=AttributeStatus
|
||||||
type AttributeStatus uint8
|
type AttributeStatus uint8
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -23,8 +24,9 @@ func AttributeStatusClear(b, flag AttributeStatus) AttributeStatus { return b &
|
|||||||
func AttributeStatusToggle(b, flag AttributeStatus) AttributeStatus { return b ^ flag }
|
func AttributeStatusToggle(b, flag AttributeStatus) AttributeStatus { return b ^ flag }
|
||||||
func AttributeStatusHas(b, flag AttributeStatus) bool { return b&flag != 0 }
|
func AttributeStatusHas(b, flag AttributeStatus) bool { return b&flag != 0 }
|
||||||
|
|
||||||
//go:generate stringer -type=DeviceStatus
|
|
||||||
// DeviceStatus bitwise flag, 1,2,4,8,16,32,etc
|
// DeviceStatus bitwise flag, 1,2,4,8,16,32,etc
|
||||||
|
//
|
||||||
|
//go:generate stringer -type=DeviceStatus
|
||||||
type DeviceStatus uint8
|
type DeviceStatus uint8
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
|||||||
@@ -7,6 +7,7 @@ import (
|
|||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/models"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models"
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/models/measurements"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models/measurements"
|
||||||
|
"github.com/gofrs/uuid/v5"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Create mock using:
|
// Create mock using:
|
||||||
@@ -17,19 +18,19 @@ type DeviceRepo interface {
|
|||||||
|
|
||||||
RegisterDevice(ctx context.Context, dev models.Device) error
|
RegisterDevice(ctx context.Context, dev models.Device) error
|
||||||
GetDevices(ctx context.Context) ([]models.Device, error)
|
GetDevices(ctx context.Context) ([]models.Device, error)
|
||||||
UpdateDevice(ctx context.Context, wwn string, collectorSmartData collector.SmartInfo) (models.Device, error)
|
UpdateDevice(ctx context.Context, scrutiny_uuid uuid.UUID, collectorSmartData collector.SmartInfo) (models.Device, error)
|
||||||
UpdateDeviceStatus(ctx context.Context, wwn string, status pkg.DeviceStatus) (models.Device, error)
|
UpdateDeviceStatus(ctx context.Context, scrutiny_uuid uuid.UUID, status pkg.DeviceStatus) (models.Device, error)
|
||||||
GetDeviceDetails(ctx context.Context, wwn string) (models.Device, error)
|
GetDeviceDetails(ctx context.Context, scrutiny_uuid uuid.UUID) (models.Device, error)
|
||||||
UpdateDeviceArchived(ctx context.Context, wwn string, archived bool) error
|
UpdateDeviceArchived(ctx context.Context, scrutiny_uuid uuid.UUID, archived bool) error
|
||||||
DeleteDevice(ctx context.Context, wwn string) error
|
DeleteDevice(ctx context.Context, scrutiny_uuid uuid.UUID) error
|
||||||
|
|
||||||
SaveSmartAttributes(ctx context.Context, wwn string, collectorSmartData collector.SmartInfo) (measurements.Smart, error)
|
SaveSmartAttributes(ctx context.Context, scrutiny_uuid uuid.UUID, collectorSmartData collector.SmartInfo) (measurements.Smart, error)
|
||||||
GetSmartAttributeHistory(ctx context.Context, wwn string, durationKey string, selectEntries int, selectEntriesOffset int, attributes []string) ([]measurements.Smart, error)
|
GetSmartAttributeHistory(ctx context.Context, scrutiny_uuid uuid.UUID, durationKey string, selectEntries int, selectEntriesOffset int, attributes []string) ([]measurements.Smart, error)
|
||||||
|
|
||||||
SaveSmartTemperature(ctx context.Context, wwn string, deviceProtocol string, collectorSmartData collector.SmartInfo, discardSCTTempHistory bool) error
|
SaveSmartTemperature(ctx context.Context, scrutiny_uuid uuid.UUID, deviceProtocol string, collectorSmartData collector.SmartInfo, discardSCTTempHistory bool) error
|
||||||
|
|
||||||
GetSummary(ctx context.Context) (map[string]*models.DeviceSummary, error)
|
GetSummary(ctx context.Context) (map[uuid.UUID]*models.DeviceSummary, error)
|
||||||
GetSmartTemperatureHistory(ctx context.Context, durationKey string) (map[string][]measurements.SmartTemperature, error)
|
GetSmartTemperatureHistory(ctx context.Context, durationKey string) (map[uuid.UUID][]measurements.SmartTemperature, error)
|
||||||
|
|
||||||
LoadSettings(ctx context.Context) (*models.Settings, error)
|
LoadSettings(ctx context.Context) (*models.Settings, error)
|
||||||
SaveSettings(ctx context.Context, settings models.Settings) error
|
SaveSettings(ctx context.Context, settings models.Settings) error
|
||||||
|
|||||||
@@ -1,10 +1,12 @@
|
|||||||
package m20250221084400
|
package m20250221084400
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Deprecated: m20250221084400.Device is deprecated, only used by db migrations
|
||||||
type Device struct {
|
type Device struct {
|
||||||
Archived bool `json:"archived"`
|
Archived bool `json:"archived"`
|
||||||
//GORM attributes, see: http://gorm.io/docs/conventions.html
|
//GORM attributes, see: http://gorm.io/docs/conventions.html
|
||||||
|
|||||||
@@ -0,0 +1,44 @@
|
|||||||
|
package m20260216155600
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg"
|
||||||
|
"github.com/gofrs/uuid/v5"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Device struct {
|
||||||
|
//GORM attributes, see: http://gorm.io/docs/conventions.html
|
||||||
|
Archived bool `json:"archived"`
|
||||||
|
CreatedAt time.Time
|
||||||
|
UpdatedAt time.Time
|
||||||
|
DeletedAt *time.Time
|
||||||
|
|
||||||
|
WWN string `json:"wwn"`
|
||||||
|
|
||||||
|
DeviceName string `json:"device_name"`
|
||||||
|
DeviceUUID string `json:"device_uuid"`
|
||||||
|
DeviceSerialID string `json:"device_serial_id"`
|
||||||
|
DeviceLabel string `json:"device_label"`
|
||||||
|
|
||||||
|
Manufacturer string `json:"manufacturer"`
|
||||||
|
ModelName string `json:"model_name"`
|
||||||
|
InterfaceType string `json:"interface_type"`
|
||||||
|
InterfaceSpeed string `json:"interface_speed"`
|
||||||
|
SerialNumber string `json:"serial_number"`
|
||||||
|
Firmware string `json:"firmware"`
|
||||||
|
RotationSpeed int `json:"rotational_speed"`
|
||||||
|
Capacity int64 `json:"capacity"`
|
||||||
|
FormFactor string `json:"form_factor"`
|
||||||
|
SmartSupport bool `json:"smart_support"`
|
||||||
|
DeviceProtocol string `json:"device_protocol"` //protocol determines which smart attribute types are available (ATA, NVMe, SCSI)
|
||||||
|
DeviceType string `json:"device_type"` //device type is used for querying with -d/t flag, should only be used by collector.
|
||||||
|
|
||||||
|
// User provided metadata
|
||||||
|
Label string `json:"label"`
|
||||||
|
HostId string `json:"host_id"`
|
||||||
|
|
||||||
|
// Data set by Scrutiny
|
||||||
|
DeviceStatus pkg.DeviceStatus `json:"device_status"`
|
||||||
|
ScrutinyUUID uuid.UUID `json:"scrutiny_uuid" gorm:"primaryKey;uniqueIndex"`
|
||||||
|
}
|
||||||
@@ -1,5 +1,10 @@
|
|||||||
// Code generated by MockGen. DO NOT EDIT.
|
// Code generated by MockGen. DO NOT EDIT.
|
||||||
// Source: webapp/backend/pkg/database/interface.go
|
// Source: webapp/backend/pkg/database/interface.go
|
||||||
|
//
|
||||||
|
// Generated by this command:
|
||||||
|
//
|
||||||
|
// mockgen -source=webapp/backend/pkg/database/interface.go -destination=webapp/backend/pkg/database/mock/mock_database.go
|
||||||
|
//
|
||||||
|
|
||||||
// Package mock_database is a generated GoMock package.
|
// Package mock_database is a generated GoMock package.
|
||||||
package mock_database
|
package mock_database
|
||||||
@@ -12,13 +17,15 @@ import (
|
|||||||
models "github.com/analogj/scrutiny/webapp/backend/pkg/models"
|
models "github.com/analogj/scrutiny/webapp/backend/pkg/models"
|
||||||
collector "github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
|
collector "github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
|
||||||
measurements "github.com/analogj/scrutiny/webapp/backend/pkg/models/measurements"
|
measurements "github.com/analogj/scrutiny/webapp/backend/pkg/models/measurements"
|
||||||
gomock "github.com/golang/mock/gomock"
|
uuid "github.com/gofrs/uuid/v5"
|
||||||
|
gomock "go.uber.org/mock/gomock"
|
||||||
)
|
)
|
||||||
|
|
||||||
// MockDeviceRepo is a mock of DeviceRepo interface.
|
// MockDeviceRepo is a mock of DeviceRepo interface.
|
||||||
type MockDeviceRepo struct {
|
type MockDeviceRepo struct {
|
||||||
ctrl *gomock.Controller
|
ctrl *gomock.Controller
|
||||||
recorder *MockDeviceRepoMockRecorder
|
recorder *MockDeviceRepoMockRecorder
|
||||||
|
isgomock struct{}
|
||||||
}
|
}
|
||||||
|
|
||||||
// MockDeviceRepoMockRecorder is the mock recorder for MockDeviceRepo.
|
// MockDeviceRepoMockRecorder is the mock recorder for MockDeviceRepo.
|
||||||
@@ -52,47 +59,33 @@ func (mr *MockDeviceRepoMockRecorder) Close() *gomock.Call {
|
|||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockDeviceRepo)(nil).Close))
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockDeviceRepo)(nil).Close))
|
||||||
}
|
}
|
||||||
|
|
||||||
// UpdateDeviceArchived mocks base method.
|
|
||||||
func (m *MockDeviceRepo) UpdateDeviceArchived(ctx context.Context, wwn string, archived bool) error {
|
|
||||||
m.ctrl.T.Helper()
|
|
||||||
ret := m.ctrl.Call(m, "UpdateDeviceArchived", ctx, wwn)
|
|
||||||
ret0, _ := ret[0].(error)
|
|
||||||
return ret0
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateDeviceArchived indicates an expected call of UpdateDeviceArchived.
|
|
||||||
func (mr *MockDeviceRepoMockRecorder) UpdateDeviceArchived(ctx, wwn, archived interface{}) *gomock.Call {
|
|
||||||
mr.mock.ctrl.T.Helper()
|
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateDeviceArchived", reflect.TypeOf((*MockDeviceRepo)(nil).UpdateDeviceArchived), ctx, wwn, archived)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteDevice mocks base method.
|
// DeleteDevice mocks base method.
|
||||||
func (m *MockDeviceRepo) DeleteDevice(ctx context.Context, wwn string) error {
|
func (m *MockDeviceRepo) DeleteDevice(ctx context.Context, scrutiny_uuid uuid.UUID) error {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "DeleteDevice", ctx, wwn)
|
ret := m.ctrl.Call(m, "DeleteDevice", ctx, scrutiny_uuid)
|
||||||
ret0, _ := ret[0].(error)
|
ret0, _ := ret[0].(error)
|
||||||
return ret0
|
return ret0
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteDevice indicates an expected call of DeleteDevice.
|
// DeleteDevice indicates an expected call of DeleteDevice.
|
||||||
func (mr *MockDeviceRepoMockRecorder) DeleteDevice(ctx, wwn interface{}) *gomock.Call {
|
func (mr *MockDeviceRepoMockRecorder) DeleteDevice(ctx, scrutiny_uuid any) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteDevice", reflect.TypeOf((*MockDeviceRepo)(nil).DeleteDevice), ctx, wwn)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteDevice", reflect.TypeOf((*MockDeviceRepo)(nil).DeleteDevice), ctx, scrutiny_uuid)
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetDeviceDetails mocks base method.
|
// GetDeviceDetails mocks base method.
|
||||||
func (m *MockDeviceRepo) GetDeviceDetails(ctx context.Context, wwn string) (models.Device, error) {
|
func (m *MockDeviceRepo) GetDeviceDetails(ctx context.Context, scrutiny_uuid uuid.UUID) (models.Device, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "GetDeviceDetails", ctx, wwn)
|
ret := m.ctrl.Call(m, "GetDeviceDetails", ctx, scrutiny_uuid)
|
||||||
ret0, _ := ret[0].(models.Device)
|
ret0, _ := ret[0].(models.Device)
|
||||||
ret1, _ := ret[1].(error)
|
ret1, _ := ret[1].(error)
|
||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetDeviceDetails indicates an expected call of GetDeviceDetails.
|
// GetDeviceDetails indicates an expected call of GetDeviceDetails.
|
||||||
func (mr *MockDeviceRepoMockRecorder) GetDeviceDetails(ctx, wwn interface{}) *gomock.Call {
|
func (mr *MockDeviceRepoMockRecorder) GetDeviceDetails(ctx, scrutiny_uuid any) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeviceDetails", reflect.TypeOf((*MockDeviceRepo)(nil).GetDeviceDetails), ctx, wwn)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeviceDetails", reflect.TypeOf((*MockDeviceRepo)(nil).GetDeviceDetails), ctx, scrutiny_uuid)
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetDevices mocks base method.
|
// GetDevices mocks base method.
|
||||||
@@ -105,52 +98,52 @@ func (m *MockDeviceRepo) GetDevices(ctx context.Context) ([]models.Device, error
|
|||||||
}
|
}
|
||||||
|
|
||||||
// GetDevices indicates an expected call of GetDevices.
|
// GetDevices indicates an expected call of GetDevices.
|
||||||
func (mr *MockDeviceRepoMockRecorder) GetDevices(ctx interface{}) *gomock.Call {
|
func (mr *MockDeviceRepoMockRecorder) GetDevices(ctx any) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDevices", reflect.TypeOf((*MockDeviceRepo)(nil).GetDevices), ctx)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDevices", reflect.TypeOf((*MockDeviceRepo)(nil).GetDevices), ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetSmartAttributeHistory mocks base method.
|
// GetSmartAttributeHistory mocks base method.
|
||||||
func (m *MockDeviceRepo) GetSmartAttributeHistory(ctx context.Context, wwn, durationKey string, selectEntries, selectEntriesOffset int, attributes []string) ([]measurements.Smart, error) {
|
func (m *MockDeviceRepo) GetSmartAttributeHistory(ctx context.Context, scrutiny_uuid uuid.UUID, durationKey string, selectEntries, selectEntriesOffset int, attributes []string) ([]measurements.Smart, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "GetSmartAttributeHistory", ctx, wwn, durationKey, selectEntries, selectEntriesOffset, attributes)
|
ret := m.ctrl.Call(m, "GetSmartAttributeHistory", ctx, scrutiny_uuid, durationKey, selectEntries, selectEntriesOffset, attributes)
|
||||||
ret0, _ := ret[0].([]measurements.Smart)
|
ret0, _ := ret[0].([]measurements.Smart)
|
||||||
ret1, _ := ret[1].(error)
|
ret1, _ := ret[1].(error)
|
||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetSmartAttributeHistory indicates an expected call of GetSmartAttributeHistory.
|
// GetSmartAttributeHistory indicates an expected call of GetSmartAttributeHistory.
|
||||||
func (mr *MockDeviceRepoMockRecorder) GetSmartAttributeHistory(ctx, wwn, durationKey, selectEntries, selectEntriesOffset, attributes interface{}) *gomock.Call {
|
func (mr *MockDeviceRepoMockRecorder) GetSmartAttributeHistory(ctx, scrutiny_uuid, durationKey, selectEntries, selectEntriesOffset, attributes any) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSmartAttributeHistory", reflect.TypeOf((*MockDeviceRepo)(nil).GetSmartAttributeHistory), ctx, wwn, durationKey, selectEntries, selectEntriesOffset, attributes)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSmartAttributeHistory", reflect.TypeOf((*MockDeviceRepo)(nil).GetSmartAttributeHistory), ctx, scrutiny_uuid, durationKey, selectEntries, selectEntriesOffset, attributes)
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetSmartTemperatureHistory mocks base method.
|
// GetSmartTemperatureHistory mocks base method.
|
||||||
func (m *MockDeviceRepo) GetSmartTemperatureHistory(ctx context.Context, durationKey string) (map[string][]measurements.SmartTemperature, error) {
|
func (m *MockDeviceRepo) GetSmartTemperatureHistory(ctx context.Context, durationKey string) (map[uuid.UUID][]measurements.SmartTemperature, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "GetSmartTemperatureHistory", ctx, durationKey)
|
ret := m.ctrl.Call(m, "GetSmartTemperatureHistory", ctx, durationKey)
|
||||||
ret0, _ := ret[0].(map[string][]measurements.SmartTemperature)
|
ret0, _ := ret[0].(map[uuid.UUID][]measurements.SmartTemperature)
|
||||||
ret1, _ := ret[1].(error)
|
ret1, _ := ret[1].(error)
|
||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetSmartTemperatureHistory indicates an expected call of GetSmartTemperatureHistory.
|
// GetSmartTemperatureHistory indicates an expected call of GetSmartTemperatureHistory.
|
||||||
func (mr *MockDeviceRepoMockRecorder) GetSmartTemperatureHistory(ctx, durationKey interface{}) *gomock.Call {
|
func (mr *MockDeviceRepoMockRecorder) GetSmartTemperatureHistory(ctx, durationKey any) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSmartTemperatureHistory", reflect.TypeOf((*MockDeviceRepo)(nil).GetSmartTemperatureHistory), ctx, durationKey)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSmartTemperatureHistory", reflect.TypeOf((*MockDeviceRepo)(nil).GetSmartTemperatureHistory), ctx, durationKey)
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetSummary mocks base method.
|
// GetSummary mocks base method.
|
||||||
func (m *MockDeviceRepo) GetSummary(ctx context.Context) (map[string]*models.DeviceSummary, error) {
|
func (m *MockDeviceRepo) GetSummary(ctx context.Context) (map[uuid.UUID]*models.DeviceSummary, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "GetSummary", ctx)
|
ret := m.ctrl.Call(m, "GetSummary", ctx)
|
||||||
ret0, _ := ret[0].(map[string]*models.DeviceSummary)
|
ret0, _ := ret[0].(map[uuid.UUID]*models.DeviceSummary)
|
||||||
ret1, _ := ret[1].(error)
|
ret1, _ := ret[1].(error)
|
||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetSummary indicates an expected call of GetSummary.
|
// GetSummary indicates an expected call of GetSummary.
|
||||||
func (mr *MockDeviceRepoMockRecorder) GetSummary(ctx interface{}) *gomock.Call {
|
func (mr *MockDeviceRepoMockRecorder) GetSummary(ctx any) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSummary", reflect.TypeOf((*MockDeviceRepo)(nil).GetSummary), ctx)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSummary", reflect.TypeOf((*MockDeviceRepo)(nil).GetSummary), ctx)
|
||||||
}
|
}
|
||||||
@@ -164,7 +157,7 @@ func (m *MockDeviceRepo) HealthCheck(ctx context.Context) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// HealthCheck indicates an expected call of HealthCheck.
|
// HealthCheck indicates an expected call of HealthCheck.
|
||||||
func (mr *MockDeviceRepoMockRecorder) HealthCheck(ctx interface{}) *gomock.Call {
|
func (mr *MockDeviceRepoMockRecorder) HealthCheck(ctx any) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HealthCheck", reflect.TypeOf((*MockDeviceRepo)(nil).HealthCheck), ctx)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HealthCheck", reflect.TypeOf((*MockDeviceRepo)(nil).HealthCheck), ctx)
|
||||||
}
|
}
|
||||||
@@ -179,7 +172,7 @@ func (m *MockDeviceRepo) LoadSettings(ctx context.Context) (*models.Settings, er
|
|||||||
}
|
}
|
||||||
|
|
||||||
// LoadSettings indicates an expected call of LoadSettings.
|
// LoadSettings indicates an expected call of LoadSettings.
|
||||||
func (mr *MockDeviceRepoMockRecorder) LoadSettings(ctx interface{}) *gomock.Call {
|
func (mr *MockDeviceRepoMockRecorder) LoadSettings(ctx any) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadSettings", reflect.TypeOf((*MockDeviceRepo)(nil).LoadSettings), ctx)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadSettings", reflect.TypeOf((*MockDeviceRepo)(nil).LoadSettings), ctx)
|
||||||
}
|
}
|
||||||
@@ -193,7 +186,7 @@ func (m *MockDeviceRepo) RegisterDevice(ctx context.Context, dev models.Device)
|
|||||||
}
|
}
|
||||||
|
|
||||||
// RegisterDevice indicates an expected call of RegisterDevice.
|
// RegisterDevice indicates an expected call of RegisterDevice.
|
||||||
func (mr *MockDeviceRepoMockRecorder) RegisterDevice(ctx, dev interface{}) *gomock.Call {
|
func (mr *MockDeviceRepoMockRecorder) RegisterDevice(ctx, dev any) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterDevice", reflect.TypeOf((*MockDeviceRepo)(nil).RegisterDevice), ctx, dev)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterDevice", reflect.TypeOf((*MockDeviceRepo)(nil).RegisterDevice), ctx, dev)
|
||||||
}
|
}
|
||||||
@@ -207,66 +200,80 @@ func (m *MockDeviceRepo) SaveSettings(ctx context.Context, settings models.Setti
|
|||||||
}
|
}
|
||||||
|
|
||||||
// SaveSettings indicates an expected call of SaveSettings.
|
// SaveSettings indicates an expected call of SaveSettings.
|
||||||
func (mr *MockDeviceRepoMockRecorder) SaveSettings(ctx, settings interface{}) *gomock.Call {
|
func (mr *MockDeviceRepoMockRecorder) SaveSettings(ctx, settings any) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveSettings", reflect.TypeOf((*MockDeviceRepo)(nil).SaveSettings), ctx, settings)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveSettings", reflect.TypeOf((*MockDeviceRepo)(nil).SaveSettings), ctx, settings)
|
||||||
}
|
}
|
||||||
|
|
||||||
// SaveSmartAttributes mocks base method.
|
// SaveSmartAttributes mocks base method.
|
||||||
func (m *MockDeviceRepo) SaveSmartAttributes(ctx context.Context, wwn string, collectorSmartData collector.SmartInfo) (measurements.Smart, error) {
|
func (m *MockDeviceRepo) SaveSmartAttributes(ctx context.Context, scrutiny_uuid uuid.UUID, collectorSmartData collector.SmartInfo) (measurements.Smart, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "SaveSmartAttributes", ctx, wwn, collectorSmartData)
|
ret := m.ctrl.Call(m, "SaveSmartAttributes", ctx, scrutiny_uuid, collectorSmartData)
|
||||||
ret0, _ := ret[0].(measurements.Smart)
|
ret0, _ := ret[0].(measurements.Smart)
|
||||||
ret1, _ := ret[1].(error)
|
ret1, _ := ret[1].(error)
|
||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
// SaveSmartAttributes indicates an expected call of SaveSmartAttributes.
|
// SaveSmartAttributes indicates an expected call of SaveSmartAttributes.
|
||||||
func (mr *MockDeviceRepoMockRecorder) SaveSmartAttributes(ctx, wwn, collectorSmartData interface{}) *gomock.Call {
|
func (mr *MockDeviceRepoMockRecorder) SaveSmartAttributes(ctx, scrutiny_uuid, collectorSmartData any) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveSmartAttributes", reflect.TypeOf((*MockDeviceRepo)(nil).SaveSmartAttributes), ctx, wwn, collectorSmartData)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveSmartAttributes", reflect.TypeOf((*MockDeviceRepo)(nil).SaveSmartAttributes), ctx, scrutiny_uuid, collectorSmartData)
|
||||||
}
|
}
|
||||||
|
|
||||||
// SaveSmartTemperature mocks base method.
|
// SaveSmartTemperature mocks base method.
|
||||||
func (m *MockDeviceRepo) SaveSmartTemperature(ctx context.Context, wwn, deviceProtocol string, collectorSmartData collector.SmartInfo, discardSCTTempHistory bool) error {
|
func (m *MockDeviceRepo) SaveSmartTemperature(ctx context.Context, scrutiny_uuid uuid.UUID, deviceProtocol string, collectorSmartData collector.SmartInfo, discardSCTTempHistory bool) error {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "SaveSmartTemperature", ctx, wwn, deviceProtocol, collectorSmartData, discardSCTTempHistory)
|
ret := m.ctrl.Call(m, "SaveSmartTemperature", ctx, scrutiny_uuid, deviceProtocol, collectorSmartData, discardSCTTempHistory)
|
||||||
ret0, _ := ret[0].(error)
|
ret0, _ := ret[0].(error)
|
||||||
return ret0
|
return ret0
|
||||||
}
|
}
|
||||||
|
|
||||||
// SaveSmartTemperature indicates an expected call of SaveSmartTemperature.
|
// SaveSmartTemperature indicates an expected call of SaveSmartTemperature.
|
||||||
func (mr *MockDeviceRepoMockRecorder) SaveSmartTemperature(ctx, wwn, deviceProtocol, collectorSmartData, discardSCTTempHistory interface{}) *gomock.Call {
|
func (mr *MockDeviceRepoMockRecorder) SaveSmartTemperature(ctx, scrutiny_uuid, deviceProtocol, collectorSmartData, discardSCTTempHistory any) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveSmartTemperature", reflect.TypeOf((*MockDeviceRepo)(nil).SaveSmartTemperature), ctx, wwn, deviceProtocol, collectorSmartData, discardSCTTempHistory)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveSmartTemperature", reflect.TypeOf((*MockDeviceRepo)(nil).SaveSmartTemperature), ctx, scrutiny_uuid, deviceProtocol, collectorSmartData, discardSCTTempHistory)
|
||||||
}
|
}
|
||||||
|
|
||||||
// UpdateDevice mocks base method.
|
// UpdateDevice mocks base method.
|
||||||
func (m *MockDeviceRepo) UpdateDevice(ctx context.Context, wwn string, collectorSmartData collector.SmartInfo) (models.Device, error) {
|
func (m *MockDeviceRepo) UpdateDevice(ctx context.Context, scrutiny_uuid uuid.UUID, collectorSmartData collector.SmartInfo) (models.Device, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "UpdateDevice", ctx, wwn, collectorSmartData)
|
ret := m.ctrl.Call(m, "UpdateDevice", ctx, scrutiny_uuid, collectorSmartData)
|
||||||
ret0, _ := ret[0].(models.Device)
|
ret0, _ := ret[0].(models.Device)
|
||||||
ret1, _ := ret[1].(error)
|
ret1, _ := ret[1].(error)
|
||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
// UpdateDevice indicates an expected call of UpdateDevice.
|
// UpdateDevice indicates an expected call of UpdateDevice.
|
||||||
func (mr *MockDeviceRepoMockRecorder) UpdateDevice(ctx, wwn, collectorSmartData interface{}) *gomock.Call {
|
func (mr *MockDeviceRepoMockRecorder) UpdateDevice(ctx, scrutiny_uuid, collectorSmartData any) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateDevice", reflect.TypeOf((*MockDeviceRepo)(nil).UpdateDevice), ctx, wwn, collectorSmartData)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateDevice", reflect.TypeOf((*MockDeviceRepo)(nil).UpdateDevice), ctx, scrutiny_uuid, collectorSmartData)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateDeviceArchived mocks base method.
|
||||||
|
func (m *MockDeviceRepo) UpdateDeviceArchived(ctx context.Context, scrutiny_uuid uuid.UUID, archived bool) error {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "UpdateDeviceArchived", ctx, scrutiny_uuid, archived)
|
||||||
|
ret0, _ := ret[0].(error)
|
||||||
|
return ret0
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateDeviceArchived indicates an expected call of UpdateDeviceArchived.
|
||||||
|
func (mr *MockDeviceRepoMockRecorder) UpdateDeviceArchived(ctx, scrutiny_uuid, archived any) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateDeviceArchived", reflect.TypeOf((*MockDeviceRepo)(nil).UpdateDeviceArchived), ctx, scrutiny_uuid, archived)
|
||||||
}
|
}
|
||||||
|
|
||||||
// UpdateDeviceStatus mocks base method.
|
// UpdateDeviceStatus mocks base method.
|
||||||
func (m *MockDeviceRepo) UpdateDeviceStatus(ctx context.Context, wwn string, status pkg.DeviceStatus) (models.Device, error) {
|
func (m *MockDeviceRepo) UpdateDeviceStatus(ctx context.Context, scrutiny_uuid uuid.UUID, status pkg.DeviceStatus) (models.Device, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "UpdateDeviceStatus", ctx, wwn, status)
|
ret := m.ctrl.Call(m, "UpdateDeviceStatus", ctx, scrutiny_uuid, status)
|
||||||
ret0, _ := ret[0].(models.Device)
|
ret0, _ := ret[0].(models.Device)
|
||||||
ret1, _ := ret[1].(error)
|
ret1, _ := ret[1].(error)
|
||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
// UpdateDeviceStatus indicates an expected call of UpdateDeviceStatus.
|
// UpdateDeviceStatus indicates an expected call of UpdateDeviceStatus.
|
||||||
func (mr *MockDeviceRepoMockRecorder) UpdateDeviceStatus(ctx, wwn, status interface{}) *gomock.Call {
|
func (mr *MockDeviceRepoMockRecorder) UpdateDeviceStatus(ctx, scrutiny_uuid, status any) *gomock.Call {
|
||||||
mr.mock.ctrl.T.Helper()
|
mr.mock.ctrl.T.Helper()
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateDeviceStatus", reflect.TypeOf((*MockDeviceRepo)(nil).UpdateDeviceStatus), ctx, wwn, status)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateDeviceStatus", reflect.TypeOf((*MockDeviceRepo)(nil).UpdateDeviceStatus), ctx, scrutiny_uuid, status)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -5,18 +5,21 @@ import (
|
|||||||
"crypto/tls"
|
"crypto/tls"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/config"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/config"
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/models"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models"
|
||||||
"github.com/glebarez/sqlite"
|
"github.com/glebarez/sqlite"
|
||||||
|
"github.com/gofrs/uuid/v5"
|
||||||
influxdb2 "github.com/influxdata/influxdb-client-go/v2"
|
influxdb2 "github.com/influxdata/influxdb-client-go/v2"
|
||||||
"github.com/influxdata/influxdb-client-go/v2/api"
|
"github.com/influxdata/influxdb-client-go/v2/api"
|
||||||
"github.com/influxdata/influxdb-client-go/v2/domain"
|
"github.com/influxdata/influxdb-client-go/v2/domain"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
|
|
||||||
"gorm.io/gorm"
|
"gorm.io/gorm"
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"time"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -29,6 +32,7 @@ const (
|
|||||||
// 60seconds * 60minutes * 24hours * 7 days * (52 + 52 + 4)weeks
|
// 60seconds * 60minutes * 24hours * 7 days * (52 + 52 + 4)weeks
|
||||||
RETENTION_PERIOD_25_MONTHS_IN_SECONDS = 65_318_400
|
RETENTION_PERIOD_25_MONTHS_IN_SECONDS = 65_318_400
|
||||||
|
|
||||||
|
DURATION_KEY_DAY = "day"
|
||||||
DURATION_KEY_WEEK = "week"
|
DURATION_KEY_WEEK = "week"
|
||||||
DURATION_KEY_MONTH = "month"
|
DURATION_KEY_MONTH = "month"
|
||||||
DURATION_KEY_YEAR = "year"
|
DURATION_KEY_YEAR = "year"
|
||||||
@@ -82,7 +86,7 @@ func NewScrutinyRepository(appConfig config.Interface, globalLogger logrus.Field
|
|||||||
DisableForeignKeyConstraintWhenMigrating: true,
|
DisableForeignKeyConstraintWhenMigrating: true,
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("Failed to connect to database! - %v", err)
|
return nil, fmt.Errorf("failed to connect to database! - %v", err)
|
||||||
}
|
}
|
||||||
globalLogger.Infof("Successfully connected to scrutiny sqlite db: %s\n", appConfig.GetString("web.database.location"))
|
globalLogger.Infof("Successfully connected to scrutiny sqlite db: %s\n", appConfig.GetString("web.database.location"))
|
||||||
|
|
||||||
@@ -146,7 +150,7 @@ func NewScrutinyRepository(appConfig config.Interface, globalLogger logrus.Field
|
|||||||
taskAPI := client.TasksAPI()
|
taskAPI := client.TasksAPI()
|
||||||
|
|
||||||
if writeAPI == nil || queryAPI == nil || taskAPI == nil {
|
if writeAPI == nil || queryAPI == nil || taskAPI == nil {
|
||||||
return nil, fmt.Errorf("Failed to connect to influxdb!")
|
return nil, fmt.Errorf("failed to connect to influxdb")
|
||||||
}
|
}
|
||||||
|
|
||||||
deviceRepo := scrutinyRepository{
|
deviceRepo := scrutinyRepository{
|
||||||
@@ -238,13 +242,13 @@ func InfluxSetupComplete(influxEndpoint string, tlsConfig *tls.Config) (bool, er
|
|||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
client := &http.Client{Transport: &http.Transport{TLSClientConfig: tlsConfig}}
|
client := &http.Client{Transport: &http.Transport{TLSClientConfig: tlsConfig}}
|
||||||
res, err := client.Get(influxUri.String())
|
res, err := client.Get(influxUri.String())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
body, err := ioutil.ReadAll(res.Body)
|
body, err := io.ReadAll(res.Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
@@ -331,16 +335,16 @@ func (sr *scrutinyRepository) EnsureBuckets(ctx context.Context, org *domain.Org
|
|||||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
// get a map of all devices and associated SMART data
|
// get a map of all devices and associated SMART data
|
||||||
func (sr *scrutinyRepository) GetSummary(ctx context.Context) (map[string]*models.DeviceSummary, error) {
|
func (sr *scrutinyRepository) GetSummary(ctx context.Context) (map[uuid.UUID]*models.DeviceSummary, error) {
|
||||||
devices, err := sr.GetDevices(ctx)
|
devices, err := sr.GetDevices(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
summaries := map[string]*models.DeviceSummary{}
|
summaries := map[uuid.UUID]*models.DeviceSummary{}
|
||||||
|
|
||||||
for _, device := range devices {
|
for _, device := range devices {
|
||||||
summaries[device.WWN] = &models.DeviceSummary{Device: device}
|
summaries[device.ScrutinyUUID] = &models.DeviceSummary{Device: device}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get parser flux query result
|
// Get parser flux query result
|
||||||
@@ -355,7 +359,7 @@ func (sr *scrutinyRepository) GetSummary(ctx context.Context) (map[string]*model
|
|||||||
|> filter(fn: (r) => r["_field"] == "temp" or r["_field"] == "power_on_hours" or r["_field"] == "date")
|
|> filter(fn: (r) => r["_field"] == "temp" or r["_field"] == "power_on_hours" or r["_field"] == "date")
|
||||||
|> last()
|
|> last()
|
||||||
|> schema.fieldsAsCols()
|
|> schema.fieldsAsCols()
|
||||||
|> group(columns: ["device_wwn"])
|
|> group(columns: ["scrutiny_uuid"])
|
||||||
|
|
||||||
weeklyData = from(bucket: bucketBaseName + "_weekly")
|
weeklyData = from(bucket: bucketBaseName + "_weekly")
|
||||||
|> range(start: -10y, stop: now())
|
|> range(start: -10y, stop: now())
|
||||||
@@ -363,7 +367,7 @@ func (sr *scrutinyRepository) GetSummary(ctx context.Context) (map[string]*model
|
|||||||
|> filter(fn: (r) => r["_field"] == "temp" or r["_field"] == "power_on_hours" or r["_field"] == "date")
|
|> filter(fn: (r) => r["_field"] == "temp" or r["_field"] == "power_on_hours" or r["_field"] == "date")
|
||||||
|> last()
|
|> last()
|
||||||
|> schema.fieldsAsCols()
|
|> schema.fieldsAsCols()
|
||||||
|> group(columns: ["device_wwn"])
|
|> group(columns: ["scrutiny_uuid"])
|
||||||
|
|
||||||
monthlyData = from(bucket: bucketBaseName + "_monthly")
|
monthlyData = from(bucket: bucketBaseName + "_monthly")
|
||||||
|> range(start: -10y, stop: now())
|
|> range(start: -10y, stop: now())
|
||||||
@@ -371,7 +375,7 @@ func (sr *scrutinyRepository) GetSummary(ctx context.Context) (map[string]*model
|
|||||||
|> filter(fn: (r) => r["_field"] == "temp" or r["_field"] == "power_on_hours" or r["_field"] == "date")
|
|> filter(fn: (r) => r["_field"] == "temp" or r["_field"] == "power_on_hours" or r["_field"] == "date")
|
||||||
|> last()
|
|> last()
|
||||||
|> schema.fieldsAsCols()
|
|> schema.fieldsAsCols()
|
||||||
|> group(columns: ["device_wwn"])
|
|> group(columns: ["scrutiny_uuid"])
|
||||||
|
|
||||||
yearlyData = from(bucket: bucketBaseName + "_yearly")
|
yearlyData = from(bucket: bucketBaseName + "_yearly")
|
||||||
|> range(start: -10y, stop: now())
|
|> range(start: -10y, stop: now())
|
||||||
@@ -379,12 +383,12 @@ func (sr *scrutinyRepository) GetSummary(ctx context.Context) (map[string]*model
|
|||||||
|> filter(fn: (r) => r["_field"] == "temp" or r["_field"] == "power_on_hours" or r["_field"] == "date")
|
|> filter(fn: (r) => r["_field"] == "temp" or r["_field"] == "power_on_hours" or r["_field"] == "date")
|
||||||
|> last()
|
|> last()
|
||||||
|> schema.fieldsAsCols()
|
|> schema.fieldsAsCols()
|
||||||
|> group(columns: ["device_wwn"])
|
|> group(columns: ["scrutiny_uuid"])
|
||||||
|
|
||||||
union(tables: [dailyData, weeklyData, monthlyData, yearlyData])
|
union(tables: [dailyData, weeklyData, monthlyData, yearlyData])
|
||||||
|> sort(columns: ["_time"], desc: false)
|
|> sort(columns: ["_time"], desc: false)
|
||||||
|> group(columns: ["device_wwn"])
|
|> group(columns: ["scrutiny_uuid"])
|
||||||
|> last(column: "device_wwn")
|
|> last(column: "scrutiny_uuid")
|
||||||
|> yield(name: "last")
|
|> yield(name: "last")
|
||||||
`,
|
`,
|
||||||
sr.appConfig.GetString("web.influxdb.bucket"),
|
sr.appConfig.GetString("web.influxdb.bucket"),
|
||||||
@@ -402,14 +406,15 @@ func (sr *scrutinyRepository) GetSummary(ctx context.Context) (map[string]*model
|
|||||||
|
|
||||||
//get summary data from Influxdb.
|
//get summary data from Influxdb.
|
||||||
//result.Record().Values()
|
//result.Record().Values()
|
||||||
if deviceWWN, ok := result.Record().Values()["device_wwn"]; ok {
|
if scrutinyUUIDString, ok := result.Record().Values()["scrutiny_uuid"]; ok {
|
||||||
|
scrutinyUUID := uuid.Must(uuid.FromString(scrutinyUUIDString.(string)))
|
||||||
|
|
||||||
//ensure summaries is intialized for this wwn
|
//ensure summaries is intialized for this scrutiny_uuid
|
||||||
if _, exists := summaries[deviceWWN.(string)]; !exists {
|
if _, exists := summaries[scrutinyUUID]; !exists {
|
||||||
summaries[deviceWWN.(string)] = &models.DeviceSummary{}
|
summaries[scrutinyUUID] = &models.DeviceSummary{}
|
||||||
}
|
}
|
||||||
|
|
||||||
summaries[deviceWWN.(string)].SmartResults = &models.SmartSummary{
|
summaries[scrutinyUUID].SmartResults = &models.SmartSummary{
|
||||||
Temp: result.Record().Values()["temp"].(int64),
|
Temp: result.Record().Values()["temp"].(int64),
|
||||||
PowerOnHours: result.Record().Values()["power_on_hours"].(int64),
|
PowerOnHours: result.Record().Values()["power_on_hours"].(int64),
|
||||||
CollectorDate: result.Record().Values()["_time"].(time.Time),
|
CollectorDate: result.Record().Values()["_time"].(time.Time),
|
||||||
@@ -432,8 +437,8 @@ func (sr *scrutinyRepository) GetSummary(ctx context.Context) (map[string]*model
|
|||||||
sr.logger.Printf("========================>>>>>>>>======================")
|
sr.logger.Printf("========================>>>>>>>>======================")
|
||||||
sr.logger.Printf("Error: %v", err)
|
sr.logger.Printf("Error: %v", err)
|
||||||
}
|
}
|
||||||
for wwn, tempHistory := range deviceTempHistory {
|
for scutiny_uuid, tempHistory := range deviceTempHistory {
|
||||||
summaries[wwn].TempHistory = tempHistory
|
summaries[scutiny_uuid].TempHistory = tempHistory
|
||||||
}
|
}
|
||||||
|
|
||||||
return summaries, nil
|
return summaries, nil
|
||||||
@@ -445,6 +450,7 @@ func (sr *scrutinyRepository) GetSummary(ctx context.Context) (map[string]*model
|
|||||||
|
|
||||||
func (sr *scrutinyRepository) lookupBucketName(durationKey string) string {
|
func (sr *scrutinyRepository) lookupBucketName(durationKey string) string {
|
||||||
switch durationKey {
|
switch durationKey {
|
||||||
|
case DURATION_KEY_DAY:
|
||||||
case DURATION_KEY_WEEK:
|
case DURATION_KEY_WEEK:
|
||||||
//data stored in the last week
|
//data stored in the last week
|
||||||
return sr.appConfig.GetString("web.influxdb.bucket")
|
return sr.appConfig.GetString("web.influxdb.bucket")
|
||||||
@@ -462,8 +468,10 @@ func (sr *scrutinyRepository) lookupBucketName(durationKey string) string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (sr *scrutinyRepository) lookupDuration(durationKey string) []string {
|
func (sr *scrutinyRepository) lookupDuration(durationKey string) []string {
|
||||||
|
|
||||||
switch durationKey {
|
switch durationKey {
|
||||||
|
case DURATION_KEY_DAY:
|
||||||
|
//data stored in the last day
|
||||||
|
return []string{"-1d", "now()"}
|
||||||
case DURATION_KEY_WEEK:
|
case DURATION_KEY_WEEK:
|
||||||
//data stored in the last week
|
//data stored in the last week
|
||||||
return []string{"-1w", "now()"}
|
return []string{"-1w", "now()"}
|
||||||
@@ -480,8 +488,22 @@ func (sr *scrutinyRepository) lookupDuration(durationKey string) []string {
|
|||||||
return []string{"-1w", "now()"}
|
return []string{"-1w", "now()"}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (sr *scrutinyRepository) lookupResolution(durationKey string) string {
|
||||||
|
switch durationKey {
|
||||||
|
case DURATION_KEY_DAY:
|
||||||
|
// Return data with higher resolution for daily summaries
|
||||||
|
return "10m"
|
||||||
|
default:
|
||||||
|
// Return data with 1h resolution for other summaries
|
||||||
|
return "1h"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (sr *scrutinyRepository) lookupNestedDurationKeys(durationKey string) []string {
|
func (sr *scrutinyRepository) lookupNestedDurationKeys(durationKey string) []string {
|
||||||
switch durationKey {
|
switch durationKey {
|
||||||
|
case DURATION_KEY_DAY:
|
||||||
|
//all data is stored in a single bucket, but we want a finer resolution
|
||||||
|
return []string{DURATION_KEY_DAY}
|
||||||
case DURATION_KEY_WEEK:
|
case DURATION_KEY_WEEK:
|
||||||
//all data is stored in a single bucket
|
//all data is stored in a single bucket
|
||||||
return []string{DURATION_KEY_WEEK}
|
return []string{DURATION_KEY_WEEK}
|
||||||
|
|||||||
@@ -3,11 +3,13 @@ package database
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg"
|
"github.com/analogj/scrutiny/webapp/backend/pkg"
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/models"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models"
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
|
||||||
|
"github.com/gofrs/uuid/v5"
|
||||||
"gorm.io/gorm/clause"
|
"gorm.io/gorm/clause"
|
||||||
"time"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
@@ -18,7 +20,7 @@ import (
|
|||||||
// update device fields that may change: (DeviceType, HostID)
|
// update device fields that may change: (DeviceType, HostID)
|
||||||
func (sr *scrutinyRepository) RegisterDevice(ctx context.Context, dev models.Device) error {
|
func (sr *scrutinyRepository) RegisterDevice(ctx context.Context, dev models.Device) error {
|
||||||
if err := sr.gormClient.WithContext(ctx).Clauses(clause.OnConflict{
|
if err := sr.gormClient.WithContext(ctx).Clauses(clause.OnConflict{
|
||||||
Columns: []clause.Column{{Name: "wwn"}},
|
Columns: []clause.Column{{Name: "scrutiny_uuid"}},
|
||||||
DoUpdates: clause.AssignmentColumns([]string{"host_id", "device_name", "device_type", "device_uuid", "device_serial_id", "device_label"}),
|
DoUpdates: clause.AssignmentColumns([]string{"host_id", "device_name", "device_type", "device_uuid", "device_serial_id", "device_label"}),
|
||||||
}).Create(&dev).Error; err != nil {
|
}).Create(&dev).Error; err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -31,16 +33,16 @@ func (sr *scrutinyRepository) GetDevices(ctx context.Context) ([]models.Device,
|
|||||||
//Get a list of all the active devices.
|
//Get a list of all the active devices.
|
||||||
devices := []models.Device{}
|
devices := []models.Device{}
|
||||||
if err := sr.gormClient.WithContext(ctx).Find(&devices).Error; err != nil {
|
if err := sr.gormClient.WithContext(ctx).Find(&devices).Error; err != nil {
|
||||||
return nil, fmt.Errorf("Could not get device summary from DB: %v", err)
|
return nil, fmt.Errorf("could not get device summary from DB: %v", err)
|
||||||
}
|
}
|
||||||
return devices, nil
|
return devices, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// update device (only metadata) from collector
|
// update device (only metadata) from collector
|
||||||
func (sr *scrutinyRepository) UpdateDevice(ctx context.Context, wwn string, collectorSmartData collector.SmartInfo) (models.Device, error) {
|
func (sr *scrutinyRepository) UpdateDevice(ctx context.Context, scrutiny_uuid uuid.UUID, collectorSmartData collector.SmartInfo) (models.Device, error) {
|
||||||
var device models.Device
|
var device models.Device
|
||||||
if err := sr.gormClient.WithContext(ctx).Where("wwn = ?", wwn).First(&device).Error; err != nil {
|
if err := sr.gormClient.WithContext(ctx).Where("scrutiny_uuid = ?", scrutiny_uuid.String()).First(&device).Error; err != nil {
|
||||||
return device, fmt.Errorf("Could not get device from DB: %v", err)
|
return device, fmt.Errorf("could not get device from DB: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
//TODO catch GormClient err
|
//TODO catch GormClient err
|
||||||
@@ -52,22 +54,22 @@ func (sr *scrutinyRepository) UpdateDevice(ctx context.Context, wwn string, coll
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Update Device Status
|
// Update Device Status
|
||||||
func (sr *scrutinyRepository) UpdateDeviceStatus(ctx context.Context, wwn string, status pkg.DeviceStatus) (models.Device, error) {
|
func (sr *scrutinyRepository) UpdateDeviceStatus(ctx context.Context, scrutiny_uuid uuid.UUID, status pkg.DeviceStatus) (models.Device, error) {
|
||||||
var device models.Device
|
var device models.Device
|
||||||
if err := sr.gormClient.WithContext(ctx).Where("wwn = ?", wwn).First(&device).Error; err != nil {
|
if err := sr.gormClient.WithContext(ctx).Where("scrutiny_uuid = ?", scrutiny_uuid.String()).First(&device).Error; err != nil {
|
||||||
return device, fmt.Errorf("Could not get device from DB: %v", err)
|
return device, fmt.Errorf("could not get device from DB: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
device.DeviceStatus = pkg.DeviceStatusSet(device.DeviceStatus, status)
|
device.DeviceStatus = pkg.DeviceStatusSet(device.DeviceStatus, status)
|
||||||
return device, sr.gormClient.Model(&device).Updates(device).Error
|
return device, sr.gormClient.Model(&device).Updates(device).Error
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sr *scrutinyRepository) GetDeviceDetails(ctx context.Context, wwn string) (models.Device, error) {
|
func (sr *scrutinyRepository) GetDeviceDetails(ctx context.Context, scrutiny_uuid uuid.UUID) (models.Device, error) {
|
||||||
var device models.Device
|
var device models.Device
|
||||||
|
|
||||||
fmt.Println("GetDeviceDetails from GORM")
|
fmt.Println("GetDeviceDetails from GORM")
|
||||||
|
|
||||||
if err := sr.gormClient.WithContext(ctx).Where("wwn = ?", wwn).First(&device).Error; err != nil {
|
if err := sr.gormClient.WithContext(ctx).Where("scrutiny_uuid = ?", scrutiny_uuid.String()).First(&device).Error; err != nil {
|
||||||
return models.Device{}, err
|
return models.Device{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -75,17 +77,17 @@ func (sr *scrutinyRepository) GetDeviceDetails(ctx context.Context, wwn string)
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Update Device Archived State
|
// Update Device Archived State
|
||||||
func (sr *scrutinyRepository) UpdateDeviceArchived(ctx context.Context, wwn string, archived bool) error {
|
func (sr *scrutinyRepository) UpdateDeviceArchived(ctx context.Context, scrutiny_uuid uuid.UUID, archived bool) error {
|
||||||
var device models.Device
|
var device models.Device
|
||||||
if err := sr.gormClient.WithContext(ctx).Where("wwn = ?", wwn).First(&device).Error; err != nil {
|
if err := sr.gormClient.WithContext(ctx).Where("scrutiny_uuid = ?", scrutiny_uuid.String()).First(&device).Error; err != nil {
|
||||||
return fmt.Errorf("Could not get device from DB: %v", err)
|
return fmt.Errorf("could not get device from DB: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return sr.gormClient.Model(&device).Where("wwn = ?", wwn).Update("archived", archived).Error
|
return sr.gormClient.Model(&device).Where("scrutiny_uuid = ?", scrutiny_uuid.String()).Update("archived", archived).Error
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sr *scrutinyRepository) DeleteDevice(ctx context.Context, wwn string) error {
|
func (sr *scrutinyRepository) DeleteDevice(ctx context.Context, scrutiny_uuid uuid.UUID) error {
|
||||||
if err := sr.gormClient.WithContext(ctx).Where("wwn = ?", wwn).Delete(&models.Device{}).Error; err != nil {
|
if err := sr.gormClient.WithContext(ctx).Where("scrutiny_uuid = ?", scrutiny_uuid.String()).Delete(&models.Device{}).Error; err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -98,14 +100,14 @@ func (sr *scrutinyRepository) DeleteDevice(ctx context.Context, wwn string) erro
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, bucket := range buckets {
|
for _, bucket := range buckets {
|
||||||
sr.logger.Infof("Deleting data for %s in bucket: %s", wwn, bucket)
|
sr.logger.Infof("Deleting data for %s in bucket: %s", scrutiny_uuid.String(), bucket)
|
||||||
if err := sr.influxClient.DeleteAPI().DeleteWithName(
|
if err := sr.influxClient.DeleteAPI().DeleteWithName(
|
||||||
ctx,
|
ctx,
|
||||||
sr.appConfig.GetString("web.influxdb.org"),
|
sr.appConfig.GetString("web.influxdb.org"),
|
||||||
bucket,
|
bucket,
|
||||||
time.Now().AddDate(-10, 0, 0),
|
time.Now().AddDate(-10, 0, 0),
|
||||||
time.Now(),
|
time.Now(),
|
||||||
fmt.Sprintf(`device_wwn="%s"`, wwn),
|
fmt.Sprintf(`scrutiny_uuid="%s"`, scrutiny_uuid.String()),
|
||||||
); err != nil {
|
); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -8,17 +8,18 @@ import (
|
|||||||
|
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/models/measurements"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models/measurements"
|
||||||
|
"github.com/gofrs/uuid/v5"
|
||||||
influxdb2 "github.com/influxdata/influxdb-client-go/v2"
|
influxdb2 "github.com/influxdata/influxdb-client-go/v2"
|
||||||
"github.com/influxdata/influxdb-client-go/v2/api"
|
"github.com/influxdata/influxdb-client-go/v2/api"
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
|
|
||||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
// //////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
// SMART
|
// SMART
|
||||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
// //////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
func (sr *scrutinyRepository) SaveSmartAttributes(ctx context.Context, wwn string, collectorSmartData collector.SmartInfo) (measurements.Smart, error) {
|
func (sr *scrutinyRepository) SaveSmartAttributes(ctx context.Context, scrutiny_uuid uuid.UUID, collectorSmartData collector.SmartInfo) (measurements.Smart, error) {
|
||||||
deviceSmartData := measurements.Smart{}
|
deviceSmartData := measurements.Smart{}
|
||||||
err := deviceSmartData.FromCollectorSmartInfo(wwn, collectorSmartData)
|
err := deviceSmartData.FromCollectorSmartInfo(scrutiny_uuid, collectorSmartData)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
sr.logger.Errorln("Could not process SMART metrics", err)
|
sr.logger.Errorln("Could not process SMART metrics", err)
|
||||||
return measurements.Smart{}, err
|
return measurements.Smart{}, err
|
||||||
@@ -34,14 +35,14 @@ func (sr *scrutinyRepository) SaveSmartAttributes(ctx context.Context, wwn strin
|
|||||||
// When selectEntries is > 0, only the most recent selectEntries database entries are returned, starting from the selectEntriesOffset entry.
|
// When selectEntries is > 0, only the most recent selectEntries database entries are returned, starting from the selectEntriesOffset entry.
|
||||||
// For example, with selectEntries = 5, selectEntries = 0, the most recent 5 are returned. With selectEntries = 3, selectEntries = 2, entries
|
// For example, with selectEntries = 5, selectEntries = 0, the most recent 5 are returned. With selectEntries = 3, selectEntries = 2, entries
|
||||||
// 2 to 4 are returned (2 being the third newest, since it is zero-indexed)
|
// 2 to 4 are returned (2 being the third newest, since it is zero-indexed)
|
||||||
func (sr *scrutinyRepository) GetSmartAttributeHistory(ctx context.Context, wwn string, durationKey string, selectEntries int, selectEntriesOffset int, attributes []string) ([]measurements.Smart, error) {
|
func (sr *scrutinyRepository) GetSmartAttributeHistory(ctx context.Context, scrutiny_uuid uuid.UUID, durationKey string, selectEntries int, selectEntriesOffset int, attributes []string) ([]measurements.Smart, error) {
|
||||||
// Get SMartResults from InfluxDB
|
// Get SMartResults from InfluxDB
|
||||||
|
|
||||||
//TODO: change the filter startrange to a real number.
|
//TODO: change the filter startrange to a real number.
|
||||||
|
|
||||||
// Get parser flux query result
|
// Get parser flux query result
|
||||||
//appConfig.GetString("web.influxdb.bucket")
|
//appConfig.GetString("web.influxdb.bucket")
|
||||||
queryStr := sr.aggregateSmartAttributesQuery(wwn, durationKey, selectEntries, selectEntriesOffset, attributes)
|
queryStr := sr.aggregateSmartAttributesQuery(scrutiny_uuid, durationKey, selectEntries, selectEntriesOffset, attributes)
|
||||||
log.Infoln(queryStr)
|
log.Infoln(queryStr)
|
||||||
|
|
||||||
smartResults := []measurements.Smart{}
|
smartResults := []measurements.Smart{}
|
||||||
@@ -100,7 +101,7 @@ func (sr *scrutinyRepository) saveDatapoint(influxWriteApi api.WriteAPIBlocking,
|
|||||||
return influxWriteApi.WritePoint(ctx, p)
|
return influxWriteApi.WritePoint(ctx, p)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sr *scrutinyRepository) aggregateSmartAttributesQuery(wwn string, durationKey string, selectEntries int, selectEntriesOffset int, attributes []string) string {
|
func (sr *scrutinyRepository) aggregateSmartAttributesQuery(scrutiny_uuid uuid.UUID, durationKey string, selectEntries int, selectEntriesOffset int, attributes []string) string {
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
||||||
@@ -108,28 +109,28 @@ func (sr *scrutinyRepository) aggregateSmartAttributesQuery(wwn string, duration
|
|||||||
weekData = from(bucket: "metrics")
|
weekData = from(bucket: "metrics")
|
||||||
|> range(start: -1w, stop: now())
|
|> range(start: -1w, stop: now())
|
||||||
|> filter(fn: (r) => r["_measurement"] == "smart" )
|
|> filter(fn: (r) => r["_measurement"] == "smart" )
|
||||||
|> filter(fn: (r) => r["device_wwn"] == "0x5000c5002df89099" )
|
|> filter(fn: (r) => r["scrutiny_uuid"] == "32bda933-15be-56a3-902f-9f3674b03d59" )
|
||||||
|> tail(n: 10, offset: 0)
|
|> tail(n: 10, offset: 0)
|
||||||
|> schema.fieldsAsCols()
|
|> schema.fieldsAsCols()
|
||||||
|
|
||||||
monthData = from(bucket: "metrics_weekly")
|
monthData = from(bucket: "metrics_weekly")
|
||||||
|> range(start: -1mo, stop: -1w)
|
|> range(start: -1mo, stop: -1w)
|
||||||
|> filter(fn: (r) => r["_measurement"] == "smart" )
|
|> filter(fn: (r) => r["_measurement"] == "smart" )
|
||||||
|> filter(fn: (r) => r["device_wwn"] == "0x5000c5002df89099" )
|
|> filter(fn: (r) => r["scrutiny_uuid"] == "32bda933-15be-56a3-902f-9f3674b03d59" )
|
||||||
|> tail(n: 10, offset: 0)
|
|> tail(n: 10, offset: 0)
|
||||||
|> schema.fieldsAsCols()
|
|> schema.fieldsAsCols()
|
||||||
|
|
||||||
yearData = from(bucket: "metrics_monthly")
|
yearData = from(bucket: "metrics_monthly")
|
||||||
|> range(start: -1y, stop: -1mo)
|
|> range(start: -1y, stop: -1mo)
|
||||||
|> filter(fn: (r) => r["_measurement"] == "smart" )
|
|> filter(fn: (r) => r["_measurement"] == "smart" )
|
||||||
|> filter(fn: (r) => r["device_wwn"] == "0x5000c5002df89099" )
|
|> filter(fn: (r) => r["scrutiny_uuid"] == "32bda933-15be-56a3-902f-9f3674b03d59" )
|
||||||
|> tail(n: 10, offset: 0)
|
|> tail(n: 10, offset: 0)
|
||||||
|> schema.fieldsAsCols()
|
|> schema.fieldsAsCols()
|
||||||
|
|
||||||
foreverData = from(bucket: "metrics_yearly")
|
foreverData = from(bucket: "metrics_yearly")
|
||||||
|> range(start: -10y, stop: -1y)
|
|> range(start: -10y, stop: -1y)
|
||||||
|> filter(fn: (r) => r["_measurement"] == "smart" )
|
|> filter(fn: (r) => r["_measurement"] == "smart" )
|
||||||
|> filter(fn: (r) => r["device_wwn"] == "0x5000c5002df89099" )
|
|> filter(fn: (r) => r["scrutiny_uuid"] == "32bda933-15be-56a3-902f-9f3674b03d59" )
|
||||||
|> tail(n: 10, offset: 0)
|
|> tail(n: 10, offset: 0)
|
||||||
|> schema.fieldsAsCols()
|
|> schema.fieldsAsCols()
|
||||||
|
|
||||||
@@ -150,7 +151,7 @@ func (sr *scrutinyRepository) aggregateSmartAttributesQuery(wwn string, duration
|
|||||||
if len(nestedDurationKeys) == 1 {
|
if len(nestedDurationKeys) == 1 {
|
||||||
//there's only one bucket being queried, no need to union, just aggregate the dataset and return
|
//there's only one bucket being queried, no need to union, just aggregate the dataset and return
|
||||||
partialQueryStr = append(partialQueryStr, []string{
|
partialQueryStr = append(partialQueryStr, []string{
|
||||||
sr.generateSmartAttributesSubquery(wwn, nestedDurationKeys[0], selectEntries, selectEntriesOffset, attributes),
|
sr.generateSmartAttributesSubquery(scrutiny_uuid, nestedDurationKeys[0], selectEntries, selectEntriesOffset, attributes),
|
||||||
fmt.Sprintf(`%sData`, nestedDurationKeys[0]),
|
fmt.Sprintf(`%sData`, nestedDurationKeys[0]),
|
||||||
`|> sort(columns: ["_time"], desc: true)`,
|
`|> sort(columns: ["_time"], desc: true)`,
|
||||||
`|> yield()`,
|
`|> yield()`,
|
||||||
@@ -165,9 +166,9 @@ func (sr *scrutinyRepository) aggregateSmartAttributesQuery(wwn string, duration
|
|||||||
if selectEntries > 0 {
|
if selectEntries > 0 {
|
||||||
// We only need the last `n + offset` # of entries from each table to guarantee we can
|
// We only need the last `n + offset` # of entries from each table to guarantee we can
|
||||||
// get the last `n` # of entries starting from `offset` of the union
|
// get the last `n` # of entries starting from `offset` of the union
|
||||||
subQueries = append(subQueries, sr.generateSmartAttributesSubquery(wwn, nestedDurationKey, selectEntries+selectEntriesOffset, 0, attributes))
|
subQueries = append(subQueries, sr.generateSmartAttributesSubquery(scrutiny_uuid, nestedDurationKey, selectEntries+selectEntriesOffset, 0, attributes))
|
||||||
} else {
|
} else {
|
||||||
subQueries = append(subQueries, sr.generateSmartAttributesSubquery(wwn, nestedDurationKey, 0, 0, attributes))
|
subQueries = append(subQueries, sr.generateSmartAttributesSubquery(scrutiny_uuid, nestedDurationKey, 0, 0, attributes))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
partialQueryStr = append(partialQueryStr, subQueries...)
|
partialQueryStr = append(partialQueryStr, subQueries...)
|
||||||
@@ -177,14 +178,14 @@ func (sr *scrutinyRepository) aggregateSmartAttributesQuery(wwn string, duration
|
|||||||
`|> sort(columns: ["_time"], desc: true)`,
|
`|> sort(columns: ["_time"], desc: true)`,
|
||||||
}...)
|
}...)
|
||||||
if selectEntries > 0 {
|
if selectEntries > 0 {
|
||||||
partialQueryStr = append(partialQueryStr, fmt.Sprintf(`|> tail(n: %d, offset: %d)`, selectEntries, selectEntriesOffset))
|
partialQueryStr = append(partialQueryStr, fmt.Sprintf(`|> limit(n: %d, offset: %d)`, selectEntries, selectEntriesOffset))
|
||||||
}
|
}
|
||||||
partialQueryStr = append(partialQueryStr, `|> yield(name: "last")`)
|
partialQueryStr = append(partialQueryStr, `|> yield(name: "last")`)
|
||||||
|
|
||||||
return strings.Join(partialQueryStr, "\n")
|
return strings.Join(partialQueryStr, "\n")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sr *scrutinyRepository) generateSmartAttributesSubquery(wwn string, durationKey string, selectEntries int, selectEntriesOffset int, attributes []string) string {
|
func (sr *scrutinyRepository) generateSmartAttributesSubquery(scrutiny_uuid uuid.UUID, durationKey string, selectEntries int, selectEntriesOffset int, attributes []string) string {
|
||||||
bucketName := sr.lookupBucketName(durationKey)
|
bucketName := sr.lookupBucketName(durationKey)
|
||||||
durationRange := sr.lookupDuration(durationKey)
|
durationRange := sr.lookupDuration(durationKey)
|
||||||
|
|
||||||
@@ -192,13 +193,15 @@ func (sr *scrutinyRepository) generateSmartAttributesSubquery(wwn string, durati
|
|||||||
fmt.Sprintf(`%sData = from(bucket: "%s")`, durationKey, bucketName),
|
fmt.Sprintf(`%sData = from(bucket: "%s")`, durationKey, bucketName),
|
||||||
fmt.Sprintf(`|> range(start: %s, stop: %s)`, durationRange[0], durationRange[1]),
|
fmt.Sprintf(`|> range(start: %s, stop: %s)`, durationRange[0], durationRange[1]),
|
||||||
`|> filter(fn: (r) => r["_measurement"] == "smart" )`,
|
`|> filter(fn: (r) => r["_measurement"] == "smart" )`,
|
||||||
fmt.Sprintf(`|> filter(fn: (r) => r["device_wwn"] == "%s" )`, wwn),
|
fmt.Sprintf(`|> filter(fn: (r) => r["scrutiny_uuid"] == "%s" )`, scrutiny_uuid.String()),
|
||||||
}
|
}
|
||||||
|
|
||||||
partialQueryStr = append(partialQueryStr, `|> aggregateWindow(every: 1d, fn: last, createEmpty: false)`)
|
partialQueryStr = append(partialQueryStr, `|> aggregateWindow(every: 1d, fn: last, createEmpty: false)`)
|
||||||
|
|
||||||
|
// ensure we are selecting the latest entries when paging
|
||||||
|
partialQueryStr = append(partialQueryStr, `|> sort(columns: ["_time"], desc: true)`)
|
||||||
if selectEntries > 0 {
|
if selectEntries > 0 {
|
||||||
partialQueryStr = append(partialQueryStr, fmt.Sprintf(`|> tail(n: %d, offset: %d)`, selectEntries, selectEntriesOffset))
|
partialQueryStr = append(partialQueryStr, fmt.Sprintf(`|> limit(n: %d, offset: %d)`, selectEntries, selectEntriesOffset))
|
||||||
}
|
}
|
||||||
partialQueryStr = append(partialQueryStr, "|> schema.fieldsAsCols()")
|
partialQueryStr = append(partialQueryStr, "|> schema.fieldsAsCols()")
|
||||||
|
|
||||||
|
|||||||
@@ -7,12 +7,14 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/analogj/scrutiny/collector/pkg/detect"
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg"
|
"github.com/analogj/scrutiny/webapp/backend/pkg"
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/database/migrations/m20201107210306"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/database/migrations/m20201107210306"
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/database/migrations/m20220503120000"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/database/migrations/m20220503120000"
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/database/migrations/m20220509170100"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/database/migrations/m20220509170100"
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/database/migrations/m20220716214900"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/database/migrations/m20220716214900"
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/database/migrations/m20250221084400"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/database/migrations/m20250221084400"
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg/database/migrations/m20260216155600"
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/models"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models"
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/models/measurements"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models/measurements"
|
||||||
@@ -424,6 +426,53 @@ func (sr *scrutinyRepository) Migrate(ctx context.Context) error {
|
|||||||
return tx.Create(&defaultSettings).Error
|
return tx.Create(&defaultSettings).Error
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
ID: "m20260216155600", // add ScrutinyUUID as primary key
|
||||||
|
Migrate: func(tx *gorm.DB) error {
|
||||||
|
devices := []m20260216155600.Device{}
|
||||||
|
if err := tx.Find(&devices).Error; err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
sr.logger.Debug("Generating Scrutiny UUIDs")
|
||||||
|
for i := range devices {
|
||||||
|
device := &devices[i]
|
||||||
|
device.ScrutinyUUID = detect.GenerateScrutinyUUID(device.ModelName, device.SerialNumber, device.WWN)
|
||||||
|
}
|
||||||
|
|
||||||
|
// sqlite doesn't support altering columns
|
||||||
|
// so we have to create a new one, drop the old one, then rename.
|
||||||
|
sr.logger.Debug("Creating new devices table")
|
||||||
|
tx.Table("devices_new").AutoMigrate(&m20260216155600.Device{})
|
||||||
|
if len(devices) > 0 {
|
||||||
|
if err := tx.Table("devices_new").Create(&devices).Error; err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
sr.logger.Debug("Dropping old devices table")
|
||||||
|
if err := tx.Migrator().DropTable(&m20260216155600.Device{}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
sr.logger.Debug("Renaming new device table")
|
||||||
|
if err := tx.Migrator().RenameTable("devices_new", "devices"); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
//
|
||||||
|
wwnToUUID := make(map[string]string)
|
||||||
|
for _, device := range devices {
|
||||||
|
wwnToUUID[device.WWN] = device.ScrutinyUUID.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
err := m20260216155600_ChangeInfluxDBTags(sr, ctx, wwnToUUID)
|
||||||
|
if ignorePastRetentionPolicyError(err) != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
},
|
||||||
})
|
})
|
||||||
|
|
||||||
if err := m.Migrate(); err != nil {
|
if err := m.Migrate(); err != nil {
|
||||||
@@ -473,6 +522,91 @@ func ignorePastRetentionPolicyError(err error) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func m20260216155600_ChangeInfluxDBTags(sr *scrutinyRepository, ctx context.Context, wwnToUUID map[string]string) error {
|
||||||
|
bucket := sr.appConfig.GetString("web.influxdb.bucket")
|
||||||
|
org := sr.appConfig.GetString("web.influxdb.org")
|
||||||
|
bucketNames := []string{
|
||||||
|
bucket,
|
||||||
|
fmt.Sprintf("%s_weekly", bucket),
|
||||||
|
fmt.Sprintf("%s_monthly", bucket),
|
||||||
|
fmt.Sprintf("%s_yearly", bucket),
|
||||||
|
}
|
||||||
|
|
||||||
|
const batchSize = 1000
|
||||||
|
bucketsAPI := sr.influxClient.BucketsAPI()
|
||||||
|
|
||||||
|
for _, bucketName := range bucketNames {
|
||||||
|
newBucketName := fmt.Sprintf("%s_new", bucketName)
|
||||||
|
|
||||||
|
// Step 1: Create the new bucket. Copy retention rules from the original.
|
||||||
|
sr.logger.Debugf("Creating temporary bucket %s...", newBucketName)
|
||||||
|
oldBucket, err := bucketsAPI.FindBucketByName(ctx, bucketName)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Failed to find bucket %s: %w", bucketName, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete leftover _new bucket from a previous failed migration attempt.
|
||||||
|
if existingNew, _ := bucketsAPI.FindBucketByName(ctx, newBucketName); existingNew != nil {
|
||||||
|
sr.logger.Debugf("Found leftover bucket %s from previous migration, deleting...", newBucketName)
|
||||||
|
if err := bucketsAPI.DeleteBucket(ctx, existingNew); err != nil {
|
||||||
|
return fmt.Errorf("Failed to delete leftover bucket %s: %w", newBucketName, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
orgObj, err := sr.influxClient.OrganizationsAPI().FindOrganizationByName(ctx, org)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to find organization %s: %w", org, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
newBucket, err := bucketsAPI.CreateBucketWithName(ctx, orgObj, newBucketName, oldBucket.RetentionRules...)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to create bucket %s: %w", newBucketName, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for wwn, scrutinyUUID := range wwnToUUID {
|
||||||
|
sr.logger.Debugf("Copying points from %s to %s for wwn %s...", bucketName, newBucketName, wwn)
|
||||||
|
|
||||||
|
offset := 0
|
||||||
|
for ; ; offset += batchSize {
|
||||||
|
queryStr := fmt.Sprintf(`
|
||||||
|
from(bucket: "%s")
|
||||||
|
|> range(start: -10y, stop: now())
|
||||||
|
|> filter(fn: (r) => r["_measurement"] == "smart" or r["_measurement"] == "temp")
|
||||||
|
|> filter(fn: (r) => r["device_wwn"] == "%s")
|
||||||
|
|> limit(n: %d, offset: %d)
|
||||||
|
|> drop(columns: ["device_wwn"])
|
||||||
|
|> set(key: "scrutiny_uuid", value: "%s")
|
||||||
|
|> to(bucket: "%s")
|
||||||
|
`, bucketName, wwn, batchSize, offset, scrutinyUUID, newBucketName)
|
||||||
|
|
||||||
|
result, err := sr.influxQueryApi.Query(ctx, queryStr)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to copy points from %s to %s for wwn %s (offset %d): %w", bucketName, newBucketName, wwn, offset, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !result.Next() {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
sr.logger.Debugf("Copied approx. %d points for wwn %s", offset, wwn)
|
||||||
|
}
|
||||||
|
|
||||||
|
sr.logger.Debugf("Replacing bucket %s with %s...", bucketName, newBucketName)
|
||||||
|
if err := bucketsAPI.DeleteBucket(ctx, oldBucket); err != nil {
|
||||||
|
return fmt.Errorf("Failed to delete old bucket %s: %w", bucketName, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
newBucket.Name = bucketName
|
||||||
|
if _, err := bucketsAPI.UpdateBucket(ctx, newBucket); err != nil {
|
||||||
|
return fmt.Errorf("Failed to rename bucket %s to %s: %w", newBucketName, bucketName, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
sr.logger.Debugf("Bucket %s migrated successfully", bucketName)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// Deprecated
|
// Deprecated
|
||||||
func m20201107210306_FromPreInfluxDBTempCreatePostInfluxDBTemp(preDevice m20201107210306.Device, preSmartResult m20201107210306.Smart) (error, measurements.SmartTemperature) {
|
func m20201107210306_FromPreInfluxDBTempCreatePostInfluxDBTemp(preDevice m20201107210306.Device, preSmartResult m20201107210306.Smart) (error, measurements.SmartTemperature) {
|
||||||
//extract temperature data for every datapoint
|
//extract temperature data for every datapoint
|
||||||
@@ -647,7 +781,7 @@ func m20201107210306_FromPreInfluxDBSmartResultsCreatePostInfluxDBSmartResults(d
|
|||||||
}
|
}
|
||||||
postDeviceSmartData.ProcessScsiSmartInfo(postScsiGrownDefectList, postScsiErrorCounterLog)
|
postDeviceSmartData.ProcessScsiSmartInfo(postScsiGrownDefectList, postScsiErrorCounterLog)
|
||||||
} else {
|
} else {
|
||||||
return fmt.Errorf("Unknown device protocol: %s", preDevice.DeviceProtocol), postDeviceSmartData
|
return fmt.Errorf("unknown device protocol: %s", preDevice.DeviceProtocol), postDeviceSmartData
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil, postDeviceSmartData
|
return nil, postDeviceSmartData
|
||||||
|
|||||||
@@ -3,17 +3,18 @@ package database
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/config"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/config"
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/models"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models"
|
||||||
"github.com/mitchellh/mapstructure"
|
"github.com/go-viper/mapstructure/v2"
|
||||||
"strings"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// LoadSettings will retrieve settings from the database, store them in the AppConfig object, and return a Settings struct
|
// LoadSettings will retrieve settings from the database, store them in the AppConfig object, and return a Settings struct
|
||||||
func (sr *scrutinyRepository) LoadSettings(ctx context.Context) (*models.Settings, error) {
|
func (sr *scrutinyRepository) LoadSettings(ctx context.Context) (*models.Settings, error) {
|
||||||
settingsEntries := []models.SettingEntry{}
|
settingsEntries := []models.SettingEntry{}
|
||||||
if err := sr.gormClient.WithContext(ctx).Find(&settingsEntries).Error; err != nil {
|
if err := sr.gormClient.WithContext(ctx).Find(&settingsEntries).Error; err != nil {
|
||||||
return nil, fmt.Errorf("Could not get settings from DB: %v", err)
|
return nil, fmt.Errorf("could not get settings from DB: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// store retrieved settings in the AppConfig obj
|
// store retrieved settings in the AppConfig obj
|
||||||
@@ -58,7 +59,7 @@ func (sr *scrutinyRepository) SaveSettings(ctx context.Context, settings models.
|
|||||||
//retrieve current settings from the database
|
//retrieve current settings from the database
|
||||||
settingsEntries := []models.SettingEntry{}
|
settingsEntries := []models.SettingEntry{}
|
||||||
if err := sr.gormClient.WithContext(ctx).Find(&settingsEntries).Error; err != nil {
|
if err := sr.gormClient.WithContext(ctx).Find(&settingsEntries).Error; err != nil {
|
||||||
return fmt.Errorf("Could not get settings from DB: %v", err)
|
return fmt.Errorf("could not get settings from DB: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
//update settingsEntries
|
//update settingsEntries
|
||||||
|
|||||||
@@ -3,12 +3,13 @@ package database
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"github.com/influxdata/influxdb-client-go/v2/api"
|
"github.com/influxdata/influxdb-client-go/v2/api"
|
||||||
)
|
)
|
||||||
|
|
||||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
// //////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
// Tasks
|
// Tasks
|
||||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
// //////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
func (sr *scrutinyRepository) EnsureTasks(ctx context.Context, orgID string) error {
|
func (sr *scrutinyRepository) EnsureTasks(ctx context.Context, orgID string) error {
|
||||||
weeklyTaskName := "tsk-weekly-aggr"
|
weeklyTaskName := "tsk-weekly-aggr"
|
||||||
weeklyTaskScript := sr.DownsampleScript("weekly", weeklyTaskName, "0 1 * * 0")
|
weeklyTaskScript := sr.DownsampleScript("weekly", weeklyTaskName, "0 1 * * 0")
|
||||||
@@ -108,7 +109,7 @@ func (sr *scrutinyRepository) DownsampleScript(aggregationType string, name stri
|
|||||||
smart_data = from(bucket: sourceBucket)
|
smart_data = from(bucket: sourceBucket)
|
||||||
|> range(start: rangeStart, stop: rangeEnd)
|
|> range(start: rangeStart, stop: rangeEnd)
|
||||||
|> filter(fn: (r) => r["_measurement"] == "smart" )
|
|> filter(fn: (r) => r["_measurement"] == "smart" )
|
||||||
|> group(columns: ["device_wwn", "_field"])
|
|> group(columns: ["scrutiny_uuid", "_field"])
|
||||||
|
|
||||||
non_numeric_smart_data = smart_data
|
non_numeric_smart_data = smart_data
|
||||||
|> filter(fn: (r) => types.isType(v: r._value, type: "string") or types.isType(v: r._value, type: "bool"))
|
|> filter(fn: (r) => types.isType(v: r._value, type: "string") or types.isType(v: r._value, type: "bool"))
|
||||||
@@ -139,20 +140,19 @@ destOrg = "%s"
|
|||||||
from(bucket: sourceBucket)
|
from(bucket: sourceBucket)
|
||||||
|> range(start: rangeStart, stop: rangeEnd)
|
|> range(start: rangeStart, stop: rangeEnd)
|
||||||
|> filter(fn: (r) => r["_measurement"] == "smart" )
|
|> filter(fn: (r) => r["_measurement"] == "smart" )
|
||||||
|> group(columns: ["device_wwn", "_field"])
|
|> group(columns: ["scrutiny_uuid", "_field"])
|
||||||
|> aggregateWindow(every: aggWindow, fn: last, createEmpty: false)
|
|> aggregateWindow(every: aggWindow, fn: last, createEmpty: false)
|
||||||
|> to(bucket: destBucket, org: destOrg)
|
|> to(bucket: destBucket, org: destOrg)
|
||||||
|
|
||||||
from(bucket: sourceBucket)
|
from(bucket: sourceBucket)
|
||||||
|> range(start: rangeStart, stop: rangeEnd)
|
|> range(start: rangeStart, stop: rangeEnd)
|
||||||
|> filter(fn: (r) => r["_measurement"] == "temp")
|
|> filter(fn: (r) => r["_measurement"] == "temp")
|
||||||
|> group(columns: ["device_wwn"])
|
|> group(columns: ["scrutiny_uuid"])
|
||||||
|> toInt()
|
|> toInt()
|
||||||
|> aggregateWindow(fn: mean, every: aggWindow, createEmpty: false)
|
|> aggregateWindow(fn: mean, every: aggWindow, createEmpty: false)
|
||||||
|> set(key: "_measurement", value: "temp")
|
|> set(key: "_measurement", value: "temp")
|
||||||
|> set(key: "_field", value: "temp")
|
|> set(key: "_field", value: "temp")
|
||||||
|> to(bucket: destBucket, org: destOrg)
|
|> to(bucket: destBucket, org: destOrg)`,
|
||||||
`,
|
|
||||||
name,
|
name,
|
||||||
cron,
|
cron,
|
||||||
sourceBucket,
|
sourceBucket,
|
||||||
|
|||||||
@@ -1,10 +1,11 @@
|
|||||||
package database
|
package database
|
||||||
|
|
||||||
import (
|
import (
|
||||||
mock_config "github.com/analogj/scrutiny/webapp/backend/pkg/config/mock"
|
|
||||||
"github.com/golang/mock/gomock"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
mock_config "github.com/analogj/scrutiny/webapp/backend/pkg/config/mock"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
"go.uber.org/mock/gomock"
|
||||||
)
|
)
|
||||||
|
|
||||||
func Test_DownsampleScript_Weekly(t *testing.T) {
|
func Test_DownsampleScript_Weekly(t *testing.T) {
|
||||||
@@ -12,7 +13,6 @@ func Test_DownsampleScript_Weekly(t *testing.T) {
|
|||||||
|
|
||||||
//setup
|
//setup
|
||||||
mockCtrl := gomock.NewController(t)
|
mockCtrl := gomock.NewController(t)
|
||||||
defer mockCtrl.Finish()
|
|
||||||
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
||||||
fakeConfig.EXPECT().GetString("web.influxdb.bucket").Return("metrics").AnyTimes()
|
fakeConfig.EXPECT().GetString("web.influxdb.bucket").Return("metrics").AnyTimes()
|
||||||
fakeConfig.EXPECT().GetString("web.influxdb.org").Return("scrutiny").AnyTimes()
|
fakeConfig.EXPECT().GetString("web.influxdb.org").Return("scrutiny").AnyTimes()
|
||||||
@@ -43,20 +43,19 @@ destOrg = "scrutiny"
|
|||||||
from(bucket: sourceBucket)
|
from(bucket: sourceBucket)
|
||||||
|> range(start: rangeStart, stop: rangeEnd)
|
|> range(start: rangeStart, stop: rangeEnd)
|
||||||
|> filter(fn: (r) => r["_measurement"] == "smart" )
|
|> filter(fn: (r) => r["_measurement"] == "smart" )
|
||||||
|> group(columns: ["device_wwn", "_field"])
|
|> group(columns: ["scrutiny_uuid", "_field"])
|
||||||
|> aggregateWindow(every: aggWindow, fn: last, createEmpty: false)
|
|> aggregateWindow(every: aggWindow, fn: last, createEmpty: false)
|
||||||
|> to(bucket: destBucket, org: destOrg)
|
|> to(bucket: destBucket, org: destOrg)
|
||||||
|
|
||||||
from(bucket: sourceBucket)
|
from(bucket: sourceBucket)
|
||||||
|> range(start: rangeStart, stop: rangeEnd)
|
|> range(start: rangeStart, stop: rangeEnd)
|
||||||
|> filter(fn: (r) => r["_measurement"] == "temp")
|
|> filter(fn: (r) => r["_measurement"] == "temp")
|
||||||
|> group(columns: ["device_wwn"])
|
|> group(columns: ["scrutiny_uuid"])
|
||||||
|> toInt()
|
|> toInt()
|
||||||
|> aggregateWindow(fn: mean, every: aggWindow, createEmpty: false)
|
|> aggregateWindow(fn: mean, every: aggWindow, createEmpty: false)
|
||||||
|> set(key: "_measurement", value: "temp")
|
|> set(key: "_measurement", value: "temp")
|
||||||
|> set(key: "_field", value: "temp")
|
|> set(key: "_field", value: "temp")
|
||||||
|> to(bucket: destBucket, org: destOrg)
|
|> to(bucket: destBucket, org: destOrg)`, influxDbScript)
|
||||||
`, influxDbScript)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func Test_DownsampleScript_Monthly(t *testing.T) {
|
func Test_DownsampleScript_Monthly(t *testing.T) {
|
||||||
@@ -64,7 +63,6 @@ func Test_DownsampleScript_Monthly(t *testing.T) {
|
|||||||
|
|
||||||
//setup
|
//setup
|
||||||
mockCtrl := gomock.NewController(t)
|
mockCtrl := gomock.NewController(t)
|
||||||
defer mockCtrl.Finish()
|
|
||||||
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
||||||
fakeConfig.EXPECT().GetString("web.influxdb.bucket").Return("metrics").AnyTimes()
|
fakeConfig.EXPECT().GetString("web.influxdb.bucket").Return("metrics").AnyTimes()
|
||||||
fakeConfig.EXPECT().GetString("web.influxdb.org").Return("scrutiny").AnyTimes()
|
fakeConfig.EXPECT().GetString("web.influxdb.org").Return("scrutiny").AnyTimes()
|
||||||
@@ -95,20 +93,19 @@ destOrg = "scrutiny"
|
|||||||
from(bucket: sourceBucket)
|
from(bucket: sourceBucket)
|
||||||
|> range(start: rangeStart, stop: rangeEnd)
|
|> range(start: rangeStart, stop: rangeEnd)
|
||||||
|> filter(fn: (r) => r["_measurement"] == "smart" )
|
|> filter(fn: (r) => r["_measurement"] == "smart" )
|
||||||
|> group(columns: ["device_wwn", "_field"])
|
|> group(columns: ["scrutiny_uuid", "_field"])
|
||||||
|> aggregateWindow(every: aggWindow, fn: last, createEmpty: false)
|
|> aggregateWindow(every: aggWindow, fn: last, createEmpty: false)
|
||||||
|> to(bucket: destBucket, org: destOrg)
|
|> to(bucket: destBucket, org: destOrg)
|
||||||
|
|
||||||
from(bucket: sourceBucket)
|
from(bucket: sourceBucket)
|
||||||
|> range(start: rangeStart, stop: rangeEnd)
|
|> range(start: rangeStart, stop: rangeEnd)
|
||||||
|> filter(fn: (r) => r["_measurement"] == "temp")
|
|> filter(fn: (r) => r["_measurement"] == "temp")
|
||||||
|> group(columns: ["device_wwn"])
|
|> group(columns: ["scrutiny_uuid"])
|
||||||
|> toInt()
|
|> toInt()
|
||||||
|> aggregateWindow(fn: mean, every: aggWindow, createEmpty: false)
|
|> aggregateWindow(fn: mean, every: aggWindow, createEmpty: false)
|
||||||
|> set(key: "_measurement", value: "temp")
|
|> set(key: "_measurement", value: "temp")
|
||||||
|> set(key: "_field", value: "temp")
|
|> set(key: "_field", value: "temp")
|
||||||
|> to(bucket: destBucket, org: destOrg)
|
|> to(bucket: destBucket, org: destOrg)`, influxDbScript)
|
||||||
`, influxDbScript)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func Test_DownsampleScript_Yearly(t *testing.T) {
|
func Test_DownsampleScript_Yearly(t *testing.T) {
|
||||||
@@ -116,7 +113,6 @@ func Test_DownsampleScript_Yearly(t *testing.T) {
|
|||||||
|
|
||||||
//setup
|
//setup
|
||||||
mockCtrl := gomock.NewController(t)
|
mockCtrl := gomock.NewController(t)
|
||||||
defer mockCtrl.Finish()
|
|
||||||
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
||||||
fakeConfig.EXPECT().GetString("web.influxdb.bucket").Return("metrics").AnyTimes()
|
fakeConfig.EXPECT().GetString("web.influxdb.bucket").Return("metrics").AnyTimes()
|
||||||
fakeConfig.EXPECT().GetString("web.influxdb.org").Return("scrutiny").AnyTimes()
|
fakeConfig.EXPECT().GetString("web.influxdb.org").Return("scrutiny").AnyTimes()
|
||||||
@@ -147,18 +143,17 @@ destOrg = "scrutiny"
|
|||||||
from(bucket: sourceBucket)
|
from(bucket: sourceBucket)
|
||||||
|> range(start: rangeStart, stop: rangeEnd)
|
|> range(start: rangeStart, stop: rangeEnd)
|
||||||
|> filter(fn: (r) => r["_measurement"] == "smart" )
|
|> filter(fn: (r) => r["_measurement"] == "smart" )
|
||||||
|> group(columns: ["device_wwn", "_field"])
|
|> group(columns: ["scrutiny_uuid", "_field"])
|
||||||
|> aggregateWindow(every: aggWindow, fn: last, createEmpty: false)
|
|> aggregateWindow(every: aggWindow, fn: last, createEmpty: false)
|
||||||
|> to(bucket: destBucket, org: destOrg)
|
|> to(bucket: destBucket, org: destOrg)
|
||||||
|
|
||||||
from(bucket: sourceBucket)
|
from(bucket: sourceBucket)
|
||||||
|> range(start: rangeStart, stop: rangeEnd)
|
|> range(start: rangeStart, stop: rangeEnd)
|
||||||
|> filter(fn: (r) => r["_measurement"] == "temp")
|
|> filter(fn: (r) => r["_measurement"] == "temp")
|
||||||
|> group(columns: ["device_wwn"])
|
|> group(columns: ["scrutiny_uuid"])
|
||||||
|> toInt()
|
|> toInt()
|
||||||
|> aggregateWindow(fn: mean, every: aggWindow, createEmpty: false)
|
|> aggregateWindow(fn: mean, every: aggWindow, createEmpty: false)
|
||||||
|> set(key: "_measurement", value: "temp")
|
|> set(key: "_measurement", value: "temp")
|
||||||
|> set(key: "_field", value: "temp")
|
|> set(key: "_field", value: "temp")
|
||||||
|> to(bucket: destBucket, org: destOrg)
|
|> to(bucket: destBucket, org: destOrg)`, influxDbScript)
|
||||||
`, influxDbScript)
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -8,13 +8,14 @@ import (
|
|||||||
|
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/models/measurements"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models/measurements"
|
||||||
|
"github.com/gofrs/uuid/v5"
|
||||||
influxdb2 "github.com/influxdata/influxdb-client-go/v2"
|
influxdb2 "github.com/influxdata/influxdb-client-go/v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
// //////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
// //////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
// Temperature Data
|
// Temperature Data
|
||||||
// //////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
// //////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
func (sr *scrutinyRepository) SaveSmartTemperature(ctx context.Context, wwn string, deviceProtocol string, collectorSmartData collector.SmartInfo, discardSCTTempHistory bool) error {
|
func (sr *scrutinyRepository) SaveSmartTemperature(ctx context.Context, scrutiny_uuid uuid.UUID, deviceProtocol string, collectorSmartData collector.SmartInfo, discardSCTTempHistory bool) error {
|
||||||
if len(collectorSmartData.AtaSctTemperatureHistory.Table) > 0 && !discardSCTTempHistory {
|
if len(collectorSmartData.AtaSctTemperatureHistory.Table) > 0 && !discardSCTTempHistory {
|
||||||
|
|
||||||
for ndx, temp := range collectorSmartData.AtaSctTemperatureHistory.Table {
|
for ndx, temp := range collectorSmartData.AtaSctTemperatureHistory.Table {
|
||||||
@@ -24,15 +25,15 @@ func (sr *scrutinyRepository) SaveSmartTemperature(ctx context.Context, wwn stri
|
|||||||
}
|
}
|
||||||
|
|
||||||
intervalSec := collectorSmartData.AtaSctTemperatureHistory.LoggingIntervalMinutes * 60
|
intervalSec := collectorSmartData.AtaSctTemperatureHistory.LoggingIntervalMinutes * 60
|
||||||
datapointTime := collectorSmartData.LocalTime.TimeT - int64(ndx) * intervalSec
|
datapointTime := collectorSmartData.LocalTime.TimeT - int64(ndx)*intervalSec
|
||||||
alignedDatapointTime := datapointTime - datapointTime % intervalSec
|
alignedDatapointTime := datapointTime - datapointTime%intervalSec
|
||||||
smartTemp := measurements.SmartTemperature{
|
smartTemp := measurements.SmartTemperature{
|
||||||
Date: time.Unix(alignedDatapointTime, 0),
|
Date: time.Unix(alignedDatapointTime, 0),
|
||||||
Temp: temp,
|
Temp: temp,
|
||||||
}
|
}
|
||||||
|
|
||||||
tags, fields := smartTemp.Flatten()
|
tags, fields := smartTemp.Flatten()
|
||||||
tags["device_wwn"] = wwn
|
tags["scrutiny_uuid"] = scrutiny_uuid.String()
|
||||||
p := influxdb2.NewPoint("temp",
|
p := influxdb2.NewPoint("temp",
|
||||||
tags,
|
tags,
|
||||||
fields,
|
fields,
|
||||||
@@ -44,7 +45,6 @@ func (sr *scrutinyRepository) SaveSmartTemperature(ctx context.Context, wwn stri
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
// Even if ata_sct_temperature_history is present, also add current temperature. See #824
|
// Even if ata_sct_temperature_history is present, also add current temperature. See #824
|
||||||
smartTemp := measurements.SmartTemperature{
|
smartTemp := measurements.SmartTemperature{
|
||||||
Date: time.Unix(collectorSmartData.LocalTime.TimeT, 0),
|
Date: time.Unix(collectorSmartData.LocalTime.TimeT, 0),
|
||||||
@@ -52,7 +52,7 @@ func (sr *scrutinyRepository) SaveSmartTemperature(ctx context.Context, wwn stri
|
|||||||
}
|
}
|
||||||
|
|
||||||
tags, fields := smartTemp.Flatten()
|
tags, fields := smartTemp.Flatten()
|
||||||
tags["device_wwn"] = wwn
|
tags["scrutiny_uuid"] = scrutiny_uuid.String()
|
||||||
p := influxdb2.NewPoint("temp",
|
p := influxdb2.NewPoint("temp",
|
||||||
tags,
|
tags,
|
||||||
fields,
|
fields,
|
||||||
@@ -60,10 +60,10 @@ func (sr *scrutinyRepository) SaveSmartTemperature(ctx context.Context, wwn stri
|
|||||||
return sr.influxWriteApi.WritePoint(ctx, p)
|
return sr.influxWriteApi.WritePoint(ctx, p)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sr *scrutinyRepository) GetSmartTemperatureHistory(ctx context.Context, durationKey string) (map[string][]measurements.SmartTemperature, error) {
|
func (sr *scrutinyRepository) GetSmartTemperatureHistory(ctx context.Context, durationKey string) (map[uuid.UUID][]measurements.SmartTemperature, error) {
|
||||||
//we can get temp history for "week", "month", DURATION_KEY_YEAR, "forever"
|
//we can get temp history for "week", "month", DURATION_KEY_YEAR, "forever"
|
||||||
|
|
||||||
deviceTempHistory := map[string][]measurements.SmartTemperature{}
|
deviceTempHistory := map[uuid.UUID][]measurements.SmartTemperature{}
|
||||||
|
|
||||||
//TODO: change the query range to a variable.
|
//TODO: change the query range to a variable.
|
||||||
queryStr := sr.aggregateTempQuery(durationKey)
|
queryStr := sr.aggregateTempQuery(durationKey)
|
||||||
@@ -73,14 +73,15 @@ func (sr *scrutinyRepository) GetSmartTemperatureHistory(ctx context.Context, du
|
|||||||
// Use Next() to iterate over query result lines
|
// Use Next() to iterate over query result lines
|
||||||
for result.Next() {
|
for result.Next() {
|
||||||
|
|
||||||
if deviceWWN, ok := result.Record().Values()["device_wwn"]; ok {
|
if scrutinyUUIDString, ok := result.Record().Values()["scrutiny_uuid"]; ok {
|
||||||
|
scrutinyUUID := uuid.Must(uuid.FromString(scrutinyUUIDString.(string)))
|
||||||
|
|
||||||
//check if deviceWWN has been seen and initialized already
|
//check if scrutinyUUID has been seen and initialized already
|
||||||
if _, ok := deviceTempHistory[deviceWWN.(string)]; !ok {
|
if _, ok := deviceTempHistory[scrutinyUUID]; !ok {
|
||||||
deviceTempHistory[deviceWWN.(string)] = []measurements.SmartTemperature{}
|
deviceTempHistory[scrutinyUUID] = []measurements.SmartTemperature{}
|
||||||
}
|
}
|
||||||
|
|
||||||
currentTempHistory := deviceTempHistory[deviceWWN.(string)]
|
currentTempHistory := deviceTempHistory[scrutinyUUID]
|
||||||
smartTemp := measurements.SmartTemperature{}
|
smartTemp := measurements.SmartTemperature{}
|
||||||
|
|
||||||
for key, val := range result.Record().Values() {
|
for key, val := range result.Record().Values() {
|
||||||
@@ -88,7 +89,7 @@ func (sr *scrutinyRepository) GetSmartTemperatureHistory(ctx context.Context, du
|
|||||||
}
|
}
|
||||||
smartTemp.Date = result.Record().Values()["_time"].(time.Time)
|
smartTemp.Date = result.Record().Values()["_time"].(time.Time)
|
||||||
currentTempHistory = append(currentTempHistory, smartTemp)
|
currentTempHistory = append(currentTempHistory, smartTemp)
|
||||||
deviceTempHistory[deviceWWN.(string)] = currentTempHistory
|
deviceTempHistory[scrutinyUUID] = currentTempHistory
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if result.Err() != nil {
|
if result.Err() != nil {
|
||||||
@@ -113,18 +114,18 @@ func (sr *scrutinyRepository) aggregateTempQuery(durationKey string) string {
|
|||||||
|> range(start: -1w, stop: now())
|
|> range(start: -1w, stop: now())
|
||||||
|> filter(fn: (r) => r["_measurement"] == "temp" )
|
|> filter(fn: (r) => r["_measurement"] == "temp" )
|
||||||
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|
||||||
|> group(columns: ["device_wwn"])
|
|> group(columns: ["scrutiny_uuid"])
|
||||||
|> toInt()
|
|> toInt()
|
||||||
|
|
||||||
monthData = from(bucket: "metrics_weekly")
|
monthData = from(bucket: "metrics_weekly")
|
||||||
|> range(start: -1mo, stop: now())
|
|> range(start: -1mo, stop: now())
|
||||||
|> filter(fn: (r) => r["_measurement"] == "temp" )
|
|> filter(fn: (r) => r["_measurement"] == "temp" )
|
||||||
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|
||||||
|> group(columns: ["device_wwn"])
|
|> group(columns: ["scrutiny_uuid"])
|
||||||
|> toInt()
|
|> toInt()
|
||||||
|
|
||||||
union(tables: [weekData, monthData])
|
union(tables: [weekData, monthData])
|
||||||
|> group(columns: ["device_wwn"])
|
|> group(columns: ["scrutiny_uuid"])
|
||||||
|> sort(columns: ["_time"], desc: false)
|
|> sort(columns: ["_time"], desc: false)
|
||||||
|> schema.fieldsAsCols()
|
|> schema.fieldsAsCols()
|
||||||
|
|
||||||
@@ -140,14 +141,15 @@ func (sr *scrutinyRepository) aggregateTempQuery(durationKey string) string {
|
|||||||
for _, nestedDurationKey := range nestedDurationKeys {
|
for _, nestedDurationKey := range nestedDurationKeys {
|
||||||
bucketName := sr.lookupBucketName(nestedDurationKey)
|
bucketName := sr.lookupBucketName(nestedDurationKey)
|
||||||
durationRange := sr.lookupDuration(nestedDurationKey)
|
durationRange := sr.lookupDuration(nestedDurationKey)
|
||||||
|
durationResolution := sr.lookupResolution(nestedDurationKey)
|
||||||
|
|
||||||
subQueryNames = append(subQueryNames, fmt.Sprintf(`%sData`, nestedDurationKey))
|
subQueryNames = append(subQueryNames, fmt.Sprintf(`%sData`, nestedDurationKey))
|
||||||
partialQueryStr = append(partialQueryStr, []string{
|
partialQueryStr = append(partialQueryStr, []string{
|
||||||
fmt.Sprintf(`%sData = from(bucket: "%s")`, nestedDurationKey, bucketName),
|
fmt.Sprintf(`%sData = from(bucket: "%s")`, nestedDurationKey, bucketName),
|
||||||
fmt.Sprintf(`|> range(start: %s, stop: %s)`, durationRange[0], durationRange[1]),
|
fmt.Sprintf(`|> range(start: %s, stop: %s)`, durationRange[0], durationRange[1]),
|
||||||
`|> filter(fn: (r) => r["_measurement"] == "temp" )`,
|
`|> filter(fn: (r) => r["_measurement"] == "temp" )`,
|
||||||
`|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)`,
|
fmt.Sprintf(`|> aggregateWindow(every: %s, fn: mean, createEmpty: false)`, durationResolution),
|
||||||
`|> group(columns: ["device_wwn"])`,
|
`|> group(columns: ["scrutiny_uuid"])`,
|
||||||
`|> toInt()`,
|
`|> toInt()`,
|
||||||
"",
|
"",
|
||||||
}...)
|
}...)
|
||||||
@@ -163,7 +165,7 @@ func (sr *scrutinyRepository) aggregateTempQuery(durationKey string) string {
|
|||||||
} else {
|
} else {
|
||||||
partialQueryStr = append(partialQueryStr, []string{
|
partialQueryStr = append(partialQueryStr, []string{
|
||||||
fmt.Sprintf("union(tables: [%s])", strings.Join(subQueryNames, ", ")),
|
fmt.Sprintf("union(tables: [%s])", strings.Join(subQueryNames, ", ")),
|
||||||
`|> group(columns: ["device_wwn"])`,
|
`|> group(columns: ["scrutiny_uuid"])`,
|
||||||
`|> sort(columns: ["_time"], desc: false)`,
|
`|> sort(columns: ["_time"], desc: false)`,
|
||||||
"|> schema.fieldsAsCols()",
|
"|> schema.fieldsAsCols()",
|
||||||
}...)
|
}...)
|
||||||
|
|||||||
@@ -1,10 +1,11 @@
|
|||||||
package database
|
package database
|
||||||
|
|
||||||
import (
|
import (
|
||||||
mock_config "github.com/analogj/scrutiny/webapp/backend/pkg/config/mock"
|
|
||||||
"github.com/golang/mock/gomock"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
mock_config "github.com/analogj/scrutiny/webapp/backend/pkg/config/mock"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
"go.uber.org/mock/gomock"
|
||||||
)
|
)
|
||||||
|
|
||||||
func Test_aggregateTempQuery_Week(t *testing.T) {
|
func Test_aggregateTempQuery_Week(t *testing.T) {
|
||||||
@@ -12,7 +13,6 @@ func Test_aggregateTempQuery_Week(t *testing.T) {
|
|||||||
|
|
||||||
//setup
|
//setup
|
||||||
mockCtrl := gomock.NewController(t)
|
mockCtrl := gomock.NewController(t)
|
||||||
defer mockCtrl.Finish()
|
|
||||||
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
||||||
fakeConfig.EXPECT().GetString("web.influxdb.bucket").Return("metrics").AnyTimes()
|
fakeConfig.EXPECT().GetString("web.influxdb.bucket").Return("metrics").AnyTimes()
|
||||||
fakeConfig.EXPECT().GetString("web.influxdb.org").Return("scrutiny").AnyTimes()
|
fakeConfig.EXPECT().GetString("web.influxdb.org").Return("scrutiny").AnyTimes()
|
||||||
@@ -32,7 +32,7 @@ weekData = from(bucket: "metrics")
|
|||||||
|> range(start: -1w, stop: now())
|
|> range(start: -1w, stop: now())
|
||||||
|> filter(fn: (r) => r["_measurement"] == "temp" )
|
|> filter(fn: (r) => r["_measurement"] == "temp" )
|
||||||
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|
||||||
|> group(columns: ["device_wwn"])
|
|> group(columns: ["scrutiny_uuid"])
|
||||||
|> toInt()
|
|> toInt()
|
||||||
|
|
||||||
weekData
|
weekData
|
||||||
@@ -45,7 +45,6 @@ func Test_aggregateTempQuery_Month(t *testing.T) {
|
|||||||
|
|
||||||
//setup
|
//setup
|
||||||
mockCtrl := gomock.NewController(t)
|
mockCtrl := gomock.NewController(t)
|
||||||
defer mockCtrl.Finish()
|
|
||||||
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
||||||
fakeConfig.EXPECT().GetString("web.influxdb.bucket").Return("metrics").AnyTimes()
|
fakeConfig.EXPECT().GetString("web.influxdb.bucket").Return("metrics").AnyTimes()
|
||||||
fakeConfig.EXPECT().GetString("web.influxdb.org").Return("scrutiny").AnyTimes()
|
fakeConfig.EXPECT().GetString("web.influxdb.org").Return("scrutiny").AnyTimes()
|
||||||
@@ -65,18 +64,18 @@ weekData = from(bucket: "metrics")
|
|||||||
|> range(start: -1w, stop: now())
|
|> range(start: -1w, stop: now())
|
||||||
|> filter(fn: (r) => r["_measurement"] == "temp" )
|
|> filter(fn: (r) => r["_measurement"] == "temp" )
|
||||||
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|
||||||
|> group(columns: ["device_wwn"])
|
|> group(columns: ["scrutiny_uuid"])
|
||||||
|> toInt()
|
|> toInt()
|
||||||
|
|
||||||
monthData = from(bucket: "metrics_weekly")
|
monthData = from(bucket: "metrics_weekly")
|
||||||
|> range(start: -1mo, stop: -1w)
|
|> range(start: -1mo, stop: -1w)
|
||||||
|> filter(fn: (r) => r["_measurement"] == "temp" )
|
|> filter(fn: (r) => r["_measurement"] == "temp" )
|
||||||
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|
||||||
|> group(columns: ["device_wwn"])
|
|> group(columns: ["scrutiny_uuid"])
|
||||||
|> toInt()
|
|> toInt()
|
||||||
|
|
||||||
union(tables: [weekData, monthData])
|
union(tables: [weekData, monthData])
|
||||||
|> group(columns: ["device_wwn"])
|
|> group(columns: ["scrutiny_uuid"])
|
||||||
|> sort(columns: ["_time"], desc: false)
|
|> sort(columns: ["_time"], desc: false)
|
||||||
|> schema.fieldsAsCols()`, influxDbScript)
|
|> schema.fieldsAsCols()`, influxDbScript)
|
||||||
}
|
}
|
||||||
@@ -86,7 +85,6 @@ func Test_aggregateTempQuery_Year(t *testing.T) {
|
|||||||
|
|
||||||
//setup
|
//setup
|
||||||
mockCtrl := gomock.NewController(t)
|
mockCtrl := gomock.NewController(t)
|
||||||
defer mockCtrl.Finish()
|
|
||||||
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
||||||
fakeConfig.EXPECT().GetString("web.influxdb.bucket").Return("metrics").AnyTimes()
|
fakeConfig.EXPECT().GetString("web.influxdb.bucket").Return("metrics").AnyTimes()
|
||||||
fakeConfig.EXPECT().GetString("web.influxdb.org").Return("scrutiny").AnyTimes()
|
fakeConfig.EXPECT().GetString("web.influxdb.org").Return("scrutiny").AnyTimes()
|
||||||
@@ -106,25 +104,25 @@ weekData = from(bucket: "metrics")
|
|||||||
|> range(start: -1w, stop: now())
|
|> range(start: -1w, stop: now())
|
||||||
|> filter(fn: (r) => r["_measurement"] == "temp" )
|
|> filter(fn: (r) => r["_measurement"] == "temp" )
|
||||||
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|
||||||
|> group(columns: ["device_wwn"])
|
|> group(columns: ["scrutiny_uuid"])
|
||||||
|> toInt()
|
|> toInt()
|
||||||
|
|
||||||
monthData = from(bucket: "metrics_weekly")
|
monthData = from(bucket: "metrics_weekly")
|
||||||
|> range(start: -1mo, stop: -1w)
|
|> range(start: -1mo, stop: -1w)
|
||||||
|> filter(fn: (r) => r["_measurement"] == "temp" )
|
|> filter(fn: (r) => r["_measurement"] == "temp" )
|
||||||
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|
||||||
|> group(columns: ["device_wwn"])
|
|> group(columns: ["scrutiny_uuid"])
|
||||||
|> toInt()
|
|> toInt()
|
||||||
|
|
||||||
yearData = from(bucket: "metrics_monthly")
|
yearData = from(bucket: "metrics_monthly")
|
||||||
|> range(start: -1y, stop: -1mo)
|
|> range(start: -1y, stop: -1mo)
|
||||||
|> filter(fn: (r) => r["_measurement"] == "temp" )
|
|> filter(fn: (r) => r["_measurement"] == "temp" )
|
||||||
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|
||||||
|> group(columns: ["device_wwn"])
|
|> group(columns: ["scrutiny_uuid"])
|
||||||
|> toInt()
|
|> toInt()
|
||||||
|
|
||||||
union(tables: [weekData, monthData, yearData])
|
union(tables: [weekData, monthData, yearData])
|
||||||
|> group(columns: ["device_wwn"])
|
|> group(columns: ["scrutiny_uuid"])
|
||||||
|> sort(columns: ["_time"], desc: false)
|
|> sort(columns: ["_time"], desc: false)
|
||||||
|> schema.fieldsAsCols()`, influxDbScript)
|
|> schema.fieldsAsCols()`, influxDbScript)
|
||||||
}
|
}
|
||||||
@@ -134,7 +132,6 @@ func Test_aggregateTempQuery_Forever(t *testing.T) {
|
|||||||
|
|
||||||
//setup
|
//setup
|
||||||
mockCtrl := gomock.NewController(t)
|
mockCtrl := gomock.NewController(t)
|
||||||
defer mockCtrl.Finish()
|
|
||||||
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
||||||
fakeConfig.EXPECT().GetString("web.influxdb.bucket").Return("metrics").AnyTimes()
|
fakeConfig.EXPECT().GetString("web.influxdb.bucket").Return("metrics").AnyTimes()
|
||||||
fakeConfig.EXPECT().GetString("web.influxdb.org").Return("scrutiny").AnyTimes()
|
fakeConfig.EXPECT().GetString("web.influxdb.org").Return("scrutiny").AnyTimes()
|
||||||
@@ -154,32 +151,32 @@ weekData = from(bucket: "metrics")
|
|||||||
|> range(start: -1w, stop: now())
|
|> range(start: -1w, stop: now())
|
||||||
|> filter(fn: (r) => r["_measurement"] == "temp" )
|
|> filter(fn: (r) => r["_measurement"] == "temp" )
|
||||||
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|
||||||
|> group(columns: ["device_wwn"])
|
|> group(columns: ["scrutiny_uuid"])
|
||||||
|> toInt()
|
|> toInt()
|
||||||
|
|
||||||
monthData = from(bucket: "metrics_weekly")
|
monthData = from(bucket: "metrics_weekly")
|
||||||
|> range(start: -1mo, stop: -1w)
|
|> range(start: -1mo, stop: -1w)
|
||||||
|> filter(fn: (r) => r["_measurement"] == "temp" )
|
|> filter(fn: (r) => r["_measurement"] == "temp" )
|
||||||
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|
||||||
|> group(columns: ["device_wwn"])
|
|> group(columns: ["scrutiny_uuid"])
|
||||||
|> toInt()
|
|> toInt()
|
||||||
|
|
||||||
yearData = from(bucket: "metrics_monthly")
|
yearData = from(bucket: "metrics_monthly")
|
||||||
|> range(start: -1y, stop: -1mo)
|
|> range(start: -1y, stop: -1mo)
|
||||||
|> filter(fn: (r) => r["_measurement"] == "temp" )
|
|> filter(fn: (r) => r["_measurement"] == "temp" )
|
||||||
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|
||||||
|> group(columns: ["device_wwn"])
|
|> group(columns: ["scrutiny_uuid"])
|
||||||
|> toInt()
|
|> toInt()
|
||||||
|
|
||||||
foreverData = from(bucket: "metrics_yearly")
|
foreverData = from(bucket: "metrics_yearly")
|
||||||
|> range(start: -10y, stop: -1y)
|
|> range(start: -10y, stop: -1y)
|
||||||
|> filter(fn: (r) => r["_measurement"] == "temp" )
|
|> filter(fn: (r) => r["_measurement"] == "temp" )
|
||||||
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|
||||||
|> group(columns: ["device_wwn"])
|
|> group(columns: ["scrutiny_uuid"])
|
||||||
|> toInt()
|
|> toInt()
|
||||||
|
|
||||||
union(tables: [weekData, monthData, yearData, foreverData])
|
union(tables: [weekData, monthData, yearData, foreverData])
|
||||||
|> group(columns: ["device_wwn"])
|
|> group(columns: ["scrutiny_uuid"])
|
||||||
|> sort(columns: ["_time"], desc: false)
|
|> sort(columns: ["_time"], desc: false)
|
||||||
|> schema.fieldsAsCols()`, influxDbScript)
|
|> schema.fieldsAsCols()`, influxDbScript)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -143,21 +143,21 @@ type SmartInfo struct {
|
|||||||
ErrorNumber int `json:"error_number"`
|
ErrorNumber int `json:"error_number"`
|
||||||
LifetimeHours int `json:"lifetime_hours"`
|
LifetimeHours int `json:"lifetime_hours"`
|
||||||
CompletionRegisters struct {
|
CompletionRegisters struct {
|
||||||
Error int `json:"error"`
|
Error int `json:"error"`
|
||||||
Status int `json:"status"`
|
Status int `json:"status"`
|
||||||
Count int `json:"count"`
|
Count int `json:"count"`
|
||||||
Lba int `json:"lba"`
|
Lba uint64 `json:"lba"`
|
||||||
Device int `json:"device"`
|
Device int `json:"device"`
|
||||||
} `json:"completion_registers"`
|
} `json:"completion_registers"`
|
||||||
ErrorDescription string `json:"error_description"`
|
ErrorDescription string `json:"error_description"`
|
||||||
PreviousCommands []struct {
|
PreviousCommands []struct {
|
||||||
Registers struct {
|
Registers struct {
|
||||||
Command int `json:"command"`
|
Command int `json:"command"`
|
||||||
Features int `json:"features"`
|
Features int `json:"features"`
|
||||||
Count int `json:"count"`
|
Count int `json:"count"`
|
||||||
Lba int `json:"lba"`
|
Lba uint64 `json:"lba"`
|
||||||
Device int `json:"device"`
|
Device int `json:"device"`
|
||||||
DeviceControl int `json:"device_control"`
|
DeviceControl int `json:"device_control"`
|
||||||
} `json:"registers"`
|
} `json:"registers"`
|
||||||
PowerupMilliseconds int `json:"powerup_milliseconds"`
|
PowerupMilliseconds int `json:"powerup_milliseconds"`
|
||||||
CommandName string `json:"command_name"`
|
CommandName string `json:"command_name"`
|
||||||
@@ -188,8 +188,8 @@ type SmartInfo struct {
|
|||||||
AtaSmartSelectiveSelfTestLog struct {
|
AtaSmartSelectiveSelfTestLog struct {
|
||||||
Revision int `json:"revision"`
|
Revision int `json:"revision"`
|
||||||
Table []struct {
|
Table []struct {
|
||||||
LbaMin int `json:"lba_min"`
|
LbaMin uint64 `json:"lba_min"`
|
||||||
LbaMax int `json:"lba_max"`
|
LbaMax uint64 `json:"lba_max"`
|
||||||
Status struct {
|
Status struct {
|
||||||
Value int `json:"value"`
|
Value int `json:"value"`
|
||||||
String string `json:"string"`
|
String string `json:"string"`
|
||||||
@@ -207,10 +207,10 @@ type SmartInfo struct {
|
|||||||
ID int `json:"id"`
|
ID int `json:"id"`
|
||||||
SubsystemID int `json:"subsystem_id"`
|
SubsystemID int `json:"subsystem_id"`
|
||||||
} `json:"nvme_pci_vendor"`
|
} `json:"nvme_pci_vendor"`
|
||||||
NvmeIeeeOuiIdentifier int `json:"nvme_ieee_oui_identifier"`
|
NvmeIeeeOuiIdentifier uint32 `json:"nvme_ieee_oui_identifier"`
|
||||||
NvmeTotalCapacity int64 `json:"nvme_total_capacity"`
|
NvmeTotalCapacity int64 `json:"nvme_total_capacity"`
|
||||||
NvmeControllerID int `json:"nvme_controller_id"`
|
NvmeControllerID int `json:"nvme_controller_id"`
|
||||||
NvmeNumberOfNamespaces int `json:"nvme_number_of_namespaces"`
|
NvmeNumberOfNamespaces int `json:"nvme_number_of_namespaces"`
|
||||||
NvmeNamespaces []struct {
|
NvmeNamespaces []struct {
|
||||||
ID int `json:"id"`
|
ID int `json:"id"`
|
||||||
Size struct {
|
Size struct {
|
||||||
@@ -226,6 +226,10 @@ type SmartInfo struct {
|
|||||||
Bytes int64 `json:"bytes"`
|
Bytes int64 `json:"bytes"`
|
||||||
} `json:"utilization"`
|
} `json:"utilization"`
|
||||||
FormattedLbaSize int `json:"formatted_lba_size"`
|
FormattedLbaSize int `json:"formatted_lba_size"`
|
||||||
|
Eui64 struct {
|
||||||
|
Oui uint32 `json:"oui"`
|
||||||
|
ExtId uint64 `json:"ext_id"`
|
||||||
|
} `json:"eui64"`
|
||||||
} `json:"nvme_namespaces"`
|
} `json:"nvme_namespaces"`
|
||||||
NvmeSmartHealthInformationLog NvmeSmartHealthInformationLog `json:"nvme_smart_health_information_log"`
|
NvmeSmartHealthInformationLog NvmeSmartHealthInformationLog `json:"nvme_smart_health_information_log"`
|
||||||
|
|
||||||
|
|||||||
@@ -1,9 +1,11 @@
|
|||||||
package models
|
package models
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg"
|
"github.com/analogj/scrutiny/webapp/backend/pkg"
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
|
||||||
"time"
|
"github.com/gofrs/uuid/v5"
|
||||||
)
|
)
|
||||||
|
|
||||||
type DeviceWrapper struct {
|
type DeviceWrapper struct {
|
||||||
@@ -19,7 +21,7 @@ type Device struct {
|
|||||||
UpdatedAt time.Time
|
UpdatedAt time.Time
|
||||||
DeletedAt *time.Time
|
DeletedAt *time.Time
|
||||||
|
|
||||||
WWN string `json:"wwn" gorm:"primary_key"`
|
WWN string `json:"wwn"`
|
||||||
|
|
||||||
DeviceName string `json:"device_name"`
|
DeviceName string `json:"device_name"`
|
||||||
DeviceUUID string `json:"device_uuid"`
|
DeviceUUID string `json:"device_uuid"`
|
||||||
@@ -45,6 +47,7 @@ type Device struct {
|
|||||||
|
|
||||||
// Data set by Scrutiny
|
// Data set by Scrutiny
|
||||||
DeviceStatus pkg.DeviceStatus `json:"device_status"`
|
DeviceStatus pkg.DeviceStatus `json:"device_status"`
|
||||||
|
ScrutinyUUID uuid.UUID `json:"scrutiny_uuid" gorm:"primaryKey;uniqueIndex"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (dv *Device) IsAta() bool {
|
func (dv *Device) IsAta() bool {
|
||||||
|
|||||||
@@ -1,10 +1,12 @@
|
|||||||
package models
|
package models
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/models/measurements"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models/measurements"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// This is used in server_test.go
|
||||||
type DeviceSummaryWrapper struct {
|
type DeviceSummaryWrapper struct {
|
||||||
Success bool `json:"success"`
|
Success bool `json:"success"`
|
||||||
Errors []error `json:"errors"`
|
Errors []error `json:"errors"`
|
||||||
|
|||||||
@@ -2,18 +2,21 @@ package measurements
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg"
|
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
|
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/thresholds"
|
|
||||||
"log"
|
"log"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg"
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg/thresholds"
|
||||||
|
"github.com/gofrs/uuid/v5"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Smart struct {
|
type Smart struct {
|
||||||
Date time.Time `json:"date"`
|
Date time.Time `json:"date"`
|
||||||
DeviceWWN string `json:"device_wwn"` //(tag)
|
DeviceWWN string `json:"device_wwn` // deprecated
|
||||||
|
ScrutinyUUID uuid.UUID `json:"scrutiny_uuid"` //(tag)
|
||||||
DeviceProtocol string `json:"device_protocol"`
|
DeviceProtocol string `json:"device_protocol"`
|
||||||
|
|
||||||
//Metrics (fields)
|
//Metrics (fields)
|
||||||
@@ -30,7 +33,7 @@ type Smart struct {
|
|||||||
|
|
||||||
func (sm *Smart) Flatten() (tags map[string]string, fields map[string]interface{}) {
|
func (sm *Smart) Flatten() (tags map[string]string, fields map[string]interface{}) {
|
||||||
tags = map[string]string{
|
tags = map[string]string{
|
||||||
"device_wwn": sm.DeviceWWN,
|
"scrutiny_uuid": sm.ScrutinyUUID.String(),
|
||||||
"device_protocol": sm.DeviceProtocol,
|
"device_protocol": sm.DeviceProtocol,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -52,10 +55,15 @@ func (sm *Smart) Flatten() (tags map[string]string, fields map[string]interface{
|
|||||||
func NewSmartFromInfluxDB(attrs map[string]interface{}) (*Smart, error) {
|
func NewSmartFromInfluxDB(attrs map[string]interface{}) (*Smart, error) {
|
||||||
//go though the massive map returned from influxdb. If a key is associated with the Smart struct, assign it. If it starts with "attr.*" group it by attributeId, and pass to attribute inflate.
|
//go though the massive map returned from influxdb. If a key is associated with the Smart struct, assign it. If it starts with "attr.*" group it by attributeId, and pass to attribute inflate.
|
||||||
|
|
||||||
|
scrutiny_uuid, err := uuid.FromString(attrs["scrutiny_uuid"].(string))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
sm := Smart{
|
sm := Smart{
|
||||||
//required fields
|
//required fields
|
||||||
Date: attrs["_time"].(time.Time),
|
Date: attrs["_time"].(time.Time),
|
||||||
DeviceWWN: attrs["device_wwn"].(string),
|
ScrutinyUUID: scrutiny_uuid,
|
||||||
DeviceProtocol: attrs["device_protocol"].(string),
|
DeviceProtocol: attrs["device_protocol"].(string),
|
||||||
|
|
||||||
Attributes: map[string]SmartAttribute{},
|
Attributes: map[string]SmartAttribute{},
|
||||||
@@ -102,7 +110,7 @@ func NewSmartFromInfluxDB(attrs map[string]interface{}) (*Smart, error) {
|
|||||||
} else if sm.DeviceProtocol == pkg.DeviceProtocolScsi {
|
} else if sm.DeviceProtocol == pkg.DeviceProtocolScsi {
|
||||||
sm.Attributes[attributeId] = &SmartScsiAttribute{}
|
sm.Attributes[attributeId] = &SmartScsiAttribute{}
|
||||||
} else {
|
} else {
|
||||||
return nil, fmt.Errorf("Unknown Device Protocol: %s", sm.DeviceProtocol)
|
return nil, fmt.Errorf("unknown Device Protocol: %s", sm.DeviceProtocol)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -111,14 +119,14 @@ func NewSmartFromInfluxDB(attrs map[string]interface{}) (*Smart, error) {
|
|||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Printf("Found Smart Device (%s) Attributes (%v)", sm.DeviceWWN, len(sm.Attributes))
|
log.Printf("Found Smart Device (%s) Attributes (%v)", sm.ScrutinyUUID, len(sm.Attributes))
|
||||||
|
|
||||||
return &sm, nil
|
return &sm, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
//Parse Collector SMART data results and create Smart object (and associated SmartAtaAttribute entries)
|
// Parse Collector SMART data results and create Smart object (and associated SmartAtaAttribute entries)
|
||||||
func (sm *Smart) FromCollectorSmartInfo(wwn string, info collector.SmartInfo) error {
|
func (sm *Smart) FromCollectorSmartInfo(scrutiny_uuid uuid.UUID, info collector.SmartInfo) error {
|
||||||
sm.DeviceWWN = wwn
|
sm.ScrutinyUUID = scrutiny_uuid
|
||||||
sm.Date = time.Unix(info.LocalTime.TimeT, 0)
|
sm.Date = time.Unix(info.LocalTime.TimeT, 0)
|
||||||
|
|
||||||
//smart metrics
|
//smart metrics
|
||||||
@@ -132,18 +140,19 @@ func (sm *Smart) FromCollectorSmartInfo(wwn string, info collector.SmartInfo) er
|
|||||||
sm.DeviceProtocol = info.Device.Protocol
|
sm.DeviceProtocol = info.Device.Protocol
|
||||||
// process ATA/NVME/SCSI protocol data
|
// process ATA/NVME/SCSI protocol data
|
||||||
sm.Attributes = map[string]SmartAttribute{}
|
sm.Attributes = map[string]SmartAttribute{}
|
||||||
if sm.DeviceProtocol == pkg.DeviceProtocolAta {
|
switch sm.DeviceProtocol {
|
||||||
|
case pkg.DeviceProtocolAta:
|
||||||
sm.ProcessAtaSmartInfo(info.AtaSmartAttributes.Table)
|
sm.ProcessAtaSmartInfo(info.AtaSmartAttributes.Table)
|
||||||
} else if sm.DeviceProtocol == pkg.DeviceProtocolNvme {
|
case pkg.DeviceProtocolNvme:
|
||||||
sm.ProcessNvmeSmartInfo(info.NvmeSmartHealthInformationLog)
|
sm.ProcessNvmeSmartInfo(info.NvmeSmartHealthInformationLog)
|
||||||
} else if sm.DeviceProtocol == pkg.DeviceProtocolScsi {
|
case pkg.DeviceProtocolScsi:
|
||||||
sm.ProcessScsiSmartInfo(info.ScsiGrownDefectList, info.ScsiErrorCounterLog)
|
sm.ProcessScsiSmartInfo(info.ScsiGrownDefectList, info.ScsiErrorCounterLog)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
//generate SmartAtaAttribute entries from Scrutiny Collector Smart data.
|
// generate SmartAtaAttribute entries from Scrutiny Collector Smart data.
|
||||||
func (sm *Smart) ProcessAtaSmartInfo(tableItems []collector.AtaSmartAttributesTableItem) {
|
func (sm *Smart) ProcessAtaSmartInfo(tableItems []collector.AtaSmartAttributesTableItem) {
|
||||||
for _, collectorAttr := range tableItems {
|
for _, collectorAttr := range tableItems {
|
||||||
attrModel := SmartAtaAttribute{
|
attrModel := SmartAtaAttribute{
|
||||||
@@ -171,7 +180,7 @@ func (sm *Smart) ProcessAtaSmartInfo(tableItems []collector.AtaSmartAttributesTa
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
//generate SmartNvmeAttribute entries from Scrutiny Collector Smart data.
|
// generate SmartNvmeAttribute entries from Scrutiny Collector Smart data.
|
||||||
func (sm *Smart) ProcessNvmeSmartInfo(nvmeSmartHealthInformationLog collector.NvmeSmartHealthInformationLog) {
|
func (sm *Smart) ProcessNvmeSmartInfo(nvmeSmartHealthInformationLog collector.NvmeSmartHealthInformationLog) {
|
||||||
|
|
||||||
sm.Attributes = map[string]SmartAttribute{
|
sm.Attributes = map[string]SmartAttribute{
|
||||||
@@ -201,7 +210,7 @@ func (sm *Smart) ProcessNvmeSmartInfo(nvmeSmartHealthInformationLog collector.Nv
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
//generate SmartScsiAttribute entries from Scrutiny Collector Smart data.
|
// generate SmartScsiAttribute entries from Scrutiny Collector Smart data.
|
||||||
func (sm *Smart) ProcessScsiSmartInfo(defectGrownList int64, scsiErrorCounterLog collector.ScsiErrorCounterLog) {
|
func (sm *Smart) ProcessScsiSmartInfo(defectGrownList int64, scsiErrorCounterLog collector.ScsiErrorCounterLog) {
|
||||||
sm.Attributes = map[string]SmartAttribute{
|
sm.Attributes = map[string]SmartAttribute{
|
||||||
"scsi_grown_defect_list": (&SmartScsiAttribute{AttributeId: "scsi_grown_defect_list", Value: defectGrownList, Threshold: 0}).PopulateAttributeStatus(),
|
"scsi_grown_defect_list": (&SmartScsiAttribute{AttributeId: "scsi_grown_defect_list", Value: defectGrownList, Threshold: 0}).PopulateAttributeStatus(),
|
||||||
|
|||||||
@@ -91,7 +91,7 @@ func (sa *SmartAtaAttribute) Inflate(key string, val interface{}) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
//populate attribute status, using SMART Thresholds & Observed Metadata
|
// populate attribute status, using SMART Thresholds & Observed Metadata
|
||||||
// Chainable
|
// Chainable
|
||||||
func (sa *SmartAtaAttribute) PopulateAttributeStatus() *SmartAtaAttribute {
|
func (sa *SmartAtaAttribute) PopulateAttributeStatus() *SmartAtaAttribute {
|
||||||
if strings.ToUpper(sa.WhenFailed) == pkg.AttributeWhenFailedFailingNow {
|
if strings.ToUpper(sa.WhenFailed) == pkg.AttributeWhenFailedFailingNow {
|
||||||
@@ -165,6 +165,4 @@ func (sa *SmartAtaAttribute) ValidateThreshold(smartMetadata thresholds.AtaAttri
|
|||||||
sa.Status = pkg.AttributeStatusSet(sa.Status, pkg.AttributeStatusWarningScrutiny)
|
sa.Status = pkg.AttributeStatusSet(sa.Status, pkg.AttributeStatusWarningScrutiny)
|
||||||
sa.StatusReason = "Could not determine Observed Failure Rate for Critical Attribute"
|
sa.StatusReason = "Could not determine Observed Failure Rate for Critical Attribute"
|
||||||
}
|
}
|
||||||
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -67,7 +67,7 @@ func (sa *SmartNvmeAttribute) Inflate(key string, val interface{}) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
//populate attribute status, using SMART Thresholds & Observed Metadata
|
// populate attribute status, using SMART Thresholds & Observed Metadata
|
||||||
// Chainable
|
// Chainable
|
||||||
func (sa *SmartNvmeAttribute) PopulateAttributeStatus() *SmartNvmeAttribute {
|
func (sa *SmartNvmeAttribute) PopulateAttributeStatus() *SmartNvmeAttribute {
|
||||||
|
|
||||||
|
|||||||
@@ -67,9 +67,8 @@ func (sa *SmartScsiAttribute) Inflate(key string, val interface{}) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
//
|
// populate attribute status, using SMART Thresholds & Observed Metadata
|
||||||
//populate attribute status, using SMART Thresholds & Observed Metadata
|
// Chainable
|
||||||
//Chainable
|
|
||||||
func (sa *SmartScsiAttribute) PopulateAttributeStatus() *SmartScsiAttribute {
|
func (sa *SmartScsiAttribute) PopulateAttributeStatus() *SmartScsiAttribute {
|
||||||
|
|
||||||
//-1 is a special number meaning no threshold.
|
//-1 is a special number meaning no threshold.
|
||||||
|
|||||||
@@ -2,22 +2,25 @@ package measurements_test
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg"
|
"io"
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
|
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/models/measurements"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg"
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
|
||||||
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models/measurements"
|
||||||
|
"github.com/gofrs/uuid/v5"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestSmart_Flatten(t *testing.T) {
|
func TestSmart_Flatten(t *testing.T) {
|
||||||
//setup
|
//setup
|
||||||
timeNow := time.Now()
|
timeNow := time.Now()
|
||||||
|
smartUUID := uuid.Must(uuid.NewV4())
|
||||||
smart := measurements.Smart{
|
smart := measurements.Smart{
|
||||||
Date: timeNow,
|
Date: timeNow,
|
||||||
DeviceWWN: "test-wwn",
|
ScrutinyUUID: smartUUID,
|
||||||
DeviceProtocol: pkg.DeviceProtocolAta,
|
DeviceProtocol: pkg.DeviceProtocolAta,
|
||||||
Temp: 50,
|
Temp: 50,
|
||||||
PowerOnHours: 10,
|
PowerOnHours: 10,
|
||||||
@@ -30,16 +33,17 @@ func TestSmart_Flatten(t *testing.T) {
|
|||||||
tags, fields := smart.Flatten()
|
tags, fields := smart.Flatten()
|
||||||
|
|
||||||
//assert
|
//assert
|
||||||
require.Equal(t, map[string]string{"device_protocol": "ATA", "device_wwn": "test-wwn"}, tags)
|
require.Equal(t, map[string]string{"device_protocol": "ATA", "scrutiny_uuid": smartUUID.String()}, tags)
|
||||||
require.Equal(t, map[string]interface{}{"power_cycle_count": int64(10), "power_on_hours": int64(10), "temp": int64(50)}, fields)
|
require.Equal(t, map[string]interface{}{"power_cycle_count": int64(10), "power_on_hours": int64(10), "temp": int64(50)}, fields)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSmart_Flatten_ATA(t *testing.T) {
|
func TestSmart_Flatten_ATA(t *testing.T) {
|
||||||
//setup
|
//setup
|
||||||
timeNow := time.Now()
|
timeNow := time.Now()
|
||||||
|
smartUUID := uuid.Must(uuid.NewV4())
|
||||||
smart := measurements.Smart{
|
smart := measurements.Smart{
|
||||||
Date: timeNow,
|
Date: timeNow,
|
||||||
DeviceWWN: "test-wwn",
|
ScrutinyUUID: smartUUID,
|
||||||
DeviceProtocol: pkg.DeviceProtocolAta,
|
DeviceProtocol: pkg.DeviceProtocolAta,
|
||||||
Temp: 50,
|
Temp: 50,
|
||||||
PowerOnHours: 10,
|
PowerOnHours: 10,
|
||||||
@@ -71,7 +75,7 @@ func TestSmart_Flatten_ATA(t *testing.T) {
|
|||||||
tags, fields := smart.Flatten()
|
tags, fields := smart.Flatten()
|
||||||
|
|
||||||
//assert
|
//assert
|
||||||
require.Equal(t, map[string]string{"device_protocol": "ATA", "device_wwn": "test-wwn"}, tags)
|
require.Equal(t, map[string]string{"device_protocol": "ATA", "scrutiny_uuid": smartUUID.String()}, tags)
|
||||||
require.Equal(t, map[string]interface{}{
|
require.Equal(t, map[string]interface{}{
|
||||||
"attr.1.attribute_id": "1",
|
"attr.1.attribute_id": "1",
|
||||||
"attr.1.failure_rate": float64(0),
|
"attr.1.failure_rate": float64(0),
|
||||||
@@ -106,9 +110,10 @@ func TestSmart_Flatten_ATA(t *testing.T) {
|
|||||||
func TestSmart_Flatten_SCSI(t *testing.T) {
|
func TestSmart_Flatten_SCSI(t *testing.T) {
|
||||||
//setup
|
//setup
|
||||||
timeNow := time.Now()
|
timeNow := time.Now()
|
||||||
|
smartUUID := uuid.Must(uuid.NewV4())
|
||||||
smart := measurements.Smart{
|
smart := measurements.Smart{
|
||||||
Date: timeNow,
|
Date: timeNow,
|
||||||
DeviceWWN: "test-wwn",
|
ScrutinyUUID: smartUUID,
|
||||||
DeviceProtocol: pkg.DeviceProtocolScsi,
|
DeviceProtocol: pkg.DeviceProtocolScsi,
|
||||||
Temp: 50,
|
Temp: 50,
|
||||||
PowerOnHours: 10,
|
PowerOnHours: 10,
|
||||||
@@ -126,7 +131,7 @@ func TestSmart_Flatten_SCSI(t *testing.T) {
|
|||||||
tags, fields := smart.Flatten()
|
tags, fields := smart.Flatten()
|
||||||
|
|
||||||
//assert
|
//assert
|
||||||
require.Equal(t, map[string]string{"device_protocol": "SCSI", "device_wwn": "test-wwn"}, tags)
|
require.Equal(t, map[string]string{"device_protocol": "SCSI", "scrutiny_uuid": smartUUID.String()}, tags)
|
||||||
require.Equal(t, map[string]interface{}{
|
require.Equal(t, map[string]interface{}{
|
||||||
"attr.read_errors_corrected_by_eccfast.attribute_id": "read_errors_corrected_by_eccfast",
|
"attr.read_errors_corrected_by_eccfast.attribute_id": "read_errors_corrected_by_eccfast",
|
||||||
"attr.read_errors_corrected_by_eccfast.failure_rate": float64(0),
|
"attr.read_errors_corrected_by_eccfast.failure_rate": float64(0),
|
||||||
@@ -144,9 +149,10 @@ func TestSmart_Flatten_SCSI(t *testing.T) {
|
|||||||
func TestSmart_Flatten_NVMe(t *testing.T) {
|
func TestSmart_Flatten_NVMe(t *testing.T) {
|
||||||
//setup
|
//setup
|
||||||
timeNow := time.Now()
|
timeNow := time.Now()
|
||||||
|
smartUUID := uuid.Must(uuid.NewV4())
|
||||||
smart := measurements.Smart{
|
smart := measurements.Smart{
|
||||||
Date: timeNow,
|
Date: timeNow,
|
||||||
DeviceWWN: "test-wwn",
|
ScrutinyUUID: smartUUID,
|
||||||
DeviceProtocol: pkg.DeviceProtocolNvme,
|
DeviceProtocol: pkg.DeviceProtocolNvme,
|
||||||
Temp: 50,
|
Temp: 50,
|
||||||
PowerOnHours: 10,
|
PowerOnHours: 10,
|
||||||
@@ -164,7 +170,7 @@ func TestSmart_Flatten_NVMe(t *testing.T) {
|
|||||||
tags, fields := smart.Flatten()
|
tags, fields := smart.Flatten()
|
||||||
|
|
||||||
//assert
|
//assert
|
||||||
require.Equal(t, map[string]string{"device_protocol": "NVMe", "device_wwn": "test-wwn"}, tags)
|
require.Equal(t, map[string]string{"device_protocol": "NVMe", "scrutiny_uuid": smartUUID.String()}, tags)
|
||||||
require.Equal(t, map[string]interface{}{
|
require.Equal(t, map[string]interface{}{
|
||||||
"attr.available_spare.attribute_id": "available_spare",
|
"attr.available_spare.attribute_id": "available_spare",
|
||||||
"attr.available_spare.failure_rate": float64(0),
|
"attr.available_spare.failure_rate": float64(0),
|
||||||
@@ -181,9 +187,10 @@ func TestSmart_Flatten_NVMe(t *testing.T) {
|
|||||||
func TestNewSmartFromInfluxDB_ATA(t *testing.T) {
|
func TestNewSmartFromInfluxDB_ATA(t *testing.T) {
|
||||||
//setup
|
//setup
|
||||||
timeNow := time.Now()
|
timeNow := time.Now()
|
||||||
|
smartUUID := uuid.Must(uuid.NewV4())
|
||||||
attrs := map[string]interface{}{
|
attrs := map[string]interface{}{
|
||||||
"_time": timeNow,
|
"_time": timeNow,
|
||||||
"device_wwn": "test-wwn",
|
"scrutiny_uuid": smartUUID.String(),
|
||||||
"device_protocol": pkg.DeviceProtocolAta,
|
"device_protocol": pkg.DeviceProtocolAta,
|
||||||
"attr.1.attribute_id": "1",
|
"attr.1.attribute_id": "1",
|
||||||
"attr.1.failure_rate": float64(0),
|
"attr.1.failure_rate": float64(0),
|
||||||
@@ -208,7 +215,7 @@ func TestNewSmartFromInfluxDB_ATA(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, &measurements.Smart{
|
require.Equal(t, &measurements.Smart{
|
||||||
Date: timeNow,
|
Date: timeNow,
|
||||||
DeviceWWN: "test-wwn",
|
ScrutinyUUID: smartUUID,
|
||||||
DeviceProtocol: "ATA",
|
DeviceProtocol: "ATA",
|
||||||
Temp: 50,
|
Temp: 50,
|
||||||
PowerOnHours: 10,
|
PowerOnHours: 10,
|
||||||
@@ -229,9 +236,10 @@ func TestNewSmartFromInfluxDB_ATA(t *testing.T) {
|
|||||||
func TestNewSmartFromInfluxDB_NVMe(t *testing.T) {
|
func TestNewSmartFromInfluxDB_NVMe(t *testing.T) {
|
||||||
//setup
|
//setup
|
||||||
timeNow := time.Now()
|
timeNow := time.Now()
|
||||||
|
smartUUID := uuid.Must(uuid.NewV4())
|
||||||
attrs := map[string]interface{}{
|
attrs := map[string]interface{}{
|
||||||
"_time": timeNow,
|
"_time": timeNow,
|
||||||
"device_wwn": "test-wwn",
|
"scrutiny_uuid": smartUUID.String(),
|
||||||
"device_protocol": pkg.DeviceProtocolNvme,
|
"device_protocol": pkg.DeviceProtocolNvme,
|
||||||
"attr.available_spare.attribute_id": "available_spare",
|
"attr.available_spare.attribute_id": "available_spare",
|
||||||
"attr.available_spare.failure_rate": float64(0),
|
"attr.available_spare.failure_rate": float64(0),
|
||||||
@@ -252,7 +260,7 @@ func TestNewSmartFromInfluxDB_NVMe(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, &measurements.Smart{
|
require.Equal(t, &measurements.Smart{
|
||||||
Date: timeNow,
|
Date: timeNow,
|
||||||
DeviceWWN: "test-wwn",
|
ScrutinyUUID: smartUUID,
|
||||||
DeviceProtocol: "NVMe",
|
DeviceProtocol: "NVMe",
|
||||||
Temp: 50,
|
Temp: 50,
|
||||||
PowerOnHours: 10,
|
PowerOnHours: 10,
|
||||||
@@ -268,9 +276,10 @@ func TestNewSmartFromInfluxDB_NVMe(t *testing.T) {
|
|||||||
func TestNewSmartFromInfluxDB_SCSI(t *testing.T) {
|
func TestNewSmartFromInfluxDB_SCSI(t *testing.T) {
|
||||||
//setup
|
//setup
|
||||||
timeNow := time.Now()
|
timeNow := time.Now()
|
||||||
|
smartUUID := uuid.Must(uuid.NewV4())
|
||||||
attrs := map[string]interface{}{
|
attrs := map[string]interface{}{
|
||||||
"_time": timeNow,
|
"_time": timeNow,
|
||||||
"device_wwn": "test-wwn",
|
"scrutiny_uuid": smartUUID.String(),
|
||||||
"device_protocol": pkg.DeviceProtocolScsi,
|
"device_protocol": pkg.DeviceProtocolScsi,
|
||||||
"attr.read_errors_corrected_by_eccfast.attribute_id": "read_errors_corrected_by_eccfast",
|
"attr.read_errors_corrected_by_eccfast.attribute_id": "read_errors_corrected_by_eccfast",
|
||||||
"attr.read_errors_corrected_by_eccfast.failure_rate": float64(0),
|
"attr.read_errors_corrected_by_eccfast.failure_rate": float64(0),
|
||||||
@@ -291,7 +300,7 @@ func TestNewSmartFromInfluxDB_SCSI(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, &measurements.Smart{
|
require.Equal(t, &measurements.Smart{
|
||||||
Date: timeNow,
|
Date: timeNow,
|
||||||
DeviceWWN: "test-wwn",
|
ScrutinyUUID: smartUUID,
|
||||||
DeviceProtocol: "SCSI",
|
DeviceProtocol: "SCSI",
|
||||||
Temp: 50,
|
Temp: 50,
|
||||||
PowerOnHours: 10,
|
PowerOnHours: 10,
|
||||||
@@ -312,18 +321,19 @@ func TestFromCollectorSmartInfo(t *testing.T) {
|
|||||||
|
|
||||||
var smartJson collector.SmartInfo
|
var smartJson collector.SmartInfo
|
||||||
|
|
||||||
smartDataBytes, err := ioutil.ReadAll(smartDataFile)
|
smartDataBytes, err := io.ReadAll(smartDataFile)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
err = json.Unmarshal(smartDataBytes, &smartJson)
|
err = json.Unmarshal(smartDataBytes, &smartJson)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
//test
|
//test
|
||||||
smartMdl := measurements.Smart{}
|
smartMdl := measurements.Smart{}
|
||||||
err = smartMdl.FromCollectorSmartInfo("WWN-test", smartJson)
|
smartUUID := uuid.Must(uuid.NewV4())
|
||||||
|
err = smartMdl.FromCollectorSmartInfo(smartUUID, smartJson)
|
||||||
|
|
||||||
//assert
|
//assert
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, "WWN-test", smartMdl.DeviceWWN)
|
require.Equal(t, smartUUID, smartMdl.ScrutinyUUID)
|
||||||
require.Equal(t, pkg.DeviceStatusPassed, smartMdl.Status)
|
require.Equal(t, pkg.DeviceStatusPassed, smartMdl.Status)
|
||||||
require.Equal(t, 18, len(smartMdl.Attributes))
|
require.Equal(t, 18, len(smartMdl.Attributes))
|
||||||
|
|
||||||
@@ -344,18 +354,19 @@ func TestFromCollectorSmartInfo_Fail_Smart(t *testing.T) {
|
|||||||
|
|
||||||
var smartJson collector.SmartInfo
|
var smartJson collector.SmartInfo
|
||||||
|
|
||||||
smartDataBytes, err := ioutil.ReadAll(smartDataFile)
|
smartDataBytes, err := io.ReadAll(smartDataFile)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
err = json.Unmarshal(smartDataBytes, &smartJson)
|
err = json.Unmarshal(smartDataBytes, &smartJson)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
//test
|
//test
|
||||||
smartMdl := measurements.Smart{}
|
smartMdl := measurements.Smart{}
|
||||||
err = smartMdl.FromCollectorSmartInfo("WWN-test", smartJson)
|
smartUUID := uuid.Must(uuid.NewV4())
|
||||||
|
err = smartMdl.FromCollectorSmartInfo(smartUUID, smartJson)
|
||||||
|
|
||||||
//assert
|
//assert
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, "WWN-test", smartMdl.DeviceWWN)
|
require.Equal(t, smartUUID, smartMdl.ScrutinyUUID)
|
||||||
require.Equal(t, pkg.DeviceStatusFailedSmart, smartMdl.Status)
|
require.Equal(t, pkg.DeviceStatusFailedSmart, smartMdl.Status)
|
||||||
require.Equal(t, 0, len(smartMdl.Attributes))
|
require.Equal(t, 0, len(smartMdl.Attributes))
|
||||||
}
|
}
|
||||||
@@ -368,18 +379,19 @@ func TestFromCollectorSmartInfo_Fail_ScrutinySmart(t *testing.T) {
|
|||||||
|
|
||||||
var smartJson collector.SmartInfo
|
var smartJson collector.SmartInfo
|
||||||
|
|
||||||
smartDataBytes, err := ioutil.ReadAll(smartDataFile)
|
smartDataBytes, err := io.ReadAll(smartDataFile)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
err = json.Unmarshal(smartDataBytes, &smartJson)
|
err = json.Unmarshal(smartDataBytes, &smartJson)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
//test
|
//test
|
||||||
smartMdl := measurements.Smart{}
|
smartMdl := measurements.Smart{}
|
||||||
err = smartMdl.FromCollectorSmartInfo("WWN-test", smartJson)
|
smartUUID := uuid.Must(uuid.NewV4())
|
||||||
|
err = smartMdl.FromCollectorSmartInfo(smartUUID, smartJson)
|
||||||
|
|
||||||
//assert
|
//assert
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, "WWN-test", smartMdl.DeviceWWN)
|
require.Equal(t, smartUUID, smartMdl.ScrutinyUUID)
|
||||||
require.Equal(t, pkg.DeviceStatusFailedScrutiny|pkg.DeviceStatusFailedSmart, smartMdl.Status)
|
require.Equal(t, pkg.DeviceStatusFailedScrutiny|pkg.DeviceStatusFailedSmart, smartMdl.Status)
|
||||||
require.Equal(t, 17, len(smartMdl.Attributes))
|
require.Equal(t, 17, len(smartMdl.Attributes))
|
||||||
}
|
}
|
||||||
@@ -392,18 +404,19 @@ func TestFromCollectorSmartInfo_Fail_ScrutinyNonCriticalFailed(t *testing.T) {
|
|||||||
|
|
||||||
var smartJson collector.SmartInfo
|
var smartJson collector.SmartInfo
|
||||||
|
|
||||||
smartDataBytes, err := ioutil.ReadAll(smartDataFile)
|
smartDataBytes, err := io.ReadAll(smartDataFile)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
err = json.Unmarshal(smartDataBytes, &smartJson)
|
err = json.Unmarshal(smartDataBytes, &smartJson)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
//test
|
//test
|
||||||
smartMdl := measurements.Smart{}
|
smartMdl := measurements.Smart{}
|
||||||
err = smartMdl.FromCollectorSmartInfo("WWN-test", smartJson)
|
smartUUID := uuid.Must(uuid.NewV4())
|
||||||
|
err = smartMdl.FromCollectorSmartInfo(smartUUID, smartJson)
|
||||||
|
|
||||||
//assert
|
//assert
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, "WWN-test", smartMdl.DeviceWWN)
|
require.Equal(t, smartUUID, smartMdl.ScrutinyUUID)
|
||||||
require.Equal(t, pkg.DeviceStatusFailedScrutiny, smartMdl.Status)
|
require.Equal(t, pkg.DeviceStatusFailedScrutiny, smartMdl.Status)
|
||||||
require.Equal(t, pkg.AttributeStatusFailedScrutiny, smartMdl.Attributes["199"].GetStatus(),
|
require.Equal(t, pkg.AttributeStatusFailedScrutiny, smartMdl.Attributes["199"].GetStatus(),
|
||||||
"scrutiny should detect that %d failed (status: %d, %s)",
|
"scrutiny should detect that %d failed (status: %d, %s)",
|
||||||
@@ -425,18 +438,19 @@ func TestFromCollectorSmartInfo_NVMe_Fail_Scrutiny(t *testing.T) {
|
|||||||
|
|
||||||
var smartJson collector.SmartInfo
|
var smartJson collector.SmartInfo
|
||||||
|
|
||||||
smartDataBytes, err := ioutil.ReadAll(smartDataFile)
|
smartDataBytes, err := io.ReadAll(smartDataFile)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
err = json.Unmarshal(smartDataBytes, &smartJson)
|
err = json.Unmarshal(smartDataBytes, &smartJson)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
//test
|
//test
|
||||||
smartMdl := measurements.Smart{}
|
smartMdl := measurements.Smart{}
|
||||||
err = smartMdl.FromCollectorSmartInfo("WWN-test", smartJson)
|
smartUUID := uuid.Must(uuid.NewV4())
|
||||||
|
err = smartMdl.FromCollectorSmartInfo(smartUUID, smartJson)
|
||||||
|
|
||||||
//assert
|
//assert
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, "WWN-test", smartMdl.DeviceWWN)
|
require.Equal(t, smartUUID, smartMdl.ScrutinyUUID)
|
||||||
require.Equal(t, pkg.DeviceStatusFailedScrutiny, smartMdl.Status)
|
require.Equal(t, pkg.DeviceStatusFailedScrutiny, smartMdl.Status)
|
||||||
require.Equal(t, pkg.AttributeStatusFailedScrutiny, smartMdl.Attributes["media_errors"].GetStatus(),
|
require.Equal(t, pkg.AttributeStatusFailedScrutiny, smartMdl.Attributes["media_errors"].GetStatus(),
|
||||||
"scrutiny should detect that %s failed (status: %d, %s)",
|
"scrutiny should detect that %s failed (status: %d, %s)",
|
||||||
@@ -456,18 +470,19 @@ func TestFromCollectorSmartInfo_Nvme(t *testing.T) {
|
|||||||
|
|
||||||
var smartJson collector.SmartInfo
|
var smartJson collector.SmartInfo
|
||||||
|
|
||||||
smartDataBytes, err := ioutil.ReadAll(smartDataFile)
|
smartDataBytes, err := io.ReadAll(smartDataFile)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
err = json.Unmarshal(smartDataBytes, &smartJson)
|
err = json.Unmarshal(smartDataBytes, &smartJson)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
//test
|
//test
|
||||||
smartMdl := measurements.Smart{}
|
smartMdl := measurements.Smart{}
|
||||||
err = smartMdl.FromCollectorSmartInfo("WWN-test", smartJson)
|
smartUUID := uuid.Must(uuid.NewV4())
|
||||||
|
err = smartMdl.FromCollectorSmartInfo(smartUUID, smartJson)
|
||||||
|
|
||||||
//assert
|
//assert
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, "WWN-test", smartMdl.DeviceWWN)
|
require.Equal(t, smartUUID, smartMdl.ScrutinyUUID)
|
||||||
require.Equal(t, pkg.DeviceStatusPassed, smartMdl.Status)
|
require.Equal(t, pkg.DeviceStatusPassed, smartMdl.Status)
|
||||||
require.Equal(t, 16, len(smartMdl.Attributes))
|
require.Equal(t, 16, len(smartMdl.Attributes))
|
||||||
|
|
||||||
@@ -483,18 +498,19 @@ func TestFromCollectorSmartInfo_Scsi(t *testing.T) {
|
|||||||
|
|
||||||
var smartJson collector.SmartInfo
|
var smartJson collector.SmartInfo
|
||||||
|
|
||||||
smartDataBytes, err := ioutil.ReadAll(smartDataFile)
|
smartDataBytes, err := io.ReadAll(smartDataFile)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
err = json.Unmarshal(smartDataBytes, &smartJson)
|
err = json.Unmarshal(smartDataBytes, &smartJson)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
//test
|
//test
|
||||||
smartMdl := measurements.Smart{}
|
smartMdl := measurements.Smart{}
|
||||||
err = smartMdl.FromCollectorSmartInfo("WWN-test", smartJson)
|
smartUUID := uuid.Must(uuid.NewV4())
|
||||||
|
err = smartMdl.FromCollectorSmartInfo(smartUUID, smartJson)
|
||||||
|
|
||||||
//assert
|
//assert
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, "WWN-test", smartMdl.DeviceWWN)
|
require.Equal(t, smartUUID, smartMdl.ScrutinyUUID)
|
||||||
require.Equal(t, pkg.DeviceStatusPassed, smartMdl.Status)
|
require.Equal(t, pkg.DeviceStatusPassed, smartMdl.Status)
|
||||||
require.Equal(t, 13, len(smartMdl.Attributes))
|
require.Equal(t, 13, len(smartMdl.Attributes))
|
||||||
|
|
||||||
|
|||||||
+2
-3
@@ -5,7 +5,6 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
|
||||||
"log"
|
"log"
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
@@ -68,7 +67,7 @@ func SendPostRequest(url string, file io.Reader) ([]byte, error) {
|
|||||||
|
|
||||||
log.Printf("%v\n", response.Status)
|
log.Printf("%v\n", response.Status)
|
||||||
|
|
||||||
return ioutil.ReadAll(response.Body)
|
return io.ReadAll(response.Body)
|
||||||
}
|
}
|
||||||
|
|
||||||
// InfluxDB will throw an error/ignore any submitted data with a timestamp older than the
|
// InfluxDB will throw an error/ignore any submitted data with a timestamp older than the
|
||||||
@@ -79,7 +78,7 @@ func readSmartDataFileFixTimestamp(daysToSubtract int, smartDataFilepath string)
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
metricsFileData, err := ioutil.ReadAll(metricsfile)
|
metricsFileData, err := io.ReadAll(metricsfile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -19,9 +19,10 @@ import (
|
|||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/models"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models"
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/models/measurements"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models/measurements"
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/thresholds"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/thresholds"
|
||||||
"github.com/containrrr/shoutrrr"
|
|
||||||
shoutrrrTypes "github.com/containrrr/shoutrrr/pkg/types"
|
|
||||||
"github.com/gin-gonic/gin"
|
"github.com/gin-gonic/gin"
|
||||||
|
"github.com/nicholas-fedor/shoutrrr"
|
||||||
|
shoutrrrTypes "github.com/nicholas-fedor/shoutrrr/pkg/types"
|
||||||
|
"github.com/gofrs/uuid/v5"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
"golang.org/x/sync/errgroup"
|
"golang.org/x/sync/errgroup"
|
||||||
)
|
)
|
||||||
@@ -32,7 +33,7 @@ const NotifyFailureTypeSmartFailure = "SmartFailure"
|
|||||||
const NotifyFailureTypeScrutinyFailure = "ScrutinyFailure"
|
const NotifyFailureTypeScrutinyFailure = "ScrutinyFailure"
|
||||||
|
|
||||||
// ShouldNotify check if the error Message should be filtered (level mismatch or filtered_attributes)
|
// ShouldNotify check if the error Message should be filtered (level mismatch or filtered_attributes)
|
||||||
func ShouldNotify(logger logrus.FieldLogger, device models.Device, smartAttrs measurements.Smart, statusThreshold pkg.MetricsStatusThreshold, statusFilterAttributes pkg.MetricsStatusFilterAttributes, repeatNotifications bool, c *gin.Context, deviceRepo database.DeviceRepo) bool {
|
func ShouldNotify(logger logrus.FieldLogger, device models.Device, smartAttrs measurements.Smart, scrutiny_uuid uuid.UUID, statusThreshold pkg.MetricsStatusThreshold, statusFilterAttributes pkg.MetricsStatusFilterAttributes, repeatNotifications bool, c *gin.Context, deviceRepo database.DeviceRepo) bool {
|
||||||
// 1. check if the device is healthy
|
// 1. check if the device is healthy
|
||||||
if device.DeviceStatus == pkg.DeviceStatusPassed {
|
if device.DeviceStatus == pkg.DeviceStatusPassed {
|
||||||
return false
|
return false
|
||||||
@@ -64,7 +65,7 @@ func ShouldNotify(logger logrus.FieldLogger, device models.Device, smartAttrs me
|
|||||||
var failingAttributes []string
|
var failingAttributes []string
|
||||||
// Loop through the attributes to find the failing ones
|
// Loop through the attributes to find the failing ones
|
||||||
for attrId, attrData := range smartAttrs.Attributes {
|
for attrId, attrData := range smartAttrs.Attributes {
|
||||||
var status pkg.AttributeStatus = attrData.GetStatus()
|
var status = attrData.GetStatus()
|
||||||
// Skip over passing attributes
|
// Skip over passing attributes
|
||||||
if status == pkg.AttributeStatusPassed {
|
if status == pkg.AttributeStatusPassed {
|
||||||
continue
|
continue
|
||||||
@@ -100,7 +101,7 @@ func ShouldNotify(logger logrus.FieldLogger, device models.Device, smartAttrs me
|
|||||||
var lastPoints []measurements.Smart
|
var lastPoints []measurements.Smart
|
||||||
var err error
|
var err error
|
||||||
if !repeatNotifications {
|
if !repeatNotifications {
|
||||||
lastPoints, err = deviceRepo.GetSmartAttributeHistory(c, c.Param("wwn"), database.DURATION_KEY_FOREVER, 1, 1, failingAttributes)
|
lastPoints, err = deviceRepo.GetSmartAttributeHistory(c, scrutiny_uuid, database.DURATION_KEY_FOREVER, 1, 1, failingAttributes)
|
||||||
if err == nil || len(lastPoints) < 1 {
|
if err == nil || len(lastPoints) < 1 {
|
||||||
logger.Warningln("Could not get the most recent data points from the database. This is expected to happen only if this is the very first submission of data for the device.")
|
logger.Warningln("Could not get the most recent data points from the database. This is expected to happen only if this is the very first submission of data for the device.")
|
||||||
}
|
}
|
||||||
@@ -147,7 +148,7 @@ func NewPayload(device models.Device, test bool, currentTime ...time.Time) Paylo
|
|||||||
|
|
||||||
//validate that the Payload is populated
|
//validate that the Payload is populated
|
||||||
var sendDate time.Time
|
var sendDate time.Time
|
||||||
if currentTime != nil && len(currentTime) > 0 {
|
if len(currentTime) > 0 {
|
||||||
sendDate = currentTime[0]
|
sendDate = currentTime[0]
|
||||||
} else {
|
} else {
|
||||||
sendDate = time.Now()
|
sendDate = time.Now()
|
||||||
@@ -318,7 +319,7 @@ func (n *Notify) SendScriptNotification(scriptUrl string) error {
|
|||||||
|
|
||||||
if !utils.FileExists(scriptPath) {
|
if !utils.FileExists(scriptPath) {
|
||||||
n.Logger.Errorf("Script does not exist: %s", scriptPath)
|
n.Logger.Errorf("Script does not exist: %s", scriptPath)
|
||||||
return errors.New(fmt.Sprintf("custom script path does not exist: %s", scriptPath))
|
return fmt.Errorf("custom script path does not exist: %s", scriptPath)
|
||||||
}
|
}
|
||||||
|
|
||||||
copyEnv := os.Environ()
|
copyEnv := os.Environ()
|
||||||
@@ -424,6 +425,17 @@ func (n *Notify) GenShoutrrrNotificationParams(shoutrrrUrl string) (string, *sho
|
|||||||
case "telegram":
|
case "telegram":
|
||||||
(*params)["title"] = subject
|
(*params)["title"] = subject
|
||||||
case "zulip":
|
case "zulip":
|
||||||
|
query := serviceURL.Query()
|
||||||
|
urlTopic := query["topic"]
|
||||||
|
delete(query, "topic")
|
||||||
|
if len(urlTopic) > 0 && urlTopic[len(urlTopic)-1] != "" {
|
||||||
|
subject = urlTopic[len(urlTopic)-1]
|
||||||
|
}
|
||||||
|
subjectRunes := []rune(subject)
|
||||||
|
if len(subjectRunes) > 60 {
|
||||||
|
n.Logger.Warningf("Zulip notification subject too long (%d characters), truncating to 60 characters", len(subjectRunes))
|
||||||
|
subject = string(subjectRunes[:60])
|
||||||
|
}
|
||||||
(*params)["topic"] = subject
|
(*params)["topic"] = subject
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -12,9 +12,10 @@ import (
|
|||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/models"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models"
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/models/measurements"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models/measurements"
|
||||||
"github.com/gin-gonic/gin"
|
"github.com/gin-gonic/gin"
|
||||||
"github.com/golang/mock/gomock"
|
"github.com/gofrs/uuid/v5"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
"go.uber.org/mock/gomock"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestShouldNotify_MustSkipPassingDevices(t *testing.T) {
|
func TestShouldNotify_MustSkipPassingDevices(t *testing.T) {
|
||||||
@@ -26,12 +27,12 @@ func TestShouldNotify_MustSkipPassingDevices(t *testing.T) {
|
|||||||
smartAttrs := measurements.Smart{}
|
smartAttrs := measurements.Smart{}
|
||||||
statusThreshold := pkg.MetricsStatusThresholdBoth
|
statusThreshold := pkg.MetricsStatusThresholdBoth
|
||||||
notifyFilterAttributes := pkg.MetricsStatusFilterAttributesAll
|
notifyFilterAttributes := pkg.MetricsStatusFilterAttributesAll
|
||||||
|
scrutinyUUID := uuid.Must(uuid.NewV4())
|
||||||
|
|
||||||
mockCtrl := gomock.NewController(t)
|
mockCtrl := gomock.NewController(t)
|
||||||
defer mockCtrl.Finish()
|
|
||||||
fakeDatabase := mock_database.NewMockDeviceRepo(mockCtrl)
|
fakeDatabase := mock_database.NewMockDeviceRepo(mockCtrl)
|
||||||
//assert
|
//assert
|
||||||
require.False(t, ShouldNotify(logrus.StandardLogger(), device, smartAttrs, statusThreshold, notifyFilterAttributes, true, &gin.Context{}, fakeDatabase))
|
require.False(t, ShouldNotify(logrus.StandardLogger(), device, smartAttrs, scrutinyUUID, statusThreshold, notifyFilterAttributes, true, &gin.Context{}, fakeDatabase))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestShouldNotify_MetricsStatusThresholdBoth_FailingSmartDevice(t *testing.T) {
|
func TestShouldNotify_MetricsStatusThresholdBoth_FailingSmartDevice(t *testing.T) {
|
||||||
@@ -43,11 +44,11 @@ func TestShouldNotify_MetricsStatusThresholdBoth_FailingSmartDevice(t *testing.T
|
|||||||
smartAttrs := measurements.Smart{}
|
smartAttrs := measurements.Smart{}
|
||||||
statusThreshold := pkg.MetricsStatusThresholdBoth
|
statusThreshold := pkg.MetricsStatusThresholdBoth
|
||||||
notifyFilterAttributes := pkg.MetricsStatusFilterAttributesAll
|
notifyFilterAttributes := pkg.MetricsStatusFilterAttributesAll
|
||||||
|
scrutinyUUID := uuid.Must(uuid.NewV4())
|
||||||
mockCtrl := gomock.NewController(t)
|
mockCtrl := gomock.NewController(t)
|
||||||
defer mockCtrl.Finish()
|
|
||||||
fakeDatabase := mock_database.NewMockDeviceRepo(mockCtrl)
|
fakeDatabase := mock_database.NewMockDeviceRepo(mockCtrl)
|
||||||
//assert
|
//assert
|
||||||
require.True(t, ShouldNotify(logrus.StandardLogger(), device, smartAttrs, statusThreshold, notifyFilterAttributes, true, &gin.Context{}, fakeDatabase))
|
require.True(t, ShouldNotify(logrus.StandardLogger(), device, smartAttrs, scrutinyUUID, statusThreshold, notifyFilterAttributes, true, &gin.Context{}, fakeDatabase))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestShouldNotify_MetricsStatusThresholdSmart_FailingSmartDevice(t *testing.T) {
|
func TestShouldNotify_MetricsStatusThresholdSmart_FailingSmartDevice(t *testing.T) {
|
||||||
@@ -59,11 +60,11 @@ func TestShouldNotify_MetricsStatusThresholdSmart_FailingSmartDevice(t *testing.
|
|||||||
smartAttrs := measurements.Smart{}
|
smartAttrs := measurements.Smart{}
|
||||||
statusThreshold := pkg.MetricsStatusThresholdSmart
|
statusThreshold := pkg.MetricsStatusThresholdSmart
|
||||||
notifyFilterAttributes := pkg.MetricsStatusFilterAttributesAll
|
notifyFilterAttributes := pkg.MetricsStatusFilterAttributesAll
|
||||||
|
scrutinyUUID := uuid.Must(uuid.NewV4())
|
||||||
mockCtrl := gomock.NewController(t)
|
mockCtrl := gomock.NewController(t)
|
||||||
defer mockCtrl.Finish()
|
|
||||||
fakeDatabase := mock_database.NewMockDeviceRepo(mockCtrl)
|
fakeDatabase := mock_database.NewMockDeviceRepo(mockCtrl)
|
||||||
//assert
|
//assert
|
||||||
require.True(t, ShouldNotify(logrus.StandardLogger(), device, smartAttrs, statusThreshold, notifyFilterAttributes, true, &gin.Context{}, fakeDatabase))
|
require.True(t, ShouldNotify(logrus.StandardLogger(), device, smartAttrs, scrutinyUUID, statusThreshold, notifyFilterAttributes, true, &gin.Context{}, fakeDatabase))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestShouldNotify_MetricsStatusThresholdScrutiny_FailingSmartDevice(t *testing.T) {
|
func TestShouldNotify_MetricsStatusThresholdScrutiny_FailingSmartDevice(t *testing.T) {
|
||||||
@@ -75,11 +76,11 @@ func TestShouldNotify_MetricsStatusThresholdScrutiny_FailingSmartDevice(t *testi
|
|||||||
smartAttrs := measurements.Smart{}
|
smartAttrs := measurements.Smart{}
|
||||||
statusThreshold := pkg.MetricsStatusThresholdScrutiny
|
statusThreshold := pkg.MetricsStatusThresholdScrutiny
|
||||||
notifyFilterAttributes := pkg.MetricsStatusFilterAttributesAll
|
notifyFilterAttributes := pkg.MetricsStatusFilterAttributesAll
|
||||||
|
scrutinyUUID := uuid.Must(uuid.NewV4())
|
||||||
mockCtrl := gomock.NewController(t)
|
mockCtrl := gomock.NewController(t)
|
||||||
defer mockCtrl.Finish()
|
|
||||||
fakeDatabase := mock_database.NewMockDeviceRepo(mockCtrl)
|
fakeDatabase := mock_database.NewMockDeviceRepo(mockCtrl)
|
||||||
//assert
|
//assert
|
||||||
require.False(t, ShouldNotify(logrus.StandardLogger(), device, smartAttrs, statusThreshold, notifyFilterAttributes, true, &gin.Context{}, fakeDatabase))
|
require.False(t, ShouldNotify(logrus.StandardLogger(), device, smartAttrs, scrutinyUUID, statusThreshold, notifyFilterAttributes, true, &gin.Context{}, fakeDatabase))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestShouldNotify_MetricsStatusFilterAttributesCritical_WithCriticalAttrs(t *testing.T) {
|
func TestShouldNotify_MetricsStatusFilterAttributesCritical_WithCriticalAttrs(t *testing.T) {
|
||||||
@@ -95,12 +96,12 @@ func TestShouldNotify_MetricsStatusFilterAttributesCritical_WithCriticalAttrs(t
|
|||||||
}}
|
}}
|
||||||
statusThreshold := pkg.MetricsStatusThresholdBoth
|
statusThreshold := pkg.MetricsStatusThresholdBoth
|
||||||
notifyFilterAttributes := pkg.MetricsStatusFilterAttributesCritical
|
notifyFilterAttributes := pkg.MetricsStatusFilterAttributesCritical
|
||||||
|
scrutinyUUID := uuid.Must(uuid.NewV4())
|
||||||
mockCtrl := gomock.NewController(t)
|
mockCtrl := gomock.NewController(t)
|
||||||
defer mockCtrl.Finish()
|
|
||||||
fakeDatabase := mock_database.NewMockDeviceRepo(mockCtrl)
|
fakeDatabase := mock_database.NewMockDeviceRepo(mockCtrl)
|
||||||
|
|
||||||
//assert
|
//assert
|
||||||
require.True(t, ShouldNotify(logrus.StandardLogger(), device, smartAttrs, statusThreshold, notifyFilterAttributes, true, &gin.Context{}, fakeDatabase))
|
require.True(t, ShouldNotify(logrus.StandardLogger(), device, smartAttrs, scrutinyUUID, statusThreshold, notifyFilterAttributes, true, &gin.Context{}, fakeDatabase))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestShouldNotify_MetricsStatusFilterAttributesCritical_WithMultipleCriticalAttrs(t *testing.T) {
|
func TestShouldNotify_MetricsStatusFilterAttributesCritical_WithMultipleCriticalAttrs(t *testing.T) {
|
||||||
@@ -119,12 +120,12 @@ func TestShouldNotify_MetricsStatusFilterAttributesCritical_WithMultipleCritical
|
|||||||
}}
|
}}
|
||||||
statusThreshold := pkg.MetricsStatusThresholdBoth
|
statusThreshold := pkg.MetricsStatusThresholdBoth
|
||||||
notifyFilterAttributes := pkg.MetricsStatusFilterAttributesCritical
|
notifyFilterAttributes := pkg.MetricsStatusFilterAttributesCritical
|
||||||
|
scrutinyUUID := uuid.Must(uuid.NewV4())
|
||||||
mockCtrl := gomock.NewController(t)
|
mockCtrl := gomock.NewController(t)
|
||||||
defer mockCtrl.Finish()
|
|
||||||
fakeDatabase := mock_database.NewMockDeviceRepo(mockCtrl)
|
fakeDatabase := mock_database.NewMockDeviceRepo(mockCtrl)
|
||||||
|
|
||||||
//assert
|
//assert
|
||||||
require.True(t, ShouldNotify(logrus.StandardLogger(), device, smartAttrs, statusThreshold, notifyFilterAttributes, true, &gin.Context{}, fakeDatabase))
|
require.True(t, ShouldNotify(logrus.StandardLogger(), device, smartAttrs, scrutinyUUID, statusThreshold, notifyFilterAttributes, true, &gin.Context{}, fakeDatabase))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestShouldNotify_MetricsStatusFilterAttributesCritical_WithNoCriticalAttrs(t *testing.T) {
|
func TestShouldNotify_MetricsStatusFilterAttributesCritical_WithNoCriticalAttrs(t *testing.T) {
|
||||||
@@ -140,12 +141,12 @@ func TestShouldNotify_MetricsStatusFilterAttributesCritical_WithNoCriticalAttrs(
|
|||||||
}}
|
}}
|
||||||
statusThreshold := pkg.MetricsStatusThresholdBoth
|
statusThreshold := pkg.MetricsStatusThresholdBoth
|
||||||
notifyFilterAttributes := pkg.MetricsStatusFilterAttributesCritical
|
notifyFilterAttributes := pkg.MetricsStatusFilterAttributesCritical
|
||||||
|
scrutinyUUID := uuid.Must(uuid.NewV4())
|
||||||
mockCtrl := gomock.NewController(t)
|
mockCtrl := gomock.NewController(t)
|
||||||
defer mockCtrl.Finish()
|
|
||||||
fakeDatabase := mock_database.NewMockDeviceRepo(mockCtrl)
|
fakeDatabase := mock_database.NewMockDeviceRepo(mockCtrl)
|
||||||
|
|
||||||
//assert
|
//assert
|
||||||
require.False(t, ShouldNotify(logrus.StandardLogger(), device, smartAttrs, statusThreshold, notifyFilterAttributes, true, &gin.Context{}, fakeDatabase))
|
require.False(t, ShouldNotify(logrus.StandardLogger(), device, smartAttrs, scrutinyUUID, statusThreshold, notifyFilterAttributes, true, &gin.Context{}, fakeDatabase))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestShouldNotify_MetricsStatusFilterAttributesCritical_WithNoFailingCriticalAttrs(t *testing.T) {
|
func TestShouldNotify_MetricsStatusFilterAttributesCritical_WithNoFailingCriticalAttrs(t *testing.T) {
|
||||||
@@ -161,12 +162,12 @@ func TestShouldNotify_MetricsStatusFilterAttributesCritical_WithNoFailingCritica
|
|||||||
}}
|
}}
|
||||||
statusThreshold := pkg.MetricsStatusThresholdBoth
|
statusThreshold := pkg.MetricsStatusThresholdBoth
|
||||||
notifyFilterAttributes := pkg.MetricsStatusFilterAttributesCritical
|
notifyFilterAttributes := pkg.MetricsStatusFilterAttributesCritical
|
||||||
|
scrutinyUUID := uuid.Must(uuid.NewV4())
|
||||||
mockCtrl := gomock.NewController(t)
|
mockCtrl := gomock.NewController(t)
|
||||||
defer mockCtrl.Finish()
|
|
||||||
fakeDatabase := mock_database.NewMockDeviceRepo(mockCtrl)
|
fakeDatabase := mock_database.NewMockDeviceRepo(mockCtrl)
|
||||||
|
|
||||||
//assert
|
//assert
|
||||||
require.False(t, ShouldNotify(logrus.StandardLogger(), device, smartAttrs, statusThreshold, notifyFilterAttributes, true, &gin.Context{}, fakeDatabase))
|
require.False(t, ShouldNotify(logrus.StandardLogger(), device, smartAttrs, scrutinyUUID, statusThreshold, notifyFilterAttributes, true, &gin.Context{}, fakeDatabase))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestShouldNotify_MetricsStatusFilterAttributesCritical_MetricsStatusThresholdSmart_WithCriticalAttrsFailingScrutiny(t *testing.T) {
|
func TestShouldNotify_MetricsStatusFilterAttributesCritical_MetricsStatusThresholdSmart_WithCriticalAttrsFailingScrutiny(t *testing.T) {
|
||||||
@@ -185,12 +186,12 @@ func TestShouldNotify_MetricsStatusFilterAttributesCritical_MetricsStatusThresho
|
|||||||
}}
|
}}
|
||||||
statusThreshold := pkg.MetricsStatusThresholdSmart
|
statusThreshold := pkg.MetricsStatusThresholdSmart
|
||||||
notifyFilterAttributes := pkg.MetricsStatusFilterAttributesCritical
|
notifyFilterAttributes := pkg.MetricsStatusFilterAttributesCritical
|
||||||
|
scrutinyUUID := uuid.Must(uuid.NewV4())
|
||||||
mockCtrl := gomock.NewController(t)
|
mockCtrl := gomock.NewController(t)
|
||||||
defer mockCtrl.Finish()
|
|
||||||
fakeDatabase := mock_database.NewMockDeviceRepo(mockCtrl)
|
fakeDatabase := mock_database.NewMockDeviceRepo(mockCtrl)
|
||||||
|
|
||||||
//assert
|
//assert
|
||||||
require.False(t, ShouldNotify(logrus.StandardLogger(), device, smartAttrs, statusThreshold, notifyFilterAttributes, true, &gin.Context{}, fakeDatabase))
|
require.False(t, ShouldNotify(logrus.StandardLogger(), device, smartAttrs, scrutinyUUID, statusThreshold, notifyFilterAttributes, true, &gin.Context{}, fakeDatabase))
|
||||||
}
|
}
|
||||||
func TestShouldNotify_NoRepeat_DatabaseFailure(t *testing.T) {
|
func TestShouldNotify_NoRepeat_DatabaseFailure(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
@@ -205,13 +206,13 @@ func TestShouldNotify_NoRepeat_DatabaseFailure(t *testing.T) {
|
|||||||
}}
|
}}
|
||||||
statusThreshold := pkg.MetricsStatusThresholdBoth
|
statusThreshold := pkg.MetricsStatusThresholdBoth
|
||||||
notifyFilterAttributes := pkg.MetricsStatusFilterAttributesAll
|
notifyFilterAttributes := pkg.MetricsStatusFilterAttributesAll
|
||||||
|
scrutinyUUID := uuid.Must(uuid.NewV4())
|
||||||
mockCtrl := gomock.NewController(t)
|
mockCtrl := gomock.NewController(t)
|
||||||
defer mockCtrl.Finish()
|
|
||||||
fakeDatabase := mock_database.NewMockDeviceRepo(mockCtrl)
|
fakeDatabase := mock_database.NewMockDeviceRepo(mockCtrl)
|
||||||
fakeDatabase.EXPECT().GetSmartAttributeHistory(&gin.Context{}, "", database.DURATION_KEY_FOREVER, 1, 1, []string{"5"}).Return([]measurements.Smart{}, errors.New("")).Times(1)
|
fakeDatabase.EXPECT().GetSmartAttributeHistory(&gin.Context{}, scrutinyUUID, database.DURATION_KEY_FOREVER, 1, 1, []string{"5"}).Return([]measurements.Smart{}, errors.New("")).Times(1)
|
||||||
|
|
||||||
//assert
|
//assert
|
||||||
require.True(t, ShouldNotify(logrus.StandardLogger(), device, smartAttrs, statusThreshold, notifyFilterAttributes, false, &gin.Context{}, fakeDatabase))
|
require.True(t, ShouldNotify(logrus.StandardLogger(), device, smartAttrs, scrutinyUUID, statusThreshold, notifyFilterAttributes, false, &gin.Context{}, fakeDatabase))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestShouldNotify_NoRepeat_NoDatabaseData(t *testing.T) {
|
func TestShouldNotify_NoRepeat_NoDatabaseData(t *testing.T) {
|
||||||
@@ -227,13 +228,13 @@ func TestShouldNotify_NoRepeat_NoDatabaseData(t *testing.T) {
|
|||||||
}}
|
}}
|
||||||
statusThreshold := pkg.MetricsStatusThresholdBoth
|
statusThreshold := pkg.MetricsStatusThresholdBoth
|
||||||
notifyFilterAttributes := pkg.MetricsStatusFilterAttributesAll
|
notifyFilterAttributes := pkg.MetricsStatusFilterAttributesAll
|
||||||
|
scrutinyUUID := uuid.Must(uuid.NewV4())
|
||||||
mockCtrl := gomock.NewController(t)
|
mockCtrl := gomock.NewController(t)
|
||||||
defer mockCtrl.Finish()
|
|
||||||
fakeDatabase := mock_database.NewMockDeviceRepo(mockCtrl)
|
fakeDatabase := mock_database.NewMockDeviceRepo(mockCtrl)
|
||||||
fakeDatabase.EXPECT().GetSmartAttributeHistory(&gin.Context{}, "", database.DURATION_KEY_FOREVER, 1, 1, []string{"5"}).Return([]measurements.Smart{}, nil).Times(1)
|
fakeDatabase.EXPECT().GetSmartAttributeHistory(&gin.Context{}, scrutinyUUID, database.DURATION_KEY_FOREVER, 1, 1, []string{"5"}).Return([]measurements.Smart{}, nil).Times(1)
|
||||||
|
|
||||||
//assert
|
//assert
|
||||||
require.True(t, ShouldNotify(logrus.StandardLogger(), device, smartAttrs, statusThreshold, notifyFilterAttributes, false, &gin.Context{}, fakeDatabase))
|
require.True(t, ShouldNotify(logrus.StandardLogger(), device, smartAttrs, scrutinyUUID, statusThreshold, notifyFilterAttributes, false, &gin.Context{}, fakeDatabase))
|
||||||
}
|
}
|
||||||
func TestShouldNotify_NoRepeat(t *testing.T) {
|
func TestShouldNotify_NoRepeat(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
@@ -249,13 +250,13 @@ func TestShouldNotify_NoRepeat(t *testing.T) {
|
|||||||
}}
|
}}
|
||||||
statusThreshold := pkg.MetricsStatusThresholdBoth
|
statusThreshold := pkg.MetricsStatusThresholdBoth
|
||||||
notifyFilterAttributes := pkg.MetricsStatusFilterAttributesAll
|
notifyFilterAttributes := pkg.MetricsStatusFilterAttributesAll
|
||||||
|
scrutinyUUID := uuid.Must(uuid.NewV4())
|
||||||
mockCtrl := gomock.NewController(t)
|
mockCtrl := gomock.NewController(t)
|
||||||
defer mockCtrl.Finish()
|
|
||||||
fakeDatabase := mock_database.NewMockDeviceRepo(mockCtrl)
|
fakeDatabase := mock_database.NewMockDeviceRepo(mockCtrl)
|
||||||
fakeDatabase.EXPECT().GetSmartAttributeHistory(&gin.Context{}, "", database.DURATION_KEY_FOREVER, 1, 1, []string{"5"}).Return([]measurements.Smart{smartAttrs}, nil).Times(1)
|
fakeDatabase.EXPECT().GetSmartAttributeHistory(&gin.Context{}, scrutinyUUID, database.DURATION_KEY_FOREVER, 1, 1, []string{"5"}).Return([]measurements.Smart{smartAttrs}, nil).Times(1)
|
||||||
|
|
||||||
//assert
|
//assert
|
||||||
require.False(t, ShouldNotify(logrus.StandardLogger(), device, smartAttrs, statusThreshold, notifyFilterAttributes, false, &gin.Context{}, fakeDatabase))
|
require.False(t, ShouldNotify(logrus.StandardLogger(), device, smartAttrs, scrutinyUUID, statusThreshold, notifyFilterAttributes, false, &gin.Context{}, fakeDatabase))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNewPayload(t *testing.T) {
|
func TestNewPayload(t *testing.T) {
|
||||||
|
|||||||
@@ -2,4 +2,4 @@ package version
|
|||||||
|
|
||||||
// VERSION is the app-global version string, which will be replaced with a
|
// VERSION is the app-global version string, which will be replaced with a
|
||||||
// new value during packaging
|
// new value during packaging
|
||||||
const VERSION = "0.8.1"
|
const VERSION = "0.8.6"
|
||||||
|
|||||||
@@ -1,17 +1,26 @@
|
|||||||
package handler
|
package handler
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"net/http"
|
||||||
|
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/database"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/database"
|
||||||
"github.com/gin-gonic/gin"
|
"github.com/gin-gonic/gin"
|
||||||
|
"github.com/gofrs/uuid/v5"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
"net/http"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func ArchiveDevice(c *gin.Context) {
|
func ArchiveDevice(c *gin.Context) {
|
||||||
logger := c.MustGet("LOGGER").(*logrus.Entry)
|
logger := c.MustGet("LOGGER").(*logrus.Entry)
|
||||||
deviceRepo := c.MustGet("DEVICE_REPOSITORY").(database.DeviceRepo)
|
deviceRepo := c.MustGet("DEVICE_REPOSITORY").(database.DeviceRepo)
|
||||||
|
|
||||||
err := deviceRepo.UpdateDeviceArchived(c, c.Param("wwn"), true)
|
scrutiny_uuid, err := uuid.FromString(c.Param("scrutiny_uuid"))
|
||||||
|
if err != nil {
|
||||||
|
logger.Errorln("Invalid scrutiny uuid", err)
|
||||||
|
c.JSON(http.StatusInternalServerError, gin.H{"success": false})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
err = deviceRepo.UpdateDeviceArchived(c, scrutiny_uuid, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Errorln("An error occurred while archiving device", err)
|
logger.Errorln("An error occurred while archiving device", err)
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"success": false})
|
c.JSON(http.StatusInternalServerError, gin.H{"success": false})
|
||||||
|
|||||||
@@ -1,17 +1,24 @@
|
|||||||
package handler
|
package handler
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"net/http"
|
||||||
|
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/database"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/database"
|
||||||
"github.com/gin-gonic/gin"
|
"github.com/gin-gonic/gin"
|
||||||
|
"github.com/gofrs/uuid/v5"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
"net/http"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func DeleteDevice(c *gin.Context) {
|
func DeleteDevice(c *gin.Context) {
|
||||||
logger := c.MustGet("LOGGER").(*logrus.Entry)
|
logger := c.MustGet("LOGGER").(*logrus.Entry)
|
||||||
deviceRepo := c.MustGet("DEVICE_REPOSITORY").(database.DeviceRepo)
|
deviceRepo := c.MustGet("DEVICE_REPOSITORY").(database.DeviceRepo)
|
||||||
|
scrutiny_uuid, err := uuid.FromString(c.Param("scrutiny_uuid"))
|
||||||
err := deviceRepo.DeleteDevice(c, c.Param("wwn"))
|
if err != nil {
|
||||||
|
logger.Errorln("Invalid scrutiny uuid", err)
|
||||||
|
c.JSON(http.StatusInternalServerError, gin.H{"success": false})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
err = deviceRepo.DeleteDevice(c, scrutiny_uuid)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Errorln("An error occurred while deleting device", err)
|
logger.Errorln("An error occurred while deleting device", err)
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"success": false})
|
c.JSON(http.StatusInternalServerError, gin.H{"success": false})
|
||||||
|
|||||||
@@ -6,14 +6,20 @@ import (
|
|||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/database"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/database"
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/thresholds"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/thresholds"
|
||||||
"github.com/gin-gonic/gin"
|
"github.com/gin-gonic/gin"
|
||||||
|
"github.com/gofrs/uuid/v5"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
|
|
||||||
func GetDeviceDetails(c *gin.Context) {
|
func GetDeviceDetails(c *gin.Context) {
|
||||||
logger := c.MustGet("LOGGER").(*logrus.Entry)
|
logger := c.MustGet("LOGGER").(*logrus.Entry)
|
||||||
deviceRepo := c.MustGet("DEVICE_REPOSITORY").(database.DeviceRepo)
|
deviceRepo := c.MustGet("DEVICE_REPOSITORY").(database.DeviceRepo)
|
||||||
|
scrutiny_uuid, err := uuid.FromString(c.Param("scrutiny_uuid"))
|
||||||
device, err := deviceRepo.GetDeviceDetails(c, c.Param("wwn"))
|
if err != nil {
|
||||||
|
logger.Errorln("Invalid scrutiny uuid", err)
|
||||||
|
c.JSON(http.StatusInternalServerError, gin.H{"success": false})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
device, err := deviceRepo.GetDeviceDetails(c, scrutiny_uuid)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Errorln("An error occurred while retrieving device details", err)
|
logger.Errorln("An error occurred while retrieving device details", err)
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"success": false})
|
c.JSON(http.StatusInternalServerError, gin.H{"success": false})
|
||||||
@@ -25,7 +31,7 @@ func GetDeviceDetails(c *gin.Context) {
|
|||||||
durationKey = "forever"
|
durationKey = "forever"
|
||||||
}
|
}
|
||||||
|
|
||||||
smartResults, err := deviceRepo.GetSmartAttributeHistory(c, c.Param("wwn"), durationKey, 0, 0, nil)
|
smartResults, err := deviceRepo.GetSmartAttributeHistory(c, scrutiny_uuid, durationKey, 0, 0, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Errorln("An error occurred while retrieving device smart results", err)
|
logger.Errorln("An error occurred while retrieving device smart results", err)
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"success": false})
|
c.JSON(http.StatusInternalServerError, gin.H{"success": false})
|
||||||
|
|||||||
@@ -1,10 +1,11 @@
|
|||||||
package handler
|
package handler
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"net/http"
|
||||||
|
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/database"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/database"
|
||||||
"github.com/gin-gonic/gin"
|
"github.com/gin-gonic/gin"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
"net/http"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func GetDevicesSummary(c *gin.Context) {
|
func GetDevicesSummary(c *gin.Context) {
|
||||||
|
|||||||
@@ -1,12 +1,13 @@
|
|||||||
package handler
|
package handler
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"net/http"
|
||||||
|
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/database"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/database"
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/models"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models"
|
||||||
"github.com/gin-gonic/gin"
|
"github.com/gin-gonic/gin"
|
||||||
"github.com/samber/lo"
|
"github.com/samber/lo"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
"net/http"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// register devices that are detected by various collectors.
|
// register devices that are detected by various collectors.
|
||||||
@@ -23,9 +24,9 @@ func RegisterDevices(c *gin.Context) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
//filter any device with empty wwn (they are invalid)
|
// Filter any device without a scrutiny UUID. This should never happen...
|
||||||
detectedStorageDevices := lo.Filter[models.Device](collectorDeviceWrapper.Data, func(dev models.Device, _ int) bool {
|
detectedStorageDevices := lo.Filter[models.Device](collectorDeviceWrapper.Data, func(dev models.Device, _ int) bool {
|
||||||
return len(dev.WWN) > 0
|
return !dev.ScrutinyUUID.IsNil()
|
||||||
})
|
})
|
||||||
|
|
||||||
errs := []error{}
|
errs := []error{}
|
||||||
|
|||||||
@@ -1,17 +1,24 @@
|
|||||||
package handler
|
package handler
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"net/http"
|
||||||
|
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/database"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/database"
|
||||||
"github.com/gin-gonic/gin"
|
"github.com/gin-gonic/gin"
|
||||||
|
"github.com/gofrs/uuid/v5"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
"net/http"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func UnarchiveDevice(c *gin.Context) {
|
func UnarchiveDevice(c *gin.Context) {
|
||||||
logger := c.MustGet("LOGGER").(*logrus.Entry)
|
logger := c.MustGet("LOGGER").(*logrus.Entry)
|
||||||
deviceRepo := c.MustGet("DEVICE_REPOSITORY").(database.DeviceRepo)
|
deviceRepo := c.MustGet("DEVICE_REPOSITORY").(database.DeviceRepo)
|
||||||
|
scrutiny_uuid, err := uuid.FromString(c.Param("scrutiny_uuid"))
|
||||||
err := deviceRepo.UpdateDeviceArchived(c, c.Param("wwn"), false)
|
if err != nil {
|
||||||
|
logger.Errorln("Invalid scrutiny uuid", err)
|
||||||
|
c.JSON(http.StatusInternalServerError, gin.H{"success": false})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
err = deviceRepo.UpdateDeviceArchived(c, scrutiny_uuid, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Errorln("An error occurred while unarchiving device", err)
|
logger.Errorln("An error occurred while unarchiving device", err)
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"success": false})
|
c.JSON(http.StatusInternalServerError, gin.H{"success": false})
|
||||||
|
|||||||
@@ -10,6 +10,7 @@ import (
|
|||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/notify"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/notify"
|
||||||
"github.com/gin-gonic/gin"
|
"github.com/gin-gonic/gin"
|
||||||
|
"github.com/gofrs/uuid/v5"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -22,12 +23,15 @@ func UploadDeviceMetrics(c *gin.Context) {
|
|||||||
|
|
||||||
//appConfig := c.MustGet("CONFIG").(config.Interface)
|
//appConfig := c.MustGet("CONFIG").(config.Interface)
|
||||||
|
|
||||||
if c.Param("wwn") == "" {
|
scrutiny_uuid, err := uuid.FromString(c.Param("scrutiny_uuid"))
|
||||||
c.JSON(http.StatusBadRequest, gin.H{"success": false})
|
if err != nil {
|
||||||
|
logger.Errorln("Invalid scrutiny uuid", err)
|
||||||
|
c.JSON(http.StatusInternalServerError, gin.H{"success": false})
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
var collectorSmartData collector.SmartInfo
|
var collectorSmartData collector.SmartInfo
|
||||||
err := c.BindJSON(&collectorSmartData)
|
err = c.BindJSON(&collectorSmartData)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Errorln("Cannot parse SMART data", err)
|
logger.Errorln("Cannot parse SMART data", err)
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"success": false})
|
c.JSON(http.StatusInternalServerError, gin.H{"success": false})
|
||||||
@@ -35,7 +39,7 @@ func UploadDeviceMetrics(c *gin.Context) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
//update the device information if necessary
|
//update the device information if necessary
|
||||||
updatedDevice, err := deviceRepo.UpdateDevice(c, c.Param("wwn"), collectorSmartData)
|
updatedDevice, err := deviceRepo.UpdateDevice(c, scrutiny_uuid, collectorSmartData)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Errorln("An error occurred while updating device data from smartctl metrics:", err)
|
logger.Errorln("An error occurred while updating device data from smartctl metrics:", err)
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"success": false})
|
c.JSON(http.StatusInternalServerError, gin.H{"success": false})
|
||||||
@@ -43,7 +47,7 @@ func UploadDeviceMetrics(c *gin.Context) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// insert smart info
|
// insert smart info
|
||||||
smartData, err := deviceRepo.SaveSmartAttributes(c, c.Param("wwn"), collectorSmartData)
|
smartData, err := deviceRepo.SaveSmartAttributes(c, scrutiny_uuid, collectorSmartData)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Errorln("An error occurred while saving smartctl metrics", err)
|
logger.Errorln("An error occurred while saving smartctl metrics", err)
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"success": false})
|
c.JSON(http.StatusInternalServerError, gin.H{"success": false})
|
||||||
@@ -52,7 +56,7 @@ func UploadDeviceMetrics(c *gin.Context) {
|
|||||||
|
|
||||||
if smartData.Status != pkg.DeviceStatusPassed {
|
if smartData.Status != pkg.DeviceStatusPassed {
|
||||||
//there is a failure detected by Scrutiny, update the device status on the homepage.
|
//there is a failure detected by Scrutiny, update the device status on the homepage.
|
||||||
updatedDevice, err = deviceRepo.UpdateDeviceStatus(c, c.Param("wwn"), smartData.Status)
|
updatedDevice, err = deviceRepo.UpdateDeviceStatus(c, scrutiny_uuid, smartData.Status)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Errorln("An error occurred while updating device status", err)
|
logger.Errorln("An error occurred while updating device status", err)
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"success": false})
|
c.JSON(http.StatusInternalServerError, gin.H{"success": false})
|
||||||
@@ -61,7 +65,7 @@ func UploadDeviceMetrics(c *gin.Context) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// save smart temperature data (ignore failures)
|
// save smart temperature data (ignore failures)
|
||||||
err = deviceRepo.SaveSmartTemperature(c, c.Param("wwn"), updatedDevice.DeviceProtocol, collectorSmartData, appConfig.GetBool(fmt.Sprintf("%s.collector.discard_sct_temp_history", config.DB_USER_SETTINGS_SUBKEY)))
|
err = deviceRepo.SaveSmartTemperature(c, scrutiny_uuid, updatedDevice.DeviceProtocol, collectorSmartData, appConfig.GetBool(fmt.Sprintf("%s.collector.discard_sct_temp_history", config.DB_USER_SETTINGS_SUBKEY)))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Errorln("An error occurred while saving smartctl temp data", err)
|
logger.Errorln("An error occurred while saving smartctl temp data", err)
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"success": false})
|
c.JSON(http.StatusInternalServerError, gin.H{"success": false})
|
||||||
@@ -73,6 +77,7 @@ func UploadDeviceMetrics(c *gin.Context) {
|
|||||||
logger,
|
logger,
|
||||||
updatedDevice,
|
updatedDevice,
|
||||||
smartData,
|
smartData,
|
||||||
|
scrutiny_uuid,
|
||||||
pkg.MetricsStatusThreshold(appConfig.GetInt(fmt.Sprintf("%s.metrics.status_threshold", config.DB_USER_SETTINGS_SUBKEY))),
|
pkg.MetricsStatusThreshold(appConfig.GetInt(fmt.Sprintf("%s.metrics.status_threshold", config.DB_USER_SETTINGS_SUBKEY))),
|
||||||
pkg.MetricsStatusFilterAttributes(appConfig.GetInt(fmt.Sprintf("%s.metrics.status_filter_attributes", config.DB_USER_SETTINGS_SUBKEY))),
|
pkg.MetricsStatusFilterAttributes(appConfig.GetInt(fmt.Sprintf("%s.metrics.status_filter_attributes", config.DB_USER_SETTINGS_SUBKEY))),
|
||||||
appConfig.GetBool(fmt.Sprintf("%s.metrics.repeat_notifications", config.DB_USER_SETTINGS_SUBKEY)),
|
appConfig.GetBool(fmt.Sprintf("%s.metrics.repeat_notifications", config.DB_USER_SETTINGS_SUBKEY)),
|
||||||
|
|||||||
@@ -3,15 +3,15 @@ package middleware
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/gin-gonic/gin"
|
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
|
||||||
"math"
|
"math"
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/gin-gonic/gin"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Middleware based on https://github.com/toorop/gin-logrus/blob/master/logger.go
|
// Middleware based on https://github.com/toorop/gin-logrus/blob/master/logger.go
|
||||||
@@ -40,9 +40,9 @@ func LoggerMiddleware(logger *logrus.Entry) gin.HandlerFunc {
|
|||||||
//clone the request body reader.
|
//clone the request body reader.
|
||||||
var reqBody string
|
var reqBody string
|
||||||
if c.Request.Body != nil {
|
if c.Request.Body != nil {
|
||||||
buf, _ := ioutil.ReadAll(c.Request.Body)
|
buf, _ := io.ReadAll(c.Request.Body)
|
||||||
reqBodyReader1 := ioutil.NopCloser(bytes.NewBuffer(buf))
|
reqBodyReader1 := io.NopCloser(bytes.NewBuffer(buf))
|
||||||
reqBodyReader2 := ioutil.NopCloser(bytes.NewBuffer(buf)) //We have to create a new Buffer, because reqBodyReader1 will be read.
|
reqBodyReader2 := io.NopCloser(bytes.NewBuffer(buf)) //We have to create a new Buffer, because reqBodyReader1 will be read.
|
||||||
c.Request.Body = reqBodyReader2
|
c.Request.Body = reqBodyReader2
|
||||||
reqBody = readBody(reqBodyReader1)
|
reqBody = readBody(reqBodyReader1)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,6 +2,10 @@ package web
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
|
||||||
"github.com/analogj/go-util/utils"
|
"github.com/analogj/go-util/utils"
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/config"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/config"
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/errors"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/errors"
|
||||||
@@ -9,9 +13,6 @@ import (
|
|||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/web/middleware"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/web/middleware"
|
||||||
"github.com/gin-gonic/gin"
|
"github.com/gin-gonic/gin"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
"net/http"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type AppEngine struct {
|
type AppEngine struct {
|
||||||
@@ -37,15 +38,15 @@ func (ae *AppEngine) Setup(logger *logrus.Entry) *gin.Engine {
|
|||||||
api.GET("/health", handler.HealthCheck)
|
api.GET("/health", handler.HealthCheck)
|
||||||
api.POST("/health/notify", handler.SendTestNotification) //check if notifications are configured correctly
|
api.POST("/health/notify", handler.SendTestNotification) //check if notifications are configured correctly
|
||||||
|
|
||||||
api.POST("/devices/register", handler.RegisterDevices) //used by Collector to register new devices and retrieve filtered list
|
api.POST("/devices/register", handler.RegisterDevices) //used by Collector to register new devices and retrieve filtered list
|
||||||
api.GET("/summary", handler.GetDevicesSummary) //used by Dashboard
|
api.GET("/summary", handler.GetDevicesSummary) //used by Dashboard
|
||||||
api.GET("/summary/temp", handler.GetDevicesSummaryTempHistory) //used by Dashboard (Temperature history dropdown)
|
api.GET("/summary/temp", handler.GetDevicesSummaryTempHistory) //used by Dashboard (Temperature history dropdown)
|
||||||
api.POST("/device/:wwn/smart", handler.UploadDeviceMetrics) //used by Collector to upload data
|
api.POST("/device/:scrutiny_uuid/smart", handler.UploadDeviceMetrics) //used by Collector to upload data
|
||||||
api.POST("/device/:wwn/selftest", handler.UploadDeviceSelfTests)
|
api.POST("/device/:scrutiny_uuid/selftest", handler.UploadDeviceSelfTests)
|
||||||
api.GET("/device/:wwn/details", handler.GetDeviceDetails) //used by Details
|
api.GET("/device/:scrutiny_uuid/details", handler.GetDeviceDetails) //used by Details
|
||||||
api.POST("/device/:wwn/archive", handler.ArchiveDevice) //used by UI to archive device
|
api.POST("/device/:scrutiny_uuid/archive", handler.ArchiveDevice) //used by UI to archive device
|
||||||
api.POST("/device/:wwn/unarchive", handler.UnarchiveDevice) //used by UI to unarchive device
|
api.POST("/device/:scrutiny_uuid/unarchive", handler.UnarchiveDevice) //used by UI to unarchive device
|
||||||
api.DELETE("/device/:wwn", handler.DeleteDevice) //used by UI to delete device
|
api.DELETE("/device/:scrutiny_uuid", handler.DeleteDevice) //used by UI to delete device
|
||||||
|
|
||||||
api.GET("/settings", handler.GetSettings) //used to get settings
|
api.GET("/settings", handler.GetSettings) //used to get settings
|
||||||
api.POST("/settings", handler.SaveSettings) //used to save settings
|
api.POST("/settings", handler.SaveSettings) //used to save settings
|
||||||
|
|||||||
@@ -5,7 +5,6 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/http/httptest"
|
"net/http/httptest"
|
||||||
"os"
|
"os"
|
||||||
@@ -20,10 +19,11 @@ import (
|
|||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/models"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models"
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
|
||||||
"github.com/analogj/scrutiny/webapp/backend/pkg/web"
|
"github.com/analogj/scrutiny/webapp/backend/pkg/web"
|
||||||
"github.com/golang/mock/gomock"
|
"github.com/gofrs/uuid/v5"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
"github.com/stretchr/testify/suite"
|
"github.com/stretchr/testify/suite"
|
||||||
|
"go.uber.org/mock/gomock"
|
||||||
)
|
)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@@ -36,7 +36,7 @@ docker run --rm -it -p 8086:8086 \
|
|||||||
-e DOCKER_INFLUXDB_INIT_ORG=scrutiny \
|
-e DOCKER_INFLUXDB_INIT_ORG=scrutiny \
|
||||||
-e DOCKER_INFLUXDB_INIT_BUCKET=metrics \
|
-e DOCKER_INFLUXDB_INIT_BUCKET=metrics \
|
||||||
-e DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=my-super-secret-auth-token \
|
-e DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=my-super-secret-auth-token \
|
||||||
influxdb:2.0
|
influxdb:2.2
|
||||||
*/
|
*/
|
||||||
|
|
||||||
//func TestMain(m *testing.M) {
|
//func TestMain(m *testing.M) {
|
||||||
@@ -52,7 +52,7 @@ func helperReadSmartDataFileFixTimestamp(t *testing.T, smartDataFilepath string)
|
|||||||
metricsfile, err := os.Open(smartDataFilepath)
|
metricsfile, err := os.Open(smartDataFilepath)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
metricsFileData, err := ioutil.ReadAll(metricsfile)
|
metricsFileData, err := io.ReadAll(metricsfile)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
//unmarshal because we need to change the timestamp
|
//unmarshal because we need to change the timestamp
|
||||||
@@ -87,10 +87,9 @@ func TestServerTestSuite_WithCustomBasePath(t *testing.T) {
|
|||||||
|
|
||||||
func (suite *ServerTestSuite) TestHealthRoute() {
|
func (suite *ServerTestSuite) TestHealthRoute() {
|
||||||
//setup
|
//setup
|
||||||
parentPath, _ := ioutil.TempDir("", "")
|
parentPath, _ := os.MkdirTemp("", "")
|
||||||
defer os.RemoveAll(parentPath)
|
defer os.RemoveAll(parentPath)
|
||||||
mockCtrl := gomock.NewController(suite.T())
|
mockCtrl := gomock.NewController(suite.T())
|
||||||
defer mockCtrl.Finish()
|
|
||||||
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
||||||
fakeConfig.EXPECT().SetDefault(gomock.Any(), gomock.Any()).AnyTimes()
|
fakeConfig.EXPECT().SetDefault(gomock.Any(), gomock.Any()).AnyTimes()
|
||||||
fakeConfig.EXPECT().UnmarshalKey(gomock.Any(), gomock.Any()).AnyTimes().Return(nil)
|
fakeConfig.EXPECT().UnmarshalKey(gomock.Any(), gomock.Any()).AnyTimes().Return(nil)
|
||||||
@@ -131,10 +130,9 @@ func (suite *ServerTestSuite) TestHealthRoute() {
|
|||||||
|
|
||||||
func (suite *ServerTestSuite) TestRegisterDevicesRoute() {
|
func (suite *ServerTestSuite) TestRegisterDevicesRoute() {
|
||||||
//setup
|
//setup
|
||||||
parentPath, _ := ioutil.TempDir("", "")
|
parentPath, _ := os.MkdirTemp("", "")
|
||||||
defer os.RemoveAll(parentPath)
|
defer os.RemoveAll(parentPath)
|
||||||
mockCtrl := gomock.NewController(suite.T())
|
mockCtrl := gomock.NewController(suite.T())
|
||||||
defer mockCtrl.Finish()
|
|
||||||
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
||||||
fakeConfig.EXPECT().SetDefault(gomock.Any(), gomock.Any()).AnyTimes()
|
fakeConfig.EXPECT().SetDefault(gomock.Any(), gomock.Any()).AnyTimes()
|
||||||
fakeConfig.EXPECT().UnmarshalKey(gomock.Any(), gomock.Any()).AnyTimes().Return(nil)
|
fakeConfig.EXPECT().UnmarshalKey(gomock.Any(), gomock.Any()).AnyTimes().Return(nil)
|
||||||
@@ -174,10 +172,9 @@ func (suite *ServerTestSuite) TestRegisterDevicesRoute() {
|
|||||||
|
|
||||||
func (suite *ServerTestSuite) TestUploadDeviceMetricsRoute() {
|
func (suite *ServerTestSuite) TestUploadDeviceMetricsRoute() {
|
||||||
//setup
|
//setup
|
||||||
parentPath, _ := ioutil.TempDir("", "")
|
parentPath, _ := os.MkdirTemp("", "")
|
||||||
defer os.RemoveAll(parentPath)
|
defer os.RemoveAll(parentPath)
|
||||||
mockCtrl := gomock.NewController(suite.T())
|
mockCtrl := gomock.NewController(suite.T())
|
||||||
defer mockCtrl.Finish()
|
|
||||||
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
||||||
fakeConfig.EXPECT().SetDefault(gomock.Any(), gomock.Any()).AnyTimes()
|
fakeConfig.EXPECT().SetDefault(gomock.Any(), gomock.Any()).AnyTimes()
|
||||||
fakeConfig.EXPECT().UnmarshalKey(gomock.Any(), gomock.Any()).AnyTimes().Return(nil)
|
fakeConfig.EXPECT().UnmarshalKey(gomock.Any(), gomock.Any()).AnyTimes().Return(nil)
|
||||||
@@ -220,7 +217,7 @@ func (suite *ServerTestSuite) TestUploadDeviceMetricsRoute() {
|
|||||||
require.Equal(suite.T(), 200, wr.Code)
|
require.Equal(suite.T(), 200, wr.Code)
|
||||||
|
|
||||||
mr := httptest.NewRecorder()
|
mr := httptest.NewRecorder()
|
||||||
req, _ = http.NewRequest("POST", suite.Basepath+"/api/device/0x5000cca264eb01d7/smart", metricsfile)
|
req, _ = http.NewRequest("POST", suite.Basepath+"/api/device/9a4d34b5-b2ee-51ef-8506-90eea09be417/smart", metricsfile)
|
||||||
router.ServeHTTP(mr, req)
|
router.ServeHTTP(mr, req)
|
||||||
require.Equal(suite.T(), 200, mr.Code)
|
require.Equal(suite.T(), 200, mr.Code)
|
||||||
|
|
||||||
@@ -229,10 +226,9 @@ func (suite *ServerTestSuite) TestUploadDeviceMetricsRoute() {
|
|||||||
|
|
||||||
func (suite *ServerTestSuite) TestPopulateMultiple() {
|
func (suite *ServerTestSuite) TestPopulateMultiple() {
|
||||||
//setup
|
//setup
|
||||||
parentPath, _ := ioutil.TempDir("", "")
|
parentPath, _ := os.MkdirTemp("", "")
|
||||||
defer os.RemoveAll(parentPath)
|
defer os.RemoveAll(parentPath)
|
||||||
mockCtrl := gomock.NewController(suite.T())
|
mockCtrl := gomock.NewController(suite.T())
|
||||||
defer mockCtrl.Finish()
|
|
||||||
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
||||||
fakeConfig.EXPECT().SetDefault(gomock.Any(), gomock.Any()).AnyTimes()
|
fakeConfig.EXPECT().SetDefault(gomock.Any(), gomock.Any()).AnyTimes()
|
||||||
fakeConfig.EXPECT().UnmarshalKey(gomock.Any(), gomock.Any()).AnyTimes().Return(nil)
|
fakeConfig.EXPECT().UnmarshalKey(gomock.Any(), gomock.Any()).AnyTimes().Return(nil)
|
||||||
@@ -280,28 +276,31 @@ func (suite *ServerTestSuite) TestPopulateMultiple() {
|
|||||||
router.ServeHTTP(wr, req)
|
router.ServeHTTP(wr, req)
|
||||||
require.Equal(suite.T(), 200, wr.Code)
|
require.Equal(suite.T(), 200, wr.Code)
|
||||||
|
|
||||||
|
// NOTE: The scrutiny_uuid's below must come from devicesfile because those get inserted into the database.
|
||||||
|
// They don't match the scrutiny_uuid that would be derived from the smart info files because the drives
|
||||||
|
// in those files don't match those in the registration. Currently, scrutiny does not reconcile the two.
|
||||||
mr := httptest.NewRecorder()
|
mr := httptest.NewRecorder()
|
||||||
req, _ = http.NewRequest("POST", suite.Basepath+"/api/device/0x5000cca264eb01d7/smart", metricsfile)
|
req, _ = http.NewRequest("POST", suite.Basepath+"/api/device/ecfaaf20-d1f6-558b-b33a-3e8db19a6c2c/smart", metricsfile)
|
||||||
router.ServeHTTP(mr, req)
|
router.ServeHTTP(mr, req)
|
||||||
require.Equal(suite.T(), 200, mr.Code)
|
require.Equal(suite.T(), 200, mr.Code)
|
||||||
|
|
||||||
fr := httptest.NewRecorder()
|
fr := httptest.NewRecorder()
|
||||||
req, _ = http.NewRequest("POST", suite.Basepath+"/api/device/0x5000cca264ec3183/smart", failfile)
|
req, _ = http.NewRequest("POST", suite.Basepath+"/api/device/3ea22b35-682b-49fb-a655-abffed108e48/smart", failfile)
|
||||||
router.ServeHTTP(fr, req)
|
router.ServeHTTP(fr, req)
|
||||||
require.Equal(suite.T(), 200, fr.Code)
|
require.Equal(suite.T(), 200, fr.Code)
|
||||||
|
|
||||||
nr := httptest.NewRecorder()
|
nr := httptest.NewRecorder()
|
||||||
req, _ = http.NewRequest("POST", suite.Basepath+"/api/device/0x5002538e40a22954/smart", nvmefile)
|
req, _ = http.NewRequest("POST", suite.Basepath+"/api/device/d8796fe7-2422-520c-8991-e970993dad3e/smart", nvmefile)
|
||||||
router.ServeHTTP(nr, req)
|
router.ServeHTTP(nr, req)
|
||||||
require.Equal(suite.T(), 200, nr.Code)
|
require.Equal(suite.T(), 200, nr.Code)
|
||||||
|
|
||||||
sr := httptest.NewRecorder()
|
sr := httptest.NewRecorder()
|
||||||
req, _ = http.NewRequest("POST", suite.Basepath+"/api/device/0x5000cca252c859cc/smart", scsifile)
|
req, _ = http.NewRequest("POST", suite.Basepath+"/api/device/00328b73-9f8a-53ad-8f20-8d0b1be00f47/smart", scsifile)
|
||||||
router.ServeHTTP(sr, req)
|
router.ServeHTTP(sr, req)
|
||||||
require.Equal(suite.T(), 200, sr.Code)
|
require.Equal(suite.T(), 200, sr.Code)
|
||||||
|
|
||||||
s2r := httptest.NewRecorder()
|
s2r := httptest.NewRecorder()
|
||||||
req, _ = http.NewRequest("POST", suite.Basepath+"/api/device/0x5000cca264ebc248/smart", scsi2file)
|
req, _ = http.NewRequest("POST", suite.Basepath+"/api/device/e5ccc378-24fc-5a9d-b1ce-8732096a9ea5/smart", scsi2file)
|
||||||
router.ServeHTTP(s2r, req)
|
router.ServeHTTP(s2r, req)
|
||||||
require.Equal(suite.T(), 200, s2r.Code)
|
require.Equal(suite.T(), 200, s2r.Code)
|
||||||
|
|
||||||
@@ -311,10 +310,9 @@ func (suite *ServerTestSuite) TestPopulateMultiple() {
|
|||||||
//TODO: this test should use a recorded request/response playback.
|
//TODO: this test should use a recorded request/response playback.
|
||||||
//func TestSendTestNotificationRoute(t *testing.T) {
|
//func TestSendTestNotificationRoute(t *testing.T) {
|
||||||
// //setup
|
// //setup
|
||||||
// parentPath, _ := ioutil.TempDir("", "")
|
// parentPath, _ := os.MkdirTemp("", "")
|
||||||
// defer os.RemoveAll(parentPath)
|
// defer os.RemoveAll(parentPath)
|
||||||
// mockCtrl := gomock.NewController(t)
|
// mockCtrl := gomock.NewController(t)
|
||||||
// defer mockCtrl.Finish()
|
|
||||||
// fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
// fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
||||||
// fakeConfig.EXPECT().GetString("web.database.location").AnyTimes().Return(path.Join(parentPath, "scrutiny_test.db"))
|
// fakeConfig.EXPECT().GetString("web.database.location").AnyTimes().Return(path.Join(parentPath, "scrutiny_test.db"))
|
||||||
// fakeConfig.EXPECT().GetString("web.src.frontend.path").AnyTimes().Return(parentPath)
|
// fakeConfig.EXPECT().GetString("web.src.frontend.path").AnyTimes().Return(parentPath)
|
||||||
@@ -335,10 +333,9 @@ func (suite *ServerTestSuite) TestPopulateMultiple() {
|
|||||||
|
|
||||||
func (suite *ServerTestSuite) TestSendTestNotificationRoute_WebhookFailure() {
|
func (suite *ServerTestSuite) TestSendTestNotificationRoute_WebhookFailure() {
|
||||||
//setup
|
//setup
|
||||||
parentPath, _ := ioutil.TempDir("", "")
|
parentPath, _ := os.MkdirTemp("", "")
|
||||||
defer os.RemoveAll(parentPath)
|
defer os.RemoveAll(parentPath)
|
||||||
mockCtrl := gomock.NewController(suite.T())
|
mockCtrl := gomock.NewController(suite.T())
|
||||||
defer mockCtrl.Finish()
|
|
||||||
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
||||||
fakeConfig.EXPECT().SetDefault(gomock.Any(), gomock.Any()).AnyTimes()
|
fakeConfig.EXPECT().SetDefault(gomock.Any(), gomock.Any()).AnyTimes()
|
||||||
fakeConfig.EXPECT().UnmarshalKey(gomock.Any(), gomock.Any()).AnyTimes().Return(nil)
|
fakeConfig.EXPECT().UnmarshalKey(gomock.Any(), gomock.Any()).AnyTimes().Return(nil)
|
||||||
@@ -381,10 +378,9 @@ func (suite *ServerTestSuite) TestSendTestNotificationRoute_WebhookFailure() {
|
|||||||
|
|
||||||
func (suite *ServerTestSuite) TestSendTestNotificationRoute_ScriptFailure() {
|
func (suite *ServerTestSuite) TestSendTestNotificationRoute_ScriptFailure() {
|
||||||
//setup
|
//setup
|
||||||
parentPath, _ := ioutil.TempDir("", "")
|
parentPath, _ := os.MkdirTemp("", "")
|
||||||
defer os.RemoveAll(parentPath)
|
defer os.RemoveAll(parentPath)
|
||||||
mockCtrl := gomock.NewController(suite.T())
|
mockCtrl := gomock.NewController(suite.T())
|
||||||
defer mockCtrl.Finish()
|
|
||||||
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
||||||
fakeConfig.EXPECT().SetDefault(gomock.Any(), gomock.Any()).AnyTimes()
|
fakeConfig.EXPECT().SetDefault(gomock.Any(), gomock.Any()).AnyTimes()
|
||||||
fakeConfig.EXPECT().UnmarshalKey(gomock.Any(), gomock.Any()).AnyTimes().Return(nil)
|
fakeConfig.EXPECT().UnmarshalKey(gomock.Any(), gomock.Any()).AnyTimes().Return(nil)
|
||||||
@@ -427,10 +423,9 @@ func (suite *ServerTestSuite) TestSendTestNotificationRoute_ScriptFailure() {
|
|||||||
|
|
||||||
func (suite *ServerTestSuite) TestSendTestNotificationRoute_ScriptSuccess() {
|
func (suite *ServerTestSuite) TestSendTestNotificationRoute_ScriptSuccess() {
|
||||||
//setup
|
//setup
|
||||||
parentPath, _ := ioutil.TempDir("", "")
|
parentPath, _ := os.MkdirTemp("", "")
|
||||||
defer os.RemoveAll(parentPath)
|
defer os.RemoveAll(parentPath)
|
||||||
mockCtrl := gomock.NewController(suite.T())
|
mockCtrl := gomock.NewController(suite.T())
|
||||||
defer mockCtrl.Finish()
|
|
||||||
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
||||||
fakeConfig.EXPECT().SetDefault(gomock.Any(), gomock.Any()).AnyTimes()
|
fakeConfig.EXPECT().SetDefault(gomock.Any(), gomock.Any()).AnyTimes()
|
||||||
fakeConfig.EXPECT().UnmarshalKey(gomock.Any(), gomock.Any()).AnyTimes().Return(nil)
|
fakeConfig.EXPECT().UnmarshalKey(gomock.Any(), gomock.Any()).AnyTimes().Return(nil)
|
||||||
@@ -473,10 +468,9 @@ func (suite *ServerTestSuite) TestSendTestNotificationRoute_ScriptSuccess() {
|
|||||||
|
|
||||||
func (suite *ServerTestSuite) TestSendTestNotificationRoute_ShoutrrrFailure() {
|
func (suite *ServerTestSuite) TestSendTestNotificationRoute_ShoutrrrFailure() {
|
||||||
//setup
|
//setup
|
||||||
parentPath, _ := ioutil.TempDir("", "")
|
parentPath, _ := os.MkdirTemp("", "")
|
||||||
defer os.RemoveAll(parentPath)
|
defer os.RemoveAll(parentPath)
|
||||||
mockCtrl := gomock.NewController(suite.T())
|
mockCtrl := gomock.NewController(suite.T())
|
||||||
defer mockCtrl.Finish()
|
|
||||||
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
||||||
fakeConfig.EXPECT().SetDefault(gomock.Any(), gomock.Any()).AnyTimes()
|
fakeConfig.EXPECT().SetDefault(gomock.Any(), gomock.Any()).AnyTimes()
|
||||||
fakeConfig.EXPECT().UnmarshalKey(gomock.Any(), gomock.Any()).AnyTimes().Return(nil)
|
fakeConfig.EXPECT().UnmarshalKey(gomock.Any(), gomock.Any()).AnyTimes().Return(nil)
|
||||||
@@ -518,10 +512,9 @@ func (suite *ServerTestSuite) TestSendTestNotificationRoute_ShoutrrrFailure() {
|
|||||||
|
|
||||||
func (suite *ServerTestSuite) TestGetDevicesSummaryRoute_Nvme() {
|
func (suite *ServerTestSuite) TestGetDevicesSummaryRoute_Nvme() {
|
||||||
//setup
|
//setup
|
||||||
parentPath, _ := ioutil.TempDir("", "")
|
parentPath, _ := os.MkdirTemp("", "")
|
||||||
defer os.RemoveAll(parentPath)
|
defer os.RemoveAll(parentPath)
|
||||||
mockCtrl := gomock.NewController(suite.T())
|
mockCtrl := gomock.NewController(suite.T())
|
||||||
defer mockCtrl.Finish()
|
|
||||||
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
||||||
fakeConfig.EXPECT().SetDefault(gomock.Any(), gomock.Any()).AnyTimes()
|
fakeConfig.EXPECT().SetDefault(gomock.Any(), gomock.Any()).AnyTimes()
|
||||||
fakeConfig.EXPECT().UnmarshalKey(gomock.Any(), gomock.Any()).AnyTimes().Return(nil)
|
fakeConfig.EXPECT().UnmarshalKey(gomock.Any(), gomock.Any()).AnyTimes().Return(nil)
|
||||||
@@ -566,7 +559,7 @@ func (suite *ServerTestSuite) TestGetDevicesSummaryRoute_Nvme() {
|
|||||||
require.Equal(suite.T(), 200, wr.Code)
|
require.Equal(suite.T(), 200, wr.Code)
|
||||||
|
|
||||||
mr := httptest.NewRecorder()
|
mr := httptest.NewRecorder()
|
||||||
req, _ = http.NewRequest("POST", suite.Basepath+"/api/device/a4c8e8ed-11a0-4c97-9bba-306440f1b944/smart", metricsfile)
|
req, _ = http.NewRequest("POST", suite.Basepath+"/api/device/bde1d2d2-7e5c-525a-8327-6adbfa382637/smart", metricsfile)
|
||||||
router.ServeHTTP(mr, req)
|
router.ServeHTTP(mr, req)
|
||||||
require.Equal(suite.T(), 200, mr.Code)
|
require.Equal(suite.T(), 200, mr.Code)
|
||||||
|
|
||||||
@@ -579,6 +572,8 @@ func (suite *ServerTestSuite) TestGetDevicesSummaryRoute_Nvme() {
|
|||||||
require.NoError(suite.T(), err)
|
require.NoError(suite.T(), err)
|
||||||
|
|
||||||
//assert
|
//assert
|
||||||
require.Equal(suite.T(), "a4c8e8ed-11a0-4c97-9bba-306440f1b944", deviceSummary.Data.Summary["a4c8e8ed-11a0-4c97-9bba-306440f1b944"].Device.WWN)
|
deviceUUIDString := "bde1d2d2-7e5c-525a-8327-6adbfa382637"
|
||||||
require.Equal(suite.T(), pkg.DeviceStatusPassed, deviceSummary.Data.Summary["a4c8e8ed-11a0-4c97-9bba-306440f1b944"].Device.DeviceStatus)
|
deviceUUID := uuid.Must(uuid.FromString(deviceUUIDString))
|
||||||
|
require.Equal(suite.T(), deviceUUID, deviceSummary.Data.Summary[deviceUUIDString].Device.ScrutinyUUID)
|
||||||
|
require.Equal(suite.T(), pkg.DeviceStatusPassed, deviceSummary.Data.Summary[deviceUUIDString].Device.DeviceStatus)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -14,7 +14,8 @@
|
|||||||
"form_factor": "",
|
"form_factor": "",
|
||||||
"smart_support": false,
|
"smart_support": false,
|
||||||
"device_protocol": "NVMe",
|
"device_protocol": "NVMe",
|
||||||
"device_type": "nvme"
|
"device_type": "nvme",
|
||||||
|
"scrutiny_uuid": "bde1d2d2-7e5c-525a-8327-6adbfa382637"
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|||||||
+17
-10
@@ -12,27 +12,29 @@
|
|||||||
"rotational_speed": 0,
|
"rotational_speed": 0,
|
||||||
"capacity": 500107862016,
|
"capacity": 500107862016,
|
||||||
"form_factor": "",
|
"form_factor": "",
|
||||||
"smart_support": false
|
"smart_support": false,
|
||||||
|
"scrutiny_uuid": "ecfaaf20-d1f6-558b-b33a-3e8db19a6c2c"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"wwn": "0x5000cca264eb01d7",
|
"wwn": "0x5000cca264eb01d7",
|
||||||
"device_name": "sdb",
|
"device_name": "sdb",
|
||||||
"manufacturer": "ATA",
|
"manufacturer": "ATA",
|
||||||
"model_name": "WDC_WD140EDFZ-11A0VA0",
|
"model_name": "WDC WD140EDFZ-11A0VA0",
|
||||||
"interface_type": "SCSI",
|
"interface_type": "SCSI",
|
||||||
"interface_speed": "",
|
"interface_speed": "",
|
||||||
"serial_number": "9RK1XXXXX",
|
"serial_number": "9RK1XXXX",
|
||||||
"firmware": "",
|
"firmware": "",
|
||||||
"rotational_speed": 0,
|
"rotational_speed": 0,
|
||||||
"capacity": 14000519643136,
|
"capacity": 14000519643136,
|
||||||
"form_factor": "",
|
"form_factor": "",
|
||||||
"smart_support": false
|
"smart_support": false,
|
||||||
|
"scrutiny_uuid": "3ea22b35-682b-49fb-a655-abffed108e48"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"wwn": "0x5000cca264ec3183",
|
"wwn": "0x5000cca264ec3183",
|
||||||
"device_name": "sdc",
|
"device_name": "sdc",
|
||||||
"manufacturer": "ATA",
|
"manufacturer": "ATA",
|
||||||
"model_name": "WDC_WD140EDFZ-11A0VA0",
|
"model_name": "WDC WD140EDFZ-11A0VA0",
|
||||||
"interface_type": "SCSI",
|
"interface_type": "SCSI",
|
||||||
"interface_speed": "",
|
"interface_speed": "",
|
||||||
"serial_number": "9RK4XXXXX",
|
"serial_number": "9RK4XXXXX",
|
||||||
@@ -40,7 +42,8 @@
|
|||||||
"rotational_speed": 0,
|
"rotational_speed": 0,
|
||||||
"capacity": 14000519643136,
|
"capacity": 14000519643136,
|
||||||
"form_factor": "",
|
"form_factor": "",
|
||||||
"smart_support": false
|
"smart_support": false,
|
||||||
|
"scrutiny_uuid": "42caca8a-9b95-5c75-b059-305771a2a193"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"wwn": "0x5000cca252c859cc",
|
"wwn": "0x5000cca252c859cc",
|
||||||
@@ -54,7 +57,8 @@
|
|||||||
"rotational_speed": 0,
|
"rotational_speed": 0,
|
||||||
"capacity": 8001563222016,
|
"capacity": 8001563222016,
|
||||||
"form_factor": "",
|
"form_factor": "",
|
||||||
"smart_support": false
|
"smart_support": false,
|
||||||
|
"scrutiny_uuid": "d8796fe7-2422-520c-8991-e970993dad3e"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"wwn": "0x5000cca264ebc248",
|
"wwn": "0x5000cca264ebc248",
|
||||||
@@ -68,7 +72,8 @@
|
|||||||
"rotational_speed": 0,
|
"rotational_speed": 0,
|
||||||
"capacity": 14000519643136,
|
"capacity": 14000519643136,
|
||||||
"form_factor": "",
|
"form_factor": "",
|
||||||
"smart_support": false
|
"smart_support": false,
|
||||||
|
"scrutiny_uuid": "00328b73-9f8a-53ad-8f20-8d0b1be00f47"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"wwn": "0x50014ee20b2a72a9",
|
"wwn": "0x50014ee20b2a72a9",
|
||||||
@@ -82,7 +87,8 @@
|
|||||||
"rotational_speed": 0,
|
"rotational_speed": 0,
|
||||||
"capacity": 6001175126016,
|
"capacity": 6001175126016,
|
||||||
"form_factor": "",
|
"form_factor": "",
|
||||||
"smart_support": false
|
"smart_support": false,
|
||||||
|
"scrutiny_uuid": "e5ccc378-24fc-5a9d-b1ce-8732096a9ea5"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"wwn": "0x5000c500673e6b5f",
|
"wwn": "0x5000c500673e6b5f",
|
||||||
@@ -96,7 +102,8 @@
|
|||||||
"rotational_speed": 0,
|
"rotational_speed": 0,
|
||||||
"capacity": 6001175126016,
|
"capacity": 6001175126016,
|
||||||
"form_factor": "",
|
"form_factor": "",
|
||||||
"smart_support": false
|
"smart_support": false,
|
||||||
|
"scrutiny_uuid": "acfbce7d-0e19-579b-895e-85809dab63fb"
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,15 +4,16 @@
|
|||||||
"wwn": "0x5000cca264eb01d7",
|
"wwn": "0x5000cca264eb01d7",
|
||||||
"device_name": "sdb",
|
"device_name": "sdb",
|
||||||
"manufacturer": "ATA",
|
"manufacturer": "ATA",
|
||||||
"model_name": "WDC_WD140EDFZ-11A0VA0",
|
"model_name": "WDC WD140EDFZ-11A0VA0",
|
||||||
"interface_type": "SCSI",
|
"interface_type": "SCSI",
|
||||||
"interface_speed": "",
|
"interface_speed": "",
|
||||||
"serial_number": "9RK1XXXXX",
|
"serial_number": "9RK1XXXX",
|
||||||
"firmware": "",
|
"firmware": "",
|
||||||
"rotational_speed": 0,
|
"rotational_speed": 0,
|
||||||
"capacity": 14000519643136,
|
"capacity": 14000519643136,
|
||||||
"form_factor": "",
|
"form_factor": "",
|
||||||
"smart_support": false
|
"smart_support": false,
|
||||||
|
"scrutiny_uuid": "9a4d34b5-b2ee-51ef-8506-90eea09be417"
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|||||||
Generated
+437
-176
File diff suppressed because it is too large
Load Diff
@@ -35,7 +35,7 @@
|
|||||||
"crypto-js": "^4.1.1",
|
"crypto-js": "^4.1.1",
|
||||||
"highlight.js": "^11.6.0",
|
"highlight.js": "^11.6.0",
|
||||||
"humanize-duration": "^3.27.3",
|
"humanize-duration": "^3.27.3",
|
||||||
"lodash": "4.17.21",
|
"lodash": "4.17.23",
|
||||||
"moment": "^2.29.4",
|
"moment": "^2.29.4",
|
||||||
"ng-apexcharts": "^1.7.4",
|
"ng-apexcharts": "^1.7.4",
|
||||||
"ngx-markdown": "^13.1.0",
|
"ngx-markdown": "^13.1.0",
|
||||||
|
|||||||
@@ -38,7 +38,7 @@ export const appRoutes: Route[] = [
|
|||||||
|
|
||||||
// Example
|
// Example
|
||||||
{path: 'dashboard', loadChildren: () => import('app/modules/dashboard/dashboard.module').then(m => m.DashboardModule)},
|
{path: 'dashboard', loadChildren: () => import('app/modules/dashboard/dashboard.module').then(m => m.DashboardModule)},
|
||||||
{path: 'device/:wwn', loadChildren: () => import('app/modules/detail/detail.module').then(m => m.DetailModule)}
|
{path: 'device/:scrutiny_uuid', loadChildren: () => import('app/modules/detail/detail.module').then(m => m.DetailModule)}
|
||||||
|
|
||||||
// 404 & Catch all
|
// 404 & Catch all
|
||||||
// {path: '404-not-found', pathMatch: 'full', loadChildren: () => import('app/modules/admin/pages/errors/error-404/error-404.module').then(m => m.Error404Module)},
|
// {path: '404-not-found', pathMatch: 'full', loadChildren: () => import('app/modules/admin/pages/errors/error-404/error-404.module').then(m => m.Error404Module)},
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
// maps to webapp/backend/pkg/models/device.go
|
// maps to webapp/backend/pkg/models/device.go
|
||||||
export interface DeviceModel {
|
export interface DeviceModel {
|
||||||
archived?: boolean;
|
archived?: boolean;
|
||||||
|
scrutiny_uuid: string;
|
||||||
wwn: string;
|
wwn: string;
|
||||||
device_name?: string;
|
device_name?: string;
|
||||||
device_uuid?: string;
|
device_uuid?: string;
|
||||||
|
|||||||
@@ -4,6 +4,7 @@ import {SmartAttributeModel} from './smart-attribute-model';
|
|||||||
export interface SmartModel {
|
export interface SmartModel {
|
||||||
date: string;
|
date: string;
|
||||||
device_wwn: string;
|
device_wwn: string;
|
||||||
|
scrutiny_uuid: string;
|
||||||
device_protocol: string;
|
device_protocol: string;
|
||||||
|
|
||||||
temp: number;
|
temp: number;
|
||||||
|
|||||||
@@ -40,7 +40,7 @@ export class DetailsMockApi implements TreoMockApi
|
|||||||
register(): void
|
register(): void
|
||||||
{
|
{
|
||||||
this._treoMockApiService
|
this._treoMockApiService
|
||||||
.onGet('/api/device/0x5002538e40a22954/details')
|
.onGet('/api/device/ecfaaf20-d1f6-558b-b33a-3e8db19a6c2c/details')
|
||||||
.reply(() => {
|
.reply(() => {
|
||||||
|
|
||||||
return [
|
return [
|
||||||
@@ -50,7 +50,7 @@ export class DetailsMockApi implements TreoMockApi
|
|||||||
});
|
});
|
||||||
|
|
||||||
this._treoMockApiService
|
this._treoMockApiService
|
||||||
.onGet('/api/device/0x5000cca264eb01d7/details')
|
.onGet('/api/device/3ea22b35-682b-49fb-a655-abffed108e48/details')
|
||||||
.reply(() => {
|
.reply(() => {
|
||||||
|
|
||||||
return [
|
return [
|
||||||
@@ -60,7 +60,7 @@ export class DetailsMockApi implements TreoMockApi
|
|||||||
});
|
});
|
||||||
|
|
||||||
this._treoMockApiService
|
this._treoMockApiService
|
||||||
.onGet('/api/device/0x5000cca264ec3183/details')
|
.onGet('/api/device/42caca8a-9b95-5c75-b059-305771a2a193/details')
|
||||||
.reply(() => {
|
.reply(() => {
|
||||||
|
|
||||||
return [
|
return [
|
||||||
@@ -70,7 +70,7 @@ export class DetailsMockApi implements TreoMockApi
|
|||||||
});
|
});
|
||||||
|
|
||||||
this._treoMockApiService
|
this._treoMockApiService
|
||||||
.onGet('/api/device/0x5000cca252c859cc/details')
|
.onGet('/api/device/d8796fe7-2422-520c-8991-e970993dad3e/details')
|
||||||
.reply(() => {
|
.reply(() => {
|
||||||
|
|
||||||
return [
|
return [
|
||||||
@@ -80,7 +80,17 @@ export class DetailsMockApi implements TreoMockApi
|
|||||||
});
|
});
|
||||||
|
|
||||||
this._treoMockApiService
|
this._treoMockApiService
|
||||||
.onGet('/api/device/0x5000cca264ebc248/details')
|
.onGet('/api/device/00328b73-9f8a-53ad-8f20-8d0b1be00f47/details')
|
||||||
|
.reply(() => {
|
||||||
|
|
||||||
|
return [
|
||||||
|
200,
|
||||||
|
_.cloneDeep(sde)
|
||||||
|
];
|
||||||
|
});
|
||||||
|
|
||||||
|
this._treoMockApiService
|
||||||
|
.onGet('/api/device/e5ccc378-24fc-5a9d-b1ce-8732096a9ea5/details')
|
||||||
.reply(() => {
|
.reply(() => {
|
||||||
|
|
||||||
return [
|
return [
|
||||||
|
|||||||
@@ -5,6 +5,7 @@ export const sda = {
|
|||||||
'UpdatedAt': '2021-10-24T16:37:56.981833-07:00',
|
'UpdatedAt': '2021-10-24T16:37:56.981833-07:00',
|
||||||
'DeletedAt': null,
|
'DeletedAt': null,
|
||||||
'wwn': '0x5002538e40a22954',
|
'wwn': '0x5002538e40a22954',
|
||||||
|
'scrutiny_uuid': 'ecfaaf20-d1f6-558b-b33a-3e8db19a6c2c',
|
||||||
'device_name': 'sda',
|
'device_name': 'sda',
|
||||||
'manufacturer': 'ATA',
|
'manufacturer': 'ATA',
|
||||||
'model_name': 'Samsung_SSD_860_EVO_500GB',
|
'model_name': 'Samsung_SSD_860_EVO_500GB',
|
||||||
@@ -26,6 +27,7 @@ export const sda = {
|
|||||||
'smart_results': [{
|
'smart_results': [{
|
||||||
'date': '2021-10-24T23:20:44Z',
|
'date': '2021-10-24T23:20:44Z',
|
||||||
'device_wwn': '0x5002538e40a22954',
|
'device_wwn': '0x5002538e40a22954',
|
||||||
|
'scrutiny_uuid': 'ecfaaf20-d1f6-558b-b33a-3e8db19a6c2c',
|
||||||
'device_protocol': 'NVMe',
|
'device_protocol': 'NVMe',
|
||||||
'temp': 36,
|
'temp': 36,
|
||||||
'power_on_hours': 2401,
|
'power_on_hours': 2401,
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user