Compare commits
339 Commits
| SHA1 | Author | Date | |
|---|---|---|---|
| ee893cc360 | |||
| d73907d357 | |||
| 0b50305f38 | |||
| ee3d719c3a | |||
| d76cdca4a5 | |||
| b34ed607b7 | |||
| 932e191510 | |||
| 3a6c407fe7 | |||
| 8c3afc31f4 | |||
| 2e4ba44952 | |||
| 4192ac719e | |||
| 539c94595f | |||
| de21e611a3 | |||
| dea362361e | |||
| 7b77519f49 | |||
| 94df7e1ec3 | |||
| babd8d3089 | |||
| 52ef28f091 | |||
| 80d72f8a1b | |||
| 2e8f4a0581 | |||
| 7fd2e2b050 | |||
| 8c65166a90 | |||
| 733b49c2c4 | |||
| 711b5c40b1 | |||
| f94e616d8d | |||
| fb7848f341 | |||
| 007857afd5 | |||
| e4bbe8c035 | |||
| cb5226f6e4 | |||
| c69770d1dd | |||
| e07a53046f | |||
| 19a0b8c2ac | |||
| 97f73703b1 | |||
| 4fcd11f497 | |||
| 7f1023fa9b | |||
| d49497da80 | |||
| d8c359bd8a | |||
| ad4b117f6e | |||
| 22d2f9847c | |||
| 1c7f299b98 | |||
| 602fdce0ee | |||
| 61fde6a2ca | |||
| d843bcc258 | |||
| f2856e0f26 | |||
| 58ef1aa311 | |||
| 6a6570b8e3 | |||
| 29a0860caa | |||
| fb760a9f6d | |||
| c9f13f4398 | |||
| dd03a8cf63 | |||
| 0a6ade4da9 | |||
| 5c8c11d78b | |||
| 00502cc565 | |||
| 0febe3fda5 | |||
| fcd4bb4561 | |||
| 89f763e65d | |||
| 075eb94fa2 | |||
| e9cf8a9180 | |||
| 64ad353628 | |||
| 5518865bc6 | |||
| 50321d897a | |||
| e18a7e9ce0 | |||
| 536b590080 | |||
| 098ce0673a | |||
| 2677796322 | |||
| 5cc7fb30ed | |||
| 222b8103d6 | |||
| 727d5b0ace | |||
| d7b45e5f01 | |||
| 578a262d90 | |||
| c6e11f88b4 | |||
| b795331efb | |||
| f1e5bd3ed4 | |||
| d8d56f77f9 | |||
| 26b221532e | |||
| 15d3206f6f | |||
| 59e2e928a8 | |||
| 51f59e4fcd | |||
| f823127825 | |||
| d41d535ab7 | |||
| 9a4a8de341 | |||
| 2d6f60abaa | |||
| d201f798fb | |||
| a1b0108503 | |||
| f0275d2349 | |||
| 9dafde8a43 | |||
| fa8f86ab7b | |||
| 41c9daa939 | |||
| 83186ba36e | |||
| 3205e3d022 | |||
| 3f272b36d4 | |||
| b238579fe6 | |||
| ce2f990eb1 | |||
| b11b8732aa | |||
| 5cd441da7b | |||
| 2e768fb491 | |||
| e8755ff617 | |||
| e41ee47371 | |||
| 7a68a68e76 | |||
| 94594db20a | |||
| 7e672e8b8e | |||
| 54e2cacb00 | |||
| c0f1dfdb0b | |||
| 29bc79996b | |||
| 99af2b8b16 | |||
| dd0c3e6fba | |||
| 5b2746f389 | |||
| e9c1de9664 | |||
| 6ca4bd4912 | |||
| c34ee85e48 | |||
| 91e8eb1def | |||
| a01b8fe083 | |||
| 550fb542d4 | |||
| 7841063783 | |||
| 8e05b2e2f8 | |||
| 64e1c93d16 | |||
| b227054b52 | |||
| 66bd6f99c5 | |||
| c6579864b8 | |||
| 2361c329e2 | |||
| 5ea149d878 | |||
| 30bd18f816 | |||
| 0f0efac866 | |||
| 04563c0d0d | |||
| 9316eccabe | |||
| b71d6660a6 | |||
| 0e2fec4e93 | |||
| ff171282cc | |||
| ea8fe208d0 | |||
| 9ae9c387cc | |||
| 772b4f6528 | |||
| 4a16ca0d5a | |||
| 316ce856f7 | |||
| 6e0321f488 | |||
| 338d2ae04e | |||
| 4419f7f429 | |||
| 797a6b0429 | |||
| d0b545dfb7 | |||
| b0bff53bbd | |||
| b4adf3d88d | |||
| eefdc548b2 | |||
| fb918e2d6e | |||
| 3d9001a5e4 | |||
| fbe7d63a24 | |||
| d718b0898b | |||
| 44c7211b5f | |||
| 157c93b967 | |||
| 7babc280a0 | |||
| e364e480e8 | |||
| bfefe7e98a | |||
| 831cca7853 | |||
| 46f3b1c02c | |||
| 8a1ae2ffa0 | |||
| 145c819fc1 | |||
| a9ea231de0 | |||
| c2488af1c3 | |||
| ecf7a447a7 | |||
| f8e61af2f9 | |||
| ee61d986d8 | |||
| 8fe8cec09a | |||
| b953456d6b | |||
| 4057699cad | |||
| d3e7fc6067 | |||
| 09a8574d83 | |||
| 7695cc185f | |||
| fc7208020e | |||
| 75d5930835 | |||
| 3c9e16169e | |||
| 9e1076f302 | |||
| 75ab87e109 | |||
| 0b8251fce2 | |||
| f57b71ae96 | |||
| ce324c3de1 | |||
| 281b56d287 | |||
| cbd23e334b | |||
| 7a0b9c9e0d | |||
| 44b3d982dd | |||
| 769f253e7d | |||
| fbd5bb57ac | |||
| b9eb5687cd | |||
| cbd230a7e0 | |||
| 892e9685f3 | |||
| 7ba7b6efda | |||
| 453069deec | |||
| de5f2c3324 | |||
| d486f14433 | |||
| cb47dd7185 | |||
| 6ae4d233cd | |||
| f8bb185854 | |||
| 1da07caaa6 | |||
| fe96c27732 | |||
| 7287775cca | |||
| 28ac3ac7ec | |||
| a6208c0d49 | |||
| 7840fe66da | |||
| 2ca44c967e | |||
| 4b767421f3 | |||
| 6005b8609a | |||
| df23ecdf33 | |||
| f4988cbac5 | |||
| f4f5d16b4a | |||
| 1c4dd33381 | |||
| 9e0ba4d269 | |||
| d9ecf6c0d3 | |||
| 8051ad4dde | |||
| 165f98dc09 | |||
| ca7772250c | |||
| 6e02e4da02 | |||
| 9c8498cea7 | |||
| 965fbb08da | |||
| e16933eeac | |||
| 4d0fc0eae8 | |||
| 8296a973b8 | |||
| 19a9957755 | |||
| 02e3947906 | |||
| 766a73455e | |||
| 6e64ae09aa | |||
| 411eca20e0 | |||
| 0243d9e2fa | |||
| 9aa0e97be0 | |||
| 488fcfc820 | |||
| b5dad487e5 | |||
| 8b01187892 | |||
| d9d6ce0f30 | |||
| 8d203b3547 | |||
| fe5dbcff1e | |||
| 99df104cdd | |||
| a53397210c | |||
| 2533d8d34f | |||
| af2523cfee | |||
| c6e1663f8a | |||
| ab83c389f7 | |||
| 6d22702864 | |||
| d78957353d | |||
| b208493af9 | |||
| 4aa1485246 | |||
| e1e1d321dd | |||
| 3971b37abc | |||
| cf1bd3ea6b | |||
| 9b901766e3 | |||
| e19ee78e70 | |||
| c7c55ab95c | |||
| d7ddf01ea0 | |||
| c539af1a67 | |||
| d93d24b52d | |||
| 5dbfad68ad | |||
| 92c4506cfa | |||
| fe80bed6bd | |||
| b6e69021b2 | |||
| 12e624a496 | |||
| e95b44c690 | |||
| 4ee947d55c | |||
| 21212c0a1d | |||
| d1376a2200 | |||
| 7d2daf4f6a | |||
| da4562d308 | |||
| f51de52ff7 | |||
| 987632df39 | |||
| 28a3c3e53f | |||
| 1bd86f5abd | |||
| 989fbc25f8 | |||
| 0f935ceb48 | |||
| f844a435fd | |||
| 3a970e7a27 | |||
| 307c2bcdef | |||
| d62928aaae | |||
| d08a1e3ef6 | |||
| 2292041f9f | |||
| 75e4bf1d6e | |||
| 97add04276 | |||
| 1423f55d78 | |||
| 46d0b70399 | |||
| 168ca802d1 | |||
| 8c07e91f39 | |||
| 7979950c3b | |||
| 9846ba13e0 | |||
| 83839f7faf | |||
| 85fa3b1f8f | |||
| 4190f9a633 | |||
| 743ce27d2e | |||
| 399a2450ff | |||
| 934f16f0a5 | |||
| 0aeb13c181 | |||
| 5899bf2026 | |||
| 3b137964fc | |||
| 1bfdd0043f | |||
| 999c12748c | |||
| 6f283fd736 | |||
| 65d31046a0 | |||
| 601d632ae4 | |||
| 8466c5e750 | |||
| aa786c0db8 | |||
| f3faee389b | |||
| 5ac0aa8f74 | |||
| a589d11d01 | |||
| 1a05868381 | |||
| a35c3bae08 | |||
| 0c908786e0 | |||
| 2ba196d6a8 | |||
| 0e00999d79 | |||
| 5adceeb9a5 | |||
| b5920e35e3 | |||
| fb8f248366 | |||
| 7a931bd018 | |||
| a004f85145 | |||
| eeb086c77f | |||
| 54b195f851 | |||
| 5dc79134b2 | |||
| f3fad47d9e | |||
| 489534cb73 | |||
| e7801619cd | |||
| 7b75b5f9bb | |||
| 0022d848d6 | |||
| d47c4ea99a | |||
| 62354f2ab8 | |||
| 3a0adb406f | |||
| 2a39421524 | |||
| a7dc68822f | |||
| 3ad87aecc6 | |||
| 2ab714f575 | |||
| 9e60fb8d73 | |||
| a846522830 | |||
| 3d7d276236 | |||
| 2660af7ce3 | |||
| f5af86fd46 | |||
| 4bad2d7b03 | |||
| a79930916e | |||
| 9ea283e8d2 | |||
| bd6d192006 | |||
| 39848eda0b | |||
| 2f67d6f9ae | |||
| 30153f9656 | |||
| 3150201348 | |||
| bce6225e9a | |||
| 893774c557 | |||
| 145996055a | |||
| 1cae5ea864 | |||
| 0fe6e74eb4 | |||
| eb4a738746 |
@@ -1,4 +1,3 @@
/dist
/vendor
/.idea
/.github
@@ -22,20 +22,21 @@ See [/docs/TROUBLESHOOTING_DEVICE_COLLECTOR.md](docs/TROUBLESHOOTING_DEVICE_COLL

```
docker run -it --rm -p 8080:8080 \
-v `pwd`/config:/opt/scrutiny/config \
-v /run/udev:/run/udev:ro \
--cap-add SYS_RAWIO \
--device=/dev/sda \
--device=/dev/sdb \
-e DEBUG=true \
-e COLLECTOR_LOG_FILE=/tmp/collector.log \
-e SCRUTINY_LOG_FILE=/tmp/web.log \
-e COLLECTOR_LOG_FILE=/opt/scrutiny/config/collector.log \
-e SCRUTINY_LOG_FILE=/opt/scrutiny/config/web.log \
--name scrutiny \
ghcr.io/analogj/scrutiny:master-omnibus

# in another terminal trigger the collector
docker exec scrutiny scrutiny-collector-metrics run

# then use docker cp to copy the log files out of the container.
docker cp scrutiny:/tmp/collector.log collector.log
docker cp scrutiny:/tmp/web.log web.log
```

The log files will be available on your host in the `config` directory. Please attach them to this issue.

Please also provide the output of `docker info`
@@ -1,85 +0,0 @@
name: CI
# This workflow is triggered on pushes & pull requests
on: [push, pull_request]

jobs:
  build:
    name: Build
    runs-on: ubuntu-latest
    container: techknowlogick/xgo:go-1.13.x

    # Service containers to run with `build` (Required for end-to-end testing)
    services:
      influxdb:
        image: influxdb:2.2
        env:
          DOCKER_INFLUXDB_INIT_MODE: setup
          DOCKER_INFLUXDB_INIT_USERNAME: admin
          DOCKER_INFLUXDB_INIT_PASSWORD: password12345
          DOCKER_INFLUXDB_INIT_ORG: scrutiny
          DOCKER_INFLUXDB_INIT_BUCKET: metrics
          DOCKER_INFLUXDB_INIT_ADMIN_TOKEN: my-super-secret-auth-token
        ports:
          - 8086:8086
    env:
      PROJECT_PATH: /go/src/github.com/analogj/scrutiny
      CGO_ENABLED: 1
    steps:
      - name: Git
        run: |
          apt-get update && apt-get install -y software-properties-common
          add-apt-repository ppa:git-core/ppa && apt-get update && apt-get install -y git
          git --version
      - name: Checkout
        uses: actions/checkout@v2
      - name: Test
        run: |
          mkdir -p $(dirname "$PROJECT_PATH")
          cp -a $GITHUB_WORKSPACE $PROJECT_PATH
          cd $PROJECT_PATH

          go mod vendor
          go test -race -coverprofile=coverage.txt -covermode=atomic -v -tags "static" $(go list ./... | grep -v /vendor/)
      - name: Generate coverage report
        uses: codecov/codecov-action@v2
        with:
          files: ${{ env.PROJECT_PATH }}/coverage.txt
          flags: unittests
          fail_ci_if_error: true
          verbose: true
      - name: Build Binaries
        run: |

          cd $PROJECT_PATH
          make all

      - name: Archive
        uses: actions/upload-artifact@v2
        with:
          name: binaries.zip
          path: |
            /build/scrutiny-web-linux-amd64
            /build/scrutiny-collector-metrics-linux-amd64
            /build/scrutiny-web-linux-arm64
            /build/scrutiny-collector-metrics-linux-arm64
            /build/scrutiny-web-linux-arm-5
            /build/scrutiny-collector-metrics-linux-arm-5
            /build/scrutiny-web-linux-arm-6
            /build/scrutiny-collector-metrics-linux-arm-6
            /build/scrutiny-web-linux-arm-7
            /build/scrutiny-collector-metrics-linux-arm-7
            /build/scrutiny-web-windows-4.0-amd64.exe
            /build/scrutiny-collector-metrics-windows-4.0-amd64.exe
            # /build/scrutiny-web-darwin-arm64
            # /build/scrutiny-collector-metrics-darwin-arm64
            # /build/scrutiny-web-darwin-amd64
            # /build/scrutiny-collector-metrics-darwin-amd64
            # /build/scrutiny-web-freebsd-amd64
            # /build/scrutiny-collector-metrics-freebsd-amd64
      - uses: codecov/codecov-action@v2
        with:
          file: ${{ env.PROJECT_PATH }}/coverage.txt
          flags: unittests
          fail_ci_if_error: false

@@ -0,0 +1,114 @@
name: CI
# This workflow is triggered on pushes & pull requests
on: [pull_request]

jobs:
  test-frontend:
    name: Test Frontend
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Test Frontend
        run: |
          make binary-frontend-test-coverage
      - name: Upload coverage
        uses: actions/upload-artifact@v3
        with:
          name: coverage
          path: ${{ github.workspace }}/webapp/frontend/coverage/lcov.info
          retention-days: 1
  test-backend:
    name: Test Backend
    runs-on: ubuntu-latest
    container: ghcr.io/packagrio/packagr:latest-golang
    # Service containers to run with `build` (Required for end-to-end testing)
    services:
      influxdb:
        image: influxdb:2.2
        env:
          DOCKER_INFLUXDB_INIT_MODE: setup
          DOCKER_INFLUXDB_INIT_USERNAME: admin
          DOCKER_INFLUXDB_INIT_PASSWORD: password12345
          DOCKER_INFLUXDB_INIT_ORG: scrutiny
          DOCKER_INFLUXDB_INIT_BUCKET: metrics
          DOCKER_INFLUXDB_INIT_ADMIN_TOKEN: my-super-secret-auth-token
        ports:
          - 8086:8086
    env:
      STATIC: true
    steps:
      - name: Git
        run: |
          apt-get update && apt-get install -y software-properties-common
          add-apt-repository ppa:git-core/ppa && apt-get update && apt-get install -y git
          git --version
      - name: Checkout
        uses: actions/checkout@v2
      - name: Test Backend
        run: |
          make binary-clean binary-test-coverage
      - name: Upload coverage
        uses: actions/upload-artifact@v3
        with:
          name: coverage
          path: ${{ github.workspace }}/coverage.txt
          retention-days: 1
  test-coverage:
    name: Test Coverage Upload
    needs:
      - test-backend
      - test-frontend
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Download coverage reports
        uses: actions/download-artifact@v3
        with:
          name: coverage
      - name: Upload coverage reports
        uses: codecov/codecov-action@v2
        with:
          files: ${{ github.workspace }}/coverage.txt,${{ github.workspace }}/lcov.info
          flags: unittests
          fail_ci_if_error: true
          verbose: true

  build:
    name: Build ${{ matrix.cfg.goos }}/${{ matrix.cfg.goarch }}
    runs-on: ${{ matrix.cfg.on }}
    env:
      GOOS: ${{ matrix.cfg.goos }}
      GOARCH: ${{ matrix.cfg.goarch }}
      GOARM: ${{ matrix.cfg.goarm }}
      STATIC: true
    strategy:
      matrix:
        cfg:
          - { on: ubuntu-latest, goos: linux, goarch: amd64 }
          - { on: ubuntu-latest, goos: linux, goarch: arm, goarm: 5 }
          - { on: ubuntu-latest, goos: linux, goarch: arm, goarm: 6 }
          - { on: ubuntu-latest, goos: linux, goarch: arm, goarm: 7 }
          - { on: ubuntu-latest, goos: linux, goarch: arm64 }
          - { on: macos-latest, goos: darwin, goarch: amd64 }
          - { on: macos-latest, goos: darwin, goarch: arm64 }
          - { on: macos-latest, goos: freebsd, goarch: amd64 }
          - { on: windows-latest, goos: windows, goarch: amd64 }
          - { on: windows-latest, goos: windows, goarch: arm64 }
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - uses: actions/setup-go@v3
        with:
          go-version: '^1.20.1'
      - name: Build Binaries
        run: |
          make binary-clean binary-all
      - name: Archive
        uses: actions/upload-artifact@v2
        with:
          name: binaries.zip
          path: |
            scrutiny-web-*
            scrutiny-collector-metrics-*
@@ -1,9 +1,7 @@
name: Docker
on:
  schedule:
    - cron: '36 12 * * *'
  push:
    branches: [ master, influxdb ]
    branches: [ master, beta ]
    # Publish semver tags as releases.
    tags: [ 'v*.*.*' ]

@@ -20,16 +18,20 @@ jobs:

    steps:
      - name: Checkout repository
        uses: actions/checkout@v2
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
        uses: docker/setup-qemu-action@v2
        with:
          platforms: 'arm64,arm'
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
        uses: docker/setup-buildx-action@v2
      # Login against a Docker registry except on PR
      # https://github.com/docker/login-action
      - name: Log into registry ${{ env.REGISTRY }}
        if: github.event_name != 'pull_request'
        uses: docker/login-action@28218f9b04b4f3f62068d7b6ce6ca5b26e35336c
        uses: docker/login-action@v2
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
@@ -38,8 +40,10 @@ jobs:
      # https://github.com/docker/metadata-action
      - name: Extract Docker metadata
        id: meta
        uses: docker/metadata-action@98669ae865ea3cffbcbaa878cf57c20bbf1c6c38
        uses: docker/metadata-action@v4
        with:
          flavor: |
            latest=false
          tags: |
            type=ref,enable=true,event=branch,suffix=-collector
            type=ref,enable=true,event=tag,suffix=-collector
@@ -48,14 +52,16 @@ jobs:
      # Build and push Docker image with Buildx (don't push on PR)
      # https://github.com/docker/build-push-action
      - name: Build and push Docker image
        uses: docker/build-push-action@ad44023a93711e3deb337508980b4b5e9bcdc5dc
        uses: docker/build-push-action@v3
        with:
          platforms: linux/amd64,linux/arm64
          platforms: linux/amd64,linux/arm64,linux/arm/v7
          context: .
          file: docker/Dockerfile.collector
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          # cache-from: type=gha
          # cache-to: type=gha,mode=max

  web:
    runs-on: ubuntu-latest
@@ -66,15 +72,19 @@ jobs:
    steps:
      - name: Checkout repository
        uses: actions/checkout@v2
      - name: "Populate frontend version information"
        run: "cd webapp/frontend && ./git.version.sh"
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
        uses: docker/setup-qemu-action@v2
        with:
          platforms: 'arm64,arm'
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
        uses: docker/setup-buildx-action@v2
      # Login against a Docker registry except on PR
      # https://github.com/docker/login-action
      - name: Log into registry ${{ env.REGISTRY }}
        if: github.event_name != 'pull_request'
        uses: docker/login-action@28218f9b04b4f3f62068d7b6ce6ca5b26e35336c
        uses: docker/login-action@v2
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
@@ -83,25 +93,27 @@ jobs:
      # https://github.com/docker/metadata-action
      - name: Extract Docker metadata
        id: meta
        uses: docker/metadata-action@98669ae865ea3cffbcbaa878cf57c20bbf1c6c38
        uses: docker/metadata-action@v4
        with:
          flavor: |
            latest=false
          tags: |
            type=ref,enable=true,event=branch,suffix=-web
            type=ref,enable=true,event=tag,suffix=-web
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}

      # Build and push Docker image with Buildx (don't push on PR)
      # https://github.com/docker/build-push-action
      - name: Build and push Docker image
        uses: docker/build-push-action@ad44023a93711e3deb337508980b4b5e9bcdc5dc
        uses: docker/build-push-action@v3
        with:
          platforms: linux/amd64,linux/arm64
          platforms: linux/amd64,linux/arm64,linux/arm/v7
          context: .
          file: docker/Dockerfile.web
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}

      # cache-from: type=gha
      # cache-to: type=gha,mode=max
  omnibus:
    runs-on: ubuntu-latest
    permissions:
@@ -111,15 +123,19 @@ jobs:
    steps:
      - name: Checkout repository
        uses: actions/checkout@v2
      - name: "Populate frontend version information"
        run: "cd webapp/frontend && ./git.version.sh"
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
        uses: docker/setup-qemu-action@v2
        with:
          platforms: 'arm64,arm'
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
        uses: docker/setup-buildx-action@v2
      # Login against a Docker registry except on PR
      # https://github.com/docker/login-action
      - name: Log into registry ${{ env.REGISTRY }}
        if: github.event_name != 'pull_request'
        uses: docker/login-action@28218f9b04b4f3f62068d7b6ce6ca5b26e35336c
        uses: docker/login-action@v2
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
@@ -128,17 +144,16 @@ jobs:
      # https://github.com/docker/metadata-action
      - name: Extract Docker metadata
        id: meta
        uses: docker/metadata-action@98669ae865ea3cffbcbaa878cf57c20bbf1c6c38
        uses: docker/metadata-action@v4
        with:
          tags: |
            type=ref,enable=true,event=branch,suffix=-omnibus
            type=ref,enable=true,event=tag,suffix=-omnibus
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}

      # Build and push Docker image with Buildx (don't push on PR)
      # https://github.com/docker/build-push-action
      - name: Build and push Docker image
        uses: docker/build-push-action@ad44023a93711e3deb337508980b4b5e9bcdc5dc
        uses: docker/build-push-action@v3
        with:
          platforms: linux/amd64,linux/arm64
          context: .
@@ -146,3 +161,5 @@ jobs:
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          # cache-from: type=gha
          # cache-to: type=gha,mode=max

@@ -0,0 +1,59 @@
name: Docker - Nightly
on:
  schedule:
    - cron: '36 12 * * *'

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}

jobs:
  omnibus:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@v2
      - name: "Populate frontend version information"
        run: "cd webapp/frontend && ./git.version.sh"
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2
        with:
          platforms: 'arm64,arm'
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      # Login against a Docker registry except on PR
      # https://github.com/docker/login-action
      - name: Log into registry ${{ env.REGISTRY }}
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v2
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      # Extract metadata (tags, labels) for Docker
      # https://github.com/docker/metadata-action
      - name: Extract Docker metadata
        id: meta
        uses: docker/metadata-action@v4
        with:
          tags: |
            type=ref,enable=true,event=branch,suffix=-omnibus-nightly
            type=ref,enable=true,event=tag,suffix=-omnibus-nightly
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
      # Build and push Docker image with Buildx (don't push on PR)
      # https://github.com/docker/build-push-action
      - name: Build and push Docker image
        uses: docker/build-push-action@v3
        with:
          platforms: linux/amd64,linux/arm64
          context: .
          file: docker/Dockerfile
          push: false
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          # cache-from: type=gha
          # cache-to: type=gha,mode=max
@@ -1,77 +0,0 @@
# compiles FreeBSD artifacts and attaches them to build
name: Release FreeBSD

on:
  release:
    # Only use the types keyword to narrow down the activity types that will trigger your workflow.
    types: [published]
jobs:

  release-freebsd:
    name: Release FreeBSD
    runs-on: macos-latest
    env:
      PROJECT_PATH: /go/src/github.com/analogj/scrutiny
      GOPATH: /go
      GOOS: freebsd
      GOARCH: amd64
    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          ref: ${{github.event.release.tag_name}}
      - name: Build Binaries
        uses: vmactions/freebsd-vm@v0.1.3
        with:
          envs: 'PROJECT_PATH GOPATH GOOS GOARCH'
          usesh: true
          #TODO: lock go version using https://www.jeremymorgan.com/tutorials/golang/how-to-install-go-freebsd/
          prepare: pkg install -y curl go gmake
          run: |
            pwd
            ls -lah
            whoami
            freebsd-version

            mkdir -p $(dirname "$PROJECT_PATH")
            cp -R $GITHUB_WORKSPACE $PROJECT_PATH
            cd $PROJECT_PATH

            mkdir -p $GITHUB_WORKSPACE/dist

            echo "building web binary (OS = ${GOOS}, ARCH = ${GOARCH})"
            go build -ldflags "-extldflags=-static -X main.goos=${GOOS} -X main.goarch=${GOARCH}" -o $GITHUB_WORKSPACE/dist/scrutiny-web-${GOOS}-${GOARCH} -tags "static netgo sqlite_omit_load_extension" webapp/backend/cmd/scrutiny/scrutiny.go

            chmod +x "$GITHUB_WORKSPACE/dist/scrutiny-web-${GOOS}-${GOARCH}"
            file "$GITHUB_WORKSPACE/dist/scrutiny-web-${GOOS}-${GOARCH}" || true
            ldd "$GITHUB_WORKSPACE/dist/scrutiny-web-${GOOS}-${GOARCH}" || true

            echo "building collector binary (OS = ${GOOS}, ARCH = ${GOARCH})"
            go build -ldflags "-extldflags=-static -X main.goos=${GOOS} -X main.goarch=${GOARCH}" -o $GITHUB_WORKSPACE/dist/scrutiny-collector-metrics-${GOOS}-${GOARCH} -tags "static netgo" collector/cmd/collector-metrics/collector-metrics.go

            chmod +x "$GITHUB_WORKSPACE/dist/scrutiny-collector-metrics-${GOOS}-${GOARCH}"
            file "$GITHUB_WORKSPACE/dist/scrutiny-collector-metrics-${GOOS}-${GOARCH}" || true
            ldd "$GITHUB_WORKSPACE/dist/scrutiny-collector-metrics-${GOOS}-${GOARCH}" || true


      - name: Release Asset - Web - freebsd-amd64
        id: upload-release-asset1
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.SCRUTINY_GITHUB_TOKEN }}
        with:
          upload_url: ${{ github.event.release.upload_url }} # This pulls from the CREATE RELEASE step above, referencing its ID to get its outputs object, which includes an `upload_url`. See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps
          asset_path: './dist/scrutiny-web-freebsd-amd64'
          asset_name: scrutiny-web-freebsd-amd64
          asset_content_type: application/octet-stream

      - name: Release Asset - Collector - freebsd-amd64
        id: upload-release-asset2
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.SCRUTINY_GITHUB_TOKEN }}
        with:
          upload_url: ${{ github.event.release.upload_url }} # This pulls from the CREATE RELEASE step above, referencing its ID to get its outputs object, which includes an `upload_url`. See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps
          asset_path: './dist/scrutiny-collector-metrics-freebsd-amd64'
          asset_name: scrutiny-collector-metrics-freebsd-amd64
          asset_content_type: application/octet-stream
@@ -15,13 +15,12 @@ jobs:
        uses: actions/checkout@v2
        with:
          ref: ${{github.event.release.tag_name}}
      - name: "Generate frontend version information"
        run: "cd webapp/frontend && ./git.version.sh"
      - name: Build Frontend
        run: |
          cd webapp/frontend
          npm install -g @angular/cli@9.1.4
          npm install
          mkdir -p dist
          ng build --output-path=dist --deploy-url="/web/" --base-href="/web/" --prod
          apt-get update && apt-get install -y make
          make binary-frontend
          tar -czf scrutiny-web-frontend.tar.gz dist
      - name: Upload Frontend Asset
        id: upload-release-asset3
@@ -30,6 +29,6 @@ jobs:
          GITHUB_TOKEN: ${{ secrets.SCRUTINY_GITHUB_TOKEN }}
        with:
          upload_url: ${{ github.event.release.upload_url }} # This pulls from the CREATE RELEASE step above, referencing its ID to get its outputs object, which includes an `upload_url`. See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps
          asset_path: './webapp/frontend/scrutiny-web-frontend.tar.gz'
          asset_path: './scrutiny-web-frontend.tar.gz'
          asset_name: scrutiny-web-frontend.tar.gz
          asset_content_type: application/gzip

@@ -13,10 +13,10 @@ on:
        default: 'webapp/backend/pkg/version/version.go'

jobs:
  build:
    name: Build
  release:
    name: Create Release Commit
    runs-on: ubuntu-latest
    container: techknowlogick/xgo:go-1.13.x
    container: ghcr.io/packagrio/packagr:latest-golang
    # Service containers to run with `build` (Required for end-to-end testing)
    services:
      influxdb:
@@ -31,8 +31,7 @@ jobs:
        ports:
          - 8086:8086
    env:
      PROJECT_PATH: /go/src/github.com/analogj/scrutiny
      CGO_ENABLED: 1
      STATIC: true
    steps:
      - name: Git
        run: |
@@ -53,34 +52,80 @@ jobs:
          GITHUB_TOKEN: ${{ secrets.SCRUTINY_GITHUB_TOKEN }} # Leave this line unchanged
      - name: Test
        run: |
          mkdir -p $(dirname "$PROJECT_PATH")
          cp -a $GITHUB_WORKSPACE $PROJECT_PATH
          cd $PROJECT_PATH

          go mod vendor
          go test -v -tags "static" $(go list ./... | grep -v /vendor/)

      - name: Build Binaries
        run: |

          cd $PROJECT_PATH
          make all

          # restore modified dir to GH workspace.
          cp -arf $PROJECT_PATH/. $GITHUB_WORKSPACE/

          # copy all the build artifacts to the GH workspace
          cp -arf /build/. $GITHUB_WORKSPACE/

      - name: Commit Changes
          make binary-clean binary-test-coverage
      - name: Commit Changes Locally
        id: commit
        uses: packagrio/action-releasr-go@master
        env:
          # This is necessary in order to push a commit to the repo
          GITHUB_TOKEN: ${{ secrets.SCRUTINY_GITHUB_TOKEN }} # Leave this line unchanged
        with:
          version_metadata_path: ${{ github.event.inputs.version_metadata_path }}
      - name: Publish Release
      - name: Upload workspace
        uses: actions/upload-artifact@v3
        with:
          name: workspace
          path: ${{ github.workspace }}/**/*
          retention-days: 1

  build:
    name: Build ${{ matrix.cfg.goos }}/${{ matrix.cfg.goarch }}${{ matrix.cfg.goarm }}
    needs: release
    runs-on: ${{ matrix.cfg.on }}
    env:
      GOOS: ${{ matrix.cfg.goos }}
      GOARCH: ${{ matrix.cfg.goarch }}
      GOARM: ${{ matrix.cfg.goarm }}
      STATIC: true
    strategy:
      matrix:
        cfg:
          - { on: ubuntu-latest, goos: linux, goarch: amd64 }
          - { on: ubuntu-latest, goos: linux, goarch: arm, goarm: 5 }
          - { on: ubuntu-latest, goos: linux, goarch: arm, goarm: 6 }
          - { on: ubuntu-latest, goos: linux, goarch: arm, goarm: 7 }
          - { on: ubuntu-latest, goos: linux, goarch: arm64 }
          - { on: macos-latest, goos: darwin, goarch: amd64 }
          - { on: macos-latest, goos: darwin, goarch: arm64 }
          - { on: macos-latest, goos: freebsd, goarch: amd64 }
          - { on: windows-latest, goos: windows, goarch: amd64 }
          - { on: windows-latest, goos: windows, goarch: arm64 }
    steps:
      - name: Download workspace
        uses: actions/download-artifact@v3
        with:
          name: workspace
      - uses: actions/setup-go@v3
        with:
          go-version: '1.20.1' # The Go version to download (if necessary) and use.
      - name: Build Binaries
        run: |
          make binary-clean binary-all
      - name: Archive
        uses: actions/upload-artifact@v2
        with:
          name: binaries.zip
          path: |
            scrutiny-web-*
            scrutiny-collector-metrics-*

  release-publish:
    name: Publish Release
    needs: build
    runs-on: ubuntu-latest
    steps:
      - name: Download workspace
        uses: actions/download-artifact@v3
        with:
          name: workspace
      - name: Download binaries
        uses: actions/download-artifact@v3
        with:
          name: binaries.zip
      - name: List
        shell: bash
        run: |
          ls -alt
      - name: Publish Release & Assets
        id: publish
        uses: packagrio/action-publishr-go@master
        env:
@@ -89,15 +134,23 @@ jobs:
        with:
          version_metadata_path: ${{ github.event.inputs.version_metadata_path }}
          upload_assets:
            scrutiny-web-linux-amd64
            scrutiny-collector-metrics-darwin-amd64
            scrutiny-collector-metrics-darwin-arm64
            scrutiny-collector-metrics-freebsd-amd64
            scrutiny-collector-metrics-linux-amd64
            scrutiny-web-linux-arm64
            scrutiny-collector-metrics-linux-arm64
            scrutiny-web-linux-arm-5
            scrutiny-collector-metrics-linux-arm-5
            scrutiny-web-linux-arm-6
            scrutiny-collector-metrics-linux-arm-6
            scrutiny-web-linux-arm-7
            scrutiny-collector-metrics-linux-arm-7
            scrutiny-web-windows-4.0-amd64.exe
            scrutiny-collector-metrics-windows-4.0-amd64.exe
            scrutiny-collector-metrics-linux-arm64
            scrutiny-collector-metrics-windows-amd64.exe
            scrutiny-collector-metrics-windows-arm64.exe
            scrutiny-web-darwin-amd64
            scrutiny-web-darwin-arm64
            scrutiny-web-freebsd-amd64
            scrutiny-web-linux-amd64
            scrutiny-web-linux-arm-5
            scrutiny-web-linux-arm-6
            scrutiny-web-linux-arm-7
            scrutiny-web-linux-arm64
            scrutiny-web-windows-amd64.exe
            scrutiny-web-windows-arm64.exe

@@ -1,19 +0,0 @@
name: Cleanup Artifacts

on:
  schedule:
    # Every day at 1am
    - cron: '0 1 * * *'

jobs:
  remove-old-artifacts:
    runs-on: ubuntu-latest
    timeout-minutes: 10

    steps:
      - name: Remove old artifacts
        uses: c-hive/gha-remove-artifacts@v1
        with:
          age: '1 day'
          skip-tags: true
          skip-recent: 5
@@ -8,6 +8,7 @@ jobs:
  build:
    name: is-sponsor-label
    runs-on: ubuntu-latest
    if: ${{ false }}
    steps:
      - uses: JasonEtco/is-sponsor-label-action@v1.2.0
        env:

@@ -64,3 +64,5 @@ scrutiny-*.db
scrutiny_test.db
scrutiny.yaml
coverage.txt
/config
/influxdb
@@ -1,75 +1,110 @@
# Contributing

There are multiple ways to develop on the scrutiny codebase locally. The two most popular are:
- Docker Development Container - only requires docker
- Run Components Locally - requires smartmontools, golang & nodejs installed locally
The Scrutiny repository is a [monorepo](https://en.wikipedia.org/wiki/Monorepo) containing source code for:
- Scrutiny Backend Server (API)
- Scrutiny Frontend Angular SPA
- S.M.A.R.T Collector

## Docker Development
```
docker build -f docker/Dockerfile . -t ghcr.io/analogj/scrutiny:master-omnibus
docker run -it --rm -p 8080:8080 \
-v /run/udev:/run/udev:ro \
--cap-add SYS_RAWIO \
--device=/dev/sda \
--device=/dev/sdb \
ghcr.io/analogj/scrutiny:master-omnibus
/opt/scrutiny/bin/scrutiny-collector-metrics run
```
Depending on the functionality you are adding, you may need to set up a development environment for one or more projects.

# Modifying the Scrutiny Backend Server (API)

## Local Development
1. install the [Go runtime](https://go.dev/doc/install) (v1.20+)
2. download the `scrutiny-web-frontend.tar.gz` for the [latest release](https://github.com/AnalogJ/scrutiny/releases/latest). Extract to a folder named `dist` (see the sketch after this list)
3. create a `scrutiny.yaml` config file
```yaml
# config file for local development. store as scrutiny.yaml
version: 1

### Frontend
The frontend is written in Angular.
If you're working on the frontend and can use mocked data rather than a real backend, you can use
```
cd webapp/frontend
npm install
ng serve --deploy-url="/web/" --base-href="/web/"
```
web:
  listen:
    port: 8080
    host: 0.0.0.0
  database:
    # can also set absolute path here
    location: ./scrutiny.db
  src:
    frontend:
      path: ./dist
  influxdb:
    retention_policy: false

However, if you need to also run the backend, and use real data, you'll need to run the following command:
```
cd webapp/frontend && ng build --watch --output-path=../../dist --prod
```
log:
  file: 'web.log' #absolute or relative paths allowed, eg. web.log
  level: DEBUG

> Note: if you do not add `--prod` flag, app will display mocked data for api calls.
```
4. start an InfluxDB docker container.
```bash
docker run -p 8086:8086 --rm influxdb:2.2
```
5. start the scrutiny web server
```bash
go mod vendor
go run webapp/backend/cmd/scrutiny/scrutiny.go start --config ./scrutiny.yaml
```
6. open your browser to [http://localhost:8080/web](http://localhost:8080/web)

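For step 2 above, a minimal sketch of fetching and unpacking the prebuilt frontend bundle — the asset name comes from the release workflow, but the release tag in the URL is a placeholder you must substitute:

```bash
# <tag> is a placeholder; use the actual latest release tag
curl -L -o scrutiny-web-frontend.tar.gz \
  "https://github.com/AnalogJ/scrutiny/releases/download/<tag>/scrutiny-web-frontend.tar.gz"
# the bundle is built with `tar -czf scrutiny-web-frontend.tar.gz dist`,
# so extracting it produces the ./dist folder the config expects
tar -xzf scrutiny-web-frontend.tar.gz
```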
### Backend
# Modifying the Scrutiny Frontend Angular SPA

If you're using the `ng build` command above to generate your frontend, you'll need to create a custom config file and override the `web.src.frontend.path` value.
The frontend is written in Angular. If you're working on the frontend and can use mocked data rather than a real backend, you can follow the instructions below:

```
# config file for local development. store as scrutiny.yaml
version: 1
1. install [NodeJS](https://nodejs.org/en/download/)
2. start the Angular Frontend Application
```bash
cd webapp/frontend
npm install
npm run start -- --serve-path="/web/" --port 4200
```
3. open your browser and visit [http://localhost:4200/web](http://localhost:4200/web)

web:
  listen:
    port: 8080
    host: 0.0.0.0
  database:
    # can also set absolute path here
    location: ./scrutiny.db
  src:
    frontend:
      path: ./dist
  influxdb:
    retention_policy: false
# Modifying both Scrutiny Backend and Frontend Applications
If you're developing a feature that requires changes to the backend and the frontend, or a frontend feature that requires real data, you'll need to follow the steps below:

log:
  file: 'web.log' #absolute or relative paths allowed, eg. web.log
  level: DEBUG
1. install the [Go runtime](https://go.dev/doc/install) (v1.20+)
2. install [NodeJS](https://nodejs.org/en/download/)
3. create a `scrutiny.yaml` config file
```yaml
# config file for local development. store as scrutiny.yaml
version: 1

```
web:
  listen:
    port: 8080
    host: 0.0.0.0
  database:
    # can also set absolute path here
    location: ./scrutiny.db
  src:
    frontend:
      path: ./dist
  influxdb:
    retention_policy: false

Once you've created a config file, you can pass it to the scrutiny binary during startup.
log:
  file: 'web.log' #absolute or relative paths allowed, eg. web.log
  level: DEBUG

```
go run webapp/backend/cmd/scrutiny/scrutiny.go start --config ./scrutiny.yaml
```

Now visit http://localhost:8080
```
4. start an InfluxDB docker container.
```bash
docker run -p 8086:8086 --rm influxdb:2.2
```
5. build the Angular Frontend Application
```bash
cd webapp/frontend
npm install
npm run build:prod -- --watch --output-path=../../dist
# Note: if you do not add `--prod` flag, app will display mocked data for api calls.
```
6. start the scrutiny web server
```bash
go mod vendor
go run webapp/backend/cmd/scrutiny/scrutiny.go start --config ./scrutiny.yaml
```
7. open your browser to [http://localhost:8080/web](http://localhost:8080/web)


If you'd like to populate the database with some test data, you can run the following commands:
@@ -82,15 +117,6 @@ If you'd like to populate the database with some test data, you can run the fol
docker run -p 8086:8086 --rm influxdb:2.2


docker run --rm -p 8086:8086 \
-e DOCKER_INFLUXDB_INIT_MODE=setup \
-e DOCKER_INFLUXDB_INIT_USERNAME=admin \
-e DOCKER_INFLUXDB_INIT_PASSWORD=password12345 \
-e DOCKER_INFLUXDB_INIT_ORG=scrutiny \
-e DOCKER_INFLUXDB_INIT_BUCKET=metrics \
influxdb:2.2


# curl -X POST -H "Content-Type: application/json" -d @webapp/backend/pkg/web/testdata/register-devices-req.json localhost:8080/api/devices/register
# curl -X POST -H "Content-Type: application/json" -d @webapp/backend/pkg/models/testdata/smart-ata.json localhost:8080/api/device/0x5000cca264eb01d7/smart
# curl -X POST -H "Content-Type: application/json" -d @webapp/backend/pkg/models/testdata/smart-ata-date.json localhost:8080/api/device/0x5000cca264eb01d7/smart
@@ -105,14 +131,14 @@ curl localhost:8080/api/summary

```

### Collector
# Modifying the Collector
```
brew install smartmontools
go run collector/cmd/collector-metrics/collector-metrics.go run --debug
```


## Debugging
# Debugging

If you need more verbose logs for debugging, you can use the following environmental variables:

@@ -131,3 +157,32 @@ Finally, you can copy the files from the scrutiny container to your host using t

docker cp scrutiny:/tmp/collector.log collector.log
docker cp scrutiny:/tmp/web.log web.log
```

# Docker Development

```
docker build -f docker/Dockerfile . -t ghcr.io/analogj/scrutiny:master-omnibus
docker run -it --rm -p 8080:8080 \
-v /run/udev:/run/udev:ro \
--cap-add SYS_RAWIO \
--device=/dev/sda \
--device=/dev/sdb \
ghcr.io/analogj/scrutiny:master-omnibus
/opt/scrutiny/bin/scrutiny-collector-metrics run
```


# Running Tests

```bash
docker run -p 8086:8086 -d --rm \
-e DOCKER_INFLUXDB_INIT_MODE=setup \
-e DOCKER_INFLUXDB_INIT_USERNAME=admin \
-e DOCKER_INFLUXDB_INIT_PASSWORD=password12345 \
-e DOCKER_INFLUXDB_INIT_ORG=scrutiny \
-e DOCKER_INFLUXDB_INIT_BUCKET=metrics \
-e DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=my-super-secret-auth-token \
influxdb:2.2
go test ./...

```

@@ -1,42 +1,133 @@
export CGO_ENABLED = 1
.ONESHELL: # Applies to every target in the file! .ONESHELL instructs make to invoke a single instance of the shell and provide it with the entire recipe, regardless of how many lines it contains.
.SHELLFLAGS = -ec

########################################################################################################################
# Global Env Settings
########################################################################################################################

GO_WORKSPACE ?= /go/src/github.com/analogj/scrutiny

BINARY=\
	linux/amd64 \
	linux/arm-5 \
	linux/arm-6 \
	linux/arm-7 \
	linux/arm64 \
COLLECTOR_BINARY_NAME = scrutiny-collector-metrics
WEB_BINARY_NAME = scrutiny-web
LD_FLAGS =

.PHONY: all $(BINARY)
all: $(BINARY) windows/amd64
STATIC_TAGS =
# enable multiarch docker image builds
DOCKER_TARGETARCH_BUILD_ARG =
ifdef TARGETARCH
DOCKER_TARGETARCH_BUILD_ARG := $(DOCKER_TARGETARCH_BUILD_ARG) --build-arg TARGETARCH=$(TARGETARCH)
endif

$(BINARY): OS = $(word 1,$(subst /, ,$*))
$(BINARY): ARCH = $(word 2,$(subst /, ,$*))
$(BINARY): build/scrutiny-web-%:
	@echo "building web binary (OS = $(OS), ARCH = $(ARCH))"
	xgo -v --targets="$(OS)/$(ARCH)" -ldflags "-extldflags=-static -X main.goos=$(OS) -X main.goarch=$(ARCH)" -out scrutiny-web -tags "static netgo sqlite_omit_load_extension" ${GO_WORKSPACE}/webapp/backend/cmd/scrutiny/
# enable to build static binaries.
ifdef STATIC
export CGO_ENABLED = 0
LD_FLAGS := $(LD_FLAGS) -extldflags=-static
STATIC_TAGS := $(STATIC_TAGS) -tags "static netgo"
endif
ifdef GOOS
COLLECTOR_BINARY_NAME := $(COLLECTOR_BINARY_NAME)-$(GOOS)
WEB_BINARY_NAME := $(WEB_BINARY_NAME)-$(GOOS)
LD_FLAGS := $(LD_FLAGS) -X main.goos=$(GOOS)
endif
ifdef GOARCH
COLLECTOR_BINARY_NAME := $(COLLECTOR_BINARY_NAME)-$(GOARCH)
WEB_BINARY_NAME := $(WEB_BINARY_NAME)-$(GOARCH)
LD_FLAGS := $(LD_FLAGS) -X main.goarch=$(GOARCH)
endif
ifdef GOARM
COLLECTOR_BINARY_NAME := $(COLLECTOR_BINARY_NAME)-$(GOARM)
WEB_BINARY_NAME := $(WEB_BINARY_NAME)-$(GOARM)
endif
ifeq ($(OS),Windows_NT)
COLLECTOR_BINARY_NAME := $(COLLECTOR_BINARY_NAME).exe
WEB_BINARY_NAME := $(WEB_BINARY_NAME).exe
endif

	chmod +x "/build/scrutiny-web-$(OS)-$(ARCH)"
	file "/build/scrutiny-web-$(OS)-$(ARCH)" || true
	ldd "/build/scrutiny-web-$(OS)-$(ARCH)" || true
########################################################################################################################
# Binary
########################################################################################################################
.PHONY: all
all: binary-all

	@echo "building collector binary (OS = $(OS), ARCH = $(ARCH))"
	xgo -v --targets="$(OS)/$(ARCH)" -ldflags "-extldflags=-static -X main.goos=$(OS) -X main.goarch=$(ARCH)" -out scrutiny-collector-metrics -tags "static netgo" ${GO_WORKSPACE}/collector/cmd/collector-metrics/
.PHONY: binary-all
binary-all: binary-collector binary-web
	@echo "built binary-collector and binary-web targets"

	chmod +x "/build/scrutiny-collector-metrics-$(OS)-$(ARCH)"
	file "/build/scrutiny-collector-metrics-$(OS)-$(ARCH)" || true
	ldd "/build/scrutiny-collector-metrics-$(OS)-$(ARCH)" || true

windows/amd64: export OS = windows
windows/amd64: export ARCH = amd64
windows/amd64:
	@echo "building web binary (OS = $(OS), ARCH = $(ARCH))"
	xgo -v --targets="$(OS)/$(ARCH)" -ldflags "-extldflags=-static -X main.goos=$(OS) -X main.goarch=$(ARCH)" -out scrutiny-web -tags "static netgo sqlite_omit_load_extension" ${GO_WORKSPACE}/webapp/backend/cmd/scrutiny/
.PHONY: binary-clean
binary-clean:
	go clean

	@echo "building collector binary (OS = $(OS), ARCH = $(ARCH))"
	xgo -v --targets="$(OS)/$(ARCH)" -ldflags "-extldflags=-static -X main.goos=$(OS) -X main.goarch=$(ARCH)" -out scrutiny-collector-metrics -tags "static netgo" ${GO_WORKSPACE}/collector/cmd/collector-metrics/
.PHONY: binary-dep
binary-dep:
	go mod vendor

# clean:
#	rm scrutiny-collector-metrics-* scrutiny-web-*
.PHONY: binary-test
binary-test: binary-dep
	go test -v $(STATIC_TAGS) ./...

.PHONY: binary-test-coverage
binary-test-coverage: binary-dep
	go test -coverprofile=coverage.txt -covermode=atomic -v $(STATIC_TAGS) ./...

.PHONY: binary-collector
binary-collector: binary-dep
	go build -ldflags "$(LD_FLAGS)" -o $(COLLECTOR_BINARY_NAME) $(STATIC_TAGS) ./collector/cmd/collector-metrics/
ifneq ($(OS),Windows_NT)
	chmod +x $(COLLECTOR_BINARY_NAME)
	file $(COLLECTOR_BINARY_NAME) || true
	ldd $(COLLECTOR_BINARY_NAME) || true
	./$(COLLECTOR_BINARY_NAME) || true
endif

.PHONY: binary-web
binary-web: binary-dep
	go build -ldflags "$(LD_FLAGS)" -o $(WEB_BINARY_NAME) $(STATIC_TAGS) ./webapp/backend/cmd/scrutiny/
ifneq ($(OS),Windows_NT)
	chmod +x $(WEB_BINARY_NAME)
	file $(WEB_BINARY_NAME) || true
	ldd $(WEB_BINARY_NAME) || true
	./$(WEB_BINARY_NAME) || true
endif

########################################################################################################################
# Binary
########################################################################################################################

.PHONY: binary-frontend
# reduce logging, disable angular-cli analytics for ci environment
binary-frontend: export NPM_CONFIG_LOGLEVEL = warn
binary-frontend: export NG_CLI_ANALYTICS = false
binary-frontend:
	cd webapp/frontend
	npm install -g @angular/cli@v13-lts
	mkdir -p $(CURDIR)/dist
	npm ci
	npm run build:prod -- --output-path=$(CURDIR)/dist

.PHONY: binary-frontend-test-coverage
# reduce logging, disable angular-cli analytics for ci environment
binary-frontend-test-coverage:
	cd webapp/frontend
	npm ci
	npx ng test --watch=false --browsers=ChromeHeadless --code-coverage

########################################################################################################################
# Docker
# NOTE: these docker make targets are only used for local development (not used by Github Actions/CI)
# NOTE: docker-web and docker-omnibus require `make binary-frontend` or frontend.tar.gz content in /dist before executing.
########################################################################################################################
.PHONY: docker-collector
docker-collector:
	@echo "building collector docker image"
	docker build $(DOCKER_TARGETARCH_BUILD_ARG) -f docker/Dockerfile.collector -t analogj/scrutiny-dev:collector .

.PHONY: docker-web
docker-web:
	@echo "building web docker image"
	docker build $(DOCKER_TARGETARCH_BUILD_ARG) -f docker/Dockerfile.web -t analogj/scrutiny-dev:web .

.PHONY: docker-omnibus
docker-omnibus:
	@echo "building omnibus docker image"
	docker build $(DOCKER_TARGETARCH_BUILD_ARG) -f docker/Dockerfile -t analogj/scrutiny-dev:omnibus .

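Given the `GOOS`/`GOARCH`/`GOARM`/`STATIC` handling in the new Makefile above, a cross-compile is driven entirely by environment variables; a usage sketch (the output name follows the Makefile's own suffix logic):

```bash
# build a static linux/arm64 web binary; the Makefile appends
# -linux-arm64 to WEB_BINARY_NAME based on GOOS/GOARCH
GOOS=linux GOARCH=arm64 STATIC=true make binary-clean binary-web
# output: ./scrutiny-web-linux-arm64
```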
@@ -17,8 +17,6 @@
WebUI for smartd S.M.A.R.T monitoring

> NOTE: Scrutiny is a Work-in-Progress and still has some rough edges.
>
> WARNING: Once the [InfluxDB](https://github.com/AnalogJ/scrutiny/tree/influxdb) branch is merged, Scrutiny will use both sqlite and InfluxDB for data storage. Unfortunately, this may not be backwards compatible with the database structures in the master (sqlite only) branch.

[screenshot](https://imgur.com/a/5k8qMzS)

@@ -48,7 +46,7 @@ Scrutiny is a simple but focused application, with a couple of core features:
- Customized thresholds using real world failure rates
- Temperature tracking
- Provided as an all-in-one Docker image (but can be installed manually)
- Future Configurable Alerting/Notifications via Webhooks
- Configurable Alerting/Notifications via Webhooks
- (Future) Hard Drive performance testing & tracking

# Getting Started
@@ -60,18 +58,21 @@ Scrutiny uses `smartctl --scan` to detect devices/drives.
- All RAID controllers supported by `smartctl` are automatically supported by Scrutiny.
- While some RAID controllers support passing through the underlying SMART data to `smartctl`, others do not.
- In some cases `--scan` does not correctly detect the device type, returning [incomplete SMART data](https://github.com/AnalogJ/scrutiny/issues/45).
Scrutiny will eventually support overriding detected device type via the config file.
Scrutiny supports overriding detected device type via the config file: see [example.collector.yaml](https://github.com/AnalogJ/scrutiny/blob/master/example.collector.yaml) (sketch below)
- If you use docker, you **must** pass through the RAID virtual disk to the container using `--device` (see below)
- This device may be in `/dev/*` or `/dev/bus/*`.
- If you're unsure, run `smartctl --scan` on your host, and pass all listed devices to the container.

See [docs/TROUBLESHOOTING_DEVICE_COLLECTOR.md](./docs/TROUBLESHOOTING_DEVICE_COLLECTOR.md) for help

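A sketch of that override — the `devices` list shape follows example.collector.yaml; the device path and `type` value below are placeholders:

```yaml
# collector.yaml (sketch; values are placeholders)
devices:
  # force the smartctl device type when --scan mis-detects it
  - device: /dev/sda
    type: 'sat'
```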
## Docker

If you're using Docker, getting started is as simple as running the following command:

> See [docker/example.omnibus.docker-compose.yml](./docker/example.omnibus.docker-compose.yml) for a docker-compose file.

```bash
docker run -it --rm -p 8080:8080 \
docker run -it --rm -p 8080:8080 -p 8086:8086 \
-v `pwd`/scrutiny:/opt/scrutiny/config \
-v `pwd`/influxdb2:/opt/scrutiny/influxdb \
-v /run/udev:/run/udev:ro \
@@ -90,10 +91,16 @@ docker run -it --rm -p 8080:8080 \

### Hub/Spoke Deployment

In addition to the Omnibus image (available under the `latest` tag) there are 2 other Docker images available:
In addition to the Omnibus image (available under the `latest` tag) you can deploy in Hub/Spoke mode, which requires 3 other Docker images:

- `ghcr.io/analogj/scrutiny:master-collector` - Contains the Scrutiny data collector, `smartctl` binary and cron-like scheduler. You can run one collector on each server.
- `ghcr.io/analogj/scrutiny:master-web` - Contains the Web UI, API and Database. Only one container necessary
- `ghcr.io/analogj/scrutiny:master-collector` - Contains the Scrutiny data collector, `smartctl` binary and cron-like scheduler. You can run one collector on each server.
- `ghcr.io/analogj/scrutiny:master-web` - Contains the Web UI and API. Only one container necessary
- `influxdb:2.2` - InfluxDB image, used by the Web container to persist SMART data. Only one container necessary
See [docs/TROUBLESHOOTING_INFLUXDB.md](./docs/TROUBLESHOOTING_INFLUXDB.md)

> See [docker/example.hubspoke.docker-compose.yml](./docker/example.hubspoke.docker-compose.yml) for a docker-compose file.

```bash
docker run --rm -p 8086:8086 \
@@ -111,7 +118,7 @@ docker run --rm \
--cap-add SYS_RAWIO \
--device=/dev/sda \
--device=/dev/sdb \
-e SCRUTINY_API_ENDPOINT=http://SCRUTINY_WEB_IPADDRESS:8080 \
-e COLLECTOR_API_ENDPOINT=http://SCRUTINY_WEB_IPADDRESS:8080 \
--name scrutiny-collector \
ghcr.io/analogj/scrutiny:master-collector
```
@@ -150,7 +157,7 @@ Neither file is required, however if provided, it allows you to configure how Sc

## Cron Schedule
Unfortunately the Cron schedule cannot be configured via the `collector.yaml` (as the collector binary needs to be triggered by a scheduler/cron).
However, if you are using the official `ghcr.io/analogj/scrutiny:master-collector` or `ghcr.io/analogj/scrutiny:master-omnibus` docker images,
However, if you are using the official `ghcr.io/analogj/scrutiny:master-collector` or `ghcr.io/analogj/scrutiny:master-omnibus` docker images,
you can use the `COLLECTOR_CRON_SCHEDULE` environmental variable to override the default cron schedule (daily @ midnight - `0 0 * * *`).

`docker run -e COLLECTOR_CRON_SCHEDULE="0 0 * * *" ...`
@@ -167,6 +174,7 @@ Scrutiny supports sending SMART device failure notifications via the following s
- IFTTT
- Join
- Mattermost
- ntfy
- Pushbullet
- Pushover
- Slack
@@ -174,7 +182,9 @@ Scrutiny supports sending SMART device failure notifications via the following s
- Telegram
- Tulip

Check the `notify.urls` section of [example.scrutiny.yml](example.scrutiny.yaml) for more information and documentation for service specific setup.
Check the `notify.urls` section of [example.scrutiny.yml](example.scrutiny.yaml) for examples.

For more information and troubleshooting, see the [TROUBLESHOOTING_NOTIFICATIONS.md](./docs/TROUBLESHOOTING_NOTIFICATIONS.md) file

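A sketch of the shape that section takes — the exact URL schemes and keys are documented in example.scrutiny.yaml; the URLs below are placeholders, not working values:

```yaml
# scrutiny.yaml (sketch; URLs are placeholders)
notify:
  urls:
    - "discord://placeholder_token@placeholder_channelid"
    - "telegram://placeholder_token@telegram?channels=channel-1"
```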
### Testing Notifications

@@ -225,6 +235,22 @@ Or if you're not using docker, you can pass CLI arguments to the collector durin
scrutiny-collector-metrics run --debug --log-file /tmp/collector.log
```

# Supported Architectures

| Architecture Name | Binaries | Docker |
| --- | --- | --- |
| linux-amd64 | :white_check_mark: | :white_check_mark: |
| linux-arm-5 | :white_check_mark: | |
| linux-arm-6 | :white_check_mark: | |
| linux-arm-7 | :white_check_mark: | web/collector only. see [#236](https://github.com/AnalogJ/scrutiny/issues/236) |
| linux-arm64 | :white_check_mark: | :white_check_mark: |
| freebsd-amd64 | :white_check_mark: | |
| macos-amd64 | :white_check_mark: | :white_check_mark: |
| macos-arm64 | :white_check_mark: | :white_check_mark: |
| windows-amd64 | :white_check_mark: | WIP, see [#15](https://github.com/AnalogJ/scrutiny/issues/15) |
| windows-arm64 | :white_check_mark: | |


# Contributing

Please see the [CONTRIBUTING.md](CONTRIBUTING.md) for instructions on how to develop and contribute to the scrutiny codebase.

@@ -1,6 +1,7 @@
package main

import (
    "encoding/json"
    "fmt"
    "github.com/analogj/scrutiny/collector/pkg/collector"
    "github.com/analogj/scrutiny/collector/pkg/config"

@@ -29,8 +30,14 @@ func main() {
        os.Exit(1)
    }

    configFilePath := "/opt/scrutiny/config/collector.yaml"
    configFilePathAlternative := "/opt/scrutiny/config/collector.yml"
    if !utils.FileExists(configFilePath) && utils.FileExists(configFilePathAlternative) {
        configFilePath = configFilePathAlternative
    }

    //we're going to load the config file manually, since we need to validate it.
    err = config.ReadConfig("/opt/scrutiny/config/collector.yaml") // Find and read the config file
    err = config.ReadConfig(configFilePath) // Find and read the config file
    if _, ok := err.(errors.ConfigFileMissingError); ok { // Handle errors reading the config file
        //ignore "could not find config file"
    } else if err != nil {

@@ -120,26 +127,16 @@ OPTIONS:
            config.Set("api.endpoint", apiEndpoint)
        }

        collectorLogger := logrus.WithFields(logrus.Fields{
            "type": "metrics",
        })

        if level, err := logrus.ParseLevel(config.GetString("log.level")); err == nil {
            logrus.SetLevel(level)
        } else {
            logrus.SetLevel(logrus.InfoLevel)
        }

        if config.IsSet("log.file") && len(config.GetString("log.file")) > 0 {
            logFile, err := os.OpenFile(config.GetString("log.file"), os.O_CREATE|os.O_WRONLY, 0644)
            if err != nil {
                logrus.Errorf("Failed to open log file %s for output: %s", config.GetString("log.file"), err)
                return err
            }
        collectorLogger, logFile, err := CreateLogger(config)
        if logFile != nil {
            defer logFile.Close()
            logrus.SetOutput(io.MultiWriter(os.Stderr, logFile))
        }
        if err != nil {
            return err
        }

        settingsData, err := json.MarshalIndent(config.AllSettings(), "", "\t")
        collectorLogger.Debug(string(settingsData), err)
        metricCollector, err := collector.CreateMetricsCollector(
            config,
            collectorLogger,

@@ -161,7 +158,8 @@ OPTIONS:
        &cli.StringFlag{
            Name:    "api-endpoint",
            Usage:   "The api server endpoint",
            EnvVars: []string{"SCRUTINY_API_ENDPOINT"},
            EnvVars: []string{"COLLECTOR_API_ENDPOINT", "SCRUTINY_API_ENDPOINT"},
            //SCRUTINY_API_ENDPOINT is deprecated, but kept for backwards compatibility
        },

        &cli.StringFlag{

@@ -191,5 +189,28 @@ OPTIONS:
    if err != nil {
        log.Fatal(color.HiRedString("ERROR: %v", err))
    }

}

func CreateLogger(appConfig config.Interface) (*logrus.Entry, *os.File, error) {
    logger := logrus.WithFields(logrus.Fields{
        "type": "metrics",
    })

    if level, err := logrus.ParseLevel(appConfig.GetString("log.level")); err == nil {
        logger.Logger.SetLevel(level)
    } else {
        logger.Logger.SetLevel(logrus.InfoLevel)
    }

    var logFile *os.File
    var err error
    if appConfig.IsSet("log.file") && len(appConfig.GetString("log.file")) > 0 {
        logFile, err = os.OpenFile(appConfig.GetString("log.file"), os.O_CREATE|os.O_WRONLY, 0644)
        if err != nil {
            logger.Logger.Errorf("Failed to open log file %s for output: %s", appConfig.GetString("log.file"), err)
            return nil, logFile, err
        }
        logger.Logger.SetOutput(io.MultiWriter(os.Stderr, logFile))
    }
    return logger, logFile, nil
}
@@ -114,7 +114,7 @@ OPTIONS:
            Name:    "api-endpoint",
            Usage:   "The api server endpoint",
            Value:   "http://localhost:8080",
            EnvVars: []string{"SCRUTINY_API_ENDPOINT"},
            EnvVars: []string{"COLLECTOR_API_ENDPOINT"},
        },

        &cli.StringFlag{
@@ -8,7 +8,7 @@ import (
    "time"
)

var httpClient = &http.Client{Timeout: 10 * time.Second}
var httpClient = &http.Client{Timeout: 60 * time.Second}

type BaseCollector struct {
    logger *logrus.Entry
@@ -9,6 +9,7 @@ import (
    "github.com/analogj/scrutiny/collector/pkg/detect"
    "github.com/analogj/scrutiny/collector/pkg/errors"
    "github.com/analogj/scrutiny/collector/pkg/models"
    "github.com/samber/lo"
    "github.com/sirupsen/logrus"
    "net/url"
    "os"

@@ -56,11 +57,16 @@ func (mc *MetricsCollector) Run() error {
        Logger: mc.logger,
        Config: mc.config,
    }
    detectedStorageDevices, err := deviceDetector.Start()
    rawDetectedStorageDevices, err := deviceDetector.Start()
    if err != nil {
        return err
    }

    //filter any device with empty wwn (they are invalid)
    detectedStorageDevices := lo.Filter[models.Device](rawDetectedStorageDevices, func(dev models.Device, _ int) bool {
        return len(dev.WWN) > 0
    })

    mc.logger.Infoln("Sending detected devices to API, for filtering & validation")
    jsonObj, _ := json.Marshal(detectedStorageDevices)
    mc.logger.Debugf("Detected devices: %v", string(jsonObj))

@@ -98,10 +104,10 @@ func (mc *MetricsCollector) Run() error {

func (mc *MetricsCollector) Validate() error {
    mc.logger.Infoln("Verifying required tools")
    _, lookErr := exec.LookPath("smartctl")
    _, lookErr := exec.LookPath(mc.config.GetString("commands.metrics_smartctl_bin"))

    if lookErr != nil {
        return errors.DependencyMissingError("smartctl is missing")
        return errors.DependencyMissingError(fmt.Sprintf("%s binary is missing", mc.config.GetString("commands.metrics_smartctl_bin")))
    }

    return nil

@@ -116,14 +122,15 @@ func (mc *MetricsCollector) Collect(deviceWWN string, deviceName string, deviceT
    }
    mc.logger.Infof("Collecting smartctl results for %s\n", deviceName)

    args := []string{"-x", "-j"}
    fullDeviceName := fmt.Sprintf("%s%s", detect.DevicePrefix(), deviceName)
    args := strings.Split(mc.config.GetCommandMetricsSmartArgs(fullDeviceName), " ")
    //only include the device type if it's a non-standard one. In some cases ata drives are detected as scsi in docker, and metadata is lost.
    if len(deviceType) > 0 && deviceType != "scsi" && deviceType != "ata" {
        args = append(args, "-d", deviceType)
        args = append(args, "--device", deviceType)
    }
    args = append(args, fmt.Sprintf("%s%s", detect.DevicePrefix(), deviceName))
    args = append(args, fullDeviceName)

    result, err := mc.shell.Command(mc.logger, "smartctl", args, "", os.Environ())
    result, err := mc.shell.Command(mc.logger, mc.config.GetString("commands.metrics_smartctl_bin"), args, "", os.Environ())
    resultBytes := []byte(result)
    if err != nil {
        if exitError, ok := err.(*exec.ExitError); ok {

@@ -1,6 +1,7 @@
package config

import (
    "fmt"
    "github.com/analogj/go-util/utils"
    "github.com/analogj/scrutiny/collector/pkg/errors"
    "github.com/analogj/scrutiny/collector/pkg/models"

@@ -8,6 +9,8 @@ import (
    "github.com/spf13/viper"
    "log"
    "os"
    "sort"
    "strings"
)

// When initializing this class the following methods must be called:

@@ -16,6 +19,8 @@ import (
// This is done automatically when created via the Factory.
type configuration struct {
    *viper.Viper

    deviceOverrides []models.ScanOverride
}

//Viper uses the following precedence order. Each item takes precedence over the item below it:

@@ -38,6 +43,11 @@ func (c *configuration) Init() error {

    c.SetDefault("api.endpoint", "http://localhost:8080")

    c.SetDefault("commands.metrics_smartctl_bin", "smartctl")
    c.SetDefault("commands.metrics_scan_args", "--scan --json")
    c.SetDefault("commands.metrics_info_args", "--info --json")
    c.SetDefault("commands.metrics_smart_args", "--xall --json")

    //c.SetDefault("collect.short.command", "-a -o on -S on")

    //if you want to load a non-standard location system config file (~/drawbridge.yml), use ReadConfig
@@ -90,16 +100,89 @@ func (c *configuration) ValidateConfig() error {
    // check that device prefix matches OS
    // check that schema of config file is valid

    return nil
    // check that the collector commands are valid
    commandArgStrings := map[string]string{
        "commands.metrics_scan_args":  c.GetString("commands.metrics_scan_args"),
        "commands.metrics_info_args":  c.GetString("commands.metrics_info_args"),
        "commands.metrics_smart_args": c.GetString("commands.metrics_smart_args"),
    }

    errorStrings := []string{}
    for configKey, commandArgString := range commandArgStrings {
        args := strings.Split(commandArgString, " ")
        //ensure that the args string contains `--json` or `-j` flag
        containsJsonFlag := false
        containsDeviceFlag := false
        for _, flag := range args {
            if strings.HasPrefix(flag, "--json") || strings.HasPrefix(flag, "-j") {
                containsJsonFlag = true
            }
            if strings.HasPrefix(flag, "--device") || strings.HasPrefix(flag, "-d") {
                containsDeviceFlag = true
            }
        }

        if !containsJsonFlag {
            errorStrings = append(errorStrings, fmt.Sprintf("configuration key '%s' is missing '--json' flag", configKey))
        }

        if containsDeviceFlag {
            errorStrings = append(errorStrings, fmt.Sprintf("configuration key '%s' must not contain '--device' or '-d' flag", configKey))
        }
    }
    //sort(errorStrings)
    sort.Strings(errorStrings)

    if len(errorStrings) == 0 {
        return nil
    } else {
        return errors.ConfigValidationError(strings.Join(errorStrings, ", "))
    }
}

func (c *configuration) GetScanOverrides() []models.ScanOverride {
func (c *configuration) GetDeviceOverrides() []models.ScanOverride {
    // we have to support 2 types of device types.
    // - simple device type (device_type: 'sat')
    // and list of device types (type: \n- 3ware,0 \n- 3ware,1 \n- 3ware,2)
    // GetString will return "" if this is a list of device types.

    overrides := []models.ScanOverride{}
    c.UnmarshalKey("devices", &overrides, func(c *mapstructure.DecoderConfig) { c.WeaklyTypedInput = true })
    return overrides
    if c.deviceOverrides == nil {
        overrides := []models.ScanOverride{}
        c.UnmarshalKey("devices", &overrides, func(c *mapstructure.DecoderConfig) { c.WeaklyTypedInput = true })
        c.deviceOverrides = overrides
    }

    return c.deviceOverrides
}

func (c *configuration) GetCommandMetricsInfoArgs(deviceName string) string {
    overrides := c.GetDeviceOverrides()

    for _, deviceOverrides := range overrides {
        if strings.ToLower(deviceName) == strings.ToLower(deviceOverrides.Device) {
            //found matching device
            if len(deviceOverrides.Commands.MetricsInfoArgs) > 0 {
                return deviceOverrides.Commands.MetricsInfoArgs
            } else {
                return c.GetString("commands.metrics_info_args")
            }
        }
    }
    return c.GetString("commands.metrics_info_args")
}

func (c *configuration) GetCommandMetricsSmartArgs(deviceName string) string {
    overrides := c.GetDeviceOverrides()

    for _, deviceOverrides := range overrides {
        if strings.ToLower(deviceName) == strings.ToLower(deviceOverrides.Device) {
            //found matching device
            if len(deviceOverrides.Commands.MetricsSmartArgs) > 0 {
                return deviceOverrides.Commands.MetricsSmartArgs
            } else {
                return c.GetString("commands.metrics_smart_args")
            }
        }
    }
    return c.GetString("commands.metrics_smart_args")
}
@@ -30,12 +30,31 @@ func TestConfiguration_GetScanOverrides_Simple(t *testing.T) {
    //test
    err := testConfig.ReadConfig(path.Join("testdata", "simple_device.yaml"))
    require.NoError(t, err, "should correctly load simple device config")
    scanOverrides := testConfig.GetScanOverrides()
    scanOverrides := testConfig.GetDeviceOverrides()

    //assert
    require.Equal(t, []models.ScanOverride{{Device: "/dev/sda", DeviceType: []string{"sat"}, Ignore: false}}, scanOverrides)
}

// fixes #418
func TestConfiguration_GetScanOverrides_DeviceTypeComma(t *testing.T) {
    t.Parallel()

    //setup
    testConfig, _ := config.Create()

    //test
    err := testConfig.ReadConfig(path.Join("testdata", "device_type_comma.yaml"))
    require.NoError(t, err, "should correctly load simple device config")
    scanOverrides := testConfig.GetDeviceOverrides()

    //assert
    require.Equal(t, []models.ScanOverride{
        {Device: "/dev/sda", DeviceType: []string{"sat", "auto"}, Ignore: false},
        {Device: "/dev/sdb", DeviceType: []string{"sat,auto"}, Ignore: false},
    }, scanOverrides)
}

func TestConfiguration_GetScanOverrides_Ignore(t *testing.T) {
    t.Parallel()

@@ -45,7 +64,7 @@ func TestConfiguration_GetScanOverrides_Ignore(t *testing.T) {
    //test
    err := testConfig.ReadConfig(path.Join("testdata", "ignore_device.yaml"))
    require.NoError(t, err, "should correctly load ignore device config")
    scanOverrides := testConfig.GetScanOverrides()
    scanOverrides := testConfig.GetDeviceOverrides()

    //assert
    require.Equal(t, []models.ScanOverride{{Device: "/dev/sda", DeviceType: nil, Ignore: true}}, scanOverrides)

@@ -60,7 +79,7 @@ func TestConfiguration_GetScanOverrides_Raid(t *testing.T) {
    //test
    err := testConfig.ReadConfig(path.Join("testdata", "raid_device.yaml"))
    require.NoError(t, err, "should correctly load ignore device config")
    scanOverrides := testConfig.GetScanOverrides()
    scanOverrides := testConfig.GetDeviceOverrides()

    //assert
    require.Equal(t, []models.ScanOverride{

@@ -75,3 +94,53 @@ func TestConfiguration_GetScanOverrides_Raid(t *testing.T) {
        Ignore: false,
    }}, scanOverrides)
}

func TestConfiguration_InvalidCommands_MissingJson(t *testing.T) {
    t.Parallel()

    //setup
    testConfig, _ := config.Create()

    //test
    err := testConfig.ReadConfig(path.Join("testdata", "invalid_commands_missing_json.yaml"))
    require.EqualError(t, err, `ConfigValidationError: "configuration key 'commands.metrics_scan_args' is missing '--json' flag"`, "should throw an error because json flag is missing")
}

func TestConfiguration_InvalidCommands_IncludesDevice(t *testing.T) {
    t.Parallel()

    //setup
    testConfig, _ := config.Create()

    //test
    err := testConfig.ReadConfig(path.Join("testdata", "invalid_commands_includes_device.yaml"))
    require.EqualError(t, err, `ConfigValidationError: "configuration key 'commands.metrics_info_args' must not contain '--device' or '-d' flag, configuration key 'commands.metrics_smart_args' must not contain '--device' or '-d' flag"`, "should throw an error because device flags detected")
}

func TestConfiguration_OverrideCommands(t *testing.T) {
    t.Parallel()

    //setup
    testConfig, _ := config.Create()

    //test
    err := testConfig.ReadConfig(path.Join("testdata", "override_commands.yaml"))
    require.NoError(t, err, "should not throw an error")
    require.Equal(t, "--xall --json -T permissive", testConfig.GetString("commands.metrics_smart_args"))
}

func TestConfiguration_OverrideDeviceCommands_MetricsInfoArgs(t *testing.T) {
    t.Parallel()

    //setup
    testConfig, _ := config.Create()

    //test
    err := testConfig.ReadConfig(path.Join("testdata", "override_device_commands.yaml"))
    require.NoError(t, err, "should correctly override device command")

    //assert
    require.Equal(t, "--info --json -T permissive", testConfig.GetCommandMetricsInfoArgs("/dev/sda"))
    require.Equal(t, "--info --json", testConfig.GetCommandMetricsInfoArgs("/dev/sdb"))
    //require.Equal(t, []models.ScanOverride{{Device: "/dev/sda", DeviceType: nil, Commands: {MetricsInfoArgs: "--info --json -T "}}}, scanOverrides)
}

@@ -22,5 +22,7 @@ type Interface interface {
    GetStringSlice(key string) []string
    UnmarshalKey(key string, rawVal interface{}, decoderOpts ...viper.DecoderConfigOption) error

    GetScanOverrides() []models.ScanOverride
    GetDeviceOverrides() []models.ScanOverride
    GetCommandMetricsInfoArgs(deviceName string) string
    GetCommandMetricsSmartArgs(deviceName string) string
}
@@ -5,88 +5,37 @@
package mock_config

import (
    reflect "reflect"

    models "github.com/analogj/scrutiny/collector/pkg/models"
    gomock "github.com/golang/mock/gomock"
    viper "github.com/spf13/viper"
    reflect "reflect"
)

// MockInterface is a mock of Interface interface
// MockInterface is a mock of Interface interface.
type MockInterface struct {
    ctrl     *gomock.Controller
    recorder *MockInterfaceMockRecorder
}

// MockInterfaceMockRecorder is the mock recorder for MockInterface
// MockInterfaceMockRecorder is the mock recorder for MockInterface.
type MockInterfaceMockRecorder struct {
    mock *MockInterface
}

// NewMockInterface creates a new mock instance
// NewMockInterface creates a new mock instance.
func NewMockInterface(ctrl *gomock.Controller) *MockInterface {
    mock := &MockInterface{ctrl: ctrl}
    mock.recorder = &MockInterfaceMockRecorder{mock}
    return mock
}

// EXPECT returns an object that allows the caller to indicate expected use
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder {
    return m.recorder
}

// Init mocks base method
func (m *MockInterface) Init() error {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "Init")
    ret0, _ := ret[0].(error)
    return ret0
}

// Init indicates an expected call of Init
func (mr *MockInterfaceMockRecorder) Init() *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Init", reflect.TypeOf((*MockInterface)(nil).Init))
}

// ReadConfig mocks base method
func (m *MockInterface) ReadConfig(configFilePath string) error {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "ReadConfig", configFilePath)
    ret0, _ := ret[0].(error)
    return ret0
}

// ReadConfig indicates an expected call of ReadConfig
func (mr *MockInterfaceMockRecorder) ReadConfig(configFilePath interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadConfig", reflect.TypeOf((*MockInterface)(nil).ReadConfig), configFilePath)
}

// Set mocks base method
func (m *MockInterface) Set(key string, value interface{}) {
    m.ctrl.T.Helper()
    m.ctrl.Call(m, "Set", key, value)
}

// Set indicates an expected call of Set
func (mr *MockInterfaceMockRecorder) Set(key, value interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Set", reflect.TypeOf((*MockInterface)(nil).Set), key, value)
}

// SetDefault mocks base method
func (m *MockInterface) SetDefault(key string, value interface{}) {
    m.ctrl.T.Helper()
    m.ctrl.Call(m, "SetDefault", key, value)
}

// SetDefault indicates an expected call of SetDefault
func (mr *MockInterfaceMockRecorder) SetDefault(key, value interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDefault", reflect.TypeOf((*MockInterface)(nil).SetDefault), key, value)
}

// AllSettings mocks base method
// AllSettings mocks base method.
func (m *MockInterface) AllSettings() map[string]interface{} {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "AllSettings")

@@ -94,27 +43,13 @@ func (m *MockInterface) AllSettings() map[string]interface{} {
    return ret0
}

// AllSettings indicates an expected call of AllSettings
// AllSettings indicates an expected call of AllSettings.
func (mr *MockInterfaceMockRecorder) AllSettings() *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllSettings", reflect.TypeOf((*MockInterface)(nil).AllSettings))
}

// IsSet mocks base method
func (m *MockInterface) IsSet(key string) bool {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "IsSet", key)
    ret0, _ := ret[0].(bool)
    return ret0
}

// IsSet indicates an expected call of IsSet
func (mr *MockInterfaceMockRecorder) IsSet(key interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsSet", reflect.TypeOf((*MockInterface)(nil).IsSet), key)
}

// Get mocks base method
// Get mocks base method.
func (m *MockInterface) Get(key string) interface{} {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "Get", key)

@@ -122,13 +57,13 @@ func (m *MockInterface) Get(key string) interface{} {
    return ret0
}

// Get indicates an expected call of Get
// Get indicates an expected call of Get.
func (mr *MockInterfaceMockRecorder) Get(key interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), key)
}

// GetBool mocks base method
// GetBool mocks base method.
func (m *MockInterface) GetBool(key string) bool {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "GetBool", key)

@@ -136,13 +71,55 @@ func (m *MockInterface) GetBool(key string) bool {
    return ret0
}

// GetBool indicates an expected call of GetBool
// GetBool indicates an expected call of GetBool.
func (mr *MockInterfaceMockRecorder) GetBool(key interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBool", reflect.TypeOf((*MockInterface)(nil).GetBool), key)
}

// GetInt mocks base method
// GetCommandMetricsInfoArgs mocks base method.
func (m *MockInterface) GetCommandMetricsInfoArgs(deviceName string) string {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "GetCommandMetricsInfoArgs", deviceName)
    ret0, _ := ret[0].(string)
    return ret0
}

// GetCommandMetricsInfoArgs indicates an expected call of GetCommandMetricsInfoArgs.
func (mr *MockInterfaceMockRecorder) GetCommandMetricsInfoArgs(deviceName interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCommandMetricsInfoArgs", reflect.TypeOf((*MockInterface)(nil).GetCommandMetricsInfoArgs), deviceName)
}

// GetCommandMetricsSmartArgs mocks base method.
func (m *MockInterface) GetCommandMetricsSmartArgs(deviceName string) string {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "GetCommandMetricsSmartArgs", deviceName)
    ret0, _ := ret[0].(string)
    return ret0
}

// GetCommandMetricsSmartArgs indicates an expected call of GetCommandMetricsSmartArgs.
func (mr *MockInterfaceMockRecorder) GetCommandMetricsSmartArgs(deviceName interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCommandMetricsSmartArgs", reflect.TypeOf((*MockInterface)(nil).GetCommandMetricsSmartArgs), deviceName)
}

// GetDeviceOverrides mocks base method.
func (m *MockInterface) GetDeviceOverrides() []models.ScanOverride {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "GetDeviceOverrides")
    ret0, _ := ret[0].([]models.ScanOverride)
    return ret0
}

// GetDeviceOverrides indicates an expected call of GetDeviceOverrides.
func (mr *MockInterfaceMockRecorder) GetDeviceOverrides() *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeviceOverrides", reflect.TypeOf((*MockInterface)(nil).GetDeviceOverrides))
}

// GetInt mocks base method.
func (m *MockInterface) GetInt(key string) int {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "GetInt", key)

@@ -150,13 +127,13 @@ func (m *MockInterface) GetInt(key string) int {
    return ret0
}

// GetInt indicates an expected call of GetInt
// GetInt indicates an expected call of GetInt.
func (mr *MockInterfaceMockRecorder) GetInt(key interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInt", reflect.TypeOf((*MockInterface)(nil).GetInt), key)
}

// GetString mocks base method
// GetString mocks base method.
func (m *MockInterface) GetString(key string) string {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "GetString", key)

@@ -164,13 +141,13 @@ func (m *MockInterface) GetString(key string) string {
    return ret0
}

// GetString indicates an expected call of GetString
// GetString indicates an expected call of GetString.
func (mr *MockInterfaceMockRecorder) GetString(key interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetString", reflect.TypeOf((*MockInterface)(nil).GetString), key)
}

// GetStringSlice mocks base method
// GetStringSlice mocks base method.
func (m *MockInterface) GetStringSlice(key string) []string {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "GetStringSlice", key)

@@ -178,13 +155,79 @@ func (m *MockInterface) GetStringSlice(key string) []string {
    return ret0
}

// GetStringSlice indicates an expected call of GetStringSlice
// GetStringSlice indicates an expected call of GetStringSlice.
func (mr *MockInterfaceMockRecorder) GetStringSlice(key interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStringSlice", reflect.TypeOf((*MockInterface)(nil).GetStringSlice), key)
}

// UnmarshalKey mocks base method
// Init mocks base method.
func (m *MockInterface) Init() error {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "Init")
    ret0, _ := ret[0].(error)
    return ret0
}

// Init indicates an expected call of Init.
func (mr *MockInterfaceMockRecorder) Init() *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Init", reflect.TypeOf((*MockInterface)(nil).Init))
}

// IsSet mocks base method.
func (m *MockInterface) IsSet(key string) bool {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "IsSet", key)
    ret0, _ := ret[0].(bool)
    return ret0
}

// IsSet indicates an expected call of IsSet.
func (mr *MockInterfaceMockRecorder) IsSet(key interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsSet", reflect.TypeOf((*MockInterface)(nil).IsSet), key)
}

// ReadConfig mocks base method.
func (m *MockInterface) ReadConfig(configFilePath string) error {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "ReadConfig", configFilePath)
    ret0, _ := ret[0].(error)
    return ret0
}

// ReadConfig indicates an expected call of ReadConfig.
func (mr *MockInterfaceMockRecorder) ReadConfig(configFilePath interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadConfig", reflect.TypeOf((*MockInterface)(nil).ReadConfig), configFilePath)
}

// Set mocks base method.
func (m *MockInterface) Set(key string, value interface{}) {
    m.ctrl.T.Helper()
    m.ctrl.Call(m, "Set", key, value)
}

// Set indicates an expected call of Set.
func (mr *MockInterfaceMockRecorder) Set(key, value interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Set", reflect.TypeOf((*MockInterface)(nil).Set), key, value)
}

// SetDefault mocks base method.
func (m *MockInterface) SetDefault(key string, value interface{}) {
    m.ctrl.T.Helper()
    m.ctrl.Call(m, "SetDefault", key, value)
}

// SetDefault indicates an expected call of SetDefault.
func (mr *MockInterfaceMockRecorder) SetDefault(key, value interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDefault", reflect.TypeOf((*MockInterface)(nil).SetDefault), key, value)
}

// UnmarshalKey mocks base method.
func (m *MockInterface) UnmarshalKey(key string, rawVal interface{}, decoderOpts ...viper.DecoderConfigOption) error {
    m.ctrl.T.Helper()
    varargs := []interface{}{key, rawVal}

@@ -196,23 +239,9 @@ func (m *MockInterface) UnmarshalKey(key string, rawVal interface{}, decoderOpts
    return ret0
}

// UnmarshalKey indicates an expected call of UnmarshalKey
// UnmarshalKey indicates an expected call of UnmarshalKey.
func (mr *MockInterfaceMockRecorder) UnmarshalKey(key, rawVal interface{}, decoderOpts ...interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    varargs := append([]interface{}{key, rawVal}, decoderOpts...)
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnmarshalKey", reflect.TypeOf((*MockInterface)(nil).UnmarshalKey), varargs...)
}

// GetScanOverrides mocks base method
func (m *MockInterface) GetScanOverrides() []models.ScanOverride {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "GetScanOverrides")
    ret0, _ := ret[0].([]models.ScanOverride)
    return ret0
}

// GetScanOverrides indicates an expected call of GetScanOverrides
func (mr *MockInterfaceMockRecorder) GetScanOverrides() *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetScanOverrides", reflect.TypeOf((*MockInterface)(nil).GetScanOverrides))
}
@@ -0,0 +1,9 @@
version: 1
devices:
  # the scrutiny config parser will detect `sat,auto` as two separate items in a list. If you want to use `-d sat,auto` you must
  # set 'sat,auto' in a list (see e.g. /dev/sdb)
  - device: /dev/sda
    type: 'sat,auto'
  - device: /dev/sdb
    type:
      - sat,auto

@@ -0,0 +1,4 @@
commands:
  metrics_scan_args: '--scan --json' # used to detect devices
  metrics_info_args: '--info --json --device=sat' # used to determine device unique ID & register device with Scrutiny
  metrics_smart_args: '--xall --json -d sat' # used to retrieve smart data for each device.

@@ -0,0 +1,4 @@
commands:
  metrics_scan_args: '--scan' # used to detect devices
  metrics_info_args: '--info -j' # used to determine device unique ID & register device with Scrutiny
  metrics_smart_args: '--xall --json' # used to retrieve smart data for each device.

@@ -0,0 +1,4 @@
commands:
  metrics_scan_args: '--scan --json' # used to detect devices
  metrics_info_args: '--info -j' # used to determine device unique ID & register device with Scrutiny
  metrics_smart_args: '--xall --json -T permissive' # used to retrieve smart data for each device.

@@ -0,0 +1,5 @@
version: 1
devices:
  - device: /dev/sda
    commands:
      metrics_info_args: "--info --json -T permissive"
@@ -28,7 +28,8 @@ type Detect struct {
// models.Device returned from this function only contain the minimum data for smartctl to execute: device type and device name (device file).
func (d *Detect) SmartctlScan() ([]models.Device, error) {
    //we use smartctl to detect all the drives available.
    detectedDeviceConnJson, err := d.Shell.Command(d.Logger, "smartctl", []string{"--scan", "-j"}, "", os.Environ())
    args := strings.Split(d.Config.GetString("commands.metrics_scan_args"), " ")
    detectedDeviceConnJson, err := d.Shell.Command(d.Logger, d.Config.GetString("commands.metrics_smartctl_bin"), args, "", os.Environ())
    if err != nil {
        d.Logger.Errorf("Error scanning for devices: %v", err)
        return nil, err

@@ -51,15 +52,15 @@ func (d *Detect) SmartctlScan() ([]models.Device, error) {
// - WWN is provided as component data, rather than a "string". We'll have to generate the WWN value ourselves
// - WWN from smartctl only provided for ATA protocol drives, NVMe and SCSI drives do not include WWN.
func (d *Detect) SmartCtlInfo(device *models.Device) error {

    args := []string{"--info", "-j"}
    fullDeviceName := fmt.Sprintf("%s%s", DevicePrefix(), device.DeviceName)
    args := strings.Split(d.Config.GetCommandMetricsInfoArgs(fullDeviceName), " ")
    //only include the device type if it's a non-standard one. In some cases ata drives are detected as scsi in docker, and metadata is lost.
    if len(device.DeviceType) > 0 && device.DeviceType != "scsi" && device.DeviceType != "ata" {
        args = append(args, "-d", device.DeviceType)
        args = append(args, "--device", device.DeviceType)
    }
    args = append(args, fmt.Sprintf("%s%s", DevicePrefix(), device.DeviceName))
    args = append(args, fullDeviceName)

    availableDeviceInfoJson, err := d.Shell.Command(d.Logger, "smartctl", args, "", os.Environ())
    availableDeviceInfoJson, err := d.Shell.Command(d.Logger, d.Config.GetString("commands.metrics_smartctl_bin"), args, "", os.Environ())
    if err != nil {
        d.Logger.Errorf("Could not retrieve device information for %s: %v", device.DeviceName, err)
        return err
@@ -138,7 +139,7 @@ func (d *Detect) TransformDetectedDevices(detectedDeviceConns models.Scan) []mod

    //now that we've "grouped" all the devices, let's override any groups specified in the config file.

    for _, overrideDevice := range d.Config.GetScanOverrides() {
    for _, overrideDevice := range d.Config.GetDeviceOverrides() {
        overrideDeviceFile := strings.ToLower(overrideDevice.Device)

        if overrideDevice.Ignore {

@@ -148,10 +149,35 @@ func (d *Detect) TransformDetectedDevices(detectedDeviceConns models.Scan) []mod
            //create a new device group, and replace the one generated by smartctl --scan
            overrideDeviceGroup := []models.Device{}

            for _, overrideDeviceType := range overrideDevice.DeviceType {
            if overrideDevice.DeviceType != nil {
                for _, overrideDeviceType := range overrideDevice.DeviceType {
                    overrideDeviceGroup = append(overrideDeviceGroup, models.Device{
                        HostId:     d.Config.GetString("host.id"),
                        DeviceType: overrideDeviceType,
                        DeviceName: strings.TrimPrefix(overrideDeviceFile, DevicePrefix()),
                    })
                }
            } else {
                //user may have specified device in config file without device type (default to scanned device type)

                //check if the device file was detected by the scanner
                var deviceType string
                if scannedDevice, foundScannedDevice := groupedDevices[overrideDeviceFile]; foundScannedDevice {
                    if len(scannedDevice) > 0 {
                        //take the device type from the first grouped device
                        deviceType = scannedDevice[0].DeviceType
                    } else {
                        deviceType = "ata"
                    }

                } else {
                    //fallback to ata if no scanned device detected
                    deviceType = "ata"
                }

                overrideDeviceGroup = append(overrideDeviceGroup, models.Device{
                    HostId:     d.Config.GetString("host.id"),
                    DeviceType: overrideDeviceType,
                    DeviceType: deviceType,
                    DeviceName: strings.TrimPrefix(overrideDeviceFile, DevicePrefix()),
                })
            }
@@ -18,7 +18,9 @@ func TestDetect_SmartctlScan(t *testing.T) {
    defer mockCtrl.Finish()
    fakeConfig := mock_config.NewMockInterface(mockCtrl)
    fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
    fakeConfig.EXPECT().GetScanOverrides().AnyTimes().Return([]models.ScanOverride{})
    fakeConfig.EXPECT().GetDeviceOverrides().AnyTimes().Return([]models.ScanOverride{})
    fakeConfig.EXPECT().GetString("commands.metrics_smartctl_bin").AnyTimes().Return("smartctl")
    fakeConfig.EXPECT().GetString("commands.metrics_scan_args").AnyTimes().Return("--scan --json")

    fakeShell := mock_shell.NewMockInterface(mockCtrl)
    testScanResults, err := ioutil.ReadFile("testdata/smartctl_scan_simple.json")

@@ -45,7 +47,9 @@ func TestDetect_SmartctlScan_Megaraid(t *testing.T) {
    defer mockCtrl.Finish()
    fakeConfig := mock_config.NewMockInterface(mockCtrl)
    fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
    fakeConfig.EXPECT().GetScanOverrides().AnyTimes().Return([]models.ScanOverride{})
    fakeConfig.EXPECT().GetDeviceOverrides().AnyTimes().Return([]models.ScanOverride{})
    fakeConfig.EXPECT().GetString("commands.metrics_smartctl_bin").AnyTimes().Return("smartctl")
    fakeConfig.EXPECT().GetString("commands.metrics_scan_args").AnyTimes().Return("--scan --json")

    fakeShell := mock_shell.NewMockInterface(mockCtrl)
    testScanResults, err := ioutil.ReadFile("testdata/smartctl_scan_megaraid.json")

@@ -75,7 +79,9 @@ func TestDetect_SmartctlScan_Nvme(t *testing.T) {
    defer mockCtrl.Finish()
    fakeConfig := mock_config.NewMockInterface(mockCtrl)
    fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
    fakeConfig.EXPECT().GetScanOverrides().AnyTimes().Return([]models.ScanOverride{})
    fakeConfig.EXPECT().GetDeviceOverrides().AnyTimes().Return([]models.ScanOverride{})
    fakeConfig.EXPECT().GetString("commands.metrics_smartctl_bin").AnyTimes().Return("smartctl")
    fakeConfig.EXPECT().GetString("commands.metrics_scan_args").AnyTimes().Return("--scan --json")

    fakeShell := mock_shell.NewMockInterface(mockCtrl)
    testScanResults, err := ioutil.ReadFile("testdata/smartctl_scan_nvme.json")

@@ -104,7 +110,10 @@ func TestDetect_TransformDetectedDevices_Empty(t *testing.T) {
    defer mockCtrl.Finish()
    fakeConfig := mock_config.NewMockInterface(mockCtrl)
    fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
    fakeConfig.EXPECT().GetScanOverrides().AnyTimes().Return([]models.ScanOverride{})
    fakeConfig.EXPECT().GetDeviceOverrides().AnyTimes().Return([]models.ScanOverride{})
    fakeConfig.EXPECT().GetString("commands.metrics_smartctl_bin").AnyTimes().Return("smartctl")
    fakeConfig.EXPECT().GetString("commands.metrics_scan_args").AnyTimes().Return("--scan --json")

    detectedDevices := models.Scan{
        Devices: []models.ScanDevice{
            {

@@ -134,7 +143,10 @@ func TestDetect_TransformDetectedDevices_Ignore(t *testing.T) {
    defer mockCtrl.Finish()
    fakeConfig := mock_config.NewMockInterface(mockCtrl)
    fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
    fakeConfig.EXPECT().GetScanOverrides().AnyTimes().Return([]models.ScanOverride{{Device: "/dev/sda", DeviceType: nil, Ignore: true}})
    fakeConfig.EXPECT().GetDeviceOverrides().AnyTimes().Return([]models.ScanOverride{{Device: "/dev/sda", DeviceType: nil, Ignore: true}})
    fakeConfig.EXPECT().GetString("commands.metrics_smartctl_bin").AnyTimes().Return("smartctl")
    fakeConfig.EXPECT().GetString("commands.metrics_scan_args").AnyTimes().Return("--scan --json")

    detectedDevices := models.Scan{
        Devices: []models.ScanDevice{
            {

@@ -163,7 +175,9 @@ func TestDetect_TransformDetectedDevices_Raid(t *testing.T) {
    defer mockCtrl.Finish()
    fakeConfig := mock_config.NewMockInterface(mockCtrl)
    fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
    fakeConfig.EXPECT().GetScanOverrides().AnyTimes().Return([]models.ScanOverride{
    fakeConfig.EXPECT().GetString("commands.metrics_smartctl_bin").AnyTimes().Return("smartctl")
    fakeConfig.EXPECT().GetString("commands.metrics_scan_args").AnyTimes().Return("--scan --json")
    fakeConfig.EXPECT().GetDeviceOverrides().AnyTimes().Return([]models.ScanOverride{
        {
            Device:     "/dev/bus/0",
            DeviceType: []string{"megaraid,14", "megaraid,15", "megaraid,18", "megaraid,19", "megaraid,20", "megaraid,21"},

@@ -202,7 +216,9 @@ func TestDetect_TransformDetectedDevices_Simple(t *testing.T) {
    defer mockCtrl.Finish()
    fakeConfig := mock_config.NewMockInterface(mockCtrl)
    fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
    fakeConfig.EXPECT().GetScanOverrides().AnyTimes().Return([]models.ScanOverride{{Device: "/dev/sda", DeviceType: []string{"sat+megaraid"}}})
    fakeConfig.EXPECT().GetString("commands.metrics_smartctl_bin").AnyTimes().Return("smartctl")
    fakeConfig.EXPECT().GetString("commands.metrics_scan_args").AnyTimes().Return("--scan --json")
    fakeConfig.EXPECT().GetDeviceOverrides().AnyTimes().Return([]models.ScanOverride{{Device: "/dev/sda", DeviceType: []string{"sat+megaraid"}}})
    detectedDevices := models.Scan{
        Devices: []models.ScanDevice{
            {

@@ -225,3 +241,59 @@ func TestDetect_TransformDetectedDevices_Simple(t *testing.T) {
    require.Equal(t, 1, len(transformedDevices))
    require.Equal(t, "sat+megaraid", transformedDevices[0].DeviceType)
}

// test https://github.com/AnalogJ/scrutiny/issues/255#issuecomment-1164024126
func TestDetect_TransformDetectedDevices_WithoutDeviceTypeOverride(t *testing.T) {
    //setup
    mockCtrl := gomock.NewController(t)
    defer mockCtrl.Finish()
    fakeConfig := mock_config.NewMockInterface(mockCtrl)
    fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
    fakeConfig.EXPECT().GetString("commands.metrics_smartctl_bin").AnyTimes().Return("smartctl")
    fakeConfig.EXPECT().GetString("commands.metrics_scan_args").AnyTimes().Return("--scan --json")
    fakeConfig.EXPECT().GetDeviceOverrides().AnyTimes().Return([]models.ScanOverride{{Device: "/dev/sda"}})
    detectedDevices := models.Scan{
        Devices: []models.ScanDevice{
            {
                Name:     "/dev/sda",
                InfoName: "/dev/sda",
                Protocol: "ata",
                Type:     "scsi",
            },
        },
    }

    d := detect.Detect{
        Config: fakeConfig,
    }

    //test
    transformedDevices := d.TransformDetectedDevices(detectedDevices)

    //assert
    require.Equal(t, 1, len(transformedDevices))
    require.Equal(t, "scsi", transformedDevices[0].DeviceType)
}

func TestDetect_TransformDetectedDevices_WhenDeviceNotDetected(t *testing.T) {
    //setup
    mockCtrl := gomock.NewController(t)
    defer mockCtrl.Finish()
    fakeConfig := mock_config.NewMockInterface(mockCtrl)
    fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
    fakeConfig.EXPECT().GetString("commands.metrics_smartctl_bin").AnyTimes().Return("smartctl")
    fakeConfig.EXPECT().GetString("commands.metrics_scan_args").AnyTimes().Return("--scan --json")
    fakeConfig.EXPECT().GetDeviceOverrides().AnyTimes().Return([]models.ScanOverride{{Device: "/dev/sda"}})
    detectedDevices := models.Scan{}

    d := detect.Detect{
        Config: fakeConfig,
    }

    //test
    transformedDevices := d.TransformDetectedDevices(detectedDevices)

    //assert
    require.Equal(t, 1, len(transformedDevices))
    require.Equal(t, "ata", transformedDevices[0].DeviceType)
}
@@ -13,7 +13,7 @@ func DevicePrefix() string {

func (d *Detect) Start() ([]models.Device, error) {
    d.Shell = shell.Create()
    // call the base/common functionality to get a list of devicess
    // call the base/common functionality to get a list of devices
    detectedDevices, err := d.SmartctlScan()
    if err != nil {
        return nil, err
@@ -1,9 +1,12 @@
package detect

import (
    "fmt"
    "github.com/analogj/scrutiny/collector/pkg/common/shell"
    "github.com/analogj/scrutiny/collector/pkg/models"
    "github.com/jaypipes/ghw"
    "io/ioutil"
    "path/filepath"
    "strings"
)

@@ -22,6 +25,7 @@ func (d *Detect) Start() ([]models.Device, error) {
    //inflate device info for detected devices.
    for ndx, _ := range detectedDevices {
        d.SmartCtlInfo(&detectedDevices[ndx])   //ignore errors.
        populateUdevInfo(&detectedDevices[ndx]) //ignore errors.
    }

    return detectedDevices, nil

@@ -49,3 +53,51 @@ func (d *Detect) wwnFallback(detectedDevice *models.Device) {
    //wwn must always be lowercase.
    detectedDevice.WWN = strings.ToLower(detectedDevice.WWN)
}

// as discussed in
// - https://github.com/AnalogJ/scrutiny/issues/225
// - https://github.com/jaypipes/ghw/issues/59#issue-361915216
// udev exposes its data in a standardized way under /run/udev/data/....
func populateUdevInfo(detectedDevice *models.Device) error {
    // Get device major:minor numbers
    // `cat /sys/class/block/sda/dev`
    devNo, err := ioutil.ReadFile(filepath.Join("/sys/class/block/", detectedDevice.DeviceName, "dev"))
    if err != nil {
        return err
    }

    // Look up block device in udev runtime database
    // `cat /run/udev/data/b8:0`
    udevID := "b" + strings.TrimSpace(string(devNo))
    udevBytes, err := ioutil.ReadFile(filepath.Join("/run/udev/data/", udevID))
    if err != nil {
        return err
    }

    deviceMountPaths := []string{}
    udevInfo := make(map[string]string)
    for _, udevLine := range strings.Split(string(udevBytes), "\n") {
        if strings.HasPrefix(udevLine, "E:") {
            if s := strings.SplitN(udevLine[2:], "=", 2); len(s) == 2 {
                udevInfo[s[0]] = s[1]
            }
        } else if strings.HasPrefix(udevLine, "S:") {
            deviceMountPaths = append(deviceMountPaths, udevLine[2:])
        }
    }

    //Set additional device information.
    if deviceLabel, exists := udevInfo["ID_FS_LABEL"]; exists {
        detectedDevice.DeviceLabel = deviceLabel
    }
    if deviceUUID, exists := udevInfo["ID_FS_UUID"]; exists {
        detectedDevice.DeviceUUID = deviceUUID
    }
    if deviceSerialID, exists := udevInfo["ID_SERIAL"]; exists {
        detectedDevice.DeviceSerialID = fmt.Sprintf("%s-%s", udevInfo["ID_BUS"], deviceSerialID)
    }

    return nil
}
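For context, a `/run/udev/data/b<major>:<minor>` entry is a plain-text dump in which `E:` lines carry `KEY=VALUE` properties and `S:` lines carry `/dev/`-relative symlink paths; the parser above only consumes those two prefixes. A trimmed, hypothetical example (device names and IDs are placeholders):

```text
S:disk/by-id/ata-EXAMPLE_SSD_SERIAL123
E:ID_BUS=ata
E:ID_SERIAL=EXAMPLE_SSD_SERIAL123
E:ID_FS_LABEL=data
E:ID_FS_UUID=0db85af2-0000-0000-0000-000000000000
```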
@@ -1,10 +1,13 @@
package models

type Device struct {
    WWN string `json:"wwn"`
    HostId string `json:"host_id"`
    WWN    string `json:"wwn"`

    DeviceName     string `json:"device_name"`
    DeviceUUID     string `json:"device_uuid"`
    DeviceSerialID string `json:"device_serial_id"`
    DeviceLabel    string `json:"device_label"`

    Manufacturer  string `json:"manufacturer"`
    ModelName     string `json:"model_name"`
    InterfaceType string `json:"interface_type"`

@@ -17,6 +20,10 @@ type Device struct {
    SmartSupport   bool   `json:"smart_support"`
    DeviceProtocol string `json:"device_protocol"` //protocol determines which smart attribute types are available (ATA, NVMe, SCSI)
    DeviceType     string `json:"device_type"`     //device type is used for querying with -d/t flag, should only be used by collector.

    // User provided metadata
    Label  string `json:"label"`
    HostId string `json:"host_id"`
}

type DeviceWrapper struct {

@@ -4,4 +4,8 @@ type ScanOverride struct {
    Device     string   `mapstructure:"device"`
    DeviceType []string `mapstructure:"type"`
    Ignore     bool     `mapstructure:"ignore"`
    Commands   struct {
        MetricsInfoArgs  string `mapstructure:"metrics_info_args"`
        MetricsSmartArgs string `mapstructure:"metrics_smart_args"`
    } `mapstructure:"commands"`
}
@@ -1,65 +1,62 @@
########
FROM golang:1.14.4-buster as backendbuild
# syntax=docker/dockerfile:1.4
########################################################################################################################
# Omnibus Image
########################################################################################################################

######## Build the frontend
FROM --platform=${BUILDPLATFORM} node AS frontendbuild
WORKDIR /go/src/github.com/analogj/scrutiny
COPY --link . /go/src/github.com/analogj/scrutiny

RUN make binary-frontend


######## Build the backend
FROM golang:1.20-bullseye as backendbuild

WORKDIR /go/src/github.com/analogj/scrutiny

COPY . /go/src/github.com/analogj/scrutiny

RUN go mod vendor && \
    go build -ldflags '-w -extldflags "-static"' -o scrutiny webapp/backend/cmd/scrutiny/scrutiny.go && \
    go build -ldflags '-w -extldflags "-static"' -o scrutiny-collector-selftest collector/cmd/collector-selftest/collector-selftest.go && \
    go build -ldflags '-w -extldflags "-static"' -o scrutiny-collector-metrics collector/cmd/collector-metrics/collector-metrics.go

########
FROM node:lts-slim as frontendbuild

#reduce logging, disable angular-cli analytics for ci environment
ENV NPM_CONFIG_LOGLEVEL=warn NG_CLI_ANALYTICS=false

WORKDIR /opt/scrutiny/src
COPY webapp/frontend /opt/scrutiny/src

RUN npm install -g @angular/cli@9.1.4 && \
    mkdir -p /scrutiny/dist && \
    npm install && \
    ng build --output-path=/opt/scrutiny/dist --prod
COPY --link . /go/src/github.com/analogj/scrutiny
RUN make binary-clean binary-all WEB_BINARY_NAME=scrutiny


########
FROM ubuntu:bionic as runtime
######## Combine build artifacts in runtime image
FROM debian:bullseye-slim as runtime
ARG TARGETARCH
EXPOSE 8080
WORKDIR /opt/scrutiny
ENV PATH="/opt/scrutiny/bin:${PATH}"
ENV INFLUXD_CONFIG_PATH=/opt/scrutiny/influxdb
ENV S6VER="1.21.8.0"
ENV INFLUXVER="2.2.0"

RUN apt-get update && apt-get install -y cron smartmontools=7.0-0ubuntu1~ubuntu18.04.1 ca-certificates curl tzdata \
RUN apt-get update && DEBIAN_FRONTEND=noninteractive \
    apt-get install -y --no-install-recommends \
        ca-certificates \
        cron \
        curl \
        smartmontools \
        tzdata \
    && update-ca-certificates \
    && case ${TARGETARCH} in \
        "amd64") S6_ARCH=amd64 ;; \
        "arm64") S6_ARCH=aarch64 ;; \
    esac \
    && curl https://github.com/just-containers/s6-overlay/releases/download/v1.21.8.0/s6-overlay-${S6_ARCH}.tar.gz -L -s --output /tmp/s6-overlay-${S6_ARCH}.tar.gz \
    && tar xzf /tmp/s6-overlay-${S6_ARCH}.tar.gz -C /

ADD https://dl.influxdata.com/influxdb/releases/influxdb2-2.2.0-${TARGETARCH}.deb /tmp/
RUN dpkg -i /tmp/influxdb2-2.2.0-${TARGETARCH}.deb && rm -rf /tmp/influxdb2-2.2.0-${TARGETARCH}.deb
    && curl https://github.com/just-containers/s6-overlay/releases/download/v${S6VER}/s6-overlay-${S6_ARCH}.tar.gz -L -s --output /tmp/s6-overlay-${S6_ARCH}.tar.gz \
    && tar xzf /tmp/s6-overlay-${S6_ARCH}.tar.gz -C / \
    && rm -rf /tmp/s6-overlay-${S6_ARCH}.tar.gz \
    && curl -L https://dl.influxdata.com/influxdb/releases/influxdb2-${INFLUXVER}-${TARGETARCH}.deb --output /tmp/influxdb2-${INFLUXVER}-${TARGETARCH}.deb \
    && dpkg -i --force-all /tmp/influxdb2-${INFLUXVER}-${TARGETARCH}.deb \
    && rm -rf /tmp/influxdb2-2.2.0-${TARGETARCH}.deb

COPY /rootfs /

COPY /rootfs/etc/cron.d/scrutiny /etc/cron.d/scrutiny
COPY --from=backendbuild /go/src/github.com/analogj/scrutiny/scrutiny /opt/scrutiny/bin/
COPY --from=backendbuild /go/src/github.com/analogj/scrutiny/scrutiny-collector-selftest /opt/scrutiny/bin/
COPY --from=backendbuild /go/src/github.com/analogj/scrutiny/scrutiny-collector-metrics /opt/scrutiny/bin/
COPY --from=frontendbuild /opt/scrutiny/dist /opt/scrutiny/web
RUN chmod +x /opt/scrutiny/bin/scrutiny && \
    chmod +x /opt/scrutiny/bin/scrutiny-collector-selftest && \
    chmod +x /opt/scrutiny/bin/scrutiny-collector-metrics && \
    chmod 0644 /etc/cron.d/scrutiny && \
COPY --link --from=backendbuild --chmod=755 /go/src/github.com/analogj/scrutiny/scrutiny /opt/scrutiny/bin/
COPY --link --from=backendbuild --chmod=755 /go/src/github.com/analogj/scrutiny/scrutiny-collector-metrics /opt/scrutiny/bin/
COPY --link --from=frontendbuild --chmod=644 /go/src/github.com/analogj/scrutiny/dist /opt/scrutiny/web
RUN chmod 0644 /etc/cron.d/scrutiny && \
    rm -f /etc/cron.daily/* && \
    mkdir -p /opt/scrutiny/web && \
    mkdir -p /opt/scrutiny/config && \
    chmod -R ugo+rwx /opt/scrutiny/config


CMD ["/init"]
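Because the new stages reference `${BUILDPLATFORM}` and `ARG TARGETARCH`, this image is meant to be built with BuildKit/buildx, which populates those args automatically. A hypothetical local invocation (the tag and context path are assumptions):

```bash
docker buildx build \
  --platform linux/amd64,linux/arm64 \
  -t scrutiny:omnibus-local \
  .
```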
|
||||
|
||||
+11
-10
@@ -1,27 +1,28 @@
########################################################################################################################
# Collector Image
########################################################################################################################


########
FROM golang:1.14.4-buster as backendbuild
FROM golang:1.20-bullseye as backendbuild

WORKDIR /go/src/github.com/analogj/scrutiny

COPY . /go/src/github.com/analogj/scrutiny

RUN go mod vendor && \
    go build -ldflags '-w -extldflags "-static"' -o scrutiny-collector-selftest collector/cmd/collector-selftest/collector-selftest.go && \
    go build -ldflags '-w -extldflags "-static"' -o scrutiny-collector-metrics collector/cmd/collector-metrics/collector-metrics.go
RUN make binary-clean binary-collector

########
FROM ubuntu:bionic as runtime
WORKDIR /scrutiny
FROM debian:bullseye-slim as runtime
WORKDIR /opt/scrutiny
ENV PATH="/opt/scrutiny/bin:${PATH}"

RUN apt-get update && apt-get install -y cron smartmontools=7.0-0ubuntu1~ubuntu18.04.1 ca-certificates tzdata && update-ca-certificates
RUN apt-get update && apt-get install -y cron smartmontools ca-certificates tzdata && update-ca-certificates

COPY /docker/entrypoint-collector.sh /entrypoint-collector.sh
COPY /rootfs/etc/cron.d/scrutiny /etc/cron.d/scrutiny
COPY --from=backendbuild /go/src/github.com/analogj/scrutiny/scrutiny-collector-selftest /opt/scrutiny/bin/
COPY --from=backendbuild /go/src/github.com/analogj/scrutiny/scrutiny-collector-metrics /opt/scrutiny/bin/
RUN chmod +x /opt/scrutiny/bin/scrutiny-collector-selftest && \
    chmod +x /opt/scrutiny/bin/scrutiny-collector-metrics && \
RUN chmod +x /opt/scrutiny/bin/scrutiny-collector-metrics && \
    chmod +x /entrypoint-collector.sh && \
    chmod 0644 /etc/cron.d/scrutiny && \
    rm -f /etc/cron.daily/apt /etc/cron.daily/dpkg /etc/cron.daily/passwd
+21 -26
@@ -1,40 +1,35 @@
########
FROM golang:1.14.4-buster as backendbuild
# syntax=docker/dockerfile:1.4
########################################################################################################################
# Web Image
########################################################################################################################

######## Build the frontend
FROM --platform=${BUILDPLATFORM} node AS frontendbuild
WORKDIR /go/src/github.com/analogj/scrutiny
COPY --link . /go/src/github.com/analogj/scrutiny

RUN make binary-frontend

######## Build the backend
FROM golang:1.20-bullseye as backendbuild

WORKDIR /go/src/github.com/analogj/scrutiny
COPY --link . /go/src/github.com/analogj/scrutiny

COPY . /go/src/github.com/analogj/scrutiny

RUN go mod vendor && \
    go build -ldflags '-w -extldflags "-static"' -o scrutiny webapp/backend/cmd/scrutiny/scrutiny.go

########
FROM node:lts-slim as frontendbuild

#reduce logging, disable angular-cli analytics for ci environment
ENV NPM_CONFIG_LOGLEVEL=warn NG_CLI_ANALYTICS=false

WORKDIR /opt/scrutiny/src
COPY webapp/frontend /opt/scrutiny/src

RUN npm install -g @angular/cli@9.1.4 && \
    mkdir -p /opt/scrutiny/dist && \
    npm install && \
    ng build --output-path=/opt/scrutiny/dist --prod
RUN make binary-clean binary-all WEB_BINARY_NAME=scrutiny


########
FROM ubuntu:bionic as runtime
######## Combine build artifacts in runtime image
FROM debian:bullseye-slim as runtime
EXPOSE 8080
WORKDIR /opt/scrutiny
ENV PATH="/opt/scrutiny/bin:${PATH}"

RUN apt-get update && apt-get install -y ca-certificates curl tzdata && update-ca-certificates

COPY --from=backendbuild /go/src/github.com/analogj/scrutiny/scrutiny /opt/scrutiny/bin/
COPY --from=frontendbuild /opt/scrutiny/dist /opt/scrutiny/web
RUN chmod +x /opt/scrutiny/bin/scrutiny && \
    mkdir -p /opt/scrutiny/web && \
COPY --link --from=backendbuild --chmod=755 /go/src/github.com/analogj/scrutiny/scrutiny /opt/scrutiny/bin/
COPY --link --from=frontendbuild --chmod=644 /go/src/github.com/analogj/scrutiny/dist /opt/scrutiny/web
RUN mkdir -p /opt/scrutiny/web && \
    mkdir -p /opt/scrutiny/config && \
    chmod -R ugo+rwx /opt/scrutiny/config
CMD ["/opt/scrutiny/bin/scrutiny", "start"]
@@ -1,7 +0,0 @@
FROM techknowlogick/xgo:go-1.13.x

WORKDIR /go/src/github.com/analogj/scrutiny

COPY . /go/src/github.com/analogj/scrutiny

RUN make all
Vendored -18
@@ -1,18 +0,0 @@
# This vagrant file is only used for local development & testing.

Vagrant.configure("2") do |config|
  config.vm.guest = :freebsd
  config.vm.synced_folder ".", "/vagrant", id: "vagrant-root", disabled: true
  config.vm.box = "freebsd/FreeBSD-11.0-CURRENT"
  config.ssh.shell = "sh"
  config.vm.base_mac = "080027D14C66"

  config.vm.provider :virtualbox do |vb|
    vb.customize ["modifyvm", :id, "--memory", "1024"]
    vb.customize ["modifyvm", :id, "--cpus", "1"]
    vb.customize ["modifyvm", :id, "--hwvirtex", "on"]
    vb.customize ["modifyvm", :id, "--audio", "none"]
    vb.customize ["modifyvm", :id, "--nictype1", "virtio"]
    vb.customize ["modifyvm", :id, "--nictype2", "virtio"]
  end
end
@@ -1,14 +1,28 @@
#!/bin/bash

# Cron runs in its own isolated environment (usually using only /etc/environment )
# So when the container starts up, we will do a dump of the runtime environment into a .env file that we
# will then source into the crontab file (/etc/cron.d/scrutiny.sh)

printenv | sed 's/^\(.*\)$/export \1/g' > /env.sh
# will then source into the crontab file (/etc/cron.d/scrutiny)
(set -o posix; export -p) > /env.sh

# adding ability to customize the cron schedule.
COLLECTOR_CRON_SCHEDULE=${COLLECTOR_CRON_SCHEDULE:-"0 0 * * *"}
COLLECTOR_RUN_STARTUP=${COLLECTOR_RUN_STARTUP:-"false"}
COLLECTOR_RUN_STARTUP_SLEEP=${COLLECTOR_RUN_STARTUP_SLEEP:-"1"}

# if the cron schedule has been overridden via env variable (eg docker-compose) we should make sure to strip quotes
[[ "${COLLECTOR_CRON_SCHEDULE}" == \"*\" || "${COLLECTOR_CRON_SCHEDULE}" == \'*\' ]] && COLLECTOR_CRON_SCHEDULE="${COLLECTOR_CRON_SCHEDULE:1:-1}"

# replace placeholder with correct value
sed -i 's|{COLLECTOR_CRON_SCHEDULE}|'"${COLLECTOR_CRON_SCHEDULE}"'|g' /etc/cron.d/scrutiny

if [[ "${COLLECTOR_RUN_STARTUP}" == "true" ]]; then
  sleep ${COLLECTOR_RUN_STARTUP_SLEEP}
  echo "starting scrutiny collector (run-once mode. subsequent calls will be triggered via cron service)"
  /opt/scrutiny/bin/scrutiny-collector-metrics run
fi


# now that we have the env start cron in the foreground
echo "starting cron"
su -c "cron -l 8 -f" root
su -c "cron -f -L 15" root
@@ -0,0 +1,48 @@
version: '2.4'

services:
  influxdb:
    image: influxdb:2.2
    ports:
      - '8086:8086'
    volumes:
      - './influxdb:/var/lib/influxdb2'
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8086/health"]
      interval: 5s
      timeout: 10s
      retries: 20

  web:
    image: 'ghcr.io/analogj/scrutiny:master-web'
    ports:
      - '8080:8080'
    volumes:
      - './config:/opt/scrutiny/config'
    environment:
      SCRUTINY_WEB_INFLUXDB_HOST: 'influxdb'
    depends_on:
      influxdb:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8080/api/health"]
      interval: 5s
      timeout: 10s
      retries: 20
      start_period: 10s

  collector:
    image: 'ghcr.io/analogj/scrutiny:master-collector'
    cap_add:
      - SYS_RAWIO
    volumes:
      - '/run/udev:/run/udev:ro'
    environment:
      COLLECTOR_API_ENDPOINT: 'http://web:8080'
    depends_on:
      web:
        condition: service_healthy
    devices:
      - "/dev/sda"
      - "/dev/sdb"
@@ -7,11 +7,12 @@ services:
    cap_add:
      - SYS_RAWIO
    ports:
      - "8080:8080"
      - "8080:8080" # webapp
      - "8086:8086" # influxDB admin
    volumes:
      - /run/udev:/run/udev:ro
      - ./config:/opt/scrutiny/config
      - ./influxdb:/var/lib/influxdb2
      - ./influxdb:/opt/scrutiny/influxdb
    devices:
      - "/dev/sda"
      - "/dev/sdb"
@@ -0,0 +1,17 @@
# Ansible Install

[Zorlin](https://github.com/Zorlin) has developed and now maintains [an Ansible playbook](https://github.com/Zorlin/scrutiny-playbook) which automates the steps involved in manually setting up Scrutiny.

Using it is simple (see the sketch after this list):

* Grab a copy of the playbook
* Follow the directions in the playbook repository
* Run `ansible-playbook site.yml`
* Visit http://your-machine:8080 to see your new Scrutiny installation.
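
In practice that amounts to something like the following (a hedged sketch; the clone URL matches the playbook repository linked above, but the inventory layout may differ, so follow the playbook's own README):

```bash
# Grab a copy of the playbook
git clone https://github.com/Zorlin/scrutiny-playbook.git
cd scrutiny-playbook

# Configure your inventory/host variables per the playbook README, then run it
ansible-playbook site.yml
```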
It will automatically pull metrics from machines once a day, at 1am.

You can see it in action below.

[![asciicast](https://asciinema.org/a/493531.svg)](https://asciinema.org/a/493531)
@@ -0,0 +1,181 @@
> See [docker/example.hubspoke.docker-compose.yml](https://github.com/AnalogJ/scrutiny/blob/master/docker/example.hubspoke.docker-compose.yml)
> for a docker-compose file.

> The following guide was contributed by @TinJoy59 in #417
> It describes how to deploy Scrutiny in Hub/Spoke mode, where the Hub is running in Docker, and the Spokes
> (collectors) are running as binaries.
> He's using Proxmox & Synology in his guide, however this should be applicable for almost anyone.

# S.M.A.R.T. Monitoring with Scrutiny across machines

### 🤔 The problem:

Scrutiny offers a nice Docker package called "Omnibus" that can monitor HDDs attached to a Docker host with relative
ease. Scrutiny can also be installed in a Hub-Spoke layout where the Web interface, Database and Collector come in 3
separate packages. The official documentation assumes that the spokes in the "Hub-Spokes layout" run Docker, which is
not always the case. The third approach is to install Scrutiny manually, entirely outside of Docker.

### 💡 The solution:

This tutorial provides a hybrid configuration where the Hub lives in a Docker instance while the spokes have only the
Scrutiny Collector installed manually. The Collector periodically sends data to the Hub. It's not mind-bogglingly hard to
understand, but someone might struggle with the setup. This is for them.

### 🖥️ My setup:

I have a Proxmox cluster where one VM runs Docker and all monitoring services - Grafana, Prometheus, various exporters,
InfluxDB and so forth. Another VM runs the NAS - OpenMediaVault v6, where all hard drives reside. The Scrutiny Collector
is triggered every 30min to collect data on the drives. The data is sent to the Docker VM, running InfluxDB.

## Setting up the Hub

The Hub consists of Scrutiny Web - a web interface for viewing the SMART data - and InfluxDB, where the smartmon data is
stored.

[🔗This is the official Hub-Spoke layout in docker-compose.](https://github.com/AnalogJ/scrutiny/blob/master/docker/example.hubspoke.docker-compose.yml)
We are going to reuse parts of it. The ENV variables provide the necessary configuration for the initial setup, both for
InfluxDB and Scrutiny.

If you are working with an existing InfluxDB instance, you can forgo all the `INIT` variables as they already exist.

The official Scrutiny documentation has a
sample [scrutiny.yaml](https://github.com/AnalogJ/scrutiny/blob/master/example.scrutiny.yaml) file that normally
contains the connection and notification details, but I always find it easier to configure as much as possible in the
docker-compose.

```yaml
version: "3.4"

networks:
  monitoring: # A common network for all monitoring services to communicate into
    external: true
  notifications: # To Gotify or another Notification service
    external: true

services:
  influxdb:
    container_name: influxdb
    image: influxdb:2.1-alpine
    ports:
      - 8086:8086
    volumes:
      - ${DIR_CONFIG}/influxdb2/db:/var/lib/influxdb2
      - ${DIR_CONFIG}/influxdb2/config:/etc/influxdb2
    environment:
      - DOCKER_INFLUXDB_INIT_MODE=setup
      - DOCKER_INFLUXDB_INIT_USERNAME=Admin
      - DOCKER_INFLUXDB_INIT_PASSWORD=${PASSWORD}
      - DOCKER_INFLUXDB_INIT_ORG=homelab
      - DOCKER_INFLUXDB_INIT_BUCKET=scrutiny
      - DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=your-very-secret-token
    restart: unless-stopped
    networks:
      - monitoring

  scrutiny:
    container_name: scrutiny
    image: ghcr.io/analogj/scrutiny:master-web
    ports:
      - 8080:8080
    volumes:
      - ${DIR_CONFIG}/scrutiny/config:/opt/scrutiny/config
    environment:
      - SCRUTINY_WEB_INFLUXDB_HOST=influxdb
      - SCRUTINY_WEB_INFLUXDB_PORT=8086
      - SCRUTINY_WEB_INFLUXDB_TOKEN=your-very-secret-token
      - SCRUTINY_WEB_INFLUXDB_ORG=homelab
      - SCRUTINY_WEB_INFLUXDB_BUCKET=scrutiny
      # Optional but highly recommended to notify you in case of a problem
      - SCRUTINY_WEB_NOTIFY_URLS=["http://gotify:80/message?token=a-gotify-token"]
    depends_on:
      - influxdb
    restart: unless-stopped
    networks:
      - notifications
      - monitoring
```

A freshly initialized Scrutiny instance can be accessed on port 8080, eg. `192.168.0.100:8080`. The interface will be
empty because no metrics have been collected yet.

## Setting up a Spoke ***without*** Docker

A spoke consists of the Scrutiny Collector binary that is run on a set interval via crontab and sends the data to the
Hub. The official
documentation [describes the manual setup of the Collector](https://github.com/AnalogJ/scrutiny/blob/master/docs/INSTALL_MANUAL.md#collector)
- dependencies and step by step commands. I have a shortened version that does the same thing but in one line of code.

```bash
# Installing dependencies
apt install smartmontools -y

# 1. Create directory for the binary
# 2. Download the binary into that directory
# 3. Make it executable
# 4. List the contents of the directory for confirmation
mkdir -p /opt/scrutiny/bin && \
curl -L https://github.com/AnalogJ/scrutiny/releases/download/v0.5.0/scrutiny-collector-metrics-linux-amd64 > /opt/scrutiny/bin/scrutiny-collector-metrics-linux-amd64 && \
chmod +x /opt/scrutiny/bin/scrutiny-collector-metrics-linux-amd64 && \
ls -lha /opt/scrutiny/bin
```

<p class="callout warning">When downloading GitHub Release Assets, make sure that you have the correct version. The provided example is with Release v0.5.0. [The release list can be found here.](https://github.com/analogj/scrutiny/releases)</p>

Once the Collector is installed, you can run it with the following command. Make sure to add the correct address and
port of your Hub as `--api-endpoint`.

```bash
/opt/scrutiny/bin/scrutiny-collector-metrics-linux-amd64 run --api-endpoint "http://192.168.0.100:8080"
```

This will run the Collector once and populate the Web interface of your Scrutiny instance. In order to collect metrics
for a time series, you need to run the command repeatedly. Here is an example for crontab, running the Collector every
15min.

```bash
# open crontab
crontab -e

# add a line for Scrutiny
*/15 * * * * /opt/scrutiny/bin/scrutiny-collector-metrics-linux-amd64 run --api-endpoint "http://192.168.0.100:8080"
```

The Collector has its own independent config file that lives in `/opt/scrutiny/config/collector.yaml`, but I did not find
a need to modify
it. [A default collector.yaml can be found in the official documentation.](https://github.com/AnalogJ/scrutiny/blob/master/example.collector.yaml)
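
If you do need a custom config, the collector can be pointed at one explicitly via its `--config` flag (a sketch; the path shown is the default location mentioned above):

```bash
/opt/scrutiny/bin/scrutiny-collector-metrics-linux-amd64 run \
  --config /opt/scrutiny/config/collector.yaml \
  --api-endpoint "http://192.168.0.100:8080"
```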
## Setting up a Spoke ***with*** Docker

Setting up a remote Spoke in Docker requires you to split
the [official Hub-Spoke layout docker-compose.yml](https://github.com/AnalogJ/scrutiny/blob/master/docker/example.hubspoke.docker-compose.yml).
In the following docker-compose you need to provide the `${API_ENDPOINT}`, in my case `http://192.168.0.100:8080`.
Also all drives that you wish to monitor need to be presented to the container under `devices`.

The image handles the periodic scanning of the drives.

```yaml
version: "3.4"

services:

  collector:
    image: 'ghcr.io/analogj/scrutiny:master-collector'
    cap_add:
      - SYS_RAWIO
    volumes:
      - '/run/udev:/run/udev:ro'
    environment:
      COLLECTOR_API_ENDPOINT: ${API_ENDPOINT}
    devices:
      - "/dev/sda"
      - "/dev/sdb"
```
+23 -3
@@ -2,12 +2,18 @@

While the easiest way to get started with [Scrutiny is using Docker](https://github.com/AnalogJ/scrutiny#docker),
it is possible to run it manually without much work. You can even mix and match, using Docker for one component and
a manual installation for the other.
a manual installation for the other. There's also [an installer](INSTALL_ANSIBLE.md) which automates this manual installation procedure.

Scrutiny is made up of two components: a collector and a webapp/api. Here's how each component can be deployed manually.
Scrutiny is made up of three components: an InfluxDB database, a collector and a webapp/api. Here's how each component can be deployed manually.

> Note: the `/opt/scrutiny` directory is not hardcoded, you can use any directory name/path.

## InfluxDB

Please follow the official InfluxDB installation guide. Note, you'll need to install v2.2.0+.

https://docs.influxdata.com/influxdb/v2.2/install/
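
On a Debian/Ubuntu amd64 host, that guide boils down to something like this (a hedged sketch; the release URL pattern matches the official packages, but adjust the version and architecture for your system):

```bash
# Download and install the InfluxDB 2.x package (version/arch here are examples)
curl -LO https://dl.influxdata.com/influxdb/releases/influxdb2-2.2.0-amd64.deb
sudo dpkg -i influxdb2-2.2.0-amd64.deb

# Start the service now and on boot
sudo systemctl enable --now influxdb
```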
## Webapp/API

### Dependencies
@@ -45,6 +51,17 @@ web:
      # The path to the Scrutiny frontend files (js, css, images) must be specified.
      # We'll populate it with files in the next section
      path: /opt/scrutiny/web

  # if you're running influxdb on a different host (or using a cloud-provider) you'll need to update the host & port below.
  # token, org, bucket are unnecessary for a new InfluxDB installation, as Scrutiny will automatically run the InfluxDB setup,
  # and store the information in the config file. If you're re-using an existing influxdb installation, you'll need to provide
  # the `token`
  influxdb:
    host: localhost
    port: 8086
    # token: 'my-token'
    # org: 'my-org'
    # bucket: 'bucket'
```

> Note: for a full list of available configuration options, please check the [example.scrutiny.yaml](https://github.com/AnalogJ/scrutiny/blob/master/example.scrutiny.yaml) file.
@@ -66,9 +83,11 @@ Now that we have downloaded the required files, let's prepare the filesystem.
chmod +x /opt/scrutiny/bin/scrutiny-web-linux-amd64

# Next, let's extract the frontend files.
# NOTE: after extraction, there **should not** be a `dist` subdirectory in the `/opt/scrutiny/web` directory.
cd /opt/scrutiny/web
tar xvzf scrutiny-web-frontend.tar.gz --strip-components 1 -C .

# Cleanup
rm -rf scrutiny-web-frontend.tar.gz
```
@@ -96,7 +115,8 @@ Unlike the webapp, the collector does have some dependencies:

Unfortunately the version of `smartmontools` (which contains `smartctl`) available in some of the base OS repositories is ancient.
So you'll need to install the v7+ version using one of the following commands:

- **Ubuntu:** `apt-get install -y smartmontools=7.0-0ubuntu1~ubuntu18.04.1`
- **Ubuntu (22.04/Jammy/LTS):** `apt-get install -y smartmontools`
- **Ubuntu (18.04/Bionic):** `apt-get install -y smartmontools=7.0-0ubuntu1~ubuntu18.04.1`
- **Centos8:**
  - `dnf install https://extras.getpagespeed.com/release-el8-latest.rpm`
  - `dnf install smartmontools`
@@ -0,0 +1,59 @@
# Manual Windows Install

This guide is specifically for people who are on a Windows machine using [WSL](https://learn.microsoft.com/en-us/windows/wsl/about) with Docker.

Scrutiny is made up of three components: an InfluxDB database, a collector and a webapp/api. Docker will be used for
the influxdb and webapp/API; the collector component will be facilitated by [Windows Task Scheduler](https://learn.microsoft.com/en-us/windows/win32/taskschd/task-scheduler-start-page).

> **NOTE:** If you are **NOT** using WSL with docker, then the easiest way to get started with [Scrutiny is the omnibus Docker image](https://github.com/AnalogJ/scrutiny#docker).

## InfluxDB and Webapp/API (Docker)

1. Copy the [example.hubspoke.docker-compose.yml](https://github.com/AnalogJ/scrutiny/blob/master/docker/example.hubspoke.docker-compose.yml) file and delete the collector section near the bottom of the file.
2. Run `docker-compose up -d` to verify that the DB and webapp are working correctly; once it's completed, your webapp should be up and running but the dashboard will be empty (default location is `localhost:8080`)

## Collector (Windows Task Scheduler)

1. Download the latest `scrutiny-collector-metrics-windows-amd64.exe` from the [releases page](https://github.com/AnalogJ/scrutiny/releases) (under assets)
2. On your Windows host, open [Windows Task Scheduler](https://www.wikihow.com/Open-Task-Scheduler-in-Windows-10) as **Administrator**
   1. In the **Start Menu** (Windows key), type `Task Scheduler` and then right click `Run as Administrator` to open
3. On the status bar (under the `action` tab), click `Create Task...`
4. A new window should open with the `General` Tab open, enter relevant information into the `Name` and `Description` fields
   1. Under **Security Options** check:
      1. **Run whether user is logged on or not**
      2. **Run with highest privileges**
5. Next, click the `Triggers` tab and then click `New...` (bottom left-hand side of the window)
6. Here you can set how often you want this task to run, example settings are the following:
   1. **Settings:**
      1. `Daily`, start at `TODAYS_DATE` `12:00:00 AM`, Recur every `1` days
   2. **Advanced Settings:**
      1. Repeat Task every: `1 hour` for a duration of `Indefinitely`
      2. Stop task if it runs longer than: `30 minutes`
   3. Click Ok when satisfied with your schedule
   > **NOTE:** The above settings will trigger the task **every day at midnight** and then **run every hour after that** (modify as needed)
7. Next, click the `Actions` tab and then click `New...` (bottom left-hand side of the window)
   1. **Action Settings:**
      1. In the **Program/Script** field, put: `scrutiny-collector-metrics-windows-amd64.exe`
      2. In the **Add arguments (optional)** field, put: `run --api-endpoint "http://localhost:8080" --config collector.yaml`
      > **NOTE:**
      > * Make sure that you put the correct port number (as specified in the docker-compose file) for the webapp (default is `8080`)
      > * The `--config` param is optional and is not needed if you just want to use the default collector config, see [example.collector.yaml](https://github.com/AnalogJ/scrutiny/blob/master/example.collector.yaml) for more info on the collector config.
      3. In the **Start in (optional)** field, put: FOLDER_PATH_TO_YOUR `scrutiny-collector-metrics-windows-amd64.exe` file
      > **NOTE:** Must be exact and do not include `scrutiny-collector-metrics-windows-amd64.exe` in the path
      4. Click Ok when finished
8. Next, click the `Conditions` tab and make sure that everything is unchecked (unless you want to specify otherwise)
9. Next, click the `Settings` tab and check everything except for the last checkbox
   1. **Examples for the following settings:**
      1. If the task fails, restart every: `5 minutes`
      2. Attempt restart up to: `3` times
      3. Stop the task if it runs longer than `1 hour`
10. Next, once satisfied with everything, click Ok
11. Then, find your newly created task (by its name) in the scheduler task list and then manually run it (right click it and then click `Run`)
12. Finally, refresh your dashboard after a minute or two and your drive information should have populated the webapp dashboard.
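
If you'd rather script the task than click through the GUI, `schtasks` can create a roughly equivalent job from an elevated command prompt (an untested sketch; the task name and folder path are placeholders, and the GUI steps above remain the documented route):

```
schtasks /Create /TN "Scrutiny Collector" ^
  /TR "C:\Scrutiny\scrutiny-collector-metrics-windows-amd64.exe run --api-endpoint http://localhost:8080" ^
  /SC HOURLY /RU SYSTEM /RL HIGHEST
```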
@@ -0,0 +1,72 @@
# pfsense Install

This basically follows the [Manual collector instructions](https://github.com/AnalogJ/scrutiny/blob/master/docs/INSTALL_MANUAL.md#collector) and assumes you are running a hub and spoke deployment and already have the web app set up.

### Dependencies

SSH into pfsense, hit `8` for the shell and install the required dependencies.

```
pkg install smartmontools
```

Ensure smartmontools is v7+. This won't be a problem in pfsense 2.6.0+.
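
You can confirm the installed version before continuing:

```
smartctl --version
```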
### Directory Structure

Now let's create a directory structure to contain the Scrutiny collector binary.

```
mkdir -p /opt/scrutiny/bin
```

### Download Files

Next, we'll download the Scrutiny collector binary from the [latest Github release](https://github.com/analogj/scrutiny/releases).

> NOTE: Ensure you have the latest version in the below command

```
fetch -o /opt/scrutiny/bin https://github.com/AnalogJ/scrutiny/releases/download/vX.X.X/scrutiny-collector-metrics-freebsd-amd64
```

### Prepare Scrutiny

Now that we have downloaded the required files, let's prepare the filesystem.

```
chmod +x /opt/scrutiny/bin/scrutiny-collector-metrics-freebsd-amd64
```

### Start Scrutiny Collector, Populate Webapp

Next, we will manually trigger the collector, to populate the Scrutiny dashboard:

> NOTE: if you need to pass a config file to the scrutiny collector, you can provide it using the `--config` flag.

```
/opt/scrutiny/bin/scrutiny-collector-metrics-freebsd-amd64 run --api-endpoint "http://localhost:8080"
```
> NOTE: change the IP address to that of your web app

### Schedule Collector with Cron

Finally you need to schedule the collector to run periodically.

Login to the pfsense webGUI, head to `Services/Cron` and add an entry with the following details:

```
Minute: */15
Hour: *
Day of the Month: *
Month of the Year: *
Day of the Week: *
User: root
Command: /opt/scrutiny/bin/scrutiny-collector-metrics-freebsd-amd64 run --api-endpoint "http://localhost:8080" >/dev/null 2>&1
```
> NOTE: `>/dev/null 2>&1` is used to stop cron confirmation emails being sent.
@@ -0,0 +1,138 @@
# Install collector on Synology

## Install Entware

This will allow you to install a newer version of smartmontools on your Synology. Follow the instructions here (this is tested on DSM7) - https://github.com/Entware/Entware/wiki/Install-on-Synology-NAS

**PLEASE NOTE THAT IF YOU UPDATE DSM FIRMWARE YOU MAY BORK THE EXISTING ENTWARE INSTALLATION. FOR ANYTHING THAT MAY RELATE TO ENTWARE PLEASE VISIT THEIR REPO**

## Collector Setup

**1. Run an update**

`sudo opkg update`

**2. Run an upgrade**

`sudo opkg upgrade`

**3. Install smartmontools**

`sudo opkg install smartmontools`

*It should install v7.2-2*

`Installing smartmontools (7.2-2) to root...`

**4. We will now create the directories.**

```
mkdir -p /volume1/\@Entware/scrutiny/bin
mkdir -p /volume1/\@Entware/scrutiny/conf
```

**5. Change into the bin directory**

`cd /volume1/\@Entware/scrutiny/bin`

**6. Download the collector binary for your architecture and make it executable**

`wget https://github.com/AnalogJ/scrutiny/releases/download/v0.4.12/scrutiny-collector-metrics-linux-arm64`

`chmod +x /volume1/\@Entware/scrutiny/bin/scrutiny-collector-metrics-linux-arm64`

**7. Create a config file for the collector**

```
cd /volume1/\@Entware/scrutiny/conf
wget https://raw.githubusercontent.com/AnalogJ/scrutiny/master/example.collector.yaml
mv example.collector.yaml collector.yaml
```

**8. Let's make some changes in the [collector config file](../example.collector.yaml); these are what I uncommented/added, please tweak the device paths to your needs**

```
host:
  id: 'Server_Name'

devices:
  # example for forcing device type detection for a single disk
  - device: /dev/sda
    type: 'sat'
  - device: /dev/sdb
    type: 'sat'
  - device: /dev/sdc
    type: 'sat'
  - device: /dev/sdd
    type: 'sat'

api:
  endpoint: 'http://<url>:8080'
```

**9. Let's update the smartd db**

```
cd /volume1/\@Entware/scrutiny/bin/
wget https://raw.githubusercontent.com/smartmontools/smartmontools/master/smartmontools/drivedb.h
```

**10. I ran it like this, but you can tweak it to your liking; the most important part is `--drivedb`, as this loads the updated database into the application for future use**

`smartctl -d sat --all /dev/sda --drivedb=/volume1/\@Entware/scrutiny/bin/drivedb.h`

**11. Now let's create a small bash script; this will be used for the scheduled task inside Synology**

`vim /volume1/\@Entware/scrutiny/bin/run_collect.sh`

**The contents are below, copy and paste them in**

```
#!/bin/bash

/volume1/\@Entware/scrutiny/bin/scrutiny-collector-metrics-linux-arm64 run --config /volume1/\@Entware/scrutiny/conf/collector.yaml
```

**Make `run_collect.sh` executable**

`chmod +x /volume1/\@Entware/scrutiny/bin/run_collect.sh`

## Set up Synology to run a scheduled task.

Log in to DSM and do the following:

Go to: DSM > Control Panel > Task Scheduler

Create > Scheduled Task > User Defined Script

###### General

```
Task: Scrutiny_Collector
User: root
Enabled: yes
```

###### Schedule
```
Run on the following days: Daily
```
###### Time:

```
Frequency: <Your desired frequency>
```

###### Task Settings

**Run Command**

```
. /opt/etc/profile; /volume1/\@Entware/scrutiny/bin/run_collect.sh
```

## Troubleshooting

If you have any issues with your devices being detected, or incorrect data, please take a look at [TROUBLESHOOTING_DEVICE_COLLECTOR.md](./TROUBLESHOOTING_DEVICE_COLLECTOR.md)
@@ -1,14 +1,21 @@
# Officially Supported NAS OS's
# Officially Supported NAS/OS's

These are the officially supported NAS OS's (with documentation and setup guides).
Once a guide is created (in `docs/guides/`) it will be linked here.
These are the officially supported NAS OS's (with documentation and setup guides). Once a guide is created (in `docs/guides/` or elsewhere) it will be linked here.

- [ ] freenas/truenas
- [x] [unraid](https://github.com/AnalogJ/scrutiny/blob/master/docs/INSTALL_UNRAID.md)
- [x] [freenas/truenas](https://blog.stefandroid.com/2022/01/14/smart-scrutiny.html)
- [x] [unraid](./INSTALL_UNRAID.md)
- [ ] ESXI
- [ ] Proxmox
- [ ] Synology
- [x] Synology
  - [Hub/Spoke Deployment - Collector](./INSTALL_SYNOLOGY_COLLECTOR.md)
  - [Omnibus Deployment](https://drfrankenstein.co.uk/2022/07/28/scrutiny-in-docker-on-a-synology-nas)
- [ ] OMV
- [ ] Amahi
- [ ] Running in a LXC container
- [x] [PFSense](./INSTALL_PFSENSE.md)
- [x] QNAP
- [x] [RockStor](https://rockstor.com/docs/interface/docker-based-rock-ons/scrutiny.html)
- [ ] Solaris/OmniOS CE Support
- [ ] Kubernetes
- [x] [Windows](./INSTALL_MANUAL_WINDOWS.md)
@@ -0,0 +1,20 @@
# Testers

Scrutiny supports many operating systems, CPU architectures and runtime environments. Unfortunately that makes it incredibly
difficult to test.
Thankfully the following users have been gracious enough to test/validate Scrutiny works on their system.

> NOTE: If you're interested in volunteering to test Scrutiny beta builds on your system, please [open an issue](https://github.com/AnalogJ/scrutiny/issues).

| Architecture Name | Binaries | Docker |
| --- | --- | --- |
| linux-amd64 | -- | @feroxy @rshxyz |
| linux-arm-5 | -- | |
| linux-arm-6 | -- | |
| linux-arm-7 | @Zorlin | @martini1992 |
| linux-arm64 | @SiM22 @Zorlin | @ViRb3 @agneevX @benamajin |
| freebsd-amd64 | @BadCo-NZ @varunsridharan @martadinata666 @KenwoodFox @FingerlessGlov3s | |
| macos-amd64 | -- | -- |
| macos-arm64 | -- | -- |
| windows-amd64 | @gabrielv33 | -- |
| windows-arm64 | -- | -- |
@@ -19,6 +19,25 @@ Scrutiny stores and references the devices by their `WWN` which is globally unique
As such, passing devices to the Scrutiny collector container using `/dev/disk/by-id/`, `/dev/disk/by-label/`, `/dev/disk/by-path/` and `/dev/disk/by-uuid/`
paths is unnecessary, unless you'd like to ensure the docker run command never needs to change.

#### Force /dev/disk/by-id paths

Since Scrutiny uses WWN under the hood, it really doesn't care about `/dev/sd*` vs `/dev/disk/by-id/`. The problem is the interaction between docker and smartmontools when using `--device /dev/disk/by-id` paths.

Basically Scrutiny offloads all device detection to smartmontools, which doesn't seem to detect devices that have been passed into the docker container using `/dev/disk/by-id` paths.

If you must use "static" device references, you can map the host device id/uuid/wwn references to device names within the container:

```
# --device=<Host Device>:<Container Device Mapping>

docker run ....
--device=/dev/disk/by-id/wwn-0x5000xxxxx:/dev/sda
--device=/dev/disk/by-id/wwn-0x5001xxxxx:/dev/sdb
--device=/dev/disk/by-id/wwn-0x5003xxxxx:/dev/sdc
...
```

## Device Detection By Smartctl

@@ -52,6 +71,8 @@ If the output is the same, your devices will be processed by Scrutiny.
In some cases `--scan` does not correctly detect the device type, returning [incomplete SMART data](https://github.com/AnalogJ/scrutiny/issues/45).
Scrutiny supports overriding the detected device type via the config file.

[example.collector.yaml](https://github.com/AnalogJ/scrutiny/blob/master/example.collector.yaml)

### RAID Controllers (Megaraid/3ware/HBA/Adaptec/HPE/etc)
Smartctl has support for a large number of [RAID controllers](https://www.smartmontools.org/wiki/Supported_RAID-Controllers), however this
support is not automatic, and may require some additional device type hinting. You can provide this information to the Scrutiny collector
@@ -59,7 +80,8 @@ using a collector config file. See [example.collector.yaml](/example.collector.yaml)

> NOTE: If you use docker, you **must** pass through the RAID virtual disk to the container using `--device` (see below)
>
> This device may be in `/dev/*` or `/dev/bus/*`.
> If you do not see a virtual device file `/dev/bus/*` you may need to use the `--privileged` flag. See [#366 for more info](https://github.com/AnalogJ/scrutiny/issues/366#issuecomment-1253196407)
>
> If you're unsure, run `smartctl --scan` on your host, and pass all listed devices to the container.

@@ -90,7 +112,7 @@ devices:
    type:
      - aacraid,0,0,0
      - aacraid,0,0,1

  # HPE Smart Array example: https://github.com/AnalogJ/scrutiny/issues/213
  - device: /dev/sda
    type:
@@ -98,11 +120,14 @@ devices:
      - 'cciss,1'
```

### NVMe Drives

As mentioned in the [README.md](/README.md), NVMe devices require both `--cap-add SYS_RAWIO` and `--cap-add SYS_ADMIN`
to allow smartctl permission to query your NVMe device SMART data [#26](https://github.com/AnalogJ/scrutiny/issues/26)

When attaching NVMe devices using `--device=/dev/nvme..`, make sure to provide the device controller (`/dev/nvme0`)
instead of the block device (`/dev/nvme0n1`). See [#209](https://github.com/AnalogJ/scrutiny/issues/209).

> The character device /dev/nvme0 is the NVME device controller, and block devices like /dev/nvme0n1 are the NVME storage namespaces: the devices you use for actual storage, which will behave essentially as disks.
@@ -111,17 +136,204 @@ instead of the block device (`/dev/nvme0n1`). See [#209](https://github.com/AnalogJ/scrutiny/issues/209).

### ATA

### Standby/Sleeping Disks
### USB Devices

The following information is extracted from [#266](https://github.com/AnalogJ/scrutiny/issues/266)

External HDDs support two modes of operation: usb-storage (old, slower, stable) and uas (new, faster, sometimes unstable).
On some external HDDs, uas mode does not properly pass through SMART information, or even causes hardware issues, so
it has been disabled by the kernel. No amount of smartctl parameters will fix this, as it is being rejected by the
kernel. This is especially true with Seagate HDDs. One solution is to force these devices into usb-storage mode, which
will incur some performance penalty, but may work well enough for you. More info:

- https://smartmontools.org/wiki/Supported_USB-Devices
- https://smartmontools.org/wiki/SAT-with-UAS-Linux
- https://forums.raspberrypi.com/viewtopic.php?t=245931
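
One common way to force usb-storage mode (covered in the links above) is the kernel's `usb-storage.quirks` parameter; here's a hedged sketch, where the `0bc2:231a` vendor:product pair is a placeholder you would replace with your enclosure's IDs:

```bash
# Find the enclosure's idVendor:idProduct pair
lsusb

# Then add a kernel boot parameter like the following; the trailing `u`
# flag tells the kernel to ignore the UAS driver for that device:
#   usb-storage.quirks=0bc2:231a:u
```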
### Exit Codes

If you see an error message similar to `smartctl returned an error code (2) while processing /dev/sda`, this means that
`smartctl` (not Scrutiny) exited with an error code. Scrutiny will attempt to print a helpful error message to help you
debug, but you can look at the table (and associated links) below to debug `smartctl`.

> smartctl Return Values
> The return values of smartctl are defined by a bitmask. If all is well with the disk, the return value (exit status) of
> smartctl is 0 (all bits turned off). If a problem occurs, or an error, potential error, or fault is detected, then
> a non-zero status is returned. In this case, the eight different bits in the return value have the following meanings
> for ATA disks; some of these values may also be returned for SCSI disks.
>
> source: http://www.linuxguide.it/command_line/linux-manpage/do.php?file=smartctl#sect7

| Exit Code (Isolated) | Binary | Problem Message |
| --- | --- | --- |
| 1 | Bit 0 | Command line did not parse. |
| 2 | Bit 1 | Device open failed, or device did not return an IDENTIFY DEVICE structure. |
| 4 | Bit 2 | Some SMART command to the disk failed, or there was a checksum error in a SMART data structure (see `-b` option above). |
| 8 | Bit 3 | SMART status check returned "DISK FAILING". |
| 16 | Bit 4 | We found prefail Attributes <= threshold. |
| 32 | Bit 5 | SMART status check returned "DISK OK" but we found that some (usage or prefail) Attributes have been <= threshold at some time in the past. |
| 64 | Bit 6 | The device error log contains records of errors. |
| 128 | Bit 7 | The device self-test log contains records of errors. |
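
Because the exit status is a bitmask, you can decode exactly which bits are set from a shell (a small sketch):

```bash
smartctl -a /dev/sda > /dev/null; rc=$?
for bit in 0 1 2 3 4 5 6 7; do
  if (( (rc >> bit) & 1 )); then
    echo "smartctl exit bit ${bit} is set"
  fi
done
```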
#### Standby/Sleeping Disks

Disks in Standby/Sleep can also cause `smartctl` to exit abnormally, usually with `exit code: 2`.

- https://github.com/AnalogJ/scrutiny/issues/221
- https://github.com/AnalogJ/scrutiny/issues/157
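
If you suspect a standby disk, `smartctl` can be told not to wake it; it then reports the power mode and exits (typically with code 2) instead of spinning the drive up (sketch):

```bash
smartctl -n standby -a /dev/sda
```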
### Volume Mount All Devices (`/dev`) - Privileged

> WARNING: This is an insecure/dangerous workaround. Running Scrutiny (or any Docker image) with `--privileged` is equivalent to running it with root access.

If you have exhausted all other mechanisms to get your disks working with `smartctl` running within a container, you can try running the docker image with the following additional flags:

- `--privileged` (instead of `--cap-add`) - this gives the docker container full access to your system. Scrutiny does not require this permission, however it can be helpful for `smartctl`
- `-v /dev:/dev:ro` (instead of `--device`) - this mounts the `/dev` folder (containing all your device files) into the container, allowing `smartctl` to see your disks, exactly as if it were running on your host directly.

With this workaround your `docker run` command would look similar to the following:

```bash
docker run -it --rm -p 8080:8080 -p 8086:8086 \
  -v `pwd`/scrutiny:/opt/scrutiny/config \
  -v `pwd`/influxdb2:/opt/scrutiny/influxdb \
  -v /run/udev:/run/udev:ro \
  --privileged \
  -v /dev:/dev \
  --name scrutiny \
  ghcr.io/analogj/scrutiny:master-omnibus
```

## Scrutiny detects Failure but SMART Passed?

There are 2 different mechanisms that Scrutiny uses to detect failures.

The first is simple SMART failures. If SMART thinks an attribute is in a failed state, Scrutiny will display it as failed as well.

The second is using BackBlaze failure data: [https://backblaze.com/blog-smart-stats-2014-8.html](https://backblaze.com/blog-smart-stats-2014-8.html)
If Scrutiny detects that an attribute corresponds with a high rate of failure using BackBlaze's data, it will also mark that attribute (and disk) as failed (even though SMART may think the device is still healthy).

This can cause some confusion when comparing Scrutiny's dashboard against other SMART analysis tools.
If you hover over the "failed" label beside an attribute, Scrutiny will tell you if the failure was due to SMART or Scrutiny/BackBlaze data.

### Device failed but Smart & Scrutiny passed

Device SMART results are the source of truth for Scrutiny, however we don't just take into account the current SMART results, but also historical analysis of a disk.
This means that if a device is marked as failed at any point in its history, it will continue to be stored in the database as failed until the device is removed (or its status is reset -- see below).

In some cases, this historical failure may have been due to attribute analysis/thresholds that have since been relaxed:

- NVME - Numb Error Log Entries (v0.4.7)
- ATA - Power Cycle Count (v0.4.7)
- ATA - Read Error Rate (v0.4.13)
- ATA - Seek Error Rate (v0.4.13)

If you'd like to reset the status of a disk (to healthy) and allow the next run of the collector to determine the actual status, you can run the following command:

```bash
# connect to scrutiny docker container
docker exec -it scrutiny bash

# install sqlite CLI tools (inside container)
apt update && apt install -y sqlite3

# connect to the scrutiny database
sqlite3 /opt/scrutiny/config/scrutiny.db

# reset/update the devices table, unset the failure status.
UPDATE devices SET device_status = null;

# exit sqlite CLI
.exit
```

### Seagate Drives Failing

As thoroughly discussed in [#255](https://github.com/AnalogJ/scrutiny/issues/255), Seagate (Ironwolf & others) drives are almost always marked as failed by Scrutiny.

> The `Seek Error Rate` & `Read Error Rate` attribute raw values are typically very high, and the
> normalised values (Current / Worst / Threshold) are usually quite low. Despite this, the numbers in most cases are perfectly OK.
>
> The anxiety arises because we intuitively expect that the normalised values should reflect a "health" score, with
> 100 being the ideal value. Similarly, we would expect that the raw values should reflect an error count, in
> which case a value of 0 would be most desirable. However, Seagate calculates and applies these attribute values
> in a counterintuitive way.
>
> http://www.users.on.net/~fzabkar/HDD/Seagate_SER_RRER_HEC.html

Some analysis has been done which shows that Seagate drives break the common SMART conventions, which also causes Scrutiny's
comparison against BackBlaze data to detect these drives as failed.

**So what's the Solution?**

After taking a look at the BackBlaze data for the relevant Attributes (`Seek Error Rate` & `Read Error Rate`), I've decided
to disable Scrutiny analysis for them. Both are non-critical, and have low correlation with failure.

> Please note: SMART failures for these attributes will still cause the drive to be marked as failed. Only BackBlaze analysis has been disabled.

If this is affecting your drives, you'll need to do the following:

1. Upgrade to v0.4.13+
2. Reset your drive status using the SQLite script in [#device-failed-but-smart--scrutiny-passed](https://github.com/AnalogJ/scrutiny/blob/master/docs/TROUBLESHOOTING_DEVICE_COLLECTOR.md#device-failed-but-smart--scrutiny-passed)
3. Wait for (or manually start) the collector.

If you'd like to learn more about how the Seagate Ironwolf SMART attributes work under the hood, and how they differ from
other drives, please read the following:

- http://www.users.on.net/~fzabkar/HDD/Seagate_SER_RRER_HEC.html
- https://www.truenas.com/community/threads/seagate-ironwolf-smart-test-raw_read_error_rate-seek_error_rate.68634/

## Hub & Spoke model, with multiple Hosts.

When deploying Scrutiny in a hub & spoke model, it can be difficult to determine exactly which node a set of devices are
associated with.
Thankfully the collector has a special `--host-id` flag (or `COLLECTOR_HOST_ID` env variable) that can be used to
associate devices with a friendly host name.

The host-id is passed from the collector to the web-api when SMART device data is uploaded. There are 3 ways you can set
the host-id (see the example after this list):

- using the collector config file: [master/example.collector.yaml#L19-L22](https://github.com/AnalogJ/scrutiny/blob/master/example.collector.yaml?rgh-link-date=2022-05-25T15%3A08%3A56Z#L19-L22)
- using the `--host-id` collector CLI argument: [master/collector/cmd/collector-metrics/collector-metrics.go#L180-L185](https://github.com/AnalogJ/scrutiny/blob/master/collector/cmd/collector-metrics/collector-metrics.go?rgh-link-date=2022-05-25T15%3A08%3A56Z#L180-L185)
- using the `COLLECTOR_HOST_ID` environmental variable.
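
For example, when invoking the collector binary directly (the host name `nas-01` is just an illustration):

```bash
/opt/scrutiny/bin/scrutiny-collector-metrics run \
  --api-endpoint "http://192.168.0.100:8080" \
  --host-id "nas-01"
```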
See the [docs/INSTALL_HUB_SPOKE.md](/docs/INSTALL_HUB_SPOKE.md) guide for more information.

## Collector DEBUG mode

You can use environmental variables to enable debug logging and/or log files for the collector:

```bash
DEBUG=true
COLLECTOR_LOG_FILE=/tmp/collector.log
```

Or if you're not using docker, you can pass CLI arguments to the collector during startup:

```bash
scrutiny-collector-metrics run --debug --log-file /tmp/collector.log
```

## Collector trigger on startup

When the `omnibus` docker image starts up, it will automatically trigger the collector, which will populate the Scrutiny
Webui with your disks.
This is not the case when running the collector docker image in **hub/spoke** mode, as the collector and webui are
running in different containers (and potentially different host machines), so
the web container may not be ready for incoming connections. By default the container will only run the collector at the
time specified in the cron schedule.

You can force the collector to run on startup using the following env variables (see the sketch after this list):

- `-e COLLECTOR_RUN_STARTUP=true` - forces the collector to run on startup (cron will be started after the collector
completes)
- `-e COLLECTOR_RUN_STARTUP_SLEEP=10` - if `COLLECTOR_RUN_STARTUP` is enabled, you can use this env variable to
configure the delay before the collector is run (default: `1` second). Used to ensure the web container has started
successfully.
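
Putting those together, a hub/spoke collector container might be started like this (a sketch; the endpoint and device paths are examples mirroring the compose files above):

```bash
docker run -d --name scrutiny-collector \
  --cap-add SYS_RAWIO \
  --device /dev/sda \
  -v /run/udev:/run/udev:ro \
  -e COLLECTOR_API_ENDPOINT=http://192.168.0.100:8080 \
  -e COLLECTOR_RUN_STARTUP=true \
  -e COLLECTOR_RUN_STARTUP_SLEEP=10 \
  ghcr.io/analogj/scrutiny:master-collector
```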
@@ -0,0 +1,19 @@
# Docker Images `master-omnibus` vs `latest`

> TL;DR; The `master-omnibus` and `latest` tags are almost semantically identical, as I follow a `golden master`
> development process. However if you want to ensure you're only using the latest release, you can change to `latest`.

The CI script used to orchestrate the docker image builds can be found here: https://github.com/AnalogJ/scrutiny/blob/master/.github/workflows/docker-build.yaml#L166-L184

In general Scrutiny follows a `golden master` development process, which means that the `master` branch is not directly updated (unless it's for documentation changes);
instead development is done in a feature branch, or committed to the `beta` branch.

As development progresses, and we're satisfied that a feature is complete, and the quality is acceptable,
I merge the changes to `master` and trigger the creation of a new release -- ie, when master is updated, a new release
is almost immediately created (and tagged with `latest`).

So changing from `master-omnibus -> latest` will be the same thing for all intents and purposes.

> NOTE: Previously, there was an `automated cron build` that ran on the `master` and `beta` branches.
> It used to trigger a `nightly` build, even if nothing had changed on the branch. This has a couple of benefits, but one is to
> ensure that there are no broken external dependencies in our (unchanged) code. This `nightly` build no longer updates the `master-omnibus` tag.
@@ -0,0 +1,426 @@
# InfluxDB Troubleshooting

## Why??

Scrutiny has many features, but the relevant one to this conversation is the "S.M.A.R.T metric tracking for historical
trends". Basically Scrutiny not only shows you the current SMART values, but how they've changed over weeks, months (or
even years).

To efficiently handle that data at scale (and to make my life easier as a developer) I decided to add InfluxDB as a
dependency. It's a dedicated timeseries database, as opposed to the general purpose sqlite DB I used before. I also did
a bunch of testing and analysis before I made the change. With InfluxDB the memory footprint for Scrutiny (at idle) is
~100mb, which is still fairly reasonable.

## Installation

InfluxDB is a required dependency for Scrutiny v0.4.0+.

https://docs.influxdata.com/influxdb/v2.2/install/

## Persistence

To ensure that all data is correctly stored, you must also persist the InfluxDB database directory:

- If you're using the Official Scrutiny Omnibus image (`ghcr.io/analogj/scrutiny:master-omnibus`), the path is `/opt/scrutiny/influxdb`
- If you're deploying in Hub/Spoke mode with the InfluxDB maintained image (`influxdb:2.2`), the path is `/var/lib/influxdb2`
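
With the omnibus image that means mounting a host directory at that path, e.g. (a sketch mirroring the volume flags used in the privileged example in TROUBLESHOOTING_DEVICE_COLLECTOR.md):

```bash
docker run -d --name scrutiny \
  -v "$(pwd)/scrutiny:/opt/scrutiny/config" \
  -v "$(pwd)/influxdb2:/opt/scrutiny/influxdb" \
  ghcr.io/analogj/scrutiny:master-omnibus
```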
|
||||
|
||||
If you attempt to restart Scrutiny but you forgot to persist the InfluxDB directory, you will get an error message like follows:
|
||||
|
||||
```
|
||||
scrutiny | time="2022-05-12T22:54:12Z" level=info msg="Trying to connect to scrutiny sqlite db: /opt/scrutiny/config/scrutiny.db\n"
|
||||
scrutiny | time="2022-05-12T22:54:12Z" level=info msg="Successfully connected to scrutiny sqlite db: /opt/scrutiny/config/scrutiny.db\n"
|
||||
scrutiny | ts=2022-05-12T22:54:12.240791Z lvl=info msg=Unauthorized log_id=0aQcVlOW000 error="authorization not found"
|
||||
scrutiny | panic: unauthorized: unauthorized access
|
||||
```
|
||||
|
||||
Unfortunately this may mean that your database is lost, and the previous Scrutiny data is unavailable.
|
||||
You should fix the docker-compose/docker run command that you're using to ensure that your database folder is persisted correctly,
|
||||
then delete the `web.influxdb.token` field in your `scrutiny.yaml` file, and then restart Scrutiny.
|
||||
|
||||
|
||||
## First Start
|
||||
The web/api service will trigger an InfluxDB onboarding process automatically when it first starts. After that, it will store the newly generated influxdb api token in the Scrutiny config file.
|
||||
|
||||
If this Credential is not correctly stored in the scrutiny config file, Scrutiny will fail to start (with an authentication error)
|
||||
|
||||
```
|
||||
scrutiny | time="2022-05-12T22:52:55Z" level=info msg="Successfully connected to scrutiny sqlite db: /opt/scrutiny/config/scrutiny.db\n"
|
||||
scrutiny | ts=2022-05-12T22:52:55.235753Z lvl=error msg="failed to onboard user admin" log_id=0aQcRnc0000 handler=onboard error="onboarding has already been completed" took=0.038ms
|
||||
scrutiny | ts=2022-05-12T22:52:55.235816Z lvl=error msg="api error encountered" log_id=0aQcRnc0000 error="onboarding has already been completed"
|
||||
scrutiny | panic: conflict: onboarding has already been completed
|
||||
```
|
||||
|
||||
You can fix this issue by authenticating to the InfluxDB admin portal (the default credentials are username: `admin`, password: `password12345`),
|
||||
then retrieving the API token, and writing it to your `scrutiny.yaml` config file under the `web.influxdb.token` field:
|
||||
|
||||

|
||||
|
||||
## Upgrading from v0.3.x to v0.4.x

When upgrading from v0.3.x to v0.4.x, some users have noticed problems such as:

```
2022/05/13 14:38:05 Loading configuration file: /opt/scrutiny/config/scrutiny.yaml
time="2022-05-13T14:38:05Z" level=info msg="Trying to connect to scrutiny sqlite db:"
time="2022-05-13T14:38:05Z" level=info msg="Successfully connected to scrutiny sqlite db:"
panic: a username and password is required for a setup
```

or

```
Start the scrutiny server
time="2022-06-11T10:35:04-04:00" level=info msg="Trying to connect to scrutiny sqlite db: \n"
time="2022-06-11T10:35:04-04:00" level=info msg="Successfully connected to scrutiny sqlite db: \n"
panic: failed to check influxdb setup status - parse "://:": missing protocol scheme
```

As discussed in [#248](https://github.com/AnalogJ/scrutiny/issues/248) and [#234](https://github.com/AnalogJ/scrutiny/issues/234),
this is usually related to one of the following:

- Upgrading from the LSIO Scrutiny image to the Official Scrutiny image, without removing LSIO-specific environmental variables
  - Remove the `SCRUTINY_WEB=true` and `SCRUTINY_COLLECTOR=true` environmental variables. They were used by the LSIO image, but are unnecessary and cause issues with the official Scrutiny image.
  - Change your volume mappings to `/opt/scrutiny` from `/scrutiny` (see the sketch after this list)
- Updated versions of the [LSIO Scrutiny images are broken](https://github.com/linuxserver/docker-scrutiny/issues/22), as they do not install InfluxDB, which is a required dependency of Scrutiny v0.4.x
  - You can revert to an earlier version of the LSIO image (`lscr.io/linuxserver/scrutiny:060ac7b8-ls34`), or just change to the official Scrutiny image (`ghcr.io/analogj/scrutiny:master-omnibus`)
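A minimal sketch of the corrected volume mappings for the official image (host paths are assumptions):

```yaml
volumes:
  - ./config:/opt/scrutiny/config      # was mapped under /scrutiny with the LSIO image
  - ./influxdb:/opt/scrutiny/influxdb
```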
Here are a couple of confirmed-working docker-compose files that you may want to look at:

- https://github.com/AnalogJ/scrutiny/blob/master/docker/example.hubspoke.docker-compose.yml
- https://github.com/AnalogJ/scrutiny/blob/master/docker/example.omnibus.docker-compose.yml

## Bring your own InfluxDB

> WARNING: Most users should not follow these steps. This is ONLY for users who have an EXISTING InfluxDB installation which contains data from multiple services.
> The Scrutiny Docker omnibus image includes an empty InfluxDB instance which it can configure.
> If you're deploying manually or via Hub/Spoke, you can just follow the installation instructions; Scrutiny knows how
> to run the first-time setup automatically.

The goal here is to create an InfluxDB API token with minimal permissions for use by Scrutiny.

- Create Scrutiny buckets (`metrics`, `metrics_weekly`, `metrics_monthly`, `metrics_yearly`) with placeholder config
- Create downsampling tasks (`tsk-weekly-aggr`, `tsk-monthly-aggr`, `tsk-yearly-aggr`) with placeholder scripts
- Create an API token with restricted scope
- NOTE: Placeholder bucket & task configuration will be replaced automatically by Scrutiny during startup

The placeholder buckets and tasks need to be created before the API token can be created, as the resource IDs need to
exist for the scope restriction to work.

Scopes:

- `orgs`: read - required for Scrutiny to find its configured org_id
- `tasks`: Scrutiny-specific read/write access - Scrutiny only needs access to the downsampling tasks you created above
- `buckets`: Scrutiny-specific read/write access - Scrutiny only needs access to the buckets you created above
### Setup Environmental Variables

```bash
# replace the following values with correct values for your InfluxDB installation
export INFLUXDB_ADMIN_TOKEN=pCqRq7xxxxxx-FZgNLfstIs0w==
export INFLUXDB_ORG_ID=b2495xxxxx
export INFLUXDB_HOSTNAME=http://localhost:8086

# if you want to change the bucket name prefix below, you'll also need to update the setting in the scrutiny.yaml config file.
export INFLUXDB_SCRUTINY_BUCKET_BASENAME=metrics
```

### Create placeholder buckets

<details>
<summary>Click to expand!</summary>

```bash
curl -sS -X POST ${INFLUXDB_HOSTNAME}/api/v2/buckets \
  -H "Content-Type: application/json" \
  -H "Authorization: Token ${INFLUXDB_ADMIN_TOKEN}" \
  --data-binary @- << EOF
{
  "name": "${INFLUXDB_SCRUTINY_BUCKET_BASENAME}",
  "orgID": "${INFLUXDB_ORG_ID}",
  "retentionRules": []
}
EOF

curl -sS -X POST ${INFLUXDB_HOSTNAME}/api/v2/buckets \
  -H "Content-Type: application/json" \
  -H "Authorization: Token ${INFLUXDB_ADMIN_TOKEN}" \
  --data-binary @- << EOF
{
  "name": "${INFLUXDB_SCRUTINY_BUCKET_BASENAME}_weekly",
  "orgID": "${INFLUXDB_ORG_ID}",
  "retentionRules": []
}
EOF

curl -sS -X POST ${INFLUXDB_HOSTNAME}/api/v2/buckets \
  -H "Content-Type: application/json" \
  -H "Authorization: Token ${INFLUXDB_ADMIN_TOKEN}" \
  --data-binary @- << EOF
{
  "name": "${INFLUXDB_SCRUTINY_BUCKET_BASENAME}_monthly",
  "orgID": "${INFLUXDB_ORG_ID}",
  "retentionRules": []
}
EOF

curl -sS -X POST ${INFLUXDB_HOSTNAME}/api/v2/buckets \
  -H "Content-Type: application/json" \
  -H "Authorization: Token ${INFLUXDB_ADMIN_TOKEN}" \
  --data-binary @- << EOF
{
  "name": "${INFLUXDB_SCRUTINY_BUCKET_BASENAME}_yearly",
  "orgID": "${INFLUXDB_ORG_ID}",
  "retentionRules": []
}
EOF
```

</details>

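Since the four calls differ only in the bucket name suffix, you can equivalently run them in a loop - a compact sketch, assuming the same environment variables as above:

```bash
for suffix in "" "_weekly" "_monthly" "_yearly"; do
  curl -sS -X POST ${INFLUXDB_HOSTNAME}/api/v2/buckets \
    -H "Content-Type: application/json" \
    -H "Authorization: Token ${INFLUXDB_ADMIN_TOKEN}" \
    --data-binary "{\"name\": \"${INFLUXDB_SCRUTINY_BUCKET_BASENAME}${suffix}\", \"orgID\": \"${INFLUXDB_ORG_ID}\", \"retentionRules\": []}"
done
```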
### Create placeholder tasks

<details>
<summary>Click to expand!</summary>

```bash
curl -sS -X POST ${INFLUXDB_HOSTNAME}/api/v2/tasks \
  -H "Content-Type: application/json" \
  -H "Authorization: Token ${INFLUXDB_ADMIN_TOKEN}" \
  --data-binary @- << EOF
{
  "orgID": "${INFLUXDB_ORG_ID}",
  "flux": "option task = {name: \"tsk-weekly-aggr\", every: 1y} \nyield now()"
}
EOF

curl -sS -X POST ${INFLUXDB_HOSTNAME}/api/v2/tasks \
  -H "Content-Type: application/json" \
  -H "Authorization: Token ${INFLUXDB_ADMIN_TOKEN}" \
  --data-binary @- << EOF
{
  "orgID": "${INFLUXDB_ORG_ID}",
  "flux": "option task = {name: \"tsk-monthly-aggr\", every: 1y} \nyield now()"
}
EOF

curl -sS -X POST ${INFLUXDB_HOSTNAME}/api/v2/tasks \
  -H "Content-Type: application/json" \
  -H "Authorization: Token ${INFLUXDB_ADMIN_TOKEN}" \
  --data-binary @- << EOF
{
  "orgID": "${INFLUXDB_ORG_ID}",
  "flux": "option task = {name: \"tsk-yearly-aggr\", every: 1y} \nyield now()"
}
EOF
```

</details>

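The next step requires the IDs of the buckets and tasks you just created. One quick way to look them up is via the InfluxDB API - a sketch assuming the same admin token, with `jq` used only for readability:

```bash
curl -sS ${INFLUXDB_HOSTNAME}/api/v2/buckets \
  -H "Authorization: Token ${INFLUXDB_ADMIN_TOKEN}" | jq '.buckets[] | {name, id}'

curl -sS ${INFLUXDB_HOSTNAME}/api/v2/tasks \
  -H "Authorization: Token ${INFLUXDB_ADMIN_TOKEN}" | jq '.tasks[] | {name, id}'
```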
### Create InfluxDB API Token

<details>
<summary>Click to expand!</summary>

```bash
# replace these values with placeholder bucket and task ids from your InfluxDB installation.
export INFLUXDB_SCRUTINY_BASE_BUCKET_ID=1e0709xxxx
export INFLUXDB_SCRUTINY_WEEKLY_BUCKET_ID=1af03dexxxxx
export INFLUXDB_SCRUTINY_MONTHLY_BUCKET_ID=b3c59c7xxxxx
export INFLUXDB_SCRUTINY_YEARLY_BUCKET_ID=f381d8cxxxxx

export INFLUXDB_SCRUTINY_WEEKLY_TASK_ID=09a64ecxxxxx
export INFLUXDB_SCRUTINY_MONTHLY_TASK_ID=09a64xxxxx
export INFLUXDB_SCRUTINY_YEARLY_TASK_ID=09a64ecxxxxx


curl -sS -X POST ${INFLUXDB_HOSTNAME}/api/v2/authorizations \
  -H "Content-Type: application/json" \
  -H "Authorization: Token ${INFLUXDB_ADMIN_TOKEN}" \
  --data-binary @- << EOF
{
  "description": "scrutiny - restricted scope token",
  "orgID": "${INFLUXDB_ORG_ID}",
  "permissions": [
    {
      "action": "read",
      "resource": {
        "type": "orgs"
      }
    },
    {
      "action": "read",
      "resource": {
        "type": "tasks"
      }
    },
    {
      "action": "write",
      "resource": {
        "type": "tasks",
        "id": "${INFLUXDB_SCRUTINY_WEEKLY_TASK_ID}",
        "orgID": "${INFLUXDB_ORG_ID}"
      }
    },
    {
      "action": "write",
      "resource": {
        "type": "tasks",
        "id": "${INFLUXDB_SCRUTINY_MONTHLY_TASK_ID}",
        "orgID": "${INFLUXDB_ORG_ID}"
      }
    },
    {
      "action": "write",
      "resource": {
        "type": "tasks",
        "id": "${INFLUXDB_SCRUTINY_YEARLY_TASK_ID}",
        "orgID": "${INFLUXDB_ORG_ID}"
      }
    },
    {
      "action": "read",
      "resource": {
        "type": "buckets",
        "id": "${INFLUXDB_SCRUTINY_BASE_BUCKET_ID}",
        "orgID": "${INFLUXDB_ORG_ID}"
      }
    },
    {
      "action": "write",
      "resource": {
        "type": "buckets",
        "id": "${INFLUXDB_SCRUTINY_BASE_BUCKET_ID}",
        "orgID": "${INFLUXDB_ORG_ID}"
      }
    },
    {
      "action": "read",
      "resource": {
        "type": "buckets",
        "id": "${INFLUXDB_SCRUTINY_WEEKLY_BUCKET_ID}",
        "orgID": "${INFLUXDB_ORG_ID}"
      }
    },
    {
      "action": "write",
      "resource": {
        "type": "buckets",
        "id": "${INFLUXDB_SCRUTINY_WEEKLY_BUCKET_ID}",
        "orgID": "${INFLUXDB_ORG_ID}"
      }
    },
    {
      "action": "read",
      "resource": {
        "type": "buckets",
        "id": "${INFLUXDB_SCRUTINY_MONTHLY_BUCKET_ID}",
        "orgID": "${INFLUXDB_ORG_ID}"
      }
    },
    {
      "action": "write",
      "resource": {
        "type": "buckets",
        "id": "${INFLUXDB_SCRUTINY_MONTHLY_BUCKET_ID}",
        "orgID": "${INFLUXDB_ORG_ID}"
      }
    },
    {
      "action": "read",
      "resource": {
        "type": "buckets",
        "id": "${INFLUXDB_SCRUTINY_YEARLY_BUCKET_ID}",
        "orgID": "${INFLUXDB_ORG_ID}"
      }
    },
    {
      "action": "write",
      "resource": {
        "type": "buckets",
        "id": "${INFLUXDB_SCRUTINY_YEARLY_BUCKET_ID}",
        "orgID": "${INFLUXDB_ORG_ID}"
      }
    }
  ]
}
EOF
```

</details>

### Save InfluxDB API Token

After running the curl command above, you'll see a JSON response that looks like the following:

```json
{
  "token": "ksVU2t5SkQwYkvIxxxxxxxYt2xUt0uRKSbSF1Po0UQ==",
  "status": "active",
  "description": "scrutiny - restricted scope token",
  "orgID": "b2495586xxxx",
  "org": "my-org",
  "user": "admin",
  "permissions": [
    {
      "action": "read",
      "resource": {
        "type": "orgs"
      }
    },
    {
      "action": "read",
      "resource": {
        "type": "tasks"
      }
    },
    {
      "action": "write",
      "resource": {
        "type": "tasks",
        "id": "09a64exxxxx",
        "orgID": "b24955860xxxxx",
        "org": "my-org"
      }
    },
    ...
  ]
}
```

You must copy the `token` field from the JSON response, and save it in your `scrutiny.yaml` config file. After that's
done, you can start the Scrutiny server.

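If you're pointing Scrutiny at an external InfluxDB, the relevant `scrutiny.yaml` section ends up looking something like this sketch (the hostname, org, bucket and token values are placeholders):

```yaml
web:
  influxdb:
    host: 'influxdb.example.com'
    port: 8086
    org: 'my-org'
    bucket: 'metrics'
    token: 'ksVU2t5SkQwYkvIxxxxxxxYt2xUt0uRKSbSF1Po0UQ=='
```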
## Customize InfluxDB Admin Username & Password

The full set of InfluxDB configuration options is available
in [the code](https://github.com/AnalogJ/scrutiny/blob/master/webapp/backend/pkg/config/config.go?rgh-link-date=2023-01-19T16%3A23%3A40Z#L49-L51).

During first startup, Scrutiny will connect to the unprotected InfluxDB server, start the setup process (via API) using a
username and password of `admin`:`password12345`, and then create an API token of `scrutiny-default-admin-token`.

After that's complete, it will use the API token for all subsequent communication with InfluxDB.

You can configure the values for the admin username, password and token using the config file, or env variables:

#### Config File Example

```yaml
web:
  influxdb:
    token: 'my-custom-token'
    init_username: 'my-custom-username'
    init_password: 'my-custom-password'
```

#### Environmental Variables Example

`SCRUTINY_WEB_INFLUXDB_TOKEN` , `SCRUTINY_WEB_INFLUXDB_INIT_USERNAME` and `SCRUTINY_WEB_INFLUXDB_INIT_PASSWORD`
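For example, in a docker-compose file (a sketch; the values are placeholders):

```yaml
services:
  scrutiny:
    image: ghcr.io/analogj/scrutiny:master-omnibus
    environment:
      - SCRUTINY_WEB_INFLUXDB_INIT_USERNAME=my-custom-username
      - SCRUTINY_WEB_INFLUXDB_INIT_PASSWORD=my-custom-password
      - SCRUTINY_WEB_INFLUXDB_TOKEN=my-custom-token
```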

It's safe to change the InfluxDB admin username/password after setup has completed; only the API token is used for
subsequent communication with InfluxDB.

@@ -0,0 +1,31 @@

# Notifications

As documented in [example.scrutiny.yaml](https://github.com/AnalogJ/scrutiny/blob/master/example.scrutiny.yaml#L59-L75)
there are multiple ways to configure notifications for Scrutiny.

Under the hood we use a library called [Shoutrrr](https://github.com/containrrr/shoutrrr) to send our notifications, and you should consult their documentation if you run into
any issues: https://containrrr.dev/shoutrrr/services/overview/


# Script Notifications

While the Shoutrrr library supports many popular providers for sending notifications, Scrutiny also supports a script-based
notification system, allowing you to execute a custom script whenever a notification needs to be sent.
Data is provided to this script using the following environmental variables:

```
SCRUTINY_SUBJECT - eg. "Scrutiny SMART error (%s) detected on device: %s"
SCRUTINY_DATE
SCRUTINY_FAILURE_TYPE - EmailTest, SmartFail, ScrutinyFail
SCRUTINY_DEVICE_NAME - eg. /dev/sda
SCRUTINY_DEVICE_TYPE - ATA/SCSI/NVMe
SCRUTINY_DEVICE_SERIAL - eg. WDDJ324KSO
SCRUTINY_MESSAGE - eg. "Scrutiny SMART error notification for device: %s\nFailure Type: %s\nDevice Name: %s\nDevice Serial: %s\nDevice Type: %s\nDate: %s"
SCRUTINY_HOST_ID - (optional) eg. "my-custom-host-id"
```

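For example, a minimal sketch of such a script, which simply appends each notification to a log file (the log path is an assumption):

```bash
#!/bin/bash
# called by Scrutiny; all notification data arrives via SCRUTINY_* environment variables
echo "$(date) [${SCRUTINY_FAILURE_TYPE}] ${SCRUTINY_DEVICE_NAME} (${SCRUTINY_DEVICE_SERIAL}): ${SCRUTINY_SUBJECT}" >> /var/log/scrutiny-notify.log
```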
# Testing Notifications

You can test that your notifications are configured correctly by posting an empty payload to the notifications health check API.

```
curl -X POST http://localhost:8080/api/health/notify
```

@@ -0,0 +1,106 @@

# Reverse Proxy Support

Scrutiny is designed so that it can be used with a reverse proxy, leveraging `domain`, `port` or `path` based matching to correctly route to the Scrutiny service.

For simple `domain` and/or `port` based routing, this is easy.

If your domain:port pair is similar to `http://scrutiny.example.com` or `http://localhost:54321`, just update your reverse proxy configuration
to route traffic to the Scrutiny backend, which is listening on `0.0.0.0:8080` by default.

```yaml
# default config
web:
  listen:
    port: 8080
    host: 0.0.0.0
```

However, if you're using `path` based routing to differentiate your reverse proxy protected services, things become more complicated.

If you'd like to access Scrutiny using a path like `http://example.com/scrutiny/`, then we need a way to configure Scrutiny so that it
understands `http://example.com/scrutiny/api/health` actually means `http://localhost:8080/api/health`.

Thankfully this can be done by changing **two** settings (both are required).

1. The webserver has a `web.listen.basepath` key
2. The collectors have an `api.endpoint` key

## Webserver Configuration

When setting the `web.listen.basepath` key in the web config file, make sure the `basepath` value is prefixed with `/`.

```yaml
# customized webserver config
web:
  listen:
    port: 8080
    host: 0.0.0.0
    # if you're using a reverse proxy like apache/nginx, you can override this value to serve scrutiny on a subpath.
    # eg. http://example.com/custombasepath/* vs http://example.com:8080
    basepath: '/custombasepath'
```

## Collector Configuration

Here's how you can update the collector `api.endpoint` key:

```yaml
# customized collector config
api:
  endpoint: 'http://localhost:8080/custombasepath'
```

# Environmental Variables

You may also configure these values using the following environmental variables (both are required).

- `COLLECTOR_API_ENDPOINT=http://localhost:8080/custombasepath`
- `SCRUTINY_WEB_LISTEN_BASEPATH=/custombasepath`

# Real Examples

## Caddy

1. Create a Caddyfile

```yaml
# Caddyfile
:9090

# The `scrutiny` text in this file must match the service name in the docker-compose file below.
# The `/custom/` text is the custom base path scrutiny will be available on.
reverse_proxy /custom/* scrutiny:8080
```

2. Create a `docker-compose.yml` file

```yaml
# docker-compose.yml
version: '3.5'

services:
  scrutiny:
    container_name: scrutiny
    image: ghcr.io/analogj/scrutiny:master-omnibus
    cap_add:
      - SYS_RAWIO
    ports:
      - "8086:8086" # influxDB admin
    volumes:
      - /run/udev:/run/udev:ro
      - ./config:/opt/scrutiny/config
      - ./influxdb:/opt/scrutiny/influxdb
    devices:
      - "/dev/sda"
      - "/dev/sdb"
    environment:
      - SCRUTINY_WEB_LISTEN_BASEPATH=/custom
      - COLLECTOR_API_ENDPOINT=http://localhost:8080/custom
  caddy:
    image: caddy
    volumes:
      - ./Caddyfile:/etc/caddy/Caddyfile
    ports:
      - "9090:9090"
```

3. Run `docker-compose up`
4. Visit [http://localhost:9090/custom/web](http://localhost:9090/custom/web) to access the Scrutiny container via the Caddy reverse proxy
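## Nginx

An equivalent sketch for nginx, which this repo's examples do not cover - untested, and it assumes the same `scrutiny` service name and `/custom` base path as the Caddy example above:

```
# nginx server block excerpt
server {
    listen 9090;

    location /custom/ {
        # pass the full original URI (including /custom/) through to scrutiny;
        # scrutiny serves under this prefix because SCRUTINY_WEB_LISTEN_BASEPATH=/custom
        proxy_pass http://scrutiny:8080;
    }
}
```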
@@ -0,0 +1,18 @@

# Operating systems without udev

Some operating systems do not come with `udev` out of the box, for example Alpine Linux. In these instances you will not be able to bind `/run/udev` to the container for sharing device metadata. Some operating systems offer `udev` as a package that can be installed separately, or an alternative (such as `eudev` in the case of Alpine Linux) that provides the same functionality.

To install `eudev` in Alpine Linux (run as root):

```
apk add eudev
setup-udev
```

Once your `udev` implementation is installed, populate `/run/udev` with the following command:

```
udevadm trigger
```

On Alpine Linux, this also has the benefit of creating symlinks to device serial numbers in `/dev/disk/by-id`.

@@ -1,62 +1,88 @@

// SQLite Table(s)
Table device {
  created_at timestamp

  wwn varchar [pk]
Table Device {
  //GORM attributes, see: http://gorm.io/docs/conventions.html
  CreatedAt time
  UpdatedAt time
  DeletedAt time

  //user provided
  label varchar
  host_id varchar
  WWN string

  // smartctl provided
  device_name varchar
  manufacturer varchar
  model_name varchar
  interface_type varchar
  interface_speed varchar
  serial_number varchar
  firmware varchar
  rotational_speed varchar
  capacity varchar
  form_factor varchar
  smart_support varchar
  device_protocol varchar
  device_type varchar
  DeviceName string
  DeviceUUID string
  DeviceSerialID string
  DeviceLabel string

  Manufacturer string
  ModelName string
  InterfaceType string
  InterfaceSpeed string
  SerialNumber string
  Firmware string
  RotationSpeed int
  Capacity int64
  FormFactor string
  SmartSupport bool
  DeviceProtocol string //protocol determines which smart attribute types are available (ATA, NVMe, SCSI)
  DeviceType string //device type is used for querying with -d/t flag, should only be used by collector.

  // User provided metadata
  Label string
  HostId string

  // Data set by Scrutiny
  DeviceStatus enum
}

Table Setting {
  //GORM attributes, see: http://gorm.io/docs/conventions.html

  SettingKeyName string
  SettingKeyDescription string
  SettingDataType string

  SettingValueNumeric int64
  SettingValueString string
}


// InfluxDB Tables
Table device_temperature {
  //timestamp
  created_at timestamp

  //tags (indexed & queryable)
  device_wwn varchar [pk]

  //fields
  temp bigint
}
Table SmartTemperature {
  Date time
  DeviceWWN string //(tag)
  Temp int64
}


Table smart_ata_results {
  //timestamp
  created_at timestamp
Table Smart {
  Date time
  DeviceWWN string //(tag)
  DeviceProtocol string

  //tags (indexed & queryable)
  device_wwn varchar [pk]
  smart_status varchar
  scrutiny_status varchar
  //Metrics (fields)
  Temp int64
  PowerOnHours int64
  PowerCycleCount int64

  //Smart Status
  Status enum


  //fields
  temp bigint
  power_on_hours bigint
  power_cycle_count bigint

  //SMART Attributes (fields)
  Attr_ID_AttributeId int
  Attr_ID_Value int64
  Attr_ID_Threshold int64
  Attr_ID_Worst int64
  Attr_ID_RawValue int64
  Attr_ID_RawString string
  Attr_ID_WhenFailed string
  //Generated data
  Attr_ID_TransformedValue int64
  Attr_ID_Status enum
  Attr_ID_StatusReason string
  Attr_ID_FailureRate float64

}

Ref: device.wwn < smart_ata_results.device_wwn
Ref: Device.WWN < Smart.DeviceWWN
Ref: Device.WWN < SmartTemperature.DeviceWWN

@@ -31,6 +31,10 @@ devices:
# - device: /dev/sda
#   type: 'sat'
#
# # example for using `-d sat,auto`, notice the square brackets (workaround for #418)
# - device: /dev/sda
#   type: ['sat,auto']
#
# # example to show how to ignore a specific disk/device.
# - device: /dev/sda
#   ignore: true
@@ -53,6 +57,13 @@ devices:
#   - 3ware,3
#   - 3ware,4
#   - 3ware,5
#
# # example to show how to override the smartctl command args (per device), see below for how to override these globally.
# - device: /dev/sda
#   commands:
#     metrics_info_args: '--info --json -T permissive' # used to determine device unique ID & register device with Scrutiny
#     metrics_smart_args: '--xall --json -T permissive' # used to retrieve smart data for each device.


#log:
#  file: '' #absolute or relative paths allowed, eg. web.log
@@ -60,6 +71,17 @@ devices:
#
#api:
#  endpoint: 'http://localhost:8080'
#  endpoint: 'http://localhost:8080/custombasepath'
#  if you need to use a custom base path (for a reverse proxy), you can add a suffix to the endpoint.
#  See docs/TROUBLESHOOTING_REVERSE_PROXY.md for more info.

# example to show how to override the smartctl command args globally
#commands:
#  metrics_smartctl_bin: 'smartctl' # change to provide custom `smartctl` binary path, eg. `/usr/sbin/smartctl`
#  metrics_scan_args: '--scan --json' # used to detect devices
#  metrics_info_args: '--info --json' # used to determine device unique ID & register device with Scrutiny
#  metrics_smart_args: '--xall --json' # used to retrieve smart data for each device.


########################################################################################################################
# FEATURES COMING SOON
@@ -68,3 +90,10 @@ devices:
#
########################################################################################################################

#collect:
#  long:
#    enable: false
#    command: ''
#  short:
#    enable: false
#    command: ''

@@ -34,13 +34,24 @@ web:
  # the location on the filesystem where scrutiny javascript + css is located
  frontend:
    path: /opt/scrutiny/web

  # if you're running influxdb on a different host (or using a cloud-provider) you'll need to update the host & port below.
  # token, org, bucket are unnecessary for a new InfluxDB installation, as Scrutiny will automatically run the InfluxDB setup,
  # and store the information in the config file. If you're re-using an existing influxdb installation, you'll need to provide
  # the `token`
  influxdb:
    # scheme: 'http'
    host: 0.0.0.0
    port: 8086
    # token: 'my-token'
    # org: 'my-org'
    # bucket: 'bucket'
    retention_policy: true
    # if you wish to disable TLS certificate verification,
    # when using self-signed certificates for example,
    # then uncomment the lines below and set `insecure_skip_verify: true`
    # tls:
    #   insecure_skip_verify: false

log:
  file: '' #absolute or relative paths allowed, eg. web.log
@@ -62,6 +73,7 @@ log:
#  - "pushbullet://api-token[/device/#channel/email]"
#  - "ifttt://key/?events=event1[,event2,...]&value1=value1&value2=value2&value3=value3"
#  - "mattermost://[username@]mattermost-host/token[/channel]"
#  - "ntfy://username:password@host:port/topic"
#  - "hangouts://chat.googleapis.com/v1/spaces/FOO/messages?key=bar&token=baz"
#  - "zulip://bot-mail:bot-key@zulip-domain/?stream=name-or-id&topic=name"
#  - "join://shoutrrr:api-key@join/?devices=device1[,device2, ...][&icon=icon][&title=title]"
@@ -75,12 +87,6 @@ log:
#
########################################################################################################################

#disks:
#  include:
#    # - /dev/sda
#  exclude:
#    # - /dev/sdb

#limits:
#  ata:
#    critical:
@@ -95,14 +101,3 @@ log:
#    critical: true
#    standard: true


#collect:
#  metric:
#    enable: true
#    command: '-a -o on -S on'
#  long:
#    enable: false
#    command: ''
#  short:
#    enable: false
#    command: ''

@@ -1,31 +1,82 @@

module github.com/analogj/scrutiny

go 1.13
go 1.20

require (
	github.com/analogj/go-util v0.0.0-20190301173314-5295e364eb14
	github.com/citilinkru/libudev v1.0.0 // indirect
	github.com/containrrr/shoutrrr v0.4.4
	github.com/fatih/color v1.10.0
	github.com/containrrr/shoutrrr v0.7.1
	github.com/fatih/color v1.15.0
	github.com/gin-gonic/gin v1.6.3
	github.com/glebarez/sqlite v1.4.5
	github.com/go-gormigrate/gormigrate/v2 v2.0.0
	github.com/golang/mock v1.4.3
	github.com/google/uuid v1.2.0 // indirect
	github.com/influxdata/influxdb-client-go/v2 v2.2.3
	github.com/golang/mock v1.6.0
	github.com/influxdata/influxdb-client-go/v2 v2.9.0
	github.com/jaypipes/ghw v0.6.1
	github.com/jinzhu/gorm v1.9.16
	github.com/klauspost/compress v1.12.1 // indirect
	github.com/kvz/logstreamer v0.0.0-20150507115422-a635b98146f0 // indirect
	github.com/mattn/go-sqlite3 v1.14.4 // indirect
	github.com/mitchellh/mapstructure v1.2.2
	github.com/onsi/ginkgo v1.16.1 // indirect
	github.com/sirupsen/logrus v1.4.2
	github.com/spf13/viper v1.7.0
	github.com/stretchr/testify v1.5.1
	github.com/mitchellh/mapstructure v1.5.0
	github.com/samber/lo v1.25.0
	github.com/sirupsen/logrus v1.6.0
	github.com/spf13/viper v1.14.0
	github.com/stretchr/testify v1.8.1
	github.com/urfave/cli/v2 v2.2.0
	golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9
	golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7 // indirect
	gorm.io/driver/sqlite v1.1.3
	gorm.io/gorm v1.20.2
	nhooyr.io/websocket v1.8.7 // indirect
	golang.org/x/sync v0.1.0
	gorm.io/gorm v1.23.5
)

require (
	github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect
	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/deepmap/oapi-codegen v1.8.2 // indirect
	github.com/fsnotify/fsnotify v1.6.0 // indirect
	github.com/ghodss/yaml v1.0.0 // indirect
	github.com/gin-contrib/sse v0.1.0 // indirect
	github.com/glebarez/go-sqlite v1.17.2 // indirect
	github.com/go-ole/go-ole v1.2.4 // indirect
	github.com/go-playground/locales v0.13.0 // indirect
	github.com/go-playground/universal-translator v0.17.0 // indirect
	github.com/go-playground/validator/v10 v10.2.0 // indirect
	github.com/golang/protobuf v1.5.2 // indirect
	github.com/google/uuid v1.3.0 // indirect
	github.com/hashicorp/hcl v1.0.0 // indirect
	github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect
	github.com/jaypipes/pcidb v0.5.0 // indirect
	github.com/jinzhu/inflection v1.0.0 // indirect
	github.com/jinzhu/now v1.1.4 // indirect
	github.com/json-iterator/go v1.1.12 // indirect
	github.com/konsorten/go-windows-terminal-sequences v1.0.3 // indirect
	github.com/kvz/logstreamer v0.0.0-20201023134116-02d20f4338f5 // indirect
	github.com/leodido/go-urn v1.2.0 // indirect
	github.com/magiconair/properties v1.8.6 // indirect
	github.com/mattn/go-colorable v0.1.13 // indirect
	github.com/mattn/go-isatty v0.0.18 // indirect
	github.com/mitchellh/go-homedir v1.1.0 // indirect
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
	github.com/modern-go/reflect2 v1.0.2 // indirect
	github.com/pelletier/go-toml v1.9.5 // indirect
	github.com/pelletier/go-toml/v2 v2.0.5 // indirect
	github.com/pkg/errors v0.9.1 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
	github.com/russross/blackfriday/v2 v2.1.0 // indirect
	github.com/spf13/afero v1.9.2 // indirect
	github.com/spf13/cast v1.5.0 // indirect
	github.com/spf13/jwalterweatherman v1.1.0 // indirect
	github.com/spf13/pflag v1.0.5 // indirect
	github.com/subosito/gotenv v1.4.1 // indirect
	github.com/ugorji/go/codec v1.1.7 // indirect
	golang.org/x/crypto v0.1.0 // indirect
	golang.org/x/exp v0.0.0-20220303212507-bbda1eaf7a17 // indirect
	golang.org/x/net v0.1.0 // indirect
	golang.org/x/sys v0.7.0 // indirect
	golang.org/x/term v0.1.0 // indirect
	golang.org/x/text v0.4.0 // indirect
	google.golang.org/protobuf v1.28.1 // indirect
	gopkg.in/ini.v1 v1.67.0 // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
	howett.net/plist v0.0.0-20181124034731-591f970eefbb // indirect
	modernc.org/libc v1.16.8 // indirect
	modernc.org/mathutil v1.4.1 // indirect
	modernc.org/memory v1.1.1 // indirect
	modernc.org/sqlite v1.17.2 // indirect
)

@@ -1,18 +1,6 @@

---
engine_enable_code_mutation: true
engine_cmd_compile:
  - go build -ldflags '-w -extldflags "-static"' -o scrutiny webapp/backend/cmd/scrutiny/scrutiny.go
  - 'GOOS=linux GOARCH=amd64 go build -ldflags "-X main.goos=linux -X main.goarch=amd64" -o scrutiny-web-linux-amd64 -tags "static" webapp/backend/cmd/scrutiny/scrutiny.go'
  - 'chmod +x scrutiny-web-linux-amd64'
  - 'GOOS=linux GOARCH=amd64 go build -ldflags "-X main.goos=linux -X main.goarch=amd64" -o scrutiny-collector-metrics-linux-amd64 -tags "static" collector/cmd/collector-metrics/collector-metrics.go'
  - 'chmod +x scrutiny-collector-metrics-linux-amd64'
mgr_keep_lock_file: true
engine_version_metadata_path: 'webapp/backend/pkg/version/version.go'
engine_cmd_test: 'go test -v -tags "static" $(go list ./... | grep -v /vendor/)'
engine_golang_package_path: 'github.com/analogj/scrutiny'
scm_enable_branch_cleanup: true
engine_disable_lint: true
scm_release_assets:
  - local_path: scrutiny-web-linux-amd64
    artifact_name: scrutiny-web-linux-amd64
  - local_path: scrutiny-collector-metrics-linux-amd64
    artifact_name: scrutiny-collector-metrics-linux-amd64

@@ -1,4 +0,0 @@
#!/usr/bin/with-contenv bash

COLLECTOR_CRON_SCHEDULE=${COLLECTOR_CRON_SCHEDULE:-"0 0 * * *"}
sed -i 's|{COLLECTOR_CRON_SCHEDULE}|'"${COLLECTOR_CRON_SCHEDULE}"'|g' /etc/cron.d/scrutiny
@@ -0,0 +1,15 @@
#!/usr/bin/with-contenv bash

# Cron runs in its own isolated environment (usually using only /etc/environment )
# So when the container starts up, we will do a dump of the runtime environment into a .env file that we
# will then source into the crontab file (/etc/cron.d/scrutiny)
(set -o posix; export -p) > /env.sh

# adding ability to customize the cron schedule.
COLLECTOR_CRON_SCHEDULE=${COLLECTOR_CRON_SCHEDULE:-"0 0 * * *"}

# if the cron schedule has been overridden via env variable (eg docker-compose) we should make sure to strip quotes
[[ "${COLLECTOR_CRON_SCHEDULE}" == \"*\" || "${COLLECTOR_CRON_SCHEDULE}" == \'*\' ]] && COLLECTOR_CRON_SCHEDULE="${COLLECTOR_CRON_SCHEDULE:1:-1}"

# replace placeholder with correct value
sed -i 's|{COLLECTOR_CRON_SCHEDULE}|'"${COLLECTOR_CRON_SCHEDULE}"'|g' /etc/cron.d/scrutiny
@@ -9,5 +9,5 @@ s6-svc -O /var/run/s6/services/collector-once
# wait until scrutiny is "Ready"
until $(curl --output /dev/null --silent --head --fail http://localhost:8080/api/health); do echo "scrutiny api not ready" && sleep 5; done

echo "starting scrutiny collector"
echo "starting scrutiny collector (run-once mode. subsequent calls will be triggered via cron service)"
/opt/scrutiny/bin/scrutiny-collector-metrics run

Regular → Executable
@@ -1,10 +1,4 @@
#!/usr/bin/with-contenv bash

# Cron runs in its own isolated environment (usually using only /etc/environment )
# So when the container starts up, we will do a dump of the runtime environment into a .env file that we
# will then source into the crontab file (/etc/cron.d/scrutiny.sh)

printenv | sed 's/^\(.*\)$/export \1/g' > /env.sh

echo "starting cron"
cron -f
cron -f -L 15

@@ -1,12 +1,15 @@
package main

import (
	"encoding/json"
	"fmt"
	"github.com/analogj/scrutiny/webapp/backend/pkg/config"
	"github.com/analogj/scrutiny/webapp/backend/pkg/errors"
	"github.com/analogj/scrutiny/webapp/backend/pkg/version"
	"github.com/analogj/scrutiny/webapp/backend/pkg/web"
	log "github.com/sirupsen/logrus"
	"github.com/sirupsen/logrus"
	"io"
	"log"
	"os"
	"time"

@@ -26,11 +29,18 @@ func main() {
		os.Exit(1)
	}

	configFilePath := "/opt/scrutiny/config/scrutiny.yaml"
	configFilePathAlternative := "/opt/scrutiny/config/scrutiny.yml"
	if !utils.FileExists(configFilePath) && utils.FileExists(configFilePathAlternative) {
		configFilePath = configFilePathAlternative
	}

	//we're going to load the config file manually, since we need to validate it.
	err = config.ReadConfig("/opt/scrutiny/config/scrutiny.yaml") // Find and read the config file
	err = config.ReadConfig(configFilePath) // Find and read the config file
	if _, ok := err.(errors.ConfigFileMissingError); ok { // Handle errors reading the config file
		//ignore "could not find config file"
	} else if err != nil {
		log.Print(color.HiRedString("CONFIG ERROR: %v", err))
		os.Exit(1)
	}

@@ -107,7 +117,18 @@ OPTIONS:
		config.Set("log.file", c.String("log-file"))
	}

	webServer := web.AppEngine{Config: config}
	webLogger, logFile, err := CreateLogger(config)
	if logFile != nil {
		defer logFile.Close()
	}
	if err != nil {
		return err
	}

	settingsData, err := json.Marshal(config.AllSettings())
	webLogger.Debug(string(settingsData), err)

	webServer := web.AppEngine{Config: config, Logger: webLogger}

	return webServer.Start()
},
@@ -140,3 +161,27 @@ OPTIONS:
	}

}

func CreateLogger(appConfig config.Interface) (*logrus.Entry, *os.File, error) {
	logger := logrus.WithFields(logrus.Fields{
		"type": "web",
	})
	//set default log level
	if level, err := logrus.ParseLevel(appConfig.GetString("log.level")); err == nil {
		logger.Logger.SetLevel(level)
	} else {
		logger.Logger.SetLevel(logrus.InfoLevel)
	}

	var logFile *os.File
	var err error
	if appConfig.IsSet("log.file") && len(appConfig.GetString("log.file")) > 0 {
		logFile, err = os.OpenFile(appConfig.GetString("log.file"), os.O_CREATE|os.O_WRONLY, 0644)
		if err != nil {
			logger.Logger.Errorf("Failed to open log file %s for output: %s", appConfig.GetString("log.file"), err)
			return nil, logFile, err
		}
		logger.Logger.SetOutput(io.MultiWriter(os.Stderr, logFile))
	}
	return logger, logFile, nil
}

@@ -9,6 +9,8 @@ import (
	"strings"
)

const DB_USER_SETTINGS_SUBKEY = "user"

// When initializing this class the following methods must be called:
// Config.New
// Config.Init
@@ -39,28 +41,20 @@ func (c *configuration) Init() error {

	c.SetDefault("notify.urls", []string{})

	c.SetDefault("web.influxdb.host", "0.0.0.0")
	c.SetDefault("web.influxdb.scheme", "http")
	c.SetDefault("web.influxdb.host", "localhost")
	c.SetDefault("web.influxdb.port", "8086")
	c.SetDefault("web.influxdb.org", "scrutiny")
	c.SetDefault("web.influxdb.bucket", "metrics")
	c.SetDefault("web.influxdb.init_username", "admin")
	c.SetDefault("web.influxdb.init_password", "password12345")
	c.SetDefault("web.influxdb.token", "scrutiny-default-admin-token")
	c.SetDefault("web.influxdb.tls.insecure_skip_verify", false)
	c.SetDefault("web.influxdb.retention_policy", true)

	//c.SetDefault("disks.include", []string{})
	//c.SetDefault("disks.exclude", []string{})

	//c.SetDefault("notify.metric.script", "/opt/scrutiny/config/notify-metrics.sh")
	//c.SetDefault("notify.long.script", "/opt/scrutiny/config/notify-long-test.sh")
	//c.SetDefault("notify.short.script", "/opt/scrutiny/config/notify-short-test.sh")

	//c.SetDefault("collect.metric.enable", true)
	//c.SetDefault("collect.metric.command", "-a -o on -S on")
	//c.SetDefault("collect.long.enable", true)
	//c.SetDefault("collect.long.command", "-a -o on -S on")
	//c.SetDefault("collect.short.enable", true)
	//c.SetDefault("collect.short.command", "-a -o on -S on")

	//if you want to load a non-standard location system config file (~/drawbridge.yml), use ReadConfig
	c.SetConfigType("yaml")
	//c.SetConfigName("drawbridge")
@@ -72,7 +66,18 @@ func (c *configuration) Init() error {
	c.AutomaticEnv()

	//CLI options will be added via the `Set()` function
	return nil
	return c.ValidateConfig()
}

func (c *configuration) SubKeys(key string) []string {
	return c.Sub(key).AllKeys()
}

func (c *configuration) Sub(key string) Interface {
	config := configuration{
		Viper: c.Viper.Sub(key),
	}
	return &config
}

func (c *configuration) ReadConfig(configFilePath string) error {
@@ -115,24 +120,18 @@ func (c *configuration) ReadConfig(configFilePath string) error {
// This function ensures that the merged config works correctly.
func (c *configuration) ValidateConfig() error {

	////deserialize Questions
	//questionsMap := map[string]Question{}
	//err := c.UnmarshalKey("questions", &questionsMap)
	//
	//if err != nil {
	//	log.Printf("questions could not be deserialized correctly. %v", err)
	//	return err
	//}
	//
	//for _, v := range questionsMap {
	//
	//	typeContent, ok := v.Schema["type"].(string)
	//	if !ok || len(typeContent) == 0 {
	//		return errors.QuestionSyntaxError("`type` is required for questions")
	//	}
	//}
	//
	//
	//the following keys are deprecated, and no longer supported
	/*
		- notify.filter_attributes (replaced by metrics.status.filter_attributes SETTING)
		- notify.level (replaced by metrics.notify.level and metrics.status.threshold SETTING)
	*/
	//TODO add docs and upgrade doc.
	if c.IsSet("notify.filter_attributes") {
		return errors.ConfigValidationError("`notify.filter_attributes` configuration option is deprecated. Replaced by option in Dashboard Settings page")
	}
	if c.IsSet("notify.level") {
		return errors.ConfigValidationError("`notify.level` configuration option is deprecated. Replaced by option in Dashboard Settings page")
	}

	return nil
}

@@ -0,0 +1,34 @@
package config

import (
	"github.com/spf13/viper"
	"github.com/stretchr/testify/require"
	"testing"
)

func Test_MergeConfigMap(t *testing.T) {
	//setup
	testConfig := configuration{
		Viper: viper.New(),
	}
	testConfig.Set("user.dashboard_display", "hello")
	testConfig.SetDefault("user.layout", "hello")

	mergeSettings := map[string]interface{}{
		"user": map[string]interface{}{
			"dashboard_display": "dashboard_display",
			"layout":            "layout",
		},
	}
	//test
	err := testConfig.MergeConfigMap(mergeSettings)

	//verify
	require.NoError(t, err)

	// if using Set, the MergeConfigMap functionality will not override
	// if using SetDefault, the MergeConfigMap will override correctly
	require.Equal(t, "hello", testConfig.GetString("user.dashboard_display"))
	require.Equal(t, "layout", testConfig.GetString("user.layout"))

}
@@ -12,12 +12,17 @@ type Interface interface {
	WriteConfig() error
	Set(key string, value interface{})
	SetDefault(key string, value interface{})
	MergeConfigMap(cfg map[string]interface{}) error

	Sub(key string) Interface
	AllSettings() map[string]interface{}
	AllKeys() []string
	SubKeys(key string) []string
	IsSet(key string) bool
	Get(key string) interface{}
	GetBool(key string) bool
	GetInt(key string) int
	GetInt64(key string) int64
	GetString(key string) string
	GetStringSlice(key string) []string
	UnmarshalKey(key string, rawVal interface{}, decoderOpts ...viper.DecoderConfigOption) error

@@ -7,6 +7,7 @@ package mock_config
import (
	reflect "reflect"

	config "github.com/analogj/scrutiny/webapp/backend/pkg/config"
	gomock "github.com/golang/mock/gomock"
	viper "github.com/spf13/viper"
)
@@ -34,6 +35,20 @@ func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder {
	return m.recorder
}

// AllKeys mocks base method.
func (m *MockInterface) AllKeys() []string {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "AllKeys")
	ret0, _ := ret[0].([]string)
	return ret0
}

// AllKeys indicates an expected call of AllKeys.
func (mr *MockInterfaceMockRecorder) AllKeys() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllKeys", reflect.TypeOf((*MockInterface)(nil).AllKeys))
}

// AllSettings mocks base method.
func (m *MockInterface) AllSettings() map[string]interface{} {
	m.ctrl.T.Helper()
@@ -90,6 +105,20 @@ func (mr *MockInterfaceMockRecorder) GetInt(key interface{}) *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInt", reflect.TypeOf((*MockInterface)(nil).GetInt), key)
}

// GetInt64 mocks base method.
func (m *MockInterface) GetInt64(key string) int64 {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetInt64", key)
	ret0, _ := ret[0].(int64)
	return ret0
}

// GetInt64 indicates an expected call of GetInt64.
func (mr *MockInterfaceMockRecorder) GetInt64(key interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInt64", reflect.TypeOf((*MockInterface)(nil).GetInt64), key)
}

// GetString mocks base method.
func (m *MockInterface) GetString(key string) string {
	m.ctrl.T.Helper()
@@ -146,6 +175,20 @@ func (mr *MockInterfaceMockRecorder) IsSet(key interface{}) *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsSet", reflect.TypeOf((*MockInterface)(nil).IsSet), key)
}

// MergeConfigMap mocks base method.
func (m *MockInterface) MergeConfigMap(cfg map[string]interface{}) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "MergeConfigMap", cfg)
	ret0, _ := ret[0].(error)
	return ret0
}

// MergeConfigMap indicates an expected call of MergeConfigMap.
func (mr *MockInterfaceMockRecorder) MergeConfigMap(cfg interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MergeConfigMap", reflect.TypeOf((*MockInterface)(nil).MergeConfigMap), cfg)
}

// ReadConfig mocks base method.
func (m *MockInterface) ReadConfig(configFilePath string) error {
	m.ctrl.T.Helper()
@@ -184,6 +227,34 @@ func (mr *MockInterfaceMockRecorder) SetDefault(key, value interface{}) *gomock.
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDefault", reflect.TypeOf((*MockInterface)(nil).SetDefault), key, value)
}

// Sub mocks base method.
func (m *MockInterface) Sub(key string) config.Interface {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Sub", key)
	ret0, _ := ret[0].(config.Interface)
	return ret0
}

// Sub indicates an expected call of Sub.
func (mr *MockInterfaceMockRecorder) Sub(key interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Sub", reflect.TypeOf((*MockInterface)(nil).Sub), key)
}

// SubKeys mocks base method.
func (m *MockInterface) SubKeys(key string) []string {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "SubKeys", key)
	ret0, _ := ret[0].([]string)
	return ret0
}

// SubKeys indicates an expected call of SubKeys.
func (mr *MockInterfaceMockRecorder) SubKeys(key interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubKeys", reflect.TypeOf((*MockInterface)(nil).SubKeys), key)
}

// UnmarshalKey mocks base method.
func (m *MockInterface) UnmarshalKey(key string, rawVal interface{}, decoderOpts ...viper.DecoderConfigOption) error {
	m.ctrl.T.Helper()

@@ -4,25 +4,62 @@ const DeviceProtocolAta = "ATA"
const DeviceProtocolScsi = "SCSI"
const DeviceProtocolNvme = "NVMe"

const SmartAttributeStatusPassed = 0
const SmartAttributeStatusFailed = 1
const SmartAttributeStatusWarning = 2
//go:generate stringer -type=AttributeStatus
// AttributeStatus bitwise flag, 1,2,4,8,16,32,etc
type AttributeStatus uint8

const SmartWhenFailedFailingNow = "FAILING_NOW"
const SmartWhenFailedInThePast = "IN_THE_PAST"
const (
	AttributeStatusPassed          AttributeStatus = 0
	AttributeStatusFailedSmart     AttributeStatus = 1
	AttributeStatusWarningScrutiny AttributeStatus = 2
	AttributeStatusFailedScrutiny  AttributeStatus = 4
)

//const SmartStatusPassed = "passed"
//const SmartStatusFailed = "failed"
const AttributeWhenFailedFailingNow = "FAILING_NOW"
const AttributeWhenFailedInThePast = "IN_THE_PAST"

type DeviceStatus int
func AttributeStatusSet(b, flag AttributeStatus) AttributeStatus    { return b | flag }
func AttributeStatusClear(b, flag AttributeStatus) AttributeStatus  { return b &^ flag }
func AttributeStatusToggle(b, flag AttributeStatus) AttributeStatus { return b ^ flag }
func AttributeStatusHas(b, flag AttributeStatus) bool               { return b&flag != 0 }

//go:generate stringer -type=DeviceStatus
// DeviceStatus bitwise flag, 1,2,4,8,16,32,etc
type DeviceStatus uint8

const (
	DeviceStatusPassed DeviceStatus = 0
	DeviceStatusFailedSmart DeviceStatus = iota
	DeviceStatusFailedScrutiny DeviceStatus = iota
	DeviceStatusFailedSmart    DeviceStatus = 1
	DeviceStatusFailedScrutiny DeviceStatus = 2
)

func Set(b, flag DeviceStatus) DeviceStatus { return b | flag }
func Clear(b, flag DeviceStatus) DeviceStatus { return b &^ flag }
func Toggle(b, flag DeviceStatus) DeviceStatus { return b ^ flag }
func Has(b, flag DeviceStatus) bool { return b&flag != 0 }
func DeviceStatusSet(b, flag DeviceStatus) DeviceStatus    { return b | flag }
func DeviceStatusClear(b, flag DeviceStatus) DeviceStatus  { return b &^ flag }
func DeviceStatusToggle(b, flag DeviceStatus) DeviceStatus { return b ^ flag }
func DeviceStatusHas(b, flag DeviceStatus) bool            { return b&flag != 0 }

// Metrics Specific Filtering & Threshold Constants
type MetricsNotifyLevel int64

const (
	MetricsNotifyLevelWarn MetricsNotifyLevel = 1
	MetricsNotifyLevelFail MetricsNotifyLevel = 2
)

type MetricsStatusFilterAttributes int64

const (
	MetricsStatusFilterAttributesAll      MetricsStatusFilterAttributes = 0
	MetricsStatusFilterAttributesCritical MetricsStatusFilterAttributes = 1
)

// MetricsStatusThreshold bitwise flag, 1,2,4,8,16,32,etc
type MetricsStatusThreshold int64

const (
	MetricsStatusThresholdSmart    MetricsStatusThreshold = 1
	MetricsStatusThresholdScrutiny MetricsStatusThreshold = 2

	//shortcut
	MetricsStatusThresholdBoth MetricsStatusThreshold = 3
)

@@ -0,0 +1,12 @@
package database

import (
	"github.com/analogj/scrutiny/webapp/backend/pkg/models/measurements"
	"sort"
)

func sortSmartMeasurementsDesc(smartResults []measurements.Smart) {
	sort.SliceStable(smartResults, func(i, j int) bool {
		return smartResults[i].Date.After(smartResults[j].Date)
	})
}
@@ -0,0 +1,30 @@
package database

import (
	"github.com/analogj/scrutiny/webapp/backend/pkg/models/measurements"
	"github.com/stretchr/testify/require"
	"testing"
	"time"
)

func Test_sortSmartMeasurementsDesc_LatestFirst(t *testing.T) {
	//setup
	timeNow := time.Now()
	smartResults := []measurements.Smart{
		{
			Date: timeNow.AddDate(0, 0, -2),
		},
		{
			Date: timeNow,
		},
		{
			Date: timeNow.AddDate(0, 0, -1),
		},
	}

	//test
	sortSmartMeasurementsDesc(smartResults)

	//assert
	require.Equal(t, smartResults[0].Date, timeNow)
}
@@ -10,15 +10,14 @@ import (

type DeviceRepo interface {
	Close() error

	//GetSettings()
	//SaveSetting()
	HealthCheck(ctx context.Context) error

	RegisterDevice(ctx context.Context, dev models.Device) error
	GetDevices(ctx context.Context) ([]models.Device, error)
	UpdateDevice(ctx context.Context, wwn string, collectorSmartData collector.SmartInfo) (models.Device, error)
	UpdateDeviceStatus(ctx context.Context, wwn string, status pkg.DeviceStatus) (models.Device, error)
	GetDeviceDetails(ctx context.Context, wwn string) (models.Device, error)
	DeleteDevice(ctx context.Context, wwn string) error

	SaveSmartAttributes(ctx context.Context, wwn string, collectorSmartData collector.SmartInfo) (measurements.Smart, error)
	GetSmartAttributeHistory(ctx context.Context, wwn string, durationKey string, attributes []string) ([]measurements.Smart, error)
@@ -27,4 +26,7 @@ type DeviceRepo interface {

	GetSummary(ctx context.Context) (map[string]*models.DeviceSummary, error)
	GetSmartTemperatureHistory(ctx context.Context, durationKey string) (map[string][]measurements.SmartTemperature, error)

	LoadSettings(ctx context.Context) (*models.Settings, error)
	SaveSettings(ctx context.Context, settings models.Settings) error
}

@@ -0,0 +1,37 @@
package m20220503120000

import (
	"github.com/analogj/scrutiny/webapp/backend/pkg"
	"time"
)

// Deprecated: m20220503120000.Device is deprecated, only used by db migrations
type Device struct {
	//GORM attributes, see: http://gorm.io/docs/conventions.html
	CreatedAt time.Time
	UpdatedAt time.Time
	DeletedAt *time.Time

	WWN string `json:"wwn" gorm:"primary_key"`

	DeviceName     string `json:"device_name"`
	Manufacturer   string `json:"manufacturer"`
	ModelName      string `json:"model_name"`
	InterfaceType  string `json:"interface_type"`
	InterfaceSpeed string `json:"interface_speed"`
	SerialNumber   string `json:"serial_number"`
	Firmware       string `json:"firmware"`
	RotationSpeed  int    `json:"rotational_speed"`
	Capacity       int64  `json:"capacity"`
	FormFactor     string `json:"form_factor"`
	SmartSupport   bool   `json:"smart_support"`
	DeviceProtocol string `json:"device_protocol"` //protocol determines which smart attribute types are available (ATA, NVMe, SCSI)
	DeviceType     string `json:"device_type"`     //device type is used for querying with -d/t flag, should only be used by collector.

	// User provided metadata
	Label  string `json:"label"`
	HostId string `json:"host_id"`

	// Data set by Scrutiny
	DeviceStatus pkg.DeviceStatus `json:"device_status"`
}
@@ -0,0 +1,41 @@
package m20220509170100

import (
	"github.com/analogj/scrutiny/webapp/backend/pkg"
	"time"
)

type Device struct {
	//GORM attributes, see: http://gorm.io/docs/conventions.html
	CreatedAt time.Time
	UpdatedAt time.Time
	DeletedAt *time.Time

	WWN string `json:"wwn" gorm:"primary_key"`

	DeviceName     string `json:"device_name"`
	DeviceUUID     string `json:"device_uuid"`
	DeviceSerialID string `json:"device_serial_id"`
	DeviceLabel    string `json:"device_label"`

	Manufacturer   string `json:"manufacturer"`
	ModelName      string `json:"model_name"`
	InterfaceType  string `json:"interface_type"`
	InterfaceSpeed string `json:"interface_speed"`
	SerialNumber   string `json:"serial_number"`
	Firmware       string `json:"firmware"`
	RotationSpeed  int    `json:"rotational_speed"`
	Capacity       int64  `json:"capacity"`
	FormFactor     string `json:"form_factor"`
	SmartSupport   bool   `json:"smart_support"`
	DeviceProtocol string `json:"device_protocol"` //protocol determines which smart attribute types are available (ATA, NVMe, SCSI)
	DeviceType     string `json:"device_type"`     //device type is used for querying with -d/t flag, should only be used by collector.

	// User provided metadata
	Label  string `json:"label"`
	HostId string `json:"host_id"`

	// Data set by Scrutiny
	DeviceStatus pkg.DeviceStatus `json:"device_status"`
}

@@ -0,0 +1,18 @@
package m20220716214900

import (
    "gorm.io/gorm"
)

type Setting struct {
    //GORM attributes, see: http://gorm.io/docs/conventions.html
    gorm.Model

    SettingKeyName        string `json:"setting_key_name"`
    SettingKeyDescription string `json:"setting_key_description"`
    SettingDataType       string `json:"setting_data_type"`

    SettingValueNumeric int    `json:"setting_value_numeric"`
    SettingValueString  string `json:"setting_value_string"`
    SettingValueBool    bool   `json:"setting_value_bool"`
}
@@ -2,15 +2,20 @@ package database

import (
    "context"
    "crypto/tls"
    "encoding/json"
    "fmt"
    "github.com/analogj/scrutiny/webapp/backend/pkg/config"
    "github.com/analogj/scrutiny/webapp/backend/pkg/models"
    "github.com/glebarez/sqlite"
    influxdb2 "github.com/influxdata/influxdb-client-go/v2"
    "github.com/influxdata/influxdb-client-go/v2/api"
    "github.com/influxdata/influxdb-client-go/v2/domain"
    "github.com/sirupsen/logrus"
    "gorm.io/driver/sqlite"
    "gorm.io/gorm"
    "io/ioutil"
    "net/http"
    "net/url"
    "time"
)

@@ -58,7 +63,20 @@ func NewScrutinyRepository(appConfig config.Interface, globalLogger logrus.Field
    // Gorm/SQLite setup
    ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    globalLogger.Infof("Trying to connect to scrutiny sqlite db: %s\n", appConfig.GetString("web.database.location"))
    database, err := gorm.Open(sqlite.Open(appConfig.GetString("web.database.location")), &gorm.Config{

    // When a transaction cannot lock the database, because it is already locked by another one,
    // SQLite by default throws an error: database is locked. This behavior is usually not appropriate when
    // concurrent access is needed, typically when multiple processes write to the same database.
    // PRAGMA busy_timeout lets you set a timeout or a handler for these events. When setting a timeout,
    // SQLite will try the transaction multiple times within this timeout.
    // fixes #341
    // https://rsqlite.r-dbi.org/reference/sqlitesetbusyhandler
    // retrying for 30000 milliseconds, 30seconds - this would be unreasonable for a distributed multi-tenant application,
    // but should be fine for local usage.
    pragmaStr := sqlitePragmaString(map[string]string{
        "busy_timeout": "30000",
    })
    database, err := gorm.Open(sqlite.Open(appConfig.GetString("web.database.location")+pragmaStr), &gorm.Config{
        //TODO: figure out how to log database queries again.
        //Logger: logger
        DisableForeignKeyConstraintWhenMigrating: true,
@@ -75,36 +93,47 @@ func NewScrutinyRepository(appConfig config.Interface, globalLogger logrus.Field
    ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

    // Create a new client using an InfluxDB server base URL and an authentication token
    influxdbUrl := fmt.Sprintf("http://%s:%s", appConfig.GetString("web.influxdb.host"), appConfig.GetString("web.influxdb.port"))
    influxdbUrl := fmt.Sprintf("%s://%s:%s", appConfig.GetString("web.influxdb.scheme"), appConfig.GetString("web.influxdb.host"), appConfig.GetString("web.influxdb.port"))
    globalLogger.Debugf("InfluxDB url: %s", influxdbUrl)

    client := influxdb2.NewClient(influxdbUrl, appConfig.GetString("web.influxdb.token"))
    tlsConfig := &tls.Config{
        InsecureSkipVerify: appConfig.GetBool("web.influxdb.tls.insecure_skip_verify"),
    }
    globalLogger.Infof("InfluxDB certificate verification: %t\n", !tlsConfig.InsecureSkipVerify)

    if !appConfig.IsSet("web.influxdb.token") {
        globalLogger.Debugf("No influxdb token found, running first-time setup...")
    client := influxdb2.NewClientWithOptions(
        influxdbUrl,
        appConfig.GetString("web.influxdb.token"),
        influxdb2.DefaultOptions().SetTLSConfig(tlsConfig),
    )

    //if !appConfig.IsSet("web.influxdb.token") {
    globalLogger.Debugf("Determine Influxdb setup status...")
    influxSetupComplete, err := InfluxSetupComplete(influxdbUrl, tlsConfig)
    if err != nil {
        return nil, fmt.Errorf("failed to check influxdb setup status - %w", err)
    }

    if !influxSetupComplete {
        globalLogger.Debugf("Influxdb un-initialized, running first-time setup...")

        // if no token is provided, but we have a valid server, we're going to assume this is the first setup of our server.
        // we will initialize with a predetermined username & password, that you should change.

        // metrics bucket will have a retention period of 8 days (since it will be down-sampled once a week)
        // in seconds (60seconds * 60minutes * 24hours * 15 days) = 1_296_000 (see EnsureBucket() function)
        onboardingResponse, err := client.Setup(
        _, err := client.SetupWithToken(
            backgroundContext,
            appConfig.GetString("web.influxdb.init_username"),
            appConfig.GetString("web.influxdb.init_password"),
            appConfig.GetString("web.influxdb.org"),
            appConfig.GetString("web.influxdb.bucket"),
            0)
            0,
            appConfig.GetString("web.influxdb.token"),
        )
        if err != nil {
            return nil, err
        }

        appConfig.Set("web.influxdb.token", *onboardingResponse.Auth.Token)
        // we should write the config file out here. Ignore failures.
        err = appConfig.WriteConfig()
        if err != nil {
            globalLogger.Infof("ignoring error while writing influxdb info to config: %v", err)
        }
    }

    // Use blocking write client for writes to desired bucket
@@ -176,6 +205,61 @@ func (sr *scrutinyRepository) Close() error {
    return nil
}

func (sr *scrutinyRepository) HealthCheck(ctx context.Context) error {
    //check influxdb
    status, err := sr.influxClient.Health(ctx)
    if err != nil {
        return fmt.Errorf("influxdb healthcheck failed: %w", err)
    }
    if status.Status != "pass" {
        return fmt.Errorf("influxdb healthcheck failed: status=%s", status.Status)
    }

    //check sqlite db.
    database, err := sr.gormClient.DB()
    if err != nil {
        return fmt.Errorf("sqlite healthcheck failed: %w", err)
    }
    err = database.Ping()
    if err != nil {
        return fmt.Errorf("sqlite healthcheck failed during ping: %w", err)
    }
    return nil

}

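HealthCheck is surfaced through the web API; a minimal sketch of wiring it into a gin handler (the handler name and context key are assumptions for illustration, not the project's exact code):

    // hypothetical wiring; scrutiny's real handler lives in the webapp backend
    func HealthCheckHandler(c *gin.Context) {
        deviceRepo := c.MustGet("DEVICE_REPOSITORY").(database.DeviceRepo) // assumed context key
        if err := deviceRepo.HealthCheck(c); err != nil {
            c.JSON(http.StatusInternalServerError, gin.H{"success": false})
            return
        }
        c.JSON(http.StatusOK, gin.H{"success": true})
    }
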
func InfluxSetupComplete(influxEndpoint string, tlsConfig *tls.Config) (bool, error) {
    influxUri, err := url.Parse(influxEndpoint)
    if err != nil {
        return false, err
    }
    influxUri, err = influxUri.Parse("/api/v2/setup")
    if err != nil {
        return false, err
    }

    client := &http.Client{Transport: &http.Transport{TLSClientConfig: tlsConfig}}
    res, err := client.Get(influxUri.String())
    if err != nil {
        return false, err
    }

    body, err := ioutil.ReadAll(res.Body)
    if err != nil {
        return false, err
    }

    type SetupStatus struct {
        Allowed bool `json:"allowed"`
    }
    var data SetupStatus
    err = json.Unmarshal(body, &data)
    if err != nil {
        return false, err
    }
    return !data.Allowed, nil
}

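InfluxSetupComplete decides whether onboarding is needed by probing the stock endpoint instead of trusting a locally stored token; per the InfluxDB v2 API, the endpoint returns a single boolean:

    GET /api/v2/setup
    {"allowed": true}     // fresh server, onboarding still required -> InfluxSetupComplete returns false
    {"allowed": false}    // server already onboarded -> InfluxSetupComplete returns true
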
func (sr *scrutinyRepository) EnsureBuckets(ctx context.Context, org *domain.Organization) error {

    var mainBucketRetentionRule domain.RetentionRule
@@ -205,21 +289,29 @@ func (sr *scrutinyRepository) EnsureBuckets(ctx context.Context, org *domain.Org

    //create buckets (used for downsampling)
    weeklyBucket := fmt.Sprintf("%s_weekly", sr.appConfig.GetString("web.influxdb.bucket"))
    if _, foundErr := sr.influxClient.BucketsAPI().FindBucketByName(ctx, weeklyBucket); foundErr != nil {
    if foundWeeklyBucket, foundErr := sr.influxClient.BucketsAPI().FindBucketByName(ctx, weeklyBucket); foundErr != nil {
        // metrics_weekly bucket will have a retention period of 8+1 weeks (since it will be down-sampled once a month)
        _, err := sr.influxClient.BucketsAPI().CreateBucketWithName(ctx, org, weeklyBucket, weeklyBucketRetentionRule)
        if err != nil {
            return err
        }
    } else if sr.appConfig.GetBool("web.influxdb.retention_policy") {
        //correctly set the retention period for the bucket (may not be able to do it during setup/creation)
        foundWeeklyBucket.RetentionRules = domain.RetentionRules{weeklyBucketRetentionRule}
        sr.influxClient.BucketsAPI().UpdateBucket(ctx, foundWeeklyBucket)
    }

    monthlyBucket := fmt.Sprintf("%s_monthly", sr.appConfig.GetString("web.influxdb.bucket"))
    if _, foundErr := sr.influxClient.BucketsAPI().FindBucketByName(ctx, monthlyBucket); foundErr != nil {
    if foundMonthlyBucket, foundErr := sr.influxClient.BucketsAPI().FindBucketByName(ctx, monthlyBucket); foundErr != nil {
        // metrics_monthly bucket will have a retention period of 24+1 months (since it will be down-sampled once a year)
        _, err := sr.influxClient.BucketsAPI().CreateBucketWithName(ctx, org, monthlyBucket, monthlyBucketRetentionRule)
        if err != nil {
            return err
        }
    } else if sr.appConfig.GetBool("web.influxdb.retention_policy") {
        //correctly set the retention period for the bucket (may not be able to do it during setup/creation)
        foundMonthlyBucket.RetentionRules = domain.RetentionRules{monthlyBucketRetentionRule}
        sr.influxClient.BucketsAPI().UpdateBucket(ctx, foundMonthlyBucket)
    }

    yearlyBucket := fmt.Sprintf("%s_yearly", sr.appConfig.GetString("web.influxdb.bucket"))
@@ -405,3 +497,16 @@ func (sr *scrutinyRepository) lookupNestedDurationKeys(durationKey string) []str
    }
    return []string{DURATION_KEY_WEEK}
}

func sqlitePragmaString(pragmas map[string]string) string {
    q := url.Values{}
    for key, val := range pragmas {
        q.Add("_pragma", key+"="+val)
    }

    queryStr := q.Encode()
    if len(queryStr) > 0 {
        return "?" + queryStr
    }
    return ""
}

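The helper URL-encodes each pragma into a `_pragma` query parameter, which the glebarez (modernc-based) sqlite driver applies to every new connection; a minimal sketch of the resulting DSN (the database path is illustrative):

    dsn := "/opt/scrutiny/config/scrutiny.db" + sqlitePragmaString(map[string]string{"busy_timeout": "30000"})
    // dsn == "/opt/scrutiny/config/scrutiny.db?_pragma=busy_timeout%3D30000"
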
@@ -7,6 +7,7 @@ import (
    "github.com/analogj/scrutiny/webapp/backend/pkg/models"
    "github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
    "gorm.io/gorm/clause"
    "time"
)

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -18,7 +19,7 @@ import (
func (sr *scrutinyRepository) RegisterDevice(ctx context.Context, dev models.Device) error {
    if err := sr.gormClient.WithContext(ctx).Clauses(clause.OnConflict{
        Columns:   []clause.Column{{Name: "wwn"}},
        DoUpdates: clause.AssignmentColumns([]string{"host_id", "device_name", "device_type"}),
        DoUpdates: clause.AssignmentColumns([]string{"host_id", "device_name", "device_type", "device_uuid", "device_serial_id", "device_label"}),
    }).Create(&dev).Error; err != nil {
        return err
    }
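With the OnConflict clause above, gorm renders the Create as an upsert keyed on the WWN; roughly the SQL it emits (column list abbreviated for illustration):

    INSERT INTO devices (wwn, host_id, device_name, device_type, device_uuid, device_serial_id, device_label, ...)
    VALUES (...)
    ON CONFLICT (wwn) DO UPDATE SET
        host_id = excluded.host_id,
        device_name = excluded.device_name,
        device_type = excluded.device_type,
        device_uuid = excluded.device_uuid,
        device_serial_id = excluded.device_serial_id,
        device_label = excluded.device_label
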
@@ -57,7 +58,7 @@ func (sr *scrutinyRepository) UpdateDeviceStatus(ctx context.Context, wwn string
        return device, fmt.Errorf("Could not get device from DB: %v", err)
    }

    device.DeviceStatus = pkg.Set(device.DeviceStatus, status)
    device.DeviceStatus = pkg.DeviceStatusSet(device.DeviceStatus, status)
    return device, sr.gormClient.Model(&device).Updates(device).Error
}

@@ -72,3 +73,33 @@ func (sr *scrutinyRepository) GetDeviceDetails(ctx context.Context, wwn string)

    return device, nil
}

func (sr *scrutinyRepository) DeleteDevice(ctx context.Context, wwn string) error {
    if err := sr.gormClient.WithContext(ctx).Where("wwn = ?", wwn).Delete(&models.Device{}).Error; err != nil {
        return err
    }

    //delete data from influxdb.
    buckets := []string{
        sr.appConfig.GetString("web.influxdb.bucket"),
        fmt.Sprintf("%s_weekly", sr.appConfig.GetString("web.influxdb.bucket")),
        fmt.Sprintf("%s_monthly", sr.appConfig.GetString("web.influxdb.bucket")),
        fmt.Sprintf("%s_yearly", sr.appConfig.GetString("web.influxdb.bucket")),
    }

    for _, bucket := range buckets {
        sr.logger.Infof("Deleting data for %s in bucket: %s", wwn, bucket)
        if err := sr.influxClient.DeleteAPI().DeleteWithName(
            ctx,
            sr.appConfig.GetString("web.influxdb.org"),
            bucket,
            time.Now().AddDate(-10, 0, 0),
            time.Now(),
            fmt.Sprintf(`device_wwn="%s"`, wwn),
        ); err != nil {
            return err
        }
    }

    return nil
}

@@ -29,6 +29,7 @@ func (sr *scrutinyRepository) SaveSmartAttributes(ctx context.Context, wwn strin
    return deviceSmartData, sr.saveDatapoint(sr.influxWriteApi, "smart", tags, fields, deviceSmartData.Date, ctx)
}

// GetSmartAttributeHistory MUST return in sorted order, where newest entries are at the beginning of the list, and oldest are at the end.
func (sr *scrutinyRepository) GetSmartAttributeHistory(ctx context.Context, wwn string, durationKey string, attributes []string) ([]measurements.Smart, error) {
    // Get SmartResults from InfluxDB

@@ -64,6 +65,9 @@ func (sr *scrutinyRepository) GetSmartAttributeHistory(ctx context.Context, wwn
        return nil, err
    }

    //we have to sort the smartResults again, because the `union` command will return multiple 'tables' and only sort the records in each table.
    sortSmartMeasurementsDesc(smartResults)

    return smartResults, nil

    //if err := device.SquashHistory(); err != nil {

@@ -2,13 +2,20 @@ package database

import (
    "context"
    "errors"
    "fmt"
    "github.com/analogj/scrutiny/webapp/backend/pkg"
    "github.com/analogj/scrutiny/webapp/backend/pkg/database/migrations/m20201107210306"
    "github.com/analogj/scrutiny/webapp/backend/pkg/database/migrations/m20220503120000"
    "github.com/analogj/scrutiny/webapp/backend/pkg/database/migrations/m20220509170100"
    "github.com/analogj/scrutiny/webapp/backend/pkg/database/migrations/m20220716214900"
    "github.com/analogj/scrutiny/webapp/backend/pkg/models"
    "github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
    "github.com/analogj/scrutiny/webapp/backend/pkg/models/measurements"
    _ "github.com/glebarez/sqlite"
    "github.com/go-gormigrate/gormigrate/v2"
    _ "github.com/jinzhu/gorm/dialects/sqlite"
    "github.com/influxdata/influxdb-client-go/v2/api/http"
    log "github.com/sirupsen/logrus"
    "gorm.io/gorm"
    "strconv"
    "time"
@@ -21,9 +28,12 @@ import (

func (sr *scrutinyRepository) Migrate(ctx context.Context) error {

    sr.logger.Infoln("Database migration starting")
    sr.logger.Infoln("Database migration starting. Please wait, this process may take a long time....")

    m := gormigrate.New(sr.gormClient, gormigrate.DefaultOptions, []*gormigrate.Migration{
    gormMigrateOptions := gormigrate.DefaultOptions
    gormMigrateOptions.UseTransaction = true

    m := gormigrate.New(sr.gormClient, gormMigrateOptions, []*gormigrate.Migration{
        {
            ID: "20201107210306", // v0.3.13 (pre-influxdb schema). 9fac3c6308dc6cb6cd5bbc43a68cd93e8fb20b87
            Migrate: func(tx *gorm.DB) error {
@@ -37,16 +47,6 @@ func (sr *scrutinyRepository) Migrate(ctx context.Context) error {
                    &m20201107210306.SmartNvmeAttribute{},
                )
            },
            Rollback: func(tx *gorm.DB) error {
                return tx.Migrator().DropTable(
                    &m20201107210306.Device{},
                    &m20201107210306.Smart{},
                    &m20201107210306.SmartAtaAttribute{},
                    &m20201107210306.SmartNvmeAttribute{},
                    &m20201107210306.SmartNvmeAttribute{},
                    "self_tests",
                )
            },
        },
        {
            ID: "20220503113100", // backwards compatible - influxdb schema
@@ -136,7 +136,7 @@ func (sr *scrutinyRepository) Migrate(ctx context.Context) error {
                    smartTags,
                    smartFields,
                    postSmartResults.Date, ctx)
                if err != nil {
                    if ignorePastRetentionPolicyError(err) != nil {
                        return err
                    }

@@ -146,7 +146,7 @@ func (sr *scrutinyRepository) Migrate(ctx context.Context) error {
                    tempTags,
                    tempFields,
                    postSmartResults.Date, ctx)
                if err != nil {
                    if ignorePastRetentionPolicyError(err) != nil {
                        return err
                    }
                }
@@ -165,7 +165,7 @@ func (sr *scrutinyRepository) Migrate(ctx context.Context) error {
                    smartFields,
                    postSmartResults.Date, ctx)

                if err != nil {
                    if ignorePastRetentionPolicyError(err) != nil {
                        return err
                    }

@@ -175,7 +175,7 @@ func (sr *scrutinyRepository) Migrate(ctx context.Context) error {
                    tempTags,
                    tempFields,
                    postSmartResults.Date, ctx)
                if err != nil {
                    if ignorePastRetentionPolicyError(err) != nil {
                        return err
                    }
                }
@@ -193,7 +193,7 @@ func (sr *scrutinyRepository) Migrate(ctx context.Context) error {
                    smartTags,
                    smartFields,
                    postSmartResults.Date, ctx)
                if err != nil {
                    if ignorePastRetentionPolicyError(err) != nil {
                        return err
                    }

@@ -203,7 +203,7 @@ func (sr *scrutinyRepository) Migrate(ctx context.Context) error {
                    tempTags,
                    tempFields,
                    postSmartResults.Date, ctx)
                if err != nil {
                    if ignorePastRetentionPolicyError(err) != nil {
                        return err
                    }
                }
@@ -220,7 +220,7 @@ func (sr *scrutinyRepository) Migrate(ctx context.Context) error {
                    smartTags,
                    smartFields,
                    postSmartResults.Date, ctx)
                if err != nil {
                    if ignorePastRetentionPolicyError(err) != nil {
                        return err
                    }

@@ -230,7 +230,7 @@ func (sr *scrutinyRepository) Migrate(ctx context.Context) error {
                    tempTags,
                    tempFields,
                    postSmartResults.Date, ctx)
                if err != nil {
                    if ignorePastRetentionPolicyError(err) != nil {
                        return err
                    }
                }
@@ -256,20 +256,168 @@ func (sr *scrutinyRepository) Migrate(ctx context.Context) error {
                return err
            }

            //migrate the device database to the final version
            return tx.AutoMigrate(models.Device{})
            //migrate the device database
            return tx.AutoMigrate(m20220503120000.Device{})
        },
    },
    {
        ID: "m20220509170100", // addl udev device data
        Migrate: func(tx *gorm.DB) error {

            //migrate the device database.
            // adding addl columns (device_label, device_uuid, device_serial_id)
            return tx.AutoMigrate(m20220509170100.Device{})
        },
    },
    {
        ID: "m20220709181300",
        Migrate: func(tx *gorm.DB) error {

            // delete devices with empty `wwn` field (they are impossible to delete manually), and are invalid.
            return tx.Where("wwn = ?", "").Delete(&models.Device{}).Error
        },
    },
    {
        ID: "m20220716214900", // add settings table.
        Migrate: func(tx *gorm.DB) error {

            // adding the settings table.
            err := tx.AutoMigrate(m20220716214900.Setting{})
            if err != nil {
                return err
            }
            //add defaults.

            var defaultSettings = []m20220716214900.Setting{
                {
                    SettingKeyName:        "theme",
                    SettingKeyDescription: "Frontend theme ('light' | 'dark' | 'system')",
                    SettingDataType:       "string",
                    SettingValueString:    "system", // options: 'light' | 'dark' | 'system'
                },
                {
                    SettingKeyName:        "layout",
                    SettingKeyDescription: "Frontend layout ('material')",
                    SettingDataType:       "string",
                    SettingValueString:    "material",
                },
                {
                    SettingKeyName:        "dashboard_display",
                    SettingKeyDescription: "Frontend device display title ('name' | 'serial_id' | 'uuid' | 'label')",
                    SettingDataType:       "string",
                    SettingValueString:    "name",
                },
                {
                    SettingKeyName:        "dashboard_sort",
                    SettingKeyDescription: "Frontend device sort by ('status' | 'title' | 'age')",
                    SettingDataType:       "string",
                    SettingValueString:    "status",
                },
                {
                    SettingKeyName:        "temperature_unit",
                    SettingKeyDescription: "Frontend temperature unit ('celsius' | 'fahrenheit')",
                    SettingDataType:       "string",
                    SettingValueString:    "celsius",
                },
                {
                    SettingKeyName:        "file_size_si_units",
                    SettingKeyDescription: "File size in SI units (true | false)",
                    SettingDataType:       "bool",
                    SettingValueBool:      false,
                },
                {
                    SettingKeyName:        "line_stroke",
                    SettingKeyDescription: "Temperature chart line stroke ('smooth' | 'straight' | 'stepline')",
                    SettingDataType:       "string",
                    SettingValueString:    "smooth",
                },

                {
                    SettingKeyName:        "metrics.notify_level",
                    SettingKeyDescription: "Determines which device status will cause a notification (fail or warn)",
                    SettingDataType:       "numeric",
                    SettingValueNumeric:   int(pkg.MetricsNotifyLevelFail), // options: 'fail' or 'warn'
                },
                {
                    SettingKeyName:        "metrics.status_filter_attributes",
                    SettingKeyDescription: "Determines which attributes should impact device status",
                    SettingDataType:       "numeric",
                    SettingValueNumeric:   int(pkg.MetricsStatusFilterAttributesAll), // options: 'all' or 'critical'
                },
                {
                    SettingKeyName:        "metrics.status_threshold",
                    SettingKeyDescription: "Determines which threshold should impact device status",
                    SettingDataType:       "numeric",
                    SettingValueNumeric:   int(pkg.MetricsStatusThresholdBoth), // options: 'scrutiny', 'smart', 'both'
                },
            }
            return tx.Create(&defaultSettings).Error
        },
    },
    {
        ID: "m20221115214900", // add line_stroke setting.
        Migrate: func(tx *gorm.DB) error {
            //add line_stroke setting default.
            var defaultSettings = []m20220716214900.Setting{
                {
                    SettingKeyName:        "line_stroke",
                    SettingKeyDescription: "Temperature chart line stroke ('smooth' | 'straight' | 'stepline')",
                    SettingDataType:       "string",
                    SettingValueString:    "smooth",
                },
            }
            return tx.Create(&defaultSettings).Error
        },
    },
    })

    if err := m.Migrate(); err != nil {
        sr.logger.Errorf("Database migration failed with error: %w", err)
        sr.logger.Errorf("Database migration failed with error. \n Please open a github issue at https://github.com/AnalogJ/scrutiny and attach a copy of your scrutiny.db file. \n %v", err)
        return err
    }
    sr.logger.Infoln("Database migration completed successfully")

    //these migrations cannot be done within a transaction, so they are done as a separate group, with `UseTransaction = false`
    sr.logger.Infoln("SQLite global configuration migrations starting. Please wait....")
    globalMigrateOptions := gormigrate.DefaultOptions
    globalMigrateOptions.UseTransaction = false
    gm := gormigrate.New(sr.gormClient, globalMigrateOptions, []*gormigrate.Migration{
        {
            ID: "g20220802211500",
            Migrate: func(tx *gorm.DB) error {
                //shrink the Database (maybe necessary after 20220503113100)
                if err := tx.Exec("VACUUM;").Error; err != nil {
                    return err
                }
                return nil
            },
        },
    })

    if err := gm.Migrate(); err != nil {
        sr.logger.Errorf("SQLite global configuration migrations failed with error. \n Please open a github issue at https://github.com/AnalogJ/scrutiny and attach a copy of your scrutiny.db file. \n %v", err)
        return err
    }
    sr.logger.Infoln("SQLite global configuration migrations completed successfully")

    return nil
}

// helpers

//When adding data to influxdb, an error may be returned if the data point is outside the range of the retention policy.
//This function will ignore retention policy errors, and allow the migration to continue.
func ignorePastRetentionPolicyError(err error) error {
    var influxDbWriteError *http.Error
    if errors.As(err, &influxDbWriteError) {
        if influxDbWriteError.StatusCode == 422 {
            log.Infoln("ignoring error: attempted to writePoint past retention period duration")
            return nil
        }
    }
    return err
}

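InfluxDB rejects a write whose timestamp predates the bucket's retention window with an HTTP 422, which is exactly what the helper filters for; a small sketch of the behavior, constructing the client library's error type directly for illustration:

    // a write older than the bucket retention surfaces as an *http.Error with StatusCode 422
    err := fmt.Errorf("write failed: %w", &http.Error{StatusCode: 422})
    fmt.Println(ignorePastRetentionPolicyError(err)) // <nil> - the datapoint is skipped and the migration continues
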
// Deprecated
func m20201107210306_FromPreInfluxDBTempCreatePostInfluxDBTemp(preDevice m20201107210306.Device, preSmartResult m20201107210306.Smart) (error, measurements.SmartTemperature) {
    //extract temperature data for every datapoint

@@ -0,0 +1,85 @@
package database

import (
    "context"
    "fmt"
    "github.com/analogj/scrutiny/webapp/backend/pkg/config"
    "github.com/analogj/scrutiny/webapp/backend/pkg/models"
    "github.com/mitchellh/mapstructure"
    "strings"
)

// LoadSettings will retrieve settings from the database, store them in the AppConfig object, and return a Settings struct
func (sr *scrutinyRepository) LoadSettings(ctx context.Context) (*models.Settings, error) {
    settingsEntries := []models.SettingEntry{}
    if err := sr.gormClient.WithContext(ctx).Find(&settingsEntries).Error; err != nil {
        return nil, fmt.Errorf("Could not get settings from DB: %v", err)
    }

    // store retrieved settings in the AppConfig obj
    for _, settingsEntry := range settingsEntries {
        configKey := fmt.Sprintf("%s.%s", config.DB_USER_SETTINGS_SUBKEY, settingsEntry.SettingKeyName)

        if settingsEntry.SettingDataType == "numeric" {
            sr.appConfig.SetDefault(configKey, settingsEntry.SettingValueNumeric)
        } else if settingsEntry.SettingDataType == "string" {
            sr.appConfig.SetDefault(configKey, settingsEntry.SettingValueString)
        } else if settingsEntry.SettingDataType == "bool" {
            sr.appConfig.SetDefault(configKey, settingsEntry.SettingValueBool)
        }
    }

    // unmarshal the dbsetting object data to a settings object.
    var settings models.Settings
    err := sr.appConfig.UnmarshalKey(config.DB_USER_SETTINGS_SUBKEY, &settings)
    if err != nil {
        return nil, err
    }
    return &settings, nil
}

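LoadSettings round-trips every row through the config tree before unmarshalling it into a typed struct; a sketch of the keys this produces, assuming DB_USER_SETTINGS_SUBKEY resolves to a prefix such as "settings" (the actual constant lives in the config package):

    // hypothetical key layout after LoadSettings, one key per settings row:
    //   settings.theme                = "system"
    //   settings.dashboard_display    = "name"
    //   settings.metrics.notify_level = int(pkg.MetricsNotifyLevelFail)
    settings, err := deviceRepo.LoadSettings(ctx) // settings.Theme == "system" on a fresh install
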
// testing
// curl -d '{"metrics": { "notify_level": 5, "status_filter_attributes": 5, "status_threshold": 5 }}' -H "Content-Type: application/json" -X POST http://localhost:9090/api/settings
// SaveSettings will update settings in AppConfig object, then save the settings to the database.
func (sr *scrutinyRepository) SaveSettings(ctx context.Context, settings models.Settings) error {
    //save the entries to the appconfig
    settingsMap := &map[string]interface{}{}
    err := mapstructure.Decode(settings, &settingsMap)
    if err != nil {
        return err
    }
    settingsWrapperMap := map[string]interface{}{}
    settingsWrapperMap[config.DB_USER_SETTINGS_SUBKEY] = *settingsMap
    err = sr.appConfig.MergeConfigMap(settingsWrapperMap)
    if err != nil {
        return err
    }
    sr.logger.Debugf("after merge settings: %v", sr.appConfig.AllSettings())
    //retrieve current settings from the database
    settingsEntries := []models.SettingEntry{}
    if err := sr.gormClient.WithContext(ctx).Find(&settingsEntries).Error; err != nil {
        return fmt.Errorf("Could not get settings from DB: %v", err)
    }

    //update settingsEntries
    for ndx, settingsEntry := range settingsEntries {
        configKey := fmt.Sprintf("%s.%s", config.DB_USER_SETTINGS_SUBKEY, strings.ToLower(settingsEntry.SettingKeyName))

        if settingsEntry.SettingDataType == "numeric" {
            settingsEntries[ndx].SettingValueNumeric = sr.appConfig.GetInt(configKey)
        } else if settingsEntry.SettingDataType == "string" {
            settingsEntries[ndx].SettingValueString = sr.appConfig.GetString(configKey)
        } else if settingsEntry.SettingDataType == "bool" {
            settingsEntries[ndx].SettingValueBool = sr.appConfig.GetBool(configKey)
        }

        // store in database.
        //TODO: this should be `sr.gormClient.Updates(&settingsEntries).Error`
        err := sr.gormClient.Model(&models.SettingEntry{}).Where([]uint{settingsEntry.ID}).Select("setting_value_numeric", "setting_value_string", "setting_value_bool").Updates(settingsEntries[ndx]).Error
        if err != nil {
            return err
        }

    }
    return nil
}
@@ -11,35 +11,71 @@ import (
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
func (sr *scrutinyRepository) EnsureTasks(ctx context.Context, orgID string) error {
    weeklyTaskName := "tsk-weekly-aggr"
    weeklyTaskScript := sr.DownsampleScript("weekly", weeklyTaskName, "0 1 * * 0")
    if found, findErr := sr.influxTaskApi.FindTasks(ctx, &api.TaskFilter{Name: weeklyTaskName}); findErr == nil && len(found) == 0 {
        //weekly on Sunday at 1:00am
        _, err := sr.influxTaskApi.CreateTaskWithCron(ctx, weeklyTaskName, sr.DownsampleScript("weekly"), "0 1 * * 0", orgID)
        _, err := sr.influxTaskApi.CreateTaskByFlux(ctx, weeklyTaskScript, orgID)
        if err != nil {
            return err
        }
    } else if len(found) == 1 {
        //check if we should update
        task := &found[0]
        if weeklyTaskScript != task.Flux {
            sr.logger.Infoln("updating weekly task script")
            task.Flux = weeklyTaskScript
            _, err := sr.influxTaskApi.UpdateTask(ctx, task)
            if err != nil {
                return err
            }
        }
    }

    monthlyTaskName := "tsk-monthly-aggr"
    monthlyTaskScript := sr.DownsampleScript("monthly", monthlyTaskName, "30 1 1 * *")
    if found, findErr := sr.influxTaskApi.FindTasks(ctx, &api.TaskFilter{Name: monthlyTaskName}); findErr == nil && len(found) == 0 {
        //monthly on first day of the month at 1:30am
        _, err := sr.influxTaskApi.CreateTaskWithCron(ctx, monthlyTaskName, sr.DownsampleScript("monthly"), "30 1 1 * *", orgID)
        _, err := sr.influxTaskApi.CreateTaskByFlux(ctx, monthlyTaskScript, orgID)
        if err != nil {
            return err
        }
    } else if len(found) == 1 {
        //check if we should update
        task := &found[0]
        if monthlyTaskScript != task.Flux {
            sr.logger.Infoln("updating monthly task script")
            task.Flux = monthlyTaskScript
            _, err := sr.influxTaskApi.UpdateTask(ctx, task)
            if err != nil {
                return err
            }
        }
    }

    yearlyTaskName := "tsk-yearly-aggr"
    yearlyTaskScript := sr.DownsampleScript("yearly", yearlyTaskName, "0 2 1 1 *")
    if found, findErr := sr.influxTaskApi.FindTasks(ctx, &api.TaskFilter{Name: yearlyTaskName}); findErr == nil && len(found) == 0 {
        //yearly on the first day of the year at 2:00am
        _, err := sr.influxTaskApi.CreateTaskWithCron(ctx, yearlyTaskName, sr.DownsampleScript("yearly"), "0 2 1 1 *", orgID)
        _, err := sr.influxTaskApi.CreateTaskByFlux(ctx, yearlyTaskScript, orgID)
        if err != nil {
            return err
        }
    } else if len(found) == 1 {
        //check if we should update
        task := &found[0]
        if yearlyTaskScript != task.Flux {
            sr.logger.Infoln("updating yearly task script")
            task.Flux = yearlyTaskScript
            _, err := sr.influxTaskApi.UpdateTask(ctx, task)
            if err != nil {
                return err
            }
        }
    }
    return nil
}

func (sr *scrutinyRepository) DownsampleScript(aggregationType string) string {
func (sr *scrutinyRepository) DownsampleScript(aggregationType string, name string, cron string) string {
    var sourceBucket string // the source of the data
    var destBucket string   // the destination for the aggregated data
    var rangeStart string
@@ -88,30 +124,37 @@ func (sr *scrutinyRepository) DownsampleScript(aggregationType string) string {
    */

    return fmt.Sprintf(`
sourceBucket = "%s"
rangeStart = %s
rangeEnd = %s
aggWindow = %s
destBucket = "%s"
destOrg = "%s"
option task = {
    name: "%s",
    cron: "%s",
}

from(bucket: sourceBucket)
|> range(start: rangeStart, stop: rangeEnd)
|> filter(fn: (r) => r["_measurement"] == "smart" )
|> group(columns: ["device_wwn", "_field"])
|> aggregateWindow(every: aggWindow, fn: last, createEmpty: false)
|> to(bucket: destBucket, org: destOrg)
sourceBucket = "%s"
rangeStart = %s
rangeEnd = %s
aggWindow = %s
destBucket = "%s"
destOrg = "%s"

temp_data = from(bucket: sourceBucket)
|> range(start: rangeStart, stop: rangeEnd)
|> filter(fn: (r) => r["_measurement"] == "temp")
|> group(columns: ["device_wwn"])
|> toInt()
from(bucket: sourceBucket)
|> range(start: rangeStart, stop: rangeEnd)
|> filter(fn: (r) => r["_measurement"] == "smart" )
|> group(columns: ["device_wwn", "_field"])
|> aggregateWindow(every: aggWindow, fn: last, createEmpty: false)
|> to(bucket: destBucket, org: destOrg)

temp_data
|> aggregateWindow(fn: mean, every: aggWindow)
|> to(bucket: destBucket, org: destOrg)
from(bucket: sourceBucket)
|> range(start: rangeStart, stop: rangeEnd)
|> filter(fn: (r) => r["_measurement"] == "temp")
|> group(columns: ["device_wwn"])
|> toInt()
|> aggregateWindow(fn: mean, every: aggWindow, createEmpty: false)
|> set(key: "_measurement", value: "temp")
|> set(key: "_field", value: "temp")
|> to(bucket: destBucket, org: destOrg)
`,
        name,
        cron,
        sourceBucket,
        rangeStart,
        rangeEnd,

@@ -0,0 +1,164 @@
package database

import (
    mock_config "github.com/analogj/scrutiny/webapp/backend/pkg/config/mock"
    "github.com/golang/mock/gomock"
    "github.com/stretchr/testify/require"
    "testing"
)

func Test_DownsampleScript_Weekly(t *testing.T) {
    t.Parallel()

    //setup
    mockCtrl := gomock.NewController(t)
    defer mockCtrl.Finish()
    fakeConfig := mock_config.NewMockInterface(mockCtrl)
    fakeConfig.EXPECT().GetString("web.influxdb.bucket").Return("metrics").AnyTimes()
    fakeConfig.EXPECT().GetString("web.influxdb.org").Return("scrutiny").AnyTimes()

    deviceRepo := scrutinyRepository{
        appConfig: fakeConfig,
    }

    aggregationType := "weekly"

    //test
    influxDbScript := deviceRepo.DownsampleScript(aggregationType, "tsk-weekly-aggr", "0 1 * * 0")

    //assert
    require.Equal(t, `
option task = {
    name: "tsk-weekly-aggr",
    cron: "0 1 * * 0",
}

sourceBucket = "metrics"
rangeStart = -2w
rangeEnd = -1w
aggWindow = 1w
destBucket = "metrics_weekly"
destOrg = "scrutiny"

from(bucket: sourceBucket)
|> range(start: rangeStart, stop: rangeEnd)
|> filter(fn: (r) => r["_measurement"] == "smart" )
|> group(columns: ["device_wwn", "_field"])
|> aggregateWindow(every: aggWindow, fn: last, createEmpty: false)
|> to(bucket: destBucket, org: destOrg)

from(bucket: sourceBucket)
|> range(start: rangeStart, stop: rangeEnd)
|> filter(fn: (r) => r["_measurement"] == "temp")
|> group(columns: ["device_wwn"])
|> toInt()
|> aggregateWindow(fn: mean, every: aggWindow, createEmpty: false)
|> set(key: "_measurement", value: "temp")
|> set(key: "_field", value: "temp")
|> to(bucket: destBucket, org: destOrg)
`, influxDbScript)
}

func Test_DownsampleScript_Monthly(t *testing.T) {
    t.Parallel()

    //setup
    mockCtrl := gomock.NewController(t)
    defer mockCtrl.Finish()
    fakeConfig := mock_config.NewMockInterface(mockCtrl)
    fakeConfig.EXPECT().GetString("web.influxdb.bucket").Return("metrics").AnyTimes()
    fakeConfig.EXPECT().GetString("web.influxdb.org").Return("scrutiny").AnyTimes()

    deviceRepo := scrutinyRepository{
        appConfig: fakeConfig,
    }

    aggregationType := "monthly"

    //test
    influxDbScript := deviceRepo.DownsampleScript(aggregationType, "tsk-monthly-aggr", "30 1 1 * *")

    //assert
    require.Equal(t, `
option task = {
    name: "tsk-monthly-aggr",
    cron: "30 1 1 * *",
}

sourceBucket = "metrics_weekly"
rangeStart = -2mo
rangeEnd = -1mo
aggWindow = 1mo
destBucket = "metrics_monthly"
destOrg = "scrutiny"

from(bucket: sourceBucket)
|> range(start: rangeStart, stop: rangeEnd)
|> filter(fn: (r) => r["_measurement"] == "smart" )
|> group(columns: ["device_wwn", "_field"])
|> aggregateWindow(every: aggWindow, fn: last, createEmpty: false)
|> to(bucket: destBucket, org: destOrg)

from(bucket: sourceBucket)
|> range(start: rangeStart, stop: rangeEnd)
|> filter(fn: (r) => r["_measurement"] == "temp")
|> group(columns: ["device_wwn"])
|> toInt()
|> aggregateWindow(fn: mean, every: aggWindow, createEmpty: false)
|> set(key: "_measurement", value: "temp")
|> set(key: "_field", value: "temp")
|> to(bucket: destBucket, org: destOrg)
`, influxDbScript)
}

func Test_DownsampleScript_Yearly(t *testing.T) {
    t.Parallel()

    //setup
    mockCtrl := gomock.NewController(t)
    defer mockCtrl.Finish()
    fakeConfig := mock_config.NewMockInterface(mockCtrl)
    fakeConfig.EXPECT().GetString("web.influxdb.bucket").Return("metrics").AnyTimes()
    fakeConfig.EXPECT().GetString("web.influxdb.org").Return("scrutiny").AnyTimes()

    deviceRepo := scrutinyRepository{
        appConfig: fakeConfig,
    }

    aggregationType := "yearly"

    //test
    influxDbScript := deviceRepo.DownsampleScript(aggregationType, "tsk-yearly-aggr", "0 2 1 1 *")

    //assert
    require.Equal(t, `
option task = {
    name: "tsk-yearly-aggr",
    cron: "0 2 1 1 *",
}

sourceBucket = "metrics_monthly"
rangeStart = -2y
rangeEnd = -1y
aggWindow = 1y
destBucket = "metrics_yearly"
destOrg = "scrutiny"

from(bucket: sourceBucket)
|> range(start: rangeStart, stop: rangeEnd)
|> filter(fn: (r) => r["_measurement"] == "smart" )
|> group(columns: ["device_wwn", "_field"])
|> aggregateWindow(every: aggWindow, fn: last, createEmpty: false)
|> to(bucket: destBucket, org: destOrg)

from(bucket: sourceBucket)
|> range(start: rangeStart, stop: rangeEnd)
|> filter(fn: (r) => r["_measurement"] == "temp")
|> group(columns: ["device_wwn"])
|> toInt()
|> aggregateWindow(fn: mean, every: aggWindow, createEmpty: false)
|> set(key: "_measurement", value: "temp")
|> set(key: "_field", value: "temp")
|> to(bucket: destBucket, org: destOrg)
`, influxDbScript)
}
@@ -17,6 +17,10 @@ func (sr *scrutinyRepository) SaveSmartTemperature(ctx context.Context, wwn stri
    if len(collectorSmartData.AtaSctTemperatureHistory.Table) > 0 {

        for ndx, temp := range collectorSmartData.AtaSctTemperatureHistory.Table {
            //temp value may be null, we must skip/ignore them. See #393
            if temp == 0 {
                continue
            }

            minutesOffset := collectorSmartData.AtaSctTemperatureHistory.LoggingIntervalMinutes * int64(ndx) * 60
            smartTemp := measurements.SmartTemperature{

@@ -0,0 +1,185 @@
package database

import (
    mock_config "github.com/analogj/scrutiny/webapp/backend/pkg/config/mock"
    "github.com/golang/mock/gomock"
    "github.com/stretchr/testify/require"
    "testing"
)

func Test_aggregateTempQuery_Week(t *testing.T) {
    t.Parallel()

    //setup
    mockCtrl := gomock.NewController(t)
    defer mockCtrl.Finish()
    fakeConfig := mock_config.NewMockInterface(mockCtrl)
    fakeConfig.EXPECT().GetString("web.influxdb.bucket").Return("metrics").AnyTimes()
    fakeConfig.EXPECT().GetString("web.influxdb.org").Return("scrutiny").AnyTimes()

    deviceRepo := scrutinyRepository{
        appConfig: fakeConfig,
    }

    aggregationType := DURATION_KEY_WEEK

    //test
    influxDbScript := deviceRepo.aggregateTempQuery(aggregationType)

    //assert
    require.Equal(t, `import "influxdata/influxdb/schema"
weekData = from(bucket: "metrics")
|> range(start: -1w, stop: now())
|> filter(fn: (r) => r["_measurement"] == "temp" )
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|> group(columns: ["device_wwn"])
|> toInt()

weekData
|> schema.fieldsAsCols()
|> yield()`, influxDbScript)
}

func Test_aggregateTempQuery_Month(t *testing.T) {
    t.Parallel()

    //setup
    mockCtrl := gomock.NewController(t)
    defer mockCtrl.Finish()
    fakeConfig := mock_config.NewMockInterface(mockCtrl)
    fakeConfig.EXPECT().GetString("web.influxdb.bucket").Return("metrics").AnyTimes()
    fakeConfig.EXPECT().GetString("web.influxdb.org").Return("scrutiny").AnyTimes()

    deviceRepo := scrutinyRepository{
        appConfig: fakeConfig,
    }

    aggregationType := DURATION_KEY_MONTH

    //test
    influxDbScript := deviceRepo.aggregateTempQuery(aggregationType)

    //assert
    require.Equal(t, `import "influxdata/influxdb/schema"
weekData = from(bucket: "metrics")
|> range(start: -1w, stop: now())
|> filter(fn: (r) => r["_measurement"] == "temp" )
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|> group(columns: ["device_wwn"])
|> toInt()

monthData = from(bucket: "metrics_weekly")
|> range(start: -1mo, stop: -1w)
|> filter(fn: (r) => r["_measurement"] == "temp" )
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|> group(columns: ["device_wwn"])
|> toInt()

union(tables: [weekData, monthData])
|> group(columns: ["device_wwn"])
|> sort(columns: ["_time"], desc: false)
|> schema.fieldsAsCols()`, influxDbScript)
}

func Test_aggregateTempQuery_Year(t *testing.T) {
    t.Parallel()

    //setup
    mockCtrl := gomock.NewController(t)
    defer mockCtrl.Finish()
    fakeConfig := mock_config.NewMockInterface(mockCtrl)
    fakeConfig.EXPECT().GetString("web.influxdb.bucket").Return("metrics").AnyTimes()
    fakeConfig.EXPECT().GetString("web.influxdb.org").Return("scrutiny").AnyTimes()

    deviceRepo := scrutinyRepository{
        appConfig: fakeConfig,
    }

    aggregationType := DURATION_KEY_YEAR

    //test
    influxDbScript := deviceRepo.aggregateTempQuery(aggregationType)

    //assert
    require.Equal(t, `import "influxdata/influxdb/schema"
weekData = from(bucket: "metrics")
|> range(start: -1w, stop: now())
|> filter(fn: (r) => r["_measurement"] == "temp" )
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|> group(columns: ["device_wwn"])
|> toInt()

monthData = from(bucket: "metrics_weekly")
|> range(start: -1mo, stop: -1w)
|> filter(fn: (r) => r["_measurement"] == "temp" )
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|> group(columns: ["device_wwn"])
|> toInt()

yearData = from(bucket: "metrics_monthly")
|> range(start: -1y, stop: -1mo)
|> filter(fn: (r) => r["_measurement"] == "temp" )
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|> group(columns: ["device_wwn"])
|> toInt()

union(tables: [weekData, monthData, yearData])
|> group(columns: ["device_wwn"])
|> sort(columns: ["_time"], desc: false)
|> schema.fieldsAsCols()`, influxDbScript)
}

func Test_aggregateTempQuery_Forever(t *testing.T) {
    t.Parallel()

    //setup
    mockCtrl := gomock.NewController(t)
    defer mockCtrl.Finish()
    fakeConfig := mock_config.NewMockInterface(mockCtrl)
    fakeConfig.EXPECT().GetString("web.influxdb.bucket").Return("metrics").AnyTimes()
    fakeConfig.EXPECT().GetString("web.influxdb.org").Return("scrutiny").AnyTimes()

    deviceRepo := scrutinyRepository{
        appConfig: fakeConfig,
    }

    aggregationType := DURATION_KEY_FOREVER

    //test
    influxDbScript := deviceRepo.aggregateTempQuery(aggregationType)

    //assert
    require.Equal(t, `import "influxdata/influxdb/schema"
weekData = from(bucket: "metrics")
|> range(start: -1w, stop: now())
|> filter(fn: (r) => r["_measurement"] == "temp" )
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|> group(columns: ["device_wwn"])
|> toInt()

monthData = from(bucket: "metrics_weekly")
|> range(start: -1mo, stop: -1w)
|> filter(fn: (r) => r["_measurement"] == "temp" )
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|> group(columns: ["device_wwn"])
|> toInt()

yearData = from(bucket: "metrics_monthly")
|> range(start: -1y, stop: -1mo)
|> filter(fn: (r) => r["_measurement"] == "temp" )
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|> group(columns: ["device_wwn"])
|> toInt()

foreverData = from(bucket: "metrics_yearly")
|> range(start: -10y, stop: -1y)
|> filter(fn: (r) => r["_measurement"] == "temp" )
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|> group(columns: ["device_wwn"])
|> toInt()

union(tables: [weekData, monthData, yearData, foreverData])
|> group(columns: ["device_wwn"])
|> sort(columns: ["_time"], desc: false)
|> schema.fieldsAsCols()`, influxDbScript)
}
@@ -21,6 +21,10 @@ type Device struct {
    WWN string `json:"wwn" gorm:"primary_key"`

    DeviceName     string `json:"device_name"`
    DeviceUUID     string `json:"device_uuid"`
    DeviceSerialID string `json:"device_serial_id"`
    DeviceLabel    string `json:"device_label"`

    Manufacturer  string `json:"manufacturer"`
    ModelName     string `json:"model_name"`
    InterfaceType string `json:"interface_type"`
@@ -162,7 +166,7 @@ func (dv *Device) UpdateFromCollectorSmartInfo(info collector.SmartInfo) error {
    dv.DeviceProtocol = info.Device.Protocol

    if !info.SmartStatus.Passed {
        dv.DeviceStatus = pkg.Set(dv.DeviceStatus, pkg.DeviceStatusFailedSmart)
        dv.DeviceStatus = pkg.DeviceStatusSet(dv.DeviceStatus, pkg.DeviceStatusFailedSmart)
    }

    return nil

@@ -110,7 +110,7 @@ func (sm *Smart) FromCollectorSmartInfo(wwn string, info collector.SmartInfo) er
    sm.PowerCycleCount = info.PowerCycleCount
    sm.PowerOnHours = info.PowerOnTime.Hours
    if !info.SmartStatus.Passed {
        sm.Status = pkg.DeviceStatusFailedSmart
        sm.Status = pkg.DeviceStatusSet(sm.Status, pkg.DeviceStatusFailedSmart)
    }

    sm.DeviceProtocol = info.Device.Protocol
@@ -148,8 +148,9 @@ func (sm *Smart) ProcessAtaSmartInfo(tableItems []collector.AtaSmartAttributesTa
        }
        attrModel.PopulateAttributeStatus()
        sm.Attributes[strconv.Itoa(collectorAttr.ID)] = &attrModel
        if attrModel.Status == pkg.SmartAttributeStatusFailed {
            sm.Status = pkg.Set(sm.Status, pkg.DeviceStatusFailedScrutiny)

        if pkg.AttributeStatusHas(attrModel.Status, pkg.AttributeStatusFailedScrutiny) {
            sm.Status = pkg.DeviceStatusSet(sm.Status, pkg.DeviceStatusFailedScrutiny)
        }
    }
}
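The pkg.Set -> pkg.DeviceStatusSet rename and the new pkg.AttributeStatusHas check reflect that statuses are now bit flags, so a device can fail SMART and Scrutiny thresholds simultaneously; a sketch of what such helpers look like (illustrative only, the real definitions live in the pkg package):

    // illustrative bitmask semantics implied by the calls above
    type AttributeStatus uint8

    const (
        AttributeStatusPassed          AttributeStatus = 0
        AttributeStatusFailedSmart     AttributeStatus = 1
        AttributeStatusWarningScrutiny AttributeStatus = 2
        AttributeStatusFailedScrutiny  AttributeStatus = 4
    )

    func AttributeStatusSet(b, flag AttributeStatus) AttributeStatus { return b | flag }
    func AttributeStatusHas(b, flag AttributeStatus) bool            { return b&flag != 0 }
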
@@ -171,15 +172,15 @@ func (sm *Smart) ProcessNvmeSmartInfo(nvmeSmartHealthInformationLog collector.Nv
        "power_on_hours":      (&SmartNvmeAttribute{AttributeId: "power_on_hours", Value: nvmeSmartHealthInformationLog.PowerOnHours, Threshold: -1}).PopulateAttributeStatus(),
        "unsafe_shutdowns":    (&SmartNvmeAttribute{AttributeId: "unsafe_shutdowns", Value: nvmeSmartHealthInformationLog.UnsafeShutdowns, Threshold: -1}).PopulateAttributeStatus(),
        "media_errors":        (&SmartNvmeAttribute{AttributeId: "media_errors", Value: nvmeSmartHealthInformationLog.MediaErrors, Threshold: 0}).PopulateAttributeStatus(),
        "num_err_log_entries": (&SmartNvmeAttribute{AttributeId: "num_err_log_entries", Value: nvmeSmartHealthInformationLog.NumErrLogEntries, Threshold: 0}).PopulateAttributeStatus(),
        "num_err_log_entries": (&SmartNvmeAttribute{AttributeId: "num_err_log_entries", Value: nvmeSmartHealthInformationLog.NumErrLogEntries, Threshold: -1}).PopulateAttributeStatus(),
        "warning_temp_time":   (&SmartNvmeAttribute{AttributeId: "warning_temp_time", Value: nvmeSmartHealthInformationLog.WarningTempTime, Threshold: -1}).PopulateAttributeStatus(),
        "critical_comp_time":  (&SmartNvmeAttribute{AttributeId: "critical_comp_time", Value: nvmeSmartHealthInformationLog.CriticalCompTime, Threshold: -1}).PopulateAttributeStatus(),
    }

    //find analyzed attribute status
    for _, val := range sm.Attributes {
        if val.GetStatus() == pkg.SmartAttributeStatusFailed {
            sm.Status = pkg.Set(sm.Status, pkg.DeviceStatusFailedScrutiny)
        if pkg.AttributeStatusHas(val.GetStatus(), pkg.AttributeStatusFailedScrutiny) {
            sm.Status = pkg.DeviceStatusSet(sm.Status, pkg.DeviceStatusFailedScrutiny)
        }
    }
}
@@ -204,8 +205,8 @@ func (sm *Smart) ProcessScsiSmartInfo(defectGrownList int64, scsiErrorCounterLog

    //find analyzed attribute status
    for _, val := range sm.Attributes {
        if val.GetStatus() == pkg.SmartAttributeStatusFailed {
            sm.Status = pkg.Set(sm.Status, pkg.DeviceStatusFailedScrutiny)
        if pkg.AttributeStatusHas(val.GetStatus(), pkg.AttributeStatusFailedScrutiny) {
            sm.Status = pkg.DeviceStatusSet(sm.Status, pkg.DeviceStatusFailedScrutiny)
        }
    }
}

@@ -18,13 +18,13 @@ type SmartAtaAttribute struct {
    WhenFailed string `json:"when_failed"`

    //Generated data
    TransformedValue int64   `json:"transformed_value"`
    Status           int64   `json:"status"`
    StatusReason     string  `json:"status_reason,omitempty"`
    FailureRate      float64 `json:"failure_rate,omitempty"`
    TransformedValue int64               `json:"transformed_value"`
    Status           pkg.AttributeStatus `json:"status"`
    StatusReason     string              `json:"status_reason,omitempty"`
    FailureRate      float64             `json:"failure_rate,omitempty"`
}

func (sa *SmartAtaAttribute) GetStatus() int64 {
func (sa *SmartAtaAttribute) GetStatus() pkg.AttributeStatus {
    return sa.Status
}

@@ -43,7 +43,7 @@ func (sa *SmartAtaAttribute) Flatten() map[string]interface{} {

        //Generated Data
        fmt.Sprintf("attr.%s.transformed_value", idString): sa.TransformedValue,
        fmt.Sprintf("attr.%s.status", idString):            sa.Status,
        fmt.Sprintf("attr.%s.status", idString):            int64(sa.Status),
        fmt.Sprintf("attr.%s.status_reason", idString):     sa.StatusReason,
        fmt.Sprintf("attr.%s.failure_rate", idString):      sa.FailureRate,
    }
@@ -77,7 +77,7 @@ func (sa *SmartAtaAttribute) Inflate(key string, val interface{}) {
    case "transformed_value":
        sa.TransformedValue = val.(int64)
    case "status":
        sa.Status = val.(int64)
        sa.Status = pkg.AttributeStatus(val.(int64))
    case "status_reason":
        sa.StatusReason = val.(string)
    case "failure_rate":
@@ -89,14 +89,16 @@ func (sa *SmartAtaAttribute) Inflate(key string, val interface{}) {
//populate attribute status, using SMART Thresholds & Observed Metadata
// Chainable
func (sa *SmartAtaAttribute) PopulateAttributeStatus() *SmartAtaAttribute {
    if strings.ToUpper(sa.WhenFailed) == pkg.SmartWhenFailedFailingNow {
    if strings.ToUpper(sa.WhenFailed) == pkg.AttributeWhenFailedFailingNow {
        //this attribute has previously failed
        sa.Status = pkg.SmartAttributeStatusFailed
        sa.StatusReason = "Attribute is failing manufacturer SMART threshold"
        sa.Status = pkg.AttributeStatusSet(sa.Status, pkg.AttributeStatusFailedSmart)
        sa.StatusReason += "Attribute is failing manufacturer SMART threshold"
        //if the Smart Status is failed, we should exit early, no need to look at thresholds.
        return sa

    } else if strings.ToUpper(sa.WhenFailed) == pkg.SmartWhenFailedInThePast {
        sa.Status = pkg.SmartAttributeStatusWarning
        sa.StatusReason = "Attribute has previously failed manufacturer SMART threshold"
    } else if strings.ToUpper(sa.WhenFailed) == pkg.AttributeWhenFailedInThePast {
        sa.Status = pkg.AttributeStatusSet(sa.Status, pkg.AttributeStatusWarningScrutiny)
        sa.StatusReason += "Attribute has previously failed manufacturer SMART threshold"
    }

    if smartMetadata, ok := thresholds.AtaMetadata[sa.AttributeId]; ok {
@@ -136,16 +138,16 @@ func (sa *SmartAtaAttribute) ValidateThreshold(smartMetadata thresholds.AtaAttri

            if smartMetadata.Critical {
                if obsThresh.AnnualFailureRate >= 0.10 {
                    sa.Status = pkg.SmartAttributeStatusFailed
                    sa.StatusReason = "Observed Failure Rate for Critical Attribute is greater than 10%"
                    sa.Status = pkg.AttributeStatusSet(sa.Status, pkg.AttributeStatusFailedScrutiny)
                    sa.StatusReason += "Observed Failure Rate for Critical Attribute is greater than 10%"
                }
            } else {
                if obsThresh.AnnualFailureRate >= 0.20 {
                    sa.Status = pkg.SmartAttributeStatusFailed
                    sa.StatusReason = "Observed Failure Rate for Attribute is greater than 20%"
                    sa.Status = pkg.AttributeStatusSet(sa.Status, pkg.AttributeStatusFailedScrutiny)
                    sa.StatusReason += "Observed Failure Rate for Non-Critical Attribute is greater than 20%"
                } else if obsThresh.AnnualFailureRate >= 0.10 {
                    sa.Status = pkg.SmartAttributeStatusWarning
                    sa.StatusReason = "Observed Failure Rate for Attribute is greater than 10%"
                    sa.Status = pkg.AttributeStatusSet(sa.Status, pkg.AttributeStatusWarningScrutiny)
                    sa.StatusReason += "Observed Failure Rate for Non-Critical Attribute is greater than 10%"
                }
            }

@@ -155,7 +157,7 @@ func (sa *SmartAtaAttribute) ValidateThreshold(smartMetadata thresholds.AtaAttri
    }
    // no bucket found
    if smartMetadata.Critical {
        sa.Status = pkg.SmartAttributeStatusWarning
        sa.Status = pkg.AttributeStatusSet(sa.Status, pkg.AttributeStatusWarningScrutiny)
        sa.StatusReason = "Could not determine Observed Failure Rate for Critical Attribute"
    }

@@ -1,7 +1,9 @@
 package measurements
 
+import "github.com/analogj/scrutiny/webapp/backend/pkg"
+
 type SmartAttribute interface {
 	Flatten() (fields map[string]interface{})
 	Inflate(key string, val interface{})
-	GetStatus() int64
+	GetStatus() pkg.AttributeStatus
 }
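Returning the named pkg.AttributeStatus type from GetStatus() lets callers test flags rather than compare raw integers. The consumer below is not in this diff; it is only a sketch of the device-level rollup the updated tests imply (pkg.DeviceStatus, DeviceStatusPassed, DeviceStatusSet, and AttributeStatusHas are assumed names; pkg.DeviceStatusFailedScrutiny and pkg.AttributeStatusFailedScrutiny do appear in the tests further down):

package measurements

import "github.com/analogj/scrutiny/webapp/backend/pkg"

// deviceStatusFromAttributes is a hypothetical rollup: only failed attribute
// flags escalate the device status, so a Scrutiny *warning* on a non-critical
// attribute leaves the device status untouched, as the tests below assert.
func deviceStatusFromAttributes(attrs map[string]SmartAttribute) pkg.DeviceStatus {
	status := pkg.DeviceStatusPassed // assumed constant
	for _, attr := range attrs {
		if pkg.AttributeStatusHas(attr.GetStatus(), pkg.AttributeStatusFailedScrutiny) { // assumed helper
			status = pkg.DeviceStatusSet(status, pkg.DeviceStatusFailedScrutiny) // assumed helper
		}
	}
	return status
}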
@@ -12,13 +12,13 @@ type SmartNvmeAttribute struct {
 	Value     int64 `json:"value"`
 	Threshold int64 `json:"thresh"`
 
-	TransformedValue int64   `json:"transformed_value"`
-	Status           int64   `json:"status"`
-	StatusReason     string  `json:"status_reason,omitempty"`
-	FailureRate      float64 `json:"failure_rate,omitempty"`
+	TransformedValue int64               `json:"transformed_value"`
+	Status           pkg.AttributeStatus `json:"status"`
+	StatusReason     string              `json:"status_reason,omitempty"`
+	FailureRate      float64             `json:"failure_rate,omitempty"`
 }
 
-func (sa *SmartNvmeAttribute) GetStatus() int64 {
+func (sa *SmartNvmeAttribute) GetStatus() pkg.AttributeStatus {
 	return sa.Status
 }
 
@@ -30,7 +30,7 @@ func (sa *SmartNvmeAttribute) Flatten() map[string]interface{} {
 
 		//Generated Data
 		fmt.Sprintf("attr.%s.transformed_value", sa.AttributeId): sa.TransformedValue,
-		fmt.Sprintf("attr.%s.status", sa.AttributeId):            sa.Status,
+		fmt.Sprintf("attr.%s.status", sa.AttributeId):            int64(sa.Status),
 		fmt.Sprintf("attr.%s.status_reason", sa.AttributeId):     sa.StatusReason,
 		fmt.Sprintf("attr.%s.failure_rate", sa.AttributeId):      sa.FailureRate,
 	}
@@ -54,7 +54,7 @@ func (sa *SmartNvmeAttribute) Inflate(key string, val interface{}) {
 	case "transformed_value":
 		sa.TransformedValue = val.(int64)
 	case "status":
-		sa.Status = val.(int64)
+		sa.Status = pkg.AttributeStatus(val.(int64))
 	case "status_reason":
 		sa.StatusReason = val.(string)
 	case "failure_rate":
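Flatten and Inflate are the persistence boundary, and the stored field must be a primitive: hence the explicit int64(sa.Status) on the way out and pkg.AttributeStatus(val.(int64)) on the way in. A self-contained roundtrip sketch (the field key follows the Flatten format above; the value is hypothetical):

package main

import "fmt"

type AttributeStatus uint8

func main() {
	status := AttributeStatus(4) // e.g. the assumed FailedScrutiny flag

	// Flatten: the datastore only accepts primitives, so store a plain int64
	fields := map[string]interface{}{
		"attr.media_errors.status": int64(status),
	}

	// Inflate: the value comes back as interface{} wrapping an int64, so the
	// type assertion and the conversion back to the named type are both needed
	restored := AttributeStatus(fields["attr.media_errors.status"].(int64))

	fmt.Println(status == restored) // true
}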
@@ -72,8 +72,8 @@ func (sa *SmartNvmeAttribute) PopulateAttributeStatus() *SmartNvmeAttribute {
 		//check what the ideal is. Ideal tells us whether our recorded value needs to be above or below the threshold
 		if (smartMetadata.Ideal == "low" && sa.Value > sa.Threshold) ||
 			(smartMetadata.Ideal == "high" && sa.Value < sa.Threshold) {
-			sa.Status = pkg.SmartAttributeStatusFailed
-			sa.StatusReason = "Attribute is failing recommended SMART threshold"
+			sa.Status = pkg.AttributeStatusSet(sa.Status, pkg.AttributeStatusFailedScrutiny)
+			sa.StatusReason += "Attribute is failing recommended SMART threshold"
 		}
 	}
 }
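Because NVMe (and SCSI) attributes carry no manufacturer threshold direction, the metadata's Ideal field supplies it: "low" attributes should stay at or below their threshold, "high" attributes at or above it. The predicate restated as a self-contained function, with hypothetical example values:

package main

import "fmt"

// idealViolated mirrors the condition in PopulateAttributeStatus above.
func idealViolated(ideal string, value, threshold int64) bool {
	return (ideal == "low" && value > threshold) ||
		(ideal == "high" && value < threshold)
}

func main() {
	// hypothetical examples, not taken from the testdata files:
	fmt.Println(idealViolated("high", 5, 10)) // spare capacity dropped below threshold -> true
	fmt.Println(idealViolated("low", 0, 0))   // zero media errors -> false
}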
@@ -12,13 +12,13 @@ type SmartScsiAttribute struct {
 	Value     int64 `json:"value"`
 	Threshold int64 `json:"thresh"`
 
-	TransformedValue int64   `json:"transformed_value"`
-	Status           int64   `json:"status"`
-	StatusReason     string  `json:"status_reason,omitempty"`
-	FailureRate      float64 `json:"failure_rate,omitempty"`
+	TransformedValue int64               `json:"transformed_value"`
+	Status           pkg.AttributeStatus `json:"status"`
+	StatusReason     string              `json:"status_reason,omitempty"`
+	FailureRate      float64             `json:"failure_rate,omitempty"`
 }
 
-func (sa *SmartScsiAttribute) GetStatus() int64 {
+func (sa *SmartScsiAttribute) GetStatus() pkg.AttributeStatus {
 	return sa.Status
 }
 
@@ -30,7 +30,7 @@ func (sa *SmartScsiAttribute) Flatten() map[string]interface{} {
 
 		//Generated Data
 		fmt.Sprintf("attr.%s.transformed_value", sa.AttributeId): sa.TransformedValue,
-		fmt.Sprintf("attr.%s.status", sa.AttributeId):            sa.Status,
+		fmt.Sprintf("attr.%s.status", sa.AttributeId):            int64(sa.Status),
 		fmt.Sprintf("attr.%s.status_reason", sa.AttributeId):     sa.StatusReason,
 		fmt.Sprintf("attr.%s.failure_rate", sa.AttributeId):      sa.FailureRate,
 	}
@@ -54,7 +54,7 @@ func (sa *SmartScsiAttribute) Inflate(key string, val interface{}) {
 	case "transformed_value":
 		sa.TransformedValue = val.(int64)
 	case "status":
-		sa.Status = val.(int64)
+		sa.Status = pkg.AttributeStatus(val.(int64))
 	case "status_reason":
 		sa.StatusReason = val.(string)
 	case "failure_rate":
@@ -73,7 +73,7 @@ func (sa *SmartScsiAttribute) PopulateAttributeStatus() *SmartScsiAttribute {
 		//check what the ideal is. Ideal tells us whether our recorded value needs to be above or below the threshold
 		if (smartMetadata.Ideal == "low" && sa.Value > sa.Threshold) ||
 			(smartMetadata.Ideal == "high" && sa.Value < sa.Threshold) {
-			sa.Status = pkg.SmartAttributeStatusFailed
+			sa.Status = pkg.AttributeStatusSet(sa.Status, pkg.AttributeStatusFailedScrutiny)
 			sa.StatusReason = "Attribute is failing recommended SMART threshold"
 		}
 	}
@@ -328,9 +328,12 @@ func TestFromCollectorSmartInfo(t *testing.T) {
 	require.Equal(t, 18, len(smartMdl.Attributes))
 
 	//check that temperature was correctly parsed
 	require.Equal(t, int64(163210330144), smartMdl.Attributes["194"].(*measurements.SmartAtaAttribute).RawValue)
 	require.Equal(t, int64(32), smartMdl.Attributes["194"].(*measurements.SmartAtaAttribute).TransformedValue)
 
+	//ensure that a Scrutiny warning for a non-critical attribute does not set the device status to failed.
+	require.Equal(t, pkg.AttributeStatusWarningScrutiny, smartMdl.Attributes["3"].GetStatus())
+
 }
 
 func TestFromCollectorSmartInfo_Fail_Smart(t *testing.T) {
@@ -381,6 +384,70 @@ func TestFromCollectorSmartInfo_Fail_ScrutinySmart(t *testing.T) {
 	require.Equal(t, 17, len(smartMdl.Attributes))
 }
 
+func TestFromCollectorSmartInfo_Fail_ScrutinyNonCriticalFailed(t *testing.T) {
+	//setup
+	smartDataFile, err := os.Open("../testdata/smart-ata-failed-scrutiny.json")
+	require.NoError(t, err)
+	defer smartDataFile.Close()
+
+	var smartJson collector.SmartInfo
+
+	smartDataBytes, err := ioutil.ReadAll(smartDataFile)
+	require.NoError(t, err)
+	err = json.Unmarshal(smartDataBytes, &smartJson)
+	require.NoError(t, err)
+
+	//test
+	smartMdl := measurements.Smart{}
+	err = smartMdl.FromCollectorSmartInfo("WWN-test", smartJson)
+
+	//assert
+	require.NoError(t, err)
+	require.Equal(t, "WWN-test", smartMdl.DeviceWWN)
+	require.Equal(t, pkg.DeviceStatusFailedScrutiny, smartMdl.Status)
+	require.Equal(t, pkg.AttributeStatusFailedScrutiny, smartMdl.Attributes["199"].GetStatus(),
+		"scrutiny should detect that %d failed (status: %d, %s)",
+		smartMdl.Attributes["199"].(*measurements.SmartAtaAttribute).AttributeId,
+		smartMdl.Attributes["199"].GetStatus(), smartMdl.Attributes["199"].(*measurements.SmartAtaAttribute).StatusReason,
+	)
+
+	require.Equal(t, 14, len(smartMdl.Attributes))
+}
+
+//TODO: Scrutiny Warn
+//TODO: Smart + Scrutiny Warn
+
+func TestFromCollectorSmartInfo_NVMe_Fail_Scrutiny(t *testing.T) {
+	//setup
+	smartDataFile, err := os.Open("../testdata/smart-nvme-failed.json")
+	require.NoError(t, err)
+	defer smartDataFile.Close()
+
+	var smartJson collector.SmartInfo
+
+	smartDataBytes, err := ioutil.ReadAll(smartDataFile)
+	require.NoError(t, err)
+	err = json.Unmarshal(smartDataBytes, &smartJson)
+	require.NoError(t, err)
+
+	//test
+	smartMdl := measurements.Smart{}
+	err = smartMdl.FromCollectorSmartInfo("WWN-test", smartJson)
+
+	//assert
+	require.NoError(t, err)
+	require.Equal(t, "WWN-test", smartMdl.DeviceWWN)
+	require.Equal(t, pkg.DeviceStatusFailedScrutiny, smartMdl.Status)
+	require.Equal(t, pkg.AttributeStatusFailedScrutiny, smartMdl.Attributes["media_errors"].GetStatus(),
+		"scrutiny should detect that %s failed (status: %d, %s)",
+		smartMdl.Attributes["media_errors"].(*measurements.SmartNvmeAttribute).AttributeId,
+		smartMdl.Attributes["media_errors"].GetStatus(),
+		smartMdl.Attributes["media_errors"].(*measurements.SmartNvmeAttribute).StatusReason,
+	)
+
+	require.Equal(t, 16, len(smartMdl.Attributes))
+}
+
 func TestFromCollectorSmartInfo_Nvme(t *testing.T) {
 	//setup
 	smartDataFile, err := os.Open("../testdata/smart-nvme.json")
@@ -1,5 +0,0 @@
-package models
-
-// Temperature Format
-// Date Format
-// Device History window
@@ -0,0 +1,23 @@
+package models
+
+import (
+	"gorm.io/gorm"
+)
+
+// SettingEntry matches a setting row in the database
+type SettingEntry struct {
+	//GORM attributes, see: http://gorm.io/docs/conventions.html
+	gorm.Model
+
+	SettingKeyName        string `json:"setting_key_name" gorm:"unique;not null"`
+	SettingKeyDescription string `json:"setting_key_description"`
+	SettingDataType       string `json:"setting_data_type"`
+
+	SettingValueNumeric int    `json:"setting_value_numeric"`
+	SettingValueString  string `json:"setting_value_string"`
+	SettingValueBool    bool   `json:"setting_value_bool"`
+}
+
+func (s SettingEntry) TableName() string {
+	return "settings"
+}
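Each setting lives in its own row, with the value split across typed columns and the key kept unique. Not part of this diff: a sketch of typical GORM v2 usage against the model (the sqlite driver, database path, key name, and level value are all assumptions):

package models

import (
	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
)

// UpdateNotifyLevel is a hypothetical helper showing a read-modify-write
// against the "settings" table that SettingEntry.TableName() points at.
func UpdateNotifyLevel(level int) error {
	db, err := gorm.Open(sqlite.Open("scrutiny.db"), &gorm.Config{}) // path assumed
	if err != nil {
		return err
	}
	// create or update the table schema from the model definition
	if err := db.AutoMigrate(&SettingEntry{}); err != nil {
		return err
	}
	var entry SettingEntry
	if err := db.Where("setting_key_name = ?", "metrics.notify.level").First(&entry).Error; err != nil {
		return err // key assumed; the row must already be seeded
	}
	entry.SettingValueNumeric = level
	return db.Save(&entry).Error
}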
@@ -0,0 +1,24 @@
+package models
+
+// Settings is made up of parsed SettingEntry objects retrieved from the database
+//type Settings struct {
+//	MetricsNotifyLevel            pkg.MetricsNotifyLevel            `json:"metrics.notify.level" mapstructure:"metrics.notify.level"`
+//	MetricsStatusFilterAttributes pkg.MetricsStatusFilterAttributes `json:"metrics.status.filter_attributes" mapstructure:"metrics.status.filter_attributes"`
+//	MetricsStatusThreshold        pkg.MetricsStatusThreshold        `json:"metrics.status.threshold" mapstructure:"metrics.status.threshold"`
+//}
+
+type Settings struct {
+	Theme            string `json:"theme" mapstructure:"theme"`
+	Layout           string `json:"layout" mapstructure:"layout"`
+	DashboardDisplay string `json:"dashboard_display" mapstructure:"dashboard_display"`
+	DashboardSort    string `json:"dashboard_sort" mapstructure:"dashboard_sort"`
+	TemperatureUnit  string `json:"temperature_unit" mapstructure:"temperature_unit"`
+	FileSizeSIUnits  bool   `json:"file_size_si_units" mapstructure:"file_size_si_units"`
+	LineStroke       string `json:"line_stroke" mapstructure:"line_stroke"`
+
+	Metrics struct {
+		NotifyLevel            int `json:"notify_level" mapstructure:"notify_level"`
+		StatusFilterAttributes int `json:"status_filter_attributes" mapstructure:"status_filter_attributes"`
+		StatusThreshold        int `json:"status_threshold" mapstructure:"status_threshold"`
+	} `json:"metrics" mapstructure:"metrics"`
+}
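The mapstructure tags suggest the flat settings rows get decoded into this nested struct through an intermediate map. A hypothetical decoder under that assumption (the dotted key layout and the SettingDataType values "numeric"/"bool" are guesses, not taken from this diff):

package models

import (
	"strings"

	"github.com/mitchellh/mapstructure"
)

// settingsFromEntries rebuilds a nested map from dotted SettingKeyName values
// (e.g. "metrics.notify_level" -> nested["metrics"]["notify_level"]) and lets
// mapstructure decode it into Settings using the tags above.
func settingsFromEntries(entries []SettingEntry) (*Settings, error) {
	nested := map[string]interface{}{}
	for _, e := range entries {
		parts := strings.Split(e.SettingKeyName, ".")
		node := nested
		for _, p := range parts[:len(parts)-1] {
			child, ok := node[p].(map[string]interface{})
			if !ok {
				child = map[string]interface{}{}
				node[p] = child
			}
			node = child
		}
		// pick the column matching the declared data type (type names assumed)
		switch e.SettingDataType {
		case "numeric":
			node[parts[len(parts)-1]] = e.SettingValueNumeric
		case "bool":
			node[parts[len(parts)-1]] = e.SettingValueBool
		default:
			node[parts[len(parts)-1]] = e.SettingValueString
		}
	}
	var s Settings
	err := mapstructure.Decode(nested, &s)
	return &s, err
}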
Some files were not shown because too many files have changed in this diff.