Compare commits
177 Commits
| SHA1 | Author | Date |
|---|---|---|
| ee893cc360 | |||
| d73907d357 | |||
| 0b50305f38 | |||
| ee3d719c3a | |||
| d76cdca4a5 | |||
| b34ed607b7 | |||
| 932e191510 | |||
| 3a6c407fe7 | |||
| 8c3afc31f4 | |||
| 2e4ba44952 | |||
| 4192ac719e | |||
| 539c94595f | |||
| de21e611a3 | |||
| dea362361e | |||
| 7b77519f49 | |||
| 94df7e1ec3 | |||
| babd8d3089 | |||
| 52ef28f091 | |||
| 80d72f8a1b | |||
| 2e8f4a0581 | |||
| 7fd2e2b050 | |||
| 8c65166a90 | |||
| 733b49c2c4 | |||
| 711b5c40b1 | |||
| f94e616d8d | |||
| fb7848f341 | |||
| 007857afd5 | |||
| e4bbe8c035 | |||
| cb5226f6e4 | |||
| c69770d1dd | |||
| e07a53046f | |||
| 19a0b8c2ac | |||
| 97f73703b1 | |||
| 4fcd11f497 | |||
| 7f1023fa9b | |||
| d49497da80 | |||
| d8c359bd8a | |||
| ad4b117f6e | |||
| 22d2f9847c | |||
| 1c7f299b98 | |||
| 602fdce0ee | |||
| 61fde6a2ca | |||
| d843bcc258 | |||
| f2856e0f26 | |||
| 58ef1aa311 | |||
| 6a6570b8e3 | |||
| 29a0860caa | |||
| fb760a9f6d | |||
| c9f13f4398 | |||
| dd03a8cf63 | |||
| 0a6ade4da9 | |||
| 5c8c11d78b | |||
| 00502cc565 | |||
| 0febe3fda5 | |||
| fcd4bb4561 | |||
| 89f763e65d | |||
| 075eb94fa2 | |||
| e9cf8a9180 | |||
| 64ad353628 | |||
| 5518865bc6 | |||
| 50321d897a | |||
| e18a7e9ce0 | |||
| 536b590080 | |||
| 098ce0673a | |||
| 2677796322 | |||
| 5cc7fb30ed | |||
| 222b8103d6 | |||
| 727d5b0ace | |||
| d7b45e5f01 | |||
| 578a262d90 | |||
| c6e11f88b4 | |||
| b795331efb | |||
| f1e5bd3ed4 | |||
| d8d56f77f9 | |||
| 26b221532e | |||
| 15d3206f6f | |||
| 59e2e928a8 | |||
| 51f59e4fcd | |||
| f823127825 | |||
| d41d535ab7 | |||
| 9a4a8de341 | |||
| 2d6f60abaa | |||
| d201f798fb | |||
| a1b0108503 | |||
| f0275d2349 | |||
| 9dafde8a43 | |||
| fa8f86ab7b | |||
| 41c9daa939 | |||
| 83186ba36e | |||
| 3205e3d022 | |||
| 3f272b36d4 | |||
| b238579fe6 | |||
| ce2f990eb1 | |||
| b11b8732aa | |||
| 5cd441da7b | |||
| 2e768fb491 | |||
| e8755ff617 | |||
| e41ee47371 | |||
| 7a68a68e76 | |||
| 94594db20a | |||
| 7e672e8b8e | |||
| 54e2cacb00 | |||
| c0f1dfdb0b | |||
| 29bc79996b | |||
| 99af2b8b16 | |||
| dd0c3e6fba | |||
| 5b2746f389 | |||
| e9c1de9664 | |||
| 6ca4bd4912 | |||
| c34ee85e48 | |||
| 91e8eb1def | |||
| a01b8fe083 | |||
| 550fb542d4 | |||
| 7841063783 | |||
| 8e05b2e2f8 | |||
| 64e1c93d16 | |||
| b227054b52 | |||
| 66bd6f99c5 | |||
| c6579864b8 | |||
| 2361c329e2 | |||
| 5ea149d878 | |||
| 30bd18f816 | |||
| 0f0efac866 | |||
| 04563c0d0d | |||
| 9316eccabe | |||
| b71d6660a6 | |||
| 0e2fec4e93 | |||
| ff171282cc | |||
| ea8fe208d0 | |||
| 9ae9c387cc | |||
| 772b4f6528 | |||
| 4a16ca0d5a | |||
| 316ce856f7 | |||
| 6e0321f488 | |||
| 338d2ae04e | |||
| 4419f7f429 | |||
| 797a6b0429 | |||
| d0b545dfb7 | |||
| b0bff53bbd | |||
| b4adf3d88d | |||
| eefdc548b2 | |||
| fb918e2d6e | |||
| 3d9001a5e4 | |||
| fbe7d63a24 | |||
| d718b0898b | |||
| 44c7211b5f | |||
| 157c93b967 | |||
| 7babc280a0 | |||
| e364e480e8 | |||
| bfefe7e98a | |||
| 831cca7853 | |||
| 46f3b1c02c | |||
| 8a1ae2ffa0 | |||
| 145c819fc1 | |||
| a9ea231de0 | |||
| c2488af1c3 | |||
| ecf7a447a7 | |||
| f8e61af2f9 | |||
| ee61d986d8 | |||
| 8fe8cec09a | |||
| b953456d6b | |||
| 4057699cad | |||
| d3e7fc6067 | |||
| 09a8574d83 | |||
| 7695cc185f | |||
| fc7208020e | |||
| 75d5930835 | |||
| 3c9e16169e | |||
| 9e1076f302 | |||
| 75ab87e109 | |||
| 0b8251fce2 | |||
| f57b71ae96 | |||
| ce324c3de1 | |||
| 769f253e7d | |||
| fbd5bb57ac | |||
| b9eb5687cd | |||
| cbd230a7e0 |
@@ -1,85 +0,0 @@
name: CI
# This workflow is triggered on pushes & pull requests
on: [pull_request]

jobs:
  build:
    name: Build
    runs-on: ubuntu-latest
    container: techknowlogick/xgo:go-1.17.x

    # Service containers to run with `build` (Required for end-to-end testing)
    services:
      influxdb:
        image: influxdb:2.2
        env:
          DOCKER_INFLUXDB_INIT_MODE: setup
          DOCKER_INFLUXDB_INIT_USERNAME: admin
          DOCKER_INFLUXDB_INIT_PASSWORD: password12345
          DOCKER_INFLUXDB_INIT_ORG: scrutiny
          DOCKER_INFLUXDB_INIT_BUCKET: metrics
          DOCKER_INFLUXDB_INIT_ADMIN_TOKEN: my-super-secret-auth-token
        ports:
          - 8086:8086
    env:
      PROJECT_PATH: /go/src/github.com/analogj/scrutiny
      CGO_ENABLED: 1
    steps:
      - name: Git
        run: |
          apt-get update && apt-get install -y software-properties-common
          add-apt-repository ppa:git-core/ppa && apt-get update && apt-get install -y git
          git --version
      - name: Checkout
        uses: actions/checkout@v2
      - name: Test
        run: |
          mkdir -p $(dirname "$PROJECT_PATH")
          cp -a $GITHUB_WORKSPACE $PROJECT_PATH
          cd $PROJECT_PATH

          go mod vendor
          go test -race -coverprofile=coverage.txt -covermode=atomic -v -tags "static" $(go list ./... | grep -v /vendor/)
      - name: Generate coverage report
        uses: codecov/codecov-action@v2
        with:
          files: ${{ env.PROJECT_PATH }}/coverage.txt
          flags: unittests
          fail_ci_if_error: true
          verbose: true
      - name: Build Binaries
        run: |

          cd $PROJECT_PATH
          make all

      - name: Archive
        uses: actions/upload-artifact@v2
        with:
          name: binaries.zip
          path: |
            /build/scrutiny-web-linux-amd64
            /build/scrutiny-collector-metrics-linux-amd64
            /build/scrutiny-web-linux-arm64
            /build/scrutiny-collector-metrics-linux-arm64
            /build/scrutiny-web-linux-arm-5
            /build/scrutiny-collector-metrics-linux-arm-5
            /build/scrutiny-web-linux-arm-6
            /build/scrutiny-collector-metrics-linux-arm-6
            /build/scrutiny-web-linux-arm-7
            /build/scrutiny-collector-metrics-linux-arm-7
            /build/scrutiny-web-windows-4.0-amd64.exe
            /build/scrutiny-collector-metrics-windows-4.0-amd64.exe
            # /build/scrutiny-web-darwin-arm64
            # /build/scrutiny-collector-metrics-darwin-arm64
            # /build/scrutiny-web-darwin-amd64
            # /build/scrutiny-collector-metrics-darwin-amd64
            # /build/scrutiny-web-freebsd-amd64
            # /build/scrutiny-collector-metrics-freebsd-amd64
      - uses: codecov/codecov-action@v2
        with:
          file: ${{ env.PROJECT_PATH }}/coverage.txt
          flags: unittests
          fail_ci_if_error: false

@@ -0,0 +1,114 @@
name: CI
# This workflow is triggered on pushes & pull requests
on: [pull_request]

jobs:
  test-frontend:
    name: Test Frontend
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Test Frontend
        run: |
          make binary-frontend-test-coverage
      - name: Upload coverage
        uses: actions/upload-artifact@v3
        with:
          name: coverage
          path: ${{ github.workspace }}/webapp/frontend/coverage/lcov.info
          retention-days: 1
  test-backend:
    name: Test Backend
    runs-on: ubuntu-latest
    container: ghcr.io/packagrio/packagr:latest-golang
    # Service containers to run with `build` (Required for end-to-end testing)
    services:
      influxdb:
        image: influxdb:2.2
        env:
          DOCKER_INFLUXDB_INIT_MODE: setup
          DOCKER_INFLUXDB_INIT_USERNAME: admin
          DOCKER_INFLUXDB_INIT_PASSWORD: password12345
          DOCKER_INFLUXDB_INIT_ORG: scrutiny
          DOCKER_INFLUXDB_INIT_BUCKET: metrics
          DOCKER_INFLUXDB_INIT_ADMIN_TOKEN: my-super-secret-auth-token
        ports:
          - 8086:8086
    env:
      STATIC: true
    steps:
      - name: Git
        run: |
          apt-get update && apt-get install -y software-properties-common
          add-apt-repository ppa:git-core/ppa && apt-get update && apt-get install -y git
          git --version
      - name: Checkout
        uses: actions/checkout@v2
      - name: Test Backend
        run: |
          make binary-clean binary-test-coverage
      - name: Upload coverage
        uses: actions/upload-artifact@v3
        with:
          name: coverage
          path: ${{ github.workspace }}/coverage.txt
          retention-days: 1
  test-coverage:
    name: Test Coverage Upload
    needs:
      - test-backend
      - test-frontend
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Download coverage reports
        uses: actions/download-artifact@v3
        with:
          name: coverage
      - name: Upload coverage reports
        uses: codecov/codecov-action@v2
        with:
          files: ${{ github.workspace }}/coverage.txt,${{ github.workspace }}/lcov.info
          flags: unittests
          fail_ci_if_error: true
          verbose: true

  build:
    name: Build ${{ matrix.cfg.goos }}/${{ matrix.cfg.goarch }}
    runs-on: ${{ matrix.cfg.on }}
    env:
      GOOS: ${{ matrix.cfg.goos }}
      GOARCH: ${{ matrix.cfg.goarch }}
      GOARM: ${{ matrix.cfg.goarm }}
      STATIC: true
    strategy:
      matrix:
        cfg:
          - { on: ubuntu-latest, goos: linux, goarch: amd64 }
          - { on: ubuntu-latest, goos: linux, goarch: arm, goarm: 5 }
          - { on: ubuntu-latest, goos: linux, goarch: arm, goarm: 6 }
          - { on: ubuntu-latest, goos: linux, goarch: arm, goarm: 7 }
          - { on: ubuntu-latest, goos: linux, goarch: arm64 }
          - { on: macos-latest, goos: darwin, goarch: amd64 }
          - { on: macos-latest, goos: darwin, goarch: arm64 }
          - { on: macos-latest, goos: freebsd, goarch: amd64 }
          - { on: windows-latest, goos: windows, goarch: amd64 }
          - { on: windows-latest, goos: windows, goarch: arm64 }
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - uses: actions/setup-go@v3
        with:
          go-version: '^1.20.1'
      - name: Build Binaries
        run: |
          make binary-clean binary-all
      - name: Archive
        uses: actions/upload-artifact@v2
        with:
          name: binaries.zip
          path: |
            scrutiny-web-*
            scrutiny-collector-metrics-*
@@ -1,7 +1,5 @@
name: Docker
on:
  schedule:
    - cron: '36 12 * * *'
  push:
    branches: [ master, beta ]
  # Publish semver tags as releases.
@@ -76,15 +74,6 @@ jobs:
        uses: actions/checkout@v2
      - name: "Populate frontend version information"
        run: "cd webapp/frontend && ./git.version.sh"
      - name: "Generate frontend"
        uses: addnab/docker-run-action@v3
        with:
          image: node:lts
          options: -v ${{ github.workspace }}:/work
          run: |
            cd /work
            make frontend && echo "print contents of /work/dist" && ls -alt /work/dist

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2
        with:
@@ -136,16 +125,6 @@ jobs:
        uses: actions/checkout@v2
      - name: "Populate frontend version information"
        run: "cd webapp/frontend && ./git.version.sh"
      - name: "Generate frontend & version information"
        uses: addnab/docker-run-action@v3
        with:
          image: node:lts
          options: -v ${{ github.workspace }}:/work
          run: |
            cd /work
            make frontend && echo "print contents of /work/dist" && ls -alt /work/dist


      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2
        with:
@@ -183,4 +162,4 @@ jobs:
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          # cache-from: type=gha
          # cache-to: type=gha,mode=max
          # cache-to: type=gha,mode=max

@@ -0,0 +1,59 @@
name: Docker - Nightly
on:
  schedule:
    - cron: '36 12 * * *'

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}

jobs:
  omnibus:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@v2
      - name: "Populate frontend version information"
        run: "cd webapp/frontend && ./git.version.sh"
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2
        with:
          platforms: 'arm64,arm'
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      # Login against a Docker registry except on PR
      # https://github.com/docker/login-action
      - name: Log into registry ${{ env.REGISTRY }}
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v2
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      # Extract metadata (tags, labels) for Docker
      # https://github.com/docker/metadata-action
      - name: Extract Docker metadata
        id: meta
        uses: docker/metadata-action@v4
        with:
          tags: |
            type=ref,enable=true,event=branch,suffix=-omnibus-nightly
            type=ref,enable=true,event=tag,suffix=-omnibus-nightly
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
      # Build and push Docker image with Buildx (don't push on PR)
      # https://github.com/docker/build-push-action
      - name: Build and push Docker image
        uses: docker/build-push-action@v3
        with:
          platforms: linux/amd64,linux/arm64
          context: .
          file: docker/Dockerfile
          push: false
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          # cache-from: type=gha
          # cache-to: type=gha,mode=max
@@ -1,83 +0,0 @@
# compiles FreeBSD artifacts and attaches them to build
name: Release FreeBSD

on:
  release:
    # Only use the types keyword to narrow down the activity types that will trigger your workflow.
    types: [published]
  workflow_dispatch:
    inputs:
      tag_name:
        description: 'tag to build artifacts for'
        required: true
        default: 'v0.0.0'
jobs:

  release-freebsd:
    name: Release FreeBSD
    runs-on: macos-10.15
    env:
      PROJECT_PATH: /go/src/github.com/analogj/scrutiny
      GOPATH: /go
      GOOS: freebsd
      GOARCH: amd64
    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          ref: ${{github.event.release.tag_name || github.event.inputs.tag_name }}
      - name: Build Binaries
        uses: vmactions/freebsd-vm@v0.1.5
        with:
          envs: 'PROJECT_PATH GOPATH GOOS GOARCH'
          usesh: true
          #TODO: lock go version using https://www.jeremymorgan.com/tutorials/golang/how-to-install-go-freebsd/
          prepare: pkg install -y curl go gmake
          run: |
            pwd
            ls -lah
            whoami
            freebsd-version

            mkdir -p $(dirname "$PROJECT_PATH")
            cp -R $GITHUB_WORKSPACE $PROJECT_PATH
            cd $PROJECT_PATH

            mkdir -p $GITHUB_WORKSPACE/dist

            echo "building web binary (OS = ${GOOS}, ARCH = ${GOARCH})"
            go build -ldflags "-extldflags=-static -X main.goos=${GOOS} -X main.goarch=${GOARCH}" -o $GITHUB_WORKSPACE/dist/scrutiny-web-${GOOS}-${GOARCH} -tags "static netgo sqlite_omit_load_extension" webapp/backend/cmd/scrutiny/scrutiny.go

            chmod +x "$GITHUB_WORKSPACE/dist/scrutiny-web-${GOOS}-${GOARCH}"
            file "$GITHUB_WORKSPACE/dist/scrutiny-web-${GOOS}-${GOARCH}" || true
            ldd "$GITHUB_WORKSPACE/dist/scrutiny-web-${GOOS}-${GOARCH}" || true

            echo "building collector binary (OS = ${GOOS}, ARCH = ${GOARCH})"
            go build -ldflags "-extldflags=-static -X main.goos=${GOOS} -X main.goarch=${GOARCH}" -o $GITHUB_WORKSPACE/dist/scrutiny-collector-metrics-${GOOS}-${GOARCH} -tags "static netgo" collector/cmd/collector-metrics/collector-metrics.go

            chmod +x "$GITHUB_WORKSPACE/dist/scrutiny-collector-metrics-${GOOS}-${GOARCH}"
            file "$GITHUB_WORKSPACE/dist/scrutiny-collector-metrics-${GOOS}-${GOARCH}" || true
            ldd "$GITHUB_WORKSPACE/dist/scrutiny-collector-metrics-${GOOS}-${GOARCH}" || true

      - name: Release Asset - Collector - freebsd-amd64
        id: upload-release-asset2
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.SCRUTINY_GITHUB_TOKEN }}
        with:
          upload_url: ${{ github.event.release.upload_url }} # This pulls from the CREATE RELEASE step above, referencing its ID to get its outputs object, which includes an `upload_url`. See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps
          asset_path: './dist/scrutiny-collector-metrics-freebsd-amd64'
          asset_name: scrutiny-collector-metrics-freebsd-amd64
          asset_content_type: application/octet-stream

      - name: Release Asset - Web - freebsd-amd64
        id: upload-release-asset1
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.SCRUTINY_GITHUB_TOKEN }}
        with:
          upload_url: ${{ github.event.release.upload_url }} # This pulls from the CREATE RELEASE step above, referencing its ID to get its outputs object, which includes an `upload_url`. See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps
          asset_path: './dist/scrutiny-web-freebsd-amd64'
          asset_name: scrutiny-web-freebsd-amd64
          asset_content_type: application/octet-stream

@@ -19,11 +19,8 @@ jobs:
        run: "cd webapp/frontend && ./git.version.sh"
      - name: Build Frontend
        run: |
          cd webapp/frontend
          npm install -g @angular/cli@9.1.4
          npm install
          mkdir -p dist
          npm run build:prod -- --output-path=dist
          apt-get update && apt-get install -y make
          make binary-frontend
          tar -czf scrutiny-web-frontend.tar.gz dist
      - name: Upload Frontend Asset
        id: upload-release-asset3
@@ -32,6 +29,6 @@ jobs:
          GITHUB_TOKEN: ${{ secrets.SCRUTINY_GITHUB_TOKEN }}
        with:
          upload_url: ${{ github.event.release.upload_url }} # This pulls from the CREATE RELEASE step above, referencing its ID to get its outputs object, which includes an `upload_url`. See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps
          asset_path: './webapp/frontend/scrutiny-web-frontend.tar.gz'
          asset_path: './scrutiny-web-frontend.tar.gz'
          asset_name: scrutiny-web-frontend.tar.gz
          asset_content_type: application/gzip

@@ -13,10 +13,10 @@ on:
        default: 'webapp/backend/pkg/version/version.go'

jobs:
  build:
    name: Build
  release:
    name: Create Release Commit
    runs-on: ubuntu-latest
    container: techknowlogick/xgo:go-1.17.x
    container: ghcr.io/packagrio/packagr:latest-golang
    # Service containers to run with `build` (Required for end-to-end testing)
    services:
      influxdb:
@@ -31,8 +31,7 @@ jobs:
        ports:
          - 8086:8086
    env:
      PROJECT_PATH: /go/src/github.com/analogj/scrutiny
      CGO_ENABLED: 1
      STATIC: true
    steps:
      - name: Git
        run: |
@@ -53,34 +52,80 @@ jobs:
          GITHUB_TOKEN: ${{ secrets.SCRUTINY_GITHUB_TOKEN }} # Leave this line unchanged
      - name: Test
        run: |
          mkdir -p $(dirname "$PROJECT_PATH")
          cp -a $GITHUB_WORKSPACE $PROJECT_PATH
          cd $PROJECT_PATH

          go mod vendor
          go test -v -tags "static" $(go list ./... | grep -v /vendor/)

      - name: Build Binaries
        run: |

          cd $PROJECT_PATH
          make all

          # restore modified dir to GH workspace.
          cp -arf $PROJECT_PATH/. $GITHUB_WORKSPACE/

          # copy all the build artifacts to the GH workspace
          cp -arf /build/. $GITHUB_WORKSPACE/

      - name: Commit Changes
          make binary-clean binary-test-coverage
      - name: Commit Changes Locally
        id: commit
        uses: packagrio/action-releasr-go@master
        env:
          # This is necessary in order to push a commit to the repo
          GITHUB_TOKEN: ${{ secrets.SCRUTINY_GITHUB_TOKEN }} # Leave this line unchanged
        with:
          version_metadata_path: ${{ github.event.inputs.version_metadata_path }}
      - name: Publish Release
      - name: Upload workspace
        uses: actions/upload-artifact@v3
        with:
          name: workspace
          path: ${{ github.workspace }}/**/*
          retention-days: 1

  build:
    name: Build ${{ matrix.cfg.goos }}/${{ matrix.cfg.goarch }}${{ matrix.cfg.goarm }}
    needs: release
    runs-on: ${{ matrix.cfg.on }}
    env:
      GOOS: ${{ matrix.cfg.goos }}
      GOARCH: ${{ matrix.cfg.goarch }}
      GOARM: ${{ matrix.cfg.goarm }}
      STATIC: true
    strategy:
      matrix:
        cfg:
          - { on: ubuntu-latest, goos: linux, goarch: amd64 }
          - { on: ubuntu-latest, goos: linux, goarch: arm, goarm: 5 }
          - { on: ubuntu-latest, goos: linux, goarch: arm, goarm: 6 }
          - { on: ubuntu-latest, goos: linux, goarch: arm, goarm: 7 }
          - { on: ubuntu-latest, goos: linux, goarch: arm64 }
          - { on: macos-latest, goos: darwin, goarch: amd64 }
          - { on: macos-latest, goos: darwin, goarch: arm64 }
          - { on: macos-latest, goos: freebsd, goarch: amd64 }
          - { on: windows-latest, goos: windows, goarch: amd64 }
          - { on: windows-latest, goos: windows, goarch: arm64 }
    steps:
      - name: Download workspace
        uses: actions/download-artifact@v3
        with:
          name: workspace
      - uses: actions/setup-go@v3
        with:
          go-version: '1.20.1' # The Go version to download (if necessary) and use.
      - name: Build Binaries
        run: |
          make binary-clean binary-all
      - name: Archive
        uses: actions/upload-artifact@v2
        with:
          name: binaries.zip
          path: |
            scrutiny-web-*
            scrutiny-collector-metrics-*

  release-publish:
    name: Publish Release
    needs: build
    runs-on: ubuntu-latest
    steps:
      - name: Download workspace
        uses: actions/download-artifact@v3
        with:
          name: workspace
      - name: Download binaries
        uses: actions/download-artifact@v3
        with:
          name: binaries.zip
      - name: List
        shell: bash
        run: |
          ls -alt
      - name: Publish Release & Assets
        id: publish
        uses: packagrio/action-publishr-go@master
        env:
@@ -89,15 +134,23 @@ jobs:
        with:
          version_metadata_path: ${{ github.event.inputs.version_metadata_path }}
          upload_assets:
            scrutiny-web-linux-amd64
            scrutiny-collector-metrics-darwin-amd64
            scrutiny-collector-metrics-darwin-arm64
            scrutiny-collector-metrics-freebsd-amd64
            scrutiny-collector-metrics-linux-amd64
            scrutiny-web-linux-arm64
            scrutiny-collector-metrics-linux-arm64
            scrutiny-web-linux-arm-5
            scrutiny-collector-metrics-linux-arm-5
            scrutiny-web-linux-arm-6
            scrutiny-collector-metrics-linux-arm-6
            scrutiny-web-linux-arm-7
            scrutiny-collector-metrics-linux-arm-7
            scrutiny-web-windows-4.0-amd64.exe
            scrutiny-collector-metrics-windows-4.0-amd64.exe
            scrutiny-collector-metrics-linux-arm64
            scrutiny-collector-metrics-windows-amd64.exe
            scrutiny-collector-metrics-windows-arm64.exe
            scrutiny-web-darwin-amd64
            scrutiny-web-darwin-arm64
            scrutiny-web-freebsd-amd64
            scrutiny-web-linux-amd64
            scrutiny-web-linux-arm-5
            scrutiny-web-linux-arm-6
            scrutiny-web-linux-arm-7
            scrutiny-web-linux-arm64
            scrutiny-web-windows-amd64.exe
            scrutiny-web-windows-arm64.exe

@@ -1,19 +0,0 @@
name: Cleanup Artifacts

on:
  schedule:
    # Every day at 1am
    - cron: '0 1 * * *'

jobs:
  remove-old-artifacts:
    runs-on: ubuntu-latest
    timeout-minutes: 10

    steps:
      - name: Remove old artifacts
        uses: c-hive/gha-remove-artifacts@v1
        with:
          age: '1 day'
          skip-tags: true
          skip-recent: 5
@@ -5,17 +5,18 @@ The Scrutiny repository is a [monorepo](https://en.wikipedia.org/wiki/Monorepo)

- Scrutiny Frontend Angular SPA
- S.M.A.R.T Collector

Depending on the functionality you are adding, you may need to set up a development environment for 1 or more projects.

# Modifying the Scrutiny Backend Server (API)

1. install the [Go runtime](https://go.dev/doc/install) (v1.17+)
1. install the [Go runtime](https://go.dev/doc/install) (v1.20+)
2. download the `scrutiny-web-frontend.tar.gz` for the [latest release](https://github.com/AnalogJ/scrutiny/releases/latest). Extract to a folder named `dist`
3. create a `scrutiny.yaml` config file
```yaml
# config file for local development. store as scrutiny.yaml
version: 1

web:
  listen:
    port: 8080
@@ -28,13 +29,13 @@ Depending on the functionality you are adding, you may need to setup a developme
      path: ./dist
  influxdb:
    retention_policy: false

log:
  file: 'web.log' #absolute or relative paths allowed, eg. web.log
  level: DEBUG
```
4. start an InfluxDB docker container.
```bash
docker run -p 8086:8086 --rm influxdb:2.2
```
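5. with InfluxDB up, you can run the backend directly from source against your local `scrutiny.yaml`. A minimal sketch — the `start` subcommand and `--config` flag are assumptions based on the packaged binary's CLI, so adjust to whatever `--help` reports in your checkout:
```bash
# from the repository root; assumes scrutiny.yaml and ./dist exist here
go run webapp/backend/cmd/scrutiny/scrutiny.go start --config ./scrutiny.yaml
```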
@@ -54,21 +55,21 @@ The frontend is written in Angular. If you're working on the frontend and can us
```bash
cd webapp/frontend
npm install
npm run start -- --deploy-url="/web/" --base-href="/web/" --port 4200
npm run start -- --serve-path="/web/" --port 4200
```
3. open your browser and visit [http://localhost:4200/web](http://localhost:4200/web)

# Modifying both Scrutiny Backend and Frontend Applications
If you're developing a feature that requires changes to the backend and the frontend, or a frontend feature that requires real data,
you'll need to follow the steps below:

1. install the [Go runtime](https://go.dev/doc/install) (v1.17+)
1. install the [Go runtime](https://go.dev/doc/install) (v1.20+)
2. install [NodeJS](https://nodejs.org/en/download/)
3. create a `scrutiny.yaml` config file
```yaml
# config file for local development. store as scrutiny.yaml
version: 1

web:
  listen:
    port: 8080
@@ -81,7 +82,7 @@ you'll need to follow the steps below:
      path: ./dist
  influxdb:
    retention_policy: false

log:
  file: 'web.log' #absolute or relative paths allowed, eg. web.log
  level: DEBUG
```
@@ -184,4 +185,4 @@ docker run -p 8086:8086 -d --rm \
  influxdb:2.2

go test ./...

```

@@ -1,66 +1,133 @@
export CGO_ENABLED = 1
.ONESHELL: # Applies to every targets in the file! .ONESHELL instructs make to invoke a single instance of the shell and provide it with the entire recipe, regardless of how many lines it contains.
.SHELLFLAGS = -ec

########################################################################################################################
# Global Env Settings
########################################################################################################################

GO_WORKSPACE ?= /go/src/github.com/analogj/scrutiny

BINARY=\
	linux/amd64 \
	linux/arm-5 \
	linux/arm-6 \
	linux/arm-7 \
	linux/arm64 \
COLLECTOR_BINARY_NAME = scrutiny-collector-metrics
WEB_BINARY_NAME = scrutiny-web
LD_FLAGS =

.ONESHELL: # Applies to every targets in the file! .ONESHELL instructs make to invoke a single instance of the shell and provide it with the entire recipe, regardless of how many lines it contains.
.PHONY: all $(BINARY)
all: $(BINARY) windows/amd64
STATIC_TAGS =
# enable multiarch docker image builds
DOCKER_TARGETARCH_BUILD_ARG =
ifdef TARGETARCH
DOCKER_TARGETARCH_BUILD_ARG := $(DOCKER_TARGETARCH_BUILD_ARG) --build-arg TARGETARCH=$(TARGETARCH)
endif

$(BINARY): OS = $(word 1,$(subst /, ,$*))
$(BINARY): ARCH = $(word 2,$(subst /, ,$*))
$(BINARY): build/scrutiny-web-%:
	@echo "building web binary (OS = $(OS), ARCH = $(ARCH))"
	xgo -v --targets="$(OS)/$(ARCH)" -ldflags "-extldflags=-static -X main.goos=$(OS) -X main.goarch=$(ARCH)" -out scrutiny-web -tags "static netgo sqlite_omit_load_extension" ${GO_WORKSPACE}/webapp/backend/cmd/scrutiny/
# enable to build static binaries.
ifdef STATIC
export CGO_ENABLED = 0
LD_FLAGS := $(LD_FLAGS) -extldflags=-static
STATIC_TAGS := $(STATIC_TAGS) -tags "static netgo"
endif
ifdef GOOS
COLLECTOR_BINARY_NAME := $(COLLECTOR_BINARY_NAME)-$(GOOS)
WEB_BINARY_NAME := $(WEB_BINARY_NAME)-$(GOOS)
LD_FLAGS := $(LD_FLAGS) -X main.goos=$(GOOS)
endif
ifdef GOARCH
COLLECTOR_BINARY_NAME := $(COLLECTOR_BINARY_NAME)-$(GOARCH)
WEB_BINARY_NAME := $(WEB_BINARY_NAME)-$(GOARCH)
LD_FLAGS := $(LD_FLAGS) -X main.goarch=$(GOARCH)
endif
ifdef GOARM
COLLECTOR_BINARY_NAME := $(COLLECTOR_BINARY_NAME)-$(GOARM)
WEB_BINARY_NAME := $(WEB_BINARY_NAME)-$(GOARM)
endif
ifeq ($(OS),Windows_NT)
COLLECTOR_BINARY_NAME := $(COLLECTOR_BINARY_NAME).exe
WEB_BINARY_NAME := $(WEB_BINARY_NAME).exe
endif

	chmod +x "/build/scrutiny-web-$(OS)-$(ARCH)"
	file "/build/scrutiny-web-$(OS)-$(ARCH)" || true
	ldd "/build/scrutiny-web-$(OS)-$(ARCH)" || true
########################################################################################################################
# Binary
########################################################################################################################
.PHONY: all
all: binary-all

	@echo "building collector binary (OS = $(OS), ARCH = $(ARCH))"
	xgo -v --targets="$(OS)/$(ARCH)" -ldflags "-extldflags=-static -X main.goos=$(OS) -X main.goarch=$(ARCH)" -out scrutiny-collector-metrics -tags "static netgo" ${GO_WORKSPACE}/collector/cmd/collector-metrics/

	chmod +x "/build/scrutiny-collector-metrics-$(OS)-$(ARCH)"
	file "/build/scrutiny-collector-metrics-$(OS)-$(ARCH)" || true
	ldd "/build/scrutiny-collector-metrics-$(OS)-$(ARCH)" || true

windows/amd64: export OS = windows
windows/amd64: export ARCH = amd64
windows/amd64:
	@echo "building web binary (OS = $(OS), ARCH = $(ARCH))"
	xgo -v --targets="$(OS)/$(ARCH)" -ldflags "-extldflags=-static -X main.goos=$(OS) -X main.goarch=$(ARCH)" -out scrutiny-web -tags "static netgo sqlite_omit_load_extension" ${GO_WORKSPACE}/webapp/backend/cmd/scrutiny/

	@echo "building collector binary (OS = $(OS), ARCH = $(ARCH))"
	xgo -v --targets="$(OS)/$(ARCH)" -ldflags "-extldflags=-static -X main.goos=$(OS) -X main.goarch=$(ARCH)" -out scrutiny-collector-metrics -tags "static netgo" ${GO_WORKSPACE}/collector/cmd/collector-metrics/
.PHONY: binary-all
binary-all: binary-collector binary-web
	@echo "built binary-collector and binary-web targets"


docker-collector:
	@echo "building collector docker image"
	docker build --build-arg TARGETARCH=amd64 -f docker/Dockerfile.collector -t analogj/scrutiny-dev:collector .
.PHONY: binary-clean
binary-clean:
	go clean

docker-web:
	@echo "building web docker image"
	docker build --build-arg TARGETARCH=amd64 -f docker/Dockerfile.web -t analogj/scrutiny-dev:web .
.PHONY: binary-dep
binary-dep:
	go mod vendor

docker-omnibus:
	@echo "building omnibus docker image"
	docker build --build-arg TARGETARCH=amd64 -f docker/Dockerfile -t analogj/scrutiny-dev:omnibus .
.PHONY: binary-test
binary-test: binary-dep
	go test -v $(STATIC_TAGS) ./...

.PHONY: binary-test-coverage
binary-test-coverage: binary-dep
	go test -coverprofile=coverage.txt -covermode=atomic -v $(STATIC_TAGS) ./...

.PHONY: binary-collector
binary-collector: binary-dep
	go build -ldflags "$(LD_FLAGS)" -o $(COLLECTOR_BINARY_NAME) $(STATIC_TAGS) ./collector/cmd/collector-metrics/
ifneq ($(OS),Windows_NT)
	chmod +x $(COLLECTOR_BINARY_NAME)
	file $(COLLECTOR_BINARY_NAME) || true
	ldd $(COLLECTOR_BINARY_NAME) || true
	./$(COLLECTOR_BINARY_NAME) || true
endif

.PHONY: binary-web
binary-web: binary-dep
	go build -ldflags "$(LD_FLAGS)" -o $(WEB_BINARY_NAME) $(STATIC_TAGS) ./webapp/backend/cmd/scrutiny/
ifneq ($(OS),Windows_NT)
	chmod +x $(WEB_BINARY_NAME)
	file $(WEB_BINARY_NAME) || true
	ldd $(WEB_BINARY_NAME) || true
	./$(WEB_BINARY_NAME) || true
endif

########################################################################################################################
# Binary
########################################################################################################################

.PHONY: binary-frontend
# reduce logging, disable angular-cli analytics for ci environment
frontend: export NPM_CONFIG_LOGLEVEL = warn
frontend: export NG_CLI_ANALYTICS = false
frontend:
binary-frontend: export NPM_CONFIG_LOGLEVEL = warn
binary-frontend: export NG_CLI_ANALYTICS = false
binary-frontend:
	cd webapp/frontend
	npm install -g @angular/cli@9.1.4
	npm install -g @angular/cli@v13-lts
	mkdir -p $(CURDIR)/dist
	npm ci
	npm run build:prod -- --output-path=$(CURDIR)/dist

# clean:
#	rm scrutiny-collector-metrics-* scrutiny-web-*
.PHONY: binary-frontend-test-coverage
# reduce logging, disable angular-cli analytics for ci environment
binary-frontend-test-coverage:
	cd webapp/frontend
	npm ci
	npx ng test --watch=false --browsers=ChromeHeadless --code-coverage

########################################################################################################################
# Docker
# NOTE: these docker make targets are only used for local development (not used by Github Actions/CI)
# NOTE: docker-web and docker-omnibus require `make binary-frontend` or frontend.tar.gz content in /dist before executing.
########################################################################################################################
.PHONY: docker-collector
docker-collector:
	@echo "building collector docker image"
	docker build $(DOCKER_TARGETARCH_BUILD_ARG) -f docker/Dockerfile.collector -t analogj/scrutiny-dev:collector .

.PHONY: docker-web
docker-web:
	@echo "building web docker image"
	docker build $(DOCKER_TARGETARCH_BUILD_ARG) -f docker/Dockerfile.web -t analogj/scrutiny-dev:web .

.PHONY: docker-omnibus
docker-omnibus:
	@echo "building omnibus docker image"
	docker build $(DOCKER_TARGETARCH_BUILD_ARG) -f docker/Dockerfile -t analogj/scrutiny-dev:omnibus .

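The rewritten Makefile drives everything through the `STATIC`/`GOOS`/`GOARCH`/`GOARM` environment variables instead of the old xgo target list, so a cross-build is just a matter of exporting the right values; a quick sketch (the variable values here are illustrative):

```bash
# static linux/arm64 cross-build; emits scrutiny-web-linux-arm64 and
# scrutiny-collector-metrics-linux-arm64 in the repo root (STATIC flips CGO_ENABLED to 0)
STATIC=true GOOS=linux GOARCH=arm64 make binary-clean binary-all

# run the backend tests with the same static build tags CI uses
STATIC=true make binary-test
```
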
@@ -46,7 +46,7 @@ Scrutiny is a simple but focused application, with a couple of core features:

- Customized thresholds using real world failure rates
- Temperature tracking
- Provided as an all-in-one Docker image (but can be installed manually)
- Future Configurable Alerting/Notifications via Webhooks
- Configurable Alerting/Notifications via Webhooks
- (Future) Hard Drive performance testing & tracking

# Getting Started
@@ -69,7 +69,7 @@ See [docs/TROUBLESHOOTING_DEVICE_COLLECTOR.md](./docs/TROUBLESHOOTING_DEVICE_COL

If you're using Docker, getting started is as simple as running the following command:

> See [docker/example.omnibus.docker-compose.yml](./docker/example.omnibus.docker-compose.yml) for a docker-compose file.

```bash
docker run -it --rm -p 8080:8080 -p 8086:8086 \
```
@@ -91,10 +91,14 @@ docker run -it --rm -p 8080:8080 -p 8086:8086 \

### Hub/Spoke Deployment

In addition to the Omnibus image (available under the `latest` tag) there are 2 other Docker images available:
In addition to the Omnibus image (available under the `latest` tag) you can deploy in Hub/Spoke mode, which requires 3 other Docker images:

- `ghcr.io/analogj/scrutiny:master-collector` - Contains the Scrutiny data collector, `smartctl` binary and cron-like scheduler. You can run one collector on each server.
- `ghcr.io/analogj/scrutiny:master-web` - Contains the Web UI, API and Database. Only one container necessary
- `ghcr.io/analogj/scrutiny:master-web` - Contains the Web UI and API. Only one container necessary
- `influxdb:2.2` - InfluxDB image, used by the Web container to persist SMART data. Only one container necessary
See [docs/TROUBLESHOOTING_INFLUXDB.md](./docs/TROUBLESHOOTING_INFLUXDB.md)

> See [docker/example.hubspoke.docker-compose.yml](./docker/example.hubspoke.docker-compose.yml) for a docker-compose file.

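For orientation, the same hub/spoke wiring can be sketched with plain `docker run` commands. This is an illustrative outline only — the network name, device flags, and the `COLLECTOR_API_ENDPOINT` value are assumptions; consult the example compose file above for the supported configuration:

```bash
docker network create scrutiny

# hub: InfluxDB persists the SMART metrics
docker run -d --name scrutiny-influxdb --network scrutiny -p 8086:8086 influxdb:2.2

# hub: Web UI / API, reachable by the collectors
docker run -d --name scrutiny-web --network scrutiny -p 8080:8080 \
  ghcr.io/analogj/scrutiny:master-web

# spoke: one collector per monitored host, reporting to the web API
docker run -d --name scrutiny-collector --network scrutiny \
  -e COLLECTOR_API_ENDPOINT=http://scrutiny-web:8080 \
  --cap-add SYS_RAWIO --device=/dev/sda \
  ghcr.io/analogj/scrutiny:master-collector
```
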
@@ -153,7 +157,7 @@ Neither file is required, however if provided, it allows you to configure how Sc

## Cron Schedule
Unfortunately the Cron schedule cannot be configured via the `collector.yaml` (as the collector binary needs to be triggered by a scheduler/cron).
However, if you are using the official `ghcr.io/analogj/scrutiny:master-collector` or `ghcr.io/analogj/scrutiny:master-omnibus` docker images,
you can use the `COLLECTOR_CRON_SCHEDULE` environment variable to override the default cron schedule (daily @ midnight - `0 0 * * *`).

`docker run -e COLLECTOR_CRON_SCHEDULE="0 0 * * *" ...`
@@ -170,6 +174,7 @@ Scrutiny supports sending SMART device failure notifications via the following s
- IFTTT
- Join
- Mattermost
- ntfy
- Pushbullet
- Pushover
- Slack
@@ -232,18 +237,18 @@ scrutiny-collector-metrics run --debug --log-file /tmp/collector.log

# Supported Architectures

| Architecture Name | Binaries | Docker |
| --- | --- | --- |
| amd64 | :white_check_mark: | :white_check_mark: |
| arm-5 | :white_check_mark: | |
| arm-6 | :white_check_mark: | |
| arm-7 | :white_check_mark: | web/collector only. see [#236](https://github.com/AnalogJ/scrutiny/issues/236) |
| arm64 | :white_check_mark: | :white_check_mark: |
| freebsd | collector only. see [#238](https://github.com/AnalogJ/scrutiny/issues/238) | |
| macos-amd64 | | :white_check_mark: |
| macos-arm64 | | :white_check_mark: |
| windows-amd64 | :white_check_mark: | |
| linux-amd64 | :white_check_mark: | :white_check_mark: |
| linux-arm-5 | :white_check_mark: | |
| linux-arm-6 | :white_check_mark: | |
| linux-arm-7 | :white_check_mark: | web/collector only. see [#236](https://github.com/AnalogJ/scrutiny/issues/236) |
| linux-arm64 | :white_check_mark: | :white_check_mark: |
| freebsd-amd64 | :white_check_mark: | |
| macos-amd64 | :white_check_mark: | :white_check_mark: |
| macos-arm64 | :white_check_mark: | :white_check_mark: |
| windows-amd64 | :white_check_mark: | WIP, see [#15](https://github.com/AnalogJ/scrutiny/issues/15) |
| windows-arm64 | :white_check_mark: | |

# Contributing

@@ -1,6 +1,7 @@
package main

import (
	"encoding/json"
	"fmt"
	"github.com/analogj/scrutiny/collector/pkg/collector"
	"github.com/analogj/scrutiny/collector/pkg/config"
@@ -29,8 +30,14 @@ func main() {
		os.Exit(1)
	}

	configFilePath := "/opt/scrutiny/config/collector.yaml"
	configFilePathAlternative := "/opt/scrutiny/config/collector.yml"
	if !utils.FileExists(configFilePath) && utils.FileExists(configFilePathAlternative) {
		configFilePath = configFilePathAlternative
	}

	//we're going to load the config file manually, since we need to validate it.
	err = config.ReadConfig("/opt/scrutiny/config/collector.yaml") // Find and read the config file
	err = config.ReadConfig(configFilePath) // Find and read the config file
	if _, ok := err.(errors.ConfigFileMissingError); ok { // Handle errors reading the config file
		//ignore "could not find config file"
	} else if err != nil {
@@ -120,26 +127,16 @@ OPTIONS:
		config.Set("api.endpoint", apiEndpoint)
	}

	collectorLogger := logrus.WithFields(logrus.Fields{
		"type": "metrics",
	})

	if level, err := logrus.ParseLevel(config.GetString("log.level")); err == nil {
		logrus.SetLevel(level)
	} else {
		logrus.SetLevel(logrus.InfoLevel)
	}

	if config.IsSet("log.file") && len(config.GetString("log.file")) > 0 {
		logFile, err := os.OpenFile(config.GetString("log.file"), os.O_CREATE|os.O_WRONLY, 0644)
		if err != nil {
			logrus.Errorf("Failed to open log file %s for output: %s", config.GetString("log.file"), err)
			return err
		}
	collectorLogger, logFile, err := CreateLogger(config)
	if logFile != nil {
		defer logFile.Close()
		logrus.SetOutput(io.MultiWriter(os.Stderr, logFile))
	}
	if err != nil {
		return err
	}

	settingsData, err := json.MarshalIndent(config.AllSettings(), "", "\t")
	collectorLogger.Debug(string(settingsData), err)
	metricCollector, err := collector.CreateMetricsCollector(
		config,
		collectorLogger,
@@ -192,5 +189,28 @@ OPTIONS:
	if err != nil {
		log.Fatal(color.HiRedString("ERROR: %v", err))
	}

}

func CreateLogger(appConfig config.Interface) (*logrus.Entry, *os.File, error) {
	logger := logrus.WithFields(logrus.Fields{
		"type": "metrics",
	})

	if level, err := logrus.ParseLevel(appConfig.GetString("log.level")); err == nil {
		logger.Logger.SetLevel(level)
	} else {
		logger.Logger.SetLevel(logrus.InfoLevel)
	}

	var logFile *os.File
	var err error
	if appConfig.IsSet("log.file") && len(appConfig.GetString("log.file")) > 0 {
		logFile, err = os.OpenFile(appConfig.GetString("log.file"), os.O_CREATE|os.O_WRONLY, 0644)
		if err != nil {
			logger.Logger.Errorf("Failed to open log file %s for output: %s", appConfig.GetString("log.file"), err)
			return nil, logFile, err
		}
		logger.Logger.SetOutput(io.MultiWriter(os.Stderr, logFile))
	}
	return logger, logFile, nil
}

@@ -8,7 +8,7 @@ import (
	"time"
)

var httpClient = &http.Client{Timeout: 10 * time.Second}
var httpClient = &http.Client{Timeout: 60 * time.Second}

type BaseCollector struct {
	logger *logrus.Entry

@@ -9,6 +9,7 @@ import (
	"github.com/analogj/scrutiny/collector/pkg/detect"
	"github.com/analogj/scrutiny/collector/pkg/errors"
	"github.com/analogj/scrutiny/collector/pkg/models"
	"github.com/samber/lo"
	"github.com/sirupsen/logrus"
	"net/url"
	"os"
@@ -56,11 +57,16 @@ func (mc *MetricsCollector) Run() error {
		Logger: mc.logger,
		Config: mc.config,
	}
	detectedStorageDevices, err := deviceDetector.Start()
	rawDetectedStorageDevices, err := deviceDetector.Start()
	if err != nil {
		return err
	}

	//filter any device with empty wwn (they are invalid)
	detectedStorageDevices := lo.Filter[models.Device](rawDetectedStorageDevices, func(dev models.Device, _ int) bool {
		return len(dev.WWN) > 0
	})

	mc.logger.Infoln("Sending detected devices to API, for filtering & validation")
	jsonObj, _ := json.Marshal(detectedStorageDevices)
	mc.logger.Debugf("Detected devices: %v", string(jsonObj))
@@ -98,10 +104,10 @@ func (mc *MetricsCollector) Run() error {

func (mc *MetricsCollector) Validate() error {
	mc.logger.Infoln("Verifying required tools")
	_, lookErr := exec.LookPath("smartctl")
	_, lookErr := exec.LookPath(mc.config.GetString("commands.metrics_smartctl_bin"))

	if lookErr != nil {
		return errors.DependencyMissingError("smartctl is missing")
		return errors.DependencyMissingError(fmt.Sprintf("%s binary is missing", mc.config.GetString("commands.metrics_smartctl_bin")))
	}

	return nil
@@ -124,7 +130,7 @@ func (mc *MetricsCollector) Collect(deviceWWN string, deviceName string, deviceT
	}
	args = append(args, fullDeviceName)

	result, err := mc.shell.Command(mc.logger, "smartctl", args, "", os.Environ())
	result, err := mc.shell.Command(mc.logger, mc.config.GetString("commands.metrics_smartctl_bin"), args, "", os.Environ())
	resultBytes := []byte(result)
	if err != nil {
		if exitError, ok := err.(*exec.ExitError); ok {

@@ -43,6 +43,7 @@ func (c *configuration) Init() error {

	c.SetDefault("api.endpoint", "http://localhost:8080")

	c.SetDefault("commands.metrics_smartctl_bin", "smartctl")
	c.SetDefault("commands.metrics_scan_args", "--scan --json")
	c.SetDefault("commands.metrics_info_args", "--info --json")
	c.SetDefault("commands.metrics_smart_args", "--xall --json")

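With these defaults registered, the smartctl binary location becomes an ordinary config key that can be overridden per host in `collector.yaml`; a minimal sketch (the non-default path is purely illustrative):

```bash
# point the collector at a non-default smartctl build
cat > /opt/scrutiny/config/collector.yaml <<'EOF'
version: 1
commands:
  metrics_smartctl_bin: /usr/local/sbin/smartctl
EOF
```
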
@@ -36,6 +36,25 @@ func TestConfiguration_GetScanOverrides_Simple(t *testing.T) {
	require.Equal(t, []models.ScanOverride{{Device: "/dev/sda", DeviceType: []string{"sat"}, Ignore: false}}, scanOverrides)
}

// fixes #418
func TestConfiguration_GetScanOverrides_DeviceTypeComma(t *testing.T) {
	t.Parallel()

	//setup
	testConfig, _ := config.Create()

	//test
	err := testConfig.ReadConfig(path.Join("testdata", "device_type_comma.yaml"))
	require.NoError(t, err, "should correctly load simple device config")
	scanOverrides := testConfig.GetDeviceOverrides()

	//assert
	require.Equal(t, []models.ScanOverride{
		{Device: "/dev/sda", DeviceType: []string{"sat", "auto"}, Ignore: false},
		{Device: "/dev/sdb", DeviceType: []string{"sat,auto"}, Ignore: false},
	}, scanOverrides)
}

func TestConfiguration_GetScanOverrides_Ignore(t *testing.T) {
	t.Parallel()

@@ -0,0 +1,9 @@
version: 1
devices:
  # the scrutiny config parser will detect `sat,auto` as two separate items in a list. If you want to use `-d sat,auto` you must
  # set 'sat,auto' in a list (see eg. /dev/sdb)
  - device: /dev/sda
    type: 'sat,auto'
  - device: /dev/sdb
    type:
      - sat,auto
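To make the comment concrete: with the config above, `/dev/sda` expands into two separate smartctl probes while `/dev/sdb` keeps the comma as a single `-d` value. Roughly — the exact argument order is an assumption, but the flags mirror the `commands.metrics_smart_args` default:

```bash
# /dev/sda: type 'sat,auto' parsed as a two-item list -> one probe per type
smartctl --xall --json -d sat /dev/sda
smartctl --xall --json -d auto /dev/sda

# /dev/sdb: 'sat,auto' kept as a single list item -> passed through verbatim
smartctl --xall --json -d sat,auto /dev/sdb
```
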
@@ -29,7 +29,7 @@ type Detect struct {
func (d *Detect) SmartctlScan() ([]models.Device, error) {
	//we use smartctl to detect all the drives available.
	args := strings.Split(d.Config.GetString("commands.metrics_scan_args"), " ")
	detectedDeviceConnJson, err := d.Shell.Command(d.Logger, "smartctl", args, "", os.Environ())
	detectedDeviceConnJson, err := d.Shell.Command(d.Logger, d.Config.GetString("commands.metrics_smartctl_bin"), args, "", os.Environ())
	if err != nil {
		d.Logger.Errorf("Error scanning for devices: %v", err)
		return nil, err
@@ -60,7 +60,7 @@ func (d *Detect) SmartCtlInfo(device *models.Device) error {
	}
	args = append(args, fullDeviceName)

	availableDeviceInfoJson, err := d.Shell.Command(d.Logger, "smartctl", args, "", os.Environ())
	availableDeviceInfoJson, err := d.Shell.Command(d.Logger, d.Config.GetString("commands.metrics_smartctl_bin"), args, "", os.Environ())
	if err != nil {
		d.Logger.Errorf("Could not retrieve device information for %s: %v", device.DeviceName, err)
		return err
@@ -149,10 +149,35 @@ func (d *Detect) TransformDetectedDevices(detectedDeviceConns models.Scan) []mod
			//create a new device group, and replace the one generated by smartctl --scan
			overrideDeviceGroup := []models.Device{}

			for _, overrideDeviceType := range overrideDevice.DeviceType {
			if overrideDevice.DeviceType != nil {
				for _, overrideDeviceType := range overrideDevice.DeviceType {
					overrideDeviceGroup = append(overrideDeviceGroup, models.Device{
						HostId:     d.Config.GetString("host.id"),
						DeviceType: overrideDeviceType,
						DeviceName: strings.TrimPrefix(overrideDeviceFile, DevicePrefix()),
					})
				}
			} else {
				//user may have specified device in config file without device type (default to scanned device type)

				//check if the device file was detected by the scanner
				var deviceType string
				if scannedDevice, foundScannedDevice := groupedDevices[overrideDeviceFile]; foundScannedDevice {
					if len(scannedDevice) > 0 {
						//take the device type from the first grouped device
						deviceType = scannedDevice[0].DeviceType
					} else {
						deviceType = "ata"
					}

				} else {
					//fallback to ata if no scanned device detected
					deviceType = "ata"
				}

				overrideDeviceGroup = append(overrideDeviceGroup, models.Device{
					HostId:     d.Config.GetString("host.id"),
					DeviceType: overrideDeviceType,
					DeviceType: deviceType,
					DeviceName: strings.TrimPrefix(overrideDeviceFile, DevicePrefix()),
				})
			}

@@ -19,6 +19,7 @@ func TestDetect_SmartctlScan(t *testing.T) {
|
||||
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
||||
fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
|
||||
fakeConfig.EXPECT().GetDeviceOverrides().AnyTimes().Return([]models.ScanOverride{})
|
||||
fakeConfig.EXPECT().GetString("commands.metrics_smartctl_bin").AnyTimes().Return("smartctl")
|
||||
fakeConfig.EXPECT().GetString("commands.metrics_scan_args").AnyTimes().Return("--scan --json")
|
||||
|
||||
fakeShell := mock_shell.NewMockInterface(mockCtrl)
|
||||
@@ -47,6 +48,7 @@ func TestDetect_SmartctlScan_Megaraid(t *testing.T) {
|
||||
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
||||
fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
|
||||
fakeConfig.EXPECT().GetDeviceOverrides().AnyTimes().Return([]models.ScanOverride{})
|
||||
fakeConfig.EXPECT().GetString("commands.metrics_smartctl_bin").AnyTimes().Return("smartctl")
|
||||
fakeConfig.EXPECT().GetString("commands.metrics_scan_args").AnyTimes().Return("--scan --json")
|
||||
|
||||
fakeShell := mock_shell.NewMockInterface(mockCtrl)
|
||||
@@ -78,6 +80,7 @@ func TestDetect_SmartctlScan_Nvme(t *testing.T) {
|
||||
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
||||
fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
|
||||
fakeConfig.EXPECT().GetDeviceOverrides().AnyTimes().Return([]models.ScanOverride{})
|
||||
fakeConfig.EXPECT().GetString("commands.metrics_smartctl_bin").AnyTimes().Return("smartctl")
|
||||
fakeConfig.EXPECT().GetString("commands.metrics_scan_args").AnyTimes().Return("--scan --json")
|
||||
|
||||
fakeShell := mock_shell.NewMockInterface(mockCtrl)
|
||||
@@ -108,6 +111,7 @@ func TestDetect_TransformDetectedDevices_Empty(t *testing.T) {
|
||||
fakeConfig := mock_config.NewMockInterface(mockCtrl)
|
||||
fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
|
||||
	fakeConfig.EXPECT().GetDeviceOverrides().AnyTimes().Return([]models.ScanOverride{})
	fakeConfig.EXPECT().GetString("commands.metrics_smartctl_bin").AnyTimes().Return("smartctl")
	fakeConfig.EXPECT().GetString("commands.metrics_scan_args").AnyTimes().Return("--scan --json")

	detectedDevices := models.Scan{

@@ -140,6 +144,7 @@ func TestDetect_TransformDetectedDevices_Ignore(t *testing.T) {

	fakeConfig := mock_config.NewMockInterface(mockCtrl)
	fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
	fakeConfig.EXPECT().GetDeviceOverrides().AnyTimes().Return([]models.ScanOverride{{Device: "/dev/sda", DeviceType: nil, Ignore: true}})
	fakeConfig.EXPECT().GetString("commands.metrics_smartctl_bin").AnyTimes().Return("smartctl")
	fakeConfig.EXPECT().GetString("commands.metrics_scan_args").AnyTimes().Return("--scan --json")

	detectedDevices := models.Scan{

@@ -170,6 +175,7 @@ func TestDetect_TransformDetectedDevices_Raid(t *testing.T) {

	defer mockCtrl.Finish()
	fakeConfig := mock_config.NewMockInterface(mockCtrl)
	fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
	fakeConfig.EXPECT().GetString("commands.metrics_smartctl_bin").AnyTimes().Return("smartctl")
	fakeConfig.EXPECT().GetString("commands.metrics_scan_args").AnyTimes().Return("--scan --json")
	fakeConfig.EXPECT().GetDeviceOverrides().AnyTimes().Return([]models.ScanOverride{
		{

@@ -210,6 +216,7 @@ func TestDetect_TransformDetectedDevices_Simple(t *testing.T) {

	defer mockCtrl.Finish()
	fakeConfig := mock_config.NewMockInterface(mockCtrl)
	fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
	fakeConfig.EXPECT().GetString("commands.metrics_smartctl_bin").AnyTimes().Return("smartctl")
	fakeConfig.EXPECT().GetString("commands.metrics_scan_args").AnyTimes().Return("--scan --json")
	fakeConfig.EXPECT().GetDeviceOverrides().AnyTimes().Return([]models.ScanOverride{{Device: "/dev/sda", DeviceType: []string{"sat+megaraid"}}})

	detectedDevices := models.Scan{

@@ -234,3 +241,59 @@ func TestDetect_TransformDetectedDevices_Simple(t *testing.T) {

	require.Equal(t, 1, len(transformedDevices))
	require.Equal(t, "sat+megaraid", transformedDevices[0].DeviceType)
}

// test https://github.com/AnalogJ/scrutiny/issues/255#issuecomment-1164024126
func TestDetect_TransformDetectedDevices_WithoutDeviceTypeOverride(t *testing.T) {
	//setup
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()
	fakeConfig := mock_config.NewMockInterface(mockCtrl)
	fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
	fakeConfig.EXPECT().GetString("commands.metrics_smartctl_bin").AnyTimes().Return("smartctl")
	fakeConfig.EXPECT().GetString("commands.metrics_scan_args").AnyTimes().Return("--scan --json")
	fakeConfig.EXPECT().GetDeviceOverrides().AnyTimes().Return([]models.ScanOverride{{Device: "/dev/sda"}})

	detectedDevices := models.Scan{
		Devices: []models.ScanDevice{
			{
				Name:     "/dev/sda",
				InfoName: "/dev/sda",
				Protocol: "ata",
				Type:     "scsi",
			},
		},
	}

	d := detect.Detect{
		Config: fakeConfig,
	}

	//test
	transformedDevices := d.TransformDetectedDevices(detectedDevices)

	//assert
	require.Equal(t, 1, len(transformedDevices))
	require.Equal(t, "scsi", transformedDevices[0].DeviceType)
}

func TestDetect_TransformDetectedDevices_WhenDeviceNotDetected(t *testing.T) {
	//setup
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()
	fakeConfig := mock_config.NewMockInterface(mockCtrl)
	fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
	fakeConfig.EXPECT().GetString("commands.metrics_smartctl_bin").AnyTimes().Return("smartctl")
	fakeConfig.EXPECT().GetString("commands.metrics_scan_args").AnyTimes().Return("--scan --json")
	fakeConfig.EXPECT().GetDeviceOverrides().AnyTimes().Return([]models.ScanOverride{{Device: "/dev/sda"}})

	detectedDevices := models.Scan{}

	d := detect.Detect{
		Config: fakeConfig,
	}

	//test
	transformedDevices := d.TransformDetectedDevices(detectedDevices)

	//assert
	require.Equal(t, 1, len(transformedDevices))
	require.Equal(t, "ata", transformedDevices[0].DeviceType)
}
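
The four tests above share an identical skeleton; if more device-type permutations come up, they could be folded into a table-driven test. A sketch using only the APIs visible in this diff (the single case shown is illustrative):

```go
func TestDetect_TransformDetectedDevices_Table(t *testing.T) {
	testCases := []struct {
		name      string
		overrides []models.ScanOverride
		scan      models.Scan
		wantType  string
	}{
		{
			// without a device-type override, the detected scan type wins
			name:      "fallback to scan type",
			overrides: []models.ScanOverride{{Device: "/dev/sda"}},
			scan: models.Scan{Devices: []models.ScanDevice{
				{Name: "/dev/sda", InfoName: "/dev/sda", Protocol: "ata", Type: "scsi"},
			}},
			wantType: "scsi",
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			//setup
			mockCtrl := gomock.NewController(t)
			defer mockCtrl.Finish()
			fakeConfig := mock_config.NewMockInterface(mockCtrl)
			fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
			fakeConfig.EXPECT().GetString("commands.metrics_smartctl_bin").AnyTimes().Return("smartctl")
			fakeConfig.EXPECT().GetString("commands.metrics_scan_args").AnyTimes().Return("--scan --json")
			fakeConfig.EXPECT().GetDeviceOverrides().AnyTimes().Return(tc.overrides)

			d := detect.Detect{Config: fakeConfig}

			//test
			transformedDevices := d.TransformDetectedDevices(tc.scan)

			//assert
			require.Equal(t, 1, len(transformedDevices))
			require.Equal(t, tc.wantType, transformedDevices[0].DeviceType)
		})
	}
}
```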
@@ -1,50 +1,62 @@
# syntax=docker/dockerfile:1.4
########################################################################################################################
# Omnibus Image
########################################################################################################################

######## Build the frontend
FROM --platform=${BUILDPLATFORM} node AS frontendbuild
WORKDIR /go/src/github.com/analogj/scrutiny
COPY --link . /go/src/github.com/analogj/scrutiny

RUN make binary-frontend


######## Build the backend
FROM golang:1.20-bullseye as backendbuild

WORKDIR /go/src/github.com/analogj/scrutiny
COPY --link . /go/src/github.com/analogj/scrutiny
RUN make binary-clean binary-all WEB_BINARY_NAME=scrutiny

######## Combine build artifacts in runtime image
FROM debian:bullseye-slim as runtime
ARG TARGETARCH
EXPOSE 8080
WORKDIR /opt/scrutiny
ENV PATH="/opt/scrutiny/bin:${PATH}"
ENV INFLUXD_CONFIG_PATH=/opt/scrutiny/influxdb
ENV S6VER="1.21.8.0"
ENV INFLUXVER="2.2.0"

RUN apt-get update && DEBIAN_FRONTEND=noninteractive \
    apt-get install -y --no-install-recommends \
        ca-certificates \
        cron \
        curl \
        smartmontools \
        tzdata \
    && update-ca-certificates \
    && case ${TARGETARCH} in \
        "amd64") S6_ARCH=amd64 ;; \
        "arm64") S6_ARCH=aarch64 ;; \
    esac \
    && curl https://github.com/just-containers/s6-overlay/releases/download/v${S6VER}/s6-overlay-${S6_ARCH}.tar.gz -L -s --output /tmp/s6-overlay-${S6_ARCH}.tar.gz \
    && tar xzf /tmp/s6-overlay-${S6_ARCH}.tar.gz -C / \
    && rm -rf /tmp/s6-overlay-${S6_ARCH}.tar.gz \
    && curl -L https://dl.influxdata.com/influxdb/releases/influxdb2-${INFLUXVER}-${TARGETARCH}.deb --output /tmp/influxdb2-${INFLUXVER}-${TARGETARCH}.deb \
    && dpkg -i --force-all /tmp/influxdb2-${INFLUXVER}-${TARGETARCH}.deb \
    && rm -rf /tmp/influxdb2-2.2.0-${TARGETARCH}.deb

COPY /rootfs /

COPY --link --from=backendbuild --chmod=755 /go/src/github.com/analogj/scrutiny/scrutiny /opt/scrutiny/bin/
COPY --link --from=backendbuild --chmod=755 /go/src/github.com/analogj/scrutiny/scrutiny-collector-metrics /opt/scrutiny/bin/
COPY --link --from=frontendbuild --chmod=644 /go/src/github.com/analogj/scrutiny/dist /opt/scrutiny/web

RUN chmod 0644 /etc/cron.d/scrutiny && \
    rm -f /etc/cron.daily/* && \
    mkdir -p /opt/scrutiny/web && \
    mkdir -p /opt/scrutiny/config && \
    chmod -R ugo+rwx /opt/scrutiny/config


CMD ["/init"]
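
Since the frontend stage pins `--platform=${BUILDPLATFORM}` and the runtime stage branches on `TARGETARCH`, this image is meant to be built with BuildKit/buildx. A minimal sketch (the Dockerfile path and tag are illustrative):

```bash
# requires a buildx builder with both platforms enabled
docker buildx build \
  --platform linux/amd64,linux/arm64 \
  --file docker/Dockerfile \
  --tag scrutiny:master-omnibus \
  .
```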
@@ -1,27 +1,28 @@
########################################################################################################################
# Collector Image
########################################################################################################################


########
FROM golang:1.20-bullseye as backendbuild

WORKDIR /go/src/github.com/analogj/scrutiny

COPY . /go/src/github.com/analogj/scrutiny

RUN make binary-clean binary-collector

########
FROM debian:bullseye-slim as runtime
WORKDIR /opt/scrutiny
ENV PATH="/opt/scrutiny/bin:${PATH}"

RUN apt-get update && apt-get install -y cron smartmontools ca-certificates tzdata && update-ca-certificates

COPY /docker/entrypoint-collector.sh /entrypoint-collector.sh
COPY /rootfs/etc/cron.d/scrutiny /etc/cron.d/scrutiny
COPY --from=backendbuild /go/src/github.com/analogj/scrutiny/scrutiny-collector-metrics /opt/scrutiny/bin/
RUN chmod +x /opt/scrutiny/bin/scrutiny-collector-metrics && \
    chmod +x /entrypoint-collector.sh && \
    chmod 0644 /etc/cron.d/scrutiny && \
    rm -f /etc/cron.daily/apt /etc/cron.daily/dpkg /etc/cron.daily/passwd
@@ -1,14 +1,25 @@
# syntax=docker/dockerfile:1.4
########################################################################################################################
# Web Image
########################################################################################################################

######## Build the frontend
FROM --platform=${BUILDPLATFORM} node AS frontendbuild
WORKDIR /go/src/github.com/analogj/scrutiny
COPY --link . /go/src/github.com/analogj/scrutiny

RUN make binary-frontend

######## Build the backend
FROM golang:1.20-bullseye as backendbuild

WORKDIR /go/src/github.com/analogj/scrutiny
COPY --link . /go/src/github.com/analogj/scrutiny

RUN make binary-clean binary-all WEB_BINARY_NAME=scrutiny

######## Combine build artifacts in runtime image
FROM debian:bullseye-slim as runtime
EXPOSE 8080
WORKDIR /opt/scrutiny
@@ -16,10 +27,9 @@ ENV PATH="/opt/scrutiny/bin:${PATH}"

RUN apt-get update && apt-get install -y ca-certificates curl tzdata && update-ca-certificates

COPY --link --from=backendbuild --chmod=755 /go/src/github.com/analogj/scrutiny/scrutiny /opt/scrutiny/bin/
COPY --link --from=frontendbuild --chmod=644 /go/src/github.com/analogj/scrutiny/dist /opt/scrutiny/web
RUN mkdir -p /opt/scrutiny/web && \
    mkdir -p /opt/scrutiny/config && \
    chmod -R ugo+rwx /opt/scrutiny/config
CMD ["/opt/scrutiny/bin/scrutiny", "start"]
@@ -1,7 +0,0 @@
FROM techknowlogick/xgo:go-1.17.x

WORKDIR /go/src/github.com/analogj/scrutiny

COPY . /go/src/github.com/analogj/scrutiny

RUN make all
@@ -1,18 +0,0 @@
# This vagrant file is only used for local development & testing.

Vagrant.configure("2") do |config|
  config.vm.guest = :freebsd
  config.vm.synced_folder ".", "/vagrant", id: "vagrant-root", disabled: true
  config.vm.box = "freebsd/FreeBSD-11.0-CURRENT"
  config.ssh.shell = "sh"
  config.vm.base_mac = "080027D14C66"

  config.vm.provider :virtualbox do |vb|
    vb.customize ["modifyvm", :id, "--memory", "1024"]
    vb.customize ["modifyvm", :id, "--cpus", "1"]
    vb.customize ["modifyvm", :id, "--hwvirtex", "on"]
    vb.customize ["modifyvm", :id, "--audio", "none"]
    vb.customize ["modifyvm", :id, "--nictype1", "virtio"]
    vb.customize ["modifyvm", :id, "--nictype2", "virtio"]
  end
end
@@ -7,6 +7,8 @@

# adding ability to customize the cron schedule.
COLLECTOR_CRON_SCHEDULE=${COLLECTOR_CRON_SCHEDULE:-"0 0 * * *"}
COLLECTOR_RUN_STARTUP=${COLLECTOR_RUN_STARTUP:-"false"}
COLLECTOR_RUN_STARTUP_SLEEP=${COLLECTOR_RUN_STARTUP_SLEEP:-"1"}

# if the cron schedule has been overridden via env variable (eg docker-compose) we should make sure to strip quotes
[[ "${COLLECTOR_CRON_SCHEDULE}" == \"*\" || "${COLLECTOR_CRON_SCHEDULE}" == \'*\' ]] && COLLECTOR_CRON_SCHEDULE="${COLLECTOR_CRON_SCHEDULE:1:-1}"

@@ -14,6 +16,13 @@ COLLECTOR_CRON_SCHEDULE=${COLLECTOR_CRON_SCHEDULE:-"0 0 * * *"}

# replace placeholder with correct value
sed -i 's|{COLLECTOR_CRON_SCHEDULE}|'"${COLLECTOR_CRON_SCHEDULE}"'|g' /etc/cron.d/scrutiny

if [[ "${COLLECTOR_RUN_STARTUP}" == "true" ]]; then
  sleep ${COLLECTOR_RUN_STARTUP_SLEEP}
  echo "starting scrutiny collector (run-once mode. subsequent calls will be triggered via cron service)"
  /opt/scrutiny/bin/scrutiny-collector-metrics run
fi

# now that we have the env start cron in the foreground
echo "starting cron"
su -c "cron -f -L 15" root
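
These defaults are consumed from container environment variables, so they can be overridden at run time. A minimal sketch (image tag, device and endpoint are illustrative):

```bash
docker run -d \
  -e COLLECTOR_CRON_SCHEDULE="*/15 * * * *" \
  -e COLLECTOR_RUN_STARTUP=true \
  -e COLLECTOR_RUN_STARTUP_SLEEP=10 \
  -e COLLECTOR_API_ENDPOINT="http://192.168.0.100:8080" \
  --device=/dev/sda \
  ghcr.io/analogj/scrutiny:master-collector
```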
@@ -1 +1,181 @@
> See [docker/example.hubspoke.docker-compose.yml](https://github.com/AnalogJ/scrutiny/blob/master/docker/example.hubspoke.docker-compose.yml)
> for a docker-compose file.
>
> The following guide was contributed by @TinJoy59 in #417.
> It describes how to deploy Scrutiny in Hub/Spoke mode, where the Hub is running in Docker, and the Spokes
> (collectors) are running as binaries.
> He's using Proxmox & Synology in his guide; however, this should be applicable for almost anyone.

# S.M.A.R.T. Monitoring with Scrutiny across machines

![Scrutiny across machines drawio](...)

### 🤔 The problem:

Scrutiny offers a nice Docker package called "Omnibus" that can monitor HDDs attached to a Docker host with relative
ease. Scrutiny can also be installed in a Hub-Spoke layout where the web interface, database and collector come as 3
separate packages. The official documentation assumes that the spokes in the "Hub-Spokes layout" run Docker, which is
not always the case. The third approach is to install Scrutiny manually, entirely outside of Docker.

### 💡 The solution:

This tutorial provides a hybrid configuration where the Hub lives in a Docker instance while the spokes have only the
Scrutiny Collector installed manually. The Collector periodically sends data to the Hub. It's not mind-bogglingly hard
to understand, but someone might struggle with the setup. This is for them.

### 🖥️ My setup:

I have a Proxmox cluster where one VM runs Docker and all monitoring services - Grafana, Prometheus, various exporters,
InfluxDB and so forth. Another VM runs the NAS - OpenMediaVault v6, where all hard drives reside. The Scrutiny Collector
is triggered every 30min to collect data on the drives. The data is sent to the Docker VM, running InfluxDB.

## Setting up the Hub

![1](...)

The Hub consists of Scrutiny Web - a web interface for viewing the SMART data - and InfluxDB, where the smartmon data is
stored.

[🔗This is the official Hub-Spoke layout in docker-compose.](https://github.com/AnalogJ/scrutiny/blob/master/docker/example.hubspoke.docker-compose.yml)
We are going to reuse parts of it. The ENV variables provide the necessary configuration for the initial setup, both for
InfluxDB and Scrutiny.

If you are working with an existing InfluxDB instance, you can forgo all the `INIT` variables as they already exist.

The official Scrutiny documentation has a
sample [scrutiny.yaml](https://github.com/AnalogJ/scrutiny/blob/master/example.scrutiny.yaml) file that normally
contains the connection and notification details, but I always find it easier to configure as much as possible in the
docker-compose.

```yaml
version: "3.4"

networks:
  monitoring: # A common network for all monitoring services to communicate into
    external: true
  notifications: # To Gotify or another Notification service
    external: true

services:
  influxdb:
    container_name: influxdb
    image: influxdb:2.1-alpine
    ports:
      - 8086:8086
    volumes:
      - ${DIR_CONFIG}/influxdb2/db:/var/lib/influxdb2
      - ${DIR_CONFIG}/influxdb2/config:/etc/influxdb2
    environment:
      - DOCKER_INFLUXDB_INIT_MODE=setup
      - DOCKER_INFLUXDB_INIT_USERNAME=Admin
      - DOCKER_INFLUXDB_INIT_PASSWORD=${PASSWORD}
      - DOCKER_INFLUXDB_INIT_ORG=homelab
      - DOCKER_INFLUXDB_INIT_BUCKET=scrutiny
      - DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=your-very-secret-token
    restart: unless-stopped
    networks:
      - monitoring

  scrutiny:
    container_name: scrutiny
    image: ghcr.io/analogj/scrutiny:master-web
    ports:
      - 8080:8080
    volumes:
      - ${DIR_CONFIG}/scrutiny/config:/opt/scrutiny/config
    environment:
      - SCRUTINY_WEB_INFLUXDB_HOST=influxdb
      - SCRUTINY_WEB_INFLUXDB_PORT=8086
      - SCRUTINY_WEB_INFLUXDB_TOKEN=your-very-secret-token
      - SCRUTINY_WEB_INFLUXDB_ORG=homelab
      - SCRUTINY_WEB_INFLUXDB_BUCKET=scrutiny
      # Optional but highly recommended to notify you in case of a problem
      - SCRUTINY_WEB_NOTIFY_URLS=["http://gotify:80/message?token=a-gotify-token"]
    depends_on:
      - influxdb
    restart: unless-stopped
    networks:
      - notifications
      - monitoring
```

A freshly initialized Scrutiny instance can be accessed on port 8080, eg. `192.168.0.100:8080`. The interface will be
empty because no metrics have been collected yet.
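
If you want to script a readiness check against the Hub, the web container exposes a health endpoint that can be polled (a sketch; I'm assuming the default port mapping and that the `/api/health` route is available in your version):

```bash
# should return a small JSON payload once the web container is ready
curl -fsS "http://192.168.0.100:8080/api/health"
```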

## Setting up a Spoke ***without*** Docker

![2](...)

A spoke consists of the Scrutiny Collector binary that is run on a set interval via crontab and sends the data to the
Hub. The official documentation [describes the manual setup of the Collector](https://github.com/AnalogJ/scrutiny/blob/master/docs/INSTALL_MANUAL.md#collector),
including dependencies and step-by-step commands. I have a shortened version that does the same thing but in one line of code.

```bash
# Installing dependencies
apt install smartmontools -y

# 1. Create directory for the binary
# 2. Download the binary into that directory
# 3. Make it executable
# 4. List the contents of the directory for confirmation
mkdir -p /opt/scrutiny/bin && \
curl -L https://github.com/AnalogJ/scrutiny/releases/download/v0.5.0/scrutiny-collector-metrics-linux-amd64 > /opt/scrutiny/bin/scrutiny-collector-metrics-linux-amd64 && \
chmod +x /opt/scrutiny/bin/scrutiny-collector-metrics-linux-amd64 && \
ls -lha /opt/scrutiny/bin
```

<p class="callout warning">When downloading Github Release Assets, make sure that you have the correct version. The provided example is with Release v0.5.0. [The release list can be found here.](https://github.com/analogj/scrutiny/releases) </p>

Once the Collector is installed, you can run it with the following command. Make sure to add the correct address and
port of your Hub as `--api-endpoint`.

```bash
/opt/scrutiny/bin/scrutiny-collector-metrics-linux-amd64 run --api-endpoint "http://192.168.0.100:8080"
```

This will run the Collector once and populate the Web interface of your Scrutiny instance. In order to collect metrics
for a time series, you need to run the command repeatedly. Here is an example for crontab, running the Collector every
15min.

```bash
# open crontab
crontab -e

# add a line for Scrutiny
*/15 * * * * /opt/scrutiny/bin/scrutiny-collector-metrics-linux-amd64 run --api-endpoint "http://192.168.0.100:8080"
```

The Collector has its own independent config file that lives in `/opt/scrutiny/config/collector.yaml`, but I did not find
a need to modify
it. [A default collector.yaml can be found in the official documentation.](https://github.com/AnalogJ/scrutiny/blob/master/example.collector.yaml)

## Setting up a Spoke ***with*** Docker

![3](...)

Setting up a remote Spoke in Docker requires you to split
the [official Hub-Spoke layout docker-compose.yml](https://github.com/AnalogJ/scrutiny/blob/master/docker/example.hubspoke.docker-compose.yml).
In the following docker-compose you need to provide the `${API_ENDPOINT}`, in my case `http://192.168.0.100:8080`.
Also, all drives that you wish to monitor need to be presented to the container under `devices`.

The image handles the periodic scanning of the drives.

```yaml
version: "3.4"

services:

  collector:
    image: 'ghcr.io/analogj/scrutiny:master-collector'
    cap_add:
      - SYS_RAWIO
    volumes:
      - '/run/udev:/run/udev:ro'
    environment:
      COLLECTOR_API_ENDPOINT: ${API_ENDPOINT}
    devices:
      - "/dev/sda"
      - "/dev/sdb"
```
@@ -0,0 +1,59 @@
# Manual Windows Install

This guide is specifically for people who are on a Windows machine using [WSL](https://learn.microsoft.com/en-us/windows/wsl/about) with Docker.

Scrutiny is made up of three components: an influxdb Database, a collector and a webapp/api. Docker will be used for
the influxdb and webapp/API; the collector component will be facilitated by [Windows Task Scheduler](https://learn.microsoft.com/en-us/windows/win32/taskschd/task-scheduler-start-page).

> **NOTE:** If you are **NOT** using WSL with docker, then the easiest way to get started with [Scrutiny is the omnibus Docker image](https://github.com/AnalogJ/scrutiny#docker).

## InfluxDB and Webapp/API (Docker)

1. Copy the [example.hubspoke.docker-compose.yml](https://github.com/AnalogJ/scrutiny/blob/master/docker/example.hubspoke.docker-compose.yml)
   file and delete the collector section near the bottom of the file.
2. Run `docker-compose up -d` to verify that the DB and webapp are working correctly. Once it's completed, your webapp
   should be up and running, but the dashboard will be empty (default location is `localhost:8080`)

## Collector (Windows Task Scheduler)

1. Download the latest `scrutiny-collector-metrics-windows-amd64.exe` from the [releases page](https://github.com/AnalogJ/scrutiny/releases) (under assets)
2. On your windows host, open [Windows Task Scheduler](https://www.wikihow.com/Open-Task-Scheduler-in-Windows-10) as **Administrator**
   1. In the **Start Menu** (Windows key), type `Task Scheduler` and then right click `Run as Administrator` to open
3. On the status bar (under the `Action` tab), click `Create Task...`
4. A new window should open with the `General` Tab open; enter relevant information into the `Name` and `Description` fields
   1. Under **Security Options** check:
      1. **Run whether user is logged on or not**
      2. **Run with highest privileges**
5. Next, click the `Triggers` tab and then click `New...` (bottom left-hand side of the window)
6. Here you can set how often you want this task to run; example settings are the following:
   1. **Settings:**
      1. `Daily`, start at `TODAYS_DATE` `12:00:00 AM`, Recur every `1` days
   2. **Advanced Settings:**
      1. Repeat Task every: `1 hour` for a duration of `Indefinitely`
      2. Stop task if it runs longer than: `30 minutes`
   3. Click Ok when satisfied with your schedule
   > **NOTE:** The above settings will trigger the task **every day at midnight** and then **run every hour after that** (modify as needed)
7. Next, click the `Actions` tab and then click `New...` (bottom left-hand side of the window)
   1. **Action Settings:**
      1. In the **Program/Script** field, put: `scrutiny-collector-metrics-windows-amd64.exe`
      2. In the **Add arguments (optional)** field, put: `run --api-endpoint "http://localhost:8080" --config collector.yaml`
         > **NOTE:**
         > * Make sure that you put the correct port number (as specified in the docker-compose file) for the webapp (default is `8080`)
         > * The `--config` param is optional and is not needed if you just want to use the default collector config; see
         >   [example.collector.yaml](https://github.com/AnalogJ/scrutiny/blob/master/example.collector.yaml) for more info on the collector config.
      3. In the **Start in (optional)** field, put: FOLDER_PATH_TO_YOUR `scrutiny-collector-metrics-windows-amd64.exe` file
         > **NOTE:** Must be exact and do not include `scrutiny-collector-metrics-windows-amd64.exe` in the path
      4. Click Ok when finished
8. Next, click the `Conditions` tab and make sure that everything is unchecked (unless you want to specify otherwise)
9. Next, click the `Settings` tab and check everything except for the last checkbox
   1. **Examples for the following settings:**
      1. If the task fails, restart every: `5 minutes`
      2. Attempt restart up to: `3` times
      3. Stop the task if it runs longer than `1 hour`
10. Next, once satisfied with everything, click Ok
11. Then, find your newly created task (by its name) in the scheduler task list and then manually run it (right click it and then click `Run`)
12. Finally, refresh your dashboard after a minute or two and your drive information should have populated the webapp dashboard.
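
If you'd rather script the task than click through the UI, the same schedule can be created with Windows' built-in `schtasks` CLI. A rough sketch (the path and task name are illustrative; double-check the flags against `schtasks /Create /?` on your system):

```
schtasks /Create ^
  /TN "Scrutiny Collector" ^
  /TR "C:\scrutiny\scrutiny-collector-metrics-windows-amd64.exe run --api-endpoint http://localhost:8080" ^
  /SC HOURLY /MO 1 ^
  /RU SYSTEM /RL HIGHEST
```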
@@ -0,0 +1,138 @@
# Install collector on Synology

## Install Entware

This will allow you to install a newer version of smartmontools on your Synology. Follow the instructions here (this is tested on DSM7) - https://github.com/Entware/Entware/wiki/Install-on-Synology-NAS

**PLEASE NOTE THAT IF YOU UPDATE DSM FIRMWARE YOU MAY BORK THE EXISTING ENTWARE INSTALLATION; FOR ANYTHING THAT MAY RELATE TO ENTWARE PLEASE VISIT THEIR REPO**

## Collector Setup

**1. Run an update**

`sudo opkg update`

**2. Run an upgrade**

`sudo opkg upgrade`

**3. Install smartmontools**

`sudo opkg install smartmontools`

*It should install v7.2-2*

`Installing smartmontools (7.2-2) to root...`

**4. We will now create the directories.**

```
mkdir -p /volume1/\@Entware/scrutiny/bin
mkdir -p /volume1/\@Entware/scrutiny/conf
```

**5. Change into the bin directory**

`cd /volume1/\@Entware/scrutiny/bin`

**6. Download the collector binary for your architecture and make it executable**

`wget https://github.com/AnalogJ/scrutiny/releases/download/v0.4.12/scrutiny-collector-metrics-linux-arm64`

`chmod +x /volume1/\@Entware/scrutiny/bin/scrutiny-collector-metrics-linux-arm64`

**7. Create a config file for the collector**

```
cd /volume1/\@Entware/scrutiny/conf
wget https://raw.githubusercontent.com/AnalogJ/scrutiny/master/example.collector.yaml
mv example.collector.yaml collector.yaml
```

**8. Let's make some changes in the [collector config file](../example.collector.yaml). These are what I uncommented/added; please tweak the device paths to your needs**

```
host:
  id: 'Server_Name'

devices:
  # example for forcing device type detection for a single disk
  - device: /dev/sda
    type: 'sat'
  - device: /dev/sdb
    type: 'sat'
  - device: /dev/sdc
    type: 'sat'
  - device: /dev/sdd
    type: 'sat'

api:
  endpoint: 'http://<url>:8080'
```

**9. Let's update the smartd db**

```
cd /volume1/\@Entware/scrutiny/bin/
wget https://raw.githubusercontent.com/smartmontools/smartmontools/master/smartmontools/drivedb.h
```

**10. I ran it like this, but you can tweak it to your liking; the most important part is the `--drivedb`, as this loads it into the application for future use**

`smartctl -d sat --all /dev/sda --drivedb=/volume1/\@Entware/scrutiny/bin/drivedb.h`

**11. Now let's create a small bash script; this will be used for the scheduled task inside Synology**

`vim /volume1/\@Entware/scrutiny/bin/run_collect.sh`

**The contents are below, copy and paste them in**

```
#!/bin/bash

/volume1/\@Entware/scrutiny/bin/scrutiny-collector-metrics-linux-arm64 run --config /volume1/\@Entware/scrutiny/conf/collector.yaml
```

**Make `run_collect.sh` executable**

`chmod +x /volume1/\@Entware/scrutiny/bin/run_collect.sh`

## Set up Synology to run a scheduled task.

Log in to DSM and do the following:

Goto: DSM > Control Panel > Task Scheduler

Create > Scheduled Task > User Defined Script

###### General

```
Task: Scrutiny_Collector
User: root
Enabled: yes
```

###### Schedule

```
Run on the following days: Daily
```

###### Time:

```
Frequency: <Your desired frequency>
```

###### Task Settings

**Run Command**

```
. /opt/etc/profile; /volume1/\@Entware/scrutiny/bin/run_collect.sh
```


## Troubleshooting

If you have any issues with your devices being detected, or incorrect data, please take a look at [TROUBLESHOOTING_DEVICE_COLLECTOR.md](./TROUBLESHOOTING_DEVICE_COLLECTOR.md)
@@ -1,16 +1,21 @@
# Officially Supported NAS/OS's

These are the officially supported NAS OS's (with documentation and setup guides). Once a guide is created
(in `docs/guides/` or elsewhere) it will be linked here.

- [x] [freenas/truenas](https://blog.stefandroid.com/2022/01/14/smart-scrutiny.html)
- [x] [unraid](./INSTALL_UNRAID.md)
- [ ] ESXI
- [ ] Proxmox
- [x] Synology
  - [Hub/Spoke Deployment - Collector](./INSTALL_SYNOLOGY_COLLECTOR.md)
  - [Omnibus Deployment](https://drfrankenstein.co.uk/2022/07/28/scrutiny-in-docker-on-a-synology-nas)
- [ ] OMV
- [ ] Amahi
- [ ] Running in a LXC container
- [x] [PFSense](./INSTALL_PFSENSE.md)
- [x] QNAP
- [x] [RockStor](https://rockstor.com/docs/interface/docker-based-rock-ons/scrutiny.html)
- [ ] Solaris/OmniOS CE Support
- [ ] Kubernetes
- [x] [Windows](./INSTALL_MANUAL_WINDOWS.md)
@@ -0,0 +1,20 @@
# Testers

Scrutiny supports many operating systems, CPU architectures and runtime environments. Unfortunately, that makes it
incredibly difficult to test.
Thankfully the following users have been gracious enough to test/validate Scrutiny works on their system.

> NOTE: If you're interested in volunteering to test Scrutiny beta builds on your system, please [open an issue](https://github.com/AnalogJ/scrutiny/issues).

| Architecture Name | Binaries | Docker |
| --- | --- | --- |
| linux-amd64 | -- | @feroxy @rshxyz |
| linux-arm-5 | -- | |
| linux-arm-6 | -- | |
| linux-arm-7 | @Zorlin | @martini1992 |
| linux-arm64 | @SiM22 @Zorlin | @ViRb3 @agneevX @benamajin |
| freebsd-amd64 | @BadCo-NZ @varunsridharan @martadinata666 @KenwoodFox @FingerlessGlov3s | |
| macos-amd64 | -- | -- |
| macos-arm64 | -- | -- |
| windows-amd64 | @gabrielv33 | -- |
| windows-arm64 | -- | -- |
@@ -19,6 +19,25 @@ Scrutiny stores and references the devices by their `WWN` which is globally unique

As such, passing devices to the Scrutiny collector container using `/dev/disk/by-id/`, `/dev/disk/by-label/`, `/dev/disk/by-path/` and `/dev/disk/by-uuid/`
paths is unnecessary, unless you'd like to ensure the docker run command never needs to change.

#### Force /dev/disk/by-id paths

Since Scrutiny uses WWN under the hood, it really doesn't care about `/dev/sd*` vs `/dev/disk/by-id/`. The problem is the interaction between docker and smartmontools when using `--device /dev/disk/by-id` paths.

Basically Scrutiny offloads all device detection to smartmontools, which doesn't seem to detect devices that have been passed into the docker container using `/dev/disk/by-id` paths.

If you must use "static" device references, you can map the host device id/uuid/wwn references to device names within the container:

```
# --device=<Host Device>:<Container Device Mapping>

docker run ....
  --device=/dev/disk/by-id/wwn-0x5000xxxxx:/dev/sda
  --device=/dev/disk/by-id/wwn-0x5001xxxxx:/dev/sdb
  --device=/dev/disk/by-id/wwn-0x5003xxxxx:/dev/sdc
  ...
```

## Device Detection By Smartctl

@@ -61,7 +80,8 @@ using a collector config file. See [example.collector.yaml](/example.collector.y

> NOTE: If you use docker, you **must** pass through the RAID virtual disk to the container using `--device` (see below)
>
> This device may be in `/dev/*` or `/dev/bus/*`.
> If you do not see a virtual device file `/dev/bus/*` you may need to use the `--privileged` flag. See [#366 for more info](https://github.com/AnalogJ/scrutiny/issues/366#issuecomment-1253196407)
>
> If you're unsure, run `smartctl --scan` on your host, and pass all listed devices to the container.

@@ -92,7 +112,7 @@ devices:
    type:
      - aacraid,0,0,0
      - aacraid,0,0,1

  # HPE Smart Array example: https://github.com/AnalogJ/scrutiny/issues/213
  - device: /dev/sda
    type:
@@ -100,11 +120,14 @@ devices:
      - 'cciss,1'
```

### NVMe Drives

As mentioned in the [README.md](/README.md), NVMe devices require both `--cap-add SYS_RAWIO` and `--cap-add SYS_ADMIN`
to allow smartctl permission to query your NVMe device SMART data [#26](https://github.com/AnalogJ/scrutiny/issues/26)

When attaching NVMe devices using `--device=/dev/nvme..`, make sure to provide the device controller (`/dev/nvme0`)
instead of the block device (`/dev/nvme0n1`). See [#209](https://github.com/AnalogJ/scrutiny/issues/209).

> The character device /dev/nvme0 is the NVME device controller, and block devices like /dev/nvme0n1 are the NVME storage namespaces: the devices you use for actual storage, which will behave essentially as disks.
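
Putting those flags together, a minimal collector invocation for a single NVMe drive might look like this (image tag and endpoint are illustrative):

```bash
docker run -d \
  --cap-add SYS_RAWIO \
  --cap-add SYS_ADMIN \
  --device=/dev/nvme0 \
  -e COLLECTOR_API_ENDPOINT="http://192.168.0.100:8080" \
  ghcr.io/analogj/scrutiny:master-collector
```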

@@ -113,15 +136,29 @@ instead of the block device (`/dev/nvme0n1`). See [#209](https://github.com/Anal

### USB Devices

The following information is extracted from [#266](https://github.com/AnalogJ/scrutiny/issues/266)

External HDDs support two modes of operation: usb-storage (old, slower, stable) and uas (new, faster, sometimes
unstable). On some external HDDs, uas mode does not properly pass through SMART information, or even causes hardware
issues, so it has been disabled by the kernel. No amount of smartctl parameters will fix this, as it is being rejected
by the kernel. This is especially true with Seagate HDDs. One solution is to force these devices into usb-storage mode,
which will incur some performance penalty, but may work well enough for you. More info:

- https://smartmontools.org/wiki/Supported_USB-Devices
- https://smartmontools.org/wiki/SAT-with-UAS-Linux
- https://forums.raspberrypi.com/viewtopic.php?t=245931
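
If you decide to force usb-storage mode, the usual mechanism is the kernel's `usb-storage` quirks list (the `u` flag means "ignore UAS"). A sketch, assuming your drive's vendor:product ID is `0bc2:331a` (find yours with `lsusb`):

```bash
# find the VID:PID of the external enclosure (0bc2:331a below is illustrative)
lsusb

# tell the usb-storage driver to ignore UAS for that device ('u' = IGNORE_UAS),
# then rebuild the initramfs and reboot (Debian/Ubuntu shown; other distros differ)
echo "options usb-storage quirks=0bc2:331a:u" | sudo tee /etc/modprobe.d/disable-uas.conf
sudo update-initramfs -u
```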

### Exit Codes

If you see an error message similar to `smartctl returned an error code (2) while processing /dev/sda`, this means that
`smartctl` (not Scrutiny) exited with an error code. Scrutiny will attempt to print a helpful error message to help you
debug, but you can look at the table (and associated links) below to debug `smartctl`.

> smartctl Return Values
> The return values of smartctl are defined by a bitmask. If all is well with the disk, the return value (exit status) of
> smartctl is 0 (all bits turned off). If a problem occurs, or an error, potential error, or fault is detected, then
> a non-zero status is returned. In this case, the eight different bits in the return value have the following meanings
> for ATA disks; some of these values may also be returned for SCSI disks.
>
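
Because the exit status is a bitmask, individual bits can be tested directly in a shell; for example, bit 3 (value 8) is the "SMART status check returned DISK FAILING" bit:

```bash
smartctl --all /dev/sda > /dev/null
rc=$?

# bit 3 (value 8) means smartctl's SMART status check reported the disk as failing
if (( rc & 8 )); then
  echo "disk is failing (smartctl exit code: ${rc})"
fi
```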

@@ -180,13 +217,94 @@ If Scrutiny detects that an attribute corresponds with a high rate of failure us

This can cause some confusion when comparing Scrutiny's dashboard against other SMART analysis tools.
If you hover over the "failed" label beside an attribute, Scrutiny will tell you if the failure was due to SMART or Scrutiny/BackBlaze data.

### Device failed but Smart & Scrutiny passed

Device SMART results are the source of truth for Scrutiny; however, we don't just take into account the current SMART results, but also historical analysis of a disk.
This means that if a device is marked as failed at any point in its history, it will continue to be stored in the database as failed until the device is removed (or its status is reset -- see below).

In some cases, this historical failure may have been due to attribute analysis/thresholds that have since been relaxed:

- NVME - Numb Error Log Entries (v0.4.7)
- ATA - Power Cycle Count (v0.4.7)
- ATA - Read Error Rate (v0.4.13)
- ATA - Seek Error Rate (v0.4.13)

If you'd like to reset the status of a disk (to healthy) and allow the next run of the collector to determine the actual status, you can run the following command:

```bash
# connect to scrutiny docker container
docker exec -it scrutiny bash

# install sqlite CLI tools (inside container)
apt update && apt install -y sqlite3

# connect to the scrutiny database
sqlite3 /opt/scrutiny/config/scrutiny.db

# reset/update the devices table, unset the failure status.
UPDATE devices SET device_status = null;

# exit sqlite CLI
.exit
```
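
If you'd rather reset a single drive than the whole fleet, you can scope the `UPDATE`. A sketch, assuming the `devices` table keys rows by a `wwn` column (verify with `.schema devices` inside the sqlite CLI first; the WWN value is illustrative):

```sql
-- inspect the table layout before changing anything
.schema devices

-- unset the failure status for one device only (WWN is illustrative)
UPDATE devices SET device_status = null WHERE wwn = '0x5000cca264eb01d7';
```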

### Seagate Drives Failing

As thoroughly discussed in [#255](https://github.com/AnalogJ/scrutiny/issues/255), Seagate (Ironwolf & others) drives are almost always marked as failed by Scrutiny.

> The `Seek Error Rate` & `Read Error Rate` attribute raw values are typically very high, and the
> normalised values (Current / Worst / Threshold) are usually quite low. Despite this, the numbers in most cases are perfectly OK.
>
> The anxiety arises because we intuitively expect that the normalised values should reflect a "health" score, with
> 100 being the ideal value. Similarly, we would expect that the raw values should reflect an error count, in
> which case a value of 0 would be most desirable. However, Seagate calculates and applies these attribute values
> in a counterintuitive way.
>
> http://www.users.on.net/~fzabkar/HDD/Seagate_SER_RRER_HEC.html

Some analysis has been done which shows that Seagate drives break the common SMART conventions, which also causes Scrutiny's
comparison against BackBlaze data to detect these drives as failed.

**So what's the Solution?**

After taking a look at the BackBlaze data for the relevant Attributes (`Seek Error Rate` & `Read Error Rate`), I've decided
to disable Scrutiny analysis for them. Both are non-critical, and have low correlation with failure.

> Please note: SMART failures for these attributes will still cause the drive to be marked as failed. Only BackBlaze analysis has been disabled

If this is affecting your drives, you'll need to do the following:

1. Upgrade to v0.4.13+
2. Reset your drive status using the SQLite script
   in [#device-failed-but-smart--scrutiny-passed](https://github.com/AnalogJ/scrutiny/blob/master/docs/TROUBLESHOOTING_DEVICE_COLLECTOR.md#device-failed-but-smart--scrutiny-passed)
3. Wait for (or manually start) the collector.

If you'd like to learn more about how the Seagate Ironwolf SMART attributes work under the hood, and how they differ
from other drives, please read the following:

- http://www.users.on.net/~fzabkar/HDD/Seagate_SER_RRER_HEC.html
- https://www.truenas.com/community/threads/seagate-ironwolf-smart-test-raw_read_error_rate-seek_error_rate.68634/

## Hub & Spoke model, with multiple Hosts.

When deploying Scrutiny in a hub & spoke model, it can be difficult to determine exactly which node a set of devices are
associated with.
Thankfully the collector has a special `--host-id` flag (or `COLLECTOR_HOST_ID` env variable) that can be used to
associate devices with a friendly host name.

![host-id](...)

The host-id is passed from the collector to the web-api when SMART device data is uploaded. There are 3 ways you can set
the host-id (see the sketch below):

- using the collector config
  file: [master/example.collector.yaml#L19-L22](https://github.com/AnalogJ/scrutiny/blob/master/example.collector.yaml?rgh-link-date=2022-05-25T15%3A08%3A56Z#L19-L22)
- using the `--host-id` collector CLI
  argument: [master/collector/cmd/collector-metrics/collector-metrics.go#L180-L185](https://github.com/AnalogJ/scrutiny/blob/master/collector/cmd/collector-metrics/collector-metrics.go?rgh-link-date=2022-05-25T15%3A08%3A56Z#L180-L185)
- using the `COLLECTOR_HOST_ID` environmental variable.
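
A quick sketch of the CLI-flag and env-variable forms (host name and endpoint are illustrative):

```bash
# CLI flag
scrutiny-collector-metrics run --host-id "nas-01" --api-endpoint "http://192.168.0.100:8080"

# environment variable (eg. from a wrapper script or unit file)
COLLECTOR_HOST_ID="nas-01" scrutiny-collector-metrics run --api-endpoint "http://192.168.0.100:8080"
```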

See the [docs/INSTALL_HUB_SPOKE.md](/docs/INSTALL_HUB_SPOKE.md) guide for more information.

## Collector DEBUG mode

@@ -201,4 +319,21 @@ Or if you're not using docker, you can pass CLI arguments to the collector durin

```bash
scrutiny-collector-metrics run --debug --log-file /tmp/collector.log
```

## Collector trigger on startup

When the `omnibus` docker image starts up, it will automatically trigger the collector, which will populate the Scrutiny
Webui with your disks.
This is not the case when running the collector docker image in **hub/spoke** mode, as the collector and webui are
running in different containers (and potentially different host machines), so
the web container may not be ready for incoming connections. By default the container will only run the collector at the
time specified in the cron schedule.

You can force the collector to run on startup using the following env variables (see the compose sketch after this list):

- `-e COLLECTOR_RUN_STARTUP=true` - forces the collector to run on startup (cron will be started after the collector
  completes)
- `-e COLLECTOR_RUN_STARTUP_SLEEP=10` - if `COLLECTOR_RUN_STARTUP` is enabled, you can use this env variable to
  configure the delay before the collector is run (default: `1` second). Used to ensure the web container has started
  successfully.
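
For example, in a docker-compose service definition for the collector (values are illustrative):

```yaml
collector:
  image: ghcr.io/analogj/scrutiny:master-collector
  environment:
    COLLECTOR_API_ENDPOINT: "http://scrutiny-web:8080"
    COLLECTOR_RUN_STARTUP: "true"
    COLLECTOR_RUN_STARTUP_SLEEP: "10"
```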

@@ -14,12 +14,6 @@ is almost immediately created (and tagged with `latest`)

So changing from `master-omnibus -> latest` will be the same thing for all intents and purposes.

> NOTE: Previously, there was an `automated cron build` that ran on the `master` and `beta` branches.
> It used to trigger a `nightly` build, even if nothing had changed on the branch. This had a couple of benefits, but one was to
> ensure that there were no broken external dependencies in our (unchanged) code. This `nightly` build no longer updates the `master-omnibus` tag.
@@ -1,7 +1,19 @@
# InfluxDB Troubleshooting

## Why??

Scrutiny has many features, but the relevant one to this conversation is the "S.M.A.R.T metric tracking for historical
trends". Basically Scrutiny not only shows you the current SMART values, but how they've changed over weeks, months (or
even years).

To efficiently handle that data at scale (and to make my life easier as a developer) I decided to add InfluxDB as a
dependency. It's a dedicated timeseries database, as opposed to the general purpose sqlite DB I used before. I also did
a bunch of testing and analysis before I made the change. With InfluxDB the memory footprint for Scrutiny (at idle) is
~100mb, which is still fairly reasonable.

## Installation

InfluxDB is a required dependency for Scrutiny v0.4.0+.

https://docs.influxdata.com/influxdb/v2.2/install/
@@ -54,15 +66,361 @@ time="2022-05-13T14:38:05Z" level=info msg="Successfully connected to scrutiny s
panic: a username and password is required for a setup
```

or

```
Start the scrutiny server
time="2022-06-11T10:35:04-04:00" level=info msg="Trying to connect to scrutiny sqlite db: \n"
time="2022-06-11T10:35:04-04:00" level=info msg="Successfully connected to scrutiny sqlite db: \n"
panic: failed to check influxdb setup status - parse "://:": missing protocol scheme
```

As discussed in [#248](https://github.com/AnalogJ/scrutiny/issues/248) and [#234](https://github.com/AnalogJ/scrutiny/issues/234),
this is usually related to either:

- Upgrading from the LSIO Scrutiny image to the Official Scrutiny image, without removing LSIO specific environmental
  variables
  - remove the `SCRUTINY_WEB=true` and `SCRUTINY_COLLECTOR=true` environmental variables. They were used by the LSIO
    image, but are unnecessary and cause issues with the official Scrutiny image.
  - Change your volume mappings to `/opt/scrutiny` from `/scrutiny`
- Updated versions of the [LSIO Scrutiny images are broken](https://github.com/linuxserver/docker-scrutiny/issues/22),
  as they have not installed InfluxDB which is a required dependency of Scrutiny v0.4.x
  - You can revert to an earlier version of the LSIO image (`lscr.io/linuxserver/scrutiny:060ac7b8-ls34`), or just
    change to the official Scrutiny image (`ghcr.io/analogj/scrutiny:master-omnibus`)

Here's a couple of confirmed working docker-compose files that you may want to look at:

- https://github.com/AnalogJ/scrutiny/blob/master/docker/example.hubspoke.docker-compose.yml
- https://github.com/AnalogJ/scrutiny/blob/master/docker/example.omnibus.docker-compose.yml

## Bring your own InfluxDB

> WARNING: Most users should not follow these steps. This is ONLY for users who have an EXISTING InfluxDB installation which contains data from multiple services.
> The Scrutiny Docker omnibus image includes an empty InfluxDB instance which it can configure.
> If you're deploying manually or via Hub/Spoke, you can just follow the installation instructions; Scrutiny knows how
> to run the first-time setup automatically.

The goal here is to create an InfluxDB API key with minimal permissions for use by Scrutiny.

- Create Scrutiny buckets (`metrics`, `metrics_weekly`, `metrics_monthly`, `metrics_yearly`) with placeholder config
- Create Downsampling tasks (`tsk-weekly-aggr`, `tsk-monthly-aggr`, `tsk-yearly-aggr`) with placeholder script.
- Create API token with restricted scope
- NOTE: Placeholder bucket & task configuration will be replaced automatically by Scrutiny during startup

The placeholder buckets and tasks need to be created before the API token can be created, as the resource IDs need to
exist for the scope restriction to work.

Scopes:

- `orgs`: read - required for scrutiny to find its configured org_id
- `tasks`: scrutiny specific read/write access - Scrutiny only needs access to the downsampling tasks you created above
- `buckets`: scrutiny specific read/write access - Scrutiny only needs access to the buckets you created above

### Setup Environmental Variables

```bash
# replace the following values with correct values for your InfluxDB installation
export INFLUXDB_ADMIN_TOKEN=pCqRq7xxxxxx-FZgNLfstIs0w==
export INFLUXDB_ORG_ID=b2495xxxxx
export INFLUXDB_HOSTNAME=http://localhost:8086

# if you want to change the bucket name prefix below, you'll also need to update the setting in the scrutiny.yaml config file.
export INFLUXDB_SCRUTINY_BUCKET_BASENAME=metrics
```

### Create placeholder buckets

<details>
<summary>Click to expand!</summary>

```bash
curl -sS -X POST ${INFLUXDB_HOSTNAME}/api/v2/buckets \
  -H "Content-Type: application/json" \
  -H "Authorization: Token ${INFLUXDB_ADMIN_TOKEN}" \
  --data-binary @- << EOF
{
  "name": "${INFLUXDB_SCRUTINY_BUCKET_BASENAME}",
  "orgID": "${INFLUXDB_ORG_ID}",
  "retentionRules": []
}
EOF

curl -sS -X POST ${INFLUXDB_HOSTNAME}/api/v2/buckets \
  -H "Content-Type: application/json" \
  -H "Authorization: Token ${INFLUXDB_ADMIN_TOKEN}" \
  --data-binary @- << EOF
{
  "name": "${INFLUXDB_SCRUTINY_BUCKET_BASENAME}_weekly",
  "orgID": "${INFLUXDB_ORG_ID}",
  "retentionRules": []
}
EOF

curl -sS -X POST ${INFLUXDB_HOSTNAME}/api/v2/buckets \
  -H "Content-Type: application/json" \
  -H "Authorization: Token ${INFLUXDB_ADMIN_TOKEN}" \
  --data-binary @- << EOF
{
  "name": "${INFLUXDB_SCRUTINY_BUCKET_BASENAME}_monthly",
  "orgID": "${INFLUXDB_ORG_ID}",
  "retentionRules": []
}
EOF

curl -sS -X POST ${INFLUXDB_HOSTNAME}/api/v2/buckets \
  -H "Content-Type: application/json" \
  -H "Authorization: Token ${INFLUXDB_ADMIN_TOKEN}" \
  --data-binary @- << EOF
{
  "name": "${INFLUXDB_SCRUTINY_BUCKET_BASENAME}_yearly",
  "orgID": "${INFLUXDB_ORG_ID}",
  "retentionRules": []
}
EOF
```

</details>

### Create placeholder tasks

<details>
<summary>Click to expand!</summary>

```bash
curl -sS -X POST ${INFLUXDB_HOSTNAME}/api/v2/tasks \
  -H "Content-Type: application/json" \
  -H "Authorization: Token ${INFLUXDB_ADMIN_TOKEN}" \
  --data-binary @- << EOF
{
  "orgID": "${INFLUXDB_ORG_ID}",
  "flux": "option task = {name: \"tsk-weekly-aggr\", every: 1y} \nyield now()"
}
EOF

curl -sS -X POST ${INFLUXDB_HOSTNAME}/api/v2/tasks \
  -H "Content-Type: application/json" \
  -H "Authorization: Token ${INFLUXDB_ADMIN_TOKEN}" \
  --data-binary @- << EOF
{
  "orgID": "${INFLUXDB_ORG_ID}",
  "flux": "option task = {name: \"tsk-monthly-aggr\", every: 1y} \nyield now()"
}
EOF

curl -sS -X POST ${INFLUXDB_HOSTNAME}/api/v2/tasks \
  -H "Content-Type: application/json" \
  -H "Authorization: Token ${INFLUXDB_ADMIN_TOKEN}" \
  --data-binary @- << EOF
{
  "orgID": "${INFLUXDB_ORG_ID}",
  "flux": "option task = {name: \"tsk-yearly-aggr\", every: 1y} \nyield now()"
}
EOF
```

</details>

### Create InfluxDB API Token
|
||||
|
||||
<details>
|
||||
<summary>Click to expand!</summary>
|
||||
|
||||
```bash
# replace these placeholder values with the bucket and task ids from your InfluxDB installation.
export INFLUXDB_SCRUTINY_BASE_BUCKET_ID=1e0709xxxx
export INFLUXDB_SCRUTINY_WEEKLY_BUCKET_ID=1af03dexxxxx
export INFLUXDB_SCRUTINY_MONTHLY_BUCKET_ID=b3c59c7xxxxx
export INFLUXDB_SCRUTINY_YEARLY_BUCKET_ID=f381d8cxxxxx

export INFLUXDB_SCRUTINY_WEEKLY_TASK_ID=09a64ecxxxxx
export INFLUXDB_SCRUTINY_MONTHLY_TASK_ID=09a64xxxxx
export INFLUXDB_SCRUTINY_YEARLY_TASK_ID=09a64ecxxxxx


curl -sS -X POST ${INFLUXDB_HOSTNAME}/api/v2/authorizations \
  -H "Content-Type: application/json" \
  -H "Authorization: Token ${INFLUXDB_ADMIN_TOKEN}" \
  --data-binary @- << EOF
{
  "description": "scrutiny - restricted scope token",
  "orgID": "${INFLUXDB_ORG_ID}",
  "permissions": [
    {
      "action": "read",
      "resource": {
        "type": "orgs"
      }
    },
    {
      "action": "read",
      "resource": {
        "type": "tasks"
      }
    },
    {
      "action": "write",
      "resource": {
        "type": "tasks",
        "id": "${INFLUXDB_SCRUTINY_WEEKLY_TASK_ID}",
        "orgID": "${INFLUXDB_ORG_ID}"
      }
    },
    {
      "action": "write",
      "resource": {
        "type": "tasks",
        "id": "${INFLUXDB_SCRUTINY_MONTHLY_TASK_ID}",
        "orgID": "${INFLUXDB_ORG_ID}"
      }
    },
    {
      "action": "write",
      "resource": {
        "type": "tasks",
        "id": "${INFLUXDB_SCRUTINY_YEARLY_TASK_ID}",
        "orgID": "${INFLUXDB_ORG_ID}"
      }
    },
    {
      "action": "read",
      "resource": {
        "type": "buckets",
        "id": "${INFLUXDB_SCRUTINY_BASE_BUCKET_ID}",
        "orgID": "${INFLUXDB_ORG_ID}"
      }
    },
    {
      "action": "write",
      "resource": {
        "type": "buckets",
        "id": "${INFLUXDB_SCRUTINY_BASE_BUCKET_ID}",
        "orgID": "${INFLUXDB_ORG_ID}"
      }
    },
    {
      "action": "read",
      "resource": {
        "type": "buckets",
        "id": "${INFLUXDB_SCRUTINY_WEEKLY_BUCKET_ID}",
        "orgID": "${INFLUXDB_ORG_ID}"
      }
    },
    {
      "action": "write",
      "resource": {
        "type": "buckets",
        "id": "${INFLUXDB_SCRUTINY_WEEKLY_BUCKET_ID}",
        "orgID": "${INFLUXDB_ORG_ID}"
      }
    },
    {
      "action": "read",
      "resource": {
        "type": "buckets",
        "id": "${INFLUXDB_SCRUTINY_MONTHLY_BUCKET_ID}",
        "orgID": "${INFLUXDB_ORG_ID}"
      }
    },
    {
      "action": "write",
      "resource": {
        "type": "buckets",
        "id": "${INFLUXDB_SCRUTINY_MONTHLY_BUCKET_ID}",
        "orgID": "${INFLUXDB_ORG_ID}"
      }
    },
    {
      "action": "read",
      "resource": {
        "type": "buckets",
        "id": "${INFLUXDB_SCRUTINY_YEARLY_BUCKET_ID}",
        "orgID": "${INFLUXDB_ORG_ID}"
      }
    },
    {
      "action": "write",
      "resource": {
        "type": "buckets",
        "id": "${INFLUXDB_SCRUTINY_YEARLY_BUCKET_ID}",
        "orgID": "${INFLUXDB_ORG_ID}"
      }
    }
  ]
}
EOF
```

</details>

### Save InfluxDB API Token

After running the curl command above, you'll see a JSON response that looks like the following:

```json
{
  "token": "ksVU2t5SkQwYkvIxxxxxxxYt2xUt0uRKSbSF1Po0UQ==",
  "status": "active",
  "description": "scrutiny - restricted scope token",
  "orgID": "b2495586xxxx",
  "org": "my-org",
  "user": "admin",
  "permissions": [
    {
      "action": "read",
      "resource": {
        "type": "orgs"
      }
    },
    {
      "action": "read",
      "resource": {
        "type": "tasks"
      }
    },
    {
      "action": "write",
      "resource": {
        "type": "tasks",
        "id": "09a64exxxxx",
        "orgID": "b24955860xxxxx",
        "org": "my-org"
      }
    },
    ...
  ]
}
```

Copy the `token` field from the JSON response and save it in your `scrutiny.yaml` config file. Once that's done, you can start the Scrutiny server.

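For reference, the token belongs under the `web.influxdb` section of `scrutiny.yaml` (a sketch; the key matches the config file example later in this document):

```yaml
web:
  influxdb:
    token: 'ksVU2t5SkQwYkvIxxxxxxxYt2xUt0uRKSbSF1Po0UQ=='
```
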
## Customize InfluxDB Admin Username & Password

The full set of InfluxDB configuration options is available
in [code](https://github.com/AnalogJ/scrutiny/blob/master/webapp/backend/pkg/config/config.go?rgh-link-date=2023-01-19T16%3A23%3A40Z#L49-L51).

During first startup, Scrutiny will connect to the unprotected InfluxDB server, start the setup process (via API) using a
username and password of `admin`:`password12345`, and then create an API token named `scrutiny-default-admin-token`.

After that's complete, it will use the API token for all subsequent communication with InfluxDB.

You can configure the values for the admin username, password and token using the config file or env variables:

#### Config File Example

```yaml
web:
  influxdb:
    token: 'my-custom-token'
    init_username: 'my-custom-username'
    init_password: 'my-custom-password'
```

#### Environmental Variables Example

`SCRUTINY_WEB_INFLUXDB_TOKEN`, `SCRUTINY_WEB_INFLUXDB_INIT_USERNAME` and `SCRUTINY_WEB_INFLUXDB_INIT_PASSWORD`

It's safe to change the InfluxDB admin username/password after setup has completed; only the API token is used for
subsequent communication with InfluxDB.

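For example, in a Docker-based deployment these variables could be passed to the container (a sketch; the image tag and values are illustrative):

```bash
docker run -d \
  -e SCRUTINY_WEB_INFLUXDB_TOKEN='my-custom-token' \
  -e SCRUTINY_WEB_INFLUXDB_INIT_USERNAME='my-custom-username' \
  -e SCRUTINY_WEB_INFLUXDB_INIT_PASSWORD='my-custom-password' \
  ghcr.io/analogj/scrutiny:master-omnibus
```
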
@@ -21,5 +21,11 @@ SCRUTINY_DEVICE_NAME - eg. /dev/sda
SCRUTINY_DEVICE_TYPE - ATA/SCSI/NVMe
SCRUTINY_DEVICE_SERIAL - eg. WDDJ324KSO
SCRUTINY_MESSAGE - eg. "Scrutiny SMART error notification for device: %s\nFailure Type: %s\nDevice Name: %s\nDevice Serial: %s\nDevice Type: %s\nDate: %s"
SCRUTINY_HOST_ID - (optional) eg. "my-custom-host-id"
```

# Testing Notifications

You can test that your notifications are configured correctly by posting an empty payload to the notifications health check API.

```
curl -X POST http://localhost:8080/api/health/notify
```

@@ -0,0 +1,18 @@
# Operating systems without udev

Some operating systems do not come with `udev` out of the box, for example Alpine Linux. In these instances you will not be able to bind `/run/udev` to the container for sharing device metadata. Some operating systems offer `udev` as a package that can be installed separately, or an alternative (such as `eudev` in the case of Alpine Linux) that provides the same functionality.

To install `eudev` in Alpine Linux (run as root):

```
apk add eudev
setup-udev
```

Once your `udev` implementation is installed, create `/run/udev` with the following command:

```
udevadm trigger
```

On Alpine Linux, this also has the benefit of creating symlinks to device serial numbers in `/dev/disk/by-id`.

+70 −44
@@ -1,62 +1,88 @@

// SQLite Table(s)
Table device {
  created_at timestamp

  wwn varchar [pk]
Table Device {
  //GORM attributes, see: http://gorm.io/docs/conventions.html
  CreatedAt time
  UpdatedAt time
  DeletedAt time

  //user provided
  label varchar
  host_id varchar
  WWN string

  // smartctl provided
  device_name varchar
  manufacturer varchar
  model_name varchar
  interface_type varchar
  interface_speed varchar
  serial_number varchar
  firmware varchar
  rotational_speed varchar
  capacity varchar
  form_factor varchar
  smart_support varchar
  device_protocol varchar
  device_type varchar
  DeviceName string
  DeviceUUID string
  DeviceSerialID string
  DeviceLabel string

  Manufacturer string
  ModelName string
  InterfaceType string
  InterfaceSpeed string
  SerialNumber string
  Firmware string
  RotationSpeed int
  Capacity int64
  FormFactor string
  SmartSupport bool
  DeviceProtocol string //protocol determines which smart attribute types are available (ATA, NVMe, SCSI)
  DeviceType string //device type is used for querying with -d/t flag, should only be used by collector.

  // User provided metadata
  Label string
  HostId string

  // Data set by Scrutiny
  DeviceStatus enum
}

Table Setting {
  //GORM attributes, see: http://gorm.io/docs/conventions.html

  SettingKeyName string
  SettingKeyDescription string
  SettingDataType string

  SettingValueNumeric int64
  SettingValueString string
}


// InfluxDB Tables
Table device_temperature {
  //timestamp
  created_at timestamp

  //tags (indexed & queryable)
  device_wwn varchar [pk]

  //fields
  temp bigint
}
Table SmartTemperature {
  Date time
  DeviceWWN string //(tag)
  Temp int64
}


Table smart_ata_results {
  //timestamp
  created_at timestamp
Table Smart {
  Date time
  DeviceWWN string //(tag)
  DeviceProtocol string

  //tags (indexed & queryable)
  device_wwn varchar [pk]
  smart_status varchar
  scrutiny_status varchar
  //Metrics (fields)
  Temp int64
  PowerOnHours int64
  PowerCycleCount int64

  //Smart Status
  Status enum


  //fields
  temp bigint
  power_on_hours bigint
  power_cycle_count bigint

  //SMART Attributes (fields)
  Attr_ID_AttributeId int
  Attr_ID_Value int64
  Attr_ID_Threshold int64
  Attr_ID_Worst int64
  Attr_ID_RawValue int64
  Attr_ID_RawString string
  Attr_ID_WhenFailed string
  //Generated data
  Attr_ID_TransformedValue int64
  Attr_ID_Status enum
  Attr_ID_StatusReason string
  Attr_ID_FailureRate float64

}

Ref: device.wwn < smart_ata_results.device_wwn
Ref: Device.WWN < Smart.DeviceWWN
Ref: Device.WWN < SmartTemperature.DeviceWWN

Binary file not shown.
After: image, 78 KiB
@@ -31,6 +31,10 @@ devices:
#  - device: /dev/sda
#    type: 'sat'
#
#  # example for using `-d sat,auto`, notice the square brackets (workaround for #418)
#  - device: /dev/sda
#    type: ['sat,auto']
#
#  # example to show how to ignore a specific disk/device.
#  - device: /dev/sda
#    ignore: true
@@ -73,6 +77,7 @@ devices:

# example to show how to override the smartctl command args globally
#commands:
#  metrics_smartctl_bin: 'smartctl' # change to provide custom `smartctl` binary path, eg. `/usr/sbin/smartctl`
#  metrics_scan_args: '--scan --json' # used to detect devices
#  metrics_info_args: '--info --json' # used to determine device unique ID & register device with Scrutiny
#  metrics_smart_args: '--xall --json' # used to retrieve smart data for each device.
@@ -86,9 +91,6 @@ devices:
########################################################################################################################

#collect:
#  metric:
#    enable: true
#    command: '-a -o on -S on'
#  long:
#    enable: false
#    command: ''

@@ -47,6 +47,11 @@ web:
#    org: 'my-org'
#    bucket: 'bucket'
    retention_policy: true
    # if you wish to disable TLS certificate verification,
    # when using self-signed certificates for example,
    # then uncomment the lines below and set `insecure_skip_verify: true`
    # tls:
    #   insecure_skip_verify: false

log:
  file: '' #absolute or relative paths allowed, eg. web.log
@@ -68,6 +73,7 @@ log:
#  - "pushbullet://api-token[/device/#channel/email]"
#  - "ifttt://key/?events=event1[,event2,...]&value1=value1&value2=value2&value3=value3"
#  - "mattermost://[username@]mattermost-host/token[/channel]"
#  - "ntfy://username:password@host:port/topic"
#  - "hangouts://chat.googleapis.com/v1/spaces/FOO/messages?key=bar&token=baz"
#  - "zulip://bot-mail:bot-key@zulip-domain/?stream=name-or-id&topic=name"
#  - "join://shoutrrr:api-key@join/?devices=device1[,device2, ...][&icon=icon][&title=title]"

@@ -1,83 +1,82 @@
module github.com/analogj/scrutiny

go 1.17
go 1.20

require (
    github.com/analogj/go-util v0.0.0-20190301173314-5295e364eb14
    github.com/containrrr/shoutrrr v0.4.4
    github.com/fatih/color v1.10.0
    github.com/containrrr/shoutrrr v0.7.1
    github.com/fatih/color v1.15.0
    github.com/gin-gonic/gin v1.6.3
    github.com/glebarez/sqlite v1.4.5
    github.com/go-gormigrate/gormigrate/v2 v2.0.0
    github.com/golang/mock v1.4.3
    github.com/golang/mock v1.6.0
    github.com/influxdata/influxdb-client-go/v2 v2.9.0
    github.com/jaypipes/ghw v0.6.1
    github.com/jinzhu/gorm v1.9.16
    github.com/mitchellh/mapstructure v1.2.2
    github.com/sirupsen/logrus v1.4.2
    github.com/spf13/viper v1.7.0
    github.com/stretchr/testify v1.5.1
    github.com/mitchellh/mapstructure v1.5.0
    github.com/samber/lo v1.25.0
    github.com/sirupsen/logrus v1.6.0
    github.com/spf13/viper v1.14.0
    github.com/stretchr/testify v1.8.1
    github.com/urfave/cli/v2 v2.2.0
    golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9
    gorm.io/driver/sqlite v1.1.3
    gorm.io/gorm v1.20.2
    golang.org/x/sync v0.1.0
    gorm.io/gorm v1.23.5
)

require (
    github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect
    github.com/citilinkru/libudev v1.0.0 // indirect
    github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect
    github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
    github.com/davecgh/go-spew v1.1.1 // indirect
    github.com/deepmap/oapi-codegen v1.8.2 // indirect
    github.com/fsnotify/fsnotify v1.4.9 // indirect
    github.com/fsnotify/fsnotify v1.6.0 // indirect
    github.com/ghodss/yaml v1.0.0 // indirect
    github.com/gin-contrib/sse v0.1.0 // indirect
    github.com/glebarez/go-sqlite v1.17.2 // indirect
    github.com/go-ole/go-ole v1.2.4 // indirect
    github.com/go-playground/locales v0.13.0 // indirect
    github.com/go-playground/universal-translator v0.17.0 // indirect
    github.com/go-playground/validator/v10 v10.2.0 // indirect
    github.com/golang/protobuf v1.4.2 // indirect
    github.com/google/uuid v1.2.0 // indirect
    github.com/golang/protobuf v1.5.2 // indirect
    github.com/google/uuid v1.3.0 // indirect
    github.com/hashicorp/hcl v1.0.0 // indirect
    github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect
    github.com/jaypipes/pcidb v0.5.0 // indirect
    github.com/jinzhu/inflection v1.0.0 // indirect
    github.com/jinzhu/now v1.1.1 // indirect
    github.com/json-iterator/go v1.1.9 // indirect
    github.com/klauspost/compress v1.12.1 // indirect
    github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
    github.com/kvz/logstreamer v0.0.0-20150507115422-a635b98146f0 // indirect
    github.com/jinzhu/now v1.1.4 // indirect
    github.com/json-iterator/go v1.1.12 // indirect
    github.com/konsorten/go-windows-terminal-sequences v1.0.3 // indirect
    github.com/kvz/logstreamer v0.0.0-20201023134116-02d20f4338f5 // indirect
    github.com/leodido/go-urn v1.2.0 // indirect
    github.com/magiconair/properties v1.8.1 // indirect
    github.com/mattn/go-colorable v0.1.8 // indirect
    github.com/mattn/go-isatty v0.0.12 // indirect
    github.com/mattn/go-sqlite3 v1.14.4 // indirect
    github.com/magiconair/properties v1.8.6 // indirect
    github.com/mattn/go-colorable v0.1.13 // indirect
    github.com/mattn/go-isatty v0.0.18 // indirect
    github.com/mitchellh/go-homedir v1.1.0 // indirect
    github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
    github.com/modern-go/reflect2 v1.0.1 // indirect
    github.com/nxadm/tail v1.4.8 // indirect
    github.com/onsi/ginkgo v1.16.1 // indirect
    github.com/pelletier/go-toml v1.7.0 // indirect
    github.com/modern-go/reflect2 v1.0.2 // indirect
    github.com/pelletier/go-toml v1.9.5 // indirect
    github.com/pelletier/go-toml/v2 v2.0.5 // indirect
    github.com/pkg/errors v0.9.1 // indirect
    github.com/pmezard/go-difflib v1.0.0 // indirect
    github.com/russross/blackfriday/v2 v2.0.1 // indirect
    github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect
    github.com/spf13/afero v1.2.2 // indirect
    github.com/spf13/cast v1.3.1 // indirect
    github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
    github.com/russross/blackfriday/v2 v2.1.0 // indirect
    github.com/spf13/afero v1.9.2 // indirect
    github.com/spf13/cast v1.5.0 // indirect
    github.com/spf13/jwalterweatherman v1.1.0 // indirect
    github.com/spf13/pflag v1.0.5 // indirect
    github.com/subosito/gotenv v1.2.0 // indirect
    github.com/subosito/gotenv v1.4.1 // indirect
    github.com/ugorji/go/codec v1.1.7 // indirect
    golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad // indirect
    golang.org/x/net v0.0.0-20210119194325-5f4716e94777 // indirect
    golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7 // indirect
    golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 // indirect
    golang.org/x/text v0.3.5 // indirect
    golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
    google.golang.org/protobuf v1.23.0 // indirect
    gopkg.in/ini.v1 v1.55.0 // indirect
    gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
    gopkg.in/yaml.v2 v2.3.0 // indirect
    gosrc.io/xmpp v0.5.1 // indirect
    golang.org/x/crypto v0.1.0 // indirect
    golang.org/x/exp v0.0.0-20220303212507-bbda1eaf7a17 // indirect
    golang.org/x/net v0.1.0 // indirect
    golang.org/x/sys v0.7.0 // indirect
    golang.org/x/term v0.1.0 // indirect
    golang.org/x/text v0.4.0 // indirect
    google.golang.org/protobuf v1.28.1 // indirect
    gopkg.in/ini.v1 v1.67.0 // indirect
    gopkg.in/yaml.v2 v2.4.0 // indirect
    gopkg.in/yaml.v3 v3.0.1 // indirect
    howett.net/plist v0.0.0-20181124034731-591f970eefbb // indirect
    nhooyr.io/websocket v1.8.7 // indirect
    modernc.org/libc v1.16.8 // indirect
    modernc.org/mathutil v1.4.1 // indirect
    modernc.org/memory v1.1.1 // indirect
    modernc.org/sqlite v1.17.2 // indirect
)

@@ -1,12 +1,15 @@
package main

import (
    "encoding/json"
    "fmt"
    "github.com/analogj/scrutiny/webapp/backend/pkg/config"
    "github.com/analogj/scrutiny/webapp/backend/pkg/errors"
    "github.com/analogj/scrutiny/webapp/backend/pkg/version"
    "github.com/analogj/scrutiny/webapp/backend/pkg/web"
    log "github.com/sirupsen/logrus"
    "github.com/sirupsen/logrus"
    "io"
    "log"
    "os"
    "time"

@@ -26,11 +29,18 @@ func main() {
        os.Exit(1)
    }

    configFilePath := "/opt/scrutiny/config/scrutiny.yaml"
    configFilePathAlternative := "/opt/scrutiny/config/scrutiny.yml"
    if !utils.FileExists(configFilePath) && utils.FileExists(configFilePathAlternative) {
        configFilePath = configFilePathAlternative
    }

    //we're going to load the config file manually, since we need to validate it.
    err = config.ReadConfig("/opt/scrutiny/config/scrutiny.yaml") // Find and read the config file
    err = config.ReadConfig(configFilePath) // Find and read the config file
    if _, ok := err.(errors.ConfigFileMissingError); ok { // Handle errors reading the config file
        //ignore "could not find config file"
    } else if err != nil {
        log.Print(color.HiRedString("CONFIG ERROR: %v", err))
        os.Exit(1)
    }

@@ -107,7 +117,18 @@ OPTIONS:
                config.Set("log.file", c.String("log-file"))
            }

            webServer := web.AppEngine{Config: config}
            webLogger, logFile, err := CreateLogger(config)
            if logFile != nil {
                defer logFile.Close()
            }
            if err != nil {
                return err
            }

            settingsData, err := json.Marshal(config.AllSettings())
            webLogger.Debug(string(settingsData), err)

            webServer := web.AppEngine{Config: config, Logger: webLogger}

            return webServer.Start()
        },
@@ -140,3 +161,27 @@ OPTIONS:
    }

}

func CreateLogger(appConfig config.Interface) (*logrus.Entry, *os.File, error) {
    logger := logrus.WithFields(logrus.Fields{
        "type": "web",
    })
    //set default log level
    if level, err := logrus.ParseLevel(appConfig.GetString("log.level")); err == nil {
        logger.Logger.SetLevel(level)
    } else {
        logger.Logger.SetLevel(logrus.InfoLevel)
    }

    var logFile *os.File
    var err error
    if appConfig.IsSet("log.file") && len(appConfig.GetString("log.file")) > 0 {
        logFile, err = os.OpenFile(appConfig.GetString("log.file"), os.O_CREATE|os.O_WRONLY, 0644)
        if err != nil {
            logger.Logger.Errorf("Failed to open log file %s for output: %s", appConfig.GetString("log.file"), err)
            return nil, logFile, err
        }
        logger.Logger.SetOutput(io.MultiWriter(os.Stderr, logFile))
    }
    return logger, logFile, nil
}

@@ -9,6 +9,8 @@ import (
    "strings"
)

const DB_USER_SETTINGS_SUBKEY = "user"

// When initializing this class the following methods must be called:
// Config.New
// Config.Init
@@ -47,22 +49,12 @@ func (c *configuration) Init() error {
    c.SetDefault("web.influxdb.init_username", "admin")
    c.SetDefault("web.influxdb.init_password", "password12345")
    c.SetDefault("web.influxdb.token", "scrutiny-default-admin-token")
    c.SetDefault("web.influxdb.tls.insecure_skip_verify", false)
    c.SetDefault("web.influxdb.retention_policy", true)

    //c.SetDefault("disks.include", []string{})
    //c.SetDefault("disks.exclude", []string{})

    //c.SetDefault("notify.metric.script", "/opt/scrutiny/config/notify-metrics.sh")
    //c.SetDefault("notify.long.script", "/opt/scrutiny/config/notify-long-test.sh")
    //c.SetDefault("notify.short.script", "/opt/scrutiny/config/notify-short-test.sh")

    //c.SetDefault("collect.metric.enable", true)
    //c.SetDefault("collect.metric.command", "-a -o on -S on")
    //c.SetDefault("collect.long.enable", true)
    //c.SetDefault("collect.long.command", "-a -o on -S on")
    //c.SetDefault("collect.short.enable", true)
    //c.SetDefault("collect.short.command", "-a -o on -S on")

    //if you want to load a non-standard location system config file (~/drawbridge.yml), use ReadConfig
    c.SetConfigType("yaml")
    //c.SetConfigName("drawbridge")
@@ -74,7 +66,18 @@ func (c *configuration) Init() error {
    c.AutomaticEnv()

    //CLI options will be added via the `Set()` function
    return nil
    return c.ValidateConfig()
}

func (c *configuration) SubKeys(key string) []string {
    return c.Sub(key).AllKeys()
}

func (c *configuration) Sub(key string) Interface {
    config := configuration{
        Viper: c.Viper.Sub(key),
    }
    return &config
}

func (c *configuration) ReadConfig(configFilePath string) error {
@@ -117,24 +120,18 @@ func (c *configuration) ReadConfig(configFilePath string) error {
// This function ensures that the merged config works correctly.
func (c *configuration) ValidateConfig() error {

    ////deserialize Questions
    //questionsMap := map[string]Question{}
    //err := c.UnmarshalKey("questions", &questionsMap)
    //
    //if err != nil {
    //    log.Printf("questions could not be deserialized correctly. %v", err)
    //    return err
    //}
    //
    //for _, v := range questionsMap {
    //
    //    typeContent, ok := v.Schema["type"].(string)
    //    if !ok || len(typeContent) == 0 {
    //        return errors.QuestionSyntaxError("`type` is required for questions")
    //    }
    //}
    //
    //
    //the following keys are deprecated, and no longer supported
    /*
        - notify.filter_attributes (replaced by metrics.status.filter_attributes SETTING)
        - notify.level (replaced by metrics.notify.level and metrics.status.threshold SETTING)
    */
    //TODO add docs and upgrade doc.
    if c.IsSet("notify.filter_attributes") {
        return errors.ConfigValidationError("`notify.filter_attributes` configuration option is deprecated. Replaced by option in Dashboard Settings page")
    }
    if c.IsSet("notify.level") {
        return errors.ConfigValidationError("`notify.level` configuration option is deprecated. Replaced by option in Dashboard Settings page")
    }

    return nil
}

@@ -0,0 +1,34 @@
package config

import (
    "github.com/spf13/viper"
    "github.com/stretchr/testify/require"
    "testing"
)

func Test_MergeConfigMap(t *testing.T) {
    //setup
    testConfig := configuration{
        Viper: viper.New(),
    }
    testConfig.Set("user.dashboard_display", "hello")
    testConfig.SetDefault("user.layout", "hello")

    mergeSettings := map[string]interface{}{
        "user": map[string]interface{}{
            "dashboard_display": "dashboard_display",
            "layout":            "layout",
        },
    }
    //test
    err := testConfig.MergeConfigMap(mergeSettings)

    //verify
    require.NoError(t, err)

    // if using Set, the MergeConfigMap functionality will not override
    // if using SetDefault, the MergeConfigMap will override correctly
    require.Equal(t, "hello", testConfig.GetString("user.dashboard_display"))
    require.Equal(t, "layout", testConfig.GetString("user.layout"))

}
@@ -12,12 +12,17 @@ type Interface interface {
    WriteConfig() error
    Set(key string, value interface{})
    SetDefault(key string, value interface{})
    MergeConfigMap(cfg map[string]interface{}) error

    Sub(key string) Interface
    AllSettings() map[string]interface{}
    AllKeys() []string
    SubKeys(key string) []string
    IsSet(key string) bool
    Get(key string) interface{}
    GetBool(key string) bool
    GetInt(key string) int
    GetInt64(key string) int64
    GetString(key string) string
    GetStringSlice(key string) []string
    UnmarshalKey(key string, rawVal interface{}, decoderOpts ...viper.DecoderConfigOption) error

@@ -7,6 +7,7 @@ package mock_config
import (
    reflect "reflect"

    config "github.com/analogj/scrutiny/webapp/backend/pkg/config"
    gomock "github.com/golang/mock/gomock"
    viper "github.com/spf13/viper"
)
@@ -34,6 +35,20 @@ func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder {
    return m.recorder
}

// AllKeys mocks base method.
func (m *MockInterface) AllKeys() []string {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "AllKeys")
    ret0, _ := ret[0].([]string)
    return ret0
}

// AllKeys indicates an expected call of AllKeys.
func (mr *MockInterfaceMockRecorder) AllKeys() *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllKeys", reflect.TypeOf((*MockInterface)(nil).AllKeys))
}

// AllSettings mocks base method.
func (m *MockInterface) AllSettings() map[string]interface{} {
    m.ctrl.T.Helper()
@@ -90,6 +105,20 @@ func (mr *MockInterfaceMockRecorder) GetInt(key interface{}) *gomock.Call {
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInt", reflect.TypeOf((*MockInterface)(nil).GetInt), key)
}

// GetInt64 mocks base method.
func (m *MockInterface) GetInt64(key string) int64 {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "GetInt64", key)
    ret0, _ := ret[0].(int64)
    return ret0
}

// GetInt64 indicates an expected call of GetInt64.
func (mr *MockInterfaceMockRecorder) GetInt64(key interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInt64", reflect.TypeOf((*MockInterface)(nil).GetInt64), key)
}

// GetString mocks base method.
func (m *MockInterface) GetString(key string) string {
    m.ctrl.T.Helper()
@@ -146,6 +175,20 @@ func (mr *MockInterfaceMockRecorder) IsSet(key interface{}) *gomock.Call {
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsSet", reflect.TypeOf((*MockInterface)(nil).IsSet), key)
}

// MergeConfigMap mocks base method.
func (m *MockInterface) MergeConfigMap(cfg map[string]interface{}) error {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "MergeConfigMap", cfg)
    ret0, _ := ret[0].(error)
    return ret0
}

// MergeConfigMap indicates an expected call of MergeConfigMap.
func (mr *MockInterfaceMockRecorder) MergeConfigMap(cfg interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MergeConfigMap", reflect.TypeOf((*MockInterface)(nil).MergeConfigMap), cfg)
}

// ReadConfig mocks base method.
func (m *MockInterface) ReadConfig(configFilePath string) error {
    m.ctrl.T.Helper()
@@ -184,6 +227,34 @@ func (mr *MockInterfaceMockRecorder) SetDefault(key, value interface{}) *gomock.
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDefault", reflect.TypeOf((*MockInterface)(nil).SetDefault), key, value)
}

// Sub mocks base method.
func (m *MockInterface) Sub(key string) config.Interface {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "Sub", key)
    ret0, _ := ret[0].(config.Interface)
    return ret0
}

// Sub indicates an expected call of Sub.
func (mr *MockInterfaceMockRecorder) Sub(key interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Sub", reflect.TypeOf((*MockInterface)(nil).Sub), key)
}

// SubKeys mocks base method.
func (m *MockInterface) SubKeys(key string) []string {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "SubKeys", key)
    ret0, _ := ret[0].([]string)
    return ret0
}

// SubKeys indicates an expected call of SubKeys.
func (mr *MockInterfaceMockRecorder) SubKeys(key interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubKeys", reflect.TypeOf((*MockInterface)(nil).SubKeys), key)
}

// UnmarshalKey mocks base method.
func (m *MockInterface) UnmarshalKey(key string, rawVal interface{}, decoderOpts ...viper.DecoderConfigOption) error {
    m.ctrl.T.Helper()

@@ -4,10 +4,11 @@ const DeviceProtocolAta = "ATA"
const DeviceProtocolScsi = "SCSI"
const DeviceProtocolNvme = "NVMe"

//go:generate stringer -type=AttributeStatus
// AttributeStatus bitwise flag, 1,2,4,8,16,32,etc
type AttributeStatus uint8

const (
    // AttributeStatusPassed binary, 1,2,4,8,16,32,etc
    AttributeStatusPassed          AttributeStatus = 0
    AttributeStatusFailedSmart     AttributeStatus = 1
    AttributeStatusWarningScrutiny AttributeStatus = 2
@@ -22,10 +23,11 @@ func AttributeStatusClear(b, flag AttributeStatus) AttributeStatus { return b &
func AttributeStatusToggle(b, flag AttributeStatus) AttributeStatus { return b ^ flag }
func AttributeStatusHas(b, flag AttributeStatus) bool { return b&flag != 0 }

//go:generate stringer -type=DeviceStatus
// DeviceStatus bitwise flag, 1,2,4,8,16,32,etc
type DeviceStatus uint8

const (
    // DeviceStatusPassed binary, 1,2,4,8,16,32,etc
    DeviceStatusPassed         DeviceStatus = 0
    DeviceStatusFailedSmart    DeviceStatus = 1
    DeviceStatusFailedScrutiny DeviceStatus = 2
@@ -35,3 +37,29 @@ func DeviceStatusSet(b, flag DeviceStatus) DeviceStatus { return b | flag }
func DeviceStatusClear(b, flag DeviceStatus) DeviceStatus { return b &^ flag }
func DeviceStatusToggle(b, flag DeviceStatus) DeviceStatus { return b ^ flag }
func DeviceStatusHas(b, flag DeviceStatus) bool { return b&flag != 0 }

// Metrics Specific Filtering & Threshold Constants
type MetricsNotifyLevel int64

const (
    MetricsNotifyLevelWarn MetricsNotifyLevel = 1
    MetricsNotifyLevelFail MetricsNotifyLevel = 2
)

type MetricsStatusFilterAttributes int64

const (
    MetricsStatusFilterAttributesAll      MetricsStatusFilterAttributes = 0
    MetricsStatusFilterAttributesCritical MetricsStatusFilterAttributes = 1
)

// MetricsStatusThreshold bitwise flag, 1,2,4,8,16,32,etc
type MetricsStatusThreshold int64

const (
    MetricsStatusThresholdSmart    MetricsStatusThreshold = 1
    MetricsStatusThresholdScrutiny MetricsStatusThreshold = 2

    //shortcut
    MetricsStatusThresholdBoth MetricsStatusThreshold = 3
)

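To illustrate the bitwise semantics of these thresholds, a minimal standalone sketch (the type and constants mirror the definitions above; the check follows the same pattern as the existing `*Has` helpers):

```go
package main

import "fmt"

// mirrors the MetricsStatusThreshold constants defined above
type MetricsStatusThreshold int64

const (
	MetricsStatusThresholdSmart    MetricsStatusThreshold = 1
	MetricsStatusThresholdScrutiny MetricsStatusThreshold = 2
	MetricsStatusThresholdBoth     MetricsStatusThreshold = 3 // Smart|Scrutiny
)

func main() {
	threshold := MetricsStatusThresholdBoth
	// a status that trips only the Scrutiny bit (2) still satisfies a
	// threshold of "both", because Both has that bit set.
	fmt.Println(threshold&MetricsStatusThresholdScrutiny != 0) // true
}
```
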
@@ -0,0 +1,12 @@
package database

import (
    "github.com/analogj/scrutiny/webapp/backend/pkg/models/measurements"
    "sort"
)

func sortSmartMeasurementsDesc(smartResults []measurements.Smart) {
    sort.SliceStable(smartResults, func(i, j int) bool {
        return smartResults[i].Date.After(smartResults[j].Date)
    })
}
@@ -0,0 +1,30 @@
package database

import (
    "github.com/analogj/scrutiny/webapp/backend/pkg/models/measurements"
    "github.com/stretchr/testify/require"
    "testing"
    "time"
)

func Test_sortSmartMeasurementsDesc_LatestFirst(t *testing.T) {
    //setup
    timeNow := time.Now()
    smartResults := []measurements.Smart{
        {
            Date: timeNow.AddDate(0, 0, -2),
        },
        {
            Date: timeNow,
        },
        {
            Date: timeNow.AddDate(0, 0, -1),
        },
    }

    //test
    sortSmartMeasurementsDesc(smartResults)

    //assert
    require.Equal(t, smartResults[0].Date, timeNow)
}
@@ -10,9 +10,7 @@ import (

type DeviceRepo interface {
    Close() error

    //GetSettings()
    //SaveSetting()
    HealthCheck(ctx context.Context) error

    RegisterDevice(ctx context.Context, dev models.Device) error
    GetDevices(ctx context.Context) ([]models.Device, error)
@@ -28,4 +26,7 @@ type DeviceRepo interface {

    GetSummary(ctx context.Context) (map[string]*models.DeviceSummary, error)
    GetSmartTemperatureHistory(ctx context.Context, durationKey string) (map[string][]measurements.SmartTemperature, error)

    LoadSettings(ctx context.Context) (*models.Settings, error)
    SaveSettings(ctx context.Context, settings models.Settings) error
}

@@ -0,0 +1,18 @@
package m20220716214900

import (
    "gorm.io/gorm"
)

type Setting struct {
    //GORM attributes, see: http://gorm.io/docs/conventions.html
    gorm.Model

    SettingKeyName        string `json:"setting_key_name"`
    SettingKeyDescription string `json:"setting_key_description"`
    SettingDataType       string `json:"setting_data_type"`

    SettingValueNumeric int    `json:"setting_value_numeric"`
    SettingValueString  string `json:"setting_value_string"`
    SettingValueBool    bool   `json:"setting_value_bool"`
}
@@ -2,15 +2,16 @@ package database

import (
    "context"
    "crypto/tls"
    "encoding/json"
    "fmt"
    "github.com/analogj/scrutiny/webapp/backend/pkg/config"
    "github.com/analogj/scrutiny/webapp/backend/pkg/models"
    "github.com/glebarez/sqlite"
    influxdb2 "github.com/influxdata/influxdb-client-go/v2"
    "github.com/influxdata/influxdb-client-go/v2/api"
    "github.com/influxdata/influxdb-client-go/v2/domain"
    "github.com/sirupsen/logrus"
    "gorm.io/driver/sqlite"
    "gorm.io/gorm"
    "io/ioutil"
    "net/http"
@@ -62,7 +63,20 @@ func NewScrutinyRepository(appConfig config.Interface, globalLogger logrus.Field
    // Gorm/SQLite setup
    ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    globalLogger.Infof("Trying to connect to scrutiny sqlite db: %s\n", appConfig.GetString("web.database.location"))
    database, err := gorm.Open(sqlite.Open(appConfig.GetString("web.database.location")), &gorm.Config{

    // When a transaction cannot lock the database, because it is already locked by another one,
    // SQLite by default throws an error: database is locked. This behavior is usually not appropriate when
    // concurrent access is needed, typically when multiple processes write to the same database.
    // PRAGMA busy_timeout lets you set a timeout or a handler for these events. When setting a timeout,
    // SQLite will try the transaction multiple times within this timeout.
    // fixes #341
    // https://rsqlite.r-dbi.org/reference/sqlitesetbusyhandler
    // retrying for 30000 milliseconds, 30seconds - this would be unreasonable for a distributed multi-tenant application,
    // but should be fine for local usage.
    pragmaStr := sqlitePragmaString(map[string]string{
        "busy_timeout": "30000",
    })
    database, err := gorm.Open(sqlite.Open(appConfig.GetString("web.database.location")+pragmaStr), &gorm.Config{
        //TODO: figure out how to log database queries again.
        //Logger: logger
        DisableForeignKeyConstraintWhenMigrating: true,
@@ -82,11 +96,20 @@ func NewScrutinyRepository(appConfig config.Interface, globalLogger logrus.Field
    influxdbUrl := fmt.Sprintf("%s://%s:%s", appConfig.GetString("web.influxdb.scheme"), appConfig.GetString("web.influxdb.host"), appConfig.GetString("web.influxdb.port"))
    globalLogger.Debugf("InfluxDB url: %s", influxdbUrl)

    client := influxdb2.NewClient(influxdbUrl, appConfig.GetString("web.influxdb.token"))
    tlsConfig := &tls.Config{
        InsecureSkipVerify: appConfig.GetBool("web.influxdb.tls.insecure_skip_verify"),
    }
    globalLogger.Infof("InfluxDB certificate verification: %t\n", !tlsConfig.InsecureSkipVerify)

    client := influxdb2.NewClientWithOptions(
        influxdbUrl,
        appConfig.GetString("web.influxdb.token"),
        influxdb2.DefaultOptions().SetTLSConfig(tlsConfig),
    )

    //if !appConfig.IsSet("web.influxdb.token") {
    globalLogger.Debugf("Determine Influxdb setup status...")
    influxSetupComplete, err := InfluxSetupComplete(influxdbUrl)
    influxSetupComplete, err := InfluxSetupComplete(influxdbUrl, tlsConfig)
    if err != nil {
        return nil, fmt.Errorf("failed to check influxdb setup status - %w", err)
    }
@@ -182,7 +205,30 @@ func (sr *scrutinyRepository) Close() error {
    return nil
}

func InfluxSetupComplete(influxEndpoint string) (bool, error) {
func (sr *scrutinyRepository) HealthCheck(ctx context.Context) error {
    //check influxdb
    status, err := sr.influxClient.Health(ctx)
    if err != nil {
        return fmt.Errorf("influxdb healthcheck failed: %w", err)
    }
    if status.Status != "pass" {
        return fmt.Errorf("influxdb healthcheck failed: status=%s", status.Status)
    }

    //check sqlite db.
    database, err := sr.gormClient.DB()
    if err != nil {
        return fmt.Errorf("sqlite healthcheck failed: %w", err)
    }
    err = database.Ping()
    if err != nil {
        return fmt.Errorf("sqlite healthcheck failed during ping: %w", err)
    }
    return nil

}

func InfluxSetupComplete(influxEndpoint string, tlsConfig *tls.Config) (bool, error) {
    influxUri, err := url.Parse(influxEndpoint)
    if err != nil {
        return false, err
@@ -192,7 +238,8 @@ func InfluxSetupComplete(influxEndpoint string) (bool, error) {
        return false, err
    }

    res, err := http.Get(influxUri.String())
    client := &http.Client{Transport: &http.Transport{TLSClientConfig: tlsConfig}}
    res, err := client.Get(influxUri.String())
    if err != nil {
        return false, err
    }
@@ -242,21 +289,29 @@ func (sr *scrutinyRepository) EnsureBuckets(ctx context.Context, org *domain.Org

    //create buckets (used for downsampling)
    weeklyBucket := fmt.Sprintf("%s_weekly", sr.appConfig.GetString("web.influxdb.bucket"))
    if _, foundErr := sr.influxClient.BucketsAPI().FindBucketByName(ctx, weeklyBucket); foundErr != nil {
    if foundWeeklyBucket, foundErr := sr.influxClient.BucketsAPI().FindBucketByName(ctx, weeklyBucket); foundErr != nil {
        // metrics_weekly bucket will have a retention period of 8+1 weeks (since it will be down-sampled once a month)
        _, err := sr.influxClient.BucketsAPI().CreateBucketWithName(ctx, org, weeklyBucket, weeklyBucketRetentionRule)
        if err != nil {
            return err
        }
    } else if sr.appConfig.GetBool("web.influxdb.retention_policy") {
        //correctly set the retention period for the bucket (may not be able to do it during setup/creation)
        foundWeeklyBucket.RetentionRules = domain.RetentionRules{weeklyBucketRetentionRule}
        sr.influxClient.BucketsAPI().UpdateBucket(ctx, foundWeeklyBucket)
    }

    monthlyBucket := fmt.Sprintf("%s_monthly", sr.appConfig.GetString("web.influxdb.bucket"))
    if _, foundErr := sr.influxClient.BucketsAPI().FindBucketByName(ctx, monthlyBucket); foundErr != nil {
    if foundMonthlyBucket, foundErr := sr.influxClient.BucketsAPI().FindBucketByName(ctx, monthlyBucket); foundErr != nil {
        // metrics_monthly bucket will have a retention period of 24+1 months (since it will be down-sampled once a year)
        _, err := sr.influxClient.BucketsAPI().CreateBucketWithName(ctx, org, monthlyBucket, monthlyBucketRetentionRule)
        if err != nil {
            return err
        }
    } else if sr.appConfig.GetBool("web.influxdb.retention_policy") {
        //correctly set the retention period for the bucket (may not be able to do it during setup/creation)
        foundMonthlyBucket.RetentionRules = domain.RetentionRules{monthlyBucketRetentionRule}
        sr.influxClient.BucketsAPI().UpdateBucket(ctx, foundMonthlyBucket)
    }

    yearlyBucket := fmt.Sprintf("%s_yearly", sr.appConfig.GetString("web.influxdb.bucket"))
@@ -442,3 +497,16 @@ func (sr *scrutinyRepository) lookupNestedDurationKeys(durationKey string) []str
    }
    return []string{DURATION_KEY_WEEK}
}

func sqlitePragmaString(pragmas map[string]string) string {
    q := url.Values{}
    for key, val := range pragmas {
        q.Add("_pragma", key+"="+val)
    }

    queryStr := q.Encode()
    if len(queryStr) > 0 {
        return "?" + queryStr
    }
    return ""
}

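For illustration, here is the DSN suffix the new `sqlitePragmaString` helper produces for the 30-second busy timeout configured above (a standalone sketch that mirrors the helper; note that `url.Values.Encode` percent-encodes the `=` inside each value):

```go
package main

import (
	"fmt"
	"net/url"
)

// mirror of the sqlitePragmaString helper shown above
func sqlitePragmaString(pragmas map[string]string) string {
	q := url.Values{}
	for key, val := range pragmas {
		q.Add("_pragma", key+"="+val)
	}
	if queryStr := q.Encode(); len(queryStr) > 0 {
		return "?" + queryStr
	}
	return ""
}

func main() {
	// prints "?_pragma=busy_timeout%3D30000" -- appended to the sqlite
	// database path before it is passed to sqlite.Open(...)
	fmt.Println(sqlitePragmaString(map[string]string{"busy_timeout": "30000"}))
}
```
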
@@ -29,6 +29,7 @@ func (sr *scrutinyRepository) SaveSmartAttributes(ctx context.Context, wwn strin
    return deviceSmartData, sr.saveDatapoint(sr.influxWriteApi, "smart", tags, fields, deviceSmartData.Date, ctx)
}

// GetSmartAttributeHistory MUST return in sorted order, where newest entries are at the beginning of the list, and oldest are at the end.
func (sr *scrutinyRepository) GetSmartAttributeHistory(ctx context.Context, wwn string, durationKey string, attributes []string) ([]measurements.Smart, error) {
    // Get SmartResults from InfluxDB

@@ -64,6 +65,9 @@ func (sr *scrutinyRepository) GetSmartAttributeHistory(ctx context.Context, wwn
        return nil, err
    }

    //we have to sort the smartResults again, because the `union` command will return multiple 'tables' and only sort the records in each table.
    sortSmartMeasurementsDesc(smartResults)

    return smartResults, nil

    //if err := device.SquashHistory(); err != nil {

@@ -4,15 +4,17 @@ import (
    "context"
    "errors"
    "fmt"
    "github.com/analogj/scrutiny/webapp/backend/pkg"
    "github.com/analogj/scrutiny/webapp/backend/pkg/database/migrations/m20201107210306"
    "github.com/analogj/scrutiny/webapp/backend/pkg/database/migrations/m20220503120000"
    "github.com/analogj/scrutiny/webapp/backend/pkg/database/migrations/m20220509170100"
    "github.com/analogj/scrutiny/webapp/backend/pkg/database/migrations/m20220716214900"
    "github.com/analogj/scrutiny/webapp/backend/pkg/models"
    "github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
    "github.com/analogj/scrutiny/webapp/backend/pkg/models/measurements"
    _ "github.com/glebarez/sqlite"
    "github.com/go-gormigrate/gormigrate/v2"
    "github.com/influxdata/influxdb-client-go/v2/api/http"
    _ "github.com/jinzhu/gorm/dialects/sqlite"
    log "github.com/sirupsen/logrus"
    "gorm.io/gorm"
    "strconv"
@@ -267,6 +269,106 @@ func (sr *scrutinyRepository) Migrate(ctx context.Context) error {
                return tx.AutoMigrate(m20220509170100.Device{})
            },
        },
        {
            ID: "m20220709181300",
            Migrate: func(tx *gorm.DB) error {

                // delete invalid devices with an empty `wwn` field (they are impossible to delete manually).
                return tx.Where("wwn = ?", "").Delete(&models.Device{}).Error
            },
        },
        {
            ID: "m20220716214900", // add settings table.
            Migrate: func(tx *gorm.DB) error {

                // adding the settings table.
                err := tx.AutoMigrate(m20220716214900.Setting{})
                if err != nil {
                    return err
                }
                //add defaults.

                var defaultSettings = []m20220716214900.Setting{
                    {
                        SettingKeyName:        "theme",
                        SettingKeyDescription: "Frontend theme ('light' | 'dark' | 'system')",
                        SettingDataType:       "string",
                        SettingValueString:    "system", // options: 'light' | 'dark' | 'system'
                    },
                    {
                        SettingKeyName:        "layout",
                        SettingKeyDescription: "Frontend layout ('material')",
                        SettingDataType:       "string",
                        SettingValueString:    "material",
                    },
                    {
                        SettingKeyName:        "dashboard_display",
                        SettingKeyDescription: "Frontend device display title ('name' | 'serial_id' | 'uuid' | 'label')",
                        SettingDataType:       "string",
                        SettingValueString:    "name",
                    },
                    {
                        SettingKeyName:        "dashboard_sort",
                        SettingKeyDescription: "Frontend device sort by ('status' | 'title' | 'age')",
                        SettingDataType:       "string",
                        SettingValueString:    "status",
                    },
                    {
                        SettingKeyName:        "temperature_unit",
                        SettingKeyDescription: "Frontend temperature unit ('celsius' | 'fahrenheit')",
                        SettingDataType:       "string",
                        SettingValueString:    "celsius",
                    },
                    {
                        SettingKeyName:        "file_size_si_units",
                        SettingKeyDescription: "File size in SI units (true | false)",
                        SettingDataType:       "bool",
                        SettingValueBool:      false,
                    },
                    {
                        SettingKeyName:        "line_stroke",
                        SettingKeyDescription: "Temperature chart line stroke ('smooth' | 'straight' | 'stepline')",
                        SettingDataType:       "string",
                        SettingValueString:    "smooth",
                    },

                    {
                        SettingKeyName:        "metrics.notify_level",
                        SettingKeyDescription: "Determines which device status will cause a notification (fail or warn)",
                        SettingDataType:       "numeric",
                        SettingValueNumeric:   int(pkg.MetricsNotifyLevelFail), // options: 'fail' or 'warn'
                    },
                    {
                        SettingKeyName:        "metrics.status_filter_attributes",
                        SettingKeyDescription: "Determines which attributes should impact device status",
                        SettingDataType:       "numeric",
                        SettingValueNumeric:   int(pkg.MetricsStatusFilterAttributesAll), // options: 'all' or 'critical'
                    },
                    {
                        SettingKeyName:        "metrics.status_threshold",
                        SettingKeyDescription: "Determines which threshold should impact device status",
                        SettingDataType:       "numeric",
                        SettingValueNumeric:   int(pkg.MetricsStatusThresholdBoth), // options: 'scrutiny', 'smart', 'both'
                    },
                }
                return tx.Create(&defaultSettings).Error
            },
        },
        {
            ID: "m20221115214900", // add line_stroke setting.
            Migrate: func(tx *gorm.DB) error {
                //add line_stroke setting default.
                var defaultSettings = []m20220716214900.Setting{
                    {
                        SettingKeyName:        "line_stroke",
                        SettingKeyDescription: "Temperature chart line stroke ('smooth' | 'straight' | 'stepline')",
                        SettingDataType:       "string",
                        SettingValueString:    "smooth",
                    },
                }
                return tx.Create(&defaultSettings).Error
            },
        },
    })

    if err := m.Migrate(); err != nil {
@@ -274,6 +376,30 @@ func (sr *scrutinyRepository) Migrate(ctx context.Context) error {
        return err
    }
    sr.logger.Infoln("Database migration completed successfully")

    //these migrations cannot be done within a transaction, so they are done as a separate group, with `UseTransaction = false`
    sr.logger.Infoln("SQLite global configuration migrations starting. Please wait....")
    globalMigrateOptions := gormigrate.DefaultOptions
    globalMigrateOptions.UseTransaction = false
    gm := gormigrate.New(sr.gormClient, globalMigrateOptions, []*gormigrate.Migration{
        {
            ID: "g20220802211500",
            Migrate: func(tx *gorm.DB) error {
                //shrink the Database (maybe necessary after 20220503113100)
                if err := tx.Exec("VACUUM;").Error; err != nil {
                    return err
                }
                return nil
            },
        },
    })

    if err := gm.Migrate(); err != nil {
        sr.logger.Errorf("SQLite global configuration migrations failed with error. \n Please open a github issue at https://github.com/AnalogJ/scrutiny and attach a copy of your scrutiny.db file. \n %v", err)
        return err
    }
    sr.logger.Infoln("SQLite global configuration migrations completed successfully")

    return nil
}

@@ -0,0 +1,85 @@
package database

import (
    "context"
    "fmt"
    "github.com/analogj/scrutiny/webapp/backend/pkg/config"
    "github.com/analogj/scrutiny/webapp/backend/pkg/models"
    "github.com/mitchellh/mapstructure"
    "strings"
)

// LoadSettings will retrieve settings from the database, store them in the AppConfig object, and return a Settings struct
func (sr *scrutinyRepository) LoadSettings(ctx context.Context) (*models.Settings, error) {
    settingsEntries := []models.SettingEntry{}
    if err := sr.gormClient.WithContext(ctx).Find(&settingsEntries).Error; err != nil {
        return nil, fmt.Errorf("Could not get settings from DB: %v", err)
    }

    // store retrieved settings in the AppConfig obj
    for _, settingsEntry := range settingsEntries {
        configKey := fmt.Sprintf("%s.%s", config.DB_USER_SETTINGS_SUBKEY, settingsEntry.SettingKeyName)

        if settingsEntry.SettingDataType == "numeric" {
            sr.appConfig.SetDefault(configKey, settingsEntry.SettingValueNumeric)
        } else if settingsEntry.SettingDataType == "string" {
            sr.appConfig.SetDefault(configKey, settingsEntry.SettingValueString)
        } else if settingsEntry.SettingDataType == "bool" {
            sr.appConfig.SetDefault(configKey, settingsEntry.SettingValueBool)
        }
    }

    // unmarshal the dbsetting object data to a settings object.
    var settings models.Settings
    err := sr.appConfig.UnmarshalKey(config.DB_USER_SETTINGS_SUBKEY, &settings)
    if err != nil {
        return nil, err
    }
    return &settings, nil
}

// testing
// curl -d '{"metrics": { "notify_level": 5, "status_filter_attributes": 5, "status_threshold": 5 }}' -H "Content-Type: application/json" -X POST http://localhost:9090/api/settings
// SaveSettings will update settings in AppConfig object, then save the settings to the database.
func (sr *scrutinyRepository) SaveSettings(ctx context.Context, settings models.Settings) error {
    //save the entries to the appconfig
    settingsMap := &map[string]interface{}{}
    err := mapstructure.Decode(settings, &settingsMap)
    if err != nil {
        return err
    }
    settingsWrapperMap := map[string]interface{}{}
    settingsWrapperMap[config.DB_USER_SETTINGS_SUBKEY] = *settingsMap
    err = sr.appConfig.MergeConfigMap(settingsWrapperMap)
    if err != nil {
        return err
    }
    sr.logger.Debugf("after merge settings: %v", sr.appConfig.AllSettings())
    //retrieve current settings from the database
    settingsEntries := []models.SettingEntry{}
    if err := sr.gormClient.WithContext(ctx).Find(&settingsEntries).Error; err != nil {
        return fmt.Errorf("Could not get settings from DB: %v", err)
    }

    //update settingsEntries
    for ndx, settingsEntry := range settingsEntries {
        configKey := fmt.Sprintf("%s.%s", config.DB_USER_SETTINGS_SUBKEY, strings.ToLower(settingsEntry.SettingKeyName))

        if settingsEntry.SettingDataType == "numeric" {
            settingsEntries[ndx].SettingValueNumeric = sr.appConfig.GetInt(configKey)
        } else if settingsEntry.SettingDataType == "string" {
            settingsEntries[ndx].SettingValueString = sr.appConfig.GetString(configKey)
        } else if settingsEntry.SettingDataType == "bool" {
            settingsEntries[ndx].SettingValueBool = sr.appConfig.GetBool(configKey)
        }

        // store in database.
        //TODO: this should be `sr.gormClient.Updates(&settingsEntries).Error`
        err := sr.gormClient.Model(&models.SettingEntry{}).Where([]uint{settingsEntry.ID}).Select("setting_value_numeric", "setting_value_string", "setting_value_bool").Updates(settingsEntries[ndx]).Error
        if err != nil {
            return err
        }

    }
    return nil
}
@@ -11,35 +11,71 @@ import (
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
func (sr *scrutinyRepository) EnsureTasks(ctx context.Context, orgID string) error {
    weeklyTaskName := "tsk-weekly-aggr"
    weeklyTaskScript := sr.DownsampleScript("weekly", weeklyTaskName, "0 1 * * 0")
    if found, findErr := sr.influxTaskApi.FindTasks(ctx, &api.TaskFilter{Name: weeklyTaskName}); findErr == nil && len(found) == 0 {
        //weekly on Sunday at 1:00am
        _, err := sr.influxTaskApi.CreateTaskWithCron(ctx, weeklyTaskName, sr.DownsampleScript("weekly"), "0 1 * * 0", orgID)
        _, err := sr.influxTaskApi.CreateTaskByFlux(ctx, weeklyTaskScript, orgID)
        if err != nil {
            return err
        }
    } else if len(found) == 1 {
        //check if we should update
        task := &found[0]
        if weeklyTaskScript != task.Flux {
            sr.logger.Infoln("updating weekly task script")
            task.Flux = weeklyTaskScript
            _, err := sr.influxTaskApi.UpdateTask(ctx, task)
            if err != nil {
                return err
            }
        }
    }

    monthlyTaskName := "tsk-monthly-aggr"
    monthlyTaskScript := sr.DownsampleScript("monthly", monthlyTaskName, "30 1 1 * *")
    if found, findErr := sr.influxTaskApi.FindTasks(ctx, &api.TaskFilter{Name: monthlyTaskName}); findErr == nil && len(found) == 0 {
        //monthly on first day of the month at 1:30am
        _, err := sr.influxTaskApi.CreateTaskWithCron(ctx, monthlyTaskName, sr.DownsampleScript("monthly"), "30 1 1 * *", orgID)
        _, err := sr.influxTaskApi.CreateTaskByFlux(ctx, monthlyTaskScript, orgID)
        if err != nil {
            return err
        }
    } else if len(found) == 1 {
        //check if we should update
        task := &found[0]
        if monthlyTaskScript != task.Flux {
            sr.logger.Infoln("updating monthly task script")
            task.Flux = monthlyTaskScript
            _, err := sr.influxTaskApi.UpdateTask(ctx, task)
            if err != nil {
                return err
            }
        }
    }

    yearlyTaskName := "tsk-yearly-aggr"
    yearlyTaskScript := sr.DownsampleScript("yearly", yearlyTaskName, "0 2 1 1 *")
    if found, findErr := sr.influxTaskApi.FindTasks(ctx, &api.TaskFilter{Name: yearlyTaskName}); findErr == nil && len(found) == 0 {
        //yearly on the first day of the year at 2:00am
        _, err := sr.influxTaskApi.CreateTaskWithCron(ctx, yearlyTaskName, sr.DownsampleScript("yearly"), "0 2 1 1 *", orgID)
        _, err := sr.influxTaskApi.CreateTaskByFlux(ctx, yearlyTaskScript, orgID)
        if err != nil {
            return err
        }
    } else if len(found) == 1 {
        //check if we should update
        task := &found[0]
        if yearlyTaskScript != task.Flux {
            sr.logger.Infoln("updating yearly task script")
            task.Flux = yearlyTaskScript
            _, err := sr.influxTaskApi.UpdateTask(ctx, task)
            if err != nil {
                return err
            }
        }
    }
    return nil
}

func (sr *scrutinyRepository) DownsampleScript(aggregationType string) string {
func (sr *scrutinyRepository) DownsampleScript(aggregationType string, name string, cron string) string {
    var sourceBucket string // the source of the data
    var destBucket string   // the destination for the aggregated data
    var rangeStart string
@@ -88,30 +124,37 @@ func (sr *scrutinyRepository) DownsampleScript(aggregationType string) string {
    */

    return fmt.Sprintf(`
sourceBucket = "%s"
rangeStart = %s
rangeEnd = %s
aggWindow = %s
destBucket = "%s"
destOrg = "%s"
option task = {
    name: "%s",
    cron: "%s",
}

from(bucket: sourceBucket)
|> range(start: rangeStart, stop: rangeEnd)
|> filter(fn: (r) => r["_measurement"] == "smart" )
|> group(columns: ["device_wwn", "_field"])
|> aggregateWindow(every: aggWindow, fn: last, createEmpty: false)
|> to(bucket: destBucket, org: destOrg)
sourceBucket = "%s"
rangeStart = %s
rangeEnd = %s
aggWindow = %s
destBucket = "%s"
destOrg = "%s"

temp_data = from(bucket: sourceBucket)
|> range(start: rangeStart, stop: rangeEnd)
|> filter(fn: (r) => r["_measurement"] == "temp")
|> group(columns: ["device_wwn"])
|> toInt()
from(bucket: sourceBucket)
|> range(start: rangeStart, stop: rangeEnd)
|> filter(fn: (r) => r["_measurement"] == "smart" )
|> group(columns: ["device_wwn", "_field"])
|> aggregateWindow(every: aggWindow, fn: last, createEmpty: false)
|> to(bucket: destBucket, org: destOrg)

temp_data
|> aggregateWindow(fn: mean, every: aggWindow)
|> to(bucket: destBucket, org: destOrg)
from(bucket: sourceBucket)
|> range(start: rangeStart, stop: rangeEnd)
|> filter(fn: (r) => r["_measurement"] == "temp")
|> group(columns: ["device_wwn"])
|> toInt()
|> aggregateWindow(fn: mean, every: aggWindow, createEmpty: false)
|> set(key: "_measurement", value: "temp")
|> set(key: "_field", value: "temp")
|> to(bucket: destBucket, org: destOrg)
`,
        name,
        cron,
        sourceBucket,
        rangeStart,
        rangeEnd,

@@ -0,0 +1,164 @@
package database

import (
	mock_config "github.com/analogj/scrutiny/webapp/backend/pkg/config/mock"
	"github.com/golang/mock/gomock"
	"github.com/stretchr/testify/require"
	"testing"
)

func Test_DownsampleScript_Weekly(t *testing.T) {
	t.Parallel()

	//setup
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()
	fakeConfig := mock_config.NewMockInterface(mockCtrl)
	fakeConfig.EXPECT().GetString("web.influxdb.bucket").Return("metrics").AnyTimes()
	fakeConfig.EXPECT().GetString("web.influxdb.org").Return("scrutiny").AnyTimes()

	deviceRepo := scrutinyRepository{
		appConfig: fakeConfig,
	}

	aggregationType := "weekly"

	//test
	influxDbScript := deviceRepo.DownsampleScript(aggregationType, "tsk-weekly-aggr", "0 1 * * 0")

	//assert
	require.Equal(t, `
option task = {
name: "tsk-weekly-aggr",
cron: "0 1 * * 0",
}

sourceBucket = "metrics"
rangeStart = -2w
rangeEnd = -1w
aggWindow = 1w
destBucket = "metrics_weekly"
destOrg = "scrutiny"

from(bucket: sourceBucket)
|> range(start: rangeStart, stop: rangeEnd)
|> filter(fn: (r) => r["_measurement"] == "smart" )
|> group(columns: ["device_wwn", "_field"])
|> aggregateWindow(every: aggWindow, fn: last, createEmpty: false)
|> to(bucket: destBucket, org: destOrg)

from(bucket: sourceBucket)
|> range(start: rangeStart, stop: rangeEnd)
|> filter(fn: (r) => r["_measurement"] == "temp")
|> group(columns: ["device_wwn"])
|> toInt()
|> aggregateWindow(fn: mean, every: aggWindow, createEmpty: false)
|> set(key: "_measurement", value: "temp")
|> set(key: "_field", value: "temp")
|> to(bucket: destBucket, org: destOrg)
`, influxDbScript)
}

func Test_DownsampleScript_Monthly(t *testing.T) {
	t.Parallel()

	//setup
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()
	fakeConfig := mock_config.NewMockInterface(mockCtrl)
	fakeConfig.EXPECT().GetString("web.influxdb.bucket").Return("metrics").AnyTimes()
	fakeConfig.EXPECT().GetString("web.influxdb.org").Return("scrutiny").AnyTimes()

	deviceRepo := scrutinyRepository{
		appConfig: fakeConfig,
	}

	aggregationType := "monthly"

	//test
	influxDbScript := deviceRepo.DownsampleScript(aggregationType, "tsk-monthly-aggr", "30 1 1 * *")

	//assert
	require.Equal(t, `
option task = {
name: "tsk-monthly-aggr",
cron: "30 1 1 * *",
}

sourceBucket = "metrics_weekly"
rangeStart = -2mo
rangeEnd = -1mo
aggWindow = 1mo
destBucket = "metrics_monthly"
destOrg = "scrutiny"

from(bucket: sourceBucket)
|> range(start: rangeStart, stop: rangeEnd)
|> filter(fn: (r) => r["_measurement"] == "smart" )
|> group(columns: ["device_wwn", "_field"])
|> aggregateWindow(every: aggWindow, fn: last, createEmpty: false)
|> to(bucket: destBucket, org: destOrg)

from(bucket: sourceBucket)
|> range(start: rangeStart, stop: rangeEnd)
|> filter(fn: (r) => r["_measurement"] == "temp")
|> group(columns: ["device_wwn"])
|> toInt()
|> aggregateWindow(fn: mean, every: aggWindow, createEmpty: false)
|> set(key: "_measurement", value: "temp")
|> set(key: "_field", value: "temp")
|> to(bucket: destBucket, org: destOrg)
`, influxDbScript)
}

func Test_DownsampleScript_Yearly(t *testing.T) {
	t.Parallel()

	//setup
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()
	fakeConfig := mock_config.NewMockInterface(mockCtrl)
	fakeConfig.EXPECT().GetString("web.influxdb.bucket").Return("metrics").AnyTimes()
	fakeConfig.EXPECT().GetString("web.influxdb.org").Return("scrutiny").AnyTimes()

	deviceRepo := scrutinyRepository{
		appConfig: fakeConfig,
	}

	aggregationType := "yearly"

	//test
	influxDbScript := deviceRepo.DownsampleScript(aggregationType, "tsk-yearly-aggr", "0 2 1 1 *")

	//assert
	require.Equal(t, `
option task = {
name: "tsk-yearly-aggr",
cron: "0 2 1 1 *",
}

sourceBucket = "metrics_monthly"
rangeStart = -2y
rangeEnd = -1y
aggWindow = 1y
destBucket = "metrics_yearly"
destOrg = "scrutiny"

from(bucket: sourceBucket)
|> range(start: rangeStart, stop: rangeEnd)
|> filter(fn: (r) => r["_measurement"] == "smart" )
|> group(columns: ["device_wwn", "_field"])
|> aggregateWindow(every: aggWindow, fn: last, createEmpty: false)
|> to(bucket: destBucket, org: destOrg)

from(bucket: sourceBucket)
|> range(start: rangeStart, stop: rangeEnd)
|> filter(fn: (r) => r["_measurement"] == "temp")
|> group(columns: ["device_wwn"])
|> toInt()
|> aggregateWindow(fn: mean, every: aggWindow, createEmpty: false)
|> set(key: "_measurement", value: "temp")
|> set(key: "_field", value: "temp")
|> to(bucket: destBucket, org: destOrg)
`, influxDbScript)
}

@@ -17,6 +17,10 @@ func (sr *scrutinyRepository) SaveSmartTemperature(ctx context.Context, wwn stri
	if len(collectorSmartData.AtaSctTemperatureHistory.Table) > 0 {

		for ndx, temp := range collectorSmartData.AtaSctTemperatureHistory.Table {
			//temp values may be null; we must skip/ignore them. See #393
			if temp == 0 {
				continue
			}

			minutesOffset := collectorSmartData.AtaSctTemperatureHistory.LoggingIntervalMinutes * int64(ndx) * 60
			smartTemp := measurements.SmartTemperature{

@@ -0,0 +1,185 @@
package database

import (
	mock_config "github.com/analogj/scrutiny/webapp/backend/pkg/config/mock"
	"github.com/golang/mock/gomock"
	"github.com/stretchr/testify/require"
	"testing"
)

func Test_aggregateTempQuery_Week(t *testing.T) {
	t.Parallel()

	//setup
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()
	fakeConfig := mock_config.NewMockInterface(mockCtrl)
	fakeConfig.EXPECT().GetString("web.influxdb.bucket").Return("metrics").AnyTimes()
	fakeConfig.EXPECT().GetString("web.influxdb.org").Return("scrutiny").AnyTimes()

	deviceRepo := scrutinyRepository{
		appConfig: fakeConfig,
	}

	aggregationType := DURATION_KEY_WEEK

	//test
	influxDbScript := deviceRepo.aggregateTempQuery(aggregationType)

	//assert
	require.Equal(t, `import "influxdata/influxdb/schema"
weekData = from(bucket: "metrics")
|> range(start: -1w, stop: now())
|> filter(fn: (r) => r["_measurement"] == "temp" )
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|> group(columns: ["device_wwn"])
|> toInt()

weekData
|> schema.fieldsAsCols()
|> yield()`, influxDbScript)
}

func Test_aggregateTempQuery_Month(t *testing.T) {
	t.Parallel()

	//setup
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()
	fakeConfig := mock_config.NewMockInterface(mockCtrl)
	fakeConfig.EXPECT().GetString("web.influxdb.bucket").Return("metrics").AnyTimes()
	fakeConfig.EXPECT().GetString("web.influxdb.org").Return("scrutiny").AnyTimes()

	deviceRepo := scrutinyRepository{
		appConfig: fakeConfig,
	}

	aggregationType := DURATION_KEY_MONTH

	//test
	influxDbScript := deviceRepo.aggregateTempQuery(aggregationType)

	//assert
	require.Equal(t, `import "influxdata/influxdb/schema"
weekData = from(bucket: "metrics")
|> range(start: -1w, stop: now())
|> filter(fn: (r) => r["_measurement"] == "temp" )
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|> group(columns: ["device_wwn"])
|> toInt()

monthData = from(bucket: "metrics_weekly")
|> range(start: -1mo, stop: -1w)
|> filter(fn: (r) => r["_measurement"] == "temp" )
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|> group(columns: ["device_wwn"])
|> toInt()

union(tables: [weekData, monthData])
|> group(columns: ["device_wwn"])
|> sort(columns: ["_time"], desc: false)
|> schema.fieldsAsCols()`, influxDbScript)
}

func Test_aggregateTempQuery_Year(t *testing.T) {
	t.Parallel()

	//setup
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()
	fakeConfig := mock_config.NewMockInterface(mockCtrl)
	fakeConfig.EXPECT().GetString("web.influxdb.bucket").Return("metrics").AnyTimes()
	fakeConfig.EXPECT().GetString("web.influxdb.org").Return("scrutiny").AnyTimes()

	deviceRepo := scrutinyRepository{
		appConfig: fakeConfig,
	}

	aggregationType := DURATION_KEY_YEAR

	//test
	influxDbScript := deviceRepo.aggregateTempQuery(aggregationType)

	//assert
	require.Equal(t, `import "influxdata/influxdb/schema"
weekData = from(bucket: "metrics")
|> range(start: -1w, stop: now())
|> filter(fn: (r) => r["_measurement"] == "temp" )
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|> group(columns: ["device_wwn"])
|> toInt()

monthData = from(bucket: "metrics_weekly")
|> range(start: -1mo, stop: -1w)
|> filter(fn: (r) => r["_measurement"] == "temp" )
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|> group(columns: ["device_wwn"])
|> toInt()

yearData = from(bucket: "metrics_monthly")
|> range(start: -1y, stop: -1mo)
|> filter(fn: (r) => r["_measurement"] == "temp" )
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|> group(columns: ["device_wwn"])
|> toInt()

union(tables: [weekData, monthData, yearData])
|> group(columns: ["device_wwn"])
|> sort(columns: ["_time"], desc: false)
|> schema.fieldsAsCols()`, influxDbScript)
}

func Test_aggregateTempQuery_Forever(t *testing.T) {
	t.Parallel()

	//setup
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()
	fakeConfig := mock_config.NewMockInterface(mockCtrl)
	fakeConfig.EXPECT().GetString("web.influxdb.bucket").Return("metrics").AnyTimes()
	fakeConfig.EXPECT().GetString("web.influxdb.org").Return("scrutiny").AnyTimes()

	deviceRepo := scrutinyRepository{
		appConfig: fakeConfig,
	}

	aggregationType := DURATION_KEY_FOREVER

	//test
	influxDbScript := deviceRepo.aggregateTempQuery(aggregationType)

	//assert
	require.Equal(t, `import "influxdata/influxdb/schema"
weekData = from(bucket: "metrics")
|> range(start: -1w, stop: now())
|> filter(fn: (r) => r["_measurement"] == "temp" )
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|> group(columns: ["device_wwn"])
|> toInt()

monthData = from(bucket: "metrics_weekly")
|> range(start: -1mo, stop: -1w)
|> filter(fn: (r) => r["_measurement"] == "temp" )
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|> group(columns: ["device_wwn"])
|> toInt()

yearData = from(bucket: "metrics_monthly")
|> range(start: -1y, stop: -1mo)
|> filter(fn: (r) => r["_measurement"] == "temp" )
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|> group(columns: ["device_wwn"])
|> toInt()

foreverData = from(bucket: "metrics_yearly")
|> range(start: -10y, stop: -1y)
|> filter(fn: (r) => r["_measurement"] == "temp" )
|> aggregateWindow(every: 1h, fn: mean, createEmpty: false)
|> group(columns: ["device_wwn"])
|> toInt()

union(tables: [weekData, monthData, yearData, foreverData])
|> group(columns: ["device_wwn"])
|> sort(columns: ["_time"], desc: false)
|> schema.fieldsAsCols()`, influxDbScript)
}
@@ -1,5 +0,0 @@
|
||||
package models
|
||||
|
||||
// Temperature Format
|
||||
// Date Format
|
||||
// Device History window
|
||||
@@ -0,0 +1,23 @@
package models

import (
	"gorm.io/gorm"
)

// SettingEntry matches a setting row in the database
type SettingEntry struct {
	//GORM attributes, see: http://gorm.io/docs/conventions.html
	gorm.Model

	SettingKeyName        string `json:"setting_key_name" gorm:"unique;not null"`
	SettingKeyDescription string `json:"setting_key_description"`
	SettingDataType       string `json:"setting_data_type"`

	SettingValueNumeric int    `json:"setting_value_numeric"`
	SettingValueString  string `json:"setting_value_string"`
	SettingValueBool    bool   `json:"setting_value_bool"`
}

func (s SettingEntry) TableName() string {
	return "settings"
}
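Each settings row stores its value in one of three typed columns, with SettingDataType selecting which column is authoritative. A minimal sketch of how a typed value could be read back out (the helper and the data-type strings "numeric"/"bool"/"string" are assumptions, not confirmed by this diff):

// Value is a hypothetical accessor illustrating the typed-column layout above.
func (s SettingEntry) Value() interface{} {
	switch s.SettingDataType {
	case "numeric":
		return s.SettingValueNumeric
	case "bool":
		return s.SettingValueBool
	default: // assume "string" for anything else
		return s.SettingValueString
	}
}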
@@ -0,0 +1,24 @@
package models

// Settings is made up of parsed SettingEntry objects retrieved from the database
//type Settings struct {
//	MetricsNotifyLevel            pkg.MetricsNotifyLevel            `json:"metrics.notify.level" mapstructure:"metrics.notify.level"`
//	MetricsStatusFilterAttributes pkg.MetricsStatusFilterAttributes `json:"metrics.status.filter_attributes" mapstructure:"metrics.status.filter_attributes"`
//	MetricsStatusThreshold        pkg.MetricsStatusThreshold        `json:"metrics.status.threshold" mapstructure:"metrics.status.threshold"`
//}

type Settings struct {
	Theme            string `json:"theme" mapstructure:"theme"`
	Layout           string `json:"layout" mapstructure:"layout"`
	DashboardDisplay string `json:"dashboard_display" mapstructure:"dashboard_display"`
	DashboardSort    string `json:"dashboard_sort" mapstructure:"dashboard_sort"`
	TemperatureUnit  string `json:"temperature_unit" mapstructure:"temperature_unit"`
	FileSizeSIUnits  bool   `json:"file_size_si_units" mapstructure:"file_size_si_units"`
	LineStroke       string `json:"line_stroke" mapstructure:"line_stroke"`

	Metrics struct {
		NotifyLevel            int `json:"notify_level" mapstructure:"notify_level"`
		StatusFilterAttributes int `json:"status_filter_attributes" mapstructure:"status_filter_attributes"`
		StatusThreshold        int `json:"status_threshold" mapstructure:"status_threshold"`
	} `json:"metrics" mapstructure:"metrics"`
}
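The mapstructure tags suggest these settings are decoded from a generic key/value map. A minimal sketch of that round trip, assuming github.com/mitchellh/mapstructure is the decoder (the diff does not show which decoder the repository actually uses):

// decodeSettings is a hypothetical illustration of the mapstructure tags above.
func decodeSettings(raw map[string]interface{}) (*Settings, error) {
	var settings Settings
	if err := mapstructure.Decode(raw, &settings); err != nil {
		return nil, err
	}
	return &settings, nil
}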
@@ -5,60 +5,199 @@ import (
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
	"net/url"
	"os"
	"strconv"
	"strings"
	"time"

	"github.com/analogj/go-util/utils"
	"github.com/analogj/scrutiny/webapp/backend/pkg"
	"github.com/analogj/scrutiny/webapp/backend/pkg/config"
	"github.com/analogj/scrutiny/webapp/backend/pkg/models"
	"github.com/analogj/scrutiny/webapp/backend/pkg/models/measurements"
	"github.com/analogj/scrutiny/webapp/backend/pkg/thresholds"
	"github.com/containrrr/shoutrrr"
	shoutrrrTypes "github.com/containrrr/shoutrrr/pkg/types"
	"github.com/sirupsen/logrus"
	"golang.org/x/sync/errgroup"
	"net/http"
	"net/url"
	"os"
	"strings"
	"time"
)

const NotifyFailureTypeEmailTest = "EmailTest"
const NotifyFailureTypeSmartPrefail = "SmartPreFailure"
const NotifyFailureTypeBothFailure = "SmartFailure" //SmartFailure always takes precedence when Scrutiny & Smart failed.
const NotifyFailureTypeSmartFailure = "SmartFailure"
const NotifyFailureTypeSmartErrorLog = "SmartErrorLog"
const NotifyFailureTypeSmartSelfTest = "SmartSelfTestLog"
const NotifyFailureTypeScrutinyFailure = "ScrutinyFailure"

// TODO: include host and/or user label for device.
// ShouldNotify checks whether the error Message should be filtered (level mismatch or filtered_attributes)
func ShouldNotify(device models.Device, smartAttrs measurements.Smart, statusThreshold pkg.MetricsStatusThreshold, statusFilterAttributes pkg.MetricsStatusFilterAttributes) bool {
	// 1. check if the device is healthy
	if device.DeviceStatus == pkg.DeviceStatusPassed {
		return false
	}

	//TODO: cannot check for warning notifyLevel yet.

	// setup constants for comparison
	var requiredDeviceStatus pkg.DeviceStatus
	var requiredAttrStatus pkg.AttributeStatus
	if statusThreshold == pkg.MetricsStatusThresholdBoth {
		// either scrutiny or smart failures should trigger an email
		requiredDeviceStatus = pkg.DeviceStatusSet(pkg.DeviceStatusFailedSmart, pkg.DeviceStatusFailedScrutiny)
		requiredAttrStatus = pkg.AttributeStatusSet(pkg.AttributeStatusFailedSmart, pkg.AttributeStatusFailedScrutiny)
	} else if statusThreshold == pkg.MetricsStatusThresholdSmart {
		//only smart failures
		requiredDeviceStatus = pkg.DeviceStatusFailedSmart
		requiredAttrStatus = pkg.AttributeStatusFailedSmart
	} else {
		requiredDeviceStatus = pkg.DeviceStatusFailedScrutiny
		requiredAttrStatus = pkg.AttributeStatusFailedScrutiny
	}

	// 2. check if the attributes that are failing should be filtered (non-critical)
	// 3. for any unfiltered attribute, store the failure reason (Smart or Scrutiny)
	if statusFilterAttributes == pkg.MetricsStatusFilterAttributesCritical {
		hasFailingCriticalAttr := false
		var statusFailingCriticalAttr pkg.AttributeStatus

		for attrId, attrData := range smartAttrs.Attributes {
			//find failing attribute
			if attrData.GetStatus() == pkg.AttributeStatusPassed {
				continue //skip all passing attributes
			}

			// merge the statuses of all critical attributes
			statusFailingCriticalAttr = pkg.AttributeStatusSet(statusFailingCriticalAttr, attrData.GetStatus())

			//found a failing attribute, see if it's critical
			if device.IsScsi() && thresholds.ScsiMetadata[attrId].Critical {
				hasFailingCriticalAttr = true
			} else if device.IsNvme() && thresholds.NmveMetadata[attrId].Critical {
				hasFailingCriticalAttr = true
			} else {
				//this is ATA
				attrIdInt, err := strconv.Atoi(attrId)
				if err != nil {
					continue
				}
				if thresholds.AtaMetadata[attrIdInt].Critical {
					hasFailingCriticalAttr = true
				}
			}
		}

		if !hasFailingCriticalAttr {
			//no critical attributes are failing, and notifyFilterAttributes == "critical"
			return false
		} else {
			// check if any of the critical attributes have a status that we're looking for
			return pkg.AttributeStatusHas(statusFailingCriticalAttr, requiredAttrStatus)
		}

	} else {
		// 2. SKIP - we are processing every attribute.
		// 3. check if the device failure level matches the wanted failure level.
		return pkg.DeviceStatusHas(device.DeviceStatus, requiredDeviceStatus)
	}
}
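ShouldNotify leans on bitmask-style status helpers (DeviceStatusSet, DeviceStatusHas, and their Attribute twins), whose implementation is not part of this diff. A plausible sketch of what the pkg helpers could look like, treating each failure source as one bit (the bodies here are assumptions inferred from the call sites above):

type DeviceStatus int

// DeviceStatusSet merges a flag into an existing status value (assumed bitwise OR).
func DeviceStatusSet(status DeviceStatus, flag DeviceStatus) DeviceStatus {
	return status | flag
}

// DeviceStatusHas reports whether any of the wanted flag bits are present.
func DeviceStatusHas(status DeviceStatus, flag DeviceStatus) bool {
	return status&flag != 0
}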

// TODO: include user label for device.
type Payload struct {
	Date string `json:"date"` //populated by Send function.
	FailureType string `json:"failure_type"` //EmailTest, SmartFail, ScrutinyFail
	DeviceType string `json:"device_type"` //ATA/SCSI/NVMe
	DeviceName string `json:"device_name"` //dev/sda
	DeviceSerial string `json:"device_serial"` //WDDJ324KSO
	Test bool `json:"test"` // false
	HostId string `json:"host_id,omitempty"` //host id (optional)
	DeviceType string `json:"device_type"` //ATA/SCSI/NVMe
	DeviceName string `json:"device_name"` //dev/sda
	DeviceSerial string `json:"device_serial"` //WDDJ324KSO
	Test bool `json:"test"` // false

	//should not be populated
	Subject string `json:"subject"`
	Message string `json:"message"`
	//private, populated during init (marked as Public for JSON serialization)
	Date string `json:"date"` //populated by Send function.
	FailureType string `json:"failure_type"` //EmailTest, BothFail, SmartFail, ScrutinyFail
	Subject string `json:"subject"`
	Message string `json:"message"`
}

func NewPayload(device models.Device, test bool, currentTime ...time.Time) Payload {
	payload := Payload{
		HostId:       strings.TrimSpace(device.HostId),
		DeviceType:   device.DeviceType,
		DeviceName:   device.DeviceName,
		DeviceSerial: device.SerialNumber,
		Test:         test,
	}

	//validate that the Payload is populated
	var sendDate time.Time
	if currentTime != nil && len(currentTime) > 0 {
		sendDate = currentTime[0]
	} else {
		sendDate = time.Now()
	}

	payload.Date = sendDate.Format(time.RFC3339)
	payload.FailureType = payload.GenerateFailureType(device.DeviceStatus)
	payload.Subject = payload.GenerateSubject()
	payload.Message = payload.GenerateMessage()
	return payload
}

func (p *Payload) GenerateFailureType(deviceStatus pkg.DeviceStatus) string {
	//generate a failure type, given Test and DeviceStatus
	if p.Test {
		return NotifyFailureTypeEmailTest // must be an email test if "Test" is true
	}
	if pkg.DeviceStatusHas(deviceStatus, pkg.DeviceStatusFailedSmart) && pkg.DeviceStatusHas(deviceStatus, pkg.DeviceStatusFailedScrutiny) {
		return NotifyFailureTypeBothFailure //both failed
	} else if pkg.DeviceStatusHas(deviceStatus, pkg.DeviceStatusFailedSmart) {
		return NotifyFailureTypeSmartFailure //only SMART failed
	} else {
		return NotifyFailureTypeScrutinyFailure //only Scrutiny failed
	}
}

func (p *Payload) GenerateSubject() string {
	//generate a detailed failure message
	return fmt.Sprintf("Scrutiny SMART error (%s) detected on device: %s", p.FailureType, p.DeviceName)
	var subject string
	if len(p.HostId) > 0 {
		subject = fmt.Sprintf("Scrutiny SMART error (%s) detected on [host]device: [%s]%s", p.FailureType, p.HostId, p.DeviceName)
	} else {
		subject = fmt.Sprintf("Scrutiny SMART error (%s) detected on device: %s", p.FailureType, p.DeviceName)
	}
	return subject
}

func (p *Payload) GenerateMessage() string {
	//generate a detailed failure message
	message := fmt.Sprintf(
		`Scrutiny SMART error notification for device: %s
Failure Type: %s
Device Name: %s
Device Serial: %s
Device Type: %s

Date: %s`, p.DeviceName, p.FailureType, p.DeviceName, p.DeviceSerial, p.DeviceType, p.Date)
	messageParts := []string{}

	if p.Test {
		message = "TEST NOTIFICATION:\n" + message
	messageParts = append(messageParts, fmt.Sprintf("Scrutiny SMART error notification for device: %s", p.DeviceName))
	if len(p.HostId) > 0 {
		messageParts = append(messageParts, fmt.Sprintf("Host Id: %s", p.HostId))
	}

	return message
	messageParts = append(messageParts,
		fmt.Sprintf("Failure Type: %s", p.FailureType),
		fmt.Sprintf("Device Name: %s", p.DeviceName),
		fmt.Sprintf("Device Serial: %s", p.DeviceSerial),
		fmt.Sprintf("Device Type: %s", p.DeviceType),
		"",
		fmt.Sprintf("Date: %s", p.Date),
	)

	if p.Test {
		messageParts = append([]string{"TEST NOTIFICATION:"}, messageParts...)
	}

	return strings.Join(messageParts, "\n")
}

func New(logger logrus.FieldLogger, appconfig config.Interface, device models.Device, test bool) Notify {
	return Notify{
		Logger:  logger,
		Config:  appconfig,
		Payload: NewPayload(device, test),
	}
}

type Notify struct {
@@ -68,11 +207,6 @@ type Notify struct {
}

func (n *Notify) Send() error {
	//validate that the Payload is populated
	sendDate := time.Now()
	n.Payload.Date = sendDate.Format(time.RFC3339)
	n.Payload.Subject = n.Payload.GenerateSubject()
	n.Payload.Message = n.Payload.GenerateMessage()

	//retrieve list of notification endpoints from config file
	configUrls := n.Config.GetStringSlice("notify.urls")
@@ -176,6 +310,9 @@ func (n *Notify) SendScriptNotification(scriptUrl string) error {
	copyEnv = append(copyEnv, fmt.Sprintf("SCRUTINY_DEVICE_TYPE=%s", n.Payload.DeviceType))
	copyEnv = append(copyEnv, fmt.Sprintf("SCRUTINY_DEVICE_SERIAL=%s", n.Payload.DeviceSerial))
	copyEnv = append(copyEnv, fmt.Sprintf("SCRUTINY_MESSAGE=%s", n.Payload.Message))
	if len(n.Payload.HostId) > 0 {
		copyEnv = append(copyEnv, fmt.Sprintf("SCRUTINY_HOST_ID=%s", n.Payload.HostId))
	}
	err := utils.CmdExec(scriptPath, []string{}, "", copyEnv, "")
	if err != nil {
		n.Logger.Errorf("An error occurred while executing script %s: %v", scriptPath, err)
@@ -250,6 +387,9 @@ func (n *Notify) GenShoutrrrNotificationParams(shoutrrrUrl string) (string, *sho
	case "join":
		(*params)["title"] = subject
		(*params)["icon"] = logoUrl
	case "ntfy":
		(*params)["title"] = subject
		(*params)["icon"] = logoUrl
	case "opsgenie":
		(*params)["title"] = subject
	case "pushbullet":

@@ -0,0 +1,244 @@
package notify

import (
	"fmt"
	"github.com/analogj/scrutiny/webapp/backend/pkg"
	"github.com/analogj/scrutiny/webapp/backend/pkg/models"
	"github.com/analogj/scrutiny/webapp/backend/pkg/models/measurements"
	"github.com/stretchr/testify/require"
	"testing"
	"time"
)

func TestShouldNotify_MustSkipPassingDevices(t *testing.T) {
	t.Parallel()
	//setup
	device := models.Device{
		DeviceStatus: pkg.DeviceStatusPassed,
	}
	smartAttrs := measurements.Smart{}
	statusThreshold := pkg.MetricsStatusThresholdBoth
	notifyFilterAttributes := pkg.MetricsStatusFilterAttributesAll

	//assert
	require.False(t, ShouldNotify(device, smartAttrs, statusThreshold, notifyFilterAttributes))
}

func TestShouldNotify_MetricsStatusThresholdBoth_FailingSmartDevice(t *testing.T) {
	t.Parallel()
	//setup
	device := models.Device{
		DeviceStatus: pkg.DeviceStatusFailedSmart,
	}
	smartAttrs := measurements.Smart{}
	statusThreshold := pkg.MetricsStatusThresholdBoth
	notifyFilterAttributes := pkg.MetricsStatusFilterAttributesAll

	//assert
	require.True(t, ShouldNotify(device, smartAttrs, statusThreshold, notifyFilterAttributes))
}

func TestShouldNotify_MetricsStatusThresholdSmart_FailingSmartDevice(t *testing.T) {
	t.Parallel()
	//setup
	device := models.Device{
		DeviceStatus: pkg.DeviceStatusFailedSmart,
	}
	smartAttrs := measurements.Smart{}
	statusThreshold := pkg.MetricsStatusThresholdSmart
	notifyFilterAttributes := pkg.MetricsStatusFilterAttributesAll

	//assert
	require.True(t, ShouldNotify(device, smartAttrs, statusThreshold, notifyFilterAttributes))
}

func TestShouldNotify_MetricsStatusThresholdScrutiny_FailingSmartDevice(t *testing.T) {
	t.Parallel()
	//setup
	device := models.Device{
		DeviceStatus: pkg.DeviceStatusFailedSmart,
	}
	smartAttrs := measurements.Smart{}
	statusThreshold := pkg.MetricsStatusThresholdScrutiny
	notifyFilterAttributes := pkg.MetricsStatusFilterAttributesAll

	//assert
	require.False(t, ShouldNotify(device, smartAttrs, statusThreshold, notifyFilterAttributes))
}

func TestShouldNotify_MetricsStatusFilterAttributesCritical_WithCriticalAttrs(t *testing.T) {
	t.Parallel()
	//setup
	device := models.Device{
		DeviceStatus: pkg.DeviceStatusFailedSmart,
	}
	smartAttrs := measurements.Smart{Attributes: map[string]measurements.SmartAttribute{
		"5": &measurements.SmartAtaAttribute{
			Status: pkg.AttributeStatusFailedSmart,
		},
	}}
	statusThreshold := pkg.MetricsStatusThresholdBoth
	notifyFilterAttributes := pkg.MetricsStatusFilterAttributesCritical

	//assert
	require.True(t, ShouldNotify(device, smartAttrs, statusThreshold, notifyFilterAttributes))
}

func TestShouldNotify_MetricsStatusFilterAttributesCritical_WithMultipleCriticalAttrs(t *testing.T) {
	t.Parallel()
	//setup
	device := models.Device{
		DeviceStatus: pkg.DeviceStatusFailedSmart,
	}
	smartAttrs := measurements.Smart{Attributes: map[string]measurements.SmartAttribute{
		"5": &measurements.SmartAtaAttribute{
			Status: pkg.AttributeStatusPassed,
		},
		"10": &measurements.SmartAtaAttribute{
			Status: pkg.AttributeStatusFailedScrutiny,
		},
	}}
	statusThreshold := pkg.MetricsStatusThresholdBoth
	notifyFilterAttributes := pkg.MetricsStatusFilterAttributesCritical

	//assert
	require.True(t, ShouldNotify(device, smartAttrs, statusThreshold, notifyFilterAttributes))
}

func TestShouldNotify_MetricsStatusFilterAttributesCritical_WithNoCriticalAttrs(t *testing.T) {
	t.Parallel()
	//setup
	device := models.Device{
		DeviceStatus: pkg.DeviceStatusFailedSmart,
	}
	smartAttrs := measurements.Smart{Attributes: map[string]measurements.SmartAttribute{
		"1": &measurements.SmartAtaAttribute{
			Status: pkg.AttributeStatusFailedSmart,
		},
	}}
	statusThreshold := pkg.MetricsStatusThresholdBoth
	notifyFilterAttributes := pkg.MetricsStatusFilterAttributesCritical

	//assert
	require.False(t, ShouldNotify(device, smartAttrs, statusThreshold, notifyFilterAttributes))
}

func TestShouldNotify_MetricsStatusFilterAttributesCritical_WithNoFailingCriticalAttrs(t *testing.T) {
	t.Parallel()
	//setup
	device := models.Device{
		DeviceStatus: pkg.DeviceStatusFailedSmart,
	}
	smartAttrs := measurements.Smart{Attributes: map[string]measurements.SmartAttribute{
		"5": &measurements.SmartAtaAttribute{
			Status: pkg.AttributeStatusPassed,
		},
	}}
	statusThreshold := pkg.MetricsStatusThresholdBoth
	notifyFilterAttributes := pkg.MetricsStatusFilterAttributesCritical

	//assert
	require.False(t, ShouldNotify(device, smartAttrs, statusThreshold, notifyFilterAttributes))
}

func TestShouldNotify_MetricsStatusFilterAttributesCritical_MetricsStatusThresholdSmart_WithCriticalAttrsFailingScrutiny(t *testing.T) {
	t.Parallel()
	//setup
	device := models.Device{
		DeviceStatus: pkg.DeviceStatusFailedSmart,
	}
	smartAttrs := measurements.Smart{Attributes: map[string]measurements.SmartAttribute{
		"5": &measurements.SmartAtaAttribute{
			Status: pkg.AttributeStatusPassed,
		},
		"10": &measurements.SmartAtaAttribute{
			Status: pkg.AttributeStatusFailedScrutiny,
		},
	}}
	statusThreshold := pkg.MetricsStatusThresholdSmart
	notifyFilterAttributes := pkg.MetricsStatusFilterAttributesCritical

	//assert
	require.False(t, ShouldNotify(device, smartAttrs, statusThreshold, notifyFilterAttributes))
}

func TestNewPayload(t *testing.T) {
	t.Parallel()

	//setup
	device := models.Device{
		SerialNumber: "FAKEWDDJ324KSO",
		DeviceType:   pkg.DeviceProtocolAta,
		DeviceName:   "/dev/sda",
		DeviceStatus: pkg.DeviceStatusFailedScrutiny,
	}
	currentTime := time.Now()
	//test

	payload := NewPayload(device, false, currentTime)

	//assert
	require.Equal(t, "Scrutiny SMART error (ScrutinyFailure) detected on device: /dev/sda", payload.Subject)
	require.Equal(t, fmt.Sprintf(`Scrutiny SMART error notification for device: /dev/sda
Failure Type: ScrutinyFailure
Device Name: /dev/sda
Device Serial: FAKEWDDJ324KSO
Device Type: ATA

Date: %s`, currentTime.Format(time.RFC3339)), payload.Message)
}

func TestNewPayload_TestMode(t *testing.T) {
	t.Parallel()

	//setup
	device := models.Device{
		SerialNumber: "FAKEWDDJ324KSO",
		DeviceType:   pkg.DeviceProtocolAta,
		DeviceName:   "/dev/sda",
		DeviceStatus: pkg.DeviceStatusFailedScrutiny,
	}
	currentTime := time.Now()
	//test

	payload := NewPayload(device, true, currentTime)

	//assert
	require.Equal(t, "Scrutiny SMART error (EmailTest) detected on device: /dev/sda", payload.Subject)
	require.Equal(t, fmt.Sprintf(`TEST NOTIFICATION:
Scrutiny SMART error notification for device: /dev/sda
Failure Type: EmailTest
Device Name: /dev/sda
Device Serial: FAKEWDDJ324KSO
Device Type: ATA

Date: %s`, currentTime.Format(time.RFC3339)), payload.Message)
}

func TestNewPayload_WithHostId(t *testing.T) {
	t.Parallel()

	//setup
	device := models.Device{
		SerialNumber: "FAKEWDDJ324KSO",
		DeviceType:   pkg.DeviceProtocolAta,
		DeviceName:   "/dev/sda",
		DeviceStatus: pkg.DeviceStatusFailedScrutiny,
		HostId:       "custom-host",
	}
	currentTime := time.Now()
	//test

	payload := NewPayload(device, false, currentTime)

	//assert
	require.Equal(t, "Scrutiny SMART error (ScrutinyFailure) detected on [host]device: [custom-host]/dev/sda", payload.Subject)
	require.Equal(t, fmt.Sprintf(`Scrutiny SMART error notification for device: /dev/sda
Host Id: custom-host
Failure Type: ScrutinyFailure
Device Name: /dev/sda
Device Serial: FAKEWDDJ324KSO
Device Type: ATA

Date: %s`, currentTime.Format(time.RFC3339)), payload.Message)
}
@@ -36,56 +36,6 @@ var AtaMetadata = map[int]AtaAttributeMetadata{
|
||||
Ideal: ObservedThresholdIdealLow,
|
||||
Critical: false,
|
||||
Description: "(Vendor specific raw value.) Stores data related to the rate of hardware read errors that occurred when reading data from a disk surface. The raw value has different structure for different vendors and is often not meaningful as a decimal number.",
|
||||
ObservedThresholds: []ObservedThreshold{
|
||||
{
|
||||
Low: 80,
|
||||
High: 95,
|
||||
AnnualFailureRate: 0.8879749768303985,
|
||||
ErrorInterval: []float64{0.682344353388663, 1.136105732920724},
|
||||
},
|
||||
{
|
||||
Low: 95,
|
||||
High: 110,
|
||||
AnnualFailureRate: 0.034155719633986996,
|
||||
ErrorInterval: []float64{0.030188482024981093, 0.038499386872354435},
|
||||
},
|
||||
{
|
||||
Low: 110,
|
||||
High: 125,
|
||||
AnnualFailureRate: 0.06390002135229157,
|
||||
ErrorInterval: []float64{0.05852004676110847, 0.06964160930553712},
|
||||
},
|
||||
{
|
||||
Low: 125,
|
||||
High: 140,
|
||||
AnnualFailureRate: 0,
|
||||
ErrorInterval: []float64{0, 0},
|
||||
},
|
||||
{
|
||||
Low: 140,
|
||||
High: 155,
|
||||
AnnualFailureRate: 0,
|
||||
ErrorInterval: []float64{0, 0},
|
||||
},
|
||||
{
|
||||
Low: 155,
|
||||
High: 170,
|
||||
AnnualFailureRate: 0,
|
||||
ErrorInterval: []float64{0, 0},
|
||||
},
|
||||
{
|
||||
Low: 170,
|
||||
High: 185,
|
||||
AnnualFailureRate: 0,
|
||||
ErrorInterval: []float64{0, 0},
|
||||
},
|
||||
{
|
||||
Low: 185,
|
||||
High: 200,
|
||||
AnnualFailureRate: 0.044823775021490854,
|
||||
ErrorInterval: []float64{0.032022762038723306, 0.06103725943096589},
|
||||
},
|
||||
},
|
||||
},
|
||||
2: {
|
||||
ID: 2,
|
||||
@@ -290,56 +240,6 @@ var AtaMetadata = map[int]AtaAttributeMetadata{
|
||||
Ideal: "",
|
||||
Critical: false,
|
||||
Description: "(Vendor specific raw value.) Rate of seek errors of the magnetic heads. If there is a partial failure in the mechanical positioning system, then seek errors will arise. Such a failure may be due to numerous factors, such as damage to a servo, or thermal widening of the hard disk. The raw value has different structure for different vendors and is often not meaningful as a decimal number.",
|
||||
ObservedThresholds: []ObservedThreshold{
|
||||
{
|
||||
Low: 58,
|
||||
High: 76,
|
||||
AnnualFailureRate: 0.2040131025936549,
|
||||
ErrorInterval: []float64{0.17032852883286412, 0.2424096283327138},
|
||||
},
|
||||
{
|
||||
Low: 76,
|
||||
High: 94,
|
||||
AnnualFailureRate: 0.08725919610118257,
|
||||
ErrorInterval: []float64{0.08077138510999876, 0.09412943212007528},
|
||||
},
|
||||
{
|
||||
Low: 94,
|
||||
High: 112,
|
||||
AnnualFailureRate: 0.01087335627722523,
|
||||
ErrorInterval: []float64{0.008732197944943352, 0.013380600544561905},
|
||||
},
|
||||
{
|
||||
Low: 112,
|
||||
High: 130,
|
||||
AnnualFailureRate: 0,
|
||||
ErrorInterval: []float64{0, 0},
|
||||
},
|
||||
{
|
||||
Low: 130,
|
||||
High: 148,
|
||||
AnnualFailureRate: 0,
|
||||
ErrorInterval: []float64{0, 0},
|
||||
},
|
||||
{
|
||||
Low: 148,
|
||||
High: 166,
|
||||
AnnualFailureRate: 0,
|
||||
ErrorInterval: []float64{0, 0},
|
||||
},
|
||||
{
|
||||
Low: 166,
|
||||
High: 184,
|
||||
AnnualFailureRate: 0,
|
||||
ErrorInterval: []float64{0, 0},
|
||||
},
|
||||
{
|
||||
Low: 184,
|
||||
High: 202,
|
||||
AnnualFailureRate: 0.05316285755900475,
|
||||
ErrorInterval: []float64{0.03370069132942804, 0.07977038905848267},
|
||||
},
|
||||
},
|
||||
},
|
||||
8: {
|
||||
ID: 8,
|
||||
|
||||
@@ -19,7 +19,7 @@ var ScsiMetadata = map[string]ScsiAttributeMetadata{
		DisplayType: "",
		Ideal:       "low",
		Critical:    true,
		Description: "",
		Description: "The grown defect count shows the amount of swapped (defective) blocks since the drive was shipped by its vendor. Each additional defective block increases the count by one.",
	},
	"read_errors_corrected_by_eccfast": {
		ID: "read_errors_corrected_by_eccfast",
@@ -27,7 +27,7 @@ var ScsiMetadata = map[string]ScsiAttributeMetadata{
		DisplayType: "",
		Ideal:       "",
		Critical:    false,
		Description: "",
		Description: "An error correction was applied to get perfect data (a.k.a. ECC on-the-fly). \"Without substantial delay\" means the correction did not postpone reading of later sectors (e.g. a revolution was not lost). The counter is incremented once for each logical block that requires correction. Two different blocks corrected during the same command are counted as two events.",
	},
	"read_errors_corrected_by_eccdelayed": {
		ID: "read_errors_corrected_by_eccdelayed",
@@ -35,7 +35,7 @@ var ScsiMetadata = map[string]ScsiAttributeMetadata{
		DisplayType: "",
		Ideal:       "",
		Critical:    false,
		Description: "",
		Description: "An error code or algorithm (e.g. ECC, checksum) is applied in order to get perfect data with substantial delay. \"With possible delay\" means the correction took longer than a sector time so that reading/writing of subsequent sectors was delayed (e.g. a lost revolution). The counter is incremented once for each logical block that requires correction. A block with a double error that is correctable counts as one event and two different blocks corrected during the same command count as two events.",
	},
	"read_errors_corrected_by_rereads_rewrites": {
		ID: "read_errors_corrected_by_rereads_rewrites",
@@ -43,7 +43,7 @@ var ScsiMetadata = map[string]ScsiAttributeMetadata{
		DisplayType: "",
		Ideal:       "low",
		Critical:    true,
		Description: "",
		Description: "This parameter code specifies the counter counting the number of errors that are corrected by applying retries. This counts errors recovered, not the number of retries. If five retries were required to recover one block of data, the counter increments by one, not five. The counter is incremented once for each logical block that is recovered using retries. If an error is not recoverable while applying retries and is recovered by ECC, it isn't counted by this counter; it will be counted by the counter specified by parameter code 01h - Errors Corrected With Possible Delays.",
	},
	"read_total_errors_corrected": {
		ID: "read_total_errors_corrected",
@@ -51,7 +51,7 @@ var ScsiMetadata = map[string]ScsiAttributeMetadata{
		DisplayType: "",
		Ideal:       "",
		Critical:    false,
		Description: "",
		Description: "This counter counts the total of parameter code errors 00h, 01h and 02h (i.e. error corrected by ECC: fast and delayed plus errors corrected by rereads and rewrites). There is no \"double counting\" of data errors among these three counters. The sum of all correctable errors can be reached by adding parameter code 01h and 02h errors, not by using this total.",
	},
	"read_correction_algorithm_invocations": {
		ID: "read_correction_algorithm_invocations",
@@ -59,7 +59,7 @@ var ScsiMetadata = map[string]ScsiAttributeMetadata{
		DisplayType: "",
		Ideal:       "",
		Critical:    false,
		Description: "",
		Description: "This parameter code specifies the counter that counts the total number of retries, or \"times the retry algorithm is invoked\". If after five attempts a counter 02h type error is recovered, then five is added to this counter. If three retries are required to get stable ECC syndrome before a counter 01h type error is corrected, then those three retries are also counted here. The number of retries applied to unsuccessfully recover an error (counter 06h type error) are also counted by this counter.",
	},
	"read_total_uncorrected_errors": {
		ID: "read_total_uncorrected_errors",
@@ -67,7 +67,7 @@ var ScsiMetadata = map[string]ScsiAttributeMetadata{
		DisplayType: "",
		Ideal:       "low",
		Critical:    true,
		Description: "",
		Description: "This parameter code specifies the counter that contains the total number of blocks for which an uncorrected data error has occurred.",
	},
	"write_errors_corrected_by_eccfast": {
		ID: "write_errors_corrected_by_eccfast",
@@ -75,7 +75,7 @@ var ScsiMetadata = map[string]ScsiAttributeMetadata{
		DisplayType: "",
		Ideal:       "",
		Critical:    false,
		Description: "",
		Description: "An error correction was applied to get perfect data (a.k.a. ECC on-the-fly). \"Without substantial delay\" means the correction did not postpone reading of later sectors (e.g. a revolution was not lost). The counter is incremented once for each logical block that requires correction. Two different blocks corrected during the same command are counted as two events.",
	},
	"write_errors_corrected_by_eccdelayed": {
		ID: "write_errors_corrected_by_eccdelayed",
@@ -83,7 +83,7 @@ var ScsiMetadata = map[string]ScsiAttributeMetadata{
		DisplayType: "",
		Ideal:       "",
		Critical:    false,
		Description: "",
		Description: "An error code or algorithm (e.g. ECC, checksum) is applied in order to get perfect data with substantial delay. \"With possible delay\" means the correction took longer than a sector time so that reading/writing of subsequent sectors was delayed (e.g. a lost revolution). The counter is incremented once for each logical block that requires correction. A block with a double error that is correctable counts as one event and two different blocks corrected during the same command count as two events.",
	},
	"write_errors_corrected_by_rereads_rewrites": {
		ID: "write_errors_corrected_by_rereads_rewrites",
@@ -91,7 +91,7 @@ var ScsiMetadata = map[string]ScsiAttributeMetadata{
		DisplayType: "",
		Ideal:       "low",
		Critical:    true,
		Description: "",
		Description: "This parameter code specifies the counter counting the number of errors that are corrected by applying retries. This counts errors recovered, not the number of retries. If five retries were required to recover one block of data, the counter increments by one, not five. The counter is incremented once for each logical block that is recovered using retries. If an error is not recoverable while applying retries and is recovered by ECC, it isn't counted by this counter; it will be counted by the counter specified by parameter code 01h - Errors Corrected With Possible Delays.",
	},
	"write_total_errors_corrected": {
		ID: "write_total_errors_corrected",
@@ -99,7 +99,7 @@ var ScsiMetadata = map[string]ScsiAttributeMetadata{
		DisplayType: "",
		Ideal:       "",
		Critical:    false,
		Description: "",
		Description: "This counter counts the total of parameter code errors 00h, 01h and 02h (i.e. error corrected by ECC: fast and delayed plus errors corrected by rereads and rewrites). There is no \"double counting\" of data errors among these three counters. The sum of all correctable errors can be reached by adding parameter code 01h and 02h errors, not by using this total.",
	},
	"write_correction_algorithm_invocations": {
		ID: "write_correction_algorithm_invocations",
@@ -107,7 +107,7 @@ var ScsiMetadata = map[string]ScsiAttributeMetadata{
		DisplayType: "",
		Ideal:       "",
		Critical:    false,
		Description: "",
		Description: "This parameter code specifies the counter that counts the total number of retries, or \"times the retry algorithm is invoked\". If after five attempts a counter 02h type error is recovered, then five is added to this counter. If three retries are required to get stable ECC syndrome before a counter 01h type error is corrected, then those three retries are also counted here. The number of retries applied to unsuccessfully recover an error (counter 06h type error) are also counted by this counter.",
	},
	"write_total_uncorrected_errors": {
		ID: "write_total_uncorrected_errors",
@@ -115,6 +115,6 @@ var ScsiMetadata = map[string]ScsiAttributeMetadata{
		DisplayType: "",
		Ideal:       "low",
		Critical:    true,
		Description: "",
		Description: "This parameter code specifies the counter that contains the total number of blocks for which an uncorrected data error has occurred.",
	},
}

@@ -2,4 +2,4 @@ package version

// VERSION is the app-global version string, which will be replaced with a
// new value during packaging
const VERSION = "0.4.11"
const VERSION = "0.7.1"

@@ -8,7 +8,7 @@ import (
)

func DeleteDevice(c *gin.Context) {
	logger := c.MustGet("LOGGER").(logrus.FieldLogger)
	logger := c.MustGet("LOGGER").(*logrus.Entry)
	deviceRepo := c.MustGet("DEVICE_REPOSITORY").(database.DeviceRepo)

	err := deviceRepo.DeleteDevice(c, c.Param("wwn"))

@@ -9,7 +9,7 @@ import (
)

func GetDeviceDetails(c *gin.Context) {
	logger := c.MustGet("LOGGER").(logrus.FieldLogger)
	logger := c.MustGet("LOGGER").(*logrus.Entry)
	deviceRepo := c.MustGet("DEVICE_REPOSITORY").(database.DeviceRepo)

	device, err := deviceRepo.GetDeviceDetails(c, c.Param("wwn"))

@@ -8,7 +8,7 @@ import (
)

func GetDevicesSummary(c *gin.Context) {
	logger := c.MustGet("LOGGER").(logrus.FieldLogger)
	logger := c.MustGet("LOGGER").(*logrus.Entry)
	deviceRepo := c.MustGet("DEVICE_REPOSITORY").(database.DeviceRepo)

	summary, err := deviceRepo.GetSummary(c)
@@ -18,6 +18,7 @@ func GetDevicesSummary(c *gin.Context) {
		return
	}

	//this must match DeviceSummaryWrapper (webapp/backend/pkg/models/device_summary.go)
	c.JSON(http.StatusOK, gin.H{
		"success": true,
		"data": map[string]interface{}{

@@ -8,7 +8,7 @@ import (
)

func GetDevicesSummaryTempHistory(c *gin.Context) {
	logger := c.MustGet("LOGGER").(logrus.FieldLogger)
	logger := c.MustGet("LOGGER").(*logrus.Entry)
	deviceRepo := c.MustGet("DEVICE_REPOSITORY").(database.DeviceRepo)

	durationKey, exists := c.GetQuery("duration_key")

@@ -0,0 +1,25 @@
package handler

import (
	"github.com/analogj/scrutiny/webapp/backend/pkg/database"
	"github.com/gin-gonic/gin"
	"github.com/sirupsen/logrus"
	"net/http"
)

func GetSettings(c *gin.Context) {
	logger := c.MustGet("LOGGER").(*logrus.Entry)
	deviceRepo := c.MustGet("DEVICE_REPOSITORY").(database.DeviceRepo)

	settings, err := deviceRepo.LoadSettings(c)
	if err != nil {
		logger.Errorln("An error occurred while retrieving settings", err)
		c.JSON(http.StatusInternalServerError, gin.H{"success": false})
		return
	}

	c.JSON(http.StatusOK, gin.H{
		"success":  true,
		"settings": settings,
	})
}
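For context, this handler would be wired into the gin router alongside the SaveSettings handler introduced further below. The exact routes are not shown in this diff, so the registration here is illustrative only:

// hypothetical route registration; the real paths may differ
r := gin.New()
apiGroup := r.Group("/api")
apiGroup.GET("/settings", handler.GetSettings)   // read current settings
apiGroup.POST("/settings", handler.SaveSettings) // persist updated settings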
@@ -0,0 +1,29 @@
package handler

import (
	"github.com/analogj/scrutiny/webapp/backend/pkg/database"
	"github.com/gin-gonic/gin"
	"github.com/sirupsen/logrus"
	"net/http"
)

func HealthCheck(c *gin.Context) {
	logger := c.MustGet("LOGGER").(*logrus.Entry)
	deviceRepo := c.MustGet("DEVICE_REPOSITORY").(database.DeviceRepo)
	logger.Infof("Checking Influxdb & Sqlite health")

	//check sqlite and influxdb health
	err := deviceRepo.HealthCheck(c)
	if err != nil {
		logger.Errorln("An error occurred during healthcheck", err)
		c.JSON(http.StatusInternalServerError, gin.H{"success": false, "error": err.Error()})
		return
	}

	//TODO:
	// check if the /web folder is populated.

	c.JSON(http.StatusOK, gin.H{
		"success": true,
	})
}
@@ -4,6 +4,7 @@ import (
	"github.com/analogj/scrutiny/webapp/backend/pkg/database"
	"github.com/analogj/scrutiny/webapp/backend/pkg/models"
	"github.com/gin-gonic/gin"
	"github.com/samber/lo"
	"github.com/sirupsen/logrus"
	"net/http"
)
@@ -12,7 +13,7 @@ import (
// This function is run every time a collector is about to start a run. It can be used to update device metadata.
func RegisterDevices(c *gin.Context) {
	deviceRepo := c.MustGet("DEVICE_REPOSITORY").(database.DeviceRepo)
	logger := c.MustGet("LOGGER").(logrus.FieldLogger)
	logger := c.MustGet("LOGGER").(*logrus.Entry)

	var collectorDeviceWrapper models.DeviceWrapper
	err := c.BindJSON(&collectorDeviceWrapper)
@@ -22,8 +23,13 @@ func RegisterDevices(c *gin.Context) {
		return
	}

	//filter any device with empty wwn (they are invalid)
	detectedStorageDevices := lo.Filter[models.Device](collectorDeviceWrapper.Data, func(dev models.Device, _ int) bool {
		return len(dev.WWN) > 0
	})

	errs := []error{}
	for _, dev := range collectorDeviceWrapper.Data {
	for _, dev := range detectedStorageDevices {
		//insert devices into DB (and update specified columns if device is already registered)
		// update device fields that may change: (DeviceType, HostID)
		if err := deviceRepo.RegisterDevice(c, dev); err != nil {
@@ -40,7 +46,7 @@ func RegisterDevices(c *gin.Context) {
	} else {
		c.JSON(http.StatusOK, models.DeviceWrapper{
			Success: true,
			Data:    collectorDeviceWrapper.Data,
			Data:    detectedStorageDevices,
		})
		return
	}

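The samber/lo generic filter used above can be exercised in isolation. A self-contained sketch with a hypothetical WWN value (the local Device type and sample data are illustrative, not the repository's models.Device):

package main

import (
	"fmt"

	"github.com/samber/lo"
)

type Device struct{ WWN string }

func main() {
	// same predicate as RegisterDevices: drop devices whose WWN is empty
	detected := lo.Filter([]Device{{WWN: "0x5000c500a1b2c3d4"}, {WWN: ""}}, func(dev Device, _ int) bool {
		return len(dev.WWN) > 0
	})
	fmt.Println(len(detected)) // prints 1; the empty-WWN device was filtered out
}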
@@ -0,0 +1,34 @@
package handler

import (
	"github.com/analogj/scrutiny/webapp/backend/pkg/database"
	"github.com/analogj/scrutiny/webapp/backend/pkg/models"
	"github.com/gin-gonic/gin"
	"github.com/sirupsen/logrus"
	"net/http"
)

func SaveSettings(c *gin.Context) {
	logger := c.MustGet("LOGGER").(*logrus.Entry)
	deviceRepo := c.MustGet("DEVICE_REPOSITORY").(database.DeviceRepo)

	var settings models.Settings
	err := c.BindJSON(&settings)
	if err != nil {
		logger.Errorln("Cannot parse updated settings", err)
		c.JSON(http.StatusInternalServerError, gin.H{"success": false})
		return
	}

	err = deviceRepo.SaveSettings(c, settings)
	if err != nil {
		logger.Errorln("An error occurred while saving settings", err)
		c.JSON(http.StatusInternalServerError, gin.H{"success": false})
		return
	}

	c.JSON(http.StatusOK, gin.H{
		"success":  true,
		"settings": settings,
	})
}
@@ -13,19 +13,18 @@ import (
|
||||
// Send test notification
|
||||
func SendTestNotification(c *gin.Context) {
|
||||
appConfig := c.MustGet("CONFIG").(config.Interface)
|
||||
logger := c.MustGet("LOGGER").(logrus.FieldLogger)
|
||||
logger := c.MustGet("LOGGER").(*logrus.Entry)
|
||||
|
||||
testNotify := notify.Notify{
|
||||
Logger: logger,
|
||||
Config: appConfig,
|
||||
Payload: notify.Payload{
|
||||
FailureType: "EmailTest",
|
||||
DeviceSerial: "FAKEWDDJ324KSO",
|
||||
testNotify := notify.New(
|
||||
logger,
|
||||
appConfig,
|
||||
models.Device{
|
||||
SerialNumber: "FAKEWDDJ324KSO",
|
||||
DeviceType: pkg.DeviceProtocolAta,
|
||||
DeviceName: "/dev/sda",
|
||||
Test: true,
|
||||
},
|
||||
}
|
||||
true,
|
||||
)
|
||||
err := testNotify.Send()
|
||||
if err != nil {
|
||||
logger.Errorln("An error occurred while sending test notification", err)

@@ -1,6 +1,7 @@
package handler

import (
+	"fmt"
	"github.com/analogj/scrutiny/webapp/backend/pkg"
	"github.com/analogj/scrutiny/webapp/backend/pkg/config"
	"github.com/analogj/scrutiny/webapp/backend/pkg/database"
@@ -13,13 +14,17 @@ import (

func UploadDeviceMetrics(c *gin.Context) {
	//db := c.MustGet("DB").(*gorm.DB)
-	logger := c.MustGet("LOGGER").(logrus.FieldLogger)
+	logger := c.MustGet("LOGGER").(*logrus.Entry)
+	appConfig := c.MustGet("CONFIG").(config.Interface)
	//influxWriteDb := c.MustGet("INFLUXDB_WRITE").(*api.WriteAPIBlocking)
	deviceRepo := c.MustGet("DEVICE_REPOSITORY").(database.DeviceRepo)

-	//appConfig := c.MustGet("CONFIG").(config.Interface)
-
	if c.Param("wwn") == "" {
		c.JSON(http.StatusBadRequest, gin.H{"success": false})
	}

	var collectorSmartData collector.SmartInfo
	err := c.BindJSON(&collectorSmartData)
	if err != nil {

@@ -63,20 +68,21 @@ func UploadDeviceMetrics(c *gin.Context) {
	}

	//check for error
-	if updatedDevice.DeviceStatus != pkg.DeviceStatusPassed {
+	if notify.ShouldNotify(
+		updatedDevice,
+		smartData,
+		pkg.MetricsStatusThreshold(appConfig.GetInt(fmt.Sprintf("%s.metrics.status_threshold", config.DB_USER_SETTINGS_SUBKEY))),
+		pkg.MetricsStatusFilterAttributes(appConfig.GetInt(fmt.Sprintf("%s.metrics.status_filter_attributes", config.DB_USER_SETTINGS_SUBKEY))),
+	) {
		//send notifications
-		testNotify := notify.Notify{
-			Config: appConfig,
-			Payload: notify.Payload{
-				FailureType:  notify.NotifyFailureTypeSmartFailure,
-				DeviceName:   updatedDevice.DeviceName,
-				DeviceType:   updatedDevice.DeviceProtocol,
-				DeviceSerial: updatedDevice.SerialNumber,
-				Test:         false,
-			},
-			Logger: logger,
-		}
-		_ = testNotify.Send() //we ignore error message when sending notifications.
+
+		liveNotify := notify.New(
+			logger,
+			appConfig,
+			updatedDevice,
+			false,
+		)
+		_ = liveNotify.Send() //we ignore error message when sending notifications.
	}

	c.JSON(http.StatusOK, gin.H{"success": true})

@@ -28,11 +28,11 @@ import (
var timeFormat = "02/Jan/2006:15:04:05 -0700"

// Logger is the logrus logger handler
-func LoggerMiddleware(logger logrus.FieldLogger) gin.HandlerFunc {
+func LoggerMiddleware(logger *logrus.Entry) gin.HandlerFunc {

	hostname, err := os.Hostname()
	if err != nil {
-		hostname = "unknow"
+		hostname = "unknown"
	}

	return func(c *gin.Context) {
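Note: the `logrus.FieldLogger` to `*logrus.Entry` narrowing recurs throughout this changeset. `FieldLogger` is an interface satisfied by both `*logrus.Logger` and `*logrus.Entry`; an `*logrus.Entry` is a concrete logger with fields pre-attached, which is what the middleware stores in the gin context and what the handlers' type assertions now expect. For illustration:

```go
package main

import "github.com/sirupsen/logrus"

func main() {
	root := logrus.New()

	// WithFields returns a *logrus.Entry whose fields ride along on every
	// log line; asserting .(*logrus.Entry) in a handler only works if this
	// concrete type (not a bare *logrus.Logger) was stored in the context.
	entry := root.WithFields(logrus.Fields{
		"type":     "web",
		"hostname": "example-host",
	})

	var _ logrus.FieldLogger = entry // an Entry still satisfies the interface
	entry.Infoln("request received")
}
```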

@@ -1,6 +1,7 @@
package middleware

import (
+	"context"
	"github.com/analogj/scrutiny/webapp/backend/pkg/config"
	"github.com/analogj/scrutiny/webapp/backend/pkg/database"
	"github.com/gin-gonic/gin"
@@ -14,6 +15,14 @@ func RepositoryMiddleware(appConfig config.Interface, globalLogger logrus.FieldL
		panic(err)
	}

+	// ensure the settings have been loaded into the app config during startup.
+	_, err = deviceRepo.LoadSettings(context.Background())
+	if err != nil {
+		panic(err)
+	}
+
+	//settings.UpdateSettingEntries()
+
	//TODO: determine where we can call defer deviceRepo.Close()
	return func(c *gin.Context) {
		c.Set("DEVICE_REPOSITORY", deviceRepo)

@@ -9,18 +9,17 @@ import (
	"github.com/analogj/scrutiny/webapp/backend/pkg/web/middleware"
	"github.com/gin-gonic/gin"
	"github.com/sirupsen/logrus"
-	"io"
	"net/http"
	"os"
	"path/filepath"
	"strings"
)

type AppEngine struct {
	Config config.Interface
+	Logger *logrus.Entry
}

-func (ae *AppEngine) Setup(logger logrus.FieldLogger) *gin.Engine {
+func (ae *AppEngine) Setup(logger *logrus.Entry) *gin.Engine {
	r := gin.New()

	r.Use(middleware.LoggerMiddleware(logger))

@@ -35,11 +34,7 @@ func (ae *AppEngine) Setup(logger logrus.FieldLogger) *gin.Engine {
	{
		api := base.Group("/api")
		{
-			api.GET("/health", func(c *gin.Context) {
-				c.JSON(http.StatusOK, gin.H{
-					"success": true,
-				})
-			})
+			api.GET("/health", handler.HealthCheck)
			api.POST("/health/notify", handler.SendTestNotification) //check if notifications are configured correctly

			api.POST("/devices/register", handler.RegisterDevices) //used by Collector to register new devices and retrieve filtered list
@@ -50,6 +45,8 @@ func (ae *AppEngine) Setup(logger logrus.FieldLogger) *gin.Engine {
			api.GET("/device/:wwn/details", handler.GetDeviceDetails) //used by Details
			api.DELETE("/device/:wwn", handler.DeleteDevice) //used by UI to delete device

+			api.GET("/settings", handler.GetSettings)   //used to get settings
+			api.POST("/settings", handler.SaveSettings) //used to save settings
		}
	}
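Note: the inline health closure becomes the extracted `handler.HealthCheck`, and the two settings routes pair with the new Get/SaveSettings handlers. A quick way to exercise such a route without a running server; the closure below stands in for the extracted handler:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gin-gonic/gin"
)

func main() {
	gin.SetMode(gin.TestMode)

	// The closure stands in for handler.HealthCheck; the real route table
	// is built in AppEngine.Setup as shown above.
	r := gin.New()
	r.GET("/api/health", func(c *gin.Context) {
		c.JSON(http.StatusOK, gin.H{"success": true})
	})

	w := httptest.NewRecorder()
	req := httptest.NewRequest(http.MethodGet, "/api/health", nil)
	r.ServeHTTP(w, req)

	fmt.Println(w.Code, w.Body.String()) // 200 {"success":true}
}
```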

@@ -75,26 +72,6 @@ func (ae *AppEngine) Start() error {
		gin.SetMode(gin.DebugMode)
	}

-	logger := logrus.New()
-	//set default log level
-	logLevel, err := logrus.ParseLevel(ae.Config.GetString("log.level"))
-	if err != nil {
-		return err
-	}
-	logger.SetLevel(logLevel)
-	//set the log file if present
-	if len(ae.Config.GetString("log.file")) != 0 {
-		logFile, err := os.OpenFile(ae.Config.GetString("log.file"), os.O_CREATE|os.O_WRONLY, 0644)
-		defer logFile.Close()
-		if err != nil {
-			logrus.Errorf("Failed to open log file %s for output: %s", ae.Config.GetString("log.file"), err)
-			return err
-		}
-
-		//configure the logrus default
-		logger.SetOutput(io.MultiWriter(os.Stderr, logFile))
-	}
-
	//check if the database parent directory exists, fail here rather than in a handler.
	if !utils.FileExists(filepath.Dir(ae.Config.GetString("web.database.location"))) {
		return errors.ConfigValidationError(fmt.Sprintf(
@@ -102,7 +79,7 @@ func (ae *AppEngine) Start() error {
			filepath.Dir(ae.Config.GetString("web.database.location"))))
	}

-	r := ae.Setup(logger)
+	r := ae.Setup(ae.Logger)

	return r.Run(fmt.Sprintf("%s:%s", ae.Config.GetString("web.listen.host"), ae.Config.GetString("web.listen.port")))
}
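Note: `Start()` no longer builds its own logger; the caller injects one through the new `AppEngine.Logger` field. The removed bootstrap (level parsing plus an optional file tee) presumably moved to the entrypoint, which is not shown in this excerpt; a self-contained sketch of that logic:

```go
package main

import (
	"io"
	"os"

	"github.com/sirupsen/logrus"
)

// buildLogger reproduces the bootstrap removed from Start() above: parse the
// configured level and optionally tee output to a log file.
func buildLogger(level, file string) (*logrus.Logger, error) {
	logger := logrus.New()

	logLevel, err := logrus.ParseLevel(level)
	if err != nil {
		return nil, err
	}
	logger.SetLevel(logLevel)

	if file != "" {
		logFile, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY, 0644)
		if err != nil {
			return nil, err
		}
		// Unlike the removed code, the error is checked before the file is
		// used, and the caller decides when to close logFile.
		logger.SetOutput(io.MultiWriter(os.Stderr, logFile))
	}
	return logger, nil
}

func main() {
	logger, err := buildLogger("debug", "")
	if err != nil {
		panic(err)
	}
	logger.Debugln("logger configured")
}
```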

@@ -3,7 +3,9 @@ package web_test
import (
	"bytes"
	"encoding/json"
+	"fmt"
+	"github.com/analogj/scrutiny/webapp/backend/pkg"
	"github.com/analogj/scrutiny/webapp/backend/pkg/config"
	mock_config "github.com/analogj/scrutiny/webapp/backend/pkg/config/mock"
	"github.com/analogj/scrutiny/webapp/backend/pkg/models"
	"github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"

@@ -89,6 +91,8 @@ func (suite *ServerTestSuite) TestHealthRoute() {
	mockCtrl := gomock.NewController(suite.T())
	defer mockCtrl.Finish()
	fakeConfig := mock_config.NewMockInterface(mockCtrl)
+	fakeConfig.EXPECT().SetDefault(gomock.Any(), gomock.Any()).AnyTimes()
+	fakeConfig.EXPECT().UnmarshalKey(gomock.Any(), gomock.Any()).AnyTimes().Return(nil)
	fakeConfig.EXPECT().GetString("web.database.location").Return(path.Join(parentPath, "scrutiny_test.db")).AnyTimes()
	fakeConfig.EXPECT().GetString("web.src.frontend.path").Return(parentPath).AnyTimes()
	fakeConfig.EXPECT().GetString("web.listen.basepath").Return(suite.Basepath).AnyTimes()
@@ -99,6 +103,7 @@ func (suite *ServerTestSuite) TestHealthRoute() {
	fakeConfig.EXPECT().GetString("web.influxdb.token").Return("my-super-secret-auth-token").AnyTimes()
	fakeConfig.EXPECT().GetString("web.influxdb.org").Return("scrutiny").AnyTimes()
	fakeConfig.EXPECT().GetString("web.influxdb.bucket").Return("metrics").AnyTimes()
	fakeConfig.EXPECT().GetBool("web.influxdb.tls.insecure_skip_verify").Return(false).AnyTimes()
+	fakeConfig.EXPECT().GetBool("web.influxdb.retention_policy").Return(false).AnyTimes()
	if _, isGithubActions := os.LookupEnv("GITHUB_ACTIONS"); isGithubActions {
		// when running test suite in github actions, we run an influxdb service as a sidecar.
@@ -111,7 +116,7 @@ func (suite *ServerTestSuite) TestHealthRoute() {
		Config: fakeConfig,
	}

-	router := ae.Setup(logrus.New())
+	router := ae.Setup(logrus.WithField("test", suite.T().Name()))

	//test
	w := httptest.NewRecorder()

@@ -130,6 +135,8 @@ func (suite *ServerTestSuite) TestRegisterDevicesRoute() {
	mockCtrl := gomock.NewController(suite.T())
	defer mockCtrl.Finish()
	fakeConfig := mock_config.NewMockInterface(mockCtrl)
+	fakeConfig.EXPECT().SetDefault(gomock.Any(), gomock.Any()).AnyTimes()
+	fakeConfig.EXPECT().UnmarshalKey(gomock.Any(), gomock.Any()).AnyTimes().Return(nil)
	fakeConfig.EXPECT().GetString("web.database.location").Return(path.Join(parentPath, "scrutiny_test.db")).AnyTimes()
	fakeConfig.EXPECT().GetString("web.src.frontend.path").Return(parentPath).AnyTimes()
	fakeConfig.EXPECT().GetString("web.listen.basepath").Return(suite.Basepath).AnyTimes()
@@ -139,6 +146,7 @@ func (suite *ServerTestSuite) TestRegisterDevicesRoute() {
	fakeConfig.EXPECT().GetString("web.influxdb.token").Return("my-super-secret-auth-token").AnyTimes()
	fakeConfig.EXPECT().GetString("web.influxdb.org").Return("scrutiny").AnyTimes()
	fakeConfig.EXPECT().GetString("web.influxdb.bucket").Return("metrics").AnyTimes()
	fakeConfig.EXPECT().GetBool("web.influxdb.tls.insecure_skip_verify").Return(false).AnyTimes()
+	fakeConfig.EXPECT().GetBool("web.influxdb.retention_policy").Return(false).AnyTimes()
	if _, isGithubActions := os.LookupEnv("GITHUB_ACTIONS"); isGithubActions {
		// when running test suite in github actions, we run an influxdb service as a sidecar.
@@ -150,7 +158,7 @@ func (suite *ServerTestSuite) TestRegisterDevicesRoute() {
	ae := web.AppEngine{
		Config: fakeConfig,
	}
-	router := ae.Setup(logrus.New())
+	router := ae.Setup(logrus.WithField("test", suite.T().Name()))
	file, err := os.Open("testdata/register-devices-req.json")
	require.NoError(suite.T(), err)

@@ -170,6 +178,8 @@ func (suite *ServerTestSuite) TestUploadDeviceMetricsRoute() {
	mockCtrl := gomock.NewController(suite.T())
	defer mockCtrl.Finish()
	fakeConfig := mock_config.NewMockInterface(mockCtrl)
+	fakeConfig.EXPECT().SetDefault(gomock.Any(), gomock.Any()).AnyTimes()
+	fakeConfig.EXPECT().UnmarshalKey(gomock.Any(), gomock.Any()).AnyTimes().Return(nil)
	fakeConfig.EXPECT().GetString("web.database.location").AnyTimes().Return(path.Join(parentPath, "scrutiny_test.db"))
	fakeConfig.EXPECT().GetString("web.src.frontend.path").AnyTimes().Return(parentPath)
	fakeConfig.EXPECT().GetString("web.listen.basepath").Return(suite.Basepath).AnyTimes()
@@ -179,6 +189,7 @@ func (suite *ServerTestSuite) TestUploadDeviceMetricsRoute() {
	fakeConfig.EXPECT().GetString("web.influxdb.token").Return("my-super-secret-auth-token").AnyTimes()
	fakeConfig.EXPECT().GetString("web.influxdb.org").Return("scrutiny").AnyTimes()
	fakeConfig.EXPECT().GetString("web.influxdb.bucket").Return("metrics").AnyTimes()
	fakeConfig.EXPECT().GetBool("web.influxdb.tls.insecure_skip_verify").Return(false).AnyTimes()
+	fakeConfig.EXPECT().GetBool("web.influxdb.retention_policy").Return(false).AnyTimes()
	if _, isGithubActions := os.LookupEnv("GITHUB_ACTIONS"); isGithubActions {
		// when running test suite in github actions, we run an influxdb service as a sidecar.
@@ -186,11 +197,14 @@ func (suite *ServerTestSuite) TestUploadDeviceMetricsRoute() {
	} else {
		fakeConfig.EXPECT().GetString("web.influxdb.host").Return("localhost").AnyTimes()
	}
+	fakeConfig.EXPECT().GetInt(fmt.Sprintf("%s.metrics.notify_level", config.DB_USER_SETTINGS_SUBKEY)).AnyTimes().Return(int(pkg.MetricsNotifyLevelFail))
+	fakeConfig.EXPECT().GetInt(fmt.Sprintf("%s.metrics.status_filter_attributes", config.DB_USER_SETTINGS_SUBKEY)).AnyTimes().Return(int(pkg.MetricsStatusFilterAttributesAll))
+	fakeConfig.EXPECT().GetInt(fmt.Sprintf("%s.metrics.status_threshold", config.DB_USER_SETTINGS_SUBKEY)).AnyTimes().Return(int(pkg.MetricsStatusThresholdBoth))

	ae := web.AppEngine{
		Config: fakeConfig,
	}
-	router := ae.Setup(logrus.New())
+	router := ae.Setup(logrus.WithField("test", suite.T().Name()))
	devicesfile, err := os.Open("testdata/register-devices-single-req.json")
	require.NoError(suite.T(), err)

@@ -217,8 +231,13 @@ func (suite *ServerTestSuite) TestPopulateMultiple() {
	mockCtrl := gomock.NewController(suite.T())
	defer mockCtrl.Finish()
	fakeConfig := mock_config.NewMockInterface(mockCtrl)
+	fakeConfig.EXPECT().SetDefault(gomock.Any(), gomock.Any()).AnyTimes()
+	fakeConfig.EXPECT().UnmarshalKey(gomock.Any(), gomock.Any()).AnyTimes().Return(nil)
	//fakeConfig.EXPECT().GetString("web.database.location").AnyTimes().Return("testdata/scrutiny_test.db")
	fakeConfig.EXPECT().GetStringSlice("notify.urls").Return([]string{}).AnyTimes()
+	fakeConfig.EXPECT().GetInt(fmt.Sprintf("%s.metrics.notify_level", config.DB_USER_SETTINGS_SUBKEY)).AnyTimes().Return(int(pkg.MetricsNotifyLevelFail))
+	fakeConfig.EXPECT().GetInt(fmt.Sprintf("%s.metrics.status_filter_attributes", config.DB_USER_SETTINGS_SUBKEY)).AnyTimes().Return(int(pkg.MetricsStatusFilterAttributesAll))
+	fakeConfig.EXPECT().GetInt(fmt.Sprintf("%s.metrics.status_threshold", config.DB_USER_SETTINGS_SUBKEY)).AnyTimes().Return(int(pkg.MetricsStatusThresholdBoth))
	fakeConfig.EXPECT().GetString("web.database.location").AnyTimes().Return(path.Join(parentPath, "scrutiny_test.db"))
	fakeConfig.EXPECT().GetString("web.src.frontend.path").AnyTimes().Return(parentPath)
	fakeConfig.EXPECT().GetString("web.listen.basepath").Return(suite.Basepath).AnyTimes()
@@ -228,6 +247,7 @@ func (suite *ServerTestSuite) TestPopulateMultiple() {
	fakeConfig.EXPECT().GetString("web.influxdb.token").Return("my-super-secret-auth-token").AnyTimes()
	fakeConfig.EXPECT().GetString("web.influxdb.org").Return("scrutiny").AnyTimes()
	fakeConfig.EXPECT().GetString("web.influxdb.bucket").Return("metrics").AnyTimes()
	fakeConfig.EXPECT().GetBool("web.influxdb.tls.insecure_skip_verify").Return(false).AnyTimes()
+	fakeConfig.EXPECT().GetBool("web.influxdb.retention_policy").Return(false).AnyTimes()
	if _, isGithubActions := os.LookupEnv("GITHUB_ACTIONS"); isGithubActions {
		// when running test suite in github actions, we run an influxdb service as a sidecar.
@@ -239,7 +259,7 @@ func (suite *ServerTestSuite) TestPopulateMultiple() {
	ae := web.AppEngine{
		Config: fakeConfig,
	}
-	router := ae.Setup(logrus.New())
+	router := ae.Setup(logrus.WithField("test", suite.T().Name()))
	devicesfile, err := os.Open("testdata/register-devices-req.json")
	require.NoError(suite.T(), err)

@@ -315,6 +335,8 @@ func (suite *ServerTestSuite) TestSendTestNotificationRoute_WebhookFailure() {
	mockCtrl := gomock.NewController(suite.T())
	defer mockCtrl.Finish()
	fakeConfig := mock_config.NewMockInterface(mockCtrl)
+	fakeConfig.EXPECT().SetDefault(gomock.Any(), gomock.Any()).AnyTimes()
+	fakeConfig.EXPECT().UnmarshalKey(gomock.Any(), gomock.Any()).AnyTimes().Return(nil)
	fakeConfig.EXPECT().GetString("web.database.location").AnyTimes().Return(path.Join(parentPath, "scrutiny_test.db"))
	fakeConfig.EXPECT().GetString("web.src.frontend.path").AnyTimes().Return(parentPath)
	fakeConfig.EXPECT().GetString("web.listen.basepath").Return(suite.Basepath).AnyTimes()
@@ -324,8 +346,13 @@ func (suite *ServerTestSuite) TestSendTestNotificationRoute_WebhookFailure() {
	fakeConfig.EXPECT().GetString("web.influxdb.token").Return("my-super-secret-auth-token").AnyTimes()
	fakeConfig.EXPECT().GetString("web.influxdb.org").Return("scrutiny").AnyTimes()
	fakeConfig.EXPECT().GetString("web.influxdb.bucket").Return("metrics").AnyTimes()
	fakeConfig.EXPECT().GetBool("web.influxdb.tls.insecure_skip_verify").Return(false).AnyTimes()
+	fakeConfig.EXPECT().GetBool("web.influxdb.retention_policy").Return(false).AnyTimes()
	fakeConfig.EXPECT().GetStringSlice("notify.urls").AnyTimes().Return([]string{"https://unroutable.domain.example.asdfghj"})
+	fakeConfig.EXPECT().GetInt(fmt.Sprintf("%s.metrics.notify_level", config.DB_USER_SETTINGS_SUBKEY)).AnyTimes().Return(int(pkg.MetricsNotifyLevelFail))
+	fakeConfig.EXPECT().GetInt(fmt.Sprintf("%s.metrics.status_filter_attributes", config.DB_USER_SETTINGS_SUBKEY)).AnyTimes().Return(int(pkg.MetricsStatusFilterAttributesAll))
+	fakeConfig.EXPECT().GetInt(fmt.Sprintf("%s.metrics.status_threshold", config.DB_USER_SETTINGS_SUBKEY)).AnyTimes().Return(int(pkg.MetricsStatusThresholdBoth))

	if _, isGithubActions := os.LookupEnv("GITHUB_ACTIONS"); isGithubActions {
		// when running test suite in github actions, we run an influxdb service as a sidecar.
		fakeConfig.EXPECT().GetString("web.influxdb.host").Return("influxdb").AnyTimes()
@@ -336,7 +363,7 @@ func (suite *ServerTestSuite) TestSendTestNotificationRoute_WebhookFailure() {
	ae := web.AppEngine{
		Config: fakeConfig,
	}
-	router := ae.Setup(logrus.New())
+	router := ae.Setup(logrus.WithField("test", suite.T().Name()))

	//test
	wr := httptest.NewRecorder()

@@ -354,6 +381,8 @@ func (suite *ServerTestSuite) TestSendTestNotificationRoute_ScriptFailure() {
	mockCtrl := gomock.NewController(suite.T())
	defer mockCtrl.Finish()
	fakeConfig := mock_config.NewMockInterface(mockCtrl)
+	fakeConfig.EXPECT().SetDefault(gomock.Any(), gomock.Any()).AnyTimes()
+	fakeConfig.EXPECT().UnmarshalKey(gomock.Any(), gomock.Any()).AnyTimes().Return(nil)
	fakeConfig.EXPECT().GetString("web.database.location").AnyTimes().Return(path.Join(parentPath, "scrutiny_test.db"))
	fakeConfig.EXPECT().GetString("web.src.frontend.path").AnyTimes().Return(parentPath)
	fakeConfig.EXPECT().GetString("web.listen.basepath").Return(suite.Basepath).AnyTimes()
@@ -363,8 +392,13 @@ func (suite *ServerTestSuite) TestSendTestNotificationRoute_ScriptFailure() {
	fakeConfig.EXPECT().GetString("web.influxdb.token").Return("my-super-secret-auth-token").AnyTimes()
	fakeConfig.EXPECT().GetString("web.influxdb.org").Return("scrutiny").AnyTimes()
	fakeConfig.EXPECT().GetString("web.influxdb.bucket").Return("metrics").AnyTimes()
	fakeConfig.EXPECT().GetBool("web.influxdb.tls.insecure_skip_verify").Return(false).AnyTimes()
+	fakeConfig.EXPECT().GetBool("web.influxdb.retention_policy").Return(false).AnyTimes()
	fakeConfig.EXPECT().GetStringSlice("notify.urls").AnyTimes().Return([]string{"script:///missing/path/on/disk"})
+	fakeConfig.EXPECT().GetInt(fmt.Sprintf("%s.metrics.notify_level", config.DB_USER_SETTINGS_SUBKEY)).AnyTimes().Return(int(pkg.MetricsNotifyLevelFail))
+	fakeConfig.EXPECT().GetInt(fmt.Sprintf("%s.metrics.status_filter_attributes", config.DB_USER_SETTINGS_SUBKEY)).AnyTimes().Return(int(pkg.MetricsStatusFilterAttributesAll))
+	fakeConfig.EXPECT().GetInt(fmt.Sprintf("%s.metrics.status_threshold", config.DB_USER_SETTINGS_SUBKEY)).AnyTimes().Return(int(pkg.MetricsStatusThresholdBoth))

	if _, isGithubActions := os.LookupEnv("GITHUB_ACTIONS"); isGithubActions {
		// when running test suite in github actions, we run an influxdb service as a sidecar.
		fakeConfig.EXPECT().GetString("web.influxdb.host").Return("influxdb").AnyTimes()
@@ -375,7 +409,7 @@ func (suite *ServerTestSuite) TestSendTestNotificationRoute_ScriptFailure() {
	ae := web.AppEngine{
		Config: fakeConfig,
	}
-	router := ae.Setup(logrus.New())
+	router := ae.Setup(logrus.WithField("test", suite.T().Name()))

	//test
	wr := httptest.NewRecorder()

@@ -393,6 +427,8 @@ func (suite *ServerTestSuite) TestSendTestNotificationRoute_ScriptSuccess() {
	mockCtrl := gomock.NewController(suite.T())
	defer mockCtrl.Finish()
	fakeConfig := mock_config.NewMockInterface(mockCtrl)
+	fakeConfig.EXPECT().SetDefault(gomock.Any(), gomock.Any()).AnyTimes()
+	fakeConfig.EXPECT().UnmarshalKey(gomock.Any(), gomock.Any()).AnyTimes().Return(nil)
	fakeConfig.EXPECT().GetString("web.database.location").AnyTimes().Return(path.Join(parentPath, "scrutiny_test.db"))
	fakeConfig.EXPECT().GetString("web.src.frontend.path").AnyTimes().Return(parentPath)
	fakeConfig.EXPECT().GetString("web.listen.basepath").Return(suite.Basepath).AnyTimes()
@@ -402,8 +438,13 @@ func (suite *ServerTestSuite) TestSendTestNotificationRoute_ScriptSuccess() {
	fakeConfig.EXPECT().GetString("web.influxdb.token").Return("my-super-secret-auth-token").AnyTimes()
	fakeConfig.EXPECT().GetString("web.influxdb.org").Return("scrutiny").AnyTimes()
	fakeConfig.EXPECT().GetString("web.influxdb.bucket").Return("metrics").AnyTimes()
	fakeConfig.EXPECT().GetBool("web.influxdb.tls.insecure_skip_verify").Return(false).AnyTimes()
+	fakeConfig.EXPECT().GetBool("web.influxdb.retention_policy").Return(false).AnyTimes()
	fakeConfig.EXPECT().GetStringSlice("notify.urls").AnyTimes().Return([]string{"script:///usr/bin/env"})
+	fakeConfig.EXPECT().GetInt(fmt.Sprintf("%s.metrics.notify_level", config.DB_USER_SETTINGS_SUBKEY)).AnyTimes().Return(int(pkg.MetricsNotifyLevelFail))
+	fakeConfig.EXPECT().GetInt(fmt.Sprintf("%s.metrics.status_filter_attributes", config.DB_USER_SETTINGS_SUBKEY)).AnyTimes().Return(int(pkg.MetricsStatusFilterAttributesAll))
+	fakeConfig.EXPECT().GetInt(fmt.Sprintf("%s.metrics.status_threshold", config.DB_USER_SETTINGS_SUBKEY)).AnyTimes().Return(int(pkg.MetricsStatusThresholdBoth))

	if _, isGithubActions := os.LookupEnv("GITHUB_ACTIONS"); isGithubActions {
		// when running test suite in github actions, we run an influxdb service as a sidecar.
		fakeConfig.EXPECT().GetString("web.influxdb.host").Return("influxdb").AnyTimes()
@@ -414,7 +455,7 @@ func (suite *ServerTestSuite) TestSendTestNotificationRoute_ScriptSuccess() {
	ae := web.AppEngine{
		Config: fakeConfig,
	}
-	router := ae.Setup(logrus.New())
+	router := ae.Setup(logrus.WithField("test", suite.T().Name()))

	//test
	wr := httptest.NewRecorder()

@@ -432,6 +473,8 @@ func (suite *ServerTestSuite) TestSendTestNotificationRoute_ShoutrrrFailure() {
	mockCtrl := gomock.NewController(suite.T())
	defer mockCtrl.Finish()
	fakeConfig := mock_config.NewMockInterface(mockCtrl)
+	fakeConfig.EXPECT().SetDefault(gomock.Any(), gomock.Any()).AnyTimes()
+	fakeConfig.EXPECT().UnmarshalKey(gomock.Any(), gomock.Any()).AnyTimes().Return(nil)
	fakeConfig.EXPECT().GetString("web.database.location").AnyTimes().Return(path.Join(parentPath, "scrutiny_test.db"))
	fakeConfig.EXPECT().GetString("web.src.frontend.path").AnyTimes().Return(parentPath)
	fakeConfig.EXPECT().GetString("web.listen.basepath").Return(suite.Basepath).AnyTimes()
@@ -441,8 +484,13 @@ func (suite *ServerTestSuite) TestSendTestNotificationRoute_ShoutrrrFailure() {
	fakeConfig.EXPECT().GetString("web.influxdb.token").Return("my-super-secret-auth-token").AnyTimes()
	fakeConfig.EXPECT().GetString("web.influxdb.org").Return("scrutiny").AnyTimes()
	fakeConfig.EXPECT().GetString("web.influxdb.bucket").Return("metrics").AnyTimes()
	fakeConfig.EXPECT().GetBool("web.influxdb.tls.insecure_skip_verify").Return(false).AnyTimes()
+	fakeConfig.EXPECT().GetBool("web.influxdb.retention_policy").Return(false).AnyTimes()
	fakeConfig.EXPECT().GetStringSlice("notify.urls").AnyTimes().Return([]string{"discord://invalidtoken@channel"})
+	fakeConfig.EXPECT().GetInt(fmt.Sprintf("%s.metrics.notify_level", config.DB_USER_SETTINGS_SUBKEY)).AnyTimes().Return(int(pkg.MetricsNotifyLevelFail))
+	fakeConfig.EXPECT().GetInt(fmt.Sprintf("%s.metrics.status_filter_attributes", config.DB_USER_SETTINGS_SUBKEY)).AnyTimes().Return(int(pkg.MetricsStatusFilterAttributesAll))
+	fakeConfig.EXPECT().GetInt(fmt.Sprintf("%s.metrics.status_threshold", config.DB_USER_SETTINGS_SUBKEY)).AnyTimes().Return(int(pkg.MetricsStatusThresholdBoth))

	if _, isGithubActions := os.LookupEnv("GITHUB_ACTIONS"); isGithubActions {
		// when running test suite in github actions, we run an influxdb service as a sidecar.
		fakeConfig.EXPECT().GetString("web.influxdb.host").Return("influxdb").AnyTimes()
@@ -452,7 +500,7 @@ func (suite *ServerTestSuite) TestSendTestNotificationRoute_ShoutrrrFailure() {
	ae := web.AppEngine{
		Config: fakeConfig,
	}
-	router := ae.Setup(logrus.New())
+	router := ae.Setup(logrus.WithField("test", suite.T().Name()))

	//test
	wr := httptest.NewRecorder()

@@ -470,6 +518,8 @@ func (suite *ServerTestSuite) TestGetDevicesSummaryRoute_Nvme() {
	mockCtrl := gomock.NewController(suite.T())
	defer mockCtrl.Finish()
	fakeConfig := mock_config.NewMockInterface(mockCtrl)
+	fakeConfig.EXPECT().SetDefault(gomock.Any(), gomock.Any()).AnyTimes()
+	fakeConfig.EXPECT().UnmarshalKey(gomock.Any(), gomock.Any()).AnyTimes().Return(nil)
	fakeConfig.EXPECT().GetString("web.database.location").AnyTimes().Return(path.Join(parentPath, "scrutiny_test.db"))
	fakeConfig.EXPECT().GetString("web.src.frontend.path").AnyTimes().Return(parentPath)
	fakeConfig.EXPECT().GetString("web.listen.basepath").Return(suite.Basepath).AnyTimes()
@@ -479,8 +529,13 @@ func (suite *ServerTestSuite) TestGetDevicesSummaryRoute_Nvme() {
	fakeConfig.EXPECT().GetString("web.influxdb.token").Return("my-super-secret-auth-token").AnyTimes()
	fakeConfig.EXPECT().GetString("web.influxdb.org").Return("scrutiny").AnyTimes()
	fakeConfig.EXPECT().GetString("web.influxdb.bucket").Return("metrics").AnyTimes()
	fakeConfig.EXPECT().GetBool("web.influxdb.tls.insecure_skip_verify").Return(false).AnyTimes()
+	fakeConfig.EXPECT().GetBool("web.influxdb.retention_policy").Return(false).AnyTimes()
	fakeConfig.EXPECT().GetStringSlice("notify.urls").AnyTimes().Return([]string{})
+	fakeConfig.EXPECT().GetInt(fmt.Sprintf("%s.metrics.notify_level", config.DB_USER_SETTINGS_SUBKEY)).AnyTimes().Return(int(pkg.MetricsNotifyLevelFail))
+	fakeConfig.EXPECT().GetInt(fmt.Sprintf("%s.metrics.status_filter_attributes", config.DB_USER_SETTINGS_SUBKEY)).AnyTimes().Return(int(pkg.MetricsStatusFilterAttributesAll))
+	fakeConfig.EXPECT().GetInt(fmt.Sprintf("%s.metrics.status_threshold", config.DB_USER_SETTINGS_SUBKEY)).AnyTimes().Return(int(pkg.MetricsStatusThresholdBoth))

	if _, isGithubActions := os.LookupEnv("GITHUB_ACTIONS"); isGithubActions {
		// when running test suite in github actions, we run an influxdb service as a sidecar.
		fakeConfig.EXPECT().GetString("web.influxdb.host").Return("influxdb").AnyTimes()
@@ -491,7 +546,7 @@ func (suite *ServerTestSuite) TestGetDevicesSummaryRoute_Nvme() {
	ae := web.AppEngine{
		Config: fakeConfig,
	}
-	router := ae.Setup(logrus.New())
+	router := ae.Setup(logrus.WithField("test", suite.T().Name()))
	devicesfile, err := os.Open("testdata/register-devices-req-2.json")
	require.NoError(suite.T(), err)
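Note: every test in this suite now repeats the same `fakeConfig` expectations (SetDefault, UnmarshalKey, the influxdb strings, the three metrics GetInt calls). Nothing in this diff does so, but a natural follow-up would be a shared helper along these lines; the helper name is hypothetical, while each expectation is taken verbatim from the tests above, and `mock_config.MockInterface` is the mockgen-generated mock of `config.Interface`:

```go
// Hypothetical helper, not part of this diff: centralize the expectations
// that every test above repeats verbatim.
func newFakeConfig(mockCtrl *gomock.Controller, parentPath, basepath string) *mock_config.MockInterface {
	fakeConfig := mock_config.NewMockInterface(mockCtrl)
	fakeConfig.EXPECT().SetDefault(gomock.Any(), gomock.Any()).AnyTimes()
	fakeConfig.EXPECT().UnmarshalKey(gomock.Any(), gomock.Any()).AnyTimes().Return(nil)
	fakeConfig.EXPECT().GetString("web.database.location").Return(path.Join(parentPath, "scrutiny_test.db")).AnyTimes()
	fakeConfig.EXPECT().GetString("web.listen.basepath").Return(basepath).AnyTimes()
	fakeConfig.EXPECT().GetBool("web.influxdb.retention_policy").Return(false).AnyTimes()
	fakeConfig.EXPECT().GetInt(fmt.Sprintf("%s.metrics.status_threshold", config.DB_USER_SETTINGS_SUBKEY)).AnyTimes().Return(int(pkg.MetricsStatusThresholdBoth))
	// ...remaining shared expectations elided for brevity...
	return fakeConfig
}
```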

@@ -46,3 +46,5 @@ testem.log
Thumbs.db

/dist
+
+/coverage

@@ -52,7 +52,6 @@
	"optimization": true,
	"outputHashing": "all",
	"sourceMap": false,
-	"extractCss": true,
	"namedChunks": false,
	"extractLicenses": true,
	"vendorChunk": false,
@@ -101,10 +100,22 @@
	"src/favicon-32x32.png",
	"src/assets"
],
+"stylePreprocessorOptions": {
+	"includePaths": [
+		"src/@treo/styles"
+	]
+},
"styles": [
-	"src/styles.scss"
+	"src/styles/vendors.scss",
+	"src/@treo/styles/main.scss",
+	"src/styles/styles.scss",
+	"src/styles/tailwind.scss"
],
-"scripts": []
+"scripts": [],
+"fileReplacements": [{
+	"replace": "src/environments/environment.ts",
+	"with": "src/environments/environment.prod.ts"
+}]
}
},
"lint": {

@@ -10,24 +10,24 @@ module.exports = function (config)
	require('karma-jasmine'),
	require('karma-chrome-launcher'),
	require('karma-jasmine-html-reporter'),
-	require('karma-coverage-istanbul-reporter'),
+	require('karma-coverage'),
	require('@angular-devkit/build-angular/plugins/karma')
],
-client : {
+client: {
	clearContext: false // leave Jasmine Spec Runner output visible in browser
},
coverageIstanbulReporter: {
-	dir : require('path').join(__dirname, './coverage/treo'),
-	reports : ['html', 'lcovonly', 'text-summary'],
+	dir: require('path').join(__dirname, './coverage'),
+	reports: ['html', 'lcovonly', 'text-summary'],
	fixWebpackSourcePaths: true
},
-reporters : ['progress', 'kjhtml'],
-port : 9876,
-colors : true,
-logLevel : config.LOG_INFO,
-autoWatch : true,
-browsers : ['Chrome'],
-singleRun : false,
-restartOnFileChange : true
+reporters: ['progress', 'kjhtml'],
+port: 9876,
+colors: true,
+logLevel: config.LOG_INFO,
+autoWatch: true,
+browsers: ['Chrome'],
+singleRun: false,
+restartOnFileChange: true
});
};

Generated file (+8291, -27143): diff suppressed because it is too large.

@@ -7,7 +7,7 @@
	"start": "ng serve --open",
	"start:mem": "node --max_old_space_size=6144 ./node_modules/@angular/cli/bin/ng serve --open",
	"build": "ng build",
-	"build:prod": "ng build --prod",
+	"build:prod": "ng build --configuration production",
	"build:prod:mem": "node --max_old_space_size=6144 ./node_modules/@angular/cli/bin/ng build --prod",
	"test": "ng test",
	"lint": "ng lint",

@@ -20,66 +20,55 @@
	},
	"private": true,
	"dependencies": {
-		"@angular/animations": "9.1.4",
-		"@angular/cdk": "9.2.2",
-		"@angular/common": "9.1.4",
-		"@angular/compiler": "9.1.4",
-		"@angular/core": "9.1.4",
-		"@angular/forms": "9.1.4",
-		"@angular/material": "9.2.2",
-		"@angular/material-moment-adapter": "9.2.2",
-		"@angular/platform-browser": "9.1.4",
-		"@angular/platform-browser-dynamic": "9.1.4",
-		"@angular/router": "9.1.4",
-		"@fullcalendar/angular": "4.4.5-beta",
-		"@fullcalendar/core": "4.4.0",
-		"@fullcalendar/daygrid": "4.4.0",
-		"@fullcalendar/interaction": "4.4.0",
-		"@fullcalendar/list": "4.4.0",
-		"@fullcalendar/moment": "4.4.0",
-		"@fullcalendar/rrule": "4.4.0",
-		"@fullcalendar/timegrid": "4.4.0",
-		"@types/humanize-duration": "^3.18.1",
-		"apexcharts": "3.19.2",
-		"crypto-js": "3.3.0",
-		"highlight.js": "10.0.1",
-		"humanize-duration": "^3.24.0",
-		"lodash": "4.17.15",
-		"moment": "2.24.0",
-		"ng-apexcharts": "1.5.12",
-		"ngx-markdown": "9.0.0",
-		"ngx-quill": "9.1.0",
-		"perfect-scrollbar": "1.5.0",
-		"quill": "1.3.7",
-		"rrule": "2.6.4",
-		"rxjs": "6.5.5",
-		"tslib": "1.11.1",
-		"web-animations-js": "2.3.2",
-		"zone.js": "0.10.3"
+		"@angular/animations": "v13-lts",
+		"@angular/cdk": "v13-lts",
+		"@angular/common": "v13-lts",
+		"@angular/compiler": "v13-lts",
+		"@angular/core": "v13-lts",
+		"@angular/forms": "v13-lts",
+		"@angular/material": "v13-lts",
+		"@angular/material-moment-adapter": "v13-lts",
+		"@angular/platform-browser": "v13-lts",
+		"@angular/platform-browser-dynamic": "v13-lts",
+		"@angular/router": "v13-lts",
+		"@types/humanize-duration": "^3.27.1",
+		"crypto-js": "^4.1.1",
+		"highlight.js": "^11.6.0",
+		"humanize-duration": "^3.27.3",
+		"lodash": "4.17.21",
+		"moment": "^2.29.4",
+		"ng-apexcharts": "^1.7.4",
+		"ngx-markdown": "^13.1.0",
+		"perfect-scrollbar": "^1.5.5",
+		"quill": "^1.3.7",
+		"rrule": "^2.7.1",
+		"rxjs": "^7.5.7",
+		"tslib": "^2.4.1",
+		"web-animations-js": "^2.3.2"
	},
	"devDependencies": {
-		"@angular-devkit/build-angular": "0.901.4",
-		"@angular/cli": "9.1.4",
-		"@angular/compiler-cli": "9.1.4",
-		"@angular/language-service": "9.1.4",
-		"@types/crypto-js": "3.1.45",
-		"@types/highlight.js": "9.12.3",
-		"@types/jasmine": "3.5.10",
-		"@types/jasminewd2": "2.0.8",
-		"@types/lodash": "4.14.150",
-		"@types/node": "12.12.37",
-		"codelyzer": "5.2.2",
-		"jasmine-core": "3.5.0",
-		"jasmine-spec-reporter": "4.2.1",
-		"karma": "5.0.4",
-		"karma-chrome-launcher": "3.1.0",
-		"karma-coverage-istanbul-reporter": "2.1.1",
-		"karma-jasmine": "3.0.3",
-		"karma-jasmine-html-reporter": "1.5.3",
-		"protractor": "5.4.4",
-		"tailwindcss": "1.4.4",
-		"ts-node": "8.3.0",
-		"tslint": "6.1.2",
-		"typescript": "3.8.3"
+		"@angular-devkit/build-angular": "v13-lts",
+		"@angular/cli": "v13-lts",
+		"@angular/compiler-cli": "v13-lts",
+		"@angular/language-service": "v13-lts",
+		"@types/crypto-js": "^4.1.1",
+		"@types/highlight.js": "^10.1.0",
+		"@types/jasmine": "^4.3.0",
+		"@types/jasminewd2": "^2.0.10",
+		"@types/lodash": "^4.14.188",
+		"@types/node": "^18.11.9",
+		"codelyzer": "^6.0.2",
+		"jasmine-core": "^4.5.0",
+		"jasmine-spec-reporter": "^7.0.0",
+		"karma": "^6.4.1",
+		"karma-chrome-launcher": "^3.1.1",
+		"karma-coverage": "^2.2.0",
+		"karma-jasmine": "^5.1.0",
+		"karma-jasmine-html-reporter": "^2.0.0",
+		"protractor": "^7.0.0",
+		"tailwindcss": "^3.2.3",
+		"ts-node": "^10.9.1",
+		"tslint": "^6.1.3",
+		"typescript": "^4.6.4"
	}
}

@@ -56,7 +56,7 @@ export class TreoDateRangeComponent implements ControlValueAccessor, OnInit, OnD
	private _timeFormat: string;
	private _timeRange: boolean;
	private readonly _timeRegExp: RegExp;
-	private _unsubscribeAll: Subject<any>;
+	private _unsubscribeAll: Subject<void>;

	/**
	 * Constructor

@@ -31,7 +31,7 @@ export class TreoMessageComponent implements OnInit, OnDestroy
	private _dismissed: null | boolean;
	private _showIcon: boolean;
	private _type: TreoMessageType;
-	private _unsubscribeAll: Subject<any>;
+	private _unsubscribeAll: Subject<void>;

	/**
	 * Constructor

@@ -23,7 +23,7 @@ export class TreoHorizontalNavigationBasicItemComponent implements OnInit, OnDes

	// Private
	private _treoHorizontalNavigationComponent: TreoHorizontalNavigationComponent;
-	private _unsubscribeAll: Subject<any>;
+	private _unsubscribeAll: Subject<void>;

	/**
	 * Constructor

@@ -32,7 +32,7 @@ export class TreoHorizontalNavigationBranchItemComponent implements OnInit, OnDe

	// Private
	private _treoHorizontalNavigationComponent: TreoHorizontalNavigationComponent;
-	private _unsubscribeAll: Subject<any>;
+	private _unsubscribeAll: Subject<void>;

	/**
	 * Constructor

@@ -23,7 +23,7 @@ export class TreoHorizontalNavigationDividerItemComponent implements OnInit, OnD

	// Private
	private _treoHorizontalNavigationComponent: TreoHorizontalNavigationComponent;
-	private _unsubscribeAll: Subject<any>;
+	private _unsubscribeAll: Subject<void>;

	/**
	 * Constructor

@@ -23,7 +23,7 @@ export class TreoHorizontalNavigationSpacerItemComponent implements OnInit, OnDe

	// Private
	private _treoHorizontalNavigationComponent: TreoHorizontalNavigationComponent;
-	private _unsubscribeAll: Subject<any>;
+	private _unsubscribeAll: Subject<void>;

	/**
	 * Constructor

@@ -23,7 +23,7 @@ export class TreoHorizontalNavigationComponent implements OnInit, OnDestroy

	// Private
	private _navigation: TreoNavigationItem[];
-	private _unsubscribeAll: Subject<any>;
+	private _unsubscribeAll: Subject<void>;

	/**
	 * Constructor

@@ -35,7 +35,7 @@ export class TreoVerticalNavigationAsideItemComponent implements OnInit, OnDestr

	// Private
	private _treoVerticalNavigationComponent: TreoVerticalNavigationComponent;
-	private _unsubscribeAll: Subject<any>;
+	private _unsubscribeAll: Subject<void>;

	/**
	 * Constructor

@@ -23,7 +23,7 @@ export class TreoVerticalNavigationBasicItemComponent implements OnInit, OnDestr

	// Private
	private _treoVerticalNavigationComponent: TreoVerticalNavigationComponent;
-	private _unsubscribeAll: Subject<any>;
+	private _unsubscribeAll: Subject<void>;

	/**
	 * Constructor

@@ -38,7 +38,7 @@ export class TreoVerticalNavigationCollapsableItemComponent implements OnInit, O

	// Private
	private _treoVerticalNavigationComponent: TreoVerticalNavigationComponent;
-	private _unsubscribeAll: Subject<any>;
+	private _unsubscribeAll: Subject<void>;

	/**
	 * Constructor

@@ -23,7 +23,7 @@ export class TreoVerticalNavigationDividerItemComponent implements OnInit, OnDes

	// Private
	private _treoVerticalNavigationComponent: TreoVerticalNavigationComponent;
-	private _unsubscribeAll: Subject<any>;
+	private _unsubscribeAll: Subject<void>;

	/**
	 * Constructor

@@ -27,7 +27,7 @@ export class TreoVerticalNavigationGroupItemComponent implements OnInit, OnDestr

	// Private
	private _treoVerticalNavigationComponent: TreoVerticalNavigationComponent;
-	private _unsubscribeAll: Subject<any>;
+	private _unsubscribeAll: Subject<void>;

	/**
	 * Constructor

@@ -23,7 +23,7 @@ export class TreoVerticalNavigationSpacerItemComponent implements OnInit, OnDest

	// Private
	private _treoVerticalNavigationComponent: TreoVerticalNavigationComponent;
-	private _unsubscribeAll: Subject<any>;
+	private _unsubscribeAll: Subject<void>;

	/**
	 * Constructor

Some files were not shown because too many files have changed in this diff.