# Compare commits

753 commits
```diff
@@ -0,0 +1,5 @@
+/vendor
+/.idea
+/.github
+/.git
+/webapp/frontend/node_modules
```
```diff
@@ -0,0 +1,4 @@
+*.css linguist-detectable=false
+*.scss linguist-detectable=false
+*.js linguist-detectable=false
+*.ts linguist-detectable=false
```
@@ -18,9 +18,25 @@ If applicable, add screenshots to help explain your problem.

**Log Files**
If related to missing devices or SMART data, please run the `collector` in DEBUG mode, and attach the log file.
See [/docs/TROUBLESHOOTING_DEVICE_COLLECTOR.md](docs/TROUBLESHOOTING_DEVICE_COLLECTOR.md) for other troubleshooting tips.

```
docker exec scrutiny scrutiny-collector-metrics run --debug --log-file /tmp/test.log
# then use docker cp to copy the log file out of the container.
docker cp scrutiny:/tmp/test.log test.log
docker run -it --rm -p 8080:8080 \
  -v `pwd`/config:/opt/scrutiny/config \
  -v /run/udev:/run/udev:ro \
  --cap-add SYS_RAWIO \
  --device=/dev/sda \
  --device=/dev/sdb \
  -e DEBUG=true \
  -e COLLECTOR_LOG_FILE=/opt/scrutiny/config/collector.log \
  -e SCRUTINY_LOG_FILE=/opt/scrutiny/config/web.log \
  --name scrutiny \
  ghcr.io/analogj/scrutiny:master-omnibus

# in another terminal trigger the collector
docker exec scrutiny scrutiny-collector-metrics run
```

The log files will be available on your host in the `config` directory. Please attach them to this issue.

Please also provide the output of `docker info`
```diff
@@ -1,60 +0,0 @@
-name: CI
-# This workflow is triggered on pushes to the repository.
-on: [push]
-
-jobs:
-  build:
-    name: Build
-    runs-on: ubuntu-latest
-    container: golang:1.13
-    env:
-      PROJECT_PATH: /go/src/github.com/analogj/scrutiny
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-      - name: Build
-        env:
-          GOOS: linux
-          GOARCH: amd64
-        run: |
-          mkdir -p $PROJECT_PATH
-          cp -a $GITHUB_WORKSPACE/* $PROJECT_PATH/
-          cd $PROJECT_PATH
-
-          go mod vendor
-
-          go test -race -coverprofile=coverage.txt -covermode=atomic -v -tags "static" $(go list ./... | grep -v /vendor/)
-
-          go build -ldflags "-X main.goos=linux -X main.goarch=amd64" -o scrutiny-web-linux-amd64 -tags "static" webapp/backend/cmd/scrutiny/scrutiny.go
-          go build -ldflags "-X main.goos=linux -X main.goarch=amd64" -o scrutiny-collector-metrics-linux-amd64 -tags "static" collector/cmd/collector-metrics/collector-metrics.go
-
-          chmod +x scrutiny-web-linux-amd64
-          chmod +x scrutiny-collector-metrics-linux-amd64
-      - name: Archive
-        uses: actions/upload-artifact@v2
-        with:
-          name: binaries
-          path: |
-            ${{ env.PROJECT_PATH }}/scrutiny-web-linux-amd64
-            ${{ env.PROJECT_PATH }}/scrutiny-collector-metrics-linux-amd64
-      - uses: codecov/codecov-action@v1
-        with:
-          file: ${{ env.PROJECT_PATH }}/coverage.txt
-          flags: unittests
-          fail_ci_if_error: true
-  build-docker:
-    name: Build Docker
-    runs-on: ubuntu-latest
-    container: docker:19.03.2
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-      - name: Build Docker
-        run: |
-          docker build -t analogj/scrutiny -f docker/Dockerfile .
-          docker save -o docker-analogj-scrutiny-latest.tar analogj/scrutiny:latest
-      - name: Archive Docker
-        uses: actions/upload-artifact@v2
-        with:
-          name: docker
-          path: docker-analogj-scrutiny-latest.tar
```
```diff
@@ -0,0 +1,115 @@
+name: CI
+# This workflow is triggered on pushes & pull requests
+on: [pull_request]
+
+jobs:
+  test-frontend:
+    name: Test Frontend
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+      - name: Test Frontend
+        run: |
+          make binary-frontend-test-coverage
+      - name: Upload coverage
+        uses: actions/upload-artifact@v4
+        with:
+          name: coverage
+          path: ${{ github.workspace }}/webapp/frontend/coverage/lcov.info
+          retention-days: 1
+  test-backend:
+    name: Test Backend
+    runs-on: ubuntu-latest
+    container: ghcr.io/packagrio/packagr:latest-golang
+    # Service containers to run with `build` (Required for end-to-end testing)
+    services:
+      influxdb:
+        image: influxdb:2.2
+        env:
+          DOCKER_INFLUXDB_INIT_MODE: setup
+          DOCKER_INFLUXDB_INIT_USERNAME: admin
+          DOCKER_INFLUXDB_INIT_PASSWORD: password12345
+          DOCKER_INFLUXDB_INIT_ORG: scrutiny
+          DOCKER_INFLUXDB_INIT_BUCKET: metrics
+          DOCKER_INFLUXDB_INIT_ADMIN_TOKEN: my-super-secret-auth-token
+        ports:
+          - 8086:8086
+    env:
+      STATIC: true
+    steps:
+      - name: Git
+        run: |
+          apt-get update && apt-get install -y software-properties-common
+          add-apt-repository ppa:git-core/ppa && apt-get update && apt-get install -y git
+          git --version
+      - name: Checkout
+        uses: actions/checkout@v2
+      - name: Test Backend
+        run: |
+          make binary-clean binary-test-coverage
+      - name: Upload coverage
+        uses: actions/upload-artifact@v4
+        with:
+          name: coverage
+          path: ${{ github.workspace }}/coverage.txt
+          retention-days: 1
+  test-coverage:
+    name: Test Coverage Upload
+    needs:
+      - test-backend
+      - test-frontend
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+      - name: Download coverage reports
+        uses: actions/download-artifact@v4
+        with:
+          name: coverage
+      - name: Upload coverage reports
+        uses: codecov/codecov-action@v5
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
+          files: ${{ github.workspace }}/coverage.txt,${{ github.workspace }}/lcov.info
+          flags: unittests
+          fail_ci_if_error: true
+          verbose: true
+
+  build:
+    name: Build ${{ matrix.cfg.goos }}/${{ matrix.cfg.goarch }}
+    runs-on: ${{ matrix.cfg.on }}
+    env:
+      GOOS: ${{ matrix.cfg.goos }}
+      GOARCH: ${{ matrix.cfg.goarch }}
+      GOARM: ${{ matrix.cfg.goarm }}
+      STATIC: true
+    strategy:
+      matrix:
+        cfg:
+          - { on: ubuntu-latest, goos: linux, goarch: amd64 }
+          - { on: ubuntu-latest, goos: linux, goarch: arm, goarm: 5 }
+          - { on: ubuntu-latest, goos: linux, goarch: arm, goarm: 6 }
+          - { on: ubuntu-latest, goos: linux, goarch: arm, goarm: 7 }
+          - { on: ubuntu-latest, goos: linux, goarch: arm64 }
+          - { on: macos-latest, goos: darwin, goarch: amd64 }
+          - { on: macos-latest, goos: darwin, goarch: arm64 }
+          - { on: macos-latest, goos: freebsd, goarch: amd64 }
+          - { on: windows-latest, goos: windows, goarch: amd64 }
+          - { on: windows-latest, goos: windows, goarch: arm64 }
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+      - uses: actions/setup-go@v3
+        with:
+          go-version: '^1.20.1'
+      - name: Build Binaries
+        run: |
+          make binary-clean binary-all
+      - name: Archive
+        uses: actions/upload-artifact@v4
+        with:
+          name: binaries-${{ matrix.cfg.on }}-${{ matrix.cfg.goos }}-${{ matrix.cfg.goarch }}-${{ matrix.cfg.goarm || 'na' }}.zip
+          path: |
+            scrutiny-web-*
+            scrutiny-collector-metrics-*
```
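Because the matrix feeds `GOOS`/`GOARCH`/`GOARM`/`STATIC` straight into the repository Makefile as environment variables, any single matrix leg can be reproduced locally. A minimal sketch, assuming Go and GNU make are installed and you are at the repository root (the output filenames follow the suffix rules in the Makefile added later in this diff):

```bash
# Reproduce the linux/arm (GOARM=7) matrix leg of the CI build locally.
# STATIC=true makes the Makefile disable CGO and link statically.
export GOOS=linux GOARCH=arm GOARM=7 STATIC=true
make binary-clean binary-all

# Expected outputs, per the Makefile's name-suffixing:
#   scrutiny-web-linux-arm-7
#   scrutiny-collector-metrics-linux-arm-7
```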
```diff
@@ -0,0 +1,171 @@
+name: Docker
+on:
+  push:
+    branches: [ master, beta ]
+    # Publish semver tags as releases.
+    tags: [ 'v*.*.*' ]
+
+env:
+  REGISTRY: ghcr.io
+  IMAGE_NAME: ${{ github.repository }}
+
+jobs:
+  collector:
+    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      packages: write
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v2
+        with:
+          platforms: 'arm64,arm'
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+      # Login against a Docker registry except on PR
+      # https://github.com/docker/login-action
+      - name: Log into registry ${{ env.REGISTRY }}
+        if: github.event_name != 'pull_request'
+        uses: docker/login-action@v2
+        with:
+          registry: ${{ env.REGISTRY }}
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+      # Extract metadata (tags, labels) for Docker
+      # https://github.com/docker/metadata-action
+      - name: Extract Docker metadata
+        id: meta
+        uses: docker/metadata-action@v4
+        with:
+          flavor: |
+            latest=false
+          tags: |
+            type=ref,enable=true,event=branch,suffix=-collector
+            type=semver,pattern=v{{major}}.{{minor}}.{{patch}},suffix=-collector
+            type=semver,pattern=v{{major}}.{{minor}},suffix=-collector
+            type=semver,pattern=v{{major}},suffix=-collector
+          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
+
+      # Build and push Docker image with Buildx (don't push on PR)
+      # https://github.com/docker/build-push-action
+      - name: Build and push Docker image
+        uses: docker/build-push-action@v3
+        with:
+          platforms: linux/amd64,linux/arm64,linux/arm/v7
+          context: .
+          file: docker/Dockerfile.collector
+          push: ${{ github.event_name != 'pull_request' }}
+          tags: ${{ steps.meta.outputs.tags }}
+          labels: ${{ steps.meta.outputs.labels }}
+          # cache-from: type=gha
+          # cache-to: type=gha,mode=max
+
+  web:
+    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      packages: write
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v2
+      - name: "Populate frontend version information"
+        run: "cd webapp/frontend && ./git.version.sh"
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v2
+        with:
+          platforms: 'arm64,arm'
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+      # Login against a Docker registry except on PR
+      # https://github.com/docker/login-action
+      - name: Log into registry ${{ env.REGISTRY }}
+        if: github.event_name != 'pull_request'
+        uses: docker/login-action@v2
+        with:
+          registry: ${{ env.REGISTRY }}
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+      # Extract metadata (tags, labels) for Docker
+      # https://github.com/docker/metadata-action
+      - name: Extract Docker metadata
+        id: meta
+        uses: docker/metadata-action@v4
+        with:
+          flavor: |
+            latest=false
+          tags: |
+            type=ref,enable=true,event=branch,suffix=-web
+            type=semver,pattern=v{{major}}.{{minor}}.{{patch}},suffix=-web
+            type=semver,pattern=v{{major}}.{{minor}},suffix=-web
+            type=semver,pattern=v{{major}},suffix=-web
+          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
+      # Build and push Docker image with Buildx (don't push on PR)
+      # https://github.com/docker/build-push-action
+      - name: Build and push Docker image
+        uses: docker/build-push-action@v3
+        with:
+          platforms: linux/amd64,linux/arm64,linux/arm/v7
+          context: .
+          file: docker/Dockerfile.web
+          push: ${{ github.event_name != 'pull_request' }}
+          tags: ${{ steps.meta.outputs.tags }}
+          labels: ${{ steps.meta.outputs.labels }}
+          # cache-from: type=gha
+          # cache-to: type=gha,mode=max
+  omnibus:
+    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      packages: write
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v2
+      - name: "Populate frontend version information"
+        run: "cd webapp/frontend && ./git.version.sh"
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v2
+        with:
+          platforms: 'arm64,arm'
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+      # Login against a Docker registry except on PR
+      # https://github.com/docker/login-action
+      - name: Log into registry ${{ env.REGISTRY }}
+        if: github.event_name != 'pull_request'
+        uses: docker/login-action@v2
+        with:
+          registry: ${{ env.REGISTRY }}
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+      # Extract metadata (tags, labels) for Docker
+      # https://github.com/docker/metadata-action
+      - name: Extract Docker metadata
+        id: meta
+        uses: docker/metadata-action@v4
+        with:
+          tags: |
+            type=ref,enable=true,event=branch,suffix=-omnibus
+            type=semver,pattern=v{{major}}.{{minor}}.{{patch}},suffix=-omnibus
+            type=semver,pattern=v{{major}}.{{minor}},suffix=-omnibus
+            type=semver,pattern=v{{major}},suffix=-omnibus
+          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
+      # Build and push Docker image with Buildx (don't push on PR)
+      # https://github.com/docker/build-push-action
+      - name: Build and push Docker image
+        uses: docker/build-push-action@v3
+        with:
+          platforms: linux/amd64,linux/arm64
+          context: .
+          file: docker/Dockerfile
+          push: ${{ github.event_name != 'pull_request' }}
+          tags: ${{ steps.meta.outputs.tags }}
+          labels: ${{ steps.meta.outputs.labels }}
+          # cache-from: type=gha
+          # cache-to: type=gha,mode=max
```
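All three jobs publish to the same image name and differ only in tag suffix, so the semver patterns above fan one git tag out into several Docker tags. As a rough, hypothetical illustration (the tag `v1.2.3` is an example, not part of this diff), the collector job would produce tags like:

```
ghcr.io/analogj/scrutiny:v1.2.3-collector
ghcr.io/analogj/scrutiny:v1.2-collector
ghcr.io/analogj/scrutiny:v1-collector
```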
```diff
@@ -0,0 +1,59 @@
+name: Docker - Nightly
+on:
+  schedule:
+    - cron: '36 12 * * *'
+
+env:
+  REGISTRY: ghcr.io
+  IMAGE_NAME: ${{ github.repository }}
+
+jobs:
+  omnibus:
+    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      packages: write
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v2
+      - name: "Populate frontend version information"
+        run: "cd webapp/frontend && ./git.version.sh"
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v2
+        with:
+          platforms: 'arm64,arm'
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+      # Login against a Docker registry except on PR
+      # https://github.com/docker/login-action
+      - name: Log into registry ${{ env.REGISTRY }}
+        if: github.event_name != 'pull_request'
+        uses: docker/login-action@v2
+        with:
+          registry: ${{ env.REGISTRY }}
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+      # Extract metadata (tags, labels) for Docker
+      # https://github.com/docker/metadata-action
+      - name: Extract Docker metadata
+        id: meta
+        uses: docker/metadata-action@v4
+        with:
+          tags: |
+            type=ref,enable=true,event=branch,suffix=-omnibus-nightly
+            type=ref,enable=true,event=tag,suffix=-omnibus-nightly
+          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
+      # Build and push Docker image with Buildx (don't push on PR)
+      # https://github.com/docker/build-push-action
+      - name: Build and push Docker image
+        uses: docker/build-push-action@v3
+        with:
+          platforms: linux/amd64,linux/arm64
+          context: .
+          file: docker/Dockerfile
+          push: false
+          tags: ${{ steps.meta.outputs.tags }}
+          labels: ${{ steps.meta.outputs.labels }}
+          # cache-from: type=gha
+          # cache-to: type=gha,mode=max
```
+157 −62
```
@@ -13,81 +13,176 @@ on:
      default: 'webapp/backend/pkg/version/version.go'

jobs:
  build:
    name: Build
  release:
    name: Create Release Commit
    runs-on: ubuntu-latest
    container: golang:1.13
    container: ghcr.io/packagrio/packagr:latest-golang
    # Service containers to run with `build` (Required for end-to-end testing)
    services:
      influxdb:
        image: influxdb:2.2
        env:
          DOCKER_INFLUXDB_INIT_MODE: setup
          DOCKER_INFLUXDB_INIT_USERNAME: admin
          DOCKER_INFLUXDB_INIT_PASSWORD: password12345
          DOCKER_INFLUXDB_INIT_ORG: scrutiny
          DOCKER_INFLUXDB_INIT_BUCKET: metrics
          DOCKER_INFLUXDB_INIT_ADMIN_TOKEN: my-super-secret-auth-token
        ports:
          - 8086:8086
    env:
      PROJECT_PATH: /go/src/github.com/analogj/scrutiny
      STATIC: true
    steps:
      - name: Git
        run: |
          apt-get update && apt-get install -y software-properties-common
          add-apt-repository ppa:git-core/ppa && apt-get update && apt-get install -y git
          git --version
      - name: Checkout
        uses: actions/checkout@v2
        uses: actions/checkout@v6
        with:
          fetch-depth: 0
      - name: Bump version
        id: bump_version
        uses: packagrio/action-bumpr-go@master
        with:
          version_bump_type: ${{ github.event.inputs.version_bump_type }}
          version_metadata_path: ${{ github.event.inputs.version_metadata_path }}
          github_token: ${{ secrets.SCRUTINY_GITHUB_TOKEN }}
      - name: Build
        env:
          GOOS: linux
          GOARCH: amd64
        run: |
          mkdir -p $PROJECT_PATH
          cp -a $GITHUB_WORKSPACE/. $PROJECT_PATH/
          cd $PROJECT_PATH

          go mod vendor

          go test -v -tags "static" $(go list ./... | grep -v /vendor/)

          go build -ldflags "-X main.goos=linux -X main.goarch=amd64" -o scrutiny-web-linux-amd64 -tags "static" webapp/backend/cmd/scrutiny/scrutiny.go
          go build -ldflags "-X main.goos=linux -X main.goarch=amd64" -o scrutiny-collector-metrics-linux-amd64 -tags "static" collector/cmd/collector-metrics/collector-metrics.go

          chmod +x scrutiny-web-linux-amd64
          chmod +x scrutiny-collector-metrics-linux-amd64
      - name: Commit
        uses: EndBug/add-and-commit@v4 # You can change this to use a specific version
        with:

          author_name: Jason Kulatunga
          author_email: jason@thesparktree.com
          cwd: ${{ env.PROJECT_PATH }}
          force: false
          signoff: true
          message: '(${{steps.bump_version.outputs.release_version}}) Automated packaging of release by Packagr'
          tag: ${{steps.bump_version.outputs.release_version}}
        env:
          GITHUB_TOKEN: ${{ secrets.SCRUTINY_GITHUB_TOKEN }} # Leave this line unchanged
      - name: Test
        run: |
          make binary-clean binary-test-coverage
      - name: Commit Changes Locally
        id: commit
        uses: packagrio/action-releasr-go@master
        env:
          GITHUB_TOKEN: ${{ secrets.SCRUTINY_GITHUB_TOKEN }} # Leave this line unchanged
        with:
          version_metadata_path: ${{ github.event.inputs.version_metadata_path }}
      - name: Upload workspace
        uses: actions/upload-artifact@v4
        with:
          name: workspace
          include-hidden-files: true
          path: ${{ github.workspace }}/**/*
          retention-days: 1

      - name: Create Release
        id: create_release
        uses: actions/create-release@v1
        env:
          GITHUB_TOKEN: ${{ secrets.SCRUTINY_GITHUB_TOKEN }}
  build:
    name: Build ${{ matrix.cfg.goos }}/${{ matrix.cfg.goarch }}${{ matrix.cfg.goarm }}
    needs: release
    runs-on: ${{ matrix.cfg.on }}
    env:
      GOOS: ${{ matrix.cfg.goos }}
      GOARCH: ${{ matrix.cfg.goarch }}
      GOARM: ${{ matrix.cfg.goarm }}
      STATIC: true
    strategy:
      matrix:
        cfg:
          - { on: ubuntu-latest, goos: linux, goarch: amd64 }
          - { on: ubuntu-latest, goos: linux, goarch: arm, goarm: 5 }
          - { on: ubuntu-latest, goos: linux, goarch: arm, goarm: 6 }
          - { on: ubuntu-latest, goos: linux, goarch: arm, goarm: 7 }
          - { on: ubuntu-latest, goos: linux, goarch: arm64 }
          - { on: macos-latest, goos: darwin, goarch: amd64 }
          - { on: macos-latest, goos: darwin, goarch: arm64 }
          - { on: macos-latest, goos: freebsd, goarch: amd64 }
          - { on: windows-latest, goos: windows, goarch: amd64 }
          - { on: windows-latest, goos: windows, goarch: arm64 }
    steps:
      - name: Download workspace
        uses: actions/download-artifact@v7
        with:
          tag_name: ${{ steps.bump_version.outputs.release_version }}
          release_name: Release ${{ steps.bump_version.outputs.release_version }}
          draft: false
          prerelease: false
      - name: Upload Web Backend Release Asset
        id: upload-release-asset1
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.SCRUTINY_GITHUB_TOKEN }}
          name: workspace
      - uses: actions/setup-go@v6
        with:
          upload_url: ${{ steps.create_release.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing it's ID to get its outputs object, which include a `upload_url`. See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps
          asset_path: ${{ env.PROJECT_PATH }}/scrutiny-web-linux-amd64
          asset_name: scrutiny-web-linux-amd64
          asset_content_type: application/octet-stream
      - name: Upload Collector Release Asset
        id: upload-release-asset2
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.SCRUTINY_GITHUB_TOKEN }}
          go-version: '1.20.1' # The Go version to download (if necessary) and use.
      - name: Build Binaries
        run: |
          make binary-clean binary-all
      - name: Upload artifacts
        uses: actions/upload-artifact@v6
        with:
          upload_url: ${{ steps.create_release.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing it's ID to get its outputs object, which include a `upload_url`. See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps
          asset_path: ${{ env.PROJECT_PATH }}/scrutiny-collector-metrics-linux-amd64
          asset_name: scrutiny-collector-metrics-linux-amd64
          asset_content_type: application/octet-stream
          name: scrutiny-${{ matrix.cfg.goos }}-${{ matrix.cfg.goarch }}${{ case(matrix.cfg.goarm != '', format('-{0}', matrix.cfg.goarm), '') }}.zip
          path: |
            scrutiny-web-${{ matrix.cfg.goos }}-${{ matrix.cfg.goarch }}${{ case(matrix.cfg.goarm != '', format('-{0}', matrix.cfg.goarm), '') }}${{ case(matrix.cfg.goos == 'windows', '.exe', '') }}
            scrutiny-collector-metrics-${{ matrix.cfg.goos }}-${{ matrix.cfg.goarch }}${{ case(matrix.cfg.goarm != '', format('-{0}', matrix.cfg.goarm), '') }}${{ case(matrix.cfg.goos == 'windows', '.exe', '') }}

  build_frontend:
    name: Build Frontend
    needs: release
    runs-on: ubuntu-latest
    container: node:lts-slim
    steps:
      - name: Download workspace
        uses: actions/download-artifact@v7
        with:
          name: workspace
      - name: "Generate frontend version information"
        run: "cd webapp/frontend && chmod +x git.version.sh && ./git.version.sh"
      - name: Build Frontend
        run: |
          apt-get update && apt-get install -y make
          make binary-frontend
          tar -czf scrutiny-web-frontend.tar.gz dist
      - name: Upload artifacts
        uses: actions/upload-artifact@v6
        with:
          name: scrutiny-web-frontend.zip
          path: scrutiny-web-frontend.tar.gz

  release-publish:
    name: Publish Release
    needs:
      - build
      - build_frontend
    runs-on: ubuntu-latest
    steps:
      - name: Download workspace
        uses: actions/download-artifact@v7
        with:
          name: workspace
      - name: Download binaries
        uses: actions/download-artifact@v7
        with:
          merge-multiple: true
          pattern: scrutiny-*.zip
      - name: Download frontend
        uses: actions/download-artifact@v7
        with:
          name: scrutiny-web-frontend.zip
      - name: List
        shell: bash
        run: |
          ls -alt
      - name: Publish Release & Assets
        id: publish
        uses: packagrio/action-publishr-go@master
        env:
          # This is necessary in order to push a commit to the repo
          GITHUB_TOKEN: ${{ secrets.SCRUTINY_GITHUB_TOKEN }} # Leave this line unchanged
        with:
          version_metadata_path: ${{ github.event.inputs.version_metadata_path }}
          upload_assets:
            scrutiny-collector-metrics-darwin-amd64
            scrutiny-collector-metrics-darwin-arm64
            scrutiny-collector-metrics-freebsd-amd64
            scrutiny-collector-metrics-linux-amd64
            scrutiny-collector-metrics-linux-arm-5
            scrutiny-collector-metrics-linux-arm-6
            scrutiny-collector-metrics-linux-arm-7
            scrutiny-collector-metrics-linux-arm64
            scrutiny-collector-metrics-windows-amd64.exe
            scrutiny-collector-metrics-windows-arm64.exe
            scrutiny-web-frontend.tar.gz
            scrutiny-web-darwin-amd64
            scrutiny-web-darwin-arm64
            scrutiny-web-freebsd-amd64
            scrutiny-web-linux-amd64
            scrutiny-web-linux-arm-5
            scrutiny-web-linux-arm-6
            scrutiny-web-linux-arm-7
            scrutiny-web-linux-arm64
            scrutiny-web-windows-amd64.exe
            scrutiny-web-windows-arm64.exe
```
```diff
@@ -1,19 +0,0 @@
-name: Cleanup Artifacts
-
-on:
-  schedule:
-    # Every day at 1am
-    - cron: '0 1 * * *'
-
-jobs:
-  remove-old-artifacts:
-    runs-on: ubuntu-latest
-    timeout-minutes: 10
-
-    steps:
-      - name: Remove old artifacts
-        uses: c-hive/gha-remove-artifacts@v1
-        with:
-          age: '1 day'
-          skip-tags: true
-          skip-recent: 5
```
```diff
@@ -0,0 +1,15 @@
+name: Label sponsors
+on:
+  pull_request:
+    types: [opened]
+  issues:
+    types: [opened]
+jobs:
+  build:
+    name: is-sponsor-label
+    runs-on: ubuntu-latest
+    if: ${{ false }}
+    steps:
+      - uses: JasonEtco/is-sponsor-label-action@v1.2.0
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```
+7 −3
```
@@ -58,9 +58,13 @@ scrutiny.db
/dist/
vendor
/scrutiny
/scrutiny-collector-metrics-linux-amd64
/scrutiny-web-linux-amd64
/scrutiny-collector-metrics-*
/scrutiny-web-*
scrutiny-*.db
scrutiny_test.db

scrutiny.yaml
coverage.txt
/config
/influxdb
.angular
web.log
```
@@ -0,0 +1,73 @@

# AI Usage Policy

scrutiny has strict rules for AI usage:

- **All AI usage in any form must be disclosed.** You must state the tool you used (e.g. Claude Code, Cursor, Amp) along with the extent that the work was AI-assisted.

- **Pull requests created in any way by AI can only be for accepted issues.** Drive-by pull requests that do not reference an accepted issue will be closed. If AI isn't disclosed but a maintainer suspects its use, the PR will be closed. If you want to share code for a non-accepted issue, open a discussion or attach it to an existing discussion.

- **Pull requests created by AI must have been fully verified with human use.** AI must not create hypothetically correct code that hasn't been tested. Importantly, you must not allow AI to write code for platforms or environments you don't have access to manually test on.

- **Issues and discussions can use AI assistance but must have a full human-in-the-loop.** This means that any content generated with AI must have been reviewed _and edited_ by a human before submission. AI is very good at being overly verbose and including noise that distracts from the main point. Humans must do their research and trim this down.

- **No AI-generated media is allowed (art, images, videos, audio, etc.).** Text and code are the only acceptable AI-generated content, per the other rules in this policy.

- **Bad AI drivers will be banned and ridiculed in public.** You've been warned. We love to help junior developers learn and grow, but if you're interested in that then don't use AI, and we'll help you. I'm sorry that bad AI drivers have ruined this for you.

These rules apply only to outside contributions to scrutiny. Maintainers and repeat contributors (with explicit permission) are exempt from these rules and may use AI tools at their discretion; they've proven themselves trustworthy to apply good judgment.

## There are Humans Here

Please remember that scrutiny is maintained by humans.

Every discussion, issue, and pull request is read and reviewed by humans (and sometimes machines, too). It is a boundary point at which people interact with each other and the work done. It is rude and disrespectful to approach this boundary with low-effort, unqualified work, since it puts the burden of validation on the maintainer.

In a perfect world, AI would produce high-quality, accurate work every time. But today, that reality depends on the driver of the AI. And today, most drivers of AI are just not good enough. So, until either the people get better, the AI gets better, or both, we have to have strict rules to protect maintainers.

## AI is Welcome Here

Many maintainers embrace AI tools as a productive tool in their workflow. As a project, scrutiny welcomes AI as a tool!

**Our reason for the strict AI policy is not due to an anti-AI stance**, but instead due to the number of highly unqualified people using AI. It's the people, not the tools, that are the problem.

This section is included to be transparent about the project's usage of AI for people who may disagree with it, and to address the misconception that this policy is anti-AI in nature.

# Credit

Adopted from [ghostty's AI policy](https://github.com/ghostty-org/ghostty/blob/1b7a15899ad40fba4ce020f537055d30eaf99ee8/AI_POLICY.md)
+180 −32
@@ -1,42 +1,190 @@

# Contributing

There are multiple ways to develop on the scrutiny codebase locally. The two most popular are:
- Docker Development Container - only requires docker
- Run Components Locally - requires smartmontools, golang & nodejs installed locally

**Please see our [AI policy](./AI_POLICY.md).**

The Scrutiny repository is a [monorepo](https://en.wikipedia.org/wiki/Monorepo) containing source code for:
- Scrutiny Backend Server (API)
- Scrutiny Frontend Angular SPA
- S.M.A.R.T Collector

Depending on the functionality you are adding, you may need to set up a development environment for 1 or more projects.

# Modifying the Scrutiny Backend Server (API)

1. install the [Go runtime](https://go.dev/doc/install) (v1.20+)
2. download the `scrutiny-web-frontend.tar.gz` for the [latest release](https://github.com/AnalogJ/scrutiny/releases/latest). Extract to a folder named `dist`
3. create a `scrutiny.yaml` config file
    ```yaml
    # config file for local development. store as scrutiny.yaml
    version: 1

    web:
      listen:
        port: 8080
        host: 0.0.0.0
      database:
        # can also set absolute path here
        location: ./scrutiny.db
      src:
        frontend:
          path: ./dist
      influxdb:
        retention_policy: false

    log:
      file: 'web.log' #absolute or relative paths allowed, eg. web.log
      level: DEBUG
    ```
4. start an InfluxDB docker container.
    ```bash
    docker run -p 8086:8086 --rm influxdb:2.2
    ```
5. start the scrutiny web server
    ```bash
    go mod vendor
    go run webapp/backend/cmd/scrutiny/scrutiny.go start --config ./scrutiny.yaml
    ```
6. open your browser to [http://localhost:8080/web](http://localhost:8080/web)

# Modifying the Scrutiny Frontend Angular SPA

The frontend is written in Angular. If you're working on the frontend and can use mocked data rather than a real backend, you can follow the instructions below:

1. install [NodeJS](https://nodejs.org/en/download/)
2. start the Angular Frontend Application
    ```bash
    cd webapp/frontend
    npm install
    npm run start -- --serve-path="/web/" --port 4200
    ```
3. open your browser and visit [http://localhost:4200/web](http://localhost:4200/web)

# Modifying both Scrutiny Backend and Frontend Applications

If you're developing a feature that requires changes to the backend and the frontend, or a frontend feature that requires real data, you'll need to follow the steps below:

1. install the [Go runtime](https://go.dev/doc/install) (v1.20+)
2. install [NodeJS](https://nodejs.org/en/download/)
3. create a `scrutiny.yaml` config file
    ```yaml
    # config file for local development. store as scrutiny.yaml
    version: 1

    web:
      listen:
        port: 8080
        host: 0.0.0.0
      database:
        # can also set absolute path here
        location: ./scrutiny.db
      src:
        frontend:
          path: ./dist
      influxdb:
        retention_policy: false

    log:
      file: 'web.log' #absolute or relative paths allowed, eg. web.log
      level: DEBUG
    ```
4. start an InfluxDB docker container.
    ```bash
    docker run -p 8086:8086 --rm influxdb:2.2
    ```
5. build the Angular Frontend Application
    ```bash
    cd webapp/frontend
    npm install
    npm run build:prod -- --watch --output-path=../../dist
    # Note: if you do not add `--prod` flag, app will display mocked data for api calls.
    ```
6. start the scrutiny web server
    ```bash
    go mod vendor
    go run webapp/backend/cmd/scrutiny/scrutiny.go start --config ./scrutiny.yaml
    ```
7. open your browser to [http://localhost:8080/web](http://localhost:8080/web)

If you'd like to populate the database with some test data, you can run the following commands:

> NOTE: you may need to update the `local_time` key within the JSON file, any timestamps older than ~3 weeks will be automatically ignored
> (since the downsampling & retention policy takes effect at 2 weeks)
> This is done automatically by the `webapp/backend/pkg/models/testdata/helper.go` script

## Docker Development
```
docker build -f docker/Dockerfile . -t analogj/scrutiny
docker run -it --rm -p 9090:8080 -v /run:/run -v /dev/disk:/dev/disk --privileged analogj/scrutiny
/scrutiny/bin/scrutiny-collector-metrics run
docker run -p 8086:8086 --rm influxdb:2.2


# curl -X POST -H "Content-Type: application/json" -d @webapp/backend/pkg/web/testdata/register-devices-req.json localhost:8080/api/devices/register
# curl -X POST -H "Content-Type: application/json" -d @webapp/backend/pkg/models/testdata/smart-ata.json localhost:8080/api/device/0x5000cca264eb01d7/smart
# curl -X POST -H "Content-Type: application/json" -d @webapp/backend/pkg/models/testdata/smart-ata-date.json localhost:8080/api/device/0x5000cca264eb01d7/smart
# curl -X POST -H "Content-Type: application/json" -d @webapp/backend/pkg/models/testdata/smart-ata-date2.json localhost:8080/api/device/0x5000cca264eb01d7/smart
# curl -X POST -H "Content-Type: application/json" -d @webapp/backend/pkg/models/testdata/smart-fail2.json localhost:8080/api/device/0x5000cca264ec3183/smart
# curl -X POST -H "Content-Type: application/json" -d @webapp/backend/pkg/models/testdata/smart-nvme.json localhost:8080/api/device/0x5002538e40a22954/smart
# curl -X POST -H "Content-Type: application/json" -d @webapp/backend/pkg/models/testdata/smart-scsi.json localhost:8080/api/device/0x5000cca252c859cc/smart
# curl -X POST -H "Content-Type: application/json" -d @webapp/backend/pkg/models/testdata/smart-scsi2.json localhost:8080/api/device/0x5000cca264ebc248/smart
go run webapp/backend/pkg/models/testdata/helper.go

curl localhost:8080/api/summary

```

## Local Development

### Frontend
The frontend is written in Angular.
If you're working on the frontend and can use mocked data rather than a real backend, you can use
```
cd webapp/frontend && ng serve
```

However, if you need to also run the backend, and use real data, you'll need to run the following command:
```
cd webapp/frontend && ng build --watch --output-path=../../dist --deploy-url="/web/" --base-href="/web/" --prod
```

> Note: if you do not add `--prod` flag, app will display mocked data for api calls.

### Backend
```
go run webapp/backend/cmd/scrutiny/scrutiny.go start --config ./example.scrutiny.yaml
```
Now visit http://localhost:8080

### Collector
# Modifying the Collector
```
brew install smartmontools
go run collector/cmd/collector-metrics/collector-metrics.go run --debug
```

# Debugging

If you need more verbose logs for debugging, you can use the following environmental variables:

- `DEBUG=true` - enables debug level logging on both the `collector` and `webapp`
- `COLLECTOR_DEBUG=true` - enables debug level logging on the `collector`
- `SCRUTINY_DEBUG=true` - enables debug level logging on the `webapp`

In addition, you can instruct scrutiny to write its logs to a file using the following environmental variables:

- `COLLECTOR_LOG_FILE=/tmp/collector.log` - write the `collector` logs to a file
- `SCRUTINY_LOG_FILE=/tmp/web.log` - write the `webapp` logs to a file

Finally, you can copy the files from the scrutiny container to your host using the following command(s):

```
docker cp scrutiny:/tmp/collector.log collector.log
docker cp scrutiny:/tmp/web.log web.log
```
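Putting the pieces above together, here is a minimal sketch of a debug session against a locally built collector (the binary name comes from the Makefile targets; the log path and options are just examples):

```bash
# Run the collector once with debug logging written to a file,
# then inspect the tail of the log.
COLLECTOR_DEBUG=true \
COLLECTOR_LOG_FILE=/tmp/collector.log \
./scrutiny-collector-metrics run

tail -n 50 /tmp/collector.log
```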
# Docker Development

```
docker build -f docker/Dockerfile . -t ghcr.io/analogj/scrutiny:master-omnibus
docker run -it --rm -p 8080:8080 \
  -v /run/udev:/run/udev:ro \
  --cap-add SYS_RAWIO \
  --device=/dev/sda \
  --device=/dev/sdb \
  ghcr.io/analogj/scrutiny:master-omnibus
/opt/scrutiny/bin/scrutiny-collector-metrics run
```

# Running Tests

```bash
docker run -p 8086:8086 -d --rm \
  -e DOCKER_INFLUXDB_INIT_MODE=setup \
  -e DOCKER_INFLUXDB_INIT_USERNAME=admin \
  -e DOCKER_INFLUXDB_INIT_PASSWORD=password12345 \
  -e DOCKER_INFLUXDB_INIT_ORG=scrutiny \
  -e DOCKER_INFLUXDB_INIT_BUCKET=metrics \
  -e DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=my-super-secret-auth-token \
  influxdb:2.2
go test ./...

```
@@ -0,0 +1,133 @@
|
||||
.ONESHELL: # Applies to every targets in the file! .ONESHELL instructs make to invoke a single instance of the shell and provide it with the entire recipe, regardless of how many lines it contains.
|
||||
.SHELLFLAGS = -ec
|
||||
|
||||
########################################################################################################################
|
||||
# Global Env Settings
|
||||
########################################################################################################################
|
||||
|
||||
GO_WORKSPACE ?= /go/src/github.com/analogj/scrutiny
|
||||
|
||||
COLLECTOR_BINARY_NAME = scrutiny-collector-metrics
|
||||
WEB_BINARY_NAME = scrutiny-web
|
||||
LD_FLAGS =
|
||||
|
||||
STATIC_TAGS =
|
||||
# enable multiarch docker image builds
|
||||
DOCKER_TARGETARCH_BUILD_ARG =
|
||||
ifdef TARGETARCH
|
||||
DOCKER_TARGETARCH_BUILD_ARG := $(DOCKER_TARGETARCH_BUILD_ARG) --build-arg TARGETARCH=$(TARGETARCH)
|
||||
endif
|
||||
|
||||
# enable to build static binaries.
|
||||
ifdef STATIC
|
||||
export CGO_ENABLED = 0
|
||||
LD_FLAGS := $(LD_FLAGS) -extldflags=-static
|
||||
STATIC_TAGS := $(STATIC_TAGS) -tags "static netgo"
|
||||
endif
|
||||
ifdef GOOS
|
||||
COLLECTOR_BINARY_NAME := $(COLLECTOR_BINARY_NAME)-$(GOOS)
|
||||
WEB_BINARY_NAME := $(WEB_BINARY_NAME)-$(GOOS)
|
||||
LD_FLAGS := $(LD_FLAGS) -X main.goos=$(GOOS)
|
||||
endif
|
||||
ifdef GOARCH
|
||||
COLLECTOR_BINARY_NAME := $(COLLECTOR_BINARY_NAME)-$(GOARCH)
|
||||
WEB_BINARY_NAME := $(WEB_BINARY_NAME)-$(GOARCH)
|
||||
LD_FLAGS := $(LD_FLAGS) -X main.goarch=$(GOARCH)
|
||||
endif
|
||||
ifdef GOARM
|
||||
COLLECTOR_BINARY_NAME := $(COLLECTOR_BINARY_NAME)-$(GOARM)
|
||||
WEB_BINARY_NAME := $(WEB_BINARY_NAME)-$(GOARM)
|
||||
endif
|
||||
ifeq ($(OS),Windows_NT)
|
||||
COLLECTOR_BINARY_NAME := $(COLLECTOR_BINARY_NAME).exe
|
||||
WEB_BINARY_NAME := $(WEB_BINARY_NAME).exe
|
||||
endif

########################################################################################################################
# Binary
########################################################################################################################
.PHONY: all
all: binary-all

.PHONY: binary-all
binary-all: binary-collector binary-web
	@echo "built binary-collector and binary-web targets"

.PHONY: binary-clean
binary-clean:
	go clean

.PHONY: binary-dep
binary-dep:
	go mod vendor

.PHONY: binary-test
binary-test: binary-dep
	go test -v $(STATIC_TAGS) ./...

.PHONY: binary-test-coverage
binary-test-coverage: binary-dep
	go test -coverprofile=coverage.txt -covermode=atomic -v $(STATIC_TAGS) ./...

.PHONY: binary-collector
binary-collector: binary-dep
	go build -ldflags "$(LD_FLAGS)" -o $(COLLECTOR_BINARY_NAME) $(STATIC_TAGS) ./collector/cmd/collector-metrics/
ifneq ($(OS),Windows_NT)
	chmod +x $(COLLECTOR_BINARY_NAME)
	file $(COLLECTOR_BINARY_NAME) || true
	ldd $(COLLECTOR_BINARY_NAME) || true
	./$(COLLECTOR_BINARY_NAME) || true
endif

.PHONY: binary-web
binary-web: binary-dep
	go build -ldflags "$(LD_FLAGS)" -o $(WEB_BINARY_NAME) $(STATIC_TAGS) ./webapp/backend/cmd/scrutiny/
ifneq ($(OS),Windows_NT)
	chmod +x $(WEB_BINARY_NAME)
	file $(WEB_BINARY_NAME) || true
	ldd $(WEB_BINARY_NAME) || true
	./$(WEB_BINARY_NAME) || true
endif

########################################################################################################################
# Frontend
########################################################################################################################

.PHONY: binary-frontend
# reduce logging, disable angular-cli analytics for ci environment
binary-frontend: export NPM_CONFIG_LOGLEVEL = warn
binary-frontend: export NG_CLI_ANALYTICS = false
binary-frontend:
	cd webapp/frontend
	npm install -g @angular/cli@v13-lts
	mkdir -p $(CURDIR)/dist
	npm ci
	npm run build:prod -- --output-path=$(CURDIR)/dist

.PHONY: binary-frontend-test-coverage
# reduce logging, disable angular-cli analytics for ci environment
binary-frontend-test-coverage:
	cd webapp/frontend
	npm ci
	npx ng test --watch=false --browsers=ChromeHeadless --code-coverage

########################################################################################################################
# Docker
# NOTE: these docker make targets are only used for local development (not used by Github Actions/CI)
# NOTE: docker-web and docker-omnibus require `make binary-frontend` or frontend.tar.gz content in /dist before executing.
########################################################################################################################
.PHONY: docker-collector
docker-collector:
	@echo "building collector docker image"
	docker build $(DOCKER_TARGETARCH_BUILD_ARG) -f docker/Dockerfile.collector -t analogj/scrutiny-dev:collector .

.PHONY: docker-web
docker-web:
	@echo "building web docker image"
	docker build $(DOCKER_TARGETARCH_BUILD_ARG) -f docker/Dockerfile.web -t analogj/scrutiny-dev:web .

.PHONY: docker-omnibus
docker-omnibus:
	@echo "building omnibus docker image"
	docker build $(DOCKER_TARGETARCH_BUILD_ARG) -f docker/Dockerfile -t analogj/scrutiny-dev:omnibus .
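
Since the conditionals above key off ordinary environment variables, the same targets cover native, cross-compiled, and static builds. A minimal usage sketch (these exact invocations are illustrative assumptions, not documented commands):

```bash
# native build of both binaries (binary-dep runs `go mod vendor` first)
make binary-all

# static cross-compile of the collector; STATIC/GOOS/GOARCH feed the ifdef
# blocks above and yield e.g. ./scrutiny-collector-metrics-linux-arm64
STATIC=1 GOOS=linux GOARCH=arm64 make binary-collector

# local multiarch image build; TARGETARCH is forwarded as a docker --build-arg
TARGETARCH=arm64 make docker-omnibus
```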

@@ -14,7 +14,6 @@
[](https://goreportcard.com/report/github.com/AnalogJ/scrutiny)
[](https://github.com/AnalogJ/scrutiny/releases)

WebUI for smartd S.M.A.R.T monitoring

> NOTE: Scrutiny is a Work-in-Progress and still has some rough edges.

@@ -27,7 +26,7 @@ If you run a server with more than a couple of hard drives, you're probably alre

> smartd is a daemon that monitors the Self-Monitoring, Analysis and Reporting Technology (SMART) system built into many ATA, IDE and SCSI-3 hard drives. The purpose of SMART is to monitor the reliability of the hard drive and predict drive failures, and to carry out different types of drive self-tests.

Theses S.M.A.R.T hard drive self-tests can help you detect and replace failing hard drives before they cause permanent data loss. However, there's a couple issues with `smartd`:
These S.M.A.R.T hard drive self-tests can help you detect and replace failing hard drives before they cause permanent data loss. However, there's a couple issues with `smartd`:

- There are more than a hundred S.M.A.R.T attributes, however `smartd` does not differentiate between critical and informational metrics
- `smartd` does not record S.M.A.R.T attribute history, so it can be hard to determine if an attribute is degrading slowly over time.

@@ -47,64 +46,210 @@ Scrutiny is a simple but focused application, with a couple of core features:
- Customized thresholds using real world failure rates
- Temperature tracking
- Provided as an all-in-one Docker image (but can be installed manually)
- (Future) Configurable Alerting/Notifications via Webhooks
- Configurable Alerting/Notifications via Webhooks
- (Future) Hard Drive performance testing & tracking

# Getting Started

## RAID/Virtual Drives

Scrutiny uses `smartctl --scan` to detect devices/drives.

- All RAID controllers supported by `smartctl` are automatically supported by Scrutiny.
- While some RAID controllers support passing through the underlying SMART data to `smartctl`, others do not.
- In some cases `--scan` does not correctly detect the device type, returning [incomplete SMART data](https://github.com/AnalogJ/scrutiny/issues/45).
  Scrutiny supports overriding the detected device type via the config file (see the sketch after this list): [example.collector.yaml](https://github.com/AnalogJ/scrutiny/blob/master/example.collector.yaml)
- If you use docker, you **must** pass through the RAID virtual disk to the container using `--device` (see below)
  - This device may be in `/dev/*` or `/dev/bus/*`.
  - If you're unsure, run `smartctl --scan` on your host, and pass all listed devices to the container.

See [docs/TROUBLESHOOTING_DEVICE_COLLECTOR.md](./docs/TROUBLESHOOTING_DEVICE_COLLECTOR.md) for help
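
As a concrete illustration of such an override, here is a minimal sketch of the `devices` section of a `collector.yaml`. The shape mirrors the scan-override fixtures exercised by the config tests later in this changeset; the key names follow the in-repo example file, and the specific paths and controller types are placeholder assumptions:

```yaml
# collector.yaml (sketch) -- override what `smartctl --scan` detected
devices:
  # simple form: force the SAT driver for one disk
  - device: /dev/sda
    type: 'sat'
  # RAID form: address each disk behind a 3ware controller individually
  - device: /dev/twa0
    type:
      - 3ware,0
      - 3ware,1
      - 3ware,2
```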

## Docker

If you're using Docker, getting started is as simple as running the following command:

> See [docker/example.omnibus.docker-compose.yml](https://github.com/AnalogJ/scrutiny/blob/master/docker/example.omnibus.docker-compose.yml) for a docker-compose file.

```bash
docker run -it --rm -p 8080:8080 \
-v /run/udev:/run/udev:ro \
-v /dev/disk:/dev/disk \
--name scrutiny \
--privileged analogj/scrutiny
docker run -p 8080:8080 -p 8086:8086 --restart unless-stopped \
-v `pwd`/scrutiny:/opt/scrutiny/config \
-v `pwd`/influxdb2:/opt/scrutiny/influxdb \
-v /run/udev:/run/udev:ro \
--cap-add SYS_RAWIO \
--device=/dev/sda \
--device=/dev/sdb \
--name scrutiny \
ghcr.io/analogj/scrutiny:master-omnibus
```

- `/run/udev` and `/dev/disk` are necessary to provide the Scrutiny collector with access to your drive metadata.
- `--privileged` is required to ensure that your hard disk devices are accessible within the container (this will be changed in a future release)
- `analogj/scrutiny` is an omnibus image, containing both the webapp server (frontend & api) as well as the S.M.A.R.T metric collector. (see below)
- `/run/udev` is necessary to provide the Scrutiny collector with access to your device metadata
- `--cap-add SYS_RAWIO` is necessary to allow `smartctl` permission to query your device SMART data
  - NOTE: If you have **NVMe** drives, you must add `--cap-add SYS_ADMIN` as well. See issue [#26](https://github.com/AnalogJ/scrutiny/issues/26#issuecomment-696817130)
- `--device` entries are required to ensure that your hard disk devices are accessible within the container.
- `ghcr.io/analogj/scrutiny:master-omnibus` is an omnibus image, containing both the webapp server (frontend & api) as well as the S.M.A.R.T metric collector. (see below)

### Hub/Spoke Deployment

In addition to the Omnibus image (available under the `latest` tag) there are 2 other Docker images available:
In addition to the Omnibus image (available under the `latest` tag) you can deploy in Hub/Spoke mode, which requires 3 other Docker images:

- `analogj/scrutiny:collector` - Contains the Scrutiny data collector, `smartctl` binary and cron-like scheduler. You can run one collector on each server.
- `analogj/scrutiny:web` - Contains the Web UI, API and Database. Only one container necessary
- `ghcr.io/analogj/scrutiny:master-collector` - Contains the Scrutiny data collector, `smartctl` binary and cron-like scheduler. You can run one collector on each server.
- `ghcr.io/analogj/scrutiny:master-web` - Contains the Web UI and API. Only one container necessary
- `influxdb:2.2` - InfluxDB image, used by the Web container to persist SMART data. Only one container necessary.
  See [docs/TROUBLESHOOTING_INFLUXDB.md](./docs/TROUBLESHOOTING_INFLUXDB.md)

> See [docker/example.hubspoke.docker-compose.yml](https://github.com/AnalogJ/scrutiny/blob/master/docker/example.hubspoke.docker-compose.yml) for a docker-compose file.

```bash
docker run -it --rm -p 8080:8080 \
--name scrutiny-web \
analogj/scrutiny:web
docker run -p 8086:8086 --restart unless-stopped \
-v `pwd`/influxdb2:/var/lib/influxdb2 \
--name scrutiny-influxdb \
influxdb:2.2

docker run -it --rm \
-v /run/udev:/run/udev:ro \
-v /dev/disk:/dev/disk \
-e SCRUTINY_API_ENDPOINT=http://SCRUTINY_WEB_IPADDRESS:8080 \
--name scrutiny-collector \
--privileged analogj/scrutiny:collector
docker run -p 8080:8080 --restart unless-stopped \
-v `pwd`/scrutiny:/opt/scrutiny/config \
--name scrutiny-web \
ghcr.io/analogj/scrutiny:master-web

docker run --restart unless-stopped \
-v /run/udev:/run/udev:ro \
--cap-add SYS_RAWIO \
--device=/dev/sda \
--device=/dev/sdb \
-e COLLECTOR_API_ENDPOINT=http://SCRUTINY_WEB_IPADDRESS:8080 \
--name scrutiny-collector \
ghcr.io/analogj/scrutiny:master-collector
```

## Manual Installation (without-Docker)

While the easiest way to get started with [Scrutiny is using Docker](https://github.com/AnalogJ/scrutiny#docker), it is possible to run it manually without much work. You can even mix and match, using Docker for one component and a manual installation for the other.

See [docs/INSTALL_MANUAL.md](docs/INSTALL_MANUAL.md) for instructions.

## Usage

Once scrutiny is running, you can open your browser to `http://localhost:8080` and take a look at the dashboard.

Initially it will be empty, however after the first collector run, you'll be greeted with a list of all your hard drives and their current smart status.
If you're using the omnibus image, the collector should already have run, and your dashboard should be populated with every drive that Scrutiny detected. The collector is configured to run once a day, but you can trigger it manually by running the command below.

The collector is configured to run once a day, but you can trigger it manually by running the following command
For users of the docker Hub/Spoke deployment or manual install: initially the dashboard will be empty. After the first collector run, you'll be greeted with a list of all your hard drives and their current smart status.

```
docker exec scrutiny /scrutiny/bin/scrutiny-collector-metrics run
```
```bash
docker exec scrutiny /opt/scrutiny/bin/scrutiny-collector-metrics run
```

# Configuration
We support a global YAML configuration file that must be located at /scrutiny/config/scrutiny.yaml
By default Scrutiny looks for its YAML configuration files in `/opt/scrutiny/config`

There are two configuration files available:

- Webapp/API config via `scrutiny.yaml` - [example.scrutiny.yaml](example.scrutiny.yaml).
- Collector config via `collector.yaml` - [example.collector.yaml](example.collector.yaml).

Neither file is required; however, if provided, they allow you to configure how Scrutiny functions.
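
For orientation, here is a minimal `collector.yaml` sketch. It uses only keys that ship with defaults in the collector's config package (shown later in this changeset), so every entry is optional; the values are illustrative assumptions:

```yaml
# collector.yaml (sketch) -- all keys shown have built-in defaults
host:
  id: '' # optional label, used for grouping devices by host

api:
  endpoint: 'http://localhost:8080' # where the collector sends SMART data

log:
  level: INFO # or DEBUG
  file: ''    # empty logs to STDOUT

commands:
  metrics_smartctl_bin: 'smartctl' # name or absolute path of the smartctl binary
  metrics_smartctl_wait: 0         # seconds to pause between devices
```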

## Cron Schedule
Unfortunately the Cron schedule cannot be configured via the `collector.yaml` (as the collector binary needs to be triggered by a scheduler/cron).
However, if you are using the official `ghcr.io/analogj/scrutiny:master-collector` or `ghcr.io/analogj/scrutiny:master-omnibus` docker images, you can use the `COLLECTOR_CRON_SCHEDULE` environmental variable to override the default cron schedule (daily @ midnight - `0 0 * * *`).

`docker run -e COLLECTOR_CRON_SCHEDULE="0 0 * * *" ...`

## Notifications

Scrutiny supports sending SMART device failure notifications via the following services:

- Custom Script (data provided via environmental variables)
- Email
- Webhooks
- Discord
- Gotify
- Hangouts
- IFTTT
- Join
- Mattermost
- ntfy
- Pushbullet
- Pushover
- Slack
- Teams
- Telegram
- Tulip

Check the `notify.urls` section of [example.scrutiny.yml](example.scrutiny.yaml) for examples.
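
A minimal sketch of what that section can look like in `scrutiny.yaml` (the provider URL formats below are assumptions for illustration; confirm the exact syntax for your service against the example file):

```yaml
# scrutiny.yaml (sketch) -- one entry per notification destination
notify:
  urls:
    - 'discord://token@channel'
    - 'telegram://bot_token@telegram?channels=channel-1'
    - 'script:///opt/scrutiny/config/notify.sh'
```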

For more information and troubleshooting, see the [TROUBLESHOOTING_NOTIFICATIONS.md](./docs/TROUBLESHOOTING_NOTIFICATIONS.md) file

### Testing Notifications

You can test that your notifications are configured correctly by posting an empty payload to the notifications health check API.

```bash
curl -X POST http://localhost:8080/api/health/notify
```

# Debug mode & Log Files
Scrutiny provides various methods to change the log level to debug and generate log files.

## Web Server/API

You can use environmental variables to enable debug logging and/or log files for the web server:

```bash
DEBUG=true
SCRUTINY_LOG_FILE=/tmp/web.log
```

You can configure the log level and log file in the config file:

```yml
log:
  file: '/tmp/web.log'
  level: DEBUG
```

Or if you're not using docker, you can pass CLI arguments to the web server during startup:

```bash
scrutiny start --debug --log-file /tmp/web.log
```

## Collector

You can use environmental variables to enable debug logging and/or log files for the collector:

```bash
DEBUG=true
COLLECTOR_LOG_FILE=/tmp/collector.log
```

Or if you're not using docker, you can pass CLI arguments to the collector during startup:

```bash
scrutiny-collector-metrics run --debug --log-file /tmp/collector.log
```

# Supported Architectures

| Architecture Name | Binaries | Docker |
| --- | --- | --- |
| linux-amd64 | :white_check_mark: | :white_check_mark: |
| linux-arm-5 | :white_check_mark: | |
| linux-arm-6 | :white_check_mark: | |
| linux-arm-7 | :white_check_mark: | web/collector only. see [#236](https://github.com/AnalogJ/scrutiny/issues/236) |
| linux-arm64 | :white_check_mark: | :white_check_mark: |
| freebsd-amd64 | :white_check_mark: | |
| macos-amd64 | :white_check_mark: | :white_check_mark: |
| macos-arm64 | :white_check_mark: | :white_check_mark: |
| windows-amd64 | :white_check_mark: | WIP, see [#15](https://github.com/AnalogJ/scrutiny/issues/15) |
| windows-arm64 | :white_check_mark: | |

Check the [example.scrutiny.yml](example.scrutiny.yaml) file for a fully commented version.

# Contributing

@@ -120,7 +265,8 @@ We use SemVer for versioning. For the versions available, see the tags on this r

# Authors

Jason Kulatunga - Initial Development - @AnalogJ
* Jason Kulatunga - Initial Development - [@AnalogJ](https://github.com/AnalogJ/)
* Aram Akhavan - Maintenance - [@kaysond](https://github.com/kaysond/)

# Licenses

@@ -1,13 +1,17 @@
package main

import (
	"encoding/json"
	"fmt"
	"github.com/analogj/scrutiny/collector/pkg/collector"
	"github.com/analogj/scrutiny/collector/pkg/config"
	"github.com/analogj/scrutiny/collector/pkg/errors"
	"github.com/analogj/scrutiny/webapp/backend/pkg/version"
	"github.com/sirupsen/logrus"
	"io"
	"log"
	"os"
	"strings"
	"time"

	utils "github.com/analogj/go-util/utils"
@@ -20,6 +24,26 @@ var goarch string

func main() {

	config, err := config.Create()
	if err != nil {
		fmt.Printf("FATAL: %+v\n", err)
		os.Exit(1)
	}

	configFilePath := "/opt/scrutiny/config/collector.yaml"
	configFilePathAlternative := "/opt/scrutiny/config/collector.yml"
	if !utils.FileExists(configFilePath) && utils.FileExists(configFilePathAlternative) {
		configFilePath = configFilePathAlternative
	}

	//we're going to load the config file manually, since we need to validate it.
	err = config.ReadConfig(configFilePath) // Find and read the config file
	if _, ok := err.(errors.ConfigFileMissingError); ok { // Handle errors reading the config file
		//ignore "could not find config file"
	} else if err != nil {
		os.Exit(1)
	}

	cli.CommandHelpTemplate = `NAME:
   {{.HelpName}} - {{.Usage}}
USAGE:

@@ -75,30 +99,48 @@ OPTIONS:
			Name:  "run",
			Usage: "Run the scrutiny smartctl metrics collector",
			Action: func(c *cli.Context) error {

				collectorLogger := logrus.WithFields(logrus.Fields{
					"type": "metrics",
				})
				if c.IsSet("config") {
					err = config.ReadConfig(c.String("config")) // Find and read the config file
					if err != nil { // Handle errors reading the config file
						//ignore "could not find config file"
						fmt.Printf("Could not find config file at specified path: %s", c.String("config"))
						return err
					}
				}
				//override config with flags if set
				if c.IsSet("host-id") {
					config.Set("host.id", c.String("host-id")) // set/override the host-id using CLI.
				}

				if c.Bool("debug") {
					logrus.SetLevel(logrus.DebugLevel)
				} else {
					logrus.SetLevel(logrus.InfoLevel)
					config.Set("log.level", "DEBUG")
				}

				if c.IsSet("log-file") {
					logFile, err := os.OpenFile(c.String("log-file"), os.O_CREATE|os.O_WRONLY, 0644)
					if err != nil {
						logrus.Errorf("Failed to open log file %s for output: %s", c.String("log-file"), err)
						return err
					}
					defer logFile.Close()
					logrus.SetOutput(io.MultiWriter(os.Stderr, logFile))
					config.Set("log.file", c.String("log-file"))
				}

				if c.IsSet("api-endpoint") {
					//if the user is providing an api-endpoint with a basepath (eg. http://localhost:8080/scrutiny),
					//we need to ensure the basepath has a trailing slash, otherwise the url.Parse() path concatenation doesn't work.
					apiEndpoint := strings.TrimSuffix(c.String("api-endpoint"), "/") + "/"
					config.Set("api.endpoint", apiEndpoint)
				}

				collectorLogger, logFile, err := CreateLogger(config)
				if logFile != nil {
					defer logFile.Close()
				}
				if err != nil {
					return err
				}

				settingsData, err := json.MarshalIndent(config.AllSettings(), "", "\t")
				collectorLogger.Debug(string(settingsData), err)
				metricCollector, err := collector.CreateMetricsCollector(
					config,
					collectorLogger,
					c.String("api-endpoint"),
					config.GetString("api.endpoint"),
				)

				if err != nil {

@@ -109,32 +151,66 @@ OPTIONS:
			},

			Flags: []cli.Flag{
				&cli.StringFlag{
					Name:  "config",
					Usage: "Specify the path to the devices file",
				},
				&cli.StringFlag{
					Name:    "api-endpoint",
					Usage:   "The api server endpoint",
					Value:   "http://localhost:8080",
					EnvVars: []string{"SCRUTINY_API_ENDPOINT"},
					EnvVars: []string{"COLLECTOR_API_ENDPOINT", "SCRUTINY_API_ENDPOINT"},
					//SCRUTINY_API_ENDPOINT is deprecated, but kept for backwards compatibility
				},

				&cli.StringFlag{
					Name:  "log-file",
					Usage: "Path to file for logging. Leave empty to use STDOUT",
					Value: "",
					Name:    "log-file",
					Usage:   "Path to file for logging. Leave empty to use STDOUT",
					EnvVars: []string{"COLLECTOR_LOG_FILE"},
				},

				&cli.BoolFlag{
					Name:    "debug",
					Usage:   "Enable debug logging",
					EnvVars: []string{"DEBUG"},
					EnvVars: []string{"COLLECTOR_DEBUG", "DEBUG"},
				},

				&cli.StringFlag{
					Name:    "host-id",
					Usage:   "Host identifier/label, used for grouping devices",
					Value:   "",
					EnvVars: []string{"COLLECTOR_HOST_ID"},
				},
			},
		},
	},
}

	err := app.Run(os.Args)
	err = app.Run(os.Args)
	if err != nil {
		log.Fatal(color.HiRedString("ERROR: %v", err))
	}

}

func CreateLogger(appConfig config.Interface) (*logrus.Entry, *os.File, error) {
	logger := logrus.WithFields(logrus.Fields{
		"type": "metrics",
	})

	if level, err := logrus.ParseLevel(appConfig.GetString("log.level")); err == nil {
		logger.Logger.SetLevel(level)
	} else {
		logger.Logger.SetLevel(logrus.InfoLevel)
	}

	var logFile *os.File
	var err error
	if appConfig.IsSet("log.file") && len(appConfig.GetString("log.file")) > 0 {
		logFile, err = os.OpenFile(appConfig.GetString("log.file"), os.O_CREATE|os.O_WRONLY, 0644)
		if err != nil {
			logger.Logger.Errorf("Failed to open log file %s for output: %s", appConfig.GetString("log.file"), err)
			return nil, logFile, err
		}
		logger.Logger.SetOutput(io.MultiWriter(os.Stderr, logFile))
	}
	return logger, logFile, nil
}

@@ -96,6 +96,7 @@ OPTIONS:
				logrus.SetOutput(io.MultiWriter(os.Stderr, logFile))
			}

				//TODO: pass in the collector, use configuration from collector-metrics
				stCollector, err := collector.CreateSelfTestCollector(
					collectorLogger,
					c.String("api-endpoint"),
@@ -113,19 +114,20 @@ OPTIONS:
					Name:    "api-endpoint",
					Usage:   "The api server endpoint",
					Value:   "http://localhost:8080",
					EnvVars: []string{"SCRUTINY_API_ENDPOINT"},
					EnvVars: []string{"COLLECTOR_API_ENDPOINT"},
				},

				&cli.StringFlag{
					Name:  "log-file",
					Usage: "Path to file for logging. Leave empty to use STDOUT",
					Value: "",
					Name:    "log-file",
					Usage:   "Path to file for logging. Leave empty to use STDOUT",
					Value:   "",
					EnvVars: []string{"COLLECTOR_LOG_FILE"},
				},

				&cli.BoolFlag{
					Name:    "debug",
					Usage:   "Enable debug logging",
					EnvVars: []string{"DEBUG"},
					EnvVars: []string{"COLLECTOR_DEBUG", "DEBUG"},
				},
			},
		},

@@ -8,7 +8,7 @@ import (
	"time"
)

var httpClient = &http.Client{Timeout: 10 * time.Second}
var httpClient = &http.Client{Timeout: 60 * time.Second}

type BaseCollector struct {
	logger *logrus.Entry
@@ -40,6 +40,7 @@ func (c *BaseCollector) postJson(url string, body interface{}, target interface{
	return json.NewDecoder(r.Body).Decode(target)
}

// http://www.linuxguide.it/command_line/linux-manpage/do.php?file=smartctl#sect7
func (c *BaseCollector) LogSmartctlExitCode(exitCode int) {
	if exitCode&0x01 != 0 {
		c.logger.Errorln("smartctl could not parse commandline")
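
`smartctl` encodes several error conditions into a single exit status as a bitmask, which is why `LogSmartctlExitCode` tests individual bits rather than comparing the whole value. A standalone sketch of decoding the low bits (bit meanings paraphrased from the smartctl man page; treat the exact wording as an assumption):

```go
package main

import "fmt"

// decodeSmartctlExitCode reports which conditions are flagged in a
// smartctl exit status; several bits can be set at once.
func decodeSmartctlExitCode(exitCode int) []string {
	conditions := []string{}
	if exitCode&0x01 != 0 { // bit 0
		conditions = append(conditions, "command line did not parse")
	}
	if exitCode&0x02 != 0 { // bit 1
		conditions = append(conditions, "device open failed")
	}
	if exitCode&0x04 != 0 { // bit 2
		conditions = append(conditions, "a SMART or ATA command failed")
	}
	if exitCode&0x08 != 0 { // bit 3
		conditions = append(conditions, "SMART status check returned 'DISK FAILING'")
	}
	return conditions
}

func main() {
	// exit code 6 has bits 1 and 2 set
	fmt.Println(decodeSmartctlExitCode(6))
}
```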

@@ -4,34 +4,41 @@ import (
	"bytes"
	"encoding/json"
	"fmt"
	"github.com/analogj/scrutiny/collector/pkg/common"
	"github.com/analogj/scrutiny/collector/pkg/detect"
	"github.com/analogj/scrutiny/collector/pkg/errors"
	"github.com/analogj/scrutiny/collector/pkg/models"
	"github.com/sirupsen/logrus"
	"net/url"
	"os"
	"os/exec"
	"strings"
	"sync"
	"time"

	"github.com/analogj/scrutiny/collector/pkg/common/shell"
	"github.com/analogj/scrutiny/collector/pkg/config"
	"github.com/analogj/scrutiny/collector/pkg/detect"
	"github.com/analogj/scrutiny/collector/pkg/errors"
	"github.com/analogj/scrutiny/collector/pkg/models"
	"github.com/samber/lo"
	"github.com/sirupsen/logrus"
)

type MetricsCollector struct {
	config config.Interface
	BaseCollector
	apiEndpoint *url.URL
	shell       shell.Interface
}

func CreateMetricsCollector(logger *logrus.Entry, apiEndpoint string) (MetricsCollector, error) {
func CreateMetricsCollector(appConfig config.Interface, logger *logrus.Entry, apiEndpoint string) (MetricsCollector, error) {
	apiEndpointUrl, err := url.Parse(apiEndpoint)
	if err != nil {
		return MetricsCollector{}, err
	}

	sc := MetricsCollector{
		config:      appConfig,
		apiEndpoint: apiEndpointUrl,
		BaseCollector: BaseCollector{
			logger: logger,
		},
		shell: shell.Create(),
	}

	return sc, nil

@@ -44,18 +51,24 @@ func (mc *MetricsCollector) Run() error {
	}

	apiEndpoint, _ := url.Parse(mc.apiEndpoint.String())
	apiEndpoint.Path = "/api/devices/register"
	apiEndpoint, _ = apiEndpoint.Parse("api/devices/register") //this acts like filepath.Join()

	deviceRespWrapper := new(models.DeviceWrapper)

	deviceDetector := detect.Detect{
		Logger: mc.logger,
		Config: mc.config,
	}
	detectedStorageDevices, err := deviceDetector.Start()
	rawDetectedStorageDevices, err := deviceDetector.Start()
	if err != nil {
		return err
	}

	//filter any device with empty wwn (they are invalid)
	detectedStorageDevices := lo.Filter[models.Device](rawDetectedStorageDevices, func(dev models.Device, _ int) bool {
		return len(dev.WWN) > 0
	})

	mc.logger.Infoln("Sending detected devices to API, for filtering & validation")
	jsonObj, _ := json.Marshal(detectedStorageDevices)
	mc.logger.Debugf("Detected devices: %v", string(jsonObj))
@@ -68,19 +81,24 @@ func (mc *MetricsCollector) Run() error {

	if !deviceRespWrapper.Success {
		mc.logger.Errorln("An error occurred while retrieving filtered devices")
		mc.logger.Debugln(deviceRespWrapper)
		return errors.ApiServerCommunicationError("An error occurred while retrieving filtered devices")
	} else {
		mc.logger.Debugln(deviceRespWrapper)
		var wg sync.WaitGroup

		//var wg sync.WaitGroup
		for _, device := range deviceRespWrapper.Data {
			// execute collection in parallel go-routines
			wg.Add(1)
			go mc.Collect(&wg, device.WWN, device.DeviceName, device.DeviceType)
			//wg.Add(1)
			//go mc.Collect(&wg, device.WWN, device.DeviceName, device.DeviceType)
			mc.Collect(device.WWN, device.DeviceName, device.DeviceType)

			if mc.config.GetInt("commands.metrics_smartctl_wait") > 0 {
				time.Sleep(time.Duration(mc.config.GetInt("commands.metrics_smartctl_wait")) * time.Second)
			}
		}

		mc.logger.Infoln("Main: Waiting for workers to finish")
		wg.Wait()
		//mc.logger.Infoln("Main: Waiting for workers to finish")
		//wg.Wait()
		mc.logger.Infoln("Main: Completed")
	}

@@ -89,27 +107,33 @@ func (mc *MetricsCollector) Run() error {

func (mc *MetricsCollector) Validate() error {
	mc.logger.Infoln("Verifying required tools")
	_, lookErr := exec.LookPath("smartctl")
	_, lookErr := exec.LookPath(mc.config.GetString("commands.metrics_smartctl_bin"))

	if lookErr != nil {
		return errors.DependencyMissingError("smartctl is missing")
		return errors.DependencyMissingError(fmt.Sprintf("%s binary is missing", mc.config.GetString("commands.metrics_smartctl_bin")))
	}

	return nil
}

func (mc *MetricsCollector) Collect(wg *sync.WaitGroup, deviceWWN string, deviceName string, deviceType string) {
	defer wg.Done()
// func (mc *MetricsCollector) Collect(wg *sync.WaitGroup, deviceWWN string, deviceName string, deviceType string) {
func (mc *MetricsCollector) Collect(deviceWWN string, deviceName string, deviceType string) {
	//defer wg.Done()
	if len(deviceWWN) == 0 {
		mc.logger.Errorf("no device WWN detected for %s. Skipping collection for this device (no data association possible).\n", deviceName)
		return
	}
	mc.logger.Infof("Collecting smartctl results for %s\n", deviceName)

	args := []string{"-a", "-j"}
	fullDeviceName := fmt.Sprintf("%s%s", detect.DevicePrefix(), deviceName)
	args := strings.Split(mc.config.GetCommandMetricsSmartArgs(fullDeviceName), " ")
	//only include the device type if it's a non-standard one. In some cases ata drives are detected as scsi in docker, and metadata is lost.
	if len(deviceType) > 0 && deviceType != "scsi" && deviceType != "ata" {
		args = append(args, "-d", deviceType)
		args = append(args, "--device", deviceType)
	}
	args = append(args, fmt.Sprintf("%s%s", detect.DevicePrefix(), deviceName))
	args = append(args, fullDeviceName)

	result, err := common.ExecCmd(mc.logger, "smartctl", args, "", os.Environ())
	result, err := mc.shell.Command(mc.logger, mc.config.GetString("commands.metrics_smartctl_bin"), args, "", os.Environ())
	resultBytes := []byte(result)
	if err != nil {
		if exitError, ok := err.(*exec.ExitError); ok {

@@ -133,7 +157,7 @@ func (mc *MetricsCollector) Publish(deviceWWN string, payload []byte) error {
	mc.logger.Infof("Publishing smartctl results for %s\n", deviceWWN)

	apiEndpoint, _ := url.Parse(mc.apiEndpoint.String())
	apiEndpoint.Path = fmt.Sprintf("/api/device/%s/smart", strings.ToLower(deviceWWN))
	apiEndpoint, _ = apiEndpoint.Parse(fmt.Sprintf("api/device/%s/smart", strings.ToLower(deviceWWN)))

	resp, err := httpClient.Post(apiEndpoint.String(), "application/json", bytes.NewBuffer(payload))
	if err != nil {

@@ -0,0 +1,38 @@
package collector

import (
	"github.com/stretchr/testify/require"
	"net/url"
	"testing"
)

func TestApiEndpointParse(t *testing.T) {
	baseURL, _ := url.Parse("http://localhost:8080/")

	url1, _ := baseURL.Parse("d/e")
	require.Equal(t, "http://localhost:8080/d/e", url1.String())

	url2, _ := baseURL.Parse("/d/e")
	require.Equal(t, "http://localhost:8080/d/e", url2.String())
}

func TestApiEndpointParse_WithBasepathWithoutTrailingSlash(t *testing.T) {
	baseURL, _ := url.Parse("http://localhost:8080/scrutiny")

	//This testcase is unexpected and can cause issues. We need to ensure the apiEndpoint always has a trailing slash.
	url1, _ := baseURL.Parse("d/e")
	require.Equal(t, "http://localhost:8080/d/e", url1.String())

	url2, _ := baseURL.Parse("/d/e")
	require.Equal(t, "http://localhost:8080/d/e", url2.String())
}

func TestApiEndpointParse_WithBasepathWithTrailingSlash(t *testing.T) {
	baseURL, _ := url.Parse("http://localhost:8080/scrutiny/")

	url1, _ := baseURL.Parse("d/e")
	require.Equal(t, "http://localhost:8080/scrutiny/d/e", url1.String())

	url2, _ := baseURL.Parse("/d/e")
	require.Equal(t, "http://localhost:8080/d/e", url2.String())
}

@@ -0,0 +1,5 @@
package shell

func Create() Interface {
	return new(localShell)
}

@@ -0,0 +1,11 @@
package shell

import (
	"github.com/sirupsen/logrus"
)

// Create mock using:
// mockgen -source=collector/pkg/common/shell/interface.go -destination=collector/pkg/common/shell/mock/mock_shell.go
type Interface interface {
	Command(logger *logrus.Entry, cmdName string, cmdArgs []string, workingDir string, environ []string) (string, error)
}
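
The generated mock (included further below) lets tests substitute a canned `smartctl` invocation for a real shell call. A minimal sketch of wiring it up with gomock (the test name and canned output are illustrative assumptions):

```go
package shell_test

import (
	"testing"

	mock_shell "github.com/analogj/scrutiny/collector/pkg/common/shell/mock"
	"github.com/golang/mock/gomock"
	"github.com/sirupsen/logrus"
	"github.com/stretchr/testify/require"
)

func TestMockShellCommand(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// the mock satisfies shell.Interface, so it can be injected anywhere
	// a real localShell would be used (e.g. MetricsCollector.shell)
	fakeShell := mock_shell.NewMockInterface(ctrl)
	fakeShell.EXPECT().
		Command(gomock.Any(), "smartctl", gomock.Any(), "", gomock.Any()).
		Return(`{"json_format_version":[1,0]}`, nil)

	out, err := fakeShell.Command(logrus.WithField("exec", "test"), "smartctl", []string{"-a", "-j"}, "", nil)
	require.NoError(t, err)
	require.Contains(t, out, "json_format_version")
}
```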

@@ -1,4 +1,4 @@
package common
package shell

import (
	"bytes"
@@ -10,12 +10,22 @@ import (
	"strings"
)

func ExecCmd(logger *logrus.Entry, cmdName string, cmdArgs []string, workingDir string, environ []string) (string, error) {
type localShell struct{}

func (s *localShell) Command(logger *logrus.Entry, cmdName string, cmdArgs []string, workingDir string, environ []string) (string, error) {
	logger.Infof("Executing command: %s %s", cmdName, strings.Join(cmdArgs, " "))

	cmd := exec.Command(cmdName, cmdArgs...)
	var stdBuffer bytes.Buffer
	mw := io.MultiWriter(logger.Logger.Out, &stdBuffer)

	logWriters := []io.Writer{
		&stdBuffer,
	}
	if logger.Logger.Level == logrus.DebugLevel {
		logWriters = append(logWriters, logger.Logger.Out)
	}

	mw := io.MultiWriter(logWriters...)

	cmd.Stdout = mw
	cmd.Stderr = mw

@@ -1,33 +1,33 @@
package common_test
package shell

import (
	"github.com/analogj/scrutiny/collector/pkg/common"
	"github.com/sirupsen/logrus"
	"github.com/stretchr/testify/require"
	"os/exec"
	"testing"
)

func TestExecCmd(t *testing.T) {
func TestLocalShellCommand(t *testing.T) {
	t.Parallel()

	//setup
	testShell := localShell{}
	//test
	result, err := common.ExecCmd(logrus.WithField("exec", "test"), "echo", []string{"hello world"}, "", nil)
	result, err := testShell.Command(logrus.WithField("exec", "test"), "echo", []string{"hello world"}, "", nil)

	//assert
	require.NoError(t, err)
	require.Equal(t, "hello world\n", result)
}

func TestExecCmd_Date(t *testing.T) {
func TestLocalShellCommand_Date(t *testing.T) {
	t.Parallel()

	//setup
	testShell := localShell{}

	//test
	_, err := common.ExecCmd(logrus.WithField("exec", "test"), "date", []string{}, "", nil)
	_, err := testShell.Command(logrus.WithField("exec", "test"), "date", []string{}, "", nil)

	//assert
	require.NoError(t, err)

@@ -51,13 +51,14 @@ func TestExecCmd_Date(t *testing.T) {
//}
//

func TestExecCmd_InvalidCommand(t *testing.T) {
func TestLocalShellCommand_InvalidCommand(t *testing.T) {
	t.Parallel()

	//setup
	testShell := localShell{}

	//test
	_, err := common.ExecCmd(logrus.WithField("exec", "test"), "invalid_binary", []string{}, "", nil)
	_, err := testShell.Command(logrus.WithField("exec", "test"), "invalid_binary", []string{}, "", nil)

	//assert
	_, castOk := err.(*exec.ExitError)

@@ -0,0 +1,50 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: collector/pkg/common/shell/interface.go

// Package mock_shell is a generated GoMock package.
package mock_shell

import (
	reflect "reflect"

	gomock "github.com/golang/mock/gomock"
	logrus "github.com/sirupsen/logrus"
)

// MockInterface is a mock of Interface interface.
type MockInterface struct {
	ctrl     *gomock.Controller
	recorder *MockInterfaceMockRecorder
}

// MockInterfaceMockRecorder is the mock recorder for MockInterface.
type MockInterfaceMockRecorder struct {
	mock *MockInterface
}

// NewMockInterface creates a new mock instance.
func NewMockInterface(ctrl *gomock.Controller) *MockInterface {
	mock := &MockInterface{ctrl: ctrl}
	mock.recorder = &MockInterfaceMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder {
	return m.recorder
}

// Command mocks base method.
func (m *MockInterface) Command(logger *logrus.Entry, cmdName string, cmdArgs []string, workingDir string, environ []string) (string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Command", logger, cmdName, cmdArgs, workingDir, environ)
	ret0, _ := ret[0].(string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Command indicates an expected call of Command.
func (mr *MockInterfaceMockRecorder) Command(logger, cmdName, cmdArgs, workingDir, environ interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Command", reflect.TypeOf((*MockInterface)(nil).Command), logger, cmdName, cmdArgs, workingDir, environ)
}

@@ -0,0 +1,211 @@
package config

import (
	"fmt"
	"github.com/analogj/go-util/utils"
	"github.com/analogj/scrutiny/collector/pkg/errors"
	"github.com/analogj/scrutiny/collector/pkg/models"
	"github.com/mitchellh/mapstructure"
	"github.com/spf13/viper"
	"log"
	"os"
	"sort"
	"strings"
)

// When initializing this class the following methods must be called:
// Config.New
// Config.Init
// This is done automatically when created via the Factory.
type configuration struct {
	*viper.Viper

	deviceOverrides []models.ScanOverride
}

//Viper uses the following precedence order. Each item takes precedence over the item below it:
// explicit call to Set
// flag
// env
// config
// key/value store
// default

func (c *configuration) Init() error {
	c.Viper = viper.New()
	//set defaults
	c.SetDefault("host.id", "")

	c.SetDefault("devices", []string{})

	c.SetDefault("log.level", "INFO")
	c.SetDefault("log.file", "")

	c.SetDefault("api.endpoint", "http://localhost:8080")

	c.SetDefault("commands.metrics_smartctl_bin", "smartctl")
	c.SetDefault("commands.metrics_scan_args", "--scan --json")
	c.SetDefault("commands.metrics_info_args", "--info --json")
	c.SetDefault("commands.metrics_smart_args", "--xall --json")
	c.SetDefault("commands.metrics_smartctl_wait", 0)

	//configure env variable parsing.
	c.SetEnvPrefix("COLLECTOR")
	c.SetEnvKeyReplacer(strings.NewReplacer("-", "_", ".", "_"))
	c.AutomaticEnv()

	//c.SetDefault("collect.short.command", "-a -o on -S on")

	c.SetDefault("allow_listed_devices", []string{})

	//if you want to load a non-standard location system config file (~/drawbridge.yml), use ReadConfig
	c.SetConfigType("yaml")
	//c.SetConfigName("drawbridge")
	//c.AddConfigPath("$HOME/")

	//CLI options will be added via the `Set()` function
	return nil
}

func (c *configuration) ReadConfig(configFilePath string) error {
	configFilePath, err := utils.ExpandPath(configFilePath)
	if err != nil {
		return err
	}

	if !utils.FileExists(configFilePath) {
		log.Printf("No configuration file found at %v. Using Defaults.", configFilePath)
		return errors.ConfigFileMissingError("The configuration file could not be found.")
	}

	//validate config file contents
	//err = c.ValidateConfigFile(configFilePath)
	//if err != nil {
	//	log.Printf("Config file at `%v` is invalid: %s", configFilePath, err)
	//	return err
	//}

	log.Printf("Loading configuration file: %s", configFilePath)

	config_data, err := os.Open(configFilePath)
	if err != nil {
		log.Printf("Error reading configuration file: %s", err)
		return err
	}

	err = c.MergeConfig(config_data)
	if err != nil {
		return err
	}

	return c.ValidateConfig()
}

// This function ensures that the merged config works correctly.
func (c *configuration) ValidateConfig() error {

	//TODO:
	// check that device prefix matches OS
	// check that schema of config file is valid

	// check that the collector commands are valid
	commandArgStrings := map[string]string{
		"commands.metrics_scan_args":  c.GetString("commands.metrics_scan_args"),
		"commands.metrics_info_args":  c.GetString("commands.metrics_info_args"),
		"commands.metrics_smart_args": c.GetString("commands.metrics_smart_args"),
	}

	errorStrings := []string{}
	for configKey, commandArgString := range commandArgStrings {
		args := strings.Split(commandArgString, " ")
		//ensure that the args string contains `--json` or `-j` flag
		containsJsonFlag := false
		containsDeviceFlag := false
		for _, flag := range args {
			if strings.HasPrefix(flag, "--json") || strings.HasPrefix(flag, "-j") {
				containsJsonFlag = true
			}
			if strings.HasPrefix(flag, "--device") || strings.HasPrefix(flag, "-d") {
				containsDeviceFlag = true
			}
		}

		if !containsJsonFlag {
			errorStrings = append(errorStrings, fmt.Sprintf("configuration key '%s' is missing '--json' flag", configKey))
		}

		if containsDeviceFlag {
			errorStrings = append(errorStrings, fmt.Sprintf("configuration key '%s' must not contain '--device' or '-d' flag", configKey))
		}
	}
	//sort(errorStrings)
	sort.Strings(errorStrings)

	if len(errorStrings) == 0 {
		return nil
	} else {
		return errors.ConfigValidationError(strings.Join(errorStrings, ", "))
	}
}

func (c *configuration) GetDeviceOverrides() []models.ScanOverride {
	// we have to support 2 types of device types.
	// - simple device type (device_type: 'sat')
	// and list of device types (type: \n- 3ware,0 \n- 3ware,1 \n- 3ware,2)
	// GetString will return "" if this is a list of device types.

	if c.deviceOverrides == nil {
		overrides := []models.ScanOverride{}
		c.UnmarshalKey("devices", &overrides, func(c *mapstructure.DecoderConfig) { c.WeaklyTypedInput = true })
		c.deviceOverrides = overrides
	}

	return c.deviceOverrides
}
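
To make the "2 types of device types" concrete, here is a small standalone sketch of how `WeaklyTypedInput` lets both YAML shapes decode into the same `[]string` field (the `override` struct below is an assumed stand-in for `models.ScanOverride`):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/mitchellh/mapstructure"
	"github.com/spf13/viper"
)

// override mirrors the relevant part of models.ScanOverride (assumed shape).
type override struct {
	Device string   `mapstructure:"device"`
	Type   []string `mapstructure:"type"`
}

func main() {
	v := viper.New()
	v.SetConfigType("yaml")
	// scalar and list forms of `type`, as described in the comment above
	yml := []byte("devices:\n  - device: /dev/sda\n    type: 'sat'\n  - device: /dev/twa0\n    type:\n      - 3ware,0\n      - 3ware,1\n")
	_ = v.ReadConfig(bytes.NewBuffer(yml))

	overrides := []override{}
	// WeaklyTypedInput coerces the scalar 'sat' into []string{"sat"}
	_ = v.UnmarshalKey("devices", &overrides, func(c *mapstructure.DecoderConfig) { c.WeaklyTypedInput = true })
	fmt.Printf("%+v\n", overrides)
	// => [{Device:/dev/sda Type:[sat]} {Device:/dev/twa0 Type:[3ware,0 3ware,1]}]
}
```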

func (c *configuration) GetCommandMetricsInfoArgs(deviceName string) string {
	overrides := c.GetDeviceOverrides()

	for _, deviceOverrides := range overrides {
		if strings.ToLower(deviceName) == strings.ToLower(deviceOverrides.Device) {
			//found matching device
			if len(deviceOverrides.Commands.MetricsInfoArgs) > 0 {
				return deviceOverrides.Commands.MetricsInfoArgs
			} else {
				return c.GetString("commands.metrics_info_args")
			}
		}
	}
	return c.GetString("commands.metrics_info_args")
}

func (c *configuration) GetCommandMetricsSmartArgs(deviceName string) string {
	overrides := c.GetDeviceOverrides()

	for _, deviceOverrides := range overrides {
		if strings.ToLower(deviceName) == strings.ToLower(deviceOverrides.Device) {
			//found matching device
			if len(deviceOverrides.Commands.MetricsSmartArgs) > 0 {
				return deviceOverrides.Commands.MetricsSmartArgs
			} else {
				return c.GetString("commands.metrics_smart_args")
			}
		}
	}
	return c.GetString("commands.metrics_smart_args")
}

func (c *configuration) IsAllowlistedDevice(deviceName string) bool {
	allowList := c.GetStringSlice("allow_listed_devices")
	if len(allowList) == 0 {
		return true
	}

	for _, item := range allowList {
		if item == deviceName {
			return true
		}
	}

	return false
}
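
A sketch of the corresponding YAML, matching the `allow_listed_devices_present.yaml` fixture referenced by the tests below (the exact device list is an assumption). An empty or missing list means every device is allowed:

```yaml
# collector.yaml (sketch) -- only collect metrics for these devices
allow_listed_devices:
  - /dev/sda
  - /dev/sdb
```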

@@ -0,0 +1,172 @@
package config_test

import (
	"github.com/analogj/scrutiny/collector/pkg/config"
	"github.com/analogj/scrutiny/collector/pkg/models"
	"github.com/stretchr/testify/require"
	"path"
	"testing"
)

func TestConfiguration_InvalidConfigPath(t *testing.T) {
	t.Parallel()

	//setup
	testConfig, _ := config.Create()

	//test
	err := testConfig.ReadConfig("does_not_exist.yaml")

	//assert
	require.Error(t, err, "should return an error")
}

func TestConfiguration_GetScanOverrides_Simple(t *testing.T) {
	t.Parallel()

	//setup
	testConfig, _ := config.Create()

	//test
	err := testConfig.ReadConfig(path.Join("testdata", "simple_device.yaml"))
	require.NoError(t, err, "should correctly load simple device config")
	scanOverrides := testConfig.GetDeviceOverrides()

	//assert
	require.Equal(t, []models.ScanOverride{{Device: "/dev/sda", DeviceType: []string{"sat"}, Ignore: false}}, scanOverrides)
}

// fixes #418
func TestConfiguration_GetScanOverrides_DeviceTypeComma(t *testing.T) {
	t.Parallel()

	//setup
	testConfig, _ := config.Create()

	//test
	err := testConfig.ReadConfig(path.Join("testdata", "device_type_comma.yaml"))
	require.NoError(t, err, "should correctly load simple device config")
	scanOverrides := testConfig.GetDeviceOverrides()

	//assert
	require.Equal(t, []models.ScanOverride{
		{Device: "/dev/sda", DeviceType: []string{"sat", "auto"}, Ignore: false},
		{Device: "/dev/sdb", DeviceType: []string{"sat,auto"}, Ignore: false},
	}, scanOverrides)
}

func TestConfiguration_GetScanOverrides_Ignore(t *testing.T) {
	t.Parallel()

	//setup
	testConfig, _ := config.Create()

	//test
	err := testConfig.ReadConfig(path.Join("testdata", "ignore_device.yaml"))
	require.NoError(t, err, "should correctly load ignore device config")
	scanOverrides := testConfig.GetDeviceOverrides()

	//assert
	require.Equal(t, []models.ScanOverride{{Device: "/dev/sda", DeviceType: nil, Ignore: true}}, scanOverrides)
}

func TestConfiguration_GetScanOverrides_Raid(t *testing.T) {
	t.Parallel()

	//setup
	testConfig, _ := config.Create()

	//test
	err := testConfig.ReadConfig(path.Join("testdata", "raid_device.yaml"))
	require.NoError(t, err, "should correctly load ignore device config")
	scanOverrides := testConfig.GetDeviceOverrides()

	//assert
	require.Equal(t, []models.ScanOverride{
		{
			Device:     "/dev/bus/0",
			DeviceType: []string{"megaraid,14", "megaraid,15", "megaraid,18", "megaraid,19", "megaraid,20", "megaraid,21"},
			Ignore:     false,
		},
		{
			Device:     "/dev/twa0",
			DeviceType: []string{"3ware,0", "3ware,1", "3ware,2", "3ware,3", "3ware,4", "3ware,5"},
			Ignore:     false,
		}}, scanOverrides)
}

func TestConfiguration_InvalidCommands_MissingJson(t *testing.T) {
	t.Parallel()

	//setup
	testConfig, _ := config.Create()

	//test
	err := testConfig.ReadConfig(path.Join("testdata", "invalid_commands_missing_json.yaml"))
	require.EqualError(t, err, `ConfigValidationError: "configuration key 'commands.metrics_scan_args' is missing '--json' flag"`, "should throw an error because json flag is missing")
}

func TestConfiguration_InvalidCommands_IncludesDevice(t *testing.T) {
	t.Parallel()

	//setup
	testConfig, _ := config.Create()

	//test
	err := testConfig.ReadConfig(path.Join("testdata", "invalid_commands_includes_device.yaml"))
	require.EqualError(t, err, `ConfigValidationError: "configuration key 'commands.metrics_info_args' must not contain '--device' or '-d' flag, configuration key 'commands.metrics_smart_args' must not contain '--device' or '-d' flag"`, "should throw an error because device flags detected")
}

func TestConfiguration_OverrideCommands(t *testing.T) {
	t.Parallel()

	//setup
	testConfig, _ := config.Create()

	//test
	err := testConfig.ReadConfig(path.Join("testdata", "override_commands.yaml"))
	require.NoError(t, err, "should not throw an error")
	require.Equal(t, "--xall --json -T permissive", testConfig.GetString("commands.metrics_smart_args"))
}

func TestConfiguration_OverrideDeviceCommands_MetricsInfoArgs(t *testing.T) {
	t.Parallel()

	//setup
	testConfig, _ := config.Create()

	//test
	err := testConfig.ReadConfig(path.Join("testdata", "override_device_commands.yaml"))
	require.NoError(t, err, "should correctly override device command")

	//assert
	require.Equal(t, "--info --json -T permissive", testConfig.GetCommandMetricsInfoArgs("/dev/sda"))
	require.Equal(t, "--info --json", testConfig.GetCommandMetricsInfoArgs("/dev/sdb"))
	//require.Equal(t, []models.ScanOverride{{Device: "/dev/sda", DeviceType: nil, Commands: {MetricsInfoArgs: "--info --json -T "}}}, scanOverrides)
}

func TestConfiguration_DeviceAllowList(t *testing.T) {
	t.Parallel()

	t.Run("present", func(t *testing.T) {
		testConfig, err := config.Create()
		require.NoError(t, err)

		require.NoError(t, testConfig.ReadConfig(path.Join("testdata", "allow_listed_devices_present.yaml")))

		require.True(t, testConfig.IsAllowlistedDevice("/dev/sda"), "/dev/sda should be allow listed")
		require.False(t, testConfig.IsAllowlistedDevice("/dev/sdc"), "/dev/sdc should not be allow listed")
	})

	t.Run("missing", func(t *testing.T) {
		testConfig, err := config.Create()
		require.NoError(t, err)

		// Really just any other config where the key is fully missing
		require.NoError(t, testConfig.ReadConfig(path.Join("testdata", "override_device_commands.yaml")))

		// Anything should be allow listed if the key isn't there
		require.True(t, testConfig.IsAllowlistedDevice("/dev/sda"), "/dev/sda should be allow listed")
		require.True(t, testConfig.IsAllowlistedDevice("/dev/sdc"), "/dev/sdc should be allow listed")
	})
}

@@ -0,0 +1,9 @@
package config

func Create() (Interface, error) {
	config := new(configuration)
	if err := config.Init(); err != nil {
		return nil, err
	}
	return config, nil
}

@@ -0,0 +1,30 @@
package config

import (
	"github.com/analogj/scrutiny/collector/pkg/models"
	"github.com/spf13/viper"
)

// Create mock using:
// mockgen -source=collector/pkg/config/interface.go -destination=collector/pkg/config/mock/mock_config.go
type Interface interface {
	Init() error
	ReadConfig(configFilePath string) error
	Set(key string, value interface{})
	SetDefault(key string, value interface{})

	AllSettings() map[string]interface{}
	IsSet(key string) bool
	Get(key string) interface{}
	GetBool(key string) bool
	GetInt(key string) int
	GetString(key string) string
	GetStringSlice(key string) []string
	UnmarshalKey(key string, rawVal interface{}, decoderOpts ...viper.DecoderConfigOption) error

	GetDeviceOverrides() []models.ScanOverride
	GetCommandMetricsInfoArgs(deviceName string) string
	GetCommandMetricsSmartArgs(deviceName string) string

	IsAllowlistedDevice(deviceName string) bool
}

@@ -0,0 +1,261 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: collector/pkg/config/interface.go

// Package mock_config is a generated GoMock package.
package mock_config

import (
	reflect "reflect"

	models "github.com/analogj/scrutiny/collector/pkg/models"
	gomock "github.com/golang/mock/gomock"
	viper "github.com/spf13/viper"
)

// MockInterface is a mock of Interface interface.
type MockInterface struct {
	ctrl     *gomock.Controller
	recorder *MockInterfaceMockRecorder
}

// MockInterfaceMockRecorder is the mock recorder for MockInterface.
type MockInterfaceMockRecorder struct {
	mock *MockInterface
}

// NewMockInterface creates a new mock instance.
func NewMockInterface(ctrl *gomock.Controller) *MockInterface {
	mock := &MockInterface{ctrl: ctrl}
	mock.recorder = &MockInterfaceMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder {
	return m.recorder
}

// AllSettings mocks base method.
func (m *MockInterface) AllSettings() map[string]interface{} {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "AllSettings")
	ret0, _ := ret[0].(map[string]interface{})
	return ret0
}

// AllSettings indicates an expected call of AllSettings.
func (mr *MockInterfaceMockRecorder) AllSettings() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllSettings", reflect.TypeOf((*MockInterface)(nil).AllSettings))
}

// Get mocks base method.
func (m *MockInterface) Get(key string) interface{} {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Get", key)
	ret0, _ := ret[0].(interface{})
	return ret0
}

// Get indicates an expected call of Get.
func (mr *MockInterfaceMockRecorder) Get(key interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), key)
}

// GetBool mocks base method.
func (m *MockInterface) GetBool(key string) bool {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetBool", key)
	ret0, _ := ret[0].(bool)
	return ret0
}

// GetBool indicates an expected call of GetBool.
func (mr *MockInterfaceMockRecorder) GetBool(key interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBool", reflect.TypeOf((*MockInterface)(nil).GetBool), key)
}

// GetCommandMetricsInfoArgs mocks base method.
func (m *MockInterface) GetCommandMetricsInfoArgs(deviceName string) string {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetCommandMetricsInfoArgs", deviceName)
	ret0, _ := ret[0].(string)
	return ret0
}

// GetCommandMetricsInfoArgs indicates an expected call of GetCommandMetricsInfoArgs.
func (mr *MockInterfaceMockRecorder) GetCommandMetricsInfoArgs(deviceName interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCommandMetricsInfoArgs", reflect.TypeOf((*MockInterface)(nil).GetCommandMetricsInfoArgs), deviceName)
}

// GetCommandMetricsSmartArgs mocks base method.
func (m *MockInterface) GetCommandMetricsSmartArgs(deviceName string) string {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetCommandMetricsSmartArgs", deviceName)
	ret0, _ := ret[0].(string)
	return ret0
}

// GetCommandMetricsSmartArgs indicates an expected call of GetCommandMetricsSmartArgs.
func (mr *MockInterfaceMockRecorder) GetCommandMetricsSmartArgs(deviceName interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCommandMetricsSmartArgs", reflect.TypeOf((*MockInterface)(nil).GetCommandMetricsSmartArgs), deviceName)
}

// GetDeviceOverrides mocks base method.
func (m *MockInterface) GetDeviceOverrides() []models.ScanOverride {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetDeviceOverrides")
	ret0, _ := ret[0].([]models.ScanOverride)
	return ret0
}

// GetDeviceOverrides indicates an expected call of GetDeviceOverrides.
func (mr *MockInterfaceMockRecorder) GetDeviceOverrides() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeviceOverrides", reflect.TypeOf((*MockInterface)(nil).GetDeviceOverrides))
}

// GetInt mocks base method.
func (m *MockInterface) GetInt(key string) int {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetInt", key)
	ret0, _ := ret[0].(int)
	return ret0
}

// GetInt indicates an expected call of GetInt.
func (mr *MockInterfaceMockRecorder) GetInt(key interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInt", reflect.TypeOf((*MockInterface)(nil).GetInt), key)
}

// GetString mocks base method.
func (m *MockInterface) GetString(key string) string {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetString", key)
	ret0, _ := ret[0].(string)
	return ret0
}

// GetString indicates an expected call of GetString.
func (mr *MockInterfaceMockRecorder) GetString(key interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetString", reflect.TypeOf((*MockInterface)(nil).GetString), key)
}

// GetStringSlice mocks base method.
func (m *MockInterface) GetStringSlice(key string) []string {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetStringSlice", key)
	ret0, _ := ret[0].([]string)
	return ret0
}

// GetStringSlice indicates an expected call of GetStringSlice.
func (mr *MockInterfaceMockRecorder) GetStringSlice(key interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStringSlice", reflect.TypeOf((*MockInterface)(nil).GetStringSlice), key)
}

// Init mocks base method.
func (m *MockInterface) Init() error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Init")
	ret0, _ := ret[0].(error)
	return ret0
}

// Init indicates an expected call of Init.
func (mr *MockInterfaceMockRecorder) Init() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Init", reflect.TypeOf((*MockInterface)(nil).Init))
}

// IsAllowlistedDevice mocks base method.
func (m *MockInterface) IsAllowlistedDevice(deviceName string) bool {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "IsAllowlistedDevice", deviceName)
	ret0, _ := ret[0].(bool)
	return ret0
}

// IsAllowlistedDevice indicates an expected call of IsAllowlistedDevice.
|
||||
func (mr *MockInterfaceMockRecorder) IsAllowlistedDevice(deviceName interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsAllowlistedDevice", reflect.TypeOf((*MockInterface)(nil).IsAllowlistedDevice), deviceName)
|
||||
}
|
||||
|
||||
// IsSet mocks base method.
|
||||
func (m *MockInterface) IsSet(key string) bool {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "IsSet", key)
|
||||
ret0, _ := ret[0].(bool)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// IsSet indicates an expected call of IsSet.
|
||||
func (mr *MockInterfaceMockRecorder) IsSet(key interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsSet", reflect.TypeOf((*MockInterface)(nil).IsSet), key)
|
||||
}
|
||||
|
||||
// ReadConfig mocks base method.
|
||||
func (m *MockInterface) ReadConfig(configFilePath string) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ReadConfig", configFilePath)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// ReadConfig indicates an expected call of ReadConfig.
|
||||
func (mr *MockInterfaceMockRecorder) ReadConfig(configFilePath interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadConfig", reflect.TypeOf((*MockInterface)(nil).ReadConfig), configFilePath)
|
||||
}
|
||||
|
||||
// Set mocks base method.
|
||||
func (m *MockInterface) Set(key string, value interface{}) {
|
||||
m.ctrl.T.Helper()
|
||||
m.ctrl.Call(m, "Set", key, value)
|
||||
}
|
||||
|
||||
// Set indicates an expected call of Set.
|
||||
func (mr *MockInterfaceMockRecorder) Set(key, value interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Set", reflect.TypeOf((*MockInterface)(nil).Set), key, value)
|
||||
}
|
||||
|
||||
// SetDefault mocks base method.
|
||||
func (m *MockInterface) SetDefault(key string, value interface{}) {
|
||||
m.ctrl.T.Helper()
|
||||
m.ctrl.Call(m, "SetDefault", key, value)
|
||||
}
|
||||
|
||||
// SetDefault indicates an expected call of SetDefault.
|
||||
func (mr *MockInterfaceMockRecorder) SetDefault(key, value interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDefault", reflect.TypeOf((*MockInterface)(nil).SetDefault), key, value)
|
||||
}
|
||||
|
||||
// UnmarshalKey mocks base method.
|
||||
func (m *MockInterface) UnmarshalKey(key string, rawVal interface{}, decoderOpts ...viper.DecoderConfigOption) error {
|
||||
m.ctrl.T.Helper()
|
||||
varargs := []interface{}{key, rawVal}
|
||||
for _, a := range decoderOpts {
|
||||
varargs = append(varargs, a)
|
||||
}
|
||||
ret := m.ctrl.Call(m, "UnmarshalKey", varargs...)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// UnmarshalKey indicates an expected call of UnmarshalKey.
|
||||
func (mr *MockInterfaceMockRecorder) UnmarshalKey(key, rawVal interface{}, decoderOpts ...interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
varargs := append([]interface{}{key, rawVal}, decoderOpts...)
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnmarshalKey", reflect.TypeOf((*MockInterface)(nil).UnmarshalKey), varargs...)
|
||||
}
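For context, a generated mock like this is consumed through gomock's controller/recorder pattern. A minimal sketch (the mock_config alias mirrors the import used in the detect tests later in this changeset; the test itself is hypothetical):

package config_test

import (
    "testing"

    mock_config "github.com/analogj/scrutiny/collector/pkg/config/mock"
    "github.com/golang/mock/gomock"
    "github.com/stretchr/testify/require"
)

func TestMockInterfaceSketch(t *testing.T) {
    ctrl := gomock.NewController(t)
    defer ctrl.Finish() // fails the test if an EXPECT()ed call never happened

    fakeConfig := mock_config.NewMockInterface(ctrl)
    // EXPECT() hands back the recorder; the recorded call is replayed with
    // the canned return value when the code under test calls GetString.
    fakeConfig.EXPECT().GetString("host.id").Return("test-host")

    require.Equal(t, "test-host", fakeConfig.GetString("host.id"))
}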
@@ -0,0 +1,3 @@
allow_listed_devices:
  - /dev/sda
  - /dev/sdb
@@ -0,0 +1,9 @@
version: 1
devices:
  # the scrutiny config parser will detect `sat,auto` as two separate items in a list. If you want to use
  # `-d sat,auto` you must set 'sat,auto' inside a list (see e.g. /dev/sdb)
  - device: /dev/sda
    type: 'sat,auto'
  - device: /dev/sdb
    type:
      - sat,auto
@@ -0,0 +1,4 @@
version: 1
devices:
  - device: /dev/sda
    ignore: true
@@ -0,0 +1,4 @@
commands:
  metrics_scan_args: '--scan --json' # used to detect devices
  metrics_info_args: '--info --json --device=sat' # used to determine device unique ID & register device with Scrutiny
  metrics_smart_args: '--xall --json -d sat' # used to retrieve smart data for each device.
@@ -0,0 +1,4 @@
commands:
  metrics_scan_args: '--scan' # used to detect devices
  metrics_info_args: '--info -j' # used to determine device unique ID & register device with Scrutiny
  metrics_smart_args: '--xall --json' # used to retrieve smart data for each device.
@@ -0,0 +1,4 @@
commands:
  metrics_scan_args: '--scan --json' # used to detect devices
  metrics_info_args: '--info -j' # used to determine device unique ID & register device with Scrutiny
  metrics_smart_args: '--xall --json -T permissive' # used to retrieve smart data for each device.
@@ -0,0 +1,5 @@
version: 1
devices:
  - device: /dev/sda
    commands:
      metrics_info_args: "--info --json -T permissive"
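These per-device commands tie into the GetCommandMetricsInfoArgs method mocked above. A minimal sketch of how the collector consumes the resolved string, splitting on spaces exactly as the detect code below does (buildInfoArgs is a hypothetical helper, not part of this changeset):

package detect

import (
    "strings"

    "github.com/analogj/scrutiny/collector/pkg/config"
)

// buildInfoArgs resolves the smartctl --info arguments for one device.
// GetCommandMetricsInfoArgs presumably returns the per-device override shown
// above when one exists, otherwise the global commands.metrics_info_args value.
func buildInfoArgs(cfg config.Interface, deviceFile string) []string {
    args := strings.Split(cfg.GetCommandMetricsInfoArgs(deviceFile), " ")
    return append(args, deviceFile)
}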
+19
@@ -0,0 +1,19 @@
version: 1
devices:
  - device: /dev/bus/0
    type:
      - megaraid,14
      - megaraid,15
      - megaraid,18
      - megaraid,19
      - megaraid,20
      - megaraid,21

  - device: /dev/twa0
    type:
      - 3ware,0
      - 3ware,1
      - 3ware,2
      - 3ware,3
      - 3ware,4
      - 3ware,5
@@ -0,0 +1,27 @@
version: 1
devices:
  - device: /dev/sda
    type: 'sat'
#
#  # example to show how to ignore a specific disk/device.
#  - device: /dev/sda
#    ignore: true
#
#  # examples showing how to force smartctl to detect disks inside a raid array/virtual disk
#  - device: /dev/bus/0
#    type:
#      - megaraid,14
#      - megaraid,15
#      - megaraid,18
#      - megaraid,19
#      - megaraid,20
#      - megaraid,21
#
#  - device: /dev/twa0
#    type:
#      - 3ware,0
#      - 3ware,1
#      - 3ware,2
#      - 3ware,3
#      - 3ware,4
#      - 3ware,5
+116
-29
@@ -3,17 +3,20 @@ package detect

import (
    "encoding/json"
    "fmt"
    "github.com/analogj/scrutiny/collector/pkg/common"
    "github.com/analogj/scrutiny/collector/pkg/models"
    "github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
    "github.com/denisbrodbeck/machineid"
    "github.com/sirupsen/logrus"
    "os"
    "strings"

    "github.com/analogj/scrutiny/collector/pkg/common/shell"
    "github.com/analogj/scrutiny/collector/pkg/config"
    "github.com/analogj/scrutiny/collector/pkg/models"
    "github.com/analogj/scrutiny/webapp/backend/pkg/models/collector"
    "github.com/sirupsen/logrus"
)

type Detect struct {
    Logger *logrus.Entry
    Config config.Interface
    Shell  shell.Interface
}

//private/common functions
@@ -24,9 +27,10 @@ type Detect struct {
//
// To handle these issues, we have OS specific wrapper functions that update/modify these detected devices.
// models.Device returned from this function only contain the minimum data for smartctl to execute: device type and device name (device file).
func (d *Detect) smartctlScan() ([]models.Device, error) {
func (d *Detect) SmartctlScan() ([]models.Device, error) {
    //we use smartctl to detect all the drives available.
    detectedDeviceConnJson, err := common.ExecCmd(d.Logger, "smartctl", []string{"--scan", "-j"}, "", os.Environ())
    args := strings.Split(d.Config.GetString("commands.metrics_scan_args"), " ")
    detectedDeviceConnJson, err := d.Shell.Command(d.Logger, d.Config.GetString("commands.metrics_smartctl_bin"), args, "", os.Environ())
    if err != nil {
        d.Logger.Errorf("Error scanning for devices: %v", err)
        return nil, err
@@ -39,32 +43,25 @@ func (d *Detect) smartctlScan() ([]models.Device, error) {
        return nil, err
    }

    detectedDevices := []models.Device{}

    for _, detectedDevice := range detectedDeviceConns.Devices {
        detectedDevices = append(detectedDevices, models.Device{
            DeviceType: detectedDevice.Type,
            DeviceName: strings.TrimPrefix(detectedDevice.Name, DevicePrefix()),
        })
    }
    detectedDevices := d.TransformDetectedDevices(detectedDeviceConns)

    return detectedDevices, nil
}

//updates a device model with information from smartctl --scan
// updates a device model with information from smartctl --scan
// It has a couple of issues however:
// - WWN is provided as component data, rather than a "string". We'll have to generate the WWN value ourselves
// - WWN from smartctl only provided for ATA protocol drives, NVMe and SCSI drives do not include WWN.
func (d *Detect) smartCtlInfo(device *models.Device) error {

    args := []string{"--info", "-j"}
func (d *Detect) SmartCtlInfo(device *models.Device) error {
    fullDeviceName := fmt.Sprintf("%s%s", DevicePrefix(), device.DeviceName)
    args := strings.Split(d.Config.GetCommandMetricsInfoArgs(fullDeviceName), " ")
    //only include the device type if it's a non-standard one. In some cases ata drives are detected as scsi in docker, and metadata is lost.
    if len(device.DeviceType) > 0 && device.DeviceType != "scsi" && device.DeviceType != "ata" {
        args = append(args, "-d", device.DeviceType)
        args = append(args, "--device", device.DeviceType)
    }
    args = append(args, fmt.Sprintf("%s%s", DevicePrefix(), device.DeviceName))
    args = append(args, fullDeviceName)

    availableDeviceInfoJson, err := common.ExecCmd(d.Logger, "smartctl", args, "", os.Environ())
    availableDeviceInfoJson, err := d.Shell.Command(d.Logger, d.Config.GetString("commands.metrics_smartctl_bin"), args, "", os.Environ())
    if err != nil {
        d.Logger.Errorf("Could not retrieve device information for %s: %v", device.DeviceName, err)
        return err
@@ -77,16 +74,17 @@ func (d *Detect) smartCtlInfo(device *models.Device) error {
        return err
    }

    //DeviceType and DeviceName are already populated.
    //WWN
    //WWN: this is a serial number/world-wide number that will not change.
    //DeviceType and DeviceName are already populated, however they may change between collector runs (eg. config/host restart)
    //InterfaceType:
    device.ModelName = availableDeviceInfo.ModelName
    device.InterfaceSpeed = availableDeviceInfo.InterfaceSpeed.Current.String
    device.SerialNumber = availableDeviceInfo.SerialNumber
    device.Firmware = availableDeviceInfo.FirmwareVersion
    device.RotationSpeed = availableDeviceInfo.RotationRate
    device.Capacity = availableDeviceInfo.UserCapacity.Bytes
    device.Capacity = availableDeviceInfo.Capacity()
    device.FormFactor = availableDeviceInfo.FormFactor.Name
    device.DeviceType = availableDeviceInfo.Device.Type
    device.DeviceProtocol = availableDeviceInfo.Device.Protocol
    if len(availableDeviceInfo.Vendor) > 0 {
        device.Manufacturer = availableDeviceInfo.Vendor
@@ -100,17 +98,106 @@ func (d *Detect) smartCtlInfo(device *models.Device) error {
            Oui: availableDeviceInfo.Wwn.Oui,
            Id:  availableDeviceInfo.Wwn.ID,
        }
        device.WWN = wwn.ToString()
        device.WWN = strings.ToLower(wwn.ToString())
        d.Logger.Debugf("NAA: %d OUI: %d Id: %d => WWN: %s", wwn.Naa, wwn.Oui, wwn.Id, device.WWN)
    } else {
        d.Logger.Info("Using WWN Fallback")
        d.wwnFallback(device)
    }
    if len(device.WWN) == 0 {
        // no WWN populated after WWN lookup and fallback. we need to throw an error
        errMsg := fmt.Sprintf("no WWN (or fallback) populated for device: %s. Device will be registered, but no data will be published for this device. ", device.DeviceName)
        d.Logger.Errorf(errMsg)
        return fmt.Errorf(errMsg)
    }

    return nil
}

//uses https://github.com/denisbrodbeck/machineid to get a OS specific unique machine ID.
func (d *Detect) getMachineId() (string, error) {
    return machineid.ProtectedID("scrutiny")
// function will remove devices that are marked for "ignore" in config file
// will also add devices that are specified in config file, but "missing" from smartctl --scan
// this function will also update the deviceType to the option specified in config.
func (d *Detect) TransformDetectedDevices(detectedDeviceConns models.Scan) []models.Device {
    groupedDevices := map[string][]models.Device{}

    for _, scannedDevice := range detectedDeviceConns.Devices {

        deviceFile := strings.ToLower(scannedDevice.Name)

        // If the user has defined a device allow list, and this device isn't there, then ignore it
        if !d.Config.IsAllowlistedDevice(deviceFile) {
            continue
        }

        detectedDevice := models.Device{
            HostId:     d.Config.GetString("host.id"),
            DeviceType: scannedDevice.Type,
            DeviceName: strings.TrimPrefix(deviceFile, DevicePrefix()),
        }

        //find (or create) a slice to contain the devices in this group
        if groupedDevices[deviceFile] == nil {
            groupedDevices[deviceFile] = []models.Device{}
        }

        // add this scanned device to the group
        groupedDevices[deviceFile] = append(groupedDevices[deviceFile], detectedDevice)
    }

    //now that we've "grouped" all the devices, let's override any groups specified in the config file.

    for _, overrideDevice := range d.Config.GetDeviceOverrides() {
        overrideDeviceFile := strings.ToLower(overrideDevice.Device)

        if overrideDevice.Ignore {
            // this device file should be deleted if it exists
            delete(groupedDevices, overrideDeviceFile)
        } else {
            //create a new device group, and replace the one generated by smartctl --scan
            overrideDeviceGroup := []models.Device{}

            if overrideDevice.DeviceType != nil {
                for _, overrideDeviceType := range overrideDevice.DeviceType {
                    overrideDeviceGroup = append(overrideDeviceGroup, models.Device{
                        HostId:     d.Config.GetString("host.id"),
                        DeviceType: overrideDeviceType,
                        DeviceName: strings.TrimPrefix(overrideDeviceFile, DevicePrefix()),
                    })
                }
            } else {
                //user may have specified device in config file without device type (default to scanned device type)

                //check if the device file was detected by the scanner
                var deviceType string
                if scannedDevice, foundScannedDevice := groupedDevices[overrideDeviceFile]; foundScannedDevice {
                    if len(scannedDevice) > 0 {
                        //take the device type from the first grouped device
                        deviceType = scannedDevice[0].DeviceType
                    } else {
                        deviceType = "ata"
                    }

                } else {
                    //fallback to ata if no scanned device detected
                    deviceType = "ata"
                }

                overrideDeviceGroup = append(overrideDeviceGroup, models.Device{
                    HostId:     d.Config.GetString("host.id"),
                    DeviceType: deviceType,
                    DeviceName: strings.TrimPrefix(overrideDeviceFile, DevicePrefix()),
                })
            }

            groupedDevices[overrideDeviceFile] = overrideDeviceGroup
        }
    }

    //flatten map
    detectedDevices := []models.Device{}
    for _, group := range groupedDevices {
        detectedDevices = append(detectedDevices, group...)
    }

    return detectedDevices
}
@@ -0,0 +1,410 @@
package detect_test

import (
    "os"
    "strings"
    "testing"

    mock_shell "github.com/analogj/scrutiny/collector/pkg/common/shell/mock"
    mock_config "github.com/analogj/scrutiny/collector/pkg/config/mock"
    "github.com/analogj/scrutiny/collector/pkg/detect"
    "github.com/analogj/scrutiny/collector/pkg/models"
    "github.com/golang/mock/gomock"
    "github.com/sirupsen/logrus"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestDetect_SmartctlScan(t *testing.T) {
    // setup
    mockCtrl := gomock.NewController(t)
    defer mockCtrl.Finish()
    fakeConfig := mock_config.NewMockInterface(mockCtrl)
    fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
    fakeConfig.EXPECT().GetDeviceOverrides().AnyTimes().Return([]models.ScanOverride{})
    fakeConfig.EXPECT().GetString("commands.metrics_smartctl_bin").AnyTimes().Return("smartctl")
    fakeConfig.EXPECT().GetString("commands.metrics_scan_args").AnyTimes().Return("--scan --json")
    fakeConfig.EXPECT().IsAllowlistedDevice(gomock.Any()).AnyTimes().Return(true)

    fakeShell := mock_shell.NewMockInterface(mockCtrl)
    testScanResults, err := os.ReadFile("testdata/smartctl_scan_simple.json")
    fakeShell.EXPECT().Command(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(string(testScanResults), err)

    d := detect.Detect{
        Logger: logrus.WithFields(logrus.Fields{}),
        Shell:  fakeShell,
        Config: fakeConfig,
    }

    // test
    scannedDevices, err := d.SmartctlScan()

    // assert
    require.NoError(t, err)
    require.Equal(t, 7, len(scannedDevices))
    require.Equal(t, "scsi", scannedDevices[0].DeviceType)
}

func TestDetect_SmartctlScan_Megaraid(t *testing.T) {
    // setup
    mockCtrl := gomock.NewController(t)
    defer mockCtrl.Finish()
    fakeConfig := mock_config.NewMockInterface(mockCtrl)
    fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
    fakeConfig.EXPECT().GetDeviceOverrides().AnyTimes().Return([]models.ScanOverride{})
    fakeConfig.EXPECT().GetString("commands.metrics_smartctl_bin").AnyTimes().Return("smartctl")
    fakeConfig.EXPECT().GetString("commands.metrics_scan_args").AnyTimes().Return("--scan --json")
    fakeConfig.EXPECT().IsAllowlistedDevice(gomock.Any()).AnyTimes().Return(true)

    fakeShell := mock_shell.NewMockInterface(mockCtrl)
    testScanResults, err := os.ReadFile("testdata/smartctl_scan_megaraid.json")
    fakeShell.EXPECT().Command(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(string(testScanResults), err)

    d := detect.Detect{
        Logger: logrus.WithFields(logrus.Fields{}),
        Shell:  fakeShell,
        Config: fakeConfig,
    }

    // test
    scannedDevices, err := d.SmartctlScan()

    // assert
    require.NoError(t, err)
    require.Equal(t, 2, len(scannedDevices))
    require.Equal(t, []models.Device{
        {DeviceName: "bus/0", DeviceType: "megaraid,0"},
        {DeviceName: "bus/0", DeviceType: "megaraid,1"},
    }, scannedDevices)
}

func TestDetect_SmartctlScan_Nvme(t *testing.T) {
    // setup
    mockCtrl := gomock.NewController(t)
    defer mockCtrl.Finish()
    fakeConfig := mock_config.NewMockInterface(mockCtrl)
    fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
    fakeConfig.EXPECT().GetDeviceOverrides().AnyTimes().Return([]models.ScanOverride{})
    fakeConfig.EXPECT().GetString("commands.metrics_smartctl_bin").AnyTimes().Return("smartctl")
    fakeConfig.EXPECT().GetString("commands.metrics_scan_args").AnyTimes().Return("--scan --json")
    fakeConfig.EXPECT().IsAllowlistedDevice(gomock.Any()).AnyTimes().Return(true)

    fakeShell := mock_shell.NewMockInterface(mockCtrl)
    testScanResults, err := os.ReadFile("testdata/smartctl_scan_nvme.json")
    fakeShell.EXPECT().Command(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(string(testScanResults), err)

    d := detect.Detect{
        Logger: logrus.WithFields(logrus.Fields{}),
        Shell:  fakeShell,
        Config: fakeConfig,
    }

    // test
    scannedDevices, err := d.SmartctlScan()

    // assert
    require.NoError(t, err)
    require.Equal(t, 1, len(scannedDevices))
    require.Equal(t, []models.Device{
        {DeviceName: "nvme0", DeviceType: "nvme"},
    }, scannedDevices)
}

func TestDetect_TransformDetectedDevices_Empty(t *testing.T) {
    // setup
    mockCtrl := gomock.NewController(t)
    defer mockCtrl.Finish()
    fakeConfig := mock_config.NewMockInterface(mockCtrl)
    fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
    fakeConfig.EXPECT().GetDeviceOverrides().AnyTimes().Return([]models.ScanOverride{})
    fakeConfig.EXPECT().GetString("commands.metrics_smartctl_bin").AnyTimes().Return("smartctl")
    fakeConfig.EXPECT().GetString("commands.metrics_scan_args").AnyTimes().Return("--scan --json")
    fakeConfig.EXPECT().IsAllowlistedDevice(gomock.Any()).AnyTimes().Return(true)

    detectedDevices := models.Scan{
        Devices: []models.ScanDevice{
            {
                Name:     "/dev/sda",
                InfoName: "/dev/sda",
                Protocol: "scsi",
                Type:     "scsi",
            },
        },
    }

    d := detect.Detect{
        Config: fakeConfig,
    }

    // test
    transformedDevices := d.TransformDetectedDevices(detectedDevices)

    // assert
    require.Equal(t, "sda", transformedDevices[0].DeviceName)
    require.Equal(t, "scsi", transformedDevices[0].DeviceType)
}

func TestDetect_TransformDetectedDevices_Ignore(t *testing.T) {
    // setup
    mockCtrl := gomock.NewController(t)
    defer mockCtrl.Finish()
    fakeConfig := mock_config.NewMockInterface(mockCtrl)
    fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
    fakeConfig.EXPECT().GetDeviceOverrides().AnyTimes().Return([]models.ScanOverride{{Device: "/dev/sda", DeviceType: nil, Ignore: true}})
    fakeConfig.EXPECT().GetString("commands.metrics_smartctl_bin").AnyTimes().Return("smartctl")
    fakeConfig.EXPECT().GetString("commands.metrics_scan_args").AnyTimes().Return("--scan --json")
    fakeConfig.EXPECT().IsAllowlistedDevice(gomock.Any()).AnyTimes().Return(true)

    detectedDevices := models.Scan{
        Devices: []models.ScanDevice{
            {
                Name:     "/dev/sda",
                InfoName: "/dev/sda",
                Protocol: "scsi",
                Type:     "scsi",
            },
        },
    }

    d := detect.Detect{
        Config: fakeConfig,
    }

    // test
    transformedDevices := d.TransformDetectedDevices(detectedDevices)

    // assert
    require.Equal(t, []models.Device{}, transformedDevices)
}

func TestDetect_TransformDetectedDevices_Raid(t *testing.T) {
    // setup
    mockCtrl := gomock.NewController(t)
    defer mockCtrl.Finish()
    fakeConfig := mock_config.NewMockInterface(mockCtrl)
    fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
    fakeConfig.EXPECT().GetString("commands.metrics_smartctl_bin").AnyTimes().Return("smartctl")
    fakeConfig.EXPECT().GetString("commands.metrics_scan_args").AnyTimes().Return("--scan --json")
    fakeConfig.EXPECT().IsAllowlistedDevice(gomock.Any()).AnyTimes().Return(true)
    fakeConfig.EXPECT().GetDeviceOverrides().AnyTimes().Return([]models.ScanOverride{
        {
            Device:     "/dev/bus/0",
            DeviceType: []string{"megaraid,14", "megaraid,15", "megaraid,18", "megaraid,19", "megaraid,20", "megaraid,21"},
            Ignore:     false,
        },
        {
            Device:     "/dev/twa0",
            DeviceType: []string{"3ware,0", "3ware,1", "3ware,2", "3ware,3", "3ware,4", "3ware,5"},
            Ignore:     false,
        },
    })
    detectedDevices := models.Scan{
        Devices: []models.ScanDevice{
            {
                Name:     "/dev/bus/0",
                InfoName: "/dev/bus/0",
                Protocol: "scsi",
                Type:     "scsi",
            },
        },
    }

    d := detect.Detect{
        Config: fakeConfig,
    }

    // test
    transformedDevices := d.TransformDetectedDevices(detectedDevices)

    // assert
    require.Equal(t, 12, len(transformedDevices))
}

func TestDetect_TransformDetectedDevices_Simple(t *testing.T) {
    // setup
    mockCtrl := gomock.NewController(t)
    defer mockCtrl.Finish()
    fakeConfig := mock_config.NewMockInterface(mockCtrl)
    fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
    fakeConfig.EXPECT().GetString("commands.metrics_smartctl_bin").AnyTimes().Return("smartctl")
    fakeConfig.EXPECT().GetString("commands.metrics_scan_args").AnyTimes().Return("--scan --json")
    fakeConfig.EXPECT().GetDeviceOverrides().AnyTimes().Return([]models.ScanOverride{{Device: "/dev/sda", DeviceType: []string{"sat+megaraid"}}})
    fakeConfig.EXPECT().IsAllowlistedDevice(gomock.Any()).AnyTimes().Return(true)
    detectedDevices := models.Scan{
        Devices: []models.ScanDevice{
            {
                Name:     "/dev/sda",
                InfoName: "/dev/sda",
                Protocol: "ata",
                Type:     "ata",
            },
        },
    }

    d := detect.Detect{
        Config: fakeConfig,
    }

    // test
    transformedDevices := d.TransformDetectedDevices(detectedDevices)

    // assert
    require.Equal(t, 1, len(transformedDevices))
    require.Equal(t, "sat+megaraid", transformedDevices[0].DeviceType)
}

// test https://github.com/AnalogJ/scrutiny/issues/255#issuecomment-1164024126
func TestDetect_TransformDetectedDevices_WithoutDeviceTypeOverride(t *testing.T) {
    // setup
    mockCtrl := gomock.NewController(t)
    defer mockCtrl.Finish()
    fakeConfig := mock_config.NewMockInterface(mockCtrl)
    fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
    fakeConfig.EXPECT().GetString("commands.metrics_smartctl_bin").AnyTimes().Return("smartctl")
    fakeConfig.EXPECT().GetString("commands.metrics_scan_args").AnyTimes().Return("--scan --json")
    fakeConfig.EXPECT().GetDeviceOverrides().AnyTimes().Return([]models.ScanOverride{{Device: "/dev/sda"}})
    fakeConfig.EXPECT().IsAllowlistedDevice(gomock.Any()).AnyTimes().Return(true)
    detectedDevices := models.Scan{
        Devices: []models.ScanDevice{
            {
                Name:     "/dev/sda",
                InfoName: "/dev/sda",
                Protocol: "ata",
                Type:     "scsi",
            },
        },
    }

    d := detect.Detect{
        Config: fakeConfig,
    }

    // test
    transformedDevices := d.TransformDetectedDevices(detectedDevices)

    // assert
    require.Equal(t, 1, len(transformedDevices))
    require.Equal(t, "scsi", transformedDevices[0].DeviceType)
}

func TestDetect_TransformDetectedDevices_WhenDeviceNotDetected(t *testing.T) {
    // setup
    mockCtrl := gomock.NewController(t)
    defer mockCtrl.Finish()
    fakeConfig := mock_config.NewMockInterface(mockCtrl)
    fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
    fakeConfig.EXPECT().GetString("commands.metrics_smartctl_bin").AnyTimes().Return("smartctl")
    fakeConfig.EXPECT().GetString("commands.metrics_scan_args").AnyTimes().Return("--scan --json")
    fakeConfig.EXPECT().GetDeviceOverrides().AnyTimes().Return([]models.ScanOverride{{Device: "/dev/sda"}})
    detectedDevices := models.Scan{}

    d := detect.Detect{
        Config: fakeConfig,
    }

    // test
    transformedDevices := d.TransformDetectedDevices(detectedDevices)

    // assert
    require.Equal(t, 1, len(transformedDevices))
    require.Equal(t, "ata", transformedDevices[0].DeviceType)
}

func TestDetect_TransformDetectedDevices_AllowListFilters(t *testing.T) {
    mockCtrl := gomock.NewController(t)
    defer mockCtrl.Finish()

    fakeConfig := mock_config.NewMockInterface(mockCtrl)
    fakeConfig.EXPECT().GetString("host.id").AnyTimes().Return("")
    fakeConfig.EXPECT().GetString("commands.metrics_smartctl_bin").AnyTimes().Return("smartctl")
    fakeConfig.EXPECT().GetString("commands.metrics_scan_args").AnyTimes().Return("--scan --json")
    fakeConfig.EXPECT().GetDeviceOverrides().AnyTimes().Return([]models.ScanOverride{{Device: "/dev/sda", DeviceType: []string{"sat+megaraid"}}})
    fakeConfig.EXPECT().IsAllowlistedDevice("/dev/sda").Return(true)
    fakeConfig.EXPECT().IsAllowlistedDevice("/dev/sdb").Return(false)
    detectedDevices := models.Scan{
        Devices: []models.ScanDevice{
            {
                Name:     "/dev/sda",
                InfoName: "/dev/sda",
                Protocol: "ata",
                Type:     "ata",
            },
            {
                Name:     "/dev/sdb",
                InfoName: "/dev/sdb",
                Protocol: "ata",
                Type:     "ata",
            },
        },
    }

    d := detect.Detect{
        Config: fakeConfig,
    }

    // test
    transformedDevices := d.TransformDetectedDevices(detectedDevices)

    // assert
    require.Equal(t, 1, len(transformedDevices))
    require.Equal(t, "sda", transformedDevices[0].DeviceName)
}

func TestDetect_SmartCtlInfo(t *testing.T) {
    t.Run("should report nvme info", func(t *testing.T) {
        ctrl := gomock.NewController(t)
        defer ctrl.Finish()

        const (
            someArgs = "--info --json"

            // device info
            someDeviceName           = "some-device-name"
            someModelName            = "KCD61LUL3T84"
            someSerialNumber         = "61Q0A05UT7B8"
            someFirmware             = "8002"
            someDeviceProtocol       = "NVMe"
            someDeviceType           = "nvme"
            someCapacity       int64 = 3840755982336
        )

        fakeConfig := mock_config.NewMockInterface(ctrl)
        fakeConfig.EXPECT().
            GetCommandMetricsInfoArgs("/dev/" + someDeviceName).
            Return(someArgs)
        fakeConfig.EXPECT().
            GetString("commands.metrics_smartctl_bin").
            Return("smartctl")

        someLogger := logrus.WithFields(logrus.Fields{})

        smartctlInfoResults, err := os.ReadFile("testdata/smartctl_info_nvme.json")
        require.NoError(t, err)

        fakeShell := mock_shell.NewMockInterface(ctrl)
        fakeShell.EXPECT().
            Command(someLogger, "smartctl", append(strings.Split(someArgs, " "), "/dev/"+someDeviceName), "", gomock.Any()).
            Return(string(smartctlInfoResults), err)

        d := detect.Detect{
            Logger: someLogger,
            Shell:  fakeShell,
            Config: fakeConfig,
        }

        someDevice := &models.Device{
            WWN:        "some wwn",
            DeviceName: someDeviceName,
        }

        require.NoError(t, d.SmartCtlInfo(someDevice))

        assert.Equal(t, someDeviceName, someDevice.DeviceName)
        assert.Equal(t, someModelName, someDevice.ModelName)
        assert.Equal(t, someSerialNumber, someDevice.SerialNumber)
        assert.Equal(t, someFirmware, someDevice.Firmware)
        assert.Equal(t, someDeviceProtocol, someDevice.DeviceProtocol)
        assert.Equal(t, someDeviceType, someDevice.DeviceType)
        assert.Equal(t, someCapacity, someDevice.Capacity)
    })
}
@@ -1,6 +1,7 @@
package detect

import (
    "github.com/analogj/scrutiny/collector/pkg/common/shell"
    "github.com/analogj/scrutiny/collector/pkg/models"
    "github.com/jaypipes/ghw"
    "strings"
@@ -11,8 +12,9 @@ func DevicePrefix() string {
}

func (d *Detect) Start() ([]models.Device, error) {
    // call the base/common functionality to get a list of devicess
    detectedDevices, err := d.smartctlScan()
    d.Shell = shell.Create()
    // call the base/common functionality to get a list of devices
    detectedDevices, err := d.SmartctlScan()
    if err != nil {
        return nil, err
    }
@@ -25,7 +27,7 @@ func (d *Detect) Start() ([]models.Device, error) {

    //inflate device info for detected devices.
    for ndx, _ := range detectedDevices {
        d.smartCtlInfo(&detectedDevices[ndx]) //ignore errors.
        d.SmartCtlInfo(&detectedDevices[ndx]) //ignore errors.
    }

    return detectedDevices, nil
@@ -92,7 +94,7 @@ func (d *Detect) wwnFallback(detectedDevice *models.Device) {
    block, err := ghw.Block()
    if err == nil {
        for _, disk := range block.Disks {
            if disk.Name == detectedDevice.DeviceName {
            if disk.Name == detectedDevice.DeviceName && strings.ToLower(disk.WWN) != "unknown" {
                d.Logger.Debugf("Found matching block device. WWN: %s", disk.WWN)
                detectedDevice.WWN = disk.WWN
                break
@@ -105,4 +107,7 @@ func (d *Detect) wwnFallback(detectedDevice *models.Device) {
        d.Logger.Debugf("WWN is empty, falling back to serial number: %s", detectedDevice.SerialNumber)
        detectedDevice.WWN = detectedDevice.SerialNumber
    }

    //wwn must always be lowercase.
    detectedDevice.WWN = strings.ToLower(detectedDevice.WWN)
}
@@ -0,0 +1,51 @@
package detect

import (
    "github.com/analogj/scrutiny/collector/pkg/common/shell"
    "github.com/analogj/scrutiny/collector/pkg/models"
    "github.com/jaypipes/ghw"
    "strings"
)

func DevicePrefix() string {
    return "/dev/"
}

func (d *Detect) Start() ([]models.Device, error) {
    d.Shell = shell.Create()
    // call the base/common functionality to get a list of devices
    detectedDevices, err := d.SmartctlScan()
    if err != nil {
        return nil, err
    }

    //inflate device info for detected devices.
    for ndx, _ := range detectedDevices {
        d.SmartCtlInfo(&detectedDevices[ndx]) //ignore errors.
    }

    return detectedDevices, nil
}

//WWN values NVMe and SCSI
func (d *Detect) wwnFallback(detectedDevice *models.Device) {
    block, err := ghw.Block()
    if err == nil {
        for _, disk := range block.Disks {
            if disk.Name == detectedDevice.DeviceName && strings.ToLower(disk.WWN) != "unknown" {
                d.Logger.Debugf("Found matching block device. WWN: %s", disk.WWN)
                detectedDevice.WWN = disk.WWN
                break
            }
        }
    }

    //no WWN found, or could not open Block devices. Either way, fallback to serial number
    if len(detectedDevice.WWN) == 0 {
        d.Logger.Debugf("WWN is empty, falling back to serial number: %s", detectedDevice.SerialNumber)
        detectedDevice.WWN = detectedDevice.SerialNumber
    }

    //wwn must always be lowercase.
    detectedDevice.WWN = strings.ToLower(detectedDevice.WWN)
}
@@ -1,8 +1,13 @@
package detect

import (
    "fmt"
    "github.com/analogj/scrutiny/collector/pkg/common/shell"
    "github.com/analogj/scrutiny/collector/pkg/models"
    "github.com/jaypipes/ghw"
    "io/ioutil"
    "path/filepath"
    "strings"
)

func DevicePrefix() string {
@@ -10,15 +15,17 @@ func DevicePrefix() string {
}

func (d *Detect) Start() ([]models.Device, error) {
    d.Shell = shell.Create()
    // call the base/common functionality to get a list of devices
    detectedDevices, err := d.smartctlScan()
    detectedDevices, err := d.SmartctlScan()
    if err != nil {
        return nil, err
    }

    //inflate device info for detected devices.
    for ndx, _ := range detectedDevices {
        d.smartCtlInfo(&detectedDevices[ndx]) //ignore errors.
        d.SmartCtlInfo(&detectedDevices[ndx])   //ignore errors.
        populateUdevInfo(&detectedDevices[ndx]) //ignore errors.
    }

    return detectedDevices, nil
@@ -29,7 +36,7 @@ func (d *Detect) wwnFallback(detectedDevice *models.Device) {
    block, err := ghw.Block()
    if err == nil {
        for _, disk := range block.Disks {
            if disk.Name == detectedDevice.DeviceName {
            if disk.Name == detectedDevice.DeviceName && strings.ToLower(disk.WWN) != "unknown" {
                d.Logger.Debugf("Found matching block device. WWN: %s", disk.WWN)
                detectedDevice.WWN = disk.WWN
                break
@@ -42,4 +49,55 @@ func (d *Detect) wwnFallback(detectedDevice *models.Device) {
        d.Logger.Debugf("WWN is empty, falling back to serial number: %s", detectedDevice.SerialNumber)
        detectedDevice.WWN = detectedDevice.SerialNumber
    }

    //wwn must always be lowercase.
    detectedDevice.WWN = strings.ToLower(detectedDevice.WWN)
}

// as discussed in
// - https://github.com/AnalogJ/scrutiny/issues/225
// - https://github.com/jaypipes/ghw/issues/59#issue-361915216
// udev exposes its data in a standardized way under /run/udev/data/....
func populateUdevInfo(detectedDevice *models.Device) error {
    // Get device major:minor numbers
    // `cat /sys/class/block/sda/dev`
    devNo, err := ioutil.ReadFile(filepath.Join("/sys/class/block/", detectedDevice.DeviceName, "dev"))
    if err != nil {
        return err
    }

    // Look up block device in udev runtime database
    // `cat /run/udev/data/b8:0`
    udevID := "b" + strings.TrimSpace(string(devNo))
    udevBytes, err := ioutil.ReadFile(filepath.Join("/run/udev/data/", udevID))
    if err != nil {
        return err
    }

    deviceMountPaths := []string{}
    udevInfo := make(map[string]string)
    for _, udevLine := range strings.Split(string(udevBytes), "\n") {
        if strings.HasPrefix(udevLine, "E:") {
            if s := strings.SplitN(udevLine[2:], "=", 2); len(s) == 2 {
                udevInfo[s[0]] = s[1]
            }
        } else if strings.HasPrefix(udevLine, "S:") {
            deviceMountPaths = append(deviceMountPaths, udevLine[2:])
        }
    }

    //Set additional device information.
    if deviceLabel, exists := udevInfo["ID_FS_LABEL"]; exists {
        detectedDevice.DeviceLabel = deviceLabel
    }
    if deviceUUID, exists := udevInfo["ID_FS_UUID"]; exists {
        detectedDevice.DeviceUUID = deviceUUID
    }
    if deviceSerialID, exists := udevInfo["ID_SERIAL"]; exists {
        detectedDevice.DeviceSerialID = fmt.Sprintf("%s-%s", udevInfo["ID_BUS"], deviceSerialID)
    }

    return nil
}
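For reference, the /run/udev/data entries read above are plain-text dumps where "E:" lines carry key=value environment pairs and "S:" lines carry /dev symlink suffixes. A minimal, self-contained sketch of the same parse over an illustrative (made-up, not from a real device) entry:

package main

import (
    "fmt"
    "strings"
)

func main() {
    // illustrative content of a file like /run/udev/data/b8:0
    sample := "S:disk/by-id/ata-ExampleDisk_SERIAL123\n" +
        "E:ID_BUS=ata\n" +
        "E:ID_SERIAL=ExampleDisk_SERIAL123\n" +
        "E:ID_FS_LABEL=data\n" +
        "E:ID_FS_UUID=2f5f2c3a-b50b-4a6e-9d7e-1b2c3d4e5f6a\n"

    // same split logic as populateUdevInfo: keep only the E: key=value pairs
    udevInfo := map[string]string{}
    for _, line := range strings.Split(sample, "\n") {
        if strings.HasPrefix(line, "E:") {
            if kv := strings.SplitN(line[2:], "=", 2); len(kv) == 2 {
                udevInfo[kv[0]] = kv[1]
            }
        }
    }
    fmt.Println(udevInfo["ID_BUS"] + "-" + udevInfo["ID_SERIAL"]) // ata-ExampleDisk_SERIAL123
}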
@@ -0,0 +1,16 @@
package detect_test

import (
    "github.com/analogj/scrutiny/collector/pkg/detect"
    "github.com/stretchr/testify/require"
    "testing"
)

func TestDevicePrefix(t *testing.T) {
    //setup

    //test

    //assert
    require.Equal(t, "/dev/", detect.DevicePrefix())
}
@@ -1,19 +1,26 @@
package detect

import (
    "github.com/analogj/scrutiny/collector/pkg/common/shell"
    "github.com/analogj/scrutiny/collector/pkg/models"
    "strings"
)

func DevicePrefix() string {
    return ""
}

func (d *Detect) Start() ([]models.Device, error) {
    d.Shell = shell.Create()
    // call the base/common functionality to get a list of devices
    detectedDevices, err := d.smartctlScan()
    detectedDevices, err := d.SmartctlScan()
    if err != nil {
        return nil, err
    }

    //inflate device info for detected devices.
    for ndx, _ := range detectedDevices {
        d.smartCtlInfo(&detectedDevices[ndx]) //ignore errors.
        d.SmartCtlInfo(&detectedDevices[ndx]) //ignore errors.
    }

    return detectedDevices, nil
@@ -26,4 +33,7 @@ func (d *Detect) wwnFallback(detectedDevice *models.Device) {
    if len(detectedDevice.WWN) == 0 {
        detectedDevice.WWN = detectedDevice.SerialNumber
    }

    //wwn must always be lowercase.
    detectedDevice.WWN = strings.ToLower(detectedDevice.WWN)
}
@@ -0,0 +1,48 @@
{
  "json_format_version": [
    1,
    0
  ],
  "smartctl": {
    "version": [
      7,
      2
    ],
    "svn_revision": "5155",
    "platform_info": "x86_64-linux-6.1.69-talos",
    "build_info": "(local build)",
    "argv": [
      "smartctl",
      "--info",
      "--json",
      "/dev/nvme4"
    ],
    "exit_status": 0
  },
  "device": {
    "name": "/dev/nvme4",
    "info_name": "/dev/nvme4",
    "type": "nvme",
    "protocol": "NVMe"
  },
  "model_name": "KCD61LUL3T84",
  "serial_number": "61Q0A05UT7B8",
  "firmware_version": "8002",
  "nvme_pci_vendor": {
    "id": 7695,
    "subsystem_id": 7695
  },
  "nvme_ieee_oui_identifier": 9233294,
  "nvme_total_capacity": 3840755982336,
  "nvme_unallocated_capacity": 0,
  "nvme_controller_id": 1,
  "nvme_version": {
    "string": "1.4",
    "value": 66560
  },
  "nvme_number_of_namespaces": 16,
  "local_time": {
    "time_t": 1706045146,
    "asctime": "Tue Jan 23 21:25:46 2024 UTC"
  }
}
@@ -0,0 +1,35 @@
{
  "json_format_version": [
    1,
    0
  ],
  "smartctl": {
    "version": [
      7,
      1
    ],
    "svn_revision": "5022",
    "platform_info": "x86_64-linux-5.4.0-45-generic",
    "build_info": "(local build)",
    "argv": [
      "smartctl",
      "-j",
      "--scan"
    ],
    "exit_status": 0
  },
  "devices": [
    {
      "name": "/dev/bus/0",
      "info_name": "/dev/bus/0 [megaraid_disk_00]",
      "type": "megaraid,0",
      "protocol": "SCSI"
    },
    {
      "name": "/dev/bus/0",
      "info_name": "/dev/bus/0 [megaraid_disk_01]",
      "type": "megaraid,1",
      "protocol": "SCSI"
    }
  ]
}
@@ -0,0 +1,29 @@
{
  "json_format_version": [
    1,
    0
  ],
  "smartctl": {
    "version": [
      7,
      0
    ],
    "svn_revision": "4883",
    "platform_info": "x86_64-linux-4.19.107-Unraid",
    "build_info": "(local build)",
    "argv": [
      "smartctl",
      "-j",
      "--scan"
    ],
    "exit_status": 0
  },
  "devices": [
    {
      "name": "/dev/nvme0",
      "info_name": "/dev/nvme0",
      "type": "nvme",
      "protocol": "NVMe"
    }
  ]
}
@@ -0,0 +1,65 @@
{
  "json_format_version": [
    1,
    0
  ],
  "smartctl": {
    "version": [
      7,
      0
    ],
    "svn_revision": "4883",
    "platform_info": "x86_64-linux-5.15.32-flatcar",
    "build_info": "(local build)",
    "argv": [
      "smartctl",
      "--scan",
      "-j"
    ],
    "exit_status": 0
  },
  "devices": [
    {
      "name": "/dev/sda",
      "info_name": "/dev/sda",
      "type": "scsi",
      "protocol": "SCSI"
    },
    {
      "name": "/dev/sdb",
      "info_name": "/dev/sdb",
      "type": "scsi",
      "protocol": "SCSI"
    },
    {
      "name": "/dev/sdc",
      "info_name": "/dev/sdc",
      "type": "scsi",
      "protocol": "SCSI"
    },
    {
      "name": "/dev/sdd",
      "info_name": "/dev/sdd",
      "type": "scsi",
      "protocol": "SCSI"
    },
    {
      "name": "/dev/sde",
      "info_name": "/dev/sde",
      "type": "scsi",
      "protocol": "SCSI"
    },
    {
      "name": "/dev/sdf",
      "info_name": "/dev/sdf",
      "type": "scsi",
      "protocol": "SCSI"
    },
    {
      "name": "/dev/sdg",
      "info_name": "/dev/sdg",
      "type": "scsi",
      "protocol": "SCSI"
    }
  ]
}
@@ -2,6 +2,7 @@ package detect

import (
    "fmt"
    "strings"
)

type Wwn struct {
@@ -54,5 +55,5 @@ func (wwn *Wwn) ToString() string {

    //TODO: may need to support additional versions in the future.

    return fmt.Sprintf("%#x", wwnBuffer)
    return strings.ToLower(fmt.Sprintf("%#x", wwnBuffer))
}
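A quick note on this change: assuming wwnBuffer is an unsigned integer (as the %#x verb implies), Go's %#x verb already emits lowercase hex with a 0x prefix, so the ToLower wrapper is defensive rather than strictly required:

    fmt.Sprintf("%#x", uint64(0x5000C500ABCDEF12)) // => "0x5000c500abcdef12"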
@@ -4,6 +4,10 @@ type Device struct {
    WWN string `json:"wwn"`

    DeviceName     string `json:"device_name"`
    DeviceUUID     string `json:"device_uuid"`
    DeviceSerialID string `json:"device_serial_id"`
    DeviceLabel    string `json:"device_label"`

    Manufacturer  string `json:"manufacturer"`
    ModelName     string `json:"model_name"`
    InterfaceType string `json:"interface_type"`
@@ -16,6 +20,10 @@ type Device struct {
    SmartSupport   bool   `json:"smart_support"`
    DeviceProtocol string `json:"device_protocol"` //protocol determines which smart attribute types are available (ATA, NVMe, SCSI)
    DeviceType     string `json:"device_type"`     //device type is used for querying with -d/t flag, should only be used by collector.

    // User provided metadata
    Label  string `json:"label"`
    HostId string `json:"host_id"`
}

type DeviceWrapper struct {
@@ -10,10 +10,11 @@ type Scan struct {
        Argv       []string `json:"argv"`
        ExitStatus int      `json:"exit_status"`
    } `json:"smartctl"`
    Devices []struct {
        Name     string `json:"name"`
        InfoName string `json:"info_name"`
        Type     string `json:"type"`
        Protocol string `json:"protocol"`
    } `json:"devices"`
    Devices []ScanDevice `json:"devices"`
}
type ScanDevice struct {
    Name     string `json:"name"`
    InfoName string `json:"info_name"`
    Type     string `json:"type"`
    Protocol string `json:"protocol"`
}
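Extracting ScanDevice as a named type leaves the JSON decoding in SmartctlScan untouched; a minimal sketch of parsing smartctl scan output into these models (parseScanOutput is a hypothetical helper, mirroring what SmartctlScan does internally):

package detect

import (
    "encoding/json"

    "github.com/analogj/scrutiny/collector/pkg/models"
)

// parseScanOutput decodes the JSON emitted by `smartctl --scan --json`
// into the Scan model and returns the detected device list.
func parseScanOutput(raw string) ([]models.ScanDevice, error) {
    var scan models.Scan
    if err := json.Unmarshal([]byte(raw), &scan); err != nil {
        return nil, err
    }
    return scan.Devices, nil
}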
@@ -0,0 +1,11 @@
package models

type ScanOverride struct {
    Device     string   `mapstructure:"device"`
    DeviceType []string `mapstructure:"type"`
    Ignore     bool     `mapstructure:"ignore"`
    Commands   struct {
        MetricsInfoArgs  string `mapstructure:"metrics_info_args"`
        MetricsSmartArgs string `mapstructure:"metrics_smart_args"`
    } `mapstructure:"commands"`
}
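The mapstructure tags line up with the devices: section of the YAML examples above; a minimal sketch of how such overrides can be decoded with viper (loadOverrides is a hypothetical helper; the real config implementation presumably does the equivalent behind GetDeviceOverrides):

package config

import (
    "github.com/analogj/scrutiny/collector/pkg/models"
    "github.com/spf13/viper"
)

// loadOverrides maps the "devices:" YAML section onto []models.ScanOverride
// using the mapstructure tags declared on the struct.
func loadOverrides(v *viper.Viper) ([]models.ScanOverride, error) {
    overrides := []models.ScanOverride{}
    if err := v.UnmarshalKey("devices", &overrides); err != nil {
        return nil, err
    }
    return overrides, nil
}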
+66
-44
@@ -1,55 +1,77 @@
########
FROM golang:1.14.4-buster as backendbuild
# syntax=docker/dockerfile:1.4
########################################################################################################################
# Omnibus Image
########################################################################################################################

######## Build the frontend
FROM --platform=${BUILDPLATFORM} node AS frontendbuild
WORKDIR /go/src/github.com/analogj/scrutiny
COPY --link . /go/src/github.com/analogj/scrutiny

RUN make binary-frontend


######## Build the backend
FROM golang:1.20-bookworm as backendbuild

WORKDIR /go/src/github.com/analogj/scrutiny

COPY . /go/src/github.com/analogj/scrutiny

RUN go mod vendor && \
    go build -ldflags '-w -extldflags "-static"' -o scrutiny webapp/backend/cmd/scrutiny/scrutiny.go && \
    go build -ldflags '-w -extldflags "-static"' -o scrutiny-collector-selftest collector/cmd/collector-selftest/collector-selftest.go && \
    go build -ldflags '-w -extldflags "-static"' -o scrutiny-collector-metrics collector/cmd/collector-metrics/collector-metrics.go

########
FROM node:lts-slim as frontendbuild

#reduce logging, disable angular-cli analytics for ci environment
ENV NPM_CONFIG_LOGLEVEL=warn NG_CLI_ANALYTICS=false

WORKDIR /scrutiny/src
COPY webapp/frontend /scrutiny/src

RUN npm install -g @angular/cli@9.1.4 && \
    mkdir -p /scrutiny/dist && \
    npm install && \
    ng build --output-path=/scrutiny/dist --deploy-url="/web/" --base-href="/web/" --prod
COPY --link . /go/src/github.com/analogj/scrutiny
RUN apt-get update && DEBIAN_FRONTEND=noninteractive \
    apt-get install -y --no-install-recommends \
    file \
    && rm -rf /var/lib/apt/lists/*
RUN make binary-clean binary-all WEB_BINARY_NAME=scrutiny


########
FROM ubuntu:bionic as runtime
######## Combine build artifacts in runtime image
FROM debian:bookworm-slim as runtime
ARG TARGETARCH
EXPOSE 8080
WORKDIR /scrutiny
ENV PATH="/scrutiny/bin:${PATH}"
WORKDIR /opt/scrutiny
ENV PATH="/opt/scrutiny/bin:${PATH}"
ENV INFLUXD_CONFIG_PATH=/opt/scrutiny/influxdb
ENV S6VER="3.1.6.2"
ENV INFLUXVER="2.2.0"
ENV S6_SERVICES_READYTIME=1000
SHELL ["/usr/bin/sh", "-c"]

ADD https://github.com/dshearer/jobber/releases/download/v1.4.4/jobber_1.4.4-1_amd64.deb /tmp/
RUN apt install /tmp/jobber_1.4.4-1_amd64.deb
RUN apt-get update && DEBIAN_FRONTEND=noninteractive \
    apt-get install -y --no-install-recommends \
    ca-certificates \
    cron \
    curl \
    smartmontools \
    tzdata \
    procps \
    xz-utils \
    && rm -rf /var/lib/apt/lists/* \
    && update-ca-certificates \
    && case ${TARGETARCH} in \
    "amd64") S6_ARCH=x86_64 ;; \
    "arm64") S6_ARCH=aarch64 ;; \
    esac \
    && curl https://github.com/just-containers/s6-overlay/releases/download/v${S6VER}/s6-overlay-noarch.tar.xz -L -s --output /tmp/s6-overlay-noarch.tar.xz \
    && tar -Jxpf /tmp/s6-overlay-noarch.tar.xz -C / \
    && rm -rf /tmp/s6-overlay-noarch.tar.xz \
    && curl https://github.com/just-containers/s6-overlay/releases/download/v${S6VER}/s6-overlay-${S6_ARCH}.tar.xz -L -s --output /tmp/s6-overlay-${S6_ARCH}.tar.xz \
    && tar -Jxpf /tmp/s6-overlay-${S6_ARCH}.tar.xz -C / \
    && rm -rf /tmp/s6-overlay-${S6_ARCH}.tar.xz
RUN curl -L https://dl.influxdata.com/influxdb/releases/influxdb2-${INFLUXVER}-${TARGETARCH}.deb --output /tmp/influxdb2-${INFLUXVER}-${TARGETARCH}.deb \
    && dpkg -i --force-all /tmp/influxdb2-${INFLUXVER}-${TARGETARCH}.deb \
    && rm -rf /tmp/influxdb2-${INFLUXVER}-${TARGETARCH}.deb

RUN apt-get update && apt-get install -y smartmontools=7.0-0ubuntu1~ubuntu18.04.1

ADD https://github.com/just-containers/s6-overlay/releases/download/v1.21.8.0/s6-overlay-amd64.tar.gz /tmp/
RUN tar xzf /tmp/s6-overlay-amd64.tar.gz -C /
COPY /rootfs /


COPY --from=backendbuild /go/src/github.com/analogj/scrutiny/scrutiny /scrutiny/bin/
COPY --from=backendbuild /go/src/github.com/analogj/scrutiny/scrutiny-collector-selftest /scrutiny/bin/
COPY --from=backendbuild /go/src/github.com/analogj/scrutiny/scrutiny-collector-metrics /scrutiny/bin/
COPY --from=frontendbuild /scrutiny/dist /scrutiny/web
RUN chmod +x /scrutiny/bin/scrutiny && \
    chmod +x /scrutiny/bin/scrutiny-collector-selftest && \
    chmod +x /scrutiny/bin/scrutiny-collector-metrics && \
    mkdir -p /scrutiny/web && \
    mkdir -p /scrutiny/config && \
    mkdir -p /scrutiny/jobber
COPY --link --from=backendbuild --chmod=755 /go/src/github.com/analogj/scrutiny/scrutiny /opt/scrutiny/bin/
COPY --link --from=backendbuild --chmod=755 /go/src/github.com/analogj/scrutiny/scrutiny-collector-metrics /opt/scrutiny/bin/
COPY --link --from=frontendbuild --chmod=644 /go/src/github.com/analogj/scrutiny/dist /opt/scrutiny/web
RUN chmod 0644 /etc/cron.d/scrutiny && \
    rm -f /etc/cron.daily/* && \
    mkdir -p /opt/scrutiny/web && \
    mkdir -p /opt/scrutiny/config && \
    chmod -R ugo+rwx /opt/scrutiny/config && \
    chmod +x /etc/cont-init.d/* && \
    chmod +x /etc/services.d/*/run && \
    chmod +x /etc/services.d/*/finish

CMD ["/init"]
+20
-20
@@ -1,31 +1,31 @@
########################################################################################################################
# Collector Image
########################################################################################################################


########
FROM golang:1.14.4-buster as backendbuild
FROM golang:1.20-bookworm as backendbuild

WORKDIR /go/src/github.com/analogj/scrutiny

COPY . /go/src/github.com/analogj/scrutiny

RUN go mod vendor && \
    go build -ldflags '-w -extldflags "-static"' -o scrutiny-collector-selftest collector/cmd/collector-selftest/collector-selftest.go && \
    go build -ldflags '-w -extldflags "-static"' -o scrutiny-collector-metrics collector/cmd/collector-metrics/collector-metrics.go
RUN apt-get update && apt-get install -y file && rm -rf /var/lib/apt/lists/*
RUN make binary-clean binary-collector

########
FROM ubuntu:bionic as runtime
EXPOSE 8080
WORKDIR /scrutiny
ENV PATH="/scrutiny/bin:${PATH}"
FROM debian:bookworm-slim as runtime
WORKDIR /opt/scrutiny
ENV PATH="/opt/scrutiny/bin:${PATH}"

ADD https://github.com/dshearer/jobber/releases/download/v1.4.4/jobber_1.4.4-1_amd64.deb /tmp/
RUN apt install /tmp/jobber_1.4.4-1_amd64.deb
RUN apt-get update && apt-get install -y cron smartmontools ca-certificates tzdata && rm -rf /var/lib/apt/lists/* && update-ca-certificates

RUN apt-get update && apt-get install -y smartmontools=7.0-0ubuntu1~ubuntu18.04.1
COPY /docker/entrypoint-collector.sh /entrypoint-collector.sh
COPY /rootfs/etc/cron.d/scrutiny /etc/cron.d/scrutiny
COPY --from=backendbuild /go/src/github.com/analogj/scrutiny/scrutiny-collector-metrics /opt/scrutiny/bin/
RUN chmod +x /opt/scrutiny/bin/scrutiny-collector-metrics && \
    chmod +x /entrypoint-collector.sh && \
    chmod 0644 /etc/cron.d/scrutiny && \
    rm -f /etc/cron.daily/apt /etc/cron.daily/dpkg /etc/cron.daily/passwd

COPY /rootfs/scrutiny /scrutiny


COPY --from=backendbuild /go/src/github.com/analogj/scrutiny/scrutiny-collector-selftest /scrutiny/bin/
COPY --from=backendbuild /go/src/github.com/analogj/scrutiny/scrutiny-collector-metrics /scrutiny/bin/
RUN chmod +x /scrutiny/bin/scrutiny-collector-selftest && \
    chmod +x /scrutiny/bin/scrutiny-collector-metrics

CMD ["/usr/lib/x86_64-linux-gnu/jobberrunner", "/scrutiny/jobber/jobber.yaml"]
CMD ["/entrypoint-collector.sh"]
+30 -30
@@ -1,37 +1,37 @@

########
FROM golang:1.14.4-buster as backendbuild
# syntax=docker/dockerfile:1.4
########################################################################################################################
# Web Image
########################################################################################################################

######## Build the frontend
FROM --platform=${BUILDPLATFORM} node AS frontendbuild
WORKDIR /go/src/github.com/analogj/scrutiny
COPY --link . /go/src/github.com/analogj/scrutiny

RUN make binary-frontend

######## Build the backend
FROM golang:1.20-bookworm as backendbuild

WORKDIR /go/src/github.com/analogj/scrutiny
COPY --link . /go/src/github.com/analogj/scrutiny

COPY . /go/src/github.com/analogj/scrutiny

RUN go mod vendor && \
    go build -ldflags '-w -extldflags "-static"' -o scrutiny webapp/backend/cmd/scrutiny/scrutiny.go

########
FROM node:lts-slim as frontendbuild

# reduce logging, disable angular-cli analytics for ci environment
ENV NPM_CONFIG_LOGLEVEL=warn NG_CLI_ANALYTICS=false

WORKDIR /scrutiny/src
COPY webapp/frontend /scrutiny/src

RUN npm install -g @angular/cli@9.1.4 && \
    mkdir -p /scrutiny/dist && \
    npm install && \
    ng build --output-path=/scrutiny/dist --deploy-url="/web/" --base-href="/web/" --prod
RUN apt-get update && apt-get install -y file && rm -rf /var/lib/apt/lists/*
RUN make binary-clean binary-all WEB_BINARY_NAME=scrutiny

########
FROM ubuntu:bionic as runtime
######## Combine build artifacts in runtime image
FROM debian:bookworm-slim as runtime
EXPOSE 8080
WORKDIR /scrutiny
ENV PATH="/scrutiny/bin:${PATH}"
WORKDIR /opt/scrutiny
ENV PATH="/opt/scrutiny/bin:${PATH}"

COPY --from=backendbuild /go/src/github.com/analogj/scrutiny/scrutiny /scrutiny/bin/
COPY --from=frontendbuild /scrutiny/dist /scrutiny/web
RUN chmod +x /scrutiny/bin/scrutiny && \
    mkdir -p /scrutiny/web && \
    mkdir -p /scrutiny/config
CMD ["/scrutiny/bin/scrutiny", "start"]
RUN apt-get update && apt-get install -y ca-certificates curl tzdata && rm -rf /var/lib/apt/lists/* && update-ca-certificates

COPY --link --from=backendbuild --chmod=755 /go/src/github.com/analogj/scrutiny/scrutiny /opt/scrutiny/bin/
COPY --link --from=frontendbuild --chmod=644 /go/src/github.com/analogj/scrutiny/dist /opt/scrutiny/web
RUN mkdir -p /opt/scrutiny/web && \
    mkdir -p /opt/scrutiny/config && \
    chmod -R a+rX /opt/scrutiny && \
    chmod -R a+w /opt/scrutiny/config
CMD ["/opt/scrutiny/bin/scrutiny", "start"]

@@ -0,0 +1 @@
`rootfs` is only used by Dockerfile and Dockerfile.collector

Executable +28
@@ -0,0 +1,28 @@

#!/bin/bash

# Cron runs in its own isolated environment (usually using only /etc/environment).
# So when the container starts up, we dump the runtime environment into a .env file that we
# will then source into the crontab file (/etc/cron.d/scrutiny).
(set -o posix; export -p) > /env.sh

# Adding the ability to customize the cron schedule.
COLLECTOR_CRON_SCHEDULE=${COLLECTOR_CRON_SCHEDULE:-"0 0 * * *"}
COLLECTOR_RUN_STARTUP=${COLLECTOR_RUN_STARTUP:-"false"}
COLLECTOR_RUN_STARTUP_SLEEP=${COLLECTOR_RUN_STARTUP_SLEEP:-"1"}

# If the cron schedule has been overridden via an env variable (eg. docker-compose), we should make sure to strip quotes.
[[ "${COLLECTOR_CRON_SCHEDULE}" == \"*\" || "${COLLECTOR_CRON_SCHEDULE}" == \'*\' ]] && COLLECTOR_CRON_SCHEDULE="${COLLECTOR_CRON_SCHEDULE:1:-1}"

# Replace the placeholder with the correct value.
sed -i 's|{COLLECTOR_CRON_SCHEDULE}|'"${COLLECTOR_CRON_SCHEDULE}"'|g' /etc/cron.d/scrutiny

if [[ "${COLLECTOR_RUN_STARTUP}" == "true" ]]; then
  sleep ${COLLECTOR_RUN_STARTUP_SLEEP}
  echo "starting scrutiny collector (run-once mode. subsequent calls will be triggered via cron service)"
  /opt/scrutiny/bin/scrutiny-collector-metrics run
fi

# Now that we have the env, start cron in the foreground.
echo "starting cron"
exec su -c "cron -f -L 15" root

@@ -0,0 +1,55 @@

version: '2.4'

services:
  influxdb:
    restart: unless-stopped
    image: influxdb:2.2
    ports:
      - '8086:8086'
    volumes:
      - './influxdb:/var/lib/influxdb2'
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8086/health"]
      interval: 5s
      timeout: 10s
      retries: 20

  web:
    restart: unless-stopped
    image: 'ghcr.io/analogj/scrutiny:master-web'
    ports:
      - '8080:8080'
    volumes:
      - './config:/opt/scrutiny/config'
    environment:
      SCRUTINY_WEB_INFLUXDB_HOST: 'influxdb'
    depends_on:
      influxdb:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8080/api/health"]
      interval: 5s
      timeout: 10s
      retries: 20
      start_period: 10s

  collector:
    restart: unless-stopped
    image: 'ghcr.io/analogj/scrutiny:master-collector'
    cap_add:
      - SYS_RAWIO
    volumes:
      - '/run/udev:/run/udev:ro'
    environment:
      COLLECTOR_API_ENDPOINT: 'http://web:8080'
      COLLECTOR_HOST_ID: 'scrutiny-collector-hostname'
      # If true, forces the collector to run on startup (cron will be started after the collector completes)
      # see: https://github.com/AnalogJ/scrutiny/blob/master/docs/TROUBLESHOOTING_DEVICE_COLLECTOR.md#collector-trigger-on-startup
      COLLECTOR_RUN_STARTUP: false
    depends_on:
      web:
        condition: service_healthy
    devices:
      - "/dev/sda"
      - "/dev/sdb"

@@ -0,0 +1,19 @@

version: '3.5'

services:
  scrutiny:
    restart: unless-stopped
    container_name: scrutiny
    image: ghcr.io/analogj/scrutiny:master-omnibus
    cap_add:
      - SYS_RAWIO
    ports:
      - "8080:8080" # webapp
      - "8086:8086" # influxDB admin
    volumes:
      - /run/udev:/run/udev:ro
      - ./config:/opt/scrutiny/config
      - ./influxdb:/opt/scrutiny/influxdb
    devices:
      - "/dev/sda"
      - "/dev/sdb"

@@ -0,0 +1,43 @@

# Downsampling

Scrutiny collects a lot of data, which can cause the database to grow unbounded:

- SMART data
- SMART test data
- Temperature data
- Disk metrics (capacity/usage)
- etc.

This data must be accurate in the short term, and is useful for doing trend analysis in the long term.
However, for trend analysis we only need aggregate data; individual data points are not as useful.

Scrutiny will automatically downsample data on a schedule to ensure that the database size stays reasonable, while still
ensuring historical data is present for comparisons.

| Bucket Name | Retention Period | Downsampling Range | Downsampling Aggregation Window | Downsampling Cron | Comments |
| --- | --- | --- | --- | --- | --- |
| `metrics` | 15 days | `-2w -1w` | `1w` | weekly on Sunday at 1:00am | main bucket |
| `metrics_weekly` | 9 weeks | `-2mo -1mo` | `1mo` | monthly on the first day of the month at 1:30am | |
| `metrics_monthly` | 25 months | `-2y -1y` | `1y` | yearly on the first day of the year at 2:00am | |
| `metrics_yearly` | forever | - | - | - | |
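
To make the table concrete, a downsampling task of this shape can be written in Flux (InfluxDB 2.x's task language). This is a sketch, not Scrutiny's actual task code; the bucket names and ranges come from the table above, while the task name and org are illustrative:

```flux
// Hypothetical weekly downsampling task: aggregate week-old raw data
// from `metrics` into 1w averages stored in `metrics_weekly`.
option task = {name: "downsample-metrics-weekly", cron: "0 1 * * 0"}

from(bucket: "metrics")
    |> range(start: -2w, stop: -1w)
    |> aggregateWindow(every: 1w, fn: mean)
    |> to(bucket: "metrics_weekly", org: "my-org")
```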

After 5 months, here's how many data points should exist in each bucket for one disk:

| Bucket Name | Datapoints | Comments |
| --- | --- | --- |
| `metrics` | 15 | 7 daily datapoints, up to 7 pending datapoints, 1 buffer datapoint |
| `metrics_weekly` | 9 | 4 aggregated weekly datapoints, 4 pending datapoints, 1 buffer datapoint |
| `metrics_monthly` | 3 | 3 aggregated monthly datapoints |
| `metrics_yearly` | 0 | |

After 5 years, here's how many data points should exist in each bucket for one disk:

| Bucket Name | Datapoints | Comments |
| --- | --- | --- |
| `metrics` | - | - |
| `metrics_weekly` | - | |
| `metrics_monthly` | - | |
| `metrics_yearly` | - | |

@@ -0,0 +1,17 @@

# Ansible Install

[Zorlin](https://github.com/Zorlin) has developed and now maintains [an Ansible playbook](https://github.com/Zorlin/scrutiny-playbook) which automates the steps involved in manually setting up Scrutiny.

Using it is simple:

* Grab a copy of the playbook
* Follow the directions in the playbook repository
* Run `ansible-playbook site.yml`
* Visit http://your-machine:8080 to see your new Scrutiny installation.

It will automatically pull metrics from machines once a day, at 1am.

You can see it in action below.

[![asciicast](https://asciinema.org/a/493531.svg)](https://asciinema.org/a/493531)

@@ -0,0 +1,182 @@

> See [docker/example.hubspoke.docker-compose.yml](https://github.com/AnalogJ/scrutiny/blob/master/docker/example.hubspoke.docker-compose.yml)
> for a docker-compose file.

> The following guide was contributed by @TinJoy59 in #417.
> It describes how to deploy Scrutiny in Hub/Spoke mode, where the Hub is running in Docker, and the Spokes
> (collectors) are running as binaries.
> He's using Proxmox & Synology in his guide, however this should be applicable for almost anyone.

# S.M.A.R.T. Monitoring with Scrutiny across machines

### 🤔 The problem:

Scrutiny offers a nice Docker package called "Omnibus" that can monitor HDDs attached to a Docker host with relative
ease. Scrutiny can also be installed in a Hub-Spoke layout, where the web interface, database and collector come in 3
separate packages. The official documentation assumes that the spokes in the "Hub-Spokes layout" run Docker, which is
not always the case. The third approach is to install Scrutiny manually, entirely outside of Docker.

### 💡 The solution:

This tutorial provides a hybrid configuration where the Hub lives in a Docker instance while the spokes have only the
Scrutiny Collector installed manually. The Collector periodically sends data to the Hub. It's not mind-bogglingly hard to
understand, but someone might struggle with the setup. This is for them.

### 🖥️ My setup:

I have a Proxmox cluster where one VM runs Docker and all monitoring services - Grafana, Prometheus, various exporters,
InfluxDB and so forth. Another VM runs the NAS - OpenMediaVault v6, where all hard drives reside. The Scrutiny Collector
is triggered every 30min to collect data on the drives. The data is sent to the Docker VM, running InfluxDB.

## Setting up the Hub

The Hub consists of Scrutiny Web - a web interface for viewing the SMART data - and InfluxDB, where the smartmon data is
stored.

[🔗 This is the official Hub-Spoke layout in docker-compose.](https://github.com/AnalogJ/scrutiny/blob/master/docker/example.hubspoke.docker-compose.yml)
We are going to reuse parts of it. The ENV variables provide the necessary configuration for the initial setup, both for
InfluxDB and Scrutiny.

If you are working with an existing InfluxDB instance, you can forgo all the `INIT` variables, as they already exist.

The official Scrutiny documentation has a
sample [scrutiny.yaml](https://github.com/AnalogJ/scrutiny/blob/master/example.scrutiny.yaml) file that normally
contains the connection and notification details, but I always find it easier to configure as much as possible in the
docker-compose.

```yaml
version: "3.4"

networks:
  monitoring: # A common network for all monitoring services to communicate into
    external: true
  notifications: # To Gotify or another Notification service
    external: true

services:
  influxdb:
    restart: unless-stopped
    container_name: influxdb
    image: influxdb:2.1-alpine
    ports:
      - 8086:8086
    volumes:
      - ${DIR_CONFIG}/influxdb2/db:/var/lib/influxdb2
      - ${DIR_CONFIG}/influxdb2/config:/etc/influxdb2
    environment:
      - DOCKER_INFLUXDB_INIT_MODE=setup
      - DOCKER_INFLUXDB_INIT_USERNAME=Admin
      - DOCKER_INFLUXDB_INIT_PASSWORD=${PASSWORD}
      - DOCKER_INFLUXDB_INIT_ORG=homelab
      - DOCKER_INFLUXDB_INIT_BUCKET=scrutiny
      - DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=your-very-secret-token
    networks:
      - monitoring

  scrutiny:
    restart: unless-stopped
    container_name: scrutiny
    image: ghcr.io/analogj/scrutiny:master-web
    ports:
      - 8080:8080
    volumes:
      - ${DIR_CONFIG}/scrutiny/config:/opt/scrutiny/config
    environment:
      - SCRUTINY_WEB_INFLUXDB_HOST=influxdb
      - SCRUTINY_WEB_INFLUXDB_PORT=8086
      - SCRUTINY_WEB_INFLUXDB_TOKEN=your-very-secret-token
      - SCRUTINY_WEB_INFLUXDB_ORG=homelab
      - SCRUTINY_WEB_INFLUXDB_BUCKET=scrutiny
      # Optional but highly recommended to notify you in case of a problem
      - SCRUTINY_NOTIFY_URLS=["http://gotify:80/message?token=a-gotify-token"]
    depends_on:
      - influxdb
    networks:
      - notifications
      - monitoring
```

A freshly initialized Scrutiny instance can be accessed on port 8080, eg. `192.168.0.100:8080`. The interface will be
empty because no metrics have been collected yet.

## Setting up a Spoke ***without*** Docker

A spoke consists of the Scrutiny Collector binary that is run on a set interval via crontab and sends the data to the
Hub. The official
documentation [describes the manual setup of the Collector](https://github.com/AnalogJ/scrutiny/blob/master/docs/INSTALL_MANUAL.md#collector)
- dependencies and step-by-step commands. I have a shortened version that does the same thing but in one line of code.

```bash
# Installing dependencies
apt install smartmontools -y

# 1. Create a directory for the binary
# 2. Download the binary into that directory
# 3. Make it executable
# 4. List the contents of the directory for confirmation
mkdir -p /opt/scrutiny/bin && \
curl -L https://github.com/AnalogJ/scrutiny/releases/download/v0.8.1/scrutiny-collector-metrics-linux-amd64 > /opt/scrutiny/bin/scrutiny-collector-metrics-linux-amd64 && \
chmod +x /opt/scrutiny/bin/scrutiny-collector-metrics-linux-amd64 && \
ls -lha /opt/scrutiny/bin
```

<p class="callout warning">When downloading GitHub Release Assets, make sure that you have the correct version. The example above uses release v0.8.1. [The release list can be found here.](https://github.com/analogj/scrutiny/releases)</p>

Once the Collector is installed, you can run it with the following command. Make sure to add the correct address and
port of your Hub as `--api-endpoint`.

```bash
/opt/scrutiny/bin/scrutiny-collector-metrics-linux-amd64 run --api-endpoint "http://192.168.0.100:8080"
```

This will run the Collector once and populate the web interface of your Scrutiny instance. In order to collect metrics
for a time series, you need to run the command repeatedly. Here is an example for crontab, running the Collector every
15min.

```bash
# open crontab
crontab -e

# add a line for Scrutiny
*/15 * * * * /opt/scrutiny/bin/scrutiny-collector-metrics-linux-amd64 run --api-endpoint "http://192.168.0.100:8080"
```

The Collector has its own independent config file that lives in `/opt/scrutiny/config/collector.yaml`, but I did not find
a need to modify
it. [A default collector.yaml can be found in the official documentation.](https://github.com/AnalogJ/scrutiny/blob/master/example.collector.yaml)
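
If you do want to override something (the host label or the API endpoint, for example), a minimal sketch looks like this - the `host.id` and `api.endpoint` keys come from the example file, and the values here are illustrative:

```yaml
# /opt/scrutiny/config/collector.yaml - minimal sketch, values illustrative
host:
  id: 'my-nas'

api:
  endpoint: 'http://192.168.0.100:8080'
```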

## Setting up a Spoke ***with*** Docker

Setting up a remote Spoke in Docker requires you to split
the [official Hub-Spoke layout docker-compose.yml](https://github.com/AnalogJ/scrutiny/blob/master/docker/example.hubspoke.docker-compose.yml).
In the following docker-compose you need to provide the `${API_ENDPOINT}`, in my case `http://192.168.0.100:8080`.
Also, all drives that you wish to monitor need to be presented to the container under `devices`.

The image handles the periodic scanning of the drives.

```yaml
version: "3.4"

services:

  collector:
    restart: unless-stopped
    image: 'ghcr.io/analogj/scrutiny:master-collector'
    cap_add:
      - SYS_RAWIO
    volumes:
      - '/run/udev:/run/udev:ro'
    environment:
      COLLECTOR_API_ENDPOINT: ${API_ENDPOINT}
    devices:
      - "/dev/sda"
      - "/dev/sdb"
```
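
Docker Compose substitutes `${API_ENDPOINT}` from your shell environment or from a `.env` file sitting next to the docker-compose.yml, for example:

```
# .env - the value is the Hub address from earlier
API_ENDPOINT=http://192.168.0.100:8080
```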

@@ -1 +1,172 @@

# Manual Install

While the easiest way to get started with [Scrutiny is using Docker](https://github.com/AnalogJ/scrutiny#docker),
it is possible to run it manually without much work. You can even mix and match, using Docker for one component and
a manual installation for the other. There's also [an installer](INSTALL_ANSIBLE.md) which automates this manual installation procedure.

Scrutiny is made up of three components: an InfluxDB database, a collector and a webapp/API. Here's how each component can be deployed manually.

> Note: the `/opt/scrutiny` directory is not hardcoded, you can use any directory name/path.

## InfluxDB

Please follow the official InfluxDB installation guide. Note, you'll need to install v2.2.0+.

https://docs.influxdata.com/influxdb/v2.2/install/

## Webapp/API

### Dependencies

Since the webapp is packaged as a standalone binary, there isn't really any software you need to install other than `glibc`,
which is already included by most Linux OS's.

### Directory Structure

Now let's create a directory structure to contain the Scrutiny files & binary.

```
mkdir -p /opt/scrutiny/config
mkdir -p /opt/scrutiny/web
mkdir -p /opt/scrutiny/bin
```

### Config file

While it is possible to run the webapp/api without a config file, the defaults are designed for use in a container environment,
and so will need to be overridden. So the first thing you'll need to do is create a config file that looks like the following:

```
# stored in /opt/scrutiny/config/scrutiny.yaml

version: 1

web:
  database:
    # The Scrutiny webapp will create a database for you, however the parent directory must exist.
    location: /opt/scrutiny/config/scrutiny.db
  src:
    frontend:
      # The path to the Scrutiny frontend files (js, css, images) must be specified.
      # We'll populate it with files in the next section
      path: /opt/scrutiny/web

  # if you're running influxdb on a different host (or using a cloud provider) you'll need to update the host & port below.
  # token, org, bucket are unnecessary for a new InfluxDB installation, as Scrutiny will automatically run the InfluxDB setup
  # and store the information in the config file. If you're re-using an existing influxdb installation, you'll need to provide
  # the `token`
  influxdb:
    host: localhost
    port: 8086
    # token: 'my-token'
    # org: 'my-org'
    # bucket: 'bucket'
```

> Note: for a full list of available configuration options, please check the [example.scrutiny.yaml](https://github.com/AnalogJ/scrutiny/blob/master/example.scrutiny.yaml) file.

### Download Files

Next, we'll download the Scrutiny API binary and frontend files from the [latest Github release](https://github.com/analogj/scrutiny/releases).
The files you need to download are named:

- **scrutiny-web-linux-amd64** - save this file to `/opt/scrutiny/bin`
- **scrutiny-web-frontend.tar.gz** - save this file to `/opt/scrutiny/web`

### Prepare Scrutiny

Now that we have downloaded the required files, let's prepare the filesystem.

```
# Let's make sure the Scrutiny webapp is executable.
chmod +x /opt/scrutiny/bin/scrutiny-web-linux-amd64

# Next, let's extract the frontend files.
# NOTE: after extraction, there **should not** be a `dist` subdirectory in the `/opt/scrutiny/web` directory.
cd /opt/scrutiny/web
tar xvzf scrutiny-web-frontend.tar.gz --strip-components 1 -C .

# Cleanup
rm -rf scrutiny-web-frontend.tar.gz
```

### Start Scrutiny Webapp

Finally, we start the Scrutiny webapp:

```
/opt/scrutiny/bin/scrutiny-web-linux-amd64 start --config /opt/scrutiny/config/scrutiny.yaml
```

The webapp listens for traffic on `http://0.0.0.0:8080` by default.
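
To confirm it's actually serving requests, you can hit the same health endpoint that the official docker-compose healthcheck uses:

```
curl http://localhost:8080/api/health
```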

## Collector

### Dependencies

Unlike the webapp, the collector does have some dependencies:

- `smartctl`, v7+
- `cron` (or an alternative process scheduler)

Unfortunately, the version of `smartmontools` (which contains `smartctl`) available in some of the base OS repositories is ancient.
So you'll need to install the v7+ version using one of the following commands:

- **Ubuntu (22.04/Jammy/LTS):** `apt-get install -y smartmontools`
- **Ubuntu (18.04/Bionic):** `apt-get install -y smartmontools=7.0-0ubuntu1~ubuntu18.04.1`
- **CentOS 8:**
    - `dnf install https://extras.getpagespeed.com/release-el8-latest.rpm`
    - `dnf install smartmontools`
- **FreeBSD:** `pkg install smartmontools`

### Directory Structure

Now let's create a directory structure to contain the Scrutiny collector binary.

```
mkdir -p /opt/scrutiny/bin
```

### Download Files

Next, we'll download the Scrutiny collector binary from the [latest Github release](https://github.com/analogj/scrutiny/releases).
The file you need to download is named:

- **scrutiny-collector-metrics-linux-amd64** - save this file to `/opt/scrutiny/bin`

### Prepare Scrutiny

Now that we have downloaded the required files, let's prepare the filesystem.

```
# Let's make sure the Scrutiny collector is executable.
chmod +x /opt/scrutiny/bin/scrutiny-collector-metrics-linux-amd64
```

### Start Scrutiny Collector, Populate Webapp

Next, we will manually trigger the collector, to populate the Scrutiny dashboard:

> NOTE: if you need to pass a config file to the scrutiny collector, you can provide it using the `--config` flag.

```
/opt/scrutiny/bin/scrutiny-collector-metrics-linux-amd64 run --api-endpoint "http://localhost:8080"
```

### Schedule Collector with Cron

Finally, you need to schedule the collector to run periodically.
This may be different depending on your OS/environment, but it may look something like this:

```
# open crontab
crontab -e

# add a line for Scrutiny
*/15 * * * * . /etc/profile; /opt/scrutiny/bin/scrutiny-collector-metrics-linux-amd64 run --api-endpoint "http://localhost:8080"
```

@@ -0,0 +1,59 @@

# Manual Windows Install

This guide is specifically for people who are on a Windows machine using [WSL](https://learn.microsoft.com/en-us/windows/wsl/about) with Docker.

Scrutiny is made up of three components: an InfluxDB database, a collector and a webapp/API. Docker will be used for
the InfluxDB and webapp/API; the collector component will be facilitated by [Windows Task Scheduler](https://learn.microsoft.com/en-us/windows/win32/taskschd/task-scheduler-start-page).

> **NOTE:** If you are **NOT** using WSL with Docker, then the easiest way to get started with [Scrutiny is the omnibus Docker image](https://github.com/AnalogJ/scrutiny#docker).

## InfluxDB and Webapp/API (Docker)

1. Copy the [example.hubspoke.docker-compose.yml](https://github.com/AnalogJ/scrutiny/blob/master/docker/example.hubspoke.docker-compose.yml)
   file and delete the collector section near the bottom of the file.
2. Run `docker-compose up -d` to verify that the DB and webapp are working correctly. Once it's completed, your webapp
   should be up and running, but the dashboard will be empty (default location is `localhost:8080`).

## Collector (Windows Task Scheduler)

1. Download the latest `scrutiny-collector-metrics-windows-amd64.exe` from the [releases page](https://github.com/AnalogJ/scrutiny/releases) (under assets)
2. On your Windows host, open [Windows Task Scheduler](https://www.wikihow.com/Open-Task-Scheduler-in-Windows-10) as **Administrator**
   1. In the **Start Menu** (Windows key), type `Task Scheduler` and then right click `Run as Administrator` to open
3. On the status bar (under the `action` tab), click `Create Task...`
4. A new window should open with the `General` tab open; enter relevant information into the `Name` and `Description` fields
   1. Under **Security Options** check:
      1. **Run whether user is logged on or not**
      2. **Run with highest privileges**
5. Next, click the `Triggers` tab and then click `New...` (bottom left-hand side of the window)
6. Here you can set how often you want this task to run; example settings are the following:
   1. **Settings:**
      1. `Daily`, start at `TODAYS_DATE` `12:00:00 AM`, recur every `1` days
   2. **Advanced Settings:**
      1. Repeat task every: `1 hour` for a duration of `Indefinitely`
      2. Stop task if it runs longer than: `30 minutes`
   3. Click OK when satisfied with your schedule
   > **NOTE:** The above settings will trigger the task **every day at midnight** and then **run every hour after that** (modify as needed)
7. Next, click the `Actions` tab and then click `New...` (bottom left-hand side of the window)
   1. **Action Settings:**
      1. In the **Program/Script** field, put: `scrutiny-collector-metrics-windows-amd64.exe`
      2. In the **Add arguments (optional)** field, put: `run --api-endpoint "http://localhost:8080" --config collector.yaml`
         > **NOTE:**
         > * Make sure that you put the correct port number (as specified in the docker-compose file) for the webapp (default is `8080`)
         > * The `--config` param is optional and is not needed if you just want to use the default collector config; see
         > [example.collector.yaml](https://github.com/AnalogJ/scrutiny/blob/master/example.collector.yaml) for more info on the collector config.
      3. In the **Start in (optional)** field, put: FOLDER_PATH_TO_YOUR `scrutiny-collector-metrics-windows-amd64.exe` file
         > **NOTE:** Must be exact and do not include `scrutiny-collector-metrics-windows-amd64.exe` in the path
      4. Click OK when finished
8. Next, click the `Conditions` tab and make sure that everything is unchecked (unless you want to specify otherwise)
9. Next, click the `Settings` tab and check everything except for the last checkbox
   1. **Examples for the following settings:**
      1. If the task fails, restart every: `5 minutes`
      2. Attempt restart up to: `3` times
      3. Stop the task if it runs longer than: `1 hour`
10. Next, once satisfied with everything, click OK
11. Then, find your newly created task (by its name) in the scheduler task list and manually run it (right click it, then click `Run`)
12. Finally, refresh your dashboard after a minute or two and your drive information should have populated the webapp dashboard.
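
If you'd rather script the task than click through the UI, a roughly equivalent task can be registered from an elevated command prompt with `schtasks`. This is a sketch, not part of the original guide; adjust the path, endpoint and schedule to match the settings above:

```
REM run from the folder containing the collector exe, in an elevated prompt
schtasks /Create /TN "Scrutiny Collector" ^
  /TR "\"%CD%\scrutiny-collector-metrics-windows-amd64.exe\" run --api-endpoint http://localhost:8080" ^
  /SC HOURLY /RU SYSTEM /RL HIGHEST
```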

@@ -0,0 +1,72 @@

# pfsense Install

This basically follows the [Manual collector instructions](https://github.com/AnalogJ/scrutiny/blob/master/docs/INSTALL_MANUAL.md#collector) and assumes you are running a hub and spoke deployment and already have the web app set up.

### Dependencies

SSH into pfsense, hit `8` for the shell, and install the required dependencies.

```
pkg install smartmontools
```

Ensure smartmontools is v7+. This won't be a problem in pfsense 2.6.0+.

### Directory Structure

Now let's create a directory structure to contain the Scrutiny collector binary.

```
mkdir -p /opt/scrutiny/bin
```

### Download Files

Next, we'll download the Scrutiny collector binary from the [latest Github release](https://github.com/analogj/scrutiny/releases).

> NOTE: Ensure you have the latest version in the below command

```
fetch -o /opt/scrutiny/bin https://github.com/AnalogJ/scrutiny/releases/download/vX.X.X/scrutiny-collector-metrics-freebsd-amd64
```

### Prepare Scrutiny

Now that we have downloaded the required files, let's prepare the filesystem.

```
chmod +x /opt/scrutiny/bin/scrutiny-collector-metrics-freebsd-amd64
```

### Start Scrutiny Collector, Populate Webapp

Next, we will manually trigger the collector, to populate the Scrutiny dashboard:

> NOTE: if you need to pass a config file to the scrutiny collector, you can provide it using the `--config` flag.

```
/opt/scrutiny/bin/scrutiny-collector-metrics-freebsd-amd64 run --api-endpoint "http://localhost:8080"
```
> NOTE: change the IP address to that of your web app

### Schedule Collector with Cron

Finally, you need to schedule the collector to run periodically.

Log in to the pfsense webGUI, head to `Services/Cron` and add an entry with the following details:

```
Minute: */15
Hour: *
Day of the Month: *
Month of the Year: *
Day of the Week: *
User: root
Command: /opt/scrutiny/bin/scrutiny-collector-metrics-freebsd-amd64 run --api-endpoint "http://localhost:8080" >/dev/null 2>&1
```
> NOTE: `>/dev/null 2>&1` is used to stop cron confirmation emails being sent.

@@ -0,0 +1,138 @@

# Install collector on Synology

## Install Entware

This will allow you to install a newer version of smartmontools on your Synology. Follow the instructions here (this is tested on DSM7) - https://github.com/Entware/Entware/wiki/Install-on-Synology-NAS

**PLEASE NOTE THAT IF YOU UPDATE DSM FIRMWARE YOU MAY BORK THE EXISTING ENTWARE INSTALLATION. FOR ANYTHING THAT MAY RELATE TO ENTWARE, PLEASE VISIT THEIR REPO**

## Collector Setup

**1. Run an update**

`sudo opkg update`

**2. Run an upgrade**

`sudo opkg upgrade`

**3. Install smartmontools**

`sudo opkg install smartmontools`

*It should install v7.2-2*

`Installing smartmontools (7.2-2) to root...`

**4. We will now create the directories**

```
mkdir -p /volume1/\@Entware/scrutiny/bin
mkdir -p /volume1/\@Entware/scrutiny/conf
```

**5. Change into the bin directory**

`cd /volume1/\@Entware/scrutiny/bin`

**6. Download the collector binary for your architecture and make it executable**

`wget https://github.com/AnalogJ/scrutiny/releases/download/v0.4.12/scrutiny-collector-metrics-linux-arm64`

`chmod +x /volume1/\@Entware/scrutiny/bin/scrutiny-collector-metrics-linux-arm64`

**7. Create a config file for the collector**

```
cd /volume1/\@Entware/scrutiny/conf
wget https://raw.githubusercontent.com/AnalogJ/scrutiny/master/example.collector.yaml
mv example.collector.yaml collector.yaml
```

**8. Let's make some changes in the [collector config file](../example.collector.yaml). These are what I uncommented/added; please tweak the device paths to your needs**

```
host:
  id: 'Server_Name'

devices:
  # example for forcing device type detection for a single disk
  - device: /dev/sda
    type: 'sat'
  - device: /dev/sdb
    type: 'sat'
  - device: /dev/sdc
    type: 'sat'
  - device: /dev/sdd
    type: 'sat'

api:
  endpoint: 'http://<url>:8080'
```

**9. Let's update the smartd db**

```
cd /volume1/\@Entware/scrutiny/bin/
wget https://raw.githubusercontent.com/smartmontools/smartmontools/master/smartmontools/drivedb.h
```

**10. I ran it like this, but you can tweak it to your liking. The most important part is the `--drivedb` flag, as this loads the database into the application for future use**

`smartctl -d sat --all /dev/sda --drivedb=/volume1/\@Entware/scrutiny/bin/drivedb.h`

**11. Now let's create a small bash script; this will be used for the scheduled task inside Synology**

`vim /volume1/\@Entware/scrutiny/bin/run_collect.sh`

**The contents are below, copy and paste them in**

```
#!/bin/bash

/volume1/\@Entware/scrutiny/bin/scrutiny-collector-metrics-linux-arm64 run --config /volume1/\@Entware/scrutiny/conf/collector.yaml
```

**Make `run_collect.sh` executable**

`chmod +x /volume1/\@Entware/scrutiny/bin/run_collect.sh`

## Set up Synology to run a scheduled task

Log in to DSM and do the following:

Go to: DSM > Control Panel > Task Scheduler

Create > Scheduled Task > User Defined Script

###### General

```
Task: Scrutiny_Collector
User: root
Enabled: yes
```

###### Schedule

```
Run on the following days: Daily
```

###### Time

```
Frequency: <Your desired frequency>
```

###### Task Settings

**Run Command**

```
. /opt/etc/profile; /volume1/\@Entware/scrutiny/bin/run_collect.sh
```

## Troubleshooting

If you have any issues with your devices being detected, or incorrect data, please take a look at [TROUBLESHOOTING_DEVICE_COLLECTOR.md](./TROUBLESHOOTING_DEVICE_COLLECTOR.md)

@@ -0,0 +1,32 @@

# UnRAID Install

Installation of Scrutiny in UnRAID follows the same process as installing any other docker container, utilizing the Community Applications plugin.

## Install the 'Community Applications' Plugin

All docker containers in UnRAID are typically installed utilizing the Community Applications plugin. To get started:
- Navigate to the plugins tab ( <UnRaid_IP_Address>/Plugins )
- Select the 'Install Plugin' tab, and enter the following address into the input field
```
https://raw.githubusercontent.com/Squidly271/community.applications/master/plugins/community.applications.plg
```

You're all set with the pre-requisites!

## Installing the Scrutiny docker image

To install, simply click 'Install'; the configuration parameters should not need modification, as the template within CA already defines the necessary parameters.

As a docker image can be created using various OS bases, the choice of image is entirely the user's. Recommendations of a specific image from a specific maintainer are beyond the scope of this guide. However, to provide some context given the number of questions posed regarding the various versions available:

- **ghcr.io/analogj/scrutiny:master-omnibus**
  - `Image maintained directly by the application author`
  - `Debian based docker image`
- **linuxserver/scrutiny**
  - `Image maintained by the LinuxServer.io group`
  - `Alpine based docker image`
- **hotio/scrutiny**
  - `Image maintained by hotio`
  - `DETAILS TBD`

Support for a given image is provided by that image's maintainers, while support for the application itself remains with the developer - i.e. LinuxServer.io supports the docker image of Scrutiny which they create, to the extent an issue is specific to that image. If an issue/enhancement pertains directly to the source code, support would still come directly from this repository's contributors.

@@ -0,0 +1,22 @@

# Officially Supported NAS/OS's

These are the officially supported NAS OS's (with documentation and setup guides). Once a guide is created
(in `docs/guides/` or elsewhere) it will be linked here.

- [x] [freenas/truenas](https://blog.stefandroid.com/2022/01/14/smart-scrutiny.html)
- [x] [unraid](./INSTALL_UNRAID.md)
- [ ] ESXI
- [ ] Proxmox
- [x] Synology
    - [Hub/Spoke Deployment - Collector](./INSTALL_SYNOLOGY_COLLECTOR.md)
    - [Omnibus Deployment](https://drfrankenstein.co.uk/2022/07/28/scrutiny-in-docker-on-a-synology-nas) - Docker Package
    - [Omnibus Deployment](https://drfrankenstein.co.uk/scrutiny-in-container-manager-on-a-synology-nas/) - Container Manager Package
- [ ] OMV
- [ ] Amahi
- [ ] Running in a LXC container
- [x] [PFSense](./INSTALL_PFSENSE.md)
- [x] QNAP
- [x] [RockStor](https://rockstor.com/docs/interface/docker-based-rock-ons/scrutiny.html)
- [ ] Solaris/OmniOS CE Support
- [ ] Kubernetes
- [x] [Windows](./INSTALL_MANUAL_WINDOWS.md)

@@ -0,0 +1,20 @@

# Testers

Scrutiny supports many operating systems, CPU architectures and runtime environments. Unfortunately, that makes it incredibly
difficult to test.
Thankfully, the following users have been gracious enough to test/validate that Scrutiny works on their system.

> NOTE: If you're interested in volunteering to test Scrutiny beta builds on your system, please [open an issue](https://github.com/AnalogJ/scrutiny/issues).

| Architecture Name | Binaries | Docker |
| --- | --- | --- |
| linux-amd64 | @TizzAmmazz | @feroxy @rshxyz |
| linux-arm-5 | -- | |
| linux-arm-6 | -- | |
| linux-arm-7 | @Zorlin | @martini1992 |
| linux-arm64 | @SiM22 @Zorlin | @ViRb3 @agneevX @benamajin |
| freebsd-amd64 | @BadCo-NZ @varunsridharan @martadinata666 @KenwoodFox @FingerlessGlov3s | |
| macos-amd64 | -- | -- |
| macos-arm64 | -- | -- |
| windows-amd64 | @gabrielv33 | -- |
| windows-arm64 | -- | -- |

@@ -0,0 +1,349 @@

# Scrutiny <-> SmartMonTools

Scrutiny uses `smartctl --scan` to detect devices/drives. If your devices are not being detected by Scrutiny, or some
data is missing, this is probably due to a `smartctl` issue.
The following page will document commonly asked questions and troubleshooting steps for the Scrutiny S.M.A.R.T. data collector.

## WWN vs Device name

As discussed in [`#117`](https://github.com/AnalogJ/scrutiny/issues/117), `/dev/sd*` device paths are ephemeral.

> Device paths in Linux aren't guaranteed to be consistent across restarts. Device names consist of major numbers (letters) and minor numbers. When the Linux storage device driver detects a new device, the driver assigns major and minor numbers from the available range to the device. When a device is removed, the device numbers are freed for reuse.
>
> The problem occurs because device scanning in Linux is scheduled by the SCSI subsystem to happen asynchronously. As a result, a device path name can vary across restarts.
>
> https://docs.microsoft.com/en-us/troubleshoot/azure/virtual-machines/troubleshoot-device-names-problems

While the Docker Scrutiny collector does require devices to be attached to the docker container by device name (using `--device=/dev/sd..`), internally
Scrutiny stores and references the devices by their `WWN`, which is globally unique and never changes.

As such, passing devices to the Scrutiny collector container using `/dev/disk/by-id/`, `/dev/disk/by-label/`, `/dev/disk/by-path/` and `/dev/disk/by-uuid/`
paths is unnecessary, unless you'd like to ensure the docker run command never needs to change.

#### Force /dev/disk/by-id paths

Since Scrutiny uses WWN under the hood, it really doesn't care about `/dev/sd*` vs `/dev/disk/by-id/`. The problem is the interaction between docker and smartmontools when using `--device /dev/disk/by-id` paths.

Basically, Scrutiny offloads all device detection to smartmontools, which doesn't seem to detect devices that have been passed into the docker container using `/dev/disk/by-id` paths.

If you must use "static" device references, you can map the host device id/uuid/wwn references to device names within the container:

```
# --device=<Host Device>:<Container Device Mapping>

docker run ....
--device=/dev/disk/by-id/wwn-0x5000xxxxx:/dev/sda
--device=/dev/disk/by-id/wwn-0x5001xxxxx:/dev/sdb
--device=/dev/disk/by-id/wwn-0x5003xxxxx:/dev/sdc
...
```

## Device Detection By Smartctl

The first thing you'll want to do is run `smartctl` locally (not in Docker) and make sure the output shows all your drives as expected.
See the `Drive Types` section below for what this output should look like for `NVMe`/`ATA`/`RAID` drives.

```bash
smartctl --scan

/dev/sda -d scsi # /dev/sda, SCSI device
/dev/sdb -d scsi # /dev/sdb, SCSI device
/dev/sdc -d scsi # /dev/sdc, SCSI device
/dev/sdd -d scsi # /dev/sdd, SCSI device
```

Once you've verified that `smartctl` correctly detects your drives, make sure Scrutiny is correctly detecting them as well.
> NOTE: make sure you specify all the devices you'd like Scrutiny to process using `--device=` flags.

```bash
docker run -it --rm \
  -v /run/udev:/run/udev:ro \
  --cap-add SYS_RAWIO \
  --device=/dev/sda \
  --device=/dev/sdb \
  ghcr.io/analogj/scrutiny:master-collector smartctl --scan
```

If the output is the same, your devices will be processed by Scrutiny.

### Collector Config File

In some cases `--scan` does not correctly detect the device type, returning [incomplete SMART data](https://github.com/AnalogJ/scrutiny/issues/45).
Scrutiny supports overriding the detected device type via the config file:

[example.collector.yaml](https://github.com/AnalogJ/scrutiny/blob/master/example.collector.yaml)
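
For example, forcing the device type for a single disk (the same shape as the override shown in the example file and in the RAID examples below):

```yaml
# /opt/scrutiny/config/collector.yaml
devices:
  # force device type detection for a single disk
  - device: /dev/sda
    type: 'sat'
```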

### RAID Controllers (Megaraid/3ware/HBA/Adaptec/HPE/etc)

Smartctl has support for a large number of [RAID controllers](https://www.smartmontools.org/wiki/Supported_RAID-Controllers), however this
support is not automatic, and may require some additional device type hinting. You can provide this information to the Scrutiny collector
using a collector config file. See [example.collector.yaml](/example.collector.yaml)

> NOTE: If you use docker, you **must** pass through the RAID virtual disk to the container using `--device` (see below)
>
> This device may be in `/dev/*` or `/dev/bus/*`.
> If you do not see a virtual device file `/dev/bus/*`, you may need to use the `--privileged` flag. See [#366 for more info](https://github.com/AnalogJ/scrutiny/issues/366#issuecomment-1253196407)
>
> If you're unsure, run `smartctl --scan` on your host, and pass all listed devices to the container.

```yaml
# /opt/scrutiny/config/collector.yaml
devices:
  # Dell PERC/Broadcom Megaraid example: https://github.com/AnalogJ/scrutiny/issues/30
  - device: /dev/bus/0
    type:
      - megaraid,14
      - megaraid,15
      - megaraid,18
      - megaraid,19
      - megaraid,20
      - megaraid,21

  - device: /dev/twa0
    type:
      - 3ware,0
      - 3ware,1
      - 3ware,2
      - 3ware,3
      - 3ware,4
      - 3ware,5

  # Adaptec RAID: https://github.com/AnalogJ/scrutiny/issues/189
  - device: /dev/sdb
    type:
      - aacraid,0,0,0
      - aacraid,0,0,1

  # HPE Smart Array example: https://github.com/AnalogJ/scrutiny/issues/213
  - device: /dev/sda
    type:
      - 'cciss,0'
      - 'cciss,1'
```

### NVMe Drives

As mentioned in the [README.md](/README.md), NVMe devices require both `--cap-add SYS_RAWIO` and `--cap-add SYS_ADMIN`
to allow smartctl permission to query your NVMe device SMART data [#26](https://github.com/AnalogJ/scrutiny/issues/26)

When attaching NVMe devices using `--device=/dev/nvme..`, make sure to provide the device controller (`/dev/nvme0`)
instead of the block device (`/dev/nvme0n1`). See [#209](https://github.com/AnalogJ/scrutiny/issues/209).

> The character device /dev/nvme0 is the NVME device controller, and block devices like /dev/nvme0n1 are the NVME storage namespaces: the devices you use for actual storage, which will behave essentially as disks.
>
> In enterprise-grade hardware, there might be support for several namespaces, thin provisioning within namespaces and other features. For now, you could think of namespaces as sort of meta-partitions with extra features for enterprise use.

### ATA

### USB Devices

The following information is extracted from [#266](https://github.com/AnalogJ/scrutiny/issues/266)

External HDDs support two modes of operation: usb-storage (old, slower, stable) and uas (new, faster, sometimes unstable).
On some external HDDs, uas mode does not properly pass through SMART information, or even causes hardware issues, so
it has been disabled by the kernel. No amount of smartctl parameters will fix this, as it is being rejected by the
kernel. This is especially true with Seagate HDDs. One solution is to force these devices into usb-storage mode, which
will incur some performance penalty, but may work well enough for you. More info:

- https://smartmontools.org/wiki/Supported_USB-Devices
- https://smartmontools.org/wiki/SAT-with-UAS-Linux
- https://forums.raspberrypi.com/viewtopic.php?t=245931

### Exit Codes

If you see an error message similar to `smartctl returned an error code (2) while processing /dev/sda`, this means that
`smartctl` (not Scrutiny) exited with an error code. Scrutiny will attempt to print a helpful error message to help you
debug, but you can look at the table (and associated links) below to debug `smartctl`.

> smartctl Return Values
> The return values of smartctl are defined by a bitmask. If all is well with the disk, the return value (exit status) of
> smartctl is 0 (all bits turned off). If a problem occurs, or an error, potential error, or fault is detected, then
> a non-zero status is returned. In this case, the eight different bits in the return value have the following meanings
> for ATA disks; some of these values may also be returned for SCSI disks.
>
> source: http://www.linuxguide.it/command_line/linux-manpage/do.php?file=smartctl#sect7

| Exit Code (Isolated) | Binary | Problem Message |
| --- | --- | --- |
| 1 | Bit 0 | Command line did not parse. |
| 2 | Bit 1 | Device open failed, or device did not return an IDENTIFY DEVICE structure. |
| 4 | Bit 2 | Some SMART command to the disk failed, or there was a checksum error in a SMART data structure (see '-b' option above). |
| 8 | Bit 3 | SMART status check returned "DISK FAILING". |
| 16 | Bit 4 | We found prefail Attributes <= threshold. |
| 32 | Bit 5 | SMART status check returned "DISK OK" but we found that some (usage or prefail) Attributes have been <= threshold at some time in the past. |
| 64 | Bit 6 | The device error log contains records of errors. |
| 128 | Bit 7 | The device self-test log contains records of errors. |

#### Standby/Sleeping Disks

Disks in Standby/Sleep can also cause `smartctl` to exit abnormally, usually with `exit code: 2`.

- https://github.com/AnalogJ/scrutiny/issues/221
- https://github.com/AnalogJ/scrutiny/issues/157
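
You can check whether a sleeping disk is the culprit without spinning it up by using smartctl's power-mode guard; with `-n standby`, smartctl skips the check (and exits with status 2) if the drive is in standby:

```bash
smartctl -n standby -i /dev/sda; echo "exit status: $?"
```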
|
||||
|
||||
### Volume Mount All Devices (`/dev`) - Privileged
|
||||
|
||||
> WARNING: This is an insecure/dangerous workaround. Running Scrutiny (or any Docker image) with `--privileged` is equivalent to running it with root access.
|
||||
|
||||
If you have exhausted all other mechanisms to get your disks working with `smartctl` running within a container, you can try running the docker image with the following additional flags:
|
||||
|
||||
- `--privileged` (instead of `--cap-add`) - this gives the docker container full access to your system. Scrutiny does not require this permission, however it can be helpful for `smartctl`
|
||||
- `-v /dev:/dev:ro` (instead of `--device`) - this mounts the `/dev` folder (containing all your device files) into the container, allowing `smartctl` to see your disks, exactly as if it were running on your host directly.
|
||||
|
||||
With this workaround your `docker run` command would look similar to the following:
|
||||
|
||||
```bash
|
||||
docker run -it --rm -p 8080:8080 -p 8086:8086 \
|
||||
-v `pwd`/scrutiny:/opt/scrutiny/config \
|
||||
-v `pwd`/influxdb2:/opt/scrutiny/influxdb \
|
||||
-v /run/udev:/run/udev:ro \
|
||||
--privileged \
|
||||
-v /dev:/dev \
|
||||
--name scrutiny \
|
||||
ghcr.io/analogj/scrutiny:master-omnibus
|
||||
```
|
||||
|
||||
## Scrutiny detects Failure but SMART Passed?
|
||||
|
||||
There's 2 different mechanisms that Scrutiny uses to detect failures.
|
||||
|
||||
The first is simple SMART failures. If SMART thinks an attribute is in a failed state, Scrutiny will display it as failed as well.
|
||||
|
||||
The second is using BackBlaze failure data: [https://backblaze.com/blog-smart-stats-2014-8.html](https://backblaze.com/blog-smart-stats-2014-8.html)
|
||||
If Scrutiny detects that an attribute corresponds with a high rate of failure using BackBlaze's data, it will also mark that attribute (and disk) as failed (even though SMART may think the device is still healthy).
|
||||
|
||||
This can cause some confusion when comparing Scrutiny's dashboard against other SMART analysis tools.
|
||||
If you hover over the "failed" label beside an attribute, Scrutiny will tell you if the failure was due to SMART or Scrutiny/BackBlaze data.
|
||||
|
||||
### Device failed but Smart & Scrutiny passed
|
||||
|
||||
Device SMART results are the source of truth for Scrutiny, however we don't just take into account the current SMART results, but also historical analysis of a disk.
|
||||
This means that if a device is marked as failed at any point in its history, it will continue to be stored in the database as failed until the device is removed (or status is reset -- see below).
|
||||
|
||||
In some cases, this historical failure may have been due to attribute analysis/thresholds that have since been relaxed:
|
||||
|
||||
- NVME - Numb Error Log Entries (v0.4.7)
|
||||
- ATA - Power Cycle Count (v0.4.7)
|
||||
- ATA - Read Error Rate (v0.4.13)
|
||||
- ATA - Seek Error Rate (v0.4.13)
|
||||
|
||||
If you'd like to reset the status of a disk (to healthy) and allow the next run of the collector to determine the actual status, you can run the following command:
|
||||
|
||||
```bash
|
||||
# connect to scrutiny docker container
|
||||
docker exec -it scrutiny bash
|
||||
|
||||
# install sqlite CLI tools (inside container)
|
||||
apt update && apt install -y sqlite3
|
||||
|
||||
# connect to the scrutiny database
|
||||
sqlite3 /opt/scrutiny/config/scrutiny.db
|
||||
|
||||
# reset/update the devices table, unset the failure status.
|
||||
UPDATE devices SET device_status = null;
|
||||
|
||||
# exit sqlite CLI
|
||||
.exit
|
||||
```
|
||||
|
||||
### Seagate Drives Failing

As thoroughly discussed in [#255](https://github.com/AnalogJ/scrutiny/issues/255) and [#522](https://github.com/AnalogJ/scrutiny/issues/522), Seagate (Ironwolf & others) drives are almost always marked as failed by Scrutiny.

#### Seek Error Rate & Read Error Rate (#255)

> The `Seek Error Rate` & `Read Error Rate` attribute raw values are typically very high, and the
> normalised values (Current / Worst / Threshold) are usually quite low. Despite this, the numbers in most cases are perfectly OK.
>
> The anxiety arises because we intuitively expect that the normalised values should reflect a "health" score, with
> 100 being the ideal value. Similarly, we would expect that the raw values should reflect an error count, in
> which case a value of 0 would be most desirable. However, Seagate calculates and applies these attribute values
> in a counterintuitive way.
>
> http://www.users.on.net/~fzabkar/HDD/Seagate_SER_RRER_HEC.html

Some analysis has been done which shows that Seagate drives break the common SMART conventions, which also causes Scrutiny's
comparison against BackBlaze data to detect these drives as failed.

**So what's the Solution?**

After taking a look at the BackBlaze data for the relevant attributes (`Seek Error Rate` & `Read Error Rate`), I've decided
to disable Scrutiny analysis for them. Both are non-critical, and have a low correlation with failure.

> Please note: SMART failures for these attributes will still cause the drive to be marked as failed. Only BackBlaze analysis has been disabled.

If this is affecting your drives, you'll need to do the following:

1. Upgrade to v0.4.13+
2. Reset your drive status using the SQLite script
   in [#device-failed-but-smart--scrutiny-passed](https://github.com/AnalogJ/scrutiny/blob/master/docs/TROUBLESHOOTING_DEVICE_COLLECTOR.md#device-failed-but-smart--scrutiny-passed)
3. Wait for (or manually start) the collector.

If you'd like to learn more about how the Seagate Ironwolf SMART attributes work under the hood, and how they differ
from other drives, please read the following:

- http://www.users.on.net/~fzabkar/HDD/Seagate_SER_RRER_HEC.html
- https://www.truenas.com/community/threads/seagate-ironwolf-smart-test-raw_read_error_rate-seek_error_rate.68634/

#### Seagate Raw Values are incorrect (#522)

Basically, Seagate drives are known to use a custom data format for a number of their SMART attributes. This causes issues with Scrutiny's threshold analysis.

- The workaround is to customize the smartctl command that Scrutiny uses for your drive by [creating a collector.yaml file](https://github.com/AnalogJ/scrutiny/issues/522#issuecomment-1766727988) and "fixing" each attribute (see the sketch below)
- The **real "fix"** is to make sure your Seagate drive is correctly supported by smartmontools. See this [PR](https://github.com/smartmontools/smartmontools/pull/247)

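A minimal sketch of that workaround is shown below. The attribute IDs (1 = `Read Error Rate`, 7 = `Seek Error Rate`) and the `-v ID,FORMAT` smartctl format strings are illustrative assumptions; the correct values depend on your drive model, so use the values from the linked issue comment:

```yaml
# collector.yaml - per-device smartctl override (illustrative values only)
devices:
  - device: /dev/sda
    commands:
      # re-interpret the raw values for attributes 1 & 7 via smartctl's -v flag
      metrics_smart_args: '--xall --json -T permissive -v 1,raw24/raw32 -v 7,raw24/raw32'
```
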
Sorry for the bad news, but this is a known issue and there's just not much we can do on the Scrutiny side.

## Hub & Spoke model, with multiple Hosts



When deploying Scrutiny in a hub & spoke model, it can be difficult to determine exactly which node a set of devices is
associated with.
Thankfully the collector has a special `--host-id` flag (or `COLLECTOR_HOST_ID` env variable) that can be used to
associate devices with a friendly host name.

The host-id is passed from the collector to the web-api when SMART device data is uploaded. There are three ways you can set
the host-id (see the sketch after this list):

- using the collector config
  file: [master/example.collector.yaml#L19-L22](https://github.com/AnalogJ/scrutiny/blob/master/example.collector.yaml?rgh-link-date=2022-05-25T15%3A08%3A56Z#L19-L22)
- using the `--host-id` collector CLI
  argument: [master/collector/cmd/collector-metrics/collector-metrics.go#L180-L185](https://github.com/AnalogJ/scrutiny/blob/master/collector/cmd/collector-metrics/collector-metrics.go?rgh-link-date=2022-05-25T15%3A08%3A56Z#L180-L185)
- using the `COLLECTOR_HOST_ID` environmental variable.

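For example, the three approaches below are equivalent (the host id `web-server-01` is just an illustrative name):

```yaml
# collector.yaml
host:
  id: 'web-server-01'
```

```bash
# or via the CLI flag / env variable
scrutiny-collector-metrics run --host-id 'web-server-01'
COLLECTOR_HOST_ID='web-server-01' scrutiny-collector-metrics run
```
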
See the [docs/INSTALL_HUB_SPOKE.md](/docs/INSTALL_HUB_SPOKE.md) guide for more information.

## Collector DEBUG mode

You can use environmental variables to enable debug logging and/or log files for the collector:

```bash
DEBUG=true
COLLECTOR_LOG_FILE=/tmp/collector.log
```

Or if you're not using docker, you can pass CLI arguments to the collector during startup:

```bash
scrutiny-collector-metrics run --debug --log-file /tmp/collector.log
```

## Collector trigger on startup

When the `omnibus` docker image starts up, it will automatically trigger the collector, which will populate the Scrutiny
WebUI with your disks.
This is not the case when running the collector docker image in **hub/spoke** mode: the collector and webui are
running in different containers (and potentially on different host machines), so
the web container may not be ready for incoming connections. By default the container will only run the collector at the
time specified in the cron schedule.

You can force the collector to run on startup using the following env variables (see the sketch after this list):

- `-e COLLECTOR_RUN_STARTUP=true` - forces the collector to run on startup (cron will be started after the collector
  completes)
- `-e COLLECTOR_RUN_STARTUP_SLEEP=10` - if `COLLECTOR_RUN_STARTUP` is enabled, you can use this env variable to
  configure the delay before the collector is run (default: `1` second). Used to ensure the web container has started
  successfully.

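A minimal sketch of a hub/spoke collector container using these variables (the image tag and API endpoint here are assumptions; use the values from your own deployment):

```bash
docker run -d \
  -e COLLECTOR_RUN_STARTUP=true \
  -e COLLECTOR_RUN_STARTUP_SLEEP=10 \
  -e COLLECTOR_API_ENDPOINT=http://scrutiny-web:8080 \
  --name scrutiny-collector \
  ghcr.io/analogj/scrutiny:master-collector
```
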
@@ -0,0 +1,27 @@

# Docker Images `master-omnibus` vs `latest`

> TL;DR: The `master-omnibus` and `latest` tags are almost semantically identical, as I follow a `golden master`
> development process. However, if you want to ensure you're only using the latest release, you can change to `latest`.

The CI script used to orchestrate the docker image builds can be found here: https://github.com/AnalogJ/scrutiny/blob/master/.github/workflows/docker-build.yaml#L166-L184

In general Scrutiny follows a `golden master` development process, which means that the `master` branch is not directly updated (unless it's for documentation changes);
instead, development is done in a feature branch, or committed to the `beta` branch.

As development progresses, and we're satisfied that a feature is complete and the quality is acceptable,
I merge the changes to `master` and trigger the creation of a new release -- ie, when master is updated, a new release
is almost immediately created (and tagged with `latest`).

So changing from `master-omnibus` to `latest` will be the same thing for all intents and purposes.

> NOTE: Previously, there was an automated cron build that ran on the `master` and `beta` branches.
> It used to trigger a `nightly` build, even if nothing had changed on the branch. This had a couple of benefits, but one was to
> ensure that there were no broken external dependencies in our (unchanged) code. This `nightly` build no longer updates the `master-omnibus` tag.

# Running Docker `rootless`

To prevent the container(s) from being stopped when you have installed Docker as `rootless`, you need to issue the following commands, which allow the user session to stay alive even after you close your (SSH) session:

```bash
sudo loginctl enable-linger $(whoami)
systemctl --user enable docker
```

@@ -0,0 +1,452 @@

# InfluxDB Troubleshooting

## Why??

Scrutiny has many features, but the relevant one to this conversation is the "S.M.A.R.T metric tracking for historical
trends". Basically, Scrutiny not only shows you the current SMART values, but how they've changed over weeks, months (or
even years).

To efficiently handle that data at scale (and to make my life easier as a developer) I decided to add InfluxDB as a
dependency. It's a dedicated timeseries database, as opposed to the general-purpose sqlite DB I used before. I also did
a bunch of testing and analysis before I made the change. With InfluxDB the memory footprint for Scrutiny (at idle) is
~100MB, which is still fairly reasonable.

### Data Size

It's surprisingly easy to reach extremely large database sizes if you don't use downsampling, or you downsample incorrectly.
The growth rate is pretty unintuitive -- see https://github.com/AnalogJ/scrutiny/issues/650#issuecomment-2365174940

> Fasten stores the SMART metrics in a timeseries database (InfluxDB), and automatically downsamples the data on a schedule.
>
> The expectation was that cron would run daily, and there would be:
>
> - 7 daily data points
> - 3 weekly data points
> - 11 monthly data points
> - and infinite yearly data points.
>
> These data points would be for each SMART metric, for each device.
> eg. in one year, (7+3+11) = 21 data points * 80ish SMART attributes = 1680 datapoints for one device
>
> If you're running cron every 15 minutes, your browser will instead be attempting to display:
>
> - 96*7 daily data points
> - 3 weekly
> - 11 monthly
>
> so (96*7 + 3 + 11)*80 = 54,880 datapoints for each device 😭

## Installation

InfluxDB is a required dependency for Scrutiny v0.4.0+.

https://docs.influxdata.com/influxdb/v2.2/install/

## Persistence

To ensure that all data is correctly stored, you must also persist the InfluxDB database directory (see the sketch after this list):

- If you're using the Official Scrutiny Omnibus image (`ghcr.io/analogj/scrutiny:master-omnibus`), the path is `/opt/scrutiny/influxdb`
- If you're deploying in Hub/Spoke mode with the InfluxDB-maintained image (`influxdb:2.2`), the path is `/var/lib/influxdb2`

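A minimal docker-compose sketch for the omnibus image (the host-side paths are illustrative):

```yaml
services:
  scrutiny:
    image: ghcr.io/analogj/scrutiny:master-omnibus
    volumes:
      - ./config:/opt/scrutiny/config     # sqlite db + scrutiny.yaml
      - ./influxdb:/opt/scrutiny/influxdb # InfluxDB data directory
```
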
If you attempt to restart Scrutiny but you forgot to persist the InfluxDB directory, you will get an error message like the following:

```
scrutiny | time="2022-05-12T22:54:12Z" level=info msg="Trying to connect to scrutiny sqlite db: /opt/scrutiny/config/scrutiny.db\n"
scrutiny | time="2022-05-12T22:54:12Z" level=info msg="Successfully connected to scrutiny sqlite db: /opt/scrutiny/config/scrutiny.db\n"
scrutiny | ts=2022-05-12T22:54:12.240791Z lvl=info msg=Unauthorized log_id=0aQcVlOW000 error="authorization not found"
scrutiny | panic: unauthorized: unauthorized access
```

Unfortunately this may mean that your database is lost, and the previous Scrutiny data is unavailable.
You should fix the docker-compose/docker run command that you're using to ensure that your database folder is persisted correctly,
then delete the `web.influxdb.token` field in your `scrutiny.yaml` file, and then restart Scrutiny.

## First Start

The web/api service will trigger the InfluxDB onboarding process automatically when it first starts. After that, it will store the newly generated InfluxDB API token in the Scrutiny config file.

If this credential is not correctly stored in the scrutiny config file, Scrutiny will fail to start (with an authentication error):

```
scrutiny | time="2022-05-12T22:52:55Z" level=info msg="Successfully connected to scrutiny sqlite db: /opt/scrutiny/config/scrutiny.db\n"
scrutiny | ts=2022-05-12T22:52:55.235753Z lvl=error msg="failed to onboard user admin" log_id=0aQcRnc0000 handler=onboard error="onboarding has already been completed" took=0.038ms
scrutiny | ts=2022-05-12T22:52:55.235816Z lvl=error msg="api error encountered" log_id=0aQcRnc0000 error="onboarding has already been completed"
scrutiny | panic: conflict: onboarding has already been completed
```

You can fix this issue by authenticating to the InfluxDB admin portal (the default credentials are username: `admin`, password: `password12345`),
then retrieving the API token, and writing it to your `scrutiny.yaml` config file under the `web.influxdb.token` field:



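The relevant section of `scrutiny.yaml` would then look like this (the token value is illustrative):

```yaml
web:
  influxdb:
    token: 'ksVU2t5SkQwYkvIxxxxxxxYt2xUt0uRKSbSF1Po0UQ=='
```
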
## Upgrading from v0.3.x to v0.4.x

When upgrading from v0.3.x to v0.4.x, some users have noticed problems such as:

```
2022/05/13 14:38:05 Loading configuration file: /opt/scrutiny/config/scrutiny.yaml
time="2022-05-13T14:38:05Z" level=info msg="Trying to connect to scrutiny sqlite db:"
time="2022-05-13T14:38:05Z" level=info msg="Successfully connected to scrutiny sqlite db:"
panic: a username and password is required for a setup
```

or

```
Start the scrutiny server
time="2022-06-11T10:35:04-04:00" level=info msg="Trying to connect to scrutiny sqlite db: \n"
time="2022-06-11T10:35:04-04:00" level=info msg="Successfully connected to scrutiny sqlite db: \n"
panic: failed to check influxdb setup status - parse "://:": missing protocol scheme
```

As discussed in [#248](https://github.com/AnalogJ/scrutiny/issues/248) and [#234](https://github.com/AnalogJ/scrutiny/issues/234),
this is usually related to either:

- Upgrading from the LSIO Scrutiny image to the Official Scrutiny image, without removing LSIO-specific environmental
  variables
  - Remove the `SCRUTINY_WEB=true` and `SCRUTINY_COLLECTOR=true` environmental variables. They were used by the LSIO
    image, but are unnecessary and cause issues with the official Scrutiny image.
  - Change your volume mappings to `/opt/scrutiny` from `/scrutiny`
- Updated versions of the [LSIO Scrutiny images are broken](https://github.com/linuxserver/docker-scrutiny/issues/22),
  as they have not installed InfluxDB, which is a required dependency of Scrutiny v0.4.x
  - You can revert to an earlier version of the LSIO image (`lscr.io/linuxserver/scrutiny:060ac7b8-ls34`), or just
    change to the official Scrutiny image (`ghcr.io/analogj/scrutiny:master-omnibus`)

Here's a couple of confirmed-working docker-compose files that you may want to look at:

- https://github.com/AnalogJ/scrutiny/blob/master/docker/example.hubspoke.docker-compose.yml
- https://github.com/AnalogJ/scrutiny/blob/master/docker/example.omnibus.docker-compose.yml

## Bring your own InfluxDB

> WARNING: Most users should not follow these steps. This is ONLY for users who have an EXISTING InfluxDB installation which contains data from multiple services.
> The Scrutiny Docker omnibus image includes an empty InfluxDB instance which it can configure.
> If you're deploying manually or via Hub/Spoke, you can just follow the installation instructions; Scrutiny knows how
> to run the first-time setup automatically.

The goal here is to create an InfluxDB API key with minimal permissions for use by Scrutiny:

- Create the Scrutiny buckets (`metrics`, `metrics_weekly`, `metrics_monthly`, `metrics_yearly`) with placeholder config
- Create the downsampling tasks (`tsk-weekly-aggr`, `tsk-monthly-aggr`, `tsk-yearly-aggr`) with a placeholder script
- Create an API token with restricted scope
- NOTE: The placeholder bucket & task configuration will be replaced automatically by Scrutiny during startup

The placeholder buckets and tasks need to be created before the API token can be created, as the resource IDs need to
exist for the scope restriction to work.

Scopes:

- `orgs`: read - required for Scrutiny to find its configured org_id
- `tasks`: scrutiny-specific read/write access - Scrutiny only needs access to the downsampling tasks you created above
- `buckets`: scrutiny-specific read/write access - Scrutiny only needs access to the buckets you created above

### Setup Environmental Variables

```bash
# replace the following values with correct values for your InfluxDB installation
export INFLUXDB_ADMIN_TOKEN=pCqRq7xxxxxx-FZgNLfstIs0w==
export INFLUXDB_ORG_ID=b2495xxxxx
export INFLUXDB_HOSTNAME=http://localhost:8086

# if you want to change the bucket name prefix below, you'll also need to update the setting in the scrutiny.yaml config file.
export INFLUXDB_SCRUTINY_BUCKET_BASENAME=metrics
```

### Create placeholder buckets

<details>
<summary>Click to expand!</summary>

```bash
curl -sS -X POST ${INFLUXDB_HOSTNAME}/api/v2/buckets \
  -H "Content-Type: application/json" \
  -H "Authorization: Token ${INFLUXDB_ADMIN_TOKEN}" \
  --data-binary @- << EOF
{
  "name": "${INFLUXDB_SCRUTINY_BUCKET_BASENAME}",
  "orgID": "${INFLUXDB_ORG_ID}",
  "retentionRules": []
}
EOF

curl -sS -X POST ${INFLUXDB_HOSTNAME}/api/v2/buckets \
  -H "Content-Type: application/json" \
  -H "Authorization: Token ${INFLUXDB_ADMIN_TOKEN}" \
  --data-binary @- << EOF
{
  "name": "${INFLUXDB_SCRUTINY_BUCKET_BASENAME}_weekly",
  "orgID": "${INFLUXDB_ORG_ID}",
  "retentionRules": []
}
EOF

curl -sS -X POST ${INFLUXDB_HOSTNAME}/api/v2/buckets \
  -H "Content-Type: application/json" \
  -H "Authorization: Token ${INFLUXDB_ADMIN_TOKEN}" \
  --data-binary @- << EOF
{
  "name": "${INFLUXDB_SCRUTINY_BUCKET_BASENAME}_monthly",
  "orgID": "${INFLUXDB_ORG_ID}",
  "retentionRules": []
}
EOF

curl -sS -X POST ${INFLUXDB_HOSTNAME}/api/v2/buckets \
  -H "Content-Type: application/json" \
  -H "Authorization: Token ${INFLUXDB_ADMIN_TOKEN}" \
  --data-binary @- << EOF
{
  "name": "${INFLUXDB_SCRUTINY_BUCKET_BASENAME}_yearly",
  "orgID": "${INFLUXDB_ORG_ID}",
  "retentionRules": []
}
EOF
```

</details>

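Since the four requests differ only in the bucket name, you could equivalently create them in a loop (a sketch using the same environment variables as above):

```bash
for suffix in "" "_weekly" "_monthly" "_yearly"; do
  curl -sS -X POST ${INFLUXDB_HOSTNAME}/api/v2/buckets \
    -H "Content-Type: application/json" \
    -H "Authorization: Token ${INFLUXDB_ADMIN_TOKEN}" \
    --data-binary "{\"name\": \"${INFLUXDB_SCRUTINY_BUCKET_BASENAME}${suffix}\", \"orgID\": \"${INFLUXDB_ORG_ID}\", \"retentionRules\": []}"
done
```
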
### Create placeholder tasks

<details>
<summary>Click to expand!</summary>

```bash
curl -sS -X POST ${INFLUXDB_HOSTNAME}/api/v2/tasks \
  -H "Content-Type: application/json" \
  -H "Authorization: Token ${INFLUXDB_ADMIN_TOKEN}" \
  --data-binary @- << EOF
{
  "orgID": "${INFLUXDB_ORG_ID}",
  "flux": "option task = {name: \"tsk-weekly-aggr\", every: 1y} \nyield now()"
}
EOF

curl -sS -X POST ${INFLUXDB_HOSTNAME}/api/v2/tasks \
  -H "Content-Type: application/json" \
  -H "Authorization: Token ${INFLUXDB_ADMIN_TOKEN}" \
  --data-binary @- << EOF
{
  "orgID": "${INFLUXDB_ORG_ID}",
  "flux": "option task = {name: \"tsk-monthly-aggr\", every: 1y} \nyield now()"
}
EOF

curl -sS -X POST ${INFLUXDB_HOSTNAME}/api/v2/tasks \
  -H "Content-Type: application/json" \
  -H "Authorization: Token ${INFLUXDB_ADMIN_TOKEN}" \
  --data-binary @- << EOF
{
  "orgID": "${INFLUXDB_ORG_ID}",
  "flux": "option task = {name: \"tsk-yearly-aggr\", every: 1y} \nyield now()"
}
EOF
```

</details>

### Create InfluxDB API Token

<details>
<summary>Click to expand!</summary>

```bash
# replace these values with the placeholder bucket and task ids from your InfluxDB installation.
export INFLUXDB_SCRUTINY_BASE_BUCKET_ID=1e0709xxxx
export INFLUXDB_SCRUTINY_WEEKLY_BUCKET_ID=1af03dexxxxx
export INFLUXDB_SCRUTINY_MONTHLY_BUCKET_ID=b3c59c7xxxxx
export INFLUXDB_SCRUTINY_YEARLY_BUCKET_ID=f381d8cxxxxx

export INFLUXDB_SCRUTINY_WEEKLY_TASK_ID=09a64ecxxxxx
export INFLUXDB_SCRUTINY_MONTHLY_TASK_ID=09a64xxxxx
export INFLUXDB_SCRUTINY_YEARLY_TASK_ID=09a64ecxxxxx


curl -sS -X POST ${INFLUXDB_HOSTNAME}/api/v2/authorizations \
  -H "Content-Type: application/json" \
  -H "Authorization: Token ${INFLUXDB_ADMIN_TOKEN}" \
  --data-binary @- << EOF
{
  "description": "scrutiny - restricted scope token",
  "orgID": "${INFLUXDB_ORG_ID}",
  "permissions": [
    {
      "action": "read",
      "resource": {
        "type": "orgs"
      }
    },
    {
      "action": "read",
      "resource": {
        "type": "tasks"
      }
    },
    {
      "action": "write",
      "resource": {
        "type": "tasks",
        "id": "${INFLUXDB_SCRUTINY_WEEKLY_TASK_ID}",
        "orgID": "${INFLUXDB_ORG_ID}"
      }
    },
    {
      "action": "write",
      "resource": {
        "type": "tasks",
        "id": "${INFLUXDB_SCRUTINY_MONTHLY_TASK_ID}",
        "orgID": "${INFLUXDB_ORG_ID}"
      }
    },
    {
      "action": "write",
      "resource": {
        "type": "tasks",
        "id": "${INFLUXDB_SCRUTINY_YEARLY_TASK_ID}",
        "orgID": "${INFLUXDB_ORG_ID}"
      }
    },
    {
      "action": "read",
      "resource": {
        "type": "buckets",
        "id": "${INFLUXDB_SCRUTINY_BASE_BUCKET_ID}",
        "orgID": "${INFLUXDB_ORG_ID}"
      }
    },
    {
      "action": "write",
      "resource": {
        "type": "buckets",
        "id": "${INFLUXDB_SCRUTINY_BASE_BUCKET_ID}",
        "orgID": "${INFLUXDB_ORG_ID}"
      }
    },
    {
      "action": "read",
      "resource": {
        "type": "buckets",
        "id": "${INFLUXDB_SCRUTINY_WEEKLY_BUCKET_ID}",
        "orgID": "${INFLUXDB_ORG_ID}"
      }
    },
    {
      "action": "write",
      "resource": {
        "type": "buckets",
        "id": "${INFLUXDB_SCRUTINY_WEEKLY_BUCKET_ID}",
        "orgID": "${INFLUXDB_ORG_ID}"
      }
    },
    {
      "action": "read",
      "resource": {
        "type": "buckets",
        "id": "${INFLUXDB_SCRUTINY_MONTHLY_BUCKET_ID}",
        "orgID": "${INFLUXDB_ORG_ID}"
      }
    },
    {
      "action": "write",
      "resource": {
        "type": "buckets",
        "id": "${INFLUXDB_SCRUTINY_MONTHLY_BUCKET_ID}",
        "orgID": "${INFLUXDB_ORG_ID}"
      }
    },
    {
      "action": "read",
      "resource": {
        "type": "buckets",
        "id": "${INFLUXDB_SCRUTINY_YEARLY_BUCKET_ID}",
        "orgID": "${INFLUXDB_ORG_ID}"
      }
    },
    {
      "action": "write",
      "resource": {
        "type": "buckets",
        "id": "${INFLUXDB_SCRUTINY_YEARLY_BUCKET_ID}",
        "orgID": "${INFLUXDB_ORG_ID}"
      }
    }
  ]
}
EOF
```

</details>

### Save InfluxDB API Token

After running the curl command above, you'll see a JSON response that looks like the following:

```json
{
  "token": "ksVU2t5SkQwYkvIxxxxxxxYt2xUt0uRKSbSF1Po0UQ==",
  "status": "active",
  "description": "scrutiny - restricted scope token",
  "orgID": "b2495586xxxx",
  "org": "my-org",
  "user": "admin",
  "permissions": [
    {
      "action": "read",
      "resource": {
        "type": "orgs"
      }
    },
    {
      "action": "read",
      "resource": {
        "type": "tasks"
      }
    },
    {
      "action": "write",
      "resource": {
        "type": "tasks",
        "id": "09a64exxxxx",
        "orgID": "b24955860xxxxx",
        "org": "my-org"
      }
    },
    ...
  ]
}
```

You must copy the token field from the JSON response, and save it in your `scrutiny.yaml` config file. After that's
done, you can start the Scrutiny server.

## Customize InfluxDB Admin Username & Password

The full set of InfluxDB configuration options is available
in [code](https://github.com/AnalogJ/scrutiny/blob/master/webapp/backend/pkg/config/config.go?rgh-link-date=2023-01-19T16%3A23%3A40Z#L49-L51).

During first startup, Scrutiny will connect to the unprotected InfluxDB server, start the setup process (via API) using a
username and password of `admin`:`password12345`, and then create an API token of `scrutiny-default-admin-token`.

After that's complete, it will use the API token for all subsequent communication with InfluxDB.

You can configure the values for the admin username, password and token using the config file, or env variables:

#### Config File Example

```yaml
web:
  influxdb:
    token: 'my-custom-token'
    init_username: 'my-custom-username'
    init_password: 'my-custom-password'
```

#### Environmental Variables Example

`SCRUTINY_WEB_INFLUXDB_TOKEN`, `SCRUTINY_WEB_INFLUXDB_INIT_USERNAME` and `SCRUTINY_WEB_INFLUXDB_INIT_PASSWORD`

It's safe to change the InfluxDB admin username/password after setup has completed; only the API token is used for
subsequent communication with InfluxDB.

@@ -0,0 +1,46 @@

# Notifications

As documented in [example.scrutiny.yaml](https://github.com/AnalogJ/scrutiny/blob/master/example.scrutiny.yaml#L59-L75)
there are multiple ways to configure notifications for Scrutiny.

Under the hood we use a library called [Shoutrrr](https://github.com/containrrr/shoutrrr) to send our notifications, and you should use their documentation if you run into
any issues: https://containrrr.dev/shoutrrr/services/overview/

# Script Notifications

While the Shoutrrr library supports many popular providers for sending notifications, Scrutiny also supports a "script"-based
notification system, allowing you to execute a custom script whenever a notification needs to be sent.
Data is provided to this script using the following environmental variables:

```
SCRUTINY_SUBJECT - eg. "Scrutiny SMART error (%s) detected on device: %s"
SCRUTINY_DATE
SCRUTINY_FAILURE_TYPE - EmailTest, SmartFail, ScrutinyFail
SCRUTINY_DEVICE_NAME - eg. /dev/sda
SCRUTINY_DEVICE_TYPE - ATA/SCSI/NVMe
SCRUTINY_DEVICE_SERIAL - eg. WDDJ324KSO
SCRUTINY_MESSAGE - eg. "Scrutiny SMART error notification for device: %s\nFailure Type: %s\nDevice Name: %s\nDevice Serial: %s\nDevice Type: %s\nDate: %s"
SCRUTINY_HOST_ID - (optional) eg. "my-custom-host-id"
```

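For example, a minimal notification script might just append the details to a log file (a sketch; the script path and log path are arbitrary, and the script path must match the `script://` URL you configure):

```bash
#!/bin/bash
# /opt/scrutiny/notify.sh - illustrative script:// notification handler
echo "$(date) [${SCRUTINY_FAILURE_TYPE}] ${SCRUTINY_DEVICE_NAME} (${SCRUTINY_DEVICE_SERIAL}): ${SCRUTINY_SUBJECT}" >> /var/log/scrutiny-notify.log
```
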
# Special Characters

`Shoutrrr` supports special characters in the username and password fields; however, you'll need to url-encode the
username and the password separately.

- if your username is: `myname@example.com`
- if your password is: `124@34$1`

Then your `shoutrrr` url will look something like:

- `smtp://myname%40example%2Ecom:124%4034%241@ms.my.domain.com:587`

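One way to generate the encoded values is Python's standard library (a sketch, shown here for the example password):

```bash
python3 -c 'import urllib.parse, sys; print(urllib.parse.quote(sys.argv[1], safe=""))' '124@34$1'
# output: 124%4034%241
```
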
# Testing Notifications

You can test that your notifications are configured correctly by posting an empty payload to the notifications health
check API:

```
curl -X POST http://localhost:8080/api/health/notify
```

@@ -0,0 +1,139 @@

# Reverse Proxy Support

Scrutiny is designed so that it can be used behind a reverse proxy, leveraging `domain`, `port` or `path` based matching to correctly route to the Scrutiny service.

For simple `domain` and/or `port` based routing, this is easy.

If your domain:port pair is similar to `http://scrutiny.example.com` or `http://localhost:54321`, just update your reverse proxy configuration
to route traffic to the Scrutiny backend, which is listening on `0.0.0.0:8080` by default.

```yaml
# default config
web:
  listen:
    port: 8080
    host: 0.0.0.0
```

However, if you're using `path` based routing to differentiate your reverse-proxy-protected services, things become more complicated.

If you'd like to access Scrutiny using a path like `http://example.com/scrutiny/`, then we need a way to configure Scrutiny so that it
understands `http://example.com/scrutiny/api/health` actually means `http://localhost:8080/api/health`.

Thankfully this can be done by changing **two** settings (both are required):

1. The webserver has a `web.listen.basepath` key
2. The collectors have an `api.endpoint` key.

## Webserver Configuration

When setting the `web.listen.basepath` key in the web config file, make sure the `basepath` value is prefixed with `/`.

```yaml
# customized webserver config
web:
  listen:
    port: 8080
    host: 0.0.0.0
    # if you're using a reverse proxy like apache/nginx, you can override this value to serve scrutiny on a subpath.
    # eg. http://example.com/custombasepath/* vs http://example.com:8080
    basepath: '/custombasepath'
```

## Collector Configuration

Here's how you can update the collector `api.endpoint` key:

```yaml
# customized collector config
api:
  endpoint: 'http://localhost:8080/custombasepath'
```

# Environmental Variables

You may also configure these values using the following environmental variables (both are required):

- `COLLECTOR_API_ENDPOINT=http://localhost:8080/custombasepath`
- `SCRUTINY_WEB_LISTEN_BASEPATH=/custombasepath`

# Real Examples

## Caddy

1. Create a Caddyfile

   ```yaml
   # Caddyfile
   :9090

   # The `scrutiny` text in this file must match the service name in the docker-compose file below.
   # The `/custom/` text is the custom base path scrutiny will be available on.
   reverse_proxy /custom/* scrutiny:8080
   ```

2. Create a `docker-compose.yml` file

   ```yaml
   # docker-compose.yml
   version: '3.5'

   services:
     scrutiny:
       container_name: scrutiny
       image: ghcr.io/analogj/scrutiny:master-omnibus
       cap_add:
         - SYS_RAWIO
       ports:
         - "8086:8086" # influxDB admin
       volumes:
         - /run/udev:/run/udev:ro
         - ./config:/opt/scrutiny/config
         - ./influxdb:/opt/scrutiny/influxdb
       devices:
         - "/dev/sda"
         - "/dev/sdb"
       environment:
         - SCRUTINY_WEB_LISTEN_BASEPATH=/custom
         - COLLECTOR_API_ENDPOINT=http://localhost:8080/custom
     caddy:
       image: caddy
       volumes:
         - ./Caddyfile:/etc/caddy/Caddyfile
       ports:
         - "9090:9090"
   ```

3. run `docker-compose up`
4. visit [http://localhost:9090/custom/web](http://localhost:9090/custom/web) - access the scrutiny container via the caddy reverse proxy

## Traefik

Assuming that you have Traefik up and running with [AutoDiscovery Using Traefik For Docker](https://doc.traefik.io/traefik/providers/docker/),
here is an example of a `docker-compose.yml` file, with labels to enable the Traefik reverse proxy and basic auth:

```yaml
version: '3.5'
services:
  scrutiny:
    container_name: scrutiny
    image: ghcr.io/analogj/scrutiny:master-omnibus
    cap_add:
      - SYS_RAWIO
      - SYS_ADMIN
    volumes:
      - /run/udev:/run/udev:ro
      - ./config:/opt/scrutiny/config
      - ./influxdb:/opt/scrutiny/influxdb
    labels:
      - traefik.enable=true
      - traefik.http.routers.scrutiny.rule=Host(`example.com`)
      - traefik.http.services.scrutiny.loadbalancer.server.port=8080
      # the 2 labels below are optional, in case you want basic auth in Traefik:
      - traefik.http.routers.scrutiny.middlewares=auth
      - "traefik.http.middlewares.auth.basicauth.users=user:$$2y$$05$$G11Wm/dlWpXHENK..m8se.zxvaE8USJBp1Ws56sSCrOcwWDjsYHni"
      # Note: when used in docker-compose.yml all dollar signs in the hash need to be doubled for escaping.
      # To create a user:password pair, it's possible to use this command:
      # echo $(htpasswd -nB user) | sed -e s/\\$/\\$\\$/g
    devices:
      - "/dev/sda"
      - "/dev/sdb"
      - "/dev/nvme0"
```

@@ -0,0 +1,18 @@

# Operating systems without udev

Some operating systems do not come with `udev` out of the box, for example Alpine Linux. In these instances you will not be able to bind `/run/udev` to the container for sharing device metadata. Some operating systems offer `udev` as a package that can be installed separately, or an alternative (such as `eudev` in the case of Alpine Linux) that provides the same functionality.

To install `eudev` in Alpine Linux (run as root):

```
apk add eudev
setup-udev
```

Once your `udev` implementation is installed, populate `/run/udev` with the following command:

```
udevadm trigger
```

On Alpine Linux, this also has the benefit of creating symlinks to device serial numbers in `/dev/disk/by-id`.

@@ -0,0 +1,89 @@

// SQLite Table(s)

Table Device {
  Archived bool
  //GORM attributes, see: http://gorm.io/docs/conventions.html
  CreatedAt time
  UpdatedAt time
  DeletedAt time

  WWN string

  DeviceName string
  DeviceUUID string
  DeviceSerialID string
  DeviceLabel string

  Manufacturer string
  ModelName string
  InterfaceType string
  InterfaceSpeed string
  SerialNumber string
  Firmware string
  RotationSpeed int
  Capacity int64
  FormFactor string
  SmartSupport bool
  DeviceProtocol string //protocol determines which smart attribute types are available (ATA, NVMe, SCSI)
  DeviceType string //device type is used for querying with -d/t flag, should only be used by collector.

  // User provided metadata
  Label string
  HostId string

  // Data set by Scrutiny
  DeviceStatus enum
}

Table Setting {
  //GORM attributes, see: http://gorm.io/docs/conventions.html

  SettingKeyName string
  SettingKeyDescription string
  SettingDataType string

  SettingValueNumeric int64
  SettingValueString string
}


// InfluxDB Tables
Table SmartTemperature {
  Date time
  DeviceWWN string //(tag)
  Temp int64
}


Table Smart {
  Date time
  DeviceWWN string //(tag)
  DeviceProtocol string

  //Metrics (fields)
  Temp int64
  PowerOnHours int64
  PowerCycleCount int64

  //Smart Status
  Status enum

  //SMART Attributes (fields)
  Attr_ID_AttributeId int
  Attr_ID_Value int64
  Attr_ID_Threshold int64
  Attr_ID_Worst int64
  Attr_ID_RawValue int64
  Attr_ID_RawString string
  Attr_ID_WhenFailed string
  //Generated data
  Attr_ID_TransformedValue int64
  Attr_ID_Status enum
  Attr_ID_StatusReason string
  Attr_ID_FailureRate float64
}

Ref: Device.WWN < Smart.DeviceWWN
Ref: Device.WWN < SmartTemperature.DeviceWWN

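For reference, the `Device` table above is the one that the status-reset command earlier in this document operates on. A minimal sketch of inspecting it (the snake_case column names are an assumption based on GORM's naming conventions; `device_status` is confirmed by the reset command above):

```bash
sqlite3 /opt/scrutiny/config/scrutiny.db \
  "SELECT wwn, device_name, device_status FROM devices;"
```
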
Binary file not shown. (image added, 35 KiB)
Binary file not shown. (image added, 78 KiB)
@@ -0,0 +1,100 @@

# Commented Scrutiny Configuration File
#
# The default location for this file is /opt/scrutiny/config/collector.yaml.
# In some cases to improve clarity default values are specified,
# uncommented. Other example values are commented out.
#
# When this file is parsed by Scrutiny, all configuration file keys are
# lowercased automatically. As such, configuration keys are case-insensitive,
# and should be lowercase in this file to be consistent with usage.


######################################################################
# Version
#
# version specifies the version of this configuration file schema, not
# the scrutiny binary. There is only 1 version available at the moment
version: 1

# The host id is a label used for identifying groups of disks running on the same host.
# Primarily used for hub/spoke deployments (can be left empty if using the all-in-one image).
host:
  id: ""


# This block allows you to override/customize the settings for devices detected by
# Scrutiny via `smartctl --scan`
# See the "--device=TYPE" section of https://linux.die.net/man/8/smartctl
# type can be a 'string' or a 'list'
devices:
#  # example for forcing device type detection for a single disk
#  - device: /dev/sda
#    type: 'sat'
#
#  # example for using `-d sat,auto`, notice the square brackets (workaround for #418)
#  - device: /dev/sda
#    type: ['sat,auto']
#
#  # example to show how to ignore a specific disk/device.
#  - device: /dev/sda
#    ignore: true
#
#  # examples showing how to force smartctl to detect disks inside a raid array/virtual disk
#  - device: /dev/bus/0
#    type:
#      - megaraid,14
#      - megaraid,15
#      - megaraid,18
#      - megaraid,19
#      - megaraid,20
#      - megaraid,21
#
#  - device: /dev/twa0
#    type:
#      - 3ware,0
#      - 3ware,1
#      - 3ware,2
#      - 3ware,3
#      - 3ware,4
#      - 3ware,5
#
#  # example to show how to override the smartctl command args (per device), see below for how to override these globally.
#  - device: /dev/sda
#    commands:
#      metrics_info_args: '--info --json -T permissive' # used to determine device unique ID & register device with Scrutiny
#      metrics_smart_args: '--xall --json -T permissive' # used to retrieve smart data for each device.


#log:
#  file: '' #absolute or relative paths allowed, eg. web.log
#  level: INFO
#
#api:
#  endpoint: 'http://localhost:8080'
#  endpoint: 'http://localhost:8080/custombasepath'
#  # if you need to use a custom base path (for a reverse proxy), you can add a suffix to the endpoint.
#  # See docs/TROUBLESHOOTING_REVERSE_PROXY.md for more info.

# example to show how to override the smartctl command args globally
#commands:
#  metrics_smartctl_bin: 'smartctl' # change to provide a custom `smartctl` binary path, eg. `/usr/sbin/smartctl`
#  metrics_scan_args: '--scan --json' # used to detect devices
#  metrics_info_args: '--info --json' # used to determine device unique ID & register device with Scrutiny
#  metrics_smart_args: '--xall --json' # used to retrieve smart data for each device.
#  metrics_smartctl_wait: 0 # time to wait in seconds between each disk's check


########################################################################################################################
# FEATURES COMING SOON
#
# The following commented out sections are a preview of additional configuration options that will be available soon.
#
########################################################################################################################

#collect:
#  long:
#    enable: false
#    command: ''
#  short:
#    enable: false
#    command: ''
+78 -49
@@ -1,6 +1,6 @@

# Commented Scrutiny Configuration File
#
# The default location for this file is ~/scrutiny.yaml.
# The default location for this file is /opt/scrutiny/config/scrutiny.yaml.
# In some cases to improve clarity default values are specified,
# uncommented. Other example values are commented out.
#
@@ -20,59 +20,88 @@ web:
  listen:
    port: 8080
    host: 0.0.0.0

    # if you're using a reverse proxy like apache/nginx, you can override this value to serve scrutiny on a subpath.
    # eg. http://example.com/scrutiny/* vs http://example.com:8080
    # see docs/TROUBLESHOOTING_REVERSE_PROXY.md
    # basepath: `/scrutiny`
    # leave empty unless behind a path prefixed proxy
    basepath: ''
  database:
    # can also set absolute path here
    location: ./scrutiny.db
    location: /opt/scrutiny/config/scrutiny.db
  src:
    # the location on the filesystem where scrutiny javascript + css is located
    frontend:
      path: ./dist
      path: /opt/scrutiny/web

  disks:
    include:
    # - /dev/sda
    exclude:
    # - /dev/sdb
  # if you're running influxdb on a different host (or using a cloud-provider) you'll need to update the host & port below.
  # token, org, bucket are unnecessary for a new InfluxDB installation, as Scrutiny will automatically run the InfluxDB setup,
  # and store the information in the config file. If you're re-using an existing influxdb installation, you'll need to provide
  # the `token`
  influxdb:
    # scheme: 'http'
    host: 0.0.0.0
    port: 8086
    # token: 'my-token'
    # org: 'my-org'
    # bucket: 'bucket'
    retention_policy: true
    # if you wish to disable TLS certificate verification,
    # when using self-signed certificates for example,
    # then uncomment the lines below and set `insecure_skip_verify: true`
    # tls:
    #   insecure_skip_verify: false

notify:
  urls:
    - "discord://token@channel"
    - "telegram://token@telegram?channels=channel-1[,channel-2,...]"
    - "pushover://shoutrrr:apiToken@userKey/?devices=device1[,device2, ...]"
    - "slack://[botname@]token-a/token-b/token-c"
    - "smtp://username:password@host:port/?fromAddress=fromAddress&toAddresses=recipient1[,recipient2,...]"
    - "teams://token-a/token-b/token-c"
    - "gotify://gotify-host/token"
    - "pushbullet://api-token[/device/#channel/email]"
    - "ifttt://key/?events=event1[,event2,...]&value1=value1&value2=value2&value3=value3"
    - "mattermost://[username@]mattermost-host/token[/channel]"
    - "hangouts://chat.googleapis.com/v1/spaces/FOO/messages?key=bar&token=baz"
    - "zulip://bot-mail:bot-key@zulip-domain/?stream=name-or-id&topic=name"
    - "join://shoutrrr:api-key@join/?devices=device1[,device2, ...][&icon=icon][&title=title]"
    - "script:///file/path/on/disk"
    - "https://www.example.com/path"

limits:
  ata:
    critical:
      error: 10
    standard:
      error: 20
      warn: 10
  scsi:
    critical: true
    standard: true
  nvme:
    critical: true
    standard: true
log:
  file: '' #absolute or relative paths allowed, eg. web.log
  level: INFO


collect:
  metric:
    enable: true
    command: '-a -o on -S on'
  long:
    enable: false
    command: ''
  short:
    enable: false
    command: ''
# Notification "urls" look like the following. For more information about service specific configuration see
# Shoutrrr's documentation: https://containrrr.dev/shoutrrr/services/overview/
#
# note, usernames and passwords containing special characters will need to be urlencoded.
# if your username is: "myname@example.com" and your password is "124@34$1"
# your shoutrrr url will look like: "smtp://myname%40example%2Ecom:124%4034%241@ms.my.domain.com:587"

#notify:
#  urls:
#    - "discord://token@webhookid"
#    - "telegram://token@telegram?channels=channel-1[,channel-2,...]"
#    - "pushover://shoutrrr:apiToken@userKey/?priority=1&devices=device1[,device2, ...]"
#    - "slack://[botname@]token-a/token-b/token-c"
#    - "smtp://username:password@host:port/?fromAddress=fromAddress&toAddresses=recipient1[,recipient2,...]"
#    - "teams://token-a/token-b/token-c"
#    - "gotify://gotify-host/token"
#    - "pushbullet://api-token[/device/#channel/email]"
#    - "ifttt://key/?events=event1[,event2,...]&value1=value1&value2=value2&value3=value3"
#    - "mattermost://[username@]mattermost-host/token[/channel]"
#    - "ntfy://username:password@host:port/topic"
#    - "hangouts://chat.googleapis.com/v1/spaces/FOO/messages?key=bar&token=baz"
#    - "zulip://bot-mail:bot-key@zulip-domain/?stream=name-or-id&topic=name"
#    - "join://shoutrrr:api-key@join/?devices=device1[,device2, ...][&icon=icon][&title=title]"
#    - "script:///file/path/on/disk"
#    - "https://www.example.com/path"

########################################################################################################################
# FEATURES COMING SOON
#
# The following commented out sections are a preview of additional configuration options that will be available soon.
#
########################################################################################################################

#limits:
#  ata:
#    critical:
#      error: 10
#    standard:
#      error: 20
#      warn: 10
#  scsi:
#    critical: true
#    standard: true
#  nvme:
#    critical: true
#    standard: true

@@ -1,22 +1,81 @@

module github.com/analogj/scrutiny

go 1.13
go 1.20

require (
	github.com/AnalogJ/go-util v0.0.0-20200905200945-3b93d31215ae // indirect
	github.com/analogj/go-util v0.0.0-20190301173314-5295e364eb14
	github.com/containrrr/shoutrrr v0.0.0-20200828202222-1da53231b05a
	github.com/denisbrodbeck/machineid v1.0.1
	github.com/fatih/color v1.9.0
	github.com/containrrr/shoutrrr v0.8.0
	github.com/fatih/color v1.15.0
	github.com/gin-gonic/gin v1.6.3
	github.com/golang/mock v1.4.3
	github.com/glebarez/sqlite v1.4.5
	github.com/go-gormigrate/gormigrate/v2 v2.0.0
	github.com/golang/mock v1.6.0
	github.com/influxdata/influxdb-client-go/v2 v2.9.0
	github.com/jaypipes/ghw v0.6.1
	github.com/jinzhu/gorm v1.9.14
	github.com/kvz/logstreamer v0.0.0-20150507115422-a635b98146f0 // indirect
	github.com/sirupsen/logrus v1.2.0
	github.com/spf13/viper v1.7.0
	github.com/stretchr/testify v1.5.1
	github.com/mitchellh/mapstructure v1.5.0
	github.com/samber/lo v1.25.0
	github.com/sirupsen/logrus v1.6.0
	github.com/spf13/viper v1.15.0
	github.com/stretchr/testify v1.8.1
	github.com/urfave/cli/v2 v2.2.0
	golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59 // indirect
	gopkg.in/yaml.v2 v2.3.0 // indirect
	golang.org/x/sync v0.1.0
	gorm.io/gorm v1.23.5
)

require (
	github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect
	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/deepmap/oapi-codegen v1.8.2 // indirect
	github.com/fsnotify/fsnotify v1.6.0 // indirect
	github.com/ghodss/yaml v1.0.0 // indirect
	github.com/gin-contrib/sse v0.1.0 // indirect
	github.com/glebarez/go-sqlite v1.17.2 // indirect
	github.com/go-ole/go-ole v1.2.4 // indirect
	github.com/go-playground/locales v0.13.0 // indirect
	github.com/go-playground/universal-translator v0.17.0 // indirect
	github.com/go-playground/validator/v10 v10.2.0 // indirect
	github.com/golang/protobuf v1.5.3 // indirect
	github.com/google/uuid v1.3.0 // indirect
	github.com/hashicorp/hcl v1.0.0 // indirect
	github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect
	github.com/jaypipes/pcidb v0.5.0 // indirect
	github.com/jinzhu/inflection v1.0.0 // indirect
	github.com/jinzhu/now v1.1.4 // indirect
	github.com/json-iterator/go v1.1.12 // indirect
	github.com/konsorten/go-windows-terminal-sequences v1.0.3 // indirect
	github.com/kvz/logstreamer v0.0.0-20201023134116-02d20f4338f5 // indirect
	github.com/leodido/go-urn v1.2.0 // indirect
	github.com/magiconair/properties v1.8.7 // indirect
	github.com/mattn/go-colorable v0.1.13 // indirect
	github.com/mattn/go-isatty v0.0.18 // indirect
	github.com/mitchellh/go-homedir v1.1.0 // indirect
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
	github.com/modern-go/reflect2 v1.0.2 // indirect
	github.com/pelletier/go-toml/v2 v2.0.6 // indirect
	github.com/pkg/errors v0.9.1 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
	github.com/russross/blackfriday/v2 v2.1.0 // indirect
	github.com/spf13/afero v1.9.3 // indirect
	github.com/spf13/cast v1.5.0 // indirect
	github.com/spf13/jwalterweatherman v1.1.0 // indirect
	github.com/spf13/pflag v1.0.5 // indirect
	github.com/subosito/gotenv v1.4.2 // indirect
	github.com/ugorji/go/codec v1.1.7 // indirect
	golang.org/x/crypto v0.1.0 // indirect
	golang.org/x/exp v0.0.0-20220303212507-bbda1eaf7a17 // indirect
	golang.org/x/net v0.8.0 // indirect
	golang.org/x/sys v0.7.0 // indirect
	golang.org/x/term v0.6.0 // indirect
	golang.org/x/text v0.8.0 // indirect
	google.golang.org/protobuf v1.28.1 // indirect
	gopkg.in/ini.v1 v1.67.0 // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
	howett.net/plist v0.0.0-20181124034731-591f970eefbb // indirect
	modernc.org/libc v1.16.8 // indirect
	modernc.org/mathutil v1.4.1 // indirect
	modernc.org/memory v1.1.1 // indirect
	modernc.org/sqlite v1.17.2 // indirect
)

+2 -14
@@ -1,18 +1,6 @@

---
engine_enable_code_mutation: true
engine_cmd_compile:
  - go build -ldflags '-w -extldflags "-static"' -o scrutiny webapp/backend/cmd/scrutiny/scrutiny.go
  - 'GOOS=linux GOARCH=amd64 go build -ldflags "-X main.goos=linux -X main.goarch=amd64" -o scrutiny-web-linux-amd64 -tags "static" webapp/backend/cmd/scrutiny/scrutiny.go'
  - 'chmod +x scrutiny-web-linux-amd64'
  - 'GOOS=linux GOARCH=amd64 go build -ldflags "-X main.goos=linux -X main.goarch=amd64" -o scrutiny-collector-metrics-linux-amd64 -tags "static" collector/cmd/collector-metrics/collector-metrics.go'
  - 'chmod +x scrutiny-collector-metrics-linux-amd64'
mgr_keep_lock_file: true
engine_version_metadata_path: 'webapp/backend/pkg/version/version.go'
engine_cmd_test: 'go test -v -tags "static" $(go list ./... | grep -v /vendor/)'
engine_golang_package_path: 'github.com/analogj/scrutiny'
scm_enable_branch_cleanup: true
engine_disable_lint: true
scm_release_assets:
  - local_path: scrutiny-web-linux-amd64
    artifact_name: scrutiny-web-linux-amd64
  - local_path: scrutiny-collector-metrics-linux-amd64
    artifact_name: scrutiny-collector-metrics-linux-amd64

@@ -0,0 +1,7 @@

#!/command/with-contenv bash

if [ -n "${TZ}" ]
then
  ln -snf "/usr/share/zoneinfo/${TZ}" /etc/localtime
  echo "${TZ}" > /etc/timezone
fi
@@ -0,0 +1,15 @@

#!/command/with-contenv bash

# Cron runs in its own isolated environment (usually using only /etc/environment)
# So when the container starts up, we will do a dump of the runtime environment into a .env file that we
# will then source into the crontab file (/etc/cron.d/scrutiny)
(set -o posix; export -p) > /env.sh

# adding the ability to customize the cron schedule.
COLLECTOR_CRON_SCHEDULE=${COLLECTOR_CRON_SCHEDULE:-"0 0 * * *"}

# if the cron schedule has been overridden via env variable (eg docker-compose) we should make sure to strip quotes
[[ "${COLLECTOR_CRON_SCHEDULE}" == \"*\" || "${COLLECTOR_CRON_SCHEDULE}" == \'*\' ]] && COLLECTOR_CRON_SCHEDULE="${COLLECTOR_CRON_SCHEDULE:1:-1}"

# replace placeholder with correct value
sed -i 's|{COLLECTOR_CRON_SCHEDULE}|'"${COLLECTOR_CRON_SCHEDULE}"'|g' /etc/cron.d/scrutiny
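This script is what makes the collector schedule configurable at runtime; for example, an illustrative override that runs the collector every 15 minutes instead of daily:

```bash
docker run -d \
  -e COLLECTOR_CRON_SCHEDULE="*/15 * * * *" \
  --name scrutiny \
  ghcr.io/analogj/scrutiny:master-omnibus
```
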
@@ -0,0 +1,15 @@

MAILTO=""
# Example of job definition:
# .---------------- minute (0 - 59)
# |  .------------- hour (0 - 23)
# |  |  .---------- day of month (1 - 31)
# |  |  |  .------- month (1 - 12) OR jan,feb,mar,apr ...
# |  |  |  |  .---- day of week (0 - 6) (Sunday=0 or 7) OR sun,mon,tue,wed,thu,fri,sat
# |  |  |  |  |
# *  *  *  *  * user-name command to be executed

# correctly route collector logs (STDOUT & STDERR) to Cron foreground (collectable by Docker STDOUT)
# cron schedule to run daily at midnight: '0 0 * * *'
# System environmental variables are stripped by cron, source our dump of the docker environmental variables before each command (/env.sh)
{COLLECTOR_CRON_SCHEDULE} root . /env.sh; /opt/scrutiny/bin/scrutiny-collector-metrics run >/proc/1/fd/1 2>/proc/1/fd/2
# An empty line is required at the end of this file for a valid cron file.
@@ -0,0 +1,25 @@

#!/command/with-contenv bash

# ensure it has not run (successfully) before
if [ -f /tmp/custom-init-performed ]; then
  echo 'INFO: custom init already performed'
  s6-svc -D /run/service/collector-once # prevent s6 from restarting service
  exit 0
fi

echo "waiting for scrutiny service to start"
s6-svwait -u /run/service/scrutiny

# wait until scrutiny is "Ready"
until $(curl --output /dev/null --silent --head --fail http://localhost:8080/api/health); do echo "scrutiny api not ready" && sleep 5; done

echo "starting scrutiny collector (run-once mode. subsequent calls will be triggered via cron service)"
/opt/scrutiny/bin/scrutiny-collector-metrics run

# prevent script's core logic from running again
touch /tmp/custom-init-performed

# prevent s6 from restarting service
s6-svc -D /run/service/collector-once

exit 0
@@ -0,0 +1,4 @@

#!/command/execlineb -S0

echo "cron exiting"
s6-svscanctl -t /var/run/s6/services

Executable +4
@@ -0,0 +1,4 @@

#!/command/with-contenv bash

echo "starting cron"
cron -f -L 15
@@ -0,0 +1,17 @@

#!/command/with-contenv bash

mkdir -p /opt/scrutiny/influxdb/

if [ -f "/opt/scrutiny/influxdb/config.yaml" ]; then
  echo "influxdb config file already exists. skipping."
else
  cat << 'EOF' > /opt/scrutiny/influxdb/config.yaml
bolt-path: /opt/scrutiny/influxdb/influxd.bolt
engine-path: /opt/scrutiny/influxdb/engine
http-bind-address: ":8086"
reporting-disabled: true
EOF
fi

echo "starting influxdb"
influxd run
@@ -1,4 +0,0 @@

#!/usr/bin/execlineb -S0

echo "jobber/cron exiting"
s6-svscanctl -t /var/run/s6/services

@@ -1,5 +0,0 @@

#!/usr/bin/with-contenv bash

echo "starting jobber/cron"

su -c "/usr/lib/x86_64-linux-gnu/jobberrunner /scrutiny/jobber/jobber.yaml" root

@@ -1,5 +1,7 @@

#!/usr/bin/with-contenv bash
#!/command/with-contenv bash

echo "waiting for influxdb"
until $(curl --output /dev/null --silent --head --fail http://localhost:8086/health); do echo "influxdb not ready" && sleep 5; done

echo "starting scrutiny"

scrutiny start

@@ -1,30 +0,0 @@

version: 1.4

prefs:
  logPath: /scrutiny/jobber/log.log
  runLog:
    type: file
    path: /scrutiny/jobber/runlog
    maxFileLen: 100m
    maxHistories: 2

resultSinks:
  - &filesystemSink
    type: filesystem
    path: /scrutiny/jobber
    data:
      - stdout
      - stderr
    maxAgeDays: 10

jobs:
  MetricsJob:
    cmd: /scrutiny/bin/scrutiny-collector-metrics run
    # run daily at midnight.
    time: '0 0 0 * * *'
    onError: Backoff
    notifyOnSuccess:
      - *filesystemSink
    notifyOnFailure:
      - *filesystemSink

@@ -1,12 +1,15 @@
package main

import (
	"encoding/json"
	"fmt"
	"github.com/analogj/scrutiny/webapp/backend/pkg/config"
	"github.com/analogj/scrutiny/webapp/backend/pkg/errors"
	"github.com/analogj/scrutiny/webapp/backend/pkg/version"
	"github.com/analogj/scrutiny/webapp/backend/pkg/web"
	log "github.com/sirupsen/logrus"
	"github.com/sirupsen/logrus"
	"io"
	"log"
	"os"
	"time"

@@ -26,11 +29,18 @@ func main() {
		os.Exit(1)
	}

	configFilePath := "/opt/scrutiny/config/scrutiny.yaml"
	configFilePathAlternative := "/opt/scrutiny/config/scrutiny.yml"
	if !utils.FileExists(configFilePath) && utils.FileExists(configFilePathAlternative) {
		configFilePath = configFilePathAlternative
	}

	//we're going to load the config file manually, since we need to validate it.
	err = config.ReadConfig("/scrutiny/config/scrutiny.yaml") // Find and read the config file
	if _, ok := err.(errors.ConfigFileMissingError); ok { // Handle errors reading the config file
	err = config.ReadConfig(configFilePath) // Find and read the config file
	if _, ok := err.(errors.ConfigFileMissingError); ok { // Handle errors reading the config file
		//ignore "could not find config file"
	} else if err != nil {
		log.Print(color.HiRedString("CONFIG ERROR: %v", err))
		os.Exit(1)
	}

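The fallback above leans on a utils.FileExists helper that this diff does not show; a minimal sketch of such a check (illustrative only, the repo's actual helper may differ):

package utils

import "os"

// FileExists reports whether a regular file exists at path.
// Illustrative sketch; scrutiny's actual utils helper may differ.
func FileExists(path string) bool {
	info, err := os.Stat(path)
	if err != nil {
		return false // missing or inaccessible
	}
	return !info.IsDir()
}
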
@@ -60,7 +70,7 @@ OPTIONS:
		},
		Before: func(c *cli.Context) error {

			drawbridge := "github.com/AnalogJ/scrutiny"
			scrutiny := "github.com/AnalogJ/scrutiny"

			var versionInfo string
			if len(goos) > 0 && len(goarch) > 0 {
@@ -69,7 +79,7 @@ OPTIONS:
				versionInfo = fmt.Sprintf("dev-%s", version.VERSION)
			}

			subtitle := drawbridge + utils.LeftPad2Len(versionInfo, " ", 65-len(drawbridge))
			subtitle := scrutiny + utils.LeftPad2Len(versionInfo, " ", 65-len(scrutiny))

			color.New(color.FgGreen).Fprintf(c.App.Writer, fmt.Sprintf(utils.StripIndent(
				`
@@ -95,11 +105,30 @@ OPTIONS:
			if err != nil { // Handle errors reading the config file
				//ignore "could not find config file"
				fmt.Printf("Could not find config file at specified path: %s", c.String("config"))
				os.Exit(1)
				return err
			}
		}

		webServer := web.AppEngine{Config: config}
		if c.Bool("debug") {
			config.Set("log.level", "DEBUG")
		}

		if c.IsSet("log-file") {
			config.Set("log.file", c.String("log-file"))
		}

		webLogger, logFile, err := CreateLogger(config)
		if logFile != nil {
			defer logFile.Close()
		}
		if err != nil {
			return err
		}

		settingsData, err := json.Marshal(config.AllSettings())
		webLogger.Debug(string(settingsData), err)

		webServer := web.AppEngine{Config: config, Logger: webLogger}

		return webServer.Start()
	},
@@ -109,6 +138,18 @@ OPTIONS:
				Name:  "config",
				Usage: "Specify the path to the config file",
			},
			&cli.StringFlag{
				Name:    "log-file",
				Usage:   "Path to file for logging. Leave empty to use STDOUT",
				Value:   "",
				EnvVars: []string{"SCRUTINY_LOG_FILE"},
			},

			&cli.BoolFlag{
				Name:    "debug",
				Usage:   "Enable debug logging",
				EnvVars: []string{"SCRUTINY_DEBUG", "DEBUG"},
			},
		},
	},
},
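These flags rely on urfave/cli's EnvVars binding, which is why SCRUTINY_LOG_FILE and SCRUTINY_DEBUG work interchangeably with the CLI options; a minimal standalone sketch of the same pattern (hypothetical app, not from this diff):

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Name: "flags-demo", // hypothetical; scrutiny wires many more flags
		Flags: []cli.Flag{
			&cli.StringFlag{Name: "log-file", EnvVars: []string{"SCRUTINY_LOG_FILE"}},
			&cli.BoolFlag{Name: "debug", EnvVars: []string{"SCRUTINY_DEBUG", "DEBUG"}},
		},
		Action: func(c *cli.Context) error {
			// c.IsSet reports true whether the value came from the flag or its EnvVars.
			fmt.Println("log-file set:", c.IsSet("log-file"), "debug:", c.Bool("debug"))
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
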
@@ -120,3 +161,27 @@ OPTIONS:
	}

}

func CreateLogger(appConfig config.Interface) (*logrus.Entry, *os.File, error) {
	logger := logrus.WithFields(logrus.Fields{
		"type": "web",
	})
	//set default log level
	if level, err := logrus.ParseLevel(appConfig.GetString("log.level")); err == nil {
		logger.Logger.SetLevel(level)
	} else {
		logger.Logger.SetLevel(logrus.InfoLevel)
	}

	var logFile *os.File
	var err error
	if appConfig.IsSet("log.file") && len(appConfig.GetString("log.file")) > 0 {
		logFile, err = os.OpenFile(appConfig.GetString("log.file"), os.O_CREATE|os.O_WRONLY, 0644)
		if err != nil {
			logger.Logger.Errorf("Failed to open log file %s for output: %s", appConfig.GetString("log.file"), err)
			return nil, logFile, err
		}
		logger.Logger.SetOutput(io.MultiWriter(os.Stderr, logFile))
	}
	return logger, logFile, nil
}

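CreateLogger mirrors output to stderr and the optional log file through io.MultiWriter; a tiny self-contained illustration of that pattern (the file path is hypothetical):

package main

import (
	"io"
	"os"

	"github.com/sirupsen/logrus"
)

func main() {
	logger := logrus.WithFields(logrus.Fields{"type": "web"})

	// Hypothetical path, for illustration only.
	f, err := os.OpenFile("/tmp/example.log", os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		logger.Logger.Fatal(err)
	}
	defer f.Close()

	// Every log line now reaches stderr and the file simultaneously.
	logger.Logger.SetOutput(io.MultiWriter(os.Stderr, f))
	logger.Info("written to both sinks")
}
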
@@ -6,8 +6,11 @@ import (
	"github.com/spf13/viper"
	"log"
	"os"
	"strings"
)

const DB_USER_SETTINGS_SUBKEY = "user"

// When initializing this class the following methods must be called:
// Config.New
// Config.Init
@@ -29,41 +32,65 @@ func (c *configuration) Init() error {
	//set defaults
	c.SetDefault("web.listen.port", "8080")
	c.SetDefault("web.listen.host", "0.0.0.0")
	c.SetDefault("web.src.frontend.path", "/scrutiny/web")
	c.SetDefault("web.listen.basepath", "")
	c.SetDefault("web.src.frontend.path", "/opt/scrutiny/web")
	c.SetDefault("web.database.location", "/opt/scrutiny/config/scrutiny.db")

	c.SetDefault("web.database.location", "/scrutiny/config/scrutiny.db")
	c.SetDefault("log.level", "INFO")
	c.SetDefault("log.file", "")

	c.SetDefault("disks.include", []string{})
	c.SetDefault("disks.exclude", []string{})
	c.SetDefault("notify.urls", []string{})

	c.SetDefault("notify.metric.script", "/scrutiny/config/notify-metrics.sh")
	c.SetDefault("notify.long.script", "/scrutiny/config/notify-long-test.sh")
	c.SetDefault("notify.short.script", "/scrutiny/config/notify-short-test.sh")
	c.SetDefault("web.influxdb.scheme", "http")
	c.SetDefault("web.influxdb.host", "localhost")
	c.SetDefault("web.influxdb.port", "8086")
	c.SetDefault("web.influxdb.org", "scrutiny")
	c.SetDefault("web.influxdb.bucket", "metrics")
	c.SetDefault("web.influxdb.init_username", "admin")
	c.SetDefault("web.influxdb.init_password", "password12345")
	c.SetDefault("web.influxdb.token", "scrutiny-default-admin-token")
	c.SetDefault("web.influxdb.tls.insecure_skip_verify", false)
	c.SetDefault("web.influxdb.retention_policy", true)

	c.SetDefault("collect.metric.enable", true)
	c.SetDefault("collect.metric.command", "-a -o on -S on")
	c.SetDefault("collect.long.enable", true)
	c.SetDefault("collect.long.command", "-a -o on -S on")
	c.SetDefault("collect.short.enable", true)
	c.SetDefault("collect.short.command", "-a -o on -S on")
	//c.SetDefault("disks.include", []string{})
	//c.SetDefault("disks.exclude", []string{})

	//if you want to load a non-standard location system config file (~/drawbridge.yml), use ReadConfig
	c.SetConfigType("yaml")
	//c.SetConfigName("drawbridge")
	//c.AddConfigPath("$HOME/")

	//configure env variable parsing.
	c.SetEnvPrefix("SCRUTINY")
	c.SetEnvKeyReplacer(strings.NewReplacer("-", "_", ".", "_"))
	c.AutomaticEnv()

	//CLI options will be added via the `Set()` function
	return nil
	return c.ValidateConfig()
}

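With SetEnvPrefix plus the key replacer and AutomaticEnv, any config key can be overridden by an upper-cased SCRUTINY_-prefixed environment variable whose dots and dashes become underscores; a small standalone demonstration of the same viper wiring (not code from the repo):

package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	v.SetDefault("web.listen.port", "8080")

	// Same wiring as configuration.Init() above.
	v.SetEnvPrefix("SCRUTINY")
	v.SetEnvKeyReplacer(strings.NewReplacer("-", "_", ".", "_"))
	v.AutomaticEnv()

	// SCRUTINY_WEB_LISTEN_PORT now overrides web.listen.port.
	os.Setenv("SCRUTINY_WEB_LISTEN_PORT", "9090")
	fmt.Println(v.GetString("web.listen.port")) // prints 9090
}
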
func (c *configuration) SubKeys(key string) []string {
	return c.Sub(key).AllKeys()
}

func (c *configuration) Sub(key string) Interface {
	config := configuration{
		Viper: c.Viper.Sub(key),
	}
	return &config
}

func (c *configuration) ReadConfig(configFilePath string) error {
	//make sure that we specify that this is the correct config path (for eventual WriteConfig() calls)
	c.SetConfigFile(configFilePath)

	configFilePath, err := utils.ExpandPath(configFilePath)
	if err != nil {
		return err
	}

	if !utils.FileExists(configFilePath) {
		log.Printf("No configuration file found at %v. Skipping", configFilePath)
		log.Printf("No configuration file found at %v. Using Defaults.", configFilePath)
		return errors.ConfigFileMissingError("The configuration file could not be found.")
	}

@@ -93,24 +120,18 @@ func (c *configuration) ReadConfig(configFilePath string) error {
// This function ensures that the merged config works correctly.
func (c *configuration) ValidateConfig() error {

	////deserialize Questions
	//questionsMap := map[string]Question{}
	//err := c.UnmarshalKey("questions", &questionsMap)
	//
	//if err != nil {
	//	log.Printf("questions could not be deserialized correctly. %v", err)
	//	return err
	//}
	//
	//for _, v := range questionsMap {
	//
	//	typeContent, ok := v.Schema["type"].(string)
	//	if !ok || len(typeContent) == 0 {
	//		return errors.QuestionSyntaxError("`type` is required for questions")
	//	}
	//}
	//
	//
	//the following keys are deprecated, and no longer supported
	/*
		- notify.filter_attributes (replaced by metrics.status.filter_attributes SETTING)
		- notify.level (replaced by metrics.notify.level and metrics.status.threshold SETTING)
	*/
	//TODO add docs and upgrade doc.
	if c.IsSet("notify.filter_attributes") {
		return errors.ConfigValidationError("`notify.filter_attributes` configuration option is deprecated. Replaced by option in Dashboard Settings page")
	}
	if c.IsSet("notify.level") {
		return errors.ConfigValidationError("`notify.level` configuration option is deprecated. Replaced by option in Dashboard Settings page")
	}

	return nil
}

@@ -0,0 +1,34 @@
package config

import (
	"github.com/spf13/viper"
	"github.com/stretchr/testify/require"
	"testing"
)

func Test_MergeConfigMap(t *testing.T) {
	//setup
	testConfig := configuration{
		Viper: viper.New(),
	}
	testConfig.Set("user.dashboard_display", "hello")
	testConfig.SetDefault("user.layout", "hello")

	mergeSettings := map[string]interface{}{
		"user": map[string]interface{}{
			"dashboard_display": "dashboard_display",
			"layout":            "layout",
		},
	}
	//test
	err := testConfig.MergeConfigMap(mergeSettings)

	//verify
	require.NoError(t, err)

	// if using Set, the MergeConfigMap functionality will not override
	// if using SetDefault, the MergeConfigMap will override correctly
	require.Equal(t, "hello", testConfig.GetString("user.dashboard_display"))
	require.Equal(t, "layout", testConfig.GetString("user.layout"))

}

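The test hinges on viper's precedence order: explicit Set beats values merged via MergeConfigMap, which in turn beat SetDefault; a minimal standalone check of that ordering (not part of this diff):

package main

import (
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	v.Set("a", "set")            // explicit override: highest precedence
	v.SetDefault("b", "default") // default: lowest precedence

	// Merged settings sit between Set and SetDefault.
	_ = v.MergeConfigMap(map[string]interface{}{"a": "merged", "b": "merged"})

	fmt.Println(v.GetString("a")) // "set"    - Set wins over merged values
	fmt.Println(v.GetString("b")) // "merged" - merged values win over defaults
}
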
Some files were not shown because too many files have changed in this diff.