From e37591ce6d1386807315f6a9ca594aff81aa0bb0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Dec 2025 08:02:41 +0000 Subject: [PATCH 01/23] Bump rollup from 4.53.3 to 4.54.0 in /web/frontend Bumps [rollup](https://github.com/rollup/rollup) from 4.53.3 to 4.54.0. - [Release notes](https://github.com/rollup/rollup/releases) - [Changelog](https://github.com/rollup/rollup/blob/master/CHANGELOG.md) - [Commits](https://github.com/rollup/rollup/compare/v4.53.3...v4.54.0) --- updated-dependencies: - dependency-name: rollup dependency-version: 4.54.0 dependency-type: direct:development update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- web/frontend/package-lock.json | 189 +++++++++++++++++---------------- web/frontend/package.json | 2 +- 2 files changed, 98 insertions(+), 93 deletions(-) diff --git a/web/frontend/package-lock.json b/web/frontend/package-lock.json index 4c7e4bf5..c9769a59 100644 --- a/web/frontend/package-lock.json +++ b/web/frontend/package-lock.json @@ -24,7 +24,7 @@ "@rollup/plugin-node-resolve": "^16.0.1", "@rollup/plugin-terser": "^0.4.4", "@timohausmann/quadtree-js": "^1.2.6", - "rollup": "^4.53.3", + "rollup": "^4.54.0", "rollup-plugin-css-only": "^4.5.5", "rollup-plugin-svelte": "^7.2.3", "svelte": "^5.44.0" @@ -244,9 +244,9 @@ } }, "node_modules/@rollup/rollup-android-arm-eabi": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.53.3.tgz", - "integrity": "sha512-mRSi+4cBjrRLoaal2PnqH82Wqyb+d3HsPUN/W+WslCXsZsyHa9ZeQQX/pQsZaVIWDkPcpV6jJ+3KLbTbgnwv8w==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.54.0.tgz", + "integrity": "sha512-OywsdRHrFvCdvsewAInDKCNyR3laPA2mc9bRYJ6LBp5IyvF3fvXbbNR0bSzHlZVFtn6E0xw2oZlyjg4rKCVcng==", "cpu": [ "arm" ], @@ -258,9 +258,9 @@ ] }, "node_modules/@rollup/rollup-android-arm64": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.53.3.tgz", - "integrity": "sha512-CbDGaMpdE9sh7sCmTrTUyllhrg65t6SwhjlMJsLr+J8YjFuPmCEjbBSx4Z/e4SmDyH3aB5hGaJUP2ltV/vcs4w==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.54.0.tgz", + "integrity": "sha512-Skx39Uv+u7H224Af+bDgNinitlmHyQX1K/atIA32JP3JQw6hVODX5tkbi2zof/E69M1qH2UoN3Xdxgs90mmNYw==", "cpu": [ "arm64" ], @@ -272,9 +272,9 @@ ] }, "node_modules/@rollup/rollup-darwin-arm64": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.53.3.tgz", - "integrity": "sha512-Nr7SlQeqIBpOV6BHHGZgYBuSdanCXuw09hon14MGOLGmXAFYjx1wNvquVPmpZnl0tLjg25dEdr4IQ6GgyToCUA==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.54.0.tgz", + "integrity": "sha512-k43D4qta/+6Fq+nCDhhv9yP2HdeKeP56QrUUTW7E6PhZP1US6NDqpJj4MY0jBHlJivVJD5P8NxrjuobZBJTCRw==", "cpu": [ "arm64" ], @@ -286,9 +286,9 @@ ] }, "node_modules/@rollup/rollup-darwin-x64": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.53.3.tgz", - "integrity": "sha512-DZ8N4CSNfl965CmPktJ8oBnfYr3F8dTTNBQkRlffnUarJ2ohudQD17sZBa097J8xhQ26AwhHJ5mvUyQW8ddTsQ==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.54.0.tgz", + "integrity": 
"sha512-cOo7biqwkpawslEfox5Vs8/qj83M/aZCSSNIWpVzfU2CYHa2G3P1UN5WF01RdTHSgCkri7XOlTdtk17BezlV3A==", "cpu": [ "x64" ], @@ -300,9 +300,9 @@ ] }, "node_modules/@rollup/rollup-freebsd-arm64": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.53.3.tgz", - "integrity": "sha512-yMTrCrK92aGyi7GuDNtGn2sNW+Gdb4vErx4t3Gv/Tr+1zRb8ax4z8GWVRfr3Jw8zJWvpGHNpss3vVlbF58DZ4w==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.54.0.tgz", + "integrity": "sha512-miSvuFkmvFbgJ1BevMa4CPCFt5MPGw094knM64W9I0giUIMMmRYcGW/JWZDriaw/k1kOBtsWh1z6nIFV1vPNtA==", "cpu": [ "arm64" ], @@ -314,9 +314,9 @@ ] }, "node_modules/@rollup/rollup-freebsd-x64": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.53.3.tgz", - "integrity": "sha512-lMfF8X7QhdQzseM6XaX0vbno2m3hlyZFhwcndRMw8fbAGUGL3WFMBdK0hbUBIUYcEcMhVLr1SIamDeuLBnXS+Q==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.54.0.tgz", + "integrity": "sha512-KGXIs55+b/ZfZsq9aR026tmr/+7tq6VG6MsnrvF4H8VhwflTIuYh+LFUlIsRdQSgrgmtM3fVATzEAj4hBQlaqQ==", "cpu": [ "x64" ], @@ -328,9 +328,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm-gnueabihf": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.53.3.tgz", - "integrity": "sha512-k9oD15soC/Ln6d2Wv/JOFPzZXIAIFLp6B+i14KhxAfnq76ajt0EhYc5YPeX6W1xJkAdItcVT+JhKl1QZh44/qw==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.54.0.tgz", + "integrity": "sha512-EHMUcDwhtdRGlXZsGSIuXSYwD5kOT9NVnx9sqzYiwAc91wfYOE1g1djOEDseZJKKqtHAHGwnGPQu3kytmfaXLQ==", "cpu": [ "arm" ], @@ -342,9 +342,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm-musleabihf": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.53.3.tgz", - "integrity": "sha512-vTNlKq+N6CK/8UktsrFuc+/7NlEYVxgaEgRXVUVK258Z5ymho29skzW1sutgYjqNnquGwVUObAaxae8rZ6YMhg==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.54.0.tgz", + "integrity": "sha512-+pBrqEjaakN2ySv5RVrj/qLytYhPKEUwk+e3SFU5jTLHIcAtqh2rLrd/OkbNuHJpsBgxsD8ccJt5ga/SeG0JmA==", "cpu": [ "arm" ], @@ -356,9 +356,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm64-gnu": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.53.3.tgz", - "integrity": "sha512-RGrFLWgMhSxRs/EWJMIFM1O5Mzuz3Xy3/mnxJp/5cVhZ2XoCAxJnmNsEyeMJtpK+wu0FJFWz+QF4mjCA7AUQ3w==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.54.0.tgz", + "integrity": "sha512-NSqc7rE9wuUaRBsBp5ckQ5CVz5aIRKCwsoa6WMF7G01sX3/qHUw/z4pv+D+ahL1EIKy6Enpcnz1RY8pf7bjwng==", "cpu": [ "arm64" ], @@ -370,9 +370,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm64-musl": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.53.3.tgz", - "integrity": "sha512-kASyvfBEWYPEwe0Qv4nfu6pNkITLTb32p4yTgzFCocHnJLAHs+9LjUu9ONIhvfT/5lv4YS5muBHyuV84epBo/A==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.54.0.tgz", + "integrity": 
"sha512-gr5vDbg3Bakga5kbdpqx81m2n9IX8M6gIMlQQIXiLTNeQW6CucvuInJ91EuCJ/JYvc+rcLLsDFcfAD1K7fMofg==", "cpu": [ "arm64" ], @@ -384,9 +384,9 @@ ] }, "node_modules/@rollup/rollup-linux-loong64-gnu": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.53.3.tgz", - "integrity": "sha512-JiuKcp2teLJwQ7vkJ95EwESWkNRFJD7TQgYmCnrPtlu50b4XvT5MOmurWNrCj3IFdyjBQ5p9vnrX4JM6I8OE7g==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.54.0.tgz", + "integrity": "sha512-gsrtB1NA3ZYj2vq0Rzkylo9ylCtW/PhpLEivlgWe0bpgtX5+9j9EZa0wtZiCjgu6zmSeZWyI/e2YRX1URozpIw==", "cpu": [ "loong64" ], @@ -398,9 +398,9 @@ ] }, "node_modules/@rollup/rollup-linux-ppc64-gnu": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.53.3.tgz", - "integrity": "sha512-EoGSa8nd6d3T7zLuqdojxC20oBfNT8nexBbB/rkxgKj5T5vhpAQKKnD+h3UkoMuTyXkP5jTjK/ccNRmQrPNDuw==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.54.0.tgz", + "integrity": "sha512-y3qNOfTBStmFNq+t4s7Tmc9hW2ENtPg8FeUD/VShI7rKxNW7O4fFeaYbMsd3tpFlIg1Q8IapFgy7Q9i2BqeBvA==", "cpu": [ "ppc64" ], @@ -412,9 +412,9 @@ ] }, "node_modules/@rollup/rollup-linux-riscv64-gnu": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.53.3.tgz", - "integrity": "sha512-4s+Wped2IHXHPnAEbIB0YWBv7SDohqxobiiPA1FIWZpX+w9o2i4LezzH/NkFUl8LRci/8udci6cLq+jJQlh+0g==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.54.0.tgz", + "integrity": "sha512-89sepv7h2lIVPsFma8iwmccN7Yjjtgz0Rj/Ou6fEqg3HDhpCa+Et+YSufy27i6b0Wav69Qv4WBNl3Rs6pwhebQ==", "cpu": [ "riscv64" ], @@ -426,9 +426,9 @@ ] }, "node_modules/@rollup/rollup-linux-riscv64-musl": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.53.3.tgz", - "integrity": "sha512-68k2g7+0vs2u9CxDt5ktXTngsxOQkSEV/xBbwlqYcUrAVh6P9EgMZvFsnHy4SEiUl46Xf0IObWVbMvPrr2gw8A==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.54.0.tgz", + "integrity": "sha512-ZcU77ieh0M2Q8Ur7D5X7KvK+UxbXeDHwiOt/CPSBTI1fBmeDMivW0dPkdqkT4rOgDjrDDBUed9x4EgraIKoR2A==", "cpu": [ "riscv64" ], @@ -440,9 +440,9 @@ ] }, "node_modules/@rollup/rollup-linux-s390x-gnu": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.53.3.tgz", - "integrity": "sha512-VYsFMpULAz87ZW6BVYw3I6sWesGpsP9OPcyKe8ofdg9LHxSbRMd7zrVrr5xi/3kMZtpWL/wC+UIJWJYVX5uTKg==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.54.0.tgz", + "integrity": "sha512-2AdWy5RdDF5+4YfG/YesGDDtbyJlC9LHmL6rZw6FurBJ5n4vFGupsOBGfwMRjBYH7qRQowT8D/U4LoSvVwOhSQ==", "cpu": [ "s390x" ], @@ -454,9 +454,9 @@ ] }, "node_modules/@rollup/rollup-linux-x64-gnu": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.53.3.tgz", - "integrity": "sha512-3EhFi1FU6YL8HTUJZ51imGJWEX//ajQPfqWLI3BQq4TlvHy4X0MOr5q3D2Zof/ka0d5FNdPwZXm3Yyib/UEd+w==", + "version": "4.54.0", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.54.0.tgz", + "integrity": "sha512-WGt5J8Ij/rvyqpFexxk3ffKqqbLf9AqrTBbWDk7ApGUzaIs6V+s2s84kAxklFwmMF/vBNGrVdYgbblCOFFezMQ==", "cpu": [ "x64" ], @@ -468,9 +468,9 @@ ] }, "node_modules/@rollup/rollup-linux-x64-musl": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.53.3.tgz", - "integrity": "sha512-eoROhjcc6HbZCJr+tvVT8X4fW3/5g/WkGvvmwz/88sDtSJzO7r/blvoBDgISDiCjDRZmHpwud7h+6Q9JxFwq1Q==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.54.0.tgz", + "integrity": "sha512-JzQmb38ATzHjxlPHuTH6tE7ojnMKM2kYNzt44LO/jJi8BpceEC8QuXYA908n8r3CNuG/B3BV8VR3Hi1rYtmPiw==", "cpu": [ "x64" ], @@ -482,9 +482,9 @@ ] }, "node_modules/@rollup/rollup-openharmony-arm64": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.53.3.tgz", - "integrity": "sha512-OueLAWgrNSPGAdUdIjSWXw+u/02BRTcnfw9PN41D2vq/JSEPnJnVuBgw18VkN8wcd4fjUs+jFHVM4t9+kBSNLw==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.54.0.tgz", + "integrity": "sha512-huT3fd0iC7jigGh7n3q/+lfPcXxBi+om/Rs3yiFxjvSxbSB6aohDFXbWvlspaqjeOh+hx7DDHS+5Es5qRkWkZg==", "cpu": [ "arm64" ], @@ -496,9 +496,9 @@ ] }, "node_modules/@rollup/rollup-win32-arm64-msvc": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.53.3.tgz", - "integrity": "sha512-GOFuKpsxR/whszbF/bzydebLiXIHSgsEUp6M0JI8dWvi+fFa1TD6YQa4aSZHtpmh2/uAlj/Dy+nmby3TJ3pkTw==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.54.0.tgz", + "integrity": "sha512-c2V0W1bsKIKfbLMBu/WGBz6Yci8nJ/ZJdheE0EwB73N3MvHYKiKGs3mVilX4Gs70eGeDaMqEob25Tw2Gb9Nqyw==", "cpu": [ "arm64" ], @@ -510,9 +510,9 @@ ] }, "node_modules/@rollup/rollup-win32-ia32-msvc": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.53.3.tgz", - "integrity": "sha512-iah+THLcBJdpfZ1TstDFbKNznlzoxa8fmnFYK4V67HvmuNYkVdAywJSoteUszvBQ9/HqN2+9AZghbajMsFT+oA==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.54.0.tgz", + "integrity": "sha512-woEHgqQqDCkAzrDhvDipnSirm5vxUXtSKDYTVpZG3nUdW/VVB5VdCYA2iReSj/u3yCZzXID4kuKG7OynPnB3WQ==", "cpu": [ "ia32" ], @@ -524,9 +524,9 @@ ] }, "node_modules/@rollup/rollup-win32-x64-gnu": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.53.3.tgz", - "integrity": "sha512-J9QDiOIZlZLdcot5NXEepDkstocktoVjkaKUtqzgzpt2yWjGlbYiKyp05rWwk4nypbYUNoFAztEgixoLaSETkg==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.54.0.tgz", + "integrity": "sha512-dzAc53LOuFvHwbCEOS0rPbXp6SIhAf2txMP5p6mGyOXXw5mWY8NGGbPMPrs4P1WItkfApDathBj/NzMLUZ9rtQ==", "cpu": [ "x64" ], @@ -538,9 +538,9 @@ ] }, "node_modules/@rollup/rollup-win32-x64-msvc": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.53.3.tgz", - "integrity": "sha512-UhTd8u31dXadv0MopwGgNOBpUVROFKWVQgAg5N1ESyCz8AuBcMqm4AuTjrwgQKGDfoFuz02EuMRHQIw/frmYKQ==", + "version": "4.54.0", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.54.0.tgz", + "integrity": "sha512-hYT5d3YNdSh3mbCU1gwQyPgQd3T2ne0A3KG8KSBdav5TiBg6eInVmV+TeR5uHufiIgSFg0XsOWGW5/RhNcSvPg==", "cpu": [ "x64" ], @@ -621,6 +621,7 @@ "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", "license": "MIT", + "peer": true, "bin": { "acorn": "bin/acorn" }, @@ -821,6 +822,7 @@ "resolved": "https://registry.npmjs.org/graphql/-/graphql-16.12.0.tgz", "integrity": "sha512-DKKrynuQRne0PNpEbzuEdHlYOMksHSUI8Zc9Unei5gTsMNA2/vMpoMz/yKba50pejK56qj98qM0SjYxAKi13gQ==", "license": "MIT", + "peer": true, "engines": { "node": "^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0" } @@ -927,6 +929,7 @@ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, @@ -976,11 +979,12 @@ } }, "node_modules/rollup": { - "version": "4.53.3", - "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.53.3.tgz", - "integrity": "sha512-w8GmOxZfBmKknvdXU1sdM9NHcoQejwF/4mNgj2JuEEdRaHwwF12K7e9eXn1nLZ07ad+du76mkVsyeb2rKGllsA==", + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.54.0.tgz", + "integrity": "sha512-3nk8Y3a9Ea8szgKhinMlGMhGMw89mqule3KWczxhIzqudyHdCIOHw8WJlj/r329fACjKLEh13ZSk7oE22kyeIw==", "devOptional": true, "license": "MIT", + "peer": true, "dependencies": { "@types/estree": "1.0.8" }, @@ -992,28 +996,28 @@ "npm": ">=8.0.0" }, "optionalDependencies": { - "@rollup/rollup-android-arm-eabi": "4.53.3", - "@rollup/rollup-android-arm64": "4.53.3", - "@rollup/rollup-darwin-arm64": "4.53.3", - "@rollup/rollup-darwin-x64": "4.53.3", - "@rollup/rollup-freebsd-arm64": "4.53.3", - "@rollup/rollup-freebsd-x64": "4.53.3", - "@rollup/rollup-linux-arm-gnueabihf": "4.53.3", - "@rollup/rollup-linux-arm-musleabihf": "4.53.3", - "@rollup/rollup-linux-arm64-gnu": "4.53.3", - "@rollup/rollup-linux-arm64-musl": "4.53.3", - "@rollup/rollup-linux-loong64-gnu": "4.53.3", - "@rollup/rollup-linux-ppc64-gnu": "4.53.3", - "@rollup/rollup-linux-riscv64-gnu": "4.53.3", - "@rollup/rollup-linux-riscv64-musl": "4.53.3", - "@rollup/rollup-linux-s390x-gnu": "4.53.3", - "@rollup/rollup-linux-x64-gnu": "4.53.3", - "@rollup/rollup-linux-x64-musl": "4.53.3", - "@rollup/rollup-openharmony-arm64": "4.53.3", - "@rollup/rollup-win32-arm64-msvc": "4.53.3", - "@rollup/rollup-win32-ia32-msvc": "4.53.3", - "@rollup/rollup-win32-x64-gnu": "4.53.3", - "@rollup/rollup-win32-x64-msvc": "4.53.3", + "@rollup/rollup-android-arm-eabi": "4.54.0", + "@rollup/rollup-android-arm64": "4.54.0", + "@rollup/rollup-darwin-arm64": "4.54.0", + "@rollup/rollup-darwin-x64": "4.54.0", + "@rollup/rollup-freebsd-arm64": "4.54.0", + "@rollup/rollup-freebsd-x64": "4.54.0", + "@rollup/rollup-linux-arm-gnueabihf": "4.54.0", + "@rollup/rollup-linux-arm-musleabihf": "4.54.0", + "@rollup/rollup-linux-arm64-gnu": "4.54.0", + "@rollup/rollup-linux-arm64-musl": "4.54.0", + "@rollup/rollup-linux-loong64-gnu": "4.54.0", + "@rollup/rollup-linux-ppc64-gnu": "4.54.0", + "@rollup/rollup-linux-riscv64-gnu": "4.54.0", + "@rollup/rollup-linux-riscv64-musl": "4.54.0", + "@rollup/rollup-linux-s390x-gnu": "4.54.0", + "@rollup/rollup-linux-x64-gnu": "4.54.0", + "@rollup/rollup-linux-x64-musl": "4.54.0", + "@rollup/rollup-openharmony-arm64": "4.54.0", + 
"@rollup/rollup-win32-arm64-msvc": "4.54.0", + "@rollup/rollup-win32-ia32-msvc": "4.54.0", + "@rollup/rollup-win32-x64-gnu": "4.54.0", + "@rollup/rollup-win32-x64-msvc": "4.54.0", "fsevents": "~2.3.2" } }, @@ -1161,6 +1165,7 @@ "resolved": "https://registry.npmjs.org/svelte/-/svelte-5.44.0.tgz", "integrity": "sha512-R7387No2zEGw4CtYtI2rgsui6BqjFARzoZFGLiLN5OPla0Pq4Ra2WwcP/zBomP3MYalhSNvF1fzDMuU0P0zPJw==", "license": "MIT", + "peer": true, "dependencies": { "@jridgewell/remapping": "^2.3.4", "@jridgewell/sourcemap-codec": "^1.5.0", diff --git a/web/frontend/package.json b/web/frontend/package.json index 3f7434f7..7a759c71 100644 --- a/web/frontend/package.json +++ b/web/frontend/package.json @@ -11,7 +11,7 @@ "@rollup/plugin-node-resolve": "^16.0.1", "@rollup/plugin-terser": "^0.4.4", "@timohausmann/quadtree-js": "^1.2.6", - "rollup": "^4.53.3", + "rollup": "^4.54.0", "rollup-plugin-css-only": "^4.5.5", "rollup-plugin-svelte": "^7.2.3", "svelte": "^5.44.0" From fe78f2f433136a9fb48de5c330a3daac23af4d2f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Dec 2025 08:03:31 +0000 Subject: [PATCH 02/23] Bump github.com/coreos/go-oidc/v3 from 3.16.0 to 3.17.0 Bumps [github.com/coreos/go-oidc/v3](https://github.com/coreos/go-oidc) from 3.16.0 to 3.17.0. - [Release notes](https://github.com/coreos/go-oidc/releases) - [Commits](https://github.com/coreos/go-oidc/compare/v3.16.0...v3.17.0) --- updated-dependencies: - dependency-name: github.com/coreos/go-oidc/v3 dependency-version: 3.17.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index df8e1fb9..411734f3 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/aws/aws-sdk-go-v2/config v1.31.20 github.com/aws/aws-sdk-go-v2/credentials v1.18.24 github.com/aws/aws-sdk-go-v2/service/s3 v1.90.2 - github.com/coreos/go-oidc/v3 v3.16.0 + github.com/coreos/go-oidc/v3 v3.17.0 github.com/expr-lang/expr v1.17.6 github.com/go-co-op/gocron/v2 v2.18.2 github.com/go-ldap/ldap/v3 v3.4.12 diff --git a/go.sum b/go.sum index 711c5551..08540674 100644 --- a/go.sum +++ b/go.sum @@ -74,8 +74,8 @@ github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= -github.com/coreos/go-oidc/v3 v3.16.0 h1:qRQUCFstKpXwmEjDQTIbyY/5jF00+asXzSkmkoa/mow= -github.com/coreos/go-oidc/v3 v3.16.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8= +github.com/coreos/go-oidc/v3 v3.17.0 h1:hWBGaQfbi0iVviX4ibC7bk8OKT5qNr4klBaCHVNvehc= +github.com/coreos/go-oidc/v3 v3.17.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8= github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo= github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= From 5a8b929448309601100cfb96cc2df55e06b0b402 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Dec 2025 08:04:43 +0000 Subject: [PATCH 03/23] Bump github.com/aws/aws-sdk-go-v2/config from 1.31.20 to 
1.32.6 Bumps [github.com/aws/aws-sdk-go-v2/config](https://github.com/aws/aws-sdk-go-v2) from 1.31.20 to 1.32.6. - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/config/v1.31.20...v1.32.6) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go-v2/config dependency-version: 1.32.6 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 21 +++++++++++---------- go.sum | 42 ++++++++++++++++++++++-------------------- 2 files changed, 33 insertions(+), 30 deletions(-) diff --git a/go.mod b/go.mod index df8e1fb9..5fb06d17 100644 --- a/go.mod +++ b/go.mod @@ -14,8 +14,8 @@ require ( github.com/ClusterCockpit/cc-lib v1.0.2 github.com/Masterminds/squirrel v1.5.4 github.com/aws/aws-sdk-go-v2 v1.41.0 - github.com/aws/aws-sdk-go-v2/config v1.31.20 - github.com/aws/aws-sdk-go-v2/credentials v1.18.24 + github.com/aws/aws-sdk-go-v2/config v1.32.6 + github.com/aws/aws-sdk-go-v2/credentials v1.19.6 github.com/aws/aws-sdk-go-v2/service/s3 v1.90.2 github.com/coreos/go-oidc/v3 v3.16.0 github.com/expr-lang/expr v1.17.6 @@ -53,18 +53,19 @@ require ( github.com/KyleBanks/depth v1.2.1 // indirect github.com/agnivade/levenshtein v1.2.1 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.3 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.13 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.4 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.13 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.30.3 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.7 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.40.2 // indirect + github.com/aws/aws-sdk-go-v2/service/signin v1.0.4 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.30.8 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.41.5 // indirect github.com/aws/smithy-go v1.24.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect diff --git a/go.sum b/go.sum index 711c5551..10ecb4ee 100644 --- a/go.sum +++ b/go.sum @@ -34,36 +34,38 @@ github.com/aws/aws-sdk-go-v2 v1.41.0 h1:tNvqh1s+v0vFYdA1xq0aOJH+Y5cRyZ5upu6roPgP github.com/aws/aws-sdk-go-v2 v1.41.0/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.3 h1:DHctwEM8P8iTXFxC/QK0MRjwEpWQeM9yzidCRjldUz0= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.3/go.mod 
h1:xdCzcZEtnSTKVDOmUZs4l/j3pSV6rpo1WXl5ugNsL8Y= -github.com/aws/aws-sdk-go-v2/config v1.31.20 h1:/jWF4Wu90EhKCgjTdy1DGxcbcbNrjfBHvksEL79tfQc= -github.com/aws/aws-sdk-go-v2/config v1.31.20/go.mod h1:95Hh1Tc5VYKL9NJ7tAkDcqeKt+MCXQB1hQZaRdJIZE0= -github.com/aws/aws-sdk-go-v2/credentials v1.18.24 h1:iJ2FmPT35EaIB0+kMa6TnQ+PwG5A1prEdAw+PsMzfHg= -github.com/aws/aws-sdk-go-v2/credentials v1.18.24/go.mod h1:U91+DrfjAiXPDEGYhh/x29o4p0qHX5HDqG7y5VViv64= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13 h1:T1brd5dR3/fzNFAQch/iBKeX07/ffu/cLu+q+RuzEWk= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13/go.mod h1:Peg/GBAQ6JDt+RoBf4meB1wylmAipb7Kg2ZFakZTlwk= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13 h1:a+8/MLcWlIxo1lF9xaGt3J/u3yOZx+CdSveSNwjhD40= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13/go.mod h1:oGnKwIYZ4XttyU2JWxFrwvhF6YKiK/9/wmE3v3Iu9K8= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13 h1:HBSI2kDkMdWz4ZM7FjwE7e/pWDEZ+nR95x8Ztet1ooY= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13/go.mod h1:YE94ZoDArI7awZqJzBAZ3PDD2zSfuP7w6P2knOzIn8M= +github.com/aws/aws-sdk-go-v2/config v1.32.6 h1:hFLBGUKjmLAekvi1evLi5hVvFQtSo3GYwi+Bx4lpJf8= +github.com/aws/aws-sdk-go-v2/config v1.32.6/go.mod h1:lcUL/gcd8WyjCrMnxez5OXkO3/rwcNmvfno62tnXNcI= +github.com/aws/aws-sdk-go-v2/credentials v1.19.6 h1:F9vWao2TwjV2MyiyVS+duza0NIRtAslgLUM0vTA1ZaE= +github.com/aws/aws-sdk-go-v2/credentials v1.19.6/go.mod h1:SgHzKjEVsdQr6Opor0ihgWtkWdfRAIwxYzSJ8O85VHY= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16 h1:80+uETIWS1BqjnN9uJ0dBUaETh+P1XwFy5vwHwK5r9k= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16/go.mod h1:wOOsYuxYuB/7FlnVtzeBYRcjSRtQpAW0hCP7tIULMwo= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 h1:rgGwPzb82iBYSvHMHXc8h9mRoOUBZIGFgKb9qniaZZc= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16/go.mod h1:L/UxsGeKpGoIj6DxfhOWHWQ/kGKcd4I1VncE4++IyKA= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 h1:1jtGzuV7c82xnqOVfx2F0xmJcOw5374L7N6juGW6x6U= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16/go.mod h1:M2E5OQf+XLe+SZGmmpaI2yy+J326aFf6/+54PoxSANc= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc= github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.13 h1:eg/WYAa12vqTphzIdWMzqYRVKKnCboVPRlvaybNCqPA= github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.13/go.mod h1:/FDdxWhz1486obGrKKC1HONd7krpk38LBt+dutLcN9k= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 h1:x2Ibm/Af8Fi+BH+Hsn9TXGdT+hKbDd5XOTZxTMxDk7o= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3/go.mod h1:IW1jwyrQgMdhisceG8fQLmQIydcT/jWY21rFhzgaKwo= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.4 h1:NvMjwvv8hpGUILarKw7Z4Q0w1H9anXKsesMxtw++MA4= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.4/go.mod h1:455WPHSwaGj2waRSpQp7TsnpOnBfw8iDfPfbwl7KPJE= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13 h1:kDqdFvMY4AtKoACfzIGD8A0+hbT41KTKF//gq7jITfM= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13/go.mod h1:lmKuogqSU3HzQCwZ9ZtcqOc5XGMqtDK7OIc2+DxiUEg= 
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 h1:oHjJHeUy0ImIV0bsrX0X91GkV5nJAyv1l1CC9lnO0TI= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16/go.mod h1:iRSNGgOYmiYwSCXxXaKb9HfOEj40+oTKn8pTxMlYkRM= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.13 h1:zhBJXdhWIFZ1acfDYIhu4+LCzdUS2Vbcum7D01dXlHQ= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.13/go.mod h1:JaaOeCE368qn2Hzi3sEzY6FgAZVCIYcC2nwbro2QCh8= github.com/aws/aws-sdk-go-v2/service/s3 v1.90.2 h1:DhdbtDl4FdNlj31+xiRXANxEE+eC7n8JQz+/ilwQ8Uc= github.com/aws/aws-sdk-go-v2/service/s3 v1.90.2/go.mod h1:+wArOOrcHUevqdto9k1tKOF5++YTe9JEcPSc9Tx2ZSw= -github.com/aws/aws-sdk-go-v2/service/sso v1.30.3 h1:NjShtS1t8r5LUfFVtFeI8xLAHQNTa7UI0VawXlrBMFQ= -github.com/aws/aws-sdk-go-v2/service/sso v1.30.3/go.mod h1:fKvyjJcz63iL/ftA6RaM8sRCtN4r4zl4tjL3qw5ec7k= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.7 h1:gTsnx0xXNQ6SBbymoDvcoRHL+q4l/dAFsQuKfDWSaGc= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.7/go.mod h1:klO+ejMvYsB4QATfEOIXk8WAEwN4N0aBfJpvC+5SZBo= -github.com/aws/aws-sdk-go-v2/service/sts v1.40.2 h1:HK5ON3KmQV2HcAunnx4sKLB9aPf3gKGwVAf7xnx0QT0= -github.com/aws/aws-sdk-go-v2/service/sts v1.40.2/go.mod h1:E19xDjpzPZC7LS2knI9E6BaRFDK43Eul7vd6rSq2HWk= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.4 h1:HpI7aMmJ+mm1wkSHIA2t5EaFFv5EFYXePW30p1EIrbQ= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.4/go.mod h1:C5RdGMYGlfM0gYq/tifqgn4EbyX99V15P2V3R+VHbQU= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.8 h1:aM/Q24rIlS3bRAhTyFurowU8A0SMyGDtEOY/l/s/1Uw= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.8/go.mod h1:+fWt2UHSb4kS7Pu8y+BMBvJF0EWx+4H0hzNwtDNRTrg= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12 h1:AHDr0DaHIAo8c9t1emrzAlVDFp+iMMKnPdYy6XO4MCE= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12/go.mod h1:GQ73XawFFiWxyWXMHWfhiomvP3tXtdNar/fi8z18sx0= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.5 h1:SciGFVNZ4mHdm7gpD1dgZYnCuVdX1s+lFTg4+4DOy70= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.5/go.mod h1:iW40X4QBmUxdP+fZNOpfmkdMZqsovezbAeO+Ubiv2pk= github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk= github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= From 29a20f7b0b995f99f8f198cfe69b8ace9fff4ef7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 23 Dec 2025 09:07:01 +0000 Subject: [PATCH 04/23] Bump github.com/expr-lang/expr from 1.17.6 to 1.17.7 Bumps [github.com/expr-lang/expr](https://github.com/expr-lang/expr) from 1.17.6 to 1.17.7. - [Release notes](https://github.com/expr-lang/expr/releases) - [Commits](https://github.com/expr-lang/expr/compare/v1.17.6...v1.17.7) --- updated-dependencies: - dependency-name: github.com/expr-lang/expr dependency-version: 1.17.7 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot]
---
 go.mod | 2 +-
 go.sum | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/go.mod b/go.mod
index 7a443875..4da3b80e 100644
--- a/go.mod
+++ b/go.mod
@@ -18,7 +18,7 @@ require (
 	github.com/aws/aws-sdk-go-v2/credentials v1.19.6
 	github.com/aws/aws-sdk-go-v2/service/s3 v1.90.2
 	github.com/coreos/go-oidc/v3 v3.17.0
-	github.com/expr-lang/expr v1.17.6
+	github.com/expr-lang/expr v1.17.7
 	github.com/go-co-op/gocron/v2 v2.18.2
 	github.com/go-ldap/ldap/v3 v3.4.12
 	github.com/go-sql-driver/mysql v1.9.3
diff --git a/go.sum b/go.sum
index e2f5ba37..773bf31c 100644
--- a/go.sum
+++ b/go.sum
@@ -97,8 +97,8 @@ github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj
 github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
 github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/expr-lang/expr v1.17.6 h1:1h6i8ONk9cexhDmowO/A64VPxHScu7qfSl2k8OlINec=
-github.com/expr-lang/expr v1.17.6/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4=
+github.com/expr-lang/expr v1.17.7 h1:Q0xY/e/2aCIp8g9s/LGvMDCC5PxYlvHgDZRQ4y16JX8=
+github.com/expr-lang/expr v1.17.7/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4=
 github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/frankban/quicktest v1.11.0/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s=

From 11ec2267daaf67cbc6602e3ba395a16ede96503e Mon Sep 17 00:00:00 2001
From: Jan Eitzinger
Date: Thu, 25 Dec 2025 08:42:54 +0100
Subject: [PATCH 05/23] Major refactor of metric data handling

- make the internal memory store required and default
- Rename memorystore to metricstore
- Rename metricDataDispatcher to metricdispatch
- Remove metricdata package
- Introduce metricsync package for upstream metric data pull
---
 CLAUDE.md | 10 +-
 README.md | 8 +-
 cmd/cc-backend/main.go | 21 +-
 cmd/cc-backend/server.go | 10 +-
 internal/api/api_test.go | 12 +-
 internal/api/job.go | 6 +-
 .../api/{memorystore.go => metricstore.go} | 12 +-
 internal/api/nats_test.go | 9 +-
 internal/archiver/README.md | 4 +-
 internal/archiver/archiver.go | 4 +-
 internal/graph/schema.resolvers.go | 16 +-
 internal/graph/util.go | 6 +-
 internal/metricDataDispatcher/dataLoader.go | 381 -----
 internal/metricdata/cc-metric-store.go | 1226 -----------------
 internal/metricdata/metricdata.go | 88 --
 internal/metricdata/prometheus.go | 587 --------
 internal/metricdata/utils.go | 118 --
 internal/metricdispatch/dataLoader.go | 490 +++++++
 internal/metricdispatch/dataLoader_test.go | 125 ++
 internal/{memorystore => metricstore}/api.go | 6 +-
 .../{memorystore => metricstore}/archive.go | 2 +-
 .../avroCheckpoint.go | 2 +-
 .../avroHelper.go | 2 +-
 .../avroStruct.go | 2 +-
 .../{memorystore => metricstore}/buffer.go | 2 +-
 .../checkpoint.go | 2 +-
 .../{memorystore => metricstore}/config.go | 4 +-
 .../configSchema.go | 2 +-
 .../{memorystore => metricstore}/debug.go | 2 +-
 .../healthcheck.go | 2 +-
 .../{memorystore => metricstore}/level.go | 2 +-
 .../lineprotocol.go | 2 +-
 .../memorystore.go | 4 +-
 .../memorystore_test.go | 2 +-
 .../query.go} | 146 +-
 .../{memorystore => metricstore}/stats.go | 2 +-
 internal/metricsync/metricdata.go | 60 +
 internal/repository/stats.go | 4 +-
 .../taskmanager/updateFootprintService.go | 10 +-
 39 files changed, 815 insertions(+), 2578 deletions(-)
 rename internal/api/{memorystore.go => metricstore.go} (95%)
 delete mode 100644 internal/metricDataDispatcher/dataLoader.go
 delete mode 100644 internal/metricdata/cc-metric-store.go
 delete mode 100644 internal/metricdata/metricdata.go
 delete mode 100644 internal/metricdata/prometheus.go
 delete mode 100644 internal/metricdata/utils.go
 create mode 100644 internal/metricdispatch/dataLoader.go
 create mode 100644 internal/metricdispatch/dataLoader_test.go
 rename internal/{memorystore => metricstore}/api.go (98%)
 rename internal/{memorystore => metricstore}/archive.go (99%)
 rename internal/{memorystore => metricstore}/avroCheckpoint.go (99%)
 rename internal/{memorystore => metricstore}/avroHelper.go (99%)
 rename internal/{memorystore => metricstore}/avroStruct.go (99%)
 rename internal/{memorystore => metricstore}/buffer.go (99%)
 rename internal/{memorystore => metricstore}/checkpoint.go (99%)
 rename internal/{memorystore => metricstore}/config.go (98%)
 rename internal/{memorystore => metricstore}/configSchema.go (99%)
 rename internal/{memorystore => metricstore}/debug.go (99%)
 rename internal/{memorystore => metricstore}/healthcheck.go (99%)
 rename internal/{memorystore => metricstore}/level.go (99%)
 rename internal/{memorystore => metricstore}/lineprotocol.go (99%)
 rename internal/{memorystore => metricstore}/memorystore.go (99%)
 rename internal/{memorystore => metricstore}/memorystore_test.go (99%)
 rename internal/{metricdata/cc-metric-store-internal.go => metricstore/query.go} (87%)
 rename internal/{memorystore => metricstore}/stats.go (99%)
 create mode 100644 internal/metricsync/metricdata.go

diff --git a/CLAUDE.md b/CLAUDE.md
index 67412a76..f30c3923 100644
--- a/CLAUDE.md
+++ b/CLAUDE.md
@@ -96,9 +96,9 @@ The backend follows a layered architecture with clear separation of concerns:
 - **internal/auth**: Authentication layer
   - Supports local accounts, LDAP, OIDC, and JWT tokens
   - Implements rate limiting for login attempts
-- **internal/metricdata**: Metric data repository abstraction
-  - Pluggable backends: cc-metric-store, Prometheus, InfluxDB
-  - Each cluster can have a different metric data backend
+- **internal/metricstore**: Metric store with data loading API
+  - In-memory metric storage with checkpointing
+  - Query API for loading job metric data
 - **internal/archiver**: Job archiving to file-based archive
 - **internal/api/nats.go**: NATS-based API for job and node operations
   - Subscribes to NATS subjects for job events (start/stop)
@@ -209,8 +209,8 @@ applied automatically on startup. Version tracking in `version` table.
 
 ### Adding a new metric data backend
 
-1. Implement `MetricDataRepository` interface in `internal/metricdata/`
-2. Register in `metricdata.Init()` switch statement
+1. Implement metric loading functions in `internal/metricstore/query.go`
+2. Add cluster configuration to metric store initialization
 3. Update config.json schema documentation
 
 ### Modifying database schema

diff --git a/README.md b/README.md
index 468a12ad..00bcb119 100644
--- a/README.md
+++ b/README.md
@@ -163,11 +163,9 @@ ln -s ./var/job-archive
     GraphQL schema and resolvers
   - [`importer`](https://github.com/ClusterCockpit/cc-backend/tree/master/internal/importer)
     Job data import and database initialization
-  - [`memorystore`](https://github.com/ClusterCockpit/cc-backend/tree/master/internal/memorystore)
-    In-memory metric data store with checkpointing
-  - [`metricdata`](https://github.com/ClusterCockpit/cc-backend/tree/master/internal/metricdata)
-    Metric data repository implementations (cc-metric-store, Prometheus)
-  - [`metricDataDispatcher`](https://github.com/ClusterCockpit/cc-backend/tree/master/internal/metricDataDispatcher)
+  - [`metricstore`](https://github.com/ClusterCockpit/cc-backend/tree/master/internal/metricstore)
+    In-memory metric data store with checkpointing and metric loading
+  - [`metricdispatch`](https://github.com/ClusterCockpit/cc-backend/tree/master/internal/metricdispatch)
     Dispatches metric data loading to appropriate backends
   - [`repository`](https://github.com/ClusterCockpit/cc-backend/tree/master/internal/repository)
     Database repository layer for jobs and metadata

diff --git a/cmd/cc-backend/main.go b/cmd/cc-backend/main.go
index f8b4aea1..331df4f6 100644
--- a/cmd/cc-backend/main.go
+++ b/cmd/cc-backend/main.go
@@ -24,8 +24,7 @@ import (
 	"github.com/ClusterCockpit/cc-backend/internal/auth"
 	"github.com/ClusterCockpit/cc-backend/internal/config"
 	"github.com/ClusterCockpit/cc-backend/internal/importer"
-	"github.com/ClusterCockpit/cc-backend/internal/memorystore"
-	"github.com/ClusterCockpit/cc-backend/internal/metricdata"
+	"github.com/ClusterCockpit/cc-backend/internal/metricstore"
 	"github.com/ClusterCockpit/cc-backend/internal/repository"
 	"github.com/ClusterCockpit/cc-backend/internal/tagger"
 	"github.com/ClusterCockpit/cc-backend/internal/taskmanager"
@@ -283,10 +282,7 @@ func initSubsystems() error {
 		return fmt.Errorf("initializing archive: %w", err)
 	}
 
-	// Initialize metricdata
-	if err := metricdata.Init(); err != nil {
-		return fmt.Errorf("initializing metricdata repository: %w", err)
-	}
+	// Note: metricstore.Init() is called later in runServer() with proper configuration
 
 	// Handle database re-initialization
 	if flagReinitDB {
@@ -322,13 +318,12 @@ func initSubsystems() error {
 func runServer(ctx context.Context) error {
 	var wg sync.WaitGroup
 
-	// Start metric store if enabled
-	if memorystore.InternalCCMSFlag {
-		mscfg := ccconf.GetPackageConfig("metric-store")
-		if mscfg == nil {
-			return fmt.Errorf("metric store configuration must be present")
-		}
-		memorystore.Init(mscfg, &wg)
+	// Initialize metric store if configuration is provided
+	mscfg := ccconf.GetPackageConfig("metric-store")
+	if mscfg != nil {
+		metricstore.Init(mscfg, &wg)
+	} else {
+		cclog.Debug("Metric store configuration not found, skipping metricstore initialization")
 	}
 
 	// Start archiver and task manager

diff --git a/cmd/cc-backend/server.go b/cmd/cc-backend/server.go
index 53e24c88..8d700823 100644
--- a/cmd/cc-backend/server.go
+++ b/cmd/cc-backend/server.go
@@ -29,7 +29,7 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/routerConfig" "github.com/ClusterCockpit/cc-backend/pkg/nats" "github.com/ClusterCockpit/cc-backend/web" @@ -253,9 +253,7 @@ func (s *Server) init() error { } } - if memorystore.InternalCCMSFlag { - s.restAPIHandle.MountMetricStoreAPIRoutes(metricstoreapi) - } + s.restAPIHandle.MountMetricStoreAPIRoutes(metricstoreapi) if config.Keys.EmbedStaticFiles { if i, err := os.Stat("./var/img"); err == nil { @@ -383,9 +381,7 @@ func (s *Server) Shutdown(ctx context.Context) { } // Archive all the metric store data - if memorystore.InternalCCMSFlag { - memorystore.Shutdown() - } + metricstore.Shutdown() // Shutdown archiver with 10 second timeout for fast shutdown if err := archiver.Shutdown(10 * time.Second); err != nil { diff --git a/internal/api/api_test.go b/internal/api/api_test.go index 50605f7b..a2283013 100644 --- a/internal/api/api_test.go +++ b/internal/api/api_test.go @@ -23,8 +23,8 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/auth" "github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/graph" - "github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher" - "github.com/ClusterCockpit/cc-backend/internal/metricdata" + "github.com/ClusterCockpit/cc-backend/internal/metricdispatch" + "github.com/ClusterCockpit/cc-backend/internal/metricstore" "github.com/ClusterCockpit/cc-backend/internal/repository" "github.com/ClusterCockpit/cc-backend/pkg/archive" ccconf "github.com/ClusterCockpit/cc-lib/v2/ccConfig" @@ -173,9 +173,7 @@ func setup(t *testing.T) *api.RestAPI { t.Fatal(err) } - if err := metricdata.Init(); err != nil { - t.Fatal(err) - } + // metricstore initialization removed - it's initialized via callback in tests archiver.Start(repository.GetJobRepository(), context.Background()) @@ -221,7 +219,7 @@ func TestRestApi(t *testing.T) { }, } - metricdata.TestLoadDataCallback = func(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error) { + metricstore.TestLoadDataCallback = func(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error) { return testData, nil } @@ -366,7 +364,7 @@ func TestRestApi(t *testing.T) { } t.Run("CheckArchive", func(t *testing.T) { - data, err := metricDataDispatcher.LoadData(stoppedJob, []string{"load_one"}, []schema.MetricScope{schema.MetricScopeNode}, context.Background(), 60) + data, err := metricdispatch.LoadData(stoppedJob, []string{"load_one"}, []schema.MetricScope{schema.MetricScopeNode}, context.Background(), 60) if err != nil { t.Fatal(err) } diff --git a/internal/api/job.go b/internal/api/job.go index 9b970c2e..09f7b22c 100644 --- a/internal/api/job.go +++ b/internal/api/job.go @@ -22,7 +22,7 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/graph" "github.com/ClusterCockpit/cc-backend/internal/graph/model" "github.com/ClusterCockpit/cc-backend/internal/importer" - "github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher" + "github.com/ClusterCockpit/cc-backend/internal/metricdispatch" "github.com/ClusterCockpit/cc-backend/internal/repository" "github.com/ClusterCockpit/cc-backend/pkg/archive" cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger" @@ -293,7 +293,7 @@ func (api *RestAPI) getCompleteJobByID(rw http.ResponseWriter, r *http.Request) } if r.URL.Query().Get("all-metrics") == "true" { - data, err = metricDataDispatcher.LoadData(job, nil, scopes, r.Context(), resolution) + data, 
err = metricdispatch.LoadData(job, nil, scopes, r.Context(), resolution) if err != nil { cclog.Warnf("REST: error while loading all-metrics job data for JobID %d on %s", job.JobID, job.Cluster) return @@ -389,7 +389,7 @@ func (api *RestAPI) getJobByID(rw http.ResponseWriter, r *http.Request) { resolution = max(resolution, mc.Timestep) } - data, err := metricDataDispatcher.LoadData(job, metrics, scopes, r.Context(), resolution) + data, err := metricdispatch.LoadData(job, metrics, scopes, r.Context(), resolution) if err != nil { cclog.Warnf("REST: error while loading job data for JobID %d on %s", job.JobID, job.Cluster) return diff --git a/internal/api/memorystore.go b/internal/api/metricstore.go similarity index 95% rename from internal/api/memorystore.go rename to internal/api/metricstore.go index 56c396e2..d4ab1dfe 100644 --- a/internal/api/memorystore.go +++ b/internal/api/metricstore.go @@ -15,7 +15,7 @@ import ( "strconv" "strings" - "github.com/ClusterCockpit/cc-backend/internal/memorystore" + "github.com/ClusterCockpit/cc-backend/internal/metricstore" cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger" "github.com/influxdata/line-protocol/v2/lineprotocol" @@ -58,7 +58,7 @@ func freeMetrics(rw http.ResponseWriter, r *http.Request) { return } - ms := memorystore.GetMemoryStore() + ms := metricstore.GetMemoryStore() n := 0 for _, sel := range selectors { bn, err := ms.Free(sel, to) @@ -97,9 +97,9 @@ func writeMetrics(rw http.ResponseWriter, r *http.Request) { return } - ms := memorystore.GetMemoryStore() + ms := metricstore.GetMemoryStore() dec := lineprotocol.NewDecoderWithBytes(bytes) - if err := memorystore.DecodeLine(dec, ms, r.URL.Query().Get("cluster")); err != nil { + if err := metricstore.DecodeLine(dec, ms, r.URL.Query().Get("cluster")); err != nil { cclog.Errorf("/api/write error: %s", err.Error()) handleError(err, http.StatusBadRequest, rw) return @@ -129,7 +129,7 @@ func debugMetrics(rw http.ResponseWriter, r *http.Request) { selector = strings.Split(raw, ":") } - ms := memorystore.GetMemoryStore() + ms := metricstore.GetMemoryStore() if err := ms.DebugDump(bufio.NewWriter(rw), selector); err != nil { handleError(err, http.StatusBadRequest, rw) return @@ -162,7 +162,7 @@ func metricsHealth(rw http.ResponseWriter, r *http.Request) { selector := []string{rawCluster, rawNode} - ms := memorystore.GetMemoryStore() + ms := metricstore.GetMemoryStore() if err := ms.HealthCheck(bufio.NewWriter(rw), selector); err != nil { handleError(err, http.StatusBadRequest, rw) return diff --git a/internal/api/nats_test.go b/internal/api/nats_test.go index c9415afc..9e1fa2b5 100644 --- a/internal/api/nats_test.go +++ b/internal/api/nats_test.go @@ -18,7 +18,8 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/auth" "github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/graph" - "github.com/ClusterCockpit/cc-backend/internal/metricdata" + "github.com/ClusterCockpit/cc-backend/internal/importer" + "github.com/ClusterCockpit/cc-backend/internal/metricstore" "github.com/ClusterCockpit/cc-backend/internal/repository" "github.com/ClusterCockpit/cc-backend/pkg/archive" ccconf "github.com/ClusterCockpit/cc-lib/v2/ccConfig" @@ -167,9 +168,7 @@ func setupNatsTest(t *testing.T) *NatsAPI { t.Fatal(err) } - if err := metricdata.Init(); err != nil { - t.Fatal(err) - } + // metricstore initialization removed - it's initialized via callback in tests archiver.Start(repository.GetJobRepository(), context.Background()) @@ -564,7 +563,7 @@ func 
TestNatsHandleStopJob(t *testing.T) {
 	},
 	}
 
-	metricdata.TestLoadDataCallback = func(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error) {
+	metricstore.TestLoadDataCallback = func(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error) {
 		return testData, nil
 	}

diff --git a/internal/archiver/README.md b/internal/archiver/README.md
index 0fae04ea..48aed797 100644
--- a/internal/archiver/README.md
+++ b/internal/archiver/README.md
@@ -106,7 +106,7 @@ Data is archived at the highest available resolution (typically 60s intervals).
 ```go
 // In archiver.go ArchiveJob() function
-jobData, err := metricDataDispatcher.LoadData(job, allMetrics, scopes, ctx, 300)
+jobData, err := metricdispatch.LoadData(job, allMetrics, scopes, ctx, 300)
 // 0 = highest resolution
 // 300 = 5-minute resolution
 ```
@@ -185,6 +185,6 @@ Internal state is protected by:
 ## Dependencies
 
 - `internal/repository`: Database operations for job metadata
-- `internal/metricDataDispatcher`: Loading metric data from various backends
+- `internal/metricdispatch`: Loading metric data from various backends
 - `pkg/archive`: Archive backend abstraction (filesystem, S3, SQLite)
 - `cc-lib/schema`: Job and metric data structures

diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go
index 46ce8126..4e0b6473 100644
--- a/internal/archiver/archiver.go
+++ b/internal/archiver/archiver.go
@@ -10,7 +10,7 @@ import (
 	"math"
 
 	"github.com/ClusterCockpit/cc-backend/internal/config"
-	"github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher"
+	"github.com/ClusterCockpit/cc-backend/internal/metricdispatch"
 	"github.com/ClusterCockpit/cc-backend/pkg/archive"
 	cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
 	"github.com/ClusterCockpit/cc-lib/v2/schema"
@@ -60,7 +60,7 @@ func ArchiveJob(job *schema.Job, ctx context.Context) (*schema.Job, error) {
 		scopes = append(scopes, schema.MetricScopeAccelerator)
 	}
 
-	jobData, err := metricDataDispatcher.LoadData(job, allMetrics, scopes, ctx, 0) // 0 Resulotion-Value retrieves highest res (60s)
+	jobData, err := metricdispatch.LoadData(job, allMetrics, scopes, ctx, 0) // 0 Resolution-Value retrieves highest res (60s)
 	if err != nil {
 		cclog.Error("Error wile loading job data for archiving")
 		return nil, err
 	}

diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go
index 32499b8c..34bbf393 100644
--- a/internal/graph/schema.resolvers.go
+++ b/internal/graph/schema.resolvers.go
@@ -19,7 +19,7 @@ import (
 	"github.com/ClusterCockpit/cc-backend/internal/config"
 	"github.com/ClusterCockpit/cc-backend/internal/graph/generated"
 	"github.com/ClusterCockpit/cc-backend/internal/graph/model"
-	"github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher"
+	"github.com/ClusterCockpit/cc-backend/internal/metricdispatch"
 	"github.com/ClusterCockpit/cc-backend/internal/repository"
 	"github.com/ClusterCockpit/cc-backend/pkg/archive"
 	cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
@@ -484,7 +484,7 @@ func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []str
 		return nil, err
 	}
 
-	data, err := metricDataDispatcher.LoadData(job, metrics, scopes, ctx, *resolution)
+	data, err := metricdispatch.LoadData(job, metrics, scopes, ctx, *resolution)
 	if err != nil {
 		cclog.Warn("Error while loading job data")
 		return nil, err
 	}
@@ -512,7 +512,7 @@ func (r *queryResolver) JobStats(ctx context.Context, id string, metrics []strin
return nil, err } - data, err := metricDataDispatcher.LoadJobStats(job, metrics, ctx) + data, err := metricdispatch.LoadJobStats(job, metrics, ctx) if err != nil { cclog.Warnf("Error while loading jobStats data for job id %s", id) return nil, err @@ -537,7 +537,7 @@ func (r *queryResolver) ScopedJobStats(ctx context.Context, id string, metrics [ return nil, err } - data, err := metricDataDispatcher.LoadScopedJobStats(job, metrics, scopes, ctx) + data, err := metricdispatch.LoadScopedJobStats(job, metrics, scopes, ctx) if err != nil { cclog.Warnf("Error while loading scopedJobStats data for job id %s", id) return nil, err @@ -702,7 +702,7 @@ func (r *queryResolver) JobsMetricStats(ctx context.Context, filter []*model.Job res := []*model.JobStats{} for _, job := range jobs { - data, err := metricDataDispatcher.LoadJobStats(job, metrics, ctx) + data, err := metricdispatch.LoadJobStats(job, metrics, ctx) if err != nil { cclog.Warnf("Error while loading comparison jobStats data for job id %d", job.JobID) continue @@ -759,7 +759,7 @@ func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, nodes [ } } - data, err := metricDataDispatcher.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx) + data, err := metricdispatch.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx) if err != nil { cclog.Warn("error while loading node data") return nil, err @@ -825,7 +825,7 @@ func (r *queryResolver) NodeMetricsList(ctx context.Context, cluster string, sub } } - data, err := metricDataDispatcher.LoadNodeListData(cluster, subCluster, nodes, metrics, scopes, *resolution, from, to, ctx) + data, err := metricdispatch.LoadNodeListData(cluster, subCluster, nodes, metrics, scopes, *resolution, from, to, ctx) if err != nil { cclog.Warn("error while loading node data (Resolver.NodeMetricsList") return nil, err @@ -880,7 +880,7 @@ func (r *queryResolver) ClusterMetrics(ctx context.Context, cluster string, metr // 'nodes' == nil -> Defaults to all nodes of cluster for existing query workflow scopes := []schema.MetricScope{"node"} - data, err := metricDataDispatcher.LoadNodeData(cluster, metrics, nil, scopes, from, to, ctx) + data, err := metricdispatch.LoadNodeData(cluster, metrics, nil, scopes, from, to, ctx) if err != nil { cclog.Warn("error while loading node data") return nil, err diff --git a/internal/graph/util.go b/internal/graph/util.go index 42a1d2fb..4135ca72 100644 --- a/internal/graph/util.go +++ b/internal/graph/util.go @@ -13,7 +13,7 @@ import ( "github.com/99designs/gqlgen/graphql" "github.com/ClusterCockpit/cc-backend/internal/graph/model" - "github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher" + "github.com/ClusterCockpit/cc-backend/internal/metricdispatch" cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger" "github.com/ClusterCockpit/cc-lib/v2/schema" ) @@ -55,7 +55,7 @@ func (r *queryResolver) rooflineHeatmap( // resolution = max(resolution, mc.Timestep) // } - jobdata, err := metricDataDispatcher.LoadData(job, []string{"flops_any", "mem_bw"}, []schema.MetricScope{schema.MetricScopeNode}, ctx, 0) + jobdata, err := metricdispatch.LoadData(job, []string{"flops_any", "mem_bw"}, []schema.MetricScope{schema.MetricScopeNode}, ctx, 0) if err != nil { cclog.Errorf("Error while loading roofline metrics for job %d", job.ID) return nil, err @@ -128,7 +128,7 @@ func (r *queryResolver) jobsFootprints(ctx context.Context, filter []*model.JobF continue } - if err := metricDataDispatcher.LoadAverages(job, metrics, avgs, ctx); err != nil { + if err := 
metricdispatch.LoadAverages(job, metrics, avgs, ctx); err != nil { cclog.Error("Error while loading averages for footprint") return nil, err } diff --git a/internal/metricDataDispatcher/dataLoader.go b/internal/metricDataDispatcher/dataLoader.go deleted file mode 100644 index 6d1338fa..00000000 --- a/internal/metricDataDispatcher/dataLoader.go +++ /dev/null @@ -1,381 +0,0 @@ -// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. This file is part of cc-backend. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. -package metricDataDispatcher - -import ( - "context" - "fmt" - "math" - "time" - - "github.com/ClusterCockpit/cc-backend/internal/config" - "github.com/ClusterCockpit/cc-backend/internal/metricdata" - "github.com/ClusterCockpit/cc-backend/pkg/archive" - cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger" - "github.com/ClusterCockpit/cc-lib/v2/lrucache" - "github.com/ClusterCockpit/cc-lib/v2/resampler" - "github.com/ClusterCockpit/cc-lib/v2/schema" -) - -var cache *lrucache.Cache = lrucache.New(128 * 1024 * 1024) - -func cacheKey( - job *schema.Job, - metrics []string, - scopes []schema.MetricScope, - resolution int, -) string { - // Duration and StartTime do not need to be in the cache key as StartTime is less unique than - // job.ID and the TTL of the cache entry makes sure it does not stay there forever. - return fmt.Sprintf("%d(%s):[%v],[%v]-%d", - job.ID, job.State, metrics, scopes, resolution) -} - -// Fetches the metric data for a job. -func LoadData(job *schema.Job, - metrics []string, - scopes []schema.MetricScope, - ctx context.Context, - resolution int, -) (schema.JobData, error) { - data := cache.Get(cacheKey(job, metrics, scopes, resolution), func() (_ any, ttl time.Duration, size int) { - var jd schema.JobData - var err error - - if job.State == schema.JobStateRunning || - job.MonitoringStatus == schema.MonitoringStatusRunningOrArchiving || - config.Keys.DisableArchive { - - repo, err := metricdata.GetMetricDataRepo(job.Cluster) - if err != nil { - return fmt.Errorf("METRICDATA/METRICDATA > no metric data repository configured for '%s'", job.Cluster), 0, 0 - } - - if scopes == nil { - scopes = append(scopes, schema.MetricScopeNode) - } - - if metrics == nil { - cluster := archive.GetCluster(job.Cluster) - for _, mc := range cluster.MetricConfig { - metrics = append(metrics, mc.Name) - } - } - - jd, err = repo.LoadData(job, metrics, scopes, ctx, resolution) - if err != nil { - if len(jd) != 0 { - cclog.Warnf("partial error: %s", err.Error()) - // return err, 0, 0 // Reactivating will block archiving on one partial error - } else { - cclog.Error("Error while loading job data from metric repository") - return err, 0, 0 - } - } - size = jd.Size() - } else { - var jd_temp schema.JobData - jd_temp, err = archive.GetHandle().LoadJobData(job) - if err != nil { - cclog.Error("Error while loading job data from archive") - return err, 0, 0 - } - - // Deep copy the cached archive hashmap - jd = metricdata.DeepCopy(jd_temp) - - // Resampling for archived data. - // Pass the resolution from frontend here. 
- for _, v := range jd { - for _, v_ := range v { - timestep := int64(0) - for i := 0; i < len(v_.Series); i += 1 { - v_.Series[i].Data, timestep, err = resampler.LargestTriangleThreeBucket(v_.Series[i].Data, int64(v_.Timestep), int64(resolution)) - if err != nil { - return err, 0, 0 - } - } - v_.Timestep = int(timestep) - } - } - - // Avoid sending unrequested data to the client: - if metrics != nil || scopes != nil { - if metrics == nil { - metrics = make([]string, 0, len(jd)) - for k := range jd { - metrics = append(metrics, k) - } - } - - res := schema.JobData{} - for _, metric := range metrics { - if perscope, ok := jd[metric]; ok { - if len(perscope) > 1 { - subset := make(map[schema.MetricScope]*schema.JobMetric) - for _, scope := range scopes { - if jm, ok := perscope[scope]; ok { - subset[scope] = jm - } - } - - if len(subset) > 0 { - perscope = subset - } - } - - res[metric] = perscope - } - } - jd = res - } - size = jd.Size() - } - - ttl = 5 * time.Hour - if job.State == schema.JobStateRunning { - ttl = 2 * time.Minute - } - - // FIXME: Review: Is this really necessary or correct? - // Note: Lines 147-170 formerly known as prepareJobData(jobData, scopes) - // For /monitoring/job/ and some other places, flops_any and mem_bw need - // to be available at the scope 'node'. If a job has a lot of nodes, - // statisticsSeries should be available so that a min/median/max Graph can be - // used instead of a lot of single lines. - // NOTE: New StatsSeries will always be calculated as 'min/median/max' - // Existing (archived) StatsSeries can be 'min/mean/max'! - const maxSeriesSize int = 15 - for _, scopes := range jd { - for _, jm := range scopes { - if jm.StatisticsSeries != nil || len(jm.Series) <= maxSeriesSize { - continue - } - - jm.AddStatisticsSeries() - } - } - - nodeScopeRequested := false - for _, scope := range scopes { - if scope == schema.MetricScopeNode { - nodeScopeRequested = true - } - } - - if nodeScopeRequested { - jd.AddNodeScope("flops_any") - jd.AddNodeScope("mem_bw") - } - - // Round Resulting Stat Values - jd.RoundMetricStats() - - return jd, ttl, size - }) - - if err, ok := data.(error); ok { - cclog.Error("Error in returned dataset") - return nil, err - } - - return data.(schema.JobData), nil -} - -// Used for the jobsFootprint GraphQL-Query. TODO: Rename/Generalize. -func LoadAverages( - job *schema.Job, - metrics []string, - data [][]schema.Float, - ctx context.Context, -) error { - if job.State != schema.JobStateRunning && !config.Keys.DisableArchive { - return archive.LoadAveragesFromArchive(job, metrics, data) // #166 change also here? - } - - repo, err := metricdata.GetMetricDataRepo(job.Cluster) - if err != nil { - return fmt.Errorf("METRICDATA/METRICDATA > no metric data repository configured for '%s'", job.Cluster) - } - - stats, err := repo.LoadStats(job, metrics, ctx) // #166 how to handle stats for acc normalization? - if err != nil { - cclog.Errorf("Error while loading statistics for job %v (User %v, Project %v)", job.JobID, job.User, job.Project) - return err - } - - for i, m := range metrics { - nodes, ok := stats[m] - if !ok { - data[i] = append(data[i], schema.NaN) - continue - } - - sum := 0.0 - for _, node := range nodes { - sum += node.Avg - } - data[i] = append(data[i], schema.Float(sum)) - } - - return nil -} - -// Used for statsTable in frontend: Return scoped statistics by metric. 
-func LoadScopedJobStats( - job *schema.Job, - metrics []string, - scopes []schema.MetricScope, - ctx context.Context, -) (schema.ScopedJobStats, error) { - if job.State != schema.JobStateRunning && !config.Keys.DisableArchive { - return archive.LoadScopedStatsFromArchive(job, metrics, scopes) - } - - repo, err := metricdata.GetMetricDataRepo(job.Cluster) - if err != nil { - return nil, fmt.Errorf("job %d: no metric data repository configured for '%s'", job.JobID, job.Cluster) - } - - scopedStats, err := repo.LoadScopedStats(job, metrics, scopes, ctx) - if err != nil { - cclog.Errorf("error while loading scoped statistics for job %d (User %s, Project %s)", job.JobID, job.User, job.Project) - return nil, err - } - - return scopedStats, nil -} - -// Used for polar plots in frontend: Aggregates statistics for all nodes to single values for job per metric. -func LoadJobStats( - job *schema.Job, - metrics []string, - ctx context.Context, -) (map[string]schema.MetricStatistics, error) { - if job.State != schema.JobStateRunning && !config.Keys.DisableArchive { - return archive.LoadStatsFromArchive(job, metrics) - } - - data := make(map[string]schema.MetricStatistics, len(metrics)) - repo, err := metricdata.GetMetricDataRepo(job.Cluster) - if err != nil { - return data, fmt.Errorf("job %d: no metric data repository configured for '%s'", job.JobID, job.Cluster) - } - - stats, err := repo.LoadStats(job, metrics, ctx) - if err != nil { - cclog.Errorf("error while loading statistics for job %d (User %s, Project %s)", job.JobID, job.User, job.Project) - return data, err - } - - for _, m := range metrics { - sum, avg, min, max := 0.0, 0.0, 0.0, 0.0 - nodes, ok := stats[m] - if !ok { - data[m] = schema.MetricStatistics{Min: min, Avg: avg, Max: max} - continue - } - - for _, node := range nodes { - sum += node.Avg - min = math.Min(min, node.Min) - max = math.Max(max, node.Max) - } - - data[m] = schema.MetricStatistics{ - Avg: (math.Round((sum/float64(job.NumNodes))*100) / 100), - Min: (math.Round(min*100) / 100), - Max: (math.Round(max*100) / 100), - } - } - - return data, nil -} - -// Used for the classic node/system view. Returns a map of nodes to a map of metrics. 
-func LoadNodeData( - cluster string, - metrics, nodes []string, - scopes []schema.MetricScope, - from, to time.Time, - ctx context.Context, -) (map[string]map[string][]*schema.JobMetric, error) { - repo, err := metricdata.GetMetricDataRepo(cluster) - if err != nil { - return nil, fmt.Errorf("METRICDATA/METRICDATA > no metric data repository configured for '%s'", cluster) - } - - if metrics == nil { - for _, m := range archive.GetCluster(cluster).MetricConfig { - metrics = append(metrics, m.Name) - } - } - - data, err := repo.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx) - if err != nil { - if len(data) != 0 { - cclog.Warnf("partial error: %s", err.Error()) - } else { - cclog.Error("Error while loading node data from metric repository") - return nil, err - } - } - - if data == nil { - return nil, fmt.Errorf("METRICDATA/METRICDATA > the metric data repository for '%s' does not support this query", cluster) - } - - return data, nil -} - -func LoadNodeListData( - cluster, subCluster string, - nodes []string, - metrics []string, - scopes []schema.MetricScope, - resolution int, - from, to time.Time, - ctx context.Context, -) (map[string]schema.JobData, error) { - repo, err := metricdata.GetMetricDataRepo(cluster) - if err != nil { - return nil, fmt.Errorf("METRICDATA/METRICDATA > no metric data repository configured for '%s'", cluster) - } - - if metrics == nil { - for _, m := range archive.GetCluster(cluster).MetricConfig { - metrics = append(metrics, m.Name) - } - } - - data, err := repo.LoadNodeListData(cluster, subCluster, nodes, metrics, scopes, resolution, from, to, ctx) - if err != nil { - if len(data) != 0 { - cclog.Warnf("partial error: %s", err.Error()) - } else { - cclog.Error("Error while loading node data from metric repository") - return nil, err - } - } - - // NOTE: New StatsSeries will always be calculated as 'min/median/max' - const maxSeriesSize int = 8 - for _, jd := range data { - for _, scopes := range jd { - for _, jm := range scopes { - if jm.StatisticsSeries != nil || len(jm.Series) < maxSeriesSize { - continue - } - jm.AddStatisticsSeries() - } - } - } - - if data == nil { - return nil, fmt.Errorf("METRICDATA/METRICDATA > the metric data repository for '%s' does not support this query", cluster) - } - - return data, nil -} diff --git a/internal/metricdata/cc-metric-store.go b/internal/metricdata/cc-metric-store.go deleted file mode 100644 index 6c146f22..00000000 --- a/internal/metricdata/cc-metric-store.go +++ /dev/null @@ -1,1226 +0,0 @@ -// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. -package metricdata - -import ( - "bufio" - "bytes" - "context" - "encoding/json" - "fmt" - "net/http" - "strings" - "time" - - "github.com/ClusterCockpit/cc-backend/pkg/archive" - cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger" - "github.com/ClusterCockpit/cc-lib/v2/schema" -) - -type CCMetricStoreConfig struct { - Kind string `json:"kind"` - Url string `json:"url"` - Token string `json:"token"` - - // If metrics are known to this MetricDataRepository under a different - // name than in the `metricConfig` section of the 'cluster.json', - // provide this optional mapping of local to remote name for this metric. 
- Renamings map[string]string `json:"metricRenamings"` -} - -type CCMetricStore struct { - here2there map[string]string - there2here map[string]string - client http.Client - jwt string - url string - queryEndpoint string -} - -type ApiQueryRequest struct { - Cluster string `json:"cluster"` - Queries []ApiQuery `json:"queries"` - ForAllNodes []string `json:"for-all-nodes"` - From int64 `json:"from"` - To int64 `json:"to"` - WithStats bool `json:"with-stats"` - WithData bool `json:"with-data"` -} - -type ApiQuery struct { - Type *string `json:"type,omitempty"` - SubType *string `json:"subtype,omitempty"` - Metric string `json:"metric"` - Hostname string `json:"host"` - Resolution int `json:"resolution"` - TypeIds []string `json:"type-ids,omitempty"` - SubTypeIds []string `json:"subtype-ids,omitempty"` - Aggregate bool `json:"aggreg"` -} - -type ApiQueryResponse struct { - Queries []ApiQuery `json:"queries,omitempty"` - Results [][]ApiMetricData `json:"results"` -} - -type ApiMetricData struct { - Error *string `json:"error"` - Data []schema.Float `json:"data"` - From int64 `json:"from"` - To int64 `json:"to"` - Resolution int `json:"resolution"` - Avg schema.Float `json:"avg"` - Min schema.Float `json:"min"` - Max schema.Float `json:"max"` -} - -func (ccms *CCMetricStore) Init(rawConfig json.RawMessage) error { - var config CCMetricStoreConfig - if err := json.Unmarshal(rawConfig, &config); err != nil { - cclog.Warn("Error while unmarshaling raw json config") - return err - } - - ccms.url = config.Url - ccms.queryEndpoint = fmt.Sprintf("%s/api/query", config.Url) - ccms.jwt = config.Token - ccms.client = http.Client{ - Timeout: 10 * time.Second, - } - - if config.Renamings != nil { - ccms.here2there = config.Renamings - ccms.there2here = make(map[string]string, len(config.Renamings)) - for k, v := range ccms.here2there { - ccms.there2here[v] = k - } - } else { - ccms.here2there = make(map[string]string) - ccms.there2here = make(map[string]string) - } - - return nil -} - -func (ccms *CCMetricStore) toRemoteName(metric string) string { - if renamed, ok := ccms.here2there[metric]; ok { - return renamed - } - - return metric -} - -func (ccms *CCMetricStore) toLocalName(metric string) string { - if renamed, ok := ccms.there2here[metric]; ok { - return renamed - } - - return metric -} - -func (ccms *CCMetricStore) doRequest( - ctx context.Context, - body *ApiQueryRequest, -) (*ApiQueryResponse, error) { - buf := &bytes.Buffer{} - if err := json.NewEncoder(buf).Encode(body); err != nil { - cclog.Errorf("Error while encoding request body: %s", err.Error()) - return nil, err - } - - req, err := http.NewRequestWithContext(ctx, http.MethodGet, ccms.queryEndpoint, buf) - if err != nil { - cclog.Errorf("Error while building request body: %s", err.Error()) - return nil, err - } - if ccms.jwt != "" { - req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", ccms.jwt)) - } - - // versioning the cc-metric-store query API. 
- // v2 = data with resampling - // v1 = data without resampling - q := req.URL.Query() - q.Add("version", "v2") - req.URL.RawQuery = q.Encode() - - res, err := ccms.client.Do(req) - if err != nil { - cclog.Errorf("Error while performing request: %s", err.Error()) - return nil, err - } - - if res.StatusCode != http.StatusOK { - return nil, fmt.Errorf("'%s': HTTP Status: %s", ccms.queryEndpoint, res.Status) - } - - var resBody ApiQueryResponse - if err := json.NewDecoder(bufio.NewReader(res.Body)).Decode(&resBody); err != nil { - cclog.Errorf("Error while decoding result body: %s", err.Error()) - return nil, err - } - - return &resBody, nil -} - -func (ccms *CCMetricStore) LoadData( - job *schema.Job, - metrics []string, - scopes []schema.MetricScope, - ctx context.Context, - resolution int, -) (schema.JobData, error) { - queries, assignedScope, err := ccms.buildQueries(job, metrics, scopes, resolution) - if err != nil { - cclog.Errorf("Error while building queries for jobId %d, Metrics %v, Scopes %v: %s", job.JobID, metrics, scopes, err.Error()) - return nil, err - } - - req := ApiQueryRequest{ - Cluster: job.Cluster, - From: job.StartTime, - To: job.StartTime + int64(job.Duration), - Queries: queries, - WithStats: true, - WithData: true, - } - - resBody, err := ccms.doRequest(ctx, &req) - if err != nil { - cclog.Errorf("Error while performing request: %s", err.Error()) - return nil, err - } - - var errors []string - jobData := make(schema.JobData) - for i, row := range resBody.Results { - query := req.Queries[i] - metric := ccms.toLocalName(query.Metric) - scope := assignedScope[i] - mc := archive.GetMetricConfig(job.Cluster, metric) - if _, ok := jobData[metric]; !ok { - jobData[metric] = make(map[schema.MetricScope]*schema.JobMetric) - } - - res := mc.Timestep - if len(row) > 0 { - res = row[0].Resolution - } - - jobMetric, ok := jobData[metric][scope] - if !ok { - jobMetric = &schema.JobMetric{ - Unit: mc.Unit, - Timestep: res, - Series: make([]schema.Series, 0), - } - jobData[metric][scope] = jobMetric - } - - for ndx, res := range row { - if res.Error != nil { - /* Build list for "partial errors", if any */ - errors = append(errors, fmt.Sprintf("failed to fetch '%s' from host '%s': %s", query.Metric, query.Hostname, *res.Error)) - continue - } - - id := (*string)(nil) - if query.Type != nil { - id = new(string) - *id = query.TypeIds[ndx] - } - - if res.Avg.IsNaN() || res.Min.IsNaN() || res.Max.IsNaN() { - // "schema.Float()" because regular float64 can not be JSONed when NaN. 
- res.Avg = schema.Float(0) - res.Min = schema.Float(0) - res.Max = schema.Float(0) - } - - jobMetric.Series = append(jobMetric.Series, schema.Series{ - Hostname: query.Hostname, - Id: id, - Statistics: schema.MetricStatistics{ - Avg: float64(res.Avg), - Min: float64(res.Min), - Max: float64(res.Max), - }, - Data: res.Data, - }) - } - - // So that one can later check len(jobData): - if len(jobMetric.Series) == 0 { - delete(jobData[metric], scope) - if len(jobData[metric]) == 0 { - delete(jobData, metric) - } - } - } - - if len(errors) != 0 { - /* Returns list for "partial errors" */ - return jobData, fmt.Errorf("METRICDATA/CCMS > Errors: %s", strings.Join(errors, ", ")) - } - return jobData, nil -} - -func (ccms *CCMetricStore) buildQueries( - job *schema.Job, - metrics []string, - scopes []schema.MetricScope, - resolution int, -) ([]ApiQuery, []schema.MetricScope, error) { - queries := make([]ApiQuery, 0, len(metrics)*len(scopes)*len(job.Resources)) - assignedScope := []schema.MetricScope{} - - subcluster, scerr := archive.GetSubCluster(job.Cluster, job.SubCluster) - if scerr != nil { - return nil, nil, scerr - } - topology := subcluster.Topology - - for _, metric := range metrics { - remoteName := ccms.toRemoteName(metric) - mc := archive.GetMetricConfig(job.Cluster, metric) - if mc == nil { - // return nil, fmt.Errorf("METRICDATA/CCMS > metric '%s' is not specified for cluster '%s'", metric, job.Cluster) - cclog.Infof("metric '%s' is not specified for cluster '%s'", metric, job.Cluster) - continue - } - - // Skip if metric is removed for subcluster - if len(mc.SubClusters) != 0 { - isRemoved := false - for _, scConfig := range mc.SubClusters { - if scConfig.Name == job.SubCluster && scConfig.Remove { - isRemoved = true - break - } - } - if isRemoved { - continue - } - } - - // Avoid duplicates... 
- handledScopes := make([]schema.MetricScope, 0, 3) - - scopesLoop: - for _, requestedScope := range scopes { - nativeScope := mc.Scope - if nativeScope == schema.MetricScopeAccelerator && job.NumAcc == 0 { - continue - } - - scope := nativeScope.Max(requestedScope) - for _, s := range handledScopes { - if scope == s { - continue scopesLoop - } - } - handledScopes = append(handledScopes, scope) - - for _, host := range job.Resources { - hwthreads := host.HWThreads - if hwthreads == nil { - hwthreads = topology.Node - } - - // Accelerator -> Accelerator (Use "accelerator" scope if requested scope is lower than node) - if nativeScope == schema.MetricScopeAccelerator && scope.LT(schema.MetricScopeNode) { - if scope != schema.MetricScopeAccelerator { - // Skip all other caught cases - continue - } - - queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: host.Hostname, - Aggregate: false, - Type: &acceleratorString, - TypeIds: host.Accelerators, - Resolution: resolution, - }) - assignedScope = append(assignedScope, schema.MetricScopeAccelerator) - continue - } - - // Accelerator -> Node - if nativeScope == schema.MetricScopeAccelerator && scope == schema.MetricScopeNode { - if len(host.Accelerators) == 0 { - continue - } - - queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: host.Hostname, - Aggregate: true, - Type: &acceleratorString, - TypeIds: host.Accelerators, - Resolution: resolution, - }) - assignedScope = append(assignedScope, scope) - continue - } - - // HWThread -> HWThread - if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeHWThread { - queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: host.Hostname, - Aggregate: false, - Type: &hwthreadString, - TypeIds: intToStringSlice(hwthreads), - Resolution: resolution, - }) - assignedScope = append(assignedScope, scope) - continue - } - - // HWThread -> Core - if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeCore { - cores, _ := topology.GetCoresFromHWThreads(hwthreads) - for _, core := range cores { - queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: host.Hostname, - Aggregate: true, - Type: &hwthreadString, - TypeIds: intToStringSlice(topology.Core[core]), - Resolution: resolution, - }) - assignedScope = append(assignedScope, scope) - } - continue - } - - // HWThread -> Socket - if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeSocket { - sockets, _ := topology.GetSocketsFromHWThreads(hwthreads) - for _, socket := range sockets { - queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: host.Hostname, - Aggregate: true, - Type: &hwthreadString, - TypeIds: intToStringSlice(topology.Socket[socket]), - Resolution: resolution, - }) - assignedScope = append(assignedScope, scope) - } - continue - } - - // HWThread -> Node - if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeNode { - queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: host.Hostname, - Aggregate: true, - Type: &hwthreadString, - TypeIds: intToStringSlice(hwthreads), - Resolution: resolution, - }) - assignedScope = append(assignedScope, scope) - continue - } - - // Core -> Core - if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeCore { - cores, _ := topology.GetCoresFromHWThreads(hwthreads) - queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: host.Hostname, - Aggregate: false, - Type: &coreString, - TypeIds: intToStringSlice(cores), - 
Resolution: resolution, - }) - assignedScope = append(assignedScope, scope) - continue - } - - // Core -> Socket - if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeSocket { - sockets, _ := topology.GetSocketsFromCores(hwthreads) - for _, socket := range sockets { - queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: host.Hostname, - Aggregate: true, - Type: &coreString, - TypeIds: intToStringSlice(topology.Socket[socket]), - Resolution: resolution, - }) - assignedScope = append(assignedScope, scope) - } - continue - } - - // Core -> Node - if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeNode { - cores, _ := topology.GetCoresFromHWThreads(hwthreads) - queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: host.Hostname, - Aggregate: true, - Type: &coreString, - TypeIds: intToStringSlice(cores), - Resolution: resolution, - }) - assignedScope = append(assignedScope, scope) - continue - } - - // MemoryDomain -> MemoryDomain - if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeMemoryDomain { - sockets, _ := topology.GetMemoryDomainsFromHWThreads(hwthreads) - queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: host.Hostname, - Aggregate: false, - Type: &memoryDomainString, - TypeIds: intToStringSlice(sockets), - Resolution: resolution, - }) - assignedScope = append(assignedScope, scope) - continue - } - - // MemoryDomain -> Node - if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeNode { - sockets, _ := topology.GetMemoryDomainsFromHWThreads(hwthreads) - queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: host.Hostname, - Aggregate: true, - Type: &memoryDomainString, - TypeIds: intToStringSlice(sockets), - Resolution: resolution, - }) - assignedScope = append(assignedScope, scope) - continue - } - - // Socket -> Socket - if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeSocket { - sockets, _ := topology.GetSocketsFromHWThreads(hwthreads) - queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: host.Hostname, - Aggregate: false, - Type: &socketString, - TypeIds: intToStringSlice(sockets), - Resolution: resolution, - }) - assignedScope = append(assignedScope, scope) - continue - } - - // Socket -> Node - if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeNode { - sockets, _ := topology.GetSocketsFromHWThreads(hwthreads) - queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: host.Hostname, - Aggregate: true, - Type: &socketString, - TypeIds: intToStringSlice(sockets), - Resolution: resolution, - }) - assignedScope = append(assignedScope, scope) - continue - } - - // Node -> Node - if nativeScope == schema.MetricScopeNode && scope == schema.MetricScopeNode { - queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: host.Hostname, - Resolution: resolution, - }) - assignedScope = append(assignedScope, scope) - continue - } - - return nil, nil, fmt.Errorf("METRICDATA/CCMS > TODO: unhandled case: native-scope=%s, requested-scope=%s", nativeScope, requestedScope) - } - } - } - - return queries, assignedScope, nil -} - -func (ccms *CCMetricStore) LoadStats( - job *schema.Job, - metrics []string, - ctx context.Context, -) (map[string]map[string]schema.MetricStatistics, error) { - - queries, _, err := ccms.buildQueries(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, 0) // #166 Add scopes here for analysis view accelerator normalization? 
- if err != nil { - cclog.Errorf("Error while building queries for jobId %d, Metrics %v: %s", job.JobID, metrics, err.Error()) - return nil, err - } - - req := ApiQueryRequest{ - Cluster: job.Cluster, - From: job.StartTime, - To: job.StartTime + int64(job.Duration), - Queries: queries, - WithStats: true, - WithData: false, - } - - resBody, err := ccms.doRequest(ctx, &req) - if err != nil { - cclog.Errorf("Error while performing request: %s", err.Error()) - return nil, err - } - - stats := make(map[string]map[string]schema.MetricStatistics, len(metrics)) - for i, res := range resBody.Results { - query := req.Queries[i] - metric := ccms.toLocalName(query.Metric) - data := res[0] - if data.Error != nil { - cclog.Errorf("fetching %s for node %s failed: %s", metric, query.Hostname, *data.Error) - continue - } - - metricdata, ok := stats[metric] - if !ok { - metricdata = make(map[string]schema.MetricStatistics, job.NumNodes) - stats[metric] = metricdata - } - - if data.Avg.IsNaN() || data.Min.IsNaN() || data.Max.IsNaN() { - cclog.Warnf("fetching %s for node %s failed: one of avg/min/max is NaN", metric, query.Hostname) - continue - } - - metricdata[query.Hostname] = schema.MetricStatistics{ - Avg: float64(data.Avg), - Min: float64(data.Min), - Max: float64(data.Max), - } - } - - return stats, nil -} - -// Used for Job-View Statistics Table -func (ccms *CCMetricStore) LoadScopedStats( - job *schema.Job, - metrics []string, - scopes []schema.MetricScope, - ctx context.Context, -) (schema.ScopedJobStats, error) { - queries, assignedScope, err := ccms.buildQueries(job, metrics, scopes, 0) - if err != nil { - cclog.Errorf("Error while building queries for jobId %d, Metrics %v, Scopes %v: %s", job.JobID, metrics, scopes, err.Error()) - return nil, err - } - - req := ApiQueryRequest{ - Cluster: job.Cluster, - From: job.StartTime, - To: job.StartTime + int64(job.Duration), - Queries: queries, - WithStats: true, - WithData: false, - } - - resBody, err := ccms.doRequest(ctx, &req) - if err != nil { - cclog.Errorf("Error while performing request: %s", err.Error()) - return nil, err - } - - var errors []string - scopedJobStats := make(schema.ScopedJobStats) - - for i, row := range resBody.Results { - query := req.Queries[i] - metric := ccms.toLocalName(query.Metric) - scope := assignedScope[i] - - if _, ok := scopedJobStats[metric]; !ok { - scopedJobStats[metric] = make(map[schema.MetricScope][]*schema.ScopedStats) - } - - if _, ok := scopedJobStats[metric][scope]; !ok { - scopedJobStats[metric][scope] = make([]*schema.ScopedStats, 0) - } - - for ndx, res := range row { - if res.Error != nil { - /* Build list for "partial errors", if any */ - errors = append(errors, fmt.Sprintf("failed to fetch '%s' from host '%s': %s", query.Metric, query.Hostname, *res.Error)) - continue - } - - id := (*string)(nil) - if query.Type != nil { - id = new(string) - *id = query.TypeIds[ndx] - } - - if res.Avg.IsNaN() || res.Min.IsNaN() || res.Max.IsNaN() { - // "schema.Float()" because regular float64 can not be JSONed when NaN. 
- res.Avg = schema.Float(0) - res.Min = schema.Float(0) - res.Max = schema.Float(0) - } - - scopedJobStats[metric][scope] = append(scopedJobStats[metric][scope], &schema.ScopedStats{ - Hostname: query.Hostname, - Id: id, - Data: &schema.MetricStatistics{ - Avg: float64(res.Avg), - Min: float64(res.Min), - Max: float64(res.Max), - }, - }) - } - - // So that one can later check len(scopedJobStats[metric][scope]): Remove from map if empty - if len(scopedJobStats[metric][scope]) == 0 { - delete(scopedJobStats[metric], scope) - if len(scopedJobStats[metric]) == 0 { - delete(scopedJobStats, metric) - } - } - } - - if len(errors) != 0 { - /* Returns list for "partial errors" */ - return scopedJobStats, fmt.Errorf("METRICDATA/CCMS > Errors: %s", strings.Join(errors, ", ")) - } - return scopedJobStats, nil -} - -// Used for Systems-View Node-Overview -func (ccms *CCMetricStore) LoadNodeData( - cluster string, - metrics, nodes []string, - scopes []schema.MetricScope, - from, to time.Time, - ctx context.Context, -) (map[string]map[string][]*schema.JobMetric, error) { - req := ApiQueryRequest{ - Cluster: cluster, - From: from.Unix(), - To: to.Unix(), - WithStats: true, - WithData: true, - } - - if nodes == nil { - for _, metric := range metrics { - req.ForAllNodes = append(req.ForAllNodes, ccms.toRemoteName(metric)) - } - } else { - for _, node := range nodes { - for _, metric := range metrics { - req.Queries = append(req.Queries, ApiQuery{ - Hostname: node, - Metric: ccms.toRemoteName(metric), - Resolution: 0, // Default for Node Queries: Will return metric $Timestep Resolution - }) - } - } - } - - resBody, err := ccms.doRequest(ctx, &req) - if err != nil { - cclog.Errorf("Error while performing request: %s", err.Error()) - return nil, err - } - - var errors []string - data := make(map[string]map[string][]*schema.JobMetric) - for i, res := range resBody.Results { - var query ApiQuery - if resBody.Queries != nil { - query = resBody.Queries[i] - } else { - query = req.Queries[i] - } - - metric := ccms.toLocalName(query.Metric) - qdata := res[0] - if qdata.Error != nil { - /* Build list for "partial errors", if any */ - errors = append(errors, fmt.Sprintf("fetching %s for node %s failed: %s", metric, query.Hostname, *qdata.Error)) - } - - if qdata.Avg.IsNaN() || qdata.Min.IsNaN() || qdata.Max.IsNaN() { - // return nil, fmt.Errorf("METRICDATA/CCMS > fetching %s for node %s failed: %s", metric, query.Hostname, "avg/min/max is NaN") - qdata.Avg, qdata.Min, qdata.Max = 0., 0., 0. 
- } - - hostdata, ok := data[query.Hostname] - if !ok { - hostdata = make(map[string][]*schema.JobMetric) - data[query.Hostname] = hostdata - } - - mc := archive.GetMetricConfig(cluster, metric) - if mc != nil { - hostdata[metric] = append(hostdata[metric], &schema.JobMetric{ - Unit: mc.Unit, - Timestep: mc.Timestep, - Series: []schema.Series{ - { - Hostname: query.Hostname, - Data: qdata.Data, - Statistics: schema.MetricStatistics{ - Avg: float64(qdata.Avg), - Min: float64(qdata.Min), - Max: float64(qdata.Max), - }, - }, - }, - }) - } else { - cclog.Warnf("Metric '%s' not configured for cluster '%s': Skipped in LoadNodeData() Return!", metric, cluster) - } - } - - if len(errors) != 0 { - /* Returns list of "partial errors" */ - return data, fmt.Errorf("METRICDATA/CCMS > Errors: %s", strings.Join(errors, ", ")) - } - - return data, nil -} - -// Used for Systems-View Node-List -func (ccms *CCMetricStore) LoadNodeListData( - cluster, subCluster string, - nodes []string, - metrics []string, - scopes []schema.MetricScope, - resolution int, - from, to time.Time, - ctx context.Context, -) (map[string]schema.JobData, error) { - - // Note: Order of node data is not guaranteed after this point - queries, assignedScope, err := ccms.buildNodeQueries(cluster, subCluster, nodes, metrics, scopes, resolution) - if err != nil { - cclog.Errorf("Error while building node queries for Cluster %s, SubCluster %s, Metrics %v, Scopes %v: %s", cluster, subCluster, metrics, scopes, err.Error()) - return nil, err - } - - req := ApiQueryRequest{ - Cluster: cluster, - Queries: queries, - From: from.Unix(), - To: to.Unix(), - WithStats: true, - WithData: true, - } - - resBody, err := ccms.doRequest(ctx, &req) - if err != nil { - cclog.Errorf("Error while performing request: %s", err.Error()) - return nil, err - } - - var errors []string - data := make(map[string]schema.JobData) - for i, row := range resBody.Results { - var query ApiQuery - if resBody.Queries != nil { - query = resBody.Queries[i] - } else { - query = req.Queries[i] - } - // qdata := res[0] - metric := ccms.toLocalName(query.Metric) - scope := assignedScope[i] - mc := archive.GetMetricConfig(cluster, metric) - - res := mc.Timestep - if len(row) > 0 { - res = row[0].Resolution - } - - // Init Nested Map Data Structures If Not Found - hostData, ok := data[query.Hostname] - if !ok { - hostData = make(schema.JobData) - data[query.Hostname] = hostData - } - - metricData, ok := hostData[metric] - if !ok { - metricData = make(map[schema.MetricScope]*schema.JobMetric) - data[query.Hostname][metric] = metricData - } - - scopeData, ok := metricData[scope] - if !ok { - scopeData = &schema.JobMetric{ - Unit: mc.Unit, - Timestep: res, - Series: make([]schema.Series, 0), - } - data[query.Hostname][metric][scope] = scopeData - } - - for ndx, res := range row { - if res.Error != nil { - /* Build list for "partial errors", if any */ - errors = append(errors, fmt.Sprintf("failed to fetch '%s' from host '%s': %s", query.Metric, query.Hostname, *res.Error)) - continue - } - - id := (*string)(nil) - if query.Type != nil { - id = new(string) - *id = query.TypeIds[ndx] - } - - if res.Avg.IsNaN() || res.Min.IsNaN() || res.Max.IsNaN() { - // "schema.Float()" because regular float64 can not be JSONed when NaN. 
- res.Avg = schema.Float(0) - res.Min = schema.Float(0) - res.Max = schema.Float(0) - } - - scopeData.Series = append(scopeData.Series, schema.Series{ - Hostname: query.Hostname, - Id: id, - Statistics: schema.MetricStatistics{ - Avg: float64(res.Avg), - Min: float64(res.Min), - Max: float64(res.Max), - }, - Data: res.Data, - }) - } - } - - if len(errors) != 0 { - /* Returns list of "partial errors" */ - return data, fmt.Errorf("METRICDATA/CCMS > Errors: %s", strings.Join(errors, ", ")) - } - - return data, nil -} - -func (ccms *CCMetricStore) buildNodeQueries( - cluster string, - subCluster string, - nodes []string, - metrics []string, - scopes []schema.MetricScope, - resolution int, -) ([]ApiQuery, []schema.MetricScope, error) { - - queries := make([]ApiQuery, 0, len(metrics)*len(scopes)*len(nodes)) - assignedScope := []schema.MetricScope{} - - // Get Topol before loop if subCluster given - var subClusterTopol *schema.SubCluster - var scterr error - if subCluster != "" { - subClusterTopol, scterr = archive.GetSubCluster(cluster, subCluster) - if scterr != nil { - cclog.Errorf("could not load cluster %s subCluster %s topology: %s", cluster, subCluster, scterr.Error()) - return nil, nil, scterr - } - } - - for _, metric := range metrics { - remoteName := ccms.toRemoteName(metric) - mc := archive.GetMetricConfig(cluster, metric) - if mc == nil { - // return nil, fmt.Errorf("METRICDATA/CCMS > metric '%s' is not specified for cluster '%s'", metric, cluster) - cclog.Warnf("metric '%s' is not specified for cluster '%s'", metric, cluster) - continue - } - - // Skip if metric is removed for subcluster - if mc.SubClusters != nil { - isRemoved := false - for _, scConfig := range mc.SubClusters { - if scConfig.Name == subCluster && scConfig.Remove { - isRemoved = true - break - } - } - if isRemoved { - continue - } - } - - // Avoid duplicates... 
- handledScopes := make([]schema.MetricScope, 0, 3) - - scopesLoop: - for _, requestedScope := range scopes { - nativeScope := mc.Scope - - scope := nativeScope.Max(requestedScope) - for _, s := range handledScopes { - if scope == s { - continue scopesLoop - } - } - handledScopes = append(handledScopes, scope) - - for _, hostname := range nodes { - - // If no subCluster given, get it by node - if subCluster == "" { - subClusterName, scnerr := archive.GetSubClusterByNode(cluster, hostname) - if scnerr != nil { - return nil, nil, scnerr - } - subClusterTopol, scterr = archive.GetSubCluster(cluster, subClusterName) - if scterr != nil { - return nil, nil, scterr - } - } - - // Always full node hwthread id list, no partial queries expected -> Use "topology.Node" directly where applicable - // Always full accelerator id list, no partial queries expected -> Use "acceleratorIds" directly where applicable - topology := subClusterTopol.Topology - acceleratorIds := topology.GetAcceleratorIDs() - - // Moved check here if metric matches hardware specs - if nativeScope == schema.MetricScopeAccelerator && len(acceleratorIds) == 0 { - continue scopesLoop - } - - // Accelerator -> Accelerator (Use "accelerator" scope if requested scope is lower than node) - if nativeScope == schema.MetricScopeAccelerator && scope.LT(schema.MetricScopeNode) { - if scope != schema.MetricScopeAccelerator { - // Skip all other caught cases - continue - } - - queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: hostname, - Aggregate: false, - Type: &acceleratorString, - TypeIds: acceleratorIds, - Resolution: resolution, - }) - assignedScope = append(assignedScope, schema.MetricScopeAccelerator) - continue - } - - // Accelerator -> Node - if nativeScope == schema.MetricScopeAccelerator && scope == schema.MetricScopeNode { - if len(acceleratorIds) == 0 { - continue - } - - queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: hostname, - Aggregate: true, - Type: &acceleratorString, - TypeIds: acceleratorIds, - Resolution: resolution, - }) - assignedScope = append(assignedScope, scope) - continue - } - - // HWThread -> HWThread - if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeHWThread { - queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: hostname, - Aggregate: false, - Type: &hwthreadString, - TypeIds: intToStringSlice(topology.Node), - Resolution: resolution, - }) - assignedScope = append(assignedScope, scope) - continue - } - - // HWThread -> Core - if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeCore { - cores, _ := topology.GetCoresFromHWThreads(topology.Node) - for _, core := range cores { - queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: hostname, - Aggregate: true, - Type: &hwthreadString, - TypeIds: intToStringSlice(topology.Core[core]), - Resolution: resolution, - }) - assignedScope = append(assignedScope, scope) - } - continue - } - - // HWThread -> Socket - if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeSocket { - sockets, _ := topology.GetSocketsFromHWThreads(topology.Node) - for _, socket := range sockets { - queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: hostname, - Aggregate: true, - Type: &hwthreadString, - TypeIds: intToStringSlice(topology.Socket[socket]), - Resolution: resolution, - }) - assignedScope = append(assignedScope, scope) - } - continue - } - - // HWThread -> Node - if nativeScope == schema.MetricScopeHWThread && scope == 
schema.MetricScopeNode { - queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: hostname, - Aggregate: true, - Type: &hwthreadString, - TypeIds: intToStringSlice(topology.Node), - Resolution: resolution, - }) - assignedScope = append(assignedScope, scope) - continue - } - - // Core -> Core - if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeCore { - cores, _ := topology.GetCoresFromHWThreads(topology.Node) - queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: hostname, - Aggregate: false, - Type: &coreString, - TypeIds: intToStringSlice(cores), - Resolution: resolution, - }) - assignedScope = append(assignedScope, scope) - continue - } - - // Core -> Socket - if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeSocket { - sockets, _ := topology.GetSocketsFromCores(topology.Node) - for _, socket := range sockets { - queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: hostname, - Aggregate: true, - Type: &coreString, - TypeIds: intToStringSlice(topology.Socket[socket]), - Resolution: resolution, - }) - assignedScope = append(assignedScope, scope) - } - continue - } - - // Core -> Node - if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeNode { - cores, _ := topology.GetCoresFromHWThreads(topology.Node) - queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: hostname, - Aggregate: true, - Type: &coreString, - TypeIds: intToStringSlice(cores), - Resolution: resolution, - }) - assignedScope = append(assignedScope, scope) - continue - } - - // MemoryDomain -> MemoryDomain - if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeMemoryDomain { - sockets, _ := topology.GetMemoryDomainsFromHWThreads(topology.Node) - queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: hostname, - Aggregate: false, - Type: &memoryDomainString, - TypeIds: intToStringSlice(sockets), - Resolution: resolution, - }) - assignedScope = append(assignedScope, scope) - continue - } - - // MemoryDomain -> Node - if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeNode { - sockets, _ := topology.GetMemoryDomainsFromHWThreads(topology.Node) - queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: hostname, - Aggregate: true, - Type: &memoryDomainString, - TypeIds: intToStringSlice(sockets), - Resolution: resolution, - }) - assignedScope = append(assignedScope, scope) - continue - } - - // Socket -> Socket - if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeSocket { - sockets, _ := topology.GetSocketsFromHWThreads(topology.Node) - queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: hostname, - Aggregate: false, - Type: &socketString, - TypeIds: intToStringSlice(sockets), - Resolution: resolution, - }) - assignedScope = append(assignedScope, scope) - continue - } - - // Socket -> Node - if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeNode { - sockets, _ := topology.GetSocketsFromHWThreads(topology.Node) - queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: hostname, - Aggregate: true, - Type: &socketString, - TypeIds: intToStringSlice(sockets), - Resolution: resolution, - }) - assignedScope = append(assignedScope, scope) - continue - } - - // Node -> Node - if nativeScope == schema.MetricScopeNode && scope == schema.MetricScopeNode { - queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: hostname, - Resolution: resolution, - 
}) - assignedScope = append(assignedScope, scope) - continue - } - - return nil, nil, fmt.Errorf("METRICDATA/CCMS > TODO: unhandled case: native-scope=%s, requested-scope=%s", nativeScope, requestedScope) - } - } - } - - return queries, assignedScope, nil -} diff --git a/internal/metricdata/metricdata.go b/internal/metricdata/metricdata.go deleted file mode 100644 index ab0e19fb..00000000 --- a/internal/metricdata/metricdata.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. This file is part of cc-backend. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. - -package metricdata - -import ( - "context" - "encoding/json" - "fmt" - "time" - - "github.com/ClusterCockpit/cc-backend/internal/config" - "github.com/ClusterCockpit/cc-backend/internal/memorystore" - cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger" - "github.com/ClusterCockpit/cc-lib/v2/schema" -) - -type MetricDataRepository interface { - // Initialize this MetricDataRepository. One instance of - // this interface will only ever be responsible for one cluster. - Init(rawConfig json.RawMessage) error - - // Return the JobData for the given job, only with the requested metrics. - LoadData(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error) - - // Return a map of metrics to a map of nodes to the metric statistics of the job. node scope only. - LoadStats(job *schema.Job, metrics []string, ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) - - // Return a map of metrics to a map of scopes to the scoped metric statistics of the job. - LoadScopedStats(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context) (schema.ScopedJobStats, error) - - // Return a map of hosts to a map of metrics at the requested scopes (currently only node) for that node. - LoadNodeData(cluster string, metrics, nodes []string, scopes []schema.MetricScope, from, to time.Time, ctx context.Context) (map[string]map[string][]*schema.JobMetric, error) - - // Return a map of hosts to a map of metrics to a map of scopes for multiple nodes. 
- LoadNodeListData(cluster, subCluster string, nodes, metrics []string, scopes []schema.MetricScope, resolution int, from, to time.Time, ctx context.Context) (map[string]schema.JobData, error) -} - -var metricDataRepos map[string]MetricDataRepository = map[string]MetricDataRepository{} - -func Init() error { - for _, cluster := range config.Clusters { - if cluster.MetricDataRepository != nil { - var kind struct { - Kind string `json:"kind"` - } - if err := json.Unmarshal(cluster.MetricDataRepository, &kind); err != nil { - cclog.Warn("Error while unmarshaling raw json MetricDataRepository") - return err - } - - var mdr MetricDataRepository - switch kind.Kind { - case "cc-metric-store": - mdr = &CCMetricStore{} - case "cc-metric-store-internal": - mdr = &CCMetricStoreInternal{} - memorystore.InternalCCMSFlag = true - case "prometheus": - mdr = &PrometheusDataRepository{} - case "test": - mdr = &TestMetricDataRepository{} - default: - return fmt.Errorf("METRICDATA/METRICDATA > Unknown MetricDataRepository %v for cluster %v", kind.Kind, cluster.Name) - } - - if err := mdr.Init(cluster.MetricDataRepository); err != nil { - cclog.Errorf("Error initializing MetricDataRepository %v for cluster %v", kind.Kind, cluster.Name) - return err - } - metricDataRepos[cluster.Name] = mdr - } - } - return nil -} - -func GetMetricDataRepo(cluster string) (MetricDataRepository, error) { - var err error - repo, ok := metricDataRepos[cluster] - - if !ok { - err = fmt.Errorf("METRICDATA/METRICDATA > no metric data repository configured for '%s'", cluster) - } - - return repo, err -} diff --git a/internal/metricdata/prometheus.go b/internal/metricdata/prometheus.go deleted file mode 100644 index 3fb94d51..00000000 --- a/internal/metricdata/prometheus.go +++ /dev/null @@ -1,587 +0,0 @@ -// Copyright (C) 2022 DKRZ -// All rights reserved. This file is part of cc-backend. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. 
-package metricdata - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "math" - "net/http" - "os" - "regexp" - "sort" - "strings" - "sync" - "text/template" - "time" - - "github.com/ClusterCockpit/cc-backend/pkg/archive" - cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger" - "github.com/ClusterCockpit/cc-lib/v2/schema" - promapi "github.com/prometheus/client_golang/api" - promv1 "github.com/prometheus/client_golang/api/prometheus/v1" - promcfg "github.com/prometheus/common/config" - promm "github.com/prometheus/common/model" -) - -type PrometheusDataRepositoryConfig struct { - Url string `json:"url"` - Username string `json:"username,omitempty"` - Suffix string `json:"suffix,omitempty"` - Templates map[string]string `json:"query-templates"` -} - -type PrometheusDataRepository struct { - client promapi.Client - queryClient promv1.API - suffix string - templates map[string]*template.Template -} - -type PromQLArgs struct { - Nodes string -} - -type Trie map[rune]Trie - -var logOnce sync.Once - -func contains(s []schema.MetricScope, str schema.MetricScope) bool { - for _, v := range s { - if v == str { - return true - } - } - return false -} - -func MinMaxMean(data []schema.Float) (float64, float64, float64) { - if len(data) == 0 { - return 0.0, 0.0, 0.0 - } - min := math.MaxFloat64 - max := -math.MaxFloat64 - var sum float64 - var n float64 - for _, val := range data { - if val.IsNaN() { - continue - } - sum += float64(val) - n += 1 - if float64(val) > max { - max = float64(val) - } - if float64(val) < min { - min = float64(val) - } - } - return min, max, sum / n -} - -// Rewritten from -// https://github.com/ermanh/trieregex/blob/master/trieregex/trieregex.py -func nodeRegex(nodes []string) string { - root := Trie{} - // add runes of each compute node to trie - for _, node := range nodes { - _trie := root - for _, c := range node { - if _, ok := _trie[c]; !ok { - _trie[c] = Trie{} - } - _trie = _trie[c] - } - _trie['*'] = Trie{} - } - // recursively build regex from rune trie - var trieRegex func(trie Trie, reset bool) string - trieRegex = func(trie Trie, reset bool) string { - if reset == true { - trie = root - } - if len(trie) == 0 { - return "" - } - if len(trie) == 1 { - for key, _trie := range trie { - if key == '*' { - return "" - } - return regexp.QuoteMeta(string(key)) + trieRegex(_trie, false) - } - } else { - sequences := []string{} - for key, _trie := range trie { - if key != '*' { - sequences = append(sequences, regexp.QuoteMeta(string(key))+trieRegex(_trie, false)) - } - } - sort.Slice(sequences, func(i, j int) bool { - return (-len(sequences[i]) < -len(sequences[j])) || (sequences[i] < sequences[j]) - }) - var result string - // single edge from this tree node - if len(sequences) == 1 { - result = sequences[0] - if len(result) > 1 { - result = "(?:" + result + ")" - } - // multiple edges, each length 1 - } else if s := strings.Join(sequences, ""); len(s) == len(sequences) { - // char or numeric range - if len(s)-1 == int(s[len(s)-1])-int(s[0]) { - result = fmt.Sprintf("[%c-%c]", s[0], s[len(s)-1]) - // char or numeric set - } else { - result = "[" + s + "]" - } - // multiple edges of different lengths - } else { - result = "(?:" + strings.Join(sequences, "|") + ")" - } - if _, ok := trie['*']; ok { - result += "?" 
- } - return result - } - return "" - } - return trieRegex(root, true) -} - -func (pdb *PrometheusDataRepository) Init(rawConfig json.RawMessage) error { - var config PrometheusDataRepositoryConfig - // parse config - if err := json.Unmarshal(rawConfig, &config); err != nil { - cclog.Warn("Error while unmarshaling raw json config") - return err - } - // support basic authentication - var rt http.RoundTripper = nil - if prom_pw := os.Getenv("PROMETHEUS_PASSWORD"); prom_pw != "" && config.Username != "" { - prom_pw := promcfg.Secret(prom_pw) - rt = promcfg.NewBasicAuthRoundTripper(promcfg.NewInlineSecret(config.Username), promcfg.NewInlineSecret(string(prom_pw)), promapi.DefaultRoundTripper) - } else { - if config.Username != "" { - return errors.New("METRICDATA/PROMETHEUS > Prometheus username provided, but PROMETHEUS_PASSWORD not set") - } - } - // init client - client, err := promapi.NewClient(promapi.Config{ - Address: config.Url, - RoundTripper: rt, - }) - if err != nil { - cclog.Error("Error while initializing new prometheus client") - return err - } - // init query client - pdb.client = client - pdb.queryClient = promv1.NewAPI(pdb.client) - // site config - pdb.suffix = config.Suffix - // init query templates - pdb.templates = make(map[string]*template.Template) - for metric, templ := range config.Templates { - pdb.templates[metric], err = template.New(metric).Parse(templ) - if err == nil { - cclog.Debugf("Added PromQL template for %s: %s", metric, templ) - } else { - cclog.Warnf("Failed to parse PromQL template %s for metric %s", templ, metric) - } - } - return nil -} - -// TODO: respect scope argument -func (pdb *PrometheusDataRepository) FormatQuery( - metric string, - scope schema.MetricScope, - nodes []string, - cluster string, -) (string, error) { - args := PromQLArgs{} - if len(nodes) > 0 { - args.Nodes = fmt.Sprintf("(%s)%s", nodeRegex(nodes), pdb.suffix) - } else { - args.Nodes = fmt.Sprintf(".*%s", pdb.suffix) - } - - buf := &bytes.Buffer{} - if templ, ok := pdb.templates[metric]; ok { - err := templ.Execute(buf, args) - if err != nil { - return "", errors.New(fmt.Sprintf("METRICDATA/PROMETHEUS > Error compiling template %v", templ)) - } else { - query := buf.String() - cclog.Debugf("PromQL: %s", query) - return query, nil - } - } else { - return "", errors.New(fmt.Sprintf("METRICDATA/PROMETHEUS > No PromQL for metric %s configured.", metric)) - } -} - -// Convert PromAPI row to CC schema.Series -func (pdb *PrometheusDataRepository) RowToSeries( - from time.Time, - step int64, - steps int64, - row *promm.SampleStream, -) schema.Series { - ts := from.Unix() - hostname := strings.TrimSuffix(string(row.Metric["exported_instance"]), pdb.suffix) - // init array of expected length with NaN - values := make([]schema.Float, steps+1) - for i := range values { - values[i] = schema.NaN - } - // copy recorded values from prom sample pair - for _, v := range row.Values { - idx := (v.Timestamp.Unix() - ts) / step - values[idx] = schema.Float(v.Value) - } - min, max, mean := MinMaxMean(values) - // output struct - return schema.Series{ - Hostname: hostname, - Data: values, - Statistics: schema.MetricStatistics{ - Avg: mean, - Min: min, - Max: max, - }, - } -} - -func (pdb *PrometheusDataRepository) LoadData( - job *schema.Job, - metrics []string, - scopes []schema.MetricScope, - ctx context.Context, - resolution int, -) (schema.JobData, error) { - // TODO respect requested scope - if len(scopes) == 0 || !contains(scopes, schema.MetricScopeNode) { - scopes = append(scopes, 
schema.MetricScopeNode) - } - - jobData := make(schema.JobData) - // parse job specs - nodes := make([]string, len(job.Resources)) - for i, resource := range job.Resources { - nodes[i] = resource.Hostname - } - from := time.Unix(job.StartTime, 0) - to := time.Unix(job.StartTime+int64(job.Duration), 0) - - for _, scope := range scopes { - if scope != schema.MetricScopeNode { - logOnce.Do(func() { - cclog.Infof("Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope) - }) - continue - } - - for _, metric := range metrics { - metricConfig := archive.GetMetricConfig(job.Cluster, metric) - if metricConfig == nil { - cclog.Warnf("Error in LoadData: Metric %s for cluster %s not configured", metric, job.Cluster) - return nil, errors.New("Prometheus config error") - } - query, err := pdb.FormatQuery(metric, scope, nodes, job.Cluster) - if err != nil { - cclog.Warn("Error while formatting prometheus query") - return nil, err - } - - // ranged query over all job nodes - r := promv1.Range{ - Start: from, - End: to, - Step: time.Duration(metricConfig.Timestep * 1e9), - } - result, warnings, err := pdb.queryClient.QueryRange(ctx, query, r) - if err != nil { - cclog.Errorf("Prometheus query error in LoadData: %v\nQuery: %s", err, query) - return nil, errors.New("Prometheus query error") - } - if len(warnings) > 0 { - cclog.Warnf("Warnings: %v\n", warnings) - } - - // init data structures - if _, ok := jobData[metric]; !ok { - jobData[metric] = make(map[schema.MetricScope]*schema.JobMetric) - } - jobMetric, ok := jobData[metric][scope] - if !ok { - jobMetric = &schema.JobMetric{ - Unit: metricConfig.Unit, - Timestep: metricConfig.Timestep, - Series: make([]schema.Series, 0), - } - } - step := int64(metricConfig.Timestep) - steps := int64(to.Sub(from).Seconds()) / step - // iter rows of host, metric, values - for _, row := range result.(promm.Matrix) { - jobMetric.Series = append(jobMetric.Series, - pdb.RowToSeries(from, step, steps, row)) - } - // only add metric if at least one host returned data - if !ok && len(jobMetric.Series) > 0 { - jobData[metric][scope] = jobMetric - } - // sort by hostname to get uniform coloring - sort.Slice(jobMetric.Series, func(i, j int) bool { - return (jobMetric.Series[i].Hostname < jobMetric.Series[j].Hostname) - }) - } - } - return jobData, nil -} - -// TODO change implementation to precomputed/cached stats -func (pdb *PrometheusDataRepository) LoadStats( - job *schema.Job, - metrics []string, - ctx context.Context, -) (map[string]map[string]schema.MetricStatistics, error) { - // map of metrics of nodes of stats - stats := map[string]map[string]schema.MetricStatistics{} - - data, err := pdb.LoadData(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, ctx, 0 /*resolution here*/) - if err != nil { - cclog.Warn("Error while loading job for stats") - return nil, err - } - for metric, metricData := range data { - stats[metric] = make(map[string]schema.MetricStatistics) - for _, series := range metricData[schema.MetricScopeNode].Series { - stats[metric][series.Hostname] = series.Statistics - } - } - - return stats, nil -} - -func (pdb *PrometheusDataRepository) LoadNodeData( - cluster string, - metrics, nodes []string, - scopes []schema.MetricScope, - from, to time.Time, - ctx context.Context, -) (map[string]map[string][]*schema.JobMetric, error) { - t0 := time.Now() - // Map of hosts of metrics of value slices - data := make(map[string]map[string][]*schema.JobMetric) - // query db for each metric - // TODO: scopes seems to be always empty - 
if len(scopes) == 0 || !contains(scopes, schema.MetricScopeNode) { - scopes = append(scopes, schema.MetricScopeNode) - } - for _, scope := range scopes { - if scope != schema.MetricScopeNode { - logOnce.Do(func() { - cclog.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope) - }) - continue - } - for _, metric := range metrics { - metricConfig := archive.GetMetricConfig(cluster, metric) - if metricConfig == nil { - cclog.Warnf("Error in LoadNodeData: Metric %s for cluster %s not configured", metric, cluster) - return nil, errors.New("Prometheus config error") - } - query, err := pdb.FormatQuery(metric, scope, nodes, cluster) - if err != nil { - cclog.Warn("Error while formatting prometheus query") - return nil, err - } - - // ranged query over all nodes - r := promv1.Range{ - Start: from, - End: to, - Step: time.Duration(metricConfig.Timestep * 1e9), - } - result, warnings, err := pdb.queryClient.QueryRange(ctx, query, r) - if err != nil { - cclog.Errorf("Prometheus query error in LoadNodeData: %v\n", err) - return nil, errors.New("Prometheus query error") - } - if len(warnings) > 0 { - cclog.Warnf("Warnings: %v\n", warnings) - } - - step := int64(metricConfig.Timestep) - steps := int64(to.Sub(from).Seconds()) / step - - // iter rows of host, metric, values - for _, row := range result.(promm.Matrix) { - hostname := strings.TrimSuffix(string(row.Metric["exported_instance"]), pdb.suffix) - hostdata, ok := data[hostname] - if !ok { - hostdata = make(map[string][]*schema.JobMetric) - data[hostname] = hostdata - } - // output per host and metric - hostdata[metric] = append(hostdata[metric], &schema.JobMetric{ - Unit: metricConfig.Unit, - Timestep: metricConfig.Timestep, - Series: []schema.Series{pdb.RowToSeries(from, step, steps, row)}, - }, - ) - } - } - } - t1 := time.Since(t0) - cclog.Debugf("LoadNodeData of %v nodes took %s", len(data), t1) - return data, nil -} - -// Implemented by NHR@FAU; Used in Job-View StatsTable -func (pdb *PrometheusDataRepository) LoadScopedStats( - job *schema.Job, - metrics []string, - scopes []schema.MetricScope, - ctx context.Context, -) (schema.ScopedJobStats, error) { - // Assumption: pdb.loadData() only returns series node-scope - use node scope for statsTable - scopedJobStats := make(schema.ScopedJobStats) - data, err := pdb.LoadData(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, ctx, 0 /*resolution here*/) - if err != nil { - cclog.Warn("Error while loading job for scopedJobStats") - return nil, err - } - - for metric, metricData := range data { - for _, scope := range scopes { - if scope != schema.MetricScopeNode { - logOnce.Do(func() { - cclog.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope) - }) - continue - } - - if _, ok := scopedJobStats[metric]; !ok { - scopedJobStats[metric] = make(map[schema.MetricScope][]*schema.ScopedStats) - } - - if _, ok := scopedJobStats[metric][scope]; !ok { - scopedJobStats[metric][scope] = make([]*schema.ScopedStats, 0) - } - - for _, series := range metricData[scope].Series { - scopedJobStats[metric][scope] = append(scopedJobStats[metric][scope], &schema.ScopedStats{ - Hostname: series.Hostname, - Data: &series.Statistics, - }) - } - } - } - - return scopedJobStats, nil -} - -// Implemented by NHR@FAU; Used in NodeList-View -func (pdb *PrometheusDataRepository) LoadNodeListData( - cluster, subCluster string, - nodes []string, - metrics []string, - scopes []schema.MetricScope, - resolution int, - from, to time.Time, 
- ctx context.Context, -) (map[string]schema.JobData, error) { - // Assumption: pdb.loadData() only returns series node-scope - use node scope for NodeList - - // Fetch Data, based on pdb.LoadNodeData() - t0 := time.Now() - // Map of hosts of jobData - data := make(map[string]schema.JobData) - - // query db for each metric - // TODO: scopes seems to be always empty - if len(scopes) == 0 || !contains(scopes, schema.MetricScopeNode) { - scopes = append(scopes, schema.MetricScopeNode) - } - - for _, scope := range scopes { - if scope != schema.MetricScopeNode { - logOnce.Do(func() { - cclog.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope) - }) - continue - } - - for _, metric := range metrics { - metricConfig := archive.GetMetricConfig(cluster, metric) - if metricConfig == nil { - cclog.Warnf("Error in LoadNodeListData: Metric %s for cluster %s not configured", metric, cluster) - return nil, errors.New("Prometheus config error") - } - query, err := pdb.FormatQuery(metric, scope, nodes, cluster) - if err != nil { - cclog.Warn("Error while formatting prometheus query") - return nil, err - } - - // ranged query over all nodes - r := promv1.Range{ - Start: from, - End: to, - Step: time.Duration(metricConfig.Timestep * 1e9), - } - result, warnings, err := pdb.queryClient.QueryRange(ctx, query, r) - if err != nil { - cclog.Errorf("Prometheus query error in LoadNodeData: %v\n", err) - return nil, errors.New("Prometheus query error") - } - if len(warnings) > 0 { - cclog.Warnf("Warnings: %v\n", warnings) - } - - step := int64(metricConfig.Timestep) - steps := int64(to.Sub(from).Seconds()) / step - - // iter rows of host, metric, values - for _, row := range result.(promm.Matrix) { - hostname := strings.TrimSuffix(string(row.Metric["exported_instance"]), pdb.suffix) - - hostdata, ok := data[hostname] - if !ok { - hostdata = make(schema.JobData) - data[hostname] = hostdata - } - - metricdata, ok := hostdata[metric] - if !ok { - metricdata = make(map[schema.MetricScope]*schema.JobMetric) - data[hostname][metric] = metricdata - } - - // output per host, metric and scope - scopeData, ok := metricdata[scope] - if !ok { - scopeData = &schema.JobMetric{ - Unit: metricConfig.Unit, - Timestep: metricConfig.Timestep, - Series: []schema.Series{pdb.RowToSeries(from, step, steps, row)}, - } - data[hostname][metric][scope] = scopeData - } - } - } - } - t1 := time.Since(t0) - cclog.Debugf("LoadNodeListData of %v nodes took %s", len(data), t1) - return data, nil -} diff --git a/internal/metricdata/utils.go b/internal/metricdata/utils.go deleted file mode 100644 index 21dfbcac..00000000 --- a/internal/metricdata/utils.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. This file is part of cc-backend. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. - -package metricdata - -import ( - "context" - "encoding/json" - "time" - - "github.com/ClusterCockpit/cc-lib/v2/schema" -) - -var TestLoadDataCallback func(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error) = func(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error) { - panic("TODO") -} - -// TestMetricDataRepository is only a mock for unit-testing. 
-type TestMetricDataRepository struct{} - -func (tmdr *TestMetricDataRepository) Init(_ json.RawMessage) error { - return nil -} - -func (tmdr *TestMetricDataRepository) LoadData( - job *schema.Job, - metrics []string, - scopes []schema.MetricScope, - ctx context.Context, - resolution int, -) (schema.JobData, error) { - return TestLoadDataCallback(job, metrics, scopes, ctx, resolution) -} - -func (tmdr *TestMetricDataRepository) LoadStats( - job *schema.Job, - metrics []string, - ctx context.Context, -) (map[string]map[string]schema.MetricStatistics, error) { - panic("TODO") -} - -func (tmdr *TestMetricDataRepository) LoadScopedStats( - job *schema.Job, - metrics []string, - scopes []schema.MetricScope, - ctx context.Context, -) (schema.ScopedJobStats, error) { - panic("TODO") -} - -func (tmdr *TestMetricDataRepository) LoadNodeData( - cluster string, - metrics, nodes []string, - scopes []schema.MetricScope, - from, to time.Time, - ctx context.Context, -) (map[string]map[string][]*schema.JobMetric, error) { - panic("TODO") -} - -func (tmdr *TestMetricDataRepository) LoadNodeListData( - cluster, subCluster string, - nodes []string, - metrics []string, - scopes []schema.MetricScope, - resolution int, - from, to time.Time, - ctx context.Context, -) (map[string]schema.JobData, error) { - panic("TODO") -} - -func DeepCopy(jdTemp schema.JobData) schema.JobData { - jd := make(schema.JobData, len(jdTemp)) - for k, v := range jdTemp { - jd[k] = make(map[schema.MetricScope]*schema.JobMetric, len(jdTemp[k])) - for k_, v_ := range v { - jd[k][k_] = new(schema.JobMetric) - jd[k][k_].Series = make([]schema.Series, len(v_.Series)) - for i := 0; i < len(v_.Series); i += 1 { - jd[k][k_].Series[i].Data = make([]schema.Float, len(v_.Series[i].Data)) - copy(jd[k][k_].Series[i].Data, v_.Series[i].Data) - jd[k][k_].Series[i].Hostname = v_.Series[i].Hostname - jd[k][k_].Series[i].Id = v_.Series[i].Id - jd[k][k_].Series[i].Statistics.Avg = v_.Series[i].Statistics.Avg - jd[k][k_].Series[i].Statistics.Min = v_.Series[i].Statistics.Min - jd[k][k_].Series[i].Statistics.Max = v_.Series[i].Statistics.Max - } - jd[k][k_].Timestep = v_.Timestep - jd[k][k_].Unit.Base = v_.Unit.Base - jd[k][k_].Unit.Prefix = v_.Unit.Prefix - if v_.StatisticsSeries != nil { - // Init Slices - jd[k][k_].StatisticsSeries = new(schema.StatsSeries) - jd[k][k_].StatisticsSeries.Max = make([]schema.Float, len(v_.StatisticsSeries.Max)) - jd[k][k_].StatisticsSeries.Min = make([]schema.Float, len(v_.StatisticsSeries.Min)) - jd[k][k_].StatisticsSeries.Median = make([]schema.Float, len(v_.StatisticsSeries.Median)) - jd[k][k_].StatisticsSeries.Mean = make([]schema.Float, len(v_.StatisticsSeries.Mean)) - // Copy Data - copy(jd[k][k_].StatisticsSeries.Max, v_.StatisticsSeries.Max) - copy(jd[k][k_].StatisticsSeries.Min, v_.StatisticsSeries.Min) - copy(jd[k][k_].StatisticsSeries.Median, v_.StatisticsSeries.Median) - copy(jd[k][k_].StatisticsSeries.Mean, v_.StatisticsSeries.Mean) - // Handle Percentiles - for k__, v__ := range v_.StatisticsSeries.Percentiles { - jd[k][k_].StatisticsSeries.Percentiles[k__] = make([]schema.Float, len(v__)) - copy(jd[k][k_].StatisticsSeries.Percentiles[k__], v__) - } - } else { - jd[k][k_].StatisticsSeries = v_.StatisticsSeries - } - } - } - return jd -} diff --git a/internal/metricdispatch/dataLoader.go b/internal/metricdispatch/dataLoader.go new file mode 100644 index 00000000..8bfebbd6 --- /dev/null +++ b/internal/metricdispatch/dataLoader.go @@ -0,0 +1,490 @@ +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. 
+// All rights reserved. This file is part of cc-backend. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Package metricdispatch provides a unified interface for loading and caching job metric data. +// +// This package serves as a central dispatcher that routes metric data requests to the appropriate +// backend based on job state. For running jobs, data is fetched from the metric store (e.g., cc-metric-store). +// For completed jobs, data is retrieved from the file-based job archive. +// +// # Key Features +// +// - Automatic backend selection based on job state (running vs. archived) +// - LRU cache for performance optimization (128 MB default cache size) +// - Data resampling using Largest Triangle Three Bucket algorithm for archived data +// - Automatic statistics series generation for jobs with many nodes +// - Support for scoped metrics (node, socket, accelerator, core) +// +// # Cache Behavior +// +// Cached data has different TTL (time-to-live) values depending on job state: +// - Running jobs: 2 minutes (data changes frequently) +// - Completed jobs: 5 hours (data is static) +// +// The cache key is based on job ID, state, requested metrics, scopes, and resolution. +// +// # Usage +// +// The primary entry point is LoadData, which automatically handles both running and archived jobs: +// +// jobData, err := metricdispatch.LoadData(job, metrics, scopes, ctx, resolution) +// if err != nil { +// // Handle error +// } +// +// For statistics only, use LoadJobStats, LoadScopedJobStats, or LoadAverages depending on the required format. +package metricdispatch + +import ( + "context" + "fmt" + "math" + "time" + + "github.com/ClusterCockpit/cc-backend/internal/config" + "github.com/ClusterCockpit/cc-backend/internal/metricstore" + "github.com/ClusterCockpit/cc-backend/pkg/archive" + cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger" + "github.com/ClusterCockpit/cc-lib/v2/lrucache" + "github.com/ClusterCockpit/cc-lib/v2/resampler" + "github.com/ClusterCockpit/cc-lib/v2/schema" +) + +// cache is an LRU cache with 128 MB capacity for storing loaded job metric data. +// The cache reduces load on both the metric store and archive backends. +var cache *lrucache.Cache = lrucache.New(128 * 1024 * 1024) + +// cacheKey generates a unique cache key for a job's metric data based on job ID, state, +// requested metrics, scopes, and resolution. Duration and StartTime are intentionally excluded +// because job.ID is more unique and the cache TTL ensures entries don't persist indefinitely. +func cacheKey( + job *schema.Job, + metrics []string, + scopes []schema.MetricScope, + resolution int, +) string { + return fmt.Sprintf("%d(%s):[%v],[%v]-%d", + job.ID, job.State, metrics, scopes, resolution) +} + +// LoadData retrieves metric data for a job from the appropriate backend (memory store for running jobs, +// archive for completed jobs) and applies caching, resampling, and statistics generation as needed. +// +// For running jobs or when archive is disabled, data is fetched from the metric store. +// For completed archived jobs, data is loaded from the job archive and resampled if needed. 
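An aside on the resampling mentioned above: the sketch below exercises resampler.LargestTriangleThreeBucket in isolation, with the signature assumed exactly as the LoadData body below uses it (data slice, old timestep, target point count in; thinned slice, new timestep, error out). The sample series is synthetic.

package main

import (
	"fmt"

	"github.com/ClusterCockpit/cc-lib/v2/resampler"
	"github.com/ClusterCockpit/cc-lib/v2/schema"
)

func main() {
	// 120 synthetic samples recorded at a 60 s timestep.
	data := make([]schema.Float, 120)
	for i := range data {
		data[i] = schema.Float(i % 10)
	}

	// Thin the series down to roughly 30 points, as LoadData does for
	// archived jobs; the returned timestep grows accordingly.
	thinned, newTimestep, err := resampler.LargestTriangleThreeBucket(data, 60, 30)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d points at a %d s timestep\n", len(thinned), newTimestep)
}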
+// +// Parameters: +// - job: The job for which to load metric data +// - metrics: List of metric names to load (nil loads all metrics for the cluster) +// - scopes: Metric scopes to include (nil defaults to node scope) +// - ctx: Context for cancellation and timeouts +// - resolution: Target number of data points for resampling (only applies to archived data) +// +// Returns the loaded job data and any error encountered. For partial errors (some metrics failed), +// the function returns the successfully loaded data with a warning logged. +func LoadData(job *schema.Job, + metrics []string, + scopes []schema.MetricScope, + ctx context.Context, + resolution int, +) (schema.JobData, error) { + data := cache.Get(cacheKey(job, metrics, scopes, resolution), func() (_ any, ttl time.Duration, size int) { + var jd schema.JobData + var err error + + if job.State == schema.JobStateRunning || + job.MonitoringStatus == schema.MonitoringStatusRunningOrArchiving || + config.Keys.DisableArchive { + + if scopes == nil { + scopes = append(scopes, schema.MetricScopeNode) + } + + if metrics == nil { + cluster := archive.GetCluster(job.Cluster) + for _, mc := range cluster.MetricConfig { + metrics = append(metrics, mc.Name) + } + } + + jd, err = metricstore.LoadData(job, metrics, scopes, ctx, resolution) + if err != nil { + if len(jd) != 0 { + cclog.Warnf("partial error loading metrics from store for job %d (user: %s, project: %s): %s", + job.JobID, job.User, job.Project, err.Error()) + } else { + cclog.Errorf("failed to load job data from metric store for job %d (user: %s, project: %s): %s", + job.JobID, job.User, job.Project, err.Error()) + return err, 0, 0 + } + } + size = jd.Size() + } else { + var jdTemp schema.JobData + jdTemp, err = archive.GetHandle().LoadJobData(job) + if err != nil { + cclog.Errorf("failed to load job data from archive for job %d (user: %s, project: %s): %s", + job.JobID, job.User, job.Project, err.Error()) + return err, 0, 0 + } + + jd = deepCopy(jdTemp) + + // Resample archived data using Largest Triangle Three Bucket algorithm to reduce data points + // to the requested resolution, improving transfer performance and client-side rendering. + for _, v := range jd { + for _, v_ := range v { + timestep := int64(0) + for i := 0; i < len(v_.Series); i += 1 { + v_.Series[i].Data, timestep, err = resampler.LargestTriangleThreeBucket(v_.Series[i].Data, int64(v_.Timestep), int64(resolution)) + if err != nil { + return err, 0, 0 + } + } + v_.Timestep = int(timestep) + } + } + + // Filter job data to only include requested metrics and scopes, avoiding unnecessary data transfer. + if metrics != nil || scopes != nil { + if metrics == nil { + metrics = make([]string, 0, len(jd)) + for k := range jd { + metrics = append(metrics, k) + } + } + + res := schema.JobData{} + for _, metric := range metrics { + if perscope, ok := jd[metric]; ok { + if len(perscope) > 1 { + subset := make(map[schema.MetricScope]*schema.JobMetric) + for _, scope := range scopes { + if jm, ok := perscope[scope]; ok { + subset[scope] = jm + } + } + + if len(subset) > 0 { + perscope = subset + } + } + + res[metric] = perscope + } + } + jd = res + } + size = jd.Size() + } + + ttl = 5 * time.Hour + if job.State == schema.JobStateRunning { + ttl = 2 * time.Minute + } + + // Generate statistics series for jobs with many nodes to enable min/median/max graphs + // instead of overwhelming the UI with individual node lines. Note that newly calculated + // statistics use min/median/max, while archived statistics may use min/mean/max. 
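To make the thinning rule below concrete, here is a minimal sketch that triggers it by hand: it builds a JobMetric with one series more than the threshold and collapses the series into bands via AddStatisticsSeries, the same schema method the loop below calls. Hostnames and values are synthetic.

package main

import (
	"fmt"

	"github.com/ClusterCockpit/cc-lib/v2/schema"
)

func main() {
	const maxSeriesSize = 15 // mirrors the threshold used below

	// A JobMetric with 16 per-node series, one more than the threshold.
	jm := &schema.JobMetric{Timestep: 60}
	for i := 0; i < maxSeriesSize+1; i++ {
		jm.Series = append(jm.Series, schema.Series{
			Hostname: fmt.Sprintf("node%03d", i),
			Data:     []schema.Float{1.0, 2.0, 3.0},
		})
	}

	// Same guard as in LoadData: skip metrics that already carry bands
	// or are small enough to draw line by line.
	if jm.StatisticsSeries == nil && len(jm.Series) > maxSeriesSize {
		jm.AddStatisticsSeries()
	}
	fmt.Println("bands generated:", jm.StatisticsSeries != nil)
}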
+ const maxSeriesSize int = 15 + for _, scopes := range jd { + for _, jm := range scopes { + if jm.StatisticsSeries != nil || len(jm.Series) <= maxSeriesSize { + continue + } + + jm.AddStatisticsSeries() + } + } + + nodeScopeRequested := false + for _, scope := range scopes { + if scope == schema.MetricScopeNode { + nodeScopeRequested = true + } + } + + if nodeScopeRequested { + jd.AddNodeScope("flops_any") + jd.AddNodeScope("mem_bw") + } + + // Round Resulting Stat Values + jd.RoundMetricStats() + + return jd, ttl, size + }) + + if err, ok := data.(error); ok { + cclog.Errorf("error in cached dataset for job %d: %s", job.JobID, err.Error()) + return nil, err + } + + return data.(schema.JobData), nil +} + +// LoadAverages computes average values for the specified metrics across all nodes of a job. +// For running jobs, it loads statistics from the metric store. For completed jobs, it uses +// the pre-calculated averages from the job archive. The results are appended to the data slice. +func LoadAverages( + job *schema.Job, + metrics []string, + data [][]schema.Float, + ctx context.Context, +) error { + if job.State != schema.JobStateRunning && !config.Keys.DisableArchive { + return archive.LoadAveragesFromArchive(job, metrics, data) // #166 change also here? + } + + stats, err := metricstore.LoadStats(job, metrics, ctx) + if err != nil { + cclog.Errorf("failed to load statistics from metric store for job %d (user: %s, project: %s): %s", + job.JobID, job.User, job.Project, err.Error()) + return err + } + + for i, m := range metrics { + nodes, ok := stats[m] + if !ok { + data[i] = append(data[i], schema.NaN) + continue + } + + sum := 0.0 + for _, node := range nodes { + sum += node.Avg + } + data[i] = append(data[i], schema.Float(sum)) + } + + return nil +} + +// LoadScopedJobStats retrieves job statistics organized by metric scope (node, socket, core, accelerator). +// For running jobs, statistics are computed from the metric store. For completed jobs, pre-calculated +// statistics are loaded from the job archive. +func LoadScopedJobStats( + job *schema.Job, + metrics []string, + scopes []schema.MetricScope, + ctx context.Context, +) (schema.ScopedJobStats, error) { + if job.State != schema.JobStateRunning && !config.Keys.DisableArchive { + return archive.LoadScopedStatsFromArchive(job, metrics, scopes) + } + + scopedStats, err := metricstore.LoadScopedStats(job, metrics, scopes, ctx) + if err != nil { + cclog.Errorf("failed to load scoped statistics from metric store for job %d (user: %s, project: %s): %s", + job.JobID, job.User, job.Project, err.Error()) + return nil, err + } + + return scopedStats, nil +} + +// LoadJobStats retrieves aggregated statistics (min/avg/max) for each requested metric across all job nodes. +// For running jobs, statistics are computed from the metric store. For completed jobs, pre-calculated +// statistics are loaded from the job archive. 
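Stepping back to LoadAverages above: its data argument is a slice of per-metric series, parallel to metrics, and each call appends one averaged value per metric for the given job. A hypothetical in-package caller (helper name and metric list invented; the pattern mirrors the histogram code in repository/stats.go later in this series) could look like this:

package metricdispatch

import (
	"context"

	cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
	"github.com/ClusterCockpit/cc-lib/v2/schema"
)

// collectAverages is a hypothetical helper that gathers one averaged value
// per metric per job, in the parallel-slice layout LoadAverages expects.
func collectAverages(ctx context.Context, jobs []*schema.Job) [][]schema.Float {
	metrics := []string{"flops_any", "mem_bw"}
	avgs := make([][]schema.Float, len(metrics)) // avgs[i] collects metric i across jobs

	for _, job := range jobs {
		if err := LoadAverages(job, metrics, avgs, ctx); err != nil {
			cclog.Errorf("loading averages failed: %s", err.Error())
			return nil
		}
	}

	// avgs[0] now holds one flops_any value per job and avgs[1] one mem_bw
	// value; metrics missing from the store appear as schema.NaN.
	return avgs
}

It is written inside the metricdispatch package because the package is internal; an external module could not import it directly.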
+func LoadJobStats(
+ job *schema.Job,
+ metrics []string,
+ ctx context.Context,
+) (map[string]schema.MetricStatistics, error) {
+ if job.State != schema.JobStateRunning && !config.Keys.DisableArchive {
+ return archive.LoadStatsFromArchive(job, metrics)
+ }
+
+ data := make(map[string]schema.MetricStatistics, len(metrics))
+
+ stats, err := metricstore.LoadStats(job, metrics, ctx)
+ if err != nil {
+ cclog.Errorf("failed to load statistics from metric store for job %d (user: %s, project: %s): %s",
+ job.JobID, job.User, job.Project, err.Error())
+ return data, err
+ }
+
+ for _, m := range metrics {
+ // Start min/max at +/-Inf so the first node's values always win; a 0.0
+ // start would pin Min to zero for any non-negative metric.
+ sum, avg, min, max := 0.0, 0.0, math.Inf(1), math.Inf(-1)
+ nodes, ok := stats[m]
+ if !ok {
+ data[m] = schema.MetricStatistics{Min: 0.0, Avg: avg, Max: 0.0}
+ continue
+ }
+
+ for _, node := range nodes {
+ sum += node.Avg
+ min = math.Min(min, node.Min)
+ max = math.Max(max, node.Max)
+ }
+
+ data[m] = schema.MetricStatistics{
+ Avg: (math.Round((sum/float64(job.NumNodes))*100) / 100),
+ Min: (math.Round(min*100) / 100),
+ Max: (math.Round(max*100) / 100),
+ }
+ }
+
+ return data, nil
+}
+
+// LoadNodeData retrieves metric data for specific nodes in a cluster within a time range.
+// This is used for node monitoring views and system status pages. Data is always fetched from
+// the metric store (not the archive) since it's for current/recent node status monitoring.
+//
+// Returns a nested map structure: node -> metric -> scoped data.
+func LoadNodeData(
+ cluster string,
+ metrics, nodes []string,
+ scopes []schema.MetricScope,
+ from, to time.Time,
+ ctx context.Context,
+) (map[string]map[string][]*schema.JobMetric, error) {
+ if metrics == nil {
+ for _, m := range archive.GetCluster(cluster).MetricConfig {
+ metrics = append(metrics, m.Name)
+ }
+ }
+
+ data, err := metricstore.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx)
+ if err != nil {
+ if len(data) != 0 {
+ cclog.Warnf("partial error loading node data from metric store for cluster %s: %s", cluster, err.Error())
+ } else {
+ cclog.Errorf("failed to load node data from metric store for cluster %s: %s", cluster, err.Error())
+ return nil, err
+ }
+ }
+
+ if data == nil {
+ return nil, fmt.Errorf("metric store for cluster '%s' does not support node data queries", cluster)
+ }
+
+ return data, nil
+}
+
+// LoadNodeListData retrieves time-series metric data for multiple nodes within a time range,
+// with optional resampling and automatic statistics generation for large datasets.
+// This is used for comparing multiple nodes or displaying node status over time.
+//
+// Returns a map of node names to their job-like metric data structures.
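Before the node-list variant that follows, a short usage sketch of LoadNodeData above, roughly as a systems view would call it. The cluster, metric, and node names are placeholders, and the helper name is invented; it is written in-package since metricdispatch is internal.

package metricdispatch

import (
	"context"
	"fmt"
	"time"

	"github.com/ClusterCockpit/cc-lib/v2/schema"
)

// printNodeStatus is a hypothetical caller: fetch the last hour of node-level
// data for two nodes and report how many metric blocks came back per node.
func printNodeStatus(ctx context.Context) error {
	to := time.Now()
	from := to.Add(-1 * time.Hour)

	data, err := LoadNodeData(
		"testcluster", // placeholder cluster name
		[]string{"cpu_load", "mem_used"},
		[]string{"node001", "node002"},
		[]schema.MetricScope{schema.MetricScopeNode},
		from, to, ctx,
	)
	if err != nil {
		return err
	}

	// Result shape: node -> metric -> one or more scoped JobMetric blocks.
	for node, byMetric := range data {
		for metric, blocks := range byMetric {
			fmt.Printf("%s/%s: %d block(s)\n", node, metric, len(blocks))
		}
	}
	return nil
}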
+func LoadNodeListData( + cluster, subCluster string, + nodes []string, + metrics []string, + scopes []schema.MetricScope, + resolution int, + from, to time.Time, + ctx context.Context, +) (map[string]schema.JobData, error) { + if metrics == nil { + for _, m := range archive.GetCluster(cluster).MetricConfig { + metrics = append(metrics, m.Name) + } + } + + data, err := metricstore.LoadNodeListData(cluster, subCluster, nodes, metrics, scopes, resolution, from, to, ctx) + if err != nil { + if len(data) != 0 { + cclog.Warnf("partial error loading node list data from metric store for cluster %s, subcluster %s: %s", + cluster, subCluster, err.Error()) + } else { + cclog.Errorf("failed to load node list data from metric store for cluster %s, subcluster %s: %s", + cluster, subCluster, err.Error()) + return nil, err + } + } + + // Generate statistics series for datasets with many series to improve visualization performance. + // Statistics are calculated as min/median/max. + const maxSeriesSize int = 8 + for _, jd := range data { + for _, scopes := range jd { + for _, jm := range scopes { + if jm.StatisticsSeries != nil || len(jm.Series) < maxSeriesSize { + continue + } + jm.AddStatisticsSeries() + } + } + } + + if data == nil { + return nil, fmt.Errorf("metric store for cluster '%s' does not support node list queries", cluster) + } + + return data, nil +} + +// deepCopy creates a deep copy of JobData to prevent cache corruption when modifying +// archived data (e.g., during resampling). This ensures the cached archive data remains +// immutable while allowing per-request transformations. +func deepCopy(source schema.JobData) schema.JobData { + result := make(schema.JobData, len(source)) + + for metricName, scopeMap := range source { + result[metricName] = make(map[schema.MetricScope]*schema.JobMetric, len(scopeMap)) + + for scope, jobMetric := range scopeMap { + result[metricName][scope] = copyJobMetric(jobMetric) + } + } + + return result +} + +func copyJobMetric(src *schema.JobMetric) *schema.JobMetric { + dst := &schema.JobMetric{ + Timestep: src.Timestep, + Unit: src.Unit, + Series: make([]schema.Series, len(src.Series)), + } + + for i := range src.Series { + dst.Series[i] = copySeries(&src.Series[i]) + } + + if src.StatisticsSeries != nil { + dst.StatisticsSeries = copyStatisticsSeries(src.StatisticsSeries) + } + + return dst +} + +func copySeries(src *schema.Series) schema.Series { + dst := schema.Series{ + Hostname: src.Hostname, + Id: src.Id, + Statistics: src.Statistics, + Data: make([]schema.Float, len(src.Data)), + } + + copy(dst.Data, src.Data) + return dst +} + +func copyStatisticsSeries(src *schema.StatsSeries) *schema.StatsSeries { + dst := &schema.StatsSeries{ + Min: make([]schema.Float, len(src.Min)), + Mean: make([]schema.Float, len(src.Mean)), + Median: make([]schema.Float, len(src.Median)), + Max: make([]schema.Float, len(src.Max)), + } + + copy(dst.Min, src.Min) + copy(dst.Mean, src.Mean) + copy(dst.Median, src.Median) + copy(dst.Max, src.Max) + + if len(src.Percentiles) > 0 { + dst.Percentiles = make(map[int][]schema.Float, len(src.Percentiles)) + for percentile, values := range src.Percentiles { + dst.Percentiles[percentile] = make([]schema.Float, len(values)) + copy(dst.Percentiles[percentile], values) + } + } + + return dst +} diff --git a/internal/metricdispatch/dataLoader_test.go b/internal/metricdispatch/dataLoader_test.go new file mode 100644 index 00000000..c4841f8d --- /dev/null +++ b/internal/metricdispatch/dataLoader_test.go @@ -0,0 +1,125 @@ +// Copyright (C) 
NHR@FAU, University Erlangen-Nuremberg. +// All rights reserved. This file is part of cc-backend. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package metricdispatch + +import ( + "testing" + + "github.com/ClusterCockpit/cc-lib/v2/schema" +) + +func TestDeepCopy(t *testing.T) { + nodeId := "0" + original := schema.JobData{ + "cpu_load": { + schema.MetricScopeNode: &schema.JobMetric{ + Timestep: 60, + Unit: schema.Unit{Base: "load", Prefix: ""}, + Series: []schema.Series{ + { + Hostname: "node001", + Id: &nodeId, + Data: []schema.Float{1.0, 2.0, 3.0}, + Statistics: schema.MetricStatistics{ + Min: 1.0, + Avg: 2.0, + Max: 3.0, + }, + }, + }, + StatisticsSeries: &schema.StatsSeries{ + Min: []schema.Float{1.0, 1.5, 2.0}, + Mean: []schema.Float{2.0, 2.5, 3.0}, + Median: []schema.Float{2.0, 2.5, 3.0}, + Max: []schema.Float{3.0, 3.5, 4.0}, + Percentiles: map[int][]schema.Float{ + 25: {1.5, 2.0, 2.5}, + 75: {2.5, 3.0, 3.5}, + }, + }, + }, + }, + } + + copied := deepCopy(original) + + original["cpu_load"][schema.MetricScopeNode].Series[0].Data[0] = 999.0 + original["cpu_load"][schema.MetricScopeNode].StatisticsSeries.Min[0] = 888.0 + original["cpu_load"][schema.MetricScopeNode].StatisticsSeries.Percentiles[25][0] = 777.0 + + if copied["cpu_load"][schema.MetricScopeNode].Series[0].Data[0] != 1.0 { + t.Errorf("Series data was not deeply copied: got %v, want 1.0", + copied["cpu_load"][schema.MetricScopeNode].Series[0].Data[0]) + } + + if copied["cpu_load"][schema.MetricScopeNode].StatisticsSeries.Min[0] != 1.0 { + t.Errorf("StatisticsSeries was not deeply copied: got %v, want 1.0", + copied["cpu_load"][schema.MetricScopeNode].StatisticsSeries.Min[0]) + } + + if copied["cpu_load"][schema.MetricScopeNode].StatisticsSeries.Percentiles[25][0] != 1.5 { + t.Errorf("Percentiles was not deeply copied: got %v, want 1.5", + copied["cpu_load"][schema.MetricScopeNode].StatisticsSeries.Percentiles[25][0]) + } + + if copied["cpu_load"][schema.MetricScopeNode].Timestep != 60 { + t.Errorf("Timestep not copied correctly: got %v, want 60", + copied["cpu_load"][schema.MetricScopeNode].Timestep) + } + + if copied["cpu_load"][schema.MetricScopeNode].Series[0].Hostname != "node001" { + t.Errorf("Hostname not copied correctly: got %v, want node001", + copied["cpu_load"][schema.MetricScopeNode].Series[0].Hostname) + } +} + +func TestDeepCopyNilStatisticsSeries(t *testing.T) { + original := schema.JobData{ + "mem_used": { + schema.MetricScopeNode: &schema.JobMetric{ + Timestep: 60, + Series: []schema.Series{ + { + Hostname: "node001", + Data: []schema.Float{1.0, 2.0}, + }, + }, + StatisticsSeries: nil, + }, + }, + } + + copied := deepCopy(original) + + if copied["mem_used"][schema.MetricScopeNode].StatisticsSeries != nil { + t.Errorf("StatisticsSeries should be nil, got %v", + copied["mem_used"][schema.MetricScopeNode].StatisticsSeries) + } +} + +func TestDeepCopyEmptyPercentiles(t *testing.T) { + original := schema.JobData{ + "cpu_load": { + schema.MetricScopeNode: &schema.JobMetric{ + Timestep: 60, + Series: []schema.Series{}, + StatisticsSeries: &schema.StatsSeries{ + Min: []schema.Float{1.0}, + Mean: []schema.Float{2.0}, + Median: []schema.Float{2.0}, + Max: []schema.Float{3.0}, + Percentiles: nil, + }, + }, + }, + } + + copied := deepCopy(original) + + if copied["cpu_load"][schema.MetricScopeNode].StatisticsSeries.Percentiles != nil { + t.Errorf("Percentiles should be nil when source is nil/empty") + } +} diff --git a/internal/memorystore/api.go 
b/internal/metricstore/api.go similarity index 98% rename from internal/memorystore/api.go rename to internal/metricstore/api.go index 41c53a18..d8a2ea82 100644 --- a/internal/memorystore/api.go +++ b/internal/metricstore/api.go @@ -3,10 +3,11 @@ // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. -package memorystore +package metricstore import ( "errors" + "fmt" "math" "github.com/ClusterCockpit/cc-lib/v2/schema" @@ -124,6 +125,9 @@ func FetchData(req APIQueryRequest) (*APIQueryResponse, error) { req.WithData = true ms := GetMemoryStore() + if ms == nil { + return nil, fmt.Errorf("memorystore not initialized") + } response := APIQueryResponse{ Results: make([][]APIMetricData, 0, len(req.Queries)), diff --git a/internal/memorystore/archive.go b/internal/metricstore/archive.go similarity index 99% rename from internal/memorystore/archive.go rename to internal/metricstore/archive.go index fc46dac6..972769fd 100644 --- a/internal/memorystore/archive.go +++ b/internal/metricstore/archive.go @@ -3,7 +3,7 @@ // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. -package memorystore +package metricstore import ( "archive/zip" diff --git a/internal/memorystore/avroCheckpoint.go b/internal/metricstore/avroCheckpoint.go similarity index 99% rename from internal/memorystore/avroCheckpoint.go rename to internal/metricstore/avroCheckpoint.go index b0b0cf42..275a64bd 100644 --- a/internal/memorystore/avroCheckpoint.go +++ b/internal/metricstore/avroCheckpoint.go @@ -3,7 +3,7 @@ // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. -package memorystore +package metricstore import ( "bufio" diff --git a/internal/memorystore/avroHelper.go b/internal/metricstore/avroHelper.go similarity index 99% rename from internal/memorystore/avroHelper.go rename to internal/metricstore/avroHelper.go index 93a293bd..5587a58d 100644 --- a/internal/memorystore/avroHelper.go +++ b/internal/metricstore/avroHelper.go @@ -3,7 +3,7 @@ // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. -package memorystore +package metricstore import ( "context" diff --git a/internal/memorystore/avroStruct.go b/internal/metricstore/avroStruct.go similarity index 99% rename from internal/memorystore/avroStruct.go rename to internal/metricstore/avroStruct.go index 2643a9a7..78a8d137 100644 --- a/internal/memorystore/avroStruct.go +++ b/internal/metricstore/avroStruct.go @@ -3,7 +3,7 @@ // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. -package memorystore +package metricstore import ( "sync" diff --git a/internal/memorystore/buffer.go b/internal/metricstore/buffer.go similarity index 99% rename from internal/memorystore/buffer.go rename to internal/metricstore/buffer.go index 15e29b3a..94d3ce76 100644 --- a/internal/memorystore/buffer.go +++ b/internal/metricstore/buffer.go @@ -3,7 +3,7 @@ // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. 
-package memorystore +package metricstore import ( "errors" diff --git a/internal/memorystore/checkpoint.go b/internal/metricstore/checkpoint.go similarity index 99% rename from internal/memorystore/checkpoint.go rename to internal/metricstore/checkpoint.go index c48c2fd8..27d611c4 100644 --- a/internal/memorystore/checkpoint.go +++ b/internal/metricstore/checkpoint.go @@ -3,7 +3,7 @@ // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. -package memorystore +package metricstore import ( "bufio" diff --git a/internal/memorystore/config.go b/internal/metricstore/config.go similarity index 98% rename from internal/memorystore/config.go rename to internal/metricstore/config.go index fbd62341..97f16c46 100644 --- a/internal/memorystore/config.go +++ b/internal/metricstore/config.go @@ -3,7 +3,7 @@ // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. -package memorystore +package metricstore import ( "fmt" @@ -19,8 +19,6 @@ const ( DefaultAvroCheckpointInterval = time.Minute ) -var InternalCCMSFlag bool = false - type MetricStoreConfig struct { // Number of concurrent workers for checkpoint and archive operations. // If not set or 0, defaults to min(runtime.NumCPU()/2+1, 10) diff --git a/internal/memorystore/configSchema.go b/internal/metricstore/configSchema.go similarity index 99% rename from internal/memorystore/configSchema.go rename to internal/metricstore/configSchema.go index 2616edc6..f1a20a73 100644 --- a/internal/memorystore/configSchema.go +++ b/internal/metricstore/configSchema.go @@ -3,7 +3,7 @@ // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. -package memorystore +package metricstore const configSchema = `{ "type": "object", diff --git a/internal/memorystore/debug.go b/internal/metricstore/debug.go similarity index 99% rename from internal/memorystore/debug.go rename to internal/metricstore/debug.go index b56cf254..50c91e08 100644 --- a/internal/memorystore/debug.go +++ b/internal/metricstore/debug.go @@ -3,7 +3,7 @@ // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. -package memorystore +package metricstore import ( "bufio" diff --git a/internal/memorystore/healthcheck.go b/internal/metricstore/healthcheck.go similarity index 99% rename from internal/memorystore/healthcheck.go rename to internal/metricstore/healthcheck.go index b1052f3b..2a49c47a 100644 --- a/internal/memorystore/healthcheck.go +++ b/internal/metricstore/healthcheck.go @@ -3,7 +3,7 @@ // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. -package memorystore +package metricstore import ( "bufio" diff --git a/internal/memorystore/level.go b/internal/metricstore/level.go similarity index 99% rename from internal/memorystore/level.go rename to internal/metricstore/level.go index bce2a7a6..d46f893a 100644 --- a/internal/memorystore/level.go +++ b/internal/metricstore/level.go @@ -3,7 +3,7 @@ // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. 
-package memorystore +package metricstore import ( "sync" diff --git a/internal/memorystore/lineprotocol.go b/internal/metricstore/lineprotocol.go similarity index 99% rename from internal/memorystore/lineprotocol.go rename to internal/metricstore/lineprotocol.go index ca8cc811..cc59e213 100644 --- a/internal/memorystore/lineprotocol.go +++ b/internal/metricstore/lineprotocol.go @@ -3,7 +3,7 @@ // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. -package memorystore +package metricstore import ( "context" diff --git a/internal/memorystore/memorystore.go b/internal/metricstore/memorystore.go similarity index 99% rename from internal/memorystore/memorystore.go rename to internal/metricstore/memorystore.go index 7c5ea0eb..14a02fcd 100644 --- a/internal/memorystore/memorystore.go +++ b/internal/metricstore/memorystore.go @@ -3,7 +3,7 @@ // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. -// Package memorystore provides an efficient in-memory time-series metric storage system +// Package metricstore provides an efficient in-memory time-series metric storage system // with support for hierarchical data organization, checkpointing, and archiving. // // The package organizes metrics in a tree structure (cluster → host → component) and @@ -17,7 +17,7 @@ // - Concurrent checkpoint/archive workers // - Support for sum and average aggregation // - NATS integration for metric ingestion -package memorystore +package metricstore import ( "bytes" diff --git a/internal/memorystore/memorystore_test.go b/internal/metricstore/memorystore_test.go similarity index 99% rename from internal/memorystore/memorystore_test.go rename to internal/metricstore/memorystore_test.go index 57ea6938..29379d21 100644 --- a/internal/memorystore/memorystore_test.go +++ b/internal/metricstore/memorystore_test.go @@ -3,7 +3,7 @@ // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. -package memorystore +package metricstore import ( "testing" diff --git a/internal/metricdata/cc-metric-store-internal.go b/internal/metricstore/query.go similarity index 87% rename from internal/metricdata/cc-metric-store-internal.go rename to internal/metricstore/query.go index 741ce358..78c78dd5 100644 --- a/internal/metricdata/cc-metric-store-internal.go +++ b/internal/metricstore/query.go @@ -3,56 +3,41 @@ // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. -package metricdata +package metricstore import ( "context" - "encoding/json" "fmt" "strconv" "strings" "time" - "github.com/ClusterCockpit/cc-backend/internal/memorystore" "github.com/ClusterCockpit/cc-backend/pkg/archive" cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger" "github.com/ClusterCockpit/cc-lib/v2/schema" ) -// Bloat Code -type CCMetricStoreConfigInternal struct { - Kind string `json:"kind"` - Url string `json:"url"` - Token string `json:"token"` +// TestLoadDataCallback allows tests to override LoadData behavior +var TestLoadDataCallback func(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error) - // If metrics are known to this MetricDataRepository under a different - // name than in the `metricConfig` section of the 'cluster.json', - // provide this optional mapping of local to remote name for this metric. 
- Renamings map[string]string `json:"metricRenamings"` -} - -// Bloat Code -type CCMetricStoreInternal struct{} - -// Bloat Code -func (ccms *CCMetricStoreInternal) Init(rawConfig json.RawMessage) error { - return nil -} - -func (ccms *CCMetricStoreInternal) LoadData( +func LoadData( job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int, ) (schema.JobData, error) { - queries, assignedScope, err := ccms.buildQueries(job, metrics, scopes, int64(resolution)) + if TestLoadDataCallback != nil { + return TestLoadDataCallback(job, metrics, scopes, ctx, resolution) + } + + queries, assignedScope, err := buildQueries(job, metrics, scopes, int64(resolution)) if err != nil { cclog.Errorf("Error while building queries for jobId %d, Metrics %v, Scopes %v: %s", job.JobID, metrics, scopes, err.Error()) return nil, err } - req := memorystore.APIQueryRequest{ + req := APIQueryRequest{ Cluster: job.Cluster, From: job.StartTime, To: job.StartTime + int64(job.Duration), @@ -61,7 +46,7 @@ func (ccms *CCMetricStoreInternal) LoadData( WithData: true, } - resBody, err := memorystore.FetchData(req) + resBody, err := FetchData(req) if err != nil { cclog.Errorf("Error while fetching data : %s", err.Error()) return nil, err @@ -149,13 +134,13 @@ var ( acceleratorString = string(schema.MetricScopeAccelerator) ) -func (ccms *CCMetricStoreInternal) buildQueries( +func buildQueries( job *schema.Job, metrics []string, scopes []schema.MetricScope, resolution int64, -) ([]memorystore.APIQuery, []schema.MetricScope, error) { - queries := make([]memorystore.APIQuery, 0, len(metrics)*len(scopes)*len(job.Resources)) +) ([]APIQuery, []schema.MetricScope, error) { + queries := make([]APIQuery, 0, len(metrics)*len(scopes)*len(job.Resources)) assignedScope := []schema.MetricScope{} subcluster, scerr := archive.GetSubCluster(job.Cluster, job.SubCluster) @@ -217,7 +202,7 @@ func (ccms *CCMetricStoreInternal) buildQueries( continue } - queries = append(queries, memorystore.APIQuery{ + queries = append(queries, APIQuery{ Metric: metric, Hostname: host.Hostname, Aggregate: false, @@ -235,7 +220,7 @@ func (ccms *CCMetricStoreInternal) buildQueries( continue } - queries = append(queries, memorystore.APIQuery{ + queries = append(queries, APIQuery{ Metric: metric, Hostname: host.Hostname, Aggregate: true, @@ -249,7 +234,7 @@ func (ccms *CCMetricStoreInternal) buildQueries( // HWThread -> HWThead if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeHWThread { - queries = append(queries, memorystore.APIQuery{ + queries = append(queries, APIQuery{ Metric: metric, Hostname: host.Hostname, Aggregate: false, @@ -265,7 +250,7 @@ func (ccms *CCMetricStoreInternal) buildQueries( if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeCore { cores, _ := topology.GetCoresFromHWThreads(hwthreads) for _, core := range cores { - queries = append(queries, memorystore.APIQuery{ + queries = append(queries, APIQuery{ Metric: metric, Hostname: host.Hostname, Aggregate: true, @@ -282,7 +267,7 @@ func (ccms *CCMetricStoreInternal) buildQueries( if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeSocket { sockets, _ := topology.GetSocketsFromHWThreads(hwthreads) for _, socket := range sockets { - queries = append(queries, memorystore.APIQuery{ + queries = append(queries, APIQuery{ Metric: metric, Hostname: host.Hostname, Aggregate: true, @@ -297,7 +282,7 @@ func (ccms *CCMetricStoreInternal) buildQueries( // HWThread -> Node if nativeScope == 
schema.MetricScopeHWThread && scope == schema.MetricScopeNode { - queries = append(queries, memorystore.APIQuery{ + queries = append(queries, APIQuery{ Metric: metric, Hostname: host.Hostname, Aggregate: true, @@ -312,7 +297,7 @@ func (ccms *CCMetricStoreInternal) buildQueries( // Core -> Core if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeCore { cores, _ := topology.GetCoresFromHWThreads(hwthreads) - queries = append(queries, memorystore.APIQuery{ + queries = append(queries, APIQuery{ Metric: metric, Hostname: host.Hostname, Aggregate: false, @@ -328,7 +313,7 @@ func (ccms *CCMetricStoreInternal) buildQueries( if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeSocket { sockets, _ := topology.GetSocketsFromCores(hwthreads) for _, socket := range sockets { - queries = append(queries, memorystore.APIQuery{ + queries = append(queries, APIQuery{ Metric: metric, Hostname: host.Hostname, Aggregate: true, @@ -344,7 +329,7 @@ func (ccms *CCMetricStoreInternal) buildQueries( // Core -> Node if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeNode { cores, _ := topology.GetCoresFromHWThreads(hwthreads) - queries = append(queries, memorystore.APIQuery{ + queries = append(queries, APIQuery{ Metric: metric, Hostname: host.Hostname, Aggregate: true, @@ -359,7 +344,7 @@ func (ccms *CCMetricStoreInternal) buildQueries( // MemoryDomain -> MemoryDomain if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeMemoryDomain { sockets, _ := topology.GetMemoryDomainsFromHWThreads(hwthreads) - queries = append(queries, memorystore.APIQuery{ + queries = append(queries, APIQuery{ Metric: metric, Hostname: host.Hostname, Aggregate: false, @@ -374,7 +359,7 @@ func (ccms *CCMetricStoreInternal) buildQueries( // MemoryDoman -> Node if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeNode { sockets, _ := topology.GetMemoryDomainsFromHWThreads(hwthreads) - queries = append(queries, memorystore.APIQuery{ + queries = append(queries, APIQuery{ Metric: metric, Hostname: host.Hostname, Aggregate: true, @@ -389,7 +374,7 @@ func (ccms *CCMetricStoreInternal) buildQueries( // Socket -> Socket if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeSocket { sockets, _ := topology.GetSocketsFromHWThreads(hwthreads) - queries = append(queries, memorystore.APIQuery{ + queries = append(queries, APIQuery{ Metric: metric, Hostname: host.Hostname, Aggregate: false, @@ -404,7 +389,7 @@ func (ccms *CCMetricStoreInternal) buildQueries( // Socket -> Node if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeNode { sockets, _ := topology.GetSocketsFromHWThreads(hwthreads) - queries = append(queries, memorystore.APIQuery{ + queries = append(queries, APIQuery{ Metric: metric, Hostname: host.Hostname, Aggregate: true, @@ -418,7 +403,7 @@ func (ccms *CCMetricStoreInternal) buildQueries( // Node -> Node if nativeScope == schema.MetricScopeNode && scope == schema.MetricScopeNode { - queries = append(queries, memorystore.APIQuery{ + queries = append(queries, APIQuery{ Metric: metric, Hostname: host.Hostname, Resolution: resolution, @@ -435,18 +420,18 @@ func (ccms *CCMetricStoreInternal) buildQueries( return queries, assignedScope, nil } -func (ccms *CCMetricStoreInternal) LoadStats( +func LoadStats( job *schema.Job, metrics []string, ctx context.Context, ) (map[string]map[string]schema.MetricStatistics, error) { - queries, _, err := ccms.buildQueries(job, metrics, 
[]schema.MetricScope{schema.MetricScopeNode}, 0) // #166 Add scope shere for analysis view accelerator normalization? + queries, _, err := buildQueries(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, 0) // #166 Add scope here for analysis view accelerator normalization? if err != nil { cclog.Errorf("Error while building queries for jobId %d, Metrics %v: %s", job.JobID, metrics, err.Error()) return nil, err } - req := memorystore.APIQueryRequest{ + req := APIQueryRequest{ Cluster: job.Cluster, From: job.StartTime, To: job.StartTime + int64(job.Duration),
@@ -455,7 +440,7 @@ func (ccms *CCMetricStoreInternal) LoadStats( WithData: false, } - resBody, err := memorystore.FetchData(req) + resBody, err := FetchData(req) if err != nil { cclog.Errorf("Error while fetching data : %s", err.Error()) return nil, err
@@ -492,20 +477,19 @@ return stats, nil } -// Used for Job-View Statistics Table -func (ccms *CCMetricStoreInternal) LoadScopedStats( +func LoadScopedStats( job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, ) (schema.ScopedJobStats, error) { - queries, assignedScope, err := ccms.buildQueries(job, metrics, scopes, 0) + queries, assignedScope, err := buildQueries(job, metrics, scopes, 0) if err != nil { cclog.Errorf("Error while building queries for jobId %d, Metrics %v, Scopes %v: %s", job.JobID, metrics, scopes, err.Error()) return nil, err } - req := memorystore.APIQueryRequest{ + req := APIQueryRequest{ Cluster: job.Cluster, From: job.StartTime, To: job.StartTime + int64(job.Duration),
@@ -514,7 +498,7 @@ func (ccms *CCMetricStoreInternal) LoadScopedStats( WithData: false, } - resBody, err := memorystore.FetchData(req) + resBody, err := FetchData(req) if err != nil { cclog.Errorf("Error while fetching data : %s", err.Error()) return nil, err
@@ -583,15 +567,14 @@ return scopedJobStats, nil } -// Used for Systems-View Node-Overview -func (ccms *CCMetricStoreInternal) LoadNodeData( +func LoadNodeData( cluster string, metrics, nodes []string, scopes []schema.MetricScope, from, to time.Time, ctx context.Context, ) (map[string]map[string][]*schema.JobMetric, error) { - req := memorystore.APIQueryRequest{ + req := APIQueryRequest{ Cluster: cluster, From: from.Unix(), To: to.Unix(),
@@ -604,7 +587,7 @@ func (ccms *CCMetricStoreInternal) LoadNodeData( } else { for _, node := range nodes { for _, metric := range metrics { - req.Queries = append(req.Queries, memorystore.APIQuery{ + req.Queries = append(req.Queries, APIQuery{ Hostname: node, Metric: metric, Resolution: 0, // Default for Node Queries: Will return metric $Timestep Resolution
@@ -613,7 +596,7 @@ } } - resBody, err := memorystore.FetchData(req) + resBody, err := FetchData(req) if err != nil { cclog.Errorf("Error while fetching data : %s", err.Error()) return nil, err
@@ -622,7 +605,7 @@ var errors []string data := make(map[string]map[string][]*schema.JobMetric) for i, res := range resBody.Results { - var query memorystore.APIQuery + var query APIQuery if resBody.Queries != nil { query = resBody.Queries[i] } else { @@ -673,8 +656,7 @@ return data, nil } -// Used for Systems-View Node-List -func (ccms *CCMetricStoreInternal) LoadNodeListData( +func LoadNodeListData( cluster, subCluster string, nodes []string, metrics []string,
@@ -683,15 +665,14 @@
func (ccms *CCMetricStoreInternal) LoadNodeListData( from, to time.Time, ctx context.Context, ) (map[string]schema.JobData, error) { - // Note: Order of node data is not guaranteed after this point - queries, assignedScope, err := ccms.buildNodeQueries(cluster, subCluster, nodes, metrics, scopes, int64(resolution)) + queries, assignedScope, err := buildNodeQueries(cluster, subCluster, nodes, metrics, scopes, int64(resolution)) if err != nil { cclog.Errorf("Error while building node queries for Cluster %s, SubCLuster %s, Metrics %v, Scopes %v: %s", cluster, subCluster, metrics, scopes, err.Error()) return nil, err } - req := memorystore.APIQueryRequest{ + req := APIQueryRequest{ Cluster: cluster, Queries: queries, From: from.Unix(), @@ -700,7 +681,7 @@ func (ccms *CCMetricStoreInternal) LoadNodeListData( WithData: true, } - resBody, err := memorystore.FetchData(req) + resBody, err := FetchData(req) if err != nil { cclog.Errorf("Error while fetching data : %s", err.Error()) return nil, err @@ -709,7 +690,7 @@ func (ccms *CCMetricStoreInternal) LoadNodeListData( var errors []string data := make(map[string]schema.JobData) for i, row := range resBody.Results { - var query memorystore.APIQuery + var query APIQuery if resBody.Queries != nil { query = resBody.Queries[i] } else { @@ -789,15 +770,15 @@ func (ccms *CCMetricStoreInternal) LoadNodeListData( return data, nil } -func (ccms *CCMetricStoreInternal) buildNodeQueries( +func buildNodeQueries( cluster string, subCluster string, nodes []string, metrics []string, scopes []schema.MetricScope, resolution int64, -) ([]memorystore.APIQuery, []schema.MetricScope, error) { - queries := make([]memorystore.APIQuery, 0, len(metrics)*len(scopes)*len(nodes)) +) ([]APIQuery, []schema.MetricScope, error) { + queries := make([]APIQuery, 0, len(metrics)*len(scopes)*len(nodes)) assignedScope := []schema.MetricScope{} // Get Topol before loop if subCluster given @@ -812,7 +793,6 @@ func (ccms *CCMetricStoreInternal) buildNodeQueries( } for _, metric := range metrics { - metric := metric mc := archive.GetMetricConfig(cluster, metric) if mc == nil { // return nil, fmt.Errorf("METRICDATA/CCMS > metric '%s' is not specified for cluster '%s'", metric, cluster) @@ -880,7 +860,7 @@ func (ccms *CCMetricStoreInternal) buildNodeQueries( continue } - queries = append(queries, memorystore.APIQuery{ + queries = append(queries, APIQuery{ Metric: metric, Hostname: hostname, Aggregate: false, @@ -898,7 +878,7 @@ func (ccms *CCMetricStoreInternal) buildNodeQueries( continue } - queries = append(queries, memorystore.APIQuery{ + queries = append(queries, APIQuery{ Metric: metric, Hostname: hostname, Aggregate: true, @@ -912,7 +892,7 @@ func (ccms *CCMetricStoreInternal) buildNodeQueries( // HWThread -> HWThead if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeHWThread { - queries = append(queries, memorystore.APIQuery{ + queries = append(queries, APIQuery{ Metric: metric, Hostname: hostname, Aggregate: false, @@ -928,7 +908,7 @@ func (ccms *CCMetricStoreInternal) buildNodeQueries( if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeCore { cores, _ := topology.GetCoresFromHWThreads(topology.Node) for _, core := range cores { - queries = append(queries, memorystore.APIQuery{ + queries = append(queries, APIQuery{ Metric: metric, Hostname: hostname, Aggregate: true, @@ -945,7 +925,7 @@ func (ccms *CCMetricStoreInternal) buildNodeQueries( if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeSocket { sockets, _ := 
topology.GetSocketsFromHWThreads(topology.Node) for _, socket := range sockets { - queries = append(queries, memorystore.APIQuery{ + queries = append(queries, APIQuery{ Metric: metric, Hostname: hostname, Aggregate: true, @@ -960,7 +940,7 @@ func (ccms *CCMetricStoreInternal) buildNodeQueries( // HWThread -> Node if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeNode { - queries = append(queries, memorystore.APIQuery{ + queries = append(queries, APIQuery{ Metric: metric, Hostname: hostname, Aggregate: true, @@ -975,7 +955,7 @@ func (ccms *CCMetricStoreInternal) buildNodeQueries( // Core -> Core if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeCore { cores, _ := topology.GetCoresFromHWThreads(topology.Node) - queries = append(queries, memorystore.APIQuery{ + queries = append(queries, APIQuery{ Metric: metric, Hostname: hostname, Aggregate: false, @@ -991,7 +971,7 @@ func (ccms *CCMetricStoreInternal) buildNodeQueries( if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeSocket { sockets, _ := topology.GetSocketsFromCores(topology.Node) for _, socket := range sockets { - queries = append(queries, memorystore.APIQuery{ + queries = append(queries, APIQuery{ Metric: metric, Hostname: hostname, Aggregate: true, @@ -1007,7 +987,7 @@ func (ccms *CCMetricStoreInternal) buildNodeQueries( // Core -> Node if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeNode { cores, _ := topology.GetCoresFromHWThreads(topology.Node) - queries = append(queries, memorystore.APIQuery{ + queries = append(queries, APIQuery{ Metric: metric, Hostname: hostname, Aggregate: true, @@ -1022,7 +1002,7 @@ func (ccms *CCMetricStoreInternal) buildNodeQueries( // MemoryDomain -> MemoryDomain if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeMemoryDomain { sockets, _ := topology.GetMemoryDomainsFromHWThreads(topology.Node) - queries = append(queries, memorystore.APIQuery{ + queries = append(queries, APIQuery{ Metric: metric, Hostname: hostname, Aggregate: false, @@ -1037,7 +1017,7 @@ func (ccms *CCMetricStoreInternal) buildNodeQueries( // MemoryDoman -> Node if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeNode { sockets, _ := topology.GetMemoryDomainsFromHWThreads(topology.Node) - queries = append(queries, memorystore.APIQuery{ + queries = append(queries, APIQuery{ Metric: metric, Hostname: hostname, Aggregate: true, @@ -1052,7 +1032,7 @@ func (ccms *CCMetricStoreInternal) buildNodeQueries( // Socket -> Socket if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeSocket { sockets, _ := topology.GetSocketsFromHWThreads(topology.Node) - queries = append(queries, memorystore.APIQuery{ + queries = append(queries, APIQuery{ Metric: metric, Hostname: hostname, Aggregate: false, @@ -1067,7 +1047,7 @@ func (ccms *CCMetricStoreInternal) buildNodeQueries( // Socket -> Node if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeNode { sockets, _ := topology.GetSocketsFromHWThreads(topology.Node) - queries = append(queries, memorystore.APIQuery{ + queries = append(queries, APIQuery{ Metric: metric, Hostname: hostname, Aggregate: true, @@ -1081,7 +1061,7 @@ func (ccms *CCMetricStoreInternal) buildNodeQueries( // Node -> Node if nativeScope == schema.MetricScopeNode && scope == schema.MetricScopeNode { - queries = append(queries, memorystore.APIQuery{ + queries = append(queries, APIQuery{ Metric: metric, Hostname: hostname, Resolution: resolution, diff --git 
a/internal/memorystore/stats.go b/internal/metricstore/stats.go similarity index 99% rename from internal/memorystore/stats.go rename to internal/metricstore/stats.go index c931ab35..51ffafc1 100644 --- a/internal/memorystore/stats.go +++ b/internal/metricstore/stats.go
@@ -3,7 +3,7 @@ // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. -package memorystore +package metricstore import ( "errors"
diff --git a/internal/metricsync/metricdata.go b/internal/metricsync/metricdata.go new file mode 100644 index 00000000..772f16da --- /dev/null +++ b/internal/metricsync/metricdata.go
@@ -0,0 +1,66 @@
+// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
+// All rights reserved. This file is part of cc-backend.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package metricsync
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "time"
+
+ "github.com/ClusterCockpit/cc-backend/internal/config"
+ cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/v2/schema"
+)
+
+type MetricDataSource interface {
+ // Initialize this MetricDataRepository. One instance of
+ // this interface will only ever be responsible for one cluster.
+ Init(rawConfig json.RawMessage) error
+
+ // Return a map of hosts to a map of metrics at the requested scopes (currently only node) for that node.
+ Pull(cluster string, metrics, nodes []string, scopes []schema.MetricScope, from, to time.Time, ctx context.Context) (map[string]map[string][]*schema.JobMetric, error)
+}
+
+var metricDataSourceRepos map[string]MetricDataSource = map[string]MetricDataSource{}
+
+func Init() error {
+ for _, cluster := range config.Clusters {
+ if cluster.MetricDataRepository != nil {
+ var kind struct {
+ Kind string `json:"kind"`
+ }
+ if err := json.Unmarshal(cluster.MetricDataRepository, &kind); err != nil {
+ cclog.Warn("Error while unmarshaling raw json MetricDataRepository")
+ return err
+ }
+
+ var mdr MetricDataSource
+ switch kind.Kind {
+ case "cc-metric-store":
+ case "prometheus":
+ // mdr = &PrometheusDataRepository{}
+ case "test":
+ // mdr = &TestMetricDataRepository{}
+ default:
+ return fmt.Errorf("METRICDATA/METRICDATA > Unknown MetricDataRepository %v for cluster %v", kind.Kind, cluster.Name)
+ }
+
+ // mdr stays nil for kinds that are not wired up yet (including the
+ // internal cc-metric-store); skip them instead of calling Init on a nil interface.
+ if mdr == nil {
+ continue
+ }
+
+ if err := mdr.Init(cluster.MetricDataRepository); err != nil {
+ cclog.Errorf("Error initializing MetricDataRepository %v for cluster %v", kind.Kind, cluster.Name)
+ return err
+ }
+ metricDataSourceRepos[cluster.Name] = mdr
+ }
+ }
+ return nil
+}
diff --git a/internal/repository/stats.go b/internal/repository/stats.go index d1e16eb8..989026d1 100644 --- a/internal/repository/stats.go +++ b/internal/repository/stats.go
@@ -12,7 +12,7 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/graph/model" - "github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher" + "github.com/ClusterCockpit/cc-backend/internal/metricdispatch" "github.com/ClusterCockpit/cc-backend/pkg/archive" cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger" "github.com/ClusterCockpit/cc-lib/v2/schema"
@@ -766,7 +766,7 @@ func (r *JobRepository) runningJobsMetricStatisticsHistogram( continue } - if err := metricDataDispatcher.LoadAverages(job, metrics, avgs, ctx); err != nil { + if err := metricdispatch.LoadAverages(job, metrics, avgs, ctx); err != nil { cclog.Errorf("Error while loading averages for histogram: %s", err) return nil }
diff --git
a/internal/taskmanager/updateFootprintService.go b/internal/taskmanager/updateFootprintService.go index 979a6137..c8f81e37 100644 --- a/internal/taskmanager/updateFootprintService.go +++ b/internal/taskmanager/updateFootprintService.go @@ -10,7 +10,7 @@ import ( "math" "time" - "github.com/ClusterCockpit/cc-backend/internal/metricdata" + "github.com/ClusterCockpit/cc-backend/internal/metricstore" "github.com/ClusterCockpit/cc-backend/pkg/archive" cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger" "github.com/ClusterCockpit/cc-lib/v2/schema" @@ -58,12 +58,6 @@ func RegisterFootprintWorker() { allMetrics = append(allMetrics, mc.Name) } - repo, err := metricdata.GetMetricDataRepo(cluster.Name) - if err != nil { - cclog.Errorf("no metric data repository configured for '%s'", cluster.Name) - continue - } - pendingStatements := []sq.UpdateBuilder{} for _, job := range jobs { @@ -72,7 +66,7 @@ func RegisterFootprintWorker() { sJob := time.Now() - jobStats, err := repo.LoadStats(job, allMetrics, context.Background()) + jobStats, err := metricstore.LoadStats(job, allMetrics, context.Background()) if err != nil { cclog.Errorf("error while loading job data stats for footprint update: %v", err) ce++ From ecb5aef7355b498d2f84e1837e536ab5aa69a2d0 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 25 Dec 2025 08:48:03 +0100 Subject: [PATCH 06/23] Fix build error in unit test --- internal/api/nats_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/internal/api/nats_test.go b/internal/api/nats_test.go index 9e1fa2b5..e92ce291 100644 --- a/internal/api/nats_test.go +++ b/internal/api/nats_test.go @@ -18,7 +18,6 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/auth" "github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/graph" - "github.com/ClusterCockpit/cc-backend/internal/importer" "github.com/ClusterCockpit/cc-backend/internal/metricstore" "github.com/ClusterCockpit/cc-backend/internal/repository" "github.com/ClusterCockpit/cc-backend/pkg/archive" From ddc2ecf82976c75b963604744ebce522f1cc6a3d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 Dec 2025 08:02:46 +0000 Subject: [PATCH 07/23] Bump svelte from 5.44.0 to 5.46.1 in /web/frontend Bumps [svelte](https://github.com/sveltejs/svelte/tree/HEAD/packages/svelte) from 5.44.0 to 5.46.1. - [Release notes](https://github.com/sveltejs/svelte/releases) - [Changelog](https://github.com/sveltejs/svelte/blob/main/packages/svelte/CHANGELOG.md) - [Commits](https://github.com/sveltejs/svelte/commits/svelte@5.46.1/packages/svelte) --- updated-dependencies: - dependency-name: svelte dependency-version: 5.46.1 dependency-type: direct:development update-type: version-update:semver-minor ...
Signed-off-by: dependabot[bot] --- web/frontend/package-lock.json | 21 +++++++++++++-------- web/frontend/package.json | 2 +- 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/web/frontend/package-lock.json b/web/frontend/package-lock.json index 4c7e4bf5..648c72ce 100644 --- a/web/frontend/package-lock.json +++ b/web/frontend/package-lock.json @@ -27,7 +27,7 @@ "rollup": "^4.53.3", "rollup-plugin-css-only": "^4.5.5", "rollup-plugin-svelte": "^7.2.3", - "svelte": "^5.44.0" + "svelte": "^5.46.1" } }, "node_modules/@0no-co/graphql.web": { @@ -621,6 +621,7 @@ "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", "license": "MIT", + "peer": true, "bin": { "acorn": "bin/acorn" }, @@ -746,9 +747,9 @@ "license": "MIT" }, "node_modules/esrap": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/esrap/-/esrap-2.1.3.tgz", - "integrity": "sha512-T/Dhhv/QH+yYmiaLz9SA3PW+YyenlnRKDNdtlYJrSOBmNsH4nvPux+mTwx7p+wAedlJrGoZtXNI0a0MjQ2QkVg==", + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/esrap/-/esrap-2.2.1.tgz", + "integrity": "sha512-GiYWG34AN/4CUyaWAgunGt0Rxvr1PTMlGC0vvEov/uOQYWne2bpN03Um+k8jT+q3op33mKouP2zeJ6OlM+qeUg==", "license": "MIT", "dependencies": { "@jridgewell/sourcemap-codec": "^1.4.15" @@ -821,6 +822,7 @@ "resolved": "https://registry.npmjs.org/graphql/-/graphql-16.12.0.tgz", "integrity": "sha512-DKKrynuQRne0PNpEbzuEdHlYOMksHSUI8Zc9Unei5gTsMNA2/vMpoMz/yKba50pejK56qj98qM0SjYxAKi13gQ==", "license": "MIT", + "peer": true, "engines": { "node": "^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0" } @@ -927,6 +929,7 @@ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, @@ -981,6 +984,7 @@ "integrity": "sha512-w8GmOxZfBmKknvdXU1sdM9NHcoQejwF/4mNgj2JuEEdRaHwwF12K7e9eXn1nLZ07ad+du76mkVsyeb2rKGllsA==", "devOptional": true, "license": "MIT", + "peer": true, "dependencies": { "@types/estree": "1.0.8" }, @@ -1157,10 +1161,11 @@ } }, "node_modules/svelte": { - "version": "5.44.0", - "resolved": "https://registry.npmjs.org/svelte/-/svelte-5.44.0.tgz", - "integrity": "sha512-R7387No2zEGw4CtYtI2rgsui6BqjFARzoZFGLiLN5OPla0Pq4Ra2WwcP/zBomP3MYalhSNvF1fzDMuU0P0zPJw==", + "version": "5.46.1", + "resolved": "https://registry.npmjs.org/svelte/-/svelte-5.46.1.tgz", + "integrity": "sha512-ynjfCHD3nP2el70kN5Pmg37sSi0EjOm9FgHYQdC4giWG/hzO3AatzXXJJgP305uIhGQxSufJLuYWtkY8uK/8RA==", "license": "MIT", + "peer": true, "dependencies": { "@jridgewell/remapping": "^2.3.4", "@jridgewell/sourcemap-codec": "^1.5.0", @@ -1172,7 +1177,7 @@ "clsx": "^2.1.1", "devalue": "^5.5.0", "esm-env": "^1.2.1", - "esrap": "^2.1.0", + "esrap": "^2.2.1", "is-reference": "^3.0.3", "locate-character": "^3.0.0", "magic-string": "^0.30.11", diff --git a/web/frontend/package.json b/web/frontend/package.json index 3f7434f7..d06ea6b4 100644 --- a/web/frontend/package.json +++ b/web/frontend/package.json @@ -14,7 +14,7 @@ "rollup": "^4.53.3", "rollup-plugin-css-only": "^4.5.5", "rollup-plugin-svelte": "^7.2.3", - "svelte": "^5.44.0" + "svelte": "^5.46.1" }, "dependencies": { "@rollup/plugin-replace": "^6.0.3", From 4e6038d6c1a26ac74dd3b0f10bfdef335d86b3a9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 Dec 2025 08:03:41 +0000 Subject: 
[PATCH 08/23] Bump github.com/99designs/gqlgen from 0.17.84 to 0.17.85 Bumps [github.com/99designs/gqlgen](https://github.com/99designs/gqlgen) from 0.17.84 to 0.17.85. - [Release notes](https://github.com/99designs/gqlgen/releases) - [Changelog](https://github.com/99designs/gqlgen/blob/master/CHANGELOG.md) - [Commits](https://github.com/99designs/gqlgen/compare/v0.17.84...v0.17.85) --- updated-dependencies: - dependency-name: github.com/99designs/gqlgen dependency-version: 0.17.85 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 18 +++++++++--------- go.sum | 36 ++++++++++++++++++------------------ 2 files changed, 27 insertions(+), 27 deletions(-) diff --git a/go.mod b/go.mod index 4da3b80e..246460a4 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ tool ( ) require ( - github.com/99designs/gqlgen v0.17.84 + github.com/99designs/gqlgen v0.17.85 github.com/ClusterCockpit/cc-lib v1.0.2 github.com/Masterminds/squirrel v1.5.4 github.com/aws/aws-sdk-go-v2 v1.41.0 @@ -42,7 +42,7 @@ require ( github.com/swaggo/http-swagger v1.3.4 github.com/swaggo/swag v1.16.6 github.com/vektah/gqlparser/v2 v2.5.31 - golang.org/x/crypto v0.45.0 + golang.org/x/crypto v0.46.0 golang.org/x/oauth2 v0.32.0 golang.org/x/time v0.14.0 ) @@ -117,13 +117,13 @@ require ( github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342 // indirect go.yaml.in/yaml/v2 v2.4.3 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect - golang.org/x/mod v0.30.0 // indirect - golang.org/x/net v0.47.0 // indirect - golang.org/x/sync v0.18.0 // indirect - golang.org/x/sys v0.38.0 // indirect - golang.org/x/text v0.31.0 // indirect - golang.org/x/tools v0.39.0 // indirect - google.golang.org/protobuf v1.36.10 // indirect + golang.org/x/mod v0.31.0 // indirect + golang.org/x/net v0.48.0 // indirect + golang.org/x/sync v0.19.0 // indirect + golang.org/x/sys v0.39.0 // indirect + golang.org/x/text v0.32.0 // indirect + golang.org/x/tools v0.40.0 // indirect + google.golang.org/protobuf v1.36.11 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect sigs.k8s.io/yaml v1.6.0 // indirect ) diff --git a/go.sum b/go.sum index 773bf31c..6ce3d35b 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,7 @@ filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= -github.com/99designs/gqlgen v0.17.84 h1:iVMdiStgUVx/BFkMb0J5GAXlqfqtQ7bqMCYK6v52kQ0= -github.com/99designs/gqlgen v0.17.84/go.mod h1:qjoUqzTeiejdo+bwUg8unqSpeYG42XrcrQboGIezmFA= +github.com/99designs/gqlgen v0.17.85 h1:EkGx3U2FDcxQm8YDLQSpXIAVmpDyZ3IcBMOJi2nH1S0= +github.com/99designs/gqlgen v0.17.85/go.mod h1:yvs8s0bkQlRfqg03YXr3eR4OQUowVhODT/tHzCXnbOU= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8= @@ -343,33 +343,33 @@ go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.45.0 
h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= -golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= +golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= +golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o= golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= -golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= +golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI= +golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= -golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= +golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY= golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= -golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= -golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= +golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= @@ -377,19 
+377,19 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= -golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= +golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= +golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= -golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= +golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA= +golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= -google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= +google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= From faf3a19f0c526227b8c3719863d8edca91bfbbdd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 Dec 2025 08:04:58 +0000 Subject: [PATCH 09/23] Bump github.com/aws/aws-sdk-go-v2/service/s3 from 1.90.2 to 1.95.0 Bumps [github.com/aws/aws-sdk-go-v2/service/s3](https://github.com/aws/aws-sdk-go-v2) from 1.90.2 to 1.95.0. - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/s3/v1.90.2...service/s3/v1.95.0) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go-v2/service/s3 dependency-version: 1.95.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
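For reviewers unfamiliar with the v2 SDK surface behind this bump, a minimal
sketch of typical service/s3 usage. Bucket name and prefix below are invented
placeholders, not values used by cc-backend:

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    func main() {
        ctx := context.Background()

        // Resolve region and credentials from the environment or shared config.
        cfg, err := config.LoadDefaultConfig(ctx)
        if err != nil {
            log.Fatal(err)
        }
        client := s3.NewFromConfig(cfg)

        // List objects below a prefix.
        out, err := client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{
            Bucket: aws.String("example-job-archive"),
            Prefix: aws.String("jobs/"),
        })
        if err != nil {
            log.Fatal(err)
        }
        for _, obj := range out.Contents {
            fmt.Println(aws.ToString(obj.Key))
        }
    }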
Signed-off-by: dependabot[bot] --- go.mod | 10 +++++----- go.sum | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index 4da3b80e..79bc1dfc 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/aws/aws-sdk-go-v2 v1.41.0 github.com/aws/aws-sdk-go-v2/config v1.32.6 github.com/aws/aws-sdk-go-v2/credentials v1.19.6 - github.com/aws/aws-sdk-go-v2/service/s3 v1.90.2 + github.com/aws/aws-sdk-go-v2/service/s3 v1.95.0 github.com/coreos/go-oidc/v3 v3.17.0 github.com/expr-lang/expr v1.17.7 github.com/go-co-op/gocron/v2 v2.18.2 @@ -52,16 +52,16 @@ require ( github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect github.com/KyleBanks/depth v1.2.1 // indirect github.com/agnivade/levenshtein v1.2.1 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.3 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 // indirect github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.13 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.16 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.4 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.7 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.13 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.16 // indirect github.com/aws/aws-sdk-go-v2/service/signin v1.0.4 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.30.8 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12 // indirect diff --git a/go.sum b/go.sum index 773bf31c..366f3656 100644 --- a/go.sum +++ b/go.sum @@ -32,8 +32,8 @@ github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE= github.com/aws/aws-sdk-go-v2 v1.41.0 h1:tNvqh1s+v0vFYdA1xq0aOJH+Y5cRyZ5upu6roPgPKd4= github.com/aws/aws-sdk-go-v2 v1.41.0/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.3 h1:DHctwEM8P8iTXFxC/QK0MRjwEpWQeM9yzidCRjldUz0= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.3/go.mod h1:xdCzcZEtnSTKVDOmUZs4l/j3pSV6rpo1WXl5ugNsL8Y= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 h1:489krEF9xIGkOaaX3CE/Be2uWjiXrkCH6gUX+bZA/BU= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4/go.mod h1:IOAPF6oT9KCsceNTvvYMNHy0+kMF8akOjeDvPENWxp4= github.com/aws/aws-sdk-go-v2/config v1.32.6 h1:hFLBGUKjmLAekvi1evLi5hVvFQtSo3GYwi+Bx4lpJf8= github.com/aws/aws-sdk-go-v2/config v1.32.6/go.mod h1:lcUL/gcd8WyjCrMnxez5OXkO3/rwcNmvfno62tnXNcI= github.com/aws/aws-sdk-go-v2/credentials v1.19.6 h1:F9vWao2TwjV2MyiyVS+duza0NIRtAslgLUM0vTA1ZaE= @@ -46,18 +46,18 @@ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 h1:1jtGzuV7c82xnqOVfx github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16/go.mod h1:M2E5OQf+XLe+SZGmmpaI2yy+J326aFf6/+54PoxSANc= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk= 
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.13 h1:eg/WYAa12vqTphzIdWMzqYRVKKnCboVPRlvaybNCqPA= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.13/go.mod h1:/FDdxWhz1486obGrKKC1HONd7krpk38LBt+dutLcN9k= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.16 h1:CjMzUs78RDDv4ROu3JnJn/Ig1r6ZD7/T2DXLLRpejic= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.16/go.mod h1:uVW4OLBqbJXSHJYA9svT9BluSvvwbzLQ2Crf6UPzR3c= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.4 h1:NvMjwvv8hpGUILarKw7Z4Q0w1H9anXKsesMxtw++MA4= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.4/go.mod h1:455WPHSwaGj2waRSpQp7TsnpOnBfw8iDfPfbwl7KPJE= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.7 h1:DIBqIrJ7hv+e4CmIk2z3pyKT+3B6qVMgRsawHiR3qso= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.7/go.mod h1:vLm00xmBke75UmpNvOcZQ/Q30ZFjbczeLFqGx5urmGo= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 h1:oHjJHeUy0ImIV0bsrX0X91GkV5nJAyv1l1CC9lnO0TI= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16/go.mod h1:iRSNGgOYmiYwSCXxXaKb9HfOEj40+oTKn8pTxMlYkRM= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.13 h1:zhBJXdhWIFZ1acfDYIhu4+LCzdUS2Vbcum7D01dXlHQ= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.13/go.mod h1:JaaOeCE368qn2Hzi3sEzY6FgAZVCIYcC2nwbro2QCh8= -github.com/aws/aws-sdk-go-v2/service/s3 v1.90.2 h1:DhdbtDl4FdNlj31+xiRXANxEE+eC7n8JQz+/ilwQ8Uc= -github.com/aws/aws-sdk-go-v2/service/s3 v1.90.2/go.mod h1:+wArOOrcHUevqdto9k1tKOF5++YTe9JEcPSc9Tx2ZSw= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.16 h1:NSbvS17MlI2lurYgXnCOLvCFX38sBW4eiVER7+kkgsU= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.16/go.mod h1:SwT8Tmqd4sA6G1qaGdzWCJN99bUmPGHfRwwq3G5Qb+A= +github.com/aws/aws-sdk-go-v2/service/s3 v1.95.0 h1:MIWra+MSq53CFaXXAywB2qg9YvVZifkk6vEGl/1Qor0= +github.com/aws/aws-sdk-go-v2/service/s3 v1.95.0/go.mod h1:79S2BdqCJpScXZA2y+cpZuocWsjGjJINyXnOsf5DTz8= github.com/aws/aws-sdk-go-v2/service/signin v1.0.4 h1:HpI7aMmJ+mm1wkSHIA2t5EaFFv5EFYXePW30p1EIrbQ= github.com/aws/aws-sdk-go-v2/service/signin v1.0.4/go.mod h1:C5RdGMYGlfM0gYq/tifqgn4EbyX99V15P2V3R+VHbQU= github.com/aws/aws-sdk-go-v2/service/sso v1.30.8 h1:aM/Q24rIlS3bRAhTyFurowU8A0SMyGDtEOY/l/s/1Uw= From a2414791bfe4b43a60f74ffd81ae71c808902592 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 Dec 2025 08:05:04 +0000 Subject: [PATCH 10/23] Bump github.com/go-co-op/gocron/v2 from 2.18.2 to 2.19.0 Bumps [github.com/go-co-op/gocron/v2](https://github.com/go-co-op/gocron) from 2.18.2 to 2.19.0. - [Release notes](https://github.com/go-co-op/gocron/releases) - [Commits](https://github.com/go-co-op/gocron/compare/v2.18.2...v2.19.0) --- updated-dependencies: - dependency-name: github.com/go-co-op/gocron/v2 dependency-version: 2.19.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
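gocron/v2 is a direct production dependency of this module; for reviewers
unfamiliar with its v2 API, a minimal usage sketch. The interval and task body
are illustrative only, not taken from this repository:

    package main

    import (
        "fmt"
        "log"
        "time"

        "github.com/go-co-op/gocron/v2"
    )

    func main() {
        scheduler, err := gocron.NewScheduler()
        if err != nil {
            log.Fatal(err)
        }

        // Register a recurring job; a real service does its periodic work here.
        if _, err := scheduler.NewJob(
            gocron.DurationJob(30*time.Minute),
            gocron.NewTask(func() { fmt.Println("maintenance tick") }),
        ); err != nil {
            log.Fatal(err)
        }

        scheduler.Start()
        defer func() { _ = scheduler.Shutdown() }()

        time.Sleep(2 * time.Hour) // keep the demo process alive
    }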
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 4da3b80e..95107c1a 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/s3 v1.90.2 github.com/coreos/go-oidc/v3 v3.17.0 github.com/expr-lang/expr v1.17.7 - github.com/go-co-op/gocron/v2 v2.18.2 + github.com/go-co-op/gocron/v2 v2.19.0 github.com/go-ldap/ldap/v3 v3.4.12 github.com/go-sql-driver/mysql v1.9.3 github.com/golang-jwt/jwt/v5 v5.3.0 diff --git a/go.sum b/go.sum index 773bf31c..40fb4dcf 100644 --- a/go.sum +++ b/go.sum @@ -109,8 +109,8 @@ github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 h1:BP4M0CvQ4S3TGls2FvczZtj5Re/2ZzkV9VwqPHH/3Bo= github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= -github.com/go-co-op/gocron/v2 v2.18.2 h1:+5VU41FUXPWSPKLXZQ/77SGzUiPCcakU0v7ENc2H20Q= -github.com/go-co-op/gocron/v2 v2.18.2/go.mod h1:Zii6he+Zfgy5W9B+JKk/KwejFOW0kZTFvHtwIpR4aBI= +github.com/go-co-op/gocron/v2 v2.19.0 h1:OKf2y6LXPs/BgBI2fl8PxUpNAI1DA9Mg+hSeGOS38OU= +github.com/go-co-op/gocron/v2 v2.19.0/go.mod h1:5lEiCKk1oVJV39Zg7/YG10OnaVrDAV5GGR6O0663k6U= github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs= github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08= github.com/go-ldap/ldap/v3 v3.4.12 h1:1b81mv7MagXZ7+1r7cLTWmyuTqVqdwbtJSjC0DAp9s4= From 4d6326b8be9bef9d730ea010ae933d43e43ad9a1 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Mon, 12 Jan 2026 08:55:31 +0100 Subject: [PATCH 11/23] Remove metricsync --- internal/metricsync/metricdata.go | 60 ------------------------------- 1 file changed, 60 deletions(-) delete mode 100644 internal/metricsync/metricdata.go diff --git a/internal/metricsync/metricdata.go b/internal/metricsync/metricdata.go deleted file mode 100644 index 772f16da..00000000 --- a/internal/metricsync/metricdata.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. This file is part of cc-backend. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. - -package metricsync - -import ( - "context" - "encoding/json" - "fmt" - "time" - - "github.com/ClusterCockpit/cc-backend/internal/config" - cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger" - "github.com/ClusterCockpit/cc-lib/v2/schema" -) - -type MetricDataSource interface { - // Initialize this MetricDataRepository. One instance of - // this interface will only ever be responsible for one cluster. - Init(rawConfig json.RawMessage) error - - // Return a map of hosts to a map of metrics at the requested scopes (currently only node) for that node. 
- Pull(cluster string, metrics, nodes []string, scopes []schema.MetricScope, from, to time.Time, ctx context.Context) (map[string]map[string][]*schema.JobMetric, error) -} - -var metricDataSourceRepos map[string]MetricDataSource = map[string]MetricDataSource{} - -func Init() error { - for _, cluster := range config.Clusters { - if cluster.MetricDataRepository != nil { - var kind struct { - Kind string `json:"kind"` - } - if err := json.Unmarshal(cluster.MetricDataRepository, &kind); err != nil { - cclog.Warn("Error while unmarshaling raw json MetricDataRepository") - return err - } - - var mdr MetricDataSource - switch kind.Kind { - case "cc-metric-store": - case "prometheus": - // mdr = &PrometheusDataRepository{} - case "test": - // mdr = &TestMetricDataRepository{} - default: - return fmt.Errorf("METRICDATA/METRICDATA > Unknown MetricDataRepository %v for cluster %v", kind.Kind, cluster.Name) - } - - if err := mdr.Init(cluster.MetricDataRepository); err != nil { - cclog.Errorf("Error initializing MetricDataRepository %v for cluster %v", kind.Kind, cluster.Name) - return err - } - metricDataSourceRepos[cluster.Name] = mdr - } - } - return nil -} From 56399523d7e9582ff7525b4932586415bf48d7ea Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Mon, 12 Jan 2026 09:00:06 +0100 Subject: [PATCH 12/23] Update module deps --- go.mod | 17 ++++------------- go.sum | 26 ++++++++++++++------------ 2 files changed, 18 insertions(+), 25 deletions(-) diff --git a/go.mod b/go.mod index 36ce47b9..c8899162 100644 --- a/go.mod +++ b/go.mod @@ -33,8 +33,6 @@ require ( github.com/linkedin/goavro/v2 v2.14.1 github.com/mattn/go-sqlite3 v1.14.32 github.com/nats-io/nats.go v1.47.0 - github.com/prometheus/client_golang v1.23.2 - github.com/prometheus/common v0.67.4 github.com/qustavo/sqlhooks/v2 v2.1.0 github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 github.com/stretchr/testify v1.11.1 @@ -65,8 +63,6 @@ require ( github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.7 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.40.2 // indirect github.com/aws/smithy-go v1.24.0 // indirect - github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -86,6 +82,7 @@ require ( github.com/go-viper/mapstructure/v2 v2.4.0 // indirect github.com/goccy/go-yaml v1.19.0 // indirect github.com/golang/snappy v0.0.4 // indirect + github.com/google/go-cmp v0.7.0 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/securecookie v1.1.2 // indirect github.com/gorilla/websocket v1.5.3 // indirect @@ -93,24 +90,19 @@ require ( github.com/influxdata/influxdb-client-go/v2 v2.14.0 // indirect github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf // indirect github.com/jonboulle/clockwork v0.5.0 // indirect - github.com/jpillora/backoff v1.0.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.18.2 // indirect + github.com/kr/pretty v0.3.1 // indirect github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/mwitkow/go-conntrack 
v0.0.0-20190716064945-2f068394615f // indirect github.com/nats-io/nkeys v0.4.12 // indirect github.com/nats-io/nuid v1.0.1 // indirect github.com/oapi-codegen/runtime v1.1.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_model v0.6.2 // indirect - github.com/prometheus/procfs v0.16.1 // indirect + github.com/prometheus/common v0.67.4 // indirect github.com/robfig/cron/v3 v3.0.1 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sosodev/duration v1.3.1 // indirect + github.com/stmcginnis/gofish v0.20.0 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/swaggo/files v1.0.1 // indirect github.com/urfave/cli/v2 v2.27.7 // indirect @@ -125,7 +117,6 @@ require ( golang.org/x/sys v0.39.0 // indirect golang.org/x/text v0.32.0 // indirect golang.org/x/tools v0.39.0 // indirect - google.golang.org/protobuf v1.36.10 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect sigs.k8s.io/yaml v1.6.0 // indirect ) diff --git a/go.sum b/go.sum index 9038d960..99c2bdb0 100644 --- a/go.sum +++ b/go.sum @@ -23,6 +23,8 @@ github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNg github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/andybalholm/cascadia v1.3.3 h1:AG2YHrzJIm4BZ19iwJ/DAua6Btl3IwJX+VI4kktS1LM= github.com/andybalholm/cascadia v1.3.3/go.mod h1:xNd9bqTn98Ln4DwST8/nG+H0yuB8Hmgu1YHNnWw0GeA= +github.com/antithesishq/antithesis-sdk-go v0.5.0-default-no-op h1:Ucf+QxEKMbPogRO5guBNe5cgd9uZgfoJLOYs8WWhtjM= +github.com/antithesishq/antithesis-sdk-go v0.5.0-default-no-op/go.mod h1:IUpT2DPAKh6i/YhSbt6Gl3v2yvUZjmKncl7U91fup7E= github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ= github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk= github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q= @@ -142,7 +144,8 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/go-tpm v0.9.7 h1:u89J4tUUeDTlH8xxC3CTW7OHZjbjKoHdQ9W7gCUhtxA= +github.com/google/go-tpm v0.9.7/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gops v0.3.28 h1:2Xr57tqKAmQYRAfG12E+yLcoa2Y42UJo2lOrUFL9ark= @@ -192,10 +195,6 @@ github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= github.com/jonboulle/clockwork v0.5.0 h1:Hyh9A8u51kptdkR+cqRpT1EebBwTn1oK9YfGYbdFz6I= github.com/jonboulle/clockwork v0.5.0/go.mod h1:3mZlmanh0g2NDKO5TWZVJAfofYk64M7XN3SzBPjZF60= -github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go 
v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE= github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk= github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= @@ -219,15 +218,14 @@ github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsO github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs= github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/minio/highwayhash v1.0.4-0.20251030100505-070ab1a87a76 h1:KGuD/pM2JpL9FAYvBrnBBeENKZNh6eNtjqytV6TYjnk= +github.com/minio/highwayhash v1.0.4-0.20251030100505-070ab1a87a76/go.mod h1:GGYsuwP/fPD6Y9hMiXuapVvlIUEhFhMTh0rxU3ik1LQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nats-io/jwt/v2 v2.8.0 h1:K7uzyz50+yGZDO5o772eRE7atlcSEENpL7P+b74JV1g= +github.com/nats-io/jwt/v2 v2.8.0/go.mod h1:me11pOkwObtcBNR8AiMrUbtVOUGkqYjMQZ6jnSdVUIA= +github.com/nats-io/nats-server/v2 v2.12.3 h1:KRv+1n7lddMVgkJPQer+pt36TcO0ENxjilBmeWdjcHs= +github.com/nats-io/nats-server/v2 v2.12.3/go.mod h1:MQXjG9WjyXKz9koWzUc3jYUMKD8x3CLmTNy91IQQz3Y= github.com/nats-io/nats.go v1.47.0 h1:YQdADw6J/UfGUd2Oy6tn4Hq6YHxCaJrVKayxxFqYrgM= github.com/nats-io/nats.go v1.47.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g= github.com/nats-io/nkeys v0.4.12 h1:nssm7JKOG9/x4J8II47VWCL1Ds29avyiQDRn0ckMvDc= @@ -238,6 +236,7 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLA github.com/oapi-codegen/runtime v1.1.1 h1:EXLHh0DXIJnWhdRPN2w4MXAzFyE4CskzhNLUmtpMYro= github.com/oapi-codegen/runtime v1.1.1/go.mod h1:SK9X900oXmPWilYR5/WKPzt3Kqxn/uS/+lbpREv+eCg= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -253,6 +252,7 @@ github.com/qustavo/sqlhooks/v2 v2.1.0 h1:54yBemHnGHp/7xgT+pxwmIlMSDNYKx5JW5dfRAi github.com/qustavo/sqlhooks/v2 
v2.1.0/go.mod h1:aMREyKo7fOKTwiLuWPsaHRXEmtqG4yREztO0idF83AU= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= @@ -264,6 +264,8 @@ github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NF github.com/sosodev/duration v1.3.1 h1:qtHBDMQ6lvMQsL15g4aopM4HEfOaYuhWBw3NPTtlqq4= github.com/sosodev/duration v1.3.1/go.mod h1:RQIBBX0+fMLc/D9+Jb/fwvVmo0eZvDDEERAikUR6SDg= github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0= +github.com/stmcginnis/gofish v0.20.0 h1:hH2V2Qe898F2wWT1loApnkDUrXXiLKqbSlMaH3Y1n08= +github.com/stmcginnis/gofish v0.20.0/go.mod h1:PzF5i8ecRG9A2ol8XT64npKUunyraJ+7t0kYMpQAtqU= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= From 8641d9053d129075a7f7036ed538b66d619f885c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Jan 2026 08:07:20 +0000 Subject: [PATCH 13/23] Bump golang.org/x/oauth2 from 0.32.0 to 0.34.0 Bumps [golang.org/x/oauth2](https://github.com/golang/oauth2) from 0.32.0 to 0.34.0. - [Commits](https://github.com/golang/oauth2/compare/v0.32.0...v0.34.0) --- updated-dependencies: - dependency-name: golang.org/x/oauth2 dependency-version: 0.34.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
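golang.org/x/oauth2 is pulled in alongside coreos/go-oidc for the OpenID
Connect login path; a compact sketch of the authorization-code flow the
package implements. All endpoints, client IDs and secrets below are invented
placeholders:

    package main

    import (
        "context"
        "fmt"

        "golang.org/x/oauth2"
    )

    func main() {
        conf := &oauth2.Config{
            ClientID:     "example-client-id",
            ClientSecret: "example-client-secret",
            RedirectURL:  "https://example.com/oidc-callback",
            Scopes:       []string{"openid", "profile"},
            Endpoint: oauth2.Endpoint{
                AuthURL:  "https://idp.example.com/auth",
                TokenURL: "https://idp.example.com/token",
            },
        }

        // Step 1: send the user to the provider's consent page.
        fmt.Println("Visit:", conf.AuthCodeURL("state-token", oauth2.AccessTypeOffline))

        // Step 2: exchange the code returned to the redirect URL for a token.
        tok, err := conf.Exchange(context.Background(), "authorization-code")
        if err != nil {
            fmt.Println("exchange failed:", err)
            return
        }
        fmt.Println("token expires:", tok.Expiry)
    }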
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index bc428b9a..f96e4537 100644 --- a/go.mod +++ b/go.mod @@ -43,7 +43,7 @@ require ( github.com/swaggo/swag v1.16.6 github.com/vektah/gqlparser/v2 v2.5.31 golang.org/x/crypto v0.46.0 - golang.org/x/oauth2 v0.32.0 + golang.org/x/oauth2 v0.34.0 golang.org/x/time v0.14.0 ) diff --git a/go.sum b/go.sum index e7b4fd95..e86cb314 100644 --- a/go.sum +++ b/go.sum @@ -356,8 +356,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= -golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY= -golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw= +golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= From 78f1db7ad1b88087847f83a5f02c06977bd6d697 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Jan 2026 08:52:40 +0000 Subject: [PATCH 14/23] Bump github.com/aws/aws-sdk-go-v2/credentials from 1.19.6 to 1.19.7 Bumps [github.com/aws/aws-sdk-go-v2/credentials](https://github.com/aws/aws-sdk-go-v2) from 1.19.6 to 1.19.7. - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/m2/v1.19.6...service/m2/v1.19.7) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go-v2/credentials dependency-version: 1.19.7 dependency-type: direct:production update-type: version-update:semver-patch ... 
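The credentials module bumped here provides explicit providers as an
alternative to the default chain shown in the s3 sketch earlier in this
series. A sketch with placeholder keys; real deployments should keep secrets
out of code:

    package main

    import (
        "context"
        "log"

        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/credentials"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    func main() {
        // Access key, secret and region are placeholders, e.g. for an
        // S3-compatible object store.
        cfg, err := config.LoadDefaultConfig(context.Background(),
            config.WithRegion("us-east-1"),
            config.WithCredentialsProvider(
                credentials.NewStaticCredentialsProvider("EXAMPLEKEY", "EXAMPLESECRET", ""),
            ),
        )
        if err != nil {
            log.Fatal(err)
        }
        _ = s3.NewFromConfig(cfg) // construct clients from the resulting config
    }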
Signed-off-by: dependabot[bot] --- go.mod | 20 ++++++++++---------- go.sum | 40 ++++++++++++++++++++-------------------- 2 files changed, 30 insertions(+), 30 deletions(-) diff --git a/go.mod b/go.mod index f96e4537..3857483e 100644 --- a/go.mod +++ b/go.mod @@ -13,9 +13,9 @@ require ( github.com/99designs/gqlgen v0.17.85 github.com/ClusterCockpit/cc-lib v1.0.2 github.com/Masterminds/squirrel v1.5.4 - github.com/aws/aws-sdk-go-v2 v1.41.0 + github.com/aws/aws-sdk-go-v2 v1.41.1 github.com/aws/aws-sdk-go-v2/config v1.32.6 - github.com/aws/aws-sdk-go-v2/credentials v1.19.6 + github.com/aws/aws-sdk-go-v2/credentials v1.19.7 github.com/aws/aws-sdk-go-v2/service/s3 v1.95.0 github.com/coreos/go-oidc/v3 v3.17.0 github.com/expr-lang/expr v1.17.7 @@ -53,19 +53,19 @@ require ( github.com/KyleBanks/depth v1.2.1 // indirect github.com/agnivade/levenshtein v1.2.1 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.16 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.7 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.16 // indirect - github.com/aws/aws-sdk-go-v2/service/signin v1.0.4 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.30.8 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.41.5 // indirect + github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 // indirect github.com/aws/smithy-go v1.24.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect diff --git a/go.sum b/go.sum index e86cb314..1a0c7e8b 100644 --- a/go.sum +++ b/go.sum @@ -30,20 +30,20 @@ github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7D github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk= github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q= github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE= -github.com/aws/aws-sdk-go-v2 v1.41.0 h1:tNvqh1s+v0vFYdA1xq0aOJH+Y5cRyZ5upu6roPgPKd4= -github.com/aws/aws-sdk-go-v2 v1.41.0/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0= +github.com/aws/aws-sdk-go-v2 v1.41.1 h1:ABlyEARCDLN034NhxlRUSZr4l71mh+T5KAeGh6cerhU= +github.com/aws/aws-sdk-go-v2 v1.41.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 h1:489krEF9xIGkOaaX3CE/Be2uWjiXrkCH6gUX+bZA/BU= 
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4/go.mod h1:IOAPF6oT9KCsceNTvvYMNHy0+kMF8akOjeDvPENWxp4= github.com/aws/aws-sdk-go-v2/config v1.32.6 h1:hFLBGUKjmLAekvi1evLi5hVvFQtSo3GYwi+Bx4lpJf8= github.com/aws/aws-sdk-go-v2/config v1.32.6/go.mod h1:lcUL/gcd8WyjCrMnxez5OXkO3/rwcNmvfno62tnXNcI= -github.com/aws/aws-sdk-go-v2/credentials v1.19.6 h1:F9vWao2TwjV2MyiyVS+duza0NIRtAslgLUM0vTA1ZaE= -github.com/aws/aws-sdk-go-v2/credentials v1.19.6/go.mod h1:SgHzKjEVsdQr6Opor0ihgWtkWdfRAIwxYzSJ8O85VHY= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16 h1:80+uETIWS1BqjnN9uJ0dBUaETh+P1XwFy5vwHwK5r9k= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16/go.mod h1:wOOsYuxYuB/7FlnVtzeBYRcjSRtQpAW0hCP7tIULMwo= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 h1:rgGwPzb82iBYSvHMHXc8h9mRoOUBZIGFgKb9qniaZZc= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16/go.mod h1:L/UxsGeKpGoIj6DxfhOWHWQ/kGKcd4I1VncE4++IyKA= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 h1:1jtGzuV7c82xnqOVfx2F0xmJcOw5374L7N6juGW6x6U= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16/go.mod h1:M2E5OQf+XLe+SZGmmpaI2yy+J326aFf6/+54PoxSANc= +github.com/aws/aws-sdk-go-v2/credentials v1.19.7 h1:tHK47VqqtJxOymRrNtUXN5SP/zUTvZKeLx4tH6PGQc8= +github.com/aws/aws-sdk-go-v2/credentials v1.19.7/go.mod h1:qOZk8sPDrxhf+4Wf4oT2urYJrYt3RejHSzgAquYeppw= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 h1:I0GyV8wiYrP8XpA70g1HBcQO1JlQxCMTW9npl5UbDHY= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17/go.mod h1:tyw7BOl5bBe/oqvoIeECFJjMdzXoa/dfVz3QQ5lgHGA= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17/go.mod h1:5M5CI3D12dNOtH3/mk6minaRwI2/37ifCURZISxA/IQ= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 h1:WWLqlh79iO48yLkj1v3ISRNiv+3KdQoZ6JWyfcsyQik= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17/go.mod h1:EhG22vHRrvF8oXSTYStZhJc1aUgKtnJe+aOiFEV90cM= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc= github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.16 h1:CjMzUs78RDDv4ROu3JnJn/Ig1r6ZD7/T2DXLLRpejic= @@ -52,20 +52,20 @@ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEd github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.7 h1:DIBqIrJ7hv+e4CmIk2z3pyKT+3B6qVMgRsawHiR3qso= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.7/go.mod h1:vLm00xmBke75UmpNvOcZQ/Q30ZFjbczeLFqGx5urmGo= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 h1:oHjJHeUy0ImIV0bsrX0X91GkV5nJAyv1l1CC9lnO0TI= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16/go.mod h1:iRSNGgOYmiYwSCXxXaKb9HfOEj40+oTKn8pTxMlYkRM= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 h1:RuNSMoozM8oXlgLG/n6WLaFGoea7/CddrCfIiSA+xdY= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17/go.mod h1:F2xxQ9TZz5gDWsclCtPQscGpP0VUOc8RqgFM3vDENmU= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.16 h1:NSbvS17MlI2lurYgXnCOLvCFX38sBW4eiVER7+kkgsU= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.16/go.mod h1:SwT8Tmqd4sA6G1qaGdzWCJN99bUmPGHfRwwq3G5Qb+A= github.com/aws/aws-sdk-go-v2/service/s3 
v1.95.0 h1:MIWra+MSq53CFaXXAywB2qg9YvVZifkk6vEGl/1Qor0= github.com/aws/aws-sdk-go-v2/service/s3 v1.95.0/go.mod h1:79S2BdqCJpScXZA2y+cpZuocWsjGjJINyXnOsf5DTz8= -github.com/aws/aws-sdk-go-v2/service/signin v1.0.4 h1:HpI7aMmJ+mm1wkSHIA2t5EaFFv5EFYXePW30p1EIrbQ= -github.com/aws/aws-sdk-go-v2/service/signin v1.0.4/go.mod h1:C5RdGMYGlfM0gYq/tifqgn4EbyX99V15P2V3R+VHbQU= -github.com/aws/aws-sdk-go-v2/service/sso v1.30.8 h1:aM/Q24rIlS3bRAhTyFurowU8A0SMyGDtEOY/l/s/1Uw= -github.com/aws/aws-sdk-go-v2/service/sso v1.30.8/go.mod h1:+fWt2UHSb4kS7Pu8y+BMBvJF0EWx+4H0hzNwtDNRTrg= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12 h1:AHDr0DaHIAo8c9t1emrzAlVDFp+iMMKnPdYy6XO4MCE= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12/go.mod h1:GQ73XawFFiWxyWXMHWfhiomvP3tXtdNar/fi8z18sx0= -github.com/aws/aws-sdk-go-v2/service/sts v1.41.5 h1:SciGFVNZ4mHdm7gpD1dgZYnCuVdX1s+lFTg4+4DOy70= -github.com/aws/aws-sdk-go-v2/service/sts v1.41.5/go.mod h1:iW40X4QBmUxdP+fZNOpfmkdMZqsovezbAeO+Ubiv2pk= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 h1:VrhDvQib/i0lxvr3zqlUwLwJP4fpmpyD9wYG1vfSu+Y= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.5/go.mod h1:k029+U8SY30/3/ras4G/Fnv/b88N4mAfliNn08Dem4M= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 h1:v6EiMvhEYBoHABfbGB4alOYmCIrcgyPPiBE1wZAEbqk= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.9/go.mod h1:yifAsgBxgJWn3ggx70A3urX2AN49Y5sJTD1UQFlfqBw= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 h1:gd84Omyu9JLriJVCbGApcLzVR3XtmC4ZDPcAI6Ftvds= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13/go.mod h1:sTGThjphYE4Ohw8vJiRStAcu3rbjtXRsdNB0TvZ5wwo= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 h1:5fFjR/ToSOzB2OQ/XqWpZBmNvmP/pJ1jOWYlFDJTjRQ= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.6/go.mod h1:qgFDZQSD/Kys7nJnVqYlWKnh0SSdMjAi0uSwON4wgYQ= github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk= github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= From fae6d9d835bc50f09921f61193d4cba88f0761c7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Jan 2026 08:52:44 +0000 Subject: [PATCH 15/23] Bump github.com/mattn/go-sqlite3 from 1.14.32 to 1.14.33 Bumps [github.com/mattn/go-sqlite3](https://github.com/mattn/go-sqlite3) from 1.14.32 to 1.14.33. - [Release notes](https://github.com/mattn/go-sqlite3/releases) - [Commits](https://github.com/mattn/go-sqlite3/compare/v1.14.32...v1.14.33) --- updated-dependencies: - dependency-name: github.com/mattn/go-sqlite3 dependency-version: 1.14.33 dependency-type: direct:production update-type: version-update:semver-patch ... 
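mattn/go-sqlite3 registers itself as a database/sql driver; a minimal sketch
of that usage (the project layers sqlx and sqlhooks on top of the same
driver). File name and schema below are placeholders, not this repository's
actual schema:

    package main

    import (
        "database/sql"
        "fmt"
        "log"

        _ "github.com/mattn/go-sqlite3" // registers the "sqlite3" driver
    )

    func main() {
        db, err := sql.Open("sqlite3", "file:demo.db?_foreign_keys=on")
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        if _, err := db.Exec(`CREATE TABLE IF NOT EXISTS job (id INTEGER PRIMARY KEY, cluster TEXT)`); err != nil {
            log.Fatal(err)
        }

        var n int
        if err := db.QueryRow(`SELECT COUNT(*) FROM job`).Scan(&n); err != nil {
            log.Fatal(err)
        }
        fmt.Println("jobs:", n)
    }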
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index f96e4537..04de109a 100644 --- a/go.mod +++ b/go.mod @@ -32,7 +32,7 @@ require ( github.com/jmoiron/sqlx v1.4.0 github.com/joho/godotenv v1.5.1 github.com/linkedin/goavro/v2 v2.14.1 - github.com/mattn/go-sqlite3 v1.14.32 + github.com/mattn/go-sqlite3 v1.14.33 github.com/nats-io/nats.go v1.47.0 github.com/prometheus/client_golang v1.23.2 github.com/prometheus/common v0.67.4 diff --git a/go.sum b/go.sum index e86cb314..7ed4c1ab 100644 --- a/go.sum +++ b/go.sum @@ -241,8 +241,8 @@ github.com/linkedin/goavro/v2 v2.14.1 h1:/8VjDpd38PRsy02JS0jflAu7JZPfJcGTwqWgMkF github.com/linkedin/goavro/v2 v2.14.1/go.mod h1:KXx+erlq+RPlGSPmLF7xGo6SAbh8sCQ53x064+ioxhk= github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= -github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs= -github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mattn/go-sqlite3 v1.14.33 h1:A5blZ5ulQo2AtayQ9/limgHEkFreKj1Dv226a1K73s0= +github.com/mattn/go-sqlite3 v1.14.33/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= From ad1e87d0b8d84db4a3ababa6a3e3bae107df9f94 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Mon, 12 Jan 2026 11:17:44 +0100 Subject: [PATCH 16/23] Disable dependabot alerts --- .github/dependabot.yml | 15 --------------- 1 file changed, 15 deletions(-) delete mode 100644 .github/dependabot.yml diff --git a/.github/dependabot.yml b/.github/dependabot.yml deleted file mode 100644 index 87600f2c..00000000 --- a/.github/dependabot.yml +++ /dev/null @@ -1,15 +0,0 @@ -# To get started with Dependabot version updates, you'll need to specify which -# package ecosystems to update and where the package manifests are located. 
-# Please see the documentation for all configuration options: -# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file - -version: 2 -updates: - - package-ecosystem: "gomod" - directory: "/" - schedule: - interval: "weekly" - - package-ecosystem: "npm" - directory: "/web/frontend" - schedule: - interval: "weekly" From 4cec93334964c166435e79c99af6e354d661dde7 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Tue, 13 Jan 2026 06:28:33 +0100 Subject: [PATCH 17/23] Remove obsolete cluster config section --- cmd/cc-backend/main.go | 7 +-- internal/api/api_test.go | 6 +- internal/api/nats_test.go | 6 +- internal/config/config.go | 21 +------ internal/config/config_test.go | 12 +--- internal/config/schema.go | 80 -------------------------- internal/importer/importer_test.go | 6 +- internal/repository/node_test.go | 6 +- internal/repository/userConfig_test.go | 6 +- tools/archive-manager/main.go | 6 +- web/web.go | 7 --- 11 files changed, 10 insertions(+), 153 deletions(-) diff --git a/cmd/cc-backend/main.go b/cmd/cc-backend/main.go index 331df4f6..8eb3c76f 100644 --- a/cmd/cc-backend/main.go +++ b/cmd/cc-backend/main.go @@ -102,12 +102,7 @@ func initConfiguration() error { return fmt.Errorf("main configuration must be present") } - clustercfg := ccconf.GetPackageConfig("clusters") - if clustercfg == nil { - return fmt.Errorf("cluster configuration must be present") - } - - config.Init(cfg, clustercfg) + config.Init(cfg) return nil } diff --git a/internal/api/api_test.go b/internal/api/api_test.go index a2283013..7aa935ff 100644 --- a/internal/api/api_test.go +++ b/internal/api/api_test.go @@ -157,11 +157,7 @@ func setup(t *testing.T) *api.RestAPI { // Load and check main configuration if cfg := ccconf.GetPackageConfig("main"); cfg != nil { - if clustercfg := ccconf.GetPackageConfig("clusters"); clustercfg != nil { - config.Init(cfg, clustercfg) - } else { - cclog.Abort("Cluster configuration must be present") - } + config.Init(cfg) } else { cclog.Abort("Main configuration must be present") } diff --git a/internal/api/nats_test.go b/internal/api/nats_test.go index e92ce291..319668bb 100644 --- a/internal/api/nats_test.go +++ b/internal/api/nats_test.go @@ -151,11 +151,7 @@ func setupNatsTest(t *testing.T) *NatsAPI { // Load and check main configuration if cfg := ccconf.GetPackageConfig("main"); cfg != nil { - if clustercfg := ccconf.GetPackageConfig("clusters"); clustercfg != nil { - config.Init(cfg, clustercfg) - } else { - cclog.Abort("Cluster configuration must be present") - } + config.Init(cfg) } else { cclog.Abort("Main configuration must be present") } diff --git a/internal/config/config.go b/internal/config/config.go index af8ec944..b8eea2ca 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -111,14 +111,6 @@ type FilterRanges struct { StartTime *TimeRange `json:"startTime"` } -type ClusterConfig struct { - Name string `json:"name"` - FilterRanges *FilterRanges `json:"filterRanges"` - MetricDataRepository json.RawMessage `json:"metricDataRepository"` -} - -var Clusters []*ClusterConfig - var Keys ProgramConfig = ProgramConfig{ Addr: "localhost:8080", DisableAuthentication: false, @@ -132,7 +124,7 @@ var Keys ProgramConfig = ProgramConfig{ ShortRunningJobsDuration: 5 * 60, } -func Init(mainConfig json.RawMessage, clusterConfig json.RawMessage) { +func Init(mainConfig json.RawMessage) { Validate(configSchema, mainConfig) dec := json.NewDecoder(bytes.NewReader(mainConfig)) 
dec.DisallowUnknownFields() @@ -140,17 +132,6 @@ func Init(mainConfig json.RawMessage, clusterConfig json.RawMessage) { cclog.Abortf("Config Init: Could not decode config file '%s'.\nError: %s\n", mainConfig, err.Error()) } - Validate(clustersSchema, clusterConfig) - dec = json.NewDecoder(bytes.NewReader(clusterConfig)) - dec.DisallowUnknownFields() - if err := dec.Decode(&Clusters); err != nil { - cclog.Abortf("Config Init: Could not decode config file '%s'.\nError: %s\n", mainConfig, err.Error()) - } - - if len(Clusters) < 1 { - cclog.Abort("Config Init: At least one cluster required in config. Exited with error.") - } - if Keys.EnableResampling != nil && Keys.EnableResampling.MinimumPoints > 0 { resampler.SetMinimumRequiredPoints(Keys.EnableResampling.MinimumPoints) } diff --git a/internal/config/config_test.go b/internal/config/config_test.go index 396a80a1..e4a700ff 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -16,11 +16,7 @@ func TestInit(t *testing.T) { fp := "../../configs/config.json" ccconf.Init(fp) if cfg := ccconf.GetPackageConfig("main"); cfg != nil { - if clustercfg := ccconf.GetPackageConfig("clusters"); clustercfg != nil { - Init(cfg, clustercfg) - } else { - cclog.Abort("Cluster configuration must be present") - } + Init(cfg) } else { cclog.Abort("Main configuration must be present") } @@ -34,11 +30,7 @@ func TestInitMinimal(t *testing.T) { fp := "../../configs/config-demo.json" ccconf.Init(fp) if cfg := ccconf.GetPackageConfig("main"); cfg != nil { - if clustercfg := ccconf.GetPackageConfig("clusters"); clustercfg != nil { - Init(cfg, clustercfg) - } else { - cclog.Abort("Cluster configuration must be present") - } + Init(cfg) } else { cclog.Abort("Main configuration must be present") } diff --git a/internal/config/schema.go b/internal/config/schema.go index ff8d0c92..2d068140 100644 --- a/internal/config/schema.go +++ b/internal/config/schema.go @@ -138,83 +138,3 @@ var configSchema = ` }, "required": ["apiAllowedIPs"] }` - -var clustersSchema = ` - { - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "description": "The name of the cluster.", - "type": "string" - }, - "metricDataRepository": { - "description": "Type of the metric data repository for this cluster", - "type": "object", - "properties": { - "kind": { - "type": "string", - "enum": ["influxdb", "prometheus", "cc-metric-store", "cc-metric-store-internal", "test"] - }, - "url": { - "type": "string" - }, - "token": { - "type": "string" - } - }, - "required": ["kind"] - }, - "filterRanges": { - "description": "This option controls the slider ranges for the UI controls of numNodes, duration, and startTime.", - "type": "object", - "properties": { - "numNodes": { - "description": "UI slider range for number of nodes", - "type": "object", - "properties": { - "from": { - "type": "integer" - }, - "to": { - "type": "integer" - } - }, - "required": ["from", "to"] - }, - "duration": { - "description": "UI slider range for duration", - "type": "object", - "properties": { - "from": { - "type": "integer" - }, - "to": { - "type": "integer" - } - }, - "required": ["from", "to"] - }, - "startTime": { - "description": "UI slider range for start time", - "type": "object", - "properties": { - "from": { - "type": "string", - "format": "date-time" - }, - "to": { - "type": "null" - } - }, - "required": ["from", "to"] - } - }, - "required": ["numNodes", "duration", "startTime"] - } - }, - "required": ["name", "metricDataRepository", "filterRanges"], - "minItems": 
1 - } - }` diff --git a/internal/importer/importer_test.go b/internal/importer/importer_test.go index bffb8bf6..2d00fc84 100644 --- a/internal/importer/importer_test.go +++ b/internal/importer/importer_test.go @@ -121,11 +121,7 @@ func setup(t *testing.T) *repository.JobRepository { // Load and check main configuration if cfg := ccconf.GetPackageConfig("main"); cfg != nil { - if clustercfg := ccconf.GetPackageConfig("clusters"); clustercfg != nil { - config.Init(cfg, clustercfg) - } else { - t.Fatal("Cluster configuration must be present") - } + config.Init(cfg) } else { t.Fatal("Main configuration must be present") } diff --git a/internal/repository/node_test.go b/internal/repository/node_test.go index e1d6ca93..fd935b53 100644 --- a/internal/repository/node_test.go +++ b/internal/repository/node_test.go @@ -144,11 +144,7 @@ func nodeTestSetup(t *testing.T) { // Load and check main configuration if cfg := ccconf.GetPackageConfig("main"); cfg != nil { - if clustercfg := ccconf.GetPackageConfig("clusters"); clustercfg != nil { - config.Init(cfg, clustercfg) - } else { - cclog.Abort("Cluster configuration must be present") - } + config.Init(cfg) } else { cclog.Abort("Main configuration must be present") } diff --git a/internal/repository/userConfig_test.go b/internal/repository/userConfig_test.go index 02c70d0f..ae3adaf2 100644 --- a/internal/repository/userConfig_test.go +++ b/internal/repository/userConfig_test.go @@ -58,11 +58,7 @@ func setupUserTest(t *testing.T) *UserCfgRepo { // Load and check main configuration if cfg := ccconf.GetPackageConfig("main"); cfg != nil { - if clustercfg := ccconf.GetPackageConfig("clusters"); clustercfg != nil { - config.Init(cfg, clustercfg) - } else { - t.Fatal("Cluster configuration must be present") - } + config.Init(cfg) } else { t.Fatal("Main configuration must be present") } diff --git a/tools/archive-manager/main.go b/tools/archive-manager/main.go index f5f8b836..ffcba793 100644 --- a/tools/archive-manager/main.go +++ b/tools/archive-manager/main.go @@ -434,11 +434,7 @@ func main() { // Load and check main configuration if cfg := ccconf.GetPackageConfig("main"); cfg != nil { - if clustercfg := ccconf.GetPackageConfig("clusters"); clustercfg != nil { - config.Init(cfg, clustercfg) - } else { - cclog.Abort("Cluster configuration must be present") - } + config.Init(cfg) } else { cclog.Abort("Main configuration must be present") } diff --git a/web/web.go b/web/web.go index d2ae8700..37f1c2b2 100644 --- a/web/web.go +++ b/web/web.go @@ -245,7 +245,6 @@ type Page struct { User schema.User // Information about the currently logged in user (Full User Info) Roles map[string]schema.Role // Available roles for frontend render checks Build Build // Latest information about the application - Clusters []config.ClusterConfig // List of all clusters for use in the Header SubClusters map[string][]string // Map per cluster of all subClusters for use in the Header FilterPresets map[string]any // For pages with the Filter component, this can be used to set initial filters. Infos map[string]any // For generic use (e.g. 
username for /monitoring/user/, job id for /monitoring/job/) @@ -260,12 +259,6 @@ func RenderTemplate(rw http.ResponseWriter, file string, page *Page) { cclog.Errorf("WEB/WEB > template '%s' not found", file) } - if page.Clusters == nil { - for _, c := range config.Clusters { - page.Clusters = append(page.Clusters, config.ClusterConfig{Name: c.Name, FilterRanges: c.FilterRanges, MetricDataRepository: nil}) - } - } - if page.SubClusters == nil { page.SubClusters = make(map[string][]string) for _, cluster := range archive.Clusters { From 42809e3f75256d282e3f7a5b8bdfd9980c222882 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Tue, 13 Jan 2026 07:20:26 +0100 Subject: [PATCH 18/23] Remove embedded tagger rules --- configs/tagger/README.md | 0 {internal => configs}/tagger/apps/alf.txt | 0 {internal => configs}/tagger/apps/caracal.txt | 0 {internal => configs}/tagger/apps/chroma.txt | 0 {internal => configs}/tagger/apps/cp2k.txt | 0 {internal => configs}/tagger/apps/cpmd.txt | 0 {internal => configs}/tagger/apps/flame.txt | 0 {internal => configs}/tagger/apps/gromacs.txt | 0 {internal => configs}/tagger/apps/julia.txt | 0 {internal => configs}/tagger/apps/lammps.txt | 0 {internal => configs}/tagger/apps/matlab.txt | 0 .../tagger/apps/openfoam.txt | 0 {internal => configs}/tagger/apps/orca.txt | 0 {internal => configs}/tagger/apps/python.txt | 0 {internal => configs}/tagger/apps/starccm.txt | 0 .../tagger/apps/turbomole.txt | 0 {internal => configs}/tagger/apps/vasp.txt | 0 .../tagger/jobclasses/highload.json | 0 .../tagger/jobclasses/lowUtilization.json | 0 .../tagger/jobclasses/lowload.json | 0 .../tagger/jobclasses/parameters.json | 0 internal/tagger/classifyJob.go | 105 ++++++++++-------- internal/tagger/classifyJob_test.go | 8 +- internal/tagger/detectApp.go | 61 +++++----- internal/tagger/detectApp_test.go | 70 +++++++++++- 25 files changed, 166 insertions(+), 78 deletions(-) create mode 100644 configs/tagger/README.md rename {internal => configs}/tagger/apps/alf.txt (100%) rename {internal => configs}/tagger/apps/caracal.txt (100%) rename {internal => configs}/tagger/apps/chroma.txt (100%) rename {internal => configs}/tagger/apps/cp2k.txt (100%) rename {internal => configs}/tagger/apps/cpmd.txt (100%) rename {internal => configs}/tagger/apps/flame.txt (100%) rename {internal => configs}/tagger/apps/gromacs.txt (100%) rename {internal => configs}/tagger/apps/julia.txt (100%) rename {internal => configs}/tagger/apps/lammps.txt (100%) rename {internal => configs}/tagger/apps/matlab.txt (100%) rename {internal => configs}/tagger/apps/openfoam.txt (100%) rename {internal => configs}/tagger/apps/orca.txt (100%) rename {internal => configs}/tagger/apps/python.txt (100%) rename {internal => configs}/tagger/apps/starccm.txt (100%) rename {internal => configs}/tagger/apps/turbomole.txt (100%) rename {internal => configs}/tagger/apps/vasp.txt (100%) rename {internal => configs}/tagger/jobclasses/highload.json (100%) rename {internal => configs}/tagger/jobclasses/lowUtilization.json (100%) rename {internal => configs}/tagger/jobclasses/lowload.json (100%) rename {internal => configs}/tagger/jobclasses/parameters.json (100%) diff --git a/configs/tagger/README.md b/configs/tagger/README.md new file mode 100644 index 00000000..e69de29b diff --git a/internal/tagger/apps/alf.txt b/configs/tagger/apps/alf.txt similarity index 100% rename from internal/tagger/apps/alf.txt rename to configs/tagger/apps/alf.txt diff --git a/internal/tagger/apps/caracal.txt b/configs/tagger/apps/caracal.txt similarity index 
100% rename from internal/tagger/apps/caracal.txt rename to configs/tagger/apps/caracal.txt diff --git a/internal/tagger/apps/chroma.txt b/configs/tagger/apps/chroma.txt similarity index 100% rename from internal/tagger/apps/chroma.txt rename to configs/tagger/apps/chroma.txt diff --git a/internal/tagger/apps/cp2k.txt b/configs/tagger/apps/cp2k.txt similarity index 100% rename from internal/tagger/apps/cp2k.txt rename to configs/tagger/apps/cp2k.txt diff --git a/internal/tagger/apps/cpmd.txt b/configs/tagger/apps/cpmd.txt similarity index 100% rename from internal/tagger/apps/cpmd.txt rename to configs/tagger/apps/cpmd.txt diff --git a/internal/tagger/apps/flame.txt b/configs/tagger/apps/flame.txt similarity index 100% rename from internal/tagger/apps/flame.txt rename to configs/tagger/apps/flame.txt diff --git a/internal/tagger/apps/gromacs.txt b/configs/tagger/apps/gromacs.txt similarity index 100% rename from internal/tagger/apps/gromacs.txt rename to configs/tagger/apps/gromacs.txt diff --git a/internal/tagger/apps/julia.txt b/configs/tagger/apps/julia.txt similarity index 100% rename from internal/tagger/apps/julia.txt rename to configs/tagger/apps/julia.txt diff --git a/internal/tagger/apps/lammps.txt b/configs/tagger/apps/lammps.txt similarity index 100% rename from internal/tagger/apps/lammps.txt rename to configs/tagger/apps/lammps.txt diff --git a/internal/tagger/apps/matlab.txt b/configs/tagger/apps/matlab.txt similarity index 100% rename from internal/tagger/apps/matlab.txt rename to configs/tagger/apps/matlab.txt diff --git a/internal/tagger/apps/openfoam.txt b/configs/tagger/apps/openfoam.txt similarity index 100% rename from internal/tagger/apps/openfoam.txt rename to configs/tagger/apps/openfoam.txt diff --git a/internal/tagger/apps/orca.txt b/configs/tagger/apps/orca.txt similarity index 100% rename from internal/tagger/apps/orca.txt rename to configs/tagger/apps/orca.txt diff --git a/internal/tagger/apps/python.txt b/configs/tagger/apps/python.txt similarity index 100% rename from internal/tagger/apps/python.txt rename to configs/tagger/apps/python.txt diff --git a/internal/tagger/apps/starccm.txt b/configs/tagger/apps/starccm.txt similarity index 100% rename from internal/tagger/apps/starccm.txt rename to configs/tagger/apps/starccm.txt diff --git a/internal/tagger/apps/turbomole.txt b/configs/tagger/apps/turbomole.txt similarity index 100% rename from internal/tagger/apps/turbomole.txt rename to configs/tagger/apps/turbomole.txt diff --git a/internal/tagger/apps/vasp.txt b/configs/tagger/apps/vasp.txt similarity index 100% rename from internal/tagger/apps/vasp.txt rename to configs/tagger/apps/vasp.txt diff --git a/internal/tagger/jobclasses/highload.json b/configs/tagger/jobclasses/highload.json similarity index 100% rename from internal/tagger/jobclasses/highload.json rename to configs/tagger/jobclasses/highload.json diff --git a/internal/tagger/jobclasses/lowUtilization.json b/configs/tagger/jobclasses/lowUtilization.json similarity index 100% rename from internal/tagger/jobclasses/lowUtilization.json rename to configs/tagger/jobclasses/lowUtilization.json diff --git a/internal/tagger/jobclasses/lowload.json b/configs/tagger/jobclasses/lowload.json similarity index 100% rename from internal/tagger/jobclasses/lowload.json rename to configs/tagger/jobclasses/lowload.json diff --git a/internal/tagger/jobclasses/parameters.json b/configs/tagger/jobclasses/parameters.json similarity index 100% rename from internal/tagger/jobclasses/parameters.json rename to 
configs/tagger/jobclasses/parameters.json diff --git a/internal/tagger/classifyJob.go b/internal/tagger/classifyJob.go index 70399218..b5f30949 100644 --- a/internal/tagger/classifyJob.go +++ b/internal/tagger/classifyJob.go @@ -2,15 +2,16 @@ // All rights reserved. This file is part of cc-backend. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. + package tagger import ( "bytes" - "embed" "encoding/json" "fmt" "maps" "os" + "path/filepath" "strings" "text/template" @@ -23,8 +24,16 @@ import ( "github.com/expr-lang/expr/vm" ) -//go:embed jobclasses/* -var jobClassFiles embed.FS +const ( + // defaultJobClassConfigPath is the default path for job classification configuration + defaultJobClassConfigPath = "./var/tagger/jobclasses" + // tagTypeJobClass is the tag type identifier for job classification tags + tagTypeJobClass = "jobClass" + // jobClassConfigDirMatch is the directory name used for matching filesystem events + jobClassConfigDirMatch = "jobclasses" + // parametersFileName is the name of the parameters configuration file + parametersFileName = "parameters.json" +) // Variable defines a named expression that can be computed and reused in rules. // Variables are evaluated before the main rule and their results are added to the environment. @@ -45,21 +54,21 @@ type ruleVariable struct { // and the final rule expression that determines if the job matches the classification. type RuleFormat struct { // Name is a human-readable description of the rule - Name string `json:"name"` + Name string `json:"name"` // Tag is the classification tag to apply if the rule matches - Tag string `json:"tag"` + Tag string `json:"tag"` // Parameters are shared values referenced in the rule (e.g., thresholds) - Parameters []string `json:"parameters"` + Parameters []string `json:"parameters"` // Metrics are the job metrics required for this rule (e.g., "cpu_load", "mem_used") - Metrics []string `json:"metrics"` + Metrics []string `json:"metrics"` // Requirements are boolean expressions that must be true for the rule to apply - Requirements []string `json:"requirements"` + Requirements []string `json:"requirements"` // Variables are computed values used in the rule expression - Variables []Variable `json:"variables"` + Variables []Variable `json:"variables"` // Rule is the boolean expression that determines if the job matches - Rule string `json:"rule"` + Rule string `json:"rule"` // Hint is a template string that generates a message when the rule matches - Hint string `json:"hint"` + Hint string `json:"hint"` } type ruleInfo struct { @@ -75,29 +84,29 @@ type ruleInfo struct { // This interface allows for easier testing and decoupling from the concrete repository implementation. type JobRepository interface { // HasTag checks if a job already has a specific tag - HasTag(jobId int64, tagType string, tagName string) bool + HasTag(jobID int64, tagType string, tagName string) bool // AddTagOrCreateDirect adds a tag to a job or creates it if it doesn't exist - AddTagOrCreateDirect(jobId int64, tagType string, tagName string) (tagId int64, err error) + AddTagOrCreateDirect(jobID int64, tagType string, tagName string) (tagID int64, err error) // UpdateMetadata updates job metadata with a key-value pair UpdateMetadata(job *schema.Job, key, val string) (err error) } // JobClassTagger classifies jobs based on configurable rules that evaluate job metrics and properties. 
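// A minimal decode sketch, assuming only the RuleFormat fields above; cfgPath
// stands in for the jobclasses directory, and the file name and expected
// values come from the lowUtilization.json example elsewhere in this series:
//
//	var rf RuleFormat
//	b, err := os.ReadFile(filepath.Join(cfgPath, "lowUtilization.json"))
//	if err == nil {
//		err = json.Unmarshal(b, &rf)
//	}
//	// rf.Tag == "lowutilization"
//	// rf.Metrics == []string{"flops_any", "mem_bw"}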
-// Rules are loaded from embedded JSON files and can be dynamically reloaded from a watched directory. +// Rules are loaded from an external configuration directory and can be dynamically reloaded when files change. // When a job matches a rule, it is tagged with the corresponding classification and an optional hint message. type JobClassTagger struct { // rules maps classification tags to their compiled rule information - rules map[string]ruleInfo + rules map[string]ruleInfo // parameters are shared values (e.g., thresholds) used across multiple rules - parameters map[string]any + parameters map[string]any // tagType is the type of tag ("jobClass") - tagType string + tagType string // cfgPath is the path to watch for configuration changes - cfgPath string + cfgPath string // repo provides access to job database operations - repo JobRepository + repo JobRepository // getStatistics retrieves job statistics for analysis - getStatistics func(job *schema.Job) (map[string]schema.JobStatistics, error) + getStatistics func(job *schema.Job) (map[string]schema.JobStatistics, error) // getMetricConfig retrieves metric configuration (limits) for a cluster getMetricConfig func(cluster, subCluster string) map[string]*schema.Metric } @@ -169,7 +178,7 @@ func (t *JobClassTagger) prepareRule(b []byte, fns string) { // EventMatch checks if a filesystem event should trigger configuration reload. // It returns true if the event path contains "jobclasses". func (t *JobClassTagger) EventMatch(s string) bool { - return strings.Contains(s, "jobclasses") + return strings.Contains(s, jobClassConfigDirMatch) } // EventCallback is triggered when the configuration directory changes. @@ -181,9 +190,10 @@ func (t *JobClassTagger) EventCallback() { cclog.Fatal(err) } - if util.CheckFileExists(t.cfgPath + "/parameters.json") { + parametersFile := filepath.Join(t.cfgPath, parametersFileName) + if util.CheckFileExists(parametersFile) { cclog.Info("Merge parameters") - b, err := os.ReadFile(t.cfgPath + "/parameters.json") + b, err := os.ReadFile(parametersFile) if err != nil { cclog.Warnf("prepareRule() > open file error: %v", err) } @@ -198,13 +208,13 @@ func (t *JobClassTagger) EventCallback() { for _, fn := range files { fns := fn.Name() - if fns != "parameters.json" { + if fns != parametersFileName { cclog.Debugf("Process: %s", fns) - filename := fmt.Sprintf("%s/%s", t.cfgPath, fns) + filename := filepath.Join(t.cfgPath, fns) b, err := os.ReadFile(filename) if err != nil { cclog.Warnf("prepareRule() > open file error: %v", err) - return + continue } t.prepareRule(b, fns) } @@ -213,7 +223,8 @@ func (t *JobClassTagger) EventCallback() { func (t *JobClassTagger) initParameters() error { cclog.Info("Initialize parameters") - b, err := jobClassFiles.ReadFile("jobclasses/parameters.json") + parametersFile := filepath.Join(t.cfgPath, parametersFileName) + b, err := os.ReadFile(parametersFile) if err != nil { cclog.Warnf("prepareRule() > open file error: %v", err) return err @@ -227,13 +238,20 @@ func (t *JobClassTagger) initParameters() error { return nil } -// Register initializes the JobClassTagger by loading parameters and classification rules. -// It loads embedded configuration files and sets up a file watch on ./var/tagger/jobclasses -// if it exists, allowing for dynamic configuration updates without restarting the application. -// Returns an error if the embedded configuration files cannot be read or parsed. 
+// Register initializes the JobClassTagger by loading parameters and classification rules from external folder. +// It sets up a file watch on ./var/tagger/jobclasses if it exists, allowing for +// dynamic configuration updates without restarting the application. +// Returns an error if the configuration path does not exist or cannot be read. func (t *JobClassTagger) Register() error { - t.cfgPath = "./var/tagger/jobclasses" - t.tagType = "jobClass" + if t.cfgPath == "" { + t.cfgPath = defaultJobClassConfigPath + } + t.tagType = tagTypeJobClass + t.rules = make(map[string]ruleInfo) + + if !util.CheckFileExists(t.cfgPath) { + return fmt.Errorf("configuration path does not exist: %s", t.cfgPath) + } err := t.initParameters() if err != nil { @@ -241,31 +259,28 @@ func (t *JobClassTagger) Register() error { return err } - files, err := jobClassFiles.ReadDir("jobclasses") + files, err := os.ReadDir(t.cfgPath) if err != nil { - return fmt.Errorf("error reading app folder: %#v", err) + return fmt.Errorf("error reading jobclasses folder: %#v", err) } - t.rules = make(map[string]ruleInfo) + for _, fn := range files { fns := fn.Name() - if fns != "parameters.json" { - filename := fmt.Sprintf("jobclasses/%s", fns) + if fns != parametersFileName { cclog.Infof("Process: %s", fns) + filename := filepath.Join(t.cfgPath, fns) - b, err := jobClassFiles.ReadFile(filename) + b, err := os.ReadFile(filename) if err != nil { cclog.Warnf("prepareRule() > open file error: %v", err) - return err + continue } t.prepareRule(b, fns) } } - if util.CheckFileExists(t.cfgPath) { - t.EventCallback() - cclog.Infof("Setup file watch for %s", t.cfgPath) - util.AddListener(t.cfgPath, t) - } + cclog.Infof("Setup file watch for %s", t.cfgPath) + util.AddListener(t.cfgPath, t) t.repo = repository.GetJobRepository() t.getStatistics = archive.GetStatistics diff --git a/internal/tagger/classifyJob_test.go b/internal/tagger/classifyJob_test.go index bed7a8f0..f82cf807 100644 --- a/internal/tagger/classifyJob_test.go +++ b/internal/tagger/classifyJob_test.go @@ -13,13 +13,13 @@ type MockJobRepository struct { mock.Mock } -func (m *MockJobRepository) HasTag(jobId int64, tagType string, tagName string) bool { - args := m.Called(jobId, tagType, tagName) +func (m *MockJobRepository) HasTag(jobID int64, tagType string, tagName string) bool { + args := m.Called(jobID, tagType, tagName) return args.Bool(0) } -func (m *MockJobRepository) AddTagOrCreateDirect(jobId int64, tagType string, tagName string) (tagId int64, err error) { - args := m.Called(jobId, tagType, tagName) +func (m *MockJobRepository) AddTagOrCreateDirect(jobID int64, tagType string, tagName string) (tagID int64, err error) { + args := m.Called(jobID, tagType, tagName) return args.Get(0).(int64), args.Error(1) } diff --git a/internal/tagger/detectApp.go b/internal/tagger/detectApp.go index 0b8e3e7e..2a89ea21 100644 --- a/internal/tagger/detectApp.go +++ b/internal/tagger/detectApp.go @@ -7,9 +7,7 @@ package tagger import ( "bufio" - "embed" "fmt" - "io/fs" "os" "path/filepath" "regexp" @@ -21,8 +19,14 @@ import ( "github.com/ClusterCockpit/cc-lib/v2/util" ) -//go:embed apps/* -var appFiles embed.FS +const ( + // defaultConfigPath is the default path for application tagging configuration + defaultConfigPath = "./var/tagger/apps" + // tagTypeApp is the tag type identifier for application tags + tagTypeApp = "app" + // configDirMatch is the directory name used for matching filesystem events + configDirMatch = "apps" +) type appInfo struct { tag string @@ -30,19 +34,19 @@ type 
appInfo struct { } // AppTagger detects applications by matching patterns in job scripts. -// It loads application patterns from embedded files and can dynamically reload -// configuration from a watched directory. When a job script matches a pattern, +// It loads application patterns from an external configuration directory and can dynamically reload +// configuration when files change. When a job script matches a pattern, // the corresponding application tag is automatically applied. type AppTagger struct { // apps maps application tags to their matching patterns - apps map[string]appInfo + apps map[string]appInfo // tagType is the type of tag ("app") tagType string // cfgPath is the path to watch for configuration changes cfgPath string } -func (t *AppTagger) scanApp(f fs.File, fns string) { +func (t *AppTagger) scanApp(f *os.File, fns string) { scanner := bufio.NewScanner(f) ai := appInfo{tag: strings.TrimSuffix(fns, filepath.Ext(fns)), strings: make([]string, 0)} @@ -56,7 +60,7 @@ func (t *AppTagger) scanApp(f fs.File, fns string) { // EventMatch checks if a filesystem event should trigger configuration reload. // It returns true if the event path contains "apps". func (t *AppTagger) EventMatch(s string) bool { - return strings.Contains(s, "apps") + return strings.Contains(s, configDirMatch) } // EventCallback is triggered when the configuration directory changes. @@ -71,43 +75,50 @@ func (t *AppTagger) EventCallback() { for _, fn := range files { fns := fn.Name() cclog.Debugf("Process: %s", fns) - f, err := os.Open(fmt.Sprintf("%s/%s", t.cfgPath, fns)) + f, err := os.Open(filepath.Join(t.cfgPath, fns)) if err != nil { cclog.Errorf("error opening app file %s: %#v", fns, err) + continue } t.scanApp(f, fns) + f.Close() } } -// Register initializes the AppTagger by loading application patterns from embedded files. -// It also sets up a file watch on ./var/tagger/apps if it exists, allowing for +// Register initializes the AppTagger by loading application patterns from external folder. +// It sets up a file watch on ./var/tagger/apps if it exists, allowing for // dynamic configuration updates without restarting the application. -// Returns an error if the embedded application files cannot be read. +// Returns an error if the configuration path does not exist or cannot be read. 
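// A hedged usage sketch (zero-value construction is an assumption; Register
// and the fallback path are taken from this patch):
//
//	var t AppTagger            // empty cfgPath falls back to defaultConfigPath
//	if err := t.Register(); err != nil {
//		cclog.Fatal(err)       // e.g. "configuration path does not exist: ./var/tagger/apps"
//	}
//
// Unlike the embedded variant, Register now fails instead of silently starting
// with no patterns when the configuration directory is missing.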
func (t *AppTagger) Register() error { - t.cfgPath = "./var/tagger/apps" - t.tagType = "app" + if t.cfgPath == "" { + t.cfgPath = defaultConfigPath + } + t.tagType = tagTypeApp + t.apps = make(map[string]appInfo, 0) - files, err := appFiles.ReadDir("apps") + if !util.CheckFileExists(t.cfgPath) { + return fmt.Errorf("configuration path does not exist: %s", t.cfgPath) + } + + files, err := os.ReadDir(t.cfgPath) if err != nil { return fmt.Errorf("error reading app folder: %#v", err) } - t.apps = make(map[string]appInfo, 0) + for _, fn := range files { fns := fn.Name() cclog.Debugf("Process: %s", fns) - f, err := appFiles.Open(fmt.Sprintf("apps/%s", fns)) + f, err := os.Open(filepath.Join(t.cfgPath, fns)) if err != nil { - return fmt.Errorf("error opening app file %s: %#v", fns, err) + cclog.Errorf("error opening app file %s: %#v", fns, err) + continue } - defer f.Close() t.scanApp(f, fns) + f.Close() } - if util.CheckFileExists(t.cfgPath) { - t.EventCallback() - cclog.Infof("Setup file watch for %s", t.cfgPath) - util.AddListener(t.cfgPath, t) - } + cclog.Infof("Setup file watch for %s", t.cfgPath) + util.AddListener(t.cfgPath, t) return nil } diff --git a/internal/tagger/detectApp_test.go b/internal/tagger/detectApp_test.go index 1c44f670..fe5e7a21 100644 --- a/internal/tagger/detectApp_test.go +++ b/internal/tagger/detectApp_test.go @@ -5,6 +5,8 @@ package tagger import ( + "os" + "path/filepath" "testing" "github.com/ClusterCockpit/cc-backend/internal/repository" @@ -29,28 +31,88 @@ func noErr(tb testing.TB, err error) { } } -func TestRegister(t *testing.T) { - var tagger AppTagger +func setupAppTaggerTestDir(t *testing.T) string { + t.Helper() - err := tagger.Register() + testDir := t.TempDir() + appsDir := filepath.Join(testDir, "apps") + err := os.MkdirAll(appsDir, 0o755) noErr(t, err) + srcDir := "../../configs/tagger/apps" + files, err := os.ReadDir(srcDir) + noErr(t, err) + + for _, file := range files { + if file.IsDir() { + continue + } + srcPath := filepath.Join(srcDir, file.Name()) + dstPath := filepath.Join(appsDir, file.Name()) + + data, err := os.ReadFile(srcPath) + noErr(t, err) + + err = os.WriteFile(dstPath, data, 0o644) + noErr(t, err) + } + + return appsDir +} + +func TestRegister(t *testing.T) { + appsDir := setupAppTaggerTestDir(t) + + var tagger AppTagger + tagger.cfgPath = appsDir + tagger.tagType = tagTypeApp + tagger.apps = make(map[string]appInfo, 0) + + files, err := os.ReadDir(appsDir) + noErr(t, err) + + for _, fn := range files { + if fn.IsDir() { + continue + } + fns := fn.Name() + f, err := os.Open(filepath.Join(appsDir, fns)) + noErr(t, err) + tagger.scanApp(f, fns) + f.Close() + } + if len(tagger.apps) != 16 { t.Errorf("wrong summary for diagnostic \ngot: %d \nwant: 16", len(tagger.apps)) } } func TestMatch(t *testing.T) { + appsDir := setupAppTaggerTestDir(t) r := setup(t) job, err := r.FindByIDDirect(317) noErr(t, err) var tagger AppTagger + tagger.cfgPath = appsDir + tagger.tagType = tagTypeApp + tagger.apps = make(map[string]appInfo, 0) - err = tagger.Register() + files, err := os.ReadDir(appsDir) noErr(t, err) + for _, fn := range files { + if fn.IsDir() { + continue + } + fns := fn.Name() + f, err := os.Open(filepath.Join(appsDir, fns)) + noErr(t, err) + tagger.scanApp(f, fns) + f.Close() + } + tagger.Match(job) if !r.HasTag(317, "app", "vasp") { From a9366d14c66aae29e3da4928558bd74b39662990 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Tue, 13 Jan 2026 08:32:32 +0100 Subject: [PATCH 19/23] Add README for tagging. 
Enable tagging by flag without configuration option --- cmd/cc-backend/main.go | 2 + configs/tagger/README.md | 419 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 421 insertions(+) diff --git a/cmd/cc-backend/main.go b/cmd/cc-backend/main.go index 8eb3c76f..9f98ccbf 100644 --- a/cmd/cc-backend/main.go +++ b/cmd/cc-backend/main.go @@ -302,6 +302,8 @@ func initSubsystems() error { // Apply tags if requested if flagApplyTags { + tagger.Init() + if err := tagger.RunTaggers(); err != nil { return fmt.Errorf("running job taggers: %w", err) } diff --git a/configs/tagger/README.md b/configs/tagger/README.md index e69de29b..759cbe97 100644 --- a/configs/tagger/README.md +++ b/configs/tagger/README.md @@ -0,0 +1,419 @@ +# Job Tagging Configuration + +ClusterCockpit provides automatic job tagging functionality to classify and +categorize jobs based on configurable rules. The tagging system consists of two +main components: + +1. **Application Detection** - Identifies which application a job is running +2. **Job Classification** - Analyzes job performance characteristics and applies classification tags + +## Directory Structure + +``` +configs/tagger/ +├── apps/ # Application detection patterns +│ ├── vasp.txt +│ ├── gromacs.txt +│ └── ... +└── jobclasses/ # Job classification rules + ├── parameters.json + ├── lowUtilization.json + ├── highload.json + └── ... +``` + +## Activating Tagger Rules + +### Step 1: Copy Configuration Files + +To activate tagging, review, adapt, and copy the configuration files from +`configs/tagger/` to `var/tagger/`: + +```bash +# From the cc-backend root directory +mkdir -p var/tagger +cp -r configs/tagger/apps var/tagger/ +cp -r configs/tagger/jobclasses var/tagger/ +``` + +### Step 2: Enable Tagging in Configuration + +Add or set the following configuration key in the `main` section of your `config.json`: + +```json +{ + "enable-job-taggers": true +} +``` + +**Important**: Automatic tagging is disabled by default. You must explicitly +enable it by setting `enable-job-taggers: true` in the main configuration file. + +### Step 3: Restart cc-backend + +The tagger system automatically loads configuration from `./var/tagger/` at +startup. After copying the files and enabling the feature, restart cc-backend: + +```bash +./cc-backend -server +``` + +### Step 4: Verify Configuration Loaded + +Check the logs for messages indicating successful configuration loading: + +``` +[INFO] Setup file watch for ./var/tagger/apps +[INFO] Setup file watch for ./var/tagger/jobclasses +``` + +## How Tagging Works + +### Automatic Tagging + +When `enable-job-taggers` is set to `true` in the configuration, tags are +automatically applied when: + +- **Job Start**: Application detection runs immediately when a job starts +- **Job Stop**: Job classification runs when a job completes + +The system analyzes job metadata and metrics to determine appropriate tags. + +**Note**: Automatic tagging only works for jobs that start or stop after the +feature is enabled. Existing jobs are not automatically retagged. + +### Manual Tagging (Retroactive) + +To apply tags to existing jobs in the database, use the `-apply-tags` command +line option: + +```bash +./cc-backend -apply-tags +``` + +This processes all jobs in the database and applies current tagging rules. 
This +is useful when: + +- You have existing jobs that were created before tagging was enabled +- You've added new tagging rules and want to apply them to historical data +- You've modified existing rules and want to re-evaluate all jobs + +### Hot Reload + +The tagger system watches the configuration directories for changes. You can +modify or add rules without restarting `cc-backend`: + +- Changes to `var/tagger/apps/*` are detected automatically +- Changes to `var/tagger/jobclasses/*` are detected automatically + +## Application Detection + +Application detection identifies which software a job is running by matching +patterns in the job script. + +### Configuration Format + +Application patterns are stored in text files under `var/tagger/apps/`. Each +file contains one or more regular expression patterns (one per line) that match +against the job script. + +**Example: `apps/vasp.txt`** + +``` +vasp +VASP +``` + +### How It Works + +1. When a job starts, the system retrieves the job script from metadata +2. Each line in the app files is treated as a regex pattern +3. Patterns are matched case-insensitively against the lowercased job script +4. If a match is found, a tag of type `app` with the filename (without extension) is applied +5. Only the first matching application is tagged + +### Adding New Applications + +1. Create a new file in `var/tagger/apps/` (e.g., `tensorflow.txt`) +2. Add regex patterns, one per line: + + ``` + tensorflow + tf\.keras + import tensorflow + ``` + +3. The file is automatically detected and loaded + +**Note**: The tag name will be the filename without the `.txt` extension (e.g., `tensorflow`). + +## Job Classification + +Job classification analyzes completed jobs based on their metrics and properties +to identify performance issues or characteristics. + +### Configuration Format + +Job classification rules are defined in JSON files under +`var/tagger/jobclasses/`. 
Each rule file defines: + +- **Metrics required**: Which job metrics to analyze +- **Requirements**: Pre-conditions that must be met +- **Variables**: Computed values used in the rule +- **Rule expression**: Boolean expression that determines if the rule matches +- **Hint template**: Message displayed when the rule matches + +### Parameters File + +`jobclasses/parameters.json` defines shared threshold values used across multiple rules: + +```json +{ + "lowcpuload_threshold_factor": 0.9, + "highmemoryusage_threshold_factor": 0.9, + "job_min_duration_seconds": 600.0, + "sampling_interval_seconds": 30.0 +} +``` + +### Rule File Structure + +**Example: `jobclasses/lowUtilization.json`** + +```json +{ + "name": "Low resource utilization", + "tag": "lowutilization", + "parameters": ["job_min_duration_seconds"], + "metrics": ["flops_any", "mem_bw"], + "requirements": [ + "job.shared == \"none\"", + "job.duration > job_min_duration_seconds" + ], + "variables": [ + { + "name": "mem_bw_perc", + "expr": "1.0 - (mem_bw.avg / mem_bw.limits.peak)" + } + ], + "rule": "flops_any.avg < flops_any.limits.alert", + "hint": "Average flop rate {{.flops_any.avg}} falls below threshold {{.flops_any.limits.alert}}" +} +``` + +#### Field Descriptions + +| Field | Description | +| -------------- | ----------------------------------------------------------------------------- | +| `name` | Human-readable description of the rule | +| `tag` | Tag identifier applied when the rule matches | +| `parameters` | List of parameter names from `parameters.json` to include in rule environment | +| `metrics` | List of metrics required for evaluation (must be present in job data) | +| `requirements` | Boolean expressions that must all be true for the rule to be evaluated | +| `variables` | Named expressions computed before evaluating the main rule | +| `rule` | Boolean expression that determines if the job matches this classification | +| `hint` | Go template string for generating a user-visible message | + +### Expression Environment + +Expressions in `requirements`, `variables`, and `rule` have access to: + +**Job Properties:** + +- `job.shared` - Shared node allocation type +- `job.duration` - Job runtime in seconds +- `job.numCores` - Number of CPU cores +- `job.numNodes` - Number of nodes +- `job.jobState` - Job completion state +- `job.numAcc` - Number of accelerators +- `job.smt` - SMT setting + +**Metric Statistics (for each metric in `metrics`):** + +- `.min` - Minimum value +- `.max` - Maximum value +- `.avg` - Average value +- `.limits.peak` - Peak limit from cluster config +- `.limits.normal` - Normal threshold +- `.limits.caution` - Caution threshold +- `.limits.alert` - Alert threshold + +**Parameters:** + +- All parameters listed in the `parameters` field + +**Variables:** + +- All variables defined in the `variables` array + +### Expression Language + +Rules use the [expr](https://github.com/expr-lang/expr) language for expressions. Supported operations: + +- **Arithmetic**: `+`, `-`, `*`, `/`, `%`, `^` +- **Comparison**: `==`, `!=`, `<`, `<=`, `>`, `>=` +- **Logical**: `&&`, `||`, `!` +- **Functions**: Standard math functions (see expr documentation) + +### Hint Templates + +Hints use Go's `text/template` syntax. Variables from the evaluation environment are accessible: + +``` +{{.flops_any.avg}} # Access metric average +{{.job.duration}} # Access job property +{{.my_variable}} # Access computed variable +``` + +### Adding New Classification Rules + +1. 
Create a new JSON file in `var/tagger/jobclasses/` (e.g., `memoryLeak.json`) +2. Define the rule structure: + + ```json + { + "name": "Memory Leak Detection", + "tag": "memory_leak", + "parameters": ["memory_leak_slope_threshold"], + "metrics": ["mem_used"], + "requirements": ["job.duration > 3600"], + "variables": [ + { + "name": "mem_growth", + "expr": "(mem_used.max - mem_used.min) / job.duration" + } + ], + "rule": "mem_growth > memory_leak_slope_threshold", + "hint": "Memory usage grew by {{.mem_growth}} per second" + } + ``` + +3. Add any new parameters to `parameters.json` +4. The file is automatically detected and loaded + +## Configuration Paths + +The tagger system reads from these paths (relative to cc-backend working directory): + +- **Application patterns**: `./var/tagger/apps/` +- **Job classification rules**: `./var/tagger/jobclasses/` + +These paths are defined as constants in the source code and cannot be changed without recompiling. + +## Troubleshooting + +### Tags Not Applied + +1. **Check tagging is enabled**: Verify `enable-job-taggers: true` is set in `config.json` + +2. **Check configuration exists**: + + ```bash + ls -la var/tagger/apps + ls -la var/tagger/jobclasses + ``` + +3. **Check logs for errors**: + + ```bash + ./cc-backend -server -loglevel debug + ``` + +4. **Verify file permissions**: Ensure cc-backend can read the configuration files + +5. **For existing jobs**: Use `./cc-backend -apply-tags` to retroactively tag jobs + +### Rules Not Matching + +1. **Enable debug logging**: Set `loglevel: debug` to see detailed rule evaluation +2. **Check requirements**: Ensure all requirements in the rule are satisfied +3. **Verify metrics exist**: Classification rules require job metrics to be available +4. **Check metric names**: Ensure metric names match those in your cluster configuration + +### File Watch Not Working + +If changes to configuration files aren't detected: + +1. Restart cc-backend to reload all configuration +2. Check filesystem supports file watching (network filesystems may not) +3. Check logs for file watch setup messages + +## Best Practices + +1. **Start Simple**: Begin with basic rules and refine based on results +2. **Use Requirements**: Filter out irrelevant jobs early with requirements +3. **Test Incrementally**: Add one rule at a time and verify behavior +4. **Document Rules**: Use descriptive names and clear hint messages +5. **Share Parameters**: Define common thresholds in `parameters.json` for consistency +6. **Version Control**: Keep your `var/tagger/` configuration in version control +7. **Backup Before Changes**: Test new rules on a copy before deploying to production + +## Examples + +### Simple Application Detection + +**File: `var/tagger/apps/python.txt`** + +``` +python +python3 +\.py +``` + +This detects jobs running Python scripts. + +### Complex Classification Rule + +**File: `var/tagger/jobclasses/cpuImbalance.json`** + +```json +{ + "name": "CPU Load Imbalance", + "tag": "cpu_imbalance", + "parameters": ["core_load_imbalance_threshold_factor"], + "metrics": ["cpu_load"], + "requirements": ["job.numCores > 1", "job.duration > 600"], + "variables": [ + { + "name": "load_variance", + "expr": "(cpu_load.max - cpu_load.min) / cpu_load.avg" + } + ], + "rule": "load_variance > core_load_imbalance_threshold_factor", + "hint": "CPU load varies by {{printf \"%.1f%%\" (load_variance * 100)}} across cores" +} +``` + +This detects jobs where CPU load is unevenly distributed across cores. 
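Because hints are plain Go `text/template` strings, a hint can be dry-run outside cc-backend. A minimal, self-contained sketch (the map layout mirrors the documented evaluation environment; the numbers are invented):

```go
package main

import (
	"os"
	"text/template"
)

func main() {
	// Hint string from the lowUtilization example above.
	const hint = "Average flop rate {{.flops_any.avg}} falls below threshold {{.flops_any.limits.alert}}"

	// Invented values standing in for the metric statistics environment.
	env := map[string]any{
		"flops_any": map[string]any{
			"avg":    12.5,
			"limits": map[string]any{"alert": 50.0},
		},
	}

	tmpl := template.Must(template.New("hint").Parse(hint))
	if err := tmpl.Execute(os.Stdout, env); err != nil {
		panic(err)
	}
	// Prints: Average flop rate 12.5 falls below threshold 50
}
```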
+ +## Reference + +### Configuration Options + +**Main Configuration (`config.json`)**: + +- `enable-job-taggers` (boolean, default: `false`) - Enables automatic job tagging system + - Must be set to `true` to activate automatic tagging on job start/stop events + - Does not affect the `-apply-tags` command line option + +**Command Line Options**: + +- `-apply-tags` - Apply all tagging rules to existing jobs in the database + - Works independently of `enable-job-taggers` configuration + - Useful for retroactively tagging jobs or re-evaluating with updated rules + +### Default Configuration Location + +The example configurations are provided in: + +- `configs/tagger/apps/` - Example application patterns (16 applications) +- `configs/tagger/jobclasses/` - Example classification rules (3 rules) + +Copy these to `var/tagger/` and customize for your environment. + +### Tag Types + +- `app` - Application tags (e.g., "vasp", "gromacs") +- `jobClass` - Classification tags (e.g., "lowutilization", "highload") + +Tags can be queried and filtered in the ClusterCockpit UI and API. From 2ebab1e2e2579cccebaec7615b3a1e07cf6bfe49 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Tue, 13 Jan 2026 09:50:57 +0100 Subject: [PATCH 20/23] Reformat with gofumpt --- internal/auth/auth.go | 10 ++++---- internal/auth/auth_test.go | 40 +++++++++++++++---------------- internal/auth/jwt.go | 6 ++--- internal/auth/jwtCookieSession.go | 4 ++-- internal/auth/jwtHelpers.go | 24 +++++++++---------- internal/auth/jwtHelpers_test.go | 37 ++++++++++++++-------------- internal/auth/jwtSession.go | 4 ++-- internal/auth/oidc.go | 4 ++-- 8 files changed, 64 insertions(+), 65 deletions(-) diff --git a/internal/auth/auth.go b/internal/auth/auth.go index 3be1768e..41691d00 100644 --- a/internal/auth/auth.go +++ b/internal/auth/auth.go @@ -40,7 +40,7 @@ type Authenticator interface { // authenticator should attempt the login. This method should not perform // expensive operations or actual authentication. CanLogin(user *schema.User, username string, rw http.ResponseWriter, r *http.Request) (*schema.User, bool) - + // Login performs the actually authentication for the user. // It returns the authenticated user or an error if authentication fails. // The user parameter may be nil if the user doesn't exist in the database yet. 
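The login throttle touched in the hunks below is easier to see in isolation. A standalone sketch using the same `golang.org/x/time/rate` parameters as `getIPUserLimiter` (the output comment describes expected behavior, matching `TestRateLimiterBehavior` in this patch):

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// Same parameters as getIPUserLimiter: 5 attempts per 15 minutes,
	// i.e. a burst of 5 with one token refilled every 3 minutes.
	limiter := rate.NewLimiter(rate.Every(15*time.Minute/5), 5)

	for i := 1; i <= 6; i++ {
		fmt.Printf("attempt %d allowed: %v\n", i, limiter.Allow())
	}
	// Attempts 1-5 pass immediately; attempt 6 is rejected until a
	// token refills.
}
```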
@@ -65,13 +65,13 @@ var ipUserLimiters sync.Map func getIPUserLimiter(ip, username string) *rate.Limiter { key := ip + ":" + username now := time.Now() - + if entry, ok := ipUserLimiters.Load(key); ok { rle := entry.(*rateLimiterEntry) rle.lastUsed = now return rle.limiter } - + // More aggressive rate limiting: 5 attempts per 15 minutes newLimiter := rate.NewLimiter(rate.Every(15*time.Minute/5), 5) ipUserLimiters.Store(key, &rateLimiterEntry{ @@ -176,7 +176,7 @@ func (auth *Authentication) AuthViaSession( func Init(authCfg *json.RawMessage) { initOnce.Do(func() { authInstance = &Authentication{} - + // Start background cleanup of rate limiters startRateLimiterCleanup() @@ -272,7 +272,7 @@ func handleUserSync(user *schema.User, syncUserOnLogin, updateUserOnLogin bool) cclog.Errorf("Error while loading user '%s': %v", user.Username, err) return } - + if err == sql.ErrNoRows && syncUserOnLogin { // Add new user if err := r.AddUser(user); err != nil { cclog.Errorf("Error while adding user '%s' to DB: %v", user.Username, err) diff --git a/internal/auth/auth_test.go b/internal/auth/auth_test.go index 15f153e6..68961354 100644 --- a/internal/auth/auth_test.go +++ b/internal/auth/auth_test.go @@ -15,25 +15,25 @@ import ( func TestGetIPUserLimiter(t *testing.T) { ip := "192.168.1.1" username := "testuser" - + // Get limiter for the first time limiter1 := getIPUserLimiter(ip, username) if limiter1 == nil { t.Fatal("Expected limiter to be created") } - + // Get the same limiter again limiter2 := getIPUserLimiter(ip, username) if limiter1 != limiter2 { t.Error("Expected to get the same limiter instance") } - + // Get a different limiter for different user limiter3 := getIPUserLimiter(ip, "otheruser") if limiter1 == limiter3 { t.Error("Expected different limiter for different user") } - + // Get a different limiter for different IP limiter4 := getIPUserLimiter("192.168.1.2", username) if limiter1 == limiter4 { @@ -45,16 +45,16 @@ func TestGetIPUserLimiter(t *testing.T) { func TestRateLimiterBehavior(t *testing.T) { ip := "10.0.0.1" username := "ratelimituser" - + limiter := getIPUserLimiter(ip, username) - + // Should allow first 5 attempts for i := 0; i < 5; i++ { if !limiter.Allow() { t.Errorf("Request %d should be allowed within rate limit", i+1) } } - + // 6th attempt should be blocked if limiter.Allow() { t.Error("Request 6 should be blocked by rate limiter") @@ -65,19 +65,19 @@ func TestRateLimiterBehavior(t *testing.T) { func TestCleanupOldRateLimiters(t *testing.T) { // Clear all existing limiters first to avoid interference from other tests cleanupOldRateLimiters(time.Now().Add(24 * time.Hour)) - + // Create some new rate limiters limiter1 := getIPUserLimiter("1.1.1.1", "user1") limiter2 := getIPUserLimiter("2.2.2.2", "user2") - + if limiter1 == nil || limiter2 == nil { t.Fatal("Failed to create test limiters") } - + // Cleanup limiters older than 1 second from now (should keep both) time.Sleep(10 * time.Millisecond) // Small delay to ensure timestamp difference cleanupOldRateLimiters(time.Now().Add(-1 * time.Second)) - + // Verify they still exist (should get same instance) if getIPUserLimiter("1.1.1.1", "user1") != limiter1 { t.Error("Limiter 1 was incorrectly cleaned up") @@ -85,10 +85,10 @@ func TestCleanupOldRateLimiters(t *testing.T) { if getIPUserLimiter("2.2.2.2", "user2") != limiter2 { t.Error("Limiter 2 was incorrectly cleaned up") } - + // Cleanup limiters older than 1 hour from now (should remove both) cleanupOldRateLimiters(time.Now().Add(2 * time.Hour)) - + // Getting them again 
should create new instances newLimiter1 := getIPUserLimiter("1.1.1.1", "user1") if newLimiter1 == limiter1 { @@ -107,14 +107,14 @@ func TestIPv4Extraction(t *testing.T) { {"IPv4 without port", "192.168.1.1", "192.168.1.1"}, {"Localhost with port", "127.0.0.1:3000", "127.0.0.1"}, } - + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { result := tt.input if host, _, err := net.SplitHostPort(result); err == nil { result = host } - + if result != tt.expected { t.Errorf("Expected %s, got %s", tt.expected, result) } @@ -122,7 +122,7 @@ func TestIPv4Extraction(t *testing.T) { } } -// TestIPv6Extraction tests extracting IPv6 addresses +// TestIPv6Extraction tests extracting IPv6 addresses func TestIPv6Extraction(t *testing.T) { tests := []struct { name string @@ -134,14 +134,14 @@ func TestIPv6Extraction(t *testing.T) { {"IPv6 without port", "2001:db8::1", "2001:db8::1"}, {"IPv6 localhost", "::1", "::1"}, } - + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { result := tt.input if host, _, err := net.SplitHostPort(result); err == nil { result = host } - + if result != tt.expected { t.Errorf("Expected %s, got %s", tt.expected, result) } @@ -160,14 +160,14 @@ func TestIPExtractionEdgeCases(t *testing.T) { {"Empty string", "", ""}, {"Just port", ":8080", ""}, } - + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { result := tt.input if host, _, err := net.SplitHostPort(result); err == nil { result = host } - + if result != tt.expected { t.Errorf("Expected %s, got %s", tt.expected, result) } diff --git a/internal/auth/jwt.go b/internal/auth/jwt.go index be642219..c0f641b9 100644 --- a/internal/auth/jwt.go +++ b/internal/auth/jwt.go @@ -101,20 +101,20 @@ func (ja *JWTAuthenticator) AuthViaJWT( // Token is valid, extract payload claims := token.Claims.(jwt.MapClaims) - + // Use shared helper to get user from JWT claims var user *schema.User user, err = getUserFromJWT(claims, Keys.JwtConfig.ValidateUser, schema.AuthToken, -1) if err != nil { return nil, err } - + // If not validating user, we only get roles from JWT (no projects for this auth method) if !Keys.JwtConfig.ValidateUser { user.Roles = extractRolesFromClaims(claims, false) user.Projects = nil // Standard JWT auth doesn't include projects } - + return user, nil } diff --git a/internal/auth/jwtCookieSession.go b/internal/auth/jwtCookieSession.go index 42f7439e..4c4bbeb6 100644 --- a/internal/auth/jwtCookieSession.go +++ b/internal/auth/jwtCookieSession.go @@ -146,13 +146,13 @@ func (ja *JWTCookieSessionAuthenticator) Login( } claims := token.Claims.(jwt.MapClaims) - + // Use shared helper to get user from JWT claims user, err = getUserFromJWT(claims, jc.ValidateUser, schema.AuthSession, schema.AuthViaToken) if err != nil { return nil, err } - + // Sync or update user if configured if !jc.ValidateUser && (jc.SyncUserOnLogin || jc.UpdateUserOnLogin) { handleTokenUser(user) diff --git a/internal/auth/jwtHelpers.go b/internal/auth/jwtHelpers.go index 5bfc91ef..de59145e 100644 --- a/internal/auth/jwtHelpers.go +++ b/internal/auth/jwtHelpers.go @@ -28,7 +28,7 @@ func extractStringFromClaims(claims jwt.MapClaims, key string) string { // If validateRoles is true, only valid roles are returned func extractRolesFromClaims(claims jwt.MapClaims, validateRoles bool) []string { var roles []string - + if rawroles, ok := claims["roles"].([]any); ok { for _, rr := range rawroles { if r, ok := rr.(string); ok { @@ -42,14 +42,14 @@ func extractRolesFromClaims(claims jwt.MapClaims, validateRoles bool) []string { } } } - + 
return roles } // extractProjectsFromClaims extracts projects from JWT claims func extractProjectsFromClaims(claims jwt.MapClaims) []string { projects := make([]string, 0) - + if rawprojs, ok := claims["projects"].([]any); ok { for _, pp := range rawprojs { if p, ok := pp.(string); ok { @@ -61,7 +61,7 @@ func extractProjectsFromClaims(claims jwt.MapClaims) []string { projects = append(projects, projSlice...) } } - + return projects } @@ -72,14 +72,14 @@ func extractNameFromClaims(claims jwt.MapClaims) string { if name, ok := claims["name"].(string); ok { return name } - + // Try nested structure: {name: {values: [...]}} if wrap, ok := claims["name"].(map[string]any); ok { if vals, ok := wrap["values"].([]any); ok { if len(vals) == 0 { return "" } - + name := fmt.Sprintf("%v", vals[0]) for i := 1; i < len(vals); i++ { name += fmt.Sprintf(" %v", vals[i]) @@ -87,7 +87,7 @@ func extractNameFromClaims(claims jwt.MapClaims) string { return name } } - + return "" } @@ -100,7 +100,7 @@ func getUserFromJWT(claims jwt.MapClaims, validateUser bool, authType schema.Aut if sub == "" { return nil, errors.New("missing 'sub' claim in JWT") } - + if validateUser { // Validate user against database ur := repository.GetUserRepository() @@ -109,22 +109,22 @@ func getUserFromJWT(claims jwt.MapClaims, validateUser bool, authType schema.Aut cclog.Errorf("Error while loading user '%v': %v", sub, err) return nil, fmt.Errorf("database error: %w", err) } - + // Deny any logins for unknown usernames if user == nil || err == sql.ErrNoRows { cclog.Warn("Could not find user from JWT in internal database.") return nil, errors.New("unknown user") } - + // Return database user (with database roles) return user, nil } - + // Create user from JWT claims name := extractNameFromClaims(claims) roles := extractRolesFromClaims(claims, true) // Validate roles projects := extractProjectsFromClaims(claims) - + return &schema.User{ Username: sub, Name: name, diff --git a/internal/auth/jwtHelpers_test.go b/internal/auth/jwtHelpers_test.go index 84a1f2e0..4627f7e5 100644 --- a/internal/auth/jwtHelpers_test.go +++ b/internal/auth/jwtHelpers_test.go @@ -19,7 +19,7 @@ func TestExtractStringFromClaims(t *testing.T) { "email": "test@example.com", "age": 25, // not a string } - + tests := []struct { name string key string @@ -30,7 +30,7 @@ func TestExtractStringFromClaims(t *testing.T) { {"Non-existent key", "missing", ""}, {"Non-string value", "age", ""}, } - + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { result := extractStringFromClaims(claims, tt.key) @@ -88,16 +88,16 @@ func TestExtractRolesFromClaims(t *testing.T) { expected: []string{}, }, } - + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { result := extractRolesFromClaims(tt.claims, tt.validateRoles) - + if len(result) != len(tt.expected) { t.Errorf("Expected %d roles, got %d", len(tt.expected), len(result)) return } - + for i, role := range result { if i >= len(tt.expected) || role != tt.expected[i] { t.Errorf("Expected role %s at position %d, got %s", tt.expected[i], i, role) @@ -141,16 +141,16 @@ func TestExtractProjectsFromClaims(t *testing.T) { expected: []string{"project1", "project2"}, // Should skip non-strings }, } - + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { result := extractProjectsFromClaims(tt.claims) - + if len(result) != len(tt.expected) { t.Errorf("Expected %d projects, got %d", len(tt.expected), len(result)) return } - + for i, project := range result { if i >= len(tt.expected) || project != tt.expected[i] { 
t.Errorf("Expected project %s at position %d, got %s", tt.expected[i], i, project) @@ -216,7 +216,7 @@ func TestExtractNameFromClaims(t *testing.T) { expected: "123 Smith", // Should convert to string }, } - + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { result := extractNameFromClaims(tt.claims) @@ -235,29 +235,28 @@ func TestGetUserFromJWT_NoValidation(t *testing.T) { "roles": []any{"user", "admin"}, "projects": []any{"project1", "project2"}, } - + user, err := getUserFromJWT(claims, false, schema.AuthToken, -1) - if err != nil { t.Fatalf("Unexpected error: %v", err) } - + if user.Username != "testuser" { t.Errorf("Expected username 'testuser', got '%s'", user.Username) } - + if user.Name != "Test User" { t.Errorf("Expected name 'Test User', got '%s'", user.Name) } - + if len(user.Roles) != 2 { t.Errorf("Expected 2 roles, got %d", len(user.Roles)) } - + if len(user.Projects) != 2 { t.Errorf("Expected 2 projects, got %d", len(user.Projects)) } - + if user.AuthType != schema.AuthToken { t.Errorf("Expected AuthType %v, got %v", schema.AuthToken, user.AuthType) } @@ -268,13 +267,13 @@ func TestGetUserFromJWT_MissingSub(t *testing.T) { claims := jwt.MapClaims{ "name": "Test User", } - + _, err := getUserFromJWT(claims, false, schema.AuthToken, -1) - + if err == nil { t.Error("Expected error for missing sub claim") } - + if err.Error() != "missing 'sub' claim in JWT" { t.Errorf("Expected specific error message, got: %v", err) } diff --git a/internal/auth/jwtSession.go b/internal/auth/jwtSession.go index 107afcb8..de7e985b 100644 --- a/internal/auth/jwtSession.go +++ b/internal/auth/jwtSession.go @@ -75,13 +75,13 @@ func (ja *JWTSessionAuthenticator) Login( } claims := token.Claims.(jwt.MapClaims) - + // Use shared helper to get user from JWT claims user, err = getUserFromJWT(claims, Keys.JwtConfig.ValidateUser, schema.AuthSession, schema.AuthViaToken) if err != nil { return nil, err } - + // Sync or update user if configured if !Keys.JwtConfig.ValidateUser && (Keys.JwtConfig.SyncUserOnLogin || Keys.JwtConfig.UpdateUserOnLogin) { handleTokenUser(user) diff --git a/internal/auth/oidc.go b/internal/auth/oidc.go index a3fc09cc..b90aca4f 100644 --- a/internal/auth/oidc.go +++ b/internal/auth/oidc.go @@ -59,7 +59,7 @@ func NewOIDC(a *Authentication) *OIDC { // Use context with timeout for provider initialization ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() - + provider, err := oidc.NewProvider(ctx, Keys.OpenIDConfig.Provider) if err != nil { cclog.Fatal(err) @@ -119,7 +119,7 @@ func (oa *OIDC) OAuth2Callback(rw http.ResponseWriter, r *http.Request) { // Exchange authorization code for token with timeout ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() - + token, err := oa.client.Exchange(ctx, code, oauth2.VerifierOption(codeVerifier)) if err != nil { http.Error(rw, "Failed to exchange token: "+err.Error(), http.StatusInternalServerError) From 04a2e460ae8b1e884795bdf5200e1efb671ac958 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Tue, 13 Jan 2026 09:52:00 +0100 Subject: [PATCH 21/23] Refactor metricstore. 
 internal/metricstore/avroCheckpoint.go        |   6 +-
 internal/metricstore/config.go                |  70 ++++++++++-
 internal/metricstore/configSchema.go          |  38 +++++-
 .../{memorystore.go => metricstore.go}        | 114 ++++++++++++++----
 ...emorystore_test.go => metricstore_test.go} |   2 +-
 5 files changed, 203 insertions(+), 27 deletions(-)
 rename internal/metricstore/{memorystore.go => metricstore.go} (76%)
 rename internal/metricstore/{memorystore_test.go => metricstore_test.go} (99%)

diff --git a/internal/metricstore/avroCheckpoint.go b/internal/metricstore/avroCheckpoint.go
index 275a64bd..aa14ce5a 100644
--- a/internal/metricstore/avroCheckpoint.go
+++ b/internal/metricstore/avroCheckpoint.go
@@ -24,8 +24,10 @@ import (
 	"github.com/linkedin/goavro/v2"
 )
 
-var NumAvroWorkers int = DefaultAvroWorkers
-var startUp bool = true
+var (
+	NumAvroWorkers int  = DefaultAvroWorkers
+	startUp        bool = true
+)
 
 func (as *AvroStore) ToCheckpoint(dir string, dumpAll bool) (int, error) {
 	levels := make([]*AvroLevel, 0)
diff --git a/internal/metricstore/config.go b/internal/metricstore/config.go
index 97f16c46..06ae774d 100644
--- a/internal/metricstore/config.go
+++ b/internal/metricstore/config.go
@@ -33,8 +33,19 @@ type MetricStoreConfig struct {
 		DumpToFile  string `json:"dump-to-file"`
 		EnableGops  bool   `json:"gops"`
 	} `json:"debug"`
+	// Global default retention duration
 	RetentionInMemory string `json:"retention-in-memory"`
-	Archive           struct {
+	// Per-cluster retention overrides
+	Clusters []struct {
+		Cluster           string `json:"cluster"`
+		RetentionInMemory string `json:"retention-in-memory"`
+		// Per-subcluster retention overrides within this cluster
+		SubClusters []struct {
+			SubCluster        string `json:"subcluster"`
+			RetentionInMemory string `json:"retention-in-memory"`
+		} `json:"subclusters,omitempty"`
+	} `json:"clusters,omitempty"`
+	Archive struct {
 		Interval      string `json:"interval"`
 		RootDir       string `json:"directory"`
 		DeleteInstead bool   `json:"delete-instead"`
@@ -50,6 +61,14 @@
 
 var Keys MetricStoreConfig
 
+type retentionConfig struct {
+	global        time.Duration
+	clusterMap    map[string]time.Duration
+	subClusterMap map[string]map[string]time.Duration
+}
+
+var retentionLookup *retentionConfig
+
 // AggregationStrategy for aggregation over multiple values at different cpus/sockets/..., not time!
 type AggregationStrategy int
@@ -113,3 +132,52 @@ func AddMetric(name string, metric MetricConfig) error {
 
 	return nil
 }
+
+func GetRetentionDuration(cluster, subCluster string) (time.Duration, error) {
+	if retentionLookup == nil {
+		return 0, fmt.Errorf("[METRICSTORE]> retention configuration not initialized")
+	}
+
+	if subCluster != "" {
+		if subMap, ok := retentionLookup.subClusterMap[cluster]; ok {
+			if retention, ok := subMap[subCluster]; ok {
+				return retention, nil
+			}
+		}
+	}
+
+	if retention, ok := retentionLookup.clusterMap[cluster]; ok {
+		return retention, nil
+	}
+
+	return retentionLookup.global, nil
+}
+
+// GetShortestRetentionDuration returns the shortest configured retention duration
+// across all levels (global, cluster, and subcluster configurations).
+// Returns 0 if retentionLookup is not initialized or global retention is not set.
+func GetShortestRetentionDuration() time.Duration {
+	if retentionLookup == nil || retentionLookup.global <= 0 {
+		return 0
+	}
+
+	shortest := retentionLookup.global
+
+	// Check all cluster-level retention durations
+	for _, clusterRetention := range retentionLookup.clusterMap {
+		if clusterRetention > 0 && clusterRetention < shortest {
+			shortest = clusterRetention
+		}
+	}
+
+	// Check all subcluster-level retention durations
+	for _, subClusterMap := range retentionLookup.subClusterMap {
+		for _, scRetention := range subClusterMap {
+			if scRetention > 0 && scRetention < shortest {
+				shortest = scRetention
+			}
+		}
+	}
+
+	return shortest
+}
diff --git a/internal/metricstore/configSchema.go b/internal/metricstore/configSchema.go
index f1a20a73..868bacc5 100644
--- a/internal/metricstore/configSchema.go
+++ b/internal/metricstore/configSchema.go
@@ -46,9 +46,45 @@ const configSchema = `{
       }
     },
     "retention-in-memory": {
-      "description": "Keep the metrics within memory for given time interval. Retention for X hours, then the metrics would be freed.",
+      "description": "Global default: Keep the metrics within memory for given time interval. Retention for X hours, then the metrics would be freed.",
       "type": "string"
     },
+    "clusters": {
+      "description": "Optional per-cluster retention overrides",
+      "type": "array",
+      "items": {
+        "type": "object",
+        "required": ["cluster"],
+        "properties": {
+          "cluster": {
+            "description": "Cluster name",
+            "type": "string"
+          },
+          "retention-in-memory": {
+            "description": "Cluster-specific retention duration (overrides global default)",
+            "type": "string"
+          },
+          "subclusters": {
+            "description": "Optional per-subcluster retention overrides",
+            "type": "array",
+            "items": {
+              "type": "object",
+              "required": ["subcluster"],
+              "properties": {
+                "subcluster": {
+                  "description": "Subcluster name",
+                  "type": "string"
+                },
+                "retention-in-memory": {
+                  "description": "Subcluster-specific retention duration (overrides cluster and global default)",
+                  "type": "string"
+                }
+              }
+            }
+          }
+        }
+      }
+    },
     "nats": {
       "description": "Configuration for accepting published data through NATS.",
      "type": "array",
diff --git a/internal/metricstore/memorystore.go b/internal/metricstore/metricstore.go
similarity index 76%
rename from internal/metricstore/memorystore.go
rename to internal/metricstore/metricstore.go
index 14a02fcd..5a5c3bce 100644
--- a/internal/metricstore/memorystore.go
+++ b/internal/metricstore/metricstore.go
@@ -98,6 +98,49 @@ func Init(rawConfig json.RawMessage, wg *sync.WaitGroup) {
 		}
 	}
 
+	globalRetention, err := time.ParseDuration(Keys.RetentionInMemory)
+	if err != nil {
+		cclog.Fatal(err)
+	}
+
+	retentionLookup = &retentionConfig{
+		global:        globalRetention,
+		clusterMap:    make(map[string]time.Duration),
+		subClusterMap: make(map[string]map[string]time.Duration),
+	}
+
+	for _, clusterCfg := range Keys.Clusters {
+		if clusterCfg.RetentionInMemory != "" {
+			clusterRetention, err := time.ParseDuration(clusterCfg.RetentionInMemory)
+			if err != nil {
+				cclog.Warnf("[METRICSTORE]> Invalid retention duration for cluster '%s': %s\n", clusterCfg.Cluster, err.Error())
+				continue
+			}
+			retentionLookup.clusterMap[clusterCfg.Cluster] = clusterRetention
+			cclog.Debugf("[METRICSTORE]> Cluster '%s' retention: %s\n", clusterCfg.Cluster, clusterRetention)
+		}
+
+		if len(clusterCfg.SubClusters) > 0 {
+			if retentionLookup.subClusterMap[clusterCfg.Cluster] == nil {
+				retentionLookup.subClusterMap[clusterCfg.Cluster] = make(map[string]time.Duration)
+			}
+
+			for _, scCfg := range clusterCfg.SubClusters {
+				if scCfg.RetentionInMemory != "" {
+					scRetention, err := time.ParseDuration(scCfg.RetentionInMemory)
+					if err != nil {
+						cclog.Warnf("[METRICSTORE]> Invalid retention duration for subcluster '%s/%s': %s\n",
+							clusterCfg.Cluster, scCfg.SubCluster, err.Error())
+						continue
+					}
+					retentionLookup.subClusterMap[clusterCfg.Cluster][scCfg.SubCluster] = scRetention
+					cclog.Debugf("[METRICSTORE]> SubCluster '%s/%s' retention: %s\n",
+						clusterCfg.Cluster, scCfg.SubCluster, scRetention)
+				}
+			}
+		}
+	}
+
 	// Pass the config.MetricStoreKeys
 	InitMetrics(Metrics)
@@ -208,32 +251,22 @@ func Shutdown() {
 	cclog.Infof("[METRICSTORE]> Done! (%d files written)\n", files)
 }
 
-func getName(m *MemoryStore, i int) string {
-	for key, val := range m.Metrics {
-		if val.offset == i {
-			return key
-		}
-	}
-	return ""
-}
-
 func Retention(wg *sync.WaitGroup, ctx context.Context) {
 	ms := GetMemoryStore()
 	go func() {
 		defer wg.Done()
-		d, err := time.ParseDuration(Keys.RetentionInMemory)
-		if err != nil {
-			cclog.Fatal(err)
-		}
-		if d <= 0 {
+		shortestRetention := GetShortestRetentionDuration()
+		if shortestRetention <= 0 {
 			return
 		}
 
-		tickInterval := d / 2
+		tickInterval := shortestRetention / 2
 		if tickInterval <= 0 {
 			return
 		}
+		cclog.Debugf("[METRICSTORE]> Retention ticker interval set to %s (half of shortest retention: %s)\n",
+			tickInterval, shortestRetention)
 
 		ticker := time.NewTicker(tickInterval)
 		defer ticker.Stop()
@@ -242,13 +275,50 @@ func Retention(wg *sync.WaitGroup, ctx context.Context) {
 			case <-ctx.Done():
 				return
 			case <-ticker.C:
-				t := time.Now().Add(-d)
-				cclog.Infof("[METRICSTORE]> start freeing buffers (older than %s)...\n", t.Format(time.RFC3339))
-				freed, err := ms.Free(nil, t.Unix())
-				if err != nil {
-					cclog.Errorf("[METRICSTORE]> freeing up buffers failed: %s\n", err.Error())
-				} else {
-					cclog.Infof("[METRICSTORE]> done: %d buffers freed\n", freed)
+				totalFreed := 0
+
+				clusters := ms.ListChildren(nil)
+				for _, cluster := range clusters {
+					retention, err := GetRetentionDuration(cluster, "")
+					if err != nil {
+						cclog.Warnf("[METRICSTORE]> Could not get retention for cluster '%s': %s\n", cluster, err.Error())
+						continue
+					}
+					if retention <= 0 {
+						continue
+					}
+
+					t := time.Now().Add(-retention)
+					cclog.Debugf("[METRICSTORE]> Freeing buffers for cluster '%s' (older than %s, retention: %s)...\n",
+						cluster, t.Format(time.RFC3339), retention)
+
+					subClusters := ms.ListChildren([]string{cluster})
+					for _, subCluster := range subClusters {
+						scRetention, err := GetRetentionDuration(cluster, subCluster)
+						if err != nil {
+							cclog.Warnf("[METRICSTORE]> Could not get retention for subcluster '%s/%s': %s\n",
+								cluster, subCluster, err.Error())
+							continue
+						}
+						if scRetention <= 0 {
+							continue
+						}
+
+						scTime := time.Now().Add(-scRetention)
+						freed, err := ms.Free([]string{cluster, subCluster}, scTime.Unix())
+						if err != nil {
+							cclog.Errorf("[METRICSTORE]> freeing buffers for '%s/%s' failed: %s\n",
+								cluster, subCluster, err.Error())
+						} else if freed > 0 {
+							cclog.Debugf("[METRICSTORE]> freed %d buffers for '%s/%s' (retention: %s)\n",
+								freed, cluster, subCluster, scRetention)
+							totalFreed += freed
+						}
+					}
+				}
+
+				if totalFreed > 0 {
+					cclog.Infof("[METRICSTORE]> Total buffers freed: %d\n", totalFreed)
 				}
 			}
 		}
diff --git a/internal/metricstore/memorystore_test.go b/internal/metricstore/metricstore_test.go
similarity index 99%
rename from internal/metricstore/memorystore_test.go
rename to internal/metricstore/metricstore_test.go
index 29379d21..fd7c963f 100644
--- a/internal/metricstore/memorystore_test.go
+++ b/internal/metricstore/metricstore_test.go
@@ -131,7 +131,7 @@ func TestBufferWrite(t *testing.T) {
 
 func TestBufferRead(t *testing.T) {
 	b := newBuffer(100, 10)
-
+
 	// Write some test data
 	b.write(100, schema.Float(1.0))
 	b.write(110, schema.Float(2.0))

From 754f7e16f67e0708a479831c8360064d74147633 Mon Sep 17 00:00:00 2001
From: Jan Eitzinger
Date: Tue, 13 Jan 2026 09:52:31 +0100
Subject: [PATCH 22/23] Reformat with gofumpt

---
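Note (below the fold, ignored by git am): gofumpt is a stricter formatter
on top of gofmt; the changes below are purely mechanical -- dropping stray
blank lines, trimming trailing whitespace in tests, and switching to the
0o-prefixed octal literal style, e.g. (illustrative):

    os.WriteFile(path, data, 0644)  // before
    os.WriteFile(path, data, 0o644) // after gofumpt

No behavioral change intended.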
 internal/repository/node.go     |  1 -
 pkg/archive/fsBackend.go        |  4 ++--
 pkg/archive/s3Backend_test.go   | 42 ++++++++++++++++-----------------
 tools/archive-manager/main.go   |  1 -
 tools/archive-migration/main.go |  3 +--
 5 files changed, 24 insertions(+), 27 deletions(-)

diff --git a/internal/repository/node.go b/internal/repository/node.go
index 752a36fa..2890cdbc 100644
--- a/internal/repository/node.go
+++ b/internal/repository/node.go
@@ -561,7 +561,6 @@ func (r *NodeRepository) GetNodesForList(
 	nodeFilter string,
 	page *model.PageRequest,
 ) ([]string, map[string]string, int, bool, error) {
-
 	// Init Return Vars
 	nodes := make([]string, 0)
 	stateMap := make(map[string]string)
diff --git a/pkg/archive/fsBackend.go b/pkg/archive/fsBackend.go
index 020f2aa4..61921d70 100644
--- a/pkg/archive/fsBackend.go
+++ b/pkg/archive/fsBackend.go
@@ -188,7 +188,7 @@ func (fsa *FsArchive) Init(rawConfig json.RawMessage) (uint64, error) {
 	if isEmpty {
 		cclog.Infof("fsBackend Init() > Bootstrapping new archive at %s", fsa.path)
 		versionStr := fmt.Sprintf("%d\n", Version)
-		if err := os.WriteFile(filepath.Join(fsa.path, "version.txt"), []byte(versionStr), 0644); err != nil {
+		if err := os.WriteFile(filepath.Join(fsa.path, "version.txt"), []byte(versionStr), 0o644); err != nil {
 			cclog.Errorf("fsBackend Init() > failed to create version.txt: %v", err)
 			return 0, err
 		}
@@ -674,7 +674,7 @@ func (fsa *FsArchive) ImportJob(
 func (fsa *FsArchive) StoreClusterCfg(name string, config *schema.Cluster) error {
 	dir := filepath.Join(fsa.path, name)
-	if err := os.MkdirAll(dir, 0777); err != nil {
+	if err := os.MkdirAll(dir, 0o777); err != nil {
 		cclog.Errorf("StoreClusterCfg() > mkdir error: %v", err)
 		return err
 	}
diff --git a/pkg/archive/s3Backend_test.go b/pkg/archive/s3Backend_test.go
index 2b79db7f..0b4e17a2 100644
--- a/pkg/archive/s3Backend_test.go
+++ b/pkg/archive/s3Backend_test.go
@@ -41,7 +41,7 @@ func (m *MockS3Client) GetObject(ctx context.Context, params *s3.GetObjectInput,
 	if !exists {
 		return nil, fmt.Errorf("NoSuchKey: object not found")
 	}
-
+
 	contentLength := int64(len(data))
 	return &s3.GetObjectOutput{
 		Body: io.NopCloser(bytes.NewReader(data)),
@@ -65,7 +65,7 @@ func (m *MockS3Client) HeadObject(ctx context.Context, params *s3.HeadObjectInpu
 	if !exists {
 		return nil, fmt.Errorf("NotFound")
 	}
-
+
 	contentLength := int64(len(data))
 	return &s3.HeadObjectOutput{
 		ContentLength: &contentLength,
@@ -86,12 +86,12 @@ func (m *MockS3Client) CopyObject(ctx context.Context, params *s3.CopyObjectInpu
 		return nil, fmt.Errorf("invalid CopySource")
 	}
 	sourceKey := parts[1]
-
+
 	data, exists := m.objects[sourceKey]
 	if !exists {
 		return nil, fmt.Errorf("source not found")
 	}
-
+
 	destKey := aws.ToString(params.Key)
 	m.objects[destKey] = data
 	return &s3.CopyObjectOutput{}, nil
@@ -100,15 +100,15 @@ func (m *MockS3Client) CopyObject(ctx context.Context, params *s3.CopyObjectInpu
 func (m *MockS3Client) ListObjectsV2(ctx context.Context, params *s3.ListObjectsV2Input, optFns ...func(*s3.Options)) (*s3.ListObjectsV2Output, error) {
 	prefix := aws.ToString(params.Prefix)
 	delimiter := aws.ToString(params.Delimiter)
-
+
 	var contents []types.Object
 	commonPrefixes := make(map[string]bool)
-
+
 	for key, data := range m.objects {
 		if !strings.HasPrefix(key, prefix) {
 			continue
 		}
-
+
 		if delimiter != "" {
 			// Check if there's a delimiter after the prefix
 			remainder := strings.TrimPrefix(key, prefix)
@@ -120,21 +120,21 @@ func (m *MockS3Client) ListObjectsV2(ctx context.Context, params *s3.ListObjects
 				continue
 			}
 		}
-
+
 		size := int64(len(data))
 		contents = append(contents, types.Object{
 			Key:  aws.String(key),
 			Size: &size,
 		})
 	}
-
+
 	var prefixList []types.CommonPrefix
 	for p := range commonPrefixes {
 		prefixList = append(prefixList, types.CommonPrefix{
 			Prefix: aws.String(p),
 		})
 	}
-
+
 	return &s3.ListObjectsV2Output{
 		Contents:       contents,
 		CommonPrefixes: prefixList,
@@ -144,10 +144,10 @@ func (m *MockS3Client) ListObjectsV2(ctx context.Context, params *s3.ListObjects
 // Test helper to create a mock S3 archive with test data
 func setupMockS3Archive(t *testing.T) *MockS3Client {
 	mock := NewMockS3Client()
-
+
 	// Add version.txt
 	mock.objects["version.txt"] = []byte("2\n")
-
+
 	// Add a test cluster directory
 	mock.objects["emmy/cluster.json"] = []byte(`{
 		"name": "emmy",
@@ -165,7 +165,7 @@ func setupMockS3Archive(t *testing.T) *MockS3Client {
 			}
 		]
 	}`)
-
+
 	// Add a test job
 	mock.objects["emmy/1403/244/1608923076/meta.json"] = []byte(`{
 		"jobId": 1403244,
@@ -174,7 +174,7 @@ func setupMockS3Archive(t *testing.T) *MockS3Client {
 		"numNodes": 1,
 		"resources": [{"hostname": "node001"}]
 	}`)
-
+
 	mock.objects["emmy/1403/244/1608923076/data.json"] = []byte(`{
 		"mem_used": {
 			"node": {
@@ -184,7 +184,7 @@ func setupMockS3Archive(t *testing.T) *MockS3Client {
 			}
 		}
 	}`)
-
+
 	return mock
 }
 
@@ -213,7 +213,7 @@ func TestGetS3Key(t *testing.T) {
 		Cluster:   "emmy",
 		StartTime: 1608923076,
 	}
-
+
 	key := getS3Key(job, "meta.json")
 	expected := "emmy/1403/244/1608923076/meta.json"
 	if key != expected {
@@ -227,7 +227,7 @@ func TestGetS3Directory(t *testing.T) {
 		Cluster:   "emmy",
 		StartTime: 1608923076,
 	}
-
+
 	dir := getS3Directory(job)
 	expected := "emmy/1403/244/1608923076/"
 	if dir != expected {
@@ -247,13 +247,13 @@ func TestS3ArchiveConfigParsing(t *testing.T) {
 		"region": "us-east-1",
 		"usePathStyle": true
 	}`)
-
+
 	var cfg S3ArchiveConfig
 	err := json.Unmarshal(rawConfig, &cfg)
 	if err != nil {
 		t.Fatalf("failed to parse config: %v", err)
 	}
-
+
 	if cfg.Bucket != "test-bucket" {
 		t.Errorf("expected bucket 'test-bucket', got '%s'", cfg.Bucket)
 	}
@@ -277,14 +277,14 @@ func TestS3KeyGeneration(t *testing.T) {
 		{1404397, "emmy", 1609300556, "data.json.gz", "emmy/1404/397/1609300556/data.json.gz"},
 		{42, "fritz", 1234567890, "meta.json", "fritz/0/042/1234567890/meta.json"},
 	}
-
+
 	for _, tt := range tests {
 		job := &schema.Job{
 			JobID:     tt.jobID,
 			Cluster:   tt.cluster,
 			StartTime: tt.startTime,
 		}
-
+
 		key := getS3Key(job, tt.file)
 		if key != tt.expected {
 			t.Errorf("for job %d: expected %s, got %s", tt.jobID, tt.expected, key)
diff --git a/tools/archive-manager/main.go b/tools/archive-manager/main.go
index ffcba793..fff81256 100644
--- a/tools/archive-manager/main.go
+++ b/tools/archive-manager/main.go
@@ -71,7 +71,6 @@ func countJobsNative(archivePath string) (int, error) {
 		}
 		return nil
 	})
-
 	if err != nil {
 		return 0, fmt.Errorf("failed to walk directory: %w", err)
 	}
diff --git a/tools/archive-migration/main.go b/tools/archive-migration/main.go
index 8375ee98..1384e065 100644
--- a/tools/archive-migration/main.go
+++ b/tools/archive-migration/main.go
@@ -70,7 +70,6 @@ func main() {
 
 	// Run migration
 	migrated, failed, err := migrateArchive(archivePath, dryRun, numWorkers)
-
 	if err != nil {
 		cclog.Errorf("Migration completed with errors: %s", err.Error())
 		if failed > 0 {
@@ -104,5 +103,5 @@ func checkVersion(archivePath string) error {
 
 func updateVersion(archivePath string) error {
 	versionFile := filepath.Join(archivePath, "version.txt")
-	return os.WriteFile(versionFile, []byte("3\n"), 0644)
+	return os.WriteFile(versionFile, []byte("3\n"), 0o644)
 }

From 25c8fca56136eb04cbfe14d5f18a67082512bc64 Mon Sep 17 00:00:00 2001
From: Jan Eitzinger
Date: Tue, 13 Jan 2026 14:42:24 +0100
Subject: [PATCH 23/23] Revert retention config in metricstore

---
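Note (below the fold, ignored by git am): this reverts the per-cluster and
per-subcluster retention lookup introduced in PATCH 21/23; the single global
"retention-in-memory" duration is authoritative again, e.g. (illustrative
value):

    { "retention-in-memory": "48h" }

The schema additions and the per-cluster free loop in Retention() are
removed below; the memorystore.go -> metricstore.go rename from PATCH 21/23
is kept.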
 internal/metricstore/config.go       |  68 ------------------
 internal/metricstore/configSchema.go |  38 +---------
 internal/metricstore/metricstore.go  | 103 ++++-----------------------
 3 files changed, 13 insertions(+), 196 deletions(-)

diff --git a/internal/metricstore/config.go b/internal/metricstore/config.go
index 06ae774d..c789f11c 100644
--- a/internal/metricstore/config.go
+++ b/internal/metricstore/config.go
@@ -33,18 +33,7 @@ type MetricStoreConfig struct {
 		DumpToFile  string `json:"dump-to-file"`
 		EnableGops  bool   `json:"gops"`
 	} `json:"debug"`
-	// Global default retention duration
-	RetentionInMemory string `json:"retention-in-memory"`
-	// Per-cluster retention overrides
-	Clusters []struct {
-		Cluster           string `json:"cluster"`
 	RetentionInMemory string `json:"retention-in-memory"`
-		// Per-subcluster retention overrides within this cluster
-		SubClusters []struct {
-			SubCluster        string `json:"subcluster"`
-			RetentionInMemory string `json:"retention-in-memory"`
-		} `json:"subclusters,omitempty"`
-	} `json:"clusters,omitempty"`
 	Archive struct {
 		Interval      string `json:"interval"`
 		RootDir       string `json:"directory"`
@@ -61,14 +50,6 @@
 
 var Keys MetricStoreConfig
 
-type retentionConfig struct {
-	global        time.Duration
-	clusterMap    map[string]time.Duration
-	subClusterMap map[string]map[string]time.Duration
-}
-
-var retentionLookup *retentionConfig
-
 // AggregationStrategy for aggregation over multiple values at different cpus/sockets/..., not time!
 type AggregationStrategy int
@@ -132,52 +113,3 @@ func AddMetric(name string, metric MetricConfig) error {
 
 	return nil
 }
-
-func GetRetentionDuration(cluster, subCluster string) (time.Duration, error) {
-	if retentionLookup == nil {
-		return 0, fmt.Errorf("[METRICSTORE]> retention configuration not initialized")
-	}
-
-	if subCluster != "" {
-		if subMap, ok := retentionLookup.subClusterMap[cluster]; ok {
-			if retention, ok := subMap[subCluster]; ok {
-				return retention, nil
-			}
-		}
-	}
-
-	if retention, ok := retentionLookup.clusterMap[cluster]; ok {
-		return retention, nil
-	}
-
-	return retentionLookup.global, nil
-}
-
-// GetShortestRetentionDuration returns the shortest configured retention duration
-// across all levels (global, cluster, and subcluster configurations).
-// Returns 0 if retentionLookup is not initialized or global retention is not set.
-func GetShortestRetentionDuration() time.Duration {
-	if retentionLookup == nil || retentionLookup.global <= 0 {
-		return 0
-	}
-
-	shortest := retentionLookup.global
-
-	// Check all cluster-level retention durations
-	for _, clusterRetention := range retentionLookup.clusterMap {
-		if clusterRetention > 0 && clusterRetention < shortest {
-			shortest = clusterRetention
-		}
-	}
-
-	// Check all subcluster-level retention durations
-	for _, subClusterMap := range retentionLookup.subClusterMap {
-		for _, scRetention := range subClusterMap {
-			if scRetention > 0 && scRetention < shortest {
-				shortest = scRetention
-			}
-		}
-	}
-
-	return shortest
-}
diff --git a/internal/metricstore/configSchema.go b/internal/metricstore/configSchema.go
index 868bacc5..f1a20a73 100644
--- a/internal/metricstore/configSchema.go
+++ b/internal/metricstore/configSchema.go
@@ -46,45 +46,9 @@ const configSchema = `{
       }
     },
     "retention-in-memory": {
-      "description": "Global default: Keep the metrics within memory for given time interval. Retention for X hours, then the metrics would be freed.",
+      "description": "Keep the metrics within memory for given time interval. Retention for X hours, then the metrics would be freed.",
       "type": "string"
     },
-    "clusters": {
-      "description": "Optional per-cluster retention overrides",
-      "type": "array",
-      "items": {
-        "type": "object",
-        "required": ["cluster"],
-        "properties": {
-          "cluster": {
-            "description": "Cluster name",
-            "type": "string"
-          },
-          "retention-in-memory": {
-            "description": "Cluster-specific retention duration (overrides global default)",
-            "type": "string"
-          },
-          "subclusters": {
-            "description": "Optional per-subcluster retention overrides",
-            "type": "array",
-            "items": {
-              "type": "object",
-              "required": ["subcluster"],
-              "properties": {
-                "subcluster": {
-                  "description": "Subcluster name",
-                  "type": "string"
-                },
-                "retention-in-memory": {
-                  "description": "Subcluster-specific retention duration (overrides cluster and global default)",
-                  "type": "string"
-                }
-              }
-            }
-          }
-        }
-      }
-    },
     "nats": {
       "description": "Configuration for accepting published data through NATS.",
       "type": "array",
diff --git a/internal/metricstore/metricstore.go b/internal/metricstore/metricstore.go
index 5a5c3bce..ac8948ae 100644
--- a/internal/metricstore/metricstore.go
+++ b/internal/metricstore/metricstore.go
@@ -98,49 +98,6 @@ func Init(rawConfig json.RawMessage, wg *sync.WaitGroup) {
 		}
 	}
 
-	globalRetention, err := time.ParseDuration(Keys.RetentionInMemory)
-	if err != nil {
-		cclog.Fatal(err)
-	}
-
-	retentionLookup = &retentionConfig{
-		global:        globalRetention,
-		clusterMap:    make(map[string]time.Duration),
-		subClusterMap: make(map[string]map[string]time.Duration),
-	}
-
-	for _, clusterCfg := range Keys.Clusters {
-		if clusterCfg.RetentionInMemory != "" {
-			clusterRetention, err := time.ParseDuration(clusterCfg.RetentionInMemory)
-			if err != nil {
-				cclog.Warnf("[METRICSTORE]> Invalid retention duration for cluster '%s': %s\n", clusterCfg.Cluster, err.Error())
-				continue
-			}
-			retentionLookup.clusterMap[clusterCfg.Cluster] = clusterRetention
-			cclog.Debugf("[METRICSTORE]> Cluster '%s' retention: %s\n", clusterCfg.Cluster, clusterRetention)
-		}
-
-		if len(clusterCfg.SubClusters) > 0 {
-			if retentionLookup.subClusterMap[clusterCfg.Cluster] == nil {
-				retentionLookup.subClusterMap[clusterCfg.Cluster] = make(map[string]time.Duration)
-			}
-
-			for _, scCfg := range clusterCfg.SubClusters {
-				if scCfg.RetentionInMemory != "" {
-					scRetention, err := time.ParseDuration(scCfg.RetentionInMemory)
-					if err != nil {
-						cclog.Warnf("[METRICSTORE]> Invalid retention duration for subcluster '%s/%s': %s\n",
-							clusterCfg.Cluster, scCfg.SubCluster, err.Error())
-						continue
-					}
-					retentionLookup.subClusterMap[clusterCfg.Cluster][scCfg.SubCluster] = scRetention
-					cclog.Debugf("[METRICSTORE]> SubCluster '%s/%s' retention: %s\n",
-						clusterCfg.Cluster, scCfg.SubCluster, scRetention)
-				}
-			}
-		}
-	}
-
 	// Pass the config.MetricStoreKeys
 	InitMetrics(Metrics)
@@ -256,17 +213,18 @@ func Retention(wg *sync.WaitGroup, ctx context.Context) {
 	ms := GetMemoryStore()
 	go func() {
 		defer wg.Done()
-		shortestRetention := GetShortestRetentionDuration()
-		if shortestRetention <= 0 {
+		d, err := time.ParseDuration(Keys.RetentionInMemory)
+		if err != nil {
+			cclog.Fatal(err)
+		}
+		if d <= 0 {
 			return
 		}
 
-		tickInterval := shortestRetention / 2
+		tickInterval := d / 2
 		if tickInterval <= 0 {
 			return
 		}
-		cclog.Debugf("[METRICSTORE]> Retention ticker interval set to %s (half of shortest retention: %s)\n",
-			tickInterval, shortestRetention)
 
 		ticker := time.NewTicker(tickInterval)
 		defer ticker.Stop()
@@ -275,50 +233,13 @@ func Retention(wg *sync.WaitGroup, ctx context.Context) {
 			case <-ctx.Done():
 				return
 			case <-ticker.C:
-				totalFreed := 0
-
-				clusters := ms.ListChildren(nil)
-				for _, cluster := range clusters {
-					retention, err := GetRetentionDuration(cluster, "")
-					if err != nil {
-						cclog.Warnf("[METRICSTORE]> Could not get retention for cluster '%s': %s\n", cluster, err.Error())
-						continue
-					}
-					if retention <= 0 {
-						continue
-					}
-
-					t := time.Now().Add(-retention)
-					cclog.Debugf("[METRICSTORE]> Freeing buffers for cluster '%s' (older than %s, retention: %s)...\n",
-						cluster, t.Format(time.RFC3339), retention)
-
-					subClusters := ms.ListChildren([]string{cluster})
-					for _, subCluster := range subClusters {
-						scRetention, err := GetRetentionDuration(cluster, subCluster)
+				t := time.Now().Add(-d)
+				cclog.Infof("[METRICSTORE]> start freeing buffers (older than %s)...\n", t.Format(time.RFC3339))
+				freed, err := ms.Free(nil, t.Unix())
 				if err != nil {
-							cclog.Warnf("[METRICSTORE]> Could not get retention for subcluster '%s/%s': %s\n",
-								cluster, subCluster, err.Error())
-							continue
-						}
-						if scRetention <= 0 {
-							continue
-						}
-
-						scTime := time.Now().Add(-scRetention)
-						freed, err := ms.Free([]string{cluster, subCluster}, scTime.Unix())
-						if err != nil {
-							cclog.Errorf("[METRICSTORE]> freeing buffers for '%s/%s' failed: %s\n",
-								cluster, subCluster, err.Error())
-						} else if freed > 0 {
-							cclog.Debugf("[METRICSTORE]> freed %d buffers for '%s/%s' (retention: %s)\n",
-								freed, cluster, subCluster, scRetention)
-							totalFreed += freed
-						}
-					}
-				}
-
-				if totalFreed > 0 {
-					cclog.Infof("[METRICSTORE]> Total buffers freed: %d\n", totalFreed)
+					cclog.Errorf("[METRICSTORE]> freeing up buffers failed: %s\n", err.Error())
+				} else {
+					cclog.Infof("[METRICSTORE]> done: %d buffers freed\n", freed)
 				}
 			}
 		}