diff --git a/.github/workflows/Release.yml b/.github/workflows/Release.yml deleted file mode 100644 index 8fc8755..0000000 --- a/.github/workflows/Release.yml +++ /dev/null @@ -1,331 +0,0 @@ -# See: https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions - -# Workflow name -name: Release - -# Run on tag push -on: - push: - tags: - - '**' - -jobs: - - # - # Build on AlmaLinux 8.5 using golang-1.18.2 - # - AlmaLinux-RPM-build: - runs-on: ubuntu-latest - # See: https://hub.docker.com/_/almalinux - container: almalinux:8.5 - # The job outputs link to the outputs of the 'rpmrename' step - # Only job outputs can be used in child jobs - outputs: - rpm : ${{steps.rpmrename.outputs.RPM}} - srpm : ${{steps.rpmrename.outputs.SRPM}} - steps: - - # Use dnf to install development packages - - name: Install development packages - run: | - dnf --assumeyes group install "Development Tools" "RPM Development Tools" - dnf --assumeyes install wget openssl-devel diffutils delve which npm - dnf --assumeyes install 'dnf-command(builddep)' - - # Checkout git repository and submodules - # fetch-depth must be 0 to use git describe - # See: https://github.com/marketplace/actions/checkout - - name: Checkout - uses: actions/checkout@v2 - with: - submodules: recursive - fetch-depth: 0 - - # Use dnf to install build dependencies - - name: Install build dependencies - run: | - wget -q http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-1.18.2-1.module_el8.7.0+1173+5d37c0fd.x86_64.rpm \ - http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-bin-1.18.2-1.module_el8.7.0+1173+5d37c0fd.x86_64.rpm \ - http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-src-1.18.2-1.module_el8.7.0+1173+5d37c0fd.noarch.rpm \ - http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/go-toolset-1.18.2-1.module_el8.7.0+1173+5d37c0fd.x86_64.rpm - rpm -i go*.rpm - npm install --global yarn rollup svelte rollup-plugin-svelte - #dnf --assumeyes builddep build/package/cc-backend.spec - - - name: RPM build ClusterCockpit - id: rpmbuild - run: make RPM - - # AlmaLinux 8.5 is a derivative of Red Hat Enterprise Linux 8 (UBI8), - # so the created RPMs both contain the substring 'el8' in their file names. - # This step replaces the substring 'el8' with 'alma85'. It uses a move operation - # because it is unclear whether the default AlmaLinux 8.5 container contains the - # 'rename' command. This way we also get the new names for the step outputs.
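The `${OLD_RPM/el8/alma85}` expansion used in the rename step below substitutes only the first occurrence of 'el8' in the file name. A minimal Go sketch of the same first-occurrence substitution, using a hypothetical file name (illustrative only, not part of the workflow):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical RPM file name; the real names come from the rpmbuild step outputs.
	oldRPM := "cc-backend-1.0.0-1.el8.x86_64.rpm"
	// strings.Replace with n=1 mirrors bash's ${var/old/new}, which also
	// replaces only the first match.
	newRPM := strings.Replace(oldRPM, "el8", "alma85", 1)
	fmt.Println(newRPM) // cc-backend-1.0.0-1.alma85.x86_64.rpm
}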
- - name: Rename RPMs (s/el8/alma85/) - id: rpmrename - run: | - OLD_RPM="${{steps.rpmbuild.outputs.RPM}}" - OLD_SRPM="${{steps.rpmbuild.outputs.SRPM}}" - NEW_RPM="${OLD_RPM/el8/alma85}" - NEW_SRPM=${OLD_SRPM/el8/alma85} - mv "${OLD_RPM}" "${NEW_RPM}" - mv "${OLD_SRPM}" "${NEW_SRPM}" - echo "::set-output name=SRPM::${NEW_SRPM}" - echo "::set-output name=RPM::${NEW_RPM}" - - # See: https://github.com/actions/upload-artifact - - name: Save RPM as artifact - uses: actions/upload-artifact@v2 - with: - name: cc-backend RPM for AlmaLinux 8.5 - path: ${{ steps.rpmrename.outputs.RPM }} - - name: Save SRPM as artifact - uses: actions/upload-artifact@v2 - with: - name: cc-backend SRPM for AlmaLinux 8.5 - path: ${{ steps.rpmrename.outputs.SRPM }} - - # - # Build on UBI 8 using golang-1.18.2 - # - UBI-8-RPM-build: - runs-on: ubuntu-latest - # See: https://catalog.redhat.com/software/containers/ubi8/ubi/5c359854d70cc534b3a3784e?container-tabs=gti - container: registry.access.redhat.com/ubi8/ubi:8.5-226.1645809065 - # The job outputs link to the outputs of the 'rpmbuild' step - outputs: - rpm : ${{steps.rpmbuild.outputs.RPM}} - srpm : ${{steps.rpmbuild.outputs.SRPM}} - steps: - - # Use dnf to install development packages - - name: Install development packages - run: dnf --assumeyes --disableplugin=subscription-manager install rpm-build go-srpm-macros rpm-build-libs rpm-libs gcc make python38 git wget openssl-devel diffutils delve which - - # Checkout git repository and submodules - # fetch-depth must be 0 to use git describe - # See: https://github.com/marketplace/actions/checkout - - name: Checkout - uses: actions/checkout@v2 - with: - submodules: recursive - fetch-depth: 0 - - # Use dnf to install build dependencies - - name: Install build dependencies - run: | - wget -q http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-1.18.2-1.module_el8.7.0+1173+5d37c0fd.x86_64.rpm \ - http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-bin-1.18.2-1.module_el8.7.0+1173+5d37c0fd.x86_64.rpm \ - http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-src-1.18.2-1.module_el8.7.0+1173+5d37c0fd.noarch.rpm \ - http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/go-toolset-1.18.2-1.module_el8.7.0+1173+5d37c0fd.x86_64.rpm - rpm -i go*.rpm - dnf --assumeyes --disableplugin=subscription-manager install npm - npm install --global yarn rollup svelte rollup-plugin-svelte - #dnf --assumeyes builddep build/package/cc-backend.spec - - - name: RPM build ClusterCockpit - id: rpmbuild - run: make RPM - - # See: https://github.com/actions/upload-artifact - - name: Save RPM as artifact - uses: actions/upload-artifact@v2 - with: - name: cc-backend RPM for UBI 8 - path: ${{ steps.rpmbuild.outputs.RPM }} - - name: Save SRPM as artifact - uses: actions/upload-artifact@v2 - with: - name: cc-backend SRPM for UBI 8 - path: ${{ steps.rpmbuild.outputs.SRPM }} - - # - # Build on Ubuntu 20.04 using official go 1.19.1 package - # - Ubuntu-focal-build: - runs-on: ubuntu-latest - container: ubuntu:20.04 - # The job outputs link to the outputs of the 'debrename' step - # Only job outputs can be used in child jobs - outputs: - deb : ${{steps.debrename.outputs.DEB}} - steps: - # Use apt to install development packages - - name: Install development packages - run: | - apt update && apt --assume-yes upgrade - apt --assume-yes install build-essential sed git wget bash - apt --assume-yes install npm - npm install --global yarn rollup svelte rollup-plugin-svelte - # 
Checkout git repository and submodules - # fetch-depth must be 0 to use git describe - # See: https://github.com/marketplace/actions/checkout - - name: Checkout - uses: actions/checkout@v2 - with: - submodules: recursive - fetch-depth: 0 - # Use official golang package - - name: Install Golang - run: | - wget -q https://go.dev/dl/go1.19.1.linux-amd64.tar.gz - tar -C /usr/local -xzf go1.19.1.linux-amd64.tar.gz - export PATH=/usr/local/go/bin:/usr/local/go/pkg/tool/linux_amd64:$PATH - go version - - name: DEB build ClusterCockpit - id: dpkg-build - run: | - ls -la - pwd - env - export PATH=/usr/local/go/bin:/usr/local/go/pkg/tool/linux_amd64:$PATH - git config --global --add safe.directory $(pwd) - make DEB - - name: Rename DEB (add '_ubuntu20.04') - id: debrename - run: | - OLD_DEB_NAME=$(echo "${{steps.dpkg-build.outputs.DEB}}" | rev | cut -d '.' -f 2- | rev) - NEW_DEB_FILE="${OLD_DEB_NAME}_ubuntu20.04.deb" - mv "${{steps.dpkg-build.outputs.DEB}}" "${NEW_DEB_FILE}" - echo "::set-output name=DEB::${NEW_DEB_FILE}" - # See: https://github.com/actions/upload-artifact - - name: Save DEB as artifact - uses: actions/upload-artifact@v2 - with: - name: cc-backend DEB for Ubuntu 20.04 - path: ${{ steps.debrename.outputs.DEB }} - - # - # Build on Ubuntu 22.04 using official go 1.19.1 package - # - Ubuntu-jammy-build: - runs-on: ubuntu-latest - container: ubuntu:22.04 - # The job outputs link to the outputs of the 'debrename' step - # Only job outputs can be used in child jobs - outputs: - deb : ${{steps.debrename.outputs.DEB}} - steps: - # Use apt to install development packages - - name: Install development packages - run: | - apt update && apt --assume-yes upgrade - apt --assume-yes install build-essential sed git wget bash npm - npm install --global yarn rollup svelte rollup-plugin-svelte - # Checkout git repository and submodules - # fetch-depth must be 0 to use git describe - # See: https://github.com/marketplace/actions/checkout - - name: Checkout - uses: actions/checkout@v2 - with: - submodules: recursive - fetch-depth: 0 - # Use official golang package - - name: Install Golang - run: | - wget -q https://go.dev/dl/go1.19.1.linux-amd64.tar.gz - tar -C /usr/local -xzf go1.19.1.linux-amd64.tar.gz - export PATH=/usr/local/go/bin:/usr/local/go/pkg/tool/linux_amd64:$PATH - go version - - name: DEB build ClusterCockpit - id: dpkg-build - run: | - ls -la - pwd - env - export PATH=/usr/local/go/bin:/usr/local/go/pkg/tool/linux_amd64:$PATH - git config --global --add safe.directory $(pwd) - make DEB - - name: Rename DEB (add '_ubuntu22.04') - id: debrename - run: | - OLD_DEB_NAME=$(echo "${{steps.dpkg-build.outputs.DEB}}" | rev | cut -d '.'
-f 2- | rev) - NEW_DEB_FILE="${OLD_DEB_NAME}_ubuntu22.04.deb" - mv "${{steps.dpkg-build.outputs.DEB}}" "${NEW_DEB_FILE}" - echo "::set-output name=DEB::${NEW_DEB_FILE}" - # See: https://github.com/actions/upload-artifact - - name: Save DEB as artifact - uses: actions/upload-artifact@v2 - with: - name: cc-backend DEB for Ubuntu 22.04 - path: ${{ steps.debrename.outputs.DEB }} - - # - # Create release with fresh packages - # - Release: - runs-on: ubuntu-latest - # We need the packages, so add the build jobs as dependencies - needs: [AlmaLinux-RPM-build, UBI-8-RPM-build, Ubuntu-focal-build, Ubuntu-jammy-build] - - steps: - # See: https://github.com/actions/download-artifact - - name: Download AlmaLinux 8.5 RPM - uses: actions/download-artifact@v2 - with: - name: cc-backend RPM for AlmaLinux 8.5 - - name: Download AlmaLinux 8.5 SRPM - uses: actions/download-artifact@v2 - with: - name: cc-backend SRPM for AlmaLinux 8.5 - - - name: Download UBI 8 RPM - uses: actions/download-artifact@v2 - with: - name: cc-backend RPM for UBI 8 - - name: Download UBI 8 SRPM - uses: actions/download-artifact@v2 - with: - name: cc-backend SRPM for UBI 8 - - - name: Download Ubuntu 20.04 DEB - uses: actions/download-artifact@v2 - with: - name: cc-backend DEB for Ubuntu 20.04 - - - name: Download Ubuntu 22.04 DEB - uses: actions/download-artifact@v2 - with: - name: cc-backend DEB for Ubuntu 22.04 - - # The download actions do not publish the names of the downloaded files, - # so we re-use the job outputs of the parent jobs. The files are all - # downloaded to the current folder. - # The gh-release action used afterwards does not take a file list as a - # single input; every file has to be listed individually under 'files'. - # This step therefore creates one output per package file. - - name: Set package variables - id: files - run: | - ALMA_85_RPM=$(basename "${{ needs.AlmaLinux-RPM-build.outputs.rpm}}") - ALMA_85_SRPM=$(basename "${{ needs.AlmaLinux-RPM-build.outputs.srpm}}") - UBI_8_RPM=$(basename "${{ needs.UBI-8-RPM-build.outputs.rpm}}") - UBI_8_SRPM=$(basename "${{ needs.UBI-8-RPM-build.outputs.srpm}}") - U_2004_DEB=$(basename "${{ needs.Ubuntu-focal-build.outputs.deb}}") - U_2204_DEB=$(basename "${{ needs.Ubuntu-jammy-build.outputs.deb}}") - echo "ALMA_85_RPM::${ALMA_85_RPM}" - echo "ALMA_85_SRPM::${ALMA_85_SRPM}" - echo "UBI_8_RPM::${UBI_8_RPM}" - echo "UBI_8_SRPM::${UBI_8_SRPM}" - echo "U_2004_DEB::${U_2004_DEB}" - echo "U_2204_DEB::${U_2204_DEB}" - echo "::set-output name=ALMA_85_RPM::${ALMA_85_RPM}" - echo "::set-output name=ALMA_85_SRPM::${ALMA_85_SRPM}" - echo "::set-output name=UBI_8_RPM::${UBI_8_RPM}" - echo "::set-output name=UBI_8_SRPM::${UBI_8_SRPM}" - echo "::set-output name=U_2004_DEB::${U_2004_DEB}" - echo "::set-output name=U_2204_DEB::${U_2204_DEB}" - - # See: https://github.com/softprops/action-gh-release - - name: Release - uses: softprops/action-gh-release@v1 - if: startsWith(github.ref, 'refs/tags/') - with: - name: cc-backend-${{github.ref_name}} - files: | - ${{ steps.files.outputs.ALMA_85_RPM }} - ${{ steps.files.outputs.ALMA_85_SRPM }} - ${{ steps.files.outputs.UBI_8_RPM }} - ${{ steps.files.outputs.UBI_8_SRPM }} - ${{ steps.files.outputs.U_2004_DEB }} - ${{ steps.files.outputs.U_2204_DEB }} diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index e4aa02b..a8a7429 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -7,7 +7,7 @@ jobs: - name: Install Go uses: actions/setup-go@v4 with: - go-version: 1.22.x + go-version: 1.24.x - name: Checkout code uses: actions/checkout@v3 - name: Build, Vet & Test diff --git
a/api/schema.graphqls b/api/schema.graphqls index 9385a6f..ed8843c 100644 --- a/api/schema.graphqls +++ b/api/schema.graphqls @@ -137,11 +137,6 @@ type JobMetricWithName { metric: JobMetric! } -type JobMetricStatWithName { - name: String! - stats: MetricStatistics! -} - type JobMetric { unit: Unit timestep: Int! @@ -156,6 +151,30 @@ type Series { data: [NullableFloat!]! } +type StatsSeries { + mean: [NullableFloat!]! + median: [NullableFloat!]! + min: [NullableFloat!]! + max: [NullableFloat!]! +} + +type JobStatsWithScope { + name: String! + scope: MetricScope! + stats: [ScopedStats!]! +} + +type ScopedStats { + hostname: String! + id: String + data: MetricStatistics! +} + +type JobStats { + name: String! + stats: MetricStatistics! +} + type Unit { base: String! prefix: String @@ -167,13 +186,6 @@ type MetricStatistics { max: Float! } -type StatsSeries { - mean: [NullableFloat!]! - median: [NullableFloat!]! - min: [NullableFloat!]! - max: [NullableFloat!]! -} - type MetricFootprints { metric: String! data: [NullableFloat!]! @@ -247,7 +259,8 @@ type Query { job(id: ID!): Job jobMetrics(id: ID!, metrics: [String!], scopes: [MetricScope!], resolution: Int): [JobMetricWithName!]! - jobMetricStats(id: ID!, metrics: [String!]): [JobMetricStatWithName!]! + jobStats(id: ID!, metrics: [String!]): [JobStats!]! + scopedJobStats(id: ID!, metrics: [String!], scopes: [MetricScope!]): [JobStatsWithScope!]! jobsFootprints(filter: [JobFilter!], metrics: [String!]!): Footprints jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList! diff --git a/internal/graph/generated/generated.go b/internal/graph/generated/generated.go index b4c6e19..e5c9ca2 100644 --- a/internal/graph/generated/generated.go +++ b/internal/graph/generated/generated.go @@ -156,11 +156,6 @@ type ComplexityRoot struct { Unit func(childComplexity int) int } - JobMetricStatWithName struct { - Name func(childComplexity int) int - Stats func(childComplexity int) int - } - JobMetricWithName struct { Metric func(childComplexity int) int Name func(childComplexity int) int @@ -175,6 +170,17 @@ type ComplexityRoot struct { Offset func(childComplexity int) int } + JobStats struct { + Name func(childComplexity int) int + Stats func(childComplexity int) int + } + + JobStatsWithScope struct { + Name func(childComplexity int) int + Scope func(childComplexity int) int + Stats func(childComplexity int) int + } + JobsStatistics struct { HistDuration func(childComplexity int) int HistMetrics func(childComplexity int) int @@ -268,14 +274,15 @@ type ComplexityRoot struct { Clusters func(childComplexity int) int GlobalMetrics func(childComplexity int) int Job func(childComplexity int, id string) int - JobMetricStats func(childComplexity int, id string, metrics []string) int JobMetrics func(childComplexity int, id string, metrics []string, scopes []schema.MetricScope, resolution *int) int + JobStats func(childComplexity int, id string, metrics []string) int Jobs func(childComplexity int, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) int JobsFootprints func(childComplexity int, filter []*model.JobFilter, metrics []string) int JobsStatistics func(childComplexity int, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate, numDurationBins *string, numMetricBins *int) int NodeMetrics func(childComplexity int, cluster string, nodes []string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time) int NodeMetricsList 
func(childComplexity int, cluster string, subCluster string, nodeFilter string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time, page *model.PageRequest, resolution *int) int RooflineHeatmap func(childComplexity int, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) int + ScopedJobStats func(childComplexity int, id string, metrics []string, scopes []schema.MetricScope) int Tags func(childComplexity int) int User func(childComplexity int, username string) int } @@ -287,6 +294,12 @@ type ComplexityRoot struct { Hostname func(childComplexity int) int } + ScopedStats struct { + Data func(childComplexity int) int + Hostname func(childComplexity int) int + ID func(childComplexity int) int + } + Series struct { Data func(childComplexity int) int Hostname func(childComplexity int) int @@ -396,7 +409,8 @@ type QueryResolver interface { AllocatedNodes(ctx context.Context, cluster string) ([]*model.Count, error) Job(ctx context.Context, id string) (*schema.Job, error) JobMetrics(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope, resolution *int) ([]*model.JobMetricWithName, error) - JobMetricStats(ctx context.Context, id string, metrics []string) ([]*model.JobMetricStatWithName, error) + JobStats(ctx context.Context, id string, metrics []string) ([]*model.JobStats, error) + ScopedJobStats(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope) ([]*model.JobStatsWithScope, error) JobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) (*model.Footprints, error) Jobs(ctx context.Context, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) (*model.JobResultList, error) JobsStatistics(ctx context.Context, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate, numDurationBins *string, numMetricBins *int) ([]*model.JobsStatistics, error) @@ -861,20 +875,6 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.JobMetric.Unit(childComplexity), true - case "JobMetricStatWithName.name": - if e.complexity.JobMetricStatWithName.Name == nil { - break - } - - return e.complexity.JobMetricStatWithName.Name(childComplexity), true - - case "JobMetricStatWithName.stats": - if e.complexity.JobMetricStatWithName.Stats == nil { - break - } - - return e.complexity.JobMetricStatWithName.Stats(childComplexity), true - case "JobMetricWithName.metric": if e.complexity.JobMetricWithName.Metric == nil { break @@ -931,6 +931,41 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.JobResultList.Offset(childComplexity), true + case "JobStats.name": + if e.complexity.JobStats.Name == nil { + break + } + + return e.complexity.JobStats.Name(childComplexity), true + + case "JobStats.stats": + if e.complexity.JobStats.Stats == nil { + break + } + + return e.complexity.JobStats.Stats(childComplexity), true + + case "JobStatsWithScope.name": + if e.complexity.JobStatsWithScope.Name == nil { + break + } + + return e.complexity.JobStatsWithScope.Name(childComplexity), true + + case "JobStatsWithScope.scope": + if e.complexity.JobStatsWithScope.Scope == nil { + break + } + + return e.complexity.JobStatsWithScope.Scope(childComplexity), true + + case "JobStatsWithScope.stats": + if e.complexity.JobStatsWithScope.Stats == nil { + break + } + + return 
e.complexity.JobStatsWithScope.Stats(childComplexity), true + case "JobsStatistics.histDuration": if e.complexity.JobsStatistics.HistDuration == nil { break @@ -1400,18 +1435,6 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Query.Job(childComplexity, args["id"].(string)), true - case "Query.jobMetricStats": - if e.complexity.Query.JobMetricStats == nil { - break - } - - args, err := ec.field_Query_jobMetricStats_args(context.TODO(), rawArgs) - if err != nil { - return 0, false - } - - return e.complexity.Query.JobMetricStats(childComplexity, args["id"].(string), args["metrics"].([]string)), true - case "Query.jobMetrics": if e.complexity.Query.JobMetrics == nil { break @@ -1424,6 +1447,18 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Query.JobMetrics(childComplexity, args["id"].(string), args["metrics"].([]string), args["scopes"].([]schema.MetricScope), args["resolution"].(*int)), true + case "Query.jobStats": + if e.complexity.Query.JobStats == nil { + break + } + + args, err := ec.field_Query_jobStats_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.JobStats(childComplexity, args["id"].(string), args["metrics"].([]string)), true + case "Query.jobs": if e.complexity.Query.Jobs == nil { break @@ -1496,6 +1531,18 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Query.RooflineHeatmap(childComplexity, args["filter"].([]*model.JobFilter), args["rows"].(int), args["cols"].(int), args["minX"].(float64), args["minY"].(float64), args["maxX"].(float64), args["maxY"].(float64)), true + case "Query.scopedJobStats": + if e.complexity.Query.ScopedJobStats == nil { + break + } + + args, err := ec.field_Query_scopedJobStats_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.ScopedJobStats(childComplexity, args["id"].(string), args["metrics"].([]string), args["scopes"].([]schema.MetricScope)), true + case "Query.tags": if e.complexity.Query.Tags == nil { break @@ -1543,6 +1590,27 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Resource.Hostname(childComplexity), true + case "ScopedStats.data": + if e.complexity.ScopedStats.Data == nil { + break + } + + return e.complexity.ScopedStats.Data(childComplexity), true + + case "ScopedStats.hostname": + if e.complexity.ScopedStats.Hostname == nil { + break + } + + return e.complexity.ScopedStats.Hostname(childComplexity), true + + case "ScopedStats.id": + if e.complexity.ScopedStats.ID == nil { + break + } + + return e.complexity.ScopedStats.ID(childComplexity), true + case "Series.data": if e.complexity.Series.Data == nil { break @@ -2131,11 +2199,6 @@ type JobMetricWithName { metric: JobMetric! } -type JobMetricStatWithName { - name: String! - stats: MetricStatistics! -} - type JobMetric { unit: Unit timestep: Int! @@ -2150,6 +2213,30 @@ type Series { data: [NullableFloat!]! } +type StatsSeries { + mean: [NullableFloat!]! + median: [NullableFloat!]! + min: [NullableFloat!]! + max: [NullableFloat!]! +} + +type JobStatsWithScope { + name: String! + scope: MetricScope! + stats: [ScopedStats!]! +} + +type ScopedStats { + hostname: String! + id: String + data: MetricStatistics! +} + +type JobStats { + name: String! + stats: MetricStatistics! +} + type Unit { base: String! prefix: String @@ -2161,13 +2248,6 @@ type MetricStatistics { max: Float! 
} -type StatsSeries { - mean: [NullableFloat!]! - median: [NullableFloat!]! - min: [NullableFloat!]! - max: [NullableFloat!]! -} - type MetricFootprints { metric: String! data: [NullableFloat!]! @@ -2241,7 +2321,8 @@ type Query { job(id: ID!): Job jobMetrics(id: ID!, metrics: [String!], scopes: [MetricScope!], resolution: Int): [JobMetricWithName!]! - jobMetricStats(id: ID!, metrics: [String!]): [JobMetricStatWithName!]! + jobStats(id: ID!, metrics: [String!]): [JobStats!]! + scopedJobStats(id: ID!, metrics: [String!], scopes: [MetricScope!]): [JobStatsWithScope!]! jobsFootprints(filter: [JobFilter!], metrics: [String!]!): Footprints jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList! @@ -2694,57 +2775,6 @@ func (ec *executionContext) field_Query_allocatedNodes_argsCluster( return zeroVal, nil } -func (ec *executionContext) field_Query_jobMetricStats_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { - var err error - args := map[string]any{} - arg0, err := ec.field_Query_jobMetricStats_argsID(ctx, rawArgs) - if err != nil { - return nil, err - } - args["id"] = arg0 - arg1, err := ec.field_Query_jobMetricStats_argsMetrics(ctx, rawArgs) - if err != nil { - return nil, err - } - args["metrics"] = arg1 - return args, nil -} -func (ec *executionContext) field_Query_jobMetricStats_argsID( - ctx context.Context, - rawArgs map[string]any, -) (string, error) { - if _, ok := rawArgs["id"]; !ok { - var zeroVal string - return zeroVal, nil - } - - ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("id")) - if tmp, ok := rawArgs["id"]; ok { - return ec.unmarshalNID2string(ctx, tmp) - } - - var zeroVal string - return zeroVal, nil -} - -func (ec *executionContext) field_Query_jobMetricStats_argsMetrics( - ctx context.Context, - rawArgs map[string]any, -) ([]string, error) { - if _, ok := rawArgs["metrics"]; !ok { - var zeroVal []string - return zeroVal, nil - } - - ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("metrics")) - if tmp, ok := rawArgs["metrics"]; ok { - return ec.unmarshalOString2ᚕstringᚄ(ctx, tmp) - } - - var zeroVal []string - return zeroVal, nil -} - func (ec *executionContext) field_Query_jobMetrics_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { var err error args := map[string]any{} @@ -2842,6 +2872,57 @@ func (ec *executionContext) field_Query_jobMetrics_argsResolution( return zeroVal, nil } +func (ec *executionContext) field_Query_jobStats_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { + var err error + args := map[string]any{} + arg0, err := ec.field_Query_jobStats_argsID(ctx, rawArgs) + if err != nil { + return nil, err + } + args["id"] = arg0 + arg1, err := ec.field_Query_jobStats_argsMetrics(ctx, rawArgs) + if err != nil { + return nil, err + } + args["metrics"] = arg1 + return args, nil +} +func (ec *executionContext) field_Query_jobStats_argsID( + ctx context.Context, + rawArgs map[string]any, +) (string, error) { + if _, ok := rawArgs["id"]; !ok { + var zeroVal string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("id")) + if tmp, ok := rawArgs["id"]; ok { + return ec.unmarshalNID2string(ctx, tmp) + } + + var zeroVal string + return zeroVal, nil +} + +func (ec *executionContext) field_Query_jobStats_argsMetrics( + ctx context.Context, + rawArgs map[string]any, +) ([]string, error) { + if _, ok := rawArgs["metrics"]; !ok { + var zeroVal []string + return zeroVal, nil + } + + ctx = 
graphql.WithPathContext(ctx, graphql.NewPathWithField("metrics")) + if tmp, ok := rawArgs["metrics"]; ok { + return ec.unmarshalOString2ᚕstringᚄ(ctx, tmp) + } + + var zeroVal []string + return zeroVal, nil +} + func (ec *executionContext) field_Query_job_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { var err error args := map[string]any{} @@ -3682,6 +3763,80 @@ func (ec *executionContext) field_Query_rooflineHeatmap_argsMaxY( return zeroVal, nil } +func (ec *executionContext) field_Query_scopedJobStats_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { + var err error + args := map[string]any{} + arg0, err := ec.field_Query_scopedJobStats_argsID(ctx, rawArgs) + if err != nil { + return nil, err + } + args["id"] = arg0 + arg1, err := ec.field_Query_scopedJobStats_argsMetrics(ctx, rawArgs) + if err != nil { + return nil, err + } + args["metrics"] = arg1 + arg2, err := ec.field_Query_scopedJobStats_argsScopes(ctx, rawArgs) + if err != nil { + return nil, err + } + args["scopes"] = arg2 + return args, nil +} +func (ec *executionContext) field_Query_scopedJobStats_argsID( + ctx context.Context, + rawArgs map[string]any, +) (string, error) { + if _, ok := rawArgs["id"]; !ok { + var zeroVal string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("id")) + if tmp, ok := rawArgs["id"]; ok { + return ec.unmarshalNID2string(ctx, tmp) + } + + var zeroVal string + return zeroVal, nil +} + +func (ec *executionContext) field_Query_scopedJobStats_argsMetrics( + ctx context.Context, + rawArgs map[string]any, +) ([]string, error) { + if _, ok := rawArgs["metrics"]; !ok { + var zeroVal []string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("metrics")) + if tmp, ok := rawArgs["metrics"]; ok { + return ec.unmarshalOString2ᚕstringᚄ(ctx, tmp) + } + + var zeroVal []string + return zeroVal, nil +} + +func (ec *executionContext) field_Query_scopedJobStats_argsScopes( + ctx context.Context, + rawArgs map[string]any, +) ([]schema.MetricScope, error) { + if _, ok := rawArgs["scopes"]; !ok { + var zeroVal []schema.MetricScope + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("scopes")) + if tmp, ok := rawArgs["scopes"]; ok { + return ec.unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScopeᚄ(ctx, tmp) + } + + var zeroVal []schema.MetricScope + return zeroVal, nil +} + func (ec *executionContext) field_Query_user_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { var err error args := map[string]any{} @@ -6663,102 +6818,6 @@ func (ec *executionContext) fieldContext_JobMetric_statisticsSeries(_ context.Co return fc, nil } -func (ec *executionContext) _JobMetricStatWithName_name(ctx context.Context, field graphql.CollectedField, obj *model.JobMetricStatWithName) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_JobMetricStatWithName_name(ctx, field) - if err != nil { - return graphql.Null - } - ctx = graphql.WithFieldContext(ctx, fc) - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { - ctx = rctx // use context from middleware stack in children - return obj.Name, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - if !graphql.HasFieldError(ctx, fc) { - ec.Errorf(ctx, "must not be null") - } - 
return graphql.Null - } - res := resTmp.(string) - fc.Result = res - return ec.marshalNString2string(ctx, field.Selections, res) -} - -func (ec *executionContext) fieldContext_JobMetricStatWithName_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { - fc = &graphql.FieldContext{ - Object: "JobMetricStatWithName", - Field: field, - IsMethod: false, - IsResolver: false, - Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - return nil, errors.New("field of type String does not have child fields") - }, - } - return fc, nil -} - -func (ec *executionContext) _JobMetricStatWithName_stats(ctx context.Context, field graphql.CollectedField, obj *model.JobMetricStatWithName) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_JobMetricStatWithName_stats(ctx, field) - if err != nil { - return graphql.Null - } - ctx = graphql.WithFieldContext(ctx, fc) - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { - ctx = rctx // use context from middleware stack in children - return obj.Stats, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - if !graphql.HasFieldError(ctx, fc) { - ec.Errorf(ctx, "must not be null") - } - return graphql.Null - } - res := resTmp.(*schema.MetricStatistics) - fc.Result = res - return ec.marshalNMetricStatistics2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricStatistics(ctx, field.Selections, res) -} - -func (ec *executionContext) fieldContext_JobMetricStatWithName_stats(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { - fc = &graphql.FieldContext{ - Object: "JobMetricStatWithName", - Field: field, - IsMethod: false, - IsResolver: false, - Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - switch field.Name { - case "avg": - return ec.fieldContext_MetricStatistics_avg(ctx, field) - case "min": - return ec.fieldContext_MetricStatistics_min(ctx, field) - case "max": - return ec.fieldContext_MetricStatistics_max(ctx, field) - } - return nil, fmt.Errorf("no field named %q was found under type MetricStatistics", field.Name) - }, - } - return fc, nil -} - func (ec *executionContext) _JobMetricWithName_name(ctx context.Context, field graphql.CollectedField, obj *model.JobMetricWithName) (ret graphql.Marshaler) { fc, err := ec.fieldContext_JobMetricWithName_name(ctx, field) if err != nil { @@ -7163,6 +7222,242 @@ func (ec *executionContext) fieldContext_JobResultList_hasNextPage(_ context.Con return fc, nil } +func (ec *executionContext) _JobStats_name(ctx context.Context, field graphql.CollectedField, obj *model.JobStats) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_JobStats_name(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.Name, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return 
ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_JobStats_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "JobStats", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _JobStats_stats(ctx context.Context, field graphql.CollectedField, obj *model.JobStats) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_JobStats_stats(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.Stats, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*schema.MetricStatistics) + fc.Result = res + return ec.marshalNMetricStatistics2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricStatistics(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_JobStats_stats(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "JobStats", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "avg": + return ec.fieldContext_MetricStatistics_avg(ctx, field) + case "min": + return ec.fieldContext_MetricStatistics_min(ctx, field) + case "max": + return ec.fieldContext_MetricStatistics_max(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type MetricStatistics", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) _JobStatsWithScope_name(ctx context.Context, field graphql.CollectedField, obj *model.JobStatsWithScope) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_JobStatsWithScope_name(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.Name, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_JobStatsWithScope_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "JobStatsWithScope", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + 
}, + } + return fc, nil +} + +func (ec *executionContext) _JobStatsWithScope_scope(ctx context.Context, field graphql.CollectedField, obj *model.JobStatsWithScope) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_JobStatsWithScope_scope(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.Scope, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(schema.MetricScope) + fc.Result = res + return ec.marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScope(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_JobStatsWithScope_scope(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "JobStatsWithScope", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type MetricScope does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _JobStatsWithScope_stats(ctx context.Context, field graphql.CollectedField, obj *model.JobStatsWithScope) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_JobStatsWithScope_stats(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.Stats, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]*model.ScopedStats) + fc.Result = res + return ec.marshalNScopedStats2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐScopedStatsᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_JobStatsWithScope_stats(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "JobStatsWithScope", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "hostname": + return ec.fieldContext_ScopedStats_hostname(ctx, field) + case "id": + return ec.fieldContext_ScopedStats_id(ctx, field) + case "data": + return ec.fieldContext_ScopedStats_data(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type ScopedStats", field.Name) + }, + } + return fc, nil +} + func (ec *executionContext) _JobsStatistics_id(ctx context.Context, field graphql.CollectedField, obj *model.JobsStatistics) (ret graphql.Marshaler) { fc, err := ec.fieldContext_JobsStatistics_id(ctx, field) if err != nil { @@ -10296,8 +10591,8 @@ func (ec *executionContext) fieldContext_Query_jobMetrics(ctx context.Context, f return fc, nil } 
-func (ec *executionContext) _Query_jobMetricStats(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_Query_jobMetricStats(ctx, field) +func (ec *executionContext) _Query_jobStats(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Query_jobStats(ctx, field) if err != nil { return graphql.Null } @@ -10310,7 +10605,7 @@ func (ec *executionContext) _Query_jobMetricStats(ctx context.Context, field gra }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { ctx = rctx // use context from middleware stack in children - return ec.resolvers.Query().JobMetricStats(rctx, fc.Args["id"].(string), fc.Args["metrics"].([]string)) + return ec.resolvers.Query().JobStats(rctx, fc.Args["id"].(string), fc.Args["metrics"].([]string)) }) if err != nil { ec.Error(ctx, err) @@ -10322,12 +10617,12 @@ func (ec *executionContext) _Query_jobMetricStats(ctx context.Context, field gra } return graphql.Null } - res := resTmp.([]*model.JobMetricStatWithName) + res := resTmp.([]*model.JobStats) fc.Result = res - return ec.marshalNJobMetricStatWithName2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobMetricStatWithNameᚄ(ctx, field.Selections, res) + return ec.marshalNJobStats2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobStatsᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Query_jobMetricStats(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Query_jobStats(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Query", Field: field, @@ -10336,11 +10631,11 @@ func (ec *executionContext) fieldContext_Query_jobMetricStats(ctx context.Contex Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { switch field.Name { case "name": - return ec.fieldContext_JobMetricStatWithName_name(ctx, field) + return ec.fieldContext_JobStats_name(ctx, field) case "stats": - return ec.fieldContext_JobMetricStatWithName_stats(ctx, field) + return ec.fieldContext_JobStats_stats(ctx, field) } - return nil, fmt.Errorf("no field named %q was found under type JobMetricStatWithName", field.Name) + return nil, fmt.Errorf("no field named %q was found under type JobStats", field.Name) }, } defer func() { @@ -10350,7 +10645,70 @@ func (ec *executionContext) fieldContext_Query_jobMetricStats(ctx context.Contex } }() ctx = graphql.WithFieldContext(ctx, fc) - if fc.Args, err = ec.field_Query_jobMetricStats_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + if fc.Args, err = ec.field_Query_jobStats_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } + return fc, nil +} + +func (ec *executionContext) _Query_scopedJobStats(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Query_scopedJobStats(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().ScopedJobStats(rctx, fc.Args["id"].(string), fc.Args["metrics"].([]string), 
fc.Args["scopes"].([]schema.MetricScope)) + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]*model.JobStatsWithScope) + fc.Result = res + return ec.marshalNJobStatsWithScope2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobStatsWithScopeᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Query_scopedJobStats(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Query", + Field: field, + IsMethod: true, + IsResolver: true, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "name": + return ec.fieldContext_JobStatsWithScope_name(ctx, field) + case "scope": + return ec.fieldContext_JobStatsWithScope_scope(ctx, field) + case "stats": + return ec.fieldContext_JobStatsWithScope_stats(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type JobStatsWithScope", field.Name) + }, + } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Query_scopedJobStats_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) return fc, err } @@ -11058,6 +11416,143 @@ func (ec *executionContext) fieldContext_Resource_configuration(_ context.Contex return fc, nil } +func (ec *executionContext) _ScopedStats_hostname(ctx context.Context, field graphql.CollectedField, obj *model.ScopedStats) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_ScopedStats_hostname(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.Hostname, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_ScopedStats_hostname(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "ScopedStats", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _ScopedStats_id(ctx context.Context, field graphql.CollectedField, obj *model.ScopedStats) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_ScopedStats_id(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.ID, nil + }) + if err != nil { + 
ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*string) + fc.Result = res + return ec.marshalOString2ᚖstring(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_ScopedStats_id(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "ScopedStats", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _ScopedStats_data(ctx context.Context, field graphql.CollectedField, obj *model.ScopedStats) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_ScopedStats_data(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.Data, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*schema.MetricStatistics) + fc.Result = res + return ec.marshalNMetricStatistics2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricStatistics(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_ScopedStats_data(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "ScopedStats", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "avg": + return ec.fieldContext_MetricStatistics_avg(ctx, field) + case "min": + return ec.fieldContext_MetricStatistics_min(ctx, field) + case "max": + return ec.fieldContext_MetricStatistics_max(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type MetricStatistics", field.Name) + }, + } + return fc, nil +} + func (ec *executionContext) _Series_hostname(ctx context.Context, field graphql.CollectedField, obj *schema.Series) (ret graphql.Marshaler) { fc, err := ec.fieldContext_Series_hostname(ctx, field) if err != nil { @@ -16569,50 +17064,6 @@ func (ec *executionContext) _JobMetric(ctx context.Context, sel ast.SelectionSet return out } -var jobMetricStatWithNameImplementors = []string{"JobMetricStatWithName"} - -func (ec *executionContext) _JobMetricStatWithName(ctx context.Context, sel ast.SelectionSet, obj *model.JobMetricStatWithName) graphql.Marshaler { - fields := graphql.CollectFields(ec.OperationContext, sel, jobMetricStatWithNameImplementors) - - out := graphql.NewFieldSet(fields) - deferred := make(map[string]*graphql.FieldSet) - for i, field := range fields { - switch field.Name { - case "__typename": - out.Values[i] = graphql.MarshalString("JobMetricStatWithName") - case "name": - out.Values[i] = ec._JobMetricStatWithName_name(ctx, field, obj) - if out.Values[i] == graphql.Null { - out.Invalids++ - } - case "stats": - out.Values[i] = ec._JobMetricStatWithName_stats(ctx, field, obj) - if out.Values[i] == graphql.Null { - out.Invalids++ - } - default: - panic("unknown 
field " + strconv.Quote(field.Name)) - } - } - out.Dispatch(ctx) - if out.Invalids > 0 { - return graphql.Null - } - - atomic.AddInt32(&ec.deferred, int32(len(deferred))) - - for label, dfs := range deferred { - ec.processDeferredGroup(graphql.DeferredGroup{ - Label: label, - Path: graphql.GetPath(ctx), - FieldSet: dfs, - Context: ctx, - }) - } - - return out -} - var jobMetricWithNameImplementors = []string{"JobMetricWithName"} func (ec *executionContext) _JobMetricWithName(ctx context.Context, sel ast.SelectionSet, obj *model.JobMetricWithName) graphql.Marshaler { @@ -16709,6 +17160,99 @@ func (ec *executionContext) _JobResultList(ctx context.Context, sel ast.Selectio return out } +var jobStatsImplementors = []string{"JobStats"} + +func (ec *executionContext) _JobStats(ctx context.Context, sel ast.SelectionSet, obj *model.JobStats) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, jobStatsImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("JobStats") + case "name": + out.Values[i] = ec._JobStats_name(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "stats": + out.Values[i] = ec._JobStats_stats(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + +var jobStatsWithScopeImplementors = []string{"JobStatsWithScope"} + +func (ec *executionContext) _JobStatsWithScope(ctx context.Context, sel ast.SelectionSet, obj *model.JobStatsWithScope) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, jobStatsWithScopeImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("JobStatsWithScope") + case "name": + out.Values[i] = ec._JobStatsWithScope_name(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "scope": + out.Values[i] = ec._JobStatsWithScope_scope(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "stats": + out.Values[i] = ec._JobStatsWithScope_stats(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + var jobsStatisticsImplementors = []string{"JobsStatistics"} func (ec *executionContext) _JobsStatistics(ctx context.Context, sel ast.SelectionSet, obj *model.JobsStatistics) graphql.Marshaler { @@ -17513,7 +18057,7 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr } out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) - case 
"jobMetricStats": + case "jobStats": field := field innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { @@ -17522,7 +18066,29 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr ec.Error(ctx, ec.Recover(ctx, r)) } }() - res = ec._Query_jobMetricStats(ctx, field) + res = ec._Query_jobStats(ctx, field) + if res == graphql.Null { + atomic.AddUint32(&fs.Invalids, 1) + } + return res + } + + rrm := func(ctx context.Context) graphql.Marshaler { + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) + case "scopedJobStats": + field := field + + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_scopedJobStats(ctx, field) if res == graphql.Null { atomic.AddUint32(&fs.Invalids, 1) } @@ -17740,6 +18306,52 @@ func (ec *executionContext) _Resource(ctx context.Context, sel ast.SelectionSet, return out } +var scopedStatsImplementors = []string{"ScopedStats"} + +func (ec *executionContext) _ScopedStats(ctx context.Context, sel ast.SelectionSet, obj *model.ScopedStats) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, scopedStatsImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("ScopedStats") + case "hostname": + out.Values[i] = ec._ScopedStats_hostname(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "id": + out.Values[i] = ec._ScopedStats_id(ctx, field, obj) + case "data": + out.Values[i] = ec._ScopedStats_data(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + var seriesImplementors = []string{"Series"} func (ec *executionContext) _Series(ctx context.Context, sel ast.SelectionSet, obj *schema.Series) graphql.Marshaler { @@ -19346,60 +19958,6 @@ func (ec *executionContext) marshalNJobMetric2ᚖgithubᚗcomᚋClusterCockpit return ec._JobMetric(ctx, sel, v) } -func (ec *executionContext) marshalNJobMetricStatWithName2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobMetricStatWithNameᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.JobMetricStatWithName) graphql.Marshaler { - ret := make(graphql.Array, len(v)) - var wg sync.WaitGroup - isLen1 := len(v) == 1 - if !isLen1 { - wg.Add(len(v)) - } - for i := range v { - i := i - fc := &graphql.FieldContext{ - Index: &i, - Result: &v[i], - } - ctx := graphql.WithFieldContext(ctx, fc) - f := func(i int) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = nil - } - }() - if !isLen1 { - defer wg.Done() - } - ret[i] = ec.marshalNJobMetricStatWithName2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobMetricStatWithName(ctx, sel, v[i]) - } - if isLen1 { - f(i) - } else { - 
go f(i) - } - - } - wg.Wait() - - for _, e := range ret { - if e == graphql.Null { - return graphql.Null - } - } - - return ret -} - -func (ec *executionContext) marshalNJobMetricStatWithName2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobMetricStatWithName(ctx context.Context, sel ast.SelectionSet, v *model.JobMetricStatWithName) graphql.Marshaler { - if v == nil { - if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { - ec.Errorf(ctx, "the requested element is null which the schema does not allow") - } - return graphql.Null - } - return ec._JobMetricStatWithName(ctx, sel, v) -} - func (ec *executionContext) marshalNJobMetricWithName2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobMetricWithNameᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.JobMetricWithName) graphql.Marshaler { ret := make(graphql.Array, len(v)) var wg sync.WaitGroup @@ -19478,6 +20036,114 @@ func (ec *executionContext) marshalNJobState2githubᚗcomᚋClusterCockpitᚋcc return v } +func (ec *executionContext) marshalNJobStats2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobStatsᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.JobStats) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNJobStats2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobStats(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + +func (ec *executionContext) marshalNJobStats2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobStats(ctx context.Context, sel ast.SelectionSet, v *model.JobStats) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec._JobStats(ctx, sel, v) +} + +func (ec *executionContext) marshalNJobStatsWithScope2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobStatsWithScopeᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.JobStatsWithScope) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNJobStatsWithScope2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobStatsWithScope(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + +func (ec *executionContext) marshalNJobStatsWithScope2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobStatsWithScope(ctx context.Context, sel ast.SelectionSet, v *model.JobStatsWithScope) graphql.Marshaler { + if v == nil { + if 
!graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec._JobStatsWithScope(ctx, sel, v) +} + func (ec *executionContext) marshalNJobsStatistics2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobsStatisticsᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.JobsStatistics) graphql.Marshaler { ret := make(graphql.Array, len(v)) var wg sync.WaitGroup @@ -19891,6 +20557,60 @@ func (ec *executionContext) marshalNResource2ᚖgithubᚗcomᚋClusterCockpitᚋ return ec._Resource(ctx, sel, v) } +func (ec *executionContext) marshalNScopedStats2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐScopedStatsᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.ScopedStats) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNScopedStats2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐScopedStats(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + +func (ec *executionContext) marshalNScopedStats2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐScopedStats(ctx context.Context, sel ast.SelectionSet, v *model.ScopedStats) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec._ScopedStats(ctx, sel, v) +} + func (ec *executionContext) marshalNSeries2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐSeries(ctx context.Context, sel ast.SelectionSet, v schema.Series) graphql.Marshaler { return ec._Series(ctx, sel, &v) } diff --git a/internal/graph/model/models_gen.go b/internal/graph/model/models_gen.go index d83a318..43c4e37 100644 --- a/internal/graph/model/models_gen.go +++ b/internal/graph/model/models_gen.go @@ -81,11 +81,6 @@ type JobLinkResultList struct { Count *int `json:"count,omitempty"` } -type JobMetricStatWithName struct { - Name string `json:"name"` - Stats *schema.MetricStatistics `json:"stats"` -} - type JobMetricWithName struct { Name string `json:"name"` Scope schema.MetricScope `json:"scope"` @@ -100,6 +95,17 @@ type JobResultList struct { HasNextPage *bool `json:"hasNextPage,omitempty"` } +type JobStats struct { + Name string `json:"name"` + Stats *schema.MetricStatistics `json:"stats"` +} + +type JobStatsWithScope struct { + Name string `json:"name"` + Scope schema.MetricScope `json:"scope"` + Stats []*ScopedStats `json:"stats"` +} + type JobsStatistics struct { ID string `json:"id"` Name string `json:"name"` @@ -173,6 +179,12 @@ type PageRequest struct { Page int `json:"page"` } +type ScopedStats struct { + Hostname string `json:"hostname"` + ID *string `json:"id,omitempty"` + Data *schema.MetricStatistics `json:"data"` +} + type StringInput struct { Eq *string `json:"eq,omitempty"` Neq *string `json:"neq,omitempty"` diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go index 
7d3de90..029be87 100644 --- a/internal/graph/schema.resolvers.go +++ b/internal/graph/schema.resolvers.go @@ -301,24 +301,23 @@ func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []str return res, err } -// JobMetricStats is the resolver for the jobMetricStats field. -func (r *queryResolver) JobMetricStats(ctx context.Context, id string, metrics []string) ([]*model.JobMetricStatWithName, error) { - +// JobStats is the resolver for the jobStats field. +func (r *queryResolver) JobStats(ctx context.Context, id string, metrics []string) ([]*model.JobStats, error) { job, err := r.Query().Job(ctx, id) if err != nil { - log.Warn("Error while querying job for metrics") + log.Warnf("Error while querying job %s for metadata", id) return nil, err } - data, err := metricDataDispatcher.LoadStatData(job, metrics, ctx) + data, err := metricDataDispatcher.LoadJobStats(job, metrics, ctx) if err != nil { - log.Warn("Error while loading job stat data") + log.Warnf("Error while loading jobStats data for job id %s", id) return nil, err } - res := []*model.JobMetricStatWithName{} + res := []*model.JobStats{} for name, md := range data { - res = append(res, &model.JobMetricStatWithName{ + res = append(res, &model.JobStats{ Name: name, Stats: &md, }) @@ -327,6 +326,44 @@ func (r *queryResolver) JobMetricStats(ctx context.Context, id string, metrics [ return res, err } +// ScopedJobStats is the resolver for the scopedJobStats field. +func (r *queryResolver) ScopedJobStats(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope) ([]*model.JobStatsWithScope, error) { + job, err := r.Query().Job(ctx, id) + if err != nil { + log.Warnf("Error while querying job %s for metadata", id) + return nil, err + } + + data, err := metricDataDispatcher.LoadScopedJobStats(job, metrics, scopes, ctx) + if err != nil { + log.Warnf("Error while loading scopedJobStats data for job id %s", id) + return nil, err + } + + res := make([]*model.JobStatsWithScope, 0) + for name, scoped := range data { + for scope, stats := range scoped { + + mdlStats := make([]*model.ScopedStats, 0) + for _, stat := range stats { + mdlStats = append(mdlStats, &model.ScopedStats{ + Hostname: stat.Hostname, + ID: stat.Id, + Data: stat.Data, + }) + } + + res = append(res, &model.JobStatsWithScope{ + Name: name, + Scope: scope, + Stats: mdlStats, + }) + } + } + + return res, nil +} + // JobsFootprints is the resolver for the jobsFootprints field. func (r *queryResolver) JobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) (*model.Footprints, error) { // NOTE: Legacy Naming! This resolver is for normalized histograms in analysis view only - *Not* related to DB "footprint" column! diff --git a/internal/metricDataDispatcher/dataLoader.go b/internal/metricDataDispatcher/dataLoader.go index f3f60b4..c6cecd8 100644 --- a/internal/metricDataDispatcher/dataLoader.go +++ b/internal/metricDataDispatcher/dataLoader.go @@ -224,8 +224,34 @@ func LoadAverages( return nil } -// Used for polar plots in frontend -func LoadStatData( +// Used for statsTable in frontend: Return scoped statistics by metric. 
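// Illustrative shape (hypothetical values): the returned schema.ScopedJobStats
// nests as metric -> scope -> per-host entries, e.g.
//
//	scopedStats["flops_any"][schema.MetricScopeCore] == []*schema.ScopedStats{
//		{Hostname: "node001", Id: &coreId, Data: &schema.MetricStatistics{Avg: 1.2, Min: 0.3, Max: 2.4}},
//	}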
+func LoadScopedJobStats( + job *schema.Job, + metrics []string, + scopes []schema.MetricScope, + ctx context.Context, +) (schema.ScopedJobStats, error) { + + if job.State != schema.JobStateRunning && !config.Keys.DisableArchive { + return archive.LoadScopedStatsFromArchive(job, metrics, scopes) + } + + repo, err := metricdata.GetMetricDataRepo(job.Cluster) + if err != nil { + return nil, fmt.Errorf("job %d: no metric data repository configured for '%s'", job.JobID, job.Cluster) + } + + scopedStats, err := repo.LoadScopedStats(job, metrics, scopes, ctx) + if err != nil { + log.Errorf("error while loading scoped statistics for job %d (User %s, Project %s)", job.JobID, job.User, job.Project) + return nil, err + } + + return scopedStats, nil +} + +// Used for polar plots in frontend: Aggregates statistics for all nodes to single values for job per metric. +func LoadJobStats( job *schema.Job, metrics []string, ctx context.Context, @@ -237,12 +263,12 @@ func LoadStatData( data := make(map[string]schema.MetricStatistics, len(metrics)) repo, err := metricdata.GetMetricDataRepo(job.Cluster) if err != nil { - return data, fmt.Errorf("METRICDATA/METRICDATA > no metric data repository configured for '%s'", job.Cluster) + return data, fmt.Errorf("job %d: no metric data repository configured for '%s'", job.JobID, job.Cluster) } stats, err := repo.LoadStats(job, metrics, ctx) if err != nil { - log.Errorf("Error while loading statistics for job %v (User %v, Project %v)", job.JobID, job.User, job.Project) + log.Errorf("error while loading statistics for job %d (User %s, Project %s)", job.JobID, job.User, job.Project) return data, err } diff --git a/internal/metricdata/cc-metric-store.go b/internal/metricdata/cc-metric-store.go index 2b92fbb..9516e2b 100644 --- a/internal/metricdata/cc-metric-store.go +++ b/internal/metricdata/cc-metric-store.go @@ -129,13 +129,13 @@ func (ccms *CCMetricStore) doRequest( ) (*ApiQueryResponse, error) { buf := &bytes.Buffer{} if err := json.NewEncoder(buf).Encode(body); err != nil { - log.Warn("Error while encoding request body") + log.Errorf("Error while encoding request body: %s", err.Error()) return nil, err } req, err := http.NewRequestWithContext(ctx, http.MethodGet, ccms.queryEndpoint, buf) if err != nil { - log.Warn("Error while building request body") + log.Errorf("Error while building request body: %s", err.Error()) return nil, err } if ccms.jwt != "" { @@ -151,7 +151,7 @@ func (ccms *CCMetricStore) doRequest( res, err := ccms.client.Do(req) if err != nil { - log.Error("Error while performing request") + log.Errorf("Error while performing request: %s", err.Error()) return nil, err } @@ -161,7 +161,7 @@ func (ccms *CCMetricStore) doRequest( var resBody ApiQueryResponse if err := json.NewDecoder(bufio.NewReader(res.Body)).Decode(&resBody); err != nil { - log.Warn("Error while decoding result body") + log.Errorf("Error while decoding result body: %s", err.Error()) return nil, err } @@ -177,7 +177,7 @@ func (ccms *CCMetricStore) LoadData( ) (schema.JobData, error) { queries, assignedScope, err := ccms.buildQueries(job, metrics, scopes, resolution) if err != nil { - log.Warn("Error while building queries") + log.Errorf("Error while building queries for jobId %d, Metrics %v, Scopes %v: %s", job.JobID, metrics, scopes, err.Error()) return nil, err } @@ -192,7 +192,7 @@ func (ccms *CCMetricStore) LoadData( resBody, err := ccms.doRequest(ctx, &req) if err != nil { - log.Error("Error while performing request") + log.Errorf("Error while performing request: %s", err.Error()) 
return nil, err } @@ -557,16 +557,9 @@ func (ccms *CCMetricStore) LoadStats( ctx context.Context, ) (map[string]map[string]schema.MetricStatistics, error) { - // metricConfigs := archive.GetCluster(job.Cluster).MetricConfig - // resolution := 9000 - - // for _, mc := range metricConfigs { - // resolution = min(resolution, mc.Timestep) - // } - queries, _, err := ccms.buildQueries(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, 0) // #166 Add scope here for analysis view accelerator normalization? if err != nil { - log.Warn("Error while building query") + log.Errorf("Error while building queries for jobId %d, Metrics %v: %s", job.JobID, metrics, err.Error()) return nil, err } @@ -581,7 +574,7 @@ func (ccms *CCMetricStore) LoadStats( resBody, err := ccms.doRequest(ctx, &req) if err != nil { - log.Error("Error while performing request") + log.Errorf("Error while performing request: %s", err.Error()) return nil, err } @@ -591,9 +584,8 @@ metric := ccms.toLocalName(query.Metric) data := res[0] if data.Error != nil { - log.Infof("fetching %s for node %s failed: %s", metric, query.Hostname, *data.Error) + log.Errorf("fetching %s for node %s failed: %s", metric, query.Hostname, *data.Error) continue - // return nil, fmt.Errorf("METRICDATA/CCMS > fetching %s for node %s failed: %s", metric, query.Hostname, *data.Error) } metricdata, ok := stats[metric] @@ -603,9 +595,8 @@ } if data.Avg.IsNaN() || data.Min.IsNaN() || data.Max.IsNaN() { - log.Infof("fetching %s for node %s failed: one of avg/min/max is NaN", metric, query.Hostname) + log.Warnf("fetching %s for node %s failed: one of avg/min/max is NaN", metric, query.Hostname) continue - // return nil, fmt.Errorf("METRICDATA/CCMS > fetching %s for node %s failed: %s", metric, query.Hostname, "avg/min/max is NaN") } metricdata[query.Hostname] = schema.MetricStatistics{ @@ -618,7 +609,98 @@ return stats, nil } -// TODO: Support sub-node-scope metrics! For this, the partition of a node needs to be known!
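For orientation, a minimal sketch of consuming the node-scope map that LoadStats returns (enclosing function, job, and ctx are assumed; shapes follow the signature above):

	// Illustrative only: LoadStats yields map[metric]map[hostname]schema.MetricStatistics.
	stats, err := ccms.LoadStats(job, []string{"flops_any", "mem_bw"}, ctx)
	if err != nil {
		return nil, err
	}
	for metric, hosts := range stats {
		for hostname, s := range hosts {
			log.Debugf("%s@%s: avg=%.2f min=%.2f max=%.2f", metric, hostname, s.Avg, s.Min, s.Max)
		}
	}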
+// Used for Job-View Statistics Table +func (ccms *CCMetricStore) LoadScopedStats( + job *schema.Job, + metrics []string, + scopes []schema.MetricScope, + ctx context.Context, +) (schema.ScopedJobStats, error) { + queries, assignedScope, err := ccms.buildQueries(job, metrics, scopes, 0) + if err != nil { + log.Errorf("Error while building queries for jobId %d, Metrics %v, Scopes %v: %s", job.JobID, metrics, scopes, err.Error()) + return nil, err + } + + req := ApiQueryRequest{ + Cluster: job.Cluster, + From: job.StartTime.Unix(), + To: job.StartTime.Add(time.Duration(job.Duration) * time.Second).Unix(), + Queries: queries, + WithStats: true, + WithData: false, + } + + resBody, err := ccms.doRequest(ctx, &req) + if err != nil { + log.Errorf("Error while performing request: %s", err.Error()) + return nil, err + } + + var errors []string + scopedJobStats := make(schema.ScopedJobStats) + + for i, row := range resBody.Results { + query := req.Queries[i] + metric := ccms.toLocalName(query.Metric) + scope := assignedScope[i] + + if _, ok := scopedJobStats[metric]; !ok { + scopedJobStats[metric] = make(map[schema.MetricScope][]*schema.ScopedStats) + } + + if _, ok := scopedJobStats[metric][scope]; !ok { + scopedJobStats[metric][scope] = make([]*schema.ScopedStats, 0) + } + + for ndx, res := range row { + if res.Error != nil { + // Collect "partial errors" instead of failing the whole request + errors = append(errors, fmt.Sprintf("failed to fetch '%s' from host '%s': %s", query.Metric, query.Hostname, *res.Error)) + continue + } + + id := (*string)(nil) + if query.Type != nil { + id = new(string) + *id = query.TypeIds[ndx] + } + + if res.Avg.IsNaN() || res.Min.IsNaN() || res.Max.IsNaN() { + // Reset to zero via "schema.Float", because a regular float64 cannot be marshaled to JSON when NaN. + res.Avg = schema.Float(0) + res.Min = schema.Float(0) + res.Max = schema.Float(0) + } + + scopedJobStats[metric][scope] = append(scopedJobStats[metric][scope], &schema.ScopedStats{ + Hostname: query.Hostname, + Id: id, + Data: &schema.MetricStatistics{ + Avg: float64(res.Avg), + Min: float64(res.Min), + Max: float64(res.Max), + }, + }) + } + + // Remove empty scopes and metrics from the map, so that a later len(scopedJobStats[metric][scope]) check stays meaningful + if len(scopedJobStats[metric][scope]) == 0 { + delete(scopedJobStats[metric], scope) + if len(scopedJobStats[metric]) == 0 { + delete(scopedJobStats, metric) + } + } + } + + if len(errors) != 0 { + // Return the collected "partial errors" alongside the data + return scopedJobStats, fmt.Errorf("METRICDATA/CCMS > Errors: %s", strings.Join(errors, ", ")) + } + return scopedJobStats, nil +}
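Because LoadScopedStats can return partial results together with a non-nil "partial errors" error, a caller sketch (names assumed, shapes from the function above):

	// Illustrative only: partial results may arrive alongside a non-nil error.
	scoped, err := ccms.LoadScopedStats(job, metrics, scopes, ctx)
	if scoped == nil {
		return nil, err // hard failure, nothing usable
	}
	if err != nil {
		log.Warnf("continuing with partial stats: %s", err.Error())
	}
	for metric, byScope := range scoped {
		for scope, entries := range byScope {
			log.Debugf("%s at scope %s: %d host entries", metric, scope, len(entries))
		}
	}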
+ +// Used for Systems-View Node-Overview func (ccms *CCMetricStore) LoadNodeData( cluster string, metrics, nodes []string, @@ -652,7 +734,7 @@ resBody, err := ccms.doRequest(ctx, &req) if err != nil { - log.Error(fmt.Sprintf("Error while performing request %#v\n", err)) + log.Errorf("Error while performing request: %s", err.Error()) return nil, err } @@ -710,6 +792,7 @@ return data, nil } +// Used for Systems-View Node-List func (ccms *CCMetricStore) LoadNodeListData( cluster, subCluster, nodeFilter string, metrics []string, @@ -768,7 +851,7 @@ queries, assignedScope, err := ccms.buildNodeQueries(cluster, subCluster, nodes, metrics, scopes, resolution) if err != nil { - log.Warn("Error while building queries") + log.Errorf("Error while building node queries for Cluster %s, SubCluster %s, Metrics %v, Scopes %v: %s", cluster, subCluster, metrics, scopes, err.Error()) return nil, totalNodes, hasNextPage, err } @@ -783,7 +866,7 @@ resBody, err := ccms.doRequest(ctx, &req) if err != nil { - log.Error(fmt.Sprintf("Error while performing request %#v\n", err)) + log.Errorf("Error while performing request: %s", err.Error()) return nil, totalNodes, hasNextPage, err } @@ -888,7 +971,7 @@ func (ccms *CCMetricStore) buildNodeQueries( if subCluster != "" { subClusterTopol, scterr = archive.GetSubCluster(cluster, subCluster) if scterr != nil { - // TODO: Log + log.Errorf("could not load cluster %s subCluster %s topology: %s", cluster, subCluster, scterr.Error()) return nil, nil, scterr } } @@ -898,7 +981,7 @@ mc := archive.GetMetricConfig(cluster, metric) if mc == nil { // return nil, fmt.Errorf("METRICDATA/CCMS > metric '%s' is not specified for cluster '%s'", metric, cluster) - log.Infof("metric '%s' is not specified for cluster '%s'", metric, cluster) + log.Warnf("metric '%s' is not specified for cluster '%s'", metric, cluster) continue } diff --git a/internal/metricdata/influxdb-v2.go b/internal/metricdata/influxdb-v2.go index 79c2d4a..c53dad3 100644 --- a/internal/metricdata/influxdb-v2.go +++ b/internal/metricdata/influxdb-v2.go @@ -10,6 +10,8 @@ import ( "encoding/json" "errors" "fmt" + "math" + "sort" "strings" "time" @@ -64,6 +66,8 @@ func (idb *InfluxDBv2DataRepository) LoadData( ctx context.Context, resolution int) (schema.JobData, error) { + log.Infof("InfluxDB 2 backend: resolution scaling not implemented, will return default timestep. Requested resolution: %d", resolution) + measurementsConds := make([]string, 0, len(metrics)) for _, m := range metrics { measurementsConds = append(measurementsConds, fmt.Sprintf(`r["_measurement"] == "%s"`, m)) } @@ -86,7 +90,7 @@ query := "" switch scope { case "node": - // Get Finest Granularity, Groupy By Measurement and Hostname (== Metric / Node), Calculate Mean for 60s windows + // Get Finest Granularity, Group By Measurement and Hostname (== Metric / Node), Calculate Mean for 60s windows <-- Resolution could be added here? // log.Info("Scope 'node' requested. ") query = fmt.Sprintf(` from(bucket: "%s") |> range(start: %s, stop: %s) |> filter(fn: (r) => (%s) and (%s) ) |> drop(columns: ["_start", "_stop"]) |> group(columns: ["hostname", "_measurement"]) |> aggregateWindow(every: 60s, fn: mean) |> drop(columns: ["_time"])`, idb.bucket, idb.formatTime(job.StartTime), idb.formatTime(idb.epochToTime(job.StartTimeUnix + int64(job.Duration) + int64(1) )), measurementsCond, hostsCond) @@ -116,6 +120,12 @@ // idb.bucket, // idb.formatTime(job.StartTime), idb.formatTime(idb.epochToTime(job.StartTimeUnix + int64(job.Duration) + int64(1) )), // measurementsCond, hostsCond) + case "hwthread": + log.Info("Scope 'hwthread' requested, but not yet supported: Will return 'node' scope only.") + continue + case "accelerator": + log.Info("Scope 'accelerator' requested, but not yet supported: Will return 'node' scope only.") + continue default: log.Infof("Unknown scope '%s' requested: Will return 'node' scope.", scope) continue @@ -173,6 +183,11 @@ } case "socket": continue + case "accelerator": + continue + case "hwthread": + // See below @ core + continue case "core": continue // Include Series.Id in hostSeries @@ -301,6 +316,53 @@ return stats, nil } +// Used in Job-View StatsTable +// UNTESTED +func (idb *InfluxDBv2DataRepository) LoadScopedStats( + job *schema.Job, + metrics []string, + scopes []schema.MetricScope, + ctx context.Context) (schema.ScopedJobStats, error) { + + // Assumption: idb.loadData() only returns series node-scope - use node scope for statsTable + scopedJobStats := make(schema.ScopedJobStats) + data, err := idb.LoadData(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, ctx, 0 /*resolution here*/) + if err != nil { + log.Warn("Error while loading job for scopedJobStats") + return nil, err + } + + for metric, metricData := range data { + for _, scope := range scopes { + if scope != schema.MetricScopeNode { + logOnce.Do(func() { + log.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope) + }) + continue + } + + if _, ok := scopedJobStats[metric]; !ok { + scopedJobStats[metric] = make(map[schema.MetricScope][]*schema.ScopedStats) + } + + if _, ok := scopedJobStats[metric][scope]; !ok { + scopedJobStats[metric][scope] = make([]*schema.ScopedStats, 0) + } + + for _, series := range metricData[scope].Series { + scopedJobStats[metric][scope] = append(scopedJobStats[metric][scope], &schema.ScopedStats{ + Hostname: series.Hostname, + Data: &series.Statistics, + }) + } + } + } + + return scopedJobStats, nil +} + +// Used in Systems-View @ Node-Overview +// UNTESTED func (idb *InfluxDBv2DataRepository) LoadNodeData( cluster string, metrics, nodes []string, @@ -308,12 +370,123 @@ from, to time.Time, ctx context.Context) (map[string]map[string][]*schema.JobMetric, error) { - // TODO : Implement to be used in Analysis- and System/Node-View - log.Infof("LoadNodeData unimplemented for InfluxDBv2DataRepository, Args: cluster %s, metrics %v, nodes %v, scopes %v", cluster, metrics, nodes, scopes) + // Note: scopes[] Array will be ignored, only return node scope - return nil, errors.New("METRICDATA/INFLUXV2 > unimplemented for InfluxDBv2DataRepository") + // CONVERT ARGS TO INFLUX + measurementsConds := make([]string, 0) + for _, m := range metrics { + measurementsConds = append(measurementsConds, fmt.Sprintf(`r["_measurement"] == "%s"`, m)) + } + measurementsCond := strings.Join(measurementsConds, " or ") + + hostsConds := make([]string, 0) + if nodes == nil { + var allNodes []string + subClusterNodeLists := archive.NodeLists[cluster] + for _, nodeList := range subClusterNodeLists { + allNodes = append(allNodes, nodeList.PrintList()...)
+ } + for _, node := range allNodes { + nodes = append(nodes, node) + hostsConds = append(hostsConds, fmt.Sprintf(`r["hostname"] == "%s"`, node)) + } + } else { + for _, node := range nodes { + hostsConds = append(hostsConds, fmt.Sprintf(`r["hostname"] == "%s"`, node)) + } + } + hostsCond := strings.Join(hostsConds, " or ") + + // BUILD AND PERFORM QUERY + query := fmt.Sprintf(` + from(bucket: "%s") + |> range(start: %s, stop: %s) + |> filter(fn: (r) => (%s) and (%s) ) + |> drop(columns: ["_start", "_stop"]) + |> group(columns: ["hostname", "_measurement"]) + |> aggregateWindow(every: 60s, fn: mean) + |> drop(columns: ["_time"])`, + idb.bucket, + idb.formatTime(from), idb.formatTime(to), + measurementsCond, hostsCond) + + rows, err := idb.queryClient.Query(ctx, query) + if err != nil { + log.Errorf("Error while performing query: %s", err.Error()) + return nil, err + } + + // HANDLE QUERY RETURN + // Collect Float Arrays for Node@Metric -> No Scope Handling! + influxData := make(map[string]map[string][]schema.Float) + for rows.Next() { + row := rows.Record() + host, field := row.ValueByKey("hostname").(string), row.Measurement() + + influxHostData, ok := influxData[host] + if !ok { + influxHostData = make(map[string][]schema.Float) + influxData[host] = influxHostData + } + + influxFieldData, ok := influxData[host][field] + if !ok { + influxFieldData = make([]schema.Float, 0) + influxData[host][field] = influxFieldData + } + + val, ok := row.Value().(float64) + if ok { + influxData[host][field] = append(influxData[host][field], schema.Float(val)) + } else { + influxData[host][field] = append(influxData[host][field], schema.Float(0)) + } + } + + // BUILD FUNCTION RETURN + data := make(map[string]map[string][]*schema.JobMetric) + for node, metricData := range influxData { + + nodeData, ok := data[node] + if !ok { + nodeData = make(map[string][]*schema.JobMetric) + data[node] = nodeData + } + + for metric, floatArray := range metricData { + // Start min/max at +/-MaxFloat64, otherwise series with only positive or only negative values keep a wrong 0.0 bound + avg, min, max := 0.0, math.MaxFloat64, -math.MaxFloat64 + for _, val := range floatArray { + avg += float64(val) + min = math.Min(min, float64(val)) + max = math.Max(max, float64(val)) + } + + stats := schema.MetricStatistics{ + Avg: (math.Round((avg/float64(len(floatArray)))*100) / 100), + Min: (math.Round(min*100) / 100), + Max: (math.Round(max*100) / 100), + } + + mc := archive.GetMetricConfig(cluster, metric) + nodeData[metric] = append(nodeData[metric], &schema.JobMetric{ + Unit: mc.Unit, + Timestep: mc.Timestep, + Series: []schema.Series{ + { + Hostname: node, + Statistics: stats, + Data: floatArray, + }, + }, + }) + } + } + + return data, nil } +// Used in Systems-View @ Node-List +// UNTESTED func (idb *InfluxDBv2DataRepository) LoadNodeListData( cluster, subCluster, nodeFilter string, metrics []string, @@ -324,10 +497,79 @@ ctx context.Context, ) (map[string]schema.JobData, int, bool, error) { + // Assumption: idb.loadData() only returns series node-scope - use node scope for NodeList + + // 0) Init additional vars var totalNodes int = 0 var hasNextPage bool = false - // TODO : Implement to be used in NodeList-View - log.Infof("LoadNodeListData unimplemented for InfluxDBv2DataRepository, Args: cluster %s, metrics %v, nodeFilter %v, scopes %v", cluster, metrics, nodeFilter, scopes) - return nil, totalNodes, hasNextPage, errors.New("METRICDATA/INFLUXV2 > unimplemented for InfluxDBv2DataRepository") + // 1) Get list of all nodes + var nodes []string + if subCluster != "" { + scNodes := archive.NodeLists[cluster][subCluster] + nodes =
scNodes.PrintList() + } else { + subClusterNodeLists := archive.NodeLists[cluster] + for _, nodeList := range subClusterNodeLists { + nodes = append(nodes, nodeList.PrintList()...) + } + } + + // 2) Filter nodes + if nodeFilter != "" { + filteredNodes := []string{} + for _, node := range nodes { + if strings.Contains(node, nodeFilter) { + filteredNodes = append(filteredNodes, node) + } + } + nodes = filteredNodes + } + + // 2.1) Count total nodes && Sort nodes -> Sorting invalidated after return ... + totalNodes = len(nodes) + sort.Strings(nodes) + + // 3) Apply paging + if len(nodes) > page.ItemsPerPage { + start := (page.Page - 1) * page.ItemsPerPage + end := start + page.ItemsPerPage + if end > len(nodes) { + end = len(nodes) + hasNextPage = false + } else { + hasNextPage = true + } + nodes = nodes[start:end] + } + + // 4) Fetch And Convert Data, use idb.LoadNodeData() for query + + rawNodeData, err := idb.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx) + if err != nil { + log.Error(fmt.Sprintf("Error while loading influx nodeData for nodeListData %#v\n", err)) + return nil, totalNodes, hasNextPage, err + } + + data := make(map[string]schema.JobData) + for node, nodeData := range rawNodeData { + // Init Nested Map Data Structures If Not Found + hostData, ok := data[node] + if !ok { + hostData = make(schema.JobData) + data[node] = hostData + } + + for metric, nodeMetricData := range nodeData { + metricData, ok := hostData[metric] + if !ok { + metricData = make(map[schema.MetricScope]*schema.JobMetric) + data[node][metric] = metricData + } + + data[node][metric][schema.MetricScopeNode] = nodeMetricData[0] // Only Node Scope Returned from loadNodeData + } + } + + return data, totalNodes, hasNextPage, nil } diff --git a/internal/metricdata/metricdata.go b/internal/metricdata/metricdata.go index 0fe94d1..f30d837 100644 --- a/internal/metricdata/metricdata.go +++ b/internal/metricdata/metricdata.go @@ -24,9 +24,12 @@ type MetricDataRepository interface { // Return the JobData for the given job, only with the requested metrics. LoadData(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error) - // Return a map of metrics to a map of nodes to the metric statistics of the job. node scope assumed for now. + // Return a map of metrics to a map of nodes to the metric statistics of the job. node scope only. LoadStats(job *schema.Job, metrics []string, ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) + // Return a map of metrics to a map of scopes to the scoped metric statistics of the job. + LoadScopedStats(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context) (schema.ScopedJobStats, error) + // Return a map of hosts to a map of metrics at the requested scopes (currently only node) for that node. 
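	// Illustrative shape (names assumed): data["node001"]["flops_any"] yields
	// []*schema.JobMetric, one entry per requested scope (currently node only).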
LoadNodeData(cluster string, metrics, nodes []string, scopes []schema.MetricScope, from, to time.Time, ctx context.Context) (map[string]map[string][]*schema.JobMetric, error) diff --git a/internal/metricdata/prometheus.go b/internal/metricdata/prometheus.go index cd849ce..d16501e 100644 --- a/internal/metricdata/prometheus.go +++ b/internal/metricdata/prometheus.go @@ -448,6 +448,51 @@ func (pdb *PrometheusDataRepository) LoadNodeData( return data, nil } +// Implemented by NHR@FAU; Used in Job-View StatsTable +func (pdb *PrometheusDataRepository) LoadScopedStats( + job *schema.Job, + metrics []string, + scopes []schema.MetricScope, + ctx context.Context) (schema.ScopedJobStats, error) { + + // Assumption: pdb.loadData() only returns series node-scope - use node scope for statsTable + scopedJobStats := make(schema.ScopedJobStats) + data, err := pdb.LoadData(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, ctx, 0 /*resolution here*/) + if err != nil { + log.Warn("Error while loading job for scopedJobStats") + return nil, err + } + + for metric, metricData := range data { + for _, scope := range scopes { + if scope != schema.MetricScopeNode { + logOnce.Do(func() { + log.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope) + }) + continue + } + + if _, ok := scopedJobStats[metric]; !ok { + scopedJobStats[metric] = make(map[schema.MetricScope][]*schema.ScopedStats) + } + + if _, ok := scopedJobStats[metric][scope]; !ok { + scopedJobStats[metric][scope] = make([]*schema.ScopedStats, 0) + } + + for _, series := range metricData[scope].Series { + scopedJobStats[metric][scope] = append(scopedJobStats[metric][scope], &schema.ScopedStats{ + Hostname: series.Hostname, + Data: &series.Statistics, + }) + } + } + } + + return scopedJobStats, nil +} + +// Implemented by NHR@FAU; Used in NodeList-View func (pdb *PrometheusDataRepository) LoadNodeListData( cluster, subCluster, nodeFilter string, metrics []string, @@ -458,10 +503,132 @@ func (pdb *PrometheusDataRepository) LoadNodeListData( ctx context.Context, ) (map[string]schema.JobData, int, bool, error) { + // Assumption: pdb.loadData() only returns series node-scope - use node scope for NodeList + + // 0) Init additional vars var totalNodes int = 0 var hasNextPage bool = false - // TODO : Implement to be used in NodeList-View - log.Infof("LoadNodeListData unimplemented for PrometheusDataRepository, Args: cluster %s, metrics %v, nodeFilter %v, scopes %v", cluster, metrics, nodeFilter, scopes) - return nil, totalNodes, hasNextPage, errors.New("METRICDATA/INFLUXV2 > unimplemented for PrometheusDataRepository") + // 1) Get list of all nodes + var nodes []string + if subCluster != "" { + scNodes := archive.NodeLists[cluster][subCluster] + nodes = scNodes.PrintList() + } else { + subClusterNodeLists := archive.NodeLists[cluster] + for _, nodeList := range subClusterNodeLists { + nodes = append(nodes, nodeList.PrintList()...) + } + } + + // 2) Filter nodes + if nodeFilter != "" { + filteredNodes := []string{} + for _, node := range nodes { + if strings.Contains(node, nodeFilter) { + filteredNodes = append(filteredNodes, node) + } + } + nodes = filteredNodes + } + + // 2.1) Count total nodes && Sort nodes -> Sorting invalidated after return ... 
+ totalNodes = len(nodes) + sort.Strings(nodes) + + // 3) Apply paging + if len(nodes) > page.ItemsPerPage { + start := (page.Page - 1) * page.ItemsPerPage + end := start + page.ItemsPerPage + if end > len(nodes) { + end = len(nodes) + hasNextPage = false + } else { + hasNextPage = true + } + nodes = nodes[start:end] + } + + // 4) Fetch Data, based on pdb.LoadNodeData() + + t0 := time.Now() + // Map of hosts of jobData + data := make(map[string]schema.JobData) + + // query db for each metric + // TODO: scopes seems to be always empty + if len(scopes) == 0 || !contains(scopes, schema.MetricScopeNode) { + scopes = append(scopes, schema.MetricScopeNode) + } + + for _, scope := range scopes { + if scope != schema.MetricScopeNode { + logOnce.Do(func() { + log.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope) + }) + continue + } + + for _, metric := range metrics { + metricConfig := archive.GetMetricConfig(cluster, metric) + if metricConfig == nil { + log.Warnf("Error in LoadNodeListData: Metric %s for cluster %s not configured", metric, cluster) + return nil, totalNodes, hasNextPage, errors.New("Prometheus config error") + } + query, err := pdb.FormatQuery(metric, scope, nodes, cluster) + if err != nil { + log.Warn("Error while formatting prometheus query") + return nil, totalNodes, hasNextPage, err + } + + // ranged query over all nodes + r := promv1.Range{ + Start: from, + End: to, + Step: time.Duration(metricConfig.Timestep * 1e9), + } + result, warnings, err := pdb.queryClient.QueryRange(ctx, query, r) + if err != nil { + log.Errorf("Prometheus query error in LoadNodeData: %v\n", err) + return nil, totalNodes, hasNextPage, errors.New("Prometheus query error") + } + if len(warnings) > 0 { + log.Warnf("Warnings: %v\n", warnings) + } + + step := int64(metricConfig.Timestep) + steps := int64(to.Sub(from).Seconds()) / step + + // iter rows of host, metric, values + for _, row := range result.(promm.Matrix) { + hostname := strings.TrimSuffix(string(row.Metric["exported_instance"]), pdb.suffix) + + hostdata, ok := data[hostname] + if !ok { + hostdata = make(schema.JobData) + data[hostname] = hostdata + } + + metricdata, ok := hostdata[metric] + if !ok { + metricdata = make(map[schema.MetricScope]*schema.JobMetric) + data[hostname][metric] = metricdata + } + + // output per host, metric and scope + scopeData, ok := metricdata[scope] + if !ok { + scopeData = &schema.JobMetric{ + Unit: metricConfig.Unit, + Timestep: metricConfig.Timestep, + Series: []schema.Series{pdb.RowToSeries(from, step, steps, row)}, + } + data[hostname][metric][scope] = scopeData + } + } + } + } + t1 := time.Since(t0) + log.Debugf("LoadNodeListData of %v nodes took %s", len(data), t1) + return data, totalNodes, hasNextPage, nil } diff --git a/internal/metricdata/utils.go b/internal/metricdata/utils.go index 48dd237..aa7bde1 100644 --- a/internal/metricdata/utils.go +++ b/internal/metricdata/utils.go @@ -36,7 +36,17 @@ func (tmdr *TestMetricDataRepository) LoadData( func (tmdr *TestMetricDataRepository) LoadStats( job *schema.Job, - metrics []string, ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) { + metrics []string, + ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) { + + panic("TODO") +} + +func (tmdr *TestMetricDataRepository) LoadScopedStats( + job *schema.Job, + metrics []string, + scopes []schema.MetricScope, + ctx context.Context) (schema.ScopedJobStats, error) { panic("TODO") } diff --git a/pkg/archive/archive.go 
b/pkg/archive/archive.go index 2eabb52..cd457eb 100644 --- a/pkg/archive/archive.go +++ b/pkg/archive/archive.go @@ -27,6 +27,8 @@ type ArchiveBackend interface { LoadJobData(job *schema.Job) (schema.JobData, error) + LoadJobStats(job *schema.Job) (schema.ScopedJobStats, error) + LoadClusterCfg(name string) (*schema.Cluster, error) StoreJobMeta(jobMeta *schema.JobMeta) error @@ -87,7 +89,7 @@ func Init(rawConfig json.RawMessage, disableArchive bool) error { var version uint64 version, err = ar.Init(rawConfig) if err != nil { - log.Error("Error while initializing archiveBackend") + log.Errorf("Error while initializing archiveBackend: %s", err.Error()) return } log.Infof("Load archive version %d", version) @@ -110,7 +112,7 @@ func LoadAveragesFromArchive( ) error { metaFile, err := ar.LoadJobMeta(job) if err != nil { - log.Warn("Error while loading job metadata from archiveBackend") + log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error()) return err } @@ -125,7 +127,7 @@ func LoadAveragesFromArchive( return nil } -// Helper to metricdataloader.LoadStatData(). +// Helper to metricdataloader.LoadJobStats(). func LoadStatsFromArchive( job *schema.Job, metrics []string, @@ -133,7 +135,7 @@ func LoadStatsFromArchive( data := make(map[string]schema.MetricStatistics, len(metrics)) metaFile, err := ar.LoadJobMeta(job) if err != nil { - log.Warn("Error while loading job metadata from archiveBackend") + log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error()) return data, err } @@ -154,10 +156,26 @@ func LoadStatsFromArchive( return data, nil } +// Helper to metricdataloader.LoadScopedJobStats(). +func LoadScopedStatsFromArchive( + job *schema.Job, + metrics []string, + scopes []schema.MetricScope, +) (schema.ScopedJobStats, error) { + + data, err := ar.LoadJobStats(job) + if err != nil { + log.Errorf("Error while loading job stats from archiveBackend: %s", err.Error()) + return nil, err + } + + return data, nil +} + func GetStatistics(job *schema.Job) (map[string]schema.JobStatistics, error) { metaFile, err := ar.LoadJobMeta(job) if err != nil { - log.Warn("Error while loading job metadata from archiveBackend") + log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error()) return nil, err } @@ -173,7 +191,7 @@ func UpdateMetadata(job *schema.Job, metadata map[string]string) error { jobMeta, err := ar.LoadJobMeta(job) if err != nil { - log.Warn("Error while loading job metadata from archiveBackend") + log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error()) return err } @@ -193,7 +211,7 @@ func UpdateTags(job *schema.Job, tags []*schema.Tag) error { jobMeta, err := ar.LoadJobMeta(job) if err != nil { - log.Warn("Error while loading job metadata from archiveBackend") + log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error()) return err } diff --git a/pkg/archive/fsBackend.go b/pkg/archive/fsBackend.go index 8a43748..711b1f5 100644 --- a/pkg/archive/fsBackend.go +++ b/pkg/archive/fsBackend.go @@ -115,6 +115,40 @@ func loadJobData(filename string, isCompressed bool) (schema.JobData, error) { } } +func loadJobStats(filename string, isCompressed bool) (schema.ScopedJobStats, error) { + f, err := os.Open(filename) + + if err != nil { + log.Errorf("fsBackend LoadJobStats()- %v", err) + return nil, err + } + defer f.Close() + + if isCompressed { + r, err := gzip.NewReader(f) + if err != nil { + log.Errorf(" %v", err) + return nil, err + } + defer r.Close() + + if 
config.Keys.Validate { + if err := schema.Validate(schema.Data, r); err != nil { + return nil, fmt.Errorf("validate job data: %v", err) + } + } + + return DecodeJobStats(r, filename) + } else { + if config.Keys.Validate { + if err := schema.Validate(schema.Data, bufio.NewReader(f)); err != nil { + return nil, fmt.Errorf("validate job data: %v", err) + } + } + return DecodeJobStats(bufio.NewReader(f), filename) + } +} + func (fsa *FsArchive) Init(rawConfig json.RawMessage) (uint64, error) { var config FsArchiveConfig @@ -389,6 +423,18 @@ func (fsa *FsArchive) LoadJobData(job *schema.Job) (schema.JobData, error) { return loadJobData(filename, isCompressed) } +func (fsa *FsArchive) LoadJobStats(job *schema.Job) (schema.ScopedJobStats, error) { + var isCompressed bool = true + filename := getPath(job, fsa.path, "data.json.gz") + + if !util.CheckFileExists(filename) { + filename = getPath(job, fsa.path, "data.json") + isCompressed = false + } + + return loadJobStats(filename, isCompressed) +} + func (fsa *FsArchive) LoadJobMeta(job *schema.Job) (*schema.JobMeta, error) { filename := getPath(job, fsa.path, "meta.json") return loadJobMeta(filename) diff --git a/pkg/archive/json.go b/pkg/archive/json.go index 1219658..5201b74 100644 --- a/pkg/archive/json.go +++ b/pkg/archive/json.go @@ -32,6 +32,43 @@ func DecodeJobData(r io.Reader, k string) (schema.JobData, error) { return data.(schema.JobData), nil } +func DecodeJobStats(r io.Reader, k string) (schema.ScopedJobStats, error) { + jobData, err := DecodeJobData(r, k) + // Convert schema.JobData to schema.ScopedJobStats + if jobData != nil { + scopedJobStats := make(schema.ScopedJobStats) + for metric, metricData := range jobData { + if _, ok := scopedJobStats[metric]; !ok { + scopedJobStats[metric] = make(map[schema.MetricScope][]*schema.ScopedStats) + } + + for scope, jobMetric := range metricData { + if _, ok := scopedJobStats[metric][scope]; !ok { + scopedJobStats[metric][scope] = make([]*schema.ScopedStats, 0) + } + + for _, series := range jobMetric.Series { + scopedJobStats[metric][scope] = append(scopedJobStats[metric][scope], &schema.ScopedStats{ + Hostname: series.Hostname, + Id: series.Id, + Data: &series.Statistics, + }) + } + + // So that one can later check len(scopedJobStats[metric][scope]): Remove from map if empty + if len(scopedJobStats[metric][scope]) == 0 { + delete(scopedJobStats[metric], scope) + if len(scopedJobStats[metric]) == 0 { + delete(scopedJobStats, metric) + } + } + } + } + return scopedJobStats, nil + } + return nil, err +} + func DecodeJobMeta(r io.Reader) (*schema.JobMeta, error) { var d schema.JobMeta if err := json.NewDecoder(r).Decode(&d); err != nil { diff --git a/pkg/schema/metrics.go b/pkg/schema/metrics.go index ffac21b..fbb85e4 100644 --- a/pkg/schema/metrics.go +++ b/pkg/schema/metrics.go @@ -15,6 +15,7 @@ import ( ) type JobData map[string]map[MetricScope]*JobMetric +type ScopedJobStats map[string]map[MetricScope][]*ScopedStats type JobMetric struct { StatisticsSeries *StatsSeries `json:"statisticsSeries,omitempty"` @@ -30,6 +31,12 @@ type Series struct { Statistics MetricStatistics `json:"statistics"` } +type ScopedStats struct { + Hostname string `json:"hostname"` + Id *string `json:"id,omitempty"` + Data *MetricStatistics `json:"data"` +} + type MetricStatistics struct { Avg float64 `json:"avg"` Min float64 `json:"min"` diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte index 22a69d8..0a2aa26 100644 --- a/web/frontend/src/Job.root.svelte +++ 
b/web/frontend/src/Job.root.svelte @@ -40,7 +40,7 @@ import JobRoofline from "./job/JobRoofline.svelte"; import EnergySummary from "./job/EnergySummary.svelte"; import PlotGrid from "./generic/PlotGrid.svelte"; - import StatsTable from "./job/StatsTable.svelte"; + import StatsTab from "./job/StatsTab.svelte"; export let dbid; export let username; @@ -53,10 +53,8 @@ let isMetricsSelectionOpen = false, selectedMetrics = [], - selectedScopes = []; - - let plots = {}, - statsTable + selectedScopes = [], + plots = {}; let availableMetrics = new Set(), missingMetrics = [], @@ -127,28 +125,17 @@ let job = $initq.data.job; if (!job) return; - const pendingMetrics = [ - ...( - ( - ccconfig[`job_view_selectedMetrics:${job.cluster}:${job.subCluster}`] || - ccconfig[`job_view_selectedMetrics:${job.cluster}`] - ) || - $initq.data.globalMetrics - .reduce((names, gm) => { - if (gm.availability.find((av) => av.cluster === job.cluster && av.subClusters.includes(job.subCluster))) { - names.push(gm.name); - } - return names; - }, []) - ), - ...( - ( - ccconfig[`job_view_nodestats_selectedMetrics:${job.cluster}:${job.subCluster}`] || - ccconfig[`job_view_nodestats_selectedMetrics:${job.cluster}`] - ) || - ccconfig[`job_view_nodestats_selectedMetrics`] - ), - ]; + const pendingMetrics = ( + ccconfig[`job_view_selectedMetrics:${job.cluster}:${job.subCluster}`] || + ccconfig[`job_view_selectedMetrics:${job.cluster}`] + ) || + $initq.data.globalMetrics + .reduce((names, gm) => { + if (gm.availability.find((av) => av.cluster === job.cluster && av.subClusters.includes(job.subCluster))) { + names.push(gm.name); + } + return names; + }, []) // Select default Scopes to load: Check before if any metric has accelerator scope by default const accScopeDefault = [...pendingMetrics].some(function (m) { @@ -343,7 +330,6 @@ {#if item.data} statsTable.moreLoaded(detail)} job={$initq.data.job} metricName={item.metric} metricUnit={$initq.data.globalMetrics.find((gm) => gm.name == item.metric)?.unit} @@ -398,22 +384,8 @@ {/if} - - {#if $jobMetrics?.data?.jobMetrics} - {#key $jobMetrics.data.jobMetrics} - - {/key} - {/if} - + +
{#if $initq.data.job.metaData?.jobScript} diff --git a/web/frontend/src/job/Metric.svelte b/web/frontend/src/job/Metric.svelte index bcfa4fd..b68ef47 100644 --- a/web/frontend/src/job/Metric.svelte +++ b/web/frontend/src/job/Metric.svelte @@ -150,11 +150,6 @@ // On additional scope request if (selectedScope == "load-all") { - // Push scope to statsTable (Needs to be in this case, else newly selected 'Metric.svelte' renders cause statsTable race condition) - const statsTableData = $metricData.data.singleUpdate.filter((x) => x.scope !== "node") - if (statsTableData.length > 0) { - dispatch("more-loaded", statsTableData); - } // Set selected scope to min of returned scopes selectedScope = minScope(scopes) nodeOnly = (selectedScope == "node") // "node" still only scope after load-all diff --git a/web/frontend/src/job/StatsTab.svelte b/web/frontend/src/job/StatsTab.svelte new file mode 100644 index 0000000..b7647b5 --- /dev/null +++ b/web/frontend/src/job/StatsTab.svelte @@ -0,0 +1,145 @@ + + + + + + + + + {#if job.numNodes > 1} + + {/if} + + +
+ + {#if $scopedStats.fetching} + + + + + + {:else if $scopedStats.error} + + + {$scopedStats.error.message} + + + {:else} + r.hostname).sort()} + data={$scopedStats?.data?.scopedJobStats} + {selectedMetrics} + /> + {/if} +
+ + diff --git a/web/frontend/src/job/StatsTable.svelte b/web/frontend/src/job/StatsTable.svelte deleted file mode 100644 index c8f12f2..0000000 --- a/web/frontend/src/job/StatsTable.svelte +++ /dev/null @@ -1,178 +0,0 @@ - - - - - - - - - -
- - - - - - {/each} - - - - - {#each selectedMetrics as metric} - {#if selectedScopes[metric] != "node"} - - {/if} - {#each ["min", "avg", "max"] as stat} - - {/each} - {/each} - - - - {#each hosts as host (host)} - - - {#each selectedMetrics as metric (metric)} - - {/each} - - {/each} - -
- {#each selectedMetrics as metric} - - - - - {metric} - - - {#each scopesForMetric(metric, jobMetrics) as scope} - - {/each} - - -
NodeId sortBy(metric, stat)}> - {stat} - {#if selectedScopes[metric] == "node"} - - {/if} -
{host}
- - diff --git a/web/frontend/src/job/jobsummary/JobFootprintPolar.svelte b/web/frontend/src/job/jobsummary/JobFootprintPolar.svelte index cf90408..fe6693b 100644 --- a/web/frontend/src/job/jobsummary/JobFootprintPolar.svelte +++ b/web/frontend/src/job/jobsummary/JobFootprintPolar.svelte @@ -40,14 +40,14 @@ const client = getContextClient(); const polarQuery = gql` query ($dbid: ID!, $selectedMetrics: [String!]!) { - jobMetricStats(id: $dbid, metrics: $selectedMetrics) { + jobStats(id: $dbid, metrics: $selectedMetrics) { name stats { - min - avg - max - } + min + avg + max } + } } `; @@ -66,7 +66,7 @@ {:else} {/if} \ No newline at end of file diff --git a/web/frontend/src/job/statstab/StatsTable.svelte b/web/frontend/src/job/statstab/StatsTable.svelte new file mode 100644 index 0000000..2ed2f28 --- /dev/null +++ b/web/frontend/src/job/statstab/StatsTable.svelte @@ -0,0 +1,139 @@ + + + + + + + + + + {/each} + + + + + {#each selectedMetrics as metric} + {#if selectedScopes[metric] != "node"} + + {/if} + {#each ["min", "avg", "max"] as stat} + + {/each} + {/each} + + + + {#each hosts as host (host)} + + + {#each selectedMetrics as metric (metric)} + + {/each} + + {/each} + +
+ {#each selectedMetrics as metric} + + + + + {metric} + + + {#each (availableScopes[metric] || []) as scope} + + {/each} + + +
NodeId sortBy(metric, stat)}> + {stat} + {#if selectedScopes[metric] == "node"} + + {/if} +
{host}
\ No newline at end of file diff --git a/web/frontend/src/job/StatsTableEntry.svelte b/web/frontend/src/job/statstab/StatsTableEntry.svelte similarity index 56% rename from web/frontend/src/job/StatsTableEntry.svelte rename to web/frontend/src/job/statstab/StatsTableEntry.svelte index 9504a63..b39eacb 100644 --- a/web/frontend/src/job/StatsTableEntry.svelte +++ b/web/frontend/src/job/statstab/StatsTableEntry.svelte @@ -1,11 +1,11 @@ -{#if series == null || series.length == 0} +{#if stats == null || stats.length == 0} No data -{:else if series.length == 1 && scope == "node"} +{:else if stats.length == 1 && scope == "node"} - {series[0].statistics.min} + {stats[0].data.min} - {series[0].statistics.avg} + {stats[0].data.avg} - {series[0].statistics.max} + {stats[0].data.max} {:else} @@ -76,19 +76,19 @@ sortByField(field)}> Sort {/each} - {#each series as s, i} + {#each stats as s, i} {s.id ?? i} - {s.statistics.min} - {s.statistics.avg} - {s.statistics.max} + {s.data.min} + {s.data.avg} + {s.data.max} {/each}