diff --git a/api/schema.graphqls b/api/schema.graphqls index e62fb0a..d7d4f24 100644 --- a/api/schema.graphqls +++ b/api/schema.graphqls @@ -194,6 +194,15 @@ type NodeMetrics { metrics: [JobMetricWithName!]! } +type NodesResultList { + items: [NodeMetrics!]! + offset: Int + limit: Int + count: Int + totalNodes: Int + hasNextPage: Boolean +} + type ClusterSupport { cluster: String! subClusters: [String!]! @@ -236,11 +245,12 @@ type Query { jobsFootprints(filter: [JobFilter!], metrics: [String!]!): Footprints jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList! - jobsStatistics(filter: [JobFilter!], metrics: [String!], page: PageRequest, sortBy: SortByAggregate, groupBy: Aggregate): [JobsStatistics!]! + jobsStatistics(filter: [JobFilter!], metrics: [String!], page: PageRequest, sortBy: SortByAggregate, groupBy: Aggregate, numDurationBins: String, numMetricBins: Int): [JobsStatistics!]! rooflineHeatmap(filter: [JobFilter!]!, rows: Int!, cols: Int!, minX: Float!, minY: Float!, maxX: Float!, maxY: Float!): [[Float!]!]! nodeMetrics(cluster: String!, nodes: [String!], scopes: [MetricScope!], metrics: [String!], from: Time!, to: Time!): [NodeMetrics!]! + nodeMetricsList(cluster: String!, subCluster: String!, nodeFilter: String!, scopes: [MetricScope!], metrics: [String!], from: Time!, to: Time!, page: PageRequest, resolution: Int): NodesResultList! } type Mutation { diff --git a/go.mod b/go.mod index 681cc98..84cdf7d 100644 --- a/go.mod +++ b/go.mod @@ -1,9 +1,9 @@ module github.com/ClusterCockpit/cc-backend -go 1.23 +go 1.23.5 require ( - github.com/99designs/gqlgen v0.17.57 + github.com/99designs/gqlgen v0.17.63 github.com/ClusterCockpit/cc-units v0.4.0 github.com/Masterminds/squirrel v1.5.4 github.com/coreos/go-oidc/v3 v3.11.0 @@ -25,8 +25,8 @@ require ( github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 github.com/swaggo/http-swagger v1.3.4 github.com/swaggo/swag v1.16.4 - github.com/vektah/gqlparser/v2 v2.5.20 - golang.org/x/crypto v0.31.0 + github.com/vektah/gqlparser/v2 v2.5.22 + golang.org/x/crypto v0.32.0 golang.org/x/exp v0.0.0-20240707233637-46b078467d37 golang.org/x/oauth2 v0.21.0 ) @@ -35,11 +35,11 @@ require ( filippo.io/edwards25519 v1.1.0 // indirect github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect github.com/KyleBanks/depth v1.2.1 // indirect - github.com/agnivade/levenshtein v1.2.0 // indirect + github.com/agnivade/levenshtein v1.2.1 // indirect github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-asn1-ber/asn1-ber v1.5.7 // indirect github.com/go-jose/go-jose/v4 v4.0.3 // indirect @@ -61,7 +61,7 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect - github.com/mailru/easyjson v0.7.7 // indirect + github.com/mailru/easyjson v0.9.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect @@ -78,12 +78,12 @@ require ( github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect go.uber.org/atomic v1.11.0 // indirect golang.org/x/mod v0.22.0 // indirect - 
golang.org/x/net v0.31.0 // indirect + golang.org/x/net v0.34.0 // indirect golang.org/x/sync v0.10.0 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect - golang.org/x/tools v0.27.0 // indirect - google.golang.org/protobuf v1.35.2 // indirect + golang.org/x/tools v0.29.0 // indirect + google.golang.org/protobuf v1.36.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect sigs.k8s.io/yaml v1.4.0 // indirect diff --git a/go.sum b/go.sum index be6272e..07aaafd 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,7 @@ filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= -github.com/99designs/gqlgen v0.17.57 h1:Ak4p60BRq6QibxY0lEc0JnQhDurfhxA67sp02lMjmPc= -github.com/99designs/gqlgen v0.17.57/go.mod h1:Jx61hzOSTcR4VJy/HFIgXiQ5rJ0Ypw8DxWLjbYDAUw0= +github.com/99designs/gqlgen v0.17.63 h1:HCdaYDPd9HqUXRchEvmE3EFzELRwLlaJ8DBuyC8Cqto= +github.com/99designs/gqlgen v0.17.63/go.mod h1:sVCM2iwIZisJjTI/DEC3fpH+HFgxY1496ZJ+jbT9IjA= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8= @@ -17,8 +17,8 @@ github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5 github.com/PuerkitoBio/goquery v1.9.3 h1:mpJr/ikUA9/GNJB/DBZcGeFDXUtosHRyRrwh7KGdTG0= github.com/PuerkitoBio/goquery v1.9.3/go.mod h1:1ndLHPdTz+DyQPICCWYlYQMPl0oXZj0G6D4LCYA6u4U= github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= -github.com/agnivade/levenshtein v1.2.0 h1:U9L4IOT0Y3i0TIlUIDJ7rVUziKi/zPbrJGaFrtYH3SY= -github.com/agnivade/levenshtein v1.2.0/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU= +github.com/agnivade/levenshtein v1.2.1 h1:EHBY3UOn1gwdy/VbFwgo4cxecRznFk7fKWN1KOX7eoM= +github.com/agnivade/levenshtein v1.2.1/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU= github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI= github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ= @@ -36,8 +36,8 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/coreos/go-oidc/v3 v3.11.0 h1:Ia3MxdwpSw702YW0xgfmP1GVCMA9aEFWu12XUZ3/OtI= github.com/coreos/go-oidc/v3 v3.11.0/go.mod h1:gE3LgjOgFoHi9a4ce4/tJczr0Ai2/BoDhf0r5lltWI0= -github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc= -github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -153,8 +153,8 @@ github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6Fm github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= +github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= @@ -224,8 +224,8 @@ github.com/swaggo/swag v1.16.4 h1:clWJtd9LStiG3VeijiCfOVODP6VpHtKdQy9ELFG3s1A= github.com/swaggo/swag v1.16.4/go.mod h1:VBsHJRsDvfYvqoiMKnsdwhNV9LEMHgEDZcyVYX0sxPg= github.com/urfave/cli/v2 v2.27.5 h1:WoHEJLdsXr6dDWoJgMq/CboDmyY/8HMMH1fTECbih+w= github.com/urfave/cli/v2 v2.27.5/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ= -github.com/vektah/gqlparser/v2 v2.5.20 h1:kPaWbhBntxoZPaNdBaIPT1Kh0i1b/onb5kXgEdP5JCo= -github.com/vektah/gqlparser/v2 v2.5.20/go.mod h1:xMl+ta8a5M1Yo1A1Iwt/k7gSpscwSnHZdw7tfhEGfTM= +github.com/vektah/gqlparser/v2 v2.5.22 h1:yaaeJ0fu+nv1vUMW0Hl+aS1eiv1vMfapBNjpffAda1I= +github.com/vektah/gqlparser/v2 v2.5.22/go.mod h1:xMl+ta8a5M1Yo1A1Iwt/k7gSpscwSnHZdw7tfhEGfTM= github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4= github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= @@ -238,8 +238,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= -golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= -golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= +golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/exp v0.0.0-20240707233637-46b078467d37 h1:uLDX+AfeFCct3a2C7uIWBKMJIR3CJMhcgfrUAqjRK6w= golang.org/x/exp v0.0.0-20240707233637-46b078467d37/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= @@ -255,8 +255,8 @@ golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= -golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= +golang.org/x/net v0.34.0 
h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -273,8 +273,8 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= @@ -293,11 +293,11 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.27.0 h1:qEKojBykQkQ4EynWy4S8Weg69NumxKdn40Fce3uc/8o= -golang.org/x/tools v0.27.0/go.mod h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q= +golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE= +golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= +google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/internal/api/rest.go b/internal/api/rest.go index 4e52701..2921ba5 100644 --- a/internal/api/rest.go +++ b/internal/api/rest.go @@ -1419,7 +1419,7 @@ func (api *RestApi) updateConfiguration(rw http.ResponseWriter, r *http.Request) rw.Header().Set("Content-Type", "text/plain") key, value := r.FormValue("key"), r.FormValue("value") - fmt.Printf("REST > KEY: %#v\nVALUE: %#v\n", key, value) + // fmt.Printf("REST > KEY: %#v\nVALUE: %#v\n", key, value) if err := repository.GetUserCfgRepo().UpdateConfig(key, value, repository.GetUserFromContext(r.Context())); err != nil { http.Error(rw, err.Error(), http.StatusUnprocessableEntity) diff --git a/internal/graph/generated/generated.go b/internal/graph/generated/generated.go index 
00609ac..d50033e 100644 --- a/internal/graph/generated/generated.go +++ b/internal/graph/generated/generated.go @@ -249,6 +249,15 @@ type ComplexityRoot struct { SubCluster func(childComplexity int) int } + NodesResultList struct { + Count func(childComplexity int) int + HasNextPage func(childComplexity int) int + Items func(childComplexity int) int + Limit func(childComplexity int) int + Offset func(childComplexity int) int + TotalNodes func(childComplexity int) int + } + Query struct { AllocatedNodes func(childComplexity int, cluster string) int Clusters func(childComplexity int) int @@ -257,8 +266,9 @@ type ComplexityRoot struct { JobMetrics func(childComplexity int, id string, metrics []string, scopes []schema.MetricScope, resolution *int) int Jobs func(childComplexity int, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) int JobsFootprints func(childComplexity int, filter []*model.JobFilter, metrics []string) int - JobsStatistics func(childComplexity int, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate) int + JobsStatistics func(childComplexity int, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate, numDurationBins *string, numMetricBins *int) int NodeMetrics func(childComplexity int, cluster string, nodes []string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time) int + NodeMetricsList func(childComplexity int, cluster string, subCluster string, nodeFilter string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time, page *model.PageRequest, resolution *int) int RooflineHeatmap func(childComplexity int, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) int Tags func(childComplexity int) int User func(childComplexity int, username string) int @@ -382,9 +392,10 @@ type QueryResolver interface { JobMetrics(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope, resolution *int) ([]*model.JobMetricWithName, error) JobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) (*model.Footprints, error) Jobs(ctx context.Context, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) (*model.JobResultList, error) - JobsStatistics(ctx context.Context, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate) ([]*model.JobsStatistics, error) + JobsStatistics(ctx context.Context, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate, numDurationBins *string, numMetricBins *int) ([]*model.JobsStatistics, error) RooflineHeatmap(ctx context.Context, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) ([][]float64, error) NodeMetrics(ctx context.Context, cluster string, nodes []string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time) ([]*model.NodeMetrics, error) + NodeMetricsList(ctx context.Context, cluster string, subCluster string, nodeFilter string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time, page *model.PageRequest, resolution *int) (*model.NodesResultList, error) } type SubClusterResolver interface { NumberOfNodes(ctx context.Context, obj *schema.SubCluster) (int, error) @@ -1288,6 +1299,48 @@ 
func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.NodeMetrics.SubCluster(childComplexity), true + case "NodesResultList.count": + if e.complexity.NodesResultList.Count == nil { + break + } + + return e.complexity.NodesResultList.Count(childComplexity), true + + case "NodesResultList.hasNextPage": + if e.complexity.NodesResultList.HasNextPage == nil { + break + } + + return e.complexity.NodesResultList.HasNextPage(childComplexity), true + + case "NodesResultList.items": + if e.complexity.NodesResultList.Items == nil { + break + } + + return e.complexity.NodesResultList.Items(childComplexity), true + + case "NodesResultList.limit": + if e.complexity.NodesResultList.Limit == nil { + break + } + + return e.complexity.NodesResultList.Limit(childComplexity), true + + case "NodesResultList.offset": + if e.complexity.NodesResultList.Offset == nil { + break + } + + return e.complexity.NodesResultList.Offset(childComplexity), true + + case "NodesResultList.totalNodes": + if e.complexity.NodesResultList.TotalNodes == nil { + break + } + + return e.complexity.NodesResultList.TotalNodes(childComplexity), true + case "Query.allocatedNodes": if e.complexity.Query.AllocatedNodes == nil { break @@ -1372,7 +1425,7 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return 0, false } - return e.complexity.Query.JobsStatistics(childComplexity, args["filter"].([]*model.JobFilter), args["metrics"].([]string), args["page"].(*model.PageRequest), args["sortBy"].(*model.SortByAggregate), args["groupBy"].(*model.Aggregate)), true + return e.complexity.Query.JobsStatistics(childComplexity, args["filter"].([]*model.JobFilter), args["metrics"].([]string), args["page"].(*model.PageRequest), args["sortBy"].(*model.SortByAggregate), args["groupBy"].(*model.Aggregate), args["numDurationBins"].(*string), args["numMetricBins"].(*int)), true case "Query.nodeMetrics": if e.complexity.Query.NodeMetrics == nil { @@ -1386,6 +1439,18 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Query.NodeMetrics(childComplexity, args["cluster"].(string), args["nodes"].([]string), args["scopes"].([]schema.MetricScope), args["metrics"].([]string), args["from"].(time.Time), args["to"].(time.Time)), true + case "Query.nodeMetricsList": + if e.complexity.Query.NodeMetricsList == nil { + break + } + + args, err := ec.field_Query_nodeMetricsList_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.NodeMetricsList(childComplexity, args["cluster"].(string), args["subCluster"].(string), args["nodeFilter"].(string), args["scopes"].([]schema.MetricScope), args["metrics"].([]string), args["from"].(time.Time), args["to"].(time.Time), args["page"].(*model.PageRequest), args["resolution"].(*int)), true + case "Query.rooflineHeatmap": if e.complexity.Query.RooflineHeatmap == nil { break @@ -1786,8 +1851,8 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in } func (e *executableSchema) Exec(ctx context.Context) graphql.ResponseHandler { - rc := graphql.GetOperationContext(ctx) - ec := executionContext{rc, e, 0, 0, make(chan graphql.DeferredResult)} + opCtx := graphql.GetOperationContext(ctx) + ec := executionContext{opCtx, e, 0, 0, make(chan graphql.DeferredResult)} inputUnmarshalMap := graphql.BuildUnmarshalerMap( ec.unmarshalInputFloatRange, ec.unmarshalInputIntRange, @@ -1800,7 +1865,7 @@ func (e *executableSchema) Exec(ctx context.Context) 
graphql.ResponseHandler { ) first := true - switch rc.Operation.Operation { + switch opCtx.Operation.Operation { case ast.Query: return func(ctx context.Context) *graphql.Response { var response graphql.Response @@ -1808,7 +1873,7 @@ func (e *executableSchema) Exec(ctx context.Context) graphql.ResponseHandler { if first { first = false ctx = graphql.WithUnmarshalerMap(ctx, inputUnmarshalMap) - data = ec._Query(ctx, rc.Operation.SelectionSet) + data = ec._Query(ctx, opCtx.Operation.SelectionSet) } else { if atomic.LoadInt32(&ec.pendingDeferred) > 0 { result := <-ec.deferredResults @@ -1838,7 +1903,7 @@ func (e *executableSchema) Exec(ctx context.Context) graphql.ResponseHandler { } first = false ctx = graphql.WithUnmarshalerMap(ctx, inputUnmarshalMap) - data := ec._Mutation(ctx, rc.Operation.SelectionSet) + data := ec._Mutation(ctx, opCtx.Operation.SelectionSet) var buf bytes.Buffer data.MarshalGQL(&buf) @@ -2090,6 +2155,15 @@ type NodeMetrics { metrics: [JobMetricWithName!]! } +type NodesResultList { + items: [NodeMetrics!]! + offset: Int + limit: Int + count: Int + totalNodes: Int + hasNextPage: Boolean +} + type ClusterSupport { cluster: String! subClusters: [String!]! @@ -2132,11 +2206,12 @@ type Query { jobsFootprints(filter: [JobFilter!], metrics: [String!]!): Footprints jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList! - jobsStatistics(filter: [JobFilter!], metrics: [String!], page: PageRequest, sortBy: SortByAggregate, groupBy: Aggregate): [JobsStatistics!]! + jobsStatistics(filter: [JobFilter!], metrics: [String!], page: PageRequest, sortBy: SortByAggregate, groupBy: Aggregate, numDurationBins: String, numMetricBins: Int): [JobsStatistics!]! rooflineHeatmap(filter: [JobFilter!]!, rows: Int!, cols: Int!, minX: Float!, minY: Float!, maxX: Float!, maxY: Float!): [[Float!]!]! nodeMetrics(cluster: String!, nodes: [String!], scopes: [MetricScope!], metrics: [String!], from: Time!, to: Time!): [NodeMetrics!]! + nodeMetricsList(cluster: String!, subCluster: String!, nodeFilter: String!, scopes: [MetricScope!], metrics: [String!], from: Time!, to: Time!, page: PageRequest, resolution: Int): NodesResultList! } type Mutation { @@ -2272,491 +2347,1550 @@ var parsedSchema = gqlparser.MustLoadSchema(sources...) func (ec *executionContext) field_Mutation_addTagsToJob_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { var err error args := map[string]interface{}{} - var arg0 string - if tmp, ok := rawArgs["job"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("job")) - arg0, err = ec.unmarshalNID2string(ctx, tmp) - if err != nil { - return nil, err - } + arg0, err := ec.field_Mutation_addTagsToJob_argsJob(ctx, rawArgs) + if err != nil { + return nil, err } args["job"] = arg0 - var arg1 []string - if tmp, ok := rawArgs["tagIds"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("tagIds")) - arg1, err = ec.unmarshalNID2ᚕstringᚄ(ctx, tmp) - if err != nil { - return nil, err - } + arg1, err := ec.field_Mutation_addTagsToJob_argsTagIds(ctx, rawArgs) + if err != nil { + return nil, err } args["tagIds"] = arg1 return args, nil } +func (ec *executionContext) field_Mutation_addTagsToJob_argsJob( + ctx context.Context, + rawArgs map[string]interface{}, +) (string, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. 
+ _, ok := rawArgs["job"] + if !ok { + var zeroVal string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("job")) + if tmp, ok := rawArgs["job"]; ok { + return ec.unmarshalNID2string(ctx, tmp) + } + + var zeroVal string + return zeroVal, nil +} + +func (ec *executionContext) field_Mutation_addTagsToJob_argsTagIds( + ctx context.Context, + rawArgs map[string]interface{}, +) ([]string, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["tagIds"] + if !ok { + var zeroVal []string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("tagIds")) + if tmp, ok := rawArgs["tagIds"]; ok { + return ec.unmarshalNID2ᚕstringᚄ(ctx, tmp) + } + + var zeroVal []string + return zeroVal, nil +} func (ec *executionContext) field_Mutation_createTag_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { var err error args := map[string]interface{}{} - var arg0 string - if tmp, ok := rawArgs["type"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("type")) - arg0, err = ec.unmarshalNString2string(ctx, tmp) - if err != nil { - return nil, err - } + arg0, err := ec.field_Mutation_createTag_argsType(ctx, rawArgs) + if err != nil { + return nil, err } args["type"] = arg0 - var arg1 string - if tmp, ok := rawArgs["name"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("name")) - arg1, err = ec.unmarshalNString2string(ctx, tmp) - if err != nil { - return nil, err - } + arg1, err := ec.field_Mutation_createTag_argsName(ctx, rawArgs) + if err != nil { + return nil, err } args["name"] = arg1 - var arg2 string - if tmp, ok := rawArgs["scope"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("scope")) - arg2, err = ec.unmarshalNString2string(ctx, tmp) - if err != nil { - return nil, err - } + arg2, err := ec.field_Mutation_createTag_argsScope(ctx, rawArgs) + if err != nil { + return nil, err } args["scope"] = arg2 return args, nil } +func (ec *executionContext) field_Mutation_createTag_argsType( + ctx context.Context, + rawArgs map[string]interface{}, +) (string, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["type"] + if !ok { + var zeroVal string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("type")) + if tmp, ok := rawArgs["type"]; ok { + return ec.unmarshalNString2string(ctx, tmp) + } + + var zeroVal string + return zeroVal, nil +} + +func (ec *executionContext) field_Mutation_createTag_argsName( + ctx context.Context, + rawArgs map[string]interface{}, +) (string, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["name"] + if !ok { + var zeroVal string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("name")) + if tmp, ok := rawArgs["name"]; ok { + return ec.unmarshalNString2string(ctx, tmp) + } + + var zeroVal string + return zeroVal, nil +} + +func (ec *executionContext) field_Mutation_createTag_argsScope( + ctx context.Context, + rawArgs map[string]interface{}, +) (string, error) { + // We won't call the directive if the argument is null. 
+ // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["scope"] + if !ok { + var zeroVal string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("scope")) + if tmp, ok := rawArgs["scope"]; ok { + return ec.unmarshalNString2string(ctx, tmp) + } + + var zeroVal string + return zeroVal, nil +} func (ec *executionContext) field_Mutation_deleteTag_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { var err error args := map[string]interface{}{} - var arg0 string - if tmp, ok := rawArgs["id"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("id")) - arg0, err = ec.unmarshalNID2string(ctx, tmp) - if err != nil { - return nil, err - } + arg0, err := ec.field_Mutation_deleteTag_argsID(ctx, rawArgs) + if err != nil { + return nil, err } args["id"] = arg0 return args, nil } +func (ec *executionContext) field_Mutation_deleteTag_argsID( + ctx context.Context, + rawArgs map[string]interface{}, +) (string, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["id"] + if !ok { + var zeroVal string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("id")) + if tmp, ok := rawArgs["id"]; ok { + return ec.unmarshalNID2string(ctx, tmp) + } + + var zeroVal string + return zeroVal, nil +} func (ec *executionContext) field_Mutation_removeTagsFromJob_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { var err error args := map[string]interface{}{} - var arg0 string - if tmp, ok := rawArgs["job"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("job")) - arg0, err = ec.unmarshalNID2string(ctx, tmp) - if err != nil { - return nil, err - } + arg0, err := ec.field_Mutation_removeTagsFromJob_argsJob(ctx, rawArgs) + if err != nil { + return nil, err } args["job"] = arg0 - var arg1 []string - if tmp, ok := rawArgs["tagIds"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("tagIds")) - arg1, err = ec.unmarshalNID2ᚕstringᚄ(ctx, tmp) - if err != nil { - return nil, err - } + arg1, err := ec.field_Mutation_removeTagsFromJob_argsTagIds(ctx, rawArgs) + if err != nil { + return nil, err } args["tagIds"] = arg1 return args, nil } +func (ec *executionContext) field_Mutation_removeTagsFromJob_argsJob( + ctx context.Context, + rawArgs map[string]interface{}, +) (string, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["job"] + if !ok { + var zeroVal string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("job")) + if tmp, ok := rawArgs["job"]; ok { + return ec.unmarshalNID2string(ctx, tmp) + } + + var zeroVal string + return zeroVal, nil +} + +func (ec *executionContext) field_Mutation_removeTagsFromJob_argsTagIds( + ctx context.Context, + rawArgs map[string]interface{}, +) ([]string, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. 
+ _, ok := rawArgs["tagIds"] + if !ok { + var zeroVal []string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("tagIds")) + if tmp, ok := rawArgs["tagIds"]; ok { + return ec.unmarshalNID2ᚕstringᚄ(ctx, tmp) + } + + var zeroVal []string + return zeroVal, nil +} func (ec *executionContext) field_Mutation_updateConfiguration_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { var err error args := map[string]interface{}{} - var arg0 string - if tmp, ok := rawArgs["name"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("name")) - arg0, err = ec.unmarshalNString2string(ctx, tmp) - if err != nil { - return nil, err - } + arg0, err := ec.field_Mutation_updateConfiguration_argsName(ctx, rawArgs) + if err != nil { + return nil, err } args["name"] = arg0 - var arg1 string - if tmp, ok := rawArgs["value"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("value")) - arg1, err = ec.unmarshalNString2string(ctx, tmp) - if err != nil { - return nil, err - } + arg1, err := ec.field_Mutation_updateConfiguration_argsValue(ctx, rawArgs) + if err != nil { + return nil, err } args["value"] = arg1 return args, nil } +func (ec *executionContext) field_Mutation_updateConfiguration_argsName( + ctx context.Context, + rawArgs map[string]interface{}, +) (string, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["name"] + if !ok { + var zeroVal string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("name")) + if tmp, ok := rawArgs["name"]; ok { + return ec.unmarshalNString2string(ctx, tmp) + } + + var zeroVal string + return zeroVal, nil +} + +func (ec *executionContext) field_Mutation_updateConfiguration_argsValue( + ctx context.Context, + rawArgs map[string]interface{}, +) (string, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["value"] + if !ok { + var zeroVal string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("value")) + if tmp, ok := rawArgs["value"]; ok { + return ec.unmarshalNString2string(ctx, tmp) + } + + var zeroVal string + return zeroVal, nil +} func (ec *executionContext) field_Query___type_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { var err error args := map[string]interface{}{} - var arg0 string - if tmp, ok := rawArgs["name"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("name")) - arg0, err = ec.unmarshalNString2string(ctx, tmp) - if err != nil { - return nil, err - } + arg0, err := ec.field_Query___type_argsName(ctx, rawArgs) + if err != nil { + return nil, err } args["name"] = arg0 return args, nil } +func (ec *executionContext) field_Query___type_argsName( + ctx context.Context, + rawArgs map[string]interface{}, +) (string, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. 
+ _, ok := rawArgs["name"] + if !ok { + var zeroVal string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("name")) + if tmp, ok := rawArgs["name"]; ok { + return ec.unmarshalNString2string(ctx, tmp) + } + + var zeroVal string + return zeroVal, nil +} func (ec *executionContext) field_Query_allocatedNodes_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { var err error args := map[string]interface{}{} - var arg0 string - if tmp, ok := rawArgs["cluster"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("cluster")) - arg0, err = ec.unmarshalNString2string(ctx, tmp) - if err != nil { - return nil, err - } + arg0, err := ec.field_Query_allocatedNodes_argsCluster(ctx, rawArgs) + if err != nil { + return nil, err } args["cluster"] = arg0 return args, nil } +func (ec *executionContext) field_Query_allocatedNodes_argsCluster( + ctx context.Context, + rawArgs map[string]interface{}, +) (string, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["cluster"] + if !ok { + var zeroVal string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("cluster")) + if tmp, ok := rawArgs["cluster"]; ok { + return ec.unmarshalNString2string(ctx, tmp) + } + + var zeroVal string + return zeroVal, nil +} func (ec *executionContext) field_Query_jobMetrics_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { var err error args := map[string]interface{}{} - var arg0 string - if tmp, ok := rawArgs["id"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("id")) - arg0, err = ec.unmarshalNID2string(ctx, tmp) - if err != nil { - return nil, err - } + arg0, err := ec.field_Query_jobMetrics_argsID(ctx, rawArgs) + if err != nil { + return nil, err } args["id"] = arg0 - var arg1 []string - if tmp, ok := rawArgs["metrics"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("metrics")) - arg1, err = ec.unmarshalOString2ᚕstringᚄ(ctx, tmp) - if err != nil { - return nil, err - } + arg1, err := ec.field_Query_jobMetrics_argsMetrics(ctx, rawArgs) + if err != nil { + return nil, err } args["metrics"] = arg1 - var arg2 []schema.MetricScope - if tmp, ok := rawArgs["scopes"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("scopes")) - arg2, err = ec.unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScopeᚄ(ctx, tmp) - if err != nil { - return nil, err - } + arg2, err := ec.field_Query_jobMetrics_argsScopes(ctx, rawArgs) + if err != nil { + return nil, err } args["scopes"] = arg2 - var arg3 *int - if tmp, ok := rawArgs["resolution"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("resolution")) - arg3, err = ec.unmarshalOInt2ᚖint(ctx, tmp) - if err != nil { - return nil, err - } + arg3, err := ec.field_Query_jobMetrics_argsResolution(ctx, rawArgs) + if err != nil { + return nil, err } args["resolution"] = arg3 return args, nil } +func (ec *executionContext) field_Query_jobMetrics_argsID( + ctx context.Context, + rawArgs map[string]interface{}, +) (string, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. 
+ _, ok := rawArgs["id"] + if !ok { + var zeroVal string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("id")) + if tmp, ok := rawArgs["id"]; ok { + return ec.unmarshalNID2string(ctx, tmp) + } + + var zeroVal string + return zeroVal, nil +} + +func (ec *executionContext) field_Query_jobMetrics_argsMetrics( + ctx context.Context, + rawArgs map[string]interface{}, +) ([]string, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["metrics"] + if !ok { + var zeroVal []string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("metrics")) + if tmp, ok := rawArgs["metrics"]; ok { + return ec.unmarshalOString2ᚕstringᚄ(ctx, tmp) + } + + var zeroVal []string + return zeroVal, nil +} + +func (ec *executionContext) field_Query_jobMetrics_argsScopes( + ctx context.Context, + rawArgs map[string]interface{}, +) ([]schema.MetricScope, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["scopes"] + if !ok { + var zeroVal []schema.MetricScope + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("scopes")) + if tmp, ok := rawArgs["scopes"]; ok { + return ec.unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScopeᚄ(ctx, tmp) + } + + var zeroVal []schema.MetricScope + return zeroVal, nil +} + +func (ec *executionContext) field_Query_jobMetrics_argsResolution( + ctx context.Context, + rawArgs map[string]interface{}, +) (*int, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["resolution"] + if !ok { + var zeroVal *int + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("resolution")) + if tmp, ok := rawArgs["resolution"]; ok { + return ec.unmarshalOInt2ᚖint(ctx, tmp) + } + + var zeroVal *int + return zeroVal, nil +} func (ec *executionContext) field_Query_job_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { var err error args := map[string]interface{}{} - var arg0 string - if tmp, ok := rawArgs["id"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("id")) - arg0, err = ec.unmarshalNID2string(ctx, tmp) - if err != nil { - return nil, err - } + arg0, err := ec.field_Query_job_argsID(ctx, rawArgs) + if err != nil { + return nil, err } args["id"] = arg0 return args, nil } +func (ec *executionContext) field_Query_job_argsID( + ctx context.Context, + rawArgs map[string]interface{}, +) (string, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. 
+ _, ok := rawArgs["id"] + if !ok { + var zeroVal string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("id")) + if tmp, ok := rawArgs["id"]; ok { + return ec.unmarshalNID2string(ctx, tmp) + } + + var zeroVal string + return zeroVal, nil +} func (ec *executionContext) field_Query_jobsFootprints_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { var err error args := map[string]interface{}{} - var arg0 []*model.JobFilter - if tmp, ok := rawArgs["filter"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("filter")) - arg0, err = ec.unmarshalOJobFilter2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobFilterᚄ(ctx, tmp) - if err != nil { - return nil, err - } + arg0, err := ec.field_Query_jobsFootprints_argsFilter(ctx, rawArgs) + if err != nil { + return nil, err } args["filter"] = arg0 - var arg1 []string - if tmp, ok := rawArgs["metrics"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("metrics")) - arg1, err = ec.unmarshalNString2ᚕstringᚄ(ctx, tmp) - if err != nil { - return nil, err - } + arg1, err := ec.field_Query_jobsFootprints_argsMetrics(ctx, rawArgs) + if err != nil { + return nil, err } args["metrics"] = arg1 return args, nil } +func (ec *executionContext) field_Query_jobsFootprints_argsFilter( + ctx context.Context, + rawArgs map[string]interface{}, +) ([]*model.JobFilter, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["filter"] + if !ok { + var zeroVal []*model.JobFilter + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("filter")) + if tmp, ok := rawArgs["filter"]; ok { + return ec.unmarshalOJobFilter2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobFilterᚄ(ctx, tmp) + } + + var zeroVal []*model.JobFilter + return zeroVal, nil +} + +func (ec *executionContext) field_Query_jobsFootprints_argsMetrics( + ctx context.Context, + rawArgs map[string]interface{}, +) ([]string, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. 
+ _, ok := rawArgs["metrics"] + if !ok { + var zeroVal []string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("metrics")) + if tmp, ok := rawArgs["metrics"]; ok { + return ec.unmarshalNString2ᚕstringᚄ(ctx, tmp) + } + + var zeroVal []string + return zeroVal, nil +} func (ec *executionContext) field_Query_jobsStatistics_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { var err error args := map[string]interface{}{} - var arg0 []*model.JobFilter - if tmp, ok := rawArgs["filter"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("filter")) - arg0, err = ec.unmarshalOJobFilter2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobFilterᚄ(ctx, tmp) - if err != nil { - return nil, err - } + arg0, err := ec.field_Query_jobsStatistics_argsFilter(ctx, rawArgs) + if err != nil { + return nil, err } args["filter"] = arg0 - var arg1 []string - if tmp, ok := rawArgs["metrics"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("metrics")) - arg1, err = ec.unmarshalOString2ᚕstringᚄ(ctx, tmp) - if err != nil { - return nil, err - } + arg1, err := ec.field_Query_jobsStatistics_argsMetrics(ctx, rawArgs) + if err != nil { + return nil, err } args["metrics"] = arg1 - var arg2 *model.PageRequest - if tmp, ok := rawArgs["page"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("page")) - arg2, err = ec.unmarshalOPageRequest2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐPageRequest(ctx, tmp) - if err != nil { - return nil, err - } + arg2, err := ec.field_Query_jobsStatistics_argsPage(ctx, rawArgs) + if err != nil { + return nil, err } args["page"] = arg2 - var arg3 *model.SortByAggregate - if tmp, ok := rawArgs["sortBy"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("sortBy")) - arg3, err = ec.unmarshalOSortByAggregate2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐSortByAggregate(ctx, tmp) - if err != nil { - return nil, err - } + arg3, err := ec.field_Query_jobsStatistics_argsSortBy(ctx, rawArgs) + if err != nil { + return nil, err } args["sortBy"] = arg3 - var arg4 *model.Aggregate - if tmp, ok := rawArgs["groupBy"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("groupBy")) - arg4, err = ec.unmarshalOAggregate2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐAggregate(ctx, tmp) - if err != nil { - return nil, err - } + arg4, err := ec.field_Query_jobsStatistics_argsGroupBy(ctx, rawArgs) + if err != nil { + return nil, err } args["groupBy"] = arg4 + arg5, err := ec.field_Query_jobsStatistics_argsNumDurationBins(ctx, rawArgs) + if err != nil { + return nil, err + } + args["numDurationBins"] = arg5 + arg6, err := ec.field_Query_jobsStatistics_argsNumMetricBins(ctx, rawArgs) + if err != nil { + return nil, err + } + args["numMetricBins"] = arg6 return args, nil } +func (ec *executionContext) field_Query_jobsStatistics_argsFilter( + ctx context.Context, + rawArgs map[string]interface{}, +) ([]*model.JobFilter, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. 
+ _, ok := rawArgs["filter"] + if !ok { + var zeroVal []*model.JobFilter + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("filter")) + if tmp, ok := rawArgs["filter"]; ok { + return ec.unmarshalOJobFilter2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobFilterᚄ(ctx, tmp) + } + + var zeroVal []*model.JobFilter + return zeroVal, nil +} + +func (ec *executionContext) field_Query_jobsStatistics_argsMetrics( + ctx context.Context, + rawArgs map[string]interface{}, +) ([]string, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["metrics"] + if !ok { + var zeroVal []string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("metrics")) + if tmp, ok := rawArgs["metrics"]; ok { + return ec.unmarshalOString2ᚕstringᚄ(ctx, tmp) + } + + var zeroVal []string + return zeroVal, nil +} + +func (ec *executionContext) field_Query_jobsStatistics_argsPage( + ctx context.Context, + rawArgs map[string]interface{}, +) (*model.PageRequest, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["page"] + if !ok { + var zeroVal *model.PageRequest + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("page")) + if tmp, ok := rawArgs["page"]; ok { + return ec.unmarshalOPageRequest2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐPageRequest(ctx, tmp) + } + + var zeroVal *model.PageRequest + return zeroVal, nil +} + +func (ec *executionContext) field_Query_jobsStatistics_argsSortBy( + ctx context.Context, + rawArgs map[string]interface{}, +) (*model.SortByAggregate, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["sortBy"] + if !ok { + var zeroVal *model.SortByAggregate + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("sortBy")) + if tmp, ok := rawArgs["sortBy"]; ok { + return ec.unmarshalOSortByAggregate2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐSortByAggregate(ctx, tmp) + } + + var zeroVal *model.SortByAggregate + return zeroVal, nil +} + +func (ec *executionContext) field_Query_jobsStatistics_argsGroupBy( + ctx context.Context, + rawArgs map[string]interface{}, +) (*model.Aggregate, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["groupBy"] + if !ok { + var zeroVal *model.Aggregate + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("groupBy")) + if tmp, ok := rawArgs["groupBy"]; ok { + return ec.unmarshalOAggregate2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐAggregate(ctx, tmp) + } + + var zeroVal *model.Aggregate + return zeroVal, nil +} + +func (ec *executionContext) field_Query_jobsStatistics_argsNumDurationBins( + ctx context.Context, + rawArgs map[string]interface{}, +) (*string, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. 
+ _, ok := rawArgs["numDurationBins"] + if !ok { + var zeroVal *string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("numDurationBins")) + if tmp, ok := rawArgs["numDurationBins"]; ok { + return ec.unmarshalOString2ᚖstring(ctx, tmp) + } + + var zeroVal *string + return zeroVal, nil +} + +func (ec *executionContext) field_Query_jobsStatistics_argsNumMetricBins( + ctx context.Context, + rawArgs map[string]interface{}, +) (*int, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["numMetricBins"] + if !ok { + var zeroVal *int + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("numMetricBins")) + if tmp, ok := rawArgs["numMetricBins"]; ok { + return ec.unmarshalOInt2ᚖint(ctx, tmp) + } + + var zeroVal *int + return zeroVal, nil +} func (ec *executionContext) field_Query_jobs_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { var err error args := map[string]interface{}{} - var arg0 []*model.JobFilter - if tmp, ok := rawArgs["filter"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("filter")) - arg0, err = ec.unmarshalOJobFilter2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobFilterᚄ(ctx, tmp) - if err != nil { - return nil, err - } + arg0, err := ec.field_Query_jobs_argsFilter(ctx, rawArgs) + if err != nil { + return nil, err } args["filter"] = arg0 - var arg1 *model.PageRequest - if tmp, ok := rawArgs["page"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("page")) - arg1, err = ec.unmarshalOPageRequest2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐPageRequest(ctx, tmp) - if err != nil { - return nil, err - } + arg1, err := ec.field_Query_jobs_argsPage(ctx, rawArgs) + if err != nil { + return nil, err } args["page"] = arg1 - var arg2 *model.OrderByInput - if tmp, ok := rawArgs["order"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("order")) - arg2, err = ec.unmarshalOOrderByInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐOrderByInput(ctx, tmp) - if err != nil { - return nil, err - } + arg2, err := ec.field_Query_jobs_argsOrder(ctx, rawArgs) + if err != nil { + return nil, err } args["order"] = arg2 return args, nil } +func (ec *executionContext) field_Query_jobs_argsFilter( + ctx context.Context, + rawArgs map[string]interface{}, +) ([]*model.JobFilter, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["filter"] + if !ok { + var zeroVal []*model.JobFilter + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("filter")) + if tmp, ok := rawArgs["filter"]; ok { + return ec.unmarshalOJobFilter2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobFilterᚄ(ctx, tmp) + } + + var zeroVal []*model.JobFilter + return zeroVal, nil +} + +func (ec *executionContext) field_Query_jobs_argsPage( + ctx context.Context, + rawArgs map[string]interface{}, +) (*model.PageRequest, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. 
+ _, ok := rawArgs["page"] + if !ok { + var zeroVal *model.PageRequest + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("page")) + if tmp, ok := rawArgs["page"]; ok { + return ec.unmarshalOPageRequest2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐPageRequest(ctx, tmp) + } + + var zeroVal *model.PageRequest + return zeroVal, nil +} + +func (ec *executionContext) field_Query_jobs_argsOrder( + ctx context.Context, + rawArgs map[string]interface{}, +) (*model.OrderByInput, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["order"] + if !ok { + var zeroVal *model.OrderByInput + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("order")) + if tmp, ok := rawArgs["order"]; ok { + return ec.unmarshalOOrderByInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐOrderByInput(ctx, tmp) + } + + var zeroVal *model.OrderByInput + return zeroVal, nil +} + +func (ec *executionContext) field_Query_nodeMetricsList_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + arg0, err := ec.field_Query_nodeMetricsList_argsCluster(ctx, rawArgs) + if err != nil { + return nil, err + } + args["cluster"] = arg0 + arg1, err := ec.field_Query_nodeMetricsList_argsSubCluster(ctx, rawArgs) + if err != nil { + return nil, err + } + args["subCluster"] = arg1 + arg2, err := ec.field_Query_nodeMetricsList_argsNodeFilter(ctx, rawArgs) + if err != nil { + return nil, err + } + args["nodeFilter"] = arg2 + arg3, err := ec.field_Query_nodeMetricsList_argsScopes(ctx, rawArgs) + if err != nil { + return nil, err + } + args["scopes"] = arg3 + arg4, err := ec.field_Query_nodeMetricsList_argsMetrics(ctx, rawArgs) + if err != nil { + return nil, err + } + args["metrics"] = arg4 + arg5, err := ec.field_Query_nodeMetricsList_argsFrom(ctx, rawArgs) + if err != nil { + return nil, err + } + args["from"] = arg5 + arg6, err := ec.field_Query_nodeMetricsList_argsTo(ctx, rawArgs) + if err != nil { + return nil, err + } + args["to"] = arg6 + arg7, err := ec.field_Query_nodeMetricsList_argsPage(ctx, rawArgs) + if err != nil { + return nil, err + } + args["page"] = arg7 + arg8, err := ec.field_Query_nodeMetricsList_argsResolution(ctx, rawArgs) + if err != nil { + return nil, err + } + args["resolution"] = arg8 + return args, nil +} +func (ec *executionContext) field_Query_nodeMetricsList_argsCluster( + ctx context.Context, + rawArgs map[string]interface{}, +) (string, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["cluster"] + if !ok { + var zeroVal string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("cluster")) + if tmp, ok := rawArgs["cluster"]; ok { + return ec.unmarshalNString2string(ctx, tmp) + } + + var zeroVal string + return zeroVal, nil +} + +func (ec *executionContext) field_Query_nodeMetricsList_argsSubCluster( + ctx context.Context, + rawArgs map[string]interface{}, +) (string, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. 
+ _, ok := rawArgs["subCluster"] + if !ok { + var zeroVal string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("subCluster")) + if tmp, ok := rawArgs["subCluster"]; ok { + return ec.unmarshalNString2string(ctx, tmp) + } + + var zeroVal string + return zeroVal, nil +} + +func (ec *executionContext) field_Query_nodeMetricsList_argsNodeFilter( + ctx context.Context, + rawArgs map[string]interface{}, +) (string, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["nodeFilter"] + if !ok { + var zeroVal string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("nodeFilter")) + if tmp, ok := rawArgs["nodeFilter"]; ok { + return ec.unmarshalNString2string(ctx, tmp) + } + + var zeroVal string + return zeroVal, nil +} + +func (ec *executionContext) field_Query_nodeMetricsList_argsScopes( + ctx context.Context, + rawArgs map[string]interface{}, +) ([]schema.MetricScope, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["scopes"] + if !ok { + var zeroVal []schema.MetricScope + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("scopes")) + if tmp, ok := rawArgs["scopes"]; ok { + return ec.unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScopeᚄ(ctx, tmp) + } + + var zeroVal []schema.MetricScope + return zeroVal, nil +} + +func (ec *executionContext) field_Query_nodeMetricsList_argsMetrics( + ctx context.Context, + rawArgs map[string]interface{}, +) ([]string, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["metrics"] + if !ok { + var zeroVal []string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("metrics")) + if tmp, ok := rawArgs["metrics"]; ok { + return ec.unmarshalOString2ᚕstringᚄ(ctx, tmp) + } + + var zeroVal []string + return zeroVal, nil +} + +func (ec *executionContext) field_Query_nodeMetricsList_argsFrom( + ctx context.Context, + rawArgs map[string]interface{}, +) (time.Time, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["from"] + if !ok { + var zeroVal time.Time + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("from")) + if tmp, ok := rawArgs["from"]; ok { + return ec.unmarshalNTime2timeᚐTime(ctx, tmp) + } + + var zeroVal time.Time + return zeroVal, nil +} + +func (ec *executionContext) field_Query_nodeMetricsList_argsTo( + ctx context.Context, + rawArgs map[string]interface{}, +) (time.Time, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. 
+ _, ok := rawArgs["to"] + if !ok { + var zeroVal time.Time + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("to")) + if tmp, ok := rawArgs["to"]; ok { + return ec.unmarshalNTime2timeᚐTime(ctx, tmp) + } + + var zeroVal time.Time + return zeroVal, nil +} + +func (ec *executionContext) field_Query_nodeMetricsList_argsPage( + ctx context.Context, + rawArgs map[string]interface{}, +) (*model.PageRequest, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["page"] + if !ok { + var zeroVal *model.PageRequest + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("page")) + if tmp, ok := rawArgs["page"]; ok { + return ec.unmarshalOPageRequest2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐPageRequest(ctx, tmp) + } + + var zeroVal *model.PageRequest + return zeroVal, nil +} + +func (ec *executionContext) field_Query_nodeMetricsList_argsResolution( + ctx context.Context, + rawArgs map[string]interface{}, +) (*int, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["resolution"] + if !ok { + var zeroVal *int + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("resolution")) + if tmp, ok := rawArgs["resolution"]; ok { + return ec.unmarshalOInt2ᚖint(ctx, tmp) + } + + var zeroVal *int + return zeroVal, nil +} func (ec *executionContext) field_Query_nodeMetrics_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { var err error args := map[string]interface{}{} - var arg0 string - if tmp, ok := rawArgs["cluster"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("cluster")) - arg0, err = ec.unmarshalNString2string(ctx, tmp) - if err != nil { - return nil, err - } + arg0, err := ec.field_Query_nodeMetrics_argsCluster(ctx, rawArgs) + if err != nil { + return nil, err } args["cluster"] = arg0 - var arg1 []string - if tmp, ok := rawArgs["nodes"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("nodes")) - arg1, err = ec.unmarshalOString2ᚕstringᚄ(ctx, tmp) - if err != nil { - return nil, err - } + arg1, err := ec.field_Query_nodeMetrics_argsNodes(ctx, rawArgs) + if err != nil { + return nil, err } args["nodes"] = arg1 - var arg2 []schema.MetricScope - if tmp, ok := rawArgs["scopes"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("scopes")) - arg2, err = ec.unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScopeᚄ(ctx, tmp) - if err != nil { - return nil, err - } + arg2, err := ec.field_Query_nodeMetrics_argsScopes(ctx, rawArgs) + if err != nil { + return nil, err } args["scopes"] = arg2 - var arg3 []string - if tmp, ok := rawArgs["metrics"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("metrics")) - arg3, err = ec.unmarshalOString2ᚕstringᚄ(ctx, tmp) - if err != nil { - return nil, err - } + arg3, err := ec.field_Query_nodeMetrics_argsMetrics(ctx, rawArgs) + if err != nil { + return nil, err } args["metrics"] = arg3 - var arg4 time.Time - if tmp, ok := rawArgs["from"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("from")) - arg4, err = ec.unmarshalNTime2timeᚐTime(ctx, tmp) - if err != nil { - return nil, err - } + arg4, err := 
ec.field_Query_nodeMetrics_argsFrom(ctx, rawArgs) + if err != nil { + return nil, err } args["from"] = arg4 - var arg5 time.Time - if tmp, ok := rawArgs["to"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("to")) - arg5, err = ec.unmarshalNTime2timeᚐTime(ctx, tmp) - if err != nil { - return nil, err - } + arg5, err := ec.field_Query_nodeMetrics_argsTo(ctx, rawArgs) + if err != nil { + return nil, err } args["to"] = arg5 return args, nil } +func (ec *executionContext) field_Query_nodeMetrics_argsCluster( + ctx context.Context, + rawArgs map[string]interface{}, +) (string, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["cluster"] + if !ok { + var zeroVal string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("cluster")) + if tmp, ok := rawArgs["cluster"]; ok { + return ec.unmarshalNString2string(ctx, tmp) + } + + var zeroVal string + return zeroVal, nil +} + +func (ec *executionContext) field_Query_nodeMetrics_argsNodes( + ctx context.Context, + rawArgs map[string]interface{}, +) ([]string, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["nodes"] + if !ok { + var zeroVal []string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("nodes")) + if tmp, ok := rawArgs["nodes"]; ok { + return ec.unmarshalOString2ᚕstringᚄ(ctx, tmp) + } + + var zeroVal []string + return zeroVal, nil +} + +func (ec *executionContext) field_Query_nodeMetrics_argsScopes( + ctx context.Context, + rawArgs map[string]interface{}, +) ([]schema.MetricScope, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["scopes"] + if !ok { + var zeroVal []schema.MetricScope + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("scopes")) + if tmp, ok := rawArgs["scopes"]; ok { + return ec.unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScopeᚄ(ctx, tmp) + } + + var zeroVal []schema.MetricScope + return zeroVal, nil +} + +func (ec *executionContext) field_Query_nodeMetrics_argsMetrics( + ctx context.Context, + rawArgs map[string]interface{}, +) ([]string, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["metrics"] + if !ok { + var zeroVal []string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("metrics")) + if tmp, ok := rawArgs["metrics"]; ok { + return ec.unmarshalOString2ᚕstringᚄ(ctx, tmp) + } + + var zeroVal []string + return zeroVal, nil +} + +func (ec *executionContext) field_Query_nodeMetrics_argsFrom( + ctx context.Context, + rawArgs map[string]interface{}, +) (time.Time, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. 
+ _, ok := rawArgs["from"] + if !ok { + var zeroVal time.Time + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("from")) + if tmp, ok := rawArgs["from"]; ok { + return ec.unmarshalNTime2timeᚐTime(ctx, tmp) + } + + var zeroVal time.Time + return zeroVal, nil +} + +func (ec *executionContext) field_Query_nodeMetrics_argsTo( + ctx context.Context, + rawArgs map[string]interface{}, +) (time.Time, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["to"] + if !ok { + var zeroVal time.Time + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("to")) + if tmp, ok := rawArgs["to"]; ok { + return ec.unmarshalNTime2timeᚐTime(ctx, tmp) + } + + var zeroVal time.Time + return zeroVal, nil +} func (ec *executionContext) field_Query_rooflineHeatmap_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { var err error args := map[string]interface{}{} - var arg0 []*model.JobFilter - if tmp, ok := rawArgs["filter"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("filter")) - arg0, err = ec.unmarshalNJobFilter2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobFilterᚄ(ctx, tmp) - if err != nil { - return nil, err - } + arg0, err := ec.field_Query_rooflineHeatmap_argsFilter(ctx, rawArgs) + if err != nil { + return nil, err } args["filter"] = arg0 - var arg1 int - if tmp, ok := rawArgs["rows"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("rows")) - arg1, err = ec.unmarshalNInt2int(ctx, tmp) - if err != nil { - return nil, err - } + arg1, err := ec.field_Query_rooflineHeatmap_argsRows(ctx, rawArgs) + if err != nil { + return nil, err } args["rows"] = arg1 - var arg2 int - if tmp, ok := rawArgs["cols"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("cols")) - arg2, err = ec.unmarshalNInt2int(ctx, tmp) - if err != nil { - return nil, err - } + arg2, err := ec.field_Query_rooflineHeatmap_argsCols(ctx, rawArgs) + if err != nil { + return nil, err } args["cols"] = arg2 - var arg3 float64 - if tmp, ok := rawArgs["minX"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("minX")) - arg3, err = ec.unmarshalNFloat2float64(ctx, tmp) - if err != nil { - return nil, err - } + arg3, err := ec.field_Query_rooflineHeatmap_argsMinX(ctx, rawArgs) + if err != nil { + return nil, err } args["minX"] = arg3 - var arg4 float64 - if tmp, ok := rawArgs["minY"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("minY")) - arg4, err = ec.unmarshalNFloat2float64(ctx, tmp) - if err != nil { - return nil, err - } + arg4, err := ec.field_Query_rooflineHeatmap_argsMinY(ctx, rawArgs) + if err != nil { + return nil, err } args["minY"] = arg4 - var arg5 float64 - if tmp, ok := rawArgs["maxX"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("maxX")) - arg5, err = ec.unmarshalNFloat2float64(ctx, tmp) - if err != nil { - return nil, err - } + arg5, err := ec.field_Query_rooflineHeatmap_argsMaxX(ctx, rawArgs) + if err != nil { + return nil, err } args["maxX"] = arg5 - var arg6 float64 - if tmp, ok := rawArgs["maxY"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("maxY")) - arg6, err = ec.unmarshalNFloat2float64(ctx, tmp) - if err != nil { - return nil, err - } + arg6, err := ec.field_Query_rooflineHeatmap_argsMaxY(ctx, rawArgs) + if err != nil { 
+ return nil, err } args["maxY"] = arg6 return args, nil } +func (ec *executionContext) field_Query_rooflineHeatmap_argsFilter( + ctx context.Context, + rawArgs map[string]interface{}, +) ([]*model.JobFilter, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["filter"] + if !ok { + var zeroVal []*model.JobFilter + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("filter")) + if tmp, ok := rawArgs["filter"]; ok { + return ec.unmarshalNJobFilter2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobFilterᚄ(ctx, tmp) + } + + var zeroVal []*model.JobFilter + return zeroVal, nil +} + +func (ec *executionContext) field_Query_rooflineHeatmap_argsRows( + ctx context.Context, + rawArgs map[string]interface{}, +) (int, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["rows"] + if !ok { + var zeroVal int + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("rows")) + if tmp, ok := rawArgs["rows"]; ok { + return ec.unmarshalNInt2int(ctx, tmp) + } + + var zeroVal int + return zeroVal, nil +} + +func (ec *executionContext) field_Query_rooflineHeatmap_argsCols( + ctx context.Context, + rawArgs map[string]interface{}, +) (int, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["cols"] + if !ok { + var zeroVal int + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("cols")) + if tmp, ok := rawArgs["cols"]; ok { + return ec.unmarshalNInt2int(ctx, tmp) + } + + var zeroVal int + return zeroVal, nil +} + +func (ec *executionContext) field_Query_rooflineHeatmap_argsMinX( + ctx context.Context, + rawArgs map[string]interface{}, +) (float64, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["minX"] + if !ok { + var zeroVal float64 + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("minX")) + if tmp, ok := rawArgs["minX"]; ok { + return ec.unmarshalNFloat2float64(ctx, tmp) + } + + var zeroVal float64 + return zeroVal, nil +} + +func (ec *executionContext) field_Query_rooflineHeatmap_argsMinY( + ctx context.Context, + rawArgs map[string]interface{}, +) (float64, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["minY"] + if !ok { + var zeroVal float64 + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("minY")) + if tmp, ok := rawArgs["minY"]; ok { + return ec.unmarshalNFloat2float64(ctx, tmp) + } + + var zeroVal float64 + return zeroVal, nil +} + +func (ec *executionContext) field_Query_rooflineHeatmap_argsMaxX( + ctx context.Context, + rawArgs map[string]interface{}, +) (float64, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. 
+ _, ok := rawArgs["maxX"] + if !ok { + var zeroVal float64 + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("maxX")) + if tmp, ok := rawArgs["maxX"]; ok { + return ec.unmarshalNFloat2float64(ctx, tmp) + } + + var zeroVal float64 + return zeroVal, nil +} + +func (ec *executionContext) field_Query_rooflineHeatmap_argsMaxY( + ctx context.Context, + rawArgs map[string]interface{}, +) (float64, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["maxY"] + if !ok { + var zeroVal float64 + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("maxY")) + if tmp, ok := rawArgs["maxY"]; ok { + return ec.unmarshalNFloat2float64(ctx, tmp) + } + + var zeroVal float64 + return zeroVal, nil +} func (ec *executionContext) field_Query_user_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { var err error args := map[string]interface{}{} - var arg0 string - if tmp, ok := rawArgs["username"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("username")) - arg0, err = ec.unmarshalNString2string(ctx, tmp) - if err != nil { - return nil, err - } + arg0, err := ec.field_Query_user_argsUsername(ctx, rawArgs) + if err != nil { + return nil, err } args["username"] = arg0 return args, nil } +func (ec *executionContext) field_Query_user_argsUsername( + ctx context.Context, + rawArgs map[string]interface{}, +) (string, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["username"] + if !ok { + var zeroVal string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("username")) + if tmp, ok := rawArgs["username"]; ok { + return ec.unmarshalNString2string(ctx, tmp) + } + + var zeroVal string + return zeroVal, nil +} func (ec *executionContext) field___Type_enumValues_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { var err error args := map[string]interface{}{} - var arg0 bool - if tmp, ok := rawArgs["includeDeprecated"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("includeDeprecated")) - arg0, err = ec.unmarshalOBoolean2bool(ctx, tmp) - if err != nil { - return nil, err - } + arg0, err := ec.field___Type_enumValues_argsIncludeDeprecated(ctx, rawArgs) + if err != nil { + return nil, err } args["includeDeprecated"] = arg0 return args, nil } +func (ec *executionContext) field___Type_enumValues_argsIncludeDeprecated( + ctx context.Context, + rawArgs map[string]interface{}, +) (bool, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. 
+ _, ok := rawArgs["includeDeprecated"] + if !ok { + var zeroVal bool + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("includeDeprecated")) + if tmp, ok := rawArgs["includeDeprecated"]; ok { + return ec.unmarshalOBoolean2bool(ctx, tmp) + } + + var zeroVal bool + return zeroVal, nil +} func (ec *executionContext) field___Type_fields_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { var err error args := map[string]interface{}{} - var arg0 bool - if tmp, ok := rawArgs["includeDeprecated"]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("includeDeprecated")) - arg0, err = ec.unmarshalOBoolean2bool(ctx, tmp) - if err != nil { - return nil, err - } + arg0, err := ec.field___Type_fields_argsIncludeDeprecated(ctx, rawArgs) + if err != nil { + return nil, err } args["includeDeprecated"] = arg0 return args, nil } +func (ec *executionContext) field___Type_fields_argsIncludeDeprecated( + ctx context.Context, + rawArgs map[string]interface{}, +) (bool, error) { + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs["includeDeprecated"] + if !ok { + var zeroVal bool + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("includeDeprecated")) + if tmp, ok := rawArgs["includeDeprecated"]; ok { + return ec.unmarshalOBoolean2bool(ctx, tmp) + } + + var zeroVal bool + return zeroVal, nil +} // endregion ***************************** args.gotpl ***************************** @@ -8427,6 +9561,263 @@ func (ec *executionContext) fieldContext_NodeMetrics_metrics(_ context.Context, return fc, nil } +func (ec *executionContext) _NodesResultList_items(ctx context.Context, field graphql.CollectedField, obj *model.NodesResultList) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_NodesResultList_items(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Items, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]*model.NodeMetrics) + fc.Result = res + return ec.marshalNNodeMetrics2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodeMetricsᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_NodesResultList_items(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "NodesResultList", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "host": + return ec.fieldContext_NodeMetrics_host(ctx, field) + case "subCluster": + return ec.fieldContext_NodeMetrics_subCluster(ctx, field) + case "metrics": + return ec.fieldContext_NodeMetrics_metrics(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type NodeMetrics", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) 
_NodesResultList_offset(ctx context.Context, field graphql.CollectedField, obj *model.NodesResultList) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_NodesResultList_offset(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Offset, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*int) + fc.Result = res + return ec.marshalOInt2ᚖint(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_NodesResultList_offset(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "NodesResultList", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Int does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _NodesResultList_limit(ctx context.Context, field graphql.CollectedField, obj *model.NodesResultList) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_NodesResultList_limit(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Limit, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*int) + fc.Result = res + return ec.marshalOInt2ᚖint(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_NodesResultList_limit(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "NodesResultList", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Int does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _NodesResultList_count(ctx context.Context, field graphql.CollectedField, obj *model.NodesResultList) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_NodesResultList_count(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Count, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*int) + fc.Result = res + return ec.marshalOInt2ᚖint(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_NodesResultList_count(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = 
&graphql.FieldContext{ + Object: "NodesResultList", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Int does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _NodesResultList_totalNodes(ctx context.Context, field graphql.CollectedField, obj *model.NodesResultList) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_NodesResultList_totalNodes(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.TotalNodes, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*int) + fc.Result = res + return ec.marshalOInt2ᚖint(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_NodesResultList_totalNodes(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "NodesResultList", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Int does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _NodesResultList_hasNextPage(ctx context.Context, field graphql.CollectedField, obj *model.NodesResultList) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_NodesResultList_hasNextPage(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.HasNextPage, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*bool) + fc.Result = res + return ec.marshalOBoolean2ᚖbool(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_NodesResultList_hasNextPage(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "NodesResultList", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Boolean does not have child fields") + }, + } + return fc, nil +} + func (ec *executionContext) _Query_clusters(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { fc, err := ec.fieldContext_Query_clusters(ctx, field) if err != nil { @@ -9018,7 +10409,7 @@ func (ec *executionContext) _Query_jobsStatistics(ctx context.Context, field gra }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return ec.resolvers.Query().JobsStatistics(rctx, fc.Args["filter"].([]*model.JobFilter), fc.Args["metrics"].([]string), 
fc.Args["page"].(*model.PageRequest), fc.Args["sortBy"].(*model.SortByAggregate), fc.Args["groupBy"].(*model.Aggregate)) + return ec.resolvers.Query().JobsStatistics(rctx, fc.Args["filter"].([]*model.JobFilter), fc.Args["metrics"].([]string), fc.Args["page"].(*model.PageRequest), fc.Args["sortBy"].(*model.SortByAggregate), fc.Args["groupBy"].(*model.Aggregate), fc.Args["numDurationBins"].(*string), fc.Args["numMetricBins"].(*int)) }) if err != nil { ec.Error(ctx, err) @@ -9213,6 +10604,75 @@ func (ec *executionContext) fieldContext_Query_nodeMetrics(ctx context.Context, return fc, nil } +func (ec *executionContext) _Query_nodeMetricsList(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Query_nodeMetricsList(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().NodeMetricsList(rctx, fc.Args["cluster"].(string), fc.Args["subCluster"].(string), fc.Args["nodeFilter"].(string), fc.Args["scopes"].([]schema.MetricScope), fc.Args["metrics"].([]string), fc.Args["from"].(time.Time), fc.Args["to"].(time.Time), fc.Args["page"].(*model.PageRequest), fc.Args["resolution"].(*int)) + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*model.NodesResultList) + fc.Result = res + return ec.marshalNNodesResultList2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodesResultList(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Query_nodeMetricsList(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Query", + Field: field, + IsMethod: true, + IsResolver: true, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "items": + return ec.fieldContext_NodesResultList_items(ctx, field) + case "offset": + return ec.fieldContext_NodesResultList_offset(ctx, field) + case "limit": + return ec.fieldContext_NodesResultList_limit(ctx, field) + case "count": + return ec.fieldContext_NodesResultList_count(ctx, field) + case "totalNodes": + return ec.fieldContext_NodesResultList_totalNodes(ctx, field) + case "hasNextPage": + return ec.fieldContext_NodesResultList_hasNextPage(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type NodesResultList", field.Name) + }, + } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Query_nodeMetricsList_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } + return fc, nil +} + func (ec *executionContext) _Query___type(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { fc, err := ec.fieldContext_Query___type(ctx, field) if err != nil { @@ -15526,6 +16986,55 @@ func (ec *executionContext) _NodeMetrics(ctx context.Context, sel ast.SelectionS return out } +var nodesResultListImplementors = []string{"NodesResultList"} + +func (ec 
*executionContext) _NodesResultList(ctx context.Context, sel ast.SelectionSet, obj *model.NodesResultList) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, nodesResultListImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("NodesResultList") + case "items": + out.Values[i] = ec._NodesResultList_items(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "offset": + out.Values[i] = ec._NodesResultList_offset(ctx, field, obj) + case "limit": + out.Values[i] = ec._NodesResultList_limit(ctx, field, obj) + case "count": + out.Values[i] = ec._NodesResultList_count(ctx, field, obj) + case "totalNodes": + out.Values[i] = ec._NodesResultList_totalNodes(ctx, field, obj) + case "hasNextPage": + out.Values[i] = ec._NodesResultList_hasNextPage(ctx, field, obj) + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + var queryImplementors = []string{"Query"} func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) graphql.Marshaler { @@ -15799,6 +17308,28 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) + case "nodeMetricsList": + field := field + + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_nodeMetricsList(ctx, field) + if res == graphql.Null { + atomic.AddUint32(&fs.Invalids, 1) + } + return res + } + + rrm := func(ctx context.Context) graphql.Marshaler { + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "__type": out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) { @@ -17844,6 +19375,20 @@ func (ec *executionContext) marshalNNodeMetrics2ᚖgithubᚗcomᚋClusterCockpit return ec._NodeMetrics(ctx, sel, v) } +func (ec *executionContext) marshalNNodesResultList2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodesResultList(ctx context.Context, sel ast.SelectionSet, v model.NodesResultList) graphql.Marshaler { + return ec._NodesResultList(ctx, sel, &v) +} + +func (ec *executionContext) marshalNNodesResultList2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodesResultList(ctx context.Context, sel ast.SelectionSet, v *model.NodesResultList) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec._NodesResultList(ctx, sel, v) +} + func (ec *executionContext) unmarshalNNullableFloat2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloat(ctx 
context.Context, v interface{}) (schema.Float, error) { var res schema.Float err := res.UnmarshalGQL(v) diff --git a/internal/graph/model/models_gen.go b/internal/graph/model/models_gen.go index 7f0db5f..fd24897 100644 --- a/internal/graph/model/models_gen.go +++ b/internal/graph/model/models_gen.go @@ -148,6 +148,15 @@ type NodeMetrics struct { Metrics []*JobMetricWithName `json:"metrics"` } +type NodesResultList struct { + Items []*NodeMetrics `json:"items"` + Offset *int `json:"offset,omitempty"` + Limit *int `json:"limit,omitempty"` + Count *int `json:"count,omitempty"` + TotalNodes *int `json:"totalNodes,omitempty"` + HasNextPage *bool `json:"hasNextPage,omitempty"` +} + type OrderByInput struct { Field string `json:"field"` Type string `json:"type"` diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go index b529f2c..d13a29b 100644 --- a/internal/graph/schema.resolvers.go +++ b/internal/graph/schema.resolvers.go @@ -2,7 +2,7 @@ package graph // This file will be automatically regenerated based on the schema, any resolver implementations // will be copied through when generating and any unknown code will be moved to the end. -// Code generated by github.com/99designs/gqlgen version v0.17.49 +// Code generated by github.com/99designs/gqlgen version v0.17.57 import ( "context" @@ -354,10 +354,14 @@ func (r *queryResolver) Jobs(ctx context.Context, filter []*model.JobFilter, pag } // JobsStatistics is the resolver for the jobsStatistics field. -func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate) ([]*model.JobsStatistics, error) { +func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate, numDurationBins *string, numMetricBins *int) ([]*model.JobsStatistics, error) { var err error var stats []*model.JobsStatistics + // Top Level Defaults + var defaultDurationBins string = "1h" + var defaultMetricBins int = 10 + if requireField(ctx, "totalJobs") || requireField(ctx, "totalWalltime") || requireField(ctx, "totalNodes") || requireField(ctx, "totalCores") || requireField(ctx, "totalAccs") || requireField(ctx, "totalNodeHours") || requireField(ctx, "totalCoreHours") || requireField(ctx, "totalAccHours") { if groupBy == nil { @@ -391,8 +395,13 @@ func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobF } if requireField(ctx, "histDuration") || requireField(ctx, "histNumNodes") || requireField(ctx, "histNumCores") || requireField(ctx, "histNumAccs") { + + if numDurationBins == nil { + numDurationBins = &defaultDurationBins + } + if groupBy == nil { - stats[0], err = r.Repo.AddHistograms(ctx, filter, stats[0]) + stats[0], err = r.Repo.AddHistograms(ctx, filter, stats[0], numDurationBins) if err != nil { return nil, err } @@ -402,8 +411,13 @@ func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobF } if requireField(ctx, "histMetrics") { + + if numMetricBins == nil { + numMetricBins = &defaultMetricBins + } + if groupBy == nil { - stats[0], err = r.Repo.AddMetricHistograms(ctx, filter, metrics, stats[0]) + stats[0], err = r.Repo.AddMetricHistograms(ctx, filter, metrics, stats[0], numMetricBins) if err != nil { return nil, err } @@ -423,8 +437,8 @@ func (r *queryResolver) RooflineHeatmap(ctx context.Context, filter []*model.Job // NodeMetrics is the resolver for 
the nodeMetrics field. func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, nodes []string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time) ([]*model.NodeMetrics, error) { user := repository.GetUserFromContext(ctx) - if user != nil && !user.HasRole(schema.RoleAdmin) { - return nil, errors.New("you need to be an administrator for this query") + if user != nil && !user.HasAnyRole([]schema.Role{schema.RoleAdmin, schema.RoleSupport}) { + return nil, errors.New("you need to be administrator or support staff for this query") } if metrics == nil { @@ -435,7 +449,7 @@ func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, nodes [ data, err := metricDataDispatcher.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx) if err != nil { - log.Warn("Error while loading node data") + log.Warn("error while loading node data") return nil, err } @@ -445,7 +459,10 @@ func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, nodes [ Host: hostname, Metrics: make([]*model.JobMetricWithName, 0, len(metrics)*len(scopes)), } - host.SubCluster, _ = archive.GetSubClusterByNode(cluster, hostname) + host.SubCluster, err = archive.GetSubClusterByNode(cluster, hostname) + if err != nil { + log.Warnf("error in nodeMetrics resolver: %s", err) + } for metric, scopedMetrics := range metrics { for _, scopedMetric := range scopedMetrics { @@ -463,6 +480,68 @@ func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, nodes [ return nodeMetrics, nil } +// NodeMetricsList is the resolver for the nodeMetricsList field. +func (r *queryResolver) NodeMetricsList(ctx context.Context, cluster string, subCluster string, nodeFilter string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time, page *model.PageRequest, resolution *int) (*model.NodesResultList, error) { + if resolution == nil { // Load from Config + if config.Keys.EnableResampling != nil { + defaultRes := slices.Max(config.Keys.EnableResampling.Resolutions) + resolution = &defaultRes + } else { // Set 0 (Loads configured metric timestep) + defaultRes := 0 + resolution = &defaultRes + } + } + + user := repository.GetUserFromContext(ctx) + if user != nil && !user.HasAnyRole([]schema.Role{schema.RoleAdmin, schema.RoleSupport}) { + return nil, errors.New("you need to be administrator or support staff for this query") + } + + if metrics == nil { + for _, mc := range archive.GetCluster(cluster).MetricConfig { + metrics = append(metrics, mc.Name) + } + } + + data, totalNodes, hasNextPage, err := metricDataDispatcher.LoadNodeListData(cluster, subCluster, nodeFilter, metrics, scopes, *resolution, from, to, page, ctx) + if err != nil { + log.Warn("error while loading node data") + return nil, err + } + + nodeMetricsList := make([]*model.NodeMetrics, 0, len(data)) + for hostname, metrics := range data { + host := &model.NodeMetrics{ + Host: hostname, + Metrics: make([]*model.JobMetricWithName, 0, len(metrics)*len(scopes)), + } + host.SubCluster, err = archive.GetSubClusterByNode(cluster, hostname) + if err != nil { + log.Warnf("error in nodeMetricsList resolver: %s", err) + } + + for metric, scopedMetrics := range metrics { + for scope, scopedMetric := range scopedMetrics { + host.Metrics = append(host.Metrics, &model.JobMetricWithName{ + Name: metric, + Scope: scope, + Metric: scopedMetric, + }) + } + } + + nodeMetricsList = append(nodeMetricsList, host) + } + + nodeMetricsListResult := &model.NodesResultList{ + Items: nodeMetricsList, + TotalNodes: 
&totalNodes, + HasNextPage: &hasNextPage, + } + + return nodeMetricsListResult, nil +} + // NumberOfNodes is the resolver for the numberOfNodes field. func (r *subClusterResolver) NumberOfNodes(ctx context.Context, obj *schema.SubCluster) (int, error) { nodeList, err := archive.ParseNodeList(obj.Nodes) @@ -490,11 +569,9 @@ func (r *Resolver) Query() generated.QueryResolver { return &queryResolver{r} } // SubCluster returns generated.SubClusterResolver implementation. func (r *Resolver) SubCluster() generated.SubClusterResolver { return &subClusterResolver{r} } -type ( - clusterResolver struct{ *Resolver } - jobResolver struct{ *Resolver } - metricValueResolver struct{ *Resolver } - mutationResolver struct{ *Resolver } - queryResolver struct{ *Resolver } - subClusterResolver struct{ *Resolver } -) +type clusterResolver struct{ *Resolver } +type jobResolver struct{ *Resolver } +type metricValueResolver struct{ *Resolver } +type mutationResolver struct{ *Resolver } +type queryResolver struct{ *Resolver } +type subClusterResolver struct{ *Resolver } diff --git a/internal/metricDataDispatcher/dataLoader.go b/internal/metricDataDispatcher/dataLoader.go index 1f2e175..939a0fb 100644 --- a/internal/metricDataDispatcher/dataLoader.go +++ b/internal/metricDataDispatcher/dataLoader.go @@ -10,6 +10,7 @@ import ( "time" "github.com/ClusterCockpit/cc-backend/internal/config" + "github.com/ClusterCockpit/cc-backend/internal/graph/model" "github.com/ClusterCockpit/cc-backend/internal/metricdata" "github.com/ClusterCockpit/cc-backend/pkg/archive" "github.com/ClusterCockpit/cc-backend/pkg/log" @@ -219,7 +220,7 @@ func LoadAverages( return nil } -// Used for the node/system view. Returns a map of nodes to a map of metrics. +// Used for the classic node/system view. Returns a map of nodes to a map of metrics. 
func LoadNodeData( cluster string, metrics, nodes []string, @@ -254,3 +255,53 @@ func LoadNodeData( return data, nil } + +func LoadNodeListData( + cluster, subCluster, nodeFilter string, + metrics []string, + scopes []schema.MetricScope, + resolution int, + from, to time.Time, + page *model.PageRequest, + ctx context.Context, +) (map[string]schema.JobData, int, bool, error) { + repo, err := metricdata.GetMetricDataRepo(cluster) + if err != nil { + return nil, 0, false, fmt.Errorf("METRICDATA/METRICDATA > no metric data repository configured for '%s'", cluster) + } + + if metrics == nil { + for _, m := range archive.GetCluster(cluster).MetricConfig { + metrics = append(metrics, m.Name) + } + } + + data, totalNodes, hasNextPage, err := repo.LoadNodeListData(cluster, subCluster, nodeFilter, metrics, scopes, resolution, from, to, page, ctx) + if err != nil { + if len(data) != 0 { + log.Warnf("partial error: %s", err.Error()) + } else { + log.Error("Error while loading node data from metric repository") + return nil, totalNodes, hasNextPage, err + } + } + + // NOTE: New StatsSeries will always be calculated as 'min/median/max' + const maxSeriesSize int = 8 + for _, jd := range data { + for _, scopes := range jd { + for _, jm := range scopes { + if jm.StatisticsSeries != nil || len(jm.Series) < maxSeriesSize { + continue + } + jm.AddStatisticsSeries() + } + } + } + + if data == nil { + return nil, totalNodes, hasNextPage, fmt.Errorf("METRICDATA/METRICDATA > the metric data repository for '%s' does not support this query", cluster) + } + + return data, totalNodes, hasNextPage, nil +} diff --git a/internal/metricdata/cc-metric-store.go b/internal/metricdata/cc-metric-store.go index ce5101c..8d2d3f5 100644 --- a/internal/metricdata/cc-metric-store.go +++ b/internal/metricdata/cc-metric-store.go @@ -11,10 +11,12 @@ import ( "encoding/json" "fmt" "net/http" + "sort" "strconv" "strings" "time" + "github.com/ClusterCockpit/cc-backend/internal/graph/model" "github.com/ClusterCockpit/cc-backend/pkg/archive" "github.com/ClusterCockpit/cc-backend/pkg/log" "github.com/ClusterCockpit/cc-backend/pkg/schema" @@ -211,7 +213,6 @@ func (ccms *CCMetricStore) LoadData( } jobMetric, ok := jobData[metric][scope] - if !ok { jobMetric = &schema.JobMetric{ Unit: mc.Unit, @@ -235,8 +236,7 @@ func (ccms *CCMetricStore) LoadData( } if res.Avg.IsNaN() || res.Min.IsNaN() || res.Max.IsNaN() { - // TODO: use schema.Float instead of float64? - // This is done because regular float64 can not be JSONed when NaN. + // "schema.Float()" because regular float64 can not be JSONed when NaN. res.Avg = schema.Float(0) res.Min = schema.Float(0) res.Max = schema.Float(0) @@ -693,6 +693,445 @@ func (ccms *CCMetricStore) LoadNodeData( return data, nil } +func (ccms *CCMetricStore) LoadNodeListData( + cluster, subCluster, nodeFilter string, + metrics []string, + scopes []schema.MetricScope, + resolution int, + from, to time.Time, + page *model.PageRequest, + ctx context.Context, +) (map[string]schema.JobData, int, bool, error) { + + // 0) Init additional vars + var totalNodes int = 0 + var hasNextPage bool = false + + // 1) Get list of all nodes + var nodes []string + if subCluster != "" { + scNodes := archive.NodeLists[cluster][subCluster] + nodes = scNodes.PrintList() + } else { + subClusterNodeLists := archive.NodeLists[cluster] + for _, nodeList := range subClusterNodeLists { + nodes = append(nodes, nodeList.PrintList()...) 
+ } + } + + // 2) Filter nodes + if nodeFilter != "" { + filteredNodes := []string{} + for _, node := range nodes { + if strings.Contains(node, nodeFilter) { + filteredNodes = append(filteredNodes, node) + } + } + nodes = filteredNodes + } + + // 2.1) Count total nodes && Sort nodes -> Sorting invalidated after ccms return ... + totalNodes = len(nodes) + sort.Strings(nodes) + + // 3) Apply paging + if len(nodes) > page.ItemsPerPage { + start := (page.Page - 1) * page.ItemsPerPage + end := start + page.ItemsPerPage + if end > len(nodes) { + end = len(nodes) + hasNextPage = false + } else { + hasNextPage = true + } + nodes = nodes[start:end] + } + + // Note: Order of node data is not guaranteed after this point, but contents match page and filter criteria + + queries, assignedScope, err := ccms.buildNodeQueries(cluster, subCluster, nodes, metrics, scopes, resolution) + if err != nil { + log.Warn("Error while building queries") + return nil, totalNodes, hasNextPage, err + } + + req := ApiQueryRequest{ + Cluster: cluster, + Queries: queries, + From: from.Unix(), + To: to.Unix(), + WithStats: true, + WithData: true, + } + + resBody, err := ccms.doRequest(ctx, &req) + if err != nil { + log.Error(fmt.Sprintf("Error while performing request %#v\n", err)) + return nil, totalNodes, hasNextPage, err + } + + var errors []string + data := make(map[string]schema.JobData) + for i, row := range resBody.Results { + var query ApiQuery + if resBody.Queries != nil { + query = resBody.Queries[i] + } else { + query = req.Queries[i] + } + // qdata := res[0] + metric := ccms.toLocalName(query.Metric) + scope := assignedScope[i] + mc := archive.GetMetricConfig(cluster, metric) + + res := row[0].Resolution + if res == 0 { + res = mc.Timestep + } + + // Init Nested Map Data Structures If Not Found + hostData, ok := data[query.Hostname] + if !ok { + hostData = make(schema.JobData) + data[query.Hostname] = hostData + } + + metricData, ok := hostData[metric] + if !ok { + metricData = make(map[schema.MetricScope]*schema.JobMetric) + data[query.Hostname][metric] = metricData + } + + scopeData, ok := metricData[scope] + if !ok { + scopeData = &schema.JobMetric{ + Unit: mc.Unit, + Timestep: res, + Series: make([]schema.Series, 0), + } + data[query.Hostname][metric][scope] = scopeData + } + + for ndx, res := range row { + if res.Error != nil { + /* Build list for "partial errors", if any */ + errors = append(errors, fmt.Sprintf("failed to fetch '%s' from host '%s': %s", query.Metric, query.Hostname, *res.Error)) + continue + } + + id := (*string)(nil) + if query.Type != nil { + id = new(string) + *id = query.TypeIds[ndx] + } + + if res.Avg.IsNaN() || res.Min.IsNaN() || res.Max.IsNaN() { + // "schema.Float()" because regular float64 can not be JSONed when NaN. 
+ res.Avg = schema.Float(0) + res.Min = schema.Float(0) + res.Max = schema.Float(0) + } + + scopeData.Series = append(scopeData.Series, schema.Series{ + Hostname: query.Hostname, + Id: id, + Statistics: schema.MetricStatistics{ + Avg: float64(res.Avg), + Min: float64(res.Min), + Max: float64(res.Max), + }, + Data: res.Data, + }) + } + } + + if len(errors) != 0 { + /* Returns list of "partial errors" */ + return data, totalNodes, hasNextPage, fmt.Errorf("METRICDATA/CCMS > Errors: %s", strings.Join(errors, ", ")) + } + + return data, totalNodes, hasNextPage, nil +} + +func (ccms *CCMetricStore) buildNodeQueries( + cluster string, + subCluster string, + nodes []string, + metrics []string, + scopes []schema.MetricScope, + resolution int, +) ([]ApiQuery, []schema.MetricScope, error) { + + queries := make([]ApiQuery, 0, len(metrics)*len(scopes)*len(nodes)) + assignedScope := []schema.MetricScope{} + + // Get Topol before loop if subCluster given + var subClusterTopol *schema.SubCluster + var scterr error + if subCluster != "" { + subClusterTopol, scterr = archive.GetSubCluster(cluster, subCluster) + if scterr != nil { + // TODO: Log + return nil, nil, scterr + } + } + + for _, metric := range metrics { + remoteName := ccms.toRemoteName(metric) + mc := archive.GetMetricConfig(cluster, metric) + if mc == nil { + // return nil, fmt.Errorf("METRICDATA/CCMS > metric '%s' is not specified for cluster '%s'", metric, cluster) + log.Infof("metric '%s' is not specified for cluster '%s'", metric, cluster) + continue + } + + // Avoid duplicates... + handledScopes := make([]schema.MetricScope, 0, 3) + + scopesLoop: + for _, requestedScope := range scopes { + nativeScope := mc.Scope + + scope := nativeScope.Max(requestedScope) + for _, s := range handledScopes { + if scope == s { + continue scopesLoop + } + } + handledScopes = append(handledScopes, scope) + + for _, hostname := range nodes { + + // If no subCluster given, get it by node + if subCluster == "" { + subClusterName, scnerr := archive.GetSubClusterByNode(cluster, hostname) + if scnerr != nil { + return nil, nil, scnerr + } + subClusterTopol, scterr = archive.GetSubCluster(cluster, subClusterName) + if scterr != nil { + return nil, nil, scterr + } + } + + // Always full node hwthread id list, no partial queries expected -> Use "topology.Node" directly where applicable + // Always full accelerator id list, no partial queries expected -> Use "acceleratorIds" directly where applicable + topology := subClusterTopol.Topology + acceleratorIds := topology.GetAcceleratorIDs() + + // Moved check here if metric matches hardware specs + if nativeScope == schema.MetricScopeAccelerator && len(acceleratorIds) == 0 { + continue scopesLoop + } + + // Accelerator -> Accelerator (Use "accelerator" scope if requested scope is lower than node) + if nativeScope == schema.MetricScopeAccelerator && scope.LT(schema.MetricScopeNode) { + if scope != schema.MetricScopeAccelerator { + // Skip all other caught cases + continue + } + + queries = append(queries, ApiQuery{ + Metric: remoteName, + Hostname: hostname, + Aggregate: false, + Type: &acceleratorString, + TypeIds: acceleratorIds, + Resolution: resolution, + }) + assignedScope = append(assignedScope, schema.MetricScopeAccelerator) + continue + } + + // Accelerator -> Node + if nativeScope == schema.MetricScopeAccelerator && scope == schema.MetricScopeNode { + if len(acceleratorIds) == 0 { + continue + } + + queries = append(queries, ApiQuery{ + Metric: remoteName, + Hostname: hostname, + Aggregate: true, + Type: &acceleratorString, + TypeIds: acceleratorIds, + Resolution: resolution, + }) + assignedScope = append(assignedScope, scope) + continue + } + + // HWThread -> HWThread + if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeHWThread { + queries = append(queries, ApiQuery{ + Metric: remoteName, + Hostname: hostname, + Aggregate: false, + Type: &hwthreadString, + TypeIds: intToStringSlice(topology.Node), + Resolution: resolution, + }) + assignedScope = append(assignedScope, scope) + continue + } + + // HWThread -> Core + if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeCore { + cores, _ := topology.GetCoresFromHWThreads(topology.Node) + for _, core := range cores { + queries = append(queries, ApiQuery{ + Metric: remoteName, + Hostname: hostname, + Aggregate: true, + Type: &hwthreadString, + TypeIds: intToStringSlice(topology.Core[core]), + Resolution: resolution, + }) + assignedScope = append(assignedScope, scope) + } + continue + } + + // HWThread -> Socket + if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeSocket { + sockets, _ := topology.GetSocketsFromHWThreads(topology.Node) + for _, socket := range sockets { + queries = append(queries, ApiQuery{ + Metric: remoteName, + Hostname: hostname, + Aggregate: true, + Type: &hwthreadString, + TypeIds: intToStringSlice(topology.Socket[socket]), + Resolution: resolution, + }) + assignedScope = append(assignedScope, scope) + } + continue + } + + // HWThread -> Node + if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeNode { + queries = append(queries, ApiQuery{ + Metric: remoteName, + Hostname: hostname, + Aggregate: true, + Type: &hwthreadString, + TypeIds: intToStringSlice(topology.Node), + Resolution: resolution, + }) + assignedScope = append(assignedScope, scope) + continue + } + + // Core -> Core + if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeCore { + cores, _ := topology.GetCoresFromHWThreads(topology.Node) + queries = append(queries, ApiQuery{ + Metric: remoteName, + Hostname: hostname, + Aggregate: false, + Type: &coreString, + TypeIds: intToStringSlice(cores), + Resolution: resolution, + }) + assignedScope = append(assignedScope, scope) + continue + } + + // Core -> Node + if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeNode { + cores, _ := topology.GetCoresFromHWThreads(topology.Node) + queries = append(queries, ApiQuery{ + Metric: remoteName, + Hostname: hostname, + Aggregate: true, + Type: &coreString, + TypeIds: intToStringSlice(cores), + Resolution: resolution, + }) + assignedScope = append(assignedScope, scope) + continue + } + + // MemoryDomain -> MemoryDomain + if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeMemoryDomain { + sockets, _ := topology.GetMemoryDomainsFromHWThreads(topology.Node) + queries = append(queries, ApiQuery{ + Metric: remoteName, + Hostname: hostname, + Aggregate: false, + Type: &memoryDomainString, + TypeIds: intToStringSlice(sockets), + Resolution: resolution, + }) + assignedScope = append(assignedScope, scope) + continue + } + + // MemoryDomain -> Node + if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeNode { + sockets, _ := topology.GetMemoryDomainsFromHWThreads(topology.Node) + queries = append(queries, ApiQuery{ + Metric: remoteName, + Hostname: hostname, + Aggregate: true, + Type: &memoryDomainString, + TypeIds: intToStringSlice(sockets), + Resolution: resolution, + }) + assignedScope = 
append(assignedScope, scope) + continue + } + + // Socket -> Socket + if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeSocket { + sockets, _ := topology.GetSocketsFromHWThreads(topology.Node) + queries = append(queries, ApiQuery{ + Metric: remoteName, + Hostname: hostname, + Aggregate: false, + Type: &socketString, + TypeIds: intToStringSlice(sockets), + Resolution: resolution, + }) + assignedScope = append(assignedScope, scope) + continue + } + + // Socket -> Node + if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeNode { + sockets, _ := topology.GetSocketsFromHWThreads(topology.Node) + queries = append(queries, ApiQuery{ + Metric: remoteName, + Hostname: hostname, + Aggregate: true, + Type: &socketString, + TypeIds: intToStringSlice(sockets), + Resolution: resolution, + }) + assignedScope = append(assignedScope, scope) + continue + } + + // Node -> Node + if nativeScope == schema.MetricScopeNode && scope == schema.MetricScopeNode { + queries = append(queries, ApiQuery{ + Metric: remoteName, + Hostname: hostname, + Resolution: resolution, + }) + assignedScope = append(assignedScope, scope) + continue + } + + return nil, nil, fmt.Errorf("METRICDATA/CCMS > TODO: unhandled case: native-scope=%s, requested-scope=%s", nativeScope, requestedScope) + } + } + } + + return queries, assignedScope, nil +} + func intToStringSlice(is []int) []string { ss := make([]string, len(is)) for i, x := range is { diff --git a/internal/metricdata/influxdb-v2.go b/internal/metricdata/influxdb-v2.go index b416fa5..79c2d4a 100644 --- a/internal/metricdata/influxdb-v2.go +++ b/internal/metricdata/influxdb-v2.go @@ -13,6 +13,7 @@ import ( "strings" "time" + "github.com/ClusterCockpit/cc-backend/internal/graph/model" "github.com/ClusterCockpit/cc-backend/pkg/archive" "github.com/ClusterCockpit/cc-backend/pkg/log" "github.com/ClusterCockpit/cc-backend/pkg/schema" @@ -312,3 +313,21 @@ func (idb *InfluxDBv2DataRepository) LoadNodeData( return nil, errors.New("METRICDATA/INFLUXV2 > unimplemented for InfluxDBv2DataRepository") } + +func (idb *InfluxDBv2DataRepository) LoadNodeListData( + cluster, subCluster, nodeFilter string, + metrics []string, + scopes []schema.MetricScope, + resolution int, + from, to time.Time, + page *model.PageRequest, + ctx context.Context, +) (map[string]schema.JobData, int, bool, error) { + + var totalNodes int = 0 + var hasNextPage bool = false + // TODO : Implement to be used in NodeList-View + log.Infof("LoadNodeListData unimplemented for InfluxDBv2DataRepository, Args: cluster %s, metrics %v, nodeFilter %v, scopes %v", cluster, metrics, nodeFilter, scopes) + + return nil, totalNodes, hasNextPage, errors.New("METRICDATA/INFLUXV2 > unimplemented for InfluxDBv2DataRepository") +} diff --git a/internal/metricdata/metricdata.go b/internal/metricdata/metricdata.go index 354dd5f..0fe94d1 100644 --- a/internal/metricdata/metricdata.go +++ b/internal/metricdata/metricdata.go @@ -11,6 +11,7 @@ import ( "time" "github.com/ClusterCockpit/cc-backend/internal/config" + "github.com/ClusterCockpit/cc-backend/internal/graph/model" "github.com/ClusterCockpit/cc-backend/pkg/log" "github.com/ClusterCockpit/cc-backend/pkg/schema" ) @@ -26,8 +27,11 @@ type MetricDataRepository interface { // Return a map of metrics to a map of nodes to the metric statistics of the job. node scope assumed for now. 
 	LoadStats(job *schema.Job, metrics []string, ctx context.Context) (map[string]map[string]schema.MetricStatistics, error)
-	// Return a map of hosts to a map of metrics at the requested scopes for that node.
+	// Return a map of hosts to a map of metrics at the requested scopes (currently only node) for that node.
 	LoadNodeData(cluster string, metrics, nodes []string, scopes []schema.MetricScope, from, to time.Time, ctx context.Context) (map[string]map[string][]*schema.JobMetric, error)
+
+	// Return a map of hosts to a map of metrics to a map of scopes for multiple nodes.
+	LoadNodeListData(cluster, subCluster, nodeFilter string, metrics []string, scopes []schema.MetricScope, resolution int, from, to time.Time, page *model.PageRequest, ctx context.Context) (map[string]schema.JobData, int, bool, error)
 }
 var metricDataRepos map[string]MetricDataRepository = map[string]MetricDataRepository{}
diff --git a/internal/metricdata/prometheus.go b/internal/metricdata/prometheus.go
index 0611824..cd849ce 100644
--- a/internal/metricdata/prometheus.go
+++ b/internal/metricdata/prometheus.go
@@ -20,6 +20,7 @@ import (
 	"text/template"
 	"time"
+	"github.com/ClusterCockpit/cc-backend/internal/graph/model"
 	"github.com/ClusterCockpit/cc-backend/pkg/archive"
 	"github.com/ClusterCockpit/cc-backend/pkg/log"
 	"github.com/ClusterCockpit/cc-backend/pkg/schema"
@@ -446,3 +447,21 @@ func (pdb *PrometheusDataRepository) LoadNodeData(
 	log.Debugf("LoadNodeData of %v nodes took %s", len(data), t1)
 	return data, nil
 }
+
+func (pdb *PrometheusDataRepository) LoadNodeListData(
+	cluster, subCluster, nodeFilter string,
+	metrics []string,
+	scopes []schema.MetricScope,
+	resolution int,
+	from, to time.Time,
+	page *model.PageRequest,
+	ctx context.Context,
+) (map[string]schema.JobData, int, bool, error) {
+
+	var totalNodes int = 0
+	var hasNextPage bool = false
+	// TODO : Implement to be used in NodeList-View
+	log.Infof("LoadNodeListData unimplemented for PrometheusDataRepository, Args: cluster %s, metrics %v, nodeFilter %v, scopes %v", cluster, metrics, nodeFilter, scopes)
+
+	return nil, totalNodes, hasNextPage, errors.New("METRICDATA/PROMETHEUS > unimplemented for PrometheusDataRepository")
+}
diff --git a/internal/metricdata/utils.go b/internal/metricdata/utils.go
index dcdaaaa..48dd237 100644
--- a/internal/metricdata/utils.go
+++ b/internal/metricdata/utils.go
@@ -9,6 +9,7 @@ import (
 	"encoding/json"
 	"time"
+	"github.com/ClusterCockpit/cc-backend/internal/graph/model"
 	"github.com/ClusterCockpit/cc-backend/pkg/schema"
 )
@@ -50,6 +51,19 @@ func (tmdr *TestMetricDataRepository) LoadNodeData(
 	panic("TODO")
 }
+func (tmdr *TestMetricDataRepository) LoadNodeListData(
+	cluster, subCluster, nodeFilter string,
+	metrics []string,
+	scopes []schema.MetricScope,
+	resolution int,
+	from, to time.Time,
+	page *model.PageRequest,
+	ctx context.Context,
+) (map[string]schema.JobData, int, bool, error) {
+
+	panic("TODO")
+}
+
 func DeepCopy(jd_temp schema.JobData) schema.JobData {
 	var jd schema.JobData
diff --git a/internal/repository/stats.go b/internal/repository/stats.go
index 484851d..ad518bd 100644
--- a/internal/repository/stats.go
+++ b/internal/repository/stats.go
@@ -8,7 +8,6 @@ import (
 	"context"
 	"database/sql"
 	"fmt"
-	"math"
 	"time"
 	"github.com/ClusterCockpit/cc-backend/internal/config"
@@ -447,15 +446,40 @@ func (r *JobRepository) AddHistograms(
 	ctx context.Context,
 	filter []*model.JobFilter,
 	stat *model.JobsStatistics,
+	durationBins *string,
 ) (*model.JobsStatistics, error) {
 	start := time.Now()
+	var targetBinCount int
+	var targetBinSize int
+	switch {
+	case *durationBins == "1m": // 1 Minute Bins + Max 60 Bins -> Max 60 Minutes
+		targetBinCount = 60
+		targetBinSize = 60
+	case *durationBins == "10m": // 10 Minute Bins + Max 72 Bins -> Max 12 Hours
+		targetBinCount = 72
+		targetBinSize = 600
+	case *durationBins == "1h": // 1 Hour Bins + Max 48 Bins -> Max 48 Hours
+		targetBinCount = 48
+		targetBinSize = 3600
+	case *durationBins == "6h": // 6 Hour Bins + Max 12 Bins -> Max 3 Days
+		targetBinCount = 12
+		targetBinSize = 21600
+	case *durationBins == "12h": // 12 hour Bins + Max 14 Bins -> Max 7 Days
+		targetBinCount = 14
+		targetBinSize = 43200
+	default: // 24h
+		targetBinCount = 24
+		targetBinSize = 3600
+	}
+
 	castType := r.getCastType()
 	var err error
-	value := fmt.Sprintf(`CAST(ROUND((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) / 3600) as %s) as value`, time.Now().Unix(), castType)
-	stat.HistDuration, err = r.jobsStatisticsHistogram(ctx, value, filter)
+	// Return X-Values always as seconds, will be formatted into minutes and hours in frontend
+	value := fmt.Sprintf(`CAST(ROUND(((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) / %d) + 1) as %s) as value`, time.Now().Unix(), targetBinSize, castType)
+	stat.HistDuration, err = r.jobsDurationStatisticsHistogram(ctx, value, filter, targetBinSize, &targetBinCount)
 	if err != nil {
-		log.Warn("Error while loading job statistics histogram: running jobs")
+		log.Warn("Error while loading job statistics histogram: job duration")
 		return nil, err
 	}
@@ -487,6 +511,7 @@ func (r *JobRepository) AddMetricHistograms(
 	filter []*model.JobFilter,
 	metrics []string,
 	stat *model.JobsStatistics,
+	targetBinCount *int,
 ) (*model.JobsStatistics, error) {
 	start := time.Now()
@@ -494,7 +519,7 @@ func (r *JobRepository) AddMetricHistograms(
 	for _, f := range filter {
 		if f.State != nil {
 			if len(f.State) == 1 && f.State[0] == "running" {
-				stat.HistMetrics = r.runningJobsMetricStatisticsHistogram(ctx, metrics, filter)
+				stat.HistMetrics = r.runningJobsMetricStatisticsHistogram(ctx, metrics, filter, targetBinCount)
 				log.Debugf("Timer AddMetricHistograms %s", time.Since(start))
 				return stat, nil
 			}
@@ -503,7 +528,7 @@ func (r *JobRepository) AddMetricHistograms(
 	// All other cases: Query and make bins in sqlite directly
 	for _, m := range metrics {
-		metricHisto, err := r.jobsMetricStatisticsHistogram(ctx, m, filter)
+		metricHisto, err := r.jobsMetricStatisticsHistogram(ctx, m, filter, targetBinCount)
 		if err != nil {
 			log.Warnf("Error while loading job metric statistics histogram: %s", m)
 			continue
@@ -540,6 +565,7 @@ func (r *JobRepository) jobsStatisticsHistogram(
 	}
 	points := make([]*model.HistoPoint, 0)
+	// is it possible to introduce zero values here? requires info about bincount
 	for rows.Next() {
 		point := model.HistoPoint{}
 		if err := rows.Scan(&point.Value, &point.Count); err != nil {
@@ -553,10 +579,66 @@
 	return points, nil
 }
+func (r *JobRepository) jobsDurationStatisticsHistogram(
+	ctx context.Context,
+	value string,
+	filters []*model.JobFilter,
+	binSizeSeconds int,
+	targetBinCount *int,
+) ([]*model.HistoPoint, error) {
+	start := time.Now()
+	query, qerr := SecurityCheck(ctx,
+		sq.Select(value, "COUNT(job.id) AS count").From("job"))
+
+	if qerr != nil {
+		return nil, qerr
+	}
+
+	// Setup Array
+	points := make([]*model.HistoPoint, 0)
+	for i := 1; i <= *targetBinCount; i++ {
+		point := model.HistoPoint{Value: i * binSizeSeconds, Count: 0}
+		points = append(points, &point)
+	}
+
+	for _, f := range filters {
+		query = BuildWhereClause(f, query)
+	}
+
+	rows, err := query.GroupBy("value").RunWith(r.DB).Query()
+	if err != nil {
+		log.Error("Error while running query")
+		return nil, err
+	}
+
+	// Fill Array at matching $Value
+	for rows.Next() {
+		point := model.HistoPoint{}
+		if err := rows.Scan(&point.Value, &point.Count); err != nil {
+			log.Warn("Error while scanning rows")
+			return nil, err
+		}
+
+		for _, e := range points {
+			if e.Value == (point.Value * binSizeSeconds) {
+				// Note:
+				// Matching on unmodified integer value (and multiplying point.Value by binSizeSeconds after match)
+				// causes frontend to loop into highest targetBinCount, due to zoom condition instantly being fulfilled (cause unknown)
+				e.Count = point.Count
+				break
+			}
+		}
+	}
+
+	log.Debugf("Timer jobsDurationStatisticsHistogram %s", time.Since(start))
+	return points, nil
+}
+
 func (r *JobRepository) jobsMetricStatisticsHistogram(
 	ctx context.Context,
 	metric string,
 	filters []*model.JobFilter,
+	bins *int,
 ) (*model.MetricHistoPoints, error) {
 	// Get specific Peak or largest Peak
 	var metricConfig *schema.MetricConfig
@@ -624,16 +706,15 @@
 	return nil, sqlerr
 	}
-	bins := 10
 	binQuery := fmt.Sprintf(`CAST( (case when %s = value.max
 		then value.max*0.999999999 else %s end - value.min) / (value.max -
-	value.min) * %d as INTEGER )`, jm, jm, bins)
+	value.min) * %v as INTEGER )`, jm, jm, *bins)
 	mainQuery := sq.Select(
 		fmt.Sprintf(`%s + 1 as bin`, binQuery),
 		fmt.Sprintf(`count(%s) as count`, jm),
-		fmt.Sprintf(`CAST(((value.max / %d) * (%s )) as INTEGER ) as min`, bins, binQuery),
-		fmt.Sprintf(`CAST(((value.max / %d) * (%s + 1 )) as INTEGER ) as max`, bins, binQuery),
+		fmt.Sprintf(`CAST(((value.max / %d) * (%v )) as INTEGER ) as min`, *bins, binQuery),
+		fmt.Sprintf(`CAST(((value.max / %d) * (%v + 1 )) as INTEGER ) as max`, *bins, binQuery),
 	).From("job").CrossJoin(
 		fmt.Sprintf(`(%s) as value`, crossJoinQuerySql), crossJoinQueryArgs...,
 	).Where(fmt.Sprintf(`%s is not null and %s <= %f`, jm, jm, peak))
@@ -657,7 +738,15 @@
 		return nil, err
 	}
+	// Setup Array
 	points := make([]*model.MetricHistoPoint, 0)
+	for i := 1; i <= *bins; i++ {
+		binMax := ((int(peak) / *bins) * i)
+		binMin := ((int(peak) / *bins) * (i - 1))
+		point := model.MetricHistoPoint{Bin: &i, Count: 0, Min: &binMin, Max: &binMax}
+		points = append(points, &point)
+	}
+
 	for rows.Next() {
 		point := model.MetricHistoPoint{}
 		if err := rows.Scan(&point.Bin, &point.Count, &point.Min, &point.Max); err != nil {
@@ -665,7 +754,20 @@
 			return nil, err // Totally bricks cc-backend if returned and if all metrics requested?
-		points = append(points, &point)
+		for _, e := range points {
+			if e.Bin != nil && point.Bin != nil {
+				if *e.Bin == *point.Bin {
+					e.Count = point.Count
+					if point.Min != nil {
+						e.Min = point.Min
+					}
+					if point.Max != nil {
+						e.Max = point.Max
+					}
+					break
+				}
+			}
+		}
 	}
 	result := model.MetricHistoPoints{Metric: metric, Unit: unit, Stat: &footprintStat, Data: points}
@@ -678,7 +780,9 @@ func (r *JobRepository) runningJobsMetricStatisticsHistogram(
 	ctx context.Context,
 	metrics []string,
 	filters []*model.JobFilter,
+	bins *int,
 ) []*model.MetricHistoPoints {
+	// Get Jobs
 	jobs, err := r.QueryJobs(ctx, filters, &model.PageRequest{Page: 1, ItemsPerPage: 500 + 1}, nil)
 	if err != nil {
@@ -720,7 +824,6 @@ func (r *JobRepository) runningJobsMetricStatisticsHistogram(
 			metricConfig = archive.GetMetricConfig(*f.Cluster.Eq, metric)
 			peak = metricConfig.Peak
 			unit = metricConfig.Unit.Prefix + metricConfig.Unit.Base
-			log.Debugf("Cluster %s filter found with peak %f for %s", *f.Cluster.Eq, peak, metric)
 		}
 	}
@@ -740,28 +843,24 @@
 	}
 	// Make and fill bins
-	bins := 10.0
-	peakBin := peak / bins
+	peakBin := int(peak) / *bins
 	points := make([]*model.MetricHistoPoint, 0)
-	for b := 0; b < 10; b++ {
+	for b := 0; b < *bins; b++ {
 		count := 0
 		bindex := b + 1
-		bmin := math.Round(peakBin * float64(b))
-		bmax := math.Round(peakBin * (float64(b) + 1.0))
+		bmin := peakBin * b
+		bmax := peakBin * (b + 1)
 		// Iterate AVG values for indexed metric and count for bins
 		for _, val := range avgs[idx] {
-			if float64(val) >= bmin && float64(val) < bmax {
+			if int(val) >= bmin && int(val) < bmax {
 				count += 1
 			}
 		}
-		bminint := int(bmin)
-		bmaxint := int(bmax)
-
 		// Append Bin to Metric Result Array
-		point := model.MetricHistoPoint{Bin: &bindex, Count: count, Min: &bminint, Max: &bmaxint}
+		point := model.MetricHistoPoint{Bin: &bindex, Count: count, Min: &bmin, Max: &bmax}
 		points = append(points, &point)
 	}
diff --git a/internal/routerConfig/routes.go b/internal/routerConfig/routes.go
index 1a3317f..bf74391 100644
--- a/internal/routerConfig/routes.go
+++ b/internal/routerConfig/routes.go
@@ -42,10 +42,12 @@ var routes []Route = []Route{
 	{"/monitoring/projects/", "monitoring/list.tmpl", "Projects - ClusterCockpit", true, func(i InfoType, r *http.Request) InfoType { i["listType"] = "PROJECT"; return i }},
 	{"/monitoring/tags/", "monitoring/taglist.tmpl", "Tags - ClusterCockpit", false, setupTaglistRoute},
 	{"/monitoring/user/{id}", "monitoring/user.tmpl", "User - ClusterCockpit", true, setupUserRoute},
-	{"/monitoring/systems/{cluster}", "monitoring/systems.tmpl", "Cluster - ClusterCockpit", false, setupClusterRoute},
+	{"/monitoring/systems/{cluster}", "monitoring/systems.tmpl", "Cluster Node Overview - ClusterCockpit", false, setupClusterOverviewRoute},
+	{"/monitoring/systems/list/{cluster}", "monitoring/systems.tmpl", "Cluster Node List - ClusterCockpit", false, setupClusterListRoute},
+	{"/monitoring/systems/list/{cluster}/{subcluster}", "monitoring/systems.tmpl", "Cluster Node List - ClusterCockpit", false, setupClusterListRoute},
 	{"/monitoring/node/{cluster}/{hostname}", "monitoring/node.tmpl", "Node - ClusterCockpit", false, setupNodeRoute},
 	{"/monitoring/analysis/{cluster}", "monitoring/analysis.tmpl", "Analysis - ClusterCockpit", true, setupAnalysisRoute},
-	{"/monitoring/status/{cluster}", "monitoring/status.tmpl", "Status of 
- ClusterCockpit", false, setupClusterStatusRoute}, } func setupHomeRoute(i InfoType, r *http.Request) InfoType { @@ -111,7 +113,7 @@ func setupUserRoute(i InfoType, r *http.Request) InfoType { return i } -func setupClusterRoute(i InfoType, r *http.Request) InfoType { +func setupClusterStatusRoute(i InfoType, r *http.Request) InfoType { vars := mux.Vars(r) i["id"] = vars["cluster"] i["cluster"] = vars["cluster"] @@ -123,6 +125,36 @@ func setupClusterRoute(i InfoType, r *http.Request) InfoType { return i } +func setupClusterOverviewRoute(i InfoType, r *http.Request) InfoType { + vars := mux.Vars(r) + i["id"] = vars["cluster"] + i["cluster"] = vars["cluster"] + i["displayType"] = "OVERVIEW" + + from, to := r.URL.Query().Get("from"), r.URL.Query().Get("to") + if from != "" || to != "" { + i["from"] = from + i["to"] = to + } + return i +} + +func setupClusterListRoute(i InfoType, r *http.Request) InfoType { + vars := mux.Vars(r) + i["id"] = vars["cluster"] + i["cluster"] = vars["cluster"] + i["sid"] = vars["subcluster"] + i["subCluster"] = vars["subcluster"] + i["displayType"] = "LIST" + + from, to := r.URL.Query().Get("from"), r.URL.Query().Get("to") + if from != "" || to != "" { + i["from"] = from + i["to"] = to + } + return i +} + func setupNodeRoute(i InfoType, r *http.Request) InfoType { vars := mux.Vars(r) i["cluster"] = vars["cluster"] @@ -343,6 +375,9 @@ func SetupRoutes(router *mux.Router, buildInfo web.Build) { infos := route.Setup(map[string]interface{}{}, r) if id, ok := infos["id"]; ok { title = strings.Replace(route.Title, "", id.(string), 1) + if sid, ok := infos["sid"]; ok { // 2nd ID element + title = strings.Replace(title, "", sid.(string), 1) + } } // Get User -> What if NIL? diff --git a/pkg/archive/clusterConfig.go b/pkg/archive/clusterConfig.go index fff32c9..72718d0 100644 --- a/pkg/archive/clusterConfig.go +++ b/pkg/archive/clusterConfig.go @@ -15,12 +15,12 @@ import ( var ( Clusters []*schema.Cluster GlobalMetricList []*schema.GlobalMetricListItem - nodeLists map[string]map[string]NodeList + NodeLists map[string]map[string]NodeList ) func initClusterConfig() error { Clusters = []*schema.Cluster{} - nodeLists = map[string]map[string]NodeList{} + NodeLists = map[string]map[string]NodeList{} metricLookup := make(map[string]schema.GlobalMetricListItem) for _, c := range ar.GetClusters() { @@ -109,7 +109,7 @@ func initClusterConfig() error { Clusters = append(Clusters, cluster) - nodeLists[cluster.Name] = make(map[string]NodeList) + NodeLists[cluster.Name] = make(map[string]NodeList) for _, sc := range cluster.SubClusters { if sc.Nodes == "*" { continue @@ -119,7 +119,7 @@ func initClusterConfig() error { if err != nil { return fmt.Errorf("ARCHIVE/CLUSTERCONFIG > in %s/cluster.json: %w", cluster.Name, err) } - nodeLists[cluster.Name][sc.Name] = nl + NodeLists[cluster.Name][sc.Name] = nl } } @@ -187,7 +187,7 @@ func AssignSubCluster(job *schema.BaseJob) error { } host0 := job.Resources[0].Hostname - for sc, nl := range nodeLists[job.Cluster] { + for sc, nl := range NodeLists[job.Cluster] { if nl != nil && nl.Contains(host0) { job.SubCluster = sc return nil @@ -203,7 +203,7 @@ func AssignSubCluster(job *schema.BaseJob) error { } func GetSubClusterByNode(cluster, hostname string) (string, error) { - for sc, nl := range nodeLists[cluster] { + for sc, nl := range NodeLists[cluster] { if nl != nil && nl.Contains(hostname) { return sc, nil } diff --git a/pkg/schema/cluster.go b/pkg/schema/cluster.go index 0c88c61..07e4647 100644 --- a/pkg/schema/cluster.go +++ 
b/pkg/schema/cluster.go @@ -194,7 +194,17 @@ func (topo *Topology) GetAcceleratorID(id int) (string, error) { } } -func (topo *Topology) GetAcceleratorIDs() ([]int, error) { +// Return list of hardware (string) accelerator IDs +func (topo *Topology) GetAcceleratorIDs() []string { + accels := make([]string, 0) + for _, accel := range topo.Accelerators { + accels = append(accels, accel.ID) + } + return accels +} + +// Outdated? Or: Return indices of accelerators in parent array? +func (topo *Topology) GetAcceleratorIDsAsInt() ([]int, error) { accels := make([]int, 0) for _, accel := range topo.Accelerators { id, err := strconv.Atoi(accel.ID) diff --git a/web/frontend/package-lock.json b/web/frontend/package-lock.json index e21171f..4b89d34 100644 --- a/web/frontend/package-lock.json +++ b/web/frontend/package-lock.json @@ -1,12 +1,12 @@ { "name": "cc-frontend", - "version": "1.0.2", + "version": "1.0.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "cc-frontend", - "version": "1.0.2", + "version": "1.0.0", "license": "MIT", "dependencies": { "@rollup/plugin-replace": "^5.0.7", diff --git a/web/frontend/src/Analysis.root.svelte b/web/frontend/src/Analysis.root.svelte index 2e977e8..40757d3 100644 --- a/web/frontend/src/Analysis.root.svelte +++ b/web/frontend/src/Analysis.root.svelte @@ -174,6 +174,7 @@ }, }); + // Note: Different footprints than those saved in DB per Job -> Caused by Legacy Naming $: footprintsQuery = queryStore({ client: client, query: gql` @@ -470,10 +471,12 @@ height={300} data={convert2uplot($statsQuery.data.stats[0].histDuration)} title="Duration Distribution" - xlabel="Current Runtimes" - xunit="Hours" + xlabel="Current Job Runtimes" + xunit="Runtime" ylabel="Number of Jobs" yunit="Jobs" + usesBins + xtime /> {/key} @@ -519,7 +522,6 @@ ({ metric, ...binsFromFootprint( @@ -563,7 +565,6 @@ ({ m1, f1: $footprintsQuery.data.footprints.metrics.find( diff --git a/web/frontend/src/Config.root.svelte b/web/frontend/src/Config.root.svelte index dc45491..126f92b 100644 --- a/web/frontend/src/Config.root.svelte +++ b/web/frontend/src/Config.root.svelte @@ -3,6 +3,7 @@ Properties: - `ìsAdmin Bool!`: Is currently logged in user admin authority + - `isSupport Bool!`: Is currently logged in user support authority - `isApi Bool!`: Is currently logged in user api authority - `username String!`: Empty string if auth. 
is disabled, otherwise the username as string --> @@ -10,15 +11,17 @@ -{#if isAdmin == true} +{#if isAdmin} Admin Options @@ -27,6 +30,15 @@ {/if} +{#if isSupport || isAdmin} + + + Support Options + + + +{/if} + User Options diff --git a/web/frontend/src/Header.svelte b/web/frontend/src/Header.svelte index 9b12403..cf3e058 100644 --- a/web/frontend/src/Header.svelte +++ b/web/frontend/src/Header.svelte @@ -26,6 +26,7 @@ export let username; export let authlevel; export let clusters; + export let subClusters; export let roles; let isOpen = false; @@ -93,10 +94,19 @@ }, { title: "Nodes", - requiredRole: roles.admin, + requiredRole: roles.support, href: "/monitoring/systems/", icon: "hdd-rack", perCluster: true, + listOptions: true, + menu: "Info", + }, + { + title: "Analysis", + requiredRole: roles.support, + href: "/monitoring/analysis/", + icon: "graph-up", + perCluster: true, listOptions: false, menu: "Info", }, @@ -109,15 +119,6 @@ listOptions: false, menu: "Info", }, - { - title: "Analysis", - requiredRole: roles.support, - href: "/monitoring/analysis/", - icon: "graph-up", - perCluster: true, - listOptions: false, - menu: "Info", - }, ]; @@ -138,11 +139,13 @@ {#if screenSize > 1500 || screenSize < 768} item.requiredRole <= authlevel)} /> {:else if screenSize > 1300} item.requiredRole <= authlevel && item.menu != "Info", )} @@ -156,6 +159,7 @@ @@ -168,6 +172,7 @@ {:else} item.requiredRole <= authlevel && item.menu == "none", )} @@ -180,6 +185,7 @@ item.requiredRole <= authlevel && item.menu == 'Jobs', @@ -196,6 +202,7 @@ item.requiredRole <= authlevel && item.menu == 'Groups', @@ -212,6 +219,7 @@ item.requiredRole <= authlevel && item.menu == 'Info', diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte index ad9a0c7..086f25c 100644 --- a/web/frontend/src/Job.root.svelte +++ b/web/frontend/src/Job.root.svelte @@ -348,7 +348,6 @@ {:else if $initq?.data && $jobMetrics?.data?.jobMetrics} Selected Node - + @@ -153,18 +153,20 @@ {#if $nodeJobsData.fetching} {:else if $nodeJobsData.data} - - - Activity - - - Show List - - + + + Activity + + + Show List + + {:else} - - No currently running jobs. - + + + Activity + + {/if} @@ -189,7 +191,6 @@ {:else} ({ diff --git a/web/frontend/src/Status.root.svelte b/web/frontend/src/Status.root.svelte index e49d11a..f34b98b 100644 --- a/web/frontend/src/Status.root.svelte +++ b/web/frontend/src/Status.root.svelte @@ -463,7 +463,7 @@
- + @@ -587,17 +587,23 @@ {/key} +
+ + + {#key $mainQuery.data.stats} {/key} @@ -640,12 +646,15 @@ {/key} +
+ + + {#if metricsInHistograms} {#key $mainQuery.data.stats[0].histMetrics} diff --git a/web/frontend/src/Systems.root.svelte b/web/frontend/src/Systems.root.svelte index 488cdad..4086667 100644 --- a/web/frontend/src/Systems.root.svelte +++ b/web/frontend/src/Systems.root.svelte @@ -1,7 +1,8 @@ + + {#if $initq.data} + + {#if !displayNodeOverview} + + + + Metrics + + + + {#if resampleConfig} + + + + Resolution + + {#each resampleResolutions as res} + + {/each} + + + + {/if} + {/if} - + - Find Node + Find Node(s) @@ -132,20 +142,22 @@ - - - - - Metric - - - + + {#if displayNodeOverview} + + + + Metric + + {#each systemMetrics as metric} + + {/each} + + + + {/if} {/if} -
-{#if $nodesQuery.error} + + +{#if displayType !== "OVERVIEW" && displayType !== "LIST"} - {$nodesQuery.error.message} - - -{:else if $nodesQuery.fetching || $initq.fetching} - - - + Unknown displayList type! {:else} - - h.host.includes(hostnameFilter) && - h.metrics.some( - (m) => m.name == selectedMetric && m.scope == "node", - ), - ) - .map((h) => ({ - host: h.host, - subCluster: h.subCluster, - data: h.metrics.find( - (m) => m.name == selectedMetric && m.scope == "node", - ), - disabled: checkMetricDisabled( - selectedMetric, - cluster, - h.subCluster, - ), - })) - .sort((a, b) => a.host.localeCompare(b.host))} - > -

- {item.host} ({item.subCluster}) -

- {#if item.disabled === false && item.data} - c.name == cluster)} - subCluster={item.subCluster} - forNode={true} - /> - {:else if item.disabled === true && item.data} - Metric disabled for subcluster {selectedMetric}:{item.subCluster} - {:else} - No dataset returned for {selectedMetric} - {/if} -
+ {#if displayNodeOverview} + + + {:else} + + + {/if} {/if} + + { + selectedMetrics = [...detail] + }} +/> diff --git a/web/frontend/src/User.root.svelte b/web/frontend/src/User.root.svelte index 57f2f28..fae972b 100644 --- a/web/frontend/src/User.root.svelte +++ b/web/frontend/src/User.root.svelte @@ -17,6 +17,9 @@ Icon, Card, Spinner, + Input, + InputGroup, + InputGroupText } from "@sveltestrap/sveltestrap"; import { queryStore, @@ -59,6 +62,11 @@ let showFootprint = filterPresets.cluster ? !!ccconfig[`plot_list_showFootprint:${filterPresets.cluster}`] : !!ccconfig.plot_list_showFootprint; + + let numDurationBins = "1h"; + let numMetricBins = 10; + let durationBinOptions = ["1m","10m","1h","6h","12h"]; + let metricBinOptions = [10, 20, 50, 100]; $: metricsInHistograms = selectedCluster ? ccconfig[`user_view_histogramMetrics:${selectedCluster}`] || [] @@ -68,8 +76,8 @@ $: stats = queryStore({ client: client, query: gql` - query ($jobFilters: [JobFilter!]!, $metricsInHistograms: [String!]) { - jobsStatistics(filter: $jobFilters, metrics: $metricsInHistograms) { + query ($jobFilters: [JobFilter!]!, $metricsInHistograms: [String!], $numDurationBins: String, $numMetricBins: Int) { + jobsStatistics(filter: $jobFilters, metrics: $metricsInHistograms, numDurationBins: $numDurationBins , numMetricBins: $numMetricBins ) { totalJobs shortJobs totalWalltime @@ -96,7 +104,7 @@ } } `, - variables: { jobFilters, metricsInHistograms }, + variables: { jobFilters, metricsInHistograms, numDurationBins, numMetricBins }, }); onMount(() => filterComponent.updateFilters()); @@ -118,8 +126,8 @@ {/if} - - + + - + - + + + + + + + Duration Bin Size + + + {#each durationBinOptions as dbin} + + {/each} + + + + filterComponent.updateFilters(detail)} /> - + { jobList.refreshJobs() jobList.refreshAllMetrics() @@ -215,10 +238,12 @@ {/key} @@ -238,16 +263,32 @@ - - + + + + + + + + + Metric Bins + + + {#each metricBinOptions as mbin} + + {/each} + + + {#if metricsInHistograms?.length > 0} {#if $stats.error} @@ -267,18 +308,17 @@ {#key $stats.data.jobsStatistics[0].histMetrics} {/key} diff --git a/web/frontend/src/config.entrypoint.js b/web/frontend/src/config.entrypoint.js index feb3916..d2949f2 100644 --- a/web/frontend/src/config.entrypoint.js +++ b/web/frontend/src/config.entrypoint.js @@ -5,6 +5,7 @@ new Config({ target: document.getElementById('svelte-app'), props: { isAdmin: isAdmin, + isSupport: isSupport, isApi: isApi, username: username, ncontent: ncontent, diff --git a/web/frontend/src/config/AdminSettings.svelte b/web/frontend/src/config/AdminSettings.svelte index f512d40..dd53df4 100644 --- a/web/frontend/src/config/AdminSettings.svelte +++ b/web/frontend/src/config/AdminSettings.svelte @@ -4,7 +4,7 @@ + + diff --git a/web/frontend/src/config/UserSettings.svelte b/web/frontend/src/config/UserSettings.svelte index 539aadf..1b59e31 100644 --- a/web/frontend/src/config/UserSettings.svelte +++ b/web/frontend/src/config/UserSettings.svelte @@ -18,6 +18,7 @@ const ccconfig = getContext("cc-config"); let message = { msg: "", target: "", color: "#d63384" }; let displayMessage = false; + let cbmode = ccconfig?.plot_general_colorblindMode || false; async function handleSettingSubmit(event) { const selector = event.detail.selector @@ -28,6 +29,9 @@ const res = await fetch(form.action, { method: "POST", body: formData }); if (res.ok) { let text = await res.text(); + if (formData.get("key") === "plot_general_colorblindMode") { + cbmode = JSON.parse(formData.get("value")); + } popMessage(text, target, "#048109"); } else { 
let text = await res.text(); @@ -51,4 +55,4 @@ handleSettingSubmit(e)}/> handleSettingSubmit(e)}/> - handleSettingSubmit(e)}/> + handleSettingSubmit(e)}/> diff --git a/web/frontend/src/config/admin/Options.svelte b/web/frontend/src/config/admin/Options.svelte index a1fe307..3808834 100644 --- a/web/frontend/src/config/admin/Options.svelte +++ b/web/frontend/src/config/admin/Options.svelte @@ -45,7 +45,7 @@ - Metric Plot Resampling + Metric Plot Resampling Info

Triggered at {resampleConfig.trigger} datapoints.

Configured resolutions: {resampleConfig.resolutions}

diff --git a/web/frontend/src/config/support/SupportOptions.svelte b/web/frontend/src/config/support/SupportOptions.svelte new file mode 100644 index 0000000..7d9ce03 --- /dev/null +++ b/web/frontend/src/config/support/SupportOptions.svelte @@ -0,0 +1,89 @@ + + + + + + + +
+ handleSettingSubmit("#node-paging-form", "npag")} + > + + +
Node List Paging Type
+ {#if displayMessage && message.target == "npag"}
+ Update: {message.msg} +
{/if} +
+ +
+
+ {#if config?.node_list_usePaging} + + {:else} + + {/if} + +
+
+ {#if config?.node_list_usePaging} + + {:else} + + {/if} + +
+
+ +
+
+ +
\ No newline at end of file diff --git a/web/frontend/src/config/user/PlotColorScheme.svelte b/web/frontend/src/config/user/PlotColorScheme.svelte index d180327..1cb5d86 100644 --- a/web/frontend/src/config/user/PlotColorScheme.svelte +++ b/web/frontend/src/config/user/PlotColorScheme.svelte @@ -24,6 +24,7 @@ export let config; export let message; export let displayMessage; + export let cbmode = false; const dispatch = createEventDispatcher(); function updateSetting(selector, target) { @@ -265,6 +266,62 @@ ], }; + // https://personal.sron.nl/~pault/ + // https://tsitsul.in/blog/coloropt/ + const cvdschemes = { + HighContrast: [ + "rgb(221,170,51)", + "rgb(187,85,102)", + "rgb(0,68,136)", + "rgb(0,0,0)", + ], + Bright: [ + "rgb(68,119,170)", + "rgb(102,204,238)", + "rgb(34,136,51)", + "rgb(204,187,68)", + "rgb(238,102,119)", + "rgb(170,51,119)", + "rgb(187,187,187)", + ], + Muted: [ + "rgb(51,34,136)", + "rgb(136,204,238)", + "rgb(68,170,153)", + "rgb(17,119,51)", + "rgb(153,153,51)", + "rgb(221,204,119)", + "rgb(204,102,119)", + "rgb(136,34,85)", + "rgb(170,68,153)", + "rgb(221,221,221)", + ], + NormalSixColor: [ + "rgb(64,83,211)", + "rgb(221,179,16)", + "rgb(181,29,20)", + "rgb(0,190,255)", + "rgb(251,73,176)", + "rgb(0,178,93)", + "rgb(202,202,202)", + ], + NormalTwelveColor: [ + "rgb(235,172,35)", + "rgb(184,0,88)", + "rgb(0,140,249)", + "rgb(0,110,0)", + "rgb(0,187,173)", + "rgb(209,99,230)", + "rgb(178,69,2)", + "rgb(255,146,135)", + "rgb(89,84,214)", + "rgb(0,198,248)", + "rgb(135,133,0)", + "rgb(0,167,108)", + "rgb(189,189,189)", + ] + } + @@ -281,7 +338,7 @@ -
Color Scheme for Timeseries Plots
+
Color Scheme for Timeseries Plots {cbmode ? `(Color Blind Friendly Palettes)` : ``}
{#if displayMessage && message.target == "cs"}
@@ -293,7 +350,7 @@ - {#each Object.entries(colorschemes) as [name, rgbrow]} + {#each Object.entries(cbmode ? cvdschemes : colorschemes) as [name, rgbrow]} + {$nodesQuery.error.message} + + +{:else if $nodesQuery.fetching } + + + + + +{:else if filteredData?.length > 0} + + + {#each filteredData as item (item.host)} + +

+ {item.host} ({item.subCluster}) +

+ {#if item?.disabled[selectedMetric]} + Metric disabled for subcluster {selectedMetric}:{item.subCluster} + {:else} + + + {/if} + + {/each} + +{/if} \ No newline at end of file diff --git a/web/frontend/src/systems/nodelist/NodeInfo.svelte b/web/frontend/src/systems/nodelist/NodeInfo.svelte new file mode 100644 index 0000000..ad6c98e --- /dev/null +++ b/web/frontend/src/systems/nodelist/NodeInfo.svelte @@ -0,0 +1,177 @@ + + + + + + +
+
+ Node + + {hostname} + +
+
+
+
+ {cluster} {subCluster} +
+
+
+ + {#if healthWarn} + + + + + + Status + + + + {:else if metricWarn} + + + + + + Status + + + + {:else if nodeJobsData.jobs.count == 1 && nodeJobsData.jobs.items[0].exclusive} + + + + + + Status + + + + {:else if nodeJobsData.jobs.count >= 1 && !nodeJobsData.jobs.items[0].exclusive} + + + + + + Status + + + + {:else} + + + + + + Status + + + + {/if} +
+ + + + + + + Activity + + + + + List + + + + + + + + + Users + + + + + List + + + {#if userList?.length > 0} + +
+ {userList.join(", ")} +
+
+ {/if} + + + + + + + Projects + + + + + List + + + {#if projectList?.length > 0} + +
+ {projectList.join(", ")} +
+
+ {/if} +
+
+ diff --git a/web/frontend/src/systems/nodelist/NodeListRow.svelte b/web/frontend/src/systems/nodelist/NodeListRow.svelte new file mode 100644 index 0000000..a1e4a54 --- /dev/null +++ b/web/frontend/src/systems/nodelist/NodeListRow.svelte @@ -0,0 +1,187 @@ + + + + +
+ + {#each refinedData as metricData (metricData.data.name)} + {#key metricData} + + {/key} + {/each} + diff --git a/web/templates/base.tmpl b/web/templates/base.tmpl index 2464d7f..358f926 100644 --- a/web/templates/base.tmpl +++ b/web/templates/base.tmpl @@ -15,10 +15,11 @@ {{end}} diff --git a/web/templates/config.tmpl b/web/templates/config.tmpl index 914dc88..0222da7 100644 --- a/web/templates/config.tmpl +++ b/web/templates/config.tmpl @@ -8,6 +8,7 @@ {{define "javascript"}} {{end}} diff --git a/web/web.go b/web/web.go index 1cfa176..45d8646 100644 --- a/web/web.go +++ b/web/web.go @@ -13,6 +13,7 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/util" + "github.com/ClusterCockpit/cc-backend/pkg/archive" "github.com/ClusterCockpit/cc-backend/pkg/log" "github.com/ClusterCockpit/cc-backend/pkg/schema" ) @@ -95,6 +96,7 @@ type Page struct { Roles map[string]schema.Role // Available roles for frontend render checks Build Build // Latest information about the application Clusters []schema.ClusterConfig // List of all clusters for use in the Header + SubClusters map[string][]string // Map per cluster of all subClusters for use in the Header FilterPresets map[string]interface{} // For pages with the Filter component, this can be used to set initial filters. Infos map[string]interface{} // For generic use (e.g. username for /monitoring/user/, job id for /monitoring/job/) Config map[string]interface{} // UI settings for the currently logged in user (e.g. line width, ...) @@ -114,6 +116,15 @@ func RenderTemplate(rw http.ResponseWriter, file string, page *Page) { } } + if page.SubClusters == nil { + page.SubClusters = make(map[string][]string) + for _, cluster := range archive.Clusters { + for _, sc := range cluster.SubClusters { + page.SubClusters[cluster.Name] = append(page.SubClusters[cluster.Name], sc.Name) + } + } + } + log.Debugf("Page config : %v\n", page.Config) if err := t.Execute(rw, page); err != nil { log.Errorf("Template error: %s", err.Error())
{name} @@ -333,8 +390,9 @@ diff --git a/web/frontend/src/systems/NodeOverview.svelte b/web/frontend/src/systems/NodeOverview.svelte new file mode 100644 index 0000000..68ccd78 --- /dev/null +++ b/web/frontend/src/systems/NodeOverview.svelte @@ -0,0 +1,155 @@ + + + + +{#if $nodesQuery.error} + +
+ {#if $nodeJobsData.fetching} + + + + + + {:else} + + {/if} + + {#if metricData?.disabled} + Metric disabled for subcluster {metricData.data.name}:{nodeData.subCluster} + {:else if !!metricData.data?.metric.statisticsSeries} + + +
+ {#key extendedLegendData} + + {/key} + {:else} + + {/if} +