Merge branch 'master' into hotfix

Christoph Kluge 2025-02-14 14:29:33 +01:00
commit 07e72294dc
51 changed files with 4304 additions and 775 deletions

View File

@ -194,6 +194,15 @@ type NodeMetrics {
metrics: [JobMetricWithName!]!
}
type NodesResultList {
items: [NodeMetrics!]!
offset: Int
limit: Int
count: Int
totalNodes: Int
hasNextPage: Boolean
}
type ClusterSupport {
cluster: String!
subClusters: [String!]!
@ -236,11 +245,12 @@ type Query {
jobsFootprints(filter: [JobFilter!], metrics: [String!]!): Footprints
jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList!
jobsStatistics(filter: [JobFilter!], metrics: [String!], page: PageRequest, sortBy: SortByAggregate, groupBy: Aggregate): [JobsStatistics!]!
jobsStatistics(filter: [JobFilter!], metrics: [String!], page: PageRequest, sortBy: SortByAggregate, groupBy: Aggregate, numDurationBins: String, numMetricBins: Int): [JobsStatistics!]!
rooflineHeatmap(filter: [JobFilter!]!, rows: Int!, cols: Int!, minX: Float!, minY: Float!, maxX: Float!, maxY: Float!): [[Float!]!]!
nodeMetrics(cluster: String!, nodes: [String!], scopes: [MetricScope!], metrics: [String!], from: Time!, to: Time!): [NodeMetrics!]!
nodeMetricsList(cluster: String!, subCluster: String!, nodeFilter: String!, scopes: [MetricScope!], metrics: [String!], from: Time!, to: Time!, page: PageRequest, resolution: Int): NodesResultList!
}
type Mutation {
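
To illustrate the extended API surface, here is a minimal Go client sketch issuing the new nodeMetricsList query. The endpoint path, bearer-token auth, RFC3339 time encoding, and the cluster/subcluster names are assumptions for illustration, not part of this commit:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

func main() {
	// Query the new paginated node list endpoint (assumed field selection).
	query := `query ($from: Time!, $to: Time!) {
	  nodeMetricsList(cluster: "fritz", subCluster: "main", nodeFilter: "",
	                  metrics: ["cpu_load"], from: $from, to: $to,
	                  page: {page: 1, itemsPerPage: 10}, resolution: 60) {
	    items { host subCluster }
	    totalNodes
	    hasNextPage
	  }
	}`
	body, _ := json.Marshal(map[string]any{
		"query": query,
		"variables": map[string]any{
			"from": time.Now().Add(-4 * time.Hour).Format(time.RFC3339),
			"to":   time.Now().Format(time.RFC3339),
		},
	})
	req, _ := http.NewRequest(http.MethodPost, "http://localhost:8080/query", bytes.NewReader(body))
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer <JWT>") // assumed auth scheme
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}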

26
go.mod
View File

@ -1,9 +1,9 @@
module github.com/ClusterCockpit/cc-backend
go 1.23
go 1.23.5
require (
github.com/99designs/gqlgen v0.17.57
github.com/99designs/gqlgen v0.17.63
github.com/ClusterCockpit/cc-units v0.4.0
github.com/Masterminds/squirrel v1.5.4
github.com/coreos/go-oidc/v3 v3.11.0
@ -25,8 +25,8 @@ require (
github.com/santhosh-tekuri/jsonschema/v5 v5.3.1
github.com/swaggo/http-swagger v1.3.4
github.com/swaggo/swag v1.16.4
github.com/vektah/gqlparser/v2 v2.5.20
golang.org/x/crypto v0.29.0
github.com/vektah/gqlparser/v2 v2.5.22
golang.org/x/crypto v0.32.0
golang.org/x/exp v0.0.0-20240707233637-46b078467d37
golang.org/x/oauth2 v0.21.0
)
@ -35,11 +35,11 @@ require (
filippo.io/edwards25519 v1.1.0 // indirect
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
github.com/KyleBanks/depth v1.2.1 // indirect
github.com/agnivade/levenshtein v1.2.0 // indirect
github.com/agnivade/levenshtein v1.2.1 // indirect
github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/go-asn1-ber/asn1-ber v1.5.7 // indirect
github.com/go-jose/go-jose/v4 v4.0.3 // indirect
@ -61,7 +61,7 @@ require (
github.com/json-iterator/go v1.1.12 // indirect
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mailru/easyjson v0.9.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
@ -78,12 +78,12 @@ require (
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
go.uber.org/atomic v1.11.0 // indirect
golang.org/x/mod v0.22.0 // indirect
golang.org/x/net v0.31.0 // indirect
golang.org/x/sync v0.9.0 // indirect
golang.org/x/sys v0.27.0 // indirect
golang.org/x/text v0.20.0 // indirect
golang.org/x/tools v0.27.0 // indirect
google.golang.org/protobuf v1.35.2 // indirect
golang.org/x/net v0.34.0 // indirect
golang.org/x/sync v0.10.0 // indirect
golang.org/x/sys v0.29.0 // indirect
golang.org/x/text v0.21.0 // indirect
golang.org/x/tools v0.29.0 // indirect
google.golang.org/protobuf v1.36.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect

48
go.sum
View File

@ -1,7 +1,7 @@
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
github.com/99designs/gqlgen v0.17.57 h1:Ak4p60BRq6QibxY0lEc0JnQhDurfhxA67sp02lMjmPc=
github.com/99designs/gqlgen v0.17.57/go.mod h1:Jx61hzOSTcR4VJy/HFIgXiQ5rJ0Ypw8DxWLjbYDAUw0=
github.com/99designs/gqlgen v0.17.63 h1:HCdaYDPd9HqUXRchEvmE3EFzELRwLlaJ8DBuyC8Cqto=
github.com/99designs/gqlgen v0.17.63/go.mod h1:sVCM2iwIZisJjTI/DEC3fpH+HFgxY1496ZJ+jbT9IjA=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
@ -17,8 +17,8 @@ github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5
github.com/PuerkitoBio/goquery v1.9.3 h1:mpJr/ikUA9/GNJB/DBZcGeFDXUtosHRyRrwh7KGdTG0=
github.com/PuerkitoBio/goquery v1.9.3/go.mod h1:1ndLHPdTz+DyQPICCWYlYQMPl0oXZj0G6D4LCYA6u4U=
github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk=
github.com/agnivade/levenshtein v1.2.0 h1:U9L4IOT0Y3i0TIlUIDJ7rVUziKi/zPbrJGaFrtYH3SY=
github.com/agnivade/levenshtein v1.2.0/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU=
github.com/agnivade/levenshtein v1.2.1 h1:EHBY3UOn1gwdy/VbFwgo4cxecRznFk7fKWN1KOX7eoM=
github.com/agnivade/levenshtein v1.2.1/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU=
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI=
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ=
@ -36,8 +36,8 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/coreos/go-oidc/v3 v3.11.0 h1:Ia3MxdwpSw702YW0xgfmP1GVCMA9aEFWu12XUZ3/OtI=
github.com/coreos/go-oidc/v3 v3.11.0/go.mod h1:gE3LgjOgFoHi9a4ce4/tJczr0Ai2/BoDhf0r5lltWI0=
github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc=
github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@ -153,8 +153,8 @@ github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6Fm
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
@ -224,8 +224,8 @@ github.com/swaggo/swag v1.16.4 h1:clWJtd9LStiG3VeijiCfOVODP6VpHtKdQy9ELFG3s1A=
github.com/swaggo/swag v1.16.4/go.mod h1:VBsHJRsDvfYvqoiMKnsdwhNV9LEMHgEDZcyVYX0sxPg=
github.com/urfave/cli/v2 v2.27.5 h1:WoHEJLdsXr6dDWoJgMq/CboDmyY/8HMMH1fTECbih+w=
github.com/urfave/cli/v2 v2.27.5/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ=
github.com/vektah/gqlparser/v2 v2.5.20 h1:kPaWbhBntxoZPaNdBaIPT1Kh0i1b/onb5kXgEdP5JCo=
github.com/vektah/gqlparser/v2 v2.5.20/go.mod h1:xMl+ta8a5M1Yo1A1Iwt/k7gSpscwSnHZdw7tfhEGfTM=
github.com/vektah/gqlparser/v2 v2.5.22 h1:yaaeJ0fu+nv1vUMW0Hl+aS1eiv1vMfapBNjpffAda1I=
github.com/vektah/gqlparser/v2 v2.5.22/go.mod h1:xMl+ta8a5M1Yo1A1Iwt/k7gSpscwSnHZdw7tfhEGfTM=
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4=
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
@ -238,8 +238,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ=
golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg=
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
golang.org/x/exp v0.0.0-20240707233637-46b078467d37 h1:uLDX+AfeFCct3a2C7uIWBKMJIR3CJMhcgfrUAqjRK6w=
golang.org/x/exp v0.0.0-20240707233637-46b078467d37/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
@ -255,15 +255,15 @@ golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo=
golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM=
golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ=
golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@ -273,8 +273,8 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s=
golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
@ -287,17 +287,17 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug=
golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.27.0 h1:qEKojBykQkQ4EynWy4S8Weg69NumxKdn40Fce3uc/8o=
golang.org/x/tools v0.27.0/go.mod h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q=
golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE=
golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io=
google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=

View File

@ -1419,7 +1419,7 @@ func (api *RestApi) updateConfiguration(rw http.ResponseWriter, r *http.Request)
rw.Header().Set("Content-Type", "text/plain")
key, value := r.FormValue("key"), r.FormValue("value")
fmt.Printf("REST > KEY: %#v\nVALUE: %#v\n", key, value)
// fmt.Printf("REST > KEY: %#v\nVALUE: %#v\n", key, value)
if err := repository.GetUserCfgRepo().UpdateConfig(key, value, repository.GetUserFromContext(r.Context())); err != nil {
http.Error(rw, err.Error(), http.StatusUnprocessableEntity)

File diff suppressed because it is too large

View File

@ -148,6 +148,15 @@ type NodeMetrics struct {
Metrics []*JobMetricWithName `json:"metrics"`
}
type NodesResultList struct {
Items []*NodeMetrics `json:"items"`
Offset *int `json:"offset,omitempty"`
Limit *int `json:"limit,omitempty"`
Count *int `json:"count,omitempty"`
TotalNodes *int `json:"totalNodes,omitempty"`
HasNextPage *bool `json:"hasNextPage,omitempty"`
}
type OrderByInput struct {
Field string `json:"field"`
Type string `json:"type"`

View File

@ -2,7 +2,7 @@ package graph
// This file will be automatically regenerated based on the schema, any resolver implementations
// will be copied through when generating and any unknown code will be moved to the end.
// Code generated by github.com/99designs/gqlgen version v0.17.49
// Code generated by github.com/99designs/gqlgen version v0.17.57
import (
"context"
@ -354,10 +354,14 @@ func (r *queryResolver) Jobs(ctx context.Context, filter []*model.JobFilter, pag
}
// JobsStatistics is the resolver for the jobsStatistics field.
func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate) ([]*model.JobsStatistics, error) {
func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate, numDurationBins *string, numMetricBins *int) ([]*model.JobsStatistics, error) {
var err error
var stats []*model.JobsStatistics
// Top Level Defaults
var defaultDurationBins string = "1h"
var defaultMetricBins int = 10
if requireField(ctx, "totalJobs") || requireField(ctx, "totalWalltime") || requireField(ctx, "totalNodes") || requireField(ctx, "totalCores") ||
requireField(ctx, "totalAccs") || requireField(ctx, "totalNodeHours") || requireField(ctx, "totalCoreHours") || requireField(ctx, "totalAccHours") {
if groupBy == nil {
@ -391,8 +395,13 @@ func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobF
}
if requireField(ctx, "histDuration") || requireField(ctx, "histNumNodes") || requireField(ctx, "histNumCores") || requireField(ctx, "histNumAccs") {
if numDurationBins == nil {
numDurationBins = &defaultDurationBins
}
if groupBy == nil {
stats[0], err = r.Repo.AddHistograms(ctx, filter, stats[0])
stats[0], err = r.Repo.AddHistograms(ctx, filter, stats[0], numDurationBins)
if err != nil {
return nil, err
}
@ -402,8 +411,13 @@ func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobF
}
if requireField(ctx, "histMetrics") {
if numMetricBins == nil {
numMetricBins = &defaultMetricBins
}
if groupBy == nil {
stats[0], err = r.Repo.AddMetricHistograms(ctx, filter, metrics, stats[0])
stats[0], err = r.Repo.AddMetricHistograms(ctx, filter, metrics, stats[0], numMetricBins)
if err != nil {
return nil, err
}
@ -423,8 +437,8 @@ func (r *queryResolver) RooflineHeatmap(ctx context.Context, filter []*model.Job
// NodeMetrics is the resolver for the nodeMetrics field.
func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, nodes []string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time) ([]*model.NodeMetrics, error) {
user := repository.GetUserFromContext(ctx)
if user != nil && !user.HasRole(schema.RoleAdmin) {
return nil, errors.New("you need to be an administrator for this query")
if user != nil && !user.HasAnyRole([]schema.Role{schema.RoleAdmin, schema.RoleSupport}) {
return nil, errors.New("you need to be an administrator or support staff for this query")
}
if metrics == nil {
@ -435,7 +449,7 @@ func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, nodes [
data, err := metricDataDispatcher.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx)
if err != nil {
log.Warn("Error while loading node data")
log.Warn("error while loading node data")
return nil, err
}
@ -445,7 +459,10 @@ func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, nodes [
Host: hostname,
Metrics: make([]*model.JobMetricWithName, 0, len(metrics)*len(scopes)),
}
host.SubCluster, _ = archive.GetSubClusterByNode(cluster, hostname)
host.SubCluster, err = archive.GetSubClusterByNode(cluster, hostname)
if err != nil {
log.Warnf("error in nodeMetrics resolver: %s", err)
}
for metric, scopedMetrics := range metrics {
for _, scopedMetric := range scopedMetrics {
@ -463,6 +480,68 @@ func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, nodes [
return nodeMetrics, nil
}
// NodeMetricsList is the resolver for the nodeMetricsList field.
func (r *queryResolver) NodeMetricsList(ctx context.Context, cluster string, subCluster string, nodeFilter string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time, page *model.PageRequest, resolution *int) (*model.NodesResultList, error) {
if resolution == nil { // Load from Config
if config.Keys.EnableResampling != nil {
defaultRes := slices.Max(config.Keys.EnableResampling.Resolutions)
resolution = &defaultRes
} else { // Set 0 (Loads configured metric timestep)
defaultRes := 0
resolution = &defaultRes
}
}
user := repository.GetUserFromContext(ctx)
if user != nil && !user.HasAnyRole([]schema.Role{schema.RoleAdmin, schema.RoleSupport}) {
return nil, errors.New("you need to be an administrator or support staff for this query")
}
if metrics == nil {
for _, mc := range archive.GetCluster(cluster).MetricConfig {
metrics = append(metrics, mc.Name)
}
}
data, totalNodes, hasNextPage, err := metricDataDispatcher.LoadNodeListData(cluster, subCluster, nodeFilter, metrics, scopes, *resolution, from, to, page, ctx)
if err != nil {
log.Warn("error while loading node data")
return nil, err
}
nodeMetricsList := make([]*model.NodeMetrics, 0, len(data))
for hostname, metrics := range data {
host := &model.NodeMetrics{
Host: hostname,
Metrics: make([]*model.JobMetricWithName, 0, len(metrics)*len(scopes)),
}
host.SubCluster, err = archive.GetSubClusterByNode(cluster, hostname)
if err != nil {
log.Warnf("error in nodeMetrics resolver: %s", err)
}
for metric, scopedMetrics := range metrics {
for scope, scopedMetric := range scopedMetrics {
host.Metrics = append(host.Metrics, &model.JobMetricWithName{
Name: metric,
Scope: scope,
Metric: scopedMetric,
})
}
}
nodeMetricsList = append(nodeMetricsList, host)
}
nodeMetricsListResult := &model.NodesResultList{
Items: nodeMetricsList,
TotalNodes: &totalNodes,
HasNextPage: &hasNextPage,
}
return nodeMetricsListResult, nil
}
// NumberOfNodes is the resolver for the numberOfNodes field.
func (r *subClusterResolver) NumberOfNodes(ctx context.Context, obj *schema.SubCluster) (int, error) {
nodeList, err := archive.ParseNodeList(obj.Nodes)
@ -490,11 +569,9 @@ func (r *Resolver) Query() generated.QueryResolver { return &queryResolver{r} }
// SubCluster returns generated.SubClusterResolver implementation.
func (r *Resolver) SubCluster() generated.SubClusterResolver { return &subClusterResolver{r} }
type (
clusterResolver struct{ *Resolver }
jobResolver struct{ *Resolver }
metricValueResolver struct{ *Resolver }
mutationResolver struct{ *Resolver }
queryResolver struct{ *Resolver }
subClusterResolver struct{ *Resolver }
)
type clusterResolver struct{ *Resolver }
type jobResolver struct{ *Resolver }
type metricValueResolver struct{ *Resolver }
type mutationResolver struct{ *Resolver }
type queryResolver struct{ *Resolver }
type subClusterResolver struct{ *Resolver }

View File

@ -10,6 +10,7 @@ import (
"time"
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/internal/metricdata"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
"github.com/ClusterCockpit/cc-backend/pkg/log"
@ -219,7 +220,7 @@ func LoadAverages(
return nil
}
// Used for the node/system view. Returns a map of nodes to a map of metrics.
// Used for the classic node/system view. Returns a map of nodes to a map of metrics.
func LoadNodeData(
cluster string,
metrics, nodes []string,
@ -254,3 +255,53 @@ func LoadNodeData(
return data, nil
}
func LoadNodeListData(
cluster, subCluster, nodeFilter string,
metrics []string,
scopes []schema.MetricScope,
resolution int,
from, to time.Time,
page *model.PageRequest,
ctx context.Context,
) (map[string]schema.JobData, int, bool, error) {
repo, err := metricdata.GetMetricDataRepo(cluster)
if err != nil {
return nil, 0, false, fmt.Errorf("METRICDATA/METRICDATA > no metric data repository configured for '%s'", cluster)
}
if metrics == nil {
for _, m := range archive.GetCluster(cluster).MetricConfig {
metrics = append(metrics, m.Name)
}
}
data, totalNodes, hasNextPage, err := repo.LoadNodeListData(cluster, subCluster, nodeFilter, metrics, scopes, resolution, from, to, page, ctx)
if err != nil {
if len(data) != 0 {
log.Warnf("partial error: %s", err.Error())
} else {
log.Error("Error while loading node data from metric repository")
return nil, totalNodes, hasNextPage, err
}
}
// NOTE: New StatsSeries will always be calculated as 'min/median/max'
const maxSeriesSize int = 8
for _, jd := range data {
for _, scopes := range jd {
for _, jm := range scopes {
if jm.StatisticsSeries != nil || len(jm.Series) < maxSeriesSize {
continue
}
jm.AddStatisticsSeries()
}
}
}
if data == nil {
return nil, totalNodes, hasNextPage, fmt.Errorf("METRICDATA/METRICDATA > the metric data repository for '%s' does not support this query", cluster)
}
return data, totalNodes, hasNextPage, nil
}

View File

@ -11,10 +11,12 @@ import (
"encoding/json"
"fmt"
"net/http"
"sort"
"strconv"
"strings"
"time"
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
@ -205,13 +207,12 @@ func (ccms *CCMetricStore) LoadData(
jobData[metric] = make(map[schema.MetricScope]*schema.JobMetric)
}
res := row[0].Resolution
if res == 0 {
res = mc.Timestep
res := mc.Timestep
if len(row) > 0 {
res = row[0].Resolution
}
jobMetric, ok := jobData[metric][scope]
if !ok {
jobMetric = &schema.JobMetric{
Unit: mc.Unit,
@ -235,8 +236,7 @@ func (ccms *CCMetricStore) LoadData(
}
if res.Avg.IsNaN() || res.Min.IsNaN() || res.Max.IsNaN() {
// TODO: use schema.Float instead of float64?
// This is done because regular float64 can not be JSONed when NaN.
// "schema.Float()" because regular float64 can not be JSONed when NaN.
res.Avg = schema.Float(0)
res.Min = schema.Float(0)
res.Max = schema.Float(0)
@ -693,6 +693,445 @@ func (ccms *CCMetricStore) LoadNodeData(
return data, nil
}
func (ccms *CCMetricStore) LoadNodeListData(
cluster, subCluster, nodeFilter string,
metrics []string,
scopes []schema.MetricScope,
resolution int,
from, to time.Time,
page *model.PageRequest,
ctx context.Context,
) (map[string]schema.JobData, int, bool, error) {
// 0) Init additional vars
var totalNodes int = 0
var hasNextPage bool = false
// 1) Get list of all nodes
var nodes []string
if subCluster != "" {
scNodes := archive.NodeLists[cluster][subCluster]
nodes = scNodes.PrintList()
} else {
subClusterNodeLists := archive.NodeLists[cluster]
for _, nodeList := range subClusterNodeLists {
nodes = append(nodes, nodeList.PrintList()...)
}
}
// 2) Filter nodes
if nodeFilter != "" {
filteredNodes := []string{}
for _, node := range nodes {
if strings.Contains(node, nodeFilter) {
filteredNodes = append(filteredNodes, node)
}
}
nodes = filteredNodes
}
// 2.1) Count total nodes and sort them -> the sorting is invalidated after the ccms return ...
totalNodes = len(nodes)
sort.Strings(nodes)
// 3) Apply paging
if len(nodes) > page.ItemsPerPage {
start := (page.Page - 1) * page.ItemsPerPage
end := start + page.ItemsPerPage
if end > len(nodes) {
end = len(nodes)
hasNextPage = false
} else {
hasNextPage = true
}
nodes = nodes[start:end]
}
// Note: Order of node data is not guaranteed after this point, but contents match page and filter criteria
queries, assignedScope, err := ccms.buildNodeQueries(cluster, subCluster, nodes, metrics, scopes, resolution)
if err != nil {
log.Warn("Error while building queries")
return nil, totalNodes, hasNextPage, err
}
req := ApiQueryRequest{
Cluster: cluster,
Queries: queries,
From: from.Unix(),
To: to.Unix(),
WithStats: true,
WithData: true,
}
resBody, err := ccms.doRequest(ctx, &req)
if err != nil {
log.Error(fmt.Sprintf("Error while performing request %#v\n", err))
return nil, totalNodes, hasNextPage, err
}
var errors []string
data := make(map[string]schema.JobData)
for i, row := range resBody.Results {
var query ApiQuery
if resBody.Queries != nil {
query = resBody.Queries[i]
} else {
query = req.Queries[i]
}
// qdata := res[0]
metric := ccms.toLocalName(query.Metric)
scope := assignedScope[i]
mc := archive.GetMetricConfig(cluster, metric)
res := mc.Timestep
if len(row) > 0 {
res = row[0].Resolution
}
// Init Nested Map Data Structures If Not Found
hostData, ok := data[query.Hostname]
if !ok {
hostData = make(schema.JobData)
data[query.Hostname] = hostData
}
metricData, ok := hostData[metric]
if !ok {
metricData = make(map[schema.MetricScope]*schema.JobMetric)
data[query.Hostname][metric] = metricData
}
scopeData, ok := metricData[scope]
if !ok {
scopeData = &schema.JobMetric{
Unit: mc.Unit,
Timestep: res,
Series: make([]schema.Series, 0),
}
data[query.Hostname][metric][scope] = scopeData
}
for ndx, res := range row {
if res.Error != nil {
/* Build list for "partial errors", if any */
errors = append(errors, fmt.Sprintf("failed to fetch '%s' from host '%s': %s", query.Metric, query.Hostname, *res.Error))
continue
}
id := (*string)(nil)
if query.Type != nil {
id = new(string)
*id = query.TypeIds[ndx]
}
if res.Avg.IsNaN() || res.Min.IsNaN() || res.Max.IsNaN() {
// "schema.Float()" because regular float64 can not be JSONed when NaN.
res.Avg = schema.Float(0)
res.Min = schema.Float(0)
res.Max = schema.Float(0)
}
scopeData.Series = append(scopeData.Series, schema.Series{
Hostname: query.Hostname,
Id: id,
Statistics: schema.MetricStatistics{
Avg: float64(res.Avg),
Min: float64(res.Min),
Max: float64(res.Max),
},
Data: res.Data,
})
}
}
if len(errors) != 0 {
/* Returns list of "partial errors" */
return data, totalNodes, hasNextPage, fmt.Errorf("METRICDATA/CCMS > Errors: %s", strings.Join(errors, ", "))
}
return data, totalNodes, hasNextPage, nil
}
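
For orientation, a minimal sketch of the paging arithmetic used above, with hypothetical numbers (130 filtered nodes, page 3, 25 items per page); totalNodes is counted before slicing so the caller can still render a full pager:

package main

import "fmt"

func main() {
	nodes := make([]string, 130) // stand-in for the filtered and sorted node list
	totalNodes := len(nodes)     // counted before paging, as in the code above
	page, itemsPerPage := 3, 25
	hasNextPage := false
	if len(nodes) > itemsPerPage {
		start := (page - 1) * itemsPerPage // 50
		end := start + itemsPerPage        // 75
		if end > len(nodes) {
			end = len(nodes)
			hasNextPage = false
		} else {
			hasNextPage = true
		}
		nodes = nodes[start:end]
	}
	fmt.Println(totalNodes, len(nodes), hasNextPage) // 130 25 true
}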
func (ccms *CCMetricStore) buildNodeQueries(
cluster string,
subCluster string,
nodes []string,
metrics []string,
scopes []schema.MetricScope,
resolution int,
) ([]ApiQuery, []schema.MetricScope, error) {
queries := make([]ApiQuery, 0, len(metrics)*len(scopes)*len(nodes))
assignedScope := []schema.MetricScope{}
// Get Topol before loop if subCluster given
var subClusterTopol *schema.SubCluster
var scterr error
if subCluster != "" {
subClusterTopol, scterr = archive.GetSubCluster(cluster, subCluster)
if scterr != nil {
// TODO: Log
return nil, nil, scterr
}
}
for _, metric := range metrics {
remoteName := ccms.toRemoteName(metric)
mc := archive.GetMetricConfig(cluster, metric)
if mc == nil {
// return nil, fmt.Errorf("METRICDATA/CCMS > metric '%s' is not specified for cluster '%s'", metric, cluster)
log.Infof("metric '%s' is not specified for cluster '%s'", metric, cluster)
continue
}
// Avoid duplicates...
handledScopes := make([]schema.MetricScope, 0, 3)
scopesLoop:
for _, requestedScope := range scopes {
nativeScope := mc.Scope
scope := nativeScope.Max(requestedScope)
for _, s := range handledScopes {
if scope == s {
continue scopesLoop
}
}
handledScopes = append(handledScopes, scope)
for _, hostname := range nodes {
// If no subCluster given, get it by node
if subCluster == "" {
subClusterName, scnerr := archive.GetSubClusterByNode(cluster, hostname)
if scnerr != nil {
return nil, nil, scnerr
}
subClusterTopol, scterr = archive.GetSubCluster(cluster, subClusterName)
if scterr != nil {
return nil, nil, scterr
}
}
// Always full node hwthread id list, no partial queries expected -> Use "topology.Node" directly where applicable
// Always full accelerator id list, no partial queries expected -> Use "acceleratorIds" directly where applicable
topology := subClusterTopol.Topology
acceleratorIds := topology.GetAcceleratorIDs()
// Moved check here if metric matches hardware specs
if nativeScope == schema.MetricScopeAccelerator && len(acceleratorIds) == 0 {
continue scopesLoop
}
// Accelerator -> Accelerator (Use "accelerator" scope if requested scope is lower than node)
if nativeScope == schema.MetricScopeAccelerator && scope.LT(schema.MetricScopeNode) {
if scope != schema.MetricScopeAccelerator {
// Skip all other caught cases
continue
}
queries = append(queries, ApiQuery{
Metric: remoteName,
Hostname: hostname,
Aggregate: false,
Type: &acceleratorString,
TypeIds: acceleratorIds,
Resolution: resolution,
})
assignedScope = append(assignedScope, schema.MetricScopeAccelerator)
continue
}
// Accelerator -> Node
if nativeScope == schema.MetricScopeAccelerator && scope == schema.MetricScopeNode {
if len(acceleratorIds) == 0 {
continue
}
queries = append(queries, ApiQuery{
Metric: remoteName,
Hostname: hostname,
Aggregate: true,
Type: &acceleratorString,
TypeIds: acceleratorIds,
Resolution: resolution,
})
assignedScope = append(assignedScope, scope)
continue
}
// HWThread -> HWThread
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeHWThread {
queries = append(queries, ApiQuery{
Metric: remoteName,
Hostname: hostname,
Aggregate: false,
Type: &hwthreadString,
TypeIds: intToStringSlice(topology.Node),
Resolution: resolution,
})
assignedScope = append(assignedScope, scope)
continue
}
// HWThread -> Core
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeCore {
cores, _ := topology.GetCoresFromHWThreads(topology.Node)
for _, core := range cores {
queries = append(queries, ApiQuery{
Metric: remoteName,
Hostname: hostname,
Aggregate: true,
Type: &hwthreadString,
TypeIds: intToStringSlice(topology.Core[core]),
Resolution: resolution,
})
assignedScope = append(assignedScope, scope)
}
continue
}
// HWThread -> Socket
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeSocket {
sockets, _ := topology.GetSocketsFromHWThreads(topology.Node)
for _, socket := range sockets {
queries = append(queries, ApiQuery{
Metric: remoteName,
Hostname: hostname,
Aggregate: true,
Type: &hwthreadString,
TypeIds: intToStringSlice(topology.Socket[socket]),
Resolution: resolution,
})
assignedScope = append(assignedScope, scope)
}
continue
}
// HWThread -> Node
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeNode {
queries = append(queries, ApiQuery{
Metric: remoteName,
Hostname: hostname,
Aggregate: true,
Type: &hwthreadString,
TypeIds: intToStringSlice(topology.Node),
Resolution: resolution,
})
assignedScope = append(assignedScope, scope)
continue
}
// Core -> Core
if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeCore {
cores, _ := topology.GetCoresFromHWThreads(topology.Node)
queries = append(queries, ApiQuery{
Metric: remoteName,
Hostname: hostname,
Aggregate: false,
Type: &coreString,
TypeIds: intToStringSlice(cores),
Resolution: resolution,
})
assignedScope = append(assignedScope, scope)
continue
}
// Core -> Node
if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeNode {
cores, _ := topology.GetCoresFromHWThreads(topology.Node)
queries = append(queries, ApiQuery{
Metric: remoteName,
Hostname: hostname,
Aggregate: true,
Type: &coreString,
TypeIds: intToStringSlice(cores),
Resolution: resolution,
})
assignedScope = append(assignedScope, scope)
continue
}
// MemoryDomain -> MemoryDomain
if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeMemoryDomain {
sockets, _ := topology.GetMemoryDomainsFromHWThreads(topology.Node)
queries = append(queries, ApiQuery{
Metric: remoteName,
Hostname: hostname,
Aggregate: false,
Type: &memoryDomainString,
TypeIds: intToStringSlice(sockets),
Resolution: resolution,
})
assignedScope = append(assignedScope, scope)
continue
}
// MemoryDomain -> Node
if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeNode {
sockets, _ := topology.GetMemoryDomainsFromHWThreads(topology.Node)
queries = append(queries, ApiQuery{
Metric: remoteName,
Hostname: hostname,
Aggregate: true,
Type: &memoryDomainString,
TypeIds: intToStringSlice(sockets),
Resolution: resolution,
})
assignedScope = append(assignedScope, scope)
continue
}
// Socket -> Socket
if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeSocket {
sockets, _ := topology.GetSocketsFromHWThreads(topology.Node)
queries = append(queries, ApiQuery{
Metric: remoteName,
Hostname: hostname,
Aggregate: false,
Type: &socketString,
TypeIds: intToStringSlice(sockets),
Resolution: resolution,
})
assignedScope = append(assignedScope, scope)
continue
}
// Socket -> Node
if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeNode {
sockets, _ := topology.GetSocketsFromHWThreads(topology.Node)
queries = append(queries, ApiQuery{
Metric: remoteName,
Hostname: hostname,
Aggregate: true,
Type: &socketString,
TypeIds: intToStringSlice(sockets),
Resolution: resolution,
})
assignedScope = append(assignedScope, scope)
continue
}
// Node -> Node
if nativeScope == schema.MetricScopeNode && scope == schema.MetricScopeNode {
queries = append(queries, ApiQuery{
Metric: remoteName,
Hostname: hostname,
Resolution: resolution,
})
assignedScope = append(assignedScope, scope)
continue
}
return nil, nil, fmt.Errorf("METRICDATA/CCMS > TODO: unhandled case: native-scope=%s, requested-scope=%s", nativeScope, requestedScope)
}
}
}
return queries, assignedScope, nil
}
func intToStringSlice(is []int) []string {
ss := make([]string, len(is))
for i, x := range is {

View File

@ -13,6 +13,7 @@ import (
"strings"
"time"
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
@ -312,3 +313,21 @@ func (idb *InfluxDBv2DataRepository) LoadNodeData(
return nil, errors.New("METRICDATA/INFLUXV2 > unimplemented for InfluxDBv2DataRepository")
}
func (idb *InfluxDBv2DataRepository) LoadNodeListData(
cluster, subCluster, nodeFilter string,
metrics []string,
scopes []schema.MetricScope,
resolution int,
from, to time.Time,
page *model.PageRequest,
ctx context.Context,
) (map[string]schema.JobData, int, bool, error) {
var totalNodes int = 0
var hasNextPage bool = false
// TODO : Implement to be used in NodeList-View
log.Infof("LoadNodeListData unimplemented for InfluxDBv2DataRepository, Args: cluster %s, metrics %v, nodeFilter %v, scopes %v", cluster, metrics, nodeFilter, scopes)
return nil, totalNodes, hasNextPage, errors.New("METRICDATA/INFLUXV2 > unimplemented for InfluxDBv2DataRepository")
}

View File

@ -11,6 +11,7 @@ import (
"time"
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
)
@ -26,8 +27,11 @@ type MetricDataRepository interface {
// Return a map of metrics to a map of nodes to the metric statistics of the job. node scope assumed for now.
LoadStats(job *schema.Job, metrics []string, ctx context.Context) (map[string]map[string]schema.MetricStatistics, error)
// Return a map of hosts to a map of metrics at the requested scopes for that node.
// Return a map of hosts to a map of metrics at the requested scopes (currently only node) for that node.
LoadNodeData(cluster string, metrics, nodes []string, scopes []schema.MetricScope, from, to time.Time, ctx context.Context) (map[string]map[string][]*schema.JobMetric, error)
// Return a map of hosts to a map of metrics to a map of scopes for multiple nodes.
LoadNodeListData(cluster, subCluster, nodeFilter string, metrics []string, scopes []schema.MetricScope, resolution int, from, to time.Time, page *model.PageRequest, ctx context.Context) (map[string]schema.JobData, int, bool, error)
}
var metricDataRepos map[string]MetricDataRepository = map[string]MetricDataRepository{}

View File

@ -20,6 +20,7 @@ import (
"text/template"
"time"
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
@ -446,3 +447,21 @@ func (pdb *PrometheusDataRepository) LoadNodeData(
log.Debugf("LoadNodeData of %v nodes took %s", len(data), t1)
return data, nil
}
func (pdb *PrometheusDataRepository) LoadNodeListData(
cluster, subCluster, nodeFilter string,
metrics []string,
scopes []schema.MetricScope,
resolution int,
from, to time.Time,
page *model.PageRequest,
ctx context.Context,
) (map[string]schema.JobData, int, bool, error) {
var totalNodes int = 0
var hasNextPage bool = false
// TODO : Implement to be used in NodeList-View
log.Infof("LoadNodeListData unimplemented for PrometheusDataRepository, Args: cluster %s, metrics %v, nodeFilter %v, scopes %v", cluster, metrics, nodeFilter, scopes)
return nil, totalNodes, hasNextPage, errors.New("METRICDATA/PROMETHEUS > unimplemented for PrometheusDataRepository")
}

View File

@ -9,6 +9,7 @@ import (
"encoding/json"
"time"
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
)
@ -50,6 +51,19 @@ func (tmdr *TestMetricDataRepository) LoadNodeData(
panic("TODO")
}
func (tmdr *TestMetricDataRepository) LoadNodeListData(
cluster, subCluster, nodeFilter string,
metrics []string,
scopes []schema.MetricScope,
resolution int,
from, to time.Time,
page *model.PageRequest,
ctx context.Context,
) (map[string]schema.JobData, int, bool, error) {
panic("TODO")
}
func DeepCopy(jd_temp schema.JobData) schema.JobData {
var jd schema.JobData

View File

@ -8,7 +8,6 @@ import (
"context"
"database/sql"
"fmt"
"math"
"time"
"github.com/ClusterCockpit/cc-backend/internal/config"
@ -447,15 +446,40 @@ func (r *JobRepository) AddHistograms(
ctx context.Context,
filter []*model.JobFilter,
stat *model.JobsStatistics,
durationBins *string,
) (*model.JobsStatistics, error) {
start := time.Now()
var targetBinCount int
var targetBinSize int
switch {
case *durationBins == "1m": // 1 Minute Bins + Max 60 Bins -> Max 60 Minutes
targetBinCount = 60
targetBinSize = 60
case *durationBins == "10m": // 10 Minute Bins + Max 72 Bins -> Max 12 Hours
targetBinCount = 72
targetBinSize = 600
case *durationBins == "1h": // 1 Hour Bins + Max 48 Bins -> Max 48 Hours
targetBinCount = 48
targetBinSize = 3600
case *durationBins == "6h": // 6 Hour Bins + Max 12 Bins -> Max 3 Days
targetBinCount = 12
targetBinSize = 21600
case *durationBins == "12h": // 12 hour Bins + Max 14 Bins -> Max 7 Days
targetBinCount = 14
targetBinSize = 43200
default: // 24h
targetBinCount = 24
targetBinSize = 3600
}
castType := r.getCastType()
var err error
value := fmt.Sprintf(`CAST(ROUND((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) / 3600) as %s) as value`, time.Now().Unix(), castType)
stat.HistDuration, err = r.jobsStatisticsHistogram(ctx, value, filter)
// Return X-Values always as seconds, will be formatted into minutes and hours in frontend
value := fmt.Sprintf(`CAST(ROUND(((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) / %d) + 1) as %s) as value`, time.Now().Unix(), targetBinSize, castType)
stat.HistDuration, err = r.jobsDurationStatisticsHistogram(ctx, value, filter, targetBinSize, &targetBinCount)
if err != nil {
log.Warn("Error while loading job statistics histogram: running jobs")
log.Warn("Error while loading job statistics histogram: job duration")
return nil, err
}
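
As a rough illustration of the duration binning introduced above, a small sketch (assuming the "1h" preset, i.e. targetBinSize = 3600 and targetBinCount = 48) that mirrors the integer arithmetic of the SQL value expression:

package main

import "fmt"

func main() {
	targetBinSize := 3600
	duration := 7500                      // job runtime in seconds (hypothetical)
	bin := (duration / targetBinSize) + 1 // integer division, as in the SQL value expression -> 3
	xValue := bin * targetBinSize         // x-value reported to the frontend, in seconds -> 10800
	fmt.Println(bin, xValue)
}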
@ -487,6 +511,7 @@ func (r *JobRepository) AddMetricHistograms(
filter []*model.JobFilter,
metrics []string,
stat *model.JobsStatistics,
targetBinCount *int,
) (*model.JobsStatistics, error) {
start := time.Now()
@ -494,7 +519,7 @@ func (r *JobRepository) AddMetricHistograms(
for _, f := range filter {
if f.State != nil {
if len(f.State) == 1 && f.State[0] == "running" {
stat.HistMetrics = r.runningJobsMetricStatisticsHistogram(ctx, metrics, filter)
stat.HistMetrics = r.runningJobsMetricStatisticsHistogram(ctx, metrics, filter, targetBinCount)
log.Debugf("Timer AddMetricHistograms %s", time.Since(start))
return stat, nil
}
@ -503,7 +528,7 @@ func (r *JobRepository) AddMetricHistograms(
// All other cases: Query and make bins in sqlite directly
for _, m := range metrics {
metricHisto, err := r.jobsMetricStatisticsHistogram(ctx, m, filter)
metricHisto, err := r.jobsMetricStatisticsHistogram(ctx, m, filter, targetBinCount)
if err != nil {
log.Warnf("Error while loading job metric statistics histogram: %s", m)
continue
@ -540,6 +565,7 @@ func (r *JobRepository) jobsStatisticsHistogram(
}
points := make([]*model.HistoPoint, 0)
// is it possible to introduce zero values here? requires info about bincount
for rows.Next() {
point := model.HistoPoint{}
if err := rows.Scan(&point.Value, &point.Count); err != nil {
@ -553,10 +579,66 @@ func (r *JobRepository) jobsStatisticsHistogram(
return points, nil
}
func (r *JobRepository) jobsDurationStatisticsHistogram(
ctx context.Context,
value string,
filters []*model.JobFilter,
binSizeSeconds int,
targetBinCount *int,
) ([]*model.HistoPoint, error) {
start := time.Now()
query, qerr := SecurityCheck(ctx,
sq.Select(value, "COUNT(job.id) AS count").From("job"))
if qerr != nil {
return nil, qerr
}
// Setup Array
points := make([]*model.HistoPoint, 0)
for i := 1; i <= *targetBinCount; i++ {
point := model.HistoPoint{Value: i * binSizeSeconds, Count: 0}
points = append(points, &point)
}
for _, f := range filters {
query = BuildWhereClause(f, query)
}
rows, err := query.GroupBy("value").RunWith(r.DB).Query()
if err != nil {
log.Error("Error while running query")
return nil, err
}
// Fill Array at matching $Value
for rows.Next() {
point := model.HistoPoint{}
if err := rows.Scan(&point.Value, &point.Count); err != nil {
log.Warn("Error while scanning rows")
return nil, err
}
for _, e := range points {
if e.Value == (point.Value * binSizeSeconds) {
// Note:
// Matching on the unmodified integer value (and multiplying point.Value by binSizeSeconds after the match)
// causes the frontend to loop into the highest targetBinCount, because the zoom condition is fulfilled instantly (cause unknown)
e.Count = point.Count
break
}
}
}
log.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start))
return points, nil
}
func (r *JobRepository) jobsMetricStatisticsHistogram(
ctx context.Context,
metric string,
filters []*model.JobFilter,
bins *int,
) (*model.MetricHistoPoints, error) {
// Get specific Peak or largest Peak
var metricConfig *schema.MetricConfig
@ -624,16 +706,15 @@ func (r *JobRepository) jobsMetricStatisticsHistogram(
return nil, sqlerr
}
bins := 10
binQuery := fmt.Sprintf(`CAST( (case when %s = value.max
then value.max*0.999999999 else %s end - value.min) / (value.max -
value.min) * %d as INTEGER )`, jm, jm, bins)
value.min) * %v as INTEGER )`, jm, jm, *bins)
mainQuery := sq.Select(
fmt.Sprintf(`%s + 1 as bin`, binQuery),
fmt.Sprintf(`count(%s) as count`, jm),
fmt.Sprintf(`CAST(((value.max / %d) * (%s )) as INTEGER ) as min`, bins, binQuery),
fmt.Sprintf(`CAST(((value.max / %d) * (%s + 1 )) as INTEGER ) as max`, bins, binQuery),
fmt.Sprintf(`CAST(((value.max / %d) * (%v )) as INTEGER ) as min`, *bins, binQuery),
fmt.Sprintf(`CAST(((value.max / %d) * (%v + 1 )) as INTEGER ) as max`, *bins, binQuery),
).From("job").CrossJoin(
fmt.Sprintf(`(%s) as value`, crossJoinQuerySql), crossJoinQueryArgs...,
).Where(fmt.Sprintf(`%s is not null and %s <= %f`, jm, jm, peak))
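
As a side note on the binQuery expression above, a small Go sketch of the same arithmetic with hypothetical numbers; clamping the peak value to value.max*0.999999999 keeps it in the last bin instead of spilling into an extra one:

package main

import (
	"fmt"
	"math"
)

// binIndex mirrors the SQL: CAST((x' - min) / (max - min) * bins AS INTEGER) + 1,
// where x' is x nudged just below max when x == max.
func binIndex(x, min, max float64, bins int) int {
	if x == max {
		x = max * 0.999999999
	}
	return int(math.Floor((x-min)/(max-min)*float64(bins))) + 1
}

func main() {
	fmt.Println(binIndex(37, 0, 100, 10))  // 4
	fmt.Println(binIndex(100, 0, 100, 10)) // 10, not 11
}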
@ -657,7 +738,15 @@ func (r *JobRepository) jobsMetricStatisticsHistogram(
return nil, err
}
// Setup Array
points := make([]*model.MetricHistoPoint, 0)
for i := 1; i <= *bins; i++ {
binMax := ((int(peak) / *bins) * i)
binMin := ((int(peak) / *bins) * (i - 1))
point := model.MetricHistoPoint{Bin: &i, Count: 0, Min: &binMin, Max: &binMax}
points = append(points, &point)
}
for rows.Next() {
point := model.MetricHistoPoint{}
if err := rows.Scan(&point.Bin, &point.Count, &point.Min, &point.Max); err != nil {
@ -665,7 +754,20 @@ func (r *JobRepository) jobsMetricStatisticsHistogram(
return nil, err // Totally bricks cc-backend if returned and if all metrics requested?
}
points = append(points, &point)
for _, e := range points {
if e.Bin != nil && point.Bin != nil {
if *e.Bin == *point.Bin {
e.Count = point.Count
if point.Min != nil {
e.Min = point.Min
}
if point.Max != nil {
e.Max = point.Max
}
break
}
}
}
}
result := model.MetricHistoPoints{Metric: metric, Unit: unit, Stat: &footprintStat, Data: points}
@ -678,7 +780,9 @@ func (r *JobRepository) runningJobsMetricStatisticsHistogram(
ctx context.Context,
metrics []string,
filters []*model.JobFilter,
bins *int,
) []*model.MetricHistoPoints {
// Get Jobs
jobs, err := r.QueryJobs(ctx, filters, &model.PageRequest{Page: 1, ItemsPerPage: 500 + 1}, nil)
if err != nil {
@ -720,7 +824,6 @@ func (r *JobRepository) runningJobsMetricStatisticsHistogram(
metricConfig = archive.GetMetricConfig(*f.Cluster.Eq, metric)
peak = metricConfig.Peak
unit = metricConfig.Unit.Prefix + metricConfig.Unit.Base
log.Debugf("Cluster %s filter found with peak %f for %s", *f.Cluster.Eq, peak, metric)
}
}
@ -740,28 +843,24 @@ func (r *JobRepository) runningJobsMetricStatisticsHistogram(
}
// Make and fill bins
bins := 10.0
peakBin := peak / bins
peakBin := int(peak) / *bins
points := make([]*model.MetricHistoPoint, 0)
for b := 0; b < 10; b++ {
for b := 0; b < *bins; b++ {
count := 0
bindex := b + 1
bmin := math.Round(peakBin * float64(b))
bmax := math.Round(peakBin * (float64(b) + 1.0))
bmin := peakBin * b
bmax := peakBin * (b + 1)
// Iterate AVG values for indexed metric and count for bins
for _, val := range avgs[idx] {
if float64(val) >= bmin && float64(val) < bmax {
if int(val) >= bmin && int(val) < bmax {
count += 1
}
}
bminint := int(bmin)
bmaxint := int(bmax)
// Append Bin to Metric Result Array
point := model.MetricHistoPoint{Bin: &bindex, Count: count, Min: &bminint, Max: &bmaxint}
point := model.MetricHistoPoint{Bin: &bindex, Count: count, Min: &bmin, Max: &bmax}
points = append(points, &point)
}

View File

@ -42,10 +42,12 @@ var routes []Route = []Route{
{"/monitoring/projects/", "monitoring/list.tmpl", "Projects - ClusterCockpit", true, func(i InfoType, r *http.Request) InfoType { i["listType"] = "PROJECT"; return i }},
{"/monitoring/tags/", "monitoring/taglist.tmpl", "Tags - ClusterCockpit", false, setupTaglistRoute},
{"/monitoring/user/{id}", "monitoring/user.tmpl", "User <ID> - ClusterCockpit", true, setupUserRoute},
{"/monitoring/systems/{cluster}", "monitoring/systems.tmpl", "Cluster <ID> - ClusterCockpit", false, setupClusterRoute},
{"/monitoring/systems/{cluster}", "monitoring/systems.tmpl", "Cluster <ID> Node Overview - ClusterCockpit", false, setupClusterOverviewRoute},
{"/monitoring/systems/list/{cluster}", "monitoring/systems.tmpl", "Cluster <ID> Node List - ClusterCockpit", false, setupClusterListRoute},
{"/monitoring/systems/list/{cluster}/{subcluster}", "monitoring/systems.tmpl", "Cluster <ID> <SID> Node List - ClusterCockpit", false, setupClusterListRoute},
{"/monitoring/node/{cluster}/{hostname}", "monitoring/node.tmpl", "Node <ID> - ClusterCockpit", false, setupNodeRoute},
{"/monitoring/analysis/{cluster}", "monitoring/analysis.tmpl", "Analysis - ClusterCockpit", true, setupAnalysisRoute},
{"/monitoring/status/{cluster}", "monitoring/status.tmpl", "Status of <ID> - ClusterCockpit", false, setupClusterRoute},
{"/monitoring/status/{cluster}", "monitoring/status.tmpl", "Status of <ID> - ClusterCockpit", false, setupClusterStatusRoute},
}
func setupHomeRoute(i InfoType, r *http.Request) InfoType {
@ -111,7 +113,7 @@ func setupUserRoute(i InfoType, r *http.Request) InfoType {
return i
}
func setupClusterRoute(i InfoType, r *http.Request) InfoType {
func setupClusterStatusRoute(i InfoType, r *http.Request) InfoType {
vars := mux.Vars(r)
i["id"] = vars["cluster"]
i["cluster"] = vars["cluster"]
@ -123,6 +125,36 @@ func setupClusterRoute(i InfoType, r *http.Request) InfoType {
return i
}
func setupClusterOverviewRoute(i InfoType, r *http.Request) InfoType {
vars := mux.Vars(r)
i["id"] = vars["cluster"]
i["cluster"] = vars["cluster"]
i["displayType"] = "OVERVIEW"
from, to := r.URL.Query().Get("from"), r.URL.Query().Get("to")
if from != "" || to != "" {
i["from"] = from
i["to"] = to
}
return i
}
func setupClusterListRoute(i InfoType, r *http.Request) InfoType {
vars := mux.Vars(r)
i["id"] = vars["cluster"]
i["cluster"] = vars["cluster"]
i["sid"] = vars["subcluster"]
i["subCluster"] = vars["subcluster"]
i["displayType"] = "LIST"
from, to := r.URL.Query().Get("from"), r.URL.Query().Get("to")
if from != "" || to != "" {
i["from"] = from
i["to"] = to
}
return i
}
func setupNodeRoute(i InfoType, r *http.Request) InfoType {
vars := mux.Vars(r)
i["cluster"] = vars["cluster"]
@ -343,6 +375,9 @@ func SetupRoutes(router *mux.Router, buildInfo web.Build) {
infos := route.Setup(map[string]interface{}{}, r)
if id, ok := infos["id"]; ok {
title = strings.Replace(route.Title, "<ID>", id.(string), 1)
if sid, ok := infos["sid"]; ok { // 2nd ID element
title = strings.Replace(title, "<SID>", sid.(string), 1)
}
}
// Get User -> What if NIL?
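
For clarity, a tiny sketch of the two-step title substitution above, using hypothetical cluster and subcluster names:

package main

import (
	"fmt"
	"strings"
)

func main() {
	title := strings.Replace("Cluster <ID> <SID> Node List - ClusterCockpit", "<ID>", "fritz", 1)
	title = strings.Replace(title, "<SID>", "spr", 1)
	fmt.Println(title) // Cluster fritz spr Node List - ClusterCockpit
}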

View File

@ -15,12 +15,12 @@ import (
var (
Clusters []*schema.Cluster
GlobalMetricList []*schema.GlobalMetricListItem
nodeLists map[string]map[string]NodeList
NodeLists map[string]map[string]NodeList
)
func initClusterConfig() error {
Clusters = []*schema.Cluster{}
nodeLists = map[string]map[string]NodeList{}
NodeLists = map[string]map[string]NodeList{}
metricLookup := make(map[string]schema.GlobalMetricListItem)
for _, c := range ar.GetClusters() {
@ -109,7 +109,7 @@ func initClusterConfig() error {
Clusters = append(Clusters, cluster)
nodeLists[cluster.Name] = make(map[string]NodeList)
NodeLists[cluster.Name] = make(map[string]NodeList)
for _, sc := range cluster.SubClusters {
if sc.Nodes == "*" {
continue
@ -119,7 +119,7 @@ func initClusterConfig() error {
if err != nil {
return fmt.Errorf("ARCHIVE/CLUSTERCONFIG > in %s/cluster.json: %w", cluster.Name, err)
}
nodeLists[cluster.Name][sc.Name] = nl
NodeLists[cluster.Name][sc.Name] = nl
}
}
@ -187,7 +187,7 @@ func AssignSubCluster(job *schema.BaseJob) error {
}
host0 := job.Resources[0].Hostname
for sc, nl := range nodeLists[job.Cluster] {
for sc, nl := range NodeLists[job.Cluster] {
if nl != nil && nl.Contains(host0) {
job.SubCluster = sc
return nil
@ -203,7 +203,7 @@ func AssignSubCluster(job *schema.BaseJob) error {
}
func GetSubClusterByNode(cluster, hostname string) (string, error) {
for sc, nl := range nodeLists[cluster] {
for sc, nl := range NodeLists[cluster] {
if nl != nil && nl.Contains(hostname) {
return sc, nil
}

View File

@ -194,7 +194,17 @@ func (topo *Topology) GetAcceleratorID(id int) (string, error) {
}
}
func (topo *Topology) GetAcceleratorIDs() ([]int, error) {
// Return list of hardware (string) accelerator IDs
func (topo *Topology) GetAcceleratorIDs() []string {
accels := make([]string, 0)
for _, accel := range topo.Accelerators {
accels = append(accels, accel.ID)
}
return accels
}
// Outdated? Or: Return indices of accelerators in parent array?
func (topo *Topology) GetAcceleratorIDsAsInt() ([]int, error) {
accels := make([]int, 0)
for _, accel := range topo.Accelerators {
id, err := strconv.Atoi(accel.ID)

View File

@ -1,12 +1,12 @@
{
"name": "cc-frontend",
"version": "1.0.2",
"version": "1.0.0",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "cc-frontend",
"version": "1.0.2",
"version": "1.0.0",
"license": "MIT",
"dependencies": {
"@rollup/plugin-replace": "^5.0.7",

View File

@ -174,6 +174,7 @@
},
});
// Note: Different footprints than those saved in DB per Job -> Caused by Legacy Naming
$: footprintsQuery = queryStore({
client: client,
query: gql`
@ -470,10 +471,12 @@
height={300}
data={convert2uplot($statsQuery.data.stats[0].histDuration)}
title="Duration Distribution"
xlabel="Current Runtimes"
xunit="Hours"
xlabel="Current Job Runtimes"
xunit="Runtime"
ylabel="Number of Jobs"
yunit="Jobs"
usesBins
xtime
/>
{/key}
</Col>
@ -519,7 +522,6 @@
<Col>
<PlotGrid
let:item
renderFor="analysis"
items={metricsInHistograms.map((metric) => ({
metric,
...binsFromFootprint(
@ -563,7 +565,6 @@
<PlotGrid
let:item
let:width
renderFor="analysis"
items={metricsInScatterplots.map(([m1, m2]) => ({
m1,
f1: $footprintsQuery.data.footprints.metrics.find(

View File

@ -3,6 +3,7 @@
Properties:
- `isAdmin Bool!`: Is currently logged in user admin authority
- `isSupport Bool!`: Is currently logged in user support authority
- `isApi Bool!`: Is currently logged in user api authority
- `username String!`: Empty string if auth. is disabled, otherwise the username as string
-->
@ -10,15 +11,17 @@
<script>
import { Card, CardHeader, CardTitle } from "@sveltestrap/sveltestrap";
import UserSettings from "./config/UserSettings.svelte";
import SupportSettings from "./config/SupportSettings.svelte";
import AdminSettings from "./config/AdminSettings.svelte";
export let isAdmin;
export let isSupport;
export let isApi;
export let username;
export let ncontent;
</script>
{#if isAdmin == true}
{#if isAdmin}
<Card style="margin-bottom: 1.5em;">
<CardHeader>
<CardTitle class="mb-1">Admin Options</CardTitle>
@ -27,6 +30,15 @@
</Card>
{/if}
{#if isSupport || isAdmin}
<Card style="margin-bottom: 1.5em;">
<CardHeader>
<CardTitle class="mb-1">Support Options</CardTitle>
</CardHeader>
<SupportSettings/>
</Card>
{/if}
<Card>
<CardHeader>
<CardTitle class="mb-1">User Options</CardTitle>

View File

@ -26,6 +26,7 @@
export let username;
export let authlevel;
export let clusters;
export let subClusters;
export let roles;
let isOpen = false;
@ -93,10 +94,19 @@
},
{
title: "Nodes",
requiredRole: roles.admin,
requiredRole: roles.support,
href: "/monitoring/systems/",
icon: "hdd-rack",
perCluster: true,
listOptions: true,
menu: "Info",
},
{
title: "Analysis",
requiredRole: roles.support,
href: "/monitoring/analysis/",
icon: "graph-up",
perCluster: true,
listOptions: false,
menu: "Info",
},
@ -109,15 +119,6 @@
listOptions: false,
menu: "Info",
},
{
title: "Analysis",
requiredRole: roles.support,
href: "/monitoring/analysis/",
icon: "graph-up",
perCluster: true,
listOptions: false,
menu: "Info",
},
];
</script>
@ -138,11 +139,13 @@
{#if screenSize > 1500 || screenSize < 768}
<NavbarLinks
{clusters}
{subClusters}
links={views.filter((item) => item.requiredRole <= authlevel)}
/>
{:else if screenSize > 1300}
<NavbarLinks
{clusters}
{subClusters}
links={views.filter(
(item) => item.requiredRole <= authlevel && item.menu != "Info",
)}
@ -156,6 +159,7 @@
<DropdownMenu class="dropdown-menu-lg-end">
<NavbarLinks
{clusters}
{subClusters}
direction="right"
links={views.filter(
(item) =>
@ -168,6 +172,7 @@
{:else}
<NavbarLinks
{clusters}
{subClusters}
links={views.filter(
(item) => item.requiredRole <= authlevel && item.menu == "none",
)}
@ -180,6 +185,7 @@
<DropdownMenu class="dropdown-menu-lg-end">
<NavbarLinks
{clusters}
{subClusters}
direction="right"
links={views.filter(
(item) => item.requiredRole <= authlevel && item.menu == 'Jobs',
@ -196,6 +202,7 @@
<DropdownMenu class="dropdown-menu-lg-end">
<NavbarLinks
{clusters}
{subClusters}
direction="right"
links={views.filter(
(item) => item.requiredRole <= authlevel && item.menu == 'Groups',
@ -212,6 +219,7 @@
<DropdownMenu class="dropdown-menu-lg-end">
<NavbarLinks
{clusters}
{subClusters}
direction="right"
links={views.filter(
(item) => item.requiredRole <= authlevel && item.menu == 'Info',
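
Each entry of the `views` array above carries a `requiredRole`, and the navbar filters with `item.requiredRole <= authlevel`, so lowering the Nodes entry from `roles.admin` to `roles.support` is all that is needed to expose it to support users as well. A small sketch of that comparison (the numeric role levels are assumptions; only the ordering matters):

    // Hypothetical numeric role levels; the real values come from the `roles` context object.
    const roles = { user: 1, manager: 2, support: 3, admin: 4 };
    const authlevel = roles.support; // a support user
    // An entry with requiredRole: roles.support now passes the existing filter
    //   views.filter((item) => item.requiredRole <= authlevel)
    // for support users as well as admins.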

View File

@ -348,7 +348,6 @@
{:else if $initq?.data && $jobMetrics?.data?.jobMetrics}
<PlotGrid
let:item
renderFor="job"
items={orderAndMap(
groupByScope($jobMetrics.data.jobMetrics),
selectedMetrics,

View File

@ -44,7 +44,7 @@
if (from == null || to == null) {
to = new Date(Date.now());
from = new Date(to.getTime());
from.setHours(from.getHours() - 12);
from.setHours(from.getHours() - 4);
}
const initialized = getContext("initialized")
@ -141,7 +141,7 @@
<InputGroup>
<InputGroupText><Icon name="hdd" /></InputGroupText>
<InputGroupText>Selected Node</InputGroupText>
<Input style="background-color: white;"type="text" value="{hostname} ({cluster})" disabled/>
<Input style="background-color: white;" type="text" value="{hostname} [{cluster} ({$nodeMetricsData?.data ? $nodeMetricsData.data.nodeMetrics[0].subCluster : ''})]" disabled/>
</InputGroup>
</Col>
<!-- Time Col -->
@ -153,18 +153,20 @@
{#if $nodeJobsData.fetching}
<Spinner />
{:else if $nodeJobsData.data}
<InputGroup>
<InputGroupText><Icon name="activity" /></InputGroupText>
<InputGroupText>Activity</InputGroupText>
<Input style="background-color: white;"type="text" value="{$nodeJobsData.data.jobs.count} Jobs" disabled/>
<a title="Show jobs running on this node" href="/monitoring/jobs/?cluster={cluster}&state=running&node={hostname}" target="_blank" class="btn btn-outline-secondary" role="button" aria-disabled="true">
<Icon name="view-list" /> Show List
</a>
</InputGroup>
<InputGroup>
<InputGroupText><Icon name="activity" /></InputGroupText>
<InputGroupText>Activity</InputGroupText>
<Input style="background-color: white;" type="text" value="{$nodeJobsData.data.jobs.count} Jobs" disabled/>
<a title="Show jobs running on this node" href="/monitoring/jobs/?cluster={cluster}&state=running&node={hostname}" target="_blank" class="btn btn-outline-secondary" role="button" aria-disabled="true">
<Icon name="view-list" /> Show List
</a>
</InputGroup>
{:else}
<Input type="text" disabled>
No currently running jobs.
</Input>
<InputGroup>
<InputGroupText><Icon name="activity" /></InputGroupText>
<InputGroupText>Activity</InputGroupText>
<Input type="text" value="No running jobs." disabled />
</InputGroup>
{/if}
</Col>
<!-- Refresh Col-->
@ -189,7 +191,6 @@
{:else}
<PlotGrid
let:item
renderFor="node"
itemsPerRow={ccconfig.plot_view_plotsPerRow}
items={$nodeMetricsData.data.nodeMetrics[0].metrics
.map((m) => ({

View File

@ -463,7 +463,7 @@
<hr />
<!-- Usage Stats as Histograms -->
<!-- User and Project Stats as Pie-Charts -->
<Row cols={{ lg: 4, md: 2, sm: 1 }}>
<Col class="p-2">
@ -587,17 +587,23 @@
{/key}
</Col>
</Row>
<hr class="my-2" />
<!-- Static Stats as Histograms : Running Duration && Allocated Hardware Counts-->
<Row cols={{ lg: 2, md: 1 }}>
<Col class="p-2">
{#key $mainQuery.data.stats}
<Histogram
data={convert2uplot($mainQuery.data.stats[0].histDuration)}
title="Duration Distribution"
xlabel="Current Runtimes"
xunit="Hours"
xlabel="Current Job Runtimes"
xunit="Runtime"
ylabel="Number of Jobs"
yunit="Jobs"
usesBins
xtime
/>
{/key}
</Col>
@ -640,12 +646,15 @@
{/key}
</Col>
</Row>
<hr class="my-2" />
<!-- Selectable Stats as Histograms : Average Values of Running Jobs -->
{#if metricsInHistograms}
{#key $mainQuery.data.stats[0].histMetrics}
<PlotGrid
let:item
renderFor="user"
items={$mainQuery.data.stats[0].histMetrics}
itemsPerRow={2}
>

View File

@ -1,7 +1,8 @@
<!--
@component Main cluster metric status view component; renders current state of metrics / nodes
@component Main cluster node status view component; renders overview or list depending on type
Properties:
- `displayType String?`: The type of node display ['OVERVIEW' || 'LIST']
- `cluster String`: The cluster to show status information for
- `from Date?`: Custom Time Range selection 'from' [Default: null]
- `to Date?`: Custom Time Range selection 'to' [Default: null]
@ -12,33 +13,34 @@
import {
Row,
Col,
Card,
Input,
InputGroup,
InputGroupText,
Icon,
Spinner,
Card,
Button,
} from "@sveltestrap/sveltestrap";
import {
queryStore,
gql,
getContextClient,
} from "@urql/svelte";
import {
init,
checkMetricDisabled,
} from "./generic/utils.js";
import PlotGrid from "./generic/PlotGrid.svelte";
import MetricPlot from "./generic/plots/MetricPlot.svelte";
import { init } from "./generic/utils.js";
import NodeOverview from "./systems/NodeOverview.svelte";
import NodeList from "./systems/NodeList.svelte";
import MetricSelection from "./generic/select/MetricSelection.svelte";
import TimeSelection from "./generic/select/TimeSelection.svelte";
import Refresher from "./generic/helper/Refresher.svelte";
export let displayType;
export let cluster;
export let subCluster = "";
export let from = null;
export let to = null;
const { query: initq } = init();
console.assert(
displayType == "OVERVIEW" || displayType == "LIST",
"Invalid nodes displayType provided!",
);
if (from == null || to == null) {
to = new Date(Date.now());
from = new Date(to.getTime());
@ -47,57 +49,28 @@
const initialized = getContext("initialized");
const ccconfig = getContext("cc-config");
const clusters = getContext("clusters");
const globalMetrics = getContext("globalMetrics");
const displayNodeOverview = (displayType === 'OVERVIEW')
const resampleConfig = getContext("resampling") || null;
const resampleResolutions = resampleConfig ? [...resampleConfig.resolutions] : [];
const resampleDefault = resampleConfig ? Math.max(...resampleConfig.resolutions) : 0;
let selectedResolution = resampleConfig ? resampleDefault : 0;
let hostnameFilter = "";
let selectedMetric = ccconfig.system_view_selectedMetric;
let pendingHostnameFilter = "";
let selectedMetric = ccconfig.system_view_selectedMetric || "";
let selectedMetrics = ccconfig[`node_list_selectedMetrics:${cluster}`] || [ccconfig.system_view_selectedMetric];
let isMetricsSelectionOpen = false;
const client = getContextClient();
$: nodesQuery = queryStore({
client: client,
query: gql`
query ($cluster: String!, $metrics: [String!], $from: Time!, $to: Time!) {
nodeMetrics(
cluster: $cluster
metrics: $metrics
from: $from
to: $to
) {
host
subCluster
metrics {
name
scope
metric {
timestep
unit {
base
prefix
}
series {
statistics {
min
avg
max
}
data
}
}
}
}
}
`,
variables: {
cluster: cluster,
metrics: [selectedMetric],
from: from.toISOString(),
to: to.toISOString(),
},
});
/*
Note 1: "Sorting" is ignored as a use case for now; default is alphanumerical order on the cluster's hostnames (currently handled in the frontend)
Note 2: An idle-state filter (== no allocated jobs) would have to live in the frontend: it cannot be handled by CCMS and needs a secondary job query plus refiltering of the visible nodes
*/
let systemMetrics = [];
let systemUnits = {};
function loadMetrics(isInitialized) {
if (!isInitialized) return
systemMetrics = [...globalMetrics.filter((gm) => gm?.availability.find((av) => av.cluster == cluster))]
@ -108,23 +81,60 @@
$: loadMetrics($initialized)
$: if (displayNodeOverview) {
selectedMetrics = [selectedMetric]
}
$: { // Wait after input for some time to prevent too many requests
setTimeout(function () {
hostnameFilter = pendingHostnameFilter;
}, 500);
}
</script>
<Row cols={{ xs: 2, lg: 4 }}>
{#if $initq.error}
<Card body color="danger">{$initq.error.message}</Card>
{:else if $initq.fetching}
<Spinner />
{:else}
<!-- ROW1: Tools-->
<Row cols={{ xs: 2, lg: !displayNodeOverview ? (resampleConfig ? 5 : 4) : 4 }} class="mb-3">
{#if $initq.data}
<!-- List Metric Select Col-->
{#if !displayNodeOverview}
<Col>
<InputGroup>
<InputGroupText><Icon name="graph-up" /></InputGroupText>
<InputGroupText class="text-capitalize">Metrics</InputGroupText>
<Button
outline
color="primary"
on:click={() => (isMetricsSelectionOpen = true)}
>
{selectedMetrics.length} selected
</Button>
</InputGroup>
</Col>
{#if resampleConfig}
<Col>
<InputGroup>
<InputGroupText><Icon name="plus-slash-minus" /></InputGroupText>
<InputGroupText>Resolution</InputGroupText>
<Input type="select" bind:value={selectedResolution}>
{#each resampleResolutions as res}
<option value={res}
>{res} sec</option
>
{/each}
</Input>
</InputGroup>
</Col>
{/if}
{/if}
<!-- Node Col-->
<Col>
<Col class="mt-2 mt-lg-0">
<InputGroup>
<InputGroupText><Icon name="hdd" /></InputGroupText>
<InputGroupText>Find Node</InputGroupText>
<InputGroupText>Find Node(s)</InputGroupText>
<Input
placeholder="hostname..."
placeholder="Filter hostname ..."
type="text"
bind:value={hostnameFilter}
bind:value={pendingHostnameFilter}
/>
</InputGroup>
</Col>
@ -132,20 +142,22 @@
<Col>
<TimeSelection bind:from bind:to />
</Col>
<!-- Metric Col-->
<Col class="mt-2 mt-lg-0">
<InputGroup>
<InputGroupText><Icon name="graph-up" /></InputGroupText>
<InputGroupText>Metric</InputGroupText>
<select class="form-select" bind:value={selectedMetric}>
{#each systemMetrics as metric}
<option value={metric.name}
>{metric.name} {systemUnits[metric.name] ? "("+systemUnits[metric.name]+")" : ""}</option
>
{/each}
</select>
</InputGroup>
</Col>
<!-- Overview Metric Col-->
{#if displayNodeOverview}
<Col class="mt-2 mt-lg-0">
<InputGroup>
<InputGroupText><Icon name="graph-up" /></InputGroupText>
<InputGroupText>Metric</InputGroupText>
<Input type="select" bind:value={selectedMetric}>
{#each systemMetrics as metric}
<option value={metric.name}
>{metric.name} {systemUnits[metric.name] ? "("+systemUnits[metric.name]+")" : ""}</option
>
{/each}
</Input>
</InputGroup>
</Col>
{/if}
<!-- Refresh Col-->
<Col class="mt-2 mt-lg-0">
<Refresher
@ -158,75 +170,30 @@
</Col>
{/if}
</Row>
<br />
{#if $nodesQuery.error}
<!-- ROW2: Content-->
{#if displayType !== "OVERVIEW" && displayType !== "LIST"}
<Row>
<Col>
<Card body color="danger">{$nodesQuery.error.message}</Card>
</Col>
</Row>
{:else if $nodesQuery.fetching || $initq.fetching}
<Row>
<Col>
<Spinner />
<Card body color="danger">Unknown displayList type! </Card>
</Col>
</Row>
{:else}
<PlotGrid
let:item
renderFor="systems"
itemsPerRow={ccconfig.plot_view_plotsPerRow}
items={$nodesQuery.data.nodeMetrics
.filter(
(h) =>
h.host.includes(hostnameFilter) &&
h.metrics.some(
(m) => m.name == selectedMetric && m.scope == "node",
),
)
.map((h) => ({
host: h.host,
subCluster: h.subCluster,
data: h.metrics.find(
(m) => m.name == selectedMetric && m.scope == "node",
),
disabled: checkMetricDisabled(
selectedMetric,
cluster,
h.subCluster,
),
}))
.sort((a, b) => a.host.localeCompare(b.host))}
>
<h4 style="width: 100%; text-align: center;">
<a
style="display: block;padding-top: 15px;"
href="/monitoring/node/{cluster}/{item.host}"
>{item.host} ({item.subCluster})</a
>
</h4>
{#if item.disabled === false && item.data}
<MetricPlot
timestep={item.data.metric.timestep}
series={item.data.metric.series}
metric={item.data.name}
cluster={clusters.find((c) => c.name == cluster)}
subCluster={item.subCluster}
forNode={true}
/>
{:else if item.disabled === true && item.data}
<Card style="margin-left: 2rem;margin-right: 2rem;" body color="info"
>Metric disabled for subcluster <code
>{selectedMetric}:{item.subCluster}</code
></Card
>
{:else}
<Card
style="margin-left: 2rem;margin-right: 2rem;"
body
color="warning"
>No dataset returned for <code>{selectedMetric}</code></Card
>
{/if}
</PlotGrid>
{#if displayNodeOverview}
<!-- ROW2-1: Node Overview (Grid Included)-->
<NodeOverview {cluster} {subCluster} {ccconfig} {selectedMetrics} {from} {to} {hostnameFilter}/>
{:else}
<!-- ROW2-2: Node List (Grid Included)-->
<NodeList {cluster} {subCluster} {ccconfig} {selectedMetrics} {selectedResolution} {hostnameFilter} {from} {to} {systemUnits}/>
{/if}
{/if}
<MetricSelection
{cluster}
configName="node_list_selectedMetrics"
metrics={selectedMetrics}
bind:isOpen={isMetricsSelectionOpen}
on:update-metrics={({ detail }) => {
selectedMetrics = [...detail]
}}
/>
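
The reactive block above that copies `pendingHostnameFilter` into `hostnameFilter` after 500 ms is a debounce: it delays the node query until typing pauses. A minimal standalone sketch of that pattern in plain JavaScript (the helper name and the 500 ms delay are assumptions, not code from this commit):

    // Minimal debounce sketch: cancel the pending timer before arming a new one,
    // so only the final keystroke within the delay window triggers a reload.
    let debounceTimer = null;
    function debounce(callback, delay = 500) {
      if (debounceTimer) clearTimeout(debounceTimer);
      debounceTimer = setTimeout(callback, delay);
    }
    // Usage sketch: debounce(() => { hostnameFilter = pendingHostnameFilter; });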

View File

@ -17,6 +17,9 @@
Icon,
Card,
Spinner,
Input,
InputGroup,
InputGroupText
} from "@sveltestrap/sveltestrap";
import {
queryStore,
@ -59,6 +62,11 @@
let showFootprint = filterPresets.cluster
? !!ccconfig[`plot_list_showFootprint:${filterPresets.cluster}`]
: !!ccconfig.plot_list_showFootprint;
let numDurationBins = "1h";
let numMetricBins = 10;
let durationBinOptions = ["1m","10m","1h","6h","12h"];
let metricBinOptions = [10, 20, 50, 100];
$: metricsInHistograms = selectedCluster
? ccconfig[`user_view_histogramMetrics:${selectedCluster}`] || []
@ -68,8 +76,8 @@
$: stats = queryStore({
client: client,
query: gql`
query ($jobFilters: [JobFilter!]!, $metricsInHistograms: [String!]) {
jobsStatistics(filter: $jobFilters, metrics: $metricsInHistograms) {
query ($jobFilters: [JobFilter!]!, $metricsInHistograms: [String!], $numDurationBins: String, $numMetricBins: Int) {
jobsStatistics(filter: $jobFilters, metrics: $metricsInHistograms, numDurationBins: $numDurationBins , numMetricBins: $numMetricBins ) {
totalJobs
shortJobs
totalWalltime
@ -96,7 +104,7 @@
}
}
`,
variables: { jobFilters, metricsInHistograms },
variables: { jobFilters, metricsInHistograms, numDurationBins, numMetricBins },
});
onMount(() => filterComponent.updateFilters());
@ -118,8 +126,8 @@
{/if}
<!-- ROW2: Tools-->
<Row cols={{ xs: 1, md: 2, lg: 4}} class="mb-3">
<Col lg="2" class="mb-2 mb-lg-0">
<Row cols={{ xs: 1, md: 2, lg: 6}} class="mb-3">
<Col class="mb-2 mb-lg-0">
<ButtonGroup class="w-100">
<Button outline color="primary" on:click={() => (isSortingOpen = true)}>
<Icon name="sort-up" /> Sorting
@ -133,7 +141,7 @@
</Button>
</ButtonGroup>
</Col>
<Col lg="4" xl="6" class="mb-1 mb-lg-0">
<Col lg="4" class="mb-1 mb-lg-0">
<Filters
{filterPresets}
{matchedJobs}
@ -148,12 +156,27 @@
}}
/>
</Col>
<Col lg="3" xl="2" class="mb-2 mb-lg-0">
<Col class="mb-2 mb-lg-0">
<InputGroup>
<InputGroupText>
<Icon name="bar-chart-line-fill" />
</InputGroupText>
<InputGroupText>
Duration Bin Size
</InputGroupText>
<Input type="select" bind:value={numDurationBins} style="max-width: 120px;">
{#each durationBinOptions as dbin}
<option value={dbin}>{dbin}</option>
{/each}
</Input>
</InputGroup>
</Col>
<Col class="mb-2 mb-lg-0">
<TextFilter
on:set-filter={({ detail }) => filterComponent.updateFilters(detail)}
/>
</Col>
<Col lg="3" xl="2" class="mb-1 mb-lg-0">
<Col class="mb-1 mb-lg-0">
<Refresher on:refresh={() => {
jobList.refreshJobs()
jobList.refreshAllMetrics()
@ -215,10 +238,12 @@
<Histogram
data={convert2uplot($stats.data.jobsStatistics[0].histDuration)}
title="Duration Distribution"
xlabel="Current Runtimes"
xunit="Hours"
xlabel="Job Runtimes"
xunit="Runtime"
ylabel="Number of Jobs"
yunit="Jobs"
usesBins
xtime
/>
{/key}
</Col>
@ -238,16 +263,32 @@
</Row>
<!-- ROW4+5: Selectable Histograms -->
<Row cols={{ xs: 1, md: 5}}>
<Col>
<Row>
<Col xs="12" md="3" lg="2" class="mb-2 mb-md-0">
<Button
outline
color="secondary"
class="w-100"
on:click={() => (isHistogramSelectionOpen = true)}
>
<Icon name="bar-chart-line" /> Select Histograms
</Button>
</Col>
<Col xs="12" md="9" lg="10" class="mb-2 mb-md-0">
<InputGroup>
<InputGroupText>
<Icon name="bar-chart-line-fill" />
</InputGroupText>
<InputGroupText>
Metric Bins
</InputGroupText>
<Input type="select" bind:value={numMetricBins} style="max-width: 120px;">
{#each metricBinOptions as mbin}
<option value={mbin}>{mbin}</option>
{/each}
</Input>
</InputGroup>
</Col>
</Row>
{#if metricsInHistograms?.length > 0}
{#if $stats.error}
@ -267,18 +308,17 @@
{#key $stats.data.jobsStatistics[0].histMetrics}
<PlotGrid
let:item
renderFor="user"
items={$stats.data.jobsStatistics[0].histMetrics}
itemsPerRow={3}
>
<Histogram
data={convert2uplot(item.data)}
usesBins={true}
title="Distribution of '{item.metric} ({item.stat})' footprints"
xlabel={`${item.metric} bin maximum ${item?.unit ? `[${item.unit}]` : ``}`}
xunit={item.unit}
ylabel="Number of Jobs"
yunit="Jobs"
usesBins
/>
</PlotGrid>
{/key}
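
Both new bin controls flow straight into the `jobsStatistics` query above: `numDurationBins` is a bin-width preset string (one of the `durationBinOptions`), while `numMetricBins` is a plain bin count. A sketch of the resulting variables object (the metric names are placeholders, not from this commit):

    // Sketch of the variables handed to the statistics queryStore above:
    const variables = {
      jobFilters: [],                                // produced by the Filters component
      metricsInHistograms: ["flops_any", "mem_bw"],  // placeholder metric names
      numDurationBins: "1h",                         // duration histogram bin width
      numMetricBins: 10,                             // bin count for metric histograms
    };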

View File

@ -5,6 +5,7 @@ new Config({
target: document.getElementById('svelte-app'),
props: {
isAdmin: isAdmin,
isSupport: isSupport,
isApi: isApi,
username: username,
ncontent: ncontent,

View File

@ -4,7 +4,7 @@
<script>
import { Row, Col } from "@sveltestrap/sveltestrap";
import { onMount } from "svelte";
import { onMount, getContext } from "svelte";
import EditRole from "./admin/EditRole.svelte";
import EditProject from "./admin/EditProject.svelte";
import AddUser from "./admin/AddUser.svelte";
@ -17,6 +17,8 @@
let users = [];
let roles = [];
const ccconfig = getContext("cc-config");
function getUserList() {
fetch("/config/users/?via-ldap=false&not-just-user=true")
.then((res) => res.json())
@ -54,6 +56,6 @@
<Col>
<EditProject on:reload={getUserList} />
</Col>
<Options />
<Options config={ccconfig}/>
<NoticeEdit {ncontent}/>
</Row>

View File

@ -0,0 +1,13 @@
<!--
@component Support settings wrapper
Properties: None
-->
<script>
import { getContext } from "svelte";
import SupportOptions from "./support/SupportOptions.svelte";
const ccconfig = getContext("cc-config");
</script>
<SupportOptions config={ccconfig}/>

View File

@ -18,6 +18,7 @@
const ccconfig = getContext("cc-config");
let message = { msg: "", target: "", color: "#d63384" };
let displayMessage = false;
let cbmode = ccconfig?.plot_general_colorblindMode || false;
async function handleSettingSubmit(event) {
const selector = event.detail.selector
@ -28,6 +29,9 @@
const res = await fetch(form.action, { method: "POST", body: formData });
if (res.ok) {
let text = await res.text();
if (formData.get("key") === "plot_general_colorblindMode") {
cbmode = JSON.parse(formData.get("value"));
}
popMessage(text, target, "#048109");
} else {
let text = await res.text();
@ -51,4 +55,4 @@
<UserOptions config={ccconfig} {username} {isApi} bind:message bind:displayMessage on:update-config={(e) => handleSettingSubmit(e)}/>
<PlotRenderOptions config={ccconfig} bind:message bind:displayMessage on:update-config={(e) => handleSettingSubmit(e)}/>
<PlotColorScheme config={ccconfig} bind:message bind:displayMessage on:update-config={(e) => handleSettingSubmit(e)}/>
<PlotColorScheme config={ccconfig} bind:cbmode bind:message bind:displayMessage on:update-config={(e) => handleSettingSubmit(e)}/>

View File

@ -45,7 +45,7 @@
<Col>
<Card class="h-100">
<CardBody>
<CardTitle class="mb-3">Metric Plot Resampling</CardTitle>
<CardTitle class="mb-3">Metric Plot Resampling Info</CardTitle>
<p>Triggered at {resampleConfig.trigger} datapoints.</p>
<p>Configured resolutions: {resampleConfig.resolutions}</p>
</CardBody>

View File

@ -0,0 +1,89 @@
<!--
@component Support option select card
-->
<script>
import { Row, Col, Card, CardTitle, Button} from "@sveltestrap/sveltestrap";
import { fade } from "svelte/transition";
export let config;
let message;
let displayMessage;
async function handleSettingSubmit(selector, target) {
let form = document.querySelector(selector);
let formData = new FormData(form);
try {
const res = await fetch(form.action, { method: "POST", body: formData });
if (res.ok) {
let text = await res.text();
popMessage(text, target, "#048109");
} else {
let text = await res.text();
throw new Error("Response Code " + res.status + "-> " + text);
}
} catch (err) {
popMessage(err, target, "#d63384");
}
return false;
}
function popMessage(response, restarget, rescolor) {
message = { msg: response, target: restarget, color: rescolor };
displayMessage = true;
setTimeout(function () {
displayMessage = false;
}, 3500);
}
</script>
<Row cols={1} class="p-2 g-2">
<Col>
<Card class="h-100">
<form
id="node-paging-form"
method="post"
action="/frontend/configuration/"
class="card-body"
on:submit|preventDefault={() =>
handleSettingSubmit("#node-paging-form", "npag")}
>
<!-- The Svelte 'class' directive only works on DOM elements directly; a plain class="..." on the component does not, so inline styles are used instead. -->
<CardTitle
style="margin-bottom: 1em; display: flex; align-items: center;"
>
<div>Node List Paging Type</div>
{#if displayMessage && message.target == "npag"}<div
style="margin-left: auto; font-size: 0.9em;"
>
<code style="color: {message.color};" out:fade
>Update: {message.msg}</code
>
</div>{/if}
</CardTitle>
<input type="hidden" name="key" value="node_list_usePaging" />
<div class="mb-3">
<div>
{#if config?.node_list_usePaging}
<input type="radio" id="nodes-true-checked" name="value" value="true" checked />
{:else}
<input type="radio" id="nodes-true" name="value" value="true" />
{/if}
<label for="true">Paging with selectable count of nodes.</label>
</div>
<div>
{#if config?.node_list_usePaging}
<input type="radio" id="nodes-false" name="value" value="false" />
{:else}
<input type="radio" id="nodes-false-checked" name="value" value="false" checked />
{/if}
<label for="false">Continuous scroll iteratively adding 10 nodes.</label>
</div>
</div>
<Button color="primary" type="submit">Submit</Button>
</form>
</Card>
</Col>
</Row>

View File

@ -24,6 +24,7 @@
export let config;
export let message;
export let displayMessage;
export let cbmode = false;
const dispatch = createEventDispatcher();
function updateSetting(selector, target) {
@ -265,6 +266,62 @@
],
};
// https://personal.sron.nl/~pault/
// https://tsitsul.in/blog/coloropt/
const cvdschemes = {
HighContrast: [
"rgb(221,170,51)",
"rgb(187,85,102)",
"rgb(0,68,136)",
"rgb(0,0,0)",
],
Bright: [
"rgb(68,119,170)",
"rgb(102,204,238)",
"rgb(34,136,51)",
"rgb(204,187,68)",
"rgb(238,102,119)",
"rgb(170,51,119)",
"rgb(187,187,187)",
],
Muted: [
"rgb(51,34,136)",
"rgb(136,204,238)",
"rgb(68,170,153)",
"rgb(17,119,51)",
"rgb(153,153,51)",
"rgb(221,204,119)",
"rgb(204,102,119)",
"rgb(136,34,85)",
"rgb(170,68,153)",
"rgb(221,221,221)",
],
NormalSixColor: [
"rgb(64,83,211)",
"rgb(221,179,16)",
"rgb(181,29,20)",
"rgb(0,190,255)",
"rgb(251,73,176)",
"rgb(0,178,93)",
"rgb(202,202,202)",
],
NormalTwelveColor: [
"rgb(235,172,35)",
"rgb(184,0,88)",
"rgb(0,140,249)",
"rgb(0,110,0)",
"rgb(0,187,173)",
"rgb(209,99,230)",
"rgb(178,69,2)",
"rgb(255,146,135)",
"rgb(89,84,214)",
"rgb(0,198,248)",
"rgb(135,133,0)",
"rgb(0,167,108)",
"rgb(189,189,189)",
]
}
</script>
<Row cols={1} class="p-2 g-2">
@ -281,7 +338,7 @@
<CardTitle
style="margin-bottom: 1em; display: flex; align-items: center;"
>
<div>Color Scheme for Timeseries Plots</div>
<div>Color Scheme for Timeseries Plots {cbmode ? `(Color Blind Friendly Palettes)` : ``}</div>
{#if displayMessage && message.target == "cs"}<div
style="margin-left: auto; font-size: 0.9em;"
>
@ -293,7 +350,7 @@
<input type="hidden" name="key" value="plot_general_colorscheme" />
<Table hover>
<tbody>
{#each Object.entries(colorschemes) as [name, rgbrow]}
{#each Object.entries(cbmode ? cvdschemes : colorschemes) as [name, rgbrow]}
<tr>
<th scope="col">{name}</th>
<td>
@ -333,8 +390,9 @@
<style>
.color-dot {
height: 10px;
width: 10px;
margin-left: 1px;
height: 12px;
width: 12px;
border-radius: 50%;
display: inline-block;
}

View File

@ -129,8 +129,8 @@
>
<!-- BACKGROUND -->
<Col
><Card class="h-100">
<Col class="d-flex justify-content-between"
><Card class="h-100" style="width: 49%;">
<form
id="backgrounds-form"
method="post"
@ -173,6 +173,50 @@
</div>
<Button color="primary" type="submit">Submit</Button>
</form>
</Card></Col
>
</Card>
<Card class="h-100" style="width: 49%;">
<form
id="colorblindmode-form"
method="post"
action="/frontend/configuration/"
class="card-body"
on:submit|preventDefault={() =>
updateSetting("#colorblindmode-form", "cbm")}
>
<!-- The Svelte 'class' directive only works on DOM elements directly; a plain class="..." on the component does not, so inline styles are used instead. -->
<CardTitle
style="margin-bottom: 1em; display: flex; align-items: center;"
>
<div>Color Blind Mode</div>
{#if displayMessage && message.target == "cbm"}<div
style="margin-left: auto; font-size: 0.9em;"
>
<code style="color: {message.color};" out:fade
>Update: {message.msg}</code
>
</div>{/if}
</CardTitle>
<input type="hidden" name="key" value="plot_general_colorblindMode" />
<div class="mb-3">
<div>
{#if config?.plot_general_colorblindMode}
<input type="radio" id="cbm-true-checked" name="value" value="true" checked />
{:else}
<input type="radio" id="cbm-true" name="value" value="true" />
{/if}
<label for="true">Yes</label>
</div>
<div>
{#if config?.plot_general_colorblindMode}
<input type="radio" id="cbm-false" name="value" value="false" />
{:else}
<input type="radio" id="cbm-false-checked" name="value" value="false" checked />
{/if}
<label for="false">No</label>
</div>
</div>
<Button color="primary" type="submit">Submit</Button>
</form>
</Card>
</Col>
</Row>

View File

@ -43,10 +43,33 @@
}
}
let displayCheck = false;
function clipJwt() {
navigator.clipboard
.writeText(jwt)
.catch((reason) => console.error(reason));
displayCheck = true;
// Navigator clipboard api needs a secure context (https)
if (navigator.clipboard && window.isSecureContext) {
navigator.clipboard
.writeText(jwt)
.catch((reason) => console.error(reason));
} else {
// Workaround: Create, Fill, And Copy Content of Textarea
const textArea = document.createElement("textarea");
textArea.value = jwt;
textArea.style.position = "absolute";
textArea.style.left = "-999999px";
document.body.prepend(textArea);
textArea.select();
try {
document.execCommand('copy');
} catch (error) {
console.error(error);
} finally {
textArea.remove();
}
}
setTimeout(function () {
displayCheck = false;
}, 1000);
}
const dispatch = createEventDispatcher();
@ -74,7 +97,7 @@
<CardTitle
style="margin-bottom: 1em; display: flex; align-items: center;"
>
<div>Paging Type</div>
<div>Job List Paging Type</div>
{#if displayMessage && message.target == "pag"}<div
style="margin-left: auto; font-size: 0.9em;"
>
@ -120,6 +143,11 @@
<p class="mt-2">
Your token is displayed on the right. Press this button to copy it to the clipboard.
</p>
{#if displayCheck}
<p class="mt-2">
<span class="text-success">Copied!</span>
</p>
{/if}
{:else}
<Button color="success" on:click={getUserJwt(username)}>
Generate JWT for '{username}'

View File

@ -4,7 +4,6 @@
Properties:
- `itemsPerRow Number`: Elements to render per row
- `items [Any]`: List of plot components to render
- `renderFor String`: If 'job', filter disabled metrics
-->
<script>
@ -15,43 +14,13 @@
export let itemsPerRow
export let items
export let renderFor
let rows = [];
const isPlaceholder = x => x._is_placeholder === true;
function tile(items, itemsPerRow) {
const rows = []
for (let ri = 0; ri < items.length; ri += itemsPerRow) {
const row = []
for (let ci = 0; ci < itemsPerRow; ci += 1) {
if (ri + ci < items.length)
row.push(items[ri + ci])
else
row.push({ _is_placeholder: true, ri, ci })
}
rows.push(row)
}
return rows
}
$: if (renderFor === 'job') {
rows = tile(items.filter(item => item.disabled === false), itemsPerRow)
} else {
rows = tile(items, itemsPerRow)
}
</script>
{#each rows as row}
<Row cols={{ xs: 1, sm: 1, md: 2, lg: itemsPerRow}}>
{#each row as item (item)}
<Col class="px-1">
{#if !isPlaceholder(item)}
<slot item={item}/>
{/if}
</Col>
{/each}
</Row>
{/each}
<Row cols={{ xs: 1, sm: 2, md: 3, lg: itemsPerRow}}>
{#each items as item}
<Col class="px-1">
<slot {item}/>
</Col>
{/each}
</Row>
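
With `renderFor` removed, PlotGrid no longer filters disabled items or pads rows with placeholders; it simply tiles whatever it receives into one responsive Row. A caller that still wants the old `renderFor="job"` behaviour would now pre-filter itself, roughly as in this sketch (the item list and slot content are placeholders):

    <!-- Hypothetical caller: disabled items are filtered out before PlotGrid sees them -->
    <PlotGrid
      let:item
      itemsPerRow={3}
      items={allItems.filter((item) => item.disabled === false)}
    >
      <h5>{item.metric}</h5>
    </PlotGrid>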

View File

@ -15,8 +15,8 @@
<script>
import uPlot from "uplot";
import { formatNumber } from "../units.js";
import { onMount, onDestroy } from "svelte";
import { formatNumber } from "../units.js";
import { Card } from "@sveltestrap/sveltestrap";
export let data;
@ -26,16 +26,31 @@
export let title = "";
export let xlabel = "";
export let xunit = "";
export let xtime = false;
export let ylabel = "";
export let yunit = "";
const { bars } = uPlot.paths;
const drawStyles = {
bars: 1,
points: 2,
};
function formatTime(t) {
if (t !== null) {
if (isNaN(t)) {
return t;
} else {
const tAbs = Math.abs(t);
const h = Math.floor(tAbs / 3600);
const m = Math.floor((tAbs % 3600) / 60);
if (h == 0) return `${m}m`;
else if (m == 0) return `${h}h`;
else return `${h}:${m}h`;
}
}
}
function paths(u, seriesIdx, idx0, idx1, extendGap, buildClip) {
let s = u.series[seriesIdx];
let style = s.drawStyle;
@ -139,7 +154,7 @@
label: xlabel,
labelGap: 10,
size: 25,
incrs: [1, 2, 5, 6, 10, 12, 50, 100, 500, 1000, 5000, 10000],
incrs: xtime ? [60, 120, 300, 600, 1800, 3600, 7200, 14400, 18000, 21600, 43200, 86400] : [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000],
border: {
show: true,
stroke: "#000000",
@ -149,7 +164,13 @@
size: 5 / devicePixelRatio,
stroke: "#000000",
},
values: (_, t) => t.map((v) => formatNumber(v)),
values: (_, t) => t.map((v) => {
if (xtime) {
return formatTime(v);
} else {
return formatNumber(v)
}
}),
},
{
stroke: "#000000",
@ -166,17 +187,25 @@
size: 5 / devicePixelRatio,
stroke: "#000000",
},
values: (_, t) => t.map((v) => formatNumber(v)),
values: (_, t) => t.map((v) => {
return formatNumber(v)
}),
},
],
series: [
{
label: xunit !== "" ? xunit : null,
value: (u, ts, sidx, didx) => {
if (usesBins) {
if (usesBins && xtime) {
const min = u.data[sidx][didx - 1] ? formatTime(u.data[sidx][didx - 1]) : 0;
const max = formatTime(u.data[sidx][didx]);
ts = min + "-" + max; // narrow spaces
} else if (usesBins) {
const min = u.data[sidx][didx - 1] ? u.data[sidx][didx - 1] : 0;
const max = u.data[sidx][didx];
ts = min + "-" + max; // narrow spaces
} else if (xtime) {
ts = formatTime(ts);
}
return ts;
},
@ -191,6 +220,7 @@
},
{
drawStyle: drawStyles.bars,
width: 1, // 1 / lastBinCount,
lineInterpolation: null,
stroke: "#85abce",
fill: "#85abce", // + "1A", // Transparent Fill
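
For reference, the `formatTime` tick formatter above maps second values onto compact hour/minute labels. A standalone copy (slightly simplified) with the expected outputs:

    // Standalone sketch of the tick formatter above, for illustration only.
    function formatTime(t) {
      if (t === null || isNaN(t)) return t;
      const tAbs = Math.abs(t);
      const h = Math.floor(tAbs / 3600);
      const m = Math.floor((tAbs % 3600) / 60);
      if (h === 0) return `${m}m`;
      if (m === 0) return `${h}h`;
      return `${h}:${m}h`;
    }
    console.log([600, 3600, 5400, 86400].map(formatTime)); // [ '10m', '1h', '1:30h', '24h' ]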

View File

@ -9,12 +9,12 @@
- `height Number?`: The plot height [Default: 300]
- `timestep Number`: The timestep used for X-axis rendering
- `series [GraphQL.Series]`: The metric data object
- `useStatsSeries Bool?`: If this plot uses the statistics Min/Max/Median representation; automatically set to according bool [Default: null]
- `useStatsSeries Bool?`: If this plot uses the statistics Min/Max/Median representation; automatically set to according bool [Default: false]
- `statisticsSeries [GraphQL.StatisticsSeries]?`: Min/Max/Median representation of metric data [Default: null]
- `cluster GraphQL.Cluster`: Cluster Object of the parent job
- `cluster String`: Cluster name of the parent job / data
- `subCluster String`: Name of the subCluster of the parent job
- `isShared Bool?`: If this job used shared resources; will adapt threshold indicators accordingly [Default: false]
- `forNode Bool?`: If this plot is used for node data display; will ren[data, err := metricdata.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx)](https://github.com/ClusterCockpit/cc-backend/blob/9fe7cdca9215220a19930779a60c8afc910276a3/internal/graph/schema.resolvers.go#L391-L392)der x-axis as negative time with $now as maximum [Default: false]
- `forNode Bool?`: If this plot is used for node data display; will render x-axis as negative time with $now as maximum [Default: false]
- `numhwthreads Number?`: Number of job HWThreads [Default: 0]
- `numaccs Number?`: Number of job Accelerators [Default: 0]
- `zoomState Object?`: The last zoom state to preserve on user zoom [Default: null]
@ -124,13 +124,13 @@
export let metric;
export let scope = "node";
export let width = null;
export let width = 0;
export let height = 300;
export let timestep;
export let series;
export let useStatsSeries = null;
export let useStatsSeries = false;
export let statisticsSeries = null;
export let cluster;
export let cluster = "";
export let subCluster;
export let isShared = false;
export let forNode = false;
@ -138,11 +138,11 @@
export let numaccs = 0;
export let zoomState = null;
export let thresholdState = null;
export let extendedLegendData = null;
if (useStatsSeries == null) useStatsSeries = statisticsSeries != null;
if (useStatsSeries == false && series == null) useStatsSeries = true;
if (!useStatsSeries && statisticsSeries != null) useStatsSeries = true;
const usesMeanStatsSeries = (useStatsSeries && statisticsSeries.mean.length != 0)
const usesMeanStatsSeries = (statisticsSeries?.mean && statisticsSeries.mean.length != 0)
const dispatch = createEventDispatcher();
const subClusterTopology = getContext("getHardwareTopology")(cluster, subCluster);
const metricConfig = getContext("getMetricConfig")(cluster, subCluster, metric);
@ -152,10 +152,12 @@
const lineWidth =
clusterCockpitConfig.plot_general_lineWidth / window.devicePixelRatio;
const lineColors = clusterCockpitConfig.plot_general_colorscheme;
const cbmode = clusterCockpitConfig?.plot_general_colorblindMode || false;
const backgroundColors = {
normal: "rgba(255, 255, 255, 1.0)",
caution: "rgba(255, 128, 0, 0.3)",
alert: "rgba(255, 0, 0, 0.3)",
caution: cbmode ? "rgba(239, 230, 69, 0.3)" : "rgba(255, 128, 0, 0.3)",
alert: cbmode ? "rgba(225, 86, 44, 0.3)" : "rgba(255, 0, 0, 0.3)",
};
const thresholds = findJobAggregationThresholds(
subClusterTopology,
@ -192,6 +194,7 @@
className && legendEl.classList.add(className);
uPlot.assign(legendEl.style, {
minWidth: extendedLegendData ? "300px" : "100px",
textAlign: "left",
pointerEvents: "none",
display: "none",
@ -205,11 +208,10 @@
// conditional hide series color markers:
if (
useStatsSeries === true || // Min/Max/Median Self-Explanatory
useStatsSeries || // Min/Max/Median Self-Explanatory
dataSize === 1 || // Only one Y-Dataseries
dataSize > 6
dataSize > 8 // More than 8 Y-Dataseries
) {
// More than 6 Y-Dataseries
const idents = legendEl.querySelectorAll(".u-marker");
for (let i = 0; i < idents.length; i++)
idents[i].style.display = "none";
@ -235,12 +237,12 @@
function update(u) {
const { left, top } = u.cursor;
const width = u.over.querySelector(".u-legend").offsetWidth;
const width = u?.over?.querySelector(".u-legend")?.offsetWidth ? u.over.querySelector(".u-legend").offsetWidth : 0;
legendEl.style.transform =
"translate(" + (left - width - 15) + "px, " + (top + 15) + "px)";
}
if (dataSize <= 12 || useStatsSeries === true) {
if (dataSize <= 12 || useStatsSeries) {
return {
hooks: {
init: init,
@ -309,13 +311,6 @@
}
}
const plotSeries = [
{
label: "Runtime",
value: (u, ts, sidx, didx) =>
didx == null ? null : formatTime(ts, forNode),
},
];
const plotData = [new Array(longestSeries)];
if (forNode === true) {
// Negative Timestamp Buildup
@ -332,6 +327,15 @@
plotData[0][j] = j * timestep;
}
const plotSeries = [
// Note: The X legend will not be shown once the Y axes are in extendedMode
{
label: "Runtime",
value: (u, ts, sidx, didx) =>
(didx == null) ? null : formatTime(ts, forNode),
}
];
let plotBands = undefined;
if (useStatsSeries) {
plotData.push(statisticsSeries.min);
@ -346,13 +350,13 @@
label: "min",
scale: "y",
width: lineWidth,
stroke: "red",
stroke: cbmode ? "rgb(0,255,0)" : "red",
});
plotSeries.push({
label: "max",
scale: "y",
width: lineWidth,
stroke: "green",
stroke: cbmode ? "rgb(0,0,255)" : "green",
});
plotSeries.push({
label: usesMeanStatsSeries ? "mean" : "median",
@ -362,21 +366,66 @@
});
plotBands = [
{ series: [2, 3], fill: "rgba(0,255,0,0.1)" },
{ series: [3, 1], fill: "rgba(255,0,0,0.1)" },
{ series: [2, 3], fill: cbmode ? "rgba(0,0,255,0.1)" : "rgba(0,255,0,0.1)" },
{ series: [3, 1], fill: cbmode ? "rgba(0,255,0,0.1)" : "rgba(255,0,0,0.1)" },
];
} else {
for (let i = 0; i < series.length; i++) {
plotData.push(series[i].data);
plotSeries.push({
label:
scope === "node"
// Default
if (!extendedLegendData) {
plotSeries.push({
label:
scope === "node"
? series[i].hostname
: scope + " #" + (i + 1),
scale: "y",
width: lineWidth,
stroke: lineColor(i, series.length),
});
: scope === "accelerator"
? 'Acc #' + (i + 1) // series[i].id.slice(9, 14) | Too Hardware Specific
: scope + " #" + (i + 1),
scale: "y",
width: lineWidth,
stroke: lineColor(i, series.length),
});
}
// Extended Legend For NodeList
else {
plotSeries.push({
label:
scope === "node"
? series[i].hostname
: scope === "accelerator"
? 'Acc #' + (i + 1) // series[i].id.slice(9, 14) | Too Hardware Specific
: scope + " #" + (i + 1),
scale: "y",
width: lineWidth,
stroke: lineColor(i, series.length),
values: (u, sidx, idx) => {
// "i" = "sidx - 1" : sidx contains x-axis-data
if (idx == null)
return {
time: '-',
value: '-',
user: '-',
job: '-'
};
if (series[i].id in extendedLegendData) {
return {
time: formatTime(plotData[0][idx], forNode),
value: plotData[sidx][idx],
user: extendedLegendData[series[i].id].user,
job: extendedLegendData[series[i].id].job,
};
} else {
return {
time: formatTime(plotData[0][idx], forNode),
value: plotData[sidx][idx],
user: '-',
job: '-',
};
}
}
});
}
}
}
@ -432,13 +481,13 @@
u.ctx.save();
u.ctx.textAlign = "start"; // 'end'
u.ctx.fillStyle = "black";
u.ctx.fillText(textl, u.bbox.left + 10, u.bbox.top + 10);
u.ctx.fillText(textl, u.bbox.left + 10, u.bbox.top + (forNode ? 0 : 10));
u.ctx.textAlign = "end";
u.ctx.fillStyle = "black";
u.ctx.fillText(
textr,
u.bbox.left + u.bbox.width - 10,
u.bbox.top + 10,
u.bbox.top + (forNode ? 0 : 10),
);
// u.ctx.fillText(text, u.bbox.left + u.bbox.width - 10, u.bbox.top + u.bbox.height - 10) // Recipe for bottom right
@ -496,10 +545,12 @@
},
legend: {
// Display legend until max 12 Y-dataseries
show: series.length <= 12 || useStatsSeries === true ? true : false,
live: series.length <= 12 || useStatsSeries === true ? true : false,
show: series.length <= 12 || useStatsSeries,
live: series.length <= 12 || useStatsSeries,
},
cursor: { drag: { x: true, y: true } },
cursor: {
drag: { x: true, y: true },
}
};
// RENDER HANDLING
@ -535,17 +586,9 @@
}
onMount(() => {
// Setup Wrapper
if (series[0].data.length > 0) {
if (forNode) {
plotWrapper.style.paddingTop = "0.5rem"
plotWrapper.style.paddingBottom = "0.5rem"
}
plotWrapper.style.backgroundColor = backgroundColor();
plotWrapper.style.borderRadius = "5px";
if (plotWrapper) {
render(width, height);
}
// Init Plot
render(width, height);
});
onDestroy(() => {
@ -553,22 +596,20 @@
if (uplot) uplot.destroy();
});
// This updates it on all size changes
// Condition for reactive triggering (eg scope change)
$: if (series[0].data.length > 0) {
// This updates plot on all size changes if wrapper (== data) exists
$: if (plotWrapper) {
onSizeChange(width, height);
}
</script>
<!-- Define Wrapper and NoData Card within $width -->
<div bind:clientWidth={width}>
{#if series[0].data.length > 0}
<div bind:this={plotWrapper}/>
{:else}
<Card class="mx-4" body color="warning"
>Cannot render plot: No series data returned for <code>{metric}</code></Card
>
{/if}
</div>
<!-- Define $width Wrapper and NoData Card -->
{#if series[0]?.data && series[0].data.length > 0}
<div bind:this={plotWrapper} bind:clientWidth={width}
style="background-color: {backgroundColor()};" class={forNode ? 'py-2 rounded' : 'rounded'}
/>
{:else}
<Card body color="warning" class="mx-4"
>Cannot render plot: No series data returned for <code>{metric}</code></Card
>
{/if}
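
The extended legend branch above looks up `series[i].id` in `extendedLegendData` to show which user and job a line belongs to, so the prop is expected to be a map keyed by series id. A sketch with made-up ids and values:

    // Hypothetical extendedLegendData as consumed by the values() callback above:
    const extendedLegendData = {
      "1337": { user: "alice", job: "4711" },  // key = series[i].id
      "1338": { user: "bob",   job: "4712" },
    };
    // Series whose id is not in the map fall back to user: '-', job: '-'.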

View File

@ -40,6 +40,7 @@
let timeoutId = null;
const lineWidth = clusterCockpitConfig.plot_general_lineWidth;
const cbmode = clusterCockpitConfig?.plot_general_colorblindMode || false;
// Helpers
function getGradientR(x) {
@ -61,7 +62,7 @@
return Math.floor(x * 255.0);
}
function getRGB(c) {
return `rgb(${getGradientR(c)}, ${getGradientG(c)}, ${getGradientB(c)})`;
return `rgb(${cbmode ? '0' : getGradientR(c)}, ${getGradientG(c)}, ${getGradientB(c)})`;
}
function nearestThousand(num) {
return Math.ceil(num / 1000) * 1000;

View File

@ -12,7 +12,7 @@
-->
<script>
import { getContext } from "svelte";
import { getContext, createEventDispatcher } from "svelte";
import {
Modal,
ModalBody,
@ -33,6 +33,7 @@
const onInit = getContext("on-init")
const globalMetrics = getContext("globalMetrics")
const dispatch = createEventDispatcher();
let newMetricsOrder = [];
let unorderedMetrics = [...metrics];
@ -128,6 +129,8 @@
throw res.error;
}
});
dispatch('update-metrics', metrics);
}
</script>
@ -175,6 +178,7 @@
</ModalBody>
<ModalFooter>
<Button color="primary" on:click={closeAndApply}>Close & Apply</Button>
<Button color="secondary" on:click={() => (isOpen = !isOpen)}>Cancel</Button>
</ModalFooter>
</Modal>

View File

@ -304,8 +304,19 @@ export function stickyHeader(datatableHeaderSelector, updatePading) {
export function checkMetricDisabled(m, c, s) { // [m]etric, [c]luster, [s]ubcluster
const metrics = getContext("globalMetrics");
const result = metrics?.find((gm) => gm.name === m)?.availability?.find((av) => av.cluster === c)?.subClusters?.includes(s)
return !result
const available = metrics?.find((gm) => gm.name === m)?.availability?.find((av) => av.cluster === c)?.subClusters?.includes(s)
// Return inverse logic
return !available
}
export function checkMetricsDisabled(ma, c, s) { // [m]etric[a]rray, [c]luster, [s]ubcluster
let result = {};
const metrics = getContext("globalMetrics");
ma.forEach((m) => {
// Return named inverse logic: !available
result[m] = !(metrics?.find((gm) => gm.name === m)?.availability?.find((av) => av.cluster === c)?.subClusters?.includes(s))
});
return result
}
export function getStatsItems(presetStats = []) {
@ -438,7 +449,7 @@ function getMetricConfigDeep(metric, cluster, subCluster) {
}
}
export function convert2uplot(canvasData) {
export function convert2uplot(canvasData, secondsToMinutes = false, secondsToHours = false) {
// Prep: Uplot Data Structure
let uplotData = [[],[]] // [X, Y1, Y2, ...]
// Iterate if exists
@ -446,11 +457,21 @@ export function convert2uplot(canvasData) {
canvasData.forEach( cd => {
if (Object.keys(cd).length == 4) { // MetricHisto Datafromat
uplotData[0].push(cd?.max ? cd.max : 0)
uplotData[1].push(cd?.count ? cd.count : 0)
} else { // Default -> Fill Histodata with zero values on unused value placing -> maybe allows zoom trigger as known
if (secondsToHours) {
let hours = cd.value / 3600
console.log("x seconds to y hours", cd.value, hours)
uplotData[0].push(hours)
} else if (secondsToMinutes) {
let minutes = cd.value / 60
console.log("x seconds to y minutes", cd.value, minutes)
uplotData[0].push(minutes)
} else {
uplotData[0].push(cd.value)
}
uplotData[1].push(cd.count)
} else { // Default
uplotData[0].push(cd.value)
uplotData[1].push(cd.count)
}
}
})
}
return uplotData
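
`convert2uplot` above builds the `[X, Y]` column arrays uPlot expects: four-key MetricHisto entries contribute `max`/`count`, everything else `value`/`count`, with optional rescaling of second values. A sketch with made-up buckets:

    // Hypothetical duration buckets in the default value/count format:
    const histDuration = [
      { value: 3600, count: 12 },
      { value: 7200, count: 5 },
    ];
    // convert2uplot(histDuration)               -> [[3600, 7200], [12, 5]]
    // convert2uplot(histDuration, false, true)  -> [[1, 2], [12, 5]]  (seconds rescaled to hours)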

View File

@ -3,6 +3,7 @@
Properties:
- `clusters [String]`: List of cluster names
- `subClusters map[String][]string`: Map of subclusters by cluster names
- `links [Object]`: Pre-filtered link objects based on user auth
- `direction String?`: The direction of the drop-down menu [default: down]
-->
@ -18,45 +19,83 @@
} from "@sveltestrap/sveltestrap";
export let clusters;
export let subClusters;
export let links;
export let direction = "down";
</script>
{#each links as item}
{#if item.listOptions}
<Dropdown nav inNavbar {direction}>
<DropdownToggle nav caret>
<Icon name={item.icon} />
{item.title}
</DropdownToggle>
<DropdownMenu class="dropdown-menu-lg-end">
<DropdownItem
href={item.href}
>
All Clusters
</DropdownItem>
<DropdownItem divider />
{#each clusters as cluster}
<Dropdown nav direction="right">
<DropdownToggle nav caret class="dropdown-item py-1 px-2">
{cluster.name}
</DropdownToggle>
<DropdownMenu>
<DropdownItem class="py-1 px-2"
href={item.href + '?cluster=' + cluster.name}
>
All Jobs
</DropdownItem>
<DropdownItem class="py-1 px-2"
href={item.href + '?cluster=' + cluster.name + '&state=running'}
>
Running Jobs
</DropdownItem>
</DropdownMenu>
</Dropdown>
{/each}
</DropdownMenu>
</Dropdown>
{#if item.title === 'Nodes'}
<Dropdown nav inNavbar {direction}>
<DropdownToggle nav caret>
<Icon name={item.icon} />
{item.title}
</DropdownToggle>
<DropdownMenu class="dropdown-menu-lg-end">
{#each clusters as cluster}
<Dropdown nav direction="right">
<DropdownToggle nav caret class="dropdown-item py-1 px-2">
{cluster.name}
</DropdownToggle>
<DropdownMenu>
<DropdownItem class="py-1 px-2"
href={item.href + cluster.name}
>
Node Overview
</DropdownItem>
<DropdownItem class="py-1 px-2"
href={item.href + 'list/' + cluster.name}
>
Node List
</DropdownItem>
{#each subClusters[cluster.name] as subCluster}
<DropdownItem class="py-1 px-2"
href={item.href + 'list/' + cluster.name + '/' + subCluster}
>
{subCluster} Node List
</DropdownItem>
{/each}
</DropdownMenu>
</Dropdown>
{/each}
</DropdownMenu>
</Dropdown>
{:else}
<Dropdown nav inNavbar {direction}>
<DropdownToggle nav caret>
<Icon name={item.icon} />
{item.title}
</DropdownToggle>
<DropdownMenu class="dropdown-menu-lg-end">
<DropdownItem
href={item.href}
>
All Clusters
</DropdownItem>
<DropdownItem divider />
{#each clusters as cluster}
<Dropdown nav direction="right">
<DropdownToggle nav caret class="dropdown-item py-1 px-2">
{cluster.name}
</DropdownToggle>
<DropdownMenu>
<DropdownItem class="py-1 px-2"
href={item.href + '?cluster=' + cluster.name}
>
All Jobs
</DropdownItem>
<DropdownItem class="py-1 px-2"
href={item.href + '?cluster=' + cluster.name + '&state=running'}
>
Running Jobs
</DropdownItem>
</DropdownMenu>
</Dropdown>
{/each}
</DropdownMenu>
</Dropdown>
{/if}
{:else if !item.perCluster}
<NavLink href={item.href} active={window.location.pathname == item.href}
><Icon name={item.icon} /> {item.title}</NavLink
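
The new `subClusters` prop drives the per-subcluster "Node List" entries in the Nodes dropdown above; it is expected to be a plain map from cluster name to subcluster names. A sketch of the shape and of the hrefs it produces (all names are placeholders):

    // Hypothetical subClusters map as passed down from the Header component:
    const subClusters = {
      clusterA: ["main", "gpu"],
      clusterB: ["cpuonly"],
    };
    // With item.href === "/monitoring/systems/" the dropdown renders, per cluster:
    //   /monitoring/systems/clusterA             (Node Overview)
    //   /monitoring/systems/list/clusterA        (Node List)
    //   /monitoring/systems/list/clusterA/main   (main Node List)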

View File

@ -4,11 +4,14 @@ import Systems from './Systems.root.svelte'
new Systems({
target: document.getElementById('svelte-app'),
props: {
displayType: displayType,
cluster: infos.cluster,
subCluster: infos.subCluster,
from: infos.from,
to: infos.to
},
context: new Map([
['cc-config', clusterCockpitConfig]
['cc-config', clusterCockpitConfig],
['resampling', resampleConfig]
])
})

View File

@ -0,0 +1,280 @@
<!--
@component Cluster Per Node List component; renders current state of SELECTABLE metrics for ALL nodes
Properties:
- `cluster String`: The nodes' cluster
- `subCluster String`: The nodes' subCluster
- `ccconfig Object?`: The ClusterCockpit Config Context [Default: null]
- `selectedMetrics [String]`: The array of selected metrics
- `systemUnits Object`: The object of metric units
-->
<script>
import { getContext } from "svelte";
import { queryStore, gql, getContextClient, mutationStore } from "@urql/svelte";
import { Row, Col, Card, Table, Spinner } from "@sveltestrap/sveltestrap";
import { stickyHeader } from "../generic/utils.js";
import NodeListRow from "./nodelist/NodeListRow.svelte";
import Pagination from "../generic/joblist/Pagination.svelte";
export let cluster;
export let subCluster = "";
export let ccconfig = null;
export let selectedMetrics = [];
export let selectedResolution = 0;
export let hostnameFilter = "";
export let systemUnits = null;
export let from = null;
export let to = null;
// Decouple from Job List Paging Params?
let usePaging = ccconfig?.node_list_usePaging || false
let itemsPerPage = usePaging ? (ccconfig?.plot_list_nodesPerPage || 10) : 10;
let page = 1;
let paging = { itemsPerPage, page };
let headerPaddingTop = 0;
stickyHeader(
".cc-table-wrapper > table.table >thead > tr > th.position-sticky:nth-child(1)",
(x) => (headerPaddingTop = x),
);
// const { query: initq } = init();
const initialized = getContext("initialized");
const client = getContextClient();
const nodeListQuery = gql`
query ($cluster: String!, $subCluster: String!, $nodeFilter: String!, $metrics: [String!], $scopes: [MetricScope!]!, $from: Time!, $to: Time!, $paging: PageRequest!, $selectedResolution: Int) {
nodeMetricsList(
cluster: $cluster
subCluster: $subCluster
nodeFilter: $nodeFilter
scopes: $scopes
metrics: $metrics
from: $from
to: $to
page: $paging
resolution: $selectedResolution
) {
items {
host
subCluster
metrics {
name
scope
metric {
timestep
unit {
base
prefix
}
series {
id
hostname
data
statistics {
min
avg
max
}
}
statisticsSeries {
min
median
max
}
}
}
}
totalNodes
hasNextPage
}
}
`
const updateConfigurationMutation = ({ name, value }) => {
return mutationStore({
client: client,
query: gql`
mutation ($name: String!, $value: String!) {
updateConfiguration(name: $name, value: $value)
}
`,
variables: { name, value },
});
};
// Decouple from Job List Paging Params?
function updateConfiguration(value, page) {
updateConfigurationMutation({
name: "plot_list_nodesPerPage",
value: value,
}).subscribe((res) => {
if (res.fetching === false && !res.error) {
nodes = [] // Empty List
paging = { itemsPerPage: value, page: page }; // Trigger reload of nodeList
} else if (res.fetching === false && res.error) {
throw res.error;
}
});
}
if (!usePaging) {
window.addEventListener('scroll', () => {
let {
scrollTop,
scrollHeight,
clientHeight
} = document.documentElement;
// Add 100 px offset to trigger load earlier
if (scrollTop + clientHeight >= scrollHeight - 100 && $nodesQuery?.data != null && $nodesQuery.data?.nodeMetricsList.hasNextPage) {
let pendingPaging = { ...paging }
pendingPaging.page += 1
paging = pendingPaging
};
});
};
$: nodesQuery = queryStore({
client: client,
query: nodeListQuery,
variables: {
cluster: cluster,
subCluster: subCluster,
nodeFilter: hostnameFilter,
scopes: ["core", "socket", "accelerator"],
metrics: selectedMetrics,
from: from.toISOString(),
to: to.toISOString(),
paging: paging,
selectedResolution: selectedResolution,
},
requestPolicy: "network-only", // Resolution queries are cached, but how to access them? For now: reload on every change
});
let nodes = [];
$: if ($initialized && $nodesQuery.data) {
if (usePaging) {
nodes = [...$nodesQuery.data.nodeMetricsList.items].sort((a, b) => a.host.localeCompare(b.host));
} else {
nodes = nodes.concat([...$nodesQuery.data.nodeMetricsList.items].sort((a, b) => a.host.localeCompare(b.host)))
}
}
$: if (!usePaging && (selectedMetrics || selectedResolution || hostnameFilter || from || to)) {
// Continuous Scroll: Reset list and paging if parameters change: existing entries will not match the new selections
nodes = [];
paging = { itemsPerPage, page: 1 };
}
$: matchedNodes = $nodesQuery.data?.nodeMetricsList.totalNodes || matchedNodes;
</script>
<Row>
<div class="col cc-table-wrapper">
<Table cellspacing="0px" cellpadding="0px">
<thead>
<tr>
<th
class="position-sticky top-0 text-capitalize"
scope="col"
style="padding-top: {headerPaddingTop}px;"
>
{cluster} Node Info
{#if $nodesQuery.fetching}
<Spinner size="sm" style="margin-left:10px;" secondary />
{/if}
</th>
{#each selectedMetrics as metric (metric)}
<th
class="position-sticky top-0 text-center"
scope="col"
style="padding-top: {headerPaddingTop}px"
>
{metric} ({systemUnits[metric]})
</th>
{/each}
</tr>
</thead>
<tbody>
{#if $nodesQuery.error}
<Row>
<Col>
<Card body color="danger">{$nodesQuery.error.message}</Card>
</Col>
</Row>
{:else}
{#each nodes as nodeData}
<NodeListRow {nodeData} {cluster} {selectedMetrics}/>
{:else}
<tr>
<td colspan={selectedMetrics.length + 1}> No nodes found </td>
</tr>
{/each}
{/if}
{#if $nodesQuery.fetching || !$nodesQuery.data}
<tr>
<td colspan={selectedMetrics.length + 1}>
<div style="text-align:center;">
<p><b>
Loading nodes {nodes.length + 1} to
{ matchedNodes
? `${(nodes.length + paging.itemsPerPage) > matchedNodes ? matchedNodes : (nodes.length + paging.itemsPerPage)} of ${matchedNodes} total`
: (nodes.length + paging.itemsPerPage)
}
</b></p>
<Spinner secondary />
</div>
</td>
</tr>
{/if}
</tbody>
</Table>
</div>
</Row>
{#if usePaging}
<Pagination
bind:page
{itemsPerPage}
itemText="Nodes"
totalItems={matchedNodes}
on:update-paging={({ detail }) => {
if (detail.itemsPerPage != itemsPerPage) {
updateConfiguration(detail.itemsPerPage.toString(), detail.page);
} else {
nodes = []
paging = { itemsPerPage: detail.itemsPerPage, page: detail.page };
}
}}
/>
{/if}
<style>
.cc-table-wrapper {
overflow: initial;
}
.cc-table-wrapper > :global(table) {
border-collapse: separate;
border-spacing: 0px;
table-layout: fixed;
}
.cc-table-wrapper :global(button) {
margin-bottom: 0px;
}
.cc-table-wrapper > :global(table > tbody > tr > td) {
margin: 0px;
padding-left: 5px;
padding-right: 0px;
}
th.position-sticky.top-0 {
background-color: white;
z-index: 10;
border-bottom: 1px solid black;
}
</style>
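
Paging and continuous scroll share the same PageRequest-shaped object above; continuous scroll simply advances the page near the bottom of the window and appends the next batch instead of replacing it. A condensed sketch of that state transition (variable names as used in the component; `nextItems` stands for the freshly fetched, sorted items):

    // PageRequest-shaped object sent as the `page` variable of nodeMetricsList:
    let paging = { itemsPerPage: 10, page: 1 };
    // Near the bottom of the page, and only if the last response reported hasNextPage:
    //   paging = { ...paging, page: paging.page + 1 };  // triggers the next query
    //   nodes = nodes.concat(nextItems);                // results are appended, not replaced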

View File

@ -0,0 +1,155 @@
<!--
@component Cluster Per Node Overview component; renders current state of ONE metric for ALL nodes
Properties:
- `ccconfig Object?`: The ClusterCockpit Config Context [Default: null]
- `cluster String`: The cluster to show status information for
- `selectedMetrics [String]?`: The array of selected metrics [Default: null]
-->
<script>
import { queryStore, gql, getContextClient } from "@urql/svelte";
import { Row, Col, Card, Spinner } from "@sveltestrap/sveltestrap";
import { init, checkMetricsDisabled } from "../generic/utils.js";
import MetricPlot from "../generic/plots/MetricPlot.svelte";
export let ccconfig = null;
export let cluster = "";
export const subCluster = "";
export let selectedMetrics = null;
export let hostnameFilter = "";
export let from = null;
export let to = null;
const { query: initq } = init();
const client = getContextClient();
const nodeQuery = gql`
query ($cluster: String!, $metrics: [String!], $from: Time!, $to: Time!) {
nodeMetrics(
cluster: $cluster
metrics: $metrics
from: $from
to: $to
) {
host
subCluster
metrics {
name
scope
metric {
timestep
unit {
base
prefix
}
series {
statistics {
min
avg
max
}
data
}
}
}
}
}
`
$: selectedMetric = selectedMetrics[0] ? selectedMetrics[0] : "";
$: nodesQuery = queryStore({
client: client,
query: nodeQuery,
variables: {
cluster: cluster,
metrics: selectedMetrics,
from: from.toISOString(),
to: to.toISOString(),
},
});
let rawData = []
$: if ($initq.data && $nodesQuery?.data) {
rawData = $nodesQuery?.data?.nodeMetrics.filter((h) => {
if (h.subCluster === '') { // Exclude nodes with empty subCluster field
console.warn('subCluster not configured for node', h.host)
return false
} else {
return h.metrics.some(
(m) => selectedMetrics.includes(m.name) && m.scope == "node",
)
}
})
}
let mappedData = []
$: if (rawData?.length > 0) {
mappedData = rawData.map((h) => ({
host: h.host,
subCluster: h.subCluster,
data: h.metrics.filter(
(m) => selectedMetrics.includes(m.name) && m.scope == "node",
),
disabled: checkMetricsDisabled(
selectedMetrics,
cluster,
h.subCluster,
),
}))
.sort((a, b) => a.host.localeCompare(b.host))
}
let filteredData = []
$: if (mappedData?.length > 0) {
filteredData = mappedData.filter((h) =>
h.host.includes(hostnameFilter)
)
}
</script>
{#if $nodesQuery.error}
<Row>
<Col>
<Card body color="danger">{$nodesQuery.error.message}</Card>
</Col>
</Row>
{:else if $nodesQuery.fetching }
<Row>
<Col>
<Spinner />
</Col>
</Row>
{:else if filteredData?.length > 0}
<!-- PlotGrid flattened into this component -->
<Row cols={{ xs: 1, sm: 2, md: 3, lg: ccconfig.plot_view_plotsPerRow}}>
{#each filteredData as item (item.host)}
<Col class="px-1">
<h4 style="width: 100%; text-align: center;">
<a
style="display: block;padding-top: 15px;"
href="/monitoring/node/{cluster}/{item.host}"
>{item.host} ({item.subCluster})</a
>
</h4>
{#if item?.disabled[selectedMetric]}
<Card body class="mx-3" color="info"
>Metric disabled for subcluster <code
>{selectedMetric}:{item.subCluster}</code
></Card
>
{:else}
<!-- "No Data"-Warning included in MetricPlot-Component -->
<MetricPlot
timestep={item.data[0].metric.timestep}
series={item.data[0].metric.series}
metric={item.data[0].name}
{cluster}
subCluster={item.subCluster}
forNode
/>
{/if}
</Col>
{/each}
</Row>
{/if}
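
Because `checkMetricsDisabled` returns one flag per requested metric, the template above indexes `item.disabled[selectedMetric]`. A sketch of a single `mappedData` element (hypothetical host and metric names):

    // Hypothetical element of mappedData as built above:
    const item = {
      host: "node0001",
      subCluster: "main",
      data: [ /* node-scope metric objects for the selected metrics */ ],
      disabled: { flops_any: false, mem_bw: true },  // result of checkMetricsDisabled()
    };
    // item.disabled[selectedMetric] === true  -> "Metric disabled for subcluster" card is shown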

View File

@ -0,0 +1,177 @@
<!--
@component Displays node info, serves links to single node page and lists
Properties:
- `cluster String`: The node's cluster
- `subCluster String`: The node's subCluster
- `hostname String`: The node's hostname
-->
<script>
import {
Icon,
Button,
Card,
CardHeader,
CardBody,
Input,
InputGroup,
InputGroupText, } from "@sveltestrap/sveltestrap";
export let cluster;
export let subCluster;
export let hostname;
export let dataHealth;
export let nodeJobsData = null;
// No selected metric returned data: node health warning
const healthWarn = !dataHealth.includes(true);
// At least one selected metric returned no data: possible metric config error
const metricWarn = dataHealth.includes(false);
let userList;
let projectList;
$: if (nodeJobsData) {
userList = Array.from(new Set(nodeJobsData.jobs.items.map((j) => j.user))).sort((a, b) => a.localeCompare(b));
projectList = Array.from(new Set(nodeJobsData.jobs.items.map((j) => j.project))).sort((a, b) => a.localeCompare(b));
}
</script>
<Card class="pb-3">
<CardHeader class="d-inline-flex justify-content-between align-items-end">
<div>
<h5 class="mb-0">
Node
<a href="/monitoring/node/{cluster}/{hostname}" target="_blank">
{hostname}
</a>
</h5>
</div>
<div class="text-capitalize">
<h6 class="mb-0">
{cluster} {subCluster}
</h6>
</div>
</CardHeader>
<CardBody>
{#if healthWarn}
<InputGroup>
<InputGroupText>
<Icon name="exclamation-circle"/>
</InputGroupText>
<InputGroupText>
Status
</InputGroupText>
<Button color="danger" disabled>
Unhealthy
</Button>
</InputGroup>
{:else if metricWarn}
<InputGroup>
<InputGroupText>
<Icon name="info-circle"/>
</InputGroupText>
<InputGroupText>
Status
</InputGroupText>
<Button color="warning" disabled>
Missing Metric
</Button>
</InputGroup>
{:else if nodeJobsData?.jobs?.count == 1 && nodeJobsData.jobs.items[0].exclusive}
<InputGroup>
<InputGroupText>
<Icon name="circle-fill"/>
</InputGroupText>
<InputGroupText>
Status
</InputGroupText>
<Button color="success" disabled>
Exclusive
</Button>
</InputGroup>
{:else if nodeJobsData?.jobs?.count >= 1 && !nodeJobsData.jobs.items[0].exclusive}
<InputGroup>
<InputGroupText>
<Icon name="circle-half"/>
</InputGroupText>
<InputGroupText>
Status
</InputGroupText>
<Button color="success" disabled>
Shared
</Button>
</InputGroup>
{:else}
<InputGroup>
<InputGroupText>
<Icon name="circle"/>
</InputGroupText>
<InputGroupText>
Status
</InputGroupText>
<Button color="secondary" disabled>
Idle
</Button>
</InputGroup>
{/if}
<hr class="my-3"/>
<!-- JOBS -->
<InputGroup size="sm" class="justify-content-between mb-3">
<InputGroupText>
<Icon name="activity"/>
</InputGroupText>
<InputGroupText class="justify-content-center" style="width: 4.4rem;">
Activity
</InputGroupText>
<Input class="flex-grow-1" style="background-color: white;" type="text" value="{nodeJobsData?.jobs?.count || 0} Job{(nodeJobsData?.jobs?.count == 1) ? '': 's'}" disabled />
<a title="Show jobs running on this node" href="/monitoring/jobs/?cluster={cluster}&state=running&node={hostname}" target="_blank" class="btn btn-outline-primary" role="button" aria-disabled="true" >
<Icon name="view-list" />
List
</a>
</InputGroup>
<!-- USERS -->
<InputGroup size="sm" class="justify-content-between {(userList?.length > 0) ? 'mb-1' : 'mb-3'}">
<InputGroupText>
<Icon name="people"/>
</InputGroupText>
<InputGroupText class="justify-content-center" style="width: 4.4rem;">
Users
</InputGroupText>
<Input class="flex-grow-1" style="background-color: white;" type="text" value="{userList?.length || 0} User{(userList?.length == 1) ? '': 's'}" disabled />
<a title="Show users active on this node" href="/monitoring/users/?cluster={cluster}&state=running&node={hostname}" target="_blank" class="btn btn-outline-primary" role="button" aria-disabled="true" >
<Icon name="view-list" />
List
</a>
</InputGroup>
{#if userList?.length > 0}
<Card class="mb-3">
<div class="p-1">
{userList.join(", ")}
</div>
</Card>
{/if}
<!-- PROJECTS -->
<InputGroup size="sm" class="justify-content-between {(projectList?.length > 0) ? 'mb-1' : 'mb-3'}">
<InputGroupText>
<Icon name="journals"/>
</InputGroupText>
<InputGroupText class="justify-content-center" style="width: 4.4rem;">
Projects
</InputGroupText>
<Input class="flex-grow-1" style="background-color: white;" type="text" value="{projectList?.length || 0} Project{(projectList?.length == 1) ? '': 's'}" disabled />
<a title="Show projects active on this node" href="/monitoring/projects/?cluster={cluster}&state=running&node={hostname}" target="_blank" class="btn btn-outline-primary" role="button" aria-disabled="true" >
<Icon name="view-list" />
List
</a>
</InputGroup>
{#if projectList?.length > 0}
<Card>
<div class="p-1">
{projectList.join(", ")}
</div>
</Card>
{/if}
</CardBody>
</Card>

View File

@ -0,0 +1,187 @@
<!--
@component Data row for a single node displaying metric plots
Properties:
- `cluster String`: The node's cluster
- `nodeData Object`: The node data object including metric data
- `selectedMetrics [String]`: The array of selected metrics
-->
<script>
import {
queryStore,
gql,
getContextClient,
} from "@urql/svelte";
import { Card, CardBody, Spinner } from "@sveltestrap/sveltestrap";
import { maxScope, checkMetricDisabled } from "../../generic/utils.js";
import MetricPlot from "../../generic/plots/MetricPlot.svelte";
import NodeInfo from "./NodeInfo.svelte";
export let cluster;
export let nodeData;
export let selectedMetrics;
const client = getContextClient();
const paging = { itemsPerPage: 50, page: 1 };
const sorting = { field: "startTime", type: "col", order: "DESC" };
const filter = [
{ cluster: { eq: cluster } },
{ node: { contains: nodeData.host } },
{ state: ["running"] },
];
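// Query the (at most 50) currently running jobs on this node, newest first; the result
// feeds both the NodeInfo card and the extended accelerator legend further below.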
const nodeJobsQuery = gql`
query (
$filter: [JobFilter!]!
$sorting: OrderByInput!
$paging: PageRequest!
) {
jobs(filter: $filter, order: $sorting, page: $paging) {
items {
jobId
user
project
exclusive
resources {
hostname
accelerators
}
}
count
}
}
`;
$: nodeJobsData = queryStore({
client: client,
query: nodeJobsQuery,
variables: { paging, sorting, filter },
});
// Helper
const selectScope = (nodeMetrics) =>
nodeMetrics.reduce(
(a, b) =>
maxScope([a.scope, b.scope]) == a.scope ? b : a,
nodeMetrics[0],
);
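// sortAndSelectScope builds one entry per selected metric (in selection order) of the shape
// { disabled, data }, where data is the single scope-reduced metric entry (or null if the
// node returned nothing for that name) and disabled reflects the subCluster metric config.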
const sortAndSelectScope = (allNodeMetrics) =>
selectedMetrics
.map((selectedName) => allNodeMetrics.filter((nodeMetric) => nodeMetric.name == selectedName))
.map((matchedNodeMetrics) => ({
disabled: false,
data: matchedNodeMetrics.length > 0 ? selectScope(matchedNodeMetrics) : null,
}))
.map((scopedNodeMetric) => {
if (scopedNodeMetric?.data) {
return {
disabled: checkMetricDisabled(
scopedNodeMetric.data.name,
cluster,
nodeData.subCluster,
),
data: scopedNodeMetric.data,
};
} else {
return scopedNodeMetric;
}
});
let refinedData;
let dataHealth;
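// dataHealth holds one boolean per enabled (non-disabled) metric: true if the node returned
// at least one series for it. NodeInfo derives its health/warning state from this array.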
$: if (nodeData?.metrics) {
refinedData = sortAndSelectScope(nodeData?.metrics)
dataHealth = refinedData.filter((rd) => rd.disabled === false).map((enabled) => (enabled.data.metric.series.length > 0))
}
let extendedLegendData = null;
$: if ($nodeJobsData?.data) {
// Determine the node's shared state: only build the extended legend for shared nodes
if ($nodeJobsData.data.jobs.count >= 1 && !$nodeJobsData.data.jobs.items[0].exclusive) {
const accSet = Array.from(new Set($nodeJobsData.data.jobs.items
.map((i) => i.resources
.filter((r) => r.hostname === nodeData.host)
.map((r) => r.accelerators)
)
)).flat(2)
extendedLegendData = {}
for (const accId of accSet) {
const matchJob = $nodeJobsData.data.jobs.items.find((i) => i.resources.find((r) => r.accelerators.includes(accId)))
extendedLegendData[accId] = {
user: matchJob?.user ? matchJob?.user : '-',
job: matchJob?.jobId ? matchJob?.jobId : '-',
}
}
// Theoretically extendable for hwthreadIDs
}
}
</script>
<tr>
<td>
{#if $nodeJobsData.fetching}
<Card>
<CardBody class="content-center">
<Spinner/>
</CardBody>
</Card>
{:else}
<NodeInfo nodeJobsData={$nodeJobsData.data} {cluster} subCluster={nodeData.subCluster} hostname={nodeData.host} {dataHealth}/>
{/if}
</td>
{#each refinedData as metricData (metricData.data.name)}
{#key metricData}
<td>
{#if metricData?.disabled}
<Card body class="mx-3" color="info"
>Metric disabled for subcluster <code
>{metricData.data.name}:{nodeData.subCluster}</code
></Card
>
{:else if !!metricData.data?.metric.statisticsSeries}
<!-- "No Data"-Warning included in MetricPlot-Component -->
<MetricPlot
{cluster}
subCluster={nodeData.subCluster}
metric={metricData.data.name}
scope={metricData.data.scope}
timestep={metricData.data.metric.timestep}
series={metricData.data.metric.series}
statisticsSeries={metricData.data?.metric.statisticsSeries}
useStatsSeries={!!metricData.data?.metric.statisticsSeries}
height={175}
forNode
/>
<div class="my-2"/>
{#key extendedLegendData}
<MetricPlot
{cluster}
subCluster={nodeData.subCluster}
metric={metricData.data.name}
scope={metricData.data.scope}
timestep={metricData.data.metric.timestep}
series={metricData.data.metric.series}
height={175}
{extendedLegendData}
forNode
/>
{/key}
{:else}
<MetricPlot
{cluster}
subCluster={nodeData.subCluster}
metric={metricData.data.name}
scope={metricData.data.scope}
timestep={metricData.data.metric.timestep}
series={metricData.data.metric.series}
height={375}
forNode
/>
{/if}
</td>
{/key}
{/each}
</tr>

View File

@ -15,10 +15,11 @@
{{end}}
<script>
const header = {
"username": "{{ .User.Username }}",
"authlevel": {{ .User.GetAuthLevel }},
"clusters": {{ .Clusters }},
"roles": {{ .Roles }}
"username": "{{ .User.Username }}",
"authlevel": {{ .User.GetAuthLevel }},
"clusters": {{ .Clusters }},
"subClusters": {{ .SubClusters }},
"roles": {{ .Roles }}
};
</script>
</head>

View File

@ -8,6 +8,7 @@
{{define "javascript"}}
<script>
const isAdmin = {{ .User.HasRole .Roles.admin }};
const isSupport = {{ .User.HasRole .Roles.support }};
const isApi = {{ .User.HasRole .Roles.api }};
const username = {{ .User.Username }};
const filterPresets = {{ .FilterPresets }};

View File

@ -7,8 +7,10 @@
{{end}}
{{define "javascript"}}
<script>
const displayType = {{ .Infos.displayType }};
const infos = {{ .Infos }};
const clusterCockpitConfig = {{ .Config }};
const resampleConfig = {{ .Resampling }};
</script>
<script src='/build/systems.js'></script>
{{end}}

View File

@ -13,6 +13,7 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/util"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
)
@ -95,6 +96,7 @@ type Page struct {
Roles map[string]schema.Role // Available roles for frontend render checks
Build Build // Latest information about the application
Clusters []schema.ClusterConfig // List of all clusters for use in the Header
SubClusters map[string][]string // Map per cluster of all subClusters for use in the Header
FilterPresets map[string]interface{} // For pages with the Filter component, this can be used to set initial filters.
Infos map[string]interface{} // For generic use (e.g. username for /monitoring/user/<id>, job id for /monitoring/job/<id>)
Config map[string]interface{} // UI settings for the currently logged in user (e.g. line width, ...)
@ -114,6 +116,15 @@ func RenderTemplate(rw http.ResponseWriter, file string, page *Page) {
}
}
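// Lazily build the per-cluster subCluster name map from the archive config; the base
// template exposes it to the frontend as the "subClusters" header constant.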
if page.SubClusters == nil {
page.SubClusters = make(map[string][]string)
for _, cluster := range archive.Clusters {
for _, sc := range cluster.SubClusters {
page.SubClusters[cluster.Name] = append(page.SubClusters[cluster.Name], sc.Name)
}
}
}
log.Debugf("Page config : %v\n", page.Config)
if err := t.Execute(rw, page); err != nil {
log.Errorf("Template error: %s", err.Error())