diff --git a/api/schema.graphqls b/api/schema.graphqls
index 568c15d..8c27504 100644
--- a/api/schema.graphqls
+++ b/api/schema.graphqls
@@ -223,7 +223,7 @@ type Query {
   allocatedNodes(cluster: String!): [Count!]!

   job(id: ID!): Job
-  jobMetrics(id: ID!, metrics: [String!], scopes: [MetricScope!]): [JobMetricWithName!]!
+  jobMetrics(id: ID!, metrics: [String!], scopes: [MetricScope!], resolution: Int): [JobMetricWithName!]!
   jobsFootprints(filter: [JobFilter!], metrics: [String!]!): Footprints

   jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList!

diff --git a/internal/api/rest.go b/internal/api/rest.go
index c8f4e7a..7946ab7 100644
--- a/internal/api/rest.go
+++ b/internal/api/rest.go
@@ -1110,7 +1110,7 @@ func (api *RestApi) getJobMetrics(rw http.ResponseWriter, r *http.Request) {
 	}

 	resolver := graph.GetResolverInstance()
-	data, err := resolver.Query().JobMetrics(r.Context(), id, metrics, scopes)
+	data, err := resolver.Query().JobMetrics(r.Context(), id, metrics, scopes, nil)
 	if err != nil {
 		json.NewEncoder(rw).Encode(Respone{
 			Error: &struct {
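The schema change above makes resolution an optional Int argument on jobMetrics, and the REST handler keeps its old behaviour by passing nil. A minimal, hypothetical client-side sketch of how a GraphQL caller could supply the new argument; the endpoint path, job id, and metric name are assumptions for illustration, and the selection set is abbreviated:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Query uses the optional resolution argument added in api/schema.graphqls.
	query := `query ($id: ID!, $metrics: [String!], $scopes: [MetricScope!], $resolution: Int) {
  jobMetrics(id: $id, metrics: $metrics, scopes: $scopes, resolution: $resolution) {
    name
  }
}`

	payload, err := json.Marshal(map[string]interface{}{
		"query": query,
		"variables": map[string]interface{}{
			"id":         "1234",                // hypothetical job id
			"metrics":    []string{"flops_any"}, // hypothetical metric name
			"scopes":     []string{"node"},
			"resolution": 600, // omit (or send null) to fall back to the server-side default
		},
	})
	if err != nil {
		panic(err)
	}

	// The /query endpoint path is an assumption, not taken from this diff.
	resp, err := http.Post("http://localhost:8080/query", "application/json", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}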
diff --git a/internal/graph/generated/generated.go b/internal/graph/generated/generated.go
index 9ca0a60..23d31e7 100644
--- a/internal/graph/generated/generated.go
+++ b/internal/graph/generated/generated.go
@@ -246,7 +246,7 @@ type ComplexityRoot struct {
 		Clusters       func(childComplexity int) int
 		GlobalMetrics  func(childComplexity int) int
 		Job            func(childComplexity int, id string) int
-		JobMetrics     func(childComplexity int, id string, metrics []string, scopes []schema.MetricScope) int
+		JobMetrics     func(childComplexity int, id string, metrics []string, scopes []schema.MetricScope, resolution *int) int
 		Jobs           func(childComplexity int, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) int
 		JobsFootprints func(childComplexity int, filter []*model.JobFilter, metrics []string) int
 		JobsStatistics func(childComplexity int, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate) int
@@ -368,7 +368,7 @@ type QueryResolver interface {
 	User(ctx context.Context, username string) (*model.User, error)
 	AllocatedNodes(ctx context.Context, cluster string) ([]*model.Count, error)
 	Job(ctx context.Context, id string) (*schema.Job, error)
-	JobMetrics(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope) ([]*model.JobMetricWithName, error)
+	JobMetrics(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope, resolution *int) ([]*model.JobMetricWithName, error)
 	JobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) (*model.Footprints, error)
 	Jobs(ctx context.Context, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) (*model.JobResultList, error)
 	JobsStatistics(ctx context.Context, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate) ([]*model.JobsStatistics, error)
@@ -1290,7 +1290,7 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
 			return 0, false
 		}

-		return e.complexity.Query.JobMetrics(childComplexity, args["id"].(string), args["metrics"].([]string), args["scopes"].([]schema.MetricScope)), true
+		return e.complexity.Query.JobMetrics(childComplexity, args["id"].(string), args["metrics"].([]string), args["scopes"].([]schema.MetricScope), args["resolution"].(*int)), true

 	case "Query.jobs":
 		if e.complexity.Query.Jobs == nil {
 			return 0, false
 		}

@@ -2059,7 +2059,7 @@ type Query {
   allocatedNodes(cluster: String!): [Count!]!

   job(id: ID!): Job
-  jobMetrics(id: ID!, metrics: [String!], scopes: [MetricScope!]): [JobMetricWithName!]!
+  jobMetrics(id: ID!, metrics: [String!], scopes: [MetricScope!], resolution: Int): [JobMetricWithName!]!
   jobsFootprints(filter: [JobFilter!], metrics: [String!]!): Footprints

   jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList!

@@ -2370,6 +2370,15 @@ func (ec *executionContext) field_Query_jobMetrics_args(ctx context.Context, raw
 		}
 	}
 	args["scopes"] = arg2
+	var arg3 *int
+	if tmp, ok := rawArgs["resolution"]; ok {
+		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("resolution"))
+		arg3, err = ec.unmarshalOInt2ᚖint(ctx, tmp)
+		if err != nil {
+			return nil, err
+		}
+	}
+	args["resolution"] = arg3
 	return args, nil
 }

@@ -8499,7 +8508,7 @@ func (ec *executionContext) _Query_jobMetrics(ctx context.Context, field graphql
 	}()
 	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
 		ctx = rctx // use context from middleware stack in children
-		return ec.resolvers.Query().JobMetrics(rctx, fc.Args["id"].(string), fc.Args["metrics"].([]string), fc.Args["scopes"].([]schema.MetricScope))
+		return ec.resolvers.Query().JobMetrics(rctx, fc.Args["id"].(string), fc.Args["metrics"].([]string), fc.Args["scopes"].([]schema.MetricScope), fc.Args["resolution"].(*int))
 	})
 	if err != nil {
 		ec.Error(ctx, err)
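generated.go is gqlgen output: the optional Int argument becomes a *int, and the generated args helper always stores the (possibly nil) pointer under args["resolution"], which is why the .(*int) type assertions above cannot fail when a client omits the argument. A minimal standalone sketch of that behaviour, not part of the diff, with the unmarshalling simplified (gqlgen uses its own unmarshal helpers):

package main

import "fmt"

func main() {
	// Mirrors field_Query_jobMetrics_args: arg3 stays nil when the client
	// did not send "resolution", but the key is stored either way.
	rawArgs := map[string]interface{}{} // no "resolution" supplied
	args := map[string]interface{}{}

	var arg3 *int
	if tmp, ok := rawArgs["resolution"]; ok {
		v := tmp.(int) // simplified stand-in for the generated unmarshal call
		arg3 = &v
	}
	args["resolution"] = arg3

	res := args["resolution"].(*int) // assertion succeeds, res is a nil *int
	fmt.Println(res == nil)          // prints: true
}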
diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go
index f36e25a..9e7bd3d 100644
--- a/internal/graph/schema.resolvers.go
+++ b/internal/graph/schema.resolvers.go
@@ -224,13 +224,19 @@ func (r *queryResolver) Job(ctx context.Context, id string) (*schema.Job, error)
 }

 // JobMetrics is the resolver for the jobMetrics field.
-func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope) ([]*model.JobMetricWithName, error) {
+func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope, resolution *int) ([]*model.JobMetricWithName, error) {
+	defaultRes := 600
+	if resolution == nil {
+		resolution = &defaultRes
+	}
+
 	job, err := r.Query().Job(ctx, id)
 	if err != nil {
 		log.Warn("Error while querying job for metrics")
 		return nil, err
 	}

+	log.Debugf(">>>>> REQUEST DATA HERE FOR %v AT SCOPE %v WITH RESOLUTION OF %d", metrics, scopes, *resolution)
 	data, err := metricdata.LoadData(job, metrics, scopes, ctx)
 	if err != nil {
 		log.Warn("Error while loading job data")

diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte
index 31ca6e7..d183920 100644
--- a/web/frontend/src/Job.root.svelte
+++ b/web/frontend/src/Job.root.svelte
@@ -122,14 +122,14 @@
     variables: { dbid, selectedMetrics, selectedScopes },
   });

-  function loadAllScopes() {
-    selectedScopes = [...selectedScopes, "socket", "core"]
-    jobMetrics = queryStore({
-      client: client,
-      query: query,
-      variables: { dbid, selectedMetrics, selectedScopes},
-    });
-  }
+  // function loadAllScopes() {
+  //   selectedScopes = [...selectedScopes, "socket", "core"]
+  //   jobMetrics = queryStore({
+  //     client: client,
+  //     query: query,
+  //     variables: { dbid, selectedMetrics, selectedScopes},
+  //   });
+  // }

   // Handle Job Query on Init -> is not executed anymore
   getContext("on-init")(() => {
@@ -229,11 +229,6 @@
         $initq.data.job.subCluster,
       ),
     }));
-
-
-  const loadRes = ({ detail }) => {
-    console.log(">>> UPPER RES REQUEST", detail)
-  }
@@ -362,8 +357,6 @@
           {#if item.data}
             gm.name == item.metric)?.unit}

diff --git a/web/frontend/src/job/Metric.svelte b/web/frontend/src/job/Metric.svelte
index 8184e50..d57fcd6 100644
--- a/web/frontend/src/job/Metric.svelte
+++ b/web/frontend/src/job/Metric.svelte
@@ -13,7 +13,12 @@
 -->
@@ -69,13 +132,13 @@
   {metricName} ({unit})
@@ -87,14 +150,19 @@
   {/each}
 {/if}
-    {
+        scopes = ["node"]
+        selectedScope = "node"
+        selectedScopes = [...scopes]
+        loadUpdate
+      }}>
   {#each resolutions as res}
   {/each}
 {#key series}
-  {#if fetching == true}
+  {#if $metricData?.fetching == true}
   {:else if error != null}
     {error.message}
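Taken together with the backend changes above: the REST handler passes a nil resolution, the GraphQL layer forwards whatever the client sent (or nil), and the resolver falls back to 600, while Metric.svelte now iterates a list of selectable resolutions in the frontend. A small standalone sketch of that defaulting flow, with names simplified and not the actual call chain:

package main

import "fmt"

// effectiveResolution mimics the defaulting added to the JobMetrics resolver:
// a nil pointer (the REST path, or a GraphQL query omitting the argument) yields 600.
func effectiveResolution(resolution *int) int {
	defaultRes := 600
	if resolution == nil {
		resolution = &defaultRes
	}
	return *resolution
}

func main() {
	fmt.Println(effectiveResolution(nil)) // REST handler passes nil -> 600

	requested := 240                             // hypothetical value from a GraphQL caller
	fmt.Println(effectiveResolution(&requested)) // -> 240
}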