Optimize usage dashboard: partial indexes, request cache, parallel histograms

- Add migration 14: partial covering indexes WHERE job_state='running'
  for user/project/subcluster groupings (tiny B-tree vs full table)
- Inline the literal state value in BuildWhereClause so SQLite can match
  the partial indexes (parameterized placeholders prevent partial-index use)
- Add per-request statsGroupCache (sync.Once per filter+groupBy key)
  so identical grouped stats queries execute only once per GQL operation
- Parallelize 4 histogram queries in AddHistograms using errgroup
- Consolidate the frontend from 6 GQL aliases to 2; sort and slice the
  top 10 client-side via $derived

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
Entire-Checkpoint: 5b26a6e5ff10
This commit is contained in:
2026-03-13 14:31:37 +01:00
parent cbe46c3524
commit d586fe4b43
10 changed files with 277 additions and 87 deletions

View File

@@ -673,7 +673,20 @@ func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobF
stats, err = r.Repo.JobsStats(ctx, filter, reqFields)
} else {
startGrouped := time.Now()
stats, err = r.Repo.JobsStatsGrouped(ctx, filter, page, sortBy, groupBy, reqFields)
// Use request-scoped cache: multiple aliases with same (filter, groupBy)
// but different sortBy/page hit the DB only once.
if cache := getStatsGroupCache(ctx); cache != nil {
key := statsCacheKey(filter, groupBy)
var allStats []*model.JobsStatistics
allStats, err = cache.getOrCompute(key, func() ([]*model.JobsStatistics, error) {
return r.Repo.JobsStatsGrouped(ctx, filter, nil, nil, groupBy, nil)
})
if err == nil {
stats = sortAndPageStats(allStats, sortBy, page)
}
} else {
stats, err = r.Repo.JobsStatsGrouped(ctx, filter, page, sortBy, groupBy, reqFields)
}
cclog.Infof("Timer JobsStatsGrouped call: %s", time.Since(startGrouped))
}
} else {