fix: Add defer rows.Close() for all queries

This commit is contained in:
2026-03-11 05:04:20 +01:00
parent f9aa47ea1c
commit 5d3d77620e
12 changed files with 709 additions and 35 deletions

17
.entire/logs/entire.log Normal file
View File

@@ -0,0 +1,17 @@
{"time":"2026-03-10T21:13:26.857514+01:00","level":"INFO","msg":"session-start","component":"lifecycle","agent":"claude-code","event":"SessionStart","session_id":"28147033-ddc8-4056-b064-e0558fbc614e","session_ref":"/Users/jan/.claude/projects/-Users-jan-prg-CC-cc-backend/28147033-ddc8-4056-b064-e0558fbc614e.jsonl"}
{"time":"2026-03-10T21:15:58.745665+01:00","level":"INFO","msg":"turn-start","component":"lifecycle","agent":"claude-code","event":"TurnStart","session_id":"28147033-ddc8-4056-b064-e0558fbc614e","session_ref":"/Users/jan/.claude/projects/-Users-jan-prg-CC-cc-backend/28147033-ddc8-4056-b064-e0558fbc614e.jsonl"}
{"time":"2026-03-10T21:15:59.052384+01:00","level":"INFO","msg":"phase transition","component":"session","agent":"claude-code","session_id":"28147033-ddc8-4056-b064-e0558fbc614e","event":"TurnStart","from":"","to":"active"}
{"time":"2026-03-10T21:15:59.247089+01:00","level":"INFO","msg":"initialized shadow session","component":"hooks","agent":"claude-code","session_id":"28147033-ddc8-4056-b064-e0558fbc614e"}
{"time":"2026-03-10T21:16:11.273515+01:00","level":"INFO","msg":"subagent started","session_id":"28147033-ddc8-4056-b064-e0558fbc614e","component":"lifecycle","agent":"claude-code","event":"SubagentStart","session_id":"28147033-ddc8-4056-b064-e0558fbc614e","tool_use_id":"toolu_019bWLMJ1nzVAT8Fg2W5qRmi","transcript":"/Users/jan/.claude/projects/-Users-jan-prg-CC-cc-backend/28147033-ddc8-4056-b064-e0558fbc614e.jsonl"}
{"time":"2026-03-10T21:17:50.542621+01:00","level":"INFO","msg":"subagent completed","session_id":"28147033-ddc8-4056-b064-e0558fbc614e","component":"lifecycle","agent":"claude-code","event":"SubagentEnd","session_id":"28147033-ddc8-4056-b064-e0558fbc614e","tool_use_id":"toolu_019bWLMJ1nzVAT8Fg2W5qRmi","agent_id":"a03483f2df2e8cef0"}
{"time":"2026-03-10T21:17:50.755441+01:00","level":"INFO","msg":"no file changes detected, skipping task checkpoint","session_id":"28147033-ddc8-4056-b064-e0558fbc614e","component":"lifecycle","agent":"claude-code"}
{"time":"2026-03-10T21:28:03.798482+01:00","level":"INFO","msg":"turn-end","session_id":"28147033-ddc8-4056-b064-e0558fbc614e","component":"lifecycle","agent":"claude-code","event":"TurnEnd","session_id":"28147033-ddc8-4056-b064-e0558fbc614e","session_ref":"/Users/jan/.claude/projects/-Users-jan-prg-CC-cc-backend/28147033-ddc8-4056-b064-e0558fbc614e.jsonl"}
{"time":"2026-03-10T21:28:04.579301+01:00","level":"INFO","msg":"created shadow branch and committed changes","session_id":"28147033-ddc8-4056-b064-e0558fbc614e","component":"checkpoint","agent":"claude-code","shadow_branch":"entire/70fea39-e3b0c4"}
{"time":"2026-03-10T21:28:04.579308+01:00","level":"INFO","msg":"checkpoint saved","session_id":"28147033-ddc8-4056-b064-e0558fbc614e","component":"checkpoint","agent":"claude-code","strategy":"manual-commit","checkpoint_type":"session","checkpoint_count":1,"modified_files":3,"new_files":0,"deleted_files":0,"shadow_branch":"entire/70fea39-e3b0c4","branch_created":true}
{"time":"2026-03-10T21:28:04.579367+01:00","level":"INFO","msg":"phase transition","session_id":"28147033-ddc8-4056-b064-e0558fbc614e","component":"session","agent":"claude-code","session_id":"28147033-ddc8-4056-b064-e0558fbc614e","event":"TurnEnd","from":"active","to":"idle"}
{"time":"2026-03-10T21:28:04.656228+01:00","level":"INFO","msg":"session-end","session_id":"28147033-ddc8-4056-b064-e0558fbc614e","component":"lifecycle","agent":"claude-code","event":"SessionEnd","session_id":"28147033-ddc8-4056-b064-e0558fbc614e"}
{"time":"2026-03-10T21:28:04.656276+01:00","level":"INFO","msg":"phase transition","session_id":"28147033-ddc8-4056-b064-e0558fbc614e","component":"session","agent":"claude-code","session_id":"28147033-ddc8-4056-b064-e0558fbc614e","event":"SessionStop","from":"idle","to":"ended"}
{"time":"2026-03-10T21:28:04.7209+01:00","level":"INFO","msg":"session-start","session_id":"28147033-ddc8-4056-b064-e0558fbc614e","component":"lifecycle","agent":"claude-code","event":"SessionStart","session_id":"42401d2e-7d1c-4c0e-abe6-356cb2d48747","session_ref":"/Users/jan/.claude/projects/-Users-jan-prg-CC-cc-backend/42401d2e-7d1c-4c0e-abe6-356cb2d48747.jsonl"}
{"time":"2026-03-10T21:28:44.262904+01:00","level":"INFO","msg":"turn-end","session_id":"28147033-ddc8-4056-b064-e0558fbc614e","component":"lifecycle","agent":"claude-code","event":"TurnEnd","session_id":"42401d2e-7d1c-4c0e-abe6-356cb2d48747","session_ref":"/Users/jan/.claude/projects/-Users-jan-prg-CC-cc-backend/42401d2e-7d1c-4c0e-abe6-356cb2d48747.jsonl"}
{"time":"2026-03-10T21:28:44.697919+01:00","level":"INFO","msg":"committed changes to shadow branch","session_id":"28147033-ddc8-4056-b064-e0558fbc614e","component":"checkpoint","agent":"claude-code","shadow_branch":"entire/70fea39-e3b0c4"}
{"time":"2026-03-10T21:28:44.697926+01:00","level":"INFO","msg":"checkpoint saved","session_id":"28147033-ddc8-4056-b064-e0558fbc614e","component":"checkpoint","agent":"claude-code","strategy":"manual-commit","checkpoint_type":"session","checkpoint_count":1,"modified_files":3,"new_files":1,"deleted_files":0,"shadow_branch":"entire/70fea39-e3b0c4","branch_created":false}

View File

@@ -0,0 +1,29 @@
# Session Context
Session ID: 28147033-ddc8-4056-b064-e0558fbc614e
Commit Message: There are memory leaks in production on queries that trigger a full scan
## Prompts
### Prompt 1
There are memory leaks in production on queries that trigger a full scan. Review all queries for missing close and other bugs that could cause memory leaks in the sqlite-3 driver. Example for a query that triggers a massive allocation is a specific jobstate list with sorting on start_time on all jobs.
## Summary
All 31 `.Query()` calls now have proper `rows.Close()`. Build passes, tests pass, vet clean.
### Summary of changes
**`internal/repository/stats.go`** (7 fixes + 1 rewrite):
- Added `defer rows.Close()` + `rows.Err()` to `JobsStatsGrouped`, `JobCountGrouped`, `AddJobCountGrouped`, `jobsStatisticsHistogram`, `jobsDurationStatisticsHistogram`, `jobsMetricStatisticsHistogram`
- Rewrote `AddJobCount` to use `QueryRow()` instead of `Query()` (single-row result)
**`internal/repository/tags.go`** (5 fixes):
- Added `defer xrows.Close()` + `xrows.Err()` for `Queryx` in `CountTags`
- Added `defer rows.Close()` for second query in `CountTags`
- Added `defer rows.Close()` + `rows.Err()` to `GetTags`, `GetTagsDirect`, `getArchiveTags`
**`internal/repository/node.go`** (4 fixes + 1 rewrite):
- Added `defer rows.Close()` + `rows.Err()`, removed manual `rows.Close()` in `QueryNodes`, `QueryNodesWithMeta`, `CountStatesTimed`
- Rewrote `CountStates` to use `COUNT(*) ... GROUP BY` in SQL instead of fetching every row and counting in Go

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1 @@
There are memory leaks in production on queries that trigger a full scan. Review all queries for missing close and other bugs that could cause memory leaks in the sqlite-3 driver. Example for a query that triggers a massive allocation is a specific jobstate list with sorting on start_time on all jobs.

View File

@@ -0,0 +1,16 @@
All 31 `.Query()` calls now have proper `rows.Close()`. Build passes, tests pass, vet clean.
### Summary of changes
**`internal/repository/stats.go`** (7 fixes + 1 rewrite):
- Added `defer rows.Close()` + `rows.Err()` to `JobsStatsGrouped`, `JobCountGrouped`, `AddJobCountGrouped`, `jobsStatisticsHistogram`, `jobsDurationStatisticsHistogram`, `jobsMetricStatisticsHistogram`
- Rewrote `AddJobCount` to use `QueryRow()` instead of `Query()` (single-row result)
**`internal/repository/tags.go`** (5 fixes):
- Added `defer xrows.Close()` + `xrows.Err()` for `Queryx` in `CountTags`
- Added `defer rows.Close()` for second query in `CountTags`
- Added `defer rows.Close()` + `rows.Err()` to `GetTags`, `GetTagsDirect`, `getArchiveTags`
**`internal/repository/node.go`** (4 fixes + 1 rewrite):
- Added `defer rows.Close()` + `rows.Err()`, removed manual `rows.Close()` in `QueryNodes`, `QueryNodesWithMeta`, `CountStatesTimed`
- Rewrote `CountStates` to use `COUNT(*) ... GROUP BY` in SQL instead of fetching every row and counting in Go

View File

@@ -0,0 +1,114 @@
# Session Context
Session ID: 42401d2e-7d1c-4c0e-abe6-356cb2d48747
Commit Message: Implement the following plan:
# Fix Missing `rows.Close()` Memory Leaks
## Prompts
### Prompt 1
Implement the following plan:
# Fix Missing `rows.Close()` Memory Leaks in SQLite3 Queries
## Context
Production memory leaks traced to queries that do full table scans (e.g., job state list sorted by `start_time` on all jobs). The root cause is `sql.Rows` objects not being closed after query execution. In Go's `database/sql`, every `rows` returned by `.Query()` holds a database connection and associated buffers until `rows.Close()` is called. Without `defer rows.Close()`, these leak on every code path (both success and error returns).
## Findings
**22 total `.Query()` calls** across the repository layer. **15 have `defer rows.Close()`**. **7 do not** (listed below). Additionally, 1 `Queryx` call in `tags.go` is also missing close.
In `node.go`, `QueryNodes` and `QueryNodesWithMeta` have partial `rows.Close()` only in error paths but **not on the success path** and not via `defer`.
`CountStates` and `CountStatesTimed` in `node.go` also lack `defer rows.Close()` (same partial pattern as above for CountStates, none at all for CountStatesTimed).
## Changes Required
### 1. `internal/repository/stats.go` — 7 functions missing `defer rows.Close()`
Add `defer rows.Close()` immediately after the `if err != nil` check for each:
| Line | Function |
|------|----------|
| 233 | `JobsStatsGrouped` |
| 438 | `JobCountGrouped` |
| 494 | `AddJobCountGrouped` |
| 553 | `AddJobCount` |
| 753 | `jobsStatisticsHistogram` |
| 821 | `jobsDurationStatisticsHistogram` |
| 946 | `jobsMetricStatisticsHistogram` |
Pattern — after each `Query()` error check, add:
```go
rows, err := query.RunWith(r.DB).Query()
if err != nil {
...
return nil, err
}
defer rows.Close() // <-- ADD THIS
```
### 2. `internal/repository/tags.go` — 2 leaks in `CountTags()`
**Line 282**: `xrows` from `r.DB.Queryx(...)` — add `defer xrows.Close()` after error check.
**Line 333**: `rows` from `q.RunWith(r.stmtCache).Query()` — add `defer rows.Close()` after error check.
### 3. `internal/repository/tags.go` — 3 leaks in `GetTags`, `GetTagsDirect`, `getArchiveTags`
**Line 508** (`GetTags`): add `defer rows.Close()` after error check.
**Line 541** (`GetTagsDirect`): add `defer rows.Close()` after error check.
**Line 579** (`getArchiveTags`): add `defer rows.Close()` after error check.
### 4. `internal/repository/node.go` — 4 functions missing `defer rows.Close()`
**Line 363** (`QueryNodes`): Replace the manual `rows.Close()` in the error path with `defer rows.Close()` immediately after the error check. Remove the explicit `rows.Close()` call on line 375.
**Line 412** (`QueryNodesWithMeta`): Same pattern — add `defer rows.Close()` after error check, remove explicit `rows.Close()` on line 427.
**Line 558** (`CountStates`): Add `defer rows.Close()` after error check. Remove explicit `rows.Close()` on line 569.
**Line 620** (`CountStatesTimed`): Add `defer rows.Close()` after error check. Remove explicit `rows.Close()` on line 633.
## Summary of All Edits
| File | Function | Action |
|------|----------|--------|
| `stats.go:237` | `JobsStatsGrouped` | Add `defer rows.Close()` |
| `stats.go:442` | `JobCountGrouped` | Add `defer rows.Close()` |
| `stats.go:498` | `AddJobCountGrouped` | Add `defer rows.Close()` |
| `stats.go:557` | `AddJobCount` | Add `defer rows.Close()` |
| `stats.go:757` | `jobsStatisticsHistogram` | Add `defer rows.Close()` |
| `stats.go:825` | `jobsDurationStatisticsHistogram` | Add `defer rows.Close()` |
| `stats.go:950` | `jobsMetricStatisticsHistogram` | Add `defer rows.Close()` |
| `tags.go:284` | `CountTags` (xrows) | Add `defer xrows.Close()` |
| `tags.go:336` | `CountTags` (rows) | Add `defer rows.Close()` |
| `tags.go:513` | `GetTags` | Add `defer rows.Close()` |
| `tags.go:546` | `GetTagsDirect` | Add `defer rows.Close()` |
| `tags.go:584` | `getArchiveTags` | Add `defer rows.Close()` |
| `node.go:368` | `QueryNodes` | Add `defer rows.Close()`, remove manual close on L375 |
| `node.go:417` | `QueryNodesWithMeta` | Add `defer rows.Close()`, remove manual close on L427 |
| `node.go:563` | `CountStates` | Add `defer rows.Close()`, remove manual close on L569 |
| `node.go:625` | `CountStatesTimed` | Add `defer rows.Close()`, remove manual close on L633 |
## Verification
1. `go build ./...` — ensure it compiles
2. `go test ./internal/repository/...` — run repository tests
3. `go vet ./internal/repository/...` — static analysis
If you need specific details from before exiting plan mode (like exact code snippets, error messages, or content you generated), read the full transcript at: /Users/jan/.claude/projects/-Users-jan-prg-CC-cc-backend/28147033-ddc8-4056-b064-e0558fbc614e.jsonl
## Summary
All changes are already in place and verified:
- **Build**: passes
- **go vet**: passes
- **Tests**: pass
The changes are currently unstaged. All 16 locations from the plan have been fixed — `defer rows.Close()` added after each `Query()` error check, manual `rows.Close()` calls in error paths removed, and `rows.Err()` checks added where missing. The `AddJobCount` function was also simplified from `Query` + loop to `QueryRow().Scan()` since it returns a single value.

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,93 @@
Implement the following plan:
# Fix Missing `rows.Close()` Memory Leaks in SQLite3 Queries
## Context
Production memory leaks traced to queries that do full table scans (e.g., job state list sorted by `start_time` on all jobs). The root cause is `sql.Rows` objects not being closed after query execution. In Go's `database/sql`, every `rows` returned by `.Query()` holds a database connection and associated buffers until `rows.Close()` is called. Without `defer rows.Close()`, these leak on every code path (both success and error returns).
## Findings
**22 total `.Query()` calls** across the repository layer. **15 have `defer rows.Close()`**. **7 do not** (listed below). Additionally, 1 `Queryx` call in `tags.go` is also missing close.
In `node.go`, `QueryNodes` and `QueryNodesWithMeta` have partial `rows.Close()` only in error paths but **not on the success path** and not via `defer`.
`CountStates` and `CountStatesTimed` in `node.go` also lack `defer rows.Close()` (same partial pattern as above for CountStates, none at all for CountStatesTimed).
## Changes Required
### 1. `internal/repository/stats.go` — 7 functions missing `defer rows.Close()`
Add `defer rows.Close()` immediately after the `if err != nil` check for each:
| Line | Function |
|------|----------|
| 233 | `JobsStatsGrouped` |
| 438 | `JobCountGrouped` |
| 494 | `AddJobCountGrouped` |
| 553 | `AddJobCount` |
| 753 | `jobsStatisticsHistogram` |
| 821 | `jobsDurationStatisticsHistogram` |
| 946 | `jobsMetricStatisticsHistogram` |
Pattern — after each `Query()` error check, add:
```go
rows, err := query.RunWith(r.DB).Query()
if err != nil {
...
return nil, err
}
defer rows.Close() // <-- ADD THIS
```
### 2. `internal/repository/tags.go` — 2 leaks in `CountTags()`
**Line 282**: `xrows` from `r.DB.Queryx(...)` — add `defer xrows.Close()` after error check.
**Line 333**: `rows` from `q.RunWith(r.stmtCache).Query()` — add `defer rows.Close()` after error check.
### 3. `internal/repository/tags.go` — 3 leaks in `GetTags`, `GetTagsDirect`, `getArchiveTags`
**Line 508** (`GetTags`): add `defer rows.Close()` after error check.
**Line 541** (`GetTagsDirect`): add `defer rows.Close()` after error check.
**Line 579** (`getArchiveTags`): add `defer rows.Close()` after error check.
### 4. `internal/repository/node.go` — 4 functions missing `defer rows.Close()`
**Line 363** (`QueryNodes`): Replace the manual `rows.Close()` in the error path with `defer rows.Close()` immediately after the error check. Remove the explicit `rows.Close()` call on line 375.
**Line 412** (`QueryNodesWithMeta`): Same pattern — add `defer rows.Close()` after error check, remove explicit `rows.Close()` on line 427.
**Line 558** (`CountStates`): Add `defer rows.Close()` after error check. Remove explicit `rows.Close()` on line 569.
**Line 620** (`CountStatesTimed`): Add `defer rows.Close()` after error check. Remove explicit `rows.Close()` on line 633.
## Summary of All Edits
| File | Function | Action |
|------|----------|--------|
| `stats.go:237` | `JobsStatsGrouped` | Add `defer rows.Close()` |
| `stats.go:442` | `JobCountGrouped` | Add `defer rows.Close()` |
| `stats.go:498` | `AddJobCountGrouped` | Add `defer rows.Close()` |
| `stats.go:557` | `AddJobCount` | Add `defer rows.Close()` |
| `stats.go:757` | `jobsStatisticsHistogram` | Add `defer rows.Close()` |
| `stats.go:825` | `jobsDurationStatisticsHistogram` | Add `defer rows.Close()` |
| `stats.go:950` | `jobsMetricStatisticsHistogram` | Add `defer rows.Close()` |
| `tags.go:284` | `CountTags` (xrows) | Add `defer xrows.Close()` |
| `tags.go:336` | `CountTags` (rows) | Add `defer rows.Close()` |
| `tags.go:513` | `GetTags` | Add `defer rows.Close()` |
| `tags.go:546` | `GetTagsDirect` | Add `defer rows.Close()` |
| `tags.go:584` | `getArchiveTags` | Add `defer rows.Close()` |
| `node.go:368` | `QueryNodes` | Add `defer rows.Close()`, remove manual close on L375 |
| `node.go:417` | `QueryNodesWithMeta` | Add `defer rows.Close()`, remove manual close on L427 |
| `node.go:563` | `CountStates` | Add `defer rows.Close()`, remove manual close on L569 |
| `node.go:625` | `CountStatesTimed` | Add `defer rows.Close()`, remove manual close on L633 |
## Verification
1. `go build ./...` — ensure it compiles
2. `go test ./internal/repository/...` — run repository tests
3. `go vet ./internal/repository/...` — static analysis
If you need specific details from before exiting plan mode (like exact code snippets, error messages, or content you generated), read the full transcript at: /Users/jan/.claude/projects/-Users-jan-prg-CC-cc-backend/28147033-ddc8-4056-b064-e0558fbc614e.jsonl

View File

@@ -0,0 +1,7 @@
All changes are already in place and verified:
- **Build**: passes
- **go vet**: passes
- **Tests**: pass
The changes are currently unstaged. All 16 locations from the plan have been fixed — `defer rows.Close()` added after each `Query()` error check, manual `rows.Close()` calls in error paths removed, and `rows.Err()` checks added where missing. The `AddJobCount` function was also simplified from `Query` + loop to `QueryRow().Scan()` since it returns a single value.

View File

@@ -240,7 +240,6 @@ func (r *NodeRepository) QueryNodes(
page *model.PageRequest,
order *model.OrderByInput, // Currently unused!
) ([]*schema.Node, error) {
query, qerr := AccessCheck(ctx,
sq.Select("hostname", "cluster", "subcluster", "node_state", "health_state", "MAX(time_stamp) as time").
From("node").
@@ -286,6 +285,7 @@ func (r *NodeRepository) QueryNodes(
cclog.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err)
return nil, err
}
defer rows.Close()
nodes := make([]*schema.Node, 0)
for rows.Next() {
@@ -300,6 +300,89 @@ func (r *NodeRepository) QueryNodes(
nodes = append(nodes, &node)
}
if err := rows.Err(); err != nil {
return nil, err
}
return nodes, nil
}
// QueryNodesWithMeta returns a list of nodes based on a node filter. It always operates
// on the last state (largest timestamp). It includes both (!) optional JSON column data
func (r *NodeRepository) QueryNodesWithMeta(
ctx context.Context,
filters []*model.NodeFilter,
page *model.PageRequest,
order *model.OrderByInput, // Currently unused!
) ([]*schema.Node, error) {
query, qerr := AccessCheck(ctx,
sq.Select("node.hostname", "node.cluster", "node.subcluster",
"node_state.node_state", "node_state.health_state",
"node.meta_data", "node_state.health_metrics").
From("node").
Join("node_state ON node_state.node_id = node.id").
Where(latestStateCondition()))
if qerr != nil {
return nil, qerr
}
query = applyNodeFilters(query, filters)
query = query.OrderBy("node.hostname ASC")
if page != nil && page.ItemsPerPage != -1 {
limit := uint64(page.ItemsPerPage)
query = query.Offset((uint64(page.Page) - 1) * limit).Limit(limit)
}
rows, err := query.RunWith(r.stmtCache).Query()
if err != nil {
queryString, queryVars, _ := query.ToSql()
cclog.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err)
return nil, err
}
defer rows.Close()
nodes := make([]*schema.Node, 0)
for rows.Next() {
node := schema.Node{}
RawMetaData := make([]byte, 0)
RawMetricHealth := make([]byte, 0)
if err := rows.Scan(&node.Hostname, &node.Cluster, &node.SubCluster,
&node.NodeState, &node.HealthState, &RawMetaData, &RawMetricHealth); err != nil {
cclog.Warn("Error while scanning rows (QueryNodes)")
return nil, err
}
if len(RawMetaData) == 0 {
node.MetaData = nil
} else {
metaData := make(map[string]string)
if err := json.Unmarshal(RawMetaData, &metaData); err != nil {
cclog.Warn("Error while unmarshaling raw metadata json")
return nil, err
}
node.MetaData = metaData
}
if len(RawMetricHealth) == 0 {
node.HealthData = nil
} else {
healthData := make(map[string][]string)
if err := json.Unmarshal(RawMetricHealth, &healthData); err != nil {
cclog.Warn("Error while unmarshaling raw healthdata json")
return nil, err
}
node.HealthData = healthData
}
nodes = append(nodes, &node)
}
if err := rows.Err(); err != nil {
return nil, err
}
return nodes, nil
}
@@ -309,7 +392,6 @@ func (r *NodeRepository) CountNodes(
ctx context.Context,
filters []*model.NodeFilter,
) (int, error) {
query, qerr := AccessCheck(ctx,
sq.Select("time_stamp", "count(*) as countRes").
From("node").
@@ -426,7 +508,12 @@ func (r *NodeRepository) MapNodes(cluster string) (map[string]string, error) {
}
func (r *NodeRepository) CountStates(ctx context.Context, filters []*model.NodeFilter, column string) ([]*model.NodeStates, error) {
query, qerr := AccessCheck(ctx, sq.Select("hostname", column, "MAX(time_stamp) as time").From("node"))
query, qerr := AccessCheck(ctx,
sq.Select(column, "COUNT(*) as count").
From("node").
Join("node_state ON node_state.node_id = node.id").
Where(latestStateCondition()).
GroupBy(column))
if qerr != nil {
return nil, qerr
}
@@ -460,25 +547,21 @@ func (r *NodeRepository) CountStates(ctx context.Context, filters []*model.NodeF
cclog.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err)
return nil, err
}
stateMap := map[string]int{}
for rows.Next() {
var hostname, state string
var timestamp int
if err := rows.Scan(&hostname, &state, &timestamp); err != nil {
rows.Close()
cclog.Warnf("Error while scanning rows (CountStates) at time '%d'", timestamp)
return nil, err
}
stateMap[state] += 1
}
defer rows.Close()
nodes := make([]*model.NodeStates, 0)
for state, counts := range stateMap {
node := model.NodeStates{State: state, Count: counts}
nodes = append(nodes, &node)
for rows.Next() {
var state string
var count int
if err := rows.Scan(&state, &count); err != nil {
cclog.Warn("Error while scanning rows (CountStates)")
return nil, err
}
nodes = append(nodes, &model.NodeStates{State: state, Count: count})
}
if err := rows.Err(); err != nil {
return nil, err
}
return nodes, nil
@@ -524,6 +607,7 @@ func (r *NodeRepository) CountStatesTimed(ctx context.Context, filters []*model.
cclog.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err)
return nil, err
}
defer rows.Close()
rawData := make(map[string][][]int)
for rows.Next() {
@@ -531,7 +615,6 @@ func (r *NodeRepository) CountStatesTimed(ctx context.Context, filters []*model.
var timestamp, count int
if err := rows.Scan(&state, &timestamp, &count); err != nil {
rows.Close()
cclog.Warnf("Error while scanning rows (CountStatesTimed) at time '%d'", timestamp)
return nil, err
}
@@ -544,6 +627,10 @@ func (r *NodeRepository) CountStatesTimed(ctx context.Context, filters []*model.
rawData[state][1] = append(rawData[state][1], count)
}
if err := rows.Err(); err != nil {
return nil, err
}
timedStates := make([]*model.NodeStatesTimed, 0)
for state, data := range rawData {
entry := model.NodeStatesTimed{State: state, Times: data[0], Counts: data[1]}

View File

@@ -159,6 +159,7 @@ func (r *JobRepository) JobsStatsGrouped(
cclog.Warn("Error while querying DB for job statistics")
return nil, err
}
defer rows.Close()
stats := make([]*model.JobsStatistics, 0, 100)
@@ -244,6 +245,10 @@ func (r *JobRepository) JobsStatsGrouped(
}
}
if err := rows.Err(); err != nil {
return nil, err
}
cclog.Debugf("Timer JobsStatsGrouped %s", time.Since(start))
return stats, nil
}
@@ -329,6 +334,7 @@ func (r *JobRepository) JobCountGrouped(
cclog.Warn("Error while querying DB for job statistics")
return nil, err
}
defer rows.Close()
stats := make([]*model.JobsStatistics, 0, 100)
@@ -348,6 +354,10 @@ func (r *JobRepository) JobCountGrouped(
}
}
if err := rows.Err(); err != nil {
return nil, err
}
cclog.Debugf("Timer JobCountGrouped %s", time.Since(start))
return stats, nil
}
@@ -371,6 +381,7 @@ func (r *JobRepository) AddJobCountGrouped(
cclog.Warn("Error while querying DB for job statistics")
return nil, err
}
defer rows.Close()
counts := make(map[string]int)
@@ -386,6 +397,10 @@ func (r *JobRepository) AddJobCountGrouped(
}
}
if err := rows.Err(); err != nil {
return nil, err
}
switch kind {
case "running":
for _, s := range stats {
@@ -413,23 +428,13 @@ func (r *JobRepository) AddJobCount(
if err != nil {
return nil, err
}
rows, err := query.RunWith(r.DB).Query()
if err != nil {
cclog.Warn("Error while querying DB for job statistics")
return nil, err
}
var count int
for rows.Next() {
var cnt sql.NullInt64
if err := rows.Scan(&cnt); err != nil {
cclog.Warn("Error while scanning rows")
if err := query.RunWith(r.DB).QueryRow().Scan(&cnt); err != nil {
cclog.Warn("Error while querying DB for job count")
return nil, err
}
count = int(cnt.Int64)
}
count := int(cnt.Int64)
switch kind {
case "running":
@@ -567,6 +572,7 @@ func (r *JobRepository) jobsStatisticsHistogram(
cclog.Error("Error while running query")
return nil, err
}
defer rows.Close()
points := make([]*model.HistoPoint, 0)
// is it possible to introduce zero values here? requires info about bincount
@@ -579,6 +585,11 @@ func (r *JobRepository) jobsStatisticsHistogram(
points = append(points, &point)
}
if err := rows.Err(); err != nil {
return nil, err
}
cclog.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start))
return points, nil
}
@@ -614,6 +625,7 @@ func (r *JobRepository) jobsDurationStatisticsHistogram(
cclog.Error("Error while running query")
return nil, err
}
defer rows.Close()
// Fill Array at matching $Value
for rows.Next() {
@@ -634,6 +646,10 @@ func (r *JobRepository) jobsDurationStatisticsHistogram(
}
}
if err := rows.Err(); err != nil {
return nil, err
}
cclog.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start))
return points, nil
}
@@ -716,6 +732,7 @@ func (r *JobRepository) jobsMetricStatisticsHistogram(
cclog.Errorf("Error while running mainQuery: %s", err)
return nil, err
}
defer rows.Close()
// Setup Return Array With Bin-Numbers for Match and Min/Max based on Peak
points := make([]*model.MetricHistoPoint, 0)
@@ -751,6 +768,10 @@ func (r *JobRepository) jobsMetricStatisticsHistogram(
}
}
if err := rows.Err(); err != nil {
return nil, err
}
result := model.MetricHistoPoints{Metric: metric, Unit: unit, Stat: &footprintStat, Data: points}
cclog.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start))

View File

@@ -205,6 +205,7 @@ func (r *JobRepository) CountTags(user *schema.User) (tags []schema.Tag, counts
if err != nil {
return nil, nil, err
}
defer xrows.Close()
for xrows.Next() {
var t schema.Tag
@@ -222,6 +223,10 @@ func (r *JobRepository) CountTags(user *schema.User) (tags []schema.Tag, counts
}
}
if err := xrows.Err(); err != nil {
return nil, nil, err
}
// Query and Count Jobs with attached Tags
q := sq.Select("t.tag_name, t.id, count(jt.tag_id)").
From("tag t").
@@ -256,6 +261,7 @@ func (r *JobRepository) CountTags(user *schema.User) (tags []schema.Tag, counts
if err != nil {
return nil, nil, err
}
defer rows.Close()
counts = make(map[string]int)
for rows.Next() {
@@ -405,6 +411,7 @@ func (r *JobRepository) GetTags(user *schema.User, job *int64) ([]*schema.Tag, e
cclog.Errorf("Error get tags with %s: %v", s, err)
return nil, err
}
defer rows.Close()
tags := make([]*schema.Tag, 0)
for rows.Next() {
@@ -423,6 +430,10 @@ func (r *JobRepository) GetTags(user *schema.User, job *int64) ([]*schema.Tag, e
}
}
if err := rows.Err(); err != nil {
return nil, err
}
return tags, nil
}
@@ -438,6 +449,7 @@ func (r *JobRepository) GetTagsDirect(job *int64) ([]*schema.Tag, error) {
cclog.Errorf("Error get tags with %s: %v", s, err)
return nil, err
}
defer rows.Close()
tags := make([]*schema.Tag, 0)
for rows.Next() {
@@ -449,6 +461,10 @@ func (r *JobRepository) GetTagsDirect(job *int64) ([]*schema.Tag, error) {
tags = append(tags, tag)
}
if err := rows.Err(); err != nil {
return nil, err
}
return tags, nil
}
@@ -465,6 +481,7 @@ func (r *JobRepository) getArchiveTags(job *int64) ([]*schema.Tag, error) {
cclog.Errorf("Error get tags with %s: %v", s, err)
return nil, err
}
defer rows.Close()
tags := make([]*schema.Tag, 0)
for rows.Next() {
@@ -476,6 +493,10 @@ func (r *JobRepository) getArchiveTags(job *int64) ([]*schema.Tag, error) {
tags = append(tags, tag)
}
if err := rows.Err(); err != nil {
return nil, err
}
return tags, nil
}