mirror of
				https://github.com/ClusterCockpit/cc-backend
				synced 2025-10-31 16:05:06 +01:00 
			
		
		
		
	Merge branch 'dev' of github.com:ClusterCockpit/cc-backend into dev
This commit is contained in:
		| @@ -820,22 +820,12 @@ func (r *Resolver) Query() generated.QueryResolver { return &queryResolver{r} } | ||||
| // SubCluster returns generated.SubClusterResolver implementation. | ||||
| func (r *Resolver) SubCluster() generated.SubClusterResolver { return &subClusterResolver{r} } | ||||
|  | ||||
| type clusterResolver struct{ *Resolver } | ||||
| type jobResolver struct{ *Resolver } | ||||
| type metricValueResolver struct{ *Resolver } | ||||
| type mutationResolver struct{ *Resolver } | ||||
| type nodeResolver struct{ *Resolver } | ||||
| type queryResolver struct{ *Resolver } | ||||
| type subClusterResolver struct{ *Resolver } | ||||
|  | ||||
| // !!! WARNING !!! | ||||
| // The code below was going to be deleted when updating resolvers. It has been copied here so you have | ||||
| // one last chance to move it out of harms way if you want. There are two reasons this happens: | ||||
| //  - When renaming or deleting a resolver the old code will be put in here. You can safely delete | ||||
| //    it when you're done. | ||||
| //  - You have helper methods in this file. Move them out to keep these resolver files clean. | ||||
| /* | ||||
| 	func (r *nodeResolver) NodeState(ctx context.Context, obj *model.Node) (string, error) { | ||||
| 	panic(fmt.Errorf("not implemented: NodeState - nodeState")) | ||||
| } | ||||
| */ | ||||
| type ( | ||||
| 	clusterResolver     struct{ *Resolver } | ||||
| 	jobResolver         struct{ *Resolver } | ||||
| 	metricValueResolver struct{ *Resolver } | ||||
| 	mutationResolver    struct{ *Resolver } | ||||
| 	nodeResolver        struct{ *Resolver } | ||||
| 	queryResolver       struct{ *Resolver } | ||||
| 	subClusterResolver  struct{ *Resolver } | ||||
| ) | ||||
|   | ||||
| @@ -27,10 +27,105 @@ CREATE TABLE "node_state" ( | ||||
|     FOREIGN KEY (node_id) REFERENCES node (id) | ||||
| ); | ||||
|  | ||||
| -- Update existing job table indices for renamed columns | ||||
| -- DROP indices using old column name "cluster" | ||||
| DROP INDEX IF EXISTS jobs_cluster; | ||||
| DROP INDEX IF EXISTS jobs_cluster_user; | ||||
| DROP INDEX IF EXISTS jobs_cluster_project; | ||||
| DROP INDEX IF EXISTS jobs_cluster_subcluster; | ||||
| DROP INDEX IF EXISTS jobs_cluster_starttime; | ||||
| DROP INDEX IF EXISTS jobs_cluster_duration; | ||||
| DROP INDEX IF EXISTS jobs_cluster_numnodes; | ||||
| DROP INDEX IF EXISTS jobs_cluster_numhwthreads; | ||||
| DROP INDEX IF EXISTS jobs_cluster_numacc; | ||||
| DROP INDEX IF EXISTS jobs_cluster_energy; | ||||
| DROP INDEX IF EXISTS jobs_cluster_partition; | ||||
| DROP INDEX IF EXISTS jobs_cluster_partition_starttime; | ||||
| DROP INDEX IF EXISTS jobs_cluster_partition_duration; | ||||
| DROP INDEX IF EXISTS jobs_cluster_partition_numnodes; | ||||
| DROP INDEX IF EXISTS jobs_cluster_partition_numhwthreads; | ||||
| DROP INDEX IF EXISTS jobs_cluster_partition_numacc; | ||||
| DROP INDEX IF EXISTS jobs_cluster_partition_energy; | ||||
| DROP INDEX IF EXISTS jobs_cluster_partition_jobstate; | ||||
| DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_user; | ||||
| DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_project; | ||||
| DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_starttime; | ||||
| DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_duration; | ||||
| DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_numnodes; | ||||
| DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_numhwthreads; | ||||
| DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_numacc; | ||||
| DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_energy; | ||||
| DROP INDEX IF EXISTS jobs_cluster_jobstate; | ||||
| DROP INDEX IF EXISTS jobs_cluster_jobstate_user; | ||||
| DROP INDEX IF EXISTS jobs_cluster_jobstate_project; | ||||
| DROP INDEX IF EXISTS jobs_cluster_jobstate_starttime; | ||||
| DROP INDEX IF EXISTS jobs_cluster_jobstate_duration; | ||||
| DROP INDEX IF EXISTS jobs_cluster_jobstate_numnodes; | ||||
| DROP INDEX IF EXISTS jobs_cluster_jobstate_numhwthreads; | ||||
| DROP INDEX IF EXISTS jobs_cluster_jobstate_numacc; | ||||
| DROP INDEX IF EXISTS jobs_cluster_jobstate_energy; | ||||
|  | ||||
| -- CREATE UPDATED indices with new column names | ||||
| -- Cluster Filter | ||||
| CREATE INDEX IF NOT EXISTS jobs_cluster ON job (hpc_cluster); | ||||
| CREATE INDEX IF NOT EXISTS jobs_cluster_user ON job (hpc_cluster, hpc_user); | ||||
| CREATE INDEX IF NOT EXISTS jobs_cluster_project ON job (hpc_cluster, project); | ||||
| CREATE INDEX IF NOT EXISTS jobs_cluster_subcluster ON job (hpc_cluster, subcluster); | ||||
| -- Cluster Filter Sorting | ||||
| CREATE INDEX IF NOT EXISTS jobs_cluster_starttime ON job (hpc_cluster, start_time); | ||||
| CREATE INDEX IF NOT EXISTS jobs_cluster_duration ON job (hpc_cluster, duration); | ||||
| CREATE INDEX IF NOT EXISTS jobs_cluster_numnodes ON job (hpc_cluster, num_nodes); | ||||
| CREATE INDEX IF NOT EXISTS jobs_cluster_numhwthreads ON job (hpc_cluster, num_hwthreads); | ||||
| CREATE INDEX IF NOT EXISTS jobs_cluster_numacc ON job (hpc_cluster, num_acc); | ||||
| CREATE INDEX IF NOT EXISTS jobs_cluster_energy ON job (hpc_cluster, energy); | ||||
| -- Cluster+Partition Filter | ||||
| CREATE INDEX IF NOT EXISTS jobs_cluster_partition ON job (hpc_cluster, cluster_partition); | ||||
| -- Cluster+Partition Filter Sorting | ||||
| CREATE INDEX IF NOT EXISTS jobs_cluster_partition_starttime ON job (hpc_cluster, cluster_partition, start_time); | ||||
| CREATE INDEX IF NOT EXISTS jobs_cluster_partition_duration ON job (hpc_cluster, cluster_partition, duration); | ||||
| CREATE INDEX IF NOT EXISTS jobs_cluster_partition_numnodes ON job (hpc_cluster, cluster_partition, num_nodes); | ||||
| CREATE INDEX IF NOT EXISTS jobs_cluster_partition_numhwthreads ON job (hpc_cluster, cluster_partition, num_hwthreads); | ||||
| CREATE INDEX IF NOT EXISTS jobs_cluster_partition_numacc ON job (hpc_cluster, cluster_partition, num_acc); | ||||
| CREATE INDEX IF NOT EXISTS jobs_cluster_partition_energy ON job (hpc_cluster, cluster_partition, energy); | ||||
| -- Cluster+Partition+Jobstate Filter | ||||
| CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate ON job (hpc_cluster, cluster_partition, job_state); | ||||
| CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_user ON job (hpc_cluster, cluster_partition, job_state, hpc_user); | ||||
| CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_project ON job (hpc_cluster, cluster_partition, job_state, project); | ||||
| -- Cluster+Partition+Jobstate Filter Sorting | ||||
| CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_starttime ON job (hpc_cluster, cluster_partition, job_state, start_time); | ||||
| CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_duration ON job (hpc_cluster, cluster_partition, job_state, duration); | ||||
| CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_numnodes ON job (hpc_cluster, cluster_partition, job_state, num_nodes); | ||||
| CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_numhwthreads ON job (hpc_cluster, cluster_partition, job_state, num_hwthreads); | ||||
| CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_numacc ON job (hpc_cluster, cluster_partition, job_state, num_acc); | ||||
| CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_energy ON job (hpc_cluster, cluster_partition, job_state, energy); | ||||
| -- Cluster+JobState Filter | ||||
| CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate ON job (hpc_cluster, job_state); | ||||
| CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_user ON job (hpc_cluster, job_state, hpc_user); | ||||
| CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_project ON job (hpc_cluster, job_state, project); | ||||
| -- Cluster+JobState Filter Sorting | ||||
| CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_starttime ON job (hpc_cluster, job_state, start_time); | ||||
| CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_duration ON job (hpc_cluster, job_state, duration); | ||||
| CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_numnodes ON job (hpc_cluster, job_state, num_nodes); | ||||
| CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_numhwthreads ON job (hpc_cluster, job_state, num_hwthreads); | ||||
| CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_numacc ON job (hpc_cluster, job_state, num_acc); | ||||
| CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_energy ON job (hpc_cluster, job_state, energy); | ||||
| -- END UPDATE existing indices | ||||
|  | ||||
| -- Add NEW Indices For New Job Table Columns | ||||
| CREATE INDEX IF NOT EXISTS jobs_cluster_submittime ON job (hpc_cluster, submit_time); | ||||
| CREATE INDEX IF NOT EXISTS jobs_cluster_partition_submittime ON job (hpc_cluster, cluster_partition, submit_time); | ||||
| CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_submittime ON job (hpc_cluster, cluster_partition, job_state, submit_time); | ||||
| CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_submittime ON job (hpc_cluster, job_state, submit_time); | ||||
|  | ||||
| -- Add NEW Indices For New Node Table VARCHAR Fields | ||||
| CREATE INDEX IF NOT EXISTS nodes_cluster ON node (cluster); | ||||
| CREATE INDEX IF NOT EXISTS nodes_cluster_subcluster ON node (cluster, subcluster); | ||||
|  | ||||
-- Add NEW Indices For New Node_State Table Fields
CREATE INDEX IF NOT EXISTS nodeStates_state ON node_state (node_state);
CREATE INDEX IF NOT EXISTS nodeStates_health ON node_state (health_state);
-- FIX: these composite indices cover node_state columns (node_id is a column
-- of node_state, see its FOREIGN KEY above), so they must be created on the
-- node_state table, not on node.
CREATE INDEX IF NOT EXISTS nodeStates_nodeid_state ON node_state (node_id, node_state);
CREATE INDEX IF NOT EXISTS nodeStates_nodeid_health ON node_state (node_id, health_state);
|  | ||||
| -- Add NEW Indices For Increased Amounts of Tags | ||||
| CREATE INDEX IF NOT EXISTS tags_jobid ON jobtag (job_id); | ||||
| CREATE INDEX IF NOT EXISTS tags_tagid ON jobtag (tag_id); | ||||
|   | ||||
| @@ -250,6 +250,10 @@ func (r *NodeRepository) QueryNodes( | ||||
| 		return nil, qerr | ||||
| 	} | ||||
|  | ||||
| 	// Get latest Info aka closest Timestamp to $now | ||||
| 	now := time.Now().Unix() | ||||
| 	query = query.Join("node_state ON node_state.node_id = node.id").Where(sq.Gt{"node_state.time_stamp": (now - 60)}) // .Distinct() | ||||
|  | ||||
| 	for _, f := range filters { | ||||
| 		if f.Hostname != nil { | ||||
| 			query = buildStringCondition("node.hostname", f.Hostname, query) | ||||
| @@ -291,104 +295,140 @@ func (r *NodeRepository) QueryNodes( | ||||
| 	return nodes, nil | ||||
| } | ||||
|  | ||||
| // | ||||
| // func (r *NodeRepository) CountNodeStates(ctx context.Context, filters []*model.NodeFilter) ([]*model.NodeStates, error) { | ||||
| // 	query, qerr := AccessCheck(ctx, sq.Select("node_state AS state", "count(*) AS count").From("node")) | ||||
| // 	if qerr != nil { | ||||
| // 		return nil, qerr | ||||
| // 	} | ||||
| // | ||||
| // 	for _, f := range filters { | ||||
| // 		if f.Hostname != nil { | ||||
| // 			query = buildStringCondition("node.hostname", f.Hostname, query) | ||||
| // 		} | ||||
| // 		if f.Cluster != nil { | ||||
| // 			query = buildStringCondition("node.cluster", f.Cluster, query) | ||||
| // 		} | ||||
| // 		if f.Subcluster != nil { | ||||
| // 			query = buildStringCondition("node.subcluster", f.Subcluster, query) | ||||
| // 		} | ||||
| // 		if f.NodeState != nil { | ||||
| // 			query = query.Where("node.node_state = ?", f.NodeState) | ||||
| // 		} | ||||
| // 		if f.HealthState != nil { | ||||
| // 			query = query.Where("node.health_state = ?", f.HealthState) | ||||
| // 		} | ||||
| // 	} | ||||
| // | ||||
| // 	// Add Group and Order | ||||
| // 	query = query.GroupBy("state").OrderBy("count DESC") | ||||
| // | ||||
| // 	rows, err := query.RunWith(r.stmtCache).Query() | ||||
| // 	if err != nil { | ||||
| // 		queryString, queryVars, _ := query.ToSql() | ||||
| // 		cclog.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err) | ||||
| // 		return nil, err | ||||
| // 	} | ||||
| // | ||||
| // 	nodes := make([]*model.NodeStates, 0) | ||||
| // 	for rows.Next() { | ||||
| // 		node := model.NodeStates{} | ||||
| // | ||||
| // 		if err := rows.Scan(&node.State, &node.Count); err != nil { | ||||
| // 			rows.Close() | ||||
| // 			cclog.Warn("Error while scanning rows (NodeStates)") | ||||
| // 			return nil, err | ||||
| // 		} | ||||
| // 		nodes = append(nodes, &node) | ||||
| // 	} | ||||
| // | ||||
| // 	return nodes, nil | ||||
| // } | ||||
| // | ||||
| // func (r *NodeRepository) CountHealthStates(ctx context.Context, filters []*model.NodeFilter) ([]*model.NodeStates, error) { | ||||
| // 	query, qerr := AccessCheck(ctx, sq.Select("health_state AS state", "count(*) AS count").From("node")) | ||||
| // 	if qerr != nil { | ||||
| // 		return nil, qerr | ||||
| // 	} | ||||
| // | ||||
| // 	for _, f := range filters { | ||||
| // 		if f.Hostname != nil { | ||||
| // 			query = buildStringCondition("node.hostname", f.Hostname, query) | ||||
| // 		} | ||||
| // 		if f.Cluster != nil { | ||||
| // 			query = buildStringCondition("node.cluster", f.Cluster, query) | ||||
| // 		} | ||||
| // 		if f.Subcluster != nil { | ||||
| // 			query = buildStringCondition("node.subcluster", f.Subcluster, query) | ||||
| // 		} | ||||
| // 		if f.NodeState != nil { | ||||
| // 			query = query.Where("node.node_state = ?", f.NodeState) | ||||
| // 		} | ||||
| // 		if f.HealthState != nil { | ||||
| // 			query = query.Where("node.health_state = ?", f.HealthState) | ||||
| // 		} | ||||
| // 	} | ||||
| // | ||||
| // 	// Add Group and Order | ||||
| // 	query = query.GroupBy("state").OrderBy("count DESC") | ||||
| // | ||||
| // 	rows, err := query.RunWith(r.stmtCache).Query() | ||||
| // 	if err != nil { | ||||
| // 		queryString, queryVars, _ := query.ToSql() | ||||
| // 		cclog.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err) | ||||
| // 		return nil, err | ||||
| // 	} | ||||
| // | ||||
| // 	nodes := make([]*model.NodeStates, 0) | ||||
| // 	for rows.Next() { | ||||
| // 		node := model.NodeStates{} | ||||
| // | ||||
| // 		if err := rows.Scan(&node.State, &node.Count); err != nil { | ||||
| // 			rows.Close() | ||||
| // 			cclog.Warn("Error while scanning rows (NodeStates)") | ||||
| // 			return nil, err | ||||
| // 		} | ||||
| // 		nodes = append(nodes, &node) | ||||
| // 	} | ||||
| // | ||||
| // 	return nodes, nil | ||||
| // } | ||||
| func (r *NodeRepository) ListNodes(cluster string) ([]*schema.Node, error) { | ||||
| 	// Get latest Info aka closest Timestamo to $now | ||||
| 	now := time.Now().Unix() | ||||
| 	q := sq.Select("hostname", "cluster", "subcluster", "node_state", "health_state"). | ||||
| 		From("node"). | ||||
| 		Join("node_state ON node_state.node_id = node.id").Where(sq.Gt{"node_state.time_stamp": (now - 60)}). | ||||
| 		Where("node.cluster = ?", cluster).OrderBy("node.hostname ASC") | ||||
|  | ||||
| 	rows, err := q.RunWith(r.DB).Query() | ||||
| 	if err != nil { | ||||
| 		cclog.Warn("Error while querying node list") | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	nodeList := make([]*schema.Node, 0, 100) | ||||
| 	defer rows.Close() | ||||
| 	for rows.Next() { | ||||
| 		node := &schema.Node{} | ||||
| 		if err := rows.Scan(&node.Hostname, &node.Cluster, | ||||
| 			&node.SubCluster, &node.NodeState, &node.HealthState); err != nil { | ||||
| 			cclog.Warn("Error while scanning node list") | ||||
| 			return nil, err | ||||
| 		} | ||||
|  | ||||
| 		nodeList = append(nodeList, node) | ||||
| 	} | ||||
|  | ||||
| 	return nodeList, nil | ||||
| } | ||||
|  | ||||
| func (r *NodeRepository) CountNodeStates(ctx context.Context, filters []*model.NodeFilter) ([]*model.NodeStates, error) { | ||||
| 	query, qerr := AccessCheck(ctx, sq.Select("node_state", "count(*) AS count").From("node")) | ||||
| 	if qerr != nil { | ||||
| 		return nil, qerr | ||||
| 	} | ||||
|  | ||||
| 	// Get latest Info aka closest Timestamp to $now | ||||
| 	now := time.Now().Unix() | ||||
| 	query = query.Join("node_state ON node_state.node_id = node.id").Where(sq.Gt{"node_state.time_stamp": (now - 60)}) // .Distinct() | ||||
|  | ||||
| 	for _, f := range filters { | ||||
| 		if f.Hostname != nil { | ||||
| 			query = buildStringCondition("node.hostname", f.Hostname, query) | ||||
| 		} | ||||
| 		if f.Cluster != nil { | ||||
| 			query = buildStringCondition("node.cluster", f.Cluster, query) | ||||
| 		} | ||||
| 		if f.Subcluster != nil { | ||||
| 			query = buildStringCondition("node.subcluster", f.Subcluster, query) | ||||
| 		} | ||||
| 		if f.SchedulerState != nil { | ||||
| 			query = query.Where("node.node_state = ?", f.SchedulerState) | ||||
| 		} | ||||
| 		if f.HealthState != nil { | ||||
| 			query = query.Where("node.health_state = ?", f.HealthState) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	// Add Group and Order | ||||
| 	query = query.GroupBy("node_state").OrderBy("count DESC") | ||||
|  | ||||
| 	rows, err := query.RunWith(r.stmtCache).Query() | ||||
| 	if err != nil { | ||||
| 		queryString, queryVars, _ := query.ToSql() | ||||
| 		cclog.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err) | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	nodes := make([]*model.NodeStates, 0) | ||||
| 	for rows.Next() { | ||||
| 		node := model.NodeStates{} | ||||
|  | ||||
| 		if err := rows.Scan(&node.State, &node.Count); err != nil { | ||||
| 			rows.Close() | ||||
| 			cclog.Warn("Error while scanning rows (NodeStates)") | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		nodes = append(nodes, &node) | ||||
| 	} | ||||
|  | ||||
| 	return nodes, nil | ||||
| } | ||||
|  | ||||
| func (r *NodeRepository) CountHealthStates(ctx context.Context, filters []*model.NodeFilter) ([]*model.NodeStates, error) { | ||||
| 	query, qerr := AccessCheck(ctx, sq.Select("health_state", "count(*) AS count").From("node")) | ||||
| 	if qerr != nil { | ||||
| 		return nil, qerr | ||||
| 	} | ||||
|  | ||||
| 	// Get latest Info aka closest Timestamp to $now | ||||
| 	now := time.Now().Unix() | ||||
| 	query = query.Join("node_state ON node_state.node_id = node.id").Where(sq.Gt{"node_state.time_stamp": (now - 60)}) // .Distinct() | ||||
|  | ||||
| 	for _, f := range filters { | ||||
| 		if f.Hostname != nil { | ||||
| 			query = buildStringCondition("node.hostname", f.Hostname, query) | ||||
| 		} | ||||
| 		if f.Cluster != nil { | ||||
| 			query = buildStringCondition("node.cluster", f.Cluster, query) | ||||
| 		} | ||||
| 		if f.Subcluster != nil { | ||||
| 			query = buildStringCondition("node.subcluster", f.Subcluster, query) | ||||
| 		} | ||||
| 		if f.SchedulerState != nil { | ||||
| 			query = query.Where("node.node_state = ?", f.SchedulerState) | ||||
| 		} | ||||
| 		if f.HealthState != nil { | ||||
| 			query = query.Where("node.health_state = ?", f.HealthState) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	// Add Group and Order | ||||
| 	query = query.GroupBy("health_state").OrderBy("count DESC") | ||||
|  | ||||
| 	rows, err := query.RunWith(r.stmtCache).Query() | ||||
| 	if err != nil { | ||||
| 		queryString, queryVars, _ := query.ToSql() | ||||
| 		cclog.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err) | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	nodes := make([]*model.NodeStates, 0) | ||||
| 	for rows.Next() { | ||||
| 		node := model.NodeStates{} | ||||
|  | ||||
| 		if err := rows.Scan(&node.State, &node.Count); err != nil { | ||||
| 			rows.Close() | ||||
| 			cclog.Warn("Error while scanning rows (NodeStates)") | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		nodes = append(nodes, &node) | ||||
| 	} | ||||
|  | ||||
| 	return nodes, nil | ||||
| } | ||||
|  | ||||
| func AccessCheck(ctx context.Context, query sq.SelectBuilder) (sq.SelectBuilder, error) { | ||||
| 	user := GetUserFromContext(ctx) | ||||
|   | ||||
| @@ -21,9 +21,10 @@ import ( | ||||
|  | ||||
| // GraphQL validation should make sure that no unknown values can be specified. | ||||
| var groupBy2column = map[model.Aggregate]string{ | ||||
| 	model.AggregateUser:    "job.hpc_user", | ||||
| 	model.AggregateProject: "job.project", | ||||
| 	model.AggregateCluster: "job.hpc_cluster", | ||||
| 	model.AggregateUser:       "job.hpc_user", | ||||
| 	model.AggregateProject:    "job.project", | ||||
| 	model.AggregateCluster:    "job.hpc_cluster", | ||||
| 	model.AggregateSubcluster: "job.subcluster", | ||||
| } | ||||
|  | ||||
| var sortBy2column = map[model.SortByAggregate]string{ | ||||
| @@ -176,7 +177,7 @@ func (r *JobRepository) JobsStatsGrouped( | ||||
| 		var name sql.NullString | ||||
| 		var jobs, users, walltime, nodes, nodeHours, cores, coreHours, accs, accHours sql.NullInt64 | ||||
| 		if err := rows.Scan(&id, &name, &jobs, &users, &walltime, &nodes, &nodeHours, &cores, &coreHours, &accs, &accHours); err != nil { | ||||
| 			cclog.Warn("Error while scanning rows") | ||||
| 			cclog.Warnf("Error while scanning rows: %s", err.Error()) | ||||
| 			return nil, err | ||||
| 		} | ||||
|  | ||||
|   | ||||
		Reference in New Issue
	
	Block a user