diff --git a/api/schema.graphqls b/api/schema.graphqls
index 9092b4f..794c630 100644
--- a/api/schema.graphqls
+++ b/api/schema.graphqls
@@ -4,61 +4,78 @@ scalar Any
scalar NullableFloat
scalar MetricScope
scalar JobState
+scalar NodeState
+scalar MonitoringState
+
+type Node {
+ id: ID!
+ hostname: String!
+ cluster: String!
+ subCluster: String!
+ nodeState: NodeState!
+ HealthState: MonitoringState!
+ metaData: Any
+}
+
+type NodeStats {
+ state: String!
+ count: Int!
+}
type Job {
- id: ID!
- jobId: Int!
- user: String!
- project: String!
- cluster: String!
- subCluster: String!
- startTime: Time!
- duration: Int!
- walltime: Int!
- numNodes: Int!
- numHWThreads: Int!
- numAcc: Int!
- energy: Float!
- SMT: Int!
- exclusive: Int!
- partition: String!
- arrayJobId: Int!
+ id: ID!
+ jobId: Int!
+ user: String!
+ project: String!
+ cluster: String!
+ subCluster: String!
+ startTime: Time!
+ duration: Int!
+ walltime: Int!
+ numNodes: Int!
+ numHWThreads: Int!
+ numAcc: Int!
+ energy: Float!
+ SMT: Int!
+ exclusive: Int!
+ partition: String!
+ arrayJobId: Int!
monitoringStatus: Int!
- state: JobState!
- tags: [Tag!]!
- resources: [Resource!]!
- concurrentJobs: JobLinkResultList
- footprint: [FootprintValue]
- energyFootprint: [EnergyFootprintValue]
- metaData: Any
- userData: User
+ state: JobState!
+ tags: [Tag!]!
+ resources: [Resource!]!
+ concurrentJobs: JobLinkResultList
+ footprint: [FootprintValue]
+ energyFootprint: [EnergyFootprintValue]
+ metaData: Any
+ userData: User
}
type JobLink {
- id: ID!
- jobId: Int!
+ id: ID!
+ jobId: Int!
}
type Cluster {
- name: String!
- partitions: [String!]! # Slurm partitions
- subClusters: [SubCluster!]! # Hardware partitions/subclusters
+ name: String!
+ partitions: [String!]! # Slurm partitions
+ subClusters: [SubCluster!]! # Hardware partitions/subclusters
}
type SubCluster {
- name: String!
- nodes: String!
- numberOfNodes: Int!
- processorType: String!
- socketsPerNode: Int!
- coresPerSocket: Int!
- threadsPerCore: Int!
- flopRateScalar: MetricValue!
- flopRateSimd: MetricValue!
+ name: String!
+ nodes: String!
+ numberOfNodes: Int!
+ processorType: String!
+ socketsPerNode: Int!
+ coresPerSocket: Int!
+ threadsPerCore: Int!
+ flopRateScalar: MetricValue!
+ flopRateSimd: MetricValue!
memoryBandwidth: MetricValue!
- topology: Topology!
- metricConfig: [MetricConfig!]!
- footprint: [String!]!
+ topology: Topology!
+ metricConfig: [MetricConfig!]!
+ footprint: [String!]!
}
type FootprintValue {
@@ -80,99 +97,112 @@ type MetricValue {
}
type Topology {
- node: [Int!]
- socket: [[Int!]!]
+ node: [Int!]
+ socket: [[Int!]!]
memoryDomain: [[Int!]!]
- die: [[Int!]!]
- core: [[Int!]!]
+ die: [[Int!]!]
+ core: [[Int!]!]
accelerators: [Accelerator!]
}
type Accelerator {
- id: String!
- type: String!
+ id: String!
+ type: String!
model: String!
}
type SubClusterConfig {
- name: String!
- peak: Float
- normal: Float
+ name: String!
+ peak: Float
+ normal: Float
caution: Float
- alert: Float
- remove: Boolean
+ alert: Float
+ remove: Boolean
}
type MetricConfig {
- name: String!
- unit: Unit!
- scope: MetricScope!
+ name: String!
+ unit: Unit!
+ scope: MetricScope!
aggregation: String!
- timestep: Int!
- peak: Float!
- normal: Float
+ timestep: Int!
+ peak: Float!
+ normal: Float
caution: Float!
- alert: Float!
+ alert: Float!
lowerIsBetter: Boolean
subClusters: [SubClusterConfig!]!
}
type Tag {
- id: ID!
+ id: ID!
type: String!
name: String!
scope: String!
}
type Resource {
- hostname: String!
- hwthreads: [Int!]
- accelerators: [String!]
+ hostname: String!
+ hwthreads: [Int!]
+ accelerators: [String!]
configuration: String
}
type JobMetricWithName {
- name: String!
- scope: MetricScope!
+ name: String!
+ scope: MetricScope!
metric: JobMetric!
}
type JobMetric {
- unit: Unit
- timestep: Int!
- series: [Series!]
+ unit: Unit
+ timestep: Int!
+ series: [Series!]
statisticsSeries: StatsSeries
}
type Series {
- hostname: String!
- id: String
+ hostname: String!
+ id: String
statistics: MetricStatistics
- data: [NullableFloat!]!
+ data: [NullableFloat!]!
}
type StatsSeries {
- mean: [NullableFloat!]!
+ mean: [NullableFloat!]!
median: [NullableFloat!]!
- min: [NullableFloat!]!
- max: [NullableFloat!]!
+ min: [NullableFloat!]!
+ max: [NullableFloat!]!
}
-type JobStatsWithScope {
- name: String!
- scope: MetricScope!
- stats: [ScopedStats!]!
+type NamedStatsWithScope {
+ name: String!
+ scope: MetricScope!
+ stats: [ScopedStats!]!
}
type ScopedStats {
- hostname: String!
- id: String
- data: MetricStatistics!
+ hostname: String!
+ id: String
+ data: MetricStatistics!
}
type JobStats {
- name: String!
- stats: MetricStatistics!
+ id: Int!
+ jobId: String!
+ startTime: Int!
+ duration: Int!
+ cluster: String!
+ subCluster: String!
+ numNodes: Int!
+ numHWThreads: Int
+ numAccelerators: Int
+ stats: [NamedStats!]!
+}
+
+type NamedStats {
+ name: String!
+ data: MetricStatistics!
}
type Unit {
@@ -188,12 +218,12 @@ type MetricStatistics {
type MetricFootprints {
metric: String!
- data: [NullableFloat!]!
+ data: [NullableFloat!]!
}
type Footprints {
timeWeights: TimeWeights!
- metrics: [MetricFootprints!]!
+ metrics: [MetricFootprints!]!
}
type TimeWeights {
@@ -202,20 +232,33 @@ type TimeWeights {
coreHours: [NullableFloat!]!
}
-enum Aggregate { USER, PROJECT, CLUSTER }
-enum SortByAggregate { TOTALWALLTIME, TOTALJOBS, TOTALNODES, TOTALNODEHOURS, TOTALCORES, TOTALCOREHOURS, TOTALACCS, TOTALACCHOURS }
+enum Aggregate {
+ USER
+ PROJECT
+ CLUSTER
+}
+enum SortByAggregate {
+ TOTALWALLTIME
+ TOTALJOBS
+ TOTALNODES
+ TOTALNODEHOURS
+ TOTALCORES
+ TOTALCOREHOURS
+ TOTALACCS
+ TOTALACCHOURS
+}
type NodeMetrics {
- host: String!
+ host: String!
subCluster: String!
- metrics: [JobMetricWithName!]!
+ metrics: [JobMetricWithName!]!
}
type NodesResultList {
- items: [NodeMetrics!]!
+ items: [NodeMetrics!]!
offset: Int
- limit: Int
- count: Int
+ limit: Int
+ count: Int
totalNodes: Int
hasNextPage: Boolean
}
@@ -234,14 +277,14 @@ type GlobalMetricListItem {
}
type Count {
- name: String!
+ name: String!
count: Int!
}
type User {
username: String!
- name: String!
- email: String!
+ name: String!
+ email: String!
}
input MetricStatItem {
@@ -250,26 +293,81 @@ input MetricStatItem {
}
type Query {
- clusters: [Cluster!]! # List of all clusters
- tags: [Tag!]! # List of all tags
- globalMetrics: [GlobalMetricListItem!]!
+ clusters: [Cluster!]! # List of all clusters
+ tags: [Tag!]! # List of all tags
+ globalMetrics: [GlobalMetricListItem!]!
user(username: String!): User
allocatedNodes(cluster: String!): [Count!]!
+ node(id: ID!): Node
+ nodes(filter: [NodeFilter!], order: OrderByInput): NodeStateResultList!
+ nodeStats(filter: [NodeFilter!]): [NodeStats!]!
+
job(id: ID!): Job
- jobMetrics(id: ID!, metrics: [String!], scopes: [MetricScope!], resolution: Int): [JobMetricWithName!]!
- jobStats(id: ID!, metrics: [String!]): [JobStats!]!
- scopedJobStats(id: ID!, metrics: [String!], scopes: [MetricScope!]): [JobStatsWithScope!]!
+ jobMetrics(
+ id: ID!
+ metrics: [String!]
+ scopes: [MetricScope!]
+ resolution: Int
+ ): [JobMetricWithName!]!
+
+ jobStats(id: ID!, metrics: [String!]): [NamedStats!]!
+
+ scopedJobStats(
+ id: ID!
+ metrics: [String!]
+ scopes: [MetricScope!]
+ ): [NamedStatsWithScope!]!
+
+ jobs(
+ filter: [JobFilter!]
+ page: PageRequest
+ order: OrderByInput
+ ): JobResultList!
+
+ jobsStatistics(
+ filter: [JobFilter!]
+ metrics: [String!]
+ page: PageRequest
+ sortBy: SortByAggregate
+ groupBy: Aggregate
+ numDurationBins: String
+ numMetricBins: Int
+ ): [JobsStatistics!]!
+
+ jobsMetricStats(filter: [JobFilter!], metrics: [String!]): [JobStats!]!
jobsFootprints(filter: [JobFilter!], metrics: [String!]!): Footprints
- jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList!
- jobsStatistics(filter: [JobFilter!], metrics: [String!], page: PageRequest, sortBy: SortByAggregate, groupBy: Aggregate, numDurationBins: String, numMetricBins: Int): [JobsStatistics!]!
+ rooflineHeatmap(
+ filter: [JobFilter!]!
+ rows: Int!
+ cols: Int!
+ minX: Float!
+ minY: Float!
+ maxX: Float!
+ maxY: Float!
+ ): [[Float!]!]!
- rooflineHeatmap(filter: [JobFilter!]!, rows: Int!, cols: Int!, minX: Float!, minY: Float!, maxX: Float!, maxY: Float!): [[Float!]!]!
-
- nodeMetrics(cluster: String!, nodes: [String!], scopes: [MetricScope!], metrics: [String!], from: Time!, to: Time!): [NodeMetrics!]!
- nodeMetricsList(cluster: String!, subCluster: String!, nodeFilter: String!, scopes: [MetricScope!], metrics: [String!], from: Time!, to: Time!, page: PageRequest, resolution: Int): NodesResultList!
+ nodeMetrics(
+ cluster: String!
+ nodes: [String!]
+ scopes: [MetricScope!]
+ metrics: [String!]
+ from: Time!
+ to: Time!
+ ): [NodeMetrics!]!
+ nodeMetricsList(
+ cluster: String!
+ subCluster: String!
+ nodeFilter: String!
+ scopes: [MetricScope!]
+ metrics: [String!]
+ from: Time!
+ to: Time!
+ page: PageRequest
+ resolution: Int
+ ): NodesResultList!
}
type Mutation {
@@ -282,37 +380,52 @@ type Mutation {
updateConfiguration(name: String!, value: String!): String
}
-type IntRangeOutput { from: Int!, to: Int! }
-type TimeRangeOutput { range: String, from: Time!, to: Time! }
+type IntRangeOutput {
+ from: Int!
+ to: Int!
+}
+type TimeRangeOutput {
+ range: String
+ from: Time!
+ to: Time!
+}
+
+input NodeFilter {
+ hostname: StringInput
+ cluster: StringInput
+ nodeState: NodeState
+ healthState: MonitoringState
+}
input JobFilter {
- tags: [ID!]
- jobId: StringInput
- arrayJobId: Int
- user: StringInput
- project: StringInput
- jobName: StringInput
- cluster: StringInput
- partition: StringInput
- duration: IntRange
- energy: FloatRange
+ tags: [ID!]
+ dbId: [ID!]
+ jobId: StringInput
+ arrayJobId: Int
+ user: StringInput
+ project: StringInput
+ jobName: StringInput
+ cluster: StringInput
+ partition: StringInput
+ duration: IntRange
+ energy: FloatRange
minRunningFor: Int
- numNodes: IntRange
+ numNodes: IntRange
numAccelerators: IntRange
- numHWThreads: IntRange
+ numHWThreads: IntRange
- startTime: TimeRange
- state: [JobState!]
+ startTime: TimeRange
+ state: [JobState!]
metricStats: [MetricStatItem!]
- exclusive: Int
- node: StringInput
+ exclusive: Int
+ node: StringInput
}
input OrderByInput {
field: String!
- type: String!,
+ type: String!
order: SortDirectionEnum! = ASC
}
@@ -322,34 +435,46 @@ enum SortDirectionEnum {
}
input StringInput {
- eq: String
- neq: String
- contains: String
+ eq: String
+ neq: String
+ contains: String
startsWith: String
- endsWith: String
- in: [String!]
+ endsWith: String
+ in: [String!]
}
-input IntRange { from: Int!, to: Int! }
-input TimeRange { range: String, from: Time, to: Time }
+input IntRange {
+ from: Int!
+ to: Int!
+}
+input TimeRange {
+ range: String
+ from: Time
+ to: Time
+}
input FloatRange {
from: Float!
to: Float!
}
+type NodeStateResultList {
+ items: [Node!]!
+ count: Int
+}
+
type JobResultList {
- items: [Job!]!
+ items: [Job!]!
offset: Int
- limit: Int
- count: Int
+ limit: Int
+ count: Int
hasNextPage: Boolean
}
type JobLinkResultList {
listQuery: String
- items: [JobLink!]!
- count: Int
+ items: [JobLink!]!
+ count: Int
}
type HistoPoint {
@@ -371,27 +496,27 @@ type MetricHistoPoint {
max: Int
}
-type JobsStatistics {
- id: ID! # If `groupBy` was used, ID of the user/project/cluster
- name: String! # if User-Statistics: Given Name of Account (ID) Owner
- totalJobs: Int! # Number of jobs
- runningJobs: Int! # Number of running jobs
- shortJobs: Int! # Number of jobs with a duration of less than duration
- totalWalltime: Int! # Sum of the duration of all matched jobs in hours
- totalNodes: Int! # Sum of the nodes of all matched jobs
- totalNodeHours: Int! # Sum of the node hours of all matched jobs
- totalCores: Int! # Sum of the cores of all matched jobs
- totalCoreHours: Int! # Sum of the core hours of all matched jobs
- totalAccs: Int! # Sum of the accs of all matched jobs
- totalAccHours: Int! # Sum of the gpu hours of all matched jobs
- histDuration: [HistoPoint!]! # value: hour, count: number of jobs with a rounded duration of value
- histNumNodes: [HistoPoint!]! # value: number of nodes, count: number of jobs with that number of nodes
- histNumCores: [HistoPoint!]! # value: number of cores, count: number of jobs with that number of cores
- histNumAccs: [HistoPoint!]! # value: number of accs, count: number of jobs with that number of accs
- histMetrics: [MetricHistoPoints!]! # metric: metricname, data array of histopoints: value: metric average bin, count: number of jobs with that metric average
+type JobsStatistics {
+ id: ID! # If `groupBy` was used, ID of the user/project/cluster
+ name: String! # if User-Statistics: Given Name of Account (ID) Owner
+ totalJobs: Int! # Number of jobs
+ runningJobs: Int! # Number of running jobs
+  shortJobs: Int! # Number of jobs shorter than the configured short-job duration
+ totalWalltime: Int! # Sum of the duration of all matched jobs in hours
+ totalNodes: Int! # Sum of the nodes of all matched jobs
+ totalNodeHours: Int! # Sum of the node hours of all matched jobs
+ totalCores: Int! # Sum of the cores of all matched jobs
+ totalCoreHours: Int! # Sum of the core hours of all matched jobs
+ totalAccs: Int! # Sum of the accs of all matched jobs
+ totalAccHours: Int! # Sum of the gpu hours of all matched jobs
+ histDuration: [HistoPoint!]! # value: hour, count: number of jobs with a rounded duration of value
+ histNumNodes: [HistoPoint!]! # value: number of nodes, count: number of jobs with that number of nodes
+ histNumCores: [HistoPoint!]! # value: number of cores, count: number of jobs with that number of cores
+ histNumAccs: [HistoPoint!]! # value: number of accs, count: number of jobs with that number of accs
+ histMetrics: [MetricHistoPoints!]! # metric: metricname, data array of histopoints: value: metric average bin, count: number of jobs with that metric average
}
input PageRequest {
itemsPerPage: Int!
- page: Int!
+ page: Int!
}
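
The reworked Query type above adds node-centric entry points (node, nodes, nodeStats) alongside the renamed per-job stats types (NamedStats, NamedStatsWithScope). Below is a minimal, hedged Go sketch of a client exercising the new nodeStats query: the field and filter names come from the schema above, while the /query endpoint path, host, and cluster value are assumptions.

// Hedged sketch: querying the new nodeStats entry point over HTTP.
// Only the GraphQL field names are taken from the schema above; the
// endpoint path "/query", host, and cluster name are example values.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

func main() {
	query := `{ nodeStats(filter: [{cluster: {eq: "fritz"}}]) { state count } }`
	body, _ := json.Marshal(map[string]string{"query": query})
	resp, err := http.Post("http://localhost:8080/query", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(string(out)) // JSON document with a data.nodeStats list of {state, count} objects
}
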
diff --git a/api/swagger.json b/api/swagger.json
index c32678e..911cefc 100644
--- a/api/swagger.json
+++ b/api/swagger.json
@@ -201,7 +201,7 @@
"200": {
"description": "Success message",
"schema": {
- "$ref": "#/definitions/api.DefaultJobApiResponse"
+ "$ref": "#/definitions/api.DefaultApiResponse"
}
},
"400": {
@@ -271,7 +271,7 @@
"200": {
"description": "Success message",
"schema": {
- "$ref": "#/definitions/api.DefaultJobApiResponse"
+ "$ref": "#/definitions/api.DefaultApiResponse"
}
},
"400": {
@@ -341,7 +341,7 @@
"200": {
"description": "Success message",
"schema": {
- "$ref": "#/definitions/api.DefaultJobApiResponse"
+ "$ref": "#/definitions/api.DefaultApiResponse"
}
},
"400": {
@@ -523,7 +523,7 @@
"ApiKeyAuth": []
}
],
- "description": "Job specified in request body will be saved to database as \"running\" with new DB ID.\nJob specifications follow the 'JobMeta' scheme, API will fail to execute if requirements are not met.",
+ "description": "Job specified in request body will be saved to database as \"running\" with new DB ID.\nJob specifications follow the 'Job' scheme, API will fail to execute if requirements are not met.",
"consumes": [
"application/json"
],
@@ -541,7 +541,7 @@
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/schema.JobMeta"
+ "$ref": "#/definitions/schema.Job"
}
}
],
@@ -549,7 +549,7 @@
"201": {
"description": "Job added successfully",
"schema": {
- "$ref": "#/definitions/api.DefaultJobApiResponse"
+ "$ref": "#/definitions/api.DefaultApiResponse"
}
},
"400": {
@@ -592,7 +592,7 @@
"ApiKeyAuth": []
}
],
- "description": "Job to stop is specified by request body. All fields are required in this case.\nReturns full job resource information according to 'JobMeta' scheme.",
+ "description": "Job to stop is specified by request body. All fields are required in this case.\nReturns full job resource information according to 'Job' scheme.",
"produces": [
"application/json"
],
@@ -615,7 +615,7 @@
"200": {
"description": "Success message",
"schema": {
- "$ref": "#/definitions/schema.JobMeta"
+ "$ref": "#/definitions/schema.Job"
}
},
"400": {
@@ -737,7 +737,7 @@
"ApiKeyAuth": []
}
],
- "description": "Job to get is specified by database ID\nReturns full job resource information according to 'JobMeta' scheme and all metrics according to 'JobData'.",
+ "description": "Job to get is specified by database ID\nReturns full job resource information according to 'Job' scheme and all metrics according to 'JobData'.",
"produces": [
"application/json"
],
@@ -811,7 +811,7 @@
"ApiKeyAuth": []
}
],
- "description": "Job to get is specified by database ID\nReturns full job resource information according to 'JobMeta' scheme and all metrics according to 'JobData'.",
+ "description": "Job to get is specified by database ID\nReturns full job resource information according to 'Job' scheme and all metrics according to 'JobData'.",
"consumes": [
"application/json"
],
@@ -889,6 +889,66 @@
}
}
},
+ "/api/nodestats/": {
+ "post": {
+ "security": [
+ {
+ "ApiKeyAuth": []
+ }
+ ],
+                "description": "Updates the node states of the given cluster in the database.\nRequest body contains the cluster name and a list of nodes with their states.",
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Nodestates"
+ ],
+ "summary": "Deliver updated Slurm node states",
+ "parameters": [
+ {
+ "description": "Request body containing nodes and their states",
+ "name": "request",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/api.UpdateNodeStatesRequest"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "Success message",
+ "schema": {
+ "$ref": "#/definitions/api.DefaultApiResponse"
+ }
+ },
+ "400": {
+ "description": "Bad Request",
+ "schema": {
+ "$ref": "#/definitions/api.ErrorResponse"
+ }
+ },
+ "401": {
+ "description": "Unauthorized",
+ "schema": {
+ "$ref": "#/definitions/api.ErrorResponse"
+ }
+ },
+ "403": {
+ "description": "Forbidden",
+ "schema": {
+ "$ref": "#/definitions/api.ErrorResponse"
+ }
+ },
+ "500": {
+ "description": "Internal Server Error",
+ "schema": {
+ "$ref": "#/definitions/api.ErrorResponse"
+ }
+ }
+ }
+ }
+ },
"/api/users/": {
"get": {
"security": [
@@ -1137,7 +1197,7 @@
}
}
},
- "api.DefaultJobApiResponse": {
+ "api.DefaultApiResponse": {
"type": "object",
"properties": {
"msg": {
@@ -1231,7 +1291,7 @@
"description": "Array of jobs",
"type": "array",
"items": {
- "$ref": "#/definitions/schema.JobMeta"
+ "$ref": "#/definitions/schema.Job"
}
},
"page": {
@@ -1285,6 +1345,20 @@
}
}
},
+ "api.Node": {
+ "type": "object",
+ "properties": {
+ "hostname": {
+ "type": "string"
+ },
+ "states": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ },
"api.StopJobApiRequest": {
"type": "object",
"required": [
@@ -1318,6 +1392,21 @@
}
}
},
+ "api.UpdateNodeStatesRequest": {
+ "type": "object",
+ "properties": {
+ "cluster": {
+ "type": "string",
+ "example": "fritz"
+ },
+ "nodes": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/api.Node"
+ }
+ }
+ }
+ },
"schema.Accelerator": {
"type": "object",
"properties": {
@@ -1353,7 +1442,6 @@
}
},
"schema.Job": {
- "description": "Information of a HPC job.",
"type": "object",
"properties": {
"arrayJobId": {
@@ -1451,6 +1539,12 @@
"type": "string",
"example": "abcd200"
},
+ "requestedMemory": {
+ "description": "in MB",
+ "type": "integer",
+ "minimum": 1,
+ "example": 128000
+ },
"resources": {
"type": "array",
"items": {
@@ -1462,7 +1556,14 @@
"example": 4
},
"startTime": {
- "type": "string"
+ "type": "integer",
+ "example": 1649723812
+ },
+ "statistics": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/schema.JobStatistics"
+ }
},
"subCluster": {
"type": "string",
@@ -1510,147 +1611,6 @@
}
}
},
- "schema.JobMeta": {
- "description": "Meta data information of a HPC job.",
- "type": "object",
- "properties": {
- "arrayJobId": {
- "type": "integer",
- "example": 123000
- },
- "cluster": {
- "type": "string",
- "example": "fritz"
- },
- "concurrentJobs": {
- "$ref": "#/definitions/schema.JobLinkResultList"
- },
- "duration": {
- "type": "integer",
- "minimum": 1,
- "example": 43200
- },
- "energy": {
- "type": "number"
- },
- "energyFootprint": {
- "type": "object",
- "additionalProperties": {
- "type": "number"
- }
- },
- "exclusive": {
- "type": "integer",
- "maximum": 2,
- "minimum": 0,
- "example": 1
- },
- "footprint": {
- "type": "object",
- "additionalProperties": {
- "type": "number"
- }
- },
- "id": {
- "type": "integer"
- },
- "jobId": {
- "type": "integer",
- "example": 123000
- },
- "jobState": {
- "enum": [
- "completed",
- "failed",
- "cancelled",
- "stopped",
- "timeout",
- "out_of_memory"
- ],
- "allOf": [
- {
- "$ref": "#/definitions/schema.JobState"
- }
- ],
- "example": "completed"
- },
- "metaData": {
- "type": "object",
- "additionalProperties": {
- "type": "string"
- }
- },
- "monitoringStatus": {
- "type": "integer",
- "maximum": 3,
- "minimum": 0,
- "example": 1
- },
- "numAcc": {
- "type": "integer",
- "minimum": 1,
- "example": 2
- },
- "numHwthreads": {
- "type": "integer",
- "minimum": 1,
- "example": 20
- },
- "numNodes": {
- "type": "integer",
- "minimum": 1,
- "example": 2
- },
- "partition": {
- "type": "string",
- "example": "main"
- },
- "project": {
- "type": "string",
- "example": "abcd200"
- },
- "resources": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/schema.Resource"
- }
- },
- "smt": {
- "type": "integer",
- "example": 4
- },
- "startTime": {
- "type": "integer",
- "minimum": 1,
- "example": 1649723812
- },
- "statistics": {
- "type": "object",
- "additionalProperties": {
- "$ref": "#/definitions/schema.JobStatistics"
- }
- },
- "subCluster": {
- "type": "string",
- "example": "main"
- },
- "tags": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/schema.Tag"
- }
- },
- "user": {
- "type": "string",
- "example": "abcd100h"
- },
- "walltime": {
- "type": "integer",
- "minimum": 1,
- "example": 86400
- }
- }
- },
"schema.JobMetric": {
"type": "object",
"properties": {
@@ -1978,6 +1938,9 @@
},
"remove": {
"type": "boolean"
+ },
+ "unit": {
+ "$ref": "#/definitions/schema.Unit"
}
}
},
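
The new /api/nodestats/ route above accepts an api.UpdateNodeStatesRequest body and answers with api.DefaultApiResponse. Below is a hedged Go sketch of a client delivering node states: the path, JSON field names, and bearer-token auth (ApiKeyAuth) follow the definitions above, while the host, hostnames, state strings, and the token itself are placeholders.

// Hedged sketch: posting node states to the new /api/nodestats/ endpoint.
// Struct fields mirror api.UpdateNodeStatesRequest and api.Node above;
// host, node names, states, and the token are made-up example values.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

type node struct {
	Hostname string   `json:"hostname"`
	States   []string `json:"states"`
}

type updateNodeStatesRequest struct {
	Cluster string `json:"cluster"`
	Nodes   []node `json:"nodes"`
}

func main() {
	payload := updateNodeStatesRequest{
		Cluster: "fritz",
		Nodes:   []node{{Hostname: "f0101", States: []string{"allocated"}}},
	}
	body, _ := json.Marshal(payload)
	req, _ := http.NewRequest(http.MethodPost, "http://localhost:8080/api/nodestats/", bytes.NewReader(body))
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer <JWT>") // placeholder; use a real API token
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // 200 on success, with an api.DefaultApiResponse body
}
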
diff --git a/api/swagger.yaml b/api/swagger.yaml
index 96ad325..59cf6f5 100644
--- a/api/swagger.yaml
+++ b/api/swagger.yaml
@@ -31,7 +31,7 @@ definitions:
example: Debug
type: string
type: object
- api.DefaultJobApiResponse:
+ api.DefaultApiResponse:
properties:
msg:
type: string
@@ -96,7 +96,7 @@ definitions:
jobs:
description: Array of jobs
items:
- $ref: '#/definitions/schema.JobMeta'
+ $ref: '#/definitions/schema.Job'
type: array
page:
description: Page id returned
@@ -132,6 +132,15 @@ definitions:
scope:
$ref: '#/definitions/schema.MetricScope'
type: object
+ api.Node:
+ properties:
+ hostname:
+ type: string
+ states:
+ items:
+ type: string
+ type: array
+ type: object
api.StopJobApiRequest:
properties:
cluster:
@@ -154,6 +163,16 @@ definitions:
- jobState
- stopTime
type: object
+ api.UpdateNodeStatesRequest:
+ properties:
+ cluster:
+ example: fritz
+ type: string
+ nodes:
+ items:
+ $ref: '#/definitions/api.Node'
+ type: array
+ type: object
schema.Accelerator:
properties:
id:
@@ -177,7 +196,6 @@ definitions:
type: array
type: object
schema.Job:
- description: Information of a HPC job.
properties:
arrayJobId:
example: 123000
@@ -249,6 +267,11 @@ definitions:
project:
example: abcd200
type: string
+ requestedMemory:
+ description: in MB
+ example: 128000
+ minimum: 1
+ type: integer
resources:
items:
$ref: '#/definitions/schema.Resource'
@@ -257,7 +280,12 @@ definitions:
example: 4
type: integer
startTime:
- type: string
+ example: 1649723812
+ type: integer
+ statistics:
+ additionalProperties:
+ $ref: '#/definitions/schema.JobStatistics'
+ type: object
subCluster:
example: main
type: string
@@ -289,109 +317,6 @@ definitions:
$ref: '#/definitions/schema.JobLink'
type: array
type: object
- schema.JobMeta:
- description: Meta data information of a HPC job.
- properties:
- arrayJobId:
- example: 123000
- type: integer
- cluster:
- example: fritz
- type: string
- concurrentJobs:
- $ref: '#/definitions/schema.JobLinkResultList'
- duration:
- example: 43200
- minimum: 1
- type: integer
- energy:
- type: number
- energyFootprint:
- additionalProperties:
- type: number
- type: object
- exclusive:
- example: 1
- maximum: 2
- minimum: 0
- type: integer
- footprint:
- additionalProperties:
- type: number
- type: object
- id:
- type: integer
- jobId:
- example: 123000
- type: integer
- jobState:
- allOf:
- - $ref: '#/definitions/schema.JobState'
- enum:
- - completed
- - failed
- - cancelled
- - stopped
- - timeout
- - out_of_memory
- example: completed
- metaData:
- additionalProperties:
- type: string
- type: object
- monitoringStatus:
- example: 1
- maximum: 3
- minimum: 0
- type: integer
- numAcc:
- example: 2
- minimum: 1
- type: integer
- numHwthreads:
- example: 20
- minimum: 1
- type: integer
- numNodes:
- example: 2
- minimum: 1
- type: integer
- partition:
- example: main
- type: string
- project:
- example: abcd200
- type: string
- resources:
- items:
- $ref: '#/definitions/schema.Resource'
- type: array
- smt:
- example: 4
- type: integer
- startTime:
- example: 1649723812
- minimum: 1
- type: integer
- statistics:
- additionalProperties:
- $ref: '#/definitions/schema.JobStatistics'
- type: object
- subCluster:
- example: main
- type: string
- tags:
- items:
- $ref: '#/definitions/schema.Tag'
- type: array
- user:
- example: abcd100h
- type: string
- walltime:
- example: 86400
- minimum: 1
- type: integer
- type: object
schema.JobMetric:
properties:
series:
@@ -620,6 +545,8 @@ definitions:
type: number
remove:
type: boolean
+ unit:
+ $ref: '#/definitions/schema.Unit'
type: object
schema.Tag:
description: Defines a tag using name and type.
@@ -797,7 +724,7 @@ paths:
get:
description: |-
Job to get is specified by database ID
- Returns full job resource information according to 'JobMeta' scheme and all metrics according to 'JobData'.
+ Returns full job resource information according to 'Job' scheme and all metrics according to 'JobData'.
parameters:
- description: Database ID of Job
in: path
@@ -850,7 +777,7 @@ paths:
- application/json
description: |-
Job to get is specified by database ID
- Returns full job resource information according to 'JobMeta' scheme and all metrics according to 'JobData'.
+ Returns full job resource information according to 'Job' scheme and all metrics according to 'JobData'.
parameters:
- description: Database ID of Job
in: path
@@ -921,7 +848,7 @@ paths:
"200":
description: Success message
schema:
- $ref: '#/definitions/api.DefaultJobApiResponse'
+ $ref: '#/definitions/api.DefaultApiResponse'
"400":
description: Bad Request
schema:
@@ -968,7 +895,7 @@ paths:
"200":
description: Success message
schema:
- $ref: '#/definitions/api.DefaultJobApiResponse'
+ $ref: '#/definitions/api.DefaultApiResponse'
"400":
description: Bad Request
schema:
@@ -1015,7 +942,7 @@ paths:
"200":
description: Success message
schema:
- $ref: '#/definitions/api.DefaultJobApiResponse'
+ $ref: '#/definitions/api.DefaultApiResponse'
"400":
description: Bad Request
schema:
@@ -1141,21 +1068,21 @@ paths:
- application/json
description: |-
Job specified in request body will be saved to database as "running" with new DB ID.
- Job specifications follow the 'JobMeta' scheme, API will fail to execute if requirements are not met.
+ Job specifications follow the 'Job' scheme, API will fail to execute if requirements are not met.
parameters:
- description: Job to add
in: body
name: request
required: true
schema:
- $ref: '#/definitions/schema.JobMeta'
+ $ref: '#/definitions/schema.Job'
produces:
- application/json
responses:
"201":
description: Job added successfully
schema:
- $ref: '#/definitions/api.DefaultJobApiResponse'
+ $ref: '#/definitions/api.DefaultApiResponse'
"400":
description: Bad Request
schema:
@@ -1186,7 +1113,7 @@ paths:
post:
description: |-
Job to stop is specified by request body. All fields are required in this case.
- Returns full job resource information according to 'JobMeta' scheme.
+ Returns full job resource information according to 'Job' scheme.
parameters:
- description: All fields required
in: body
@@ -1200,7 +1127,7 @@ paths:
"200":
description: Success message
schema:
- $ref: '#/definitions/schema.JobMeta'
+ $ref: '#/definitions/schema.Job'
"400":
description: Bad Request
schema:
@@ -1280,6 +1207,46 @@ paths:
summary: Adds one or more tags to a job
tags:
- Job add and modify
+ /api/nodestats/:
+ post:
+ description: |-
+      Updates the node states of the given cluster in the database.
+      Request body contains the cluster name and a list of nodes with their states.
+ parameters:
+ - description: Request body containing nodes and their states
+ in: body
+ name: request
+ required: true
+ schema:
+ $ref: '#/definitions/api.UpdateNodeStatesRequest'
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: Success message
+ schema:
+ $ref: '#/definitions/api.DefaultApiResponse'
+ "400":
+ description: Bad Request
+ schema:
+ $ref: '#/definitions/api.ErrorResponse'
+ "401":
+ description: Unauthorized
+ schema:
+ $ref: '#/definitions/api.ErrorResponse'
+ "403":
+ description: Forbidden
+ schema:
+ $ref: '#/definitions/api.ErrorResponse'
+ "500":
+ description: Internal Server Error
+ schema:
+ $ref: '#/definitions/api.ErrorResponse'
+ security:
+ - ApiKeyAuth: []
+ summary: Deliver updated Slurm node states
+ tags:
+ - Nodestates
/api/users/:
get:
description: |-
diff --git a/cmd/cc-backend/cli.go b/cmd/cc-backend/cli.go
index 8d9e7e6..235a12c 100644
--- a/cmd/cc-backend/cli.go
+++ b/cmd/cc-backend/cli.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package main
@@ -7,8 +7,9 @@ package main
import "flag"
var (
- flagReinitDB, flagInit, flagServer, flagSyncLDAP, flagGops, flagMigrateDB, flagRevertDB, flagForceDB, flagDev, flagVersion, flagLogDateTime bool
- flagNewUser, flagDelUser, flagGenJWT, flagConfigFile, flagImportJob, flagLogLevel string
+ flagReinitDB, flagInit, flagServer, flagSyncLDAP, flagGops, flagMigrateDB, flagRevertDB,
+ flagForceDB, flagDev, flagVersion, flagLogDateTime, flagApplyTags bool
+ flagNewUser, flagDelUser, flagGenJWT, flagConfigFile, flagImportJob, flagLogLevel string
)
func cliInit() {
@@ -21,6 +22,7 @@ func cliInit() {
flag.BoolVar(&flagVersion, "version", false, "Show version information and exit")
flag.BoolVar(&flagMigrateDB, "migrate-db", false, "Migrate database to supported version and exit")
flag.BoolVar(&flagRevertDB, "revert-db", false, "Migrate database to previous version and exit")
+ flag.BoolVar(&flagApplyTags, "apply-tags", false, "Run taggers on all completed jobs and exit")
flag.BoolVar(&flagForceDB, "force-db", false, "Force database version, clear dirty flag and exit")
flag.BoolVar(&flagLogDateTime, "logdate", false, "Set this flag to add date and time to log messages")
flag.StringVar(&flagConfigFile, "config", "./config.json", "Specify alternative path to `config.json`")
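
The new -apply-tags flag joins the existing one-shot maintenance flags (-migrate-db, -revert-db): it runs all registered taggers over completed jobs and exits instead of starting the server. An assumed invocation, using the documented default config path:

	./cc-backend -config ./config.json -apply-tags
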
diff --git a/cmd/cc-backend/init.go b/cmd/cc-backend/init.go
index 0a5b836..b46100a 100644
--- a/cmd/cc-backend/init.go
+++ b/cmd/cc-backend/init.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package main
@@ -8,8 +8,8 @@ import (
"os"
"github.com/ClusterCockpit/cc-backend/internal/repository"
- "github.com/ClusterCockpit/cc-backend/internal/util"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/util"
)
const envString = `
@@ -73,23 +73,23 @@ const configString = `
func initEnv() {
if util.CheckFileExists("var") {
- log.Exit("Directory ./var already exists. Cautiously exiting application initialization.")
+ cclog.Exit("Directory ./var already exists. Cautiously exiting application initialization.")
}
if err := os.WriteFile("config.json", []byte(configString), 0o666); err != nil {
- log.Abortf("Could not write default ./config.json with permissions '0o666'. Application initialization failed, exited.\nError: %s\n", err.Error())
+ cclog.Abortf("Could not write default ./config.json with permissions '0o666'. Application initialization failed, exited.\nError: %s\n", err.Error())
}
if err := os.WriteFile(".env", []byte(envString), 0o666); err != nil {
- log.Abortf("Could not write default ./.env file with permissions '0o666'. Application initialization failed, exited.\nError: %s\n", err.Error())
+ cclog.Abortf("Could not write default ./.env file with permissions '0o666'. Application initialization failed, exited.\nError: %s\n", err.Error())
}
if err := os.Mkdir("var", 0o777); err != nil {
- log.Abortf("Could not create default ./var folder with permissions '0o777'. Application initialization failed, exited.\nError: %s\n", err.Error())
+ cclog.Abortf("Could not create default ./var folder with permissions '0o777'. Application initialization failed, exited.\nError: %s\n", err.Error())
}
err := repository.MigrateDB("sqlite3", "./var/job.db")
if err != nil {
- log.Abortf("Could not initialize default sqlite3 database as './var/job.db'. Application initialization failed, exited.\nError: %s\n", err.Error())
+ cclog.Abortf("Could not initialize default sqlite3 database as './var/job.db'. Application initialization failed, exited.\nError: %s\n", err.Error())
}
}
diff --git a/cmd/cc-backend/main.go b/cmd/cc-backend/main.go
index 62a9b9b..871c8dd 100644
--- a/cmd/cc-backend/main.go
+++ b/cmd/cc-backend/main.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package main
@@ -19,12 +19,15 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/importer"
"github.com/ClusterCockpit/cc-backend/internal/metricdata"
"github.com/ClusterCockpit/cc-backend/internal/repository"
+ "github.com/ClusterCockpit/cc-backend/internal/tagger"
"github.com/ClusterCockpit/cc-backend/internal/taskManager"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/runtimeEnv"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/schema"
+ "github.com/ClusterCockpit/cc-lib/util"
"github.com/google/gops/agent"
+ "github.com/joho/godotenv"
_ "github.com/go-sql-driver/mysql"
_ "github.com/mattn/go-sqlite3"
@@ -58,13 +61,12 @@ func main() {
os.Exit(0)
}
- // Apply config flags for pkg/log
- log.Init(flagLogLevel, flagLogDateTime)
+ cclog.Init(flagLogLevel, flagLogDateTime)
// If init flag set, run tasks here before any file dependencies cause errors
if flagInit {
initEnv()
- log.Exit("Successfully setup environment!\n" +
+ cclog.Exit("Successfully setup environment!\n" +
"Please review config.json and .env and adjust it to your needs.\n" +
"Add your job-archive at ./var/job-archive.")
}
@@ -72,12 +74,13 @@ func main() {
// See https://github.com/google/gops (Runtime overhead is almost zero)
if flagGops {
if err := agent.Listen(agent.Options{}); err != nil {
- log.Abortf("Could not start gops agent with 'gops/agent.Listen(agent.Options{})'. Application startup failed, exited.\nError: %s\n", err.Error())
+ cclog.Abortf("Could not start gops agent with 'gops/agent.Listen(agent.Options{})'. Application startup failed, exited.\nError: %s\n", err.Error())
}
}
- if err := runtimeEnv.LoadEnv("./.env"); err != nil && !os.IsNotExist(err) {
- log.Abortf("Could not parse existing .env file at location './.env'. Application startup failed, exited.\nError: %s\n", err.Error())
+	if err := godotenv.Load(); err != nil && !os.IsNotExist(err) {
+ cclog.Abortf("Could not parse existing .env file at location './.env'. Application startup failed, exited.\nError: %s\n", err.Error())
}
// Initialize sub-modules and handle command line flags.
@@ -95,25 +98,25 @@ func main() {
if flagMigrateDB {
err := repository.MigrateDB(config.Keys.DBDriver, config.Keys.DB)
if err != nil {
- log.Abortf("MigrateDB Failed: Could not migrate '%s' database at location '%s' to version %d.\nError: %s\n", config.Keys.DBDriver, config.Keys.DB, repository.Version, err.Error())
+ cclog.Abortf("MigrateDB Failed: Could not migrate '%s' database at location '%s' to version %d.\nError: %s\n", config.Keys.DBDriver, config.Keys.DB, repository.Version, err.Error())
}
- log.Exitf("MigrateDB Success: Migrated '%s' database at location '%s' to version %d.\n", config.Keys.DBDriver, config.Keys.DB, repository.Version)
+ cclog.Exitf("MigrateDB Success: Migrated '%s' database at location '%s' to version %d.\n", config.Keys.DBDriver, config.Keys.DB, repository.Version)
}
if flagRevertDB {
err := repository.RevertDB(config.Keys.DBDriver, config.Keys.DB)
if err != nil {
- log.Abortf("RevertDB Failed: Could not revert '%s' database at location '%s' to version %d.\nError: %s\n", config.Keys.DBDriver, config.Keys.DB, (repository.Version - 1), err.Error())
+ cclog.Abortf("RevertDB Failed: Could not revert '%s' database at location '%s' to version %d.\nError: %s\n", config.Keys.DBDriver, config.Keys.DB, (repository.Version - 1), err.Error())
}
- log.Exitf("RevertDB Success: Reverted '%s' database at location '%s' to version %d.\n", config.Keys.DBDriver, config.Keys.DB, (repository.Version - 1))
+ cclog.Exitf("RevertDB Success: Reverted '%s' database at location '%s' to version %d.\n", config.Keys.DBDriver, config.Keys.DB, (repository.Version - 1))
}
if flagForceDB {
err := repository.ForceDB(config.Keys.DBDriver, config.Keys.DB)
if err != nil {
- log.Abortf("ForceDB Failed: Could not force '%s' database at location '%s' to version %d.\nError: %s\n", config.Keys.DBDriver, config.Keys.DB, repository.Version, err.Error())
+ cclog.Abortf("ForceDB Failed: Could not force '%s' database at location '%s' to version %d.\nError: %s\n", config.Keys.DBDriver, config.Keys.DB, repository.Version, err.Error())
}
- log.Exitf("ForceDB Success: Forced '%s' database at location '%s' to version %d.\n", config.Keys.DBDriver, config.Keys.DB, repository.Version)
+ cclog.Exitf("ForceDB Success: Forced '%s' database at location '%s' to version %d.\n", config.Keys.DBDriver, config.Keys.DB, repository.Version)
}
repository.Connect(config.Keys.DBDriver, config.Keys.DB)
@@ -125,7 +128,7 @@ func main() {
if flagNewUser != "" {
parts := strings.SplitN(flagNewUser, ":", 3)
if len(parts) != 3 || len(parts[0]) == 0 {
- log.Abortf("Add User: Could not parse supplied argument format: No changes.\n"+
+ cclog.Abortf("Add User: Could not parse supplied argument format: No changes.\n"+
"Want: :[admin,support,manager,api,user]:\n"+
"Have: %s\n", flagNewUser)
}
@@ -134,18 +137,18 @@ func main() {
if err := ur.AddUser(&schema.User{
Username: parts[0], Projects: make([]string, 0), Password: parts[2], Roles: strings.Split(parts[1], ","),
}); err != nil {
- log.Abortf("Add User: Could not add new user authentication for '%s' and roles '%s'.\nError: %s\n", parts[0], parts[1], err.Error())
+ cclog.Abortf("Add User: Could not add new user authentication for '%s' and roles '%s'.\nError: %s\n", parts[0], parts[1], err.Error())
} else {
- log.Printf("Add User: Added new user '%s' with roles '%s'.\n", parts[0], parts[1])
+ cclog.Printf("Add User: Added new user '%s' with roles '%s'.\n", parts[0], parts[1])
}
}
if flagDelUser != "" {
ur := repository.GetUserRepository()
if err := ur.DelUser(flagDelUser); err != nil {
- log.Abortf("Delete User: Could not delete user '%s' from DB.\nError: %s\n", flagDelUser, err.Error())
+ cclog.Abortf("Delete User: Could not delete user '%s' from DB.\nError: %s\n", flagDelUser, err.Error())
} else {
- log.Printf("Delete User: Deleted user '%s' from DB.\n", flagDelUser)
+ cclog.Printf("Delete User: Deleted user '%s' from DB.\n", flagDelUser)
}
}
@@ -153,67 +156,78 @@ func main() {
if flagSyncLDAP {
if authHandle.LdapAuth == nil {
- log.Abort("Sync LDAP: LDAP authentication is not configured, could not synchronize. No changes, exited.")
+ cclog.Abort("Sync LDAP: LDAP authentication is not configured, could not synchronize. No changes, exited.")
}
if err := authHandle.LdapAuth.Sync(); err != nil {
- log.Abortf("Sync LDAP: Could not synchronize, failed with error.\nError: %s\n", err.Error())
+ cclog.Abortf("Sync LDAP: Could not synchronize, failed with error.\nError: %s\n", err.Error())
}
- log.Print("Sync LDAP: LDAP synchronization successfull.")
+			cclog.Print("Sync LDAP: LDAP synchronization successful.")
}
if flagGenJWT != "" {
ur := repository.GetUserRepository()
user, err := ur.GetUser(flagGenJWT)
if err != nil {
- log.Abortf("JWT: Could not get supplied user '%s' from DB. No changes, exited.\nError: %s\n", flagGenJWT, err.Error())
+ cclog.Abortf("JWT: Could not get supplied user '%s' from DB. No changes, exited.\nError: %s\n", flagGenJWT, err.Error())
}
if !user.HasRole(schema.RoleApi) {
- log.Warnf("JWT: User '%s' does not have the role 'api'. REST API endpoints will return error!\n", user.Username)
+ cclog.Warnf("JWT: User '%s' does not have the role 'api'. REST API endpoints will return error!\n", user.Username)
}
jwt, err := authHandle.JwtAuth.ProvideJWT(user)
if err != nil {
- log.Abortf("JWT: User '%s' found in DB, but failed to provide JWT.\nError: %s\n", user.Username, err.Error())
+ cclog.Abortf("JWT: User '%s' found in DB, but failed to provide JWT.\nError: %s\n", user.Username, err.Error())
}
- log.Printf("JWT: Successfully generated JWT for user '%s': %s\n", user.Username, jwt)
+ cclog.Printf("JWT: Successfully generated JWT for user '%s': %s\n", user.Username, jwt)
}
} else if flagNewUser != "" || flagDelUser != "" {
- log.Abort("Error: Arguments '--add-user' and '--del-user' can only be used if authentication is enabled. No changes, exited.")
+ cclog.Abort("Error: Arguments '--add-user' and '--del-user' can only be used if authentication is enabled. No changes, exited.")
}
if err := archive.Init(config.Keys.Archive, config.Keys.DisableArchive); err != nil {
- log.Abortf("Init: Failed to initialize archive.\nError: %s\n", err.Error())
+ cclog.Abortf("Init: Failed to initialize archive.\nError: %s\n", err.Error())
}
if err := metricdata.Init(); err != nil {
- log.Abortf("Init: Failed to initialize metricdata repository.\nError %s\n", err.Error())
+ cclog.Abortf("Init: Failed to initialize metricdata repository.\nError %s\n", err.Error())
}
if flagReinitDB {
if err := importer.InitDB(); err != nil {
- log.Abortf("Init DB: Failed to re-initialize repository DB.\nError: %s\n", err.Error())
+ cclog.Abortf("Init DB: Failed to re-initialize repository DB.\nError: %s\n", err.Error())
} else {
- log.Print("Init DB: Sucessfully re-initialized repository DB.")
+			cclog.Print("Init DB: Successfully re-initialized repository DB.")
}
}
if flagImportJob != "" {
if err := importer.HandleImportFlag(flagImportJob); err != nil {
- log.Abortf("Import Job: Job import failed.\nError: %s\n", err.Error())
+ cclog.Abortf("Import Job: Job import failed.\nError: %s\n", err.Error())
} else {
- log.Printf("Import Job: Imported Job '%s' into DB.\n", flagImportJob)
+ cclog.Printf("Import Job: Imported Job '%s' into DB.\n", flagImportJob)
+ }
+ }
+
+ if config.Keys.EnableJobTaggers {
+ tagger.Init()
+ }
+
+ if flagApplyTags {
+ if err := tagger.RunTaggers(); err != nil {
+			cclog.Abortf("Apply Tags: Running job taggers failed.\nError: %s\n", err.Error())
}
}
if !flagServer {
- log.Exit("No errors, server flag not set. Exiting cc-backend.")
+ cclog.Exit("No errors, server flag not set. Exiting cc-backend.")
}
archiver.Start(repository.GetJobRepository())
+
taskManager.Start()
serverInit()
@@ -235,6 +249,8 @@ func main() {
serverShutdown()
+ util.FsWatcherShutdown()
+
taskManager.Shutdown()
}()
@@ -243,5 +259,5 @@ func main() {
}
runtimeEnv.SystemdNotifiy(true, "running")
wg.Wait()
- log.Print("Graceful shutdown completed!")
+ cclog.Print("Graceful shutdown completed!")
}
diff --git a/cmd/cc-backend/server.go b/cmd/cc-backend/server.go
index cbd85b7..c01008a 100644
--- a/cmd/cc-backend/server.go
+++ b/cmd/cc-backend/server.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package main
@@ -27,9 +27,9 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/graph"
"github.com/ClusterCockpit/cc-backend/internal/graph/generated"
"github.com/ClusterCockpit/cc-backend/internal/routerConfig"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/runtimeEnv"
"github.com/ClusterCockpit/cc-backend/web"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
httpSwagger "github.com/swaggo/http-swagger"
@@ -101,7 +101,7 @@ func serverInit() {
router.HandleFunc("/login", func(rw http.ResponseWriter, r *http.Request) {
rw.Header().Add("Content-Type", "text/html; charset=utf-8")
- log.Debugf("##%v##", info)
+ cclog.Debugf("##%v##", info)
web.RenderTemplate(rw, "login.tmpl", &web.Page{Title: "Login", Build: buildInfo, Infos: info})
}).Methods(http.MethodGet)
router.HandleFunc("/imprint", func(rw http.ResponseWriter, r *http.Request) {
@@ -237,7 +237,7 @@ func serverInit() {
if config.Keys.EmbedStaticFiles {
if i, err := os.Stat("./var/img"); err == nil {
if i.IsDir() {
- log.Info("Use local directory for static images")
+ cclog.Info("Use local directory for static images")
router.PathPrefix("/img/").Handler(http.StripPrefix("/img/", http.FileServer(http.Dir("./var/img"))))
}
}
@@ -258,12 +258,12 @@ func serverInit() {
func serverStart() {
handler := handlers.CustomLoggingHandler(io.Discard, router, func(_ io.Writer, params handlers.LogFormatterParams) {
if strings.HasPrefix(params.Request.RequestURI, "/api/") {
- log.Debugf("%s %s (%d, %.02fkb, %dms)",
+ cclog.Debugf("%s %s (%d, %.02fkb, %dms)",
params.Request.Method, params.URL.RequestURI(),
params.StatusCode, float32(params.Size)/1024,
time.Since(params.TimeStamp).Milliseconds())
} else {
- log.Debugf("%s %s (%d, %.02fkb, %dms)",
+ cclog.Debugf("%s %s (%d, %.02fkb, %dms)",
params.Request.Method, params.URL.RequestURI(),
params.StatusCode, float32(params.Size)/1024,
time.Since(params.TimeStamp).Milliseconds())
@@ -280,7 +280,7 @@ func serverStart() {
// Start http or https server
listener, err := net.Listen("tcp", config.Keys.Addr)
if err != nil {
- log.Abortf("Server Start: Starting http listener on '%s' failed.\nError: %s\n", config.Keys.Addr, err.Error())
+ cclog.Abortf("Server Start: Starting http listener on '%s' failed.\nError: %s\n", config.Keys.Addr, err.Error())
}
if !strings.HasSuffix(config.Keys.Addr, ":80") && config.Keys.RedirectHttpTo != "" {
@@ -293,7 +293,7 @@ func serverStart() {
cert, err := tls.LoadX509KeyPair(
config.Keys.HttpsCertFile, config.Keys.HttpsKeyFile)
if err != nil {
- log.Abortf("Server Start: Loading X509 keypair failed. Check options 'https-cert-file' and 'https-key-file' in 'config.json'.\nError: %s\n", err.Error())
+ cclog.Abortf("Server Start: Loading X509 keypair failed. Check options 'https-cert-file' and 'https-key-file' in 'config.json'.\nError: %s\n", err.Error())
}
listener = tls.NewListener(listener, &tls.Config{
Certificates: []tls.Certificate{cert},
@@ -304,20 +304,20 @@ func serverStart() {
MinVersion: tls.VersionTLS12,
PreferServerCipherSuites: true,
})
- log.Printf("HTTPS server listening at %s...\n", config.Keys.Addr)
+ cclog.Printf("HTTPS server listening at %s...\n", config.Keys.Addr)
} else {
- log.Printf("HTTP server listening at %s...\n", config.Keys.Addr)
+ cclog.Printf("HTTP server listening at %s...\n", config.Keys.Addr)
}
//
// Because this program will want to bind to a privileged port (like 80), the listener must
// be established first, then the user can be changed, and after that,
// the actual http server can be started.
if err := runtimeEnv.DropPrivileges(config.Keys.Group, config.Keys.User); err != nil {
- log.Abortf("Server Start: Error while preparing server start.\nError: %s\n", err.Error())
+ cclog.Abortf("Server Start: Error while preparing server start.\nError: %s\n", err.Error())
}
if err = server.Serve(listener); err != nil && err != http.ErrServerClosed {
- log.Abortf("Server Start: Starting server failed.\nError: %s\n", err.Error())
+ cclog.Abortf("Server Start: Starting server failed.\nError: %s\n", err.Error())
}
}
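
The comment kept in serverStart above spells out the ordering constraint for privileged ports: bind the listener while still privileged, drop privileges, and only then serve. Below is a hedged, self-contained Go sketch of that pattern; the syscall-based drop merely stands in for what runtimeEnv.DropPrivileges does here, and the UID/GID values are assumptions (Linux-specific).

// Hedged sketch of bind-then-drop-privileges, not cc-backend code.
package main

import (
	"net"
	"net/http"
	"syscall"
)

func main() {
	// 1. Bind the privileged port while still running as root.
	listener, err := net.Listen("tcp", ":80")
	if err != nil {
		panic(err)
	}
	// 2. Drop privileges; group before user, or Setgid fails afterwards.
	if err := syscall.Setgid(1000); err != nil { // example gid
		panic(err)
	}
	if err := syscall.Setuid(1000); err != nil { // example uid
		panic(err)
	}
	// 3. Only now start serving on the already-bound listener.
	panic(http.Serve(listener, http.NotFoundHandler()))
}
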
diff --git a/go.mod b/go.mod
index 47e3497..4b5171c 100644
--- a/go.mod
+++ b/go.mod
@@ -6,9 +6,10 @@ toolchain go1.24.1
require (
github.com/99designs/gqlgen v0.17.66
- github.com/ClusterCockpit/cc-units v0.4.0
+ github.com/ClusterCockpit/cc-lib v0.3.0
github.com/Masterminds/squirrel v1.5.4
github.com/coreos/go-oidc/v3 v3.12.0
+ github.com/expr-lang/expr v1.17.3
github.com/go-co-op/gocron/v2 v2.16.0
github.com/go-ldap/ldap/v3 v3.4.10
github.com/go-sql-driver/mysql v1.9.0
@@ -18,18 +19,17 @@ require (
github.com/gorilla/handlers v1.5.2
github.com/gorilla/mux v1.8.1
github.com/gorilla/sessions v1.4.0
- github.com/influxdata/influxdb-client-go/v2 v2.14.0
github.com/jmoiron/sqlx v1.4.0
+ github.com/joho/godotenv v1.5.1
github.com/mattn/go-sqlite3 v1.14.24
- github.com/prometheus/client_golang v1.21.0
- github.com/prometheus/common v0.62.0
+ github.com/prometheus/client_golang v1.22.0
+ github.com/prometheus/common v0.63.0
github.com/qustavo/sqlhooks/v2 v2.1.0
github.com/santhosh-tekuri/jsonschema/v5 v5.3.1
github.com/swaggo/http-swagger v1.3.4
github.com/swaggo/swag v1.16.4
github.com/vektah/gqlparser/v2 v2.5.22
- golang.org/x/crypto v0.35.0
- golang.org/x/exp v0.0.0-20250218142911-aa4b98e5adaa
+ golang.org/x/crypto v0.37.0
golang.org/x/oauth2 v0.27.0
golang.org/x/time v0.5.0
)
@@ -39,11 +39,11 @@ require (
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
github.com/KyleBanks/depth v1.2.1 // indirect
github.com/agnivade/levenshtein v1.2.1 // indirect
- github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
+ github.com/fsnotify/fsnotify v1.9.0 // indirect
github.com/go-asn1-ber/asn1-ber v1.5.7 // indirect
github.com/go-jose/go-jose/v4 v4.0.5 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
@@ -57,7 +57,6 @@ require (
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
- github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf // indirect
github.com/jonboulle/clockwork v0.5.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
@@ -69,9 +68,8 @@ require (
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
- github.com/oapi-codegen/runtime v1.1.1 // indirect
- github.com/prometheus/client_model v0.6.1 // indirect
- github.com/prometheus/procfs v0.15.1 // indirect
+ github.com/prometheus/client_model v0.6.2 // indirect
+ github.com/prometheus/procfs v0.16.1 // indirect
github.com/robfig/cron/v3 v3.0.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/sosodev/duration v1.3.1 // indirect
@@ -79,13 +77,14 @@ require (
github.com/urfave/cli/v2 v2.27.5 // indirect
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
go.uber.org/atomic v1.11.0 // indirect
- golang.org/x/mod v0.23.0 // indirect
- golang.org/x/net v0.36.0 // indirect
- golang.org/x/sync v0.11.0 // indirect
- golang.org/x/sys v0.30.0 // indirect
- golang.org/x/text v0.22.0 // indirect
- golang.org/x/tools v0.30.0 // indirect
- google.golang.org/protobuf v1.36.5 // indirect
+ golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 // indirect
+ golang.org/x/mod v0.24.0 // indirect
+ golang.org/x/net v0.39.0 // indirect
+ golang.org/x/sync v0.13.0 // indirect
+ golang.org/x/sys v0.32.0 // indirect
+ golang.org/x/text v0.24.0 // indirect
+ golang.org/x/tools v0.32.0 // indirect
+ google.golang.org/protobuf v1.36.6 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
diff --git a/go.sum b/go.sum
index e1725ed..f3d25ad 100644
--- a/go.sum
+++ b/go.sum
@@ -6,8 +6,8 @@ github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
-github.com/ClusterCockpit/cc-units v0.4.0 h1:zP5DOu99GmErW0tCDf0gcLrlWt42RQ9dpoONEOh4cI0=
-github.com/ClusterCockpit/cc-units v0.4.0/go.mod h1:3S3PAhAayS3pbgcT4q9Vn9VJw22Op51X0YimtG77zBw=
+github.com/ClusterCockpit/cc-lib v0.3.0 h1:HEWOgnzRM01U10ZFfpiUWMzkLHg5nPdXZqdsiI2q4x0=
+github.com/ClusterCockpit/cc-lib v0.3.0/go.mod h1:7CuXVNIJdynMZf6B9v4m54VCbbFg3ZD0tvLw2bVxN0A=
github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM=
@@ -16,7 +16,6 @@ github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERo
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/PuerkitoBio/goquery v1.9.3 h1:mpJr/ikUA9/GNJB/DBZcGeFDXUtosHRyRrwh7KGdTG0=
github.com/PuerkitoBio/goquery v1.9.3/go.mod h1:1ndLHPdTz+DyQPICCWYlYQMPl0oXZj0G6D4LCYA6u4U=
-github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk=
github.com/agnivade/levenshtein v1.2.1 h1:EHBY3UOn1gwdy/VbFwgo4cxecRznFk7fKWN1KOX7eoM=
github.com/agnivade/levenshtein v1.2.1/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU=
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI=
@@ -25,13 +24,10 @@ github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNg
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
github.com/andybalholm/cascadia v1.3.2 h1:3Xi6Dw5lHF15JtdcmAHD3i1+T8plmv7BQ/nsViSLyss=
github.com/andybalholm/cascadia v1.3.2/go.mod h1:7gtRlve5FxPPgIgX36uWBX58OdBsSS6lUvCFb+h7KvU=
-github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ=
-github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk=
github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q=
github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/coreos/go-oidc/v3 v3.12.0 h1:sJk+8G2qq94rDI6ehZ71Bol3oUHy63qNYmkiSjrc/Jo=
@@ -53,8 +49,12 @@ github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/expr-lang/expr v1.17.3 h1:myeTTuDFz7k6eFe/JPlep/UsiIjVhG61FMHFu63U7j0=
+github.com/expr-lang/expr v1.17.3/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
+github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/go-asn1-ber/asn1-ber v1.5.7 h1:DTX+lbVTWaTw1hQ+PbZPlnDZPEIs0SS/GCZAl535dDk=
github.com/go-asn1-ber/asn1-ber v1.5.7/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-co-op/gocron/v2 v2.16.0 h1:uqUF6WFZ4enRU45pWFNcn1xpDLc+jBOTKhPQI16Z1xs=
@@ -88,8 +88,9 @@ github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVI
github.com/golang-migrate/migrate/v4 v4.18.2 h1:2VSCMz7x7mjyTXx3m2zPokOY82LTRgxK1yQYKo6wWQ8=
github.com/golang-migrate/migrate/v4 v4.18.2/go.mod h1:2CM6tJvn2kqPXwnXO/d3rAQYiyoIm180VsO8PRX6Rpk=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -119,10 +120,6 @@ github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/C
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
-github.com/influxdata/influxdb-client-go/v2 v2.14.0 h1:AjbBfJuq+QoaXNcrova8smSjwJdUHnwvfjMF71M1iI4=
-github.com/influxdata/influxdb-client-go/v2 v2.14.0/go.mod h1:Ahpm3QXKMJslpXl3IftVLVezreAUtBOTZssDrjZEFHI=
-github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf h1:7JTmneyiNEwVBOHSjoMxiWAqB992atOeepeFYegn5RU=
-github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=
github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=
github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo=
@@ -137,6 +134,8 @@ github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZ
github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o=
github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY=
+github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
+github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
github.com/jonboulle/clockwork v0.5.0 h1:Hyh9A8u51kptdkR+cqRpT1EebBwTn1oK9YfGYbdFz6I=
github.com/jonboulle/clockwork v0.5.0/go.mod h1:3mZlmanh0g2NDKO5TWZVJAfofYk64M7XN3SzBPjZF60=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
@@ -145,9 +144,6 @@ github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2E
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
-github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE=
-github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
-github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -180,8 +176,6 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/oapi-codegen/runtime v1.1.1 h1:EXLHh0DXIJnWhdRPN2w4MXAzFyE4CskzhNLUmtpMYro=
-github.com/oapi-codegen/runtime v1.1.1/go.mod h1:SK9X900oXmPWilYR5/WKPzt3Kqxn/uS/+lbpREv+eCg=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
@@ -191,14 +185,14 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v1.21.0 h1:DIsaGmiaBkSangBgMtWdNfxbMNdku5IK6iNhrEqWvdA=
-github.com/prometheus/client_golang v1.21.0/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
-github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
-github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
-github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
-github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
-github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
-github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
+github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
+github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k=
+github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18=
+github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
+github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/qustavo/sqlhooks/v2 v2.1.0 h1:54yBemHnGHp/7xgT+pxwmIlMSDNYKx5JW5dfRAiCZi0=
github.com/qustavo/sqlhooks/v2 v2.1.0/go.mod h1:aMREyKo7fOKTwiLuWPsaHRXEmtqG4yREztO0idF83AU=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
@@ -213,7 +207,6 @@ github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
github.com/sosodev/duration v1.3.1 h1:qtHBDMQ6lvMQsL15g4aopM4HEfOaYuhWBw3NPTtlqq4=
github.com/sosodev/duration v1.3.1/go.mod h1:RQIBBX0+fMLc/D9+Jb/fwvVmo0eZvDDEERAikUR6SDg=
-github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
@@ -257,17 +250,17 @@ golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliY
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
-golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs=
-golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ=
-golang.org/x/exp v0.0.0-20250218142911-aa4b98e5adaa h1:t2QcU6V556bFjYgu4L6C+6VrCPyJZ+eyRsABUPs1mz4=
-golang.org/x/exp v0.0.0-20250218142911-aa4b98e5adaa/go.mod h1:BHOTPb3L19zxehTsLoJXVaTktb06DFgmdW6Wb9s8jqk=
+golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
+golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
+golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM=
+golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
-golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM=
-golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
+golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
+golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
@@ -279,8 +272,8 @@ golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
-golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA=
-golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I=
+golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
+golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -290,8 +283,8 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
-golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
-golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
+golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -303,8 +296,8 @@ golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
-golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
+golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -323,8 +316,8 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
-golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
-golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
+golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
+golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -333,11 +326,11 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
-golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY=
-golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY=
+golang.org/x/tools v0.32.0 h1:Q7N1vhpkQv7ybVzLFtTjvQya2ewbwNDZzUgfXGqtMWU=
+golang.org/x/tools v0.32.0/go.mod h1:ZxrU41P/wAbZD8EDa6dDCa6XfpkhJ7HFMjHJXfBDu8s=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
-google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
+google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
diff --git a/gqlgen.yml b/gqlgen.yml
index ccd95ff..3118ec9 100644
--- a/gqlgen.yml
+++ b/gqlgen.yml
@@ -51,61 +51,52 @@ models:
- github.com/99designs/gqlgen/graphql.Int64
- github.com/99designs/gqlgen/graphql.Int32
Job:
- model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Job"
+ model: "github.com/ClusterCockpit/cc-lib/schema.Job"
fields:
tags:
resolver: true
metaData:
resolver: true
Cluster:
- model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Cluster"
+ model: "github.com/ClusterCockpit/cc-lib/schema.Cluster"
fields:
partitions:
resolver: true
- NullableFloat:
- { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Float" }
- MetricScope:
- { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.MetricScope" }
- MetricValue:
- { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.MetricValue" }
+ Node:
+ model: "github.com/ClusterCockpit/cc-lib/schema.Node"
+ fields:
+ metaData:
+ resolver: true
+ NullableFloat: { model: "github.com/ClusterCockpit/cc-lib/schema.Float" }
+ MetricScope: { model: "github.com/ClusterCockpit/cc-lib/schema.MetricScope" }
+ MetricValue: { model: "github.com/ClusterCockpit/cc-lib/schema.MetricValue" }
JobStatistics:
- { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.JobStatistics" }
+ { model: "github.com/ClusterCockpit/cc-lib/schema.JobStatistics" }
GlobalMetricListItem:
- {
- model: "github.com/ClusterCockpit/cc-backend/pkg/schema.GlobalMetricListItem",
- }
+ { model: "github.com/ClusterCockpit/cc-lib/schema.GlobalMetricListItem" }
ClusterSupport:
- { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.ClusterSupport" }
- Tag: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Tag" }
- Resource:
- { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Resource" }
- JobState:
- { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.JobState" }
- TimeRange:
- { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.TimeRange" }
- IntRange:
- { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.IntRange" }
- JobMetric:
- { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.JobMetric" }
- Series: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Series" }
+ { model: "github.com/ClusterCockpit/cc-lib/schema.ClusterSupport" }
+ Tag: { model: "github.com/ClusterCockpit/cc-lib/schema.Tag" }
+ Resource: { model: "github.com/ClusterCockpit/cc-lib/schema.Resource" }
+ JobState: { model: "github.com/ClusterCockpit/cc-lib/schema.JobState" }
+ MonitoringState:
+ { model: "github.com/ClusterCockpit/cc-lib/schema.NodeState" }
+ HealthState:
+ { model: "github.com/ClusterCockpit/cc-lib/schema.MonitoringState" }
+ TimeRange: { model: "github.com/ClusterCockpit/cc-lib/schema.TimeRange" }
+ IntRange: { model: "github.com/ClusterCockpit/cc-lib/schema.IntRange" }
+ JobMetric: { model: "github.com/ClusterCockpit/cc-lib/schema.JobMetric" }
+ Series: { model: "github.com/ClusterCockpit/cc-lib/schema.Series" }
MetricStatistics:
- {
- model: "github.com/ClusterCockpit/cc-backend/pkg/schema.MetricStatistics",
- }
+ { model: "github.com/ClusterCockpit/cc-lib/schema.MetricStatistics" }
MetricConfig:
- { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.MetricConfig" }
+ { model: "github.com/ClusterCockpit/cc-lib/schema.MetricConfig" }
SubClusterConfig:
- {
- model: "github.com/ClusterCockpit/cc-backend/pkg/schema.SubClusterConfig",
- }
- Accelerator:
- { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Accelerator" }
- Topology:
- { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Topology" }
+ { model: "github.com/ClusterCockpit/cc-lib/schema.SubClusterConfig" }
+ Accelerator: { model: "github.com/ClusterCockpit/cc-lib/schema.Accelerator" }
+ Topology: { model: "github.com/ClusterCockpit/cc-lib/schema.Topology" }
FilterRanges:
- { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.FilterRanges" }
- SubCluster:
- { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.SubCluster" }
- StatsSeries:
- { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.StatsSeries" }
- Unit: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Unit" }
+ { model: "github.com/ClusterCockpit/cc-lib/schema.FilterRanges" }
+ SubCluster: { model: "github.com/ClusterCockpit/cc-lib/schema.SubCluster" }
+ StatsSeries: { model: "github.com/ClusterCockpit/cc-lib/schema.StatsSeries" }
+ Unit: { model: "github.com/ClusterCockpit/cc-lib/schema.Unit" }
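
For the new scalar bindings, gqlgen expects the bound Go types to implement its graphql.Marshaler/Unmarshaler interfaces. A minimal sketch of a string-backed scalar such as NodeState, assuming cc-lib follows the same MarshalGQL/UnmarshalGQL pattern as the former pkg/schema types (names and behavior here are illustrative, not the actual cc-lib source):

package schema

import (
	"fmt"
	"io"
	"strconv"
)

// NodeState is a sketch of a string-backed GraphQL scalar.
type NodeState string

// UnmarshalGQL implements gqlgen's graphql.Unmarshaler interface.
func (e *NodeState) UnmarshalGQL(v any) error {
	str, ok := v.(string)
	if !ok {
		return fmt.Errorf("NodeState must be a string")
	}
	*e = NodeState(str)
	return nil
}

// MarshalGQL implements gqlgen's graphql.Marshaler interface.
func (e NodeState) MarshalGQL(w io.Writer) {
	io.WriteString(w, strconv.Quote(string(e)))
}
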
diff --git a/internal/api/api_test.go b/internal/api/api_test.go
index e67813c..9b792c2 100644
--- a/internal/api/api_test.go
+++ b/internal/api/api_test.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package api_test
@@ -27,8 +27,8 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/metricdata"
"github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/schema"
"github.com/gorilla/mux"
_ "github.com/mattn/go-sqlite3"
@@ -116,14 +116,14 @@ func setup(t *testing.T) *api.RestApi {
]
}`
- log.Init("info", true)
+ cclog.Init("info", true)
tmpdir := t.TempDir()
jobarchive := filepath.Join(tmpdir, "job-archive")
if err := os.Mkdir(jobarchive, 0777); err != nil {
t.Fatal(err)
}
- if err := os.WriteFile(filepath.Join(jobarchive, "version.txt"), []byte(fmt.Sprintf("%d", 2)), 0666); err != nil {
+ if err := os.WriteFile(filepath.Join(jobarchive, "version.txt"), fmt.Appendf(nil, "%d", 2), 0666); err != nil {
t.Fatal(err)
}
@@ -204,11 +204,11 @@ func TestRestApi(t *testing.T) {
restapi.MountApiRoutes(r)
var TestJobId int64 = 123
- var TestClusterName string = "testcluster"
+ TestClusterName := "testcluster"
var TestStartTime int64 = 123456789
const startJobBody string = `{
- "jobId": 123,
+ "jobId": 123,
"user": "testuser",
"project": "testproj",
"cluster": "testcluster",
@@ -221,7 +221,6 @@ func TestRestApi(t *testing.T) {
"exclusive": 1,
"monitoringStatus": 1,
"smt": 1,
- "tags": [{ "type": "testTagType", "name": "testTagName", "scope": "testuser" }],
"resources": [
{
"hostname": "host123",
@@ -252,16 +251,17 @@ func TestRestApi(t *testing.T) {
if response.StatusCode != http.StatusCreated {
t.Fatal(response.Status, recorder.Body.String())
}
- resolver := graph.GetResolverInstance()
+ // resolver := graph.GetResolverInstance()
+ restapi.JobRepository.SyncJobs()
job, err := restapi.JobRepository.Find(&TestJobId, &TestClusterName, &TestStartTime)
if err != nil {
t.Fatal(err)
}
- job.Tags, err = resolver.Job().Tags(ctx, job)
- if err != nil {
- t.Fatal(err)
- }
+ // job.Tags, err = resolver.Job().Tags(ctx, job)
+ // if err != nil {
+ // t.Fatal(err)
+ // }
if job.JobID != 123 ||
job.User != "testuser" ||
@@ -278,13 +278,13 @@ func TestRestApi(t *testing.T) {
job.MonitoringStatus != 1 ||
job.SMT != 1 ||
!reflect.DeepEqual(job.Resources, []*schema.Resource{{Hostname: "host123", HWThreads: []int{0, 1, 2, 3, 4, 5, 6, 7}}}) ||
- job.StartTime.Unix() != 123456789 {
+ job.StartTime != 123456789 {
t.Fatalf("unexpected job properties: %#v", job)
}
- if len(job.Tags) != 1 || job.Tags[0].Type != "testTagType" || job.Tags[0].Name != "testTagName" || job.Tags[0].Scope != "testuser" {
- t.Fatalf("unexpected tags: %#v", job.Tags)
- }
+ // if len(job.Tags) != 1 || job.Tags[0].Type != "testTagType" || job.Tags[0].Name != "testTagName" || job.Tags[0].Scope != "testuser" {
+ // t.Fatalf("unexpected tags: %#v", job.Tags)
+ // }
}); !ok {
return
}
@@ -352,7 +352,7 @@ func TestRestApi(t *testing.T) {
t.Run("CheckDoubleStart", func(t *testing.T) {
// Starting a job with the same jobId and cluster should only be allowed if the startTime is far apart!
- body := strings.Replace(startJobBody, `"startTime": 123456789`, `"startTime": 123456790`, -1)
+ body := strings.ReplaceAll(startJobBody, `"startTime": 123456789`, `"startTime": 123456790`)
req := httptest.NewRequest(http.MethodPost, "/jobs/start_job/", bytes.NewBuffer([]byte(body)))
recorder := httptest.NewRecorder()
@@ -402,6 +402,7 @@ func TestRestApi(t *testing.T) {
}
time.Sleep(1 * time.Second)
+ restapi.JobRepository.SyncJobs()
const stopJobBodyFailed string = `{
"jobId": 12345,
diff --git a/internal/api/cluster.go b/internal/api/cluster.go
new file mode 100644
index 0000000..0a11d9d
--- /dev/null
+++ b/internal/api/cluster.go
@@ -0,0 +1,70 @@
+// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
+// All rights reserved. This file is part of cc-backend.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+package api
+
+import (
+ "bufio"
+ "encoding/json"
+ "fmt"
+ "net/http"
+
+ "github.com/ClusterCockpit/cc-backend/internal/repository"
+ "github.com/ClusterCockpit/cc-backend/pkg/archive"
+ "github.com/ClusterCockpit/cc-lib/schema"
+)
+
+// GetClustersApiResponse model
+type GetClustersApiResponse struct {
+ Clusters []*schema.Cluster `json:"clusters"` // Array of clusters
+}
+
+// getClusters godoc
+// @summary Lists all cluster configs
+// @tags Cluster query
+// @description Get a list of all cluster configs. Specific cluster can be requested using query parameter.
+// @produce json
+// @param cluster query string false "Job Cluster"
+// @success 200 {object} api.GetClustersApiResponse "Array of clusters"
+// @failure 400 {object} api.ErrorResponse "Bad Request"
+// @failure 401 {object} api.ErrorResponse "Unauthorized"
+// @failure 403 {object} api.ErrorResponse "Forbidden"
+// @failure 500 {object} api.ErrorResponse "Internal Server Error"
+// @security ApiKeyAuth
+// @router /api/clusters/ [get]
+func (api *RestApi) getClusters(rw http.ResponseWriter, r *http.Request) {
+ if user := repository.GetUserFromContext(r.Context()); user != nil &&
+ !user.HasRole(schema.RoleApi) {
+
+ handleError(fmt.Errorf("missing role: %v", schema.GetRoleString(schema.RoleApi)), http.StatusForbidden, rw)
+ return
+ }
+
+ rw.Header().Add("Content-Type", "application/json")
+ bw := bufio.NewWriter(rw)
+ defer bw.Flush()
+
+ var clusters []*schema.Cluster
+
+ if r.URL.Query().Has("cluster") {
+ name := r.URL.Query().Get("cluster")
+ cluster := archive.GetCluster(name)
+ if cluster == nil {
+ handleError(fmt.Errorf("unknown cluster: %s", name), http.StatusBadRequest, rw)
+ return
+ }
+ clusters = append(clusters, cluster)
+ } else {
+ clusters = archive.Clusters
+ }
+
+ payload := GetClustersApiResponse{
+ Clusters: clusters,
+ }
+
+ if err := json.NewEncoder(bw).Encode(payload); err != nil {
+ handleError(err, http.StatusInternalServerError, rw)
+ return
+ }
+}
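
A hedged usage sketch for the new endpoint (host, port, and token are placeholders):

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Request a single cluster config; omit the query parameter to list all clusters.
	req, err := http.NewRequest(http.MethodGet,
		"http://localhost:8080/api/clusters/?cluster=fritz", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer <JWT>") // placeholder API token

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body)) // {"clusters":[...]}
}
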
diff --git a/internal/api/docs.go b/internal/api/docs.go
index 567789c..83843eb 100644
--- a/internal/api/docs.go
+++ b/internal/api/docs.go
@@ -208,7 +208,7 @@ const docTemplate = `{
"200": {
"description": "Success message",
"schema": {
- "$ref": "#/definitions/api.DefaultJobApiResponse"
+ "$ref": "#/definitions/api.DefaultApiResponse"
}
},
"400": {
@@ -278,7 +278,7 @@ const docTemplate = `{
"200": {
"description": "Success message",
"schema": {
- "$ref": "#/definitions/api.DefaultJobApiResponse"
+ "$ref": "#/definitions/api.DefaultApiResponse"
}
},
"400": {
@@ -348,7 +348,7 @@ const docTemplate = `{
"200": {
"description": "Success message",
"schema": {
- "$ref": "#/definitions/api.DefaultJobApiResponse"
+ "$ref": "#/definitions/api.DefaultApiResponse"
}
},
"400": {
@@ -530,7 +530,7 @@ const docTemplate = `{
"ApiKeyAuth": []
}
],
- "description": "Job specified in request body will be saved to database as \"running\" with new DB ID.\nJob specifications follow the 'JobMeta' scheme, API will fail to execute if requirements are not met.",
+ "description": "Job specified in request body will be saved to database as \"running\" with new DB ID.\nJob specifications follow the 'Job' scheme, API will fail to execute if requirements are not met.",
"consumes": [
"application/json"
],
@@ -548,7 +548,7 @@ const docTemplate = `{
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/schema.JobMeta"
+ "$ref": "#/definitions/schema.Job"
}
}
],
@@ -556,7 +556,7 @@ const docTemplate = `{
"201": {
"description": "Job added successfully",
"schema": {
- "$ref": "#/definitions/api.DefaultJobApiResponse"
+ "$ref": "#/definitions/api.DefaultApiResponse"
}
},
"400": {
@@ -599,7 +599,7 @@ const docTemplate = `{
"ApiKeyAuth": []
}
],
- "description": "Job to stop is specified by request body. All fields are required in this case.\nReturns full job resource information according to 'JobMeta' scheme.",
+ "description": "Job to stop is specified by request body. All fields are required in this case.\nReturns full job resource information according to 'Job' scheme.",
"produces": [
"application/json"
],
@@ -622,7 +622,7 @@ const docTemplate = `{
"200": {
"description": "Success message",
"schema": {
- "$ref": "#/definitions/schema.JobMeta"
+ "$ref": "#/definitions/schema.Job"
}
},
"400": {
@@ -744,7 +744,7 @@ const docTemplate = `{
"ApiKeyAuth": []
}
],
- "description": "Job to get is specified by database ID\nReturns full job resource information according to 'JobMeta' scheme and all metrics according to 'JobData'.",
+ "description": "Job to get is specified by database ID\nReturns full job resource information according to 'Job' scheme and all metrics according to 'JobData'.",
"produces": [
"application/json"
],
@@ -818,7 +818,7 @@ const docTemplate = `{
"ApiKeyAuth": []
}
],
- "description": "Job to get is specified by database ID\nReturns full job resource information according to 'JobMeta' scheme and all metrics according to 'JobData'.",
+ "description": "Job to get is specified by database ID\nReturns full job resource information according to 'Job' scheme and all metrics according to 'JobData'.",
"consumes": [
"application/json"
],
@@ -896,6 +896,66 @@ const docTemplate = `{
}
}
},
+ "/api/nodestats/": {
+ "post": {
+ "security": [
+ {
+ "ApiKeyAuth": []
+ }
+ ],
+ "description": "Returns a JSON-encoded list of users.\nRequired query-parameter defines if all users or only users with additional special roles are returned.",
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Nodestates"
+ ],
+ "summary": "Deliver updated Slurm node states",
+ "parameters": [
+ {
+ "description": "Request body containing nodes and their states",
+ "name": "request",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/api.UpdateNodeStatesRequest"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "Success message",
+ "schema": {
+ "$ref": "#/definitions/api.DefaultApiResponse"
+ }
+ },
+ "400": {
+ "description": "Bad Request",
+ "schema": {
+ "$ref": "#/definitions/api.ErrorResponse"
+ }
+ },
+ "401": {
+ "description": "Unauthorized",
+ "schema": {
+ "$ref": "#/definitions/api.ErrorResponse"
+ }
+ },
+ "403": {
+ "description": "Forbidden",
+ "schema": {
+ "$ref": "#/definitions/api.ErrorResponse"
+ }
+ },
+ "500": {
+ "description": "Internal Server Error",
+ "schema": {
+ "$ref": "#/definitions/api.ErrorResponse"
+ }
+ }
+ }
+ }
+ },
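
Combining the api.UpdateNodeStatesRequest and api.Node definitions added below, a request body for /api/nodestats/ would look roughly like this (hostnames and state strings are illustrative):

{
  "cluster": "fritz",
  "nodes": [
    { "hostname": "f0101", "states": ["allocated"] },
    { "hostname": "f0102", "states": ["idle", "drain"] }
  ]
}
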
"/api/users/": {
"get": {
"security": [
@@ -1144,7 +1204,7 @@ const docTemplate = `{
}
}
},
- "api.DefaultJobApiResponse": {
+ "api.DefaultApiResponse": {
"type": "object",
"properties": {
"msg": {
@@ -1238,7 +1298,7 @@ const docTemplate = `{
"description": "Array of jobs",
"type": "array",
"items": {
- "$ref": "#/definitions/schema.JobMeta"
+ "$ref": "#/definitions/schema.Job"
}
},
"page": {
@@ -1292,6 +1352,20 @@ const docTemplate = `{
}
}
},
+ "api.Node": {
+ "type": "object",
+ "properties": {
+ "hostname": {
+ "type": "string"
+ },
+ "states": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ },
"api.StopJobApiRequest": {
"type": "object",
"required": [
@@ -1325,6 +1399,21 @@ const docTemplate = `{
}
}
},
+ "api.UpdateNodeStatesRequest": {
+ "type": "object",
+ "properties": {
+ "cluster": {
+ "type": "string",
+ "example": "fritz"
+ },
+ "nodes": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/api.Node"
+ }
+ }
+ }
+ },
"schema.Accelerator": {
"type": "object",
"properties": {
@@ -1360,7 +1449,6 @@ const docTemplate = `{
}
},
"schema.Job": {
- "description": "Information of a HPC job.",
"type": "object",
"properties": {
"arrayJobId": {
@@ -1458,6 +1546,12 @@ const docTemplate = `{
"type": "string",
"example": "abcd200"
},
+ "requestedMemory": {
+ "description": "in MB",
+ "type": "integer",
+ "minimum": 1,
+ "example": 128000
+ },
"resources": {
"type": "array",
"items": {
@@ -1469,7 +1563,14 @@ const docTemplate = `{
"example": 4
},
"startTime": {
- "type": "string"
+ "type": "integer",
+ "example": 1649723812
+ },
+ "statistics": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/schema.JobStatistics"
+ }
},
"subCluster": {
"type": "string",
@@ -1517,147 +1618,6 @@ const docTemplate = `{
}
}
},
- "schema.JobMeta": {
- "description": "Meta data information of a HPC job.",
- "type": "object",
- "properties": {
- "arrayJobId": {
- "type": "integer",
- "example": 123000
- },
- "cluster": {
- "type": "string",
- "example": "fritz"
- },
- "concurrentJobs": {
- "$ref": "#/definitions/schema.JobLinkResultList"
- },
- "duration": {
- "type": "integer",
- "minimum": 1,
- "example": 43200
- },
- "energy": {
- "type": "number"
- },
- "energyFootprint": {
- "type": "object",
- "additionalProperties": {
- "type": "number"
- }
- },
- "exclusive": {
- "type": "integer",
- "maximum": 2,
- "minimum": 0,
- "example": 1
- },
- "footprint": {
- "type": "object",
- "additionalProperties": {
- "type": "number"
- }
- },
- "id": {
- "type": "integer"
- },
- "jobId": {
- "type": "integer",
- "example": 123000
- },
- "jobState": {
- "enum": [
- "completed",
- "failed",
- "cancelled",
- "stopped",
- "timeout",
- "out_of_memory"
- ],
- "allOf": [
- {
- "$ref": "#/definitions/schema.JobState"
- }
- ],
- "example": "completed"
- },
- "metaData": {
- "type": "object",
- "additionalProperties": {
- "type": "string"
- }
- },
- "monitoringStatus": {
- "type": "integer",
- "maximum": 3,
- "minimum": 0,
- "example": 1
- },
- "numAcc": {
- "type": "integer",
- "minimum": 1,
- "example": 2
- },
- "numHwthreads": {
- "type": "integer",
- "minimum": 1,
- "example": 20
- },
- "numNodes": {
- "type": "integer",
- "minimum": 1,
- "example": 2
- },
- "partition": {
- "type": "string",
- "example": "main"
- },
- "project": {
- "type": "string",
- "example": "abcd200"
- },
- "resources": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/schema.Resource"
- }
- },
- "smt": {
- "type": "integer",
- "example": 4
- },
- "startTime": {
- "type": "integer",
- "minimum": 1,
- "example": 1649723812
- },
- "statistics": {
- "type": "object",
- "additionalProperties": {
- "$ref": "#/definitions/schema.JobStatistics"
- }
- },
- "subCluster": {
- "type": "string",
- "example": "main"
- },
- "tags": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/schema.Tag"
- }
- },
- "user": {
- "type": "string",
- "example": "abcd100h"
- },
- "walltime": {
- "type": "integer",
- "minimum": 1,
- "example": 86400
- }
- }
- },
"schema.JobMetric": {
"type": "object",
"properties": {
@@ -1985,6 +1945,9 @@ const docTemplate = `{
},
"remove": {
"type": "boolean"
+ },
+ "unit": {
+ "$ref": "#/definitions/schema.Unit"
}
}
},
diff --git a/internal/api/job.go b/internal/api/job.go
new file mode 100644
index 0000000..c3c2f2d
--- /dev/null
+++ b/internal/api/job.go
@@ -0,0 +1,1038 @@
+// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
+// All rights reserved. This file is part of cc-backend.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+package api
+
+import (
+ "bufio"
+ "database/sql"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/http"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/ClusterCockpit/cc-backend/internal/archiver"
+ "github.com/ClusterCockpit/cc-backend/internal/graph"
+ "github.com/ClusterCockpit/cc-backend/internal/graph/model"
+ "github.com/ClusterCockpit/cc-backend/internal/importer"
+ "github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher"
+ "github.com/ClusterCockpit/cc-backend/internal/repository"
+ "github.com/ClusterCockpit/cc-backend/pkg/archive"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/schema"
+ "github.com/gorilla/mux"
+)
+
+// StopJobApiRequest model
+type StopJobApiRequest struct {
+ JobId *int64 `json:"jobId" example:"123000"`
+ Cluster *string `json:"cluster" example:"fritz"`
+ StartTime *int64 `json:"startTime" example:"1649723812"`
+ State schema.JobState `json:"jobState" validate:"required" example:"completed"`
+ StopTime int64 `json:"stopTime" validate:"required" example:"1649763839"`
+}
+
+// DeleteJobApiRequest model
+type DeleteJobApiRequest struct {
+ JobId *int64 `json:"jobId" validate:"required" example:"123000"` // Cluster Job ID of job
+ Cluster *string `json:"cluster" example:"fritz"` // Cluster of job
+ StartTime *int64 `json:"startTime" example:"1649723812"` // Start Time of job as epoch
+}
+
+// GetJobsApiResponse model
+type GetJobsApiResponse struct {
+ Jobs []*schema.Job `json:"jobs"` // Array of jobs
+ Items int `json:"items"` // Number of jobs returned
+ Page int `json:"page"` // Page id returned
+}
+
+// ApiTag model
+type ApiTag struct {
+ // Tag Type
+ Type string `json:"type" example:"Debug"`
+ Name string `json:"name" example:"Testjob"` // Tag Name
+ Scope string `json:"scope" example:"global"` // Tag Scope for Frontend Display
+}
+
+// EditMetaRequest model
+type EditMetaRequest struct {
+ Key string `json:"key" example:"jobScript"`
+ Value string `json:"value" example:"bash script"`
+}
+
+// JobMetaRequest model
+type JobMetaRequest struct {
+ JobId *int64 `json:"jobId" validate:"required" example:"123000"` // Cluster Job ID of job
+ Cluster *string `json:"cluster" example:"fritz"` // Cluster of job
+ StartTime *int64 `json:"startTime" example:"1649723812"` // Start Time of job as epoch
+ Payload EditMetaRequest `json:"payload"` // Content to Add to Job Meta_Data
+}
+
+type TagJobApiRequest []*ApiTag
+
+type GetJobApiRequest []string
+
+type GetJobApiResponse struct {
+ Meta *schema.Job
+ Data []*JobMetricWithName
+}
+
+type GetCompleteJobApiResponse struct {
+ Meta *schema.Job
+ Data schema.JobData
+}
+
+type JobMetricWithName struct {
+ Metric *schema.JobMetric `json:"metric"`
+ Name string `json:"name"`
+ Scope schema.MetricScope `json:"scope"`
+}
+
+// getJobs godoc
+// @summary Lists all jobs
+// @tags Job query
+// @description Get a list of all jobs. Filters can be applied using query parameters.
+// @description Number of results can be limited by page. Results are sorted by descending startTime.
+// @produce json
+// @param state query string false "Job State" Enums(running, completed, failed, cancelled, stopped, timeout)
+// @param cluster query string false "Job Cluster"
+// @param start-time query string false "Syntax: '$from-$to', as unix epoch timestamps in seconds"
+// @param items-per-page query int false "Items per page (Default: 25)"
+// @param page query int false "Page Number (Default: 1)"
+// @param with-metadata query bool false "Include metadata (e.g. jobScript) in response"
+// @success 200 {object} api.GetJobsApiResponse "Job array and page info"
+// @failure 400 {object} api.ErrorResponse "Bad Request"
+// @failure 401 {object} api.ErrorResponse "Unauthorized"
+// @failure 403 {object} api.ErrorResponse "Forbidden"
+// @failure 500 {object} api.ErrorResponse "Internal Server Error"
+// @security ApiKeyAuth
+// @router /api/jobs/ [get]
+func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) {
+ withMetadata := false
+ filter := &model.JobFilter{}
+ page := &model.PageRequest{ItemsPerPage: 25, Page: 1}
+ order := &model.OrderByInput{Field: "startTime", Type: "col", Order: model.SortDirectionEnumDesc}
+
+ for key, vals := range r.URL.Query() {
+ switch key {
+ case "state":
+ for _, s := range vals {
+ state := schema.JobState(s)
+ if !state.Valid() {
+ handleError(fmt.Errorf("invalid query parameter value: state"),
+ http.StatusBadRequest, rw)
+ return
+ }
+ filter.State = append(filter.State, state)
+ }
+ case "cluster":
+ filter.Cluster = &model.StringInput{Eq: &vals[0]}
+ case "start-time":
+ st := strings.Split(vals[0], "-")
+ if len(st) != 2 {
+ handleError(fmt.Errorf("invalid query parameter value: startTime"),
+ http.StatusBadRequest, rw)
+ return
+ }
+ from, err := strconv.ParseInt(st[0], 10, 64)
+ if err != nil {
+ handleError(err, http.StatusBadRequest, rw)
+ return
+ }
+ to, err := strconv.ParseInt(st[1], 10, 64)
+ if err != nil {
+ handleError(err, http.StatusBadRequest, rw)
+ return
+ }
+ ufrom, uto := time.Unix(from, 0), time.Unix(to, 0)
+ filter.StartTime = &schema.TimeRange{From: &ufrom, To: &uto}
+ case "page":
+ x, err := strconv.Atoi(vals[0])
+ if err != nil {
+ handleError(err, http.StatusBadRequest, rw)
+ return
+ }
+ page.Page = x
+ case "items-per-page":
+ x, err := strconv.Atoi(vals[0])
+ if err != nil {
+ handleError(err, http.StatusBadRequest, rw)
+ return
+ }
+ page.ItemsPerPage = x
+ case "with-metadata":
+ withMetadata = true
+ default:
+ handleError(fmt.Errorf("invalid query parameter: %s", key),
+ http.StatusBadRequest, rw)
+ return
+ }
+ }
+
+ jobs, err := api.JobRepository.QueryJobs(r.Context(), []*model.JobFilter{filter}, page, order)
+ if err != nil {
+ handleError(err, http.StatusInternalServerError, rw)
+ return
+ }
+
+ results := make([]*schema.Job, 0, len(jobs))
+ for _, job := range jobs {
+ if withMetadata {
+ if _, err = api.JobRepository.FetchMetadata(job); err != nil {
+ handleError(err, http.StatusInternalServerError, rw)
+ return
+ }
+ }
+
+ job.Tags, err = api.JobRepository.GetTags(repository.GetUserFromContext(r.Context()), job.ID)
+ if err != nil {
+ handleError(err, http.StatusInternalServerError, rw)
+ return
+ }
+
+ if job.MonitoringStatus == schema.MonitoringStatusArchivingSuccessful {
+ job.Statistics, err = archive.GetStatistics(job)
+ if err != nil {
+ handleError(err, http.StatusInternalServerError, rw)
+ return
+ }
+ }
+
+ results = append(results, job)
+ }
+
+ cclog.Debugf("/api/jobs: %d jobs returned", len(results))
+ rw.Header().Add("Content-Type", "application/json")
+ bw := bufio.NewWriter(rw)
+ defer bw.Flush()
+
+ payload := GetJobsApiResponse{
+ Jobs: results,
+ Items: page.ItemsPerPage,
+ Page: page.Page,
+ }
+
+ if err := json.NewEncoder(bw).Encode(payload); err != nil {
+ handleError(err, http.StatusInternalServerError, rw)
+ return
+ }
+}
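
Putting the supported query parameters together, a filtered request against this handler looks like the following (values are illustrative; start-time takes two unix epochs as '$from-$to'):

GET /api/jobs/?state=running&cluster=fritz&start-time=1649000000-1649100000&items-per-page=25&page=1&with-metadata=true
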
+
+// getCompleteJobById godoc
+// @summary Get job meta and optional all metric data
+// @tags Job query
+// @description Job to get is specified by database ID
+// @description Returns full job resource information according to 'Job' scheme and all metrics according to 'JobData'.
+// @produce json
+// @param id path int true "Database ID of Job"
+// @param all-metrics query bool false "Include all available metrics"
+// @success 200 {object} api.GetJobApiResponse "Job resource"
+// @failure 400 {object} api.ErrorResponse "Bad Request"
+// @failure 401 {object} api.ErrorResponse "Unauthorized"
+// @failure 403 {object} api.ErrorResponse "Forbidden"
+// @failure 404 {object} api.ErrorResponse "Resource not found"
+// @failure 422 {object} api.ErrorResponse "Unprocessable Entity: finding job failed: sql: no rows in result set"
+// @failure 500 {object} api.ErrorResponse "Internal Server Error"
+// @security ApiKeyAuth
+// @router /api/jobs/{id} [get]
+func (api *RestApi) getCompleteJobById(rw http.ResponseWriter, r *http.Request) {
+ // Fetch job from db
+ id, ok := mux.Vars(r)["id"]
+ var job *schema.Job
+ var err error
+ if ok {
+ id, e := strconv.ParseInt(id, 10, 64)
+ if e != nil {
+ handleError(fmt.Errorf("integer expected in path for id: %w", e), http.StatusBadRequest, rw)
+ return
+ }
+
+ job, err = api.JobRepository.FindById(r.Context(), id) // Get Job from Repo by ID
+ } else {
+ handleError(fmt.Errorf("the parameter 'id' is required"), http.StatusBadRequest, rw)
+ return
+ }
+ if err != nil {
+ handleError(fmt.Errorf("finding job with db id %s failed: %w", id, err), http.StatusUnprocessableEntity, rw)
+ return
+ }
+
+ job.Tags, err = api.JobRepository.GetTags(repository.GetUserFromContext(r.Context()), job.ID)
+ if err != nil {
+ handleError(err, http.StatusInternalServerError, rw)
+		return
+	}
+	if _, err = api.JobRepository.FetchMetadata(job); err != nil {
+		handleError(err, http.StatusInternalServerError, rw)
+		return
+	}
+
+ var scopes []schema.MetricScope
+
+ if job.NumNodes == 1 {
+ scopes = []schema.MetricScope{"core"}
+ } else {
+ scopes = []schema.MetricScope{"node"}
+ }
+
+ var data schema.JobData
+
+ metricConfigs := archive.GetCluster(job.Cluster).MetricConfig
+ resolution := 0
+
+ for _, mc := range metricConfigs {
+ resolution = max(resolution, mc.Timestep)
+ }
+
+ if r.URL.Query().Get("all-metrics") == "true" {
+ data, err = metricDataDispatcher.LoadData(job, nil, scopes, r.Context(), resolution)
+		if err != nil {
+			cclog.Warnf("REST: error while loading all-metrics job data for JobID %d on %s", job.JobID, job.Cluster)
+			handleError(err, http.StatusInternalServerError, rw)
+			return
+		}
+ }
+
+ cclog.Debugf("/api/job/%s: get job %d", id, job.JobID)
+ rw.Header().Add("Content-Type", "application/json")
+ bw := bufio.NewWriter(rw)
+ defer bw.Flush()
+
+ payload := GetCompleteJobApiResponse{
+ Meta: job,
+ Data: data,
+ }
+
+ if err := json.NewEncoder(bw).Encode(payload); err != nil {
+ handleError(err, http.StatusInternalServerError, rw)
+ return
+ }
+}
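
The matching request for this handler (the database id is illustrative); without all-metrics=true only the job meta information is returned, since data stays empty:

GET /api/jobs/123?all-metrics=true
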
+
+// getJobById godoc
+// @summary Get job meta and configurable metric data
+// @tags Job query
+// @description Job to get is specified by database ID
+// @description Returns full job resource information according to 'Job' scheme and all metrics according to 'JobData'.
+// @accept json
+// @produce json
+// @param id path int true "Database ID of Job"
+// @param request body api.GetJobApiRequest true "Array of metric names"
+// @success 200 {object} api.GetJobApiResponse "Job resource"
+// @failure 400 {object} api.ErrorResponse "Bad Request"
+// @failure 401 {object} api.ErrorResponse "Unauthorized"
+// @failure 403 {object} api.ErrorResponse "Forbidden"
+// @failure 404 {object} api.ErrorResponse "Resource not found"
+// @failure 422 {object} api.ErrorResponse "Unprocessable Entity: finding job failed: sql: no rows in result set"
+// @failure 500 {object} api.ErrorResponse "Internal Server Error"
+// @security ApiKeyAuth
+// @router /api/jobs/{id} [post]
+func (api *RestApi) getJobById(rw http.ResponseWriter, r *http.Request) {
+ // Fetch job from db
+ id, ok := mux.Vars(r)["id"]
+ var job *schema.Job
+ var err error
+ if ok {
+ id, e := strconv.ParseInt(id, 10, 64)
+ if e != nil {
+ handleError(fmt.Errorf("integer expected in path for id: %w", e), http.StatusBadRequest, rw)
+ return
+ }
+
+ job, err = api.JobRepository.FindById(r.Context(), id)
+ } else {
+ handleError(errors.New("the parameter 'id' is required"), http.StatusBadRequest, rw)
+ return
+ }
+ if err != nil {
+ handleError(fmt.Errorf("finding job with db id %s failed: %w", id, err), http.StatusUnprocessableEntity, rw)
+ return
+ }
+
+ job.Tags, err = api.JobRepository.GetTags(repository.GetUserFromContext(r.Context()), job.ID)
+ if err != nil {
+ handleError(err, http.StatusInternalServerError, rw)
+		return
+	}
+	if _, err = api.JobRepository.FetchMetadata(job); err != nil {
+		handleError(err, http.StatusInternalServerError, rw)
+		return
+	}
+
+ var metrics GetJobApiRequest
+ if err = decode(r.Body, &metrics); err != nil {
+ http.Error(rw, err.Error(), http.StatusBadRequest)
+ return
+ }
+
+ var scopes []schema.MetricScope
+
+ if job.NumNodes == 1 {
+ scopes = []schema.MetricScope{"core"}
+ } else {
+ scopes = []schema.MetricScope{"node"}
+ }
+
+ metricConfigs := archive.GetCluster(job.Cluster).MetricConfig
+ resolution := 0
+
+ for _, mc := range metricConfigs {
+ resolution = max(resolution, mc.Timestep)
+ }
+
+ data, err := metricDataDispatcher.LoadData(job, metrics, scopes, r.Context(), resolution)
+	if err != nil {
+		cclog.Warnf("REST: error while loading job data for JobID %d on %s", job.JobID, job.Cluster)
+		handleError(err, http.StatusInternalServerError, rw)
+		return
+	}
+
+ res := []*JobMetricWithName{}
+ for name, md := range data {
+ for scope, metric := range md {
+ res = append(res, &JobMetricWithName{
+ Name: name,
+ Scope: scope,
+ Metric: metric,
+ })
+ }
+ }
+
+ cclog.Debugf("/api/job/%s: get job %d", id, job.JobID)
+ rw.Header().Add("Content-Type", "application/json")
+ bw := bufio.NewWriter(rw)
+ defer bw.Flush()
+
+ payload := GetJobApiResponse{
+ Meta: job,
+ Data: res,
+ }
+
+ if err := json.NewEncoder(bw).Encode(payload); err != nil {
+ handleError(err, http.StatusInternalServerError, rw)
+ return
+ }
+}
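
Since GetJobApiRequest is a plain string array, the POST body for this route is just a list of metric names (names are illustrative; they must exist in the cluster's metric config):

POST /api/jobs/123
["flops_any", "mem_bw"]
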
+
+// editMeta godoc
+// @summary Edit meta-data json of job identified by database id
+// @tags Job add and modify
+// @description Edit key value pairs in job metadata json of job specified by database id
+// @description If a key already exists its content will be overwritten
+// @accept json
+// @produce json
+// @param id path int true "Job Database ID"
+// @param request body api.EditMetaRequest true "Metadata Key value pair to add or update"
+// @success 200 {object} schema.Job "Updated job resource"
+// @failure 400 {object} api.ErrorResponse "Bad Request"
+// @failure 401 {object} api.ErrorResponse "Unauthorized"
+// @failure 404 {object} api.ErrorResponse "Job does not exist"
+// @failure 500 {object} api.ErrorResponse "Internal Server Error"
+// @security ApiKeyAuth
+// @router /api/jobs/edit_meta/{id} [patch]
+func (api *RestApi) editMeta(rw http.ResponseWriter, r *http.Request) {
+ id, err := strconv.ParseInt(mux.Vars(r)["id"], 10, 64)
+ if err != nil {
+ http.Error(rw, err.Error(), http.StatusBadRequest)
+ return
+ }
+
+ job, err := api.JobRepository.FindById(r.Context(), id)
+ if err != nil {
+ http.Error(rw, err.Error(), http.StatusNotFound)
+ return
+ }
+
+ var req EditMetaRequest
+ if err := decode(r.Body, &req); err != nil {
+ http.Error(rw, err.Error(), http.StatusBadRequest)
+ return
+ }
+
+ if err := api.JobRepository.UpdateMetadata(job, req.Key, req.Value); err != nil {
+ http.Error(rw, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ rw.Header().Add("Content-Type", "application/json")
+ rw.WriteHeader(http.StatusOK)
+ json.NewEncoder(rw).Encode(job)
+}
+
+// editMetaByRequest godoc
+// @summary Edit meta-data json of job identified by request
+// @tags Job add and modify
+// @description Edit key value pairs in metadata json of job specified by jobID, StartTime and Cluster
+// @description If a key already exists its content will be overwritten
+// @accept json
+// @produce json
+// @param request body api.JobMetaRequest true "Specifies job and payload to add or update"
+// @success 200 {object} schema.Job "Updated job resource"
+// @failure 400 {object} api.ErrorResponse "Bad Request"
+// @failure 401 {object} api.ErrorResponse "Unauthorized"
+// @failure 404 {object} api.ErrorResponse "Job does not exist"
+// @failure 500 {object} api.ErrorResponse "Internal Server Error"
+// @security ApiKeyAuth
+// @router /api/jobs/edit_meta/ [patch]
+func (api *RestApi) editMetaByRequest(rw http.ResponseWriter, r *http.Request) {
+ // Parse request body
+ req := JobMetaRequest{}
+ if err := decode(r.Body, &req); err != nil {
+ handleError(fmt.Errorf("parsing request body failed: %w", err), http.StatusBadRequest, rw)
+ return
+ }
+
+ // Fetch job (that will have its meta_data edited) from db
+ var job *schema.Job
+ var err error
+ if req.JobId == nil {
+ handleError(errors.New("the field 'jobId' is required"), http.StatusBadRequest, rw)
+ return
+ }
+
+ // log.Printf("loading db job for editMetaByRequest... : JobMetaRequest=%v", req)
+ job, err = api.JobRepository.Find(req.JobId, req.Cluster, req.StartTime)
+ if err != nil {
+ handleError(fmt.Errorf("finding job failed: %w", err), http.StatusUnprocessableEntity, rw)
+ return
+ }
+
+ if err := api.JobRepository.UpdateMetadata(job, req.Payload.Key, req.Payload.Value); err != nil {
+ http.Error(rw, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ rw.Header().Add("Content-Type", "application/json")
+ rw.WriteHeader(http.StatusOK)
+ json.NewEncoder(rw).Encode(job)
+}
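
Assembled from the struct tags above, a JobMetaRequest body for this route looks like:

{
  "jobId": 123000,
  "cluster": "fritz",
  "startTime": 1649723812,
  "payload": { "key": "jobScript", "value": "bash script" }
}
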
+
+// tagJob godoc
+// @summary Adds one or more tags to a job
+// @tags Job add and modify
+// @description Adds tag(s) to a job specified by DB ID. Name and Type of Tag(s) can be chosen freely.
+// @description Tag Scope for frontend visibility will default to "global" if none entered, other options: "admin" or specific username.
+// @description If tagged job is already finished: Tag will be written directly to respective archive files.
+// @accept json
+// @produce json
+// @param id path int true "Job Database ID"
+// @param request body api.TagJobApiRequest true "Array of tag-objects to add"
+// @success 200 {object} schema.Job "Updated job resource"
+// @failure 400 {object} api.ErrorResponse "Bad Request"
+// @failure 401 {object} api.ErrorResponse "Unauthorized"
+// @failure 404 {object} api.ErrorResponse "Job or tag does not exist"
+// @failure 500 {object} api.ErrorResponse "Internal Server Error"
+// @security ApiKeyAuth
+// @router /api/jobs/tag_job/{id} [post]
+func (api *RestApi) tagJob(rw http.ResponseWriter, r *http.Request) {
+ id, err := strconv.ParseInt(mux.Vars(r)["id"], 10, 64)
+ if err != nil {
+ http.Error(rw, err.Error(), http.StatusBadRequest)
+ return
+ }
+
+ job, err := api.JobRepository.FindById(r.Context(), id)
+ if err != nil {
+ http.Error(rw, err.Error(), http.StatusNotFound)
+ return
+ }
+
+ job.Tags, err = api.JobRepository.GetTags(repository.GetUserFromContext(r.Context()), job.ID)
+ if err != nil {
+ http.Error(rw, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ var req TagJobApiRequest
+ if err := decode(r.Body, &req); err != nil {
+ http.Error(rw, err.Error(), http.StatusBadRequest)
+ return
+ }
+
+ for _, tag := range req {
+ tagId, err := api.JobRepository.AddTagOrCreate(repository.GetUserFromContext(r.Context()), *job.ID, tag.Type, tag.Name, tag.Scope)
+ if err != nil {
+ http.Error(rw, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ job.Tags = append(job.Tags, &schema.Tag{
+ ID: tagId,
+ Type: tag.Type,
+ Name: tag.Name,
+ Scope: tag.Scope,
+ })
+ }
+
+ rw.Header().Add("Content-Type", "application/json")
+ rw.WriteHeader(http.StatusOK)
+ json.NewEncoder(rw).Encode(job)
+}
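
Matching the ApiTag example tags, a TagJobApiRequest body is an array of tag objects; per the description above, scope defaults to "global" if none is entered:

[
  { "type": "Debug", "name": "Testjob", "scope": "global" }
]
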
+
+// removeTagJob godoc
+// @summary Removes one or more tags from a job
+// @tags Job add and modify
+// @description Removes tag(s) from a job specified by DB ID. Name and Type of Tag(s) must match.
+// @description Tag Scope is required for matching, options: "global", "admin". Private tags can not be deleted via API.
+// @description If tagged job is already finished: Tag will be removed from respective archive files.
+// @accept json
+// @produce json
+// @param id path int true "Job Database ID"
+// @param request body api.TagJobApiRequest true "Array of tag-objects to remove"
+// @success 200 {object} schema.Job "Updated job resource"
+// @failure 400 {object} api.ErrorResponse "Bad Request"
+// @failure 401 {object} api.ErrorResponse "Unauthorized"
+// @failure 404 {object} api.ErrorResponse "Job or tag does not exist"
+// @failure 500 {object} api.ErrorResponse "Internal Server Error"
+// @security ApiKeyAuth
+// @router /jobs/tag_job/{id} [delete]
+func (api *RestApi) removeTagJob(rw http.ResponseWriter, r *http.Request) {
+ id, err := strconv.ParseInt(mux.Vars(r)["id"], 10, 64)
+ if err != nil {
+ http.Error(rw, err.Error(), http.StatusBadRequest)
+ return
+ }
+
+ job, err := api.JobRepository.FindById(r.Context(), id)
+ if err != nil {
+ http.Error(rw, err.Error(), http.StatusNotFound)
+ return
+ }
+
+ job.Tags, err = api.JobRepository.GetTags(repository.GetUserFromContext(r.Context()), job.ID)
+ if err != nil {
+ http.Error(rw, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ var req TagJobApiRequest
+ if err := decode(r.Body, &req); err != nil {
+ http.Error(rw, err.Error(), http.StatusBadRequest)
+ return
+ }
+
+ for _, rtag := range req {
+ // Only Global and Admin Tags
+ if rtag.Scope != "global" && rtag.Scope != "admin" {
+ cclog.Warnf("Cannot delete private tag for job %d: Skip", job.JobID)
+ continue
+ }
+
+ remainingTags, err := api.JobRepository.RemoveJobTagByRequest(repository.GetUserFromContext(r.Context()), *job.ID, rtag.Type, rtag.Name, rtag.Scope)
+ if err != nil {
+ http.Error(rw, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ job.Tags = remainingTags
+ }
+
+ rw.Header().Add("Content-Type", "application/json")
+ rw.WriteHeader(http.StatusOK)
+ json.NewEncoder(rw).Encode(job)
+}
+
+// removeTags godoc
+// @summary Removes all tags and job-relations for type:name tuple
+// @tags Tag remove
+// @description Removes tags by type and name. Name and Type of Tag(s) must match.
+// @description Tag Scope is required for matching, options: "global", "admin". Private tags can not be deleted via API.
+// @description Tags will be removed from respective archive files.
+// @accept json
+// @produce plain
+// @param request body api.TagJobApiRequest true "Array of tag-objects to remove"
+// @success 200 {string} string "Success Response"
+// @failure 400 {object} api.ErrorResponse "Bad Request"
+// @failure 401 {object} api.ErrorResponse "Unauthorized"
+// @failure 404 {object} api.ErrorResponse "Job or tag does not exist"
+// @failure 500 {object} api.ErrorResponse "Internal Server Error"
+// @security ApiKeyAuth
+// @router /tags/ [delete]
+func (api *RestApi) removeTags(rw http.ResponseWriter, r *http.Request) {
+ var req TagJobApiRequest
+ if err := decode(r.Body, &req); err != nil {
+ http.Error(rw, err.Error(), http.StatusBadRequest)
+ return
+ }
+
+ targetCount := len(req)
+ currentCount := 0
+ for _, rtag := range req {
+ // Only Global and Admin Tags
+ if rtag.Scope != "global" && rtag.Scope != "admin" {
+ cclog.Warn("Cannot delete private tag: Skip")
+ continue
+ }
+
+ err := api.JobRepository.RemoveTagByRequest(rtag.Type, rtag.Name, rtag.Scope)
+ if err != nil {
+ http.Error(rw, err.Error(), http.StatusInternalServerError)
+ return
+ } else {
+ currentCount++
+ }
+ }
+
+ rw.WriteHeader(http.StatusOK)
+ fmt.Fprintf(rw, "Deleted Tags from DB: %d successfull of %d requested\n", currentCount, targetCount)
+}
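+
+// Example request for removing a tag from all jobs (sketch; placeholders as
+// above):
+//
+//	curl -X DELETE http://localhost:8080/api/tags/ \
+//	  -H "Authorization: Bearer $JWT" -H "Content-Type: application/json" \
+//	  -d '[{"type": "Debug", "name": "Testjob", "scope": "global"}]'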
+
+// startJob godoc
+// @summary Adds a new job as "running"
+// @tags Job add and modify
+// @description Job specified in request body will be saved to database as "running" with new DB ID.
+// @description Job specifications follow the 'Job' scheme, API will fail to execute if requirements are not met.
+// @accept json
+// @produce json
+// @param request body schema.Job true "Job to add"
+// @success 201 {object} api.DefaultApiResponse "Job added successfully"
+// @failure 400 {object} api.ErrorResponse "Bad Request"
+// @failure 401 {object} api.ErrorResponse "Unauthorized"
+// @failure 403 {object} api.ErrorResponse "Forbidden"
+// @failure 422 {object} api.ErrorResponse "Unprocessable Entity: The combination of jobId, clusterId and startTime does already exist"
+// @failure 500 {object} api.ErrorResponse "Internal Server Error"
+// @security ApiKeyAuth
+// @router /api/jobs/start_job/ [post]
+func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) {
+ req := schema.Job{
+ Exclusive: 1,
+ MonitoringStatus: schema.MonitoringStatusRunningOrArchiving,
+ }
+ if err := decode(r.Body, &req); err != nil {
+ handleError(fmt.Errorf("parsing request body failed: %w", err), http.StatusBadRequest, rw)
+ return
+ }
+
+ cclog.Printf("REST: %s\n", req.GoString())
+ req.State = schema.JobStateRunning
+
+ if err := importer.SanityChecks(&req); err != nil {
+ handleError(err, http.StatusBadRequest, rw)
+ return
+ }
+
+ // acquire lock to avoid race condition between API calls
+ var unlockOnce sync.Once
+ api.RepositoryMutex.Lock()
+ defer unlockOnce.Do(api.RepositoryMutex.Unlock)
+
+ // Check if combination of (job_id, cluster_id, start_time) already exists:
+ jobs, err := api.JobRepository.FindAll(&req.JobID, &req.Cluster, nil)
+ if err != nil && err != sql.ErrNoRows {
+ handleError(fmt.Errorf("checking for duplicate failed: %w", err), http.StatusInternalServerError, rw)
+ return
+ } else if err == nil {
+ for _, job := range jobs {
+ if (req.StartTime - job.StartTime) < 86400 {
+ handleError(fmt.Errorf("a job with that jobId, cluster and startTime already exists: dbid: %d, jobid: %d", job.ID, job.JobID), http.StatusUnprocessableEntity, rw)
+ return
+ }
+ }
+ }
+
+ id, err := api.JobRepository.Start(&req)
+ if err != nil {
+ handleError(fmt.Errorf("insert into database failed: %w", err), http.StatusInternalServerError, rw)
+ return
+ }
+ // unlock here, adding Tags can be async
+ unlockOnce.Do(api.RepositoryMutex.Unlock)
+
+ for _, tag := range req.Tags {
+ if _, err := api.JobRepository.AddTagOrCreate(repository.GetUserFromContext(r.Context()), id, tag.Type, tag.Name, tag.Scope); err != nil {
+ handleError(fmt.Errorf("adding tag to new job %d failed: %w", id, err), http.StatusInternalServerError, rw)
+ return
+ }
+ }
+
+ cclog.Printf("new job (id: %d): cluster=%s, jobId=%d, user=%s, startTime=%d", id, req.Cluster, req.JobID, req.User, req.StartTime)
+ rw.Header().Add("Content-Type", "application/json")
+ rw.WriteHeader(http.StatusCreated)
+ json.NewEncoder(rw).Encode(DefaultApiResponse{
+ Message: "success",
+ })
+}
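+
+// Example request (sketch; the exact required fields are enforced by
+// importer.SanityChecks, all values below are placeholders):
+//
+//	curl -X POST http://localhost:8080/api/jobs/start_job/ \
+//	  -H "Authorization: Bearer $JWT" -H "Content-Type: application/json" \
+//	  -d '{"jobId": 123000, "user": "testuser", "cluster": "fritz", "numNodes": 1,
+//	       "startTime": 1649723812, "resources": [{"hostname": "node001"}]}'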
+
+// stopJobByRequest godoc
+// @summary Marks job as completed and triggers archiving
+// @tags Job add and modify
+// @description Job to stop is specified by request body. All fields are required in this case.
+// @description Returns full job resource information according to 'Job' scheme.
+// @produce json
+// @param request body api.StopJobApiRequest true "All fields required"
+// @success 200 {object} schema.Job "Success message"
+// @failure 400 {object} api.ErrorResponse "Bad Request"
+// @failure 401 {object} api.ErrorResponse "Unauthorized"
+// @failure 403 {object} api.ErrorResponse "Forbidden"
+// @failure 404 {object} api.ErrorResponse "Resource not found"
+// @failure 422 {object} api.ErrorResponse "Unprocessable Entity: job has already been stopped"
+// @failure 500 {object} api.ErrorResponse "Internal Server Error"
+// @security ApiKeyAuth
+// @router /api/jobs/stop_job/ [post]
+func (api *RestApi) stopJobByRequest(rw http.ResponseWriter, r *http.Request) {
+ // Parse request body
+ req := StopJobApiRequest{}
+ if err := decode(r.Body, &req); err != nil {
+ handleError(fmt.Errorf("parsing request body failed: %w", err), http.StatusBadRequest, rw)
+ return
+ }
+
+ // Fetch job (that will be stopped) from db
+ var job *schema.Job
+ var err error
+ if req.JobId == nil {
+ handleError(errors.New("the field 'jobId' is required"), http.StatusBadRequest, rw)
+ return
+ }
+
+ // cclog.Printf("loading db job for stopJobByRequest... : stopJobApiRequest=%v", req)
+ job, err = api.JobRepository.Find(req.JobId, req.Cluster, req.StartTime)
+ if err != nil {
+ // Fall back to the job cache; report both errors if the job is found nowhere
+ var cerr error
+ job, cerr = api.JobRepository.FindCached(req.JobId, req.Cluster, req.StartTime)
+ if cerr != nil {
+ handleError(fmt.Errorf("finding job failed: %w", errors.Join(err, cerr)), http.StatusUnprocessableEntity, rw)
+ return
+ }
+ }
+
+ api.checkAndHandleStopJob(rw, job, req)
+}
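+
+// Example request (sketch; values follow the StopJobApiRequest examples,
+// jobState defaults to "completed" if omitted):
+//
+//	curl -X POST http://localhost:8080/api/jobs/stop_job/ \
+//	  -H "Authorization: Bearer $JWT" -H "Content-Type: application/json" \
+//	  -d '{"jobId": 123000, "cluster": "fritz", "startTime": 1649723812, "stopTime": 1649763839}'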
+
+// deleteJobById godoc
+// @summary Remove a job from the sql database
+// @tags Job remove
+// @description Job to remove is specified by database ID. This will not remove the job from the job archive.
+// @produce json
+// @param id path int true "Database ID of Job"
+// @success 200 {object} api.DefaultApiResponse "Success message"
+// @failure 400 {object} api.ErrorResponse "Bad Request"
+// @failure 401 {object} api.ErrorResponse "Unauthorized"
+// @failure 403 {object} api.ErrorResponse "Forbidden"
+// @failure 404 {object} api.ErrorResponse "Resource not found"
+// @failure 422 {object} api.ErrorResponse "Unprocessable Entity: finding job failed: sql: no rows in result set"
+// @failure 500 {object} api.ErrorResponse "Internal Server Error"
+// @security ApiKeyAuth
+// @router /api/jobs/delete_job/{id} [delete]
+func (api *RestApi) deleteJobById(rw http.ResponseWriter, r *http.Request) {
+ // Fetch job (that will be deleted) from db
+ id, ok := mux.Vars(r)["id"]
+ var err error
+ if ok {
+ id, e := strconv.ParseInt(id, 10, 64)
+ if e != nil {
+ handleError(fmt.Errorf("integer expected in path for id: %w", e), http.StatusBadRequest, rw)
+ return
+ }
+
+ err = api.JobRepository.DeleteJobById(id)
+ } else {
+ handleError(errors.New("the parameter 'id' is required"), http.StatusBadRequest, rw)
+ return
+ }
+ if err != nil {
+ handleError(fmt.Errorf("deleting job failed: %w", err), http.StatusUnprocessableEntity, rw)
+ return
+ }
+ rw.Header().Add("Content-Type", "application/json")
+ rw.WriteHeader(http.StatusOK)
+ json.NewEncoder(rw).Encode(DefaultApiResponse{
+ Message: fmt.Sprintf("Successfully deleted job %s", id),
+ })
+}
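+
+// Example request (sketch; 123 is a placeholder database ID):
+//
+//	curl -X DELETE http://localhost:8080/api/jobs/delete_job/123 \
+//	  -H "Authorization: Bearer $JWT"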
+
+// deleteJobByRequest godoc
+// @summary Remove a job from the sql database
+// @tags Job remove
+// @description Job to delete is specified by request body. All fields are required in this case.
+// @accept json
+// @produce json
+// @param request body api.DeleteJobApiRequest true "All fields required"
+// @success 200 {object} api.DefaultApiResponse "Success message"
+// @failure 400 {object} api.ErrorResponse "Bad Request"
+// @failure 401 {object} api.ErrorResponse "Unauthorized"
+// @failure 403 {object} api.ErrorResponse "Forbidden"
+// @failure 404 {object} api.ErrorResponse "Resource not found"
+// @failure 422 {object} api.ErrorResponse "Unprocessable Entity: finding job failed: sql: no rows in result set"
+// @failure 500 {object} api.ErrorResponse "Internal Server Error"
+// @security ApiKeyAuth
+// @router /api/jobs/delete_job/ [delete]
+func (api *RestApi) deleteJobByRequest(rw http.ResponseWriter, r *http.Request) {
+ // Parse request body
+ req := DeleteJobApiRequest{}
+ if err := decode(r.Body, &req); err != nil {
+ handleError(fmt.Errorf("parsing request body failed: %w", err), http.StatusBadRequest, rw)
+ return
+ }
+
+ // Fetch job (that will be deleted) from db
+ var job *schema.Job
+ var err error
+ if req.JobId == nil {
+ handleError(errors.New("the field 'jobId' is required"), http.StatusBadRequest, rw)
+ return
+ }
+
+ job, err = api.JobRepository.Find(req.JobId, req.Cluster, req.StartTime)
+ if err != nil {
+ handleError(fmt.Errorf("finding job failed: %w", err), http.StatusUnprocessableEntity, rw)
+ return
+ }
+
+ err = api.JobRepository.DeleteJobById(*job.ID)
+ if err != nil {
+ handleError(fmt.Errorf("deleting job failed: %w", err), http.StatusUnprocessableEntity, rw)
+ return
+ }
+
+ rw.Header().Add("Content-Type", "application/json")
+ rw.WriteHeader(http.StatusOK)
+ json.NewEncoder(rw).Encode(DefaultApiResponse{
+ Message: fmt.Sprintf("Successfully deleted job %d", job.ID),
+ })
+}
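+
+// Example request (sketch; values are placeholders):
+//
+//	curl -X DELETE http://localhost:8080/api/jobs/delete_job/ \
+//	  -H "Authorization: Bearer $JWT" -H "Content-Type: application/json" \
+//	  -d '{"jobId": 123000, "cluster": "fritz", "startTime": 1649723812}'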
+
+// deleteJobBefore godoc
+// @summary Remove a job from the sql database
+// @tags Job remove
+// @description Remove all jobs with start time before timestamp. The jobs will not be removed from the job archive.
+// @produce json
+// @param ts path int true "Unix epoch timestamp"
+// @success 200 {object} api.DefaultApiResponse "Success message"
+// @failure 400 {object} api.ErrorResponse "Bad Request"
+// @failure 401 {object} api.ErrorResponse "Unauthorized"
+// @failure 403 {object} api.ErrorResponse "Forbidden"
+// @failure 404 {object} api.ErrorResponse "Resource not found"
+// @failure 422 {object} api.ErrorResponse "Unprocessable Entity: finding job failed: sql: no rows in result set"
+// @failure 500 {object} api.ErrorResponse "Internal Server Error"
+// @security ApiKeyAuth
+// @router /api/jobs/delete_job_before/{ts} [delete]
+func (api *RestApi) deleteJobBefore(rw http.ResponseWriter, r *http.Request) {
+ var cnt int
+ // Parse timestamp from the request path
+ id, ok := mux.Vars(r)["ts"]
+ var err error
+ if ok {
+ ts, e := strconv.ParseInt(id, 10, 64)
+ if e != nil {
+ handleError(fmt.Errorf("integer expected in path for ts: %w", e), http.StatusBadRequest, rw)
+ return
+ }
+
+ cnt, err = api.JobRepository.DeleteJobsBefore(ts)
+ } else {
+ handleError(errors.New("the parameter 'ts' is required"), http.StatusBadRequest, rw)
+ return
+ }
+ if err != nil {
+ handleError(fmt.Errorf("deleting jobs failed: %w", err), http.StatusUnprocessableEntity, rw)
+ return
+ }
+
+ rw.Header().Add("Content-Type", "application/json")
+ rw.WriteHeader(http.StatusOK)
+ json.NewEncoder(rw).Encode(DefaultApiResponse{
+ Message: fmt.Sprintf("Successfully deleted %d jobs", cnt),
+ })
+}
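+
+// Example request (sketch; deletes all jobs started before the given epoch):
+//
+//	curl -X DELETE http://localhost:8080/api/jobs/delete_job_before/1649723812 \
+//	  -H "Authorization: Bearer $JWT"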
+
+func (api *RestApi) checkAndHandleStopJob(rw http.ResponseWriter, job *schema.Job, req StopJobApiRequest) {
+ // Sanity checks: the nil check must come first, before job is dereferenced
+ if job == nil {
+ handleError(errors.New("stopping job failed: no job provided"), http.StatusNotFound, rw)
+ return
+ }
+
+ if job.State != schema.JobStateRunning {
+ handleError(fmt.Errorf("jobId %d (id %d) on %s : job has already been stopped (state is: %s)", job.JobID, *job.ID, job.Cluster, job.State), http.StatusUnprocessableEntity, rw)
+ return
+ }
+
+ if job.StartTime > req.StopTime {
+ handleError(fmt.Errorf("jobId %d (id %d) on %s : stopTime %d must be larger/equal than startTime %d", job.JobID, *job.ID, job.Cluster, req.StopTime, job.StartTime), http.StatusBadRequest, rw)
+ return
+ }
+
+ if req.State != "" && !req.State.Valid() {
+ handleError(fmt.Errorf("jobId %d (id %d) on %s : invalid requested job state: %#v", job.JobID, *job.ID, job.Cluster, req.State), http.StatusBadRequest, rw)
+ return
+ } else if req.State == "" {
+ req.State = schema.JobStateCompleted
+ }
+
+ // Mark job as stopped in the database (update state and duration)
+ job.Duration = int32(req.StopTime - job.StartTime)
+ job.State = req.State
+ api.JobRepository.Mutex.Lock()
+ if err := api.JobRepository.Stop(*job.ID, job.Duration, job.State, job.MonitoringStatus); err != nil {
+ if err := api.JobRepository.StopCached(*job.ID, job.Duration, job.State, job.MonitoringStatus); err != nil {
+ api.JobRepository.Mutex.Unlock()
+ handleError(fmt.Errorf("jobId %d (id %d) on %s : marking job as '%s' (duration: %d) in DB failed: %w", job.JobID, job.ID, job.Cluster, job.State, job.Duration, err), http.StatusInternalServerError, rw)
+ return
+ }
+ }
+ api.JobRepository.Mutex.Unlock()
+
+ cclog.Printf("archiving job... (dbid: %d): cluster=%s, jobId=%d, user=%s, startTime=%d, duration=%d, state=%s", job.ID, job.Cluster, job.JobID, job.User, job.StartTime, job.Duration, job.State)
+
+ // Send a response (with status OK). This means that errors that happen from here on forward
+ // can *NOT* be communicated to the client. If reading from a MetricDataRepository or
+ // writing to the filesystem fails, the client will not know.
+ rw.Header().Add("Content-Type", "application/json")
+ rw.WriteHeader(http.StatusOK)
+ json.NewEncoder(rw).Encode(job)
+
+ // Monitoring is disabled...
+ if job.MonitoringStatus == schema.MonitoringStatusDisabled {
+ return
+ }
+
+ // Trigger async archiving
+ archiver.TriggerArchiving(job)
+}
+
+func (api *RestApi) getJobMetrics(rw http.ResponseWriter, r *http.Request) {
+ id := mux.Vars(r)["id"]
+ metrics := r.URL.Query()["metric"]
+ var scopes []schema.MetricScope
+ for _, scope := range r.URL.Query()["scope"] {
+ var s schema.MetricScope
+ if err := s.UnmarshalGQL(scope); err != nil {
+ http.Error(rw, err.Error(), http.StatusBadRequest)
+ return
+ }
+ scopes = append(scopes, s)
+ }
+
+ rw.Header().Add("Content-Type", "application/json")
+ rw.WriteHeader(http.StatusOK)
+
+ type Response struct {
+ Data *struct {
+ JobMetrics []*model.JobMetricWithName `json:"jobMetrics"`
+ } `json:"data"`
+ Error *struct {
+ Message string `json:"message"`
+ } `json:"error"`
+ }
+
+ resolver := graph.GetResolverInstance()
+ data, err := resolver.Query().JobMetrics(r.Context(), id, metrics, scopes, nil)
+ if err != nil {
+ json.NewEncoder(rw).Encode(Response{
+ Error: &struct {
+ Message string `json:"message"`
+ }{Message: err.Error()},
+ })
+ return
+ }
+
+ json.NewEncoder(rw).Encode(Response{
+ Data: &struct {
+ JobMetrics []*model.JobMetricWithName `json:"jobMetrics"`
+ }{JobMetrics: data},
+ })
+}
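+
+// Example request (sketch; the concrete mount point of this handler is not
+// shown here, metric and scope names are placeholders):
+//
+//	curl "http://localhost:8080/<mount-path>/123?metric=flops_any&scope=node" \
+//	  -H "Authorization: Bearer $JWT"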
diff --git a/internal/api/node.go b/internal/api/node.go
new file mode 100644
index 0000000..385b2da
--- /dev/null
+++ b/internal/api/node.go
@@ -0,0 +1,80 @@
+// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
+// All rights reserved. This file is part of cc-backend.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+package api
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "strings"
+
+ "github.com/ClusterCockpit/cc-backend/internal/repository"
+ "github.com/ClusterCockpit/cc-lib/schema"
+)
+
+type Node struct {
+ Name string `json:"hostname"`
+ States []string `json:"states"`
+ CpusAllocated int `json:"cpusAllocated"`
+ CpusTotal int `json:"cpusTotal"`
+ MemoryAllocated int `json:"memoryAllocated"`
+ MemoryTotal int `json:"memoryTotal"`
+ GpusAllocated int `json:"gpusAllocated"`
+ GpusTotal int `json:"gpusTotal"`
+}
+
+// updateNodeStatesRequest model
+type UpdateNodeStatesRequest struct {
+ Nodes []Node `json:"nodes"`
+ Cluster string `json:"cluster" example:"fritz"`
+}
+
+// determineState maps Slurm node-state strings to a schema.NodeState.
+// It assumes that at most one of the known states is present per node.
+func determineState(states []string) schema.NodeState {
+ for _, state := range states {
+ switch strings.ToLower(state) {
+ case "allocated":
+ return schema.NodeStateAllocated
+ case "reserved":
+ return schema.NodeStateReserved
+ case "idle":
+ return schema.NodeStateIdle
+ case "down":
+ return schema.NodeStateDown
+ case "mixed":
+ return schema.NodeStateMixed
+ }
+ }
+
+ return schema.NodeStateUnknown
+}
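+
+// For example, determineState([]string{"IDLE", "DRAIN"}) returns
+// schema.NodeStateIdle: the first recognized state in the input wins, and
+// inputs without any known state map to schema.NodeStateUnknown.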
+
+// updateNodeStates godoc
+// @summary Deliver updated Slurm node states
+// @tags Nodestates
+// @description Accepts a JSON-encoded list of nodes with their current Slurm states.
+// @description The state of each listed node is updated in the node repository.
+// @produce json
+// @param request body UpdateNodeStatesRequest true "Request body containing nodes and their states"
+// @success 200 {object} api.DefaultApiResponse "Success message"
+// @failure 400 {object} api.ErrorResponse "Bad Request"
+// @failure 401 {object} api.ErrorResponse "Unauthorized"
+// @failure 403 {object} api.ErrorResponse "Forbidden"
+// @failure 500 {object} api.ErrorResponse "Internal Server Error"
+// @security ApiKeyAuth
+// @router /api/nodestate/ [post]
+func (api *RestApi) updateNodeStates(rw http.ResponseWriter, r *http.Request) {
+ // Parse request body
+ req := UpdateNodeStatesRequest{}
+ if err := decode(r.Body, &req); err != nil {
+ handleError(fmt.Errorf("parsing request body failed: %w", err), http.StatusBadRequest, rw)
+ return
+ }
+ repo := repository.GetNodeRepository()
+
+ for _, node := range req.Nodes {
+ state := determineState(node.States)
+ repo.UpdateNodeState(node.Name, req.Cluster, &state)
+ }
+
+ rw.Header().Add("Content-Type", "application/json")
+ rw.WriteHeader(http.StatusOK)
+ json.NewEncoder(rw).Encode(DefaultApiResponse{Message: "Node states updated"})
+}
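+
+// Example request (sketch; cluster and node values are placeholders):
+//
+//	curl -X POST http://localhost:8080/api/nodestate/ \
+//	  -H "Authorization: Bearer $JWT" -H "Content-Type: application/json" \
+//	  -d '{"cluster": "fritz", "nodes": [{"hostname": "node001", "states": ["ALLOCATED"]}]}'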
diff --git a/internal/api/rest.go b/internal/api/rest.go
index 23ff618..2e01059 100644
--- a/internal/api/rest.go
+++ b/internal/api/rest.go
@@ -1,36 +1,24 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package api
import (
- "bufio"
- "database/sql"
"encoding/json"
- "errors"
"fmt"
"io"
"net/http"
"os"
"path/filepath"
- "strconv"
- "strings"
"sync"
- "time"
- "github.com/ClusterCockpit/cc-backend/internal/archiver"
"github.com/ClusterCockpit/cc-backend/internal/auth"
"github.com/ClusterCockpit/cc-backend/internal/config"
- "github.com/ClusterCockpit/cc-backend/internal/graph"
- "github.com/ClusterCockpit/cc-backend/internal/graph/model"
- "github.com/ClusterCockpit/cc-backend/internal/importer"
- "github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher"
"github.com/ClusterCockpit/cc-backend/internal/repository"
- "github.com/ClusterCockpit/cc-backend/internal/util"
- "github.com/ClusterCockpit/cc-backend/pkg/archive"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/schema"
+ "github.com/ClusterCockpit/cc-lib/util"
"github.com/gorilla/mux"
)
@@ -73,6 +61,8 @@ func (api *RestApi) MountApiRoutes(r *mux.Router) {
r.HandleFunc("/users/", api.getUsers).Methods(http.MethodGet)
// Cluster List
r.HandleFunc("/clusters/", api.getClusters).Methods(http.MethodGet)
+ // Slurm node state
+ r.HandleFunc("/nodestate/", api.updateNodeStates).Methods(http.MethodPost, http.MethodPut)
// Job Handler
r.HandleFunc("/jobs/start_job/", api.startJob).Methods(http.MethodPost, http.MethodPut)
r.HandleFunc("/jobs/stop_job/", api.stopJobByRequest).Methods(http.MethodPost, http.MethodPut)
@@ -121,46 +111,13 @@ func (api *RestApi) MountConfigApiRoutes(r *mux.Router) {
func (api *RestApi) MountFrontendApiRoutes(r *mux.Router) {
r.StrictSlash(true)
- // Settings Frontrend Uses SessionAuth
+ // Settings Frontend Uses SessionAuth
if api.Authentication != nil {
r.HandleFunc("/jwt/", api.getJWT).Methods(http.MethodGet)
r.HandleFunc("/configuration/", api.updateConfiguration).Methods(http.MethodPost)
}
}
-// DefaultApiResponse model
-type DefaultJobApiResponse struct {
- Message string `json:"msg"`
-}
-
-// StopJobApiRequest model
-type StopJobApiRequest struct {
- JobId *int64 `json:"jobId" example:"123000"`
- Cluster *string `json:"cluster" example:"fritz"`
- StartTime *int64 `json:"startTime" example:"1649723812"`
- State schema.JobState `json:"jobState" validate:"required" example:"completed"`
- StopTime int64 `json:"stopTime" validate:"required" example:"1649763839"`
-}
-
-// DeleteJobApiRequest model
-type DeleteJobApiRequest struct {
- JobId *int64 `json:"jobId" validate:"required" example:"123000"` // Cluster Job ID of job
- Cluster *string `json:"cluster" example:"fritz"` // Cluster of job
- StartTime *int64 `json:"startTime" example:"1649723812"` // Start Time of job as epoch
-}
-
-// GetJobsApiResponse model
-type GetJobsApiResponse struct {
- Jobs []*schema.JobMeta `json:"jobs"` // Array of jobs
- Items int `json:"items"` // Number of jobs returned
- Page int `json:"page"` // Page id returned
-}
-
-// GetClustersApiResponse model
-type GetClustersApiResponse struct {
- Clusters []*schema.Cluster `json:"clusters"` // Array of clusters
-}
-
// ErrorResponse model
type ErrorResponse struct {
// Statustext of Errorcode
@@ -168,58 +125,13 @@ type ErrorResponse struct {
Error string `json:"error"` // Error Message
}
-// ApiTag model
-type ApiTag struct {
- // Tag Type
- Type string `json:"type" example:"Debug"`
- Name string `json:"name" example:"Testjob"` // Tag Name
- Scope string `json:"scope" example:"global"` // Tag Scope for Frontend Display
-}
-
-// ApiMeta model
-type EditMetaRequest struct {
- Key string `json:"key" example:"jobScript"`
- Value string `json:"value" example:"bash script"`
-}
-
-// JobMetaRequest model
-type JobMetaRequest struct {
- JobId *int64 `json:"jobId" validate:"required" example:"123000"` // Cluster Job ID of job
- Cluster *string `json:"cluster" example:"fritz"` // Cluster of job
- StartTime *int64 `json:"startTime" example:"1649723812"` // Start Time of job as epoch
- Payload EditMetaRequest `json:"payload"` // Content to Add to Job Meta_Data
-}
-
-type TagJobApiRequest []*ApiTag
-
-type GetJobApiRequest []string
-
-type GetJobApiResponse struct {
- Meta *schema.Job
- Data []*JobMetricWithName
-}
-
-type GetCompleteJobApiResponse struct {
- Meta *schema.Job
- Data schema.JobData
-}
-
-type JobMetricWithName struct {
- Metric *schema.JobMetric `json:"metric"`
- Name string `json:"name"`
- Scope schema.MetricScope `json:"scope"`
-}
-
-type ApiReturnedUser struct {
- Username string `json:"username"`
- Name string `json:"name"`
- Roles []string `json:"roles"`
- Email string `json:"email"`
- Projects []string `json:"projects"`
+// DefaultApiResponse model
+type DefaultApiResponse struct {
+ Message string `json:"msg"`
}
func handleError(err error, statusCode int, rw http.ResponseWriter) {
- log.Warnf("REST ERROR : %s", err.Error())
+ cclog.Warnf("REST ERROR : %s", err.Error())
rw.Header().Add("Content-Type", "application/json")
rw.WriteHeader(statusCode)
json.NewEncoder(rw).Encode(ErrorResponse{
@@ -234,1128 +146,6 @@ func decode(r io.Reader, val any) error {
return dec.Decode(val)
}
-// getClusters godoc
-// @summary Lists all cluster configs
-// @tags Cluster query
-// @description Get a list of all cluster configs. Specific cluster can be requested using query parameter.
-// @produce json
-// @param cluster query string false "Job Cluster"
-// @success 200 {object} api.GetClustersApiResponse "Array of clusters"
-// @failure 400 {object} api.ErrorResponse "Bad Request"
-// @failure 401 {object} api.ErrorResponse "Unauthorized"
-// @failure 403 {object} api.ErrorResponse "Forbidden"
-// @failure 500 {object} api.ErrorResponse "Internal Server Error"
-// @security ApiKeyAuth
-// @router /api/clusters/ [get]
-func (api *RestApi) getClusters(rw http.ResponseWriter, r *http.Request) {
- if user := repository.GetUserFromContext(r.Context()); user != nil &&
- !user.HasRole(schema.RoleApi) {
-
- handleError(fmt.Errorf("missing role: %v", schema.GetRoleString(schema.RoleApi)), http.StatusForbidden, rw)
- return
- }
-
- rw.Header().Add("Content-Type", "application/json")
- bw := bufio.NewWriter(rw)
- defer bw.Flush()
-
- var clusters []*schema.Cluster
-
- if r.URL.Query().Has("cluster") {
- name := r.URL.Query().Get("cluster")
- cluster := archive.GetCluster(name)
- if cluster == nil {
- handleError(fmt.Errorf("unknown cluster: %s", name), http.StatusBadRequest, rw)
- return
- }
- clusters = append(clusters, cluster)
- } else {
- clusters = archive.Clusters
- }
-
- payload := GetClustersApiResponse{
- Clusters: clusters,
- }
-
- if err := json.NewEncoder(bw).Encode(payload); err != nil {
- handleError(err, http.StatusInternalServerError, rw)
- return
- }
-}
-
-// getJobs godoc
-// @summary Lists all jobs
-// @tags Job query
-// @description Get a list of all jobs. Filters can be applied using query parameters.
-// @description Number of results can be limited by page. Results are sorted by descending startTime.
-// @produce json
-// @param state query string false "Job State" Enums(running, completed, failed, cancelled, stopped, timeout)
-// @param cluster query string false "Job Cluster"
-// @param start-time query string false "Syntax: '$from-$to', as unix epoch timestamps in seconds"
-// @param items-per-page query int false "Items per page (Default: 25)"
-// @param page query int false "Page Number (Default: 1)"
-// @param with-metadata query bool false "Include metadata (e.g. jobScript) in response"
-// @success 200 {object} api.GetJobsApiResponse "Job array and page info"
-// @failure 400 {object} api.ErrorResponse "Bad Request"
-// @failure 401 {object} api.ErrorResponse "Unauthorized"
-// @failure 403 {object} api.ErrorResponse "Forbidden"
-// @failure 500 {object} api.ErrorResponse "Internal Server Error"
-// @security ApiKeyAuth
-// @router /api/jobs/ [get]
-func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) {
- withMetadata := false
- filter := &model.JobFilter{}
- page := &model.PageRequest{ItemsPerPage: 25, Page: 1}
- order := &model.OrderByInput{Field: "startTime", Type: "col", Order: model.SortDirectionEnumDesc}
-
- for key, vals := range r.URL.Query() {
- switch key {
- case "state":
- for _, s := range vals {
- state := schema.JobState(s)
- if !state.Valid() {
- handleError(fmt.Errorf("invalid query parameter value: state"),
- http.StatusBadRequest, rw)
- return
- }
- filter.State = append(filter.State, state)
- }
- case "cluster":
- filter.Cluster = &model.StringInput{Eq: &vals[0]}
- case "start-time":
- st := strings.Split(vals[0], "-")
- if len(st) != 2 {
- handleError(fmt.Errorf("invalid query parameter value: startTime"),
- http.StatusBadRequest, rw)
- return
- }
- from, err := strconv.ParseInt(st[0], 10, 64)
- if err != nil {
- handleError(err, http.StatusBadRequest, rw)
- return
- }
- to, err := strconv.ParseInt(st[1], 10, 64)
- if err != nil {
- handleError(err, http.StatusBadRequest, rw)
- return
- }
- ufrom, uto := time.Unix(from, 0), time.Unix(to, 0)
- filter.StartTime = &schema.TimeRange{From: &ufrom, To: &uto}
- case "page":
- x, err := strconv.Atoi(vals[0])
- if err != nil {
- handleError(err, http.StatusBadRequest, rw)
- return
- }
- page.Page = x
- case "items-per-page":
- x, err := strconv.Atoi(vals[0])
- if err != nil {
- handleError(err, http.StatusBadRequest, rw)
- return
- }
- page.ItemsPerPage = x
- case "with-metadata":
- withMetadata = true
- default:
- handleError(fmt.Errorf("invalid query parameter: %s", key),
- http.StatusBadRequest, rw)
- return
- }
- }
-
- jobs, err := api.JobRepository.QueryJobs(r.Context(), []*model.JobFilter{filter}, page, order)
- if err != nil {
- handleError(err, http.StatusInternalServerError, rw)
- return
- }
-
- results := make([]*schema.JobMeta, 0, len(jobs))
- for _, job := range jobs {
- if withMetadata {
- if _, err = api.JobRepository.FetchMetadata(job); err != nil {
- handleError(err, http.StatusInternalServerError, rw)
- return
- }
- }
-
- res := &schema.JobMeta{
- ID: &job.ID,
- BaseJob: job.BaseJob,
- StartTime: job.StartTime.Unix(),
- }
-
- res.Tags, err = api.JobRepository.GetTags(repository.GetUserFromContext(r.Context()), &job.ID)
- if err != nil {
- handleError(err, http.StatusInternalServerError, rw)
- return
- }
-
- if res.MonitoringStatus == schema.MonitoringStatusArchivingSuccessful {
- res.Statistics, err = archive.GetStatistics(job)
- if err != nil {
- handleError(err, http.StatusInternalServerError, rw)
- return
- }
- }
-
- results = append(results, res)
- }
-
- log.Debugf("/api/jobs: %d jobs returned", len(results))
- rw.Header().Add("Content-Type", "application/json")
- bw := bufio.NewWriter(rw)
- defer bw.Flush()
-
- payload := GetJobsApiResponse{
- Jobs: results,
- Items: page.ItemsPerPage,
- Page: page.Page,
- }
-
- if err := json.NewEncoder(bw).Encode(payload); err != nil {
- handleError(err, http.StatusInternalServerError, rw)
- return
- }
-}
-
-// getCompleteJobById godoc
-// @summary Get job meta and optional all metric data
-// @tags Job query
-// @description Job to get is specified by database ID
-// @description Returns full job resource information according to 'JobMeta' scheme and all metrics according to 'JobData'.
-// @produce json
-// @param id path int true "Database ID of Job"
-// @param all-metrics query bool false "Include all available metrics"
-// @success 200 {object} api.GetJobApiResponse "Job resource"
-// @failure 400 {object} api.ErrorResponse "Bad Request"
-// @failure 401 {object} api.ErrorResponse "Unauthorized"
-// @failure 403 {object} api.ErrorResponse "Forbidden"
-// @failure 404 {object} api.ErrorResponse "Resource not found"
-// @failure 422 {object} api.ErrorResponse "Unprocessable Entity: finding job failed: sql: no rows in result set"
-// @failure 500 {object} api.ErrorResponse "Internal Server Error"
-// @security ApiKeyAuth
-// @router /api/jobs/{id} [get]
-func (api *RestApi) getCompleteJobById(rw http.ResponseWriter, r *http.Request) {
- // Fetch job from db
- id, ok := mux.Vars(r)["id"]
- var job *schema.Job
- var err error
- if ok {
- id, e := strconv.ParseInt(id, 10, 64)
- if e != nil {
- handleError(fmt.Errorf("integer expected in path for id: %w", e), http.StatusBadRequest, rw)
- return
- }
-
- job, err = api.JobRepository.FindById(r.Context(), id) // Get Job from Repo by ID
- } else {
- handleError(fmt.Errorf("the parameter 'id' is required"), http.StatusBadRequest, rw)
- return
- }
- if err != nil {
- handleError(fmt.Errorf("finding job with db id %s failed: %w", id, err), http.StatusUnprocessableEntity, rw)
- return
- }
-
- job.Tags, err = api.JobRepository.GetTags(repository.GetUserFromContext(r.Context()), &job.ID)
- if err != nil {
- handleError(err, http.StatusInternalServerError, rw)
- return
-
- }
- if _, err = api.JobRepository.FetchMetadata(job); err != nil {
-
- handleError(err, http.StatusInternalServerError, rw)
- return
- }
-
- var scopes []schema.MetricScope
-
- if job.NumNodes == 1 {
- scopes = []schema.MetricScope{"core"}
- } else {
- scopes = []schema.MetricScope{"node"}
- }
-
- var data schema.JobData
-
- metricConfigs := archive.GetCluster(job.Cluster).MetricConfig
- resolution := 0
-
- for _, mc := range metricConfigs {
- resolution = max(resolution, mc.Timestep)
- }
-
- if r.URL.Query().Get("all-metrics") == "true" {
- data, err = metricDataDispatcher.LoadData(job, nil, scopes, r.Context(), resolution)
- if err != nil {
- log.Warnf("REST: error while loading all-metrics job data for JobID %d on %s", job.JobID, job.Cluster)
- return
- }
- }
-
- log.Debugf("/api/job/%s: get job %d", id, job.JobID)
- rw.Header().Add("Content-Type", "application/json")
- bw := bufio.NewWriter(rw)
- defer bw.Flush()
-
- payload := GetCompleteJobApiResponse{
- Meta: job,
- Data: data,
- }
-
- if err := json.NewEncoder(bw).Encode(payload); err != nil {
- handleError(err, http.StatusInternalServerError, rw)
- return
- }
-}
-
-// getJobById godoc
-// @summary Get job meta and configurable metric data
-// @tags Job query
-// @description Job to get is specified by database ID
-// @description Returns full job resource information according to 'JobMeta' scheme and all metrics according to 'JobData'.
-// @accept json
-// @produce json
-// @param id path int true "Database ID of Job"
-// @param request body api.GetJobApiRequest true "Array of metric names"
-// @success 200 {object} api.GetJobApiResponse "Job resource"
-// @failure 400 {object} api.ErrorResponse "Bad Request"
-// @failure 401 {object} api.ErrorResponse "Unauthorized"
-// @failure 403 {object} api.ErrorResponse "Forbidden"
-// @failure 404 {object} api.ErrorResponse "Resource not found"
-// @failure 422 {object} api.ErrorResponse "Unprocessable Entity: finding job failed: sql: no rows in result set"
-// @failure 500 {object} api.ErrorResponse "Internal Server Error"
-// @security ApiKeyAuth
-// @router /api/jobs/{id} [post]
-func (api *RestApi) getJobById(rw http.ResponseWriter, r *http.Request) {
- // Fetch job from db
- id, ok := mux.Vars(r)["id"]
- var job *schema.Job
- var err error
- if ok {
- id, e := strconv.ParseInt(id, 10, 64)
- if e != nil {
- handleError(fmt.Errorf("integer expected in path for id: %w", e), http.StatusBadRequest, rw)
- return
- }
-
- job, err = api.JobRepository.FindById(r.Context(), id)
- } else {
- handleError(errors.New("the parameter 'id' is required"), http.StatusBadRequest, rw)
- return
- }
- if err != nil {
- handleError(fmt.Errorf("finding job with db id %s failed: %w", id, err), http.StatusUnprocessableEntity, rw)
- return
- }
-
- job.Tags, err = api.JobRepository.GetTags(repository.GetUserFromContext(r.Context()), &job.ID)
- if err != nil {
- handleError(err, http.StatusInternalServerError, rw)
- return
-
- }
- if _, err = api.JobRepository.FetchMetadata(job); err != nil {
-
- handleError(err, http.StatusInternalServerError, rw)
- return
- }
-
- var metrics GetJobApiRequest
- if err = decode(r.Body, &metrics); err != nil {
- http.Error(rw, err.Error(), http.StatusBadRequest)
- return
- }
-
- var scopes []schema.MetricScope
-
- if job.NumNodes == 1 {
- scopes = []schema.MetricScope{"core"}
- } else {
- scopes = []schema.MetricScope{"node"}
- }
-
- metricConfigs := archive.GetCluster(job.Cluster).MetricConfig
- resolution := 0
-
- for _, mc := range metricConfigs {
- resolution = max(resolution, mc.Timestep)
- }
-
- data, err := metricDataDispatcher.LoadData(job, metrics, scopes, r.Context(), resolution)
- if err != nil {
- log.Warnf("REST: error while loading job data for JobID %d on %s", job.JobID, job.Cluster)
- return
- }
-
- res := []*JobMetricWithName{}
- for name, md := range data {
- for scope, metric := range md {
- res = append(res, &JobMetricWithName{
- Name: name,
- Scope: scope,
- Metric: metric,
- })
- }
- }
-
- log.Debugf("/api/job/%s: get job %d", id, job.JobID)
- rw.Header().Add("Content-Type", "application/json")
- bw := bufio.NewWriter(rw)
- defer bw.Flush()
-
- payload := GetJobApiResponse{
- Meta: job,
- Data: res,
- }
-
- if err := json.NewEncoder(bw).Encode(payload); err != nil {
- handleError(err, http.StatusInternalServerError, rw)
- return
- }
-}
-
-// editMeta godoc
-// @summary Edit meta-data json of job identified by database id
-// @tags Job add and modify
-// @description Edit key value pairs in job metadata json of job specified by database id
-// @description If a key already exists its content will be overwritten
-// @accept json
-// @produce json
-// @param id path int true "Job Database ID"
-// @param request body api.EditMetaRequest true "Metadata Key value pair to add or update"
-// @success 200 {object} schema.Job "Updated job resource"
-// @failure 400 {object} api.ErrorResponse "Bad Request"
-// @failure 401 {object} api.ErrorResponse "Unauthorized"
-// @failure 404 {object} api.ErrorResponse "Job does not exist"
-// @failure 500 {object} api.ErrorResponse "Internal Server Error"
-// @security ApiKeyAuth
-// @router /api/jobs/edit_meta/{id} [patch]
-func (api *RestApi) editMeta(rw http.ResponseWriter, r *http.Request) {
- id, err := strconv.ParseInt(mux.Vars(r)["id"], 10, 64)
- if err != nil {
- http.Error(rw, err.Error(), http.StatusBadRequest)
- return
- }
-
- job, err := api.JobRepository.FindById(r.Context(), id)
- if err != nil {
- http.Error(rw, err.Error(), http.StatusNotFound)
- return
- }
-
- var req EditMetaRequest
- if err := decode(r.Body, &req); err != nil {
- http.Error(rw, err.Error(), http.StatusBadRequest)
- return
- }
-
- if err := api.JobRepository.UpdateMetadata(job, req.Key, req.Value); err != nil {
- http.Error(rw, err.Error(), http.StatusInternalServerError)
- return
- }
-
- rw.Header().Add("Content-Type", "application/json")
- rw.WriteHeader(http.StatusOK)
- json.NewEncoder(rw).Encode(job)
-}
-
-// editMetaByRequest godoc
-// @summary Edit meta-data json of job identified by request
-// @tags Job add and modify
-// @description Edit key value pairs in metadata json of job specified by jobID, StartTime and Cluster
-// @description If a key already exists its content will be overwritten
-// @accept json
-// @produce json
-// @param request body api.JobMetaRequest true "Specifies job and payload to add or update"
-// @success 200 {object} schema.Job "Updated job resource"
-// @failure 400 {object} api.ErrorResponse "Bad Request"
-// @failure 401 {object} api.ErrorResponse "Unauthorized"
-// @failure 404 {object} api.ErrorResponse "Job does not exist"
-// @failure 500 {object} api.ErrorResponse "Internal Server Error"
-// @security ApiKeyAuth
-// @router /api/jobs/edit_meta/ [patch]
-func (api *RestApi) editMetaByRequest(rw http.ResponseWriter, r *http.Request) {
- // Parse request body
- req := JobMetaRequest{}
- if err := decode(r.Body, &req); err != nil {
- handleError(fmt.Errorf("parsing request body failed: %w", err), http.StatusBadRequest, rw)
- return
- }
-
- // Fetch job (that will have its meta_data edited) from db
- var job *schema.Job
- var err error
- if req.JobId == nil {
- handleError(errors.New("the field 'jobId' is required"), http.StatusBadRequest, rw)
- return
- }
-
- // log.Printf("loading db job for editMetaByRequest... : JobMetaRequest=%v", req)
- job, err = api.JobRepository.Find(req.JobId, req.Cluster, req.StartTime)
- if err != nil {
- handleError(fmt.Errorf("finding job failed: %w", err), http.StatusUnprocessableEntity, rw)
- return
- }
-
- if err := api.JobRepository.UpdateMetadata(job, req.Payload.Key, req.Payload.Value); err != nil {
- http.Error(rw, err.Error(), http.StatusInternalServerError)
- return
- }
-
- rw.Header().Add("Content-Type", "application/json")
- rw.WriteHeader(http.StatusOK)
- json.NewEncoder(rw).Encode(job)
-}
-
-// tagJob godoc
-// @summary Adds one or more tags to a job
-// @tags Job add and modify
-// @description Adds tag(s) to a job specified by DB ID. Name and Type of Tag(s) can be chosen freely.
-// @description Tag Scope for frontend visibility will default to "global" if none entered, other options: "admin" or specific username.
-// @description If tagged job is already finished: Tag will be written directly to respective archive files.
-// @accept json
-// @produce json
-// @param id path int true "Job Database ID"
-// @param request body api.TagJobApiRequest true "Array of tag-objects to add"
-// @success 200 {object} schema.Job "Updated job resource"
-// @failure 400 {object} api.ErrorResponse "Bad Request"
-// @failure 401 {object} api.ErrorResponse "Unauthorized"
-// @failure 404 {object} api.ErrorResponse "Job or tag does not exist"
-// @failure 500 {object} api.ErrorResponse "Internal Server Error"
-// @security ApiKeyAuth
-// @router /api/jobs/tag_job/{id} [post]
-func (api *RestApi) tagJob(rw http.ResponseWriter, r *http.Request) {
- id, err := strconv.ParseInt(mux.Vars(r)["id"], 10, 64)
- if err != nil {
- http.Error(rw, err.Error(), http.StatusBadRequest)
- return
- }
-
- job, err := api.JobRepository.FindById(r.Context(), id)
- if err != nil {
- http.Error(rw, err.Error(), http.StatusNotFound)
- return
- }
-
- job.Tags, err = api.JobRepository.GetTags(repository.GetUserFromContext(r.Context()), &job.ID)
- if err != nil {
- http.Error(rw, err.Error(), http.StatusInternalServerError)
- return
- }
-
- var req TagJobApiRequest
- if err := decode(r.Body, &req); err != nil {
- http.Error(rw, err.Error(), http.StatusBadRequest)
- return
- }
-
- for _, tag := range req {
- tagId, err := api.JobRepository.AddTagOrCreate(repository.GetUserFromContext(r.Context()), job.ID, tag.Type, tag.Name, tag.Scope)
- if err != nil {
- http.Error(rw, err.Error(), http.StatusInternalServerError)
- return
- }
-
- job.Tags = append(job.Tags, &schema.Tag{
- ID: tagId,
- Type: tag.Type,
- Name: tag.Name,
- Scope: tag.Scope,
- })
- }
-
- rw.Header().Add("Content-Type", "application/json")
- rw.WriteHeader(http.StatusOK)
- json.NewEncoder(rw).Encode(job)
-}
-
-// removeTagJob godoc
-// @summary Removes one or more tags from a job
-// @tags Job add and modify
-// @description Removes tag(s) from a job specified by DB ID. Name and Type of Tag(s) must match.
-// @description Tag Scope is required for matching, options: "global", "admin". Private tags can not be deleted via API.
-// @description If tagged job is already finished: Tag will be removed from respective archive files.
-// @accept json
-// @produce json
-// @param id path int true "Job Database ID"
-// @param request body api.TagJobApiRequest true "Array of tag-objects to remove"
-// @success 200 {object} schema.Job "Updated job resource"
-// @failure 400 {object} api.ErrorResponse "Bad Request"
-// @failure 401 {object} api.ErrorResponse "Unauthorized"
-// @failure 404 {object} api.ErrorResponse "Job or tag does not exist"
-// @failure 500 {object} api.ErrorResponse "Internal Server Error"
-// @security ApiKeyAuth
-// @router /jobs/tag_job/{id} [delete]
-func (api *RestApi) removeTagJob(rw http.ResponseWriter, r *http.Request) {
- id, err := strconv.ParseInt(mux.Vars(r)["id"], 10, 64)
- if err != nil {
- http.Error(rw, err.Error(), http.StatusBadRequest)
- return
- }
-
- job, err := api.JobRepository.FindById(r.Context(), id)
- if err != nil {
- http.Error(rw, err.Error(), http.StatusNotFound)
- return
- }
-
- job.Tags, err = api.JobRepository.GetTags(repository.GetUserFromContext(r.Context()), &job.ID)
- if err != nil {
- http.Error(rw, err.Error(), http.StatusInternalServerError)
- return
- }
-
- var req TagJobApiRequest
- if err := decode(r.Body, &req); err != nil {
- http.Error(rw, err.Error(), http.StatusBadRequest)
- return
- }
-
- for _, rtag := range req {
- // Only Global and Admin Tags
- if rtag.Scope != "global" && rtag.Scope != "admin" {
- log.Warnf("Cannot delete private tag for job %d: Skip", job.JobID)
- continue
- }
-
- remainingTags, err := api.JobRepository.RemoveJobTagByRequest(repository.GetUserFromContext(r.Context()), job.ID, rtag.Type, rtag.Name, rtag.Scope)
- if err != nil {
- http.Error(rw, err.Error(), http.StatusInternalServerError)
- return
- }
-
- job.Tags = remainingTags
- }
-
- rw.Header().Add("Content-Type", "application/json")
- rw.WriteHeader(http.StatusOK)
- json.NewEncoder(rw).Encode(job)
-}
-
-// removeTags godoc
-// @summary Removes all tags and job-relations for type:name tuple
-// @tags Tag remove
-// @description Removes tags by type and name. Name and Type of Tag(s) must match.
-// @description Tag Scope is required for matching, options: "global", "admin". Private tags can not be deleted via API.
-// @description Tag wills be removed from respective archive files.
-// @accept json
-// @produce plain
-// @param request body api.TagJobApiRequest true "Array of tag-objects to remove"
-// @success 200 {string} string "Success Response"
-// @failure 400 {object} api.ErrorResponse "Bad Request"
-// @failure 401 {object} api.ErrorResponse "Unauthorized"
-// @failure 404 {object} api.ErrorResponse "Job or tag does not exist"
-// @failure 500 {object} api.ErrorResponse "Internal Server Error"
-// @security ApiKeyAuth
-// @router /tags/ [delete]
-func (api *RestApi) removeTags(rw http.ResponseWriter, r *http.Request) {
- var req TagJobApiRequest
- if err := decode(r.Body, &req); err != nil {
- http.Error(rw, err.Error(), http.StatusBadRequest)
- return
- }
-
- targetCount := len(req)
- currentCount := 0
- for _, rtag := range req {
- // Only Global and Admin Tags
- if rtag.Scope != "global" && rtag.Scope != "admin" {
- log.Warn("Cannot delete private tag: Skip")
- continue
- }
-
- err := api.JobRepository.RemoveTagByRequest(rtag.Type, rtag.Name, rtag.Scope)
- if err != nil {
- http.Error(rw, err.Error(), http.StatusInternalServerError)
- return
- } else {
- currentCount++
- }
- }
-
- rw.WriteHeader(http.StatusOK)
- rw.Write([]byte(fmt.Sprintf("Deleted Tags from DB: %d successfull of %d requested\n", currentCount, targetCount)))
-}
-
-// startJob godoc
-// @summary Adds a new job as "running"
-// @tags Job add and modify
-// @description Job specified in request body will be saved to database as "running" with new DB ID.
-// @description Job specifications follow the 'JobMeta' scheme, API will fail to execute if requirements are not met.
-// @accept json
-// @produce json
-// @param request body schema.JobMeta true "Job to add"
-// @success 201 {object} api.DefaultJobApiResponse "Job added successfully"
-// @failure 400 {object} api.ErrorResponse "Bad Request"
-// @failure 401 {object} api.ErrorResponse "Unauthorized"
-// @failure 403 {object} api.ErrorResponse "Forbidden"
-// @failure 422 {object} api.ErrorResponse "Unprocessable Entity: The combination of jobId, clusterId and startTime does already exist"
-// @failure 500 {object} api.ErrorResponse "Internal Server Error"
-// @security ApiKeyAuth
-// @router /api/jobs/start_job/ [post]
-func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) {
- req := schema.JobMeta{BaseJob: schema.JobDefaults}
- if err := decode(r.Body, &req); err != nil {
- handleError(fmt.Errorf("parsing request body failed: %w", err), http.StatusBadRequest, rw)
- return
- }
-
- req.State = schema.JobStateRunning
-
- if err := importer.SanityChecks(&req.BaseJob); err != nil {
- handleError(err, http.StatusBadRequest, rw)
- return
- }
-
- // aquire lock to avoid race condition between API calls
- var unlockOnce sync.Once
- api.RepositoryMutex.Lock()
- defer unlockOnce.Do(api.RepositoryMutex.Unlock)
-
- // Check if combination of (job_id, cluster_id, start_time) already exists:
- jobs, err := api.JobRepository.FindAll(&req.JobID, &req.Cluster, nil)
- if err != nil && err != sql.ErrNoRows {
- handleError(fmt.Errorf("checking for duplicate failed: %w", err), http.StatusInternalServerError, rw)
- return
- } else if err == nil {
- for _, job := range jobs {
- if (req.StartTime - job.StartTimeUnix) < 86400 {
- handleError(fmt.Errorf("a job with that jobId, cluster and startTime already exists: dbid: %d, jobid: %d", job.ID, job.JobID), http.StatusUnprocessableEntity, rw)
- return
- }
- }
- }
-
- id, err := api.JobRepository.Start(&req)
- if err != nil {
- handleError(fmt.Errorf("insert into database failed: %w", err), http.StatusInternalServerError, rw)
- return
- }
- // unlock here, adding Tags can be async
- unlockOnce.Do(api.RepositoryMutex.Unlock)
-
- for _, tag := range req.Tags {
- if _, err := api.JobRepository.AddTagOrCreate(repository.GetUserFromContext(r.Context()), id, tag.Type, tag.Name, tag.Scope); err != nil {
- http.Error(rw, err.Error(), http.StatusInternalServerError)
- handleError(fmt.Errorf("adding tag to new job %d failed: %w", id, err), http.StatusInternalServerError, rw)
- return
- }
- }
-
- log.Printf("new job (id: %d): cluster=%s, jobId=%d, user=%s, startTime=%d", id, req.Cluster, req.JobID, req.User, req.StartTime)
- rw.Header().Add("Content-Type", "application/json")
- rw.WriteHeader(http.StatusCreated)
- json.NewEncoder(rw).Encode(DefaultJobApiResponse{
- Message: "success",
- })
-}
-
-// stopJobByRequest godoc
-// @summary Marks job as completed and triggers archiving
-// @tags Job add and modify
-// @description Job to stop is specified by request body. All fields are required in this case.
-// @description Returns full job resource information according to 'JobMeta' scheme.
-// @produce json
-// @param request body api.StopJobApiRequest true "All fields required"
-// @success 200 {object} schema.JobMeta "Success message"
-// @failure 400 {object} api.ErrorResponse "Bad Request"
-// @failure 401 {object} api.ErrorResponse "Unauthorized"
-// @failure 403 {object} api.ErrorResponse "Forbidden"
-// @failure 404 {object} api.ErrorResponse "Resource not found"
-// @failure 422 {object} api.ErrorResponse "Unprocessable Entity: job has already been stopped"
-// @failure 500 {object} api.ErrorResponse "Internal Server Error"
-// @security ApiKeyAuth
-// @router /api/jobs/stop_job/ [post]
-func (api *RestApi) stopJobByRequest(rw http.ResponseWriter, r *http.Request) {
- // Parse request body
- req := StopJobApiRequest{}
- if err := decode(r.Body, &req); err != nil {
- handleError(fmt.Errorf("parsing request body failed: %w", err), http.StatusBadRequest, rw)
- return
- }
-
- // Fetch job (that will be stopped) from db
- var job *schema.Job
- var err error
- if req.JobId == nil {
- handleError(errors.New("the field 'jobId' is required"), http.StatusBadRequest, rw)
- return
- }
-
- // log.Printf("loading db job for stopJobByRequest... : stopJobApiRequest=%v", req)
- job, err = api.JobRepository.Find(req.JobId, req.Cluster, req.StartTime)
- if err != nil {
- handleError(fmt.Errorf("finding job failed: %w", err), http.StatusUnprocessableEntity, rw)
- return
- }
-
- api.checkAndHandleStopJob(rw, job, req)
-}
-
-// deleteJobById godoc
-// @summary Remove a job from the sql database
-// @tags Job remove
-// @description Job to remove is specified by database ID. This will not remove the job from the job archive.
-// @produce json
-// @param id path int true "Database ID of Job"
-// @success 200 {object} api.DefaultJobApiResponse "Success message"
-// @failure 400 {object} api.ErrorResponse "Bad Request"
-// @failure 401 {object} api.ErrorResponse "Unauthorized"
-// @failure 403 {object} api.ErrorResponse "Forbidden"
-// @failure 404 {object} api.ErrorResponse "Resource not found"
-// @failure 422 {object} api.ErrorResponse "Unprocessable Entity: finding job failed: sql: no rows in result set"
-// @failure 500 {object} api.ErrorResponse "Internal Server Error"
-// @security ApiKeyAuth
-// @router /api/jobs/delete_job/{id} [delete]
-func (api *RestApi) deleteJobById(rw http.ResponseWriter, r *http.Request) {
- // Fetch job (that will be stopped) from db
- id, ok := mux.Vars(r)["id"]
- var err error
- if ok {
- id, e := strconv.ParseInt(id, 10, 64)
- if e != nil {
- handleError(fmt.Errorf("integer expected in path for id: %w", e), http.StatusBadRequest, rw)
- return
- }
-
- err = api.JobRepository.DeleteJobById(id)
- } else {
- handleError(errors.New("the parameter 'id' is required"), http.StatusBadRequest, rw)
- return
- }
- if err != nil {
- handleError(fmt.Errorf("deleting job failed: %w", err), http.StatusUnprocessableEntity, rw)
- return
- }
- rw.Header().Add("Content-Type", "application/json")
- rw.WriteHeader(http.StatusOK)
- json.NewEncoder(rw).Encode(DefaultJobApiResponse{
- Message: fmt.Sprintf("Successfully deleted job %s", id),
- })
-}
-
-// deleteJobByRequest godoc
-// @summary Remove a job from the sql database
-// @tags Job remove
-// @description Job to delete is specified by request body. All fields are required in this case.
-// @accept json
-// @produce json
-// @param request body api.DeleteJobApiRequest true "All fields required"
-// @success 200 {object} api.DefaultJobApiResponse "Success message"
-// @failure 400 {object} api.ErrorResponse "Bad Request"
-// @failure 401 {object} api.ErrorResponse "Unauthorized"
-// @failure 403 {object} api.ErrorResponse "Forbidden"
-// @failure 404 {object} api.ErrorResponse "Resource not found"
-// @failure 422 {object} api.ErrorResponse "Unprocessable Entity: finding job failed: sql: no rows in result set"
-// @failure 500 {object} api.ErrorResponse "Internal Server Error"
-// @security ApiKeyAuth
-// @router /api/jobs/delete_job/ [delete]
-func (api *RestApi) deleteJobByRequest(rw http.ResponseWriter, r *http.Request) {
- // Parse request body
- req := DeleteJobApiRequest{}
- if err := decode(r.Body, &req); err != nil {
- handleError(fmt.Errorf("parsing request body failed: %w", err), http.StatusBadRequest, rw)
- return
- }
-
- // Fetch job (that will be deleted) from db
- var job *schema.Job
- var err error
- if req.JobId == nil {
- handleError(errors.New("the field 'jobId' is required"), http.StatusBadRequest, rw)
- return
- }
-
- job, err = api.JobRepository.Find(req.JobId, req.Cluster, req.StartTime)
- if err != nil {
- handleError(fmt.Errorf("finding job failed: %w", err), http.StatusUnprocessableEntity, rw)
- return
- }
-
- err = api.JobRepository.DeleteJobById(job.ID)
- if err != nil {
- handleError(fmt.Errorf("deleting job failed: %w", err), http.StatusUnprocessableEntity, rw)
- return
- }
-
- rw.Header().Add("Content-Type", "application/json")
- rw.WriteHeader(http.StatusOK)
- json.NewEncoder(rw).Encode(DefaultJobApiResponse{
- Message: fmt.Sprintf("Successfully deleted job %d", job.ID),
- })
-}
-
-// deleteJobBefore godoc
-// @summary Remove a job from the sql database
-// @tags Job remove
-// @description Remove all jobs with start time before timestamp. The jobs will not be removed from the job archive.
-// @produce json
-// @param ts path int true "Unix epoch timestamp"
-// @success 200 {object} api.DefaultJobApiResponse "Success message"
-// @failure 400 {object} api.ErrorResponse "Bad Request"
-// @failure 401 {object} api.ErrorResponse "Unauthorized"
-// @failure 403 {object} api.ErrorResponse "Forbidden"
-// @failure 404 {object} api.ErrorResponse "Resource not found"
-// @failure 422 {object} api.ErrorResponse "Unprocessable Entity: finding job failed: sql: no rows in result set"
-// @failure 500 {object} api.ErrorResponse "Internal Server Error"
-// @security ApiKeyAuth
-// @router /api/jobs/delete_job_before/{ts} [delete]
-func (api *RestApi) deleteJobBefore(rw http.ResponseWriter, r *http.Request) {
- var cnt int
- // Fetch job (that will be stopped) from db
- id, ok := mux.Vars(r)["ts"]
- var err error
- if ok {
- ts, e := strconv.ParseInt(id, 10, 64)
- if e != nil {
- handleError(fmt.Errorf("integer expected in path for ts: %w", e), http.StatusBadRequest, rw)
- return
- }
-
- cnt, err = api.JobRepository.DeleteJobsBefore(ts)
- } else {
- handleError(errors.New("the parameter 'ts' is required"), http.StatusBadRequest, rw)
- return
- }
- if err != nil {
- handleError(fmt.Errorf("deleting jobs failed: %w", err), http.StatusUnprocessableEntity, rw)
- return
- }
-
- rw.Header().Add("Content-Type", "application/json")
- rw.WriteHeader(http.StatusOK)
- json.NewEncoder(rw).Encode(DefaultJobApiResponse{
- Message: fmt.Sprintf("Successfully deleted %d jobs", cnt),
- })
-}
-
-func (api *RestApi) checkAndHandleStopJob(rw http.ResponseWriter, job *schema.Job, req StopJobApiRequest) {
- // Sanity checks
- if job.State != schema.JobStateRunning {
- handleError(fmt.Errorf("jobId %d (id %d) on %s : job has already been stopped (state is: %s)", job.JobID, job.ID, job.Cluster, job.State), http.StatusUnprocessableEntity, rw)
- return
- }
-
- if job == nil || job.StartTime.Unix() > req.StopTime {
- handleError(fmt.Errorf("jobId %d (id %d) on %s : stopTime %d must be larger/equal than startTime %d", job.JobID, job.ID, job.Cluster, req.StopTime, job.StartTime.Unix()), http.StatusBadRequest, rw)
- return
- }
-
- if req.State != "" && !req.State.Valid() {
- handleError(fmt.Errorf("jobId %d (id %d) on %s : invalid requested job state: %#v", job.JobID, job.ID, job.Cluster, req.State), http.StatusBadRequest, rw)
- return
- } else if req.State == "" {
- req.State = schema.JobStateCompleted
- }
-
- // Mark job as stopped in the database (update state and duration)
- job.Duration = int32(req.StopTime - job.StartTime.Unix())
- job.State = req.State
- if err := api.JobRepository.Stop(job.ID, job.Duration, job.State, job.MonitoringStatus); err != nil {
- handleError(fmt.Errorf("jobId %d (id %d) on %s : marking job as '%s' (duration: %d) in DB failed: %w", job.JobID, job.ID, job.Cluster, job.State, job.Duration, err), http.StatusInternalServerError, rw)
- return
- }
-
- log.Printf("archiving job... (dbid: %d): cluster=%s, jobId=%d, user=%s, startTime=%s, duration=%d, state=%s", job.ID, job.Cluster, job.JobID, job.User, job.StartTime, job.Duration, job.State)
-
- // Send a response (with status OK). This means that erros that happen from here on forward
- // can *NOT* be communicated to the client. If reading from a MetricDataRepository or
- // writing to the filesystem fails, the client will not know.
- rw.Header().Add("Content-Type", "application/json")
- rw.WriteHeader(http.StatusOK)
- json.NewEncoder(rw).Encode(job)
-
- // Monitoring is disabled...
- if job.MonitoringStatus == schema.MonitoringStatusDisabled {
- return
- }
-
- // Trigger async archiving
- archiver.TriggerArchiving(job)
-}
-
-func (api *RestApi) getJobMetrics(rw http.ResponseWriter, r *http.Request) {
- id := mux.Vars(r)["id"]
- metrics := r.URL.Query()["metric"]
- var scopes []schema.MetricScope
- for _, scope := range r.URL.Query()["scope"] {
- var s schema.MetricScope
- if err := s.UnmarshalGQL(scope); err != nil {
- http.Error(rw, err.Error(), http.StatusBadRequest)
- return
- }
- scopes = append(scopes, s)
- }
-
- rw.Header().Add("Content-Type", "application/json")
- rw.WriteHeader(http.StatusOK)
-
-	type Response struct {
- Data *struct {
- JobMetrics []*model.JobMetricWithName `json:"jobMetrics"`
- } `json:"data"`
- Error *struct {
- Message string `json:"message"`
- } `json:"error"`
- }
-
- resolver := graph.GetResolverInstance()
- data, err := resolver.Query().JobMetrics(r.Context(), id, metrics, scopes, nil)
- if err != nil {
-		json.NewEncoder(rw).Encode(Response{
- Error: &struct {
- Message string "json:\"message\""
- }{Message: err.Error()},
- })
- return
- }
-
-	json.NewEncoder(rw).Encode(Response{
- Data: &struct {
- JobMetrics []*model.JobMetricWithName "json:\"jobMetrics\""
- }{JobMetrics: data},
- })
-}
-
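
getJobMetrics wraps the resolver output in an ad-hoc GraphQL-style envelope where exactly one of `data` or `error` is set. A client can mirror that shape when decoding; the route prefix and query parameters below are assumptions based on the handler, and the metric payload is kept as raw JSON:

```go
package client

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// Envelope mirrors the wrapper built in getJobMetrics.
type Envelope struct {
	Data *struct {
		JobMetrics []json.RawMessage `json:"jobMetrics"`
	} `json:"data"`
	Error *struct {
		Message string `json:"message"`
	} `json:"error"`
}

// FetchJobMetrics decodes the wrapper and surfaces the embedded error,
// since the handler always answers with HTTP 200.
func FetchJobMetrics(baseURL, id string) ([]json.RawMessage, error) {
	url := fmt.Sprintf("%s/api/jobs/metrics/%s?metric=flops_any&scope=node", baseURL, id)
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	var env Envelope
	if err := json.NewDecoder(resp.Body).Decode(&env); err != nil {
		return nil, err
	}
	if env.Error != nil {
		return nil, fmt.Errorf("server error: %s", env.Error.Message)
	}
	return env.Data.JobMetrics, nil
}
```
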
-func (api *RestApi) createUser(rw http.ResponseWriter, r *http.Request) {
- // SecuredCheck() only worked with TokenAuth: Removed
-
- rw.Header().Set("Content-Type", "text/plain")
- me := repository.GetUserFromContext(r.Context())
- if !me.HasRole(schema.RoleAdmin) {
- http.Error(rw, "Only admins are allowed to create new users", http.StatusForbidden)
- return
- }
-
- username, password, role, name, email, project := r.FormValue("username"),
- r.FormValue("password"), r.FormValue("role"), r.FormValue("name"),
- r.FormValue("email"), r.FormValue("project")
-
- if len(password) == 0 && role != schema.GetRoleString(schema.RoleApi) {
- http.Error(rw, "Only API users are allowed to have a blank password (login will be impossible)", http.StatusBadRequest)
- return
- }
-
- if len(project) != 0 && role != schema.GetRoleString(schema.RoleManager) {
- http.Error(rw, "only managers require a project (can be changed later)",
- http.StatusBadRequest)
- return
- } else if len(project) == 0 && role == schema.GetRoleString(schema.RoleManager) {
- http.Error(rw, "managers require a project to manage (can be changed later)",
- http.StatusBadRequest)
- return
- }
-
- if err := repository.GetUserRepository().AddUser(&schema.User{
- Username: username,
- Name: name,
- Password: password,
- Email: email,
- Projects: []string{project},
- Roles: []string{role},
- }); err != nil {
- http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
- return
- }
-
- fmt.Fprintf(rw, "User %v successfully created!\n", username)
-}
-
-func (api *RestApi) deleteUser(rw http.ResponseWriter, r *http.Request) {
- // SecuredCheck() only worked with TokenAuth: Removed
-
- if user := repository.GetUserFromContext(r.Context()); !user.HasRole(schema.RoleAdmin) {
- http.Error(rw, "Only admins are allowed to delete a user", http.StatusForbidden)
- return
- }
-
- username := r.FormValue("username")
- if err := repository.GetUserRepository().DelUser(username); err != nil {
- http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
- return
- }
-
- rw.WriteHeader(http.StatusOK)
-}
-
-// getUsers godoc
-// @summary Returns a list of users
-// @tags User
-// @description Returns a JSON-encoded list of users.
-// @description Required query-parameter defines if all users or only users with additional special roles are returned.
-// @produce json
-// @param not-just-user query bool true "If returned list should contain all users or only users with additional special roles"
-// @success 200 {array} api.ApiReturnedUser "List of users returned successfully"
-// @failure 400 {string} string "Bad Request"
-// @failure 401 {string} string "Unauthorized"
-// @failure 403 {string} string "Forbidden"
-// @failure 500 {string} string "Internal Server Error"
-// @security ApiKeyAuth
-// @router /api/users/ [get]
-func (api *RestApi) getUsers(rw http.ResponseWriter, r *http.Request) {
- // SecuredCheck() only worked with TokenAuth: Removed
-
- if user := repository.GetUserFromContext(r.Context()); !user.HasRole(schema.RoleAdmin) {
- http.Error(rw, "Only admins are allowed to fetch a list of users", http.StatusForbidden)
- return
- }
-
- users, err := repository.GetUserRepository().ListUsers(r.URL.Query().Get("not-just-user") == "true")
- if err != nil {
- http.Error(rw, err.Error(), http.StatusInternalServerError)
- return
- }
-
- json.NewEncoder(rw).Encode(users)
-}
-
-func (api *RestApi) updateUser(rw http.ResponseWriter, r *http.Request) {
- // SecuredCheck() only worked with TokenAuth: Removed
-
- if user := repository.GetUserFromContext(r.Context()); !user.HasRole(schema.RoleAdmin) {
- http.Error(rw, "Only admins are allowed to update a user", http.StatusForbidden)
- return
- }
-
- // Get Values
- newrole := r.FormValue("add-role")
- delrole := r.FormValue("remove-role")
- newproj := r.FormValue("add-project")
- delproj := r.FormValue("remove-project")
-
- // TODO: Handle anything but roles...
- if newrole != "" {
- if err := repository.GetUserRepository().AddRole(r.Context(), mux.Vars(r)["id"], newrole); err != nil {
- http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
- return
- }
- rw.Write([]byte("Add Role Success"))
- } else if delrole != "" {
- if err := repository.GetUserRepository().RemoveRole(r.Context(), mux.Vars(r)["id"], delrole); err != nil {
- http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
- return
- }
- rw.Write([]byte("Remove Role Success"))
- } else if newproj != "" {
- if err := repository.GetUserRepository().AddProject(r.Context(), mux.Vars(r)["id"], newproj); err != nil {
- http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
- return
- }
- rw.Write([]byte("Add Project Success"))
- } else if delproj != "" {
- if err := repository.GetUserRepository().RemoveProject(r.Context(), mux.Vars(r)["id"], delproj); err != nil {
- http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
- return
- }
- rw.Write([]byte("Remove Project Success"))
- } else {
- http.Error(rw, "Not Add or Del [role|project]?", http.StatusInternalServerError)
- }
-}
-
func (api *RestApi) editNotice(rw http.ResponseWriter, r *http.Request) {
// SecuredCheck() only worked with TokenAuth: Removed
@@ -1372,7 +162,7 @@ func (api *RestApi) editNotice(rw http.ResponseWriter, r *http.Request) {
if !noticeExists {
ntxt, err := os.Create("./var/notice.txt")
if err != nil {
- log.Errorf("Creating ./var/notice.txt failed: %s", err.Error())
+ cclog.Errorf("Creating ./var/notice.txt failed: %s", err.Error())
http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
return
}
@@ -1381,7 +171,7 @@ func (api *RestApi) editNotice(rw http.ResponseWriter, r *http.Request) {
if newContent != "" {
if err := os.WriteFile("./var/notice.txt", []byte(newContent), 0o666); err != nil {
- log.Errorf("Writing to ./var/notice.txt failed: %s", err.Error())
+ cclog.Errorf("Writing to ./var/notice.txt failed: %s", err.Error())
http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
return
} else {
@@ -1389,7 +179,7 @@ func (api *RestApi) editNotice(rw http.ResponseWriter, r *http.Request) {
}
} else {
if err := os.WriteFile("./var/notice.txt", []byte(""), 0o666); err != nil {
- log.Errorf("Writing to ./var/notice.txt failed: %s", err.Error())
+ cclog.Errorf("Writing to ./var/notice.txt failed: %s", err.Error())
http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
return
} else {
diff --git a/internal/api/user.go b/internal/api/user.go
new file mode 100644
index 0000000..7e17e36
--- /dev/null
+++ b/internal/api/user.go
@@ -0,0 +1,159 @@
+// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
+// All rights reserved. This file is part of cc-backend.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+package api
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+
+ "github.com/ClusterCockpit/cc-backend/internal/repository"
+ "github.com/ClusterCockpit/cc-lib/schema"
+ "github.com/gorilla/mux"
+)
+
+type ApiReturnedUser struct {
+ Username string `json:"username"`
+ Name string `json:"name"`
+ Roles []string `json:"roles"`
+ Email string `json:"email"`
+ Projects []string `json:"projects"`
+}
+
+// getUsers godoc
+// @summary Returns a list of users
+// @tags User
+// @description Returns a JSON-encoded list of users.
+// @description The required query parameter defines whether all users or only users with additional special roles are returned.
+// @produce json
+// @param not-just-user query bool true "Whether the returned list should contain all users or only users with additional special roles"
+// @success 200 {array} api.ApiReturnedUser "List of users returned successfully"
+// @failure 400 {string} string "Bad Request"
+// @failure 401 {string} string "Unauthorized"
+// @failure 403 {string} string "Forbidden"
+// @failure 500 {string} string "Internal Server Error"
+// @security ApiKeyAuth
+// @router /api/users/ [get]
+func (api *RestApi) getUsers(rw http.ResponseWriter, r *http.Request) {
+ // SecuredCheck() only worked with TokenAuth: Removed
+
+ if user := repository.GetUserFromContext(r.Context()); !user.HasRole(schema.RoleAdmin) {
+ http.Error(rw, "Only admins are allowed to fetch a list of users", http.StatusForbidden)
+ return
+ }
+
+ users, err := repository.GetUserRepository().ListUsers(r.URL.Query().Get("not-just-user") == "true")
+ if err != nil {
+ http.Error(rw, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ json.NewEncoder(rw).Encode(users)
+}
+
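
With ApiReturnedUser now exported from internal/api, an admin client can decode the response of GET /api/users/ directly. A sketch, assuming the not-just-user=true query restricts the list to users with additional roles (as the parameter name suggests) and that the response matches the documented shape:

```go
package client

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// ApiReturnedUser matches the documented response shape above.
type ApiReturnedUser struct {
	Username string   `json:"username"`
	Name     string   `json:"name"`
	Roles    []string `json:"roles"`
	Email    string   `json:"email"`
	Projects []string `json:"projects"`
}

// ListSpecialUsers fetches users with additional special roles;
// base URL and bearer token are placeholders.
func ListSpecialUsers(baseURL, token string) ([]ApiReturnedUser, error) {
	req, err := http.NewRequest(http.MethodGet, baseURL+"/api/users/?not-just-user=true", nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Authorization", "Bearer "+token)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected status: %s", resp.Status)
	}

	var users []ApiReturnedUser
	err = json.NewDecoder(resp.Body).Decode(&users)
	return users, err
}
```
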
+func (api *RestApi) updateUser(rw http.ResponseWriter, r *http.Request) {
+ // SecuredCheck() only worked with TokenAuth: Removed
+
+ if user := repository.GetUserFromContext(r.Context()); !user.HasRole(schema.RoleAdmin) {
+ http.Error(rw, "Only admins are allowed to update a user", http.StatusForbidden)
+ return
+ }
+
+ // Get Values
+ newrole := r.FormValue("add-role")
+ delrole := r.FormValue("remove-role")
+ newproj := r.FormValue("add-project")
+ delproj := r.FormValue("remove-project")
+
+	// TODO: Handle attributes other than roles and projects...
+ if newrole != "" {
+ if err := repository.GetUserRepository().AddRole(r.Context(), mux.Vars(r)["id"], newrole); err != nil {
+ http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
+ return
+ }
+ rw.Write([]byte("Add Role Success"))
+ } else if delrole != "" {
+ if err := repository.GetUserRepository().RemoveRole(r.Context(), mux.Vars(r)["id"], delrole); err != nil {
+ http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
+ return
+ }
+ rw.Write([]byte("Remove Role Success"))
+ } else if newproj != "" {
+ if err := repository.GetUserRepository().AddProject(r.Context(), mux.Vars(r)["id"], newproj); err != nil {
+ http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
+ return
+ }
+ rw.Write([]byte("Add Project Success"))
+ } else if delproj != "" {
+ if err := repository.GetUserRepository().RemoveProject(r.Context(), mux.Vars(r)["id"], delproj); err != nil {
+ http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
+ return
+ }
+ rw.Write([]byte("Remove Project Success"))
+	} else {
+		http.Error(rw, "expected exactly one of the form fields 'add-role', 'remove-role', 'add-project' or 'remove-project'", http.StatusBadRequest)
+	}
+}
+
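
updateUser reads exactly one of its four form fields per request, so a client issues one POST per change. A sketch of granting a role; the /api/user/{id} route is an assumption derived from the mux.Vars(r)["id"] lookup above:

```go
package client

import (
	"fmt"
	"net/http"
	"net/url"
	"strings"
)

// AddRole grants one role to one user via the form-based handler above.
func AddRole(baseURL, token, userID, role string) error {
	form := url.Values{"add-role": {role}}
	req, err := http.NewRequest(http.MethodPost,
		baseURL+"/api/user/"+userID, strings.NewReader(form.Encode()))
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", "Bearer "+token)
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("role update failed: %s", resp.Status)
	}
	return nil
}
```
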
+func (api *RestApi) createUser(rw http.ResponseWriter, r *http.Request) {
+ // SecuredCheck() only worked with TokenAuth: Removed
+
+ rw.Header().Set("Content-Type", "text/plain")
+ me := repository.GetUserFromContext(r.Context())
+ if !me.HasRole(schema.RoleAdmin) {
+ http.Error(rw, "Only admins are allowed to create new users", http.StatusForbidden)
+ return
+ }
+
+ username, password, role, name, email, project := r.FormValue("username"),
+ r.FormValue("password"), r.FormValue("role"), r.FormValue("name"),
+ r.FormValue("email"), r.FormValue("project")
+
+ if len(password) == 0 && role != schema.GetRoleString(schema.RoleApi) {
+ http.Error(rw, "Only API users are allowed to have a blank password (login will be impossible)", http.StatusBadRequest)
+ return
+ }
+
+ if len(project) != 0 && role != schema.GetRoleString(schema.RoleManager) {
+		http.Error(rw, "only managers may be assigned a project (can be changed later)",
+			http.StatusBadRequest)
+ return
+ } else if len(project) == 0 && role == schema.GetRoleString(schema.RoleManager) {
+ http.Error(rw, "managers require a project to manage (can be changed later)",
+ http.StatusBadRequest)
+ return
+ }
+
+ if err := repository.GetUserRepository().AddUser(&schema.User{
+ Username: username,
+ Name: name,
+ Password: password,
+ Email: email,
+ Projects: []string{project},
+ Roles: []string{role},
+ }); err != nil {
+ http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
+ return
+ }
+
+ fmt.Fprintf(rw, "User %v successfully created!\n", username)
+}
+
+func (api *RestApi) deleteUser(rw http.ResponseWriter, r *http.Request) {
+ // SecuredCheck() only worked with TokenAuth: Removed
+
+ if user := repository.GetUserFromContext(r.Context()); !user.HasRole(schema.RoleAdmin) {
+ http.Error(rw, "Only admins are allowed to delete a user", http.StatusForbidden)
+ return
+ }
+
+ username := r.FormValue("username")
+ if err := repository.GetUserRepository().DelUser(username); err != nil {
+ http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
+ return
+ }
+
+ rw.WriteHeader(http.StatusOK)
+}
diff --git a/internal/archiver/archiveWorker.go b/internal/archiver/archiveWorker.go
index 628e36e..9e834b2 100644
--- a/internal/archiver/archiveWorker.go
+++ b/internal/archiver/archiveWorker.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package archiver
@@ -10,8 +10,8 @@ import (
"time"
"github.com/ClusterCockpit/cc-backend/internal/repository"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/schema"
sq "github.com/Masterminds/squirrel"
)
@@ -40,8 +40,8 @@ func archivingWorker() {
	// not using metadata; this call only ensures the job metadata is loaded into the cache
	// and will fail if the metadata is not in the repository
if _, err := jobRepo.FetchMetadata(job); err != nil {
- log.Errorf("archiving job (dbid: %d) failed at check metadata step: %s", job.ID, err.Error())
- jobRepo.UpdateMonitoringStatus(job.ID, schema.MonitoringStatusArchivingFailed)
+ cclog.Errorf("archiving job (dbid: %d) failed at check metadata step: %s", job.ID, err.Error())
+ jobRepo.UpdateMonitoringStatus(*job.ID, schema.MonitoringStatusArchivingFailed)
continue
}
@@ -49,30 +49,34 @@ func archivingWorker() {
// TODO: Maybe use context with cancel/timeout here
jobMeta, err := ArchiveJob(job, context.Background())
if err != nil {
- log.Errorf("archiving job (dbid: %d) failed at archiving job step: %s", job.ID, err.Error())
- jobRepo.UpdateMonitoringStatus(job.ID, schema.MonitoringStatusArchivingFailed)
+ cclog.Errorf("archiving job (dbid: %d) failed at archiving job step: %s", job.ID, err.Error())
+ jobRepo.UpdateMonitoringStatus(*job.ID, schema.MonitoringStatusArchivingFailed)
continue
}
stmt := sq.Update("job").Where("job.id = ?", job.ID)
if stmt, err = jobRepo.UpdateFootprint(stmt, jobMeta); err != nil {
- log.Errorf("archiving job (dbid: %d) failed at update Footprint step: %s", job.ID, err.Error())
+ cclog.Errorf("archiving job (dbid: %d) failed at update Footprint step: %s", job.ID, err.Error())
continue
}
if stmt, err = jobRepo.UpdateEnergy(stmt, jobMeta); err != nil {
- log.Errorf("archiving job (dbid: %d) failed at update Energy step: %s", job.ID, err.Error())
+ cclog.Errorf("archiving job (dbid: %d) failed at update Energy step: %s", job.ID, err.Error())
continue
}
// Update the jobs database entry one last time:
stmt = jobRepo.MarkArchived(stmt, schema.MonitoringStatusArchivingSuccessful)
if err := jobRepo.Execute(stmt); err != nil {
- log.Errorf("archiving job (dbid: %d) failed at db execute: %s", job.ID, err.Error())
+ cclog.Errorf("archiving job (dbid: %d) failed at db execute: %s", job.ID, err.Error())
continue
}
- log.Debugf("archiving job %d took %s", job.JobID, time.Since(start))
- log.Printf("archiving job (dbid: %d) successful", job.ID)
+ cclog.Debugf("archiving job %d took %s", job.JobID, time.Since(start))
+ cclog.Printf("archiving job (dbid: %d) successful", job.ID)
+
+ repository.CallJobStopHooks(job)
archivePending.Done()
+ default:
+ continue
}
}
}
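
The worker above drains a channel of jobs and signals completion through the archivePending WaitGroup. The pattern in isolation, as a reduced sketch (types and the channel are stand-ins, not the actual archiver internals):

```go
package main

import (
	"log"
	"sync"
)

type job struct{ id int64 }

var (
	archiveChannel = make(chan *job, 128)
	archivePending sync.WaitGroup
)

// archivingWorker drains the channel; range blocks until a job arrives
// and exits cleanly when the channel is closed.
func archivingWorker() {
	for j := range archiveChannel {
		log.Printf("archiving job (dbid: %d)", j.id)
		// ... load metadata, write the archive, update the DB ...
		archivePending.Done()
	}
}

// trigger registers the job before handing it to the worker, so a
// concurrent Wait() cannot miss it.
func trigger(j *job) {
	archivePending.Add(1)
	archiveChannel <- j
}

func main() {
	go archivingWorker()
	trigger(&job{id: 42})
	archivePending.Wait() // e.g. during graceful shutdown
}
```
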
@@ -80,7 +84,7 @@ func archivingWorker() {
// Trigger async archiving
func TriggerArchiving(job *schema.Job) {
if archiveChannel == nil {
- log.Fatal("Cannot archive without archiving channel. Did you Start the archiver?")
+ cclog.Fatal("Cannot archive without archiving channel. Did you Start the archiver?")
}
archivePending.Add(1)
diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go
index 1050ca1..e21be13 100644
--- a/internal/archiver/archiver.go
+++ b/internal/archiver/archiver.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package archiver
@@ -11,12 +11,12 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/schema"
)
// Writes a running job to the job-archive
-func ArchiveJob(job *schema.Job, ctx context.Context) (*schema.JobMeta, error) {
+func ArchiveJob(job *schema.Job, ctx context.Context) (*schema.Job, error) {
allMetrics := make([]string, 0)
metricConfigs := archive.GetCluster(job.Cluster).MetricConfig
for _, mc := range metricConfigs {
@@ -36,15 +36,11 @@ func ArchiveJob(job *schema.Job, ctx context.Context) (*schema.JobMeta, error) {
	jobData, err := metricDataDispatcher.LoadData(job, allMetrics, scopes, ctx, 0) // 0 resolution value retrieves highest res (60s)
if err != nil {
- log.Error("Error wile loading job data for archiving")
+		cclog.Error("Error while loading job data for archiving")
return nil, err
}
- jobMeta := &schema.JobMeta{
- BaseJob: job.BaseJob,
- StartTime: job.StartTime.Unix(),
- Statistics: make(map[string]schema.JobStatistics),
- }
+ job.Statistics = make(map[string]schema.JobStatistics)
for metric, data := range jobData {
avg, min, max := 0.0, math.MaxFloat32, -math.MaxFloat32
@@ -61,7 +57,7 @@ func ArchiveJob(job *schema.Job, ctx context.Context) (*schema.JobMeta, error) {
}
// Round AVG Result to 2 Digits
- jobMeta.Statistics[metric] = schema.JobStatistics{
+ job.Statistics[metric] = schema.JobStatistics{
Unit: schema.Unit{
Prefix: archive.GetMetricConfig(job.Cluster, metric).Unit.Prefix,
Base: archive.GetMetricConfig(job.Cluster, metric).Unit.Base,
@@ -76,8 +72,8 @@ func ArchiveJob(job *schema.Job, ctx context.Context) (*schema.JobMeta, error) {
	// only return the job structure as the
	// statistics in there are needed.
if config.Keys.DisableArchive {
- return jobMeta, nil
+ return job, nil
}
- return jobMeta, archive.GetHandle().ImportJob(jobMeta, &jobData)
+ return job, archive.GetHandle().ImportJob(job, &jobData)
}
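
ArchiveJob now writes the per-metric statistics directly into job.Statistics instead of copying them into a separate JobMeta. The aggregation itself reduces each metric's node series to avg/min/max; a simplified sketch with plain float slices standing in for the real series layout:

```go
package main

import (
	"fmt"
	"math"
)

// reduce mirrors the per-metric aggregation in ArchiveJob: the average
// of the per-node averages plus the global min/max across all nodes.
func reduce(avgs, mins, maxs []float64) (avg, min, max float64) {
	if len(avgs) == 0 {
		return 0, 0, 0
	}
	avg, min, max = 0.0, math.MaxFloat64, -math.MaxFloat64
	for i := range avgs {
		avg += avgs[i]
		min = math.Min(min, mins[i])
		max = math.Max(max, maxs[i])
	}
	// round the average to two digits, as the comment above notes
	return math.Round((avg/float64(len(avgs)))*100) / 100, min, max
}

func main() {
	fmt.Println(reduce([]float64{1.234, 2.346}, []float64{0.9, 1.8}, []float64{1.5, 2.9}))
}
```
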
diff --git a/internal/auth/auth.go b/internal/auth/auth.go
index a4d4c9f..1ea27a9 100644
--- a/internal/auth/auth.go
+++ b/internal/auth/auth.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package auth
@@ -22,9 +22,9 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/repository"
- "github.com/ClusterCockpit/cc-backend/internal/util"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/schema"
+ "github.com/ClusterCockpit/cc-lib/util"
"github.com/gorilla/sessions"
)
@@ -66,7 +66,7 @@ func (auth *Authentication) AuthViaSession(
) (*schema.User, error) {
session, err := auth.sessionStore.Get(r, "session")
if err != nil {
- log.Error("Error while getting session store")
+ cclog.Error("Error while getting session store")
return nil, err
}
@@ -93,16 +93,16 @@ func Init() {
sessKey := os.Getenv("SESSION_KEY")
if sessKey == "" {
- log.Warn("environment variable 'SESSION_KEY' not set (will use non-persistent random key)")
+ cclog.Warn("environment variable 'SESSION_KEY' not set (will use non-persistent random key)")
bytes := make([]byte, 32)
if _, err := rand.Read(bytes); err != nil {
- log.Fatal("Error while initializing authentication -> failed to generate random bytes for session key")
+ cclog.Fatal("Error while initializing authentication -> failed to generate random bytes for session key")
}
authInstance.sessionStore = sessions.NewCookieStore(bytes)
} else {
bytes, err := base64.StdEncoding.DecodeString(sessKey)
if err != nil {
- log.Fatal("Error while initializing authentication -> decoding session key failed")
+ cclog.Fatal("Error while initializing authentication -> decoding session key failed")
}
authInstance.sessionStore = sessions.NewCookieStore(bytes)
}
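
Init expects SESSION_KEY to be a base64-encoded 32-byte value; when it is unset, a random key is generated and sessions do not survive restarts. A one-off key matching the decoder above can be produced like this:

```go
package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

func main() {
	// 32 random bytes, base64-encoded with the standard alphabet,
	// matching base64.StdEncoding.DecodeString in Init above.
	key := make([]byte, 32)
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}
	// export SESSION_KEY=<output>
	fmt.Println(base64.StdEncoding.EncodeToString(key))
}
```
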
@@ -114,41 +114,41 @@ func Init() {
if config.Keys.LdapConfig != nil {
ldapAuth := &LdapAuthenticator{}
if err := ldapAuth.Init(); err != nil {
- log.Warn("Error while initializing authentication -> ldapAuth init failed")
+ cclog.Warn("Error while initializing authentication -> ldapAuth init failed")
} else {
authInstance.LdapAuth = ldapAuth
authInstance.authenticators = append(authInstance.authenticators, authInstance.LdapAuth)
}
} else {
- log.Info("Missing LDAP configuration: No LDAP support!")
+ cclog.Info("Missing LDAP configuration: No LDAP support!")
}
if config.Keys.JwtConfig != nil {
authInstance.JwtAuth = &JWTAuthenticator{}
if err := authInstance.JwtAuth.Init(); err != nil {
- log.Fatal("Error while initializing authentication -> jwtAuth init failed")
+ cclog.Fatal("Error while initializing authentication -> jwtAuth init failed")
}
jwtSessionAuth := &JWTSessionAuthenticator{}
if err := jwtSessionAuth.Init(); err != nil {
- log.Info("jwtSessionAuth init failed: No JWT login support!")
+ cclog.Info("jwtSessionAuth init failed: No JWT login support!")
} else {
authInstance.authenticators = append(authInstance.authenticators, jwtSessionAuth)
}
jwtCookieSessionAuth := &JWTCookieSessionAuthenticator{}
if err := jwtCookieSessionAuth.Init(); err != nil {
- log.Info("jwtCookieSessionAuth init failed: No JWT cookie login support!")
+ cclog.Info("jwtCookieSessionAuth init failed: No JWT cookie login support!")
} else {
authInstance.authenticators = append(authInstance.authenticators, jwtCookieSessionAuth)
}
} else {
- log.Info("Missing JWT configuration: No JWT token support!")
+ cclog.Info("Missing JWT configuration: No JWT token support!")
}
authInstance.LocalAuth = &LocalAuthenticator{}
if err := authInstance.LocalAuth.Init(); err != nil {
- log.Fatal("Error while initializing authentication -> localAuth init failed")
+ cclog.Fatal("Error while initializing authentication -> localAuth init failed")
}
authInstance.authenticators = append(authInstance.authenticators, authInstance.LocalAuth)
})
@@ -156,7 +156,7 @@ func Init() {
func GetAuthInstance() *Authentication {
if authInstance == nil {
- log.Fatal("Authentication module not initialized!")
+ cclog.Fatal("Authentication module not initialized!")
}
return authInstance
@@ -167,14 +167,14 @@ func handleTokenUser(tokenUser *schema.User) {
dbUser, err := r.GetUser(tokenUser.Username)
if err != nil && err != sql.ErrNoRows {
- log.Errorf("Error while loading user '%s': %v", tokenUser.Username, err)
+ cclog.Errorf("Error while loading user '%s': %v", tokenUser.Username, err)
} else if err == sql.ErrNoRows && config.Keys.JwtConfig.SyncUserOnLogin { // Adds New User
if err := r.AddUser(tokenUser); err != nil {
- log.Errorf("Error while adding user '%s' to DB: %v", tokenUser.Username, err)
+ cclog.Errorf("Error while adding user '%s' to DB: %v", tokenUser.Username, err)
}
} else if err == nil && config.Keys.JwtConfig.UpdateUserOnLogin { // Update Existing User
if err := r.UpdateUser(dbUser, tokenUser); err != nil {
- log.Errorf("Error while updating user '%s' to DB: %v", dbUser.Username, err)
+ cclog.Errorf("Error while updating user '%s' to DB: %v", dbUser.Username, err)
}
}
}
@@ -184,14 +184,14 @@ func handleOIDCUser(OIDCUser *schema.User) {
dbUser, err := r.GetUser(OIDCUser.Username)
if err != nil && err != sql.ErrNoRows {
- log.Errorf("Error while loading user '%s': %v", OIDCUser.Username, err)
+ cclog.Errorf("Error while loading user '%s': %v", OIDCUser.Username, err)
} else if err == sql.ErrNoRows && config.Keys.OpenIDConfig.SyncUserOnLogin { // Adds New User
if err := r.AddUser(OIDCUser); err != nil {
- log.Errorf("Error while adding user '%s' to DB: %v", OIDCUser.Username, err)
+ cclog.Errorf("Error while adding user '%s' to DB: %v", OIDCUser.Username, err)
}
} else if err == nil && config.Keys.OpenIDConfig.UpdateUserOnLogin { // Update Existing User
if err := r.UpdateUser(dbUser, OIDCUser); err != nil {
- log.Errorf("Error while updating user '%s' to DB: %v", dbUser.Username, err)
+ cclog.Errorf("Error while updating user '%s' to DB: %v", dbUser.Username, err)
}
}
}
@@ -199,7 +199,7 @@ func handleOIDCUser(OIDCUser *schema.User) {
func (auth *Authentication) SaveSession(rw http.ResponseWriter, r *http.Request, user *schema.User) error {
session, err := auth.sessionStore.New(r, "session")
if err != nil {
- log.Errorf("session creation failed: %s", err.Error())
+ cclog.Errorf("session creation failed: %s", err.Error())
http.Error(rw, err.Error(), http.StatusInternalServerError)
return err
}
@@ -215,7 +215,7 @@ func (auth *Authentication) SaveSession(rw http.ResponseWriter, r *http.Request,
session.Values["projects"] = user.Projects
session.Values["roles"] = user.Roles
if err := auth.sessionStore.Save(r, rw, session); err != nil {
- log.Warnf("session save failed: %s", err.Error())
+ cclog.Warnf("session save failed: %s", err.Error())
http.Error(rw, err.Error(), http.StatusInternalServerError)
return err
}
@@ -236,8 +236,8 @@ func (auth *Authentication) Login(
limiter := getIPUserLimiter(ip, username)
if !limiter.Allow() {
- log.Warnf("AUTH/RATE > Too many login attempts for combination IP: %s, Username: %s", ip, username)
- onfailure(rw, r, errors.New("Too many login attempts, try again in a few minutes."))
+ cclog.Warnf("AUTH/RATE > Too many login attempts for combination IP: %s, Username: %s", ip, username)
+ onfailure(rw, r, errors.New("too many login attempts, try again in a few minutes"))
return
}
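
getIPUserLimiter itself is not part of this diff; a plausible implementation keys a token bucket on the IP/username pair, for example with golang.org/x/time/rate. The rates and the unbounded map are assumptions (a real implementation would likely use an expiring cache):

```go
package auth

import (
	"sync"

	"golang.org/x/time/rate"
)

var (
	limiterMu sync.Mutex
	limiters  = make(map[string]*rate.Limiter)
)

// getIPUserLimiter returns one token bucket per IP/username pair:
// here a burst of 10 attempts, refilling one attempt every 6 seconds.
func getIPUserLimiter(ip, username string) *rate.Limiter {
	key := ip + ":" + username
	limiterMu.Lock()
	defer limiterMu.Unlock()
	if l, ok := limiters[key]; ok {
		return l
	}
	l := rate.NewLimiter(rate.Limit(1.0/6.0), 10)
	limiters[key] = l
	return l
}
```
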
@@ -246,7 +246,7 @@ func (auth *Authentication) Login(
var err error
dbUser, err = repository.GetUserRepository().GetUser(username)
if err != nil && err != sql.ErrNoRows {
- log.Errorf("Error while loading user '%v'", username)
+ cclog.Errorf("Error while loading user '%v'", username)
}
}
@@ -256,12 +256,12 @@ func (auth *Authentication) Login(
if user, ok = authenticator.CanLogin(dbUser, username, rw, r); !ok {
continue
} else {
- log.Debugf("Can login with user %v", user)
+ cclog.Debugf("Can login with user %v", user)
}
user, err := authenticator.Login(user, rw, r)
if err != nil {
- log.Warnf("user login failed: %s", err.Error())
+ cclog.Warnf("user login failed: %s", err.Error())
onfailure(rw, r, err)
return
}
@@ -270,7 +270,7 @@ func (auth *Authentication) Login(
return
}
- log.Infof("login successfull: user: %#v (roles: %v, projects: %v)", user.Username, user.Roles, user.Projects)
+	cclog.Infof("login successful: user: %#v (roles: %v, projects: %v)", user.Username, user.Roles, user.Projects)
ctx := context.WithValue(r.Context(), repository.ContextUserKey, user)
if r.FormValue("redirect") != "" {
@@ -282,7 +282,7 @@ func (auth *Authentication) Login(
return
}
- log.Debugf("login failed: no authenticator applied")
+ cclog.Debugf("login failed: no authenticator applied")
onfailure(rw, r, errors.New("no authenticator applied"))
})
}
@@ -294,14 +294,14 @@ func (auth *Authentication) Auth(
return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
user, err := auth.JwtAuth.AuthViaJWT(rw, r)
if err != nil {
- log.Infof("auth -> authentication failed: %s", err.Error())
+ cclog.Infof("auth -> authentication failed: %s", err.Error())
http.Error(rw, err.Error(), http.StatusUnauthorized)
return
}
if user == nil {
user, err = auth.AuthViaSession(rw, r)
if err != nil {
- log.Infof("auth -> authentication failed: %s", err.Error())
+ cclog.Infof("auth -> authentication failed: %s", err.Error())
http.Error(rw, err.Error(), http.StatusUnauthorized)
return
}
@@ -312,7 +312,7 @@ func (auth *Authentication) Auth(
return
}
- log.Info("auth -> authentication failed")
+ cclog.Info("auth -> authentication failed")
onfailure(rw, r, errors.New("unauthorized (please login first)"))
})
}
@@ -324,14 +324,14 @@ func (auth *Authentication) AuthApi(
return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
user, err := auth.JwtAuth.AuthViaJWT(rw, r)
if err != nil {
- log.Infof("auth api -> authentication failed: %s", err.Error())
+ cclog.Infof("auth api -> authentication failed: %s", err.Error())
onfailure(rw, r, err)
return
}
ipErr := securedCheck(user, r)
if ipErr != nil {
- log.Infof("auth api -> secured check failed: %s", ipErr.Error())
+ cclog.Infof("auth api -> secured check failed: %s", ipErr.Error())
onfailure(rw, r, ipErr)
return
}
@@ -351,11 +351,11 @@ func (auth *Authentication) AuthApi(
return
}
default:
- log.Info("auth api -> authentication failed: missing role")
+ cclog.Info("auth api -> authentication failed: missing role")
onfailure(rw, r, errors.New("unauthorized"))
}
}
- log.Info("auth api -> authentication failed: no auth")
+ cclog.Info("auth api -> authentication failed: no auth")
onfailure(rw, r, errors.New("unauthorized"))
})
}
@@ -367,7 +367,7 @@ func (auth *Authentication) AuthUserApi(
return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
user, err := auth.JwtAuth.AuthViaJWT(rw, r)
if err != nil {
- log.Infof("auth user api -> authentication failed: %s", err.Error())
+ cclog.Infof("auth user api -> authentication failed: %s", err.Error())
onfailure(rw, r, err)
return
}
@@ -387,11 +387,11 @@ func (auth *Authentication) AuthUserApi(
return
}
default:
- log.Info("auth user api -> authentication failed: missing role")
+ cclog.Info("auth user api -> authentication failed: missing role")
onfailure(rw, r, errors.New("unauthorized"))
}
}
- log.Info("auth user api -> authentication failed: no auth")
+ cclog.Info("auth user api -> authentication failed: no auth")
onfailure(rw, r, errors.New("unauthorized"))
})
}
@@ -403,7 +403,7 @@ func (auth *Authentication) AuthConfigApi(
return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
user, err := auth.AuthViaSession(rw, r)
if err != nil {
- log.Infof("auth config api -> authentication failed: %s", err.Error())
+ cclog.Infof("auth config api -> authentication failed: %s", err.Error())
onfailure(rw, r, err)
return
}
@@ -412,7 +412,7 @@ func (auth *Authentication) AuthConfigApi(
onsuccess.ServeHTTP(rw, r.WithContext(ctx))
return
}
- log.Info("auth config api -> authentication failed: no auth")
+ cclog.Info("auth config api -> authentication failed: no auth")
onfailure(rw, r, errors.New("unauthorized"))
})
}
@@ -424,7 +424,7 @@ func (auth *Authentication) AuthFrontendApi(
return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
user, err := auth.AuthViaSession(rw, r)
if err != nil {
- log.Infof("auth frontend api -> authentication failed: %s", err.Error())
+ cclog.Infof("auth frontend api -> authentication failed: %s", err.Error())
onfailure(rw, r, err)
return
}
@@ -433,7 +433,7 @@ func (auth *Authentication) AuthFrontendApi(
onsuccess.ServeHTTP(rw, r.WithContext(ctx))
return
}
- log.Info("auth frontend api -> authentication failed: no auth")
+ cclog.Info("auth frontend api -> authentication failed: no auth")
onfailure(rw, r, errors.New("unauthorized"))
})
}
diff --git a/internal/auth/jwt.go b/internal/auth/jwt.go
index 7bac278..2cc2c37 100644
--- a/internal/auth/jwt.go
+++ b/internal/auth/jwt.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package auth
@@ -15,8 +15,8 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/repository"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/schema"
"github.com/golang-jwt/jwt/v5"
)
@@ -28,17 +28,17 @@ type JWTAuthenticator struct {
func (ja *JWTAuthenticator) Init() error {
pubKey, privKey := os.Getenv("JWT_PUBLIC_KEY"), os.Getenv("JWT_PRIVATE_KEY")
if pubKey == "" || privKey == "" {
- log.Warn("environment variables 'JWT_PUBLIC_KEY' or 'JWT_PRIVATE_KEY' not set (token based authentication will not work)")
+ cclog.Warn("environment variables 'JWT_PUBLIC_KEY' or 'JWT_PRIVATE_KEY' not set (token based authentication will not work)")
} else {
bytes, err := base64.StdEncoding.DecodeString(pubKey)
if err != nil {
- log.Warn("Could not decode JWT public key")
+ cclog.Warn("Could not decode JWT public key")
return err
}
ja.publicKey = ed25519.PublicKey(bytes)
bytes, err = base64.StdEncoding.DecodeString(privKey)
if err != nil {
- log.Warn("Could not decode JWT private key")
+ cclog.Warn("Could not decode JWT private key")
return err
}
ja.privateKey = ed25519.PrivateKey(bytes)
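
Both JWT authenticators expect JWT_PUBLIC_KEY and JWT_PRIVATE_KEY as base64-encoded raw Ed25519 key material, matching the ed25519.PublicKey(bytes) and ed25519.PrivateKey(bytes) conversions above. A matching key pair can be generated like this:

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	// raw key bytes, base64-encoded with the standard alphabet
	fmt.Println("JWT_PUBLIC_KEY=" + base64.StdEncoding.EncodeToString(pub))
	fmt.Println("JWT_PRIVATE_KEY=" + base64.StdEncoding.EncodeToString(priv))
}
```
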
@@ -70,11 +70,11 @@ func (ja *JWTAuthenticator) AuthViaJWT(
return ja.publicKey, nil
})
if err != nil {
- log.Warn("Error while parsing JWT token")
+ cclog.Warn("Error while parsing JWT token")
return nil, err
}
if !token.Valid {
- log.Warn("jwt token claims are not valid")
+ cclog.Warn("jwt token claims are not valid")
return nil, errors.New("jwt token claims are not valid")
}
@@ -90,7 +90,7 @@ func (ja *JWTAuthenticator) AuthViaJWT(
user, err := ur.GetUser(sub)
// Deny any logins for unknown usernames
if err != nil {
- log.Warn("Could not find user from JWT in internal database.")
+ cclog.Warn("Could not find user from JWT in internal database.")
return nil, errors.New("unknown user")
}
// Take user roles from database instead of trusting the JWT
diff --git a/internal/auth/jwtCookieSession.go b/internal/auth/jwtCookieSession.go
index 7e0e045..8f6d064 100644
--- a/internal/auth/jwtCookieSession.go
+++ b/internal/auth/jwtCookieSession.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package auth
@@ -15,8 +15,8 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/repository"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/schema"
"github.com/golang-jwt/jwt/v5"
)
@@ -31,18 +31,18 @@ var _ Authenticator = (*JWTCookieSessionAuthenticator)(nil)
func (ja *JWTCookieSessionAuthenticator) Init() error {
pubKey, privKey := os.Getenv("JWT_PUBLIC_KEY"), os.Getenv("JWT_PRIVATE_KEY")
if pubKey == "" || privKey == "" {
- log.Warn("environment variables 'JWT_PUBLIC_KEY' or 'JWT_PRIVATE_KEY' not set (token based authentication will not work)")
+ cclog.Warn("environment variables 'JWT_PUBLIC_KEY' or 'JWT_PRIVATE_KEY' not set (token based authentication will not work)")
return errors.New("environment variables 'JWT_PUBLIC_KEY' or 'JWT_PRIVATE_KEY' not set (token based authentication will not work)")
} else {
bytes, err := base64.StdEncoding.DecodeString(pubKey)
if err != nil {
- log.Warn("Could not decode JWT public key")
+ cclog.Warn("Could not decode JWT public key")
return err
}
ja.publicKey = ed25519.PublicKey(bytes)
bytes, err = base64.StdEncoding.DecodeString(privKey)
if err != nil {
- log.Warn("Could not decode JWT private key")
+ cclog.Warn("Could not decode JWT private key")
return err
}
ja.privateKey = ed25519.PrivateKey(bytes)
@@ -53,13 +53,13 @@ func (ja *JWTCookieSessionAuthenticator) Init() error {
if keyFound && pubKeyCrossLogin != "" {
bytes, err := base64.StdEncoding.DecodeString(pubKeyCrossLogin)
if err != nil {
- log.Warn("Could not decode cross login JWT public key")
+ cclog.Warn("Could not decode cross login JWT public key")
return err
}
ja.publicKeyCrossLogin = ed25519.PublicKey(bytes)
} else {
ja.publicKeyCrossLogin = nil
- log.Debug("environment variable 'CROSS_LOGIN_JWT_PUBLIC_KEY' not set (cross login token based authentication will not work)")
+ cclog.Debug("environment variable 'CROSS_LOGIN_JWT_PUBLIC_KEY' not set (cross login token based authentication will not work)")
return errors.New("environment variable 'CROSS_LOGIN_JWT_PUBLIC_KEY' not set (cross login token based authentication will not work)")
}
@@ -67,22 +67,22 @@ func (ja *JWTCookieSessionAuthenticator) Init() error {
// Warn if other necessary settings are not configured
if jc != nil {
if jc.CookieName == "" {
- log.Info("cookieName for JWTs not configured (cross login via JWT cookie will fail)")
+ cclog.Info("cookieName for JWTs not configured (cross login via JWT cookie will fail)")
return errors.New("cookieName for JWTs not configured (cross login via JWT cookie will fail)")
}
if !jc.ValidateUser {
- log.Info("forceJWTValidationViaDatabase not set to true: CC will accept users and roles defined in JWTs regardless of its own database!")
+ cclog.Info("forceJWTValidationViaDatabase not set to true: CC will accept users and roles defined in JWTs regardless of its own database!")
}
if jc.TrustedIssuer == "" {
- log.Info("trustedExternalIssuer for JWTs not configured (cross login via JWT cookie will fail)")
+ cclog.Info("trustedExternalIssuer for JWTs not configured (cross login via JWT cookie will fail)")
return errors.New("trustedExternalIssuer for JWTs not configured (cross login via JWT cookie will fail)")
}
} else {
- log.Warn("config for JWTs not configured (cross login via JWT cookie will fail)")
+ cclog.Warn("config for JWTs not configured (cross login via JWT cookie will fail)")
return errors.New("config for JWTs not configured (cross login via JWT cookie will fail)")
}
- log.Info("JWT Cookie Session authenticator successfully registered")
+ cclog.Info("JWT Cookie Session authenticator successfully registered")
return nil
}
@@ -140,12 +140,12 @@ func (ja *JWTCookieSessionAuthenticator) Login(
return ja.publicKey, nil
})
if err != nil {
- log.Warn("JWT cookie session: error while parsing token")
+ cclog.Warn("JWT cookie session: error while parsing token")
return nil, err
}
if !token.Valid {
- log.Warn("jwt token claims are not valid")
+ cclog.Warn("jwt token claims are not valid")
return nil, errors.New("jwt token claims are not valid")
}
@@ -159,12 +159,12 @@ func (ja *JWTCookieSessionAuthenticator) Login(
var err error
user, err = repository.GetUserRepository().GetUser(sub)
if err != nil && err != sql.ErrNoRows {
- log.Errorf("Error while loading user '%v'", sub)
+ cclog.Errorf("Error while loading user '%v'", sub)
}
// Deny any logins for unknown usernames
if user == nil {
- log.Warn("Could not find user from JWT in internal database.")
+ cclog.Warn("Could not find user from JWT in internal database.")
return nil, errors.New("unknown user")
}
} else {
diff --git a/internal/auth/jwtSession.go b/internal/auth/jwtSession.go
index 67457ee..9c79e72 100644
--- a/internal/auth/jwtSession.go
+++ b/internal/auth/jwtSession.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package auth
@@ -15,8 +15,8 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/repository"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/schema"
"github.com/golang-jwt/jwt/v5"
)
@@ -30,13 +30,13 @@ func (ja *JWTSessionAuthenticator) Init() error {
if pubKey := os.Getenv("CROSS_LOGIN_JWT_HS512_KEY"); pubKey != "" {
bytes, err := base64.StdEncoding.DecodeString(pubKey)
if err != nil {
- log.Warn("Could not decode cross login JWT HS512 key")
+ cclog.Warn("Could not decode cross login JWT HS512 key")
return err
}
ja.loginTokenKey = bytes
}
- log.Info("JWT Session authenticator successfully registered")
+ cclog.Info("JWT Session authenticator successfully registered")
return nil
}
@@ -67,12 +67,12 @@ func (ja *JWTSessionAuthenticator) Login(
		return nil, fmt.Errorf("unknown signing method for login token: %s (known: HS256, HS512, EdDSA)", t.Method.Alg())
})
if err != nil {
- log.Warn("Error while parsing jwt token")
+ cclog.Warn("Error while parsing jwt token")
return nil, err
}
if !token.Valid {
- log.Warn("jwt token claims are not valid")
+ cclog.Warn("jwt token claims are not valid")
return nil, errors.New("jwt token claims are not valid")
}
@@ -86,12 +86,12 @@ func (ja *JWTSessionAuthenticator) Login(
var err error
user, err = repository.GetUserRepository().GetUser(sub)
if err != nil && err != sql.ErrNoRows {
- log.Errorf("Error while loading user '%v'", sub)
+ cclog.Errorf("Error while loading user '%v'", sub)
}
// Deny any logins for unknown usernames
if user == nil {
- log.Warn("Could not find user from JWT in internal database.")
+ cclog.Warn("Could not find user from JWT in internal database.")
return nil, errors.New("unknown user")
}
} else {
diff --git a/internal/auth/ldap.go b/internal/auth/ldap.go
index cc7c4f6..d7843e4 100644
--- a/internal/auth/ldap.go
+++ b/internal/auth/ldap.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package auth
@@ -13,8 +13,8 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/repository"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/schema"
"github.com/go-ldap/ldap/v3"
)
@@ -28,7 +28,7 @@ var _ Authenticator = (*LdapAuthenticator)(nil)
func (la *LdapAuthenticator) Init() error {
la.syncPassword = os.Getenv("LDAP_ADMIN_PASSWORD")
if la.syncPassword == "" {
- log.Warn("environment variable 'LDAP_ADMIN_PASSWORD' not set (ldap sync will not work)")
+ cclog.Warn("environment variable 'LDAP_ADMIN_PASSWORD' not set (ldap sync will not work)")
}
lc := config.Keys.LdapConfig
@@ -58,7 +58,7 @@ func (la *LdapAuthenticator) CanLogin(
if lc.SyncUserOnLogin {
l, err := la.getLdapConnection(true)
if err != nil {
- log.Error("LDAP connection error")
+ cclog.Error("LDAP connection error")
}
defer l.Close()
@@ -71,12 +71,12 @@ func (la *LdapAuthenticator) CanLogin(
sr, err := l.Search(searchRequest)
if err != nil {
- log.Warn(err)
+ cclog.Warn(err)
return nil, false
}
if len(sr.Entries) != 1 {
- log.Warn("LDAP: User does not exist or too many entries returned")
+ cclog.Warn("LDAP: User does not exist or too many entries returned")
return nil, false
}
@@ -96,7 +96,7 @@ func (la *LdapAuthenticator) CanLogin(
}
if err := repository.GetUserRepository().AddUser(user); err != nil {
- log.Errorf("User '%s' LDAP: Insert into DB failed", username)
+ cclog.Errorf("User '%s' LDAP: Insert into DB failed", username)
return nil, false
}
@@ -114,14 +114,14 @@ func (la *LdapAuthenticator) Login(
) (*schema.User, error) {
l, err := la.getLdapConnection(false)
if err != nil {
- log.Warn("Error while getting ldap connection")
+ cclog.Warn("Error while getting ldap connection")
return nil, err
}
defer l.Close()
userDn := strings.Replace(config.Keys.LdapConfig.UserBind, "{username}", user.Username, -1)
if err := l.Bind(userDn, r.FormValue("password")); err != nil {
- log.Errorf("AUTH/LDAP > Authentication for user %s failed: %v",
+ cclog.Errorf("AUTH/LDAP > Authentication for user %s failed: %v",
user.Username, err)
return nil, fmt.Errorf("Authentication failed")
}
@@ -148,7 +148,7 @@ func (la *LdapAuthenticator) Sync() error {
l, err := la.getLdapConnection(true)
if err != nil {
- log.Error("LDAP connection error")
+ cclog.Error("LDAP connection error")
return err
}
defer l.Close()
@@ -159,7 +159,7 @@ func (la *LdapAuthenticator) Sync() error {
lc.UserFilter,
[]string{"dn", "uid", la.UserAttr}, nil))
if err != nil {
- log.Warn("LDAP search error")
+ cclog.Warn("LDAP search error")
return err
}
@@ -182,7 +182,7 @@ func (la *LdapAuthenticator) Sync() error {
for username, where := range users {
if where == IN_DB && lc.SyncDelOldUsers {
ur.DelUser(username)
- log.Debugf("sync: remove %v (does not show up in LDAP anymore)", username)
+ cclog.Debugf("sync: remove %v (does not show up in LDAP anymore)", username)
} else if where == IN_LDAP {
name := newnames[username]
@@ -198,9 +198,9 @@ func (la *LdapAuthenticator) Sync() error {
AuthSource: schema.AuthViaLDAP,
}
- log.Debugf("sync: add %v (name: %v, roles: [user], ldap: true)", username, name)
+ cclog.Debugf("sync: add %v (name: %v, roles: [user], ldap: true)", username, name)
if err := ur.AddUser(user); err != nil {
- log.Errorf("User '%s' LDAP: Insert into DB failed", username)
+ cclog.Errorf("User '%s' LDAP: Insert into DB failed", username)
return err
}
}
@@ -213,14 +213,14 @@ func (la *LdapAuthenticator) getLdapConnection(admin bool) (*ldap.Conn, error) {
lc := config.Keys.LdapConfig
conn, err := ldap.DialURL(lc.Url)
if err != nil {
- log.Warn("LDAP URL dial failed")
+ cclog.Warn("LDAP URL dial failed")
return nil, err
}
if admin {
if err := conn.Bind(lc.SearchDN, la.syncPassword); err != nil {
conn.Close()
- log.Warn("LDAP connection bind failed")
+ cclog.Warn("LDAP connection bind failed")
return nil, err
}
}
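
The LDAP login path substitutes the username into the UserBind template and attempts a bind with the submitted password. A reduced sketch with go-ldap; the URL and bind template are placeholders:

```go
package auth

import (
	"fmt"
	"strings"

	"github.com/go-ldap/ldap/v3"
)

// checkLdapPassword binds as the user; a successful bind proves the password.
func checkLdapPassword(url, userBind, username, password string) error {
	conn, err := ldap.DialURL(url) // e.g. "ldaps://ldap.example.org"
	if err != nil {
		return fmt.Errorf("LDAP URL dial failed: %w", err)
	}
	defer conn.Close()

	// userBind like "uid={username},ou=people,dc=example,dc=org"
	userDn := strings.ReplaceAll(userBind, "{username}", username)
	if err := conn.Bind(userDn, password); err != nil {
		return fmt.Errorf("authentication failed: %w", err)
	}
	return nil
}
```
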
diff --git a/internal/auth/local.go b/internal/auth/local.go
index 8d39793..5dc0bf4 100644
--- a/internal/auth/local.go
+++ b/internal/auth/local.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package auth
@@ -8,8 +8,8 @@ import (
"fmt"
"net/http"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/schema"
"golang.org/x/crypto/bcrypt"
)
@@ -27,19 +27,19 @@ func (la *LocalAuthenticator) CanLogin(
user *schema.User,
username string,
rw http.ResponseWriter,
- r *http.Request) (*schema.User, bool) {
-
+ r *http.Request,
+) (*schema.User, bool) {
return user, user != nil && user.AuthSource == schema.AuthViaLocalPassword
}
func (la *LocalAuthenticator) Login(
user *schema.User,
rw http.ResponseWriter,
- r *http.Request) (*schema.User, error) {
-
+ r *http.Request,
+) (*schema.User, error) {
if e := bcrypt.CompareHashAndPassword([]byte(user.Password),
[]byte(r.FormValue("password"))); e != nil {
- log.Errorf("AUTH/LOCAL > Authentication for user %s failed!", user.Username)
+ cclog.Errorf("AUTH/LOCAL > Authentication for user %s failed!", user.Username)
return nil, fmt.Errorf("Authentication failed")
}
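
Local authentication compares the submitted password against the stored bcrypt hash; creating that hash is the mirror operation. A minimal sketch (the cost parameter is an assumption):

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	// store: hash the plaintext once, e.g. when a user is created
	hash, err := bcrypt.GenerateFromPassword([]byte("s3cret"), bcrypt.DefaultCost)
	if err != nil {
		panic(err)
	}

	// verify: the same call the authenticator above uses
	if err := bcrypt.CompareHashAndPassword(hash, []byte("s3cret")); err != nil {
		fmt.Println("authentication failed")
		return
	}
	fmt.Println("authentication succeeded")
}
```
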
diff --git a/internal/auth/oidc.go b/internal/auth/oidc.go
index ba1c9da..f688aab 100644
--- a/internal/auth/oidc.go
+++ b/internal/auth/oidc.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package auth
@@ -15,8 +15,8 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/repository"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/schema"
"github.com/coreos/go-oidc/v3/oidc"
"github.com/gorilla/mux"
"golang.org/x/oauth2"
@@ -51,15 +51,15 @@ func setCallbackCookie(w http.ResponseWriter, r *http.Request, name, value strin
func NewOIDC(a *Authentication) *OIDC {
provider, err := oidc.NewProvider(context.Background(), config.Keys.OpenIDConfig.Provider)
if err != nil {
- log.Fatal(err)
+ cclog.Fatal(err)
}
clientID := os.Getenv("OID_CLIENT_ID")
if clientID == "" {
- log.Warn("environment variable 'OID_CLIENT_ID' not set (Open ID connect auth will not work)")
+ cclog.Warn("environment variable 'OID_CLIENT_ID' not set (Open ID connect auth will not work)")
}
clientSecret := os.Getenv("OID_CLIENT_SECRET")
if clientSecret == "" {
- log.Warn("environment variable 'OID_CLIENT_SECRET' not set (Open ID connect auth will not work)")
+ cclog.Warn("environment variable 'OID_CLIENT_SECRET' not set (Open ID connect auth will not work)")
}
client := &oauth2.Config{
@@ -173,7 +173,7 @@ func (oa *OIDC) OAuth2Callback(rw http.ResponseWriter, r *http.Request) {
}
oa.authentication.SaveSession(rw, r, user)
- log.Infof("login successfull: user: %#v (roles: %v, projects: %v)", user.Username, user.Roles, user.Projects)
+	cclog.Infof("login successful: user: %#v (roles: %v, projects: %v)", user.Username, user.Roles, user.Projects)
ctx := context.WithValue(r.Context(), repository.ContextUserKey, user)
http.RedirectHandler("/", http.StatusTemporaryRedirect).ServeHTTP(rw, r.WithContext(ctx))
}
diff --git a/internal/config/config.go b/internal/config/config.go
index 31760c7..bb965b8 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package config
@@ -9,8 +9,8 @@ import (
"encoding/json"
"os"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/schema"
)
var Keys schema.ProgramConfig = schema.ProgramConfig{
@@ -53,20 +53,20 @@ func Init(flagConfigFile string) {
raw, err := os.ReadFile(flagConfigFile)
if err != nil {
if !os.IsNotExist(err) {
- log.Abortf("Config Init: Could not read config file '%s'.\nError: %s\n", flagConfigFile, err.Error())
+ cclog.Abortf("Config Init: Could not read config file '%s'.\nError: %s\n", flagConfigFile, err.Error())
}
} else {
if err := schema.Validate(schema.Config, bytes.NewReader(raw)); err != nil {
- log.Abortf("Config Init: Could not validate config file '%s'.\nError: %s\n", flagConfigFile, err.Error())
+ cclog.Abortf("Config Init: Could not validate config file '%s'.\nError: %s\n", flagConfigFile, err.Error())
}
dec := json.NewDecoder(bytes.NewReader(raw))
dec.DisallowUnknownFields()
if err := dec.Decode(&Keys); err != nil {
- log.Abortf("Config Init: Could not decode config file '%s'.\nError: %s\n", flagConfigFile, err.Error())
+ cclog.Abortf("Config Init: Could not decode config file '%s'.\nError: %s\n", flagConfigFile, err.Error())
}
if Keys.Clusters == nil || len(Keys.Clusters) < 1 {
- log.Abort("Config Init: At least one cluster required in config. Exited with error.")
+ cclog.Abort("Config Init: At least one cluster required in config. Exited with error.")
}
}
}
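
Config parsing is strict: the file is validated against the JSON schema first, then decoded with DisallowUnknownFields so that misspelled keys fail loudly instead of being silently dropped. The decoding half in isolation, with a simplified stand-in for the real config struct:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

type programConfig struct {
	Addr     string   `json:"addr"`
	Clusters []string `json:"clusters"` // simplified stand-in for the cluster config
}

func main() {
	raw := []byte(`{"addr": ":8080", "clusterz": []}`) // note the typo
	dec := json.NewDecoder(bytes.NewReader(raw))
	dec.DisallowUnknownFields()

	var keys programConfig
	if err := dec.Decode(&keys); err != nil {
		// fails with: json: unknown field "clusterz"
		fmt.Println("config decode failed:", err)
	}
}
```
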
diff --git a/internal/config/config_test.go b/internal/config/config_test.go
index ed282be..993b6f0 100644
--- a/internal/config/config_test.go
+++ b/internal/config/config_test.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package config
diff --git a/internal/config/default_metrics.go b/internal/config/default_metrics.go
index b0a0cc5..48a0a0b 100644
--- a/internal/config/default_metrics.go
+++ b/internal/config/default_metrics.go
@@ -1,3 +1,7 @@
+// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
+// All rights reserved. This file is part of cc-backend.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
package config
import (
diff --git a/internal/graph/generated/generated.go b/internal/graph/generated/generated.go
index 5dbdfd9..238270f 100644
--- a/internal/graph/generated/generated.go
+++ b/internal/graph/generated/generated.go
@@ -15,7 +15,7 @@ import (
"github.com/99designs/gqlgen/graphql"
"github.com/99designs/gqlgen/graphql/introspection"
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ "github.com/ClusterCockpit/cc-lib/schema"
gqlparser "github.com/vektah/gqlparser/v2"
"github.com/vektah/gqlparser/v2/ast"
)
@@ -44,6 +44,7 @@ type ResolverRoot interface {
Job() JobResolver
MetricValue() MetricValueResolver
Mutation() MutationResolver
+ Node() NodeResolver
Query() QueryResolver
SubCluster() SubClusterResolver
}
@@ -171,14 +172,16 @@ type ComplexityRoot struct {
}
JobStats struct {
- Name func(childComplexity int) int
- Stats func(childComplexity int) int
- }
-
- JobStatsWithScope struct {
- Name func(childComplexity int) int
- Scope func(childComplexity int) int
- Stats func(childComplexity int) int
+ Cluster func(childComplexity int) int
+ Duration func(childComplexity int) int
+ ID func(childComplexity int) int
+ JobID func(childComplexity int) int
+ NumAccelerators func(childComplexity int) int
+ NumHWThreads func(childComplexity int) int
+ NumNodes func(childComplexity int) int
+ StartTime func(childComplexity int) int
+ Stats func(childComplexity int) int
+ SubCluster func(childComplexity int) int
}
JobsStatistics struct {
@@ -255,12 +258,43 @@ type ComplexityRoot struct {
UpdateConfiguration func(childComplexity int, name string, value string) int
}
+ NamedStats struct {
+ Data func(childComplexity int) int
+ Name func(childComplexity int) int
+ }
+
+ NamedStatsWithScope struct {
+ Name func(childComplexity int) int
+ Scope func(childComplexity int) int
+ Stats func(childComplexity int) int
+ }
+
+ Node struct {
+ Cluster func(childComplexity int) int
+ HealthState func(childComplexity int) int
+ Hostname func(childComplexity int) int
+ ID func(childComplexity int) int
+ MetaData func(childComplexity int) int
+ NodeState func(childComplexity int) int
+ SubCluster func(childComplexity int) int
+ }
+
NodeMetrics struct {
Host func(childComplexity int) int
Metrics func(childComplexity int) int
SubCluster func(childComplexity int) int
}
+ NodeStateResultList struct {
+ Count func(childComplexity int) int
+ Items func(childComplexity int) int
+ }
+
+ NodeStats struct {
+ Count func(childComplexity int) int
+ State func(childComplexity int) int
+ }
+
NodesResultList struct {
Count func(childComplexity int) int
HasNextPage func(childComplexity int) int
@@ -279,9 +313,13 @@ type ComplexityRoot struct {
JobStats func(childComplexity int, id string, metrics []string) int
Jobs func(childComplexity int, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) int
JobsFootprints func(childComplexity int, filter []*model.JobFilter, metrics []string) int
+ JobsMetricStats func(childComplexity int, filter []*model.JobFilter, metrics []string) int
JobsStatistics func(childComplexity int, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate, numDurationBins *string, numMetricBins *int) int
+ Node func(childComplexity int, id string) int
NodeMetrics func(childComplexity int, cluster string, nodes []string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time) int
NodeMetricsList func(childComplexity int, cluster string, subCluster string, nodeFilter string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time, page *model.PageRequest, resolution *int) int
+ NodeStats func(childComplexity int, filter []*model.NodeFilter) int
+ Nodes func(childComplexity int, filter []*model.NodeFilter, order *model.OrderByInput) int
RooflineHeatmap func(childComplexity int, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) int
ScopedJobStats func(childComplexity int, id string, metrics []string, scopes []schema.MetricScope) int
Tags func(childComplexity int) int
@@ -384,6 +422,8 @@ type ClusterResolver interface {
Partitions(ctx context.Context, obj *schema.Cluster) ([]string, error)
}
type JobResolver interface {
+ StartTime(ctx context.Context, obj *schema.Job) (*time.Time, error)
+
Tags(ctx context.Context, obj *schema.Job) ([]*schema.Tag, error)
ConcurrentJobs(ctx context.Context, obj *schema.Job) (*model.JobLinkResultList, error)
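startTime moves from a plain struct field to a resolver returning *time.Time, which suggests the underlying schema.Job no longer stores a time.Time directly. A sketch under that assumption (a Unix-seconds field named StartTime is a guess, not confirmed by this diff):

func (r *jobResolver) StartTime(ctx context.Context, obj *schema.Job) (*time.Time, error) {
	t := time.Unix(obj.StartTime, 0) // assumed int64 Unix timestamp
	return &t, nil
}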
@@ -403,19 +443,28 @@ type MutationResolver interface {
RemoveTagFromList(ctx context.Context, tagIds []string) ([]int, error)
UpdateConfiguration(ctx context.Context, name string, value string) (*string, error)
}
+type NodeResolver interface {
+ NodeState(ctx context.Context, obj *schema.Node) (string, error)
+ HealthState(ctx context.Context, obj *schema.Node) (schema.NodeState, error)
+ MetaData(ctx context.Context, obj *schema.Node) (any, error)
+}
type QueryResolver interface {
Clusters(ctx context.Context) ([]*schema.Cluster, error)
Tags(ctx context.Context) ([]*schema.Tag, error)
GlobalMetrics(ctx context.Context) ([]*schema.GlobalMetricListItem, error)
User(ctx context.Context, username string) (*model.User, error)
AllocatedNodes(ctx context.Context, cluster string) ([]*model.Count, error)
+ Node(ctx context.Context, id string) (*schema.Node, error)
+ Nodes(ctx context.Context, filter []*model.NodeFilter, order *model.OrderByInput) (*model.NodeStateResultList, error)
+ NodeStats(ctx context.Context, filter []*model.NodeFilter) ([]*model.NodeStats, error)
Job(ctx context.Context, id string) (*schema.Job, error)
JobMetrics(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope, resolution *int) ([]*model.JobMetricWithName, error)
- JobStats(ctx context.Context, id string, metrics []string) ([]*model.JobStats, error)
- ScopedJobStats(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope) ([]*model.JobStatsWithScope, error)
- JobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) (*model.Footprints, error)
+ JobStats(ctx context.Context, id string, metrics []string) ([]*model.NamedStats, error)
+ ScopedJobStats(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope) ([]*model.NamedStatsWithScope, error)
Jobs(ctx context.Context, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) (*model.JobResultList, error)
JobsStatistics(ctx context.Context, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate, numDurationBins *string, numMetricBins *int) ([]*model.JobsStatistics, error)
+ JobsMetricStats(ctx context.Context, filter []*model.JobFilter, metrics []string) ([]*model.JobStats, error)
+ JobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) (*model.Footprints, error)
RooflineHeatmap(ctx context.Context, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) ([][]float64, error)
NodeMetrics(ctx context.Context, cluster string, nodes []string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time) ([]*model.NodeMetrics, error)
NodeMetricsList(ctx context.Context, cluster string, subCluster string, nodeFilter string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time, page *model.PageRequest, resolution *int) (*model.NodesResultList, error)
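The new NodeResolver covers the three Node fields that need per-field logic. Note that the generated HealthState hook returns schema.NodeState even though the SDL declares MonitoringState; that pairing comes from the model binding in gqlgen.yml, so implementations must follow the Go signature. A sketch (the obj field names, and their types matching the generated signatures, are assumptions):

func (r *nodeResolver) NodeState(ctx context.Context, obj *schema.Node) (string, error) {
	return string(obj.NodeState), nil
}

func (r *nodeResolver) HealthState(ctx context.Context, obj *schema.Node) (schema.NodeState, error) {
	return obj.HealthState, nil // bound to the SDL's MonitoringState field
}

func (r *nodeResolver) MetaData(ctx context.Context, obj *schema.Node) (any, error) {
	return obj.MetaData, nil
}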
@@ -933,12 +982,61 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
return e.complexity.JobResultList.Offset(childComplexity), true
- case "JobStats.name":
- if e.complexity.JobStats.Name == nil {
+ case "JobStats.cluster":
+ if e.complexity.JobStats.Cluster == nil {
break
}
- return e.complexity.JobStats.Name(childComplexity), true
+ return e.complexity.JobStats.Cluster(childComplexity), true
+
+ case "JobStats.duration":
+ if e.complexity.JobStats.Duration == nil {
+ break
+ }
+
+ return e.complexity.JobStats.Duration(childComplexity), true
+
+ case "JobStats.id":
+ if e.complexity.JobStats.ID == nil {
+ break
+ }
+
+ return e.complexity.JobStats.ID(childComplexity), true
+
+ case "JobStats.jobId":
+ if e.complexity.JobStats.JobID == nil {
+ break
+ }
+
+ return e.complexity.JobStats.JobID(childComplexity), true
+
+ case "JobStats.numAccelerators":
+ if e.complexity.JobStats.NumAccelerators == nil {
+ break
+ }
+
+ return e.complexity.JobStats.NumAccelerators(childComplexity), true
+
+ case "JobStats.numHWThreads":
+ if e.complexity.JobStats.NumHWThreads == nil {
+ break
+ }
+
+ return e.complexity.JobStats.NumHWThreads(childComplexity), true
+
+ case "JobStats.numNodes":
+ if e.complexity.JobStats.NumNodes == nil {
+ break
+ }
+
+ return e.complexity.JobStats.NumNodes(childComplexity), true
+
+ case "JobStats.startTime":
+ if e.complexity.JobStats.StartTime == nil {
+ break
+ }
+
+ return e.complexity.JobStats.StartTime(childComplexity), true
case "JobStats.stats":
if e.complexity.JobStats.Stats == nil {
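Each case guards on the override being non-nil before dispatching, so Complexity reports (0, false) for any field the application never configured and the executor falls back to the default cost. An illustration of that contract (not generated code):

execSchema := generated.NewExecutableSchema(cfg)

// With no override installed:
cost, ok := execSchema.Complexity("JobStats", "cluster", 1, nil)
// ok == false, cost == 0 → default complexity applies.
// After cfg.Complexity.JobStats.Cluster is assigned, the same call
// returns the override's result with ok == true.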
@@ -947,26 +1045,12 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
return e.complexity.JobStats.Stats(childComplexity), true
- case "JobStatsWithScope.name":
- if e.complexity.JobStatsWithScope.Name == nil {
+ case "JobStats.subCluster":
+ if e.complexity.JobStats.SubCluster == nil {
break
}
- return e.complexity.JobStatsWithScope.Name(childComplexity), true
-
- case "JobStatsWithScope.scope":
- if e.complexity.JobStatsWithScope.Scope == nil {
- break
- }
-
- return e.complexity.JobStatsWithScope.Scope(childComplexity), true
-
- case "JobStatsWithScope.stats":
- if e.complexity.JobStatsWithScope.Stats == nil {
- break
- }
-
- return e.complexity.JobStatsWithScope.Stats(childComplexity), true
+ return e.complexity.JobStats.SubCluster(childComplexity), true
case "JobsStatistics.histDuration":
if e.complexity.JobsStatistics.HistDuration == nil {
@@ -1348,6 +1432,90 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
return e.complexity.Mutation.UpdateConfiguration(childComplexity, args["name"].(string), args["value"].(string)), true
+ case "NamedStats.data":
+ if e.complexity.NamedStats.Data == nil {
+ break
+ }
+
+ return e.complexity.NamedStats.Data(childComplexity), true
+
+ case "NamedStats.name":
+ if e.complexity.NamedStats.Name == nil {
+ break
+ }
+
+ return e.complexity.NamedStats.Name(childComplexity), true
+
+ case "NamedStatsWithScope.name":
+ if e.complexity.NamedStatsWithScope.Name == nil {
+ break
+ }
+
+ return e.complexity.NamedStatsWithScope.Name(childComplexity), true
+
+ case "NamedStatsWithScope.scope":
+ if e.complexity.NamedStatsWithScope.Scope == nil {
+ break
+ }
+
+ return e.complexity.NamedStatsWithScope.Scope(childComplexity), true
+
+ case "NamedStatsWithScope.stats":
+ if e.complexity.NamedStatsWithScope.Stats == nil {
+ break
+ }
+
+ return e.complexity.NamedStatsWithScope.Stats(childComplexity), true
+
+ case "Node.cluster":
+ if e.complexity.Node.Cluster == nil {
+ break
+ }
+
+ return e.complexity.Node.Cluster(childComplexity), true
+
+ case "Node.HealthState":
+ if e.complexity.Node.HealthState == nil {
+ break
+ }
+
+ return e.complexity.Node.HealthState(childComplexity), true
+
+ case "Node.hostname":
+ if e.complexity.Node.Hostname == nil {
+ break
+ }
+
+ return e.complexity.Node.Hostname(childComplexity), true
+
+ case "Node.id":
+ if e.complexity.Node.ID == nil {
+ break
+ }
+
+ return e.complexity.Node.ID(childComplexity), true
+
+ case "Node.metaData":
+ if e.complexity.Node.MetaData == nil {
+ break
+ }
+
+ return e.complexity.Node.MetaData(childComplexity), true
+
+ case "Node.nodeState":
+ if e.complexity.Node.NodeState == nil {
+ break
+ }
+
+ return e.complexity.Node.NodeState(childComplexity), true
+
+ case "Node.subCluster":
+ if e.complexity.Node.SubCluster == nil {
+ break
+ }
+
+ return e.complexity.Node.SubCluster(childComplexity), true
+
case "NodeMetrics.host":
if e.complexity.NodeMetrics.Host == nil {
break
@@ -1369,6 +1537,34 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
return e.complexity.NodeMetrics.SubCluster(childComplexity), true
+ case "NodeStateResultList.count":
+ if e.complexity.NodeStateResultList.Count == nil {
+ break
+ }
+
+ return e.complexity.NodeStateResultList.Count(childComplexity), true
+
+ case "NodeStateResultList.items":
+ if e.complexity.NodeStateResultList.Items == nil {
+ break
+ }
+
+ return e.complexity.NodeStateResultList.Items(childComplexity), true
+
+ case "NodeStats.count":
+ if e.complexity.NodeStats.Count == nil {
+ break
+ }
+
+ return e.complexity.NodeStats.Count(childComplexity), true
+
+ case "NodeStats.state":
+ if e.complexity.NodeStats.State == nil {
+ break
+ }
+
+ return e.complexity.NodeStats.State(childComplexity), true
+
case "NodesResultList.count":
if e.complexity.NodesResultList.Count == nil {
break
@@ -1497,6 +1693,18 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
return e.complexity.Query.JobsFootprints(childComplexity, args["filter"].([]*model.JobFilter), args["metrics"].([]string)), true
+ case "Query.jobsMetricStats":
+ if e.complexity.Query.JobsMetricStats == nil {
+ break
+ }
+
+ args, err := ec.field_Query_jobsMetricStats_args(context.TODO(), rawArgs)
+ if err != nil {
+ return 0, false
+ }
+
+ return e.complexity.Query.JobsMetricStats(childComplexity, args["filter"].([]*model.JobFilter), args["metrics"].([]string)), true
+
case "Query.jobsStatistics":
if e.complexity.Query.JobsStatistics == nil {
break
@@ -1509,6 +1717,18 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
return e.complexity.Query.JobsStatistics(childComplexity, args["filter"].([]*model.JobFilter), args["metrics"].([]string), args["page"].(*model.PageRequest), args["sortBy"].(*model.SortByAggregate), args["groupBy"].(*model.Aggregate), args["numDurationBins"].(*string), args["numMetricBins"].(*int)), true
+ case "Query.node":
+ if e.complexity.Query.Node == nil {
+ break
+ }
+
+ args, err := ec.field_Query_node_args(context.TODO(), rawArgs)
+ if err != nil {
+ return 0, false
+ }
+
+ return e.complexity.Query.Node(childComplexity, args["id"].(string)), true
+
case "Query.nodeMetrics":
if e.complexity.Query.NodeMetrics == nil {
break
@@ -1533,6 +1753,30 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
return e.complexity.Query.NodeMetricsList(childComplexity, args["cluster"].(string), args["subCluster"].(string), args["nodeFilter"].(string), args["scopes"].([]schema.MetricScope), args["metrics"].([]string), args["from"].(time.Time), args["to"].(time.Time), args["page"].(*model.PageRequest), args["resolution"].(*int)), true
+ case "Query.nodeStats":
+ if e.complexity.Query.NodeStats == nil {
+ break
+ }
+
+ args, err := ec.field_Query_nodeStats_args(context.TODO(), rawArgs)
+ if err != nil {
+ return 0, false
+ }
+
+ return e.complexity.Query.NodeStats(childComplexity, args["filter"].([]*model.NodeFilter)), true
+
+ case "Query.nodes":
+ if e.complexity.Query.Nodes == nil {
+ break
+ }
+
+ args, err := ec.field_Query_nodes_args(context.TODO(), rawArgs)
+ if err != nil {
+ return 0, false
+ }
+
+ return e.complexity.Query.Nodes(childComplexity, args["filter"].([]*model.NodeFilter), args["order"].(*model.OrderByInput)), true
+
case "Query.rooflineHeatmap":
if e.complexity.Query.RooflineHeatmap == nil {
break
@@ -1973,6 +2217,7 @@ func (e *executableSchema) Exec(ctx context.Context) graphql.ResponseHandler {
ec.unmarshalInputIntRange,
ec.unmarshalInputJobFilter,
ec.unmarshalInputMetricStatItem,
+ ec.unmarshalInputNodeFilter,
ec.unmarshalInputOrderByInput,
ec.unmarshalInputPageRequest,
ec.unmarshalInputStringInput,
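Registering unmarshalInputNodeFilter is what lets query variables decode into model.NodeFilter. The accepted JSON shape follows the NodeFilter input declared in the schema hunk below; an illustrative variables object (cluster name and state value are placeholders):

vars := map[string]any{
	"filter": []any{
		map[string]any{
			"cluster":   map[string]any{"eq": "fritz"}, // StringInput
			"nodeState": "allocated",                   // NodeState scalar
		},
	},
}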
@@ -2080,61 +2325,78 @@ scalar Any
scalar NullableFloat
scalar MetricScope
scalar JobState
+scalar NodeState
+scalar MonitoringState
+
+type Node {
+ id: ID!
+ hostname: String!
+ cluster: String!
+ subCluster: String!
+ nodeState: NodeState!
+ HealthState: MonitoringState!
+ metaData: Any
+}
+
+type NodeStats {
+ state: String!
+ count: Int!
+}
type Job {
- id: ID!
- jobId: Int!
- user: String!
- project: String!
- cluster: String!
- subCluster: String!
- startTime: Time!
- duration: Int!
- walltime: Int!
- numNodes: Int!
- numHWThreads: Int!
- numAcc: Int!
- energy: Float!
- SMT: Int!
- exclusive: Int!
- partition: String!
- arrayJobId: Int!
+ id: ID!
+ jobId: Int!
+ user: String!
+ project: String!
+ cluster: String!
+ subCluster: String!
+ startTime: Time!
+ duration: Int!
+ walltime: Int!
+ numNodes: Int!
+ numHWThreads: Int!
+ numAcc: Int!
+ energy: Float!
+ SMT: Int!
+ exclusive: Int!
+ partition: String!
+ arrayJobId: Int!
monitoringStatus: Int!
- state: JobState!
- tags: [Tag!]!
- resources: [Resource!]!
- concurrentJobs: JobLinkResultList
- footprint: [FootprintValue]
- energyFootprint: [EnergyFootprintValue]
- metaData: Any
- userData: User
+ state: JobState!
+ tags: [Tag!]!
+ resources: [Resource!]!
+ concurrentJobs: JobLinkResultList
+ footprint: [FootprintValue]
+ energyFootprint: [EnergyFootprintValue]
+ metaData: Any
+ userData: User
}
type JobLink {
- id: ID!
- jobId: Int!
+ id: ID!
+ jobId: Int!
}
type Cluster {
- name: String!
- partitions: [String!]! # Slurm partitions
- subClusters: [SubCluster!]! # Hardware partitions/subclusters
+ name: String!
+ partitions: [String!]! # Slurm partitions
+ subClusters: [SubCluster!]! # Hardware partitions/subclusters
}
type SubCluster {
- name: String!
- nodes: String!
- numberOfNodes: Int!
- processorType: String!
- socketsPerNode: Int!
- coresPerSocket: Int!
- threadsPerCore: Int!
- flopRateScalar: MetricValue!
- flopRateSimd: MetricValue!
+ name: String!
+ nodes: String!
+ numberOfNodes: Int!
+ processorType: String!
+ socketsPerNode: Int!
+ coresPerSocket: Int!
+ threadsPerCore: Int!
+ flopRateScalar: MetricValue!
+ flopRateSimd: MetricValue!
memoryBandwidth: MetricValue!
- topology: Topology!
- metricConfig: [MetricConfig!]!
- footprint: [String!]!
+ topology: Topology!
+ metricConfig: [MetricConfig!]!
+ footprint: [String!]!
}
type FootprintValue {
@@ -2156,99 +2418,112 @@ type MetricValue {
}
type Topology {
- node: [Int!]
- socket: [[Int!]!]
+ node: [Int!]
+ socket: [[Int!]!]
memoryDomain: [[Int!]!]
- die: [[Int!]!]
- core: [[Int!]!]
+ die: [[Int!]!]
+ core: [[Int!]!]
accelerators: [Accelerator!]
}
type Accelerator {
- id: String!
- type: String!
+ id: String!
+ type: String!
model: String!
}
type SubClusterConfig {
- name: String!
- peak: Float
- normal: Float
+ name: String!
+ peak: Float
+ normal: Float
caution: Float
- alert: Float
- remove: Boolean
+ alert: Float
+ remove: Boolean
}
type MetricConfig {
- name: String!
- unit: Unit!
- scope: MetricScope!
+ name: String!
+ unit: Unit!
+ scope: MetricScope!
aggregation: String!
- timestep: Int!
- peak: Float!
- normal: Float
+ timestep: Int!
+ peak: Float!
+ normal: Float
caution: Float!
- alert: Float!
+ alert: Float!
lowerIsBetter: Boolean
subClusters: [SubClusterConfig!]!
}
type Tag {
- id: ID!
+ id: ID!
type: String!
name: String!
scope: String!
}
type Resource {
- hostname: String!
- hwthreads: [Int!]
- accelerators: [String!]
+ hostname: String!
+ hwthreads: [Int!]
+ accelerators: [String!]
configuration: String
}
type JobMetricWithName {
- name: String!
- scope: MetricScope!
+ name: String!
+ scope: MetricScope!
metric: JobMetric!
}
type JobMetric {
- unit: Unit
- timestep: Int!
- series: [Series!]
+ unit: Unit
+ timestep: Int!
+ series: [Series!]
statisticsSeries: StatsSeries
}
type Series {
- hostname: String!
- id: String
+ hostname: String!
+ id: String
statistics: MetricStatistics
- data: [NullableFloat!]!
+ data: [NullableFloat!]!
}
type StatsSeries {
- mean: [NullableFloat!]!
+ mean: [NullableFloat!]!
median: [NullableFloat!]!
- min: [NullableFloat!]!
- max: [NullableFloat!]!
+ min: [NullableFloat!]!
+ max: [NullableFloat!]!
}
-type JobStatsWithScope {
- name: String!
- scope: MetricScope!
- stats: [ScopedStats!]!
+type NamedStatsWithScope {
+ name: String!
+ scope: MetricScope!
+ stats: [ScopedStats!]!
}
type ScopedStats {
- hostname: String!
- id: String
- data: MetricStatistics!
+ hostname: String!
+ id: String
+ data: MetricStatistics!
}
type JobStats {
- name: String!
- stats: MetricStatistics!
+ id: Int!
+ jobId: String!
+ startTime: Int!
+ duration: Int!
+ cluster: String!
+ subCluster: String!
+ numNodes: Int!
+ numHWThreads: Int
+ numAccelerators: Int
+ stats: [NamedStats!]!
+}
+
+type NamedStats {
+ name: String!
+ data: MetricStatistics!
}
type Unit {
@@ -2264,12 +2539,12 @@ type MetricStatistics {
type MetricFootprints {
metric: String!
- data: [NullableFloat!]!
+ data: [NullableFloat!]!
}
type Footprints {
timeWeights: TimeWeights!
- metrics: [MetricFootprints!]!
+ metrics: [MetricFootprints!]!
}
type TimeWeights {
@@ -2278,20 +2553,33 @@ type TimeWeights {
coreHours: [NullableFloat!]!
}
-enum Aggregate { USER, PROJECT, CLUSTER }
-enum SortByAggregate { TOTALWALLTIME, TOTALJOBS, TOTALNODES, TOTALNODEHOURS, TOTALCORES, TOTALCOREHOURS, TOTALACCS, TOTALACCHOURS }
+enum Aggregate {
+ USER
+ PROJECT
+ CLUSTER
+}
+enum SortByAggregate {
+ TOTALWALLTIME
+ TOTALJOBS
+ TOTALNODES
+ TOTALNODEHOURS
+ TOTALCORES
+ TOTALCOREHOURS
+ TOTALACCS
+ TOTALACCHOURS
+}
type NodeMetrics {
- host: String!
+ host: String!
subCluster: String!
- metrics: [JobMetricWithName!]!
+ metrics: [JobMetricWithName!]!
}
type NodesResultList {
- items: [NodeMetrics!]!
+ items: [NodeMetrics!]!
offset: Int
- limit: Int
- count: Int
+ limit: Int
+ count: Int
totalNodes: Int
hasNextPage: Boolean
}
@@ -2310,14 +2598,14 @@ type GlobalMetricListItem {
}
type Count {
- name: String!
+ name: String!
count: Int!
}
type User {
username: String!
- name: String!
- email: String!
+ name: String!
+ email: String!
}
input MetricStatItem {
@@ -2326,26 +2614,81 @@ input MetricStatItem {
}
type Query {
- clusters: [Cluster!]! # List of all clusters
- tags: [Tag!]! # List of all tags
- globalMetrics: [GlobalMetricListItem!]!
+ clusters: [Cluster!]! # List of all clusters
+ tags: [Tag!]! # List of all tags
+ globalMetrics: [GlobalMetricListItem!]!
user(username: String!): User
allocatedNodes(cluster: String!): [Count!]!
+ node(id: ID!): Node
+ nodes(filter: [NodeFilter!], order: OrderByInput): NodeStateResultList!
+ nodeStats(filter: [NodeFilter!]): [NodeStats!]!
+
job(id: ID!): Job
- jobMetrics(id: ID!, metrics: [String!], scopes: [MetricScope!], resolution: Int): [JobMetricWithName!]!
- jobStats(id: ID!, metrics: [String!]): [JobStats!]!
- scopedJobStats(id: ID!, metrics: [String!], scopes: [MetricScope!]): [JobStatsWithScope!]!
+ jobMetrics(
+ id: ID!
+ metrics: [String!]
+ scopes: [MetricScope!]
+ resolution: Int
+ ): [JobMetricWithName!]!
+
+ jobStats(id: ID!, metrics: [String!]): [NamedStats!]!
+
+ scopedJobStats(
+ id: ID!
+ metrics: [String!]
+ scopes: [MetricScope!]
+ ): [NamedStatsWithScope!]!
+
+ jobs(
+ filter: [JobFilter!]
+ page: PageRequest
+ order: OrderByInput
+ ): JobResultList!
+
+ jobsStatistics(
+ filter: [JobFilter!]
+ metrics: [String!]
+ page: PageRequest
+ sortBy: SortByAggregate
+ groupBy: Aggregate
+ numDurationBins: String
+ numMetricBins: Int
+ ): [JobsStatistics!]!
+
+ jobsMetricStats(filter: [JobFilter!], metrics: [String!]): [JobStats!]!
jobsFootprints(filter: [JobFilter!], metrics: [String!]!): Footprints
- jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList!
- jobsStatistics(filter: [JobFilter!], metrics: [String!], page: PageRequest, sortBy: SortByAggregate, groupBy: Aggregate, numDurationBins: String, numMetricBins: Int): [JobsStatistics!]!
+ rooflineHeatmap(
+ filter: [JobFilter!]!
+ rows: Int!
+ cols: Int!
+ minX: Float!
+ minY: Float!
+ maxX: Float!
+ maxY: Float!
+ ): [[Float!]!]!
- rooflineHeatmap(filter: [JobFilter!]!, rows: Int!, cols: Int!, minX: Float!, minY: Float!, maxX: Float!, maxY: Float!): [[Float!]!]!
-
- nodeMetrics(cluster: String!, nodes: [String!], scopes: [MetricScope!], metrics: [String!], from: Time!, to: Time!): [NodeMetrics!]!
- nodeMetricsList(cluster: String!, subCluster: String!, nodeFilter: String!, scopes: [MetricScope!], metrics: [String!], from: Time!, to: Time!, page: PageRequest, resolution: Int): NodesResultList!
+ nodeMetrics(
+ cluster: String!
+ nodes: [String!]
+ scopes: [MetricScope!]
+ metrics: [String!]
+ from: Time!
+ to: Time!
+ ): [NodeMetrics!]!
+ nodeMetricsList(
+ cluster: String!
+ subCluster: String!
+ nodeFilter: String!
+ scopes: [MetricScope!]
+ metrics: [String!]
+ from: Time!
+ to: Time!
+ page: PageRequest
+ resolution: Int
+ ): NodesResultList!
}
type Mutation {
@@ -2358,37 +2701,52 @@ type Mutation {
updateConfiguration(name: String!, value: String!): String
}
-type IntRangeOutput { from: Int!, to: Int! }
-type TimeRangeOutput { range: String, from: Time!, to: Time! }
+type IntRangeOutput {
+ from: Int!
+ to: Int!
+}
+type TimeRangeOutput {
+ range: String
+ from: Time!
+ to: Time!
+}
+
+input NodeFilter {
+ hostname: StringInput
+ cluster: StringInput
+ nodeState: NodeState
+ healthState: MonitoringState
+}
input JobFilter {
- tags: [ID!]
- jobId: StringInput
- arrayJobId: Int
- user: StringInput
- project: StringInput
- jobName: StringInput
- cluster: StringInput
- partition: StringInput
- duration: IntRange
- energy: FloatRange
+ tags: [ID!]
+ dbId: [ID!]
+ jobId: StringInput
+ arrayJobId: Int
+ user: StringInput
+ project: StringInput
+ jobName: StringInput
+ cluster: StringInput
+ partition: StringInput
+ duration: IntRange
+ energy: FloatRange
minRunningFor: Int
- numNodes: IntRange
+ numNodes: IntRange
numAccelerators: IntRange
- numHWThreads: IntRange
+ numHWThreads: IntRange
- startTime: TimeRange
- state: [JobState!]
+ startTime: TimeRange
+ state: [JobState!]
metricStats: [MetricStatItem!]
- exclusive: Int
- node: StringInput
+ exclusive: Int
+ node: StringInput
}
input OrderByInput {
field: String!
- type: String!,
+ type: String!
order: SortDirectionEnum! = ASC
}
@@ -2398,34 +2756,46 @@ enum SortDirectionEnum {
}
input StringInput {
- eq: String
- neq: String
- contains: String
+ eq: String
+ neq: String
+ contains: String
startsWith: String
- endsWith: String
- in: [String!]
+ endsWith: String
+ in: [String!]
}
-input IntRange { from: Int!, to: Int! }
-input TimeRange { range: String, from: Time, to: Time }
+input IntRange {
+ from: Int!
+ to: Int!
+}
+input TimeRange {
+ range: String
+ from: Time
+ to: Time
+}
input FloatRange {
from: Float!
to: Float!
}
+type NodeStateResultList {
+ items: [Node!]!
+ count: Int
+}
+
type JobResultList {
- items: [Job!]!
+ items: [Job!]!
offset: Int
- limit: Int
- count: Int
+ limit: Int
+ count: Int
hasNextPage: Boolean
}
type JobLinkResultList {
listQuery: String
- items: [JobLink!]!
- count: Int
+ items: [JobLink!]!
+ count: Int
}
type HistoPoint {
@@ -2447,29 +2817,29 @@ type MetricHistoPoint {
max: Int
}
-type JobsStatistics {
- id: ID! # If ` + "`" + `groupBy` + "`" + ` was used, ID of the user/project/cluster
- name: String! # if User-Statistics: Given Name of Account (ID) Owner
- totalJobs: Int! # Number of jobs
- runningJobs: Int! # Number of running jobs
- shortJobs: Int! # Number of jobs with a duration of less than duration
- totalWalltime: Int! # Sum of the duration of all matched jobs in hours
- totalNodes: Int! # Sum of the nodes of all matched jobs
- totalNodeHours: Int! # Sum of the node hours of all matched jobs
- totalCores: Int! # Sum of the cores of all matched jobs
- totalCoreHours: Int! # Sum of the core hours of all matched jobs
- totalAccs: Int! # Sum of the accs of all matched jobs
- totalAccHours: Int! # Sum of the gpu hours of all matched jobs
- histDuration: [HistoPoint!]! # value: hour, count: number of jobs with a rounded duration of value
- histNumNodes: [HistoPoint!]! # value: number of nodes, count: number of jobs with that number of nodes
- histNumCores: [HistoPoint!]! # value: number of cores, count: number of jobs with that number of cores
- histNumAccs: [HistoPoint!]! # value: number of accs, count: number of jobs with that number of accs
- histMetrics: [MetricHistoPoints!]! # metric: metricname, data array of histopoints: value: metric average bin, count: number of jobs with that metric average
+type JobsStatistics {
+ id: ID! # If ` + "`" + `groupBy` + "`" + ` was used, ID of the user/project/cluster
+ name: String! # For user statistics: the given name of the account (ID) owner
+ totalJobs: Int! # Number of jobs
+ runningJobs: Int! # Number of running jobs
+ shortJobs: Int! # Number of jobs with a duration below the short-jobs threshold
+ totalWalltime: Int! # Sum of the duration of all matched jobs in hours
+ totalNodes: Int! # Sum of the nodes of all matched jobs
+ totalNodeHours: Int! # Sum of the node hours of all matched jobs
+ totalCores: Int! # Sum of the cores of all matched jobs
+ totalCoreHours: Int! # Sum of the core hours of all matched jobs
+ totalAccs: Int! # Sum of the accs of all matched jobs
+ totalAccHours: Int! # Sum of the accelerator hours of all matched jobs
+ histDuration: [HistoPoint!]! # value: hour, count: number of jobs with a rounded duration of value
+ histNumNodes: [HistoPoint!]! # value: number of nodes, count: number of jobs with that number of nodes
+ histNumCores: [HistoPoint!]! # value: number of cores, count: number of jobs with that number of cores
+ histNumAccs: [HistoPoint!]! # value: number of accs, count: number of jobs with that number of accs
+ histMetrics: [MetricHistoPoints!]! # metric: metricname, data array of histopoints: value: metric average bin, count: number of jobs with that metric average
}
input PageRequest {
itemsPerPage: Int!
- page: Int!
+ page: Int!
}
`, BuiltIn: false},
}
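Taken together, the embedded schema now exposes node, nodes, and nodeStats. A self-contained smoke test against a running instance (URL, authentication, and the cluster name are deployment-specific assumptions):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"os"
)

func main() {
	payload, _ := json.Marshal(map[string]any{
		"query": `{
		  nodeStats { state count }
		  nodes(filter: [{cluster: {eq: "fritz"}}]) {
		    count
		    items { hostname cluster subCluster nodeState HealthState }
		  }
		}`,
	})
	resp, err := http.Post("http://localhost:8080/query", "application/json", bytes.NewReader(payload))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer resp.Body.Close()
	var out map[string]any
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(out["data"], out["errors"])
}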
@@ -2890,7 +3260,7 @@ func (ec *executionContext) field_Query_jobMetrics_argsScopes(
ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("scopes"))
if tmp, ok := rawArgs["scopes"]; ok {
- return ec.unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScopeᚄ(ctx, tmp)
+ return ec.unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricScopeᚄ(ctx, tmp)
}
var zeroVal []schema.MetricScope
@@ -3045,6 +3415,57 @@ func (ec *executionContext) field_Query_jobsFootprints_argsMetrics(
return zeroVal, nil
}
+func (ec *executionContext) field_Query_jobsMetricStats_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) {
+ var err error
+ args := map[string]any{}
+ arg0, err := ec.field_Query_jobsMetricStats_argsFilter(ctx, rawArgs)
+ if err != nil {
+ return nil, err
+ }
+ args["filter"] = arg0
+ arg1, err := ec.field_Query_jobsMetricStats_argsMetrics(ctx, rawArgs)
+ if err != nil {
+ return nil, err
+ }
+ args["metrics"] = arg1
+ return args, nil
+}
+func (ec *executionContext) field_Query_jobsMetricStats_argsFilter(
+ ctx context.Context,
+ rawArgs map[string]any,
+) ([]*model.JobFilter, error) {
+ if _, ok := rawArgs["filter"]; !ok {
+ var zeroVal []*model.JobFilter
+ return zeroVal, nil
+ }
+
+ ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("filter"))
+ if tmp, ok := rawArgs["filter"]; ok {
+ return ec.unmarshalOJobFilter2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobFilterᚄ(ctx, tmp)
+ }
+
+ var zeroVal []*model.JobFilter
+ return zeroVal, nil
+}
+
+func (ec *executionContext) field_Query_jobsMetricStats_argsMetrics(
+ ctx context.Context,
+ rawArgs map[string]any,
+) ([]string, error) {
+ if _, ok := rawArgs["metrics"]; !ok {
+ var zeroVal []string
+ return zeroVal, nil
+ }
+
+ ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("metrics"))
+ if tmp, ok := rawArgs["metrics"]; ok {
+ return ec.unmarshalOString2ᚕstringᚄ(ctx, tmp)
+ }
+
+ var zeroVal []string
+ return zeroVal, nil
+}
+
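All of the generated *_args functions follow the same three-stage shape: a wrapper that collects named arguments, one helper per argument, and an unmarshal call guarded by an absence check that yields the zero value. Distilled into a generic helper purely for illustration (this is not how gqlgen emits code, just the invariant it maintains):

func argOrZero[T any](rawArgs map[string]any, name string, unmarshal func(any) (T, error)) (T, error) {
	var zero T
	raw, ok := rawArgs[name]
	if !ok {
		return zero, nil // absent argument → zero value, no error
	}
	return unmarshal(raw)
}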
func (ec *executionContext) field_Query_jobsStatistics_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) {
var err error
args := map[string]any{}
@@ -3400,7 +3821,7 @@ func (ec *executionContext) field_Query_nodeMetricsList_argsScopes(
ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("scopes"))
if tmp, ok := rawArgs["scopes"]; ok {
- return ec.unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScopeᚄ(ctx, tmp)
+ return ec.unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricScopeᚄ(ctx, tmp)
}
var zeroVal []schema.MetricScope
@@ -3579,7 +4000,7 @@ func (ec *executionContext) field_Query_nodeMetrics_argsScopes(
ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("scopes"))
if tmp, ok := rawArgs["scopes"]; ok {
- return ec.unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScopeᚄ(ctx, tmp)
+ return ec.unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricScopeᚄ(ctx, tmp)
}
var zeroVal []schema.MetricScope
@@ -3640,6 +4061,113 @@ func (ec *executionContext) field_Query_nodeMetrics_argsTo(
return zeroVal, nil
}
+func (ec *executionContext) field_Query_nodeStats_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) {
+ var err error
+ args := map[string]any{}
+ arg0, err := ec.field_Query_nodeStats_argsFilter(ctx, rawArgs)
+ if err != nil {
+ return nil, err
+ }
+ args["filter"] = arg0
+ return args, nil
+}
+func (ec *executionContext) field_Query_nodeStats_argsFilter(
+ ctx context.Context,
+ rawArgs map[string]any,
+) ([]*model.NodeFilter, error) {
+ if _, ok := rawArgs["filter"]; !ok {
+ var zeroVal []*model.NodeFilter
+ return zeroVal, nil
+ }
+
+ ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("filter"))
+ if tmp, ok := rawArgs["filter"]; ok {
+ return ec.unmarshalONodeFilter2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodeFilterᚄ(ctx, tmp)
+ }
+
+ var zeroVal []*model.NodeFilter
+ return zeroVal, nil
+}
+
+func (ec *executionContext) field_Query_node_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) {
+ var err error
+ args := map[string]any{}
+ arg0, err := ec.field_Query_node_argsID(ctx, rawArgs)
+ if err != nil {
+ return nil, err
+ }
+ args["id"] = arg0
+ return args, nil
+}
+func (ec *executionContext) field_Query_node_argsID(
+ ctx context.Context,
+ rawArgs map[string]any,
+) (string, error) {
+ if _, ok := rawArgs["id"]; !ok {
+ var zeroVal string
+ return zeroVal, nil
+ }
+
+ ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("id"))
+ if tmp, ok := rawArgs["id"]; ok {
+ return ec.unmarshalNID2string(ctx, tmp)
+ }
+
+ var zeroVal string
+ return zeroVal, nil
+}
+
+func (ec *executionContext) field_Query_nodes_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) {
+ var err error
+ args := map[string]any{}
+ arg0, err := ec.field_Query_nodes_argsFilter(ctx, rawArgs)
+ if err != nil {
+ return nil, err
+ }
+ args["filter"] = arg0
+ arg1, err := ec.field_Query_nodes_argsOrder(ctx, rawArgs)
+ if err != nil {
+ return nil, err
+ }
+ args["order"] = arg1
+ return args, nil
+}
+func (ec *executionContext) field_Query_nodes_argsFilter(
+ ctx context.Context,
+ rawArgs map[string]any,
+) ([]*model.NodeFilter, error) {
+ if _, ok := rawArgs["filter"]; !ok {
+ var zeroVal []*model.NodeFilter
+ return zeroVal, nil
+ }
+
+ ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("filter"))
+ if tmp, ok := rawArgs["filter"]; ok {
+ return ec.unmarshalONodeFilter2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodeFilterᚄ(ctx, tmp)
+ }
+
+ var zeroVal []*model.NodeFilter
+ return zeroVal, nil
+}
+
+func (ec *executionContext) field_Query_nodes_argsOrder(
+ ctx context.Context,
+ rawArgs map[string]any,
+) (*model.OrderByInput, error) {
+ if _, ok := rawArgs["order"]; !ok {
+ var zeroVal *model.OrderByInput
+ return zeroVal, nil
+ }
+
+ ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("order"))
+ if tmp, ok := rawArgs["order"]; ok {
+ return ec.unmarshalOOrderByInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐOrderByInput(ctx, tmp)
+ }
+
+ var zeroVal *model.OrderByInput
+ return zeroVal, nil
+}
+
func (ec *executionContext) field_Query_rooflineHeatmap_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) {
var err error
args := map[string]any{}
@@ -3873,7 +4401,7 @@ func (ec *executionContext) field_Query_scopedJobStats_argsScopes(
ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("scopes"))
if tmp, ok := rawArgs["scopes"]; ok {
- return ec.unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScopeᚄ(ctx, tmp)
+ return ec.unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricScopeᚄ(ctx, tmp)
}
var zeroVal []schema.MetricScope
@@ -4276,7 +4804,7 @@ func (ec *executionContext) _Cluster_subClusters(ctx context.Context, field grap
}
res := resTmp.([]*schema.SubCluster)
fc.Result = res
- return ec.marshalNSubCluster2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐSubClusterᚄ(ctx, field.Selections, res)
+ return ec.marshalNSubCluster2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐSubClusterᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_Cluster_subClusters(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -4934,7 +5462,7 @@ func (ec *executionContext) _GlobalMetricListItem_unit(ctx context.Context, fiel
}
res := resTmp.(schema.Unit)
fc.Result = res
- return ec.marshalNUnit2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐUnit(ctx, field.Selections, res)
+ return ec.marshalNUnit2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐUnit(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_GlobalMetricListItem_unit(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -4984,7 +5512,7 @@ func (ec *executionContext) _GlobalMetricListItem_scope(ctx context.Context, fie
}
res := resTmp.(schema.MetricScope)
fc.Result = res
- return ec.marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScope(ctx, field.Selections, res)
+ return ec.marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricScope(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_GlobalMetricListItem_scope(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -5069,7 +5597,7 @@ func (ec *executionContext) _GlobalMetricListItem_availability(ctx context.Conte
}
res := resTmp.([]schema.ClusterSupport)
fc.Result = res
- return ec.marshalNClusterSupport2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐClusterSupportᚄ(ctx, field.Selections, res)
+ return ec.marshalNClusterSupport2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐClusterSupportᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_GlobalMetricListItem_availability(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -5293,9 +5821,9 @@ func (ec *executionContext) _Job_id(ctx context.Context, field graphql.Collected
}
return graphql.Null
}
- res := resTmp.(int64)
+ res := resTmp.(*int64)
fc.Result = res
- return ec.marshalNID2int64(ctx, field.Selections, res)
+ return ec.marshalNID2ᚖint64(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_Job_id(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
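Job.id switching from int64 to *int64 pulls in a pointer-aware marshaller whose body is not shown in this hunk. The conventional gqlgen shape, reproduced here as a hedged sketch, nil-checks and then defers to the value marshaller:

func (ec *executionContext) marshalNID2ᚖint64(ctx context.Context, sel ast.SelectionSet, v *int64) graphql.Marshaler {
	if v == nil {
		if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
			ec.Errorf(ctx, "the requested element is null which the schema does not allow")
		}
		return graphql.Null
	}
	return ec.marshalNID2int64(ctx, sel, *v)
}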
@@ -5545,7 +6073,7 @@ func (ec *executionContext) _Job_startTime(ctx context.Context, field graphql.Co
}()
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
ctx = rctx // use context from middleware stack in children
- return obj.StartTime, nil
+ return ec.resolvers.Job().StartTime(rctx, obj)
})
if err != nil {
ec.Error(ctx, err)
@@ -5557,17 +6085,17 @@ func (ec *executionContext) _Job_startTime(ctx context.Context, field graphql.Co
}
return graphql.Null
}
- res := resTmp.(time.Time)
+ res := resTmp.(*time.Time)
fc.Result = res
- return ec.marshalNTime2timeᚐTime(ctx, field.Selections, res)
+ return ec.marshalNTime2ᚖtimeᚐTime(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_Job_startTime(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
fc = &graphql.FieldContext{
Object: "Job",
Field: field,
- IsMethod: false,
- IsResolver: false,
+ IsMethod: true,
+ IsResolver: true,
Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
return nil, errors.New("field of type Time does not have child fields")
},
@@ -6087,7 +6615,7 @@ func (ec *executionContext) _Job_state(ctx context.Context, field graphql.Collec
}
res := resTmp.(schema.JobState)
fc.Result = res
- return ec.marshalNJobState2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJobState(ctx, field.Selections, res)
+ return ec.marshalNJobState2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐJobState(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_Job_state(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -6131,7 +6659,7 @@ func (ec *executionContext) _Job_tags(ctx context.Context, field graphql.Collect
}
res := resTmp.([]*schema.Tag)
fc.Result = res
- return ec.marshalNTag2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTagᚄ(ctx, field.Selections, res)
+ return ec.marshalNTag2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐTagᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_Job_tags(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -6185,7 +6713,7 @@ func (ec *executionContext) _Job_resources(ctx context.Context, field graphql.Co
}
res := resTmp.([]*schema.Resource)
fc.Result = res
- return ec.marshalNResource2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐResourceᚄ(ctx, field.Selections, res)
+ return ec.marshalNResource2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐResourceᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_Job_resources(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -6693,7 +7221,7 @@ func (ec *executionContext) _JobMetric_unit(ctx context.Context, field graphql.C
}
res := resTmp.(schema.Unit)
fc.Result = res
- return ec.marshalOUnit2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐUnit(ctx, field.Selections, res)
+ return ec.marshalOUnit2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐUnit(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_JobMetric_unit(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -6784,7 +7312,7 @@ func (ec *executionContext) _JobMetric_series(ctx context.Context, field graphql
}
res := resTmp.([]schema.Series)
fc.Result = res
- return ec.marshalOSeries2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐSeriesᚄ(ctx, field.Selections, res)
+ return ec.marshalOSeries2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐSeriesᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_JobMetric_series(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -6835,7 +7363,7 @@ func (ec *executionContext) _JobMetric_statisticsSeries(ctx context.Context, fie
}
res := resTmp.(*schema.StatsSeries)
fc.Result = res
- return ec.marshalOStatsSeries2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐStatsSeries(ctx, field.Selections, res)
+ return ec.marshalOStatsSeries2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐStatsSeries(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_JobMetric_statisticsSeries(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -6933,7 +7461,7 @@ func (ec *executionContext) _JobMetricWithName_scope(ctx context.Context, field
}
res := resTmp.(schema.MetricScope)
fc.Result = res
- return ec.marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScope(ctx, field.Selections, res)
+ return ec.marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricScope(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_JobMetricWithName_scope(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -6977,7 +7505,7 @@ func (ec *executionContext) _JobMetricWithName_metric(ctx context.Context, field
}
res := resTmp.(*schema.JobMetric)
fc.Result = res
- return ec.marshalNJobMetric2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJobMetric(ctx, field.Selections, res)
+ return ec.marshalNJobMetric2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐJobMetric(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_JobMetricWithName_metric(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -7031,7 +7559,7 @@ func (ec *executionContext) _JobResultList_items(ctx context.Context, field grap
}
res := resTmp.([]*schema.Job)
fc.Result = res
- return ec.marshalNJob2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJobᚄ(ctx, field.Selections, res)
+ return ec.marshalNJob2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐJobᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_JobResultList_items(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -7265,8 +7793,8 @@ func (ec *executionContext) fieldContext_JobResultList_hasNextPage(_ context.Con
return fc, nil
}
-func (ec *executionContext) _JobStats_name(ctx context.Context, field graphql.CollectedField, obj *model.JobStats) (ret graphql.Marshaler) {
- fc, err := ec.fieldContext_JobStats_name(ctx, field)
+func (ec *executionContext) _JobStats_id(ctx context.Context, field graphql.CollectedField, obj *model.JobStats) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_JobStats_id(ctx, field)
if err != nil {
return graphql.Null
}
@@ -7279,7 +7807,51 @@ func (ec *executionContext) _JobStats_name(ctx context.Context, field graphql.Co
}()
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
ctx = rctx // use context from middleware stack in children
- return obj.Name, nil
+ return obj.ID, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(int)
+ fc.Result = res
+ return ec.marshalNInt2int(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_JobStats_id(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "JobStats",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type Int does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _JobStats_jobId(ctx context.Context, field graphql.CollectedField, obj *model.JobStats) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_JobStats_jobId(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.JobID, nil
})
if err != nil {
ec.Error(ctx, err)
@@ -7296,7 +7868,7 @@ func (ec *executionContext) _JobStats_name(ctx context.Context, field graphql.Co
return ec.marshalNString2string(ctx, field.Selections, res)
}
-func (ec *executionContext) fieldContext_JobStats_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+func (ec *executionContext) fieldContext_JobStats_jobId(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
fc = &graphql.FieldContext{
Object: "JobStats",
Field: field,
@@ -7309,6 +7881,308 @@ func (ec *executionContext) fieldContext_JobStats_name(_ context.Context, field
return fc, nil
}
+func (ec *executionContext) _JobStats_startTime(ctx context.Context, field graphql.CollectedField, obj *model.JobStats) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_JobStats_startTime(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.StartTime, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(int)
+ fc.Result = res
+ return ec.marshalNInt2int(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_JobStats_startTime(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "JobStats",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type Int does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _JobStats_duration(ctx context.Context, field graphql.CollectedField, obj *model.JobStats) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_JobStats_duration(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Duration, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(int)
+ fc.Result = res
+ return ec.marshalNInt2int(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_JobStats_duration(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "JobStats",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type Int does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _JobStats_cluster(ctx context.Context, field graphql.CollectedField, obj *model.JobStats) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_JobStats_cluster(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Cluster, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_JobStats_cluster(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "JobStats",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _JobStats_subCluster(ctx context.Context, field graphql.CollectedField, obj *model.JobStats) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_JobStats_subCluster(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.SubCluster, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_JobStats_subCluster(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "JobStats",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _JobStats_numNodes(ctx context.Context, field graphql.CollectedField, obj *model.JobStats) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_JobStats_numNodes(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.NumNodes, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(int)
+ fc.Result = res
+ return ec.marshalNInt2int(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_JobStats_numNodes(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "JobStats",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type Int does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _JobStats_numHWThreads(ctx context.Context, field graphql.CollectedField, obj *model.JobStats) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_JobStats_numHWThreads(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.NumHWThreads, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*int)
+ fc.Result = res
+ return ec.marshalOInt2ᚖint(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_JobStats_numHWThreads(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "JobStats",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type Int does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _JobStats_numAccelerators(ctx context.Context, field graphql.CollectedField, obj *model.JobStats) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_JobStats_numAccelerators(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.NumAccelerators, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*int)
+ fc.Result = res
+ return ec.marshalOInt2ᚖint(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_JobStats_numAccelerators(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "JobStats",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type Int does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
func (ec *executionContext) _JobStats_stats(ctx context.Context, field graphql.CollectedField, obj *model.JobStats) (ret graphql.Marshaler) {
fc, err := ec.fieldContext_JobStats_stats(ctx, field)
if err != nil {
@@ -7335,9 +8209,9 @@ func (ec *executionContext) _JobStats_stats(ctx context.Context, field graphql.C
}
return graphql.Null
}
- res := resTmp.(*schema.MetricStatistics)
+ res := resTmp.([]*model.NamedStats)
fc.Result = res
- return ec.marshalNMetricStatistics2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricStatistics(ctx, field.Selections, res)
+ return ec.marshalNNamedStats2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNamedStatsᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_JobStats_stats(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -7348,154 +8222,12 @@ func (ec *executionContext) fieldContext_JobStats_stats(_ context.Context, field
IsResolver: false,
Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
switch field.Name {
- case "avg":
- return ec.fieldContext_MetricStatistics_avg(ctx, field)
- case "min":
- return ec.fieldContext_MetricStatistics_min(ctx, field)
- case "max":
- return ec.fieldContext_MetricStatistics_max(ctx, field)
- }
- return nil, fmt.Errorf("no field named %q was found under type MetricStatistics", field.Name)
- },
- }
- return fc, nil
-}
-
-func (ec *executionContext) _JobStatsWithScope_name(ctx context.Context, field graphql.CollectedField, obj *model.JobStatsWithScope) (ret graphql.Marshaler) {
- fc, err := ec.fieldContext_JobStatsWithScope_name(ctx, field)
- if err != nil {
- return graphql.Null
- }
- ctx = graphql.WithFieldContext(ctx, fc)
- defer func() {
- if r := recover(); r != nil {
- ec.Error(ctx, ec.Recover(ctx, r))
- ret = graphql.Null
- }
- }()
- resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
- ctx = rctx // use context from middleware stack in children
- return obj.Name, nil
- })
- if err != nil {
- ec.Error(ctx, err)
- return graphql.Null
- }
- if resTmp == nil {
- if !graphql.HasFieldError(ctx, fc) {
- ec.Errorf(ctx, "must not be null")
- }
- return graphql.Null
- }
- res := resTmp.(string)
- fc.Result = res
- return ec.marshalNString2string(ctx, field.Selections, res)
-}
-
-func (ec *executionContext) fieldContext_JobStatsWithScope_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
- fc = &graphql.FieldContext{
- Object: "JobStatsWithScope",
- Field: field,
- IsMethod: false,
- IsResolver: false,
- Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
- return nil, errors.New("field of type String does not have child fields")
- },
- }
- return fc, nil
-}
-
-func (ec *executionContext) _JobStatsWithScope_scope(ctx context.Context, field graphql.CollectedField, obj *model.JobStatsWithScope) (ret graphql.Marshaler) {
- fc, err := ec.fieldContext_JobStatsWithScope_scope(ctx, field)
- if err != nil {
- return graphql.Null
- }
- ctx = graphql.WithFieldContext(ctx, fc)
- defer func() {
- if r := recover(); r != nil {
- ec.Error(ctx, ec.Recover(ctx, r))
- ret = graphql.Null
- }
- }()
- resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
- ctx = rctx // use context from middleware stack in children
- return obj.Scope, nil
- })
- if err != nil {
- ec.Error(ctx, err)
- return graphql.Null
- }
- if resTmp == nil {
- if !graphql.HasFieldError(ctx, fc) {
- ec.Errorf(ctx, "must not be null")
- }
- return graphql.Null
- }
- res := resTmp.(schema.MetricScope)
- fc.Result = res
- return ec.marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScope(ctx, field.Selections, res)
-}
-
-func (ec *executionContext) fieldContext_JobStatsWithScope_scope(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
- fc = &graphql.FieldContext{
- Object: "JobStatsWithScope",
- Field: field,
- IsMethod: false,
- IsResolver: false,
- Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
- return nil, errors.New("field of type MetricScope does not have child fields")
- },
- }
- return fc, nil
-}
-
-func (ec *executionContext) _JobStatsWithScope_stats(ctx context.Context, field graphql.CollectedField, obj *model.JobStatsWithScope) (ret graphql.Marshaler) {
- fc, err := ec.fieldContext_JobStatsWithScope_stats(ctx, field)
- if err != nil {
- return graphql.Null
- }
- ctx = graphql.WithFieldContext(ctx, fc)
- defer func() {
- if r := recover(); r != nil {
- ec.Error(ctx, ec.Recover(ctx, r))
- ret = graphql.Null
- }
- }()
- resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
- ctx = rctx // use context from middleware stack in children
- return obj.Stats, nil
- })
- if err != nil {
- ec.Error(ctx, err)
- return graphql.Null
- }
- if resTmp == nil {
- if !graphql.HasFieldError(ctx, fc) {
- ec.Errorf(ctx, "must not be null")
- }
- return graphql.Null
- }
- res := resTmp.([]*model.ScopedStats)
- fc.Result = res
- return ec.marshalNScopedStats2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐScopedStatsᚄ(ctx, field.Selections, res)
-}
-
-func (ec *executionContext) fieldContext_JobStatsWithScope_stats(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
- fc = &graphql.FieldContext{
- Object: "JobStatsWithScope",
- Field: field,
- IsMethod: false,
- IsResolver: false,
- Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
- switch field.Name {
- case "hostname":
- return ec.fieldContext_ScopedStats_hostname(ctx, field)
- case "id":
- return ec.fieldContext_ScopedStats_id(ctx, field)
+ case "name":
+ return ec.fieldContext_NamedStats_name(ctx, field)
case "data":
- return ec.fieldContext_ScopedStats_data(ctx, field)
+ return ec.fieldContext_NamedStats_data(ctx, field)
}
- return nil, fmt.Errorf("no field named %q was found under type ScopedStats", field.Name)
+ return nil, fmt.Errorf("no field named %q was found under type NamedStats", field.Name)
},
}
return fc, nil
@@ -8355,7 +9087,7 @@ func (ec *executionContext) _MetricConfig_unit(ctx context.Context, field graphq
}
res := resTmp.(schema.Unit)
fc.Result = res
- return ec.marshalNUnit2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐUnit(ctx, field.Selections, res)
+ return ec.marshalNUnit2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐUnit(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_MetricConfig_unit(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -8405,7 +9137,7 @@ func (ec *executionContext) _MetricConfig_scope(ctx context.Context, field graph
}
res := resTmp.(schema.MetricScope)
fc.Result = res
- return ec.marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScope(ctx, field.Selections, res)
+ return ec.marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricScope(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_MetricConfig_scope(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -8751,7 +9483,7 @@ func (ec *executionContext) _MetricConfig_subClusters(ctx context.Context, field
}
res := resTmp.([]*schema.SubClusterConfig)
fc.Result = res
- return ec.marshalNSubClusterConfig2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐSubClusterConfigᚄ(ctx, field.Selections, res)
+ return ec.marshalNSubClusterConfig2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐSubClusterConfigᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_MetricConfig_subClusters(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -8853,7 +9585,7 @@ func (ec *executionContext) _MetricFootprints_data(ctx context.Context, field gr
}
res := resTmp.([]schema.Float)
fc.Result = res
- return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
+ return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_MetricFootprints_data(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -9417,7 +10149,7 @@ func (ec *executionContext) _MetricValue_unit(ctx context.Context, field graphql
}
res := resTmp.(schema.Unit)
fc.Result = res
- return ec.marshalNUnit2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐUnit(ctx, field.Selections, res)
+ return ec.marshalNUnit2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐUnit(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_MetricValue_unit(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -9511,7 +10243,7 @@ func (ec *executionContext) _Mutation_createTag(ctx context.Context, field graph
}
res := resTmp.(*schema.Tag)
fc.Result = res
- return ec.marshalNTag2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTag(ctx, field.Selections, res)
+ return ec.marshalNTag2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐTag(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_Mutation_createTag(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -9631,7 +10363,7 @@ func (ec *executionContext) _Mutation_addTagsToJob(ctx context.Context, field gr
}
res := resTmp.([]*schema.Tag)
fc.Result = res
- return ec.marshalNTag2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTagᚄ(ctx, field.Selections, res)
+ return ec.marshalNTag2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐTagᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_Mutation_addTagsToJob(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -9696,7 +10428,7 @@ func (ec *executionContext) _Mutation_removeTagsFromJob(ctx context.Context, fie
}
res := resTmp.([]*schema.Tag)
fc.Result = res
- return ec.marshalNTag2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTagᚄ(ctx, field.Selections, res)
+ return ec.marshalNTag2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐTagᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_Mutation_removeTagsFromJob(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -9840,6 +10572,547 @@ func (ec *executionContext) fieldContext_Mutation_updateConfiguration(ctx contex
return fc, nil
}
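+// NamedStats pairs a metric name with its MetricStatistics (avg/min/max);
+// NamedStatsWithScope additionally carries a MetricScope plus per-host
+// ScopedStats entries. The marshalers below are generated for both types.
+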
+func (ec *executionContext) _NamedStats_name(ctx context.Context, field graphql.CollectedField, obj *model.NamedStats) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_NamedStats_name(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Name, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_NamedStats_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "NamedStats",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _NamedStats_data(ctx context.Context, field graphql.CollectedField, obj *model.NamedStats) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_NamedStats_data(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Data, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(*schema.MetricStatistics)
+ fc.Result = res
+ return ec.marshalNMetricStatistics2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricStatistics(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_NamedStats_data(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "NamedStats",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "avg":
+ return ec.fieldContext_MetricStatistics_avg(ctx, field)
+ case "min":
+ return ec.fieldContext_MetricStatistics_min(ctx, field)
+ case "max":
+ return ec.fieldContext_MetricStatistics_max(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type MetricStatistics", field.Name)
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _NamedStatsWithScope_name(ctx context.Context, field graphql.CollectedField, obj *model.NamedStatsWithScope) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_NamedStatsWithScope_name(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Name, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_NamedStatsWithScope_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "NamedStatsWithScope",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _NamedStatsWithScope_scope(ctx context.Context, field graphql.CollectedField, obj *model.NamedStatsWithScope) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_NamedStatsWithScope_scope(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Scope, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(schema.MetricScope)
+ fc.Result = res
+ return ec.marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricScope(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_NamedStatsWithScope_scope(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "NamedStatsWithScope",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type MetricScope does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _NamedStatsWithScope_stats(ctx context.Context, field graphql.CollectedField, obj *model.NamedStatsWithScope) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_NamedStatsWithScope_stats(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Stats, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.([]*model.ScopedStats)
+ fc.Result = res
+ return ec.marshalNScopedStats2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐScopedStatsᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_NamedStatsWithScope_stats(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "NamedStatsWithScope",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "hostname":
+ return ec.fieldContext_ScopedStats_hostname(ctx, field)
+ case "id":
+ return ec.fieldContext_ScopedStats_id(ctx, field)
+ case "data":
+ return ec.fieldContext_ScopedStats_data(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type ScopedStats", field.Name)
+ },
+ }
+ return fc, nil
+}
+
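+// The Node marshalers below follow the usual gqlgen split: plain struct
+// fields (id, hostname, cluster, subCluster) are read directly from
+// *schema.Node, while nodeState, HealthState and metaData are delegated to
+// the hand-written resolvers behind ec.resolvers.Node() (IsResolver: true).
+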
+func (ec *executionContext) _Node_id(ctx context.Context, field graphql.CollectedField, obj *schema.Node) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Node_id(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ID, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(int64)
+ fc.Result = res
+ return ec.marshalNID2int64(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Node_id(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Node",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type ID does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Node_hostname(ctx context.Context, field graphql.CollectedField, obj *schema.Node) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Node_hostname(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Hostname, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Node_hostname(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Node",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Node_cluster(ctx context.Context, field graphql.CollectedField, obj *schema.Node) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Node_cluster(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Cluster, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Node_cluster(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Node",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Node_subCluster(ctx context.Context, field graphql.CollectedField, obj *schema.Node) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Node_subCluster(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.SubCluster, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Node_subCluster(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Node",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Node_nodeState(ctx context.Context, field graphql.CollectedField, obj *schema.Node) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Node_nodeState(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Node().NodeState(rctx, obj)
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNNodeState2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Node_nodeState(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Node",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type NodeState does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Node_HealthState(ctx context.Context, field graphql.CollectedField, obj *schema.Node) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Node_HealthState(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Node().HealthState(rctx, obj)
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(schema.NodeState)
+ fc.Result = res
+ return ec.marshalNMonitoringState2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐNodeState(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Node_HealthState(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Node",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type MonitoringState does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Node_metaData(ctx context.Context, field graphql.CollectedField, obj *schema.Node) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Node_metaData(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Node().MetaData(rctx, obj)
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(any)
+ fc.Result = res
+ return ec.marshalOAny2interface(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Node_metaData(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Node",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type Any does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
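+// Illustrative only, not generated code: the delegated resolvers referenced
+// above could be implemented along these lines, assuming *schema.Node carries
+// matching NodeState and HealthState fields (return types taken from the
+// marshal calls above):
+//
+//	func (r *nodeResolver) NodeState(ctx context.Context, node *schema.Node) (string, error) {
+//		return string(node.NodeState), nil
+//	}
+//
+//	func (r *nodeResolver) HealthState(ctx context.Context, node *schema.Node) (schema.NodeState, error) {
+//		return node.HealthState, nil
+//	}
+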
func (ec *executionContext) _NodeMetrics_host(ctx context.Context, field graphql.CollectedField, obj *model.NodeMetrics) (ret graphql.Marshaler) {
fc, err := ec.fieldContext_NodeMetrics_host(ctx, field)
if err != nil {
@@ -9980,6 +11253,195 @@ func (ec *executionContext) fieldContext_NodeMetrics_metrics(_ context.Context,
return fc, nil
}
+func (ec *executionContext) _NodeStateResultList_items(ctx context.Context, field graphql.CollectedField, obj *model.NodeStateResultList) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_NodeStateResultList_items(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Items, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.([]*schema.Node)
+ fc.Result = res
+ return ec.marshalNNode2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐNodeᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_NodeStateResultList_items(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "NodeStateResultList",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "id":
+ return ec.fieldContext_Node_id(ctx, field)
+ case "hostname":
+ return ec.fieldContext_Node_hostname(ctx, field)
+ case "cluster":
+ return ec.fieldContext_Node_cluster(ctx, field)
+ case "subCluster":
+ return ec.fieldContext_Node_subCluster(ctx, field)
+ case "nodeState":
+ return ec.fieldContext_Node_nodeState(ctx, field)
+ case "HealthState":
+ return ec.fieldContext_Node_HealthState(ctx, field)
+ case "metaData":
+ return ec.fieldContext_Node_metaData(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type Node", field.Name)
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _NodeStateResultList_count(ctx context.Context, field graphql.CollectedField, obj *model.NodeStateResultList) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_NodeStateResultList_count(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Count, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*int)
+ fc.Result = res
+ return ec.marshalOInt2ᚖint(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_NodeStateResultList_count(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "NodeStateResultList",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type Int does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _NodeStats_state(ctx context.Context, field graphql.CollectedField, obj *model.NodeStats) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_NodeStats_state(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.State, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_NodeStats_state(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "NodeStats",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _NodeStats_count(ctx context.Context, field graphql.CollectedField, obj *model.NodeStats) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_NodeStats_count(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Count, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(int)
+ fc.Result = res
+ return ec.marshalNInt2int(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_NodeStats_count(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "NodeStats",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type Int does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
func (ec *executionContext) _NodesResultList_items(ctx context.Context, field graphql.CollectedField, obj *model.NodesResultList) (ret graphql.Marshaler) {
fc, err := ec.fieldContext_NodesResultList_items(ctx, field)
if err != nil {
@@ -10265,7 +11727,7 @@ func (ec *executionContext) _Query_clusters(ctx context.Context, field graphql.C
}
res := resTmp.([]*schema.Cluster)
fc.Result = res
- return ec.marshalNCluster2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐClusterᚄ(ctx, field.Selections, res)
+ return ec.marshalNCluster2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐClusterᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_Query_clusters(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -10317,7 +11779,7 @@ func (ec *executionContext) _Query_tags(ctx context.Context, field graphql.Colle
}
res := resTmp.([]*schema.Tag)
fc.Result = res
- return ec.marshalNTag2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTagᚄ(ctx, field.Selections, res)
+ return ec.marshalNTag2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐTagᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_Query_tags(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -10371,7 +11833,7 @@ func (ec *executionContext) _Query_globalMetrics(ctx context.Context, field grap
}
res := resTmp.([]*schema.GlobalMetricListItem)
fc.Result = res
- return ec.marshalNGlobalMetricListItem2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐGlobalMetricListItemᚄ(ctx, field.Selections, res)
+ return ec.marshalNGlobalMetricListItem2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐGlobalMetricListItemᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_Query_globalMetrics(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -10520,6 +11982,196 @@ func (ec *executionContext) fieldContext_Query_allocatedNodes(ctx context.Contex
return fc, nil
}
+func (ec *executionContext) _Query_node(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query_node(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Query().Node(rctx, fc.Args["id"].(string))
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*schema.Node)
+ fc.Result = res
+ return ec.marshalONode2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐNode(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Query_node(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "id":
+ return ec.fieldContext_Node_id(ctx, field)
+ case "hostname":
+ return ec.fieldContext_Node_hostname(ctx, field)
+ case "cluster":
+ return ec.fieldContext_Node_cluster(ctx, field)
+ case "subCluster":
+ return ec.fieldContext_Node_subCluster(ctx, field)
+ case "nodeState":
+ return ec.fieldContext_Node_nodeState(ctx, field)
+ case "HealthState":
+ return ec.fieldContext_Node_HealthState(ctx, field)
+ case "metaData":
+ return ec.fieldContext_Node_metaData(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type Node", field.Name)
+ },
+ }
+ defer func() {
+ if r := recover(); r != nil {
+ err = ec.Recover(ctx, r)
+ ec.Error(ctx, err)
+ }
+ }()
+ ctx = graphql.WithFieldContext(ctx, fc)
+ if fc.Args, err = ec.field_Query_node_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
+ ec.Error(ctx, err)
+ return fc, err
+ }
+ return fc, nil
+}
+
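+// Illustrative query for the resolver above, using the generated field names:
+//
+//	query {
+//	    node(id: "42") { hostname nodeState HealthState }
+//	}
+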
+func (ec *executionContext) _Query_nodes(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query_nodes(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Query().Nodes(rctx, fc.Args["filter"].([]*model.NodeFilter), fc.Args["order"].(*model.OrderByInput))
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(*model.NodeStateResultList)
+ fc.Result = res
+ return ec.marshalNNodeStateResultList2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodeStateResultList(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Query_nodes(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "items":
+ return ec.fieldContext_NodeStateResultList_items(ctx, field)
+ case "count":
+ return ec.fieldContext_NodeStateResultList_count(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type NodeStateResultList", field.Name)
+ },
+ }
+ defer func() {
+ if r := recover(); r != nil {
+ err = ec.Recover(ctx, r)
+ ec.Error(ctx, err)
+ }
+ }()
+ ctx = graphql.WithFieldContext(ctx, fc)
+ if fc.Args, err = ec.field_Query_nodes_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
+ ec.Error(ctx, err)
+ return fc, err
+ }
+ return fc, nil
+}
+
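+// Illustrative query; the NodeFilter fields shown here are assumptions, while
+// the filter/order arguments and the result shape are as generated:
+//
+//	query {
+//	    nodes(filter: [{cluster: {eq: "fritz"}}]) {
+//	        items { hostname nodeState }
+//	        count
+//	    }
+//	}
+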
+func (ec *executionContext) _Query_nodeStats(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query_nodeStats(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Query().NodeStats(rctx, fc.Args["filter"].([]*model.NodeFilter))
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.([]*model.NodeStats)
+ fc.Result = res
+ return ec.marshalNNodeStats2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodeStatsᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Query_nodeStats(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "state":
+ return ec.fieldContext_NodeStats_state(ctx, field)
+ case "count":
+ return ec.fieldContext_NodeStats_count(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type NodeStats", field.Name)
+ },
+ }
+ defer func() {
+ if r := recover(); r != nil {
+ err = ec.Recover(ctx, r)
+ ec.Error(ctx, err)
+ }
+ }()
+ ctx = graphql.WithFieldContext(ctx, fc)
+ if fc.Args, err = ec.field_Query_nodeStats_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
+ ec.Error(ctx, err)
+ return fc, err
+ }
+ return fc, nil
+}
+
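+// Illustrative query: nodeStats returns (state, count) pairs for the nodes
+// matching the filter.
+//
+//	query {
+//	    nodeStats(filter: []) { state count }
+//	}
+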
func (ec *executionContext) _Query_job(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
fc, err := ec.fieldContext_Query_job(ctx, field)
if err != nil {
@@ -10545,7 +12197,7 @@ func (ec *executionContext) _Query_job(ctx context.Context, field graphql.Collec
}
res := resTmp.(*schema.Job)
fc.Result = res
- return ec.marshalOJob2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJob(ctx, field.Selections, res)
+ return ec.marshalOJob2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐJob(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_Query_job(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -10715,9 +12367,9 @@ func (ec *executionContext) _Query_jobStats(ctx context.Context, field graphql.C
}
return graphql.Null
}
- res := resTmp.([]*model.JobStats)
+ res := resTmp.([]*model.NamedStats)
fc.Result = res
- return ec.marshalNJobStats2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobStatsᚄ(ctx, field.Selections, res)
+ return ec.marshalNNamedStats2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNamedStatsᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_Query_jobStats(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -10729,11 +12381,11 @@ func (ec *executionContext) fieldContext_Query_jobStats(ctx context.Context, fie
Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
switch field.Name {
case "name":
- return ec.fieldContext_JobStats_name(ctx, field)
- case "stats":
- return ec.fieldContext_JobStats_stats(ctx, field)
+ return ec.fieldContext_NamedStats_name(ctx, field)
+ case "data":
+ return ec.fieldContext_NamedStats_data(ctx, field)
}
- return nil, fmt.Errorf("no field named %q was found under type JobStats", field.Name)
+ return nil, fmt.Errorf("no field named %q was found under type NamedStats", field.Name)
},
}
defer func() {
@@ -10776,9 +12428,9 @@ func (ec *executionContext) _Query_scopedJobStats(ctx context.Context, field gra
}
return graphql.Null
}
- res := resTmp.([]*model.JobStatsWithScope)
+ res := resTmp.([]*model.NamedStatsWithScope)
fc.Result = res
- return ec.marshalNJobStatsWithScope2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobStatsWithScopeᚄ(ctx, field.Selections, res)
+ return ec.marshalNNamedStatsWithScope2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNamedStatsWithScopeᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_Query_scopedJobStats(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -10790,13 +12442,13 @@ func (ec *executionContext) fieldContext_Query_scopedJobStats(ctx context.Contex
Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
switch field.Name {
case "name":
- return ec.fieldContext_JobStatsWithScope_name(ctx, field)
+ return ec.fieldContext_NamedStatsWithScope_name(ctx, field)
case "scope":
- return ec.fieldContext_JobStatsWithScope_scope(ctx, field)
+ return ec.fieldContext_NamedStatsWithScope_scope(ctx, field)
case "stats":
- return ec.fieldContext_JobStatsWithScope_stats(ctx, field)
+ return ec.fieldContext_NamedStatsWithScope_stats(ctx, field)
}
- return nil, fmt.Errorf("no field named %q was found under type JobStatsWithScope", field.Name)
+ return nil, fmt.Errorf("no field named %q was found under type NamedStatsWithScope", field.Name)
},
}
defer func() {
@@ -10813,64 +12465,6 @@ func (ec *executionContext) fieldContext_Query_scopedJobStats(ctx context.Contex
return fc, nil
}
-func (ec *executionContext) _Query_jobsFootprints(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
- fc, err := ec.fieldContext_Query_jobsFootprints(ctx, field)
- if err != nil {
- return graphql.Null
- }
- ctx = graphql.WithFieldContext(ctx, fc)
- defer func() {
- if r := recover(); r != nil {
- ec.Error(ctx, ec.Recover(ctx, r))
- ret = graphql.Null
- }
- }()
- resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
- ctx = rctx // use context from middleware stack in children
- return ec.resolvers.Query().JobsFootprints(rctx, fc.Args["filter"].([]*model.JobFilter), fc.Args["metrics"].([]string))
- })
- if err != nil {
- ec.Error(ctx, err)
- return graphql.Null
- }
- if resTmp == nil {
- return graphql.Null
- }
- res := resTmp.(*model.Footprints)
- fc.Result = res
- return ec.marshalOFootprints2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐFootprints(ctx, field.Selections, res)
-}
-
-func (ec *executionContext) fieldContext_Query_jobsFootprints(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
- fc = &graphql.FieldContext{
- Object: "Query",
- Field: field,
- IsMethod: true,
- IsResolver: true,
- Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
- switch field.Name {
- case "timeWeights":
- return ec.fieldContext_Footprints_timeWeights(ctx, field)
- case "metrics":
- return ec.fieldContext_Footprints_metrics(ctx, field)
- }
- return nil, fmt.Errorf("no field named %q was found under type Footprints", field.Name)
- },
- }
- defer func() {
- if r := recover(); r != nil {
- err = ec.Recover(ctx, r)
- ec.Error(ctx, err)
- }
- }()
- ctx = graphql.WithFieldContext(ctx, fc)
- if fc.Args, err = ec.field_Query_jobsFootprints_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
- ec.Error(ctx, err)
- return fc, err
- }
- return fc, nil
-}
-
func (ec *executionContext) _Query_jobs(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
fc, err := ec.fieldContext_Query_jobs(ctx, field)
if err != nil {
@@ -11029,6 +12623,141 @@ func (ec *executionContext) fieldContext_Query_jobsStatistics(ctx context.Contex
return fc, nil
}
+func (ec *executionContext) _Query_jobsMetricStats(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query_jobsMetricStats(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Query().JobsMetricStats(rctx, fc.Args["filter"].([]*model.JobFilter), fc.Args["metrics"].([]string))
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.([]*model.JobStats)
+ fc.Result = res
+ return ec.marshalNJobStats2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobStatsᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Query_jobsMetricStats(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "id":
+ return ec.fieldContext_JobStats_id(ctx, field)
+ case "jobId":
+ return ec.fieldContext_JobStats_jobId(ctx, field)
+ case "startTime":
+ return ec.fieldContext_JobStats_startTime(ctx, field)
+ case "duration":
+ return ec.fieldContext_JobStats_duration(ctx, field)
+ case "cluster":
+ return ec.fieldContext_JobStats_cluster(ctx, field)
+ case "subCluster":
+ return ec.fieldContext_JobStats_subCluster(ctx, field)
+ case "numNodes":
+ return ec.fieldContext_JobStats_numNodes(ctx, field)
+ case "numHWThreads":
+ return ec.fieldContext_JobStats_numHWThreads(ctx, field)
+ case "numAccelerators":
+ return ec.fieldContext_JobStats_numAccelerators(ctx, field)
+ case "stats":
+ return ec.fieldContext_JobStats_stats(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type JobStats", field.Name)
+ },
+ }
+ defer func() {
+ if r := recover(); r != nil {
+ err = ec.Recover(ctx, r)
+ ec.Error(ctx, err)
+ }
+ }()
+ ctx = graphql.WithFieldContext(ctx, fc)
+ if fc.Args, err = ec.field_Query_jobsMetricStats_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
+ ec.Error(ctx, err)
+ return fc, err
+ }
+ return fc, nil
+}
+
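+// Illustrative query; the metric name is an assumption, the selectable fields
+// are as generated for JobStats and NamedStats:
+//
+//	query {
+//	    jobsMetricStats(filter: [], metrics: ["flops_any"]) {
+//	        jobId
+//	        stats { name data { avg min max } }
+//	    }
+//	}
+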
+func (ec *executionContext) _Query_jobsFootprints(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query_jobsFootprints(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Query().JobsFootprints(rctx, fc.Args["filter"].([]*model.JobFilter), fc.Args["metrics"].([]string))
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*model.Footprints)
+ fc.Result = res
+ return ec.marshalOFootprints2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐFootprints(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Query_jobsFootprints(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "timeWeights":
+ return ec.fieldContext_Footprints_timeWeights(ctx, field)
+ case "metrics":
+ return ec.fieldContext_Footprints_metrics(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type Footprints", field.Name)
+ },
+ }
+ defer func() {
+ if r := recover(); r != nil {
+ err = ec.Recover(ctx, r)
+ ec.Error(ctx, err)
+ }
+ }()
+ ctx = graphql.WithFieldContext(ctx, fc)
+ if fc.Args, err = ec.field_Query_jobsFootprints_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
+ ec.Error(ctx, err)
+ return fc, err
+ }
+ return fc, nil
+}
+
func (ec *executionContext) _Query_rooflineHeatmap(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
fc, err := ec.fieldContext_Query_rooflineHeatmap(ctx, field)
if err != nil {
@@ -11627,7 +13356,7 @@ func (ec *executionContext) _ScopedStats_data(ctx context.Context, field graphql
}
res := resTmp.(*schema.MetricStatistics)
fc.Result = res
- return ec.marshalNMetricStatistics2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricStatistics(ctx, field.Selections, res)
+ return ec.marshalNMetricStatistics2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricStatistics(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_ScopedStats_data(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -11761,7 +13490,7 @@ func (ec *executionContext) _Series_statistics(ctx context.Context, field graphq
}
res := resTmp.(schema.MetricStatistics)
fc.Result = res
- return ec.marshalOMetricStatistics2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricStatistics(ctx, field.Selections, res)
+ return ec.marshalOMetricStatistics2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricStatistics(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_Series_statistics(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -11813,7 +13542,7 @@ func (ec *executionContext) _Series_data(ctx context.Context, field graphql.Coll
}
res := resTmp.([]schema.Float)
fc.Result = res
- return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
+ return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_Series_data(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -11857,7 +13586,7 @@ func (ec *executionContext) _StatsSeries_mean(ctx context.Context, field graphql
}
res := resTmp.([]schema.Float)
fc.Result = res
- return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
+ return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_StatsSeries_mean(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -11901,7 +13630,7 @@ func (ec *executionContext) _StatsSeries_median(ctx context.Context, field graph
}
res := resTmp.([]schema.Float)
fc.Result = res
- return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
+ return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_StatsSeries_median(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -11945,7 +13674,7 @@ func (ec *executionContext) _StatsSeries_min(ctx context.Context, field graphql.
}
res := resTmp.([]schema.Float)
fc.Result = res
- return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
+ return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_StatsSeries_min(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -11989,7 +13718,7 @@ func (ec *executionContext) _StatsSeries_max(ctx context.Context, field graphql.
}
res := resTmp.([]schema.Float)
fc.Result = res
- return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
+ return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_StatsSeries_max(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -12341,7 +14070,7 @@ func (ec *executionContext) _SubCluster_flopRateScalar(ctx context.Context, fiel
}
res := resTmp.(schema.MetricValue)
fc.Result = res
- return ec.marshalNMetricValue2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricValue(ctx, field.Selections, res)
+ return ec.marshalNMetricValue2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricValue(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_SubCluster_flopRateScalar(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -12393,7 +14122,7 @@ func (ec *executionContext) _SubCluster_flopRateSimd(ctx context.Context, field
}
res := resTmp.(schema.MetricValue)
fc.Result = res
- return ec.marshalNMetricValue2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricValue(ctx, field.Selections, res)
+ return ec.marshalNMetricValue2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricValue(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_SubCluster_flopRateSimd(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -12445,7 +14174,7 @@ func (ec *executionContext) _SubCluster_memoryBandwidth(ctx context.Context, fie
}
res := resTmp.(schema.MetricValue)
fc.Result = res
- return ec.marshalNMetricValue2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricValue(ctx, field.Selections, res)
+ return ec.marshalNMetricValue2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricValue(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_SubCluster_memoryBandwidth(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -12497,7 +14226,7 @@ func (ec *executionContext) _SubCluster_topology(ctx context.Context, field grap
}
res := resTmp.(schema.Topology)
fc.Result = res
- return ec.marshalNTopology2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTopology(ctx, field.Selections, res)
+ return ec.marshalNTopology2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐTopology(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_SubCluster_topology(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -12555,7 +14284,7 @@ func (ec *executionContext) _SubCluster_metricConfig(ctx context.Context, field
}
res := resTmp.([]schema.MetricConfig)
fc.Result = res
- return ec.marshalNMetricConfig2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricConfigᚄ(ctx, field.Selections, res)
+ return ec.marshalNMetricConfig2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricConfigᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_SubCluster_metricConfig(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -13221,7 +14950,7 @@ func (ec *executionContext) _TimeWeights_nodeHours(ctx context.Context, field gr
}
res := resTmp.([]schema.Float)
fc.Result = res
- return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
+ return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_TimeWeights_nodeHours(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -13265,7 +14994,7 @@ func (ec *executionContext) _TimeWeights_accHours(ctx context.Context, field gra
}
res := resTmp.([]schema.Float)
fc.Result = res
- return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
+ return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_TimeWeights_accHours(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -13309,7 +15038,7 @@ func (ec *executionContext) _TimeWeights_coreHours(ctx context.Context, field gr
}
res := resTmp.([]schema.Float)
fc.Result = res
- return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
+ return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_TimeWeights_coreHours(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -13555,7 +15284,7 @@ func (ec *executionContext) _Topology_accelerators(ctx context.Context, field gr
}
res := resTmp.([]*schema.Accelerator)
fc.Result = res
- return ec.marshalOAccelerator2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐAcceleratorᚄ(ctx, field.Selections, res)
+ return ec.marshalOAccelerator2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐAcceleratorᚄ(ctx, field.Selections, res)
}
func (ec *executionContext) fieldContext_Topology_accelerators(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -15822,7 +17551,7 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj any
asMap[k] = v
}
- fieldsInOrder := [...]string{"tags", "jobId", "arrayJobId", "user", "project", "jobName", "cluster", "partition", "duration", "energy", "minRunningFor", "numNodes", "numAccelerators", "numHWThreads", "startTime", "state", "metricStats", "exclusive", "node"}
+ fieldsInOrder := [...]string{"tags", "dbId", "jobId", "arrayJobId", "user", "project", "jobName", "cluster", "partition", "duration", "energy", "minRunningFor", "numNodes", "numAccelerators", "numHWThreads", "startTime", "state", "metricStats", "exclusive", "node"}
for _, k := range fieldsInOrder {
v, ok := asMap[k]
if !ok {
@@ -15836,6 +17565,13 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj any
return it, err
}
it.Tags = data
+ case "dbId":
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("dbId"))
+ data, err := ec.unmarshalOID2ᚕstringᚄ(ctx, v)
+ if err != nil {
+ return it, err
+ }
+ it.DbID = data
case "jobId":
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("jobId"))
data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v)
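
// A minimal client-side sketch of the new "dbId" filter added in the hunk
// above, which unmarshalInputJobFilter decodes as a list of ID strings
// (unmarshalOID2ᚕstringᚄ). The query text and the `count` result field are
// assumptions inferred from this diff, not documented API examples.
package main

import (
	"encoding/json"
	"fmt"
)

const jobsByDbID = `query ($filter: [JobFilter!]) {
  jobs(filter: $filter) { count }
}`

func main() {
	vars := map[string]any{
		"filter": []map[string]any{{"dbId": []string{"1337"}}},
	}
	body, err := json.Marshal(map[string]any{"query": jobsByDbID, "variables": vars})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // POST this to the GraphQL endpoint
}
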
@@ -15887,7 +17623,7 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj any
it.Partition = data
case "duration":
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("duration"))
- data, err := ec.unmarshalOIntRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐIntRange(ctx, v)
+ data, err := ec.unmarshalOIntRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐIntRange(ctx, v)
if err != nil {
return it, err
}
@@ -15908,35 +17644,35 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj any
it.MinRunningFor = data
case "numNodes":
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("numNodes"))
- data, err := ec.unmarshalOIntRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐIntRange(ctx, v)
+ data, err := ec.unmarshalOIntRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐIntRange(ctx, v)
if err != nil {
return it, err
}
it.NumNodes = data
case "numAccelerators":
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("numAccelerators"))
- data, err := ec.unmarshalOIntRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐIntRange(ctx, v)
+ data, err := ec.unmarshalOIntRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐIntRange(ctx, v)
if err != nil {
return it, err
}
it.NumAccelerators = data
case "numHWThreads":
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("numHWThreads"))
- data, err := ec.unmarshalOIntRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐIntRange(ctx, v)
+ data, err := ec.unmarshalOIntRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐIntRange(ctx, v)
if err != nil {
return it, err
}
it.NumHWThreads = data
case "startTime":
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("startTime"))
- data, err := ec.unmarshalOTimeRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTimeRange(ctx, v)
+ data, err := ec.unmarshalOTimeRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐTimeRange(ctx, v)
if err != nil {
return it, err
}
it.StartTime = data
case "state":
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("state"))
- data, err := ec.unmarshalOJobState2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJobStateᚄ(ctx, v)
+ data, err := ec.unmarshalOJobState2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐJobStateᚄ(ctx, v)
if err != nil {
return it, err
}
@@ -16002,6 +17738,54 @@ func (ec *executionContext) unmarshalInputMetricStatItem(ctx context.Context, ob
return it, nil
}
+func (ec *executionContext) unmarshalInputNodeFilter(ctx context.Context, obj any) (model.NodeFilter, error) {
+ var it model.NodeFilter
+ asMap := map[string]any{}
+ for k, v := range obj.(map[string]any) {
+ asMap[k] = v
+ }
+
+ fieldsInOrder := [...]string{"hostname", "cluster", "nodeState", "healthState"}
+ for _, k := range fieldsInOrder {
+ v, ok := asMap[k]
+ if !ok {
+ continue
+ }
+ switch k {
+ case "hostname":
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("hostname"))
+ data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v)
+ if err != nil {
+ return it, err
+ }
+ it.Hostname = data
+ case "cluster":
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("cluster"))
+ data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v)
+ if err != nil {
+ return it, err
+ }
+ it.Cluster = data
+ case "nodeState":
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("nodeState"))
+ data, err := ec.unmarshalONodeState2ᚖstring(ctx, v)
+ if err != nil {
+ return it, err
+ }
+ it.NodeState = data
+ case "healthState":
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("healthState"))
+ data, err := ec.unmarshalOMonitoringState2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐNodeState(ctx, v)
+ if err != nil {
+ return it, err
+ }
+ it.HealthState = data
+ }
+ }
+
+ return it, nil
+}
+
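
// unmarshalInputNodeFilter above follows gqlgen's standard input template:
// walk a fixed fieldsInOrder list so decoding order is deterministic,
// unknown keys are ignored, and absent keys leave the target field nil.
// A reduced, self-contained sketch of that pattern (local stand-in type,
// not internal/graph/model.NodeFilter; healthState omitted):
package main

import "fmt"

type nodeFilter struct {
	Hostname, Cluster, NodeState *string
}

func unmarshalNodeFilter(obj map[string]any) (nodeFilter, error) {
	var it nodeFilter
	// fixed order, exactly like fieldsInOrder above
	fieldsInOrder := [...]string{"hostname", "cluster", "nodeState"}
	for _, k := range fieldsInOrder {
		v, ok := obj[k]
		if !ok {
			continue // optional input field: leave the pointer nil
		}
		s, ok := v.(string)
		if !ok {
			return it, fmt.Errorf("%s: expected string, got %T", k, v)
		}
		switch k {
		case "hostname":
			it.Hostname = &s
		case "cluster":
			it.Cluster = &s
		case "nodeState":
			it.NodeState = &s
		}
	}
	return it, nil
}

func main() {
	f, err := unmarshalNodeFilter(map[string]any{"cluster": "fritz", "nodeState": "allocated"})
	fmt.Println(*f.Cluster, *f.NodeState, err)
}
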
func (ec *executionContext) unmarshalInputOrderByInput(ctx context.Context, obj any) (model.OrderByInput, error) {
var it model.OrderByInput
asMap := map[string]any{}
@@ -16737,10 +18521,41 @@ func (ec *executionContext) _Job(ctx context.Context, sel ast.SelectionSet, obj
atomic.AddUint32(&out.Invalids, 1)
}
case "startTime":
- out.Values[i] = ec._Job_startTime(ctx, field, obj)
- if out.Values[i] == graphql.Null {
- atomic.AddUint32(&out.Invalids, 1)
+ field := field
+
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Job_startTime(ctx, field, obj)
+ if res == graphql.Null {
+ atomic.AddUint32(&fs.Invalids, 1)
+ }
+ return res
}
+
+ if field.Deferrable != nil {
+ dfs, ok := deferred[field.Deferrable.Label]
+ di := 0
+ if ok {
+ dfs.AddField(field)
+ di = len(dfs.Values) - 1
+ } else {
+ dfs = graphql.NewFieldSet([]graphql.CollectedField{field})
+ deferred[field.Deferrable.Label] = dfs
+ }
+ dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler {
+ return innerFunc(ctx, dfs)
+ })
+
+ // don't run the out.Concurrently() call below
+ out.Values[i] = graphql.Null
+ continue
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
case "duration":
out.Values[i] = ec._Job_duration(ctx, field, obj)
if out.Values[i] == graphql.Null {
@@ -17269,11 +19084,45 @@ func (ec *executionContext) _JobStats(ctx context.Context, sel ast.SelectionSet,
switch field.Name {
case "__typename":
out.Values[i] = graphql.MarshalString("JobStats")
- case "name":
- out.Values[i] = ec._JobStats_name(ctx, field, obj)
+ case "id":
+ out.Values[i] = ec._JobStats_id(ctx, field, obj)
if out.Values[i] == graphql.Null {
out.Invalids++
}
+ case "jobId":
+ out.Values[i] = ec._JobStats_jobId(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "startTime":
+ out.Values[i] = ec._JobStats_startTime(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "duration":
+ out.Values[i] = ec._JobStats_duration(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "cluster":
+ out.Values[i] = ec._JobStats_cluster(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "subCluster":
+ out.Values[i] = ec._JobStats_subCluster(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "numNodes":
+ out.Values[i] = ec._JobStats_numNodes(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "numHWThreads":
+ out.Values[i] = ec._JobStats_numHWThreads(ctx, field, obj)
+ case "numAccelerators":
+ out.Values[i] = ec._JobStats_numAccelerators(ctx, field, obj)
case "stats":
out.Values[i] = ec._JobStats_stats(ctx, field, obj)
if out.Values[i] == graphql.Null {
@@ -17302,55 +19151,6 @@ func (ec *executionContext) _JobStats(ctx context.Context, sel ast.SelectionSet,
return out
}
-var jobStatsWithScopeImplementors = []string{"JobStatsWithScope"}
-
-func (ec *executionContext) _JobStatsWithScope(ctx context.Context, sel ast.SelectionSet, obj *model.JobStatsWithScope) graphql.Marshaler {
- fields := graphql.CollectFields(ec.OperationContext, sel, jobStatsWithScopeImplementors)
-
- out := graphql.NewFieldSet(fields)
- deferred := make(map[string]*graphql.FieldSet)
- for i, field := range fields {
- switch field.Name {
- case "__typename":
- out.Values[i] = graphql.MarshalString("JobStatsWithScope")
- case "name":
- out.Values[i] = ec._JobStatsWithScope_name(ctx, field, obj)
- if out.Values[i] == graphql.Null {
- out.Invalids++
- }
- case "scope":
- out.Values[i] = ec._JobStatsWithScope_scope(ctx, field, obj)
- if out.Values[i] == graphql.Null {
- out.Invalids++
- }
- case "stats":
- out.Values[i] = ec._JobStatsWithScope_stats(ctx, field, obj)
- if out.Values[i] == graphql.Null {
- out.Invalids++
- }
- default:
- panic("unknown field " + strconv.Quote(field.Name))
- }
- }
- out.Dispatch(ctx)
- if out.Invalids > 0 {
- return graphql.Null
- }
-
- atomic.AddInt32(&ec.deferred, int32(len(deferred)))
-
- for label, dfs := range deferred {
- ec.processDeferredGroup(graphql.DeferredGroup{
- Label: label,
- Path: graphql.GetPath(ctx),
- FieldSet: dfs,
- Context: ctx,
- })
- }
-
- return out
-}
-
var jobsStatisticsImplementors = []string{"JobsStatistics"}
func (ec *executionContext) _JobsStatistics(ctx context.Context, sel ast.SelectionSet, obj *model.JobsStatistics) graphql.Marshaler {
@@ -17897,6 +19697,258 @@ func (ec *executionContext) _Mutation(ctx context.Context, sel ast.SelectionSet)
return out
}
+var namedStatsImplementors = []string{"NamedStats"}
+
+func (ec *executionContext) _NamedStats(ctx context.Context, sel ast.SelectionSet, obj *model.NamedStats) graphql.Marshaler {
+ fields := graphql.CollectFields(ec.OperationContext, sel, namedStatsImplementors)
+
+ out := graphql.NewFieldSet(fields)
+ deferred := make(map[string]*graphql.FieldSet)
+ for i, field := range fields {
+ switch field.Name {
+ case "__typename":
+ out.Values[i] = graphql.MarshalString("NamedStats")
+ case "name":
+ out.Values[i] = ec._NamedStats_name(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "data":
+ out.Values[i] = ec._NamedStats_data(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ default:
+ panic("unknown field " + strconv.Quote(field.Name))
+ }
+ }
+ out.Dispatch(ctx)
+ if out.Invalids > 0 {
+ return graphql.Null
+ }
+
+ atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+ for label, dfs := range deferred {
+ ec.processDeferredGroup(graphql.DeferredGroup{
+ Label: label,
+ Path: graphql.GetPath(ctx),
+ FieldSet: dfs,
+ Context: ctx,
+ })
+ }
+
+ return out
+}
+
+var namedStatsWithScopeImplementors = []string{"NamedStatsWithScope"}
+
+func (ec *executionContext) _NamedStatsWithScope(ctx context.Context, sel ast.SelectionSet, obj *model.NamedStatsWithScope) graphql.Marshaler {
+ fields := graphql.CollectFields(ec.OperationContext, sel, namedStatsWithScopeImplementors)
+
+ out := graphql.NewFieldSet(fields)
+ deferred := make(map[string]*graphql.FieldSet)
+ for i, field := range fields {
+ switch field.Name {
+ case "__typename":
+ out.Values[i] = graphql.MarshalString("NamedStatsWithScope")
+ case "name":
+ out.Values[i] = ec._NamedStatsWithScope_name(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "scope":
+ out.Values[i] = ec._NamedStatsWithScope_scope(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "stats":
+ out.Values[i] = ec._NamedStatsWithScope_stats(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ default:
+ panic("unknown field " + strconv.Quote(field.Name))
+ }
+ }
+ out.Dispatch(ctx)
+ if out.Invalids > 0 {
+ return graphql.Null
+ }
+
+ atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+ for label, dfs := range deferred {
+ ec.processDeferredGroup(graphql.DeferredGroup{
+ Label: label,
+ Path: graphql.GetPath(ctx),
+ FieldSet: dfs,
+ Context: ctx,
+ })
+ }
+
+ return out
+}
+
+var nodeImplementors = []string{"Node"}
+
+func (ec *executionContext) _Node(ctx context.Context, sel ast.SelectionSet, obj *schema.Node) graphql.Marshaler {
+ fields := graphql.CollectFields(ec.OperationContext, sel, nodeImplementors)
+
+ out := graphql.NewFieldSet(fields)
+ deferred := make(map[string]*graphql.FieldSet)
+ for i, field := range fields {
+ switch field.Name {
+ case "__typename":
+ out.Values[i] = graphql.MarshalString("Node")
+ case "id":
+ out.Values[i] = ec._Node_id(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ atomic.AddUint32(&out.Invalids, 1)
+ }
+ case "hostname":
+ out.Values[i] = ec._Node_hostname(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ atomic.AddUint32(&out.Invalids, 1)
+ }
+ case "cluster":
+ out.Values[i] = ec._Node_cluster(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ atomic.AddUint32(&out.Invalids, 1)
+ }
+ case "subCluster":
+ out.Values[i] = ec._Node_subCluster(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ atomic.AddUint32(&out.Invalids, 1)
+ }
+ case "nodeState":
+ field := field
+
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Node_nodeState(ctx, field, obj)
+ if res == graphql.Null {
+ atomic.AddUint32(&fs.Invalids, 1)
+ }
+ return res
+ }
+
+ if field.Deferrable != nil {
+ dfs, ok := deferred[field.Deferrable.Label]
+ di := 0
+ if ok {
+ dfs.AddField(field)
+ di = len(dfs.Values) - 1
+ } else {
+ dfs = graphql.NewFieldSet([]graphql.CollectedField{field})
+ deferred[field.Deferrable.Label] = dfs
+ }
+ dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler {
+ return innerFunc(ctx, dfs)
+ })
+
+ // don't run the out.Concurrently() call below
+ out.Values[i] = graphql.Null
+ continue
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ case "HealthState":
+ field := field
+
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Node_HealthState(ctx, field, obj)
+ if res == graphql.Null {
+ atomic.AddUint32(&fs.Invalids, 1)
+ }
+ return res
+ }
+
+ if field.Deferrable != nil {
+ dfs, ok := deferred[field.Deferrable.Label]
+ di := 0
+ if ok {
+ dfs.AddField(field)
+ di = len(dfs.Values) - 1
+ } else {
+ dfs = graphql.NewFieldSet([]graphql.CollectedField{field})
+ deferred[field.Deferrable.Label] = dfs
+ }
+ dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler {
+ return innerFunc(ctx, dfs)
+ })
+
+ // don't run the out.Concurrently() call below
+ out.Values[i] = graphql.Null
+ continue
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ case "metaData":
+ field := field
+
+ innerFunc := func(ctx context.Context, _ *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Node_metaData(ctx, field, obj)
+ return res
+ }
+
+ if field.Deferrable != nil {
+ dfs, ok := deferred[field.Deferrable.Label]
+ di := 0
+ if ok {
+ dfs.AddField(field)
+ di = len(dfs.Values) - 1
+ } else {
+ dfs = graphql.NewFieldSet([]graphql.CollectedField{field})
+ deferred[field.Deferrable.Label] = dfs
+ }
+ dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler {
+ return innerFunc(ctx, dfs)
+ })
+
+ // don't run the out.Concurrently() call below
+ out.Values[i] = graphql.Null
+ continue
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ default:
+ panic("unknown field " + strconv.Quote(field.Name))
+ }
+ }
+ out.Dispatch(ctx)
+ if out.Invalids > 0 {
+ return graphql.Null
+ }
+
+ atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+ for label, dfs := range deferred {
+ ec.processDeferredGroup(graphql.DeferredGroup{
+ Label: label,
+ Path: graphql.GetPath(ctx),
+ FieldSet: dfs,
+ Context: ctx,
+ })
+ }
+
+ return out
+}
+
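
// The nodeState, HealthState, and metaData cases in _Node above all
// instantiate gqlgen's deferrable-field template: resolve on the main
// FieldSet via out.Concurrently unless the field carries an @defer label,
// in which case it is parked in a per-label FieldSet and dispatched
// separately via processDeferredGroup. A simplified stand-alone sketch of
// that routing (local types; deferred results are gathered eagerly here
// rather than streamed as in the real runtime):
package main

import (
	"fmt"
	"sync"
)

// deferLabel == "" means the field is not behind @defer.
type field struct {
	name       string
	deferLabel string
	resolve    func() string
}

func execute(fields []field) (immediate map[string]string, deferred map[string][]string) {
	immediate = map[string]string{}
	deferred = map[string][]string{}
	var wg sync.WaitGroup
	var mu sync.Mutex
	for _, f := range fields {
		f := f
		wg.Add(1)
		go func() { // analogous to out.Concurrently / dfs.Concurrently
			defer wg.Done()
			res := f.resolve()
			mu.Lock()
			defer mu.Unlock()
			if f.deferLabel != "" {
				// real code parks this in a per-label FieldSet and later
				// calls processDeferredGroup; here we just bucket it
				deferred[f.deferLabel] = append(deferred[f.deferLabel], res)
				return
			}
			immediate[f.name] = res
		}()
	}
	wg.Wait()
	return immediate, deferred
}

func main() {
	now, later := execute([]field{
		{name: "hostname", resolve: func() string { return "node001" }},
		{name: "metaData", deferLabel: "meta", resolve: func() string { return "{}" }},
	})
	fmt.Println(now, later)
}
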
var nodeMetricsImplementors = []string{"NodeMetrics"}
func (ec *executionContext) _NodeMetrics(ctx context.Context, sel ast.SelectionSet, obj *model.NodeMetrics) graphql.Marshaler {
@@ -17946,6 +19998,91 @@ func (ec *executionContext) _NodeMetrics(ctx context.Context, sel ast.SelectionS
return out
}
+var nodeStateResultListImplementors = []string{"NodeStateResultList"}
+
+func (ec *executionContext) _NodeStateResultList(ctx context.Context, sel ast.SelectionSet, obj *model.NodeStateResultList) graphql.Marshaler {
+ fields := graphql.CollectFields(ec.OperationContext, sel, nodeStateResultListImplementors)
+
+ out := graphql.NewFieldSet(fields)
+ deferred := make(map[string]*graphql.FieldSet)
+ for i, field := range fields {
+ switch field.Name {
+ case "__typename":
+ out.Values[i] = graphql.MarshalString("NodeStateResultList")
+ case "items":
+ out.Values[i] = ec._NodeStateResultList_items(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "count":
+ out.Values[i] = ec._NodeStateResultList_count(ctx, field, obj)
+ default:
+ panic("unknown field " + strconv.Quote(field.Name))
+ }
+ }
+ out.Dispatch(ctx)
+ if out.Invalids > 0 {
+ return graphql.Null
+ }
+
+ atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+ for label, dfs := range deferred {
+ ec.processDeferredGroup(graphql.DeferredGroup{
+ Label: label,
+ Path: graphql.GetPath(ctx),
+ FieldSet: dfs,
+ Context: ctx,
+ })
+ }
+
+ return out
+}
+
+var nodeStatsImplementors = []string{"NodeStats"}
+
+func (ec *executionContext) _NodeStats(ctx context.Context, sel ast.SelectionSet, obj *model.NodeStats) graphql.Marshaler {
+ fields := graphql.CollectFields(ec.OperationContext, sel, nodeStatsImplementors)
+
+ out := graphql.NewFieldSet(fields)
+ deferred := make(map[string]*graphql.FieldSet)
+ for i, field := range fields {
+ switch field.Name {
+ case "__typename":
+ out.Values[i] = graphql.MarshalString("NodeStats")
+ case "state":
+ out.Values[i] = ec._NodeStats_state(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "count":
+ out.Values[i] = ec._NodeStats_count(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ default:
+ panic("unknown field " + strconv.Quote(field.Name))
+ }
+ }
+ out.Dispatch(ctx)
+ if out.Invalids > 0 {
+ return graphql.Null
+ }
+
+ atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+ for label, dfs := range deferred {
+ ec.processDeferredGroup(graphql.DeferredGroup{
+ Label: label,
+ Path: graphql.GetPath(ctx),
+ FieldSet: dfs,
+ Context: ctx,
+ })
+ }
+
+ return out
+}
+
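
// For orientation, a query shape the _NodeStats marshaller above would
// serve. The `nodeStats` root field matches the Query case added further
// below; `state` and `count` come from the NodeStats type in this diff,
// while the `filter` argument name is an assumption based on NodeFilter
// existing here.
package main

import "fmt"

const nodeStatsQuery = `query ($filter: [NodeFilter!]) {
  nodeStats(filter: $filter) { state count }
}`

func main() { fmt.Println(nodeStatsQuery) }
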
var nodesResultListImplementors = []string{"NodesResultList"}
func (ec *executionContext) _NodesResultList(ctx context.Context, sel ast.SelectionSet, obj *model.NodesResultList) graphql.Marshaler {
@@ -18120,6 +20257,69 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr
func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
}
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
+ case "node":
+ field := field
+
+ innerFunc := func(ctx context.Context, _ *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Query_node(ctx, field)
+ return res
+ }
+
+ rrm := func(ctx context.Context) graphql.Marshaler {
+ return ec.OperationContext.RootResolverMiddleware(ctx,
+ func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
+ case "nodes":
+ field := field
+
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Query_nodes(ctx, field)
+ if res == graphql.Null {
+ atomic.AddUint32(&fs.Invalids, 1)
+ }
+ return res
+ }
+
+ rrm := func(ctx context.Context) graphql.Marshaler {
+ return ec.OperationContext.RootResolverMiddleware(ctx,
+ func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
+ case "nodeStats":
+ field := field
+
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Query_nodeStats(ctx, field)
+ if res == graphql.Null {
+ atomic.AddUint32(&fs.Invalids, 1)
+ }
+ return res
+ }
+
+ rrm := func(ctx context.Context) graphql.Marshaler {
+ return ec.OperationContext.RootResolverMiddleware(ctx,
+ func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ }
+
out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
case "job":
field := field
@@ -18205,25 +20405,6 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr
func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
}
- out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
- case "jobsFootprints":
- field := field
-
- innerFunc := func(ctx context.Context, _ *graphql.FieldSet) (res graphql.Marshaler) {
- defer func() {
- if r := recover(); r != nil {
- ec.Error(ctx, ec.Recover(ctx, r))
- }
- }()
- res = ec._Query_jobsFootprints(ctx, field)
- return res
- }
-
- rrm := func(ctx context.Context) graphql.Marshaler {
- return ec.OperationContext.RootResolverMiddleware(ctx,
- func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
- }
-
out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
case "jobs":
field := field
@@ -18268,6 +20449,47 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr
func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
}
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
+ case "jobsMetricStats":
+ field := field
+
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Query_jobsMetricStats(ctx, field)
+ if res == graphql.Null {
+ atomic.AddUint32(&fs.Invalids, 1)
+ }
+ return res
+ }
+
+ rrm := func(ctx context.Context) graphql.Marshaler {
+ return ec.OperationContext.RootResolverMiddleware(ctx,
+ func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
+ case "jobsFootprints":
+ field := field
+
+ innerFunc := func(ctx context.Context, _ *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Query_jobsFootprints(ctx, field)
+ return res
+ }
+
+ rrm := func(ctx context.Context) graphql.Marshaler {
+ return ec.OperationContext.RootResolverMiddleware(ctx,
+ func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ }
+
out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
case "rooflineHeatmap":
field := field
@@ -19358,7 +21580,7 @@ func (ec *executionContext) ___Type(ctx context.Context, sel ast.SelectionSet, o
// region ***************************** type.gotpl *****************************
-func (ec *executionContext) marshalNAccelerator2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐAccelerator(ctx context.Context, sel ast.SelectionSet, v *schema.Accelerator) graphql.Marshaler {
+func (ec *executionContext) marshalNAccelerator2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐAccelerator(ctx context.Context, sel ast.SelectionSet, v *schema.Accelerator) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "the requested element is null which the schema does not allow")
@@ -19383,7 +21605,7 @@ func (ec *executionContext) marshalNBoolean2bool(ctx context.Context, sel ast.Se
return res
}
-func (ec *executionContext) marshalNCluster2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐClusterᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.Cluster) graphql.Marshaler {
+func (ec *executionContext) marshalNCluster2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐClusterᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.Cluster) graphql.Marshaler {
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
@@ -19407,7 +21629,7 @@ func (ec *executionContext) marshalNCluster2ᚕᚖgithubᚗcomᚋClusterCockpit
if !isLen1 {
defer wg.Done()
}
- ret[i] = ec.marshalNCluster2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐCluster(ctx, sel, v[i])
+ ret[i] = ec.marshalNCluster2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐCluster(ctx, sel, v[i])
}
if isLen1 {
f(i)
@@ -19427,7 +21649,7 @@ func (ec *executionContext) marshalNCluster2ᚕᚖgithubᚗcomᚋClusterCockpit
return ret
}
-func (ec *executionContext) marshalNCluster2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐCluster(ctx context.Context, sel ast.SelectionSet, v *schema.Cluster) graphql.Marshaler {
+func (ec *executionContext) marshalNCluster2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐCluster(ctx context.Context, sel ast.SelectionSet, v *schema.Cluster) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "the requested element is null which the schema does not allow")
@@ -19437,11 +21659,11 @@ func (ec *executionContext) marshalNCluster2ᚖgithubᚗcomᚋClusterCockpitᚋc
return ec._Cluster(ctx, sel, v)
}
-func (ec *executionContext) marshalNClusterSupport2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐClusterSupport(ctx context.Context, sel ast.SelectionSet, v schema.ClusterSupport) graphql.Marshaler {
+func (ec *executionContext) marshalNClusterSupport2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐClusterSupport(ctx context.Context, sel ast.SelectionSet, v schema.ClusterSupport) graphql.Marshaler {
return ec._ClusterSupport(ctx, sel, &v)
}
-func (ec *executionContext) marshalNClusterSupport2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐClusterSupportᚄ(ctx context.Context, sel ast.SelectionSet, v []schema.ClusterSupport) graphql.Marshaler {
+func (ec *executionContext) marshalNClusterSupport2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐClusterSupportᚄ(ctx context.Context, sel ast.SelectionSet, v []schema.ClusterSupport) graphql.Marshaler {
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
@@ -19465,7 +21687,7 @@ func (ec *executionContext) marshalNClusterSupport2ᚕgithubᚗcomᚋClusterCock
if !isLen1 {
defer wg.Done()
}
- ret[i] = ec.marshalNClusterSupport2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐClusterSupport(ctx, sel, v[i])
+ ret[i] = ec.marshalNClusterSupport2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐClusterSupport(ctx, sel, v[i])
}
if isLen1 {
f(i)
@@ -19623,7 +21845,7 @@ func (ec *executionContext) unmarshalNFloatRange2ᚖgithubᚗcomᚋClusterCockpi
return &res, graphql.ErrorOnPath(ctx, err)
}
-func (ec *executionContext) marshalNGlobalMetricListItem2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐGlobalMetricListItemᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.GlobalMetricListItem) graphql.Marshaler {
+func (ec *executionContext) marshalNGlobalMetricListItem2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐGlobalMetricListItemᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.GlobalMetricListItem) graphql.Marshaler {
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
@@ -19647,7 +21869,7 @@ func (ec *executionContext) marshalNGlobalMetricListItem2ᚕᚖgithubᚗcomᚋCl
if !isLen1 {
defer wg.Done()
}
- ret[i] = ec.marshalNGlobalMetricListItem2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐGlobalMetricListItem(ctx, sel, v[i])
+ ret[i] = ec.marshalNGlobalMetricListItem2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐGlobalMetricListItem(ctx, sel, v[i])
}
if isLen1 {
f(i)
@@ -19667,7 +21889,7 @@ func (ec *executionContext) marshalNGlobalMetricListItem2ᚕᚖgithubᚗcomᚋCl
return ret
}
-func (ec *executionContext) marshalNGlobalMetricListItem2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐGlobalMetricListItem(ctx context.Context, sel ast.SelectionSet, v *schema.GlobalMetricListItem) graphql.Marshaler {
+func (ec *executionContext) marshalNGlobalMetricListItem2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐGlobalMetricListItem(ctx context.Context, sel ast.SelectionSet, v *schema.GlobalMetricListItem) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "the requested element is null which the schema does not allow")
@@ -19793,6 +22015,27 @@ func (ec *executionContext) marshalNID2ᚕstringᚄ(ctx context.Context, sel ast
return ret
}
+func (ec *executionContext) unmarshalNID2ᚖint64(ctx context.Context, v any) (*int64, error) {
+ res, err := graphql.UnmarshalInt64(v)
+ return &res, graphql.ErrorOnPath(ctx, err)
+}
+
+func (ec *executionContext) marshalNID2ᚖint64(ctx context.Context, sel ast.SelectionSet, v *int64) graphql.Marshaler {
+ if v == nil {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ return graphql.Null
+ }
+ res := graphql.MarshalInt64(*v)
+ if res == graphql.Null {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ }
+ return res
+}
+
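
// The *int64 ID helpers above follow the usual gqlgen non-null template:
// a nil value is a schema violation reported once per field. Stripped of
// the gqlgen runtime, the core contract reduces to a sketch like this:
package main

import (
	"errors"
	"fmt"
	"strconv"
)

// nil yields a null placeholder plus a schema-violation error, mirroring
// marshalNID2ᚖint64 above.
func marshalNonNullID(v *int64) (string, error) {
	if v == nil {
		return "null", errors.New("the requested element is null which the schema does not allow")
	}
	return strconv.FormatInt(*v, 10), nil
}

func main() {
	id := int64(42)
	fmt.Println(marshalNonNullID(&id))
	fmt.Println(marshalNonNullID(nil))
}
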
func (ec *executionContext) unmarshalNInt2int(ctx context.Context, v any) (int, error) {
res, err := graphql.UnmarshalInt(v)
return res, graphql.ErrorOnPath(ctx, err)
@@ -19923,7 +22166,7 @@ func (ec *executionContext) marshalNInt2ᚖint(ctx context.Context, sel ast.Sele
return res
}
-func (ec *executionContext) marshalNJob2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJobᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.Job) graphql.Marshaler {
+func (ec *executionContext) marshalNJob2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐJobᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.Job) graphql.Marshaler {
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
@@ -19947,7 +22190,7 @@ func (ec *executionContext) marshalNJob2ᚕᚖgithubᚗcomᚋClusterCockpitᚋcc
if !isLen1 {
defer wg.Done()
}
- ret[i] = ec.marshalNJob2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJob(ctx, sel, v[i])
+ ret[i] = ec.marshalNJob2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐJob(ctx, sel, v[i])
}
if isLen1 {
f(i)
@@ -19967,7 +22210,7 @@ func (ec *executionContext) marshalNJob2ᚕᚖgithubᚗcomᚋClusterCockpitᚋcc
return ret
}
-func (ec *executionContext) marshalNJob2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJob(ctx context.Context, sel ast.SelectionSet, v *schema.Job) graphql.Marshaler {
+func (ec *executionContext) marshalNJob2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐJob(ctx context.Context, sel ast.SelectionSet, v *schema.Job) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "the requested element is null which the schema does not allow")
@@ -20053,7 +22296,7 @@ func (ec *executionContext) marshalNJobLink2ᚖgithubᚗcomᚋClusterCockpitᚋc
return ec._JobLink(ctx, sel, v)
}
-func (ec *executionContext) marshalNJobMetric2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJobMetric(ctx context.Context, sel ast.SelectionSet, v *schema.JobMetric) graphql.Marshaler {
+func (ec *executionContext) marshalNJobMetric2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐJobMetric(ctx context.Context, sel ast.SelectionSet, v *schema.JobMetric) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "the requested element is null which the schema does not allow")
@@ -20131,13 +22374,13 @@ func (ec *executionContext) marshalNJobResultList2ᚖgithubᚗcomᚋClusterCockp
return ec._JobResultList(ctx, sel, v)
}
-func (ec *executionContext) unmarshalNJobState2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJobState(ctx context.Context, v any) (schema.JobState, error) {
+func (ec *executionContext) unmarshalNJobState2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐJobState(ctx context.Context, v any) (schema.JobState, error) {
var res schema.JobState
err := res.UnmarshalGQL(v)
return res, graphql.ErrorOnPath(ctx, err)
}
-func (ec *executionContext) marshalNJobState2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJobState(ctx context.Context, sel ast.SelectionSet, v schema.JobState) graphql.Marshaler {
+func (ec *executionContext) marshalNJobState2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐJobState(ctx context.Context, sel ast.SelectionSet, v schema.JobState) graphql.Marshaler {
return v
}
@@ -20195,60 +22438,6 @@ func (ec *executionContext) marshalNJobStats2ᚖgithubᚗcomᚋClusterCockpitᚋ
return ec._JobStats(ctx, sel, v)
}
-func (ec *executionContext) marshalNJobStatsWithScope2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobStatsWithScopeᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.JobStatsWithScope) graphql.Marshaler {
- ret := make(graphql.Array, len(v))
- var wg sync.WaitGroup
- isLen1 := len(v) == 1
- if !isLen1 {
- wg.Add(len(v))
- }
- for i := range v {
- i := i
- fc := &graphql.FieldContext{
- Index: &i,
- Result: &v[i],
- }
- ctx := graphql.WithFieldContext(ctx, fc)
- f := func(i int) {
- defer func() {
- if r := recover(); r != nil {
- ec.Error(ctx, ec.Recover(ctx, r))
- ret = nil
- }
- }()
- if !isLen1 {
- defer wg.Done()
- }
- ret[i] = ec.marshalNJobStatsWithScope2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobStatsWithScope(ctx, sel, v[i])
- }
- if isLen1 {
- f(i)
- } else {
- go f(i)
- }
-
- }
- wg.Wait()
-
- for _, e := range ret {
- if e == graphql.Null {
- return graphql.Null
- }
- }
-
- return ret
-}
-
-func (ec *executionContext) marshalNJobStatsWithScope2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobStatsWithScope(ctx context.Context, sel ast.SelectionSet, v *model.JobStatsWithScope) graphql.Marshaler {
- if v == nil {
- if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
- ec.Errorf(ctx, "the requested element is null which the schema does not allow")
- }
- return graphql.Null
- }
- return ec._JobStatsWithScope(ctx, sel, v)
-}
-
func (ec *executionContext) marshalNJobsStatistics2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobsStatisticsᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.JobsStatistics) graphql.Marshaler {
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
@@ -20303,11 +22492,11 @@ func (ec *executionContext) marshalNJobsStatistics2ᚖgithubᚗcomᚋClusterCock
return ec._JobsStatistics(ctx, sel, v)
}
-func (ec *executionContext) marshalNMetricConfig2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricConfig(ctx context.Context, sel ast.SelectionSet, v schema.MetricConfig) graphql.Marshaler {
+func (ec *executionContext) marshalNMetricConfig2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricConfig(ctx context.Context, sel ast.SelectionSet, v schema.MetricConfig) graphql.Marshaler {
return ec._MetricConfig(ctx, sel, &v)
}
-func (ec *executionContext) marshalNMetricConfig2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricConfigᚄ(ctx context.Context, sel ast.SelectionSet, v []schema.MetricConfig) graphql.Marshaler {
+func (ec *executionContext) marshalNMetricConfig2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricConfigᚄ(ctx context.Context, sel ast.SelectionSet, v []schema.MetricConfig) graphql.Marshaler {
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
@@ -20331,7 +22520,7 @@ func (ec *executionContext) marshalNMetricConfig2ᚕgithubᚗcomᚋClusterCockpi
if !isLen1 {
defer wg.Done()
}
- ret[i] = ec.marshalNMetricConfig2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricConfig(ctx, sel, v[i])
+ ret[i] = ec.marshalNMetricConfig2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricConfig(ctx, sel, v[i])
}
if isLen1 {
f(i)
@@ -20469,13 +22658,13 @@ func (ec *executionContext) marshalNMetricHistoPoints2ᚖgithubᚗcomᚋClusterC
return ec._MetricHistoPoints(ctx, sel, v)
}
-func (ec *executionContext) unmarshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScope(ctx context.Context, v any) (schema.MetricScope, error) {
+func (ec *executionContext) unmarshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricScope(ctx context.Context, v any) (schema.MetricScope, error) {
var res schema.MetricScope
err := res.UnmarshalGQL(v)
return res, graphql.ErrorOnPath(ctx, err)
}
-func (ec *executionContext) marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScope(ctx context.Context, sel ast.SelectionSet, v schema.MetricScope) graphql.Marshaler {
+func (ec *executionContext) marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricScope(ctx context.Context, sel ast.SelectionSet, v schema.MetricScope) graphql.Marshaler {
return v
}
@@ -20484,7 +22673,7 @@ func (ec *executionContext) unmarshalNMetricStatItem2ᚖgithubᚗcomᚋClusterCo
return &res, graphql.ErrorOnPath(ctx, err)
}
-func (ec *executionContext) marshalNMetricStatistics2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricStatistics(ctx context.Context, sel ast.SelectionSet, v *schema.MetricStatistics) graphql.Marshaler {
+func (ec *executionContext) marshalNMetricStatistics2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricStatistics(ctx context.Context, sel ast.SelectionSet, v *schema.MetricStatistics) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "the requested element is null which the schema does not allow")
@@ -20494,10 +22683,193 @@ func (ec *executionContext) marshalNMetricStatistics2ᚖgithubᚗcomᚋClusterCo
return ec._MetricStatistics(ctx, sel, v)
}
-func (ec *executionContext) marshalNMetricValue2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricValue(ctx context.Context, sel ast.SelectionSet, v schema.MetricValue) graphql.Marshaler {
+func (ec *executionContext) marshalNMetricValue2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricValue(ctx context.Context, sel ast.SelectionSet, v schema.MetricValue) graphql.Marshaler {
return ec._MetricValue(ctx, sel, &v)
}
+func (ec *executionContext) unmarshalNMonitoringState2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐNodeState(ctx context.Context, v any) (schema.NodeState, error) {
+ tmp, err := graphql.UnmarshalString(v)
+ res := schema.NodeState(tmp)
+ return res, graphql.ErrorOnPath(ctx, err)
+}
+
+func (ec *executionContext) marshalNMonitoringState2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐNodeState(ctx context.Context, sel ast.SelectionSet, v schema.NodeState) graphql.Marshaler {
+ res := graphql.MarshalString(string(v))
+ if res == graphql.Null {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ }
+ return res
+}
+
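
// Two scalar styles coexist in this file: JobState decodes through its own
// UnmarshalGQL (self-validating), while the new MonitoringState above is
// read as a plain string and cast to schema.NodeState with no value
// validation on the cast path. A local sketch of the difference
// (simplified stand-in types, not the cc-lib schema package):
package main

import "fmt"

type nodeState string // cast-style scalar: any string is accepted

type jobState string

// self-validating scalar, analogous to schema.JobState.UnmarshalGQL
func (j *jobState) UnmarshalGQL(v any) error {
	s, ok := v.(string)
	if !ok || s == "" {
		return fmt.Errorf("invalid job state: %v", v)
	}
	*j = jobState(s)
	return nil
}

func main() {
	hs := nodeState("not-a-real-state") // accepted: the cast never rejects
	var js jobState
	err := js.UnmarshalGQL("running")
	fmt.Println(hs, js, err)
}
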
+func (ec *executionContext) marshalNNamedStats2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNamedStatsᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.NamedStats) graphql.Marshaler {
+ ret := make(graphql.Array, len(v))
+ var wg sync.WaitGroup
+ isLen1 := len(v) == 1
+ if !isLen1 {
+ wg.Add(len(v))
+ }
+ for i := range v {
+ i := i
+ fc := &graphql.FieldContext{
+ Index: &i,
+ Result: &v[i],
+ }
+ ctx := graphql.WithFieldContext(ctx, fc)
+ f := func(i int) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = nil
+ }
+ }()
+ if !isLen1 {
+ defer wg.Done()
+ }
+ ret[i] = ec.marshalNNamedStats2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNamedStats(ctx, sel, v[i])
+ }
+ if isLen1 {
+ f(i)
+ } else {
+ go f(i)
+ }
+
+ }
+ wg.Wait()
+
+ for _, e := range ret {
+ if e == graphql.Null {
+ return graphql.Null
+ }
+ }
+
+ return ret
+}
+
+func (ec *executionContext) marshalNNamedStats2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNamedStats(ctx context.Context, sel ast.SelectionSet, v *model.NamedStats) graphql.Marshaler {
+ if v == nil {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ return graphql.Null
+ }
+ return ec._NamedStats(ctx, sel, v)
+}
+
+func (ec *executionContext) marshalNNamedStatsWithScope2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNamedStatsWithScopeᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.NamedStatsWithScope) graphql.Marshaler {
+ ret := make(graphql.Array, len(v))
+ var wg sync.WaitGroup
+ isLen1 := len(v) == 1
+ if !isLen1 {
+ wg.Add(len(v))
+ }
+ for i := range v {
+ i := i
+ fc := &graphql.FieldContext{
+ Index: &i,
+ Result: &v[i],
+ }
+ ctx := graphql.WithFieldContext(ctx, fc)
+ f := func(i int) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = nil
+ }
+ }()
+ if !isLen1 {
+ defer wg.Done()
+ }
+ ret[i] = ec.marshalNNamedStatsWithScope2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNamedStatsWithScope(ctx, sel, v[i])
+ }
+ if isLen1 {
+ f(i)
+ } else {
+ go f(i)
+ }
+
+ }
+ wg.Wait()
+
+ for _, e := range ret {
+ if e == graphql.Null {
+ return graphql.Null
+ }
+ }
+
+ return ret
+}
+
+func (ec *executionContext) marshalNNamedStatsWithScope2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNamedStatsWithScope(ctx context.Context, sel ast.SelectionSet, v *model.NamedStatsWithScope) graphql.Marshaler {
+ if v == nil {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ return graphql.Null
+ }
+ return ec._NamedStatsWithScope(ctx, sel, v)
+}
+
+func (ec *executionContext) marshalNNode2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐNodeᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.Node) graphql.Marshaler {
+ ret := make(graphql.Array, len(v))
+ var wg sync.WaitGroup
+ isLen1 := len(v) == 1
+ if !isLen1 {
+ wg.Add(len(v))
+ }
+ for i := range v {
+ i := i
+ fc := &graphql.FieldContext{
+ Index: &i,
+ Result: &v[i],
+ }
+ ctx := graphql.WithFieldContext(ctx, fc)
+ f := func(i int) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = nil
+ }
+ }()
+ if !isLen1 {
+ defer wg.Done()
+ }
+ ret[i] = ec.marshalNNode2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐNode(ctx, sel, v[i])
+ }
+ if isLen1 {
+ f(i)
+ } else {
+ go f(i)
+ }
+
+ }
+ wg.Wait()
+
+ for _, e := range ret {
+ if e == graphql.Null {
+ return graphql.Null
+ }
+ }
+
+ return ret
+}
+
+func (ec *executionContext) marshalNNode2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐNode(ctx context.Context, sel ast.SelectionSet, v *schema.Node) graphql.Marshaler {
+ if v == nil {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ return graphql.Null
+ }
+ return ec._Node(ctx, sel, v)
+}
+
+func (ec *executionContext) unmarshalNNodeFilter2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodeFilter(ctx context.Context, v any) (*model.NodeFilter, error) {
+ res, err := ec.unmarshalInputNodeFilter(ctx, v)
+ return &res, graphql.ErrorOnPath(ctx, err)
+}
+
func (ec *executionContext) marshalNNodeMetrics2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodeMetricsᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.NodeMetrics) graphql.Marshaler {
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
@@ -20552,63 +22924,36 @@ func (ec *executionContext) marshalNNodeMetrics2ᚖgithubᚗcomᚋClusterCockpit
return ec._NodeMetrics(ctx, sel, v)
}
-func (ec *executionContext) marshalNNodesResultList2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodesResultList(ctx context.Context, sel ast.SelectionSet, v model.NodesResultList) graphql.Marshaler {
- return ec._NodesResultList(ctx, sel, &v)
+func (ec *executionContext) unmarshalNNodeState2string(ctx context.Context, v any) (string, error) {
+ res, err := graphql.UnmarshalString(v)
+ return res, graphql.ErrorOnPath(ctx, err)
}
-func (ec *executionContext) marshalNNodesResultList2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodesResultList(ctx context.Context, sel ast.SelectionSet, v *model.NodesResultList) graphql.Marshaler {
+func (ec *executionContext) marshalNNodeState2string(ctx context.Context, sel ast.SelectionSet, v string) graphql.Marshaler {
+ res := graphql.MarshalString(v)
+ if res == graphql.Null {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ }
+ return res
+}
+
+func (ec *executionContext) marshalNNodeStateResultList2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodeStateResultList(ctx context.Context, sel ast.SelectionSet, v model.NodeStateResultList) graphql.Marshaler {
+ return ec._NodeStateResultList(ctx, sel, &v)
+}
+
+func (ec *executionContext) marshalNNodeStateResultList2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodeStateResultList(ctx context.Context, sel ast.SelectionSet, v *model.NodeStateResultList) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "the requested element is null which the schema does not allow")
}
return graphql.Null
}
- return ec._NodesResultList(ctx, sel, v)
+ return ec._NodeStateResultList(ctx, sel, v)
}
-func (ec *executionContext) unmarshalNNullableFloat2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloat(ctx context.Context, v any) (schema.Float, error) {
- var res schema.Float
- err := res.UnmarshalGQL(v)
- return res, graphql.ErrorOnPath(ctx, err)
-}
-
-func (ec *executionContext) marshalNNullableFloat2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloat(ctx context.Context, sel ast.SelectionSet, v schema.Float) graphql.Marshaler {
- return v
-}
-
-func (ec *executionContext) unmarshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx context.Context, v any) ([]schema.Float, error) {
- var vSlice []any
- if v != nil {
- vSlice = graphql.CoerceList(v)
- }
- var err error
- res := make([]schema.Float, len(vSlice))
- for i := range vSlice {
- ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i))
- res[i], err = ec.unmarshalNNullableFloat2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloat(ctx, vSlice[i])
- if err != nil {
- return nil, err
- }
- }
- return res, nil
-}
-
-func (ec *executionContext) marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx context.Context, sel ast.SelectionSet, v []schema.Float) graphql.Marshaler {
- ret := make(graphql.Array, len(v))
- for i := range v {
- ret[i] = ec.marshalNNullableFloat2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloat(ctx, sel, v[i])
- }
-
- for _, e := range ret {
- if e == graphql.Null {
- return graphql.Null
- }
- }
-
- return ret
-}
-
-func (ec *executionContext) marshalNResource2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐResourceᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.Resource) graphql.Marshaler {
+func (ec *executionContext) marshalNNodeStats2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodeStatsᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.NodeStats) graphql.Marshaler {
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
@@ -20632,7 +22977,7 @@ func (ec *executionContext) marshalNResource2ᚕᚖgithubᚗcomᚋClusterCockpit
if !isLen1 {
defer wg.Done()
}
- ret[i] = ec.marshalNResource2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐResource(ctx, sel, v[i])
+ ret[i] = ec.marshalNNodeStats2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodeStats(ctx, sel, v[i])
}
if isLen1 {
f(i)
@@ -20652,7 +22997,117 @@ func (ec *executionContext) marshalNResource2ᚕᚖgithubᚗcomᚋClusterCockpit
return ret
}
-func (ec *executionContext) marshalNResource2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐResource(ctx context.Context, sel ast.SelectionSet, v *schema.Resource) graphql.Marshaler {
+func (ec *executionContext) marshalNNodeStats2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodeStats(ctx context.Context, sel ast.SelectionSet, v *model.NodeStats) graphql.Marshaler {
+ if v == nil {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ return graphql.Null
+ }
+ return ec._NodeStats(ctx, sel, v)
+}
+
+func (ec *executionContext) marshalNNodesResultList2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodesResultList(ctx context.Context, sel ast.SelectionSet, v model.NodesResultList) graphql.Marshaler {
+ return ec._NodesResultList(ctx, sel, &v)
+}
+
+func (ec *executionContext) marshalNNodesResultList2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodesResultList(ctx context.Context, sel ast.SelectionSet, v *model.NodesResultList) graphql.Marshaler {
+ if v == nil {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ return graphql.Null
+ }
+ return ec._NodesResultList(ctx, sel, v)
+}
+
+func (ec *executionContext) unmarshalNNullableFloat2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐFloat(ctx context.Context, v any) (schema.Float, error) {
+ var res schema.Float
+ err := res.UnmarshalGQL(v)
+ return res, graphql.ErrorOnPath(ctx, err)
+}
+
+func (ec *executionContext) marshalNNullableFloat2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐFloat(ctx context.Context, sel ast.SelectionSet, v schema.Float) graphql.Marshaler {
+ return v
+}
+
+func (ec *executionContext) unmarshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐFloatᚄ(ctx context.Context, v any) ([]schema.Float, error) {
+ var vSlice []any
+ if v != nil {
+ vSlice = graphql.CoerceList(v)
+ }
+ var err error
+ res := make([]schema.Float, len(vSlice))
+ for i := range vSlice {
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i))
+ res[i], err = ec.unmarshalNNullableFloat2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐFloat(ctx, vSlice[i])
+ if err != nil {
+ return nil, err
+ }
+ }
+ return res, nil
+}
+
+func (ec *executionContext) marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐFloatᚄ(ctx context.Context, sel ast.SelectionSet, v []schema.Float) graphql.Marshaler {
+ ret := make(graphql.Array, len(v))
+ for i := range v {
+ ret[i] = ec.marshalNNullableFloat2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐFloat(ctx, sel, v[i])
+ }
+
+ for _, e := range ret {
+ if e == graphql.Null {
+ return graphql.Null
+ }
+ }
+
+ return ret
+}
+
+func (ec *executionContext) marshalNResource2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐResourceᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.Resource) graphql.Marshaler {
+ ret := make(graphql.Array, len(v))
+ var wg sync.WaitGroup
+ isLen1 := len(v) == 1
+ if !isLen1 {
+ wg.Add(len(v))
+ }
+ for i := range v {
+ i := i
+ fc := &graphql.FieldContext{
+ Index: &i,
+ Result: &v[i],
+ }
+ ctx := graphql.WithFieldContext(ctx, fc)
+ f := func(i int) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = nil
+ }
+ }()
+ if !isLen1 {
+ defer wg.Done()
+ }
+ ret[i] = ec.marshalNResource2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐResource(ctx, sel, v[i])
+ }
+ if isLen1 {
+ f(i)
+ } else {
+ go f(i)
+ }
+
+ }
+ wg.Wait()
+
+ for _, e := range ret {
+ if e == graphql.Null {
+ return graphql.Null
+ }
+ }
+
+ return ret
+}
+
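
// Every non-null list marshaller in this region (Resource, Node,
// NamedStats, NodeStats, ...) shares one generated template: a length-1
// list is marshalled inline, longer lists fan out one goroutine per
// element, and any null element collapses the whole list to null. A
// reduced sketch of that fan-out, with a string marshal standing in for
// the per-element marshaller:
package main

import (
	"fmt"
	"strings"
	"sync"
)

func marshalList(v []string) string {
	ret := make([]string, len(v))
	var wg sync.WaitGroup
	isLen1 := len(v) == 1
	if !isLen1 {
		wg.Add(len(v))
	}
	for i := range v {
		f := func(i int) {
			if !isLen1 {
				defer wg.Done()
			}
			ret[i] = strings.ToUpper(v[i]) // element marshal stand-in
		}
		if isLen1 {
			f(i)
		} else {
			go f(i)
		}
	}
	wg.Wait()
	for _, e := range ret {
		if e == "" { // stand-in for the e == graphql.Null check above
			return "null"
		}
	}
	return "[" + strings.Join(ret, ",") + "]"
}

func main() {
	fmt.Println(marshalList([]string{"a", "b", "c"})) // [A,B,C]
}
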
+func (ec *executionContext) marshalNResource2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐResource(ctx context.Context, sel ast.SelectionSet, v *schema.Resource) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "the requested element is null which the schema does not allow")
@@ -20716,7 +23171,7 @@ func (ec *executionContext) marshalNScopedStats2ᚖgithubᚗcomᚋClusterCockpit
return ec._ScopedStats(ctx, sel, v)
}
-func (ec *executionContext) marshalNSeries2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐSeries(ctx context.Context, sel ast.SelectionSet, v schema.Series) graphql.Marshaler {
+func (ec *executionContext) marshalNSeries2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐSeries(ctx context.Context, sel ast.SelectionSet, v schema.Series) graphql.Marshaler {
return ec._Series(ctx, sel, &v)
}
@@ -20777,7 +23232,7 @@ func (ec *executionContext) marshalNString2ᚕstringᚄ(ctx context.Context, sel
return ret
}
-func (ec *executionContext) marshalNSubCluster2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐSubClusterᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.SubCluster) graphql.Marshaler {
+func (ec *executionContext) marshalNSubCluster2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐSubClusterᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.SubCluster) graphql.Marshaler {
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
@@ -20801,7 +23256,7 @@ func (ec *executionContext) marshalNSubCluster2ᚕᚖgithubᚗcomᚋClusterCockp
if !isLen1 {
defer wg.Done()
}
- ret[i] = ec.marshalNSubCluster2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐSubCluster(ctx, sel, v[i])
+ ret[i] = ec.marshalNSubCluster2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐSubCluster(ctx, sel, v[i])
}
if isLen1 {
f(i)
@@ -20821,7 +23276,7 @@ func (ec *executionContext) marshalNSubCluster2ᚕᚖgithubᚗcomᚋClusterCockp
return ret
}
-func (ec *executionContext) marshalNSubCluster2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐSubCluster(ctx context.Context, sel ast.SelectionSet, v *schema.SubCluster) graphql.Marshaler {
+func (ec *executionContext) marshalNSubCluster2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐSubCluster(ctx context.Context, sel ast.SelectionSet, v *schema.SubCluster) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "the requested element is null which the schema does not allow")
@@ -20831,7 +23286,7 @@ func (ec *executionContext) marshalNSubCluster2ᚖgithubᚗcomᚋClusterCockpit
return ec._SubCluster(ctx, sel, v)
}
-func (ec *executionContext) marshalNSubClusterConfig2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐSubClusterConfigᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.SubClusterConfig) graphql.Marshaler {
+func (ec *executionContext) marshalNSubClusterConfig2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐSubClusterConfigᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.SubClusterConfig) graphql.Marshaler {
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
@@ -20855,7 +23310,7 @@ func (ec *executionContext) marshalNSubClusterConfig2ᚕᚖgithubᚗcomᚋCluste
if !isLen1 {
defer wg.Done()
}
- ret[i] = ec.marshalNSubClusterConfig2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐSubClusterConfig(ctx, sel, v[i])
+ ret[i] = ec.marshalNSubClusterConfig2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐSubClusterConfig(ctx, sel, v[i])
}
if isLen1 {
f(i)
@@ -20875,7 +23330,7 @@ func (ec *executionContext) marshalNSubClusterConfig2ᚕᚖgithubᚗcomᚋCluste
return ret
}
-func (ec *executionContext) marshalNSubClusterConfig2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐSubClusterConfig(ctx context.Context, sel ast.SelectionSet, v *schema.SubClusterConfig) graphql.Marshaler {
+func (ec *executionContext) marshalNSubClusterConfig2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐSubClusterConfig(ctx context.Context, sel ast.SelectionSet, v *schema.SubClusterConfig) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "the requested element is null which the schema does not allow")
@@ -20885,11 +23340,11 @@ func (ec *executionContext) marshalNSubClusterConfig2ᚖgithubᚗcomᚋClusterCo
return ec._SubClusterConfig(ctx, sel, v)
}
-func (ec *executionContext) marshalNTag2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTag(ctx context.Context, sel ast.SelectionSet, v schema.Tag) graphql.Marshaler {
+func (ec *executionContext) marshalNTag2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐTag(ctx context.Context, sel ast.SelectionSet, v schema.Tag) graphql.Marshaler {
return ec._Tag(ctx, sel, &v)
}
-func (ec *executionContext) marshalNTag2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTagᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.Tag) graphql.Marshaler {
+func (ec *executionContext) marshalNTag2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐTagᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.Tag) graphql.Marshaler {
ret := make(graphql.Array, len(v))
var wg sync.WaitGroup
isLen1 := len(v) == 1
@@ -20913,7 +23368,7 @@ func (ec *executionContext) marshalNTag2ᚕᚖgithubᚗcomᚋClusterCockpitᚋcc
if !isLen1 {
defer wg.Done()
}
- ret[i] = ec.marshalNTag2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTag(ctx, sel, v[i])
+ ret[i] = ec.marshalNTag2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐTag(ctx, sel, v[i])
}
if isLen1 {
f(i)
@@ -20933,7 +23388,7 @@ func (ec *executionContext) marshalNTag2ᚕᚖgithubᚗcomᚋClusterCockpitᚋcc
return ret
}
-func (ec *executionContext) marshalNTag2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTag(ctx context.Context, sel ast.SelectionSet, v *schema.Tag) graphql.Marshaler {
+func (ec *executionContext) marshalNTag2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐTag(ctx context.Context, sel ast.SelectionSet, v *schema.Tag) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
ec.Errorf(ctx, "the requested element is null which the schema does not allow")
@@ -20958,6 +23413,27 @@ func (ec *executionContext) marshalNTime2timeᚐTime(ctx context.Context, sel as
return res
}
+func (ec *executionContext) unmarshalNTime2ᚖtimeᚐTime(ctx context.Context, v any) (*time.Time, error) {
+ res, err := graphql.UnmarshalTime(v)
+ return &res, graphql.ErrorOnPath(ctx, err)
+}
+
+func (ec *executionContext) marshalNTime2ᚖtimeᚐTime(ctx context.Context, sel ast.SelectionSet, v *time.Time) graphql.Marshaler {
+ if v == nil {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ return graphql.Null
+ }
+ res := graphql.MarshalTime(*v)
+ if res == graphql.Null {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ }
+ return res
+}
+
func (ec *executionContext) marshalNTimeWeights2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐTimeWeights(ctx context.Context, sel ast.SelectionSet, v *model.TimeWeights) graphql.Marshaler {
if v == nil {
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
@@ -20968,11 +23444,11 @@ func (ec *executionContext) marshalNTimeWeights2ᚖgithubᚗcomᚋClusterCockpit
return ec._TimeWeights(ctx, sel, v)
}
-func (ec *executionContext) marshalNTopology2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTopology(ctx context.Context, sel ast.SelectionSet, v schema.Topology) graphql.Marshaler {
+func (ec *executionContext) marshalNTopology2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐTopology(ctx context.Context, sel ast.SelectionSet, v schema.Topology) graphql.Marshaler {
return ec._Topology(ctx, sel, &v)
}
-func (ec *executionContext) marshalNUnit2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐUnit(ctx context.Context, sel ast.SelectionSet, v schema.Unit) graphql.Marshaler {
+func (ec *executionContext) marshalNUnit2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐUnit(ctx context.Context, sel ast.SelectionSet, v schema.Unit) graphql.Marshaler {
return ec._Unit(ctx, sel, &v)
}
@@ -21229,7 +23705,7 @@ func (ec *executionContext) marshalN__TypeKind2string(ctx context.Context, sel a
return res
}
-func (ec *executionContext) marshalOAccelerator2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐAcceleratorᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.Accelerator) graphql.Marshaler {
+func (ec *executionContext) marshalOAccelerator2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐAcceleratorᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.Accelerator) graphql.Marshaler {
if v == nil {
return graphql.Null
}
@@ -21256,7 +23732,7 @@ func (ec *executionContext) marshalOAccelerator2ᚕᚖgithubᚗcomᚋClusterCock
if !isLen1 {
defer wg.Done()
}
- ret[i] = ec.marshalNAccelerator2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐAccelerator(ctx, sel, v[i])
+ ret[i] = ec.marshalNAccelerator2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐAccelerator(ctx, sel, v[i])
}
if isLen1 {
f(i)
@@ -21623,7 +24099,7 @@ func (ec *executionContext) marshalOInt2ᚖint(ctx context.Context, sel ast.Sele
return res
}
-func (ec *executionContext) unmarshalOIntRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐIntRange(ctx context.Context, v any) (*schema.IntRange, error) {
+func (ec *executionContext) unmarshalOIntRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐIntRange(ctx context.Context, v any) (*schema.IntRange, error) {
if v == nil {
return nil, nil
}
@@ -21631,7 +24107,7 @@ func (ec *executionContext) unmarshalOIntRange2ᚖgithubᚗcomᚋClusterCockpit
return &res, graphql.ErrorOnPath(ctx, err)
}
-func (ec *executionContext) marshalOJob2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJob(ctx context.Context, sel ast.SelectionSet, v *schema.Job) graphql.Marshaler {
+func (ec *executionContext) marshalOJob2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐJob(ctx context.Context, sel ast.SelectionSet, v *schema.Job) graphql.Marshaler {
if v == nil {
return graphql.Null
}
@@ -21665,7 +24141,7 @@ func (ec *executionContext) marshalOJobLinkResultList2ᚖgithubᚗcomᚋClusterC
return ec._JobLinkResultList(ctx, sel, v)
}
-func (ec *executionContext) unmarshalOJobState2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJobStateᚄ(ctx context.Context, v any) ([]schema.JobState, error) {
+func (ec *executionContext) unmarshalOJobState2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐJobStateᚄ(ctx context.Context, v any) ([]schema.JobState, error) {
if v == nil {
return nil, nil
}
@@ -21677,7 +24153,7 @@ func (ec *executionContext) unmarshalOJobState2ᚕgithubᚗcomᚋClusterCockpit
res := make([]schema.JobState, len(vSlice))
for i := range vSlice {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i))
- res[i], err = ec.unmarshalNJobState2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJobState(ctx, vSlice[i])
+ res[i], err = ec.unmarshalNJobState2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐJobState(ctx, vSlice[i])
if err != nil {
return nil, err
}
@@ -21685,13 +24161,13 @@ func (ec *executionContext) unmarshalOJobState2ᚕgithubᚗcomᚋClusterCockpit
return res, nil
}
-func (ec *executionContext) marshalOJobState2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJobStateᚄ(ctx context.Context, sel ast.SelectionSet, v []schema.JobState) graphql.Marshaler {
+func (ec *executionContext) marshalOJobState2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐJobStateᚄ(ctx context.Context, sel ast.SelectionSet, v []schema.JobState) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
for i := range v {
- ret[i] = ec.marshalNJobState2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJobState(ctx, sel, v[i])
+ ret[i] = ec.marshalNJobState2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐJobState(ctx, sel, v[i])
}
for _, e := range ret {
@@ -21750,7 +24226,7 @@ func (ec *executionContext) marshalOMetricHistoPoint2ᚕᚖgithubᚗcomᚋCluste
return ret
}
-func (ec *executionContext) unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScopeᚄ(ctx context.Context, v any) ([]schema.MetricScope, error) {
+func (ec *executionContext) unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricScopeᚄ(ctx context.Context, v any) ([]schema.MetricScope, error) {
if v == nil {
return nil, nil
}
@@ -21762,7 +24238,7 @@ func (ec *executionContext) unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockp
res := make([]schema.MetricScope, len(vSlice))
for i := range vSlice {
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i))
- res[i], err = ec.unmarshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScope(ctx, vSlice[i])
+ res[i], err = ec.unmarshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricScope(ctx, vSlice[i])
if err != nil {
return nil, err
}
@@ -21770,13 +24246,13 @@ func (ec *executionContext) unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockp
return res, nil
}
-func (ec *executionContext) marshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScopeᚄ(ctx context.Context, sel ast.SelectionSet, v []schema.MetricScope) graphql.Marshaler {
+func (ec *executionContext) marshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricScopeᚄ(ctx context.Context, sel ast.SelectionSet, v []schema.MetricScope) graphql.Marshaler {
if v == nil {
return graphql.Null
}
ret := make(graphql.Array, len(v))
for i := range v {
- ret[i] = ec.marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScope(ctx, sel, v[i])
+ ret[i] = ec.marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricScope(ctx, sel, v[i])
}
for _, e := range ret {
@@ -21808,10 +24284,70 @@ func (ec *executionContext) unmarshalOMetricStatItem2ᚕᚖgithubᚗcomᚋCluste
return res, nil
}
-func (ec *executionContext) marshalOMetricStatistics2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricStatistics(ctx context.Context, sel ast.SelectionSet, v schema.MetricStatistics) graphql.Marshaler {
+func (ec *executionContext) marshalOMetricStatistics2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐMetricStatistics(ctx context.Context, sel ast.SelectionSet, v schema.MetricStatistics) graphql.Marshaler {
return ec._MetricStatistics(ctx, sel, &v)
}
+func (ec *executionContext) unmarshalOMonitoringState2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐNodeState(ctx context.Context, v any) (*schema.NodeState, error) {
+ if v == nil {
+ return nil, nil
+ }
+ tmp, err := graphql.UnmarshalString(v)
+ res := schema.NodeState(tmp)
+ return &res, graphql.ErrorOnPath(ctx, err)
+}
+
+func (ec *executionContext) marshalOMonitoringState2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐNodeState(ctx context.Context, sel ast.SelectionSet, v *schema.NodeState) graphql.Marshaler {
+ if v == nil {
+ return graphql.Null
+ }
+ res := graphql.MarshalString(string(*v))
+ return res
+}
+
+func (ec *executionContext) marshalONode2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐNode(ctx context.Context, sel ast.SelectionSet, v *schema.Node) graphql.Marshaler {
+ if v == nil {
+ return graphql.Null
+ }
+ return ec._Node(ctx, sel, v)
+}
+
+func (ec *executionContext) unmarshalONodeFilter2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodeFilterᚄ(ctx context.Context, v any) ([]*model.NodeFilter, error) {
+ if v == nil {
+ return nil, nil
+ }
+ var vSlice []any
+ if v != nil {
+ vSlice = graphql.CoerceList(v)
+ }
+ var err error
+ res := make([]*model.NodeFilter, len(vSlice))
+ for i := range vSlice {
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i))
+ res[i], err = ec.unmarshalNNodeFilter2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodeFilter(ctx, vSlice[i])
+ if err != nil {
+ return nil, err
+ }
+ }
+ return res, nil
+}
+
+func (ec *executionContext) unmarshalONodeState2ᚖstring(ctx context.Context, v any) (*string, error) {
+ if v == nil {
+ return nil, nil
+ }
+ res, err := graphql.UnmarshalString(v)
+ return &res, graphql.ErrorOnPath(ctx, err)
+}
+
+func (ec *executionContext) marshalONodeState2ᚖstring(ctx context.Context, sel ast.SelectionSet, v *string) graphql.Marshaler {
+ if v == nil {
+ return graphql.Null
+ }
+ res := graphql.MarshalString(*v)
+ return res
+}
+
func (ec *executionContext) unmarshalOOrderByInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐOrderByInput(ctx context.Context, v any) (*model.OrderByInput, error) {
if v == nil {
return nil, nil
@@ -21828,7 +24364,7 @@ func (ec *executionContext) unmarshalOPageRequest2ᚖgithubᚗcomᚋClusterCockp
return &res, graphql.ErrorOnPath(ctx, err)
}
-func (ec *executionContext) marshalOSeries2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐSeriesᚄ(ctx context.Context, sel ast.SelectionSet, v []schema.Series) graphql.Marshaler {
+func (ec *executionContext) marshalOSeries2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐSeriesᚄ(ctx context.Context, sel ast.SelectionSet, v []schema.Series) graphql.Marshaler {
if v == nil {
return graphql.Null
}
@@ -21855,7 +24391,7 @@ func (ec *executionContext) marshalOSeries2ᚕgithubᚗcomᚋClusterCockpitᚋcc
if !isLen1 {
defer wg.Done()
}
- ret[i] = ec.marshalNSeries2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐSeries(ctx, sel, v[i])
+ ret[i] = ec.marshalNSeries2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐSeries(ctx, sel, v[i])
}
if isLen1 {
f(i)
@@ -21891,7 +24427,7 @@ func (ec *executionContext) marshalOSortByAggregate2ᚖgithubᚗcomᚋClusterCoc
return v
}
-func (ec *executionContext) marshalOStatsSeries2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐStatsSeries(ctx context.Context, sel ast.SelectionSet, v *schema.StatsSeries) graphql.Marshaler {
+func (ec *executionContext) marshalOStatsSeries2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐStatsSeries(ctx context.Context, sel ast.SelectionSet, v *schema.StatsSeries) graphql.Marshaler {
if v == nil {
return graphql.Null
}
@@ -21986,7 +24522,7 @@ func (ec *executionContext) marshalOTime2ᚖtimeᚐTime(ctx context.Context, sel
return res
}
-func (ec *executionContext) unmarshalOTimeRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTimeRange(ctx context.Context, v any) (*schema.TimeRange, error) {
+func (ec *executionContext) unmarshalOTimeRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐTimeRange(ctx context.Context, v any) (*schema.TimeRange, error) {
if v == nil {
return nil, nil
}
@@ -21994,7 +24530,7 @@ func (ec *executionContext) unmarshalOTimeRange2ᚖgithubᚗcomᚋClusterCockpit
return &res, graphql.ErrorOnPath(ctx, err)
}
-func (ec *executionContext) marshalOUnit2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐUnit(ctx context.Context, sel ast.SelectionSet, v schema.Unit) graphql.Marshaler {
+func (ec *executionContext) marshalOUnit2githubᚗcomᚋClusterCockpitᚋccᚑlibᚋschemaᚐUnit(ctx context.Context, sel ast.SelectionSet, v schema.Unit) graphql.Marshaler {
return ec._Unit(ctx, sel, &v)
}
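
Note: the MonitoringState/NodeState helpers above follow gqlgen's standard custom-scalar pattern: a scalar declared in schema.graphqls is bound to a Go type, and the executor converts through a primitive on both paths. A minimal, self-contained sketch of that pattern for a hypothetical string-backed scalar (type and names are illustrative, not part of this PR):

package scalars

import (
	"fmt"
	"io"
)

// State is a string-backed Go type bound to a custom GraphQL scalar.
type State string

// UnmarshalGQL implements graphql.Unmarshaler: client input -> Go value.
func (s *State) UnmarshalGQL(v any) error {
	str, ok := v.(string)
	if !ok {
		return fmt.Errorf("State must be a string, got %T", v)
	}
	*s = State(str)
	return nil
}

// MarshalGQL implements graphql.Marshaler: Go value -> response JSON.
func (s State) MarshalGQL(w io.Writer) {
	fmt.Fprintf(w, "%q", string(s))
}
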
diff --git a/internal/graph/model/models.go b/internal/graph/model/models.go
index 8047957..c943700 100644
--- a/internal/graph/model/models.go
+++ b/internal/graph/model/models.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package model
diff --git a/internal/graph/model/models_gen.go b/internal/graph/model/models_gen.go
index 43c4e37..5a32ac9 100644
--- a/internal/graph/model/models_gen.go
+++ b/internal/graph/model/models_gen.go
@@ -8,7 +8,7 @@ import (
"strconv"
"time"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ "github.com/ClusterCockpit/cc-lib/schema"
)
type Count struct {
@@ -50,6 +50,7 @@ type IntRangeOutput struct {
type JobFilter struct {
Tags []string `json:"tags,omitempty"`
+ DbID []string `json:"dbId,omitempty"`
JobID *StringInput `json:"jobId,omitempty"`
ArrayJobID *int `json:"arrayJobId,omitempty"`
User *StringInput `json:"user,omitempty"`
@@ -96,14 +97,16 @@ type JobResultList struct {
}
type JobStats struct {
- Name string `json:"name"`
- Stats *schema.MetricStatistics `json:"stats"`
-}
-
-type JobStatsWithScope struct {
- Name string `json:"name"`
- Scope schema.MetricScope `json:"scope"`
- Stats []*ScopedStats `json:"stats"`
+ ID int `json:"id"`
+ JobID string `json:"jobId"`
+ StartTime int `json:"startTime"`
+ Duration int `json:"duration"`
+ Cluster string `json:"cluster"`
+ SubCluster string `json:"subCluster"`
+ NumNodes int `json:"numNodes"`
+ NumHWThreads *int `json:"numHWThreads,omitempty"`
+ NumAccelerators *int `json:"numAccelerators,omitempty"`
+ Stats []*NamedStats `json:"stats"`
}
type JobsStatistics struct {
@@ -153,12 +156,40 @@ type MetricStatItem struct {
type Mutation struct {
}
+type NamedStats struct {
+ Name string `json:"name"`
+ Data *schema.MetricStatistics `json:"data"`
+}
+
+type NamedStatsWithScope struct {
+ Name string `json:"name"`
+ Scope schema.MetricScope `json:"scope"`
+ Stats []*ScopedStats `json:"stats"`
+}
+
+type NodeFilter struct {
+ Hostname *StringInput `json:"hostname,omitempty"`
+ Cluster *StringInput `json:"cluster,omitempty"`
+ NodeState *string `json:"nodeState,omitempty"`
+ HealthState *schema.NodeState `json:"healthState,omitempty"`
+}
+
type NodeMetrics struct {
Host string `json:"host"`
SubCluster string `json:"subCluster"`
Metrics []*JobMetricWithName `json:"metrics"`
}
+type NodeStateResultList struct {
+ Items []*schema.Node `json:"items"`
+ Count *int `json:"count,omitempty"`
+}
+
+type NodeStats struct {
+ State string `json:"state"`
+ Count int `json:"count"`
+}
+
type NodesResultList struct {
Items []*NodeMetrics `json:"items"`
Offset *int `json:"offset,omitempty"`
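
Note: with the regenerated models, the old combined stats type is split: NamedStats carries the per-metric statistics, and the new JobStats bundles job metadata with a slice of them. A hedged construction example under that reading (all values are placeholders):

// One JobStats row per job; one NamedStats entry per requested metric.
hwThreads := 72
row := &model.JobStats{
	ID:           42,            // DB id (placeholder)
	JobID:        "1337",        // scheduler job id as string
	Cluster:      "testcluster", // placeholder cluster name
	NumNodes:     2,
	NumHWThreads: &hwThreads,
	Stats: []*model.NamedStats{
		{Name: "flops_any", Data: &schema.MetricStatistics{Min: 0.4, Avg: 2.1, Max: 3.9}},
	},
}
_ = row
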
diff --git a/internal/graph/resolver.go b/internal/graph/resolver.go
index 0f4dc06..990014c 100644
--- a/internal/graph/resolver.go
+++ b/internal/graph/resolver.go
@@ -4,7 +4,7 @@ import (
"sync"
"github.com/ClusterCockpit/cc-backend/internal/repository"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/jmoiron/sqlx"
)
@@ -32,7 +32,7 @@ func Init() {
func GetResolverInstance() *Resolver {
if resolverInstance == nil {
- log.Fatal("Authentication module not initialized!")
+ cclog.Fatal("Authentication module not initialized!")
}
return resolverInstance
diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go
index 10e1b55..78a76ef 100644
--- a/internal/graph/schema.resolvers.go
+++ b/internal/graph/schema.resolvers.go
@@ -20,8 +20,8 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher"
"github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/schema"
)
// Partitions is the resolver for the partitions field.
@@ -29,9 +29,15 @@ func (r *clusterResolver) Partitions(ctx context.Context, obj *schema.Cluster) (
return r.Repo.Partitions(obj.Name)
}
+// StartTime is the resolver for the startTime field.
+func (r *jobResolver) StartTime(ctx context.Context, obj *schema.Job) (*time.Time, error) {
+ timestamp := time.Unix(obj.StartTime, 0)
+ return &timestamp, nil
+}
+
// Tags is the resolver for the tags field.
func (r *jobResolver) Tags(ctx context.Context, obj *schema.Job) ([]*schema.Tag, error) {
- return r.Repo.GetTags(repository.GetUserFromContext(ctx), &obj.ID)
+ return r.Repo.GetTags(repository.GetUserFromContext(ctx), obj.ID)
}
// ConcurrentJobs is the resolver for the concurrentJobs field.
@@ -48,7 +54,7 @@ func (r *jobResolver) ConcurrentJobs(ctx context.Context, obj *schema.Job) (*mod
func (r *jobResolver) Footprint(ctx context.Context, obj *schema.Job) ([]*model.FootprintValue, error) {
rawFootprint, err := r.Repo.FetchFootprint(obj)
if err != nil {
- log.Warn("Error while fetching job footprint data")
+ cclog.Warn("Error while fetching job footprint data")
return nil, err
}
@@ -73,7 +79,7 @@ func (r *jobResolver) Footprint(ctx context.Context, obj *schema.Job) ([]*model.
func (r *jobResolver) EnergyFootprint(ctx context.Context, obj *schema.Job) ([]*model.EnergyFootprintValue, error) {
rawEnergyFootprint, err := r.Repo.FetchEnergyFootprint(obj)
if err != nil {
- log.Warn("Error while fetching job energy footprint data")
+ cclog.Warn("Error while fetching job energy footprint data")
return nil, err
}
@@ -137,13 +143,13 @@ func (r *mutationResolver) CreateTag(ctx context.Context, typeArg string, name s
// Create in DB
id, err := r.Repo.CreateTag(typeArg, name, scope)
if err != nil {
- log.Warn("Error while creating tag")
+ cclog.Warn("Error while creating tag")
return nil, err
}
return &schema.Tag{ID: id, Type: typeArg, Name: name, Scope: scope}, nil
} else {
- log.Warnf("Not authorized to create tag with scope: %s", scope)
- return nil, fmt.Errorf("Not authorized to create tag with scope: %s", scope)
+ cclog.Warnf("Not authorized to create tag with scope: %s", scope)
+ return nil, fmt.Errorf("not authorized to create tag with scope: %s", scope)
}
}
@@ -162,7 +168,7 @@ func (r *mutationResolver) AddTagsToJob(ctx context.Context, job string, tagIds
jid, err := strconv.ParseInt(job, 10, 64)
if err != nil {
- log.Warn("Error while adding tag to job")
+ cclog.Warn("Error while adding tag to job")
return nil, err
}
@@ -171,15 +177,15 @@ func (r *mutationResolver) AddTagsToJob(ctx context.Context, job string, tagIds
// Get ID
tid, err := strconv.ParseInt(tagId, 10, 64)
if err != nil {
- log.Warn("Error while parsing tag id")
+ cclog.Warn("Error while parsing tag id")
return nil, err
}
// Test Exists
_, _, tscope, exists := r.Repo.TagInfo(tid)
if !exists {
- log.Warnf("Tag does not exist (ID): %d", tid)
- return nil, fmt.Errorf("Tag does not exist (ID): %d", tid)
+ cclog.Warnf("Tag does not exist (ID): %d", tid)
+ return nil, fmt.Errorf("tag does not exist (ID): %d", tid)
}
// Test Access: Admins && Admin Tag OR Support/Admin and Global Tag OR Everyone && Private Tag
@@ -188,12 +194,12 @@ func (r *mutationResolver) AddTagsToJob(ctx context.Context, job string, tagIds
user.Username == tscope {
// Add to Job
if tags, err = r.Repo.AddTag(user, jid, tid); err != nil {
- log.Warn("Error while adding tag")
+ cclog.Warn("Error while adding tag")
return nil, err
}
} else {
- log.Warnf("Not authorized to add tag: %d", tid)
- return nil, fmt.Errorf("Not authorized to add tag: %d", tid)
+ cclog.Warnf("Not authorized to add tag: %d", tid)
+ return nil, fmt.Errorf("not authorized to add tag: %d", tid)
}
}
@@ -209,7 +215,7 @@ func (r *mutationResolver) RemoveTagsFromJob(ctx context.Context, job string, ta
jid, err := strconv.ParseInt(job, 10, 64)
if err != nil {
- log.Warn("Error while parsing job id")
+ cclog.Warn("Error while parsing job id")
return nil, err
}
@@ -218,15 +224,15 @@ func (r *mutationResolver) RemoveTagsFromJob(ctx context.Context, job string, ta
// Get ID
tid, err := strconv.ParseInt(tagId, 10, 64)
if err != nil {
- log.Warn("Error while parsing tag id")
+ cclog.Warn("Error while parsing tag id")
return nil, err
}
// Test Exists
_, _, tscope, exists := r.Repo.TagInfo(tid)
if !exists {
- log.Warnf("Tag does not exist (ID): %d", tid)
- return nil, fmt.Errorf("Tag does not exist (ID): %d", tid)
+ cclog.Warnf("Tag does not exist (ID): %d", tid)
+ return nil, fmt.Errorf("tag does not exist (ID): %d", tid)
}
// Test Access: Admins && Admin Tag OR Support/Admin and Global Tag OR Everyone && Private Tag
@@ -235,12 +241,12 @@ func (r *mutationResolver) RemoveTagsFromJob(ctx context.Context, job string, ta
user.Username == tscope {
// Remove from Job
if tags, err = r.Repo.RemoveTag(user, jid, tid); err != nil {
- log.Warn("Error while removing tag")
+ cclog.Warn("Error while removing tag")
return nil, err
}
} else {
- log.Warnf("Not authorized to remove tag: %d", tid)
- return nil, fmt.Errorf("Not authorized to remove tag: %d", tid)
+ cclog.Warnf("Not authorized to remove tag: %d", tid)
+ return nil, fmt.Errorf("not authorized to remove tag: %d", tid)
}
}
@@ -261,29 +267,29 @@ func (r *mutationResolver) RemoveTagFromList(ctx context.Context, tagIds []strin
// Get ID
tid, err := strconv.ParseInt(tagId, 10, 64)
if err != nil {
- log.Warn("Error while parsing tag id for removal")
+ cclog.Warn("Error while parsing tag id for removal")
return nil, err
}
// Test Exists
_, _, tscope, exists := r.Repo.TagInfo(tid)
if !exists {
- log.Warnf("Tag does not exist (ID): %d", tid)
- return nil, fmt.Errorf("Tag does not exist (ID): %d", tid)
+ cclog.Warnf("Tag does not exist (ID): %d", tid)
+ return nil, fmt.Errorf("tag does not exist (ID): %d", tid)
}
// Test Access: Admins && Admin Tag OR Everyone && Private Tag
if user.HasRole(schema.RoleAdmin) && (tscope == "global" || tscope == "admin") || user.Username == tscope {
// Remove from DB
if err = r.Repo.RemoveTagById(tid); err != nil {
- log.Warn("Error while removing tag")
+ cclog.Warn("Error while removing tag")
return nil, err
} else {
tags = append(tags, int(tid))
}
} else {
- log.Warnf("Not authorized to remove tag: %d", tid)
- return nil, fmt.Errorf("Not authorized to remove tag: %d", tid)
+ cclog.Warnf("Not authorized to remove tag: %d", tid)
+ return nil, fmt.Errorf("not authorized to remove tag: %d", tid)
}
}
return tags, nil
@@ -292,13 +298,28 @@ func (r *mutationResolver) RemoveTagFromList(ctx context.Context, tagIds []strin
// UpdateConfiguration is the resolver for the updateConfiguration field.
func (r *mutationResolver) UpdateConfiguration(ctx context.Context, name string, value string) (*string, error) {
if err := repository.GetUserCfgRepo().UpdateConfig(name, value, repository.GetUserFromContext(ctx)); err != nil {
- log.Warn("Error while updating user config")
+ cclog.Warn("Error while updating user config")
return nil, err
}
return nil, nil
}
+// NodeState is the resolver for the nodeState field.
+func (r *nodeResolver) NodeState(ctx context.Context, obj *schema.Node) (string, error) {
+ panic(fmt.Errorf("not implemented: NodeState - nodeState"))
+}
+
+// HealthState is the resolver for the HealthState field.
+func (r *nodeResolver) HealthState(ctx context.Context, obj *schema.Node) (schema.NodeState, error) {
+ panic(fmt.Errorf("not implemented: HealthState - HealthState"))
+}
+
+// MetaData is the resolver for the metaData field.
+func (r *nodeResolver) MetaData(ctx context.Context, obj *schema.Node) (any, error) {
+ panic(fmt.Errorf("not implemented: MetaData - metaData"))
+}
+
// Clusters is the resolver for the clusters field.
func (r *queryResolver) Clusters(ctx context.Context) ([]*schema.Cluster, error) {
return archive.Clusters, nil
@@ -323,7 +344,7 @@ func (r *queryResolver) User(ctx context.Context, username string) (*model.User,
func (r *queryResolver) AllocatedNodes(ctx context.Context, cluster string) ([]*model.Count, error) {
data, err := r.Repo.AllocatedNodes(cluster)
if err != nil {
- log.Warn("Error while fetching allocated nodes")
+ cclog.Warn("Error while fetching allocated nodes")
return nil, err
}
@@ -338,17 +359,41 @@ func (r *queryResolver) AllocatedNodes(ctx context.Context, cluster string) ([]*
return counts, nil
}
+// Node is the resolver for the node field.
+func (r *queryResolver) Node(ctx context.Context, id string) (*schema.Node, error) {
+ repo := repository.GetNodeRepository()
+ numericId, err := strconv.ParseInt(id, 10, 64)
+ if err != nil {
+ cclog.Warn("Error while parsing job id")
+ return nil, err
+ }
+ return repo.GetNode(numericId, false)
+}
+
+// Nodes is the resolver for the nodes field.
+func (r *queryResolver) Nodes(ctx context.Context, filter []*model.NodeFilter, order *model.OrderByInput) (*model.NodeStateResultList, error) {
+ repo := repository.GetNodeRepository()
+ nodes, err := repo.QueryNodes(ctx, filter, order)
+ if err != nil {
+ cclog.Warn("Error while querying nodes")
+ return nil, err
+ }
+ count := len(nodes)
+ return &model.NodeStateResultList{Items: nodes, Count: &count}, nil
+}
+
+// NodeStats is the resolver for the nodeStats field.
+func (r *queryResolver) NodeStats(ctx context.Context, filter []*model.NodeFilter) ([]*model.NodeStats, error) {
+ panic(fmt.Errorf("not implemented: NodeStats - nodeStats"))
+}
+
// Job is the resolver for the job field.
func (r *queryResolver) Job(ctx context.Context, id string) (*schema.Job, error) {
numericId, err := strconv.ParseInt(id, 10, 64)
if err != nil {
- log.Warn("Error while parsing job id")
+ cclog.Warn("Error while parsing job id")
return nil, err
}
job, err := r.Repo.FindById(ctx, numericId)
if err != nil {
- log.Warn("Error while finding job by id")
+ cclog.Warn("Error while finding job by id")
return nil, err
}
@@ -375,13 +420,13 @@ func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []str
job, err := r.Query().Job(ctx, id)
if err != nil {
- log.Warn("Error while querying job for metrics")
+ cclog.Warn("Error while querying job for metrics")
return nil, err
}
data, err := metricDataDispatcher.LoadData(job, metrics, scopes, ctx, *resolution)
if err != nil {
- log.Warn("Error while loading job data")
+ cclog.Warn("Error while loading job data")
return nil, err
}
@@ -400,24 +445,24 @@ func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []str
}
// JobStats is the resolver for the jobStats field.
-func (r *queryResolver) JobStats(ctx context.Context, id string, metrics []string) ([]*model.JobStats, error) {
+func (r *queryResolver) JobStats(ctx context.Context, id string, metrics []string) ([]*model.NamedStats, error) {
job, err := r.Query().Job(ctx, id)
if err != nil {
- log.Warnf("Error while querying job %s for metadata", id)
+ cclog.Warnf("Error while querying job %s for metadata", id)
return nil, err
}
data, err := metricDataDispatcher.LoadJobStats(job, metrics, ctx)
if err != nil {
- log.Warnf("Error while loading jobStats data for job id %s", id)
+ cclog.Warnf("Error while loading jobStats data for job id %s", id)
return nil, err
}
- res := []*model.JobStats{}
+ res := []*model.NamedStats{}
for name, md := range data {
- res = append(res, &model.JobStats{
- Name: name,
- Stats: &md,
+ res = append(res, &model.NamedStats{
+ Name: name,
+ Data: &md,
})
}
@@ -425,20 +470,20 @@ func (r *queryResolver) JobStats(ctx context.Context, id string, metrics []strin
}
// ScopedJobStats is the resolver for the scopedJobStats field.
-func (r *queryResolver) ScopedJobStats(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope) ([]*model.JobStatsWithScope, error) {
+func (r *queryResolver) ScopedJobStats(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope) ([]*model.NamedStatsWithScope, error) {
job, err := r.Query().Job(ctx, id)
if err != nil {
- log.Warnf("Error while querying job %s for metadata", id)
+ cclog.Warnf("Error while querying job %s for metadata", id)
return nil, err
}
data, err := metricDataDispatcher.LoadScopedJobStats(job, metrics, scopes, ctx)
if err != nil {
- log.Warnf("Error while loading scopedJobStats data for job id %s", id)
+ cclog.Warnf("Error while loading scopedJobStats data for job id %s", id)
return nil, err
}
- res := make([]*model.JobStatsWithScope, 0)
+ res := make([]*model.NamedStatsWithScope, 0)
for name, scoped := range data {
for scope, stats := range scoped {
@@ -451,7 +496,7 @@ func (r *queryResolver) ScopedJobStats(ctx context.Context, id string, metrics [
})
}
- res = append(res, &model.JobStatsWithScope{
+ res = append(res, &model.NamedStatsWithScope{
Name: name,
Scope: scope,
Stats: mdlStats,
@@ -462,12 +507,6 @@ func (r *queryResolver) ScopedJobStats(ctx context.Context, id string, metrics [
return res, nil
}
-// JobsFootprints is the resolver for the jobsFootprints field.
-func (r *queryResolver) JobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) (*model.Footprints, error) {
- // NOTE: Legacy Naming! This resolver is for normalized histograms in analysis view only - *Not* related to DB "footprint" column!
- return r.jobsFootprints(ctx, filter, metrics)
-}
-
// Jobs is the resolver for the jobs field.
func (r *queryResolver) Jobs(ctx context.Context, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) (*model.JobResultList, error) {
if page == nil {
@@ -479,13 +518,13 @@ func (r *queryResolver) Jobs(ctx context.Context, filter []*model.JobFilter, pag
jobs, err := r.Repo.QueryJobs(ctx, filter, page, order)
if err != nil {
- log.Warn("Error while querying jobs")
+ cclog.Warn("Error while querying jobs")
return nil, err
}
count, err := r.Repo.CountJobs(ctx, filter)
if err != nil {
- log.Warn("Error while counting jobs")
+ cclog.Warn("Error while counting jobs")
return nil, err
}
@@ -501,14 +540,11 @@ func (r *queryResolver) Jobs(ctx context.Context, filter []*model.JobFilter, pag
}
nextJobs, err := r.Repo.QueryJobs(ctx, filter, nextPage, order)
if err != nil {
- log.Warn("Error while querying next jobs")
+ cclog.Warn("Error while querying next jobs")
return nil, err
}
- hasNextPage := false
- if len(nextJobs) == 1 {
- hasNextPage = true
- }
+ hasNextPage := len(nextJobs) == 1
return &model.JobResultList{Items: jobs, Count: &count, HasNextPage: &hasNextPage}, nil
}
@@ -519,8 +555,8 @@ func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobF
var stats []*model.JobsStatistics
// Top Level Defaults
- var defaultDurationBins string = "1h"
- var defaultMetricBins int = 10
+ defaultDurationBins := "1h"
+ defaultMetricBins := 10
if requireField(ctx, "totalJobs") || requireField(ctx, "totalWalltime") || requireField(ctx, "totalNodes") || requireField(ctx, "totalCores") ||
requireField(ctx, "totalAccs") || requireField(ctx, "totalNodeHours") || requireField(ctx, "totalCoreHours") || requireField(ctx, "totalAccHours") {
@@ -589,6 +625,62 @@ func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobF
return stats, nil
}
+// JobsMetricStats is the resolver for the jobsMetricStats field.
+func (r *queryResolver) JobsMetricStats(ctx context.Context, filter []*model.JobFilter, metrics []string) ([]*model.JobStats, error) {
+ // No Paging, Fixed Order by StartTime ASC
+ order := &model.OrderByInput{
+ Field: "startTime",
+ Type: "col",
+ Order: "ASC",
+ }
+
+ jobs, err := r.Repo.QueryJobs(ctx, filter, nil, order)
+ if err != nil {
+ cclog.Warn("Error while querying jobs for comparison")
+ return nil, err
+ }
+
+ res := []*model.JobStats{}
+ for _, job := range jobs {
+ data, err := metricDataDispatcher.LoadJobStats(job, metrics, ctx)
+ if err != nil {
+ cclog.Warnf("Error while loading comparison jobStats data for job id %d", job.JobID)
+ continue
+ // return nil, err
+ }
+
+ sres := []*model.NamedStats{}
+ for name, md := range data {
+ sres = append(sres, &model.NamedStats{
+ Name: name,
+ Data: &md,
+ })
+ }
+
+ numThreadsInt := int(job.NumHWThreads)
+ numAccsInt := int(job.NumAcc)
+ res = append(res, &model.JobStats{
+ ID: int(*job.ID),
+ JobID: strconv.Itoa(int(job.JobID)),
+ StartTime: int(job.StartTime),
+ Duration: int(job.Duration),
+ Cluster: job.Cluster,
+ SubCluster: job.SubCluster,
+ NumNodes: int(job.NumNodes),
+ NumHWThreads: &numThreadsInt,
+ NumAccelerators: &numAccsInt,
+ Stats: sres,
+ })
+ }
+ return res, err
+}
+
+// JobsFootprints is the resolver for the jobsFootprints field.
+func (r *queryResolver) JobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) (*model.Footprints, error) {
+ // NOTE: Legacy Naming! This resolver is for normalized histograms in analysis view only - *Not* related to DB "footprint" column!
+ return r.jobsFootprints(ctx, filter, metrics)
+}
+
// RooflineHeatmap is the resolver for the rooflineHeatmap field.
func (r *queryResolver) RooflineHeatmap(ctx context.Context, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) ([][]float64, error) {
return r.rooflineHeatmap(ctx, filter, rows, cols, minX, minY, maxX, maxY)
@@ -609,7 +701,7 @@ func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, nodes [
data, err := metricDataDispatcher.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx)
if err != nil {
- log.Warn("error while loading node data")
+ cclog.Warn("error while loading node data")
return nil, err
}
@@ -621,7 +713,7 @@ func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, nodes [
}
host.SubCluster, err = archive.GetSubClusterByNode(cluster, hostname)
if err != nil {
- log.Warnf("error in nodeMetrics resolver: %s", err)
+ cclog.Warnf("error in nodeMetrics resolver: %s", err)
}
for metric, scopedMetrics := range metrics {
@@ -665,7 +757,7 @@ func (r *queryResolver) NodeMetricsList(ctx context.Context, cluster string, sub
data, totalNodes, hasNextPage, err := metricDataDispatcher.LoadNodeListData(cluster, subCluster, nodeFilter, metrics, scopes, *resolution, from, to, page, ctx)
if err != nil {
- log.Warn("error while loading node data")
+ cclog.Warn("error while loading node data")
return nil, err
}
@@ -677,7 +769,7 @@ func (r *queryResolver) NodeMetricsList(ctx context.Context, cluster string, sub
}
host.SubCluster, err = archive.GetSubClusterByNode(cluster, hostname)
if err != nil {
- log.Warnf("error in nodeMetrics resolver: %s", err)
+ cclog.Warnf("error in nodeMetrics resolver: %s", err)
}
for metric, scopedMetrics := range metrics {
@@ -723,6 +815,9 @@ func (r *Resolver) MetricValue() generated.MetricValueResolver { return &metricV
// Mutation returns generated.MutationResolver implementation.
func (r *Resolver) Mutation() generated.MutationResolver { return &mutationResolver{r} }
+// Node returns generated.NodeResolver implementation.
+func (r *Resolver) Node() generated.NodeResolver { return &nodeResolver{r} }
+
// Query returns generated.QueryResolver implementation.
func (r *Resolver) Query() generated.QueryResolver { return &queryResolver{r} }
@@ -733,5 +828,6 @@ type clusterResolver struct{ *Resolver }
type jobResolver struct{ *Resolver }
type metricValueResolver struct{ *Resolver }
type mutationResolver struct{ *Resolver }
+type nodeResolver struct{ *Resolver }
type queryResolver struct{ *Resolver }
type subClusterResolver struct{ *Resolver }
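
Note: cc-lib's schema.Job keeps StartTime as a unix int64, so the new jobResolver.StartTime adapter above converts it into the *time.Time that the GraphQL Time! scalar expects. A self-contained sketch of that conversion and its round trip (assuming second resolution, as time.Unix(sec, 0) implies):

package main

import (
	"fmt"
	"time"
)

// toTimePtr mirrors the resolver: unix seconds in, *time.Time out.
func toTimePtr(unix int64) *time.Time {
	t := time.Unix(unix, 0)
	return &t
}

func main() {
	ts := toTimePtr(1700000000)
	fmt.Println(ts.UTC())                // 2023-11-14 22:13:20 +0000 UTC
	fmt.Println(ts.Unix() == 1700000000) // true: the round trip is lossless
}
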
diff --git a/internal/graph/util.go b/internal/graph/util.go
index c2bd73d..38c4914 100644
--- a/internal/graph/util.go
+++ b/internal/graph/util.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package graph
@@ -12,9 +12,8 @@ import (
"github.com/99designs/gqlgen/graphql"
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
- // "github.com/ClusterCockpit/cc-backend/pkg/archive"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/schema"
)
const MAX_JOBS_FOR_ANALYSIS = 500
@@ -28,7 +27,7 @@ func (r *queryResolver) rooflineHeatmap(
) ([][]float64, error) {
jobs, err := r.Repo.QueryJobs(ctx, filter, &model.PageRequest{Page: 1, ItemsPerPage: MAX_JOBS_FOR_ANALYSIS + 1}, nil)
if err != nil {
- log.Error("Error while querying jobs for roofline")
+ cclog.Error("Error while querying jobs for roofline")
return nil, err
}
if len(jobs) > MAX_JOBS_FOR_ANALYSIS {
@@ -56,13 +55,13 @@ func (r *queryResolver) rooflineHeatmap(
jobdata, err := metricDataDispatcher.LoadData(job, []string{"flops_any", "mem_bw"}, []schema.MetricScope{schema.MetricScopeNode}, ctx, 0)
if err != nil {
- log.Errorf("Error while loading roofline metrics for job %d", job.ID)
+ cclog.Errorf("Error while loading roofline metrics for job %d", job.ID)
return nil, err
}
flops_, membw_ := jobdata["flops_any"], jobdata["mem_bw"]
if flops_ == nil && membw_ == nil {
- log.Infof("rooflineHeatmap(): 'flops_any' or 'mem_bw' missing for job %d", job.ID)
+ cclog.Infof("rooflineHeatmap(): 'flops_any' or 'mem_bw' missing for job %d", job.ID)
continue
// return nil, fmt.Errorf("GRAPH/UTIL > 'flops_any' or 'mem_bw' missing for job %d", job.ID)
}
@@ -70,7 +69,7 @@ func (r *queryResolver) rooflineHeatmap(
flops, ok1 := flops_["node"]
membw, ok2 := membw_["node"]
if !ok1 || !ok2 {
- log.Info("rooflineHeatmap() query not implemented for where flops_any or mem_bw not available at 'node' level")
+ cclog.Info("rooflineHeatmap() query not implemented for where flops_any or mem_bw not available at 'node' level")
continue
// TODO/FIXME:
// return nil, errors.New("GRAPH/UTIL > todo: rooflineHeatmap() query not implemented for where flops_any or mem_bw not available at 'node' level")
@@ -105,7 +104,7 @@ func (r *queryResolver) rooflineHeatmap(
func (r *queryResolver) jobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) (*model.Footprints, error) {
jobs, err := r.Repo.QueryJobs(ctx, filter, &model.PageRequest{Page: 1, ItemsPerPage: MAX_JOBS_FOR_ANALYSIS + 1}, nil)
if err != nil {
- log.Error("Error while querying jobs for footprint")
+ cclog.Error("Error while querying jobs for footprint")
return nil, err
}
if len(jobs) > MAX_JOBS_FOR_ANALYSIS {
@@ -128,7 +127,7 @@ func (r *queryResolver) jobsFootprints(ctx context.Context, filter []*model.JobF
}
if err := metricDataDispatcher.LoadAverages(job, metrics, avgs, ctx); err != nil {
- log.Error("Error while loading averages for footprint")
+ cclog.Error("Error while loading averages for footprint")
return nil, err
}
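
Note: rooflineHeatmap places each sample by arithmetic intensity (flops per byte, i.e. flops_any / mem_bw) on the x-axis and achieved flops on the y-axis, which is why both metrics must exist at node scope. A simplified sketch of that binning with linear axes (the real implementation's axis scaling is outside this hunk; bounds and names here are illustrative):

// binSample drops one (flops, mem_bw) node sample into a rows x cols grid.
func binSample(tiles [][]float64, flops, membw, minX, minY, maxX, maxY float64) {
	if flops <= 0 || membw <= 0 {
		return // intensity is undefined for non-positive samples
	}
	x := flops / membw // arithmetic intensity in flop/byte
	y := flops
	if x < minX || x >= maxX || y < minY || y >= maxY {
		return // outside the requested window
	}
	rows, cols := len(tiles), len(tiles[0])
	i := int((y - minY) / (maxY - minY) * float64(rows))
	j := int((x - minX) / (maxX - minX) * float64(cols))
	tiles[i][j]++
}
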
diff --git a/internal/importer/handleImport.go b/internal/importer/handleImport.go
index 623291c..71c4d24 100644
--- a/internal/importer/handleImport.go
+++ b/internal/importer/handleImport.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package importer
@@ -15,8 +15,8 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/schema"
)
// Import all jobs specified as `<path-to-meta.json>:<path-to-data.json>,...`
@@ -31,7 +31,7 @@ func HandleImportFlag(flag string) error {
raw, err := os.ReadFile(files[0])
if err != nil {
- log.Warn("Error while reading metadata file for import")
+ cclog.Warn("Error while reading metadata file for import")
return err
}
@@ -42,15 +42,18 @@ func HandleImportFlag(flag string) error {
}
dec := json.NewDecoder(bytes.NewReader(raw))
dec.DisallowUnknownFields()
- job := schema.JobMeta{BaseJob: schema.JobDefaults}
+ job := schema.Job{
+ Exclusive: 1,
+ MonitoringStatus: schema.MonitoringStatusRunningOrArchiving,
+ }
if err = dec.Decode(&job); err != nil {
- log.Warn("Error while decoding raw json metadata for import")
+ cclog.Warn("Error while decoding raw json metadata for import")
return err
}
raw, err = os.ReadFile(files[1])
if err != nil {
- log.Warn("Error while reading jobdata file for import")
+ cclog.Warn("Error while reading jobdata file for import")
return err
}
@@ -63,7 +66,7 @@ func HandleImportFlag(flag string) error {
dec.DisallowUnknownFields()
jobData := schema.JobData{}
if err = dec.Decode(&jobData); err != nil {
- log.Warn("Error while decoding raw json jobdata for import")
+ cclog.Warn("Error while decoding raw json jobdata for import")
return err
}
@@ -71,7 +74,7 @@ func HandleImportFlag(flag string) error {
sc, err := archive.GetSubCluster(job.Cluster, job.SubCluster)
if err != nil {
- log.Errorf("cannot get subcluster: %s", err.Error())
+ cclog.Errorf("cannot get subcluster: %s", err.Error())
return err
}
@@ -91,7 +94,7 @@ func HandleImportFlag(flag string) error {
job.RawFootprint, err = json.Marshal(job.Footprint)
if err != nil {
- log.Warn("Error while marshaling job footprint")
+ cclog.Warn("Error while marshaling job footprint")
return err
}
@@ -105,7 +108,7 @@ func HandleImportFlag(flag string) error {
if i, err := archive.MetricIndex(sc.MetricConfig, fp); err == nil {
// Note: For DB data, calculate and save as kWh
if sc.MetricConfig[i].Energy == "energy" { // this metric has energy as unit (Joules)
- log.Warnf("Update EnergyFootprint for Job %d and Metric %s on cluster %s: Set to 'energy' in cluster.json: Not implemented, will return 0.0", job.JobID, job.Cluster, fp)
+ cclog.Warnf("Update EnergyFootprint for Job %d and Metric %s on cluster %s: Set to 'energy' in cluster.json: Not implemented, will return 0.0", job.JobID, job.Cluster, fp)
// FIXME: Needs sum as stats type
} else if sc.MetricConfig[i].Energy == "power" { // this metric has power as unit (Watt)
// Energy: Power (in Watts) * Time (in Seconds)
@@ -117,7 +120,7 @@ func HandleImportFlag(flag string) error {
metricEnergy = math.Round(rawEnergy*100.0) / 100.0
}
} else {
- log.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, job.ID)
+ cclog.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, job.ID)
}
job.EnergyFootprint[fp] = metricEnergy
@@ -126,45 +129,45 @@ func HandleImportFlag(flag string) error {
job.Energy = (math.Round(totalEnergy*100.0) / 100.0)
if job.RawEnergyFootprint, err = json.Marshal(job.EnergyFootprint); err != nil {
- log.Warnf("Error while marshaling energy footprint for job INTO BYTES, DB ID '%v'", job.ID)
+ cclog.Warnf("Error while marshaling energy footprint for job INTO BYTES, DB ID '%v'", job.ID)
return err
}
job.RawResources, err = json.Marshal(job.Resources)
if err != nil {
- log.Warn("Error while marshaling job resources")
+ cclog.Warn("Error while marshaling job resources")
return err
}
job.RawMetaData, err = json.Marshal(job.MetaData)
if err != nil {
- log.Warn("Error while marshaling job metadata")
+ cclog.Warn("Error while marshaling job metadata")
return err
}
- if err = SanityChecks(&job.BaseJob); err != nil {
- log.Warn("BaseJob SanityChecks failed")
+ if err = SanityChecks(&job); err != nil {
+ cclog.Warn("BaseJob SanityChecks failed")
return err
}
if err = archive.GetHandle().ImportJob(&job, &jobData); err != nil {
- log.Error("Error while importing job")
+ cclog.Error("Error while importing job")
return err
}
id, err := r.InsertJob(&job)
if err != nil {
- log.Warn("Error while job db insert")
+ cclog.Warn("Error while job db insert")
return err
}
for _, tag := range job.Tags {
if err := r.ImportTag(id, tag.Type, tag.Name, tag.Scope); err != nil {
- log.Error("Error while adding or creating tag on import")
+ cclog.Error("Error while adding or creating tag on import")
return err
}
}
- log.Infof("successfully imported a new job (jobId: %d, cluster: %s, dbid: %d)", job.JobID, job.Cluster, id)
+ cclog.Infof("successfully imported a new job (jobId: %d, cluster: %s, dbid: %d)", job.JobID, job.Cluster, id)
}
return nil
}
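
Note: both HandleImportFlag and initDB derive the per-metric energy footprint the same way for 'power' metrics: average power in watts times job duration in seconds, stored as kWh and rounded to two decimals. The conversion itself is elided from these hunks; a worked sketch of the standard arithmetic it implies:

import "math"

// wattsToKWh: P[W] * t[s] = E[J]; 1 kWh = 3.6e6 J.
func wattsToKWh(avgPowerW float64, durationS int64) float64 {
	joules := avgPowerW * float64(durationS)
	kwh := joules / 3.6e6
	return math.Round(kwh*100.0) / 100.0 // two decimals, as in the diff
}

// Example: 250 W sustained over a 2 h job:
// 250 * 7200 s = 1.8e6 J = 0.5 kWh.
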
diff --git a/internal/importer/importer_test.go b/internal/importer/importer_test.go
index 209b6be..11e7afe 100644
--- a/internal/importer/importer_test.go
+++ b/internal/importer/importer_test.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package importer_test
@@ -16,7 +16,7 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/importer"
"github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
)
func copyFile(s string, d string) error {
@@ -78,7 +78,7 @@ func setup(t *testing.T) *repository.JobRepository {
}
]}`
- log.Init("info", true)
+ cclog.Init("info", true)
tmpdir := t.TempDir()
jobarchive := filepath.Join(tmpdir, "job-archive")
@@ -166,7 +166,7 @@ func TestHandleImportFlag(t *testing.T) {
}
result := readResult(t, testname)
- job, err := r.Find(&result.JobId, &result.Cluster, &result.StartTime)
+ job, err := r.FindCached(&result.JobId, &result.Cluster, &result.StartTime)
if err != nil {
t.Fatal(err)
}
diff --git a/internal/importer/initDB.go b/internal/importer/initDB.go
index 9a2ccdf..179c21c 100644
--- a/internal/importer/initDB.go
+++ b/internal/importer/initDB.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package importer
@@ -13,8 +13,8 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/schema"
)
const (
@@ -27,15 +27,15 @@ const (
func InitDB() error {
r := repository.GetJobRepository()
if err := r.Flush(); err != nil {
- log.Errorf("repository initDB(): %v", err)
+ cclog.Errorf("repository initDB(): %v", err)
return err
}
starttime := time.Now()
- log.Print("Building job table...")
+ cclog.Print("Building job table...")
t, err := r.TransactionInit()
if err != nil {
- log.Warn("Error while initializing SQL transactions")
+ cclog.Warn("Error while initializing SQL transactions")
return err
}
tags := make(map[string]int64)
@@ -60,19 +60,14 @@ func InitDB() error {
}
jobMeta.MonitoringStatus = schema.MonitoringStatusArchivingSuccessful
- job := schema.Job{
- BaseJob: jobMeta.BaseJob,
- StartTime: time.Unix(jobMeta.StartTime, 0),
- StartTimeUnix: jobMeta.StartTime,
- }
sc, err := archive.GetSubCluster(jobMeta.Cluster, jobMeta.SubCluster)
if err != nil {
- log.Errorf("cannot get subcluster: %s", err.Error())
+ cclog.Errorf("cannot get subcluster: %s", err.Error())
return err
}
- job.Footprint = make(map[string]float64)
+ jobMeta.Footprint = make(map[string]float64)
for _, fp := range sc.Footprint {
statType := "avg"
@@ -83,16 +78,16 @@ func InitDB() error {
name := fmt.Sprintf("%s_%s", fp, statType)
- job.Footprint[name] = repository.LoadJobStat(jobMeta, fp, statType)
+ jobMeta.Footprint[name] = repository.LoadJobStat(jobMeta, fp, statType)
}
- job.RawFootprint, err = json.Marshal(job.Footprint)
+ jobMeta.RawFootprint, err = json.Marshal(jobMeta.Footprint)
if err != nil {
- log.Warn("Error while marshaling job footprint")
+ cclog.Warn("Error while marshaling job footprint")
return err
}
- job.EnergyFootprint = make(map[string]float64)
+ jobMeta.EnergyFootprint = make(map[string]float64)
// Total Job Energy Outside Loop
totalEnergy := 0.0
@@ -102,7 +97,7 @@ func InitDB() error {
if i, err := archive.MetricIndex(sc.MetricConfig, fp); err == nil {
// Note: For DB data, calculate and save as kWh
if sc.MetricConfig[i].Energy == "energy" { // this metric has energy as unit (Joules)
- log.Warnf("Update EnergyFootprint for Job %d and Metric %s on cluster %s: Set to 'energy' in cluster.json: Not implemented, will return 0.0", jobMeta.JobID, jobMeta.Cluster, fp)
+ cclog.Warnf("Update EnergyFootprint for Job %d and Metric %s on cluster %s: Set to 'energy' in cluster.json: Not implemented, will return 0.0", jobMeta.JobID, jobMeta.Cluster, fp)
// FIXME: Needs sum as stats type
} else if sc.MetricConfig[i].Energy == "power" { // this metric has power as unit (Watt)
// Energy: Power (in Watts) * Time (in Seconds)
@@ -114,48 +109,48 @@ func InitDB() error {
metricEnergy = math.Round(rawEnergy*100.0) / 100.0
}
} else {
- log.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, jobMeta.ID)
+ cclog.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, jobMeta.ID)
}
- job.EnergyFootprint[fp] = metricEnergy
+ jobMeta.EnergyFootprint[fp] = metricEnergy
totalEnergy += metricEnergy
}
- job.Energy = (math.Round(totalEnergy*100.0) / 100.0)
- if job.RawEnergyFootprint, err = json.Marshal(job.EnergyFootprint); err != nil {
- log.Warnf("Error while marshaling energy footprint for job INTO BYTES, DB ID '%v'", jobMeta.ID)
+ jobMeta.Energy = (math.Round(totalEnergy*100.0) / 100.0)
+ if jobMeta.RawEnergyFootprint, err = json.Marshal(jobMeta.EnergyFootprint); err != nil {
+ cclog.Warnf("Error while marshaling energy footprint for job INTO BYTES, DB ID '%v'", jobMeta.ID)
return err
}
- job.RawResources, err = json.Marshal(job.Resources)
+ jobMeta.RawResources, err = json.Marshal(jobMeta.Resources)
if err != nil {
- log.Errorf("repository initDB(): %v", err)
+ cclog.Errorf("repository initDB(): %v", err)
errorOccured++
continue
}
- job.RawMetaData, err = json.Marshal(job.MetaData)
+ jobMeta.RawMetaData, err = json.Marshal(jobMeta.MetaData)
if err != nil {
- log.Errorf("repository initDB(): %v", err)
+ cclog.Errorf("repository initDB(): %v", err)
errorOccured++
continue
}
- if err := SanityChecks(&job.BaseJob); err != nil {
- log.Errorf("repository initDB(): %v", err)
+ if err := SanityChecks(jobMeta); err != nil {
+ cclog.Errorf("repository initDB(): %v", err)
errorOccured++
continue
}
id, err := r.TransactionAddNamed(t,
- repository.NamedJobInsert, job)
+ repository.NamedJobInsert, jobMeta)
if err != nil {
- log.Errorf("repository initDB(): %v", err)
+ cclog.Errorf("repository initDB(): %v", err)
errorOccured++
continue
}
- for _, tag := range job.Tags {
+ for _, tag := range jobMeta.Tags {
tagstr := tag.Name + ":" + tag.Type
tagId, ok := tags[tagstr]
if !ok {
@@ -163,7 +158,7 @@ func InitDB() error {
addTagQuery,
tag.Name, tag.Type)
if err != nil {
- log.Errorf("Error adding tag: %v", err)
+ cclog.Errorf("Error adding tag: %v", err)
errorOccured++
continue
}
@@ -181,21 +176,21 @@ func InitDB() error {
}
if errorOccured > 0 {
- log.Warnf("Error in import of %d jobs!", errorOccured)
+ cclog.Warnf("Error in import of %d jobs!", errorOccured)
}
r.TransactionEnd(t)
- log.Printf("A total of %d jobs have been registered in %.3f seconds.\n", i, time.Since(starttime).Seconds())
+ cclog.Printf("A total of %d jobs have been registered in %.3f seconds.\n", i, time.Since(starttime).Seconds())
return nil
}
// This function also sets the subcluster if necessary!
-func SanityChecks(job *schema.BaseJob) error {
+func SanityChecks(job *schema.Job) error {
if c := archive.GetCluster(job.Cluster); c == nil {
return fmt.Errorf("no such cluster: %v", job.Cluster)
}
if err := archive.AssignSubCluster(job); err != nil {
- log.Warn("Error while assigning subcluster to job")
+ cclog.Warn("Error while assigning subcluster to job")
return err
}
if !job.State.Valid() {
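
Note: initDB pushes every archived job through a single long-lived transaction: TransactionInit opens it, TransactionAddNamed performs one named insert per job, and TransactionEnd commits. A generic sqlx sketch of that batching pattern (statement and column names are made up, not cc-backend's actual SQL):

func importAll(db *sqlx.DB, jobs []map[string]any) error {
	tx, err := db.Beginx()
	if err != nil {
		return err
	}
	stmt, err := tx.PrepareNamed(
		`INSERT INTO job (job_id, cluster) VALUES (:job_id, :cluster)`)
	if err != nil {
		tx.Rollback()
		return err
	}
	for _, j := range jobs {
		if _, err := stmt.Exec(j); err != nil {
			tx.Rollback()
			return err // the real code counts errors and continues instead
		}
	}
	return tx.Commit() // one commit for the whole batch
}
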
diff --git a/internal/importer/normalize.go b/internal/importer/normalize.go
index d9595a2..bc72cb3 100644
--- a/internal/importer/normalize.go
+++ b/internal/importer/normalize.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package importer
@@ -7,7 +7,7 @@ package importer
import (
"math"
- ccunits "github.com/ClusterCockpit/cc-units"
+ ccunits "github.com/ClusterCockpit/cc-lib/ccUnits"
)
func getNormalizationFactor(v float64) (float64, int) {
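
Note: normalize.go itself only swaps the cc-units import for cc-lib's ccUnits; getNormalizationFactor is unchanged and its body lies outside this hunk. For orientation, a hypothetical stand-in with the contract such helpers typically have, scaling a value into [1, 1000) by powers of ten:

import "math"

// normalizationFactorSketch returns a factor f and exponent e with
// f == 10^e such that v*f lands in [1, 1000). Hypothetical stand-in,
// not the real function's body.
func normalizationFactorSketch(v float64) (float64, int) {
	e := 0
	for v >= 1000.0 {
		v /= 1000.0
		e -= 3
	}
	for v > 0 && v < 1.0 {
		v *= 1000.0
		e += 3
	}
	return math.Pow10(e), e
}
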
diff --git a/internal/importer/normalize_test.go b/internal/importer/normalize_test.go
index b441c11..72017f5 100644
--- a/internal/importer/normalize_test.go
+++ b/internal/importer/normalize_test.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package importer
@@ -8,7 +8,7 @@ import (
"fmt"
"testing"
- ccunits "github.com/ClusterCockpit/cc-units"
+ ccunits "github.com/ClusterCockpit/cc-lib/ccUnits"
)
func TestNormalizeFactor(t *testing.T) {
diff --git a/internal/metricDataDispatcher/dataLoader.go b/internal/metricDataDispatcher/dataLoader.go
index c6cecd8..6307843 100644
--- a/internal/metricDataDispatcher/dataLoader.go
+++ b/internal/metricDataDispatcher/dataLoader.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package metricDataDispatcher
@@ -14,10 +14,10 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/internal/metricdata"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/ClusterCockpit/cc-backend/pkg/lrucache"
- "github.com/ClusterCockpit/cc-backend/pkg/resampler"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/lrucache"
+ "github.com/ClusterCockpit/cc-lib/resampler"
+ "github.com/ClusterCockpit/cc-lib/schema"
)
var cache *lrucache.Cache = lrucache.New(128 * 1024 * 1024)
@@ -68,10 +68,10 @@ func LoadData(job *schema.Job,
jd, err = repo.LoadData(job, metrics, scopes, ctx, resolution)
if err != nil {
if len(jd) != 0 {
- log.Warnf("partial error: %s", err.Error())
+ cclog.Warnf("partial error: %s", err.Error())
// return err, 0, 0 // Reactivating will block archiving on one partial error
} else {
- log.Error("Error while loading job data from metric repository")
+ cclog.Error("Error while loading job data from metric repository")
return err, 0, 0
}
}
@@ -80,15 +80,15 @@ func LoadData(job *schema.Job,
var jd_temp schema.JobData
jd_temp, err = archive.GetHandle().LoadJobData(job)
if err != nil {
- log.Error("Error while loading job data from archive")
+ cclog.Error("Error while loading job data from archive")
return err, 0, 0
}
- //Deep copy the cached archive hashmap
+ // Deep copy the cached archive hashmap
jd = metricdata.DeepCopy(jd_temp)
- //Resampling for archived data.
- //Pass the resolution from frontend here.
+ // Resampling for archived data.
+ // Pass the resolution from frontend here.
for _, v := range jd {
for _, v_ := range v {
timestep := 0
@@ -178,7 +178,7 @@ func LoadData(job *schema.Job,
})
if err, ok := data.(error); ok {
- log.Error("Error in returned dataset")
+ cclog.Error("Error in returned dataset")
return nil, err
}
@@ -203,7 +203,7 @@ func LoadAverages(
stats, err := repo.LoadStats(job, metrics, ctx) // #166 how to handle stats for acc normalization?
if err != nil {
- log.Errorf("Error while loading statistics for job %v (User %v, Project %v)", job.JobID, job.User, job.Project)
+ cclog.Errorf("Error while loading statistics for job %v (User %v, Project %v)", job.JobID, job.User, job.Project)
return err
}
@@ -231,7 +231,6 @@ func LoadScopedJobStats(
scopes []schema.MetricScope,
ctx context.Context,
) (schema.ScopedJobStats, error) {
-
if job.State != schema.JobStateRunning && !config.Keys.DisableArchive {
return archive.LoadScopedStatsFromArchive(job, metrics, scopes)
}
@@ -243,7 +242,7 @@ func LoadScopedJobStats(
scopedStats, err := repo.LoadScopedStats(job, metrics, scopes, ctx)
if err != nil {
- log.Errorf("error while loading scoped statistics for job %d (User %s, Project %s)", job.JobID, job.User, job.Project)
+ cclog.Errorf("error while loading scoped statistics for job %d (User %s, Project %s)", job.JobID, job.User, job.Project)
return nil, err
}
@@ -268,7 +267,7 @@ func LoadJobStats(
stats, err := repo.LoadStats(job, metrics, ctx)
if err != nil {
- log.Errorf("error while loading statistics for job %d (User %s, Project %s)", job.JobID, job.User, job.Project)
+ cclog.Errorf("error while loading statistics for job %d (User %s, Project %s)", job.JobID, job.User, job.Project)
return data, err
}
@@ -318,9 +317,9 @@ func LoadNodeData(
data, err := repo.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx)
if err != nil {
if len(data) != 0 {
- log.Warnf("partial error: %s", err.Error())
+ cclog.Warnf("partial error: %s", err.Error())
} else {
- log.Error("Error while loading node data from metric repository")
+ cclog.Error("Error while loading node data from metric repository")
return nil, err
}
}
@@ -355,9 +354,9 @@ func LoadNodeListData(
data, totalNodes, hasNextPage, err := repo.LoadNodeListData(cluster, subCluster, nodeFilter, metrics, scopes, resolution, from, to, page, ctx)
if err != nil {
if len(data) != 0 {
- log.Warnf("partial error: %s", err.Error())
+ cclog.Warnf("partial error: %s", err.Error())
} else {
- log.Error("Error while loading node data from metric repository")
+ cclog.Error("Error while loading node data from metric repository")
return nil, totalNodes, hasNextPage, err
}
}
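Editor's note: LoadData memoizes results in the package-level lrucache and smuggles failures through the cache as values, unwrapping them with the `data.(error)` assertion seen above. A self-contained sketch of that pattern with a toy cache; the real cc-lib/lrucache compute callback additionally returns a TTL and a size, so failures are not cached forever as they are here.

```go
package main

import (
	"errors"
	"fmt"
)

// toyCache memoizes compute results by key; errors are stored as values,
// matching the data.(error) unwrap in LoadData.
type toyCache struct{ m map[string]any }

func (c *toyCache) Get(key string, compute func() any) any {
	if v, ok := c.m[key]; ok {
		return v
	}
	v := compute()
	c.m[key] = v
	return v
}

func loadData(cache *toyCache, jobID int64) ([]float64, error) {
	data := cache.Get(fmt.Sprintf("job:%d", jobID), func() any {
		if jobID < 0 {
			return errors.New("no such job") // cache the failure as a value
		}
		return []float64{1.0, 2.0, 3.0}
	})
	if err, ok := data.(error); ok {
		return nil, err
	}
	return data.([]float64), nil
}

func main() {
	c := &toyCache{m: map[string]any{}}
	d, err := loadData(c, 42)
	fmt.Println(d, err) // [1 2 3] <nil>
	_, err = loadData(c, -1)
	fmt.Println(err) // no such job
}
```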
diff --git a/internal/metricdata/cc-metric-store.go b/internal/metricdata/cc-metric-store.go
index 7c84d93..36c0dd7 100644
--- a/internal/metricdata/cc-metric-store.go
+++ b/internal/metricdata/cc-metric-store.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package metricdata
@@ -18,8 +18,8 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/schema"
)
type CCMetricStoreConfig struct {
@@ -82,7 +82,7 @@ type ApiMetricData struct {
func (ccms *CCMetricStore) Init(rawConfig json.RawMessage) error {
var config CCMetricStoreConfig
if err := json.Unmarshal(rawConfig, &config); err != nil {
- log.Warn("Error while unmarshaling raw json config")
+ cclog.Warn("Error while unmarshaling raw json config")
return err
}
@@ -129,13 +129,13 @@ func (ccms *CCMetricStore) doRequest(
) (*ApiQueryResponse, error) {
buf := &bytes.Buffer{}
if err := json.NewEncoder(buf).Encode(body); err != nil {
- log.Errorf("Error while encoding request body: %s", err.Error())
+ cclog.Errorf("Error while encoding request body: %s", err.Error())
return nil, err
}
req, err := http.NewRequestWithContext(ctx, http.MethodGet, ccms.queryEndpoint, buf)
if err != nil {
- log.Errorf("Error while building request body: %s", err.Error())
+ cclog.Errorf("Error while building request body: %s", err.Error())
return nil, err
}
if ccms.jwt != "" {
@@ -151,7 +151,7 @@ func (ccms *CCMetricStore) doRequest(
res, err := ccms.client.Do(req)
if err != nil {
- log.Errorf("Error while performing request: %s", err.Error())
+ cclog.Errorf("Error while performing request: %s", err.Error())
return nil, err
}
@@ -161,7 +161,7 @@ func (ccms *CCMetricStore) doRequest(
var resBody ApiQueryResponse
if err := json.NewDecoder(bufio.NewReader(res.Body)).Decode(&resBody); err != nil {
- log.Errorf("Error while decoding result body: %s", err.Error())
+ cclog.Errorf("Error while decoding result body: %s", err.Error())
return nil, err
}
@@ -177,14 +177,14 @@ func (ccms *CCMetricStore) LoadData(
) (schema.JobData, error) {
queries, assignedScope, err := ccms.buildQueries(job, metrics, scopes, resolution)
if err != nil {
- log.Errorf("Error while building queries for jobId %d, Metrics %v, Scopes %v: %s", job.JobID, metrics, scopes, err.Error())
+ cclog.Errorf("Error while building queries for jobId %d, Metrics %v, Scopes %v: %s", job.JobID, metrics, scopes, err.Error())
return nil, err
}
req := ApiQueryRequest{
Cluster: job.Cluster,
- From: job.StartTime.Unix(),
- To: job.StartTime.Add(time.Duration(job.Duration) * time.Second).Unix(),
+ From: job.StartTime,
+ To: job.StartTime + int64(job.Duration),
Queries: queries,
WithStats: true,
WithData: true,
@@ -192,7 +192,7 @@ func (ccms *CCMetricStore) LoadData(
resBody, err := ccms.doRequest(ctx, &req)
if err != nil {
- log.Errorf("Error while performing request: %s", err.Error())
+ cclog.Errorf("Error while performing request: %s", err.Error())
return nil, err
}
@@ -298,7 +298,7 @@ func (ccms *CCMetricStore) buildQueries(
mc := archive.GetMetricConfig(job.Cluster, metric)
if mc == nil {
// return nil, fmt.Errorf("METRICDATA/CCMS > metric '%s' is not specified for cluster '%s'", metric, job.Cluster)
- log.Infof("metric '%s' is not specified for cluster '%s'", metric, job.Cluster)
+ cclog.Infof("metric '%s' is not specified for cluster '%s'", metric, job.Cluster)
continue
}
@@ -570,17 +570,16 @@ func (ccms *CCMetricStore) LoadStats(
metrics []string,
ctx context.Context,
) (map[string]map[string]schema.MetricStatistics, error) {
-
queries, _, err := ccms.buildQueries(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, 0) // #166 Add scope here for analysis view accelerator normalization?
if err != nil {
- log.Errorf("Error while building queries for jobId %d, Metrics %v: %s", job.JobID, metrics, err.Error())
+ cclog.Errorf("Error while building queries for jobId %d, Metrics %v: %s", job.JobID, metrics, err.Error())
return nil, err
}
req := ApiQueryRequest{
Cluster: job.Cluster,
- From: job.StartTime.Unix(),
- To: job.StartTime.Add(time.Duration(job.Duration) * time.Second).Unix(),
+ From: job.StartTime,
+ To: job.StartTime + int64(job.Duration),
Queries: queries,
WithStats: true,
WithData: false,
@@ -588,7 +587,7 @@ func (ccms *CCMetricStore) LoadStats(
resBody, err := ccms.doRequest(ctx, &req)
if err != nil {
- log.Errorf("Error while performing request: %s", err.Error())
+ cclog.Errorf("Error while performing request: %s", err.Error())
return nil, err
}
@@ -598,7 +597,7 @@ func (ccms *CCMetricStore) LoadStats(
metric := ccms.toLocalName(query.Metric)
data := res[0]
if data.Error != nil {
- log.Errorf("fetching %s for node %s failed: %s", metric, query.Hostname, *data.Error)
+ cclog.Errorf("fetching %s for node %s failed: %s", metric, query.Hostname, *data.Error)
continue
}
@@ -609,7 +608,7 @@ func (ccms *CCMetricStore) LoadStats(
}
if data.Avg.IsNaN() || data.Min.IsNaN() || data.Max.IsNaN() {
- log.Warnf("fetching %s for node %s failed: one of avg/min/max is NaN", metric, query.Hostname)
+ cclog.Warnf("fetching %s for node %s failed: one of avg/min/max is NaN", metric, query.Hostname)
continue
}
@@ -632,14 +631,14 @@ func (ccms *CCMetricStore) LoadScopedStats(
) (schema.ScopedJobStats, error) {
queries, assignedScope, err := ccms.buildQueries(job, metrics, scopes, 0)
if err != nil {
- log.Errorf("Error while building queries for jobId %d, Metrics %v, Scopes %v: %s", job.JobID, metrics, scopes, err.Error())
+ cclog.Errorf("Error while building queries for jobId %d, Metrics %v, Scopes %v: %s", job.JobID, metrics, scopes, err.Error())
return nil, err
}
req := ApiQueryRequest{
Cluster: job.Cluster,
- From: job.StartTime.Unix(),
- To: job.StartTime.Add(time.Duration(job.Duration) * time.Second).Unix(),
+ From: job.StartTime,
+ To: job.StartTime + int64(job.Duration),
Queries: queries,
WithStats: true,
WithData: false,
@@ -647,7 +646,7 @@ func (ccms *CCMetricStore) LoadScopedStats(
resBody, err := ccms.doRequest(ctx, &req)
if err != nil {
- log.Errorf("Error while performing request: %s", err.Error())
+ cclog.Errorf("Error while performing request: %s", err.Error())
return nil, err
}
@@ -748,7 +747,7 @@ func (ccms *CCMetricStore) LoadNodeData(
resBody, err := ccms.doRequest(ctx, &req)
if err != nil {
- log.Errorf("Error while performing request: %s", err.Error())
+ cclog.Errorf("Error while performing request: %s", err.Error())
return nil, err
}
@@ -816,7 +815,6 @@ func (ccms *CCMetricStore) LoadNodeListData(
page *model.PageRequest,
ctx context.Context,
) (map[string]schema.JobData, int, bool, error) {
-
// 0) Init additional vars
var totalNodes int = 0
var hasNextPage bool = false
@@ -852,7 +850,7 @@ func (ccms *CCMetricStore) LoadNodeListData(
if len(nodes) > page.ItemsPerPage {
start := (page.Page - 1) * page.ItemsPerPage
end := start + page.ItemsPerPage
- if end > len(nodes) {
+ if end >= len(nodes) {
end = len(nodes)
hasNextPage = false
} else {
@@ -865,7 +863,7 @@ func (ccms *CCMetricStore) LoadNodeListData(
queries, assignedScope, err := ccms.buildNodeQueries(cluster, subCluster, nodes, metrics, scopes, resolution)
if err != nil {
- log.Errorf("Error while building node queries for Cluster %s, SubCLuster %s, Metrics %v, Scopes %v: %s", cluster, subCluster, metrics, scopes, err.Error())
+ cclog.Errorf("Error while building node queries for Cluster %s, SubCLuster %s, Metrics %v, Scopes %v: %s", cluster, subCluster, metrics, scopes, err.Error())
return nil, totalNodes, hasNextPage, err
}
@@ -880,7 +878,7 @@ func (ccms *CCMetricStore) LoadNodeListData(
resBody, err := ccms.doRequest(ctx, &req)
if err != nil {
- log.Errorf("Error while performing request: %s", err.Error())
+ cclog.Errorf("Error while performing request: %s", err.Error())
return nil, totalNodes, hasNextPage, err
}
@@ -975,7 +973,6 @@ func (ccms *CCMetricStore) buildNodeQueries(
scopes []schema.MetricScope,
resolution int,
) ([]ApiQuery, []schema.MetricScope, error) {
-
queries := make([]ApiQuery, 0, len(metrics)*len(scopes)*len(nodes))
assignedScope := []schema.MetricScope{}
@@ -985,7 +982,7 @@ func (ccms *CCMetricStore) buildNodeQueries(
if subCluster != "" {
subClusterTopol, scterr = archive.GetSubCluster(cluster, subCluster)
if scterr != nil {
- log.Errorf("could not load cluster %s subCluster %s topology: %s", cluster, subCluster, scterr.Error())
+ cclog.Errorf("could not load cluster %s subCluster %s topology: %s", cluster, subCluster, scterr.Error())
return nil, nil, scterr
}
}
@@ -995,7 +992,7 @@ func (ccms *CCMetricStore) buildNodeQueries(
mc := archive.GetMetricConfig(cluster, metric)
if mc == nil {
// return nil, fmt.Errorf("METRICDATA/CCMS > metric '%s' is not specified for cluster '%s'", metric, cluster)
- log.Warnf("metric '%s' is not specified for cluster '%s'", metric, cluster)
+ cclog.Warnf("metric '%s' is not specified for cluster '%s'", metric, cluster)
continue
}
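Editor's note: the From/To changes above follow schema.Job.StartTime switching from time.Time to a Unix epoch (int64), so window boundaries become plain integer arithmetic. A before/after sketch of the equivalence:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	duration := int32(3600) // job duration in seconds

	// Before: StartTime was a time.Time.
	startT := time.Unix(1700000000, 0)
	fromOld := startT.Unix()
	toOld := startT.Add(time.Duration(duration) * time.Second).Unix()

	// After: StartTime is already the epoch; no time.Time round-trip needed.
	start := int64(1700000000)
	fromNew := start
	toNew := start + int64(duration)

	fmt.Println(fromOld == fromNew, toOld == toNew) // true true
}
```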
diff --git a/internal/metricdata/influxdb-v2.go b/internal/metricdata/influxdb-v2.go
deleted file mode 100644
index c53dad3..0000000
--- a/internal/metricdata/influxdb-v2.go
+++ /dev/null
@@ -1,575 +0,0 @@
-// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
-// Use of this source code is governed by a MIT-style
-// license that can be found in the LICENSE file.
-package metricdata
-
-import (
- "context"
- "crypto/tls"
- "encoding/json"
- "errors"
- "fmt"
- "math"
- "sort"
- "strings"
- "time"
-
- "github.com/ClusterCockpit/cc-backend/internal/graph/model"
- "github.com/ClusterCockpit/cc-backend/pkg/archive"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
- influxdb2 "github.com/influxdata/influxdb-client-go/v2"
- influxdb2Api "github.com/influxdata/influxdb-client-go/v2/api"
-)
-
-type InfluxDBv2DataRepositoryConfig struct {
- Url string `json:"url"`
- Token string `json:"token"`
- Bucket string `json:"bucket"`
- Org string `json:"org"`
- SkipTls bool `json:"skiptls"`
-}
-
-type InfluxDBv2DataRepository struct {
- client influxdb2.Client
- queryClient influxdb2Api.QueryAPI
- bucket, measurement string
-}
-
-func (idb *InfluxDBv2DataRepository) Init(rawConfig json.RawMessage) error {
- var config InfluxDBv2DataRepositoryConfig
- if err := json.Unmarshal(rawConfig, &config); err != nil {
- log.Warn("Error while unmarshaling raw json config")
- return err
- }
-
- idb.client = influxdb2.NewClientWithOptions(config.Url, config.Token, influxdb2.DefaultOptions().SetTLSConfig(&tls.Config{InsecureSkipVerify: config.SkipTls}))
- idb.queryClient = idb.client.QueryAPI(config.Org)
- idb.bucket = config.Bucket
-
- return nil
-}
-
-func (idb *InfluxDBv2DataRepository) formatTime(t time.Time) string {
- return t.Format(time.RFC3339) // Like “2006-01-02T15:04:05Z07:00”
-}
-
-func (idb *InfluxDBv2DataRepository) epochToTime(epoch int64) time.Time {
- return time.Unix(epoch, 0)
-}
-
-func (idb *InfluxDBv2DataRepository) LoadData(
- job *schema.Job,
- metrics []string,
- scopes []schema.MetricScope,
- ctx context.Context,
- resolution int) (schema.JobData, error) {
-
- log.Infof("InfluxDB 2 Backend: Resolution Scaling not Implemented, will return default timestep. Requested Resolution %d", resolution)
-
- measurementsConds := make([]string, 0, len(metrics))
- for _, m := range metrics {
- measurementsConds = append(measurementsConds, fmt.Sprintf(`r["_measurement"] == "%s"`, m))
- }
- measurementsCond := strings.Join(measurementsConds, " or ")
-
- hostsConds := make([]string, 0, len(job.Resources))
- for _, h := range job.Resources {
- if h.HWThreads != nil || h.Accelerators != nil {
- // TODO
- return nil, errors.New("METRICDATA/INFLUXV2 > the InfluxDB metric data repository does not yet support HWThreads or Accelerators")
- }
- hostsConds = append(hostsConds, fmt.Sprintf(`r["hostname"] == "%s"`, h.Hostname))
- }
- hostsCond := strings.Join(hostsConds, " or ")
-
- jobData := make(schema.JobData) // Empty Schema: map[FIELD]map[SCOPE]<*JobMetric>METRIC
- // Requested Scopes
- for _, scope := range scopes {
- query := ""
- switch scope {
- case "node":
- // Get Finest Granularity, Groupy By Measurement and Hostname (== Metric / Node), Calculate Mean for 60s windows <-- Resolution could be added here?
- // log.Info("Scope 'node' requested. ")
- query = fmt.Sprintf(`
- from(bucket: "%s")
- |> range(start: %s, stop: %s)
- |> filter(fn: (r) => (%s) and (%s) )
- |> drop(columns: ["_start", "_stop"])
- |> group(columns: ["hostname", "_measurement"])
- |> aggregateWindow(every: 60s, fn: mean)
- |> drop(columns: ["_time"])`,
- idb.bucket,
- idb.formatTime(job.StartTime), idb.formatTime(idb.epochToTime(job.StartTimeUnix+int64(job.Duration)+int64(1))),
- measurementsCond, hostsCond)
- case "socket":
- log.Info("Scope 'socket' requested, but not yet supported: Will return 'node' scope only. ")
- continue
- case "core":
- log.Info(" Scope 'core' requested, but not yet supported: Will return 'node' scope only. ")
- continue
- // Get Finest Granularity only, Set NULL to 0.0
- // query = fmt.Sprintf(`
- // from(bucket: "%s")
- // |> range(start: %s, stop: %s)
- // |> filter(fn: (r) => %s )
- // |> filter(fn: (r) => %s )
- // |> drop(columns: ["_start", "_stop", "cluster"])
- // |> map(fn: (r) => (if exists r._value then {r with _value: r._value} else {r with _value: 0.0}))`,
- // idb.bucket,
- // idb.formatTime(job.StartTime), idb.formatTime(idb.epochToTime(job.StartTimeUnix + int64(job.Duration) + int64(1) )),
- // measurementsCond, hostsCond)
- case "hwthread":
- log.Info(" Scope 'hwthread' requested, but not yet supported: Will return 'node' scope only. ")
- continue
- case "accelerator":
- log.Info(" Scope 'accelerator' requested, but not yet supported: Will return 'node' scope only. ")
- continue
- default:
- log.Infof("Unknown scope '%s' requested: Will return 'node' scope.", scope)
- continue
- // return nil, errors.New("METRICDATA/INFLUXV2 > the InfluxDB metric data repository does not yet support other scopes than 'node'")
- }
-
- rows, err := idb.queryClient.Query(ctx, query)
- if err != nil {
- log.Error("Error while performing query")
- return nil, err
- }
-
- // Init Metrics: Only Node level now -> TODO: Matching /check on scope level ...
- for _, metric := range metrics {
- jobMetric, ok := jobData[metric]
- if !ok {
- mc := archive.GetMetricConfig(job.Cluster, metric)
- jobMetric = map[schema.MetricScope]*schema.JobMetric{
- scope: { // uses scope var from above!
- Unit: mc.Unit,
- Timestep: mc.Timestep,
- Series: make([]schema.Series, 0, len(job.Resources)),
- StatisticsSeries: nil, // Should be: &schema.StatsSeries{},
- },
- }
- }
- jobData[metric] = jobMetric
- }
-
- // Process Result: Time-Data
- field, host, hostSeries := "", "", schema.Series{}
- // typeId := 0
- switch scope {
- case "node":
- for rows.Next() {
- row := rows.Record()
- if host == "" || host != row.ValueByKey("hostname").(string) || rows.TableChanged() {
- if host != "" {
- // Append Series before reset
- jobData[field][scope].Series = append(jobData[field][scope].Series, hostSeries)
- }
- field, host = row.Measurement(), row.ValueByKey("hostname").(string)
- hostSeries = schema.Series{
- Hostname: host,
- Statistics: schema.MetricStatistics{}, //TODO Add Statistics
- Data: make([]schema.Float, 0),
- }
- }
- val, ok := row.Value().(float64)
- if ok {
- hostSeries.Data = append(hostSeries.Data, schema.Float(val))
- } else {
- hostSeries.Data = append(hostSeries.Data, schema.Float(0))
- }
- }
- case "socket":
- continue
- case "accelerator":
- continue
- case "hwthread":
- // See below @ core
- continue
- case "core":
- continue
- // Include Series.Id in hostSeries
- // for rows.Next() {
- // row := rows.Record()
- // if ( host == "" || host != row.ValueByKey("hostname").(string) || typeId != row.ValueByKey("type-id").(int) || rows.TableChanged() ) {
- // if ( host != "" ) {
- // // Append Series before reset
- // jobData[field][scope].Series = append(jobData[field][scope].Series, hostSeries)
- // }
- // field, host, typeId = row.Measurement(), row.ValueByKey("hostname").(string), row.ValueByKey("type-id").(int)
- // hostSeries = schema.Series{
- // Hostname: host,
- // Id: &typeId,
- // Statistics: nil,
- // Data: make([]schema.Float, 0),
- // }
- // }
- // val := row.Value().(float64)
- // hostSeries.Data = append(hostSeries.Data, schema.Float(val))
- // }
- default:
- log.Infof("Unknown scope '%s' requested: Will return 'node' scope.", scope)
- continue
- // return nil, errors.New("the InfluxDB metric data repository does not yet support other scopes than 'node, core'")
- }
- // Append last Series
- jobData[field][scope].Series = append(jobData[field][scope].Series, hostSeries)
- }
-
- // Get Stats
- stats, err := idb.LoadStats(job, metrics, ctx)
- if err != nil {
- log.Warn("Error while loading statistics")
- return nil, err
- }
-
- for _, scope := range scopes {
- if scope == "node" { // No 'socket/core' support yet
- for metric, nodes := range stats {
- for node, stats := range nodes {
- for index, _ := range jobData[metric][scope].Series {
- if jobData[metric][scope].Series[index].Hostname == node {
- jobData[metric][scope].Series[index].Statistics = schema.MetricStatistics{Avg: stats.Avg, Min: stats.Min, Max: stats.Max}
- }
- }
- }
- }
- }
- }
-
- return jobData, nil
-}
-
-func (idb *InfluxDBv2DataRepository) LoadStats(
- job *schema.Job,
- metrics []string,
- ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) {
-
- stats := map[string]map[string]schema.MetricStatistics{}
-
- hostsConds := make([]string, 0, len(job.Resources))
- for _, h := range job.Resources {
- if h.HWThreads != nil || h.Accelerators != nil {
- // TODO
- return nil, errors.New("METRICDATA/INFLUXV2 > the InfluxDB metric data repository does not yet support HWThreads or Accelerators")
- }
- hostsConds = append(hostsConds, fmt.Sprintf(`r["hostname"] == "%s"`, h.Hostname))
- }
- hostsCond := strings.Join(hostsConds, " or ")
-
- // lenMet := len(metrics)
-
- for _, metric := range metrics {
- // log.Debugf("<< You are here: %s (Index %d of %d metrics)", metric, index, lenMet)
-
- query := fmt.Sprintf(`
- data = from(bucket: "%s")
- |> range(start: %s, stop: %s)
- |> filter(fn: (r) => r._measurement == "%s" and r._field == "value" and (%s))
- union(tables: [data |> mean(column: "_value") |> set(key: "_field", value: "avg"),
- data |> min(column: "_value") |> set(key: "_field", value: "min"),
- data |> max(column: "_value") |> set(key: "_field", value: "max")])
- |> pivot(rowKey: ["hostname"], columnKey: ["_field"], valueColumn: "_value")
- |> group()`,
- idb.bucket,
- idb.formatTime(job.StartTime), idb.formatTime(idb.epochToTime(job.StartTimeUnix+int64(job.Duration)+int64(1))),
- metric, hostsCond)
-
- rows, err := idb.queryClient.Query(ctx, query)
- if err != nil {
- log.Error("Error while performing query")
- return nil, err
- }
-
- nodes := map[string]schema.MetricStatistics{}
- for rows.Next() {
- row := rows.Record()
- host := row.ValueByKey("hostname").(string)
-
- avg, avgok := row.ValueByKey("avg").(float64)
- if !avgok {
- // log.Debugf(">> Assertion error for metric %s, statistic AVG. Expected 'float64', got %v", metric, avg)
- avg = 0.0
- }
- min, minok := row.ValueByKey("min").(float64)
- if !minok {
- // log.Debugf(">> Assertion error for metric %s, statistic MIN. Expected 'float64', got %v", metric, min)
- min = 0.0
- }
- max, maxok := row.ValueByKey("max").(float64)
- if !maxok {
- // log.Debugf(">> Assertion error for metric %s, statistic MAX. Expected 'float64', got %v", metric, max)
- max = 0.0
- }
-
- nodes[host] = schema.MetricStatistics{
- Avg: avg,
- Min: min,
- Max: max,
- }
- }
- stats[metric] = nodes
- }
-
- return stats, nil
-}
-
-// Used in Job-View StatsTable
-// UNTESTED
-func (idb *InfluxDBv2DataRepository) LoadScopedStats(
- job *schema.Job,
- metrics []string,
- scopes []schema.MetricScope,
- ctx context.Context) (schema.ScopedJobStats, error) {
-
- // Assumption: idb.loadData() only returns series node-scope - use node scope for statsTable
- scopedJobStats := make(schema.ScopedJobStats)
- data, err := idb.LoadData(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, ctx, 0 /*resolution here*/)
- if err != nil {
- log.Warn("Error while loading job for scopedJobStats")
- return nil, err
- }
-
- for metric, metricData := range data {
- for _, scope := range scopes {
- if scope != schema.MetricScopeNode {
- logOnce.Do(func() {
- log.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
- })
- continue
- }
-
- if _, ok := scopedJobStats[metric]; !ok {
- scopedJobStats[metric] = make(map[schema.MetricScope][]*schema.ScopedStats)
- }
-
- if _, ok := scopedJobStats[metric][scope]; !ok {
- scopedJobStats[metric][scope] = make([]*schema.ScopedStats, 0)
- }
-
- for _, series := range metricData[scope].Series {
- scopedJobStats[metric][scope] = append(scopedJobStats[metric][scope], &schema.ScopedStats{
- Hostname: series.Hostname,
- Data: &series.Statistics,
- })
- }
- }
- }
-
- return scopedJobStats, nil
-}
-
-// Used in Systems-View @ Node-Overview
-// UNTESTED
-func (idb *InfluxDBv2DataRepository) LoadNodeData(
- cluster string,
- metrics, nodes []string,
- scopes []schema.MetricScope,
- from, to time.Time,
- ctx context.Context) (map[string]map[string][]*schema.JobMetric, error) {
-
- // Note: scopes[] Array will be ignored, only return node scope
-
- // CONVERT ARGS TO INFLUX
- measurementsConds := make([]string, 0)
- for _, m := range metrics {
- measurementsConds = append(measurementsConds, fmt.Sprintf(`r["_measurement"] == "%s"`, m))
- }
- measurementsCond := strings.Join(measurementsConds, " or ")
-
- hostsConds := make([]string, 0)
- if nodes == nil {
- var allNodes []string
- subClusterNodeLists := archive.NodeLists[cluster]
- for _, nodeList := range subClusterNodeLists {
- allNodes = append(nodes, nodeList.PrintList()...)
- }
- for _, node := range allNodes {
- nodes = append(nodes, node)
- hostsConds = append(hostsConds, fmt.Sprintf(`r["hostname"] == "%s"`, node))
- }
- } else {
- for _, node := range nodes {
- hostsConds = append(hostsConds, fmt.Sprintf(`r["hostname"] == "%s"`, node))
- }
- }
- hostsCond := strings.Join(hostsConds, " or ")
-
- // BUILD AND PERFORM QUERY
- query := fmt.Sprintf(`
- from(bucket: "%s")
- |> range(start: %s, stop: %s)
- |> filter(fn: (r) => (%s) and (%s) )
- |> drop(columns: ["_start", "_stop"])
- |> group(columns: ["hostname", "_measurement"])
- |> aggregateWindow(every: 60s, fn: mean)
- |> drop(columns: ["_time"])`,
- idb.bucket,
- idb.formatTime(from), idb.formatTime(to),
- measurementsCond, hostsCond)
-
- rows, err := idb.queryClient.Query(ctx, query)
- if err != nil {
- log.Error("Error while performing query")
- return nil, err
- }
-
- // HANDLE QUERY RETURN
- // Collect Float Arrays for Node@Metric -> No Scope Handling!
- influxData := make(map[string]map[string][]schema.Float)
- for rows.Next() {
- row := rows.Record()
- host, field := row.ValueByKey("hostname").(string), row.Measurement()
-
- influxHostData, ok := influxData[host]
- if !ok {
- influxHostData = make(map[string][]schema.Float)
- influxData[host] = influxHostData
- }
-
- influxFieldData, ok := influxData[host][field]
- if !ok {
- influxFieldData = make([]schema.Float, 0)
- influxData[host][field] = influxFieldData
- }
-
- val, ok := row.Value().(float64)
- if ok {
- influxData[host][field] = append(influxData[host][field], schema.Float(val))
- } else {
- influxData[host][field] = append(influxData[host][field], schema.Float(0))
- }
- }
-
- // BUILD FUNCTION RETURN
- data := make(map[string]map[string][]*schema.JobMetric)
- for node, metricData := range influxData {
-
- nodeData, ok := data[node]
- if !ok {
- nodeData = make(map[string][]*schema.JobMetric)
- data[node] = nodeData
- }
-
- for metric, floatArray := range metricData {
- avg, min, max := 0.0, 0.0, 0.0
- for _, val := range floatArray {
- avg += float64(val)
- min = math.Min(min, float64(val))
- max = math.Max(max, float64(val))
- }
-
- stats := schema.MetricStatistics{
- Avg: (math.Round((avg/float64(len(floatArray)))*100) / 100),
- Min: (math.Round(min*100) / 100),
- Max: (math.Round(max*100) / 100),
- }
-
- mc := archive.GetMetricConfig(cluster, metric)
- nodeData[metric] = append(nodeData[metric], &schema.JobMetric{
- Unit: mc.Unit,
- Timestep: mc.Timestep,
- Series: []schema.Series{
- {
- Hostname: node,
- Statistics: stats,
- Data: floatArray,
- },
- },
- })
- }
- }
-
- return data, nil
-}
-
-// Used in Systems-View @ Node-List
-// UNTESTED
-func (idb *InfluxDBv2DataRepository) LoadNodeListData(
- cluster, subCluster, nodeFilter string,
- metrics []string,
- scopes []schema.MetricScope,
- resolution int,
- from, to time.Time,
- page *model.PageRequest,
- ctx context.Context,
-) (map[string]schema.JobData, int, bool, error) {
-
- // Assumption: idb.loadData() only returns series node-scope - use node scope for NodeList
-
- // 0) Init additional vars
- var totalNodes int = 0
- var hasNextPage bool = false
-
- // 1) Get list of all nodes
- var nodes []string
- if subCluster != "" {
- scNodes := archive.NodeLists[cluster][subCluster]
- nodes = scNodes.PrintList()
- } else {
- subClusterNodeLists := archive.NodeLists[cluster]
- for _, nodeList := range subClusterNodeLists {
- nodes = append(nodes, nodeList.PrintList()...)
- }
- }
-
- // 2) Filter nodes
- if nodeFilter != "" {
- filteredNodes := []string{}
- for _, node := range nodes {
- if strings.Contains(node, nodeFilter) {
- filteredNodes = append(filteredNodes, node)
- }
- }
- nodes = filteredNodes
- }
-
- // 2.1) Count total nodes && Sort nodes -> Sorting invalidated after return ...
- totalNodes = len(nodes)
- sort.Strings(nodes)
-
- // 3) Apply paging
- if len(nodes) > page.ItemsPerPage {
- start := (page.Page - 1) * page.ItemsPerPage
- end := start + page.ItemsPerPage
- if end > len(nodes) {
- end = len(nodes)
- hasNextPage = false
- } else {
- hasNextPage = true
- }
- nodes = nodes[start:end]
- }
-
- // 4) Fetch And Convert Data, use idb.LoadNodeData() for query
-
- rawNodeData, err := idb.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx)
- if err != nil {
- log.Error(fmt.Sprintf("Error while loading influx nodeData for nodeListData %#v\n", err))
- return nil, totalNodes, hasNextPage, err
- }
-
- data := make(map[string]schema.JobData)
- for node, nodeData := range rawNodeData {
- // Init Nested Map Data Structures If Not Found
- hostData, ok := data[node]
- if !ok {
- hostData = make(schema.JobData)
- data[node] = hostData
- }
-
- for metric, nodeMetricData := range nodeData {
- metricData, ok := hostData[metric]
- if !ok {
- metricData = make(map[schema.MetricScope]*schema.JobMetric)
- data[node][metric] = metricData
- }
-
- data[node][metric][schema.MetricScopeNode] = nodeMetricData[0] // Only Node Scope Returned from loadNodeData
- }
- }
-
- return data, totalNodes, hasNextPage, nil
-}
diff --git a/internal/metricdata/metricdata.go b/internal/metricdata/metricdata.go
index f30d837..aa3a87c 100644
--- a/internal/metricdata/metricdata.go
+++ b/internal/metricdata/metricdata.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package metricdata
@@ -12,8 +12,8 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/schema"
)
type MetricDataRepository interface {
@@ -46,7 +46,7 @@ func Init() error {
Kind string `json:"kind"`
}
if err := json.Unmarshal(cluster.MetricDataRepository, &kind); err != nil {
- log.Warn("Error while unmarshaling raw json MetricDataRepository")
+ cclog.Warn("Error while unmarshaling raw json MetricDataRepository")
return err
}
@@ -54,8 +54,6 @@ func Init() error {
switch kind.Kind {
case "cc-metric-store":
mdr = &CCMetricStore{}
- case "influxdb":
- mdr = &InfluxDBv2DataRepository{}
case "prometheus":
mdr = &PrometheusDataRepository{}
case "test":
@@ -65,7 +63,7 @@ func Init() error {
}
if err := mdr.Init(cluster.MetricDataRepository); err != nil {
- log.Errorf("Error initializing MetricDataRepository %v for cluster %v", kind.Kind, cluster.Name)
+ cclog.Errorf("Error initializing MetricDataRepository %v for cluster %v", kind.Kind, cluster.Name)
return err
}
metricDataRepos[cluster.Name] = mdr
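Editor's note: with the InfluxDB backend removed, Init's two-phase decode now dispatches between cc-metric-store, prometheus, and test. A minimal sketch of that discriminator pattern, peeking at "kind" before handing the same raw JSON to the selected backend; the stand-in types here are illustrative.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// repo stands in for the MetricDataRepository interface.
type repo interface{ Init(raw json.RawMessage) error }

type ccms struct {
	URL string `json:"url"`
}

// Init decodes the backend-specific fields from the full config blob.
func (c *ccms) Init(raw json.RawMessage) error { return json.Unmarshal(raw, c) }

func initRepo(raw json.RawMessage) (repo, error) {
	// Phase 1: decode only the discriminator.
	var kind struct {
		Kind string `json:"kind"`
	}
	if err := json.Unmarshal(raw, &kind); err != nil {
		return nil, err
	}
	// Phase 2: let the chosen backend decode its own config.
	var r repo
	switch kind.Kind {
	case "cc-metric-store":
		r = &ccms{}
	default:
		return nil, fmt.Errorf("unknown metric data repository: %s", kind.Kind)
	}
	return r, r.Init(raw)
}

func main() {
	r, err := initRepo(json.RawMessage(`{"kind":"cc-metric-store","url":"http://localhost:8082"}`))
	fmt.Printf("%+v %v\n", r, err) // &{URL:http://localhost:8082} <nil>
}
```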
diff --git a/internal/metricdata/prometheus.go b/internal/metricdata/prometheus.go
index d16501e..2ec8558 100644
--- a/internal/metricdata/prometheus.go
+++ b/internal/metricdata/prometheus.go
@@ -1,5 +1,5 @@
// Copyright (C) 2022 DKRZ
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package metricdata
@@ -22,8 +22,8 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/schema"
promapi "github.com/prometheus/client_golang/api"
promv1 "github.com/prometheus/client_golang/api/prometheus/v1"
promcfg "github.com/prometheus/common/config"
@@ -160,7 +160,7 @@ func (pdb *PrometheusDataRepository) Init(rawConfig json.RawMessage) error {
var config PrometheusDataRepositoryConfig
// parse config
if err := json.Unmarshal(rawConfig, &config); err != nil {
- log.Warn("Error while unmarshaling raw json config")
+ cclog.Warn("Error while unmarshaling raw json config")
return err
}
// support basic authentication
@@ -179,7 +179,7 @@ func (pdb *PrometheusDataRepository) Init(rawConfig json.RawMessage) error {
RoundTripper: rt,
})
if err != nil {
- log.Error("Error while initializing new prometheus client")
+ cclog.Error("Error while initializing new prometheus client")
return err
}
// init query client
@@ -192,9 +192,9 @@ func (pdb *PrometheusDataRepository) Init(rawConfig json.RawMessage) error {
for metric, templ := range config.Templates {
pdb.templates[metric], err = template.New(metric).Parse(templ)
if err == nil {
- log.Debugf("Added PromQL template for %s: %s", metric, templ)
+ cclog.Debugf("Added PromQL template for %s: %s", metric, templ)
} else {
- log.Warnf("Failed to parse PromQL template %s for metric %s", templ, metric)
+ cclog.Warnf("Failed to parse PromQL template %s for metric %s", templ, metric)
}
}
return nil
@@ -221,7 +221,7 @@ func (pdb *PrometheusDataRepository) FormatQuery(
return "", errors.New(fmt.Sprintf("METRICDATA/PROMETHEUS > Error compiling template %v", templ))
} else {
query := buf.String()
- log.Debugf("PromQL: %s", query)
+ cclog.Debugf("PromQL: %s", query)
return query, nil
}
} else {
@@ -279,13 +279,13 @@ func (pdb *PrometheusDataRepository) LoadData(
for i, resource := range job.Resources {
nodes[i] = resource.Hostname
}
- from := job.StartTime
- to := job.StartTime.Add(time.Duration(job.Duration) * time.Second)
+ from := time.Unix(job.StartTime, 0)
+ to := time.Unix(job.StartTime+int64(job.Duration), 0)
for _, scope := range scopes {
if scope != schema.MetricScopeNode {
logOnce.Do(func() {
- log.Infof("Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
+ cclog.Infof("Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
})
continue
}
@@ -293,12 +293,12 @@ func (pdb *PrometheusDataRepository) LoadData(
for _, metric := range metrics {
metricConfig := archive.GetMetricConfig(job.Cluster, metric)
if metricConfig == nil {
- log.Warnf("Error in LoadData: Metric %s for cluster %s not configured", metric, job.Cluster)
+ cclog.Warnf("Error in LoadData: Metric %s for cluster %s not configured", metric, job.Cluster)
return nil, errors.New("Prometheus config error")
}
query, err := pdb.FormatQuery(metric, scope, nodes, job.Cluster)
if err != nil {
- log.Warn("Error while formatting prometheus query")
+ cclog.Warn("Error while formatting prometheus query")
return nil, err
}
@@ -310,11 +310,11 @@ func (pdb *PrometheusDataRepository) LoadData(
}
result, warnings, err := pdb.queryClient.QueryRange(ctx, query, r)
if err != nil {
- log.Errorf("Prometheus query error in LoadData: %v\nQuery: %s", err, query)
+ cclog.Errorf("Prometheus query error in LoadData: %v\nQuery: %s", err, query)
return nil, errors.New("Prometheus query error")
}
if len(warnings) > 0 {
- log.Warnf("Warnings: %v\n", warnings)
+ cclog.Warnf("Warnings: %v\n", warnings)
}
// init data structures
@@ -360,7 +360,7 @@ func (pdb *PrometheusDataRepository) LoadStats(
data, err := pdb.LoadData(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, ctx, 0 /*resolution here*/)
if err != nil {
- log.Warn("Error while loading job for stats")
+ cclog.Warn("Error while loading job for stats")
return nil, err
}
for metric, metricData := range data {
@@ -391,19 +391,19 @@ func (pdb *PrometheusDataRepository) LoadNodeData(
for _, scope := range scopes {
if scope != schema.MetricScopeNode {
logOnce.Do(func() {
- log.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
+ cclog.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
})
continue
}
for _, metric := range metrics {
metricConfig := archive.GetMetricConfig(cluster, metric)
if metricConfig == nil {
- log.Warnf("Error in LoadNodeData: Metric %s for cluster %s not configured", metric, cluster)
+ cclog.Warnf("Error in LoadNodeData: Metric %s for cluster %s not configured", metric, cluster)
return nil, errors.New("Prometheus config error")
}
query, err := pdb.FormatQuery(metric, scope, nodes, cluster)
if err != nil {
- log.Warn("Error while formatting prometheus query")
+ cclog.Warn("Error while formatting prometheus query")
return nil, err
}
@@ -415,11 +415,11 @@ func (pdb *PrometheusDataRepository) LoadNodeData(
}
result, warnings, err := pdb.queryClient.QueryRange(ctx, query, r)
if err != nil {
- log.Errorf("Prometheus query error in LoadNodeData: %v\n", err)
+ cclog.Errorf("Prometheus query error in LoadNodeData: %v\n", err)
return nil, errors.New("Prometheus query error")
}
if len(warnings) > 0 {
- log.Warnf("Warnings: %v\n", warnings)
+ cclog.Warnf("Warnings: %v\n", warnings)
}
step := int64(metricConfig.Timestep)
@@ -444,7 +444,7 @@ func (pdb *PrometheusDataRepository) LoadNodeData(
}
}
t1 := time.Since(t0)
- log.Debugf("LoadNodeData of %v nodes took %s", len(data), t1)
+ cclog.Debugf("LoadNodeData of %v nodes took %s", len(data), t1)
return data, nil
}
@@ -453,13 +453,13 @@ func (pdb *PrometheusDataRepository) LoadScopedStats(
job *schema.Job,
metrics []string,
scopes []schema.MetricScope,
- ctx context.Context) (schema.ScopedJobStats, error) {
-
+ ctx context.Context,
+) (schema.ScopedJobStats, error) {
// Assumption: pdb.loadData() only returns series node-scope - use node scope for statsTable
scopedJobStats := make(schema.ScopedJobStats)
data, err := pdb.LoadData(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, ctx, 0 /*resolution here*/)
if err != nil {
- log.Warn("Error while loading job for scopedJobStats")
+ cclog.Warn("Error while loading job for scopedJobStats")
return nil, err
}
@@ -467,7 +467,7 @@ func (pdb *PrometheusDataRepository) LoadScopedStats(
for _, scope := range scopes {
if scope != schema.MetricScopeNode {
logOnce.Do(func() {
- log.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
+ cclog.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
})
continue
}
@@ -502,7 +502,6 @@ func (pdb *PrometheusDataRepository) LoadNodeListData(
page *model.PageRequest,
ctx context.Context,
) (map[string]schema.JobData, int, bool, error) {
-
// Assumption: pdb.loadData() only returns series node-scope - use node scope for NodeList
// 0) Init additional vars
@@ -540,7 +539,7 @@ func (pdb *PrometheusDataRepository) LoadNodeListData(
if len(nodes) > page.ItemsPerPage {
start := (page.Page - 1) * page.ItemsPerPage
end := start + page.ItemsPerPage
- if end > len(nodes) {
+ if end >= len(nodes) {
end = len(nodes)
hasNextPage = false
} else {
@@ -564,7 +563,7 @@ func (pdb *PrometheusDataRepository) LoadNodeListData(
for _, scope := range scopes {
if scope != schema.MetricScopeNode {
logOnce.Do(func() {
- log.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
+ cclog.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
})
continue
}
@@ -572,12 +571,12 @@ func (pdb *PrometheusDataRepository) LoadNodeListData(
for _, metric := range metrics {
metricConfig := archive.GetMetricConfig(cluster, metric)
if metricConfig == nil {
- log.Warnf("Error in LoadNodeListData: Metric %s for cluster %s not configured", metric, cluster)
+ cclog.Warnf("Error in LoadNodeListData: Metric %s for cluster %s not configured", metric, cluster)
return nil, totalNodes, hasNextPage, errors.New("Prometheus config error")
}
query, err := pdb.FormatQuery(metric, scope, nodes, cluster)
if err != nil {
- log.Warn("Error while formatting prometheus query")
+ cclog.Warn("Error while formatting prometheus query")
return nil, totalNodes, hasNextPage, err
}
@@ -589,11 +588,11 @@ func (pdb *PrometheusDataRepository) LoadNodeListData(
}
result, warnings, err := pdb.queryClient.QueryRange(ctx, query, r)
if err != nil {
- log.Errorf("Prometheus query error in LoadNodeData: %v\n", err)
+ cclog.Errorf("Prometheus query error in LoadNodeData: %v\n", err)
return nil, totalNodes, hasNextPage, errors.New("Prometheus query error")
}
if len(warnings) > 0 {
- log.Warnf("Warnings: %v\n", warnings)
+ cclog.Warnf("Warnings: %v\n", warnings)
}
step := int64(metricConfig.Timestep)
@@ -629,6 +628,6 @@ func (pdb *PrometheusDataRepository) LoadNodeListData(
}
}
t1 := time.Since(t0)
- log.Debugf("LoadNodeListData of %v nodes took %s", len(data), t1)
+ cclog.Debugf("LoadNodeListData of %v nodes took %s", len(data), t1)
return data, totalNodes, hasNextPage, nil
}
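Editor's note: FormatQuery renders per-metric PromQL from text/template definitions parsed in Init (pdb.templates above). A hedged sketch of that flow; the template string and context fields here are illustrative, not cc-backend's actual template set.

```go
package main

import (
	"bytes"
	"fmt"
	"strings"
	"text/template"
)

// queryCtx is a hypothetical template context; the real FormatQuery
// context may carry different fields.
type queryCtx struct {
	Nodes string // regex-joined hostnames, e.g. "node01|node02"
}

func main() {
	// One parsed template per metric, as in pdb.templates.
	t := template.Must(template.New("cpu_load").Parse(
		`avg(node_load1{hostname=~"{{.Nodes}}"})`))

	var buf bytes.Buffer
	nodes := []string{"node01", "node02"}
	if err := t.Execute(&buf, queryCtx{Nodes: strings.Join(nodes, "|")}); err != nil {
		fmt.Println("template error:", err)
		return
	}
	fmt.Println(buf.String()) // avg(node_load1{hostname=~"node01|node02"})
}
```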
diff --git a/internal/metricdata/utils.go b/internal/metricdata/utils.go
index aa7bde1..59e640e 100644
--- a/internal/metricdata/utils.go
+++ b/internal/metricdata/utils.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package metricdata
@@ -10,7 +10,7 @@ import (
"time"
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ "github.com/ClusterCockpit/cc-lib/schema"
)
var TestLoadDataCallback func(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error) = func(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error) {
@@ -29,16 +29,16 @@ func (tmdr *TestMetricDataRepository) LoadData(
metrics []string,
scopes []schema.MetricScope,
ctx context.Context,
- resolution int) (schema.JobData, error) {
-
+ resolution int,
+) (schema.JobData, error) {
return TestLoadDataCallback(job, metrics, scopes, ctx, resolution)
}
func (tmdr *TestMetricDataRepository) LoadStats(
job *schema.Job,
metrics []string,
- ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) {
-
+ ctx context.Context,
+) (map[string]map[string]schema.MetricStatistics, error) {
panic("TODO")
}
@@ -46,8 +46,8 @@ func (tmdr *TestMetricDataRepository) LoadScopedStats(
job *schema.Job,
metrics []string,
scopes []schema.MetricScope,
- ctx context.Context) (schema.ScopedJobStats, error) {
-
+ ctx context.Context,
+) (schema.ScopedJobStats, error) {
panic("TODO")
}
@@ -56,8 +56,8 @@ func (tmdr *TestMetricDataRepository) LoadNodeData(
metrics, nodes []string,
scopes []schema.MetricScope,
from, to time.Time,
- ctx context.Context) (map[string]map[string][]*schema.JobMetric, error) {
-
+ ctx context.Context,
+) (map[string]map[string][]*schema.JobMetric, error) {
panic("TODO")
}
@@ -70,7 +70,6 @@ func (tmdr *TestMetricDataRepository) LoadNodeListData(
page *model.PageRequest,
ctx context.Context,
) (map[string]schema.JobData, int, bool, error) {
-
panic("TODO")
}
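Editor's note: TestLoadDataCallback is a package-level function variable, so tests can swap in canned data without a mock framework. A standalone sketch of that injection pattern; names here are illustrative.

```go
package main

import "fmt"

// Package-level function variable: production code calls through it and a
// test overwrites it, as TestMetricDataRepository does above.
var loadDataCallback = func(jobID int64) (string, error) {
	return fmt.Sprintf("real data for job %d", jobID), nil
}

func loadData(jobID int64) (string, error) {
	return loadDataCallback(jobID)
}

func main() {
	// A test would stub the callback before exercising loadData:
	loadDataCallback = func(jobID int64) (string, error) {
		return "canned data", nil
	}
	d, _ := loadData(42)
	fmt.Println(d) // canned data
}
```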
diff --git a/internal/repository/dbConnection.go b/internal/repository/dbConnection.go
index 0e3f29d..872edf1 100644
--- a/internal/repository/dbConnection.go
+++ b/internal/repository/dbConnection.go
@@ -1,15 +1,17 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
import (
"database/sql"
+ "fmt"
+ "net/url"
"sync"
"time"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/jmoiron/sqlx"
"github.com/mattn/go-sqlite3"
"github.com/qustavo/sqlhooks/v2"
@@ -33,6 +35,27 @@ type DatabaseOptions struct {
ConnectionMaxIdleTime time.Duration
}
+func setupSqlite(db *sql.DB) (err error) {
+ pragmas := []string{
+ // "journal_mode = WAL",
+ // "busy_timeout = 5000",
+ // "synchronous = NORMAL",
+ // "cache_size = 1000000000", // 1GB
+ // "foreign_keys = true",
+ "temp_store = memory",
+ // "mmap_size = 3000000000",
+ }
+
+ for _, pragma := range pragmas {
+ _, err = db.Exec("PRAGMA " + pragma)
+ if err != nil {
+ return
+ }
+ }
+
+ return nil
+}
+
func Connect(driver string, db string) {
var err error
var dbHandle *sqlx.DB
@@ -48,26 +71,34 @@ func Connect(driver string, db string) {
switch driver {
case "sqlite3":
- // - Set WAL mode (not strictly necessary each time because it's persisted in the database, but good for first run)
- // - Set busy timeout, so concurrent writers wait on each other instead of erroring immediately
- // - Enable foreign key checks
- opts.URL += "?_journal=WAL&_timeout=5000&_fk=true"
+ // TODO: Have separate DB handles for Writes and Reads
+ // Optimize SQLite connection: https://kerkour.com/sqlite-for-servers
+ connectionUrlParams := make(url.Values)
+ connectionUrlParams.Add("_txlock", "immediate")
+ connectionUrlParams.Add("_journal_mode", "WAL")
+ connectionUrlParams.Add("_busy_timeout", "5000")
+ connectionUrlParams.Add("_synchronous", "NORMAL")
+ connectionUrlParams.Add("_cache_size", "1000000000")
+ connectionUrlParams.Add("_foreign_keys", "true")
+ opts.URL = fmt.Sprintf("file:%s?%s", opts.URL, connectionUrlParams.Encode())
- if log.Loglevel() == "debug" {
+ if cclog.Loglevel() == "debug" {
sql.Register("sqlite3WithHooks", sqlhooks.Wrap(&sqlite3.SQLiteDriver{}, &Hooks{}))
dbHandle, err = sqlx.Open("sqlite3WithHooks", opts.URL)
} else {
dbHandle, err = sqlx.Open("sqlite3", opts.URL)
}
+
+ if err := setupSqlite(dbHandle.DB); err != nil {
+ 	cclog.Errorf("DB Connection: Failed to apply SQLite PRAGMAs: %s", err.Error())
+ }
case "mysql":
opts.URL += "?multiStatements=true"
dbHandle, err = sqlx.Open("mysql", opts.URL)
default:
- log.Abortf("DB Connection: Unsupported database driver '%s'.\n", driver)
+ cclog.Abortf("DB Connection: Unsupported database driver '%s'.\n", driver)
}
if err != nil {
- log.Abortf("DB Connection: Could not connect to '%s' database with sqlx.Open().\nError: %s\n", driver, err.Error())
+ cclog.Abortf("DB Connection: Could not connect to '%s' database with sqlx.Open().\nError: %s\n", driver, err.Error())
}
dbHandle.SetMaxOpenConns(opts.MaxOpenConnections)
@@ -78,14 +109,14 @@ func Connect(driver string, db string) {
dbConnInstance = &DBConnection{DB: dbHandle, Driver: driver}
err = checkDBVersion(driver, dbHandle.DB)
if err != nil {
- log.Abortf("DB Connection: Failed DB version check.\nError: %s\n", err.Error())
+ cclog.Abortf("DB Connection: Failed DB version check.\nError: %s\n", err.Error())
}
})
}
func GetConnection() *DBConnection {
if dbConnInstance == nil {
- log.Fatalf("Database connection not initialized!")
+ cclog.Fatalf("Database connection not initialized!")
}
return dbConnInstance
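Editor's note: the sqlite3 branch now builds its DSN with url.Values instead of string concatenation, which keeps the pragma parameters escaped and deterministically ordered. A sketch of the resulting connection string (a subset of the parameters, with a hypothetical database path):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	params := make(url.Values)
	params.Add("_txlock", "immediate")
	params.Add("_journal_mode", "WAL")
	params.Add("_busy_timeout", "5000")
	params.Add("_synchronous", "NORMAL")
	params.Add("_foreign_keys", "true")

	// Encode() sorts keys alphabetically.
	dsn := fmt.Sprintf("file:%s?%s", "./var/job.db", params.Encode())
	fmt.Println(dsn)
	// file:./var/job.db?_busy_timeout=5000&_foreign_keys=true&_journal_mode=WAL&_synchronous=NORMAL&_txlock=immediate
}
```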
diff --git a/internal/repository/hooks.go b/internal/repository/hooks.go
index e4fe8e1..5433072 100644
--- a/internal/repository/hooks.go
+++ b/internal/repository/hooks.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -8,21 +8,21 @@ import (
"context"
"time"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
)
// Hooks satisfies the sqlhook.Hooks interface
type Hooks struct{}
// Before hook will print the query with its args and return the context with the timestamp
-func (h *Hooks) Before(ctx context.Context, query string, args ...interface{}) (context.Context, error) {
- log.Debugf("SQL query %s %q", query, args)
+func (h *Hooks) Before(ctx context.Context, query string, args ...any) (context.Context, error) {
+ cclog.Debugf("SQL query %s %q", query, args)
return context.WithValue(ctx, "begin", time.Now()), nil
}
// After hook will get the timestamp registered on the Before hook and print the elapsed time
-func (h *Hooks) After(ctx context.Context, query string, args ...interface{}) (context.Context, error) {
+func (h *Hooks) After(ctx context.Context, query string, args ...any) (context.Context, error) {
begin := ctx.Value("begin").(time.Time)
- log.Debugf("Took: %s\n", time.Since(begin))
+ cclog.Debugf("Took: %s\n", time.Since(begin))
return ctx, nil
}
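Editor's note: the hooks thread a start timestamp through the context between Before and After to time each query. A minimal standalone sketch of the same pattern, using a typed context key (which go vet prefers over the string "begin" used above):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// ctxKey is an unexported typed key, avoiding collisions on string keys.
type ctxKey struct{}

func before(ctx context.Context, query string) context.Context {
	fmt.Printf("SQL query %s\n", query)
	return context.WithValue(ctx, ctxKey{}, time.Now())
}

func after(ctx context.Context) {
	begin := ctx.Value(ctxKey{}).(time.Time)
	fmt.Printf("Took: %s\n", time.Since(begin))
}

func main() {
	ctx := before(context.Background(), "SELECT 1")
	time.Sleep(10 * time.Millisecond)
	after(ctx)
}
```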
diff --git a/internal/repository/job.go b/internal/repository/job.go
index 84de6f7..b6aa323 100644
--- a/internal/repository/job.go
+++ b/internal/repository/job.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -9,16 +9,16 @@ import (
"encoding/json"
"errors"
"fmt"
+ "maps"
"math"
"strconv"
"sync"
"time"
- "github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/ClusterCockpit/cc-backend/pkg/lrucache"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/lrucache"
+ "github.com/ClusterCockpit/cc-lib/schema"
sq "github.com/Masterminds/squirrel"
"github.com/jmoiron/sqlx"
)
@@ -33,6 +33,7 @@ type JobRepository struct {
stmtCache *sq.StmtCache
cache *lrucache.Cache
driver string
+ Mutex sync.Mutex
}
func GetJobRepository() *JobRepository {
@@ -51,38 +52,49 @@ func GetJobRepository() *JobRepository {
}
var jobColumns []string = []string{
- "job.id", "job.job_id", "job.hpc_user", "job.project", "job.cluster", "job.subcluster", "job.start_time", "job.cluster_partition", "job.array_job_id",
- "job.num_nodes", "job.num_hwthreads", "job.num_acc", "job.exclusive", "job.monitoring_status", "job.smt", "job.job_state",
- "job.duration", "job.walltime", "job.resources", "job.footprint", "job.energy",
+ "job.id", "job.job_id", "job.hpc_user", "job.project", "job.cluster", "job.subcluster",
+ "job.start_time", "job.cluster_partition", "job.array_job_id", "job.num_nodes",
+ "job.num_hwthreads", "job.num_acc", "job.exclusive", "job.monitoring_status",
+ "job.smt", "job.job_state", "job.duration", "job.walltime", "job.resources",
+ "job.footprint", "job.energy",
}
-func scanJob(row interface{ Scan(...interface{}) error }) (*schema.Job, error) {
+var jobCacheColumns []string = []string{
+ "job_cache.id", "job_cache.job_id", "job_cache.hpc_user", "job_cache.project", "job_cache.cluster",
+ "job_cache.subcluster", "job_cache.start_time", "job_cache.cluster_partition",
+ "job_cache.array_job_id", "job_cache.num_nodes", "job_cache.num_hwthreads",
+ "job_cache.num_acc", "job_cache.exclusive", "job_cache.monitoring_status", "job_cache.smt",
+ "job_cache.job_state", "job_cache.duration", "job_cache.walltime", "job_cache.resources",
+ "job_cache.footprint", "job_cache.energy",
+}
+
+func scanJob(row interface{ Scan(...any) error }) (*schema.Job, error) {
job := &schema.Job{}
if err := row.Scan(
- &job.ID, &job.JobID, &job.User, &job.Project, &job.Cluster, &job.SubCluster, &job.StartTimeUnix, &job.Partition, &job.ArrayJobId,
- &job.NumNodes, &job.NumHWThreads, &job.NumAcc, &job.Exclusive, &job.MonitoringStatus, &job.SMT, &job.State,
+ &job.ID, &job.JobID, &job.User, &job.Project, &job.Cluster, &job.SubCluster,
+ &job.StartTime, &job.Partition, &job.ArrayJobId, &job.NumNodes, &job.NumHWThreads,
+ &job.NumAcc, &job.Exclusive, &job.MonitoringStatus, &job.SMT, &job.State,
&job.Duration, &job.Walltime, &job.RawResources, &job.RawFootprint, &job.Energy); err != nil {
- log.Warnf("Error while scanning rows (Job): %v", err)
+ cclog.Warnf("Error while scanning rows (Job): %v", err)
return nil, err
}
if err := json.Unmarshal(job.RawResources, &job.Resources); err != nil {
- log.Warn("Error while unmarshaling raw resources json")
+ cclog.Warn("Error while unmarshaling raw resources json")
return nil, err
}
job.RawResources = nil
if err := json.Unmarshal(job.RawFootprint, &job.Footprint); err != nil {
- log.Warnf("Error while unmarshaling raw footprint json: %v", err)
+ cclog.Warnf("Error while unmarshaling raw footprint json: %v", err)
return nil, err
}
job.RawFootprint = nil
- job.StartTime = time.Unix(job.StartTimeUnix, 0)
// Always ensure accurate duration for running jobs
if job.State == schema.JobStateRunning {
- job.Duration = int32(time.Since(job.StartTime).Seconds())
+ job.Duration = int32(time.Now().Unix() - job.StartTime)
}
return job, nil
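Editor's note: scanJob takes an anonymous interface{ Scan(...any) error }, so it accepts both *sql.Row and *sql.Rows, and stages JSON columns in Raw* byte slices before unmarshaling and releasing them. A self-contained sketch of both ideas with a fake row (simplified types, not the real schema.Job):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// scanner is the anonymous-interface trick from scanJob.
type scanner interface{ Scan(...any) error }

// fakeRow satisfies scanner for the demo.
type fakeRow struct{ vals []any }

func (f fakeRow) Scan(dest ...any) error {
	for i := range dest {
		switch d := dest[i].(type) {
		case *int64:
			*d = f.vals[i].(int64)
		case *[]byte:
			*d = f.vals[i].([]byte)
		}
	}
	return nil
}

type job struct {
	ID           int64
	RawResources []byte
	Hosts        []string
}

func scanJob(row scanner) (*job, error) {
	j := &job{}
	if err := row.Scan(&j.ID, &j.RawResources); err != nil {
		return nil, err
	}
	// Unmarshal the staged JSON column, then drop the raw bytes.
	if err := json.Unmarshal(j.RawResources, &j.Hosts); err != nil {
		return nil, err
	}
	j.RawResources = nil
	return j, nil
}

func main() {
	j, err := scanJob(fakeRow{vals: []any{int64(7), []byte(`["node01","node02"]`)}})
	fmt.Println(j.ID, j.Hosts, err) // 7 [node01 node02] <nil>
}
```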
@@ -97,7 +109,7 @@ func (r *JobRepository) Optimize() error {
return err
}
case "mysql":
- log.Info("Optimize currently not supported for mysql driver")
+ cclog.Info("Optimize currently not supported for mysql driver")
}
return nil
@@ -138,17 +150,6 @@ func (r *JobRepository) Flush() error {
return nil
}
-func scanJobLink(row interface{ Scan(...interface{}) error }) (*model.JobLink, error) {
- jobLink := &model.JobLink{}
- if err := row.Scan(
- &jobLink.ID, &jobLink.JobID); err != nil {
- log.Warn("Error while scanning rows (jobLink)")
- return nil, err
- }
-
- return jobLink, nil
-}
-
func (r *JobRepository) FetchMetadata(job *schema.Job) (map[string]string, error) {
start := time.Now()
cachekey := fmt.Sprintf("metadata:%d", job.ID)
@@ -159,7 +160,7 @@ func (r *JobRepository) FetchMetadata(job *schema.Job) (map[string]string, error
if err := sq.Select("job.meta_data").From("job").Where("job.id = ?", job.ID).
RunWith(r.stmtCache).QueryRow().Scan(&job.RawMetaData); err != nil {
- log.Warn("Error while scanning for job metadata")
+ cclog.Warn("Error while scanning for job metadata")
return nil, err
}
@@ -168,12 +169,12 @@ func (r *JobRepository) FetchMetadata(job *schema.Job) (map[string]string, error
}
if err := json.Unmarshal(job.RawMetaData, &job.MetaData); err != nil {
- log.Warn("Error while unmarshaling raw metadata json")
+ cclog.Warn("Error while unmarshaling raw metadata json")
return nil, err
}
r.cache.Put(cachekey, job.MetaData, len(job.RawMetaData), 24*time.Hour)
- log.Debugf("Timer FetchMetadata %s", time.Since(start))
+ cclog.Debugf("Timer FetchMetadata %s", time.Since(start))
return job.MetaData, nil
}
@@ -182,16 +183,14 @@ func (r *JobRepository) UpdateMetadata(job *schema.Job, key, val string) (err er
r.cache.Del(cachekey)
if job.MetaData == nil {
if _, err = r.FetchMetadata(job); err != nil {
- log.Warnf("Error while fetching metadata for job, DB ID '%v'", job.ID)
+ cclog.Warnf("Error while fetching metadata for job, DB ID '%v'", job.ID)
return err
}
}
if job.MetaData != nil {
cpy := make(map[string]string, len(job.MetaData)+1)
- for k, v := range job.MetaData {
- cpy[k] = v
- }
+ maps.Copy(cpy, job.MetaData)
cpy[key] = val
job.MetaData = cpy
} else {
@@ -199,7 +198,7 @@ func (r *JobRepository) UpdateMetadata(job *schema.Job, key, val string) (err er
}
if job.RawMetaData, err = json.Marshal(job.MetaData); err != nil {
- log.Warnf("Error while marshaling metadata for job, DB ID '%v'", job.ID)
+ cclog.Warnf("Error while marshaling metadata for job, DB ID '%v'", job.ID)
return err
}
@@ -207,7 +206,7 @@ func (r *JobRepository) UpdateMetadata(job *schema.Job, key, val string) (err er
Set("meta_data", job.RawMetaData).
Where("job.id = ?", job.ID).
RunWith(r.stmtCache).Exec(); err != nil {
- log.Warnf("Error while updating metadata for job, DB ID '%v'", job.ID)
+ cclog.Warnf("Error while updating metadata for job, DB ID '%v'", job.ID)
return err
}
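Editor's note: the hand-written copy loop in UpdateMetadata is replaced by maps.Copy from the standard library (Go 1.21+). A small equivalence sketch:

```go
package main

import (
	"fmt"
	"maps"
)

func main() {
	src := map[string]string{"slurmInfo": "...", "jobScript": "..."}

	// Old: hand-written loop.
	cpy1 := make(map[string]string, len(src)+1)
	for k, v := range src {
		cpy1[k] = v
	}

	// New: maps.Copy has the same semantics; on key clashes,
	// entries from src overwrite those already in the destination.
	cpy2 := make(map[string]string, len(src)+1)
	maps.Copy(cpy2, src)

	fmt.Println(len(cpy1) == len(cpy2)) // true
}
```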
@@ -220,7 +219,7 @@ func (r *JobRepository) FetchFootprint(job *schema.Job) (map[string]float64, err
if err := sq.Select("job.footprint").From("job").Where("job.id = ?", job.ID).
RunWith(r.stmtCache).QueryRow().Scan(&job.RawFootprint); err != nil {
- log.Warn("Error while scanning for job footprint")
+ cclog.Warn("Error while scanning for job footprint")
return nil, err
}
@@ -229,11 +228,11 @@ func (r *JobRepository) FetchFootprint(job *schema.Job) (map[string]float64, err
}
if err := json.Unmarshal(job.RawFootprint, &job.Footprint); err != nil {
- log.Warn("Error while unmarshaling raw footprint json")
+ cclog.Warn("Error while unmarshaling raw footprint json")
return nil, err
}
- log.Debugf("Timer FetchFootprint %s", time.Since(start))
+ cclog.Debugf("Timer FetchFootprint %s", time.Since(start))
return job.Footprint, nil
}
@@ -247,7 +246,7 @@ func (r *JobRepository) FetchEnergyFootprint(job *schema.Job) (map[string]float6
if err := sq.Select("job.energy_footprint").From("job").Where("job.id = ?", job.ID).
RunWith(r.stmtCache).QueryRow().Scan(&job.RawEnergyFootprint); err != nil {
- log.Warn("Error while scanning for job energy_footprint")
+ cclog.Warn("Error while scanning for job energy_footprint")
return nil, err
}
@@ -256,12 +255,12 @@ func (r *JobRepository) FetchEnergyFootprint(job *schema.Job) (map[string]float6
}
if err := json.Unmarshal(job.RawEnergyFootprint, &job.EnergyFootprint); err != nil {
- log.Warn("Error while unmarshaling raw energy footprint json")
+ cclog.Warn("Error while unmarshaling raw energy footprint json")
return nil, err
}
r.cache.Put(cachekey, job.EnergyFootprint, len(job.EnergyFootprint), 24*time.Hour)
- log.Debugf("Timer FetchEnergyFootprint %s", time.Since(start))
+ cclog.Debugf("Timer FetchEnergyFootprint %s", time.Since(start))
return job.EnergyFootprint, nil
}
@@ -274,9 +273,9 @@ func (r *JobRepository) DeleteJobsBefore(startTime int64) (int, error) {
if err != nil {
s, _, _ := qd.ToSql()
- log.Errorf(" DeleteJobsBefore(%d) with %s: error %#v", startTime, s, err)
+ cclog.Errorf(" DeleteJobsBefore(%d) with %s: error %#v", startTime, s, err)
} else {
- log.Debugf("DeleteJobsBefore(%d): Deleted %d jobs", startTime, cnt)
+ cclog.Debugf("DeleteJobsBefore(%d): Deleted %d jobs", startTime, cnt)
}
return cnt, err
}
@@ -287,9 +286,9 @@ func (r *JobRepository) DeleteJobById(id int64) error {
if err != nil {
s, _, _ := qd.ToSql()
- log.Errorf("DeleteJobById(%d) with %s : error %#v", id, s, err)
+ cclog.Errorf("DeleteJobById(%d) with %s : error %#v", id, s, err)
} else {
- log.Debugf("DeleteJobById(%d): Success", id)
+ cclog.Debugf("DeleteJobById(%d): Success", id)
}
return err
}
@@ -352,7 +351,7 @@ func (r *JobRepository) FindColumnValue(user *schema.User, searchterm string, ta
}
return "", ErrNotFound
} else {
- log.Infof("Non-Admin User %s : Requested Query '%s' on table '%s' : Forbidden", user.Name, query, table)
+ cclog.Infof("Non-Admin User %s : Requested Query '%s' on table '%s' : Forbidden", user.Name, query, table)
return "", ErrForbidden
}
}
@@ -371,7 +370,7 @@ func (r *JobRepository) FindColumnValues(user *schema.User, query string, table
err := rows.Scan(&result)
if err != nil {
rows.Close()
- log.Warnf("Error while scanning rows: %v", err)
+ cclog.Warnf("Error while scanning rows: %v", err)
return emptyResult, err
}
results = append(results, result)
@@ -381,7 +380,7 @@ func (r *JobRepository) FindColumnValues(user *schema.User, query string, table
return emptyResult, ErrNotFound
} else {
- log.Infof("Non-Admin User %s : Requested Query '%s' on table '%s' : Forbidden", user.Name, query, table)
+ cclog.Infof("Non-Admin User %s : Requested Query '%s' on table '%s' : Forbidden", user.Name, query, table)
return emptyResult, ErrForbidden
}
}
@@ -389,7 +388,7 @@ func (r *JobRepository) FindColumnValues(user *schema.User, query string, table
func (r *JobRepository) Partitions(cluster string) ([]string, error) {
var err error
start := time.Now()
- partitions := r.cache.Get("partitions:"+cluster, func() (interface{}, time.Duration, int) {
+ partitions := r.cache.Get("partitions:"+cluster, func() (any, time.Duration, int) {
parts := []string{}
if err = r.DB.Select(&parts, `SELECT DISTINCT job.cluster_partition FROM job WHERE job.cluster = ?;`, cluster); err != nil {
return nil, 0, 1000
@@ -400,7 +399,7 @@ func (r *JobRepository) Partitions(cluster string) ([]string, error) {
if err != nil {
return nil, err
}
- log.Debugf("Timer Partitions %s", time.Since(start))
+ cclog.Debugf("Timer Partitions %s", time.Since(start))
return partitions.([]string), nil
}
@@ -414,7 +413,7 @@ func (r *JobRepository) AllocatedNodes(cluster string) (map[string]map[string]in
Where("job.cluster = ?", cluster).
RunWith(r.stmtCache).Query()
if err != nil {
- log.Error("Error while running query")
+ cclog.Error("Error while running query")
return nil, err
}
@@ -425,11 +424,11 @@ func (r *JobRepository) AllocatedNodes(cluster string) (map[string]map[string]in
var resources []*schema.Resource
var subcluster string
if err := rows.Scan(&raw, &subcluster); err != nil {
- log.Warn("Error while scanning rows")
+ cclog.Warn("Error while scanning rows")
return nil, err
}
if err := json.Unmarshal(raw, &resources); err != nil {
- log.Warn("Error while unmarshaling raw resources json")
+ cclog.Warn("Error while unmarshaling raw resources json")
return nil, err
}
@@ -444,7 +443,7 @@ func (r *JobRepository) AllocatedNodes(cluster string) (map[string]map[string]in
}
}
- log.Debugf("Timer AllocatedNodes %s", time.Since(start))
+ cclog.Debugf("Timer AllocatedNodes %s", time.Since(start))
return subclusters, nil
}
@@ -460,23 +459,50 @@ func (r *JobRepository) StopJobsExceedingWalltimeBy(seconds int) error {
Where(fmt.Sprintf("(%d - job.start_time) > (job.walltime + %d)", time.Now().Unix(), seconds)).
RunWith(r.DB).Exec()
if err != nil {
- log.Warn("Error while stopping jobs exceeding walltime")
+ cclog.Warn("Error while stopping jobs exceeding walltime")
return err
}
rowsAffected, err := res.RowsAffected()
if err != nil {
- log.Warn("Error while fetching affected rows after stopping due to exceeded walltime")
+ cclog.Warn("Error while fetching affected rows after stopping due to exceeded walltime")
return err
}
if rowsAffected > 0 {
- log.Infof("%d jobs have been marked as failed due to running too long", rowsAffected)
+ cclog.Infof("%d jobs have been marked as failed due to running too long", rowsAffected)
}
- log.Debugf("Timer StopJobsExceedingWalltimeBy %s", time.Since(start))
+ cclog.Debugf("Timer StopJobsExceedingWalltimeBy %s", time.Since(start))
return nil
}
+func (r *JobRepository) FindJobIdsByTag(tagId int64) ([]int64, error) {
+ query := sq.Select("job.id").From("job").
+ Join("jobtag ON jobtag.job_id = job.id").
+ Where(sq.Eq{"jobtag.tag_id": tagId}).Distinct()
+ rows, err := query.RunWith(r.stmtCache).Query()
+ if err != nil {
+ cclog.Error("Error while running query")
+ return nil, err
+ }
+ jobIds := make([]int64, 0, 100)
+
+ for rows.Next() {
+ var jobId int64
+
+ if err := rows.Scan(&jobId); err != nil {
+ rows.Close()
+ cclog.Warn("Error while scanning rows")
+ return nil, err
+ }
+
+ jobIds = append(jobIds, jobId)
+ }
+
+ return jobIds, nil
+}
+
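Aside: the ids returned by FindJobIdsByTag line up with the new DbID job filter added in jobQuery.go further down. A minimal sketch of wiring the two together — tagID, repo, and the surrounding handler are assumed; DbID is a []string per the filter code, so the int64 ids need converting first:

	ids, err := repo.FindJobIdsByTag(tagID)
	if err != nil {
		return err
	}
	dbIDs := make([]string, 0, len(ids))
	for _, id := range ids {
		dbIDs = append(dbIDs, strconv.FormatInt(id, 10))
	}
	// The DbID filter becomes an IN clause over these ids.
	filters := []*model.JobFilter{{DbID: dbIDs}}
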
+// FIXME: Reconsider filtering short jobs with hardcoded threshold
func (r *JobRepository) FindRunningJobs(cluster string) ([]*schema.Job, error) {
query := sq.Select(jobColumns...).From("job").
Where(fmt.Sprintf("job.cluster = '%s'", cluster)).
@@ -485,7 +511,7 @@ func (r *JobRepository) FindRunningJobs(cluster string) ([]*schema.Job, error) {
rows, err := query.RunWith(r.stmtCache).Query()
if err != nil {
- log.Error("Error while running query")
+ cclog.Error("Error while running query")
return nil, err
}
@@ -494,13 +520,13 @@ func (r *JobRepository) FindRunningJobs(cluster string) ([]*schema.Job, error) {
job, err := scanJob(rows)
if err != nil {
rows.Close()
- log.Warn("Error while scanning rows")
+ cclog.Warn("Error while scanning rows")
return nil, err
}
jobs = append(jobs, job)
}
- log.Infof("Return job count %d", len(jobs))
+ cclog.Infof("Return job count %d", len(jobs))
return jobs, nil
}
@@ -525,18 +551,18 @@ func (r *JobRepository) FindJobsBetween(startTimeBegin int64, startTimeEnd int64
}
if startTimeBegin == 0 {
- log.Infof("Find jobs before %d", startTimeEnd)
+ cclog.Infof("Find jobs before %d", startTimeEnd)
query = sq.Select(jobColumns...).From("job").Where(fmt.Sprintf(
"job.start_time < %d", startTimeEnd))
} else {
- log.Infof("Find jobs between %d and %d", startTimeBegin, startTimeEnd)
+ cclog.Infof("Find jobs between %d and %d", startTimeBegin, startTimeEnd)
query = sq.Select(jobColumns...).From("job").Where(fmt.Sprintf(
"job.start_time BETWEEN %d AND %d", startTimeBegin, startTimeEnd))
}
rows, err := query.RunWith(r.stmtCache).Query()
if err != nil {
- log.Error("Error while running query")
+ cclog.Error("Error while running query")
return nil, err
}
@@ -545,13 +571,13 @@ func (r *JobRepository) FindJobsBetween(startTimeBegin int64, startTimeEnd int64
job, err := scanJob(rows)
if err != nil {
rows.Close()
- log.Warn("Error while scanning rows")
+ cclog.Warn("Error while scanning rows")
return nil, err
}
jobs = append(jobs, job)
}
- log.Infof("Return job count %d", len(jobs))
+ cclog.Infof("Return job count %d", len(jobs))
return jobs, nil
}
@@ -581,12 +607,12 @@ func (r *JobRepository) MarkArchived(
func (r *JobRepository) UpdateEnergy(
stmt sq.UpdateBuilder,
- jobMeta *schema.JobMeta,
+ jobMeta *schema.Job,
) (sq.UpdateBuilder, error) {
/* Note: Only Called for Running Jobs during Intermediate Update or on Archiving */
sc, err := archive.GetSubCluster(jobMeta.Cluster, jobMeta.SubCluster)
if err != nil {
- log.Errorf("cannot get subcluster: %s", err.Error())
+ cclog.Errorf("cannot get subcluster: %s", err.Error())
return stmt, err
}
energyFootprint := make(map[string]float64)
@@ -599,7 +625,7 @@ func (r *JobRepository) UpdateEnergy(
if i, err := archive.MetricIndex(sc.MetricConfig, fp); err == nil {
// Note: For DB data, calculate and save as kWh
if sc.MetricConfig[i].Energy == "energy" { // this metric has energy as unit (Joules or Wh)
- log.Warnf("Update EnergyFootprint for Job %d and Metric %s on cluster %s: Set to 'energy' in cluster.json: Not implemented, will return 0.0", jobMeta.JobID, jobMeta.Cluster, fp)
+ cclog.Warnf("Update EnergyFootprint for Job %d and Metric %s on cluster %s: Set to 'energy' in cluster.json: Not implemented, will return 0.0", jobMeta.JobID, jobMeta.Cluster, fp)
// FIXME: Needs sum as stats type
} else if sc.MetricConfig[i].Energy == "power" { // this metric has power as unit (Watt)
// Energy: Power (in Watts) * Time (in Seconds)
@@ -611,18 +637,18 @@ func (r *JobRepository) UpdateEnergy(
metricEnergy = math.Round(rawEnergy*100.0) / 100.0
}
} else {
- log.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, jobMeta.ID)
+ cclog.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, jobMeta.ID)
}
energyFootprint[fp] = metricEnergy
totalEnergy += metricEnergy
- // log.Infof("Metric %s Average %f -> %f kWh | Job %d Total -> %f kWh", fp, LoadJobStat(jobMeta, fp, "avg"), energy, jobMeta.JobID, totalEnergy)
+ // cclog.Infof("Metric %s Average %f -> %f kWh | Job %d Total -> %f kWh", fp, LoadJobStat(jobMeta, fp, "avg"), energy, jobMeta.JobID, totalEnergy)
}
var rawFootprint []byte
if rawFootprint, err = json.Marshal(energyFootprint); err != nil {
- log.Warnf("Error while marshaling energy footprint for job INTO BYTES, DB ID '%v'", jobMeta.ID)
+ cclog.Warnf("Error while marshaling energy footprint for job INTO BYTES, DB ID '%v'", jobMeta.ID)
return stmt, err
}
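For orientation on the arithmetic: average power in Watts times duration in seconds gives Joules, and dividing by 3.6e6 yields kWh. The conversion itself is elided from this hunk, so the numbers below are purely illustrative:

	// Assumed inputs: 250 W average power over a 7200 s (2 h) job.
	rawEnergy := (250.0 * 7200.0) / 3.6e6               // 1.8e6 J == 0.5 kWh
	metricEnergy := math.Round(rawEnergy*100.0) / 100.0 // rounded to 0.5
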
@@ -631,12 +657,12 @@ func (r *JobRepository) UpdateEnergy(
func (r *JobRepository) UpdateFootprint(
stmt sq.UpdateBuilder,
- jobMeta *schema.JobMeta,
+ jobMeta *schema.Job,
) (sq.UpdateBuilder, error) {
/* Note: Only Called for Running Jobs during Intermediate Update or on Archiving */
sc, err := archive.GetSubCluster(jobMeta.Cluster, jobMeta.SubCluster)
if err != nil {
- log.Errorf("cannot get subcluster: %s", err.Error())
+ cclog.Errorf("cannot get subcluster: %s", err.Error())
return stmt, err
}
footprint := make(map[string]float64)
@@ -650,7 +676,7 @@ func (r *JobRepository) UpdateFootprint(
}
if statType != "avg" && statType != "min" && statType != "max" {
- log.Warnf("unknown statType for footprint update: %s", statType)
+ cclog.Warnf("unknown statType for footprint update: %s", statType)
return stmt, fmt.Errorf("unknown statType for footprint update: %s", statType)
}
@@ -664,7 +690,7 @@ func (r *JobRepository) UpdateFootprint(
var rawFootprint []byte
if rawFootprint, err = json.Marshal(footprint); err != nil {
- log.Warnf("Error while marshaling footprint for job INTO BYTES, DB ID '%v'", jobMeta.ID)
+ cclog.Warnf("Error while marshaling footprint for job INTO BYTES, DB ID '%v'", jobMeta.ID)
return stmt, err
}
diff --git a/internal/repository/jobCreate.go b/internal/repository/jobCreate.go
index 9e47974..aa2ea76 100644
--- a/internal/repository/jobCreate.go
+++ b/internal/repository/jobCreate.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -8,11 +8,19 @@ import (
"encoding/json"
"fmt"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/schema"
sq "github.com/Masterminds/squirrel"
)
+const NamedJobCacheInsert string = `INSERT INTO job_cache (
+ job_id, hpc_user, project, cluster, subcluster, cluster_partition, array_job_id, num_nodes, num_hwthreads, num_acc,
+ exclusive, monitoring_status, smt, job_state, start_time, duration, walltime, footprint, energy, energy_footprint, resources, meta_data
+) VALUES (
+ :job_id, :hpc_user, :project, :cluster, :subcluster, :cluster_partition, :array_job_id, :num_nodes, :num_hwthreads, :num_acc,
+ :exclusive, :monitoring_status, :smt, :job_state, :start_time, :duration, :walltime, :footprint, :energy, :energy_footprint, :resources, :meta_data
+);`
+
const NamedJobInsert string = `INSERT INTO job (
job_id, hpc_user, project, cluster, subcluster, cluster_partition, array_job_id, num_nodes, num_hwthreads, num_acc,
exclusive, monitoring_status, smt, job_state, start_time, duration, walltime, footprint, energy, energy_footprint, resources, meta_data
@@ -21,24 +29,65 @@ const NamedJobInsert string = `INSERT INTO job (
:exclusive, :monitoring_status, :smt, :job_state, :start_time, :duration, :walltime, :footprint, :energy, :energy_footprint, :resources, :meta_data
);`
-func (r *JobRepository) InsertJob(job *schema.JobMeta) (int64, error) {
- res, err := r.DB.NamedExec(NamedJobInsert, job)
+func (r *JobRepository) InsertJob(job *schema.Job) (int64, error) {
+ r.Mutex.Lock()
+ res, err := r.DB.NamedExec(NamedJobCacheInsert, job)
+ r.Mutex.Unlock()
if err != nil {
- log.Warn("Error while NamedJobInsert")
+ cclog.Warn("Error while NamedJobInsert")
return 0, err
}
id, err := res.LastInsertId()
if err != nil {
- log.Warn("Error while getting last insert ID")
+ cclog.Warn("Error while getting last insert ID")
return 0, err
}
return id, nil
}
+func (r *JobRepository) SyncJobs() ([]*schema.Job, error) {
+ r.Mutex.Lock()
+ defer r.Mutex.Unlock()
+
+ query := sq.Select(jobCacheColumns...).From("job_cache")
+
+ rows, err := query.RunWith(r.stmtCache).Query()
+ if err != nil {
+ cclog.Errorf("Error while running query %v", err)
+ return nil, err
+ }
+
+ jobs := make([]*schema.Job, 0, 50)
+ for rows.Next() {
+ job, err := scanJob(rows)
+ if err != nil {
+ rows.Close()
+ cclog.Warn("Error while scanning rows")
+ return nil, err
+ }
+ jobs = append(jobs, job)
+ }
+
+ _, err = r.DB.Exec(
+ "INSERT INTO job (job_id, cluster, subcluster, start_time, hpc_user, project, cluster_partition, array_job_id, num_nodes, num_hwthreads, num_acc, exclusive, monitoring_status, smt, job_state, duration, walltime, footprint, energy, energy_footprint, resources, meta_data) SELECT job_id, cluster, subcluster, start_time, hpc_user, project, cluster_partition, array_job_id, num_nodes, num_hwthreads, num_acc, exclusive, monitoring_status, smt, job_state, duration, walltime, footprint, energy, energy_footprint, resources, meta_data FROM job_cache")
+ if err != nil {
+ cclog.Warnf("Error while Job sync: %v", err)
+ return nil, err
+ }
+
+ _, err = r.DB.Exec("DELETE FROM job_cache")
+ if err != nil {
+ cclog.Warnf("Error while Job cache clean: %v", err)
+ return nil, err
+ }
+
+ return jobs, nil
+}
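SyncJobs returns the jobs it moved so that callers can fire the start hooks from jobHooks.go (added below). A sketch of a periodic driver, assuming the usual GetJobRepository accessor and a ticker-owned loop:

	jobRepo := repository.GetJobRepository()
	ticker := time.NewTicker(time.Minute) // assumed sync interval
	defer ticker.Stop()
	for range ticker.C {
		jobs, err := jobRepo.SyncJobs()
		if err != nil {
			cclog.Warnf("job sync failed: %v", err)
			continue
		}
		repository.CallJobStartHooks(jobs)
	}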
+
// Start inserts a new job in the table, returning the unique job ID.
// Statistics are not transferred!
-func (r *JobRepository) Start(job *schema.JobMeta) (id int64, err error) {
+func (r *JobRepository) Start(job *schema.Job) (id int64, err error) {
job.RawFootprint, err = json.Marshal(job.Footprint)
if err != nil {
return -1, fmt.Errorf("REPOSITORY/JOB > encoding footprint field failed: %w", err)
@@ -73,3 +122,19 @@ func (r *JobRepository) Stop(
_, err = stmt.RunWith(r.stmtCache).Exec()
return
}
+
+func (r *JobRepository) StopCached(
+ jobId int64,
+ duration int32,
+ state schema.JobState,
+ monitoringStatus int32,
+) (err error) {
+ stmt := sq.Update("job_cache").
+ Set("job_state", state).
+ Set("duration", duration).
+ Set("monitoring_status", monitoringStatus).
+ Where("job.id = ?", jobId)
+
+ _, err = stmt.RunWith(r.stmtCache).Exec()
+ return
+}
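StopCached mirrors Stop for jobs still sitting in job_cache, i.e. stop requests that arrive before the next sync. One way to route between the two, sketched with assumed variables and with Stop's signature taken to match StopCached:

	// Prefer the cache table if the job has not been synced into job yet.
	if _, cerr := r.FindCached(&jobId, &cluster, &startTime); cerr == nil {
		err = r.StopCached(id, duration, state, monitoringStatus)
	} else {
		err = r.Stop(id, duration, state, monitoringStatus)
	}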
diff --git a/internal/repository/jobFind.go b/internal/repository/jobFind.go
index 1e2ccb8..39519d5 100644
--- a/internal/repository/jobFind.go
+++ b/internal/repository/jobFind.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -11,8 +11,8 @@ import (
"time"
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/schema"
sq "github.com/Masterminds/squirrel"
)
@@ -39,7 +39,27 @@ func (r *JobRepository) Find(
q = q.OrderBy("job.id DESC") // always use newest matching job by db id if more than one match
- log.Debugf("Timer Find %s", time.Since(start))
+ cclog.Debugf("Timer Find %s", time.Since(start))
+ return scanJob(q.RunWith(r.stmtCache).QueryRow())
+}
+
+func (r *JobRepository) FindCached(
+ jobId *int64,
+ cluster *string,
+ startTime *int64,
+) (*schema.Job, error) {
+ q := sq.Select(jobCacheColumns...).From("job_cache").
+ Where("job_cache.job_id = ?", *jobId)
+
+ if cluster != nil {
+ q = q.Where("job_cache.cluster = ?", *cluster)
+ }
+ if startTime != nil {
+ q = q.Where("job_cache.start_time = ?", *startTime)
+ }
+
+ q = q.OrderBy("job_cache.id DESC") // always use newest matching job by db id if more than one match
+
return scanJob(q.RunWith(r.stmtCache).QueryRow())
}
@@ -66,7 +86,7 @@ func (r *JobRepository) FindAll(
rows, err := q.RunWith(r.stmtCache).Query()
if err != nil {
- log.Error("Error while running query")
+ cclog.Error("Error while running query")
return nil, err
}
@@ -74,15 +94,44 @@ func (r *JobRepository) FindAll(
for rows.Next() {
job, err := scanJob(rows)
if err != nil {
- log.Warn("Error while scanning rows")
+ cclog.Warn("Error while scanning rows")
return nil, err
}
jobs = append(jobs, job)
}
- log.Debugf("Timer FindAll %s", time.Since(start))
+ cclog.Debugf("Timer FindAll %s", time.Since(start))
return jobs, nil
}
+// GetJobList returns the complete list of non-running job database ids.
+// It is useful for processing large job counts and is intended to be used
+// together with FindById to process jobs one by one.
+func (r *JobRepository) GetJobList() ([]int64, error) {
+ query := sq.Select("id").From("job").
+ Where("job.job_state != 'running'")
+
+ rows, err := query.RunWith(r.stmtCache).Query()
+ if err != nil {
+ cclog.Error("Error while running query")
+ return nil, err
+ }
+
+ jl := make([]int64, 0, 1000)
+ for rows.Next() {
+ var id int64
+ err := rows.Scan(&id)
+ if err != nil {
+ rows.Close()
+ cclog.Warn("Error while scanning rows")
+ return nil, err
+ }
+ jl = append(jl, id)
+ }
+
+ cclog.Infof("Return job count %d", len(jl))
+ return jl, nil
+}
+
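A sketch of the intended pairing; process is an assumed per-job worker, and FindByIdDirect (whose single-id signature appears later in this diff) stands in for FindById:

	ids, err := r.GetJobList()
	if err != nil {
		return err
	}
	for _, id := range ids {
		job, err := r.FindByIdDirect(id)
		if err != nil {
			cclog.Warnf("skipping job %d: %v", id, err)
			continue
		}
		process(job)
	}
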
// FindById executes a SQL query to find a specific batch job.
// The job is queried using the database id.
// It returns a pointer to a schema.Job data structure and an error variable.
@@ -178,7 +227,7 @@ func (r *JobRepository) FindConcurrentJobs(
var startTime int64
var stopTime int64
- startTime = job.StartTimeUnix
+ startTime = job.StartTime
hostname := job.Resources[0].Hostname
if job.State == schema.JobStateRunning {
@@ -204,7 +253,7 @@ func (r *JobRepository) FindConcurrentJobs(
rows, err := query.RunWith(r.stmtCache).Query()
if err != nil {
- log.Errorf("Error while running query: %v", err)
+ cclog.Errorf("Error while running query: %v", err)
return nil, err
}
@@ -215,7 +264,7 @@ func (r *JobRepository) FindConcurrentJobs(
var id, jobId, startTime sql.NullInt64
if err = rows.Scan(&id, &jobId, &startTime); err != nil {
- log.Warn("Error while scanning rows")
+ cclog.Warn("Error while scanning rows")
return nil, err
}
@@ -231,7 +280,7 @@ func (r *JobRepository) FindConcurrentJobs(
rows, err = queryRunning.RunWith(r.stmtCache).Query()
if err != nil {
- log.Errorf("Error while running query: %v", err)
+ cclog.Errorf("Error while running query: %v", err)
return nil, err
}
@@ -239,7 +288,7 @@ func (r *JobRepository) FindConcurrentJobs(
var id, jobId, startTime sql.NullInt64
if err := rows.Scan(&id, &jobId, &startTime); err != nil {
- log.Warn("Error while scanning rows")
+ cclog.Warn("Error while scanning rows")
return nil, err
}
diff --git a/internal/repository/jobHooks.go b/internal/repository/jobHooks.go
new file mode 100644
index 0000000..824b5cd
--- /dev/null
+++ b/internal/repository/jobHooks.go
@@ -0,0 +1,57 @@
+// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
+// All rights reserved. This file is part of cc-backend.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+package repository
+
+import (
+ "sync"
+
+ "github.com/ClusterCockpit/cc-lib/schema"
+)
+
+type JobHook interface {
+ JobStartCallback(job *schema.Job)
+ JobStopCallback(job *schema.Job)
+}
+
+var (
+ initOnce sync.Once
+ hooks []JobHook
+)
+
+func RegisterJobHook(hook JobHook) {
+ initOnce.Do(func() {
+ hooks = make([]JobHook, 0)
+ })
+
+ if hook != nil {
+ hooks = append(hooks, hook)
+ }
+}
+
+func CallJobStartHooks(jobs []*schema.Job) {
+ if hooks == nil {
+ return
+ }
+
+ for _, hook := range hooks {
+ if hook != nil {
+ for _, job := range jobs {
+ hook.JobStartCallback(job)
+ }
+ }
+ }
+}
+
+func CallJobStopHooks(job *schema.Job) {
+ if hooks == nil {
+ return
+ }
+
+ for _, hook := range hooks {
+ if hook != nil {
+ hook.JobStopCallback(job)
+ }
+ }
+}
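A minimal hook implementation and registration, assuming a hypothetical component that wants to react to job lifecycle events:

	type archiveHook struct{} // hypothetical example component

	func (h *archiveHook) JobStartCallback(job *schema.Job) {
		cclog.Debugf("job %d started on %s", job.JobID, job.Cluster)
	}

	func (h *archiveHook) JobStopCallback(job *schema.Job) {
		cclog.Debugf("job %d stopped", job.JobID)
	}

	// During service initialization:
	repository.RegisterJobHook(&archiveHook{})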
diff --git a/internal/repository/jobQuery.go b/internal/repository/jobQuery.go
index d169b6f..c9ccb03 100644
--- a/internal/repository/jobQuery.go
+++ b/internal/repository/jobQuery.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -13,8 +13,8 @@ import (
"time"
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/schema"
sq "github.com/Masterminds/squirrel"
)
@@ -68,7 +68,7 @@ func (r *JobRepository) QueryJobs(
rows, err := query.RunWith(r.stmtCache).Query()
if err != nil {
queryString, queryVars, _ := query.ToSql()
- log.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err)
+ cclog.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err)
return nil, err
}
@@ -77,7 +77,7 @@ func (r *JobRepository) QueryJobs(
job, err := scanJob(rows)
if err != nil {
rows.Close()
- log.Warn("Error while scanning rows (Jobs)")
+ cclog.Warn("Error while scanning rows (Jobs)")
return nil, err
}
jobs = append(jobs, job)
@@ -123,7 +123,7 @@ func SecurityCheckWithUser(user *schema.User, query sq.SelectBuilder) (sq.Select
if len(user.Projects) != 0 {
return query.Where(sq.Or{sq.Eq{"job.project": user.Projects}, sq.Eq{"job.hpc_user": user.Username}}), nil
} else {
- log.Debugf("Manager-User '%s' has no defined projects to lookup! Query only personal jobs ...", user.Username)
+ cclog.Debugf("Manager-User '%s' has no defined projects to lookup! Query only personal jobs ...", user.Username)
return query.Where("job.hpc_user = ?", user.Username), nil
}
case user.HasRole(schema.RoleUser): // User : Only personal jobs
@@ -146,6 +146,11 @@ func BuildWhereClause(filter *model.JobFilter, query sq.SelectBuilder) sq.Select
// This is an OR-Logic query: Returns all distinct jobs with at least one of the requested tags; TODO: AND-Logic query?
query = query.Join("jobtag ON jobtag.job_id = job.id").Where(sq.Eq{"jobtag.tag_id": filter.Tags}).Distinct()
}
+ if filter.DbID != nil {
+ dbIDs := make([]string, len(filter.DbID))
+ copy(dbIDs, filter.DbID)
+ query = query.Where(sq.Eq{"job.id": dbIDs})
+ }
if filter.JobID != nil {
query = buildStringCondition("job.job_id", filter.JobID, query)
}
@@ -239,7 +244,7 @@ func buildTimeCondition(field string, cond *schema.TimeRange, query sq.SelectBui
case "last30d":
then = now - (60 * 60 * 24 * 30)
default:
- log.Debugf("No known named timeRange: startTime.range = %s", cond.Range)
+ cclog.Debugf("No known named timeRange: startTime.range = %s", cond.Range)
return query
}
return query.Where(field+" BETWEEN ? AND ?", then, now)
@@ -330,7 +335,7 @@ var (
func toSnakeCase(str string) string {
for _, c := range str {
if c == '\'' || c == '\\' {
- log.Panic("toSnakeCase() attack vector!")
+ cclog.Panic("toSnakeCase() attack vector!")
}
}
diff --git a/internal/repository/job_test.go b/internal/repository/job_test.go
index 363bb6c..e96373d 100644
--- a/internal/repository/job_test.go
+++ b/internal/repository/job_test.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -9,7 +9,7 @@ import (
"fmt"
"testing"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ "github.com/ClusterCockpit/cc-lib/schema"
_ "github.com/mattn/go-sqlite3"
)
@@ -24,7 +24,7 @@ func TestFind(t *testing.T) {
// fmt.Printf("%+v", job)
- if job.ID != 5 {
+ if *job.ID != 5 {
t.Errorf("wrong summary for diagnostic 3\ngot: %d \nwant: 1366", job.JobID)
}
}
diff --git a/internal/repository/migration.go b/internal/repository/migration.go
index 0b2591e..13f74ec 100644
--- a/internal/repository/migration.go
+++ b/internal/repository/migration.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -9,14 +9,14 @@ import (
"embed"
"fmt"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/golang-migrate/migrate/v4"
"github.com/golang-migrate/migrate/v4/database/mysql"
"github.com/golang-migrate/migrate/v4/database/sqlite3"
"github.com/golang-migrate/migrate/v4/source/iofs"
)
-const Version uint = 8
+const Version uint = 10
//go:embed migrations/*
var migrationFiles embed.FS
@@ -54,13 +54,13 @@ func checkDBVersion(backend string, db *sql.DB) error {
return err
}
default:
- log.Abortf("Migration: Unsupported database backend '%s'.\n", backend)
+ cclog.Abortf("Migration: Unsupported database backend '%s'.\n", backend)
}
v, dirty, err := m.Version()
if err != nil {
if err == migrate.ErrNilVersion {
- log.Warn("Legacy database without version or missing database file!")
+ cclog.Warn("Legacy database without version or missing database file!")
} else {
return err
}
@@ -84,7 +84,7 @@ func getMigrateInstance(backend string, db string) (m *migrate.Migrate, err erro
case "sqlite3":
d, err := iofs.New(migrationFiles, "migrations/sqlite3")
if err != nil {
- log.Fatal(err)
+ cclog.Fatal(err)
}
m, err = migrate.NewWithSourceInstance("iofs", d, fmt.Sprintf("sqlite3://%s?_foreign_keys=on", db))
@@ -102,7 +102,7 @@ func getMigrateInstance(backend string, db string) (m *migrate.Migrate, err erro
return m, err
}
default:
- log.Abortf("Migration: Unsupported database backend '%s'.\n", backend)
+ cclog.Abortf("Migration: Unsupported database backend '%s'.\n", backend)
}
return m, nil
@@ -115,8 +115,17 @@ func MigrateDB(backend string, db string) error {
}
v, dirty, err := m.Version()
+ if err != nil {
+ if err == migrate.ErrNilVersion {
+ cclog.Warn("Legacy database without version or missing database file!")
+ } else {
+ return err
+ }
+ }
- log.Infof("unsupported database version %d, need %d.\nPlease backup your database file and run cc-backend -migrate-db", v, Version)
+ if v < Version {
+ cclog.Infof("unsupported database version %d, need %d.\nPlease backup your database file and run cc-backend -migrate-db", v, Version)
+ }
if dirty {
return fmt.Errorf("last migration to version %d has failed, please fix the db manually and force version with -force-db flag", Version)
@@ -124,7 +133,7 @@ func MigrateDB(backend string, db string) error {
if err := m.Up(); err != nil {
if err == migrate.ErrNoChange {
- log.Info("DB already up to date!")
+ cclog.Info("DB already up to date!")
} else {
return err
}
@@ -142,7 +151,7 @@ func RevertDB(backend string, db string) error {
if err := m.Migrate(Version - 1); err != nil {
if err == migrate.ErrNoChange {
- log.Info("DB already up to date!")
+ cclog.Info("DB already up to date!")
} else {
return err
}
diff --git a/internal/repository/migrations/sqlite3/09_add-job-cache.down.sql b/internal/repository/migrations/sqlite3/09_add-job-cache.down.sql
new file mode 100644
index 0000000..ef257cf
--- /dev/null
+++ b/internal/repository/migrations/sqlite3/09_add-job-cache.down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS job_cache;
diff --git a/internal/repository/migrations/sqlite3/09_add-job-cache.up.sql b/internal/repository/migrations/sqlite3/09_add-job-cache.up.sql
new file mode 100644
index 0000000..7840369
--- /dev/null
+++ b/internal/repository/migrations/sqlite3/09_add-job-cache.up.sql
@@ -0,0 +1,31 @@
+CREATE TABLE "job_cache" (
+ id INTEGER PRIMARY KEY,
+ job_id BIGINT NOT NULL,
+ cluster VARCHAR(255) NOT NULL,
+ subcluster VARCHAR(255) NOT NULL,
+ start_time BIGINT NOT NULL, -- Unix timestamp
+ hpc_user VARCHAR(255) NOT NULL,
+ project VARCHAR(255) NOT NULL,
+ cluster_partition VARCHAR(255),
+ array_job_id BIGINT,
+ duration INT NOT NULL,
+ walltime INT NOT NULL,
+ job_state VARCHAR(255) NOT NULL
+ CHECK (job_state IN (
+ 'running', 'completed', 'failed', 'cancelled',
+ 'stopped', 'timeout', 'preempted', 'out_of_memory'
+ )),
+ meta_data TEXT, -- JSON
+ resources TEXT NOT NULL, -- JSON
+ num_nodes INT NOT NULL,
+ num_hwthreads INT,
+ num_acc INT,
+ smt TINYINT NOT NULL DEFAULT 1 CHECK (smt IN (0, 1)),
+ exclusive TINYINT NOT NULL DEFAULT 1 CHECK (exclusive IN (0, 1, 2)),
+ monitoring_status TINYINT NOT NULL DEFAULT 1
+ CHECK (monitoring_status IN (0, 1, 2, 3)),
+ energy REAL NOT NULL DEFAULT 0.0,
+ energy_footprint TEXT DEFAULT NULL,
+ footprint TEXT DEFAULT NULL,
+ UNIQUE (job_id, cluster, start_time)
+);
diff --git a/internal/repository/migrations/sqlite3/10_node-table.down.sql b/internal/repository/migrations/sqlite3/10_node-table.down.sql
new file mode 100644
index 0000000..9119a5a
--- /dev/null
+++ b/internal/repository/migrations/sqlite3/10_node-table.down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS node;
diff --git a/internal/repository/migrations/sqlite3/10_node-table.up.sql b/internal/repository/migrations/sqlite3/10_node-table.up.sql
new file mode 100644
index 0000000..6ba5e25
--- /dev/null
+++ b/internal/repository/migrations/sqlite3/10_node-table.up.sql
@@ -0,0 +1,23 @@
+CREATE TABLE "node" (
+ id INTEGER PRIMARY KEY,
+ hostname VARCHAR(255) NOT NULL,
+ cluster VARCHAR(255) NOT NULL,
+ subcluster VARCHAR(255) NOT NULL,
+ cpus_allocated INTEGER NOT NULL,
+ cpus_total INTEGER NOT NULL,
+ memory_allocated INTEGER NOT NULL,
+ memory_total INTEGER NOT NULL,
+ gpus_allocated INTEGER NOT NULL,
+ gpus_total INTEGER NOT NULL,
+ node_state VARCHAR(255) NOT NULL
+ CHECK (node_state IN (
+ 'allocated', 'reserved', 'idle', 'mixed',
+ 'down', 'unknown'
+ )),
+ health_state VARCHAR(255) NOT NULL
+ CHECK (health_state IN (
+ 'full', 'partial', 'failed'
+ )),
+ meta_data TEXT, -- JSON
+ UNIQUE (hostname, cluster)
+);
diff --git a/internal/repository/node.go b/internal/repository/node.go
new file mode 100644
index 0000000..83bf062
--- /dev/null
+++ b/internal/repository/node.go
@@ -0,0 +1,289 @@
+// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
+// All rights reserved. This file is part of cc-backend.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+package repository
+
+import (
+ "context"
+ "database/sql"
+ "encoding/json"
+ "fmt"
+ "maps"
+ "sync"
+ "time"
+
+ "github.com/ClusterCockpit/cc-backend/internal/graph/model"
+ "github.com/ClusterCockpit/cc-backend/pkg/archive"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/lrucache"
+ "github.com/ClusterCockpit/cc-lib/schema"
+ sq "github.com/Masterminds/squirrel"
+ "github.com/jmoiron/sqlx"
+)
+
+var (
+ nodeRepoOnce sync.Once
+ nodeRepoInstance *NodeRepository
+)
+
+type NodeRepository struct {
+ DB *sqlx.DB
+ stmtCache *sq.StmtCache
+ cache *lrucache.Cache
+ driver string
+}
+
+func GetNodeRepository() *NodeRepository {
+ nodeRepoOnce.Do(func() {
+ db := GetConnection()
+
+ nodeRepoInstance = &NodeRepository{
+ DB: db.DB,
+ driver: db.Driver,
+
+ stmtCache: sq.NewStmtCache(db.DB),
+ cache: lrucache.New(1024 * 1024),
+ }
+ })
+ return nodeRepoInstance
+}
+
+func (r *NodeRepository) FetchMetadata(node *schema.Node) (map[string]string, error) {
+ start := time.Now()
+ cachekey := fmt.Sprintf("metadata:%d", node.ID)
+ if cached := r.cache.Get(cachekey, nil); cached != nil {
+ node.MetaData = cached.(map[string]string)
+ return node.MetaData, nil
+ }
+
+ if err := sq.Select("node.meta_data").From("node").Where("node.id = ?", node.ID).
+ RunWith(r.stmtCache).QueryRow().Scan(&node.RawMetaData); err != nil {
+ cclog.Warn("Error while scanning for node metadata")
+ return nil, err
+ }
+
+ if len(node.RawMetaData) == 0 {
+ return nil, nil
+ }
+
+ if err := json.Unmarshal(node.RawMetaData, &node.MetaData); err != nil {
+ cclog.Warn("Error while unmarshaling raw metadata json")
+ return nil, err
+ }
+
+ r.cache.Put(cachekey, node.MetaData, len(node.RawMetaData), 24*time.Hour)
+ cclog.Debugf("Timer FetchMetadata %s", time.Since(start))
+ return node.MetaData, nil
+}
+
+func (r *NodeRepository) UpdateMetadata(node *schema.Node, key, val string) (err error) {
+ cachekey := fmt.Sprintf("metadata:%d", node.ID)
+ r.cache.Del(cachekey)
+ if node.MetaData == nil {
+ if _, err = r.FetchMetadata(node); err != nil {
+ cclog.Warnf("Error while fetching metadata for node, DB ID '%v'", node.ID)
+ return err
+ }
+ }
+
+ if node.MetaData != nil {
+ cpy := make(map[string]string, len(node.MetaData)+1)
+ maps.Copy(cpy, node.MetaData)
+ cpy[key] = val
+ node.MetaData = cpy
+ } else {
+ node.MetaData = map[string]string{key: val}
+ }
+
+ if node.RawMetaData, err = json.Marshal(node.MetaData); err != nil {
+ cclog.Warnf("Error while marshaling metadata for node, DB ID '%v'", node.ID)
+ return err
+ }
+
+ if _, err = sq.Update("node").
+ Set("meta_data", node.RawMetaData).
+ Where("node.id = ?", node.ID).
+ RunWith(r.stmtCache).Exec(); err != nil {
+ cclog.Warnf("Error while updating metadata for node, DB ID '%v'", node.ID)
+ return err
+ }
+
+ r.cache.Put(cachekey, node.MetaData, len(node.RawMetaData), 24*time.Hour)
+ return nil
+}
+
+func (r *NodeRepository) GetNode(id int64, withMeta bool) (*schema.Node, error) {
+ node := &schema.Node{}
+ if err := sq.Select("id", "hostname", "cluster", "subcluster", "node_state",
+ "health_state").From("node").
+ Where("node.id = ?", id).RunWith(r.DB).
+ QueryRow().Scan(&node.ID, &node.Hostname, &node.Cluster, &node.SubCluster, &node.NodeState,
+ &node.HealthState); err != nil {
+ cclog.Warnf("Error while querying node '%v' from database", id)
+ return nil, err
+ }
+
+ if withMeta {
+ var err error
+ var meta map[string]string
+ if meta, err = r.FetchMetadata(node); err != nil {
+ cclog.Warnf("Error while fetching metadata for node '%v'", id)
+ return nil, err
+ }
+ node.MetaData = meta
+ }
+
+ return node, nil
+}
+
+const NamedNodeInsert string = `
+INSERT INTO node (hostname, cluster, subcluster, node_state, health_state)
+ VALUES (:hostname, :cluster, :subcluster, :node_state, :health_state);`
+
+func (r *NodeRepository) AddNode(node *schema.Node) (int64, error) {
+ var err error
+
+ res, err := r.DB.NamedExec(NamedNodeInsert, node)
+ if err != nil {
+ cclog.Errorf("Error while adding node '%v' to database", node.Hostname)
+ return 0, err
+ }
+ node.ID, err = res.LastInsertId()
+ if err != nil {
+ cclog.Errorf("Error while getting last insert id for node '%v' from database", node.Hostname)
+ return 0, err
+ }
+
+ return node.ID, nil
+}
+
+func (r *NodeRepository) UpdateNodeState(hostname string, cluster string, nodeState *schema.NodeState) error {
+ var id int64
+ if err := sq.Select("id").From("node").
+ Where("node.hostname = ?", hostname).Where("node.cluster = ?", cluster).RunWith(r.DB).
+ QueryRow().Scan(&id); err != nil {
+ if err == sql.ErrNoRows {
+ subcluster, err := archive.GetSubClusterByNode(cluster, hostname)
+ if err != nil {
+ cclog.Errorf("Error while getting subcluster for node '%s' in cluster '%s': %v", hostname, cluster, err)
+ return err
+ }
+ node := schema.Node{
+ Hostname: hostname, Cluster: cluster, SubCluster: subcluster, NodeState: *nodeState,
+ HealthState: schema.MonitoringStateFull,
+ }
+ _, err = r.AddNode(&node)
+ if err != nil {
+ cclog.Errorf("Error while adding node '%s' to database: %v", hostname, err)
+ return err
+ }
+
+ cclog.Infof("Added node '%s' to database", hostname)
+ return nil
+ } else {
+ cclog.Warnf("Error while querying node '%v' from database", id)
+ return err
+ }
+ }
+
+ if _, err := sq.Update("node").Set("node_state", nodeState).Where("node.id = ?", id).RunWith(r.DB).Exec(); err != nil {
+ cclog.Errorf("error while updating node '%s'", hostname)
+ return err
+ }
+ cclog.Infof("Updated node '%s' in database", hostname)
+ return nil
+}
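UpdateNodeState behaves as an upsert: unknown hostnames are inserted with a default health state, known ones are updated in place. A sketch of feeding it from a scheduler poll; schedulerNodes and its state mapping are assumptions:

	repo := repository.GetNodeRepository()
	for _, n := range schedulerNodes { // assumed scheduler snapshot
		state := schema.NodeState(n.State) // assumed mapping onto the NodeState scalar
		if err := repo.UpdateNodeState(n.Hostname, n.Cluster, &state); err != nil {
			cclog.Warnf("node state update failed for %s: %v", n.Hostname, err)
		}
	}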
+
+// func (r *NodeRepository) UpdateHealthState(hostname string, healthState *schema.MonitoringState) error {
+// if _, err := sq.Update("node").Set("health_state", healthState).Where("node.id = ?", id).RunWith(r.DB).Exec(); err != nil {
+// cclog.Errorf("error while updating node '%d'", id)
+// return err
+// }
+//
+// return nil
+// }
+
+func (r *NodeRepository) DeleteNode(id int64) error {
+ _, err := r.DB.Exec(`DELETE FROM node WHERE node.id = ?`, id)
+ if err != nil {
+ cclog.Errorf("Error while deleting node '%d' from DB", id)
+ return err
+ }
+ cclog.Infof("deleted node '%d' from DB", id)
+ return nil
+}
+
+// TODO: Implement order by
+func (r *NodeRepository) QueryNodes(
+ ctx context.Context,
+ filters []*model.NodeFilter,
+ order *model.OrderByInput,
+) ([]*schema.Node, error) {
+ query, qerr := SecurityCheck(ctx, sq.Select("node.hostname", "node.cluster", "node.subcluster", "node.node_state", "node.health_state").From("node"))
+ if qerr != nil {
+ return nil, qerr
+ }
+
+ for _, f := range filters {
+ if f.Hostname != nil {
+ query = buildStringCondition("node.hostname", f.Hostname, query)
+ }
+ if f.Cluster != nil {
+ query = buildStringCondition("node.cluster", f.Cluster, query)
+ }
+ if f.NodeState != nil {
+ query = query.Where("node.node_state = ?", f.NodeState)
+ }
+ if f.HealthState != nil {
+ query = query.Where("node.health_state = ?", f.HealthState)
+ }
+ }
+
+ rows, err := query.RunWith(r.stmtCache).Query()
+ if err != nil {
+ queryString, queryVars, _ := query.ToSql()
+ cclog.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err)
+ return nil, err
+ }
+
+ nodes := make([]*schema.Node, 0, 50)
+ for rows.Next() {
+ node := schema.Node{}
+
+ if err := rows.Scan(&node.Hostname, &node.Cluster, &node.SubCluster,
+ &node.NodeState, &node.HealthState); err != nil {
+ rows.Close()
+ cclog.Warn("Error while scanning rows (Nodes)")
+ return nil, err
+ }
+ nodes = append(nodes, &node)
+ }
+
+ return nodes, nil
+}
+
+func (r *NodeRepository) ListNodes(cluster string) ([]*schema.Node, error) {
+ q := sq.Select("hostname", "cluster", "subcluster", "node_state",
+ "health_state").From("node").Where("node.cluster = ?", cluster).OrderBy("node.hostname ASC")
+
+ rows, err := q.RunWith(r.DB).Query()
+ if err != nil {
+ cclog.Warn("Error while querying user list")
+ return nil, err
+ }
+ nodeList := make([]*schema.Node, 0, 100)
+ defer rows.Close()
+ for rows.Next() {
+ node := &schema.Node{}
+ if err := rows.Scan(&node.Hostname, &node.Cluster,
+ &node.SubCluster, &node.NodeState, &node.HealthState); err != nil {
+ cclog.Warn("Error while scanning node list")
+ return nil, err
+ }
+
+ nodeList = append(nodeList, node)
+ }
+
+ return nodeList, nil
+}
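Usage sketch for ListNodes, aggregating node states per cluster; the cluster name is an assumption:

	nodes, err := repository.GetNodeRepository().ListNodes("fritz")
	if err != nil {
		return err
	}
	counts := map[schema.NodeState]int{}
	for _, n := range nodes {
		counts[n.NodeState]++
	}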
diff --git a/internal/repository/repository_test.go b/internal/repository/repository_test.go
index 1ca9ec5..5603c31 100644
--- a/internal/repository/repository_test.go
+++ b/internal/repository/repository_test.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -9,8 +9,8 @@ import (
"testing"
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/schema"
_ "github.com/mattn/go-sqlite3"
)
@@ -65,7 +65,7 @@ func BenchmarkDB_FindJobById(b *testing.B) {
func BenchmarkDB_FindJob(b *testing.B) {
var jobId int64 = 107266
var startTime int64 = 1657557241
- var cluster = "fritz"
+ cluster := "fritz"
b.Run("FindJob", func(b *testing.B) {
db := setup(b)
@@ -147,7 +147,7 @@ func getContext(tb testing.TB) context.Context {
func setup(tb testing.TB) *JobRepository {
tb.Helper()
- log.Init("warn", true)
+ cclog.Init("warn", true)
dbfile := "testdata/job.db"
err := MigrateDB("sqlite3", dbfile)
noErr(tb, err)
diff --git a/internal/repository/stats.go b/internal/repository/stats.go
index 410ba6c..7beb674 100644
--- a/internal/repository/stats.go
+++ b/internal/repository/stats.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -14,8 +14,8 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/schema"
sq "github.com/Masterminds/squirrel"
)
@@ -158,7 +158,7 @@ func (r *JobRepository) JobsStatsGrouped(
rows, err := query.RunWith(r.DB).Query()
if err != nil {
- log.Warn("Error while querying DB for job statistics")
+ cclog.Warn("Error while querying DB for job statistics")
return nil, err
}
@@ -169,7 +169,7 @@ func (r *JobRepository) JobsStatsGrouped(
var name sql.NullString
var jobs, walltime, nodes, nodeHours, cores, coreHours, accs, accHours sql.NullInt64
if err := rows.Scan(&id, &jobs, &name, &walltime, &nodes, &nodeHours, &cores, &coreHours, &accs, &accHours); err != nil {
- log.Warn("Error while scanning rows")
+ cclog.Warn("Error while scanning rows")
return nil, err
}
@@ -241,7 +241,7 @@ func (r *JobRepository) JobsStatsGrouped(
}
}
- log.Debugf("Timer JobsStatsGrouped %s", time.Since(start))
+ cclog.Debugf("Timer JobsStatsGrouped %s", time.Since(start))
return stats, nil
}
@@ -261,7 +261,7 @@ func (r *JobRepository) JobsStats(
var jobs, walltime, nodes, nodeHours, cores, coreHours, accs, accHours sql.NullInt64
if err := row.Scan(&jobs, &walltime, &nodes, &nodeHours, &cores, &coreHours, &accs, &accHours); err != nil {
- log.Warn("Error while scanning rows")
+ cclog.Warn("Error while scanning rows")
return nil, err
}
@@ -287,11 +287,11 @@ func (r *JobRepository) JobsStats(
})
}
- log.Debugf("Timer JobStats %s", time.Since(start))
+ cclog.Debugf("Timer JobStats %s", time.Since(start))
return stats, nil
}
-func LoadJobStat(job *schema.JobMeta, metric string, statType string) float64 {
+func LoadJobStat(job *schema.Job, metric string, statType string) float64 {
if stats, ok := job.Statistics[metric]; ok {
switch statType {
case "avg":
@@ -301,7 +301,7 @@ func LoadJobStat(job *schema.JobMeta, metric string, statType string) float64 {
case "min":
return stats.Min
default:
- log.Errorf("Unknown stat type %s", statType)
+ cclog.Errorf("Unknown stat type %s", statType)
}
}
@@ -322,7 +322,7 @@ func (r *JobRepository) JobCountGrouped(
}
rows, err := query.RunWith(r.DB).Query()
if err != nil {
- log.Warn("Error while querying DB for job statistics")
+ cclog.Warn("Error while querying DB for job statistics")
return nil, err
}
@@ -332,7 +332,7 @@ func (r *JobRepository) JobCountGrouped(
var id sql.NullString
var cnt sql.NullInt64
if err := rows.Scan(&id, &cnt); err != nil {
- log.Warn("Error while scanning rows")
+ cclog.Warn("Error while scanning rows")
return nil, err
}
if id.Valid {
@@ -344,7 +344,7 @@ func (r *JobRepository) JobCountGrouped(
}
}
- log.Debugf("Timer JobCountGrouped %s", time.Since(start))
+ cclog.Debugf("Timer JobCountGrouped %s", time.Since(start))
return stats, nil
}
@@ -364,7 +364,7 @@ func (r *JobRepository) AddJobCountGrouped(
}
rows, err := query.RunWith(r.DB).Query()
if err != nil {
- log.Warn("Error while querying DB for job statistics")
+ cclog.Warn("Error while querying DB for job statistics")
return nil, err
}
@@ -374,7 +374,7 @@ func (r *JobRepository) AddJobCountGrouped(
var id sql.NullString
var cnt sql.NullInt64
if err := rows.Scan(&id, &cnt); err != nil {
- log.Warn("Error while scanning rows")
+ cclog.Warn("Error while scanning rows")
return nil, err
}
if id.Valid {
@@ -393,7 +393,7 @@ func (r *JobRepository) AddJobCountGrouped(
}
}
- log.Debugf("Timer AddJobCountGrouped %s", time.Since(start))
+ cclog.Debugf("Timer AddJobCountGrouped %s", time.Since(start))
return stats, nil
}
@@ -411,7 +411,7 @@ func (r *JobRepository) AddJobCount(
}
rows, err := query.RunWith(r.DB).Query()
if err != nil {
- log.Warn("Error while querying DB for job statistics")
+ cclog.Warn("Error while querying DB for job statistics")
return nil, err
}
@@ -420,7 +420,7 @@ func (r *JobRepository) AddJobCount(
for rows.Next() {
var cnt sql.NullInt64
if err := rows.Scan(&cnt); err != nil {
- log.Warn("Error while scanning rows")
+ cclog.Warn("Error while scanning rows")
return nil, err
}
@@ -438,7 +438,7 @@ func (r *JobRepository) AddJobCount(
}
}
- log.Debugf("Timer AddJobCount %s", time.Since(start))
+ cclog.Debugf("Timer AddJobCount %s", time.Since(start))
return stats, nil
}
@@ -479,29 +479,29 @@ func (r *JobRepository) AddHistograms(
value := fmt.Sprintf(`CAST(ROUND(((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) / %d) + 1) as %s) as value`, time.Now().Unix(), targetBinSize, castType)
stat.HistDuration, err = r.jobsDurationStatisticsHistogram(ctx, value, filter, targetBinSize, &targetBinCount)
if err != nil {
- log.Warn("Error while loading job statistics histogram: job duration")
+ cclog.Warn("Error while loading job statistics histogram: job duration")
return nil, err
}
stat.HistNumNodes, err = r.jobsStatisticsHistogram(ctx, "job.num_nodes as value", filter)
if err != nil {
- log.Warn("Error while loading job statistics histogram: num nodes")
+ cclog.Warn("Error while loading job statistics histogram: num nodes")
return nil, err
}
stat.HistNumCores, err = r.jobsStatisticsHistogram(ctx, "job.num_hwthreads as value", filter)
if err != nil {
- log.Warn("Error while loading job statistics histogram: num hwthreads")
+ cclog.Warn("Error while loading job statistics histogram: num hwthreads")
return nil, err
}
stat.HistNumAccs, err = r.jobsStatisticsHistogram(ctx, "job.num_acc as value", filter)
if err != nil {
- log.Warn("Error while loading job statistics histogram: num acc")
+ cclog.Warn("Error while loading job statistics histogram: num acc")
return nil, err
}
- log.Debugf("Timer AddHistograms %s", time.Since(start))
+ cclog.Debugf("Timer AddHistograms %s", time.Since(start))
return stat, nil
}
@@ -520,7 +520,7 @@ func (r *JobRepository) AddMetricHistograms(
if f.State != nil {
if len(f.State) == 1 && f.State[0] == "running" {
stat.HistMetrics = r.runningJobsMetricStatisticsHistogram(ctx, metrics, filter, targetBinCount)
- log.Debugf("Timer AddMetricHistograms %s", time.Since(start))
+ cclog.Debugf("Timer AddMetricHistograms %s", time.Since(start))
return stat, nil
}
}
@@ -530,13 +530,13 @@ func (r *JobRepository) AddMetricHistograms(
for _, m := range metrics {
metricHisto, err := r.jobsMetricStatisticsHistogram(ctx, m, filter, targetBinCount)
if err != nil {
- log.Warnf("Error while loading job metric statistics histogram: %s", m)
+ cclog.Warnf("Error while loading job metric statistics histogram: %s", m)
continue
}
stat.HistMetrics = append(stat.HistMetrics, metricHisto)
}
- log.Debugf("Timer AddMetricHistograms %s", time.Since(start))
+ cclog.Debugf("Timer AddMetricHistograms %s", time.Since(start))
return stat, nil
}
@@ -560,7 +560,7 @@ func (r *JobRepository) jobsStatisticsHistogram(
rows, err := query.GroupBy("value").RunWith(r.DB).Query()
if err != nil {
- log.Error("Error while running query")
+ cclog.Error("Error while running query")
return nil, err
}
@@ -569,13 +569,13 @@ func (r *JobRepository) jobsStatisticsHistogram(
for rows.Next() {
point := model.HistoPoint{}
if err := rows.Scan(&point.Value, &point.Count); err != nil {
- log.Warn("Error while scanning rows")
+ cclog.Warn("Error while scanning rows")
return nil, err
}
points = append(points, &point)
}
- log.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start))
+ cclog.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start))
return points, nil
}
@@ -607,7 +607,7 @@ func (r *JobRepository) jobsDurationStatisticsHistogram(
rows, err := query.GroupBy("value").RunWith(r.DB).Query()
if err != nil {
- log.Error("Error while running query")
+ cclog.Error("Error while running query")
return nil, err
}
@@ -615,7 +615,7 @@ func (r *JobRepository) jobsDurationStatisticsHistogram(
for rows.Next() {
point := model.HistoPoint{}
if err := rows.Scan(&point.Value, &point.Count); err != nil {
- log.Warn("Error while scanning rows")
+ cclog.Warn("Error while scanning rows")
return nil, err
}
@@ -630,7 +630,7 @@ func (r *JobRepository) jobsDurationStatisticsHistogram(
}
}
- log.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start))
+ cclog.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start))
return points, nil
}
@@ -652,7 +652,7 @@ func (r *JobRepository) jobsMetricStatisticsHistogram(
peak = metricConfig.Peak
unit = metricConfig.Unit.Prefix + metricConfig.Unit.Base
footprintStat = metricConfig.Footprint
- log.Debugf("Cluster %s filter found with peak %f for %s", *f.Cluster.Eq, peak, metric)
+ cclog.Debugf("Cluster %s filter found with peak %f for %s", *f.Cluster.Eq, peak, metric)
}
}
@@ -674,7 +674,7 @@ func (r *JobRepository) jobsMetricStatisticsHistogram(
}
}
- // log.Debugf("Metric %s, Peak %f, Unit %s", metric, peak, unit)
+ // cclog.Debugf("Metric %s, Peak %f, Unit %s", metric, peak, unit)
// Make bins, see https://jereze.com/code/sql-histogram/ (Modified here)
start := time.Now()
@@ -686,7 +686,7 @@ func (r *JobRepository) jobsMetricStatisticsHistogram(
mainQuery := sq.Select(
fmt.Sprintf(`%s + 1 as bin`, binQuery),
- fmt.Sprintf(`count(*) as count`),
+ `count(*) as count`,
// For Debug: // fmt.Sprintf(`CAST((%f / %d) as INTEGER ) * %s as min`, peak, *bins, binQuery),
// For Debug: // fmt.Sprintf(`CAST((%f / %d) as INTEGER ) * (%s + 1) as max`, peak, *bins, binQuery),
).From("job").Where(
@@ -709,7 +709,7 @@ func (r *JobRepository) jobsMetricStatisticsHistogram(
rows, err := mainQuery.RunWith(r.DB).Query()
if err != nil {
- log.Errorf("Error while running mainQuery: %s", err)
+ cclog.Errorf("Error while running mainQuery: %s", err)
return nil, err
}
@@ -726,7 +726,7 @@ func (r *JobRepository) jobsMetricStatisticsHistogram(
for rows.Next() { // Fill Count if Bin-No. Matches (Not every Bin exists in DB!)
rpoint := model.MetricHistoPoint{}
if err := rows.Scan(&rpoint.Bin, &rpoint.Count); err != nil { // Required for Debug: &rpoint.Min, &rpoint.Max
- log.Warnf("Error while scanning rows for %s", metric)
+ cclog.Warnf("Error while scanning rows for %s", metric)
return nil, err // FIXME: Totally bricks cc-backend if returned and if all metrics requested?
}
@@ -736,10 +736,10 @@ func (r *JobRepository) jobsMetricStatisticsHistogram(
e.Count = rpoint.Count
// Only Required For Debug: Check DB returned Min/Max against Backend Init above
// if rpoint.Min != nil {
- // log.Warnf(">>>> Bin %d Min Set For %s to %d (Init'd with: %d)", *e.Bin, metric, *rpoint.Min, *e.Min)
+ // cclog.Warnf(">>>> Bin %d Min Set For %s to %d (Init'd with: %d)", *e.Bin, metric, *rpoint.Min, *e.Min)
// }
// if rpoint.Max != nil {
- // log.Warnf(">>>> Bin %d Max Set For %s to %d (Init'd with: %d)", *e.Bin, metric, *rpoint.Max, *e.Max)
+ // cclog.Warnf(">>>> Bin %d Max Set For %s to %d (Init'd with: %d)", *e.Bin, metric, *rpoint.Max, *e.Max)
// }
break
}
@@ -749,7 +749,7 @@ func (r *JobRepository) jobsMetricStatisticsHistogram(
result := model.MetricHistoPoints{Metric: metric, Unit: unit, Stat: &footprintStat, Data: points}
- log.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start))
+ cclog.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start))
return &result, nil
}
@@ -759,15 +759,14 @@ func (r *JobRepository) runningJobsMetricStatisticsHistogram(
filters []*model.JobFilter,
bins *int,
) []*model.MetricHistoPoints {
-
// Get Jobs
jobs, err := r.QueryJobs(ctx, filters, &model.PageRequest{Page: 1, ItemsPerPage: 500 + 1}, nil)
if err != nil {
- log.Errorf("Error while querying jobs for footprint: %s", err)
+ cclog.Errorf("Error while querying jobs for footprint: %s", err)
return nil
}
if len(jobs) > 500 {
- log.Errorf("too many jobs matched (max: %d)", 500)
+ cclog.Errorf("too many jobs matched (max: %d)", 500)
return nil
}
@@ -783,7 +782,7 @@ func (r *JobRepository) runningJobsMetricStatisticsHistogram(
}
if err := metricDataDispatcher.LoadAverages(job, metrics, avgs, ctx); err != nil {
- log.Errorf("Error while loading averages for histogram: %s", err)
+ cclog.Errorf("Error while loading averages for histogram: %s", err)
return nil
}
}
diff --git a/internal/repository/stats_test.go b/internal/repository/stats_test.go
index 2cc377c..bc4ac04 100644
--- a/internal/repository/stats_test.go
+++ b/internal/repository/stats_test.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -19,7 +19,6 @@ func TestBuildJobStatsQuery(t *testing.T) {
noErr(t, err)
fmt.Printf("SQL: %s\n", sql)
-
}
func TestJobStats(t *testing.T) {
diff --git a/internal/repository/tags.go b/internal/repository/tags.go
index 544163e..87bf69d 100644
--- a/internal/repository/tags.go
+++ b/internal/repository/tags.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -9,8 +9,8 @@ import (
"strings"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/schema"
sq "github.com/Masterminds/squirrel"
)
@@ -18,7 +18,7 @@ import (
func (r *JobRepository) AddTag(user *schema.User, job int64, tag int64) ([]*schema.Tag, error) {
j, err := r.FindByIdWithUser(user, job)
if err != nil {
- log.Warn("Error while finding job by id")
+ cclog.Warn("Error while finding job by id")
return nil, err
}
@@ -26,30 +26,61 @@ func (r *JobRepository) AddTag(user *schema.User, job int64, tag int64) ([]*sche
if _, err := q.RunWith(r.stmtCache).Exec(); err != nil {
s, _, _ := q.ToSql()
- log.Errorf("Error adding tag with %s: %v", s, err)
+ cclog.Errorf("Error adding tag with %s: %v", s, err)
return nil, err
}
tags, err := r.GetTags(user, &job)
if err != nil {
- log.Warn("Error while getting tags for job")
+ cclog.Warn("Error while getting tags for job")
return nil, err
}
archiveTags, err := r.getArchiveTags(&job)
if err != nil {
- log.Warn("Error while getting tags for job")
+ cclog.Warn("Error while getting tags for job")
return nil, err
}
return tags, archive.UpdateTags(j, archiveTags)
}
-// Removes a tag from a job by tag id
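+
+// AddTagDirect adds a tag to a job without checking user permissions and
+// updates the tags stored in the job archive. Used by the auto taggers.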
+func (r *JobRepository) AddTagDirect(job int64, tag int64) ([]*schema.Tag, error) {
+ j, err := r.FindByIdDirect(job)
+ if err != nil {
+ cclog.Warn("Error while finding job by id")
+ return nil, err
+ }
+
+ q := sq.Insert("jobtag").Columns("job_id", "tag_id").Values(job, tag)
+
+ if _, err := q.RunWith(r.stmtCache).Exec(); err != nil {
+ s, _, _ := q.ToSql()
+ cclog.Errorf("Error adding tag with %s: %v", s, err)
+ return nil, err
+ }
+
+ tags, err := r.GetTagsDirect(&job)
+ if err != nil {
+ cclog.Warn("Error while getting tags for job")
+ return nil, err
+ }
+
+ archiveTags, err := r.getArchiveTags(&job)
+ if err != nil {
+ cclog.Warn("Error while getting tags for job")
+ return nil, err
+ }
+
+ return tags, archive.UpdateTags(j, archiveTags)
+}
+
+// Removes a tag from a job by tag id.
+// Used by GraphQL API
func (r *JobRepository) RemoveTag(user *schema.User, job, tag int64) ([]*schema.Tag, error) {
j, err := r.FindByIdWithUser(user, job)
if err != nil {
- log.Warn("Error while finding job by id")
+ cclog.Warn("Error while finding job by id")
return nil, err
}
@@ -57,19 +88,19 @@ func (r *JobRepository) RemoveTag(user *schema.User, job, tag int64) ([]*schema.
if _, err := q.RunWith(r.stmtCache).Exec(); err != nil {
s, _, _ := q.ToSql()
- log.Errorf("Error removing tag with %s: %v", s, err)
+ cclog.Errorf("Error removing tag with %s: %v", s, err)
return nil, err
}
tags, err := r.GetTags(user, &job)
if err != nil {
- log.Warn("Error while getting tags for job")
+ cclog.Warn("Error while getting tags for job")
return nil, err
}
archiveTags, err := r.getArchiveTags(&job)
if err != nil {
- log.Warn("Error while getting tags for job")
+ cclog.Warn("Error while getting tags for job")
return nil, err
}
@@ -77,18 +108,19 @@ func (r *JobRepository) RemoveTag(user *schema.User, job, tag int64) ([]*schema.
}
// Removes a tag from a job by tag info
+// Used by REST API
func (r *JobRepository) RemoveJobTagByRequest(user *schema.User, job int64, tagType string, tagName string, tagScope string) ([]*schema.Tag, error) {
// Get Tag ID to delete
tagID, exists := r.TagId(tagType, tagName, tagScope)
if !exists {
- log.Warnf("Tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope)
- return nil, fmt.Errorf("Tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope)
+ cclog.Warnf("Tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope)
+ return nil, fmt.Errorf("tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope)
}
// Get Job
j, err := r.FindByIdWithUser(user, job)
if err != nil {
- log.Warn("Error while finding job by id")
+ cclog.Warn("Error while finding job by id")
return nil, err
}
@@ -97,63 +129,70 @@ func (r *JobRepository) RemoveJobTagByRequest(user *schema.User, job int64, tagT
if _, err := q.RunWith(r.stmtCache).Exec(); err != nil {
s, _, _ := q.ToSql()
- log.Errorf("Error removing tag from table 'jobTag' with %s: %v", s, err)
+ cclog.Errorf("Error removing tag from table 'jobTag' with %s: %v", s, err)
return nil, err
}
tags, err := r.GetTags(user, &job)
if err != nil {
- log.Warn("Error while getting tags for job")
+ cclog.Warn("Error while getting tags for job")
return nil, err
}
archiveTags, err := r.getArchiveTags(&job)
if err != nil {
- log.Warn("Error while getting tags for job")
+ cclog.Warn("Error while getting tags for job")
return nil, err
}
return tags, archive.UpdateTags(j, archiveTags)
}
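+
+// removeTagFromArchiveJobs updates the tag list in the job archive for all
+// given jobs after a tag was removed from the database.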
+func (r *JobRepository) removeTagFromArchiveJobs(jobIds []int64) {
+ for _, j := range jobIds {
+ tags, err := r.getArchiveTags(&j)
+ if err != nil {
+ cclog.Warnf("Error while getting tags for job %d", j)
+ continue
+ }
+
+ job, err := r.FindByIdDirect(j)
+ if err != nil {
+ cclog.Warnf("Error while getting job %d", j)
+ continue
+ }
+
+ archive.UpdateTags(job, tags)
+ }
+}
+
// Removes a tag from db by tag info
+// Used by REST API. Does not update tagged jobs in Job archive.
func (r *JobRepository) RemoveTagByRequest(tagType string, tagName string, tagScope string) error {
// Get Tag ID to delete
tagID, exists := r.TagId(tagType, tagName, tagScope)
if !exists {
- log.Warnf("Tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope)
- return fmt.Errorf("Tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope)
+ cclog.Warnf("Tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope)
+ return fmt.Errorf("tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope)
}
- // Handle Delete JobTagTable
- qJobTag := sq.Delete("jobtag").Where("jobtag.tag_id = ?", tagID)
-
- if _, err := qJobTag.RunWith(r.stmtCache).Exec(); err != nil {
- s, _, _ := qJobTag.ToSql()
- log.Errorf("Error removing tag from table 'jobTag' with %s: %v", s, err)
- return err
- }
-
- // Handle Delete TagTable
- qTag := sq.Delete("tag").Where("tag.id = ?", tagID)
-
- if _, err := qTag.RunWith(r.stmtCache).Exec(); err != nil {
- s, _, _ := qTag.ToSql()
- log.Errorf("Error removing tag from table 'tag' with %s: %v", s, err)
- return err
- }
-
- return nil
+ return r.RemoveTagById(tagID)
}
// Removes a tag from db by tag id
+// Used by GraphQL API.
func (r *JobRepository) RemoveTagById(tagID int64) error {
+ jobIds, err := r.FindJobIdsByTag(tagID)
+ if err != nil {
+ return err
+ }
+
// Handle Delete JobTagTable
qJobTag := sq.Delete("jobtag").Where("jobtag.tag_id = ?", tagID)
if _, err := qJobTag.RunWith(r.stmtCache).Exec(); err != nil {
s, _, _ := qJobTag.ToSql()
- log.Errorf("Error removing tag from table 'jobTag' with %s: %v", s, err)
+ cclog.Errorf("Error removing tag from table 'jobTag' with %s: %v", s, err)
return err
}
@@ -162,10 +201,13 @@ func (r *JobRepository) RemoveTagById(tagID int64) error {
if _, err := qTag.RunWith(r.stmtCache).Exec(); err != nil {
s, _, _ := qTag.ToSql()
- log.Errorf("Error removing tag from table 'tag' with %s: %v", s, err)
+ cclog.Errorf("Error removing tag from table 'tag' with %s: %v", s, err)
return err
}
+ // asynchronously update archive jobs
+ go r.removeTagFromArchiveJobs(jobIds)
+
return nil
}
@@ -181,7 +223,7 @@ func (r *JobRepository) CreateTag(tagType string, tagName string, tagScope strin
res, err := q.RunWith(r.stmtCache).Exec()
if err != nil {
s, _, _ := q.ToSql()
- log.Errorf("Error inserting tag with %s: %v", s, err)
+ cclog.Errorf("Error inserting tag with %s: %v", s, err)
return 0, err
}
@@ -230,7 +272,7 @@ func (r *JobRepository) CountTags(user *schema.User) (tags []schema.Tag, counts
// Handle Job Ownership
if user != nil && user.HasAnyRole([]schema.Role{schema.RoleAdmin, schema.RoleSupport}) { // ADMIN || SUPPORT: Count all jobs
- // log.Debug("CountTags: User Admin or Support -> Count all Jobs for Tags")
+ // cclog.Debug("CountTags: User Admin or Support -> Count all Jobs for Tags")
// Unchanged: Needs to be own case still, due to UserRole/NoRole compatibility handling in else case
} else if user != nil && user.HasRole(schema.RoleManager) { // MANAGER: Count own jobs plus project's jobs
// Build ("project1", "project2", ...) list of variable length directly in SQL string
@@ -291,6 +333,38 @@ func (r *JobRepository) AddTagOrCreate(user *schema.User, jobId int64, tagType s
return tagId, nil
}
+// AddTagOrCreateDirect adds the tag with the specified type and name to the
+// job with database id jobId. If the tag does not exist yet, it is created
+// with global scope. Used by the auto tagger plugins.
+func (r *JobRepository) AddTagOrCreateDirect(jobId int64, tagType string, tagName string) (tagId int64, err error) {
+ tagScope := "global"
+
+ tagId, exists := r.TagId(tagType, tagName, tagScope)
+ if !exists {
+ tagId, err = r.CreateTag(tagType, tagName, tagScope)
+ if err != nil {
+ return 0, err
+ }
+ }
+
+ if _, err := r.AddTagDirect(jobId, tagId); err != nil {
+ return 0, err
+ }
+
+ return tagId, nil
+}
+
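+// HasTag reports whether the job with the given database id is tagged with
+// the specified tag type and name.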
+func (r *JobRepository) HasTag(jobId int64, tagType string, tagName string) bool {
+ var id int64
+ q := sq.Select("id").From("tag").Join("jobtag ON jobtag.tag_id = tag.id").
+ Where("jobtag.job_id = ?", jobId).Where("tag.tag_type = ?", tagType).
+ Where("tag.tag_name = ?", tagName)
+ err := q.RunWith(r.stmtCache).QueryRow().Scan(&id)
+ return err == nil
+}
+
// TagId returns the database id of the tag with the specified type and name.
func (r *JobRepository) TagId(tagType string, tagName string, tagScope string) (tagId int64, exists bool) {
exists = true
@@ -322,7 +396,7 @@ func (r *JobRepository) GetTags(user *schema.User, job *int64) ([]*schema.Tag, e
rows, err := q.RunWith(r.stmtCache).Query()
if err != nil {
s, _, _ := q.ToSql()
- log.Errorf("Error get tags with %s: %v", s, err)
+ cclog.Errorf("Error get tags with %s: %v", s, err)
return nil, err
}
@@ -330,7 +404,7 @@ func (r *JobRepository) GetTags(user *schema.User, job *int64) ([]*schema.Tag, e
for rows.Next() {
tag := &schema.Tag{}
if err := rows.Scan(&tag.ID, &tag.Type, &tag.Name, &tag.Scope); err != nil {
- log.Warn("Error while scanning rows")
+ cclog.Warn("Error while scanning rows")
return nil, err
}
// Handle Scope Filtering: Tag Scope is Global, Private (== Username) or User is auth'd to view Admin Tags
@@ -346,6 +420,32 @@ func (r *JobRepository) GetTags(user *schema.User, job *int64) ([]*schema.Tag, e
return tags, nil
}
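+
+// GetTagsDirect returns the tags of the job with the given database id, or
+// all tags if job is nil, without applying any scope filtering.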
+func (r *JobRepository) GetTagsDirect(job *int64) ([]*schema.Tag, error) {
+ q := sq.Select("id", "tag_type", "tag_name", "tag_scope").From("tag")
+ if job != nil {
+ q = q.Join("jobtag ON jobtag.tag_id = tag.id").Where("jobtag.job_id = ?", *job)
+ }
+
+ rows, err := q.RunWith(r.stmtCache).Query()
+ if err != nil {
+ s, _, _ := q.ToSql()
+ cclog.Errorf("Error get tags with %s: %v", s, err)
+ return nil, err
+ }
+
+ tags := make([]*schema.Tag, 0)
+ for rows.Next() {
+ tag := &schema.Tag{}
+ if err := rows.Scan(&tag.ID, &tag.Type, &tag.Name, &tag.Scope); err != nil {
+ cclog.Warn("Error while scanning rows")
+ return nil, err
+ }
+ tags = append(tags, tag)
+ }
+
+ return tags, nil
+}
+
// GetArchiveTags returns a list of all tags *regardless of scope* for archiving if job is nil or of the tags that the job with that database ID has.
func (r *JobRepository) getArchiveTags(job *int64) ([]*schema.Tag, error) {
q := sq.Select("id", "tag_type", "tag_name", "tag_scope").From("tag")
@@ -356,7 +456,7 @@ func (r *JobRepository) getArchiveTags(job *int64) ([]*schema.Tag, error) {
rows, err := q.RunWith(r.stmtCache).Query()
if err != nil {
s, _, _ := q.ToSql()
- log.Errorf("Error get tags with %s: %v", s, err)
+ cclog.Errorf("Error get tags with %s: %v", s, err)
return nil, err
}
@@ -364,7 +464,7 @@ func (r *JobRepository) getArchiveTags(job *int64) ([]*schema.Tag, error) {
for rows.Next() {
tag := &schema.Tag{}
if err := rows.Scan(&tag.ID, &tag.Type, &tag.Name, &tag.Scope); err != nil {
- log.Warn("Error while scanning rows")
+ cclog.Warn("Error while scanning rows")
return nil, err
}
tags = append(tags, tag)
@@ -388,7 +488,7 @@ func (r *JobRepository) ImportTag(jobId int64, tagType string, tagName string, t
if _, err := q.RunWith(r.stmtCache).Exec(); err != nil {
s, _, _ := q.ToSql()
- log.Errorf("Error adding tag on import with %s: %v", s, err)
+ cclog.Errorf("Error adding tag on import with %s: %v", s, err)
return err
}
diff --git a/internal/repository/testdata/job.db b/internal/repository/testdata/job.db
index 43ec9d3..e9e20ce 100644
Binary files a/internal/repository/testdata/job.db and b/internal/repository/testdata/job.db differ
diff --git a/internal/repository/transaction.go b/internal/repository/transaction.go
index 603d505..39941c1 100644
--- a/internal/repository/transaction.go
+++ b/internal/repository/transaction.go
@@ -1,11 +1,11 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
import (
- "github.com/ClusterCockpit/cc-backend/pkg/log"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/jmoiron/sqlx"
)
@@ -20,7 +20,7 @@ func (r *JobRepository) TransactionInit() (*Transaction, error) {
t.tx, err = r.DB.Beginx()
if err != nil {
- log.Warn("Error while bundling transactions")
+ cclog.Warn("Error while bundling transactions")
return nil, err
}
return t, nil
@@ -30,14 +30,14 @@ func (r *JobRepository) TransactionCommit(t *Transaction) error {
var err error
if t.tx != nil {
if err = t.tx.Commit(); err != nil {
- log.Warn("Error while committing transactions")
+ cclog.Warn("Error while committing transactions")
return err
}
}
t.tx, err = r.DB.Beginx()
if err != nil {
- log.Warn("Error while bundling transactions")
+ cclog.Warn("Error while bundling transactions")
return err
}
@@ -46,7 +46,7 @@ func (r *JobRepository) TransactionCommit(t *Transaction) error {
func (r *JobRepository) TransactionEnd(t *Transaction) error {
if err := t.tx.Commit(); err != nil {
- log.Warn("Error while committing SQL transactions")
+ cclog.Warn("Error while committing SQL transactions")
return err
}
return nil
@@ -59,13 +59,13 @@ func (r *JobRepository) TransactionAddNamed(
) (int64, error) {
res, err := t.tx.NamedExec(query, args)
if err != nil {
- log.Errorf("Named Exec failed: %v", err)
+ cclog.Errorf("Named Exec failed: %v", err)
return 0, err
}
id, err := res.LastInsertId()
if err != nil {
- log.Errorf("repository initDB(): %v", err)
+ cclog.Errorf("repository initDB(): %v", err)
return 0, err
}
@@ -73,16 +73,15 @@ func (r *JobRepository) TransactionAddNamed(
}
func (r *JobRepository) TransactionAdd(t *Transaction, query string, args ...interface{}) (int64, error) {
-
res, err := t.tx.Exec(query, args...)
if err != nil {
- log.Errorf("TransactionAdd(), Exec() Error: %v", err)
+ cclog.Errorf("TransactionAdd(), Exec() Error: %v", err)
return 0, err
}
id, err := res.LastInsertId()
if err != nil {
- log.Errorf("TransactionAdd(), LastInsertId() Error: %v", err)
+ cclog.Errorf("TransactionAdd(), LastInsertId() Error: %v", err)
return 0, err
}
diff --git a/internal/repository/user.go b/internal/repository/user.go
index c411c38..1dca7f4 100644
--- a/internal/repository/user.go
+++ b/internal/repository/user.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -13,13 +13,13 @@ import (
"strings"
"sync"
+ "github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/schema"
sq "github.com/Masterminds/squirrel"
"github.com/jmoiron/sqlx"
"golang.org/x/crypto/bcrypt"
- "github.com/ClusterCockpit/cc-backend/internal/config"
)
var (
@@ -50,7 +50,7 @@ func (r *UserRepository) GetUser(username string) (*schema.User, error) {
if err := sq.Select("password", "ldap", "name", "roles", "email", "projects").From("hpc_user").
Where("hpc_user.username = ?", username).RunWith(r.DB).
QueryRow().Scan(&hashedPassword, &user.AuthSource, &name, &rawRoles, &email, &rawProjects); err != nil {
- log.Warnf("Error while querying user '%v' from database", username)
+ cclog.Warnf("Error while querying user '%v' from database", username)
return nil, err
}
@@ -59,7 +59,7 @@ func (r *UserRepository) GetUser(username string) (*schema.User, error) {
user.Email = email.String
if rawRoles.Valid {
if err := json.Unmarshal([]byte(rawRoles.String), &user.Roles); err != nil {
- log.Warn("Error while unmarshaling raw roles from DB")
+ cclog.Warn("Error while unmarshaling raw roles from DB")
return nil, err
}
}
@@ -76,14 +76,14 @@ func (r *UserRepository) GetLdapUsernames() ([]string, error) {
var users []string
rows, err := r.DB.Query(`SELECT username FROM hpc_user WHERE hpc_user.ldap = 1`)
if err != nil {
- log.Warn("Error while querying usernames")
+ cclog.Warn("Error while querying usernames")
return nil, err
}
for rows.Next() {
var username string
if err := rows.Scan(&username); err != nil {
- log.Warnf("Error while scanning for user '%s'", username)
+ cclog.Warnf("Error while scanning for user '%s'", username)
return nil, err
}
@@ -111,7 +111,7 @@ func (r *UserRepository) AddUser(user *schema.User) error {
if user.Password != "" {
password, err := bcrypt.GenerateFromPassword([]byte(user.Password), bcrypt.DefaultCost)
if err != nil {
- log.Error("Error while encrypting new user password")
+ cclog.Error("Error while encrypting new user password")
return err
}
cols = append(cols, "password")
@@ -123,21 +123,21 @@ func (r *UserRepository) AddUser(user *schema.User) error {
}
if _, err := sq.Insert("hpc_user").Columns(cols...).Values(vals...).RunWith(r.DB).Exec(); err != nil {
- log.Errorf("Error while inserting new user '%v' into DB", user.Username)
+ cclog.Errorf("Error while inserting new user '%v' into DB", user.Username)
return err
}
- log.Infof("new user %#v created (roles: %s, auth-source: %d, projects: %s)", user.Username, rolesJson, user.AuthSource, projectsJson)
+ cclog.Infof("new user %#v created (roles: %s, auth-source: %d, projects: %s)", user.Username, rolesJson, user.AuthSource, projectsJson)
defaultMetricsCfg, err := config.LoadDefaultMetricsConfig()
if err != nil {
- log.Errorf("Error loading default metrics config: %v", err)
+ cclog.Errorf("Error loading default metrics config: %v", err)
} else if defaultMetricsCfg != nil {
for _, cluster := range defaultMetricsCfg.Clusters {
metricsArray := config.ParseMetricsString(cluster.DefaultMetrics)
metricsJSON, err := json.Marshal(metricsArray)
if err != nil {
- log.Errorf("Error marshaling default metrics for cluster %s: %v", cluster.Name, err)
+ cclog.Errorf("Error marshaling default metrics for cluster %s: %v", cluster.Name, err)
continue
}
confKey := "job_view_selectedMetrics:" + cluster.Name
@@ -145,9 +145,9 @@ func (r *UserRepository) AddUser(user *schema.User) error {
Columns("username", "confkey", "value").
Values(user.Username, confKey, string(metricsJSON)).
RunWith(r.DB).Exec(); err != nil {
- log.Errorf("Error inserting default job view metrics for user %s and cluster %s: %v", user.Username, cluster.Name, err)
+ cclog.Errorf("Error inserting default job view metrics for user %s and cluster %s: %v", user.Username, cluster.Name, err)
} else {
- log.Infof("Default job view metrics for user %s and cluster %s set to %s", user.Username, cluster.Name, string(metricsJSON))
+ cclog.Infof("Default job view metrics for user %s and cluster %s set to %s", user.Username, cluster.Name, string(metricsJSON))
}
}
}
@@ -160,7 +160,7 @@ func (r *UserRepository) UpdateUser(dbUser *schema.User, user *schema.User) erro
// TODO: Discuss updatable fields
if dbUser.Name != user.Name {
if _, err := sq.Update("hpc_user").Set("name", user.Name).Where("hpc_user.username = ?", dbUser.Username).RunWith(r.DB).Exec(); err != nil {
- log.Errorf("error while updating name of user '%s'", user.Username)
+ cclog.Errorf("error while updating name of user '%s'", user.Username)
return err
}
}
@@ -179,10 +179,10 @@ func (r *UserRepository) UpdateUser(dbUser *schema.User, user *schema.User) erro
func (r *UserRepository) DelUser(username string) error {
_, err := r.DB.Exec(`DELETE FROM hpc_user WHERE hpc_user.username = ?`, username)
if err != nil {
- log.Errorf("Error while deleting user '%s' from DB", username)
+ cclog.Errorf("Error while deleting user '%s' from DB", username)
return err
}
- log.Infof("deleted user '%s' from DB", username)
+ cclog.Infof("deleted user '%s' from DB", username)
return nil
}
@@ -194,7 +194,7 @@ func (r *UserRepository) ListUsers(specialsOnly bool) ([]*schema.User, error) {
rows, err := q.RunWith(r.DB).Query()
if err != nil {
- log.Warn("Error while querying user list")
+ cclog.Warn("Error while querying user list")
return nil, err
}
@@ -206,12 +206,12 @@ func (r *UserRepository) ListUsers(specialsOnly bool) ([]*schema.User, error) {
user := &schema.User{}
var name, email sql.NullString
if err := rows.Scan(&user.Username, &name, &email, &rawroles, &rawprojects); err != nil {
- log.Warn("Error while scanning user list")
+ cclog.Warn("Error while scanning user list")
return nil, err
}
if err := json.Unmarshal([]byte(rawroles), &user.Roles); err != nil {
- log.Warn("Error while unmarshaling raw role list")
+ cclog.Warn("Error while unmarshaling raw role list")
return nil, err
}
@@ -234,7 +234,7 @@ func (r *UserRepository) AddRole(
newRole := strings.ToLower(queryrole)
user, err := r.GetUser(username)
if err != nil {
- log.Warnf("Could not load user '%s'", username)
+ cclog.Warnf("Could not load user '%s'", username)
return err
}
@@ -249,7 +249,7 @@ func (r *UserRepository) AddRole(
roles, _ := json.Marshal(append(user.Roles, newRole))
if _, err := sq.Update("hpc_user").Set("roles", roles).Where("hpc_user.username = ?", username).RunWith(r.DB).Exec(); err != nil {
- log.Errorf("error while adding new role for user '%s'", user.Username)
+ cclog.Errorf("error while adding new role for user '%s'", user.Username)
return err
}
return nil
@@ -259,7 +259,7 @@ func (r *UserRepository) RemoveRole(ctx context.Context, username string, queryr
oldRole := strings.ToLower(queryrole)
user, err := r.GetUser(username)
if err != nil {
- log.Warnf("Could not load user '%s'", username)
+ cclog.Warnf("Could not load user '%s'", username)
return err
}
@@ -285,7 +285,7 @@ func (r *UserRepository) RemoveRole(ctx context.Context, username string, queryr
mroles, _ := json.Marshal(newroles)
if _, err := sq.Update("hpc_user").Set("roles", mroles).Where("hpc_user.username = ?", username).RunWith(r.DB).Exec(); err != nil {
- log.Errorf("Error while removing role for user '%s'", user.Username)
+ cclog.Errorf("Error while removing role for user '%s'", user.Username)
return err
}
return nil
@@ -364,10 +364,10 @@ const ContextUserKey ContextKey = "user"
func GetUserFromContext(ctx context.Context) *schema.User {
x := ctx.Value(ContextUserKey)
if x == nil {
- log.Warnf("no user retrieved from context")
+ cclog.Warnf("no user retrieved from context")
return nil
}
- // log.Infof("user retrieved from context: %v", x.(*schema.User))
+ // cclog.Infof("user retrieved from context: %v", x.(*schema.User))
return x.(*schema.User)
}
@@ -385,11 +385,11 @@ func (r *UserRepository) FetchUserInCtx(ctx context.Context, username string) (*
if err == sql.ErrNoRows {
/* This warning will be logged *often* for non-local users, i.e. users mentioned only in job-table or archive, */
/* since FetchUser will be called to retrieve full name and mail for every job in query/list */
- // log.Warnf("User '%s' Not found in DB", username)
+ // cclog.Warnf("User '%s' Not found in DB", username)
return nil, nil
}
- log.Warnf("Error while fetching user '%s'", username)
+ cclog.Warnf("Error while fetching user '%s'", username)
return nil, err
}
diff --git a/internal/repository/userConfig.go b/internal/repository/userConfig.go
index 5d43071..2ef7164 100644
--- a/internal/repository/userConfig.go
+++ b/internal/repository/userConfig.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -10,9 +10,9 @@ import (
"time"
"github.com/ClusterCockpit/cc-backend/internal/config"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/ClusterCockpit/cc-backend/pkg/lrucache"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/lrucache"
+ "github.com/ClusterCockpit/cc-lib/schema"
"github.com/jmoiron/sqlx"
)
@@ -35,7 +35,7 @@ func GetUserCfgRepo() *UserCfgRepo {
lookupConfigStmt, err := db.DB.Preparex(`SELECT confkey, value FROM configuration WHERE configuration.username = ?`)
if err != nil {
- log.Fatalf("User Config: Call 'db.DB.Preparex()' failed.\nError: %s\n", err.Error())
+ cclog.Fatalf("User Config: Call 'db.DB.Preparex()' failed.\nError: %s\n", err.Error())
}
userCfgRepoInstance = &UserCfgRepo{
@@ -70,7 +70,7 @@ func (uCfg *UserCfgRepo) GetUIConfig(user *schema.User) (map[string]interface{},
rows, err := uCfg.Lookup.Query(user.Username)
if err != nil {
- log.Warnf("Error while looking up user uiconfig for user '%v'", user.Username)
+ cclog.Warnf("Error while looking up user uiconfig for user '%v'", user.Username)
return err, 0, 0
}
@@ -79,13 +79,13 @@ func (uCfg *UserCfgRepo) GetUIConfig(user *schema.User) (map[string]interface{},
for rows.Next() {
var key, rawval string
if err := rows.Scan(&key, &rawval); err != nil {
- log.Warn("Error while scanning user uiconfig values")
+ cclog.Warn("Error while scanning user uiconfig values")
return err, 0, 0
}
var val interface{}
if err := json.Unmarshal([]byte(rawval), &val); err != nil {
- log.Warn("Error while unmarshaling raw user uiconfig json")
+ cclog.Warn("Error while unmarshaling raw user uiconfig json")
return err, 0, 0
}
@@ -100,7 +100,7 @@ func (uCfg *UserCfgRepo) GetUIConfig(user *schema.User) (map[string]interface{},
return uiconfig, 24 * time.Hour, size
})
if err, ok := data.(error); ok {
- log.Error("Error in returned dataset")
+ cclog.Error("Error in returned dataset")
return nil, err
}
@@ -117,7 +117,7 @@ func (uCfg *UserCfgRepo) UpdateConfig(
if user == nil {
var val interface{}
if err := json.Unmarshal([]byte(value), &val); err != nil {
- log.Warn("Error while unmarshaling raw user config json")
+ cclog.Warn("Error while unmarshaling raw user config json")
return err
}
@@ -128,7 +128,7 @@ func (uCfg *UserCfgRepo) UpdateConfig(
}
if _, err := uCfg.DB.Exec(`REPLACE INTO configuration (username, confkey, value) VALUES (?, ?, ?)`, user.Username, key, value); err != nil {
- log.Warnf("Error while replacing user config in DB for user '%v'", user.Username)
+ cclog.Warnf("Error while replacing user config in DB for user '%v'", user.Username)
return err
}
diff --git a/internal/repository/userConfig_test.go b/internal/repository/userConfig_test.go
index cd15c9d..d200763 100644
--- a/internal/repository/userConfig_test.go
+++ b/internal/repository/userConfig_test.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -10,8 +10,8 @@ import (
"testing"
"github.com/ClusterCockpit/cc-backend/internal/config"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/schema"
_ "github.com/mattn/go-sqlite3"
)
@@ -39,7 +39,7 @@ func setupUserTest(t *testing.T) *UserCfgRepo {
} } ]
}`
- log.Init("info", true)
+ cclog.Init("info", true)
dbfilepath := "testdata/job.db"
err := MigrateDB("sqlite3", dbfilepath)
if err != nil {
diff --git a/internal/routerConfig/routes.go b/internal/routerConfig/routes.go
index bf74391..9c19de5 100644
--- a/internal/routerConfig/routes.go
+++ b/internal/routerConfig/routes.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package routerConfig
@@ -16,10 +16,10 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/internal/repository"
- "github.com/ClusterCockpit/cc-backend/internal/util"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
"github.com/ClusterCockpit/cc-backend/web"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/schema"
+ "github.com/ClusterCockpit/cc-lib/util"
"github.com/gorilla/mux"
)
@@ -57,23 +57,23 @@ func setupHomeRoute(i InfoType, r *http.Request) InfoType {
// startJobCount := time.Now()
stats, err := jobRepo.JobCountGrouped(r.Context(), nil, &groupBy)
if err != nil {
- log.Warnf("failed to count jobs: %s", err.Error())
+ cclog.Warnf("failed to count jobs: %s", err.Error())
}
- // log.Infof("Timer HOME ROUTE startJobCount: %s", time.Since(startJobCount))
+ // cclog.Infof("Timer HOME ROUTE startJobCount: %s", time.Since(startJobCount))
// startRunningJobCount := time.Now()
stats, err = jobRepo.AddJobCountGrouped(r.Context(), nil, &groupBy, stats, "running")
if err != nil {
- log.Warnf("failed to count running jobs: %s", err.Error())
+ cclog.Warnf("failed to count running jobs: %s", err.Error())
}
- // log.Infof("Timer HOME ROUTE startRunningJobCount: %s", time.Since(startRunningJobCount))
+ // cclog.Infof("Timer HOME ROUTE startRunningJobCount: %s", time.Since(startRunningJobCount))
i["clusters"] = stats
if util.CheckFileExists("./var/notice.txt") {
msg, err := os.ReadFile("./var/notice.txt")
if err != nil {
- log.Warnf("failed to read notice.txt file: %s", err.Error())
+ cclog.Warnf("failed to read notice.txt file: %s", err.Error())
} else {
i["message"] = string(msg)
}
@@ -161,7 +161,7 @@ func setupNodeRoute(i InfoType, r *http.Request) InfoType {
i["hostname"] = vars["hostname"]
i["id"] = fmt.Sprintf("%s (%s)", vars["cluster"], vars["hostname"])
from, to := r.URL.Query().Get("from"), r.URL.Query().Get("to")
- if from != "" || to != "" {
+ if from != "" && to != "" {
i["from"] = from
i["to"] = to
}
@@ -178,7 +178,7 @@ func setupTaglistRoute(i InfoType, r *http.Request) InfoType {
tags, counts, err := jobRepo.CountTags(repository.GetUserFromContext(r.Context()))
tagMap := make(map[string][]map[string]interface{})
if err != nil {
- log.Warnf("GetTags failed: %s", err.Error())
+ cclog.Warnf("GetTags failed: %s", err.Error())
i["tagmap"] = tagMap
return i
}
@@ -297,6 +297,9 @@ func buildFilterPresets(query url.Values) map[string]interface{} {
}
}
}
+ if len(query["dbId"]) != 0 {
+ filterPresets["dbId"] = query["dbId"]
+ }
if query.Get("jobId") != "" {
if len(query["jobId"]) == 1 {
filterPresets["jobId"] = query.Get("jobId")
diff --git a/internal/tagger/apps/alf.txt b/internal/tagger/apps/alf.txt
new file mode 100644
index 0000000..c455f9e
--- /dev/null
+++ b/internal/tagger/apps/alf.txt
@@ -0,0 +1 @@
+alf
diff --git a/internal/tagger/apps/caracal.txt b/internal/tagger/apps/caracal.txt
new file mode 100644
index 0000000..ed61512
--- /dev/null
+++ b/internal/tagger/apps/caracal.txt
@@ -0,0 +1,7 @@
+calc_rate
+qmdffgen
+dynamic
+evbopt
+explore
+black_box
+poly_qmdff
diff --git a/internal/tagger/apps/chroma.txt b/internal/tagger/apps/chroma.txt
new file mode 100644
index 0000000..0e5b50e
--- /dev/null
+++ b/internal/tagger/apps/chroma.txt
@@ -0,0 +1,3 @@
+chroma
+qdp
+qmp
diff --git a/internal/tagger/apps/cp2k.txt b/internal/tagger/apps/cp2k.txt
new file mode 100644
index 0000000..1e5341c
--- /dev/null
+++ b/internal/tagger/apps/cp2k.txt
@@ -0,0 +1 @@
+cp2k
diff --git a/internal/tagger/apps/cpmd.txt b/internal/tagger/apps/cpmd.txt
new file mode 100644
index 0000000..788aa78
--- /dev/null
+++ b/internal/tagger/apps/cpmd.txt
@@ -0,0 +1 @@
+cpmd
diff --git a/internal/tagger/apps/flame.txt b/internal/tagger/apps/flame.txt
new file mode 100644
index 0000000..6e13cbf
--- /dev/null
+++ b/internal/tagger/apps/flame.txt
@@ -0,0 +1 @@
+flame
diff --git a/internal/tagger/apps/gromacs.txt b/internal/tagger/apps/gromacs.txt
new file mode 100644
index 0000000..6fe8779
--- /dev/null
+++ b/internal/tagger/apps/gromacs.txt
@@ -0,0 +1,3 @@
+gromacs
+gmx
+mdrun
diff --git a/internal/tagger/apps/julia.txt b/internal/tagger/apps/julia.txt
new file mode 100644
index 0000000..9146f2b
--- /dev/null
+++ b/internal/tagger/apps/julia.txt
@@ -0,0 +1 @@
+julia
diff --git a/internal/tagger/apps/lammps.txt b/internal/tagger/apps/lammps.txt
new file mode 100644
index 0000000..d254f82
--- /dev/null
+++ b/internal/tagger/apps/lammps.txt
@@ -0,0 +1 @@
+lmp
diff --git a/internal/tagger/apps/matlab.txt b/internal/tagger/apps/matlab.txt
new file mode 100644
index 0000000..b9fe591
--- /dev/null
+++ b/internal/tagger/apps/matlab.txt
@@ -0,0 +1 @@
+matlab
diff --git a/internal/tagger/apps/openfoam.txt b/internal/tagger/apps/openfoam.txt
new file mode 100644
index 0000000..542d645
--- /dev/null
+++ b/internal/tagger/apps/openfoam.txt
@@ -0,0 +1 @@
+openfoam
diff --git a/internal/tagger/apps/orca.txt b/internal/tagger/apps/orca.txt
new file mode 100644
index 0000000..28f7a1e
--- /dev/null
+++ b/internal/tagger/apps/orca.txt
@@ -0,0 +1 @@
+orca
diff --git a/internal/tagger/apps/python.txt b/internal/tagger/apps/python.txt
new file mode 100644
index 0000000..29bc0f7
--- /dev/null
+++ b/internal/tagger/apps/python.txt
@@ -0,0 +1,4 @@
+python
+pip
+anaconda
+conda
diff --git a/internal/tagger/apps/starccm.txt b/internal/tagger/apps/starccm.txt
new file mode 100644
index 0000000..97cd388
--- /dev/null
+++ b/internal/tagger/apps/starccm.txt
@@ -0,0 +1,2 @@
+starccm+
+-podkey
diff --git a/internal/tagger/apps/turbomole.txt b/internal/tagger/apps/turbomole.txt
new file mode 100644
index 0000000..7d88ab6
--- /dev/null
+++ b/internal/tagger/apps/turbomole.txt
@@ -0,0 +1,10 @@
+dscf
+grad
+ridft
+rdgrad
+ricc2
+statpt
+aoforce
+escf
+egrad
+odft
diff --git a/internal/tagger/apps/vasp.txt b/internal/tagger/apps/vasp.txt
new file mode 100644
index 0000000..bd537e4
--- /dev/null
+++ b/internal/tagger/apps/vasp.txt
@@ -0,0 +1 @@
+vasp
diff --git a/internal/tagger/classifyJob.go b/internal/tagger/classifyJob.go
new file mode 100644
index 0000000..32063cd
--- /dev/null
+++ b/internal/tagger/classifyJob.go
@@ -0,0 +1,322 @@
+// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
+// All rights reserved. This file is part of cc-backend.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+package tagger
+
+import (
+ "bytes"
+ "embed"
+ "encoding/json"
+ "fmt"
+ "maps"
+ "os"
+ "strings"
+ "text/template"
+
+ "github.com/ClusterCockpit/cc-backend/internal/repository"
+ "github.com/ClusterCockpit/cc-backend/pkg/archive"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/schema"
+ "github.com/ClusterCockpit/cc-lib/util"
+ "github.com/expr-lang/expr"
+ "github.com/expr-lang/expr/vm"
+)
+
+//go:embed jobclasses/*
+var jobclassFiles embed.FS
+
+type Variable struct {
+ Name string `json:"name"`
+ Expr string `json:"expr"`
+}
+
+type ruleVariable struct {
+ name string
+ expr *vm.Program
+}
+
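+// RuleFormat describes the JSON layout of a job classification rule file.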
+type RuleFormat struct {
+ Name string `json:"name"`
+ Tag string `json:"tag"`
+ Parameters []string `json:"parameters"`
+ Metrics []string `json:"metrics"`
+ Requirements []string `json:"requirements"`
+ Variables []Variable `json:"variables"`
+ Rule string `json:"rule"`
+ Hint string `json:"hint"`
+}
+
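+// ruleInfo holds a compiled rule: its parameter environment, required
+// metrics, compiled requirement and variable expressions, and hint template.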
+type ruleInfo struct {
+ env map[string]any
+ metrics []string
+ requirements []*vm.Program
+ variables []ruleVariable
+ rule *vm.Program
+ hint *template.Template
+}
+
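+// JobClassTagger classifies jobs by a set of configurable rules and attaches
+// a job class tag plus a hint message to every job that matches.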
+type JobClassTagger struct {
+ rules map[string]ruleInfo
+ parameters map[string]any
+ tagType string
+ cfgPath string
+}
+
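+// prepareRule decodes a rule file, resolves its parameters, compiles all
+// requirement, variable and rule expressions, and registers the result under
+// the rule's tag.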
+func (t *JobClassTagger) prepareRule(b []byte, fns string) {
+ var rule RuleFormat
+ if err := json.NewDecoder(bytes.NewReader(b)).Decode(&rule); err != nil {
+ cclog.Warn("Error while decoding raw job meta json")
+ return
+ }
+
+ ri := ruleInfo{}
+ ri.env = make(map[string]any)
+ ri.metrics = make([]string, 0)
+ ri.requirements = make([]*vm.Program, 0)
+ ri.variables = make([]ruleVariable, 0)
+
+ // check if all required parameters are available
+ for _, p := range rule.Parameters {
+ param, ok := t.parameters[p]
+ if !ok {
+ cclog.Warnf("prepareRule() > missing parameter %s in rule %s", p, fns)
+ return
+ }
+ ri.env[p] = param
+ }
+
+ // set all required metrics
+ ri.metrics = append(ri.metrics, rule.Metrics...)
+
+ // compile requirements
+ for _, r := range rule.Requirements {
+ req, err := expr.Compile(r, expr.AsBool())
+ if err != nil {
+ cclog.Errorf("error compiling requirement %s: %#v", r, err)
+ return
+ }
+ ri.requirements = append(ri.requirements, req)
+ }
+
+ // compile variables
+ for _, v := range rule.Variables {
+ req, err := expr.Compile(v.Expr, expr.AsFloat64())
+ if err != nil {
+ cclog.Errorf("error compiling requirement %s: %#v", v.Name, err)
+ return
+ }
+ ri.variables = append(ri.variables, ruleVariable{name: v.Name, expr: req})
+ }
+
+ // compile rule
+ exp, err := expr.Compile(rule.Rule, expr.AsBool())
+ if err != nil {
+ cclog.Errorf("error compiling rule %s: %#v", fns, err)
+ return
+ }
+ ri.rule = exp
+
+ // prepare hint template
+ ri.hint, err = template.New(fns).Parse(rule.Hint)
+ if err != nil {
+ cclog.Errorf("error processing template %s: %#v", fns, err)
+ }
+ cclog.Infof("prepareRule() > processing %s with %d requirements and %d variables", fns, len(ri.requirements), len(ri.variables))
+
+ t.rules[rule.Tag] = ri
+}
+
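+// EventMatch reports whether a file watch event concerns the jobclasses
+// config directory; used by the listener registered via util.AddListener.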
+func (t *JobClassTagger) EventMatch(s string) bool {
+ return strings.Contains(s, "jobclasses")
+}
+
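+// EventCallback reloads the parameters and all rule files from the config
+// path after a file system event.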
+// FIXME: Only process the file that caused the event
+func (t *JobClassTagger) EventCallback() {
+ files, err := os.ReadDir(t.cfgPath)
+ if err != nil {
+ cclog.Fatal(err)
+ }
+
+ if util.CheckFileExists(t.cfgPath + "/parameters.json") {
+ cclog.Info("Merge parameters")
+ b, err := os.ReadFile(t.cfgPath + "/parameters.json")
+ if err != nil {
+ cclog.Warnf("prepareRule() > open file error: %v", err)
+ }
+
+ var paramTmp map[string]any
+ if err := json.NewDecoder(bytes.NewReader(b)).Decode(&paramTmp); err != nil {
+ cclog.Warn("Error while decoding parameters.json")
+ }
+
+ maps.Copy(t.parameters, paramTmp)
+ }
+
+ for _, fn := range files {
+ fns := fn.Name()
+ if fns != "parameters.json" {
+ cclog.Debugf("Process: %s", fns)
+ filename := fmt.Sprintf("%s/%s", t.cfgPath, fns)
+ b, err := os.ReadFile(filename)
+ if err != nil {
+ cclog.Warnf("prepareRule() > open file error: %v", err)
+ continue
+ }
+ t.prepareRule(b, fns)
+ }
+ }
+}
+
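+// initParameters loads the default rule parameters from the embedded
+// jobclasses/parameters.json.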
+func (t *JobClassTagger) initParameters() error {
+ cclog.Info("Initialize parameters")
+ b, err := jobclassFiles.ReadFile("jobclasses/parameters.json")
+ if err != nil {
+ cclog.Warnf("prepareRule() > open file error: %v", err)
+ return err
+ }
+
+ if err := json.NewDecoder(bytes.NewReader(b)).Decode(&t.parameters); err != nil {
+ cclog.Warn("Error while decoding parameters.json")
+ return err
+ }
+
+ return nil
+}
+
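+// Register initializes the tagger from the embedded rule files and, if
+// present, overrides them with the files in ./var/tagger/jobclasses, which
+// is watched for changes.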
+func (t *JobClassTagger) Register() error {
+ t.cfgPath = "./var/tagger/jobclasses"
+ t.tagType = "jobClass"
+
+ err := t.initParameters()
+ if err != nil {
+ cclog.Warnf("error reading parameters.json: %v", err)
+ return err
+ }
+
+ files, err := jobclassFiles.ReadDir("jobclasses")
+ if err != nil {
+ return fmt.Errorf("error reading app folder: %#v", err)
+ }
+ t.rules = make(map[string]ruleInfo, 0)
+ for _, fn := range files {
+ fns := fn.Name()
+ if fns != "parameters.json" {
+ filename := fmt.Sprintf("jobclasses/%s", fns)
+ cclog.Infof("Process: %s", fns)
+
+ b, err := jobclassFiles.ReadFile(filename)
+ if err != nil {
+ cclog.Warnf("prepareRule() > open file error: %v", err)
+ return err
+ }
+ t.prepareRule(b, fns)
+ }
+ }
+
+ if util.CheckFileExists(t.cfgPath) {
+ t.EventCallback()
+ cclog.Infof("Setup file watch for %s", t.cfgPath)
+ util.AddListener(t.cfgPath, t)
+ }
+
+ return nil
+}
+
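+// Match evaluates every registered rule against the job's statistics and, on
+// a match, tags the job and stores the rendered hint as a metadata message.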
+func (t *JobClassTagger) Match(job *schema.Job) {
+ r := repository.GetJobRepository()
+ jobstats, err := archive.GetStatistics(job)
+ metricsList := archive.GetMetricConfigSubCluster(job.Cluster, job.SubCluster)
+ cclog.Infof("Enter match rule with %d rules for job %d", len(t.rules), job.JobID)
+ if err != nil {
+ cclog.Errorf("job classification failed for job %d: %#v", job.JobID, err)
+ return
+ }
+
+rules:
+ for tag, ri := range t.rules {
+ env := make(map[string]any)
+ maps.Copy(env, ri.env)
+ cclog.Infof("Try to match rule %s for job %d", tag, job.JobID)
+
+ // Initialize environment
+ env["job"] = map[string]any{
+ "exclusive": job.Exclusive,
+ "duration": job.Duration,
+ "numCores": job.NumHWThreads,
+ "numNodes": job.NumNodes,
+ "jobState": job.State,
+ "numAcc": job.NumAcc,
+ "smt": job.SMT,
+ }
+
+ // add metrics to env
+ for _, m := range ri.metrics {
+ stats, ok := jobstats[m]
+ if !ok {
+ cclog.Errorf("job classification failed for job %d: missing metric '%s'", job.JobID, m)
+ continue rules
+ }
+ env[m] = map[string]any{
+ "min": stats.Min,
+ "max": stats.Max,
+ "avg": stats.Avg,
+ "limits": map[string]float64{
+ "peak": metricsList[m].Peak,
+ "normal": metricsList[m].Normal,
+ "caution": metricsList[m].Caution,
+ "alert": metricsList[m].Alert,
+ },
+ }
+ }
+
+ // check rule requirements apply
+ for _, r := range ri.requirements {
+ ok, err := expr.Run(r, env)
+ if err != nil {
+ cclog.Errorf("error running requirement for rule %s: %#v", tag, err)
+ return
+ }
+ if !ok.(bool) {
+ cclog.Infof("requirement for rule %s not met", tag)
+ continue rules
+ }
+ }
+
+ // validate rule expression
+ for _, v := range ri.variables {
+ value, err := expr.Run(v.expr, env)
+ if err != nil {
+ cclog.Errorf("error running rule %s: %#v", tag, err)
+ return
+ }
+ env[v.name] = value
+ }
+
+ // dump.P(env)
+
+ match, err := expr.Run(ri.rule, env)
+ if err != nil {
+ cclog.Errorf("error running rule %s: %#v", tag, err)
+ return
+ }
+ if match.(bool) {
+ cclog.Info("Rule matches!")
+ id := *job.ID
+ if !r.HasTag(id, t.tagType, tag) {
+ r.AddTagOrCreateDirect(id, t.tagType, tag)
+ }
+
+ // process hint template
+ var msg bytes.Buffer
+ if err := ri.hint.Execute(&msg, env); err != nil {
+ cclog.Errorf("Template error: %s", err.Error())
+ return
+ }
+
+ // FIXME: Handle case where multiple tags apply
+ r.UpdateMetadata(job, "message", msg.String())
+ } else {
+ cclog.Info("Rule does not match!")
+ }
+ }
+}
diff --git a/internal/tagger/detectApp.go b/internal/tagger/detectApp.go
new file mode 100644
index 0000000..c06fb72
--- /dev/null
+++ b/internal/tagger/detectApp.go
@@ -0,0 +1,127 @@
+// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
+// All rights reserved. This file is part of cc-backend.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+package tagger
+
+import (
+ "bufio"
+ "embed"
+ "fmt"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+
+ "github.com/ClusterCockpit/cc-backend/internal/repository"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/schema"
+ "github.com/ClusterCockpit/cc-lib/util"
+)
+
+//go:embed apps/*
+var appFiles embed.FS
+
+type appInfo struct {
+ tag string
+ strings []string
+}
+
+type AppTagger struct {
+ apps map[string]appInfo
+ tagType string
+ cfgPath string
+}
+
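+// scanApp reads one search pattern per line from the given file and
+// registers them under the tag derived from the file name.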
+func (t *AppTagger) scanApp(f fs.File, fns string) {
+ scanner := bufio.NewScanner(f)
+ ai := appInfo{tag: strings.TrimSuffix(fns, filepath.Ext(fns)), strings: make([]string, 0)}
+
+ for scanner.Scan() {
+ ai.strings = append(ai.strings, scanner.Text())
+ }
+ delete(t.apps, ai.tag)
+ t.apps[ai.tag] = ai
+}
+
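+// EventMatch reports whether a file watch event concerns the apps config
+// directory; used by the listener registered via util.AddListener.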
+func (t *AppTagger) EventMatch(s string) bool {
+ return strings.Contains(s, "apps")
+}
+
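+// EventCallback re-reads all application signature files from the config
+// path after a file system event.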
+// FIXME: Only process the file that caused the event
+func (t *AppTagger) EventCallback() {
+ files, err := os.ReadDir(t.cfgPath)
+ if err != nil {
+ cclog.Fatal(err)
+ }
+
+ for _, fn := range files {
+ fns := fn.Name()
+ cclog.Debugf("Process: %s", fns)
+ f, err := os.Open(fmt.Sprintf("%s/%s", t.cfgPath, fns))
+ if err != nil {
+ cclog.Errorf("error opening app file %s: %#v", fns, err)
+ continue
+ }
+ t.scanApp(f, fns)
+ f.Close()
+ }
+}
+
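+// Register loads the embedded application signature files and, if present,
+// merges the files in ./var/tagger/apps, which is watched for changes.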
+func (t *AppTagger) Register() error {
+ t.cfgPath = "./var/tagger/apps"
+ t.tagType = "app"
+
+ files, err := appFiles.ReadDir("apps")
+ if err != nil {
+ return fmt.Errorf("error reading app folder: %#v", err)
+ }
+ t.apps = make(map[string]appInfo, 0)
+ for _, fn := range files {
+ fns := fn.Name()
+ cclog.Debugf("Process: %s", fns)
+ f, err := appFiles.Open(fmt.Sprintf("apps/%s", fns))
+ if err != nil {
+ return fmt.Errorf("error opening app file %s: %#v", fns, err)
+ }
+ defer f.Close()
+ t.scanApp(f, fns)
+ }
+
+ if util.CheckFileExists(t.cfgPath) {
+ t.EventCallback()
+ cclog.Infof("Setup file watch for %s", t.cfgPath)
+ util.AddListener(t.cfgPath, t)
+ }
+
+ return nil
+}
+
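+// Match scans the job script from the job's metadata for known application
+// signatures and tags the job with the first application that matches.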
+func (t *AppTagger) Match(job *schema.Job) {
+ r := repository.GetJobRepository()
+ metadata, err := r.FetchMetadata(job)
+ if err != nil {
+ cclog.Infof("Cannot fetch metadata for job: %d on %s", job.JobID, job.Cluster)
+ return
+ }
+
+ jobscript, ok := metadata["jobScript"]
+ if ok {
+ id := *job.ID
+
+ out:
+ for _, a := range t.apps {
+ tag := a.tag
+ for _, s := range a.strings {
+ matched, _ := regexp.MatchString(s, strings.ToLower(jobscript))
+ if matched {
+ if !r.HasTag(id, t.tagType, tag) {
+ r.AddTagOrCreateDirect(id, t.tagType, tag)
+ break out
+ }
+ }
+ }
+ }
+ } else {
+ cclog.Infof("Cannot extract job script for job: %d on %s", job.JobID, job.Cluster)
+ }
+}
diff --git a/internal/tagger/detectApp_test.go b/internal/tagger/detectApp_test.go
new file mode 100644
index 0000000..78f5f76
--- /dev/null
+++ b/internal/tagger/detectApp_test.go
@@ -0,0 +1,59 @@
+// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
+// All rights reserved. This file is part of cc-backend.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+package tagger
+
+import (
+ "testing"
+
+ "github.com/ClusterCockpit/cc-backend/internal/repository"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+)
+
+func setup(tb testing.TB) *repository.JobRepository {
+ tb.Helper()
+ cclog.Init("warn", true)
+ dbfile := "../repository/testdata/job.db"
+ err := repository.MigrateDB("sqlite3", dbfile)
+ noErr(tb, err)
+ repository.Connect("sqlite3", dbfile)
+ return repository.GetJobRepository()
+}
+
+func noErr(tb testing.TB, err error) {
+ tb.Helper()
+
+ if err != nil {
+ tb.Fatal("Error is not nil:", err)
+ }
+}
+
+func TestRegister(t *testing.T) {
+ var tagger AppTagger
+
+ err := tagger.Register()
+ noErr(t, err)
+
+ if len(tagger.apps) != 16 {
+ t.Errorf("wrong summary for diagnostic \ngot: %d \nwant: 16", len(tagger.apps))
+ }
+}
+
+func TestMatch(t *testing.T) {
+ r := setup(t)
+
+ job, err := r.FindByIdDirect(5)
+ noErr(t, err)
+
+ var tagger AppTagger
+
+ err = tagger.Register()
+ noErr(t, err)
+
+ tagger.Match(job)
+
+ if !r.HasTag(5, "app", "vasp") {
+ t.Errorf("missing tag vasp")
+ }
+}
diff --git a/internal/tagger/jobclasses/highload.json b/internal/tagger/jobclasses/highload.json
new file mode 100644
index 0000000..0d16b45
--- /dev/null
+++ b/internal/tagger/jobclasses/highload.json
@@ -0,0 +1,26 @@
+{
+ "name": "Excessive CPU load",
+ "tag": "excessiveload",
+ "parameters": [
+ "excessivecpuload_threshold_factor",
+ "job_min_duration_seconds",
+ "sampling_interval_seconds"
+ ],
+ "metrics": ["cpu_load"],
+ "requirements": [
+ "job.exclusive == 1",
+ "job.duration > job_min_duration_seconds"
+ ],
+ "variables": [
+ {
+ "name": "load_threshold",
+ "expr": "cpu_load.limits.peak * excessivecpuload_threshold_factor"
+ },
+ {
+ "name": "load_perc",
+ "expr": "1.0 - (cpu_load.avg / cpu_load.limits.peak)"
+ }
+ ],
+ "rule": "cpu_load.avg > load_threshold",
+ "hint": "This job was detected as excessiveload because the average cpu load {{.cpu_load.avg}} falls above the threshold {{.load_threshold}}."
+}
diff --git a/internal/tagger/jobclasses/lowUtilization.json b/internal/tagger/jobclasses/lowUtilization.json
new file mode 100644
index 0000000..9613b06
--- /dev/null
+++ b/internal/tagger/jobclasses/lowUtilization.json
@@ -0,0 +1,22 @@
+{
+ "name": "Low ressource utilization",
+ "tag": "lowutilization",
+ "parameters": ["job_min_duration_seconds"],
+ "metrics": ["flops_any", "mem_bw"],
+ "requirements": [
+ "job.exclusive == 1",
+ "job.duration > job_min_duration_seconds"
+ ],
+ "variables": [
+ {
+ "name": "mem_bw_perc",
+ "expr": "1.0 - (mem_bw.avg / mem_bw.limits.peak)"
+ },
+ {
+ "name": "flops_any_perc",
+ "expr": "1.0 - (flops_any.avg / flops_any.limits.peak)"
+ }
+ ],
+ "rule": "flops_any.avg < flops_any.limits.alert && mem_bw.avg < mem_bw.limits.alert",
+ "hint": "This job was detected as low utilization because the average flop rate {{.flops_any.avg}} falls below the threshold {{.flops_any.limits.alert}}."
+}
diff --git a/internal/tagger/jobclasses/lowload.json b/internal/tagger/jobclasses/lowload.json
new file mode 100644
index 0000000..2212bd1
--- /dev/null
+++ b/internal/tagger/jobclasses/lowload.json
@@ -0,0 +1,26 @@
+{
+ "name": "Low CPU load",
+ "tag": "lowload",
+ "parameters": [
+ "lowcpuload_threshold_factor",
+ "job_min_duration_seconds",
+ "sampling_interval_seconds"
+ ],
+ "metrics": ["cpu_load"],
+ "requirements": [
+ "job.exclusive == 1",
+ "job.duration > job_min_duration_seconds"
+ ],
+ "variables": [
+ {
+ "name": "load_threshold",
+ "expr": "job.numCores * lowcpuload_threshold_factor"
+ },
+ {
+ "name": "load_perc",
+ "expr": "1.0 - (cpu_load.avg / cpu_load.limits.peak)"
+ }
+ ],
+ "rule": "cpu_load.avg < cpu_load.limits.caution",
+ "hint": "This job was detected as lowload because the average cpu load {{.cpu_load}} falls below the threshold {{.cpu_load.limits.caution}}."
+}
diff --git a/internal/tagger/jobclasses/parameters.json b/internal/tagger/jobclasses/parameters.json
new file mode 100644
index 0000000..39e94c1
--- /dev/null
+++ b/internal/tagger/jobclasses/parameters.json
@@ -0,0 +1,14 @@
+{
+ "lowcpuload_threshold_factor": 0.9,
+ "excessivecpuload_threshold_factor": 1.1,
+ "highmemoryusage_threshold_factor": 0.9,
+ "node_load_imbalance_threshold_factor": 0.1,
+ "core_load_imbalance_threshold_factor": 0.1,
+ "high_memory_load_threshold_factor": 0.9,
+ "lowgpuload_threshold_factor": 0.7,
+ "memory_leak_slope_threshold": 0.1,
+ "job_min_duration_seconds": 600.0,
+ "sampling_interval_seconds": 30.0,
+ "cpu_load_pre_cutoff_samples": 11.0,
+ "cpu_load_core_pre_cutoff_samples": 6.0
+}
diff --git a/internal/tagger/tagger.go b/internal/tagger/tagger.go
new file mode 100644
index 0000000..af0ba19
--- /dev/null
+++ b/internal/tagger/tagger.go
@@ -0,0 +1,88 @@
+// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
+// All rights reserved. This file is part of cc-backend.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+package tagger
+
+import (
+ "sync"
+
+ "github.com/ClusterCockpit/cc-backend/internal/repository"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/schema"
+)
+
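+// Tagger is the interface that all tagger plugins have to implement.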
+type Tagger interface {
+ Register() error
+ Match(job *schema.Job)
+}
+
+var (
+ initOnce sync.Once
+ jobTagger *JobTagger
+)
+
+type JobTagger struct {
+ startTaggers []Tagger
+ stopTaggers []Tagger
+}
+
+func newTagger() {
+ jobTagger = &JobTagger{}
+ jobTagger.startTaggers = make([]Tagger, 0)
+ jobTagger.startTaggers = append(jobTagger.startTaggers, &AppTagger{})
+ jobTagger.stopTaggers = make([]Tagger, 0)
+ jobTagger.stopTaggers = append(jobTagger.stopTaggers, &JobClassTagger{})
+
+ for _, tagger := range jobTagger.startTaggers {
+ tagger.Register()
+ }
+ for _, tagger := range jobTagger.stopTaggers {
+ tagger.Register()
+ }
+}
+
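+// Init creates the global job tagger and registers it as a repository job
+// hook. Repeated calls are no-ops.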
+func Init() {
+ initOnce.Do(func() {
+ newTagger()
+ repository.RegisterJobJook(jobTagger)
+ })
+}
+
+func (jt *JobTagger) JobStartCallback(job *schema.Job) {
+ for _, tagger := range jt.startTaggers {
+ tagger.Match(job)
+ }
+}
+
+func (jt *JobTagger) JobStopCallback(job *schema.Job) {
+ for _, tagger := range jt.stopTaggers {
+ tagger.Match(job)
+ }
+}
+
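+// RunTaggers applies all start and stop taggers to every job in the
+// repository.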
+func RunTaggers() error {
+ newTagger()
+ r := repository.GetJobRepository()
+ jl, err := r.GetJobList()
+ if err != nil {
+ cclog.Errorf("Error while getting job list %s", err)
+ return err
+ }
+
+ for _, id := range jl {
+ job, err := r.FindByIdDirect(id)
+ if err != nil {
+ cclog.Errorf("Error while getting job %s", err)
+ return err
+ }
+ for _, tagger := range jobTagger.startTaggers {
+ tagger.Match(job)
+ }
+ for _, tagger := range jobTagger.stopTaggers {
+ cclog.Infof("Run stop tagger for job %d", job.ID)
+ tagger.Match(job)
+ }
+ }
+ return nil
+}
diff --git a/internal/tagger/tagger_test.go b/internal/tagger/tagger_test.go
new file mode 100644
index 0000000..a94c20f
--- /dev/null
+++ b/internal/tagger/tagger_test.go
@@ -0,0 +1,31 @@
+// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
+// All rights reserved. This file is part of cc-backend.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+package tagger
+
+import (
+ "testing"
+
+ "github.com/ClusterCockpit/cc-backend/internal/repository"
+ "github.com/ClusterCockpit/cc-lib/schema"
+)
+
+func TestInit(t *testing.T) {
+ Init()
+}
+
+func TestJobStartCallback(t *testing.T) {
+ Init()
+ r := setup(t)
+ job, err := r.FindByIdDirect(2)
+ noErr(t, err)
+
+ jobs := make([]*schema.Job, 0, 1)
+ jobs = append(jobs, job)
+
+ repository.CallJobStartHooks(jobs)
+ if !r.HasTag(2, "app", "python") {
+ t.Errorf("missing tag python")
+ }
+}
diff --git a/internal/taskManager/commitJobService.go b/internal/taskManager/commitJobService.go
new file mode 100644
index 0000000..5489007
--- /dev/null
+++ b/internal/taskManager/commitJobService.go
@@ -0,0 +1,35 @@
+// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
+// All rights reserved. This file is part of cc-backend.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+package taskManager
+
+import (
+ "time"
+
+ "github.com/ClusterCockpit/cc-backend/internal/config"
+ "github.com/ClusterCockpit/cc-backend/internal/repository"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/go-co-op/gocron/v2"
+)
+
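+// RegisterCommitJobService schedules a recurring task that syncs cached jobs
+// into the job table and runs the job start hooks for them.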
+func RegisterCommitJobService() {
+ var frequency string
+ if config.Keys.CronFrequency != nil && config.Keys.CronFrequency.CommitJobWorker != "" {
+ frequency = config.Keys.CronFrequency.CommitJobWorker
+ } else {
+ frequency = "2m"
+ }
+ d, err := time.ParseDuration(frequency)
+ if err != nil {
+ cclog.Warnf("Could not parse duration for commitJob service interval: %s, using 2m", frequency)
+ d = 2 * time.Minute
+ }
+ cclog.Infof("Register commitJob service with %s interval", frequency)
+
+ s.NewJob(gocron.DurationJob(d),
+ gocron.NewTask(
+ func() {
+ start := time.Now()
+ cclog.Printf("Jobcache sync started at %s", start.Format(time.RFC3339))
+ jobs, _ := jobRepo.SyncJobs()
+ repository.CallJobStartHooks(jobs)
+ cclog.Printf("Jobcache sync and job callbacks are done and took %s", time.Since(start))
+ }))
+}
diff --git a/internal/taskManager/compressionService.go b/internal/taskManager/compressionService.go
index 005a5bb..e96115f 100644
--- a/internal/taskManager/compressionService.go
+++ b/internal/taskManager/compressionService.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package taskManager
@@ -8,13 +8,13 @@ import (
"time"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/schema"
"github.com/go-co-op/gocron/v2"
)
func RegisterCompressionService(compressOlderThan int) {
- log.Info("Register compression service")
+ cclog.Info("Register compression service")
s.NewJob(gocron.DailyJob(1, gocron.NewAtTimes(gocron.NewAtTime(05, 0, 0))),
gocron.NewTask(
@@ -26,7 +26,7 @@ func RegisterCompressionService(compressOlderThan int) {
startTime := time.Now().Unix() - int64(compressOlderThan*24*3600)
lastTime := ar.CompressLast(startTime)
if startTime == lastTime {
- log.Info("Compression Service - Complete archive run")
+ cclog.Info("Compression Service - Complete archive run")
jobs, err = jobRepo.FindJobsBetween(0, startTime)
} else {
@@ -34,7 +34,7 @@ func RegisterCompressionService(compressOlderThan int) {
}
if err != nil {
- log.Warnf("Error while looking for compression jobs: %v", err)
+ cclog.Warnf("Error while looking for compression jobs: %v", err)
}
ar.Compress(jobs)
}))
diff --git a/internal/taskManager/ldapSyncService.go b/internal/taskManager/ldapSyncService.go
index a998aa8..27212e8 100644
--- a/internal/taskManager/ldapSyncService.go
+++ b/internal/taskManager/ldapSyncService.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package taskManager
@@ -8,29 +8,29 @@ import (
"time"
"github.com/ClusterCockpit/cc-backend/internal/auth"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/go-co-op/gocron/v2"
)
func RegisterLdapSyncService(ds string) {
interval, err := parseDuration(ds)
if err != nil {
- log.Warnf("Could not parse duration for sync interval: %v",
+ cclog.Warnf("Could not parse duration for sync interval: %v",
ds)
return
}
auth := auth.GetAuthInstance()
- log.Info("Register LDAP sync service")
+ cclog.Info("Register LDAP sync service")
s.NewJob(gocron.DurationJob(interval),
gocron.NewTask(
func() {
t := time.Now()
- log.Printf("ldap sync started at %s", t.Format(time.RFC3339))
+ cclog.Printf("ldap sync started at %s", t.Format(time.RFC3339))
if err := auth.LdapAuth.Sync(); err != nil {
- log.Errorf("ldap sync failed: %s", err.Error())
+ cclog.Errorf("ldap sync failed: %s", err.Error())
}
- log.Print("ldap sync done")
+ cclog.Print("ldap sync done")
}))
}
diff --git a/internal/taskManager/retentionService.go b/internal/taskManager/retentionService.go
index 502f890..440c369 100644
--- a/internal/taskManager/retentionService.go
+++ b/internal/taskManager/retentionService.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package taskManager
@@ -8,12 +8,12 @@ import (
"time"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/go-co-op/gocron/v2"
)
func RegisterRetentionDeleteService(age int, includeDB bool) {
- log.Info("Register retention delete service")
+ cclog.Info("Register retention delete service")
s.NewJob(gocron.DailyJob(1, gocron.NewAtTimes(gocron.NewAtTime(04, 0, 0))),
gocron.NewTask(
@@ -21,26 +21,26 @@ func RegisterRetentionDeleteService(age int, includeDB bool) {
startTime := time.Now().Unix() - int64(age*24*3600)
jobs, err := jobRepo.FindJobsBetween(0, startTime)
if err != nil {
- log.Warnf("Error while looking for retention jobs: %s", err.Error())
+ cclog.Warnf("Error while looking for retention jobs: %s", err.Error())
}
archive.GetHandle().CleanUp(jobs)
if includeDB {
cnt, err := jobRepo.DeleteJobsBefore(startTime)
if err != nil {
- log.Errorf("Error while deleting retention jobs from db: %s", err.Error())
+ cclog.Errorf("Error while deleting retention jobs from db: %s", err.Error())
} else {
- log.Infof("Retention: Removed %d jobs from db", cnt)
+ cclog.Infof("Retention: Removed %d jobs from db", cnt)
}
if err = jobRepo.Optimize(); err != nil {
- log.Errorf("Error occured in db optimization: %s", err.Error())
+ cclog.Errorf("Error occured in db optimization: %s", err.Error())
}
}
}))
}
func RegisterRetentionMoveService(age int, includeDB bool, location string) {
- log.Info("Register retention move service")
+ cclog.Info("Register retention move service")
s.NewJob(gocron.DailyJob(1, gocron.NewAtTimes(gocron.NewAtTime(04, 0, 0))),
gocron.NewTask(
@@ -48,19 +48,19 @@ func RegisterRetentionMoveService(age int, includeDB bool, location string) {
startTime := time.Now().Unix() - int64(age*24*3600)
jobs, err := jobRepo.FindJobsBetween(0, startTime)
if err != nil {
- log.Warnf("Error while looking for retention jobs: %s", err.Error())
+ cclog.Warnf("Error while looking for retention jobs: %s", err.Error())
}
archive.GetHandle().Move(jobs, location)
if includeDB {
cnt, err := jobRepo.DeleteJobsBefore(startTime)
if err != nil {
- log.Errorf("Error while deleting retention jobs from db: %v", err)
+ cclog.Errorf("Error while deleting retention jobs from db: %v", err)
} else {
- log.Infof("Retention: Removed %d jobs from db", cnt)
+ cclog.Infof("Retention: Removed %d jobs from db", cnt)
}
if err = jobRepo.Optimize(); err != nil {
- log.Errorf("Error occured in db optimization: %v", err)
+ cclog.Errorf("Error occured in db optimization: %v", err)
}
}
}))
diff --git a/internal/taskManager/stopJobsExceedTime.go b/internal/taskManager/stopJobsExceedTime.go
index d97813a..a3743f6 100644
--- a/internal/taskManager/stopJobsExceedTime.go
+++ b/internal/taskManager/stopJobsExceedTime.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package taskManager
@@ -8,19 +8,19 @@ import (
"runtime"
"github.com/ClusterCockpit/cc-backend/internal/config"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/go-co-op/gocron/v2"
)
func RegisterStopJobsExceedTime() {
- log.Info("Register undead jobs service")
+ cclog.Info("Register undead jobs service")
s.NewJob(gocron.DailyJob(1, gocron.NewAtTimes(gocron.NewAtTime(03, 0, 0))),
gocron.NewTask(
func() {
err := jobRepo.StopJobsExceedingWalltimeBy(config.Keys.StopJobsExceedingWalltime)
if err != nil {
- log.Warnf("Error while looking for jobs exceeding their walltime: %s", err.Error())
+ cclog.Warnf("Error while looking for jobs exceeding their walltime: %s", err.Error())
}
runtime.GC()
}))
diff --git a/internal/taskManager/taskManager.go b/internal/taskManager/taskManager.go
index 2004e0d..5f51040 100644
--- a/internal/taskManager/taskManager.go
+++ b/internal/taskManager/taskManager.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package taskManager
@@ -10,8 +10,8 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/repository"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/schema"
"github.com/go-co-op/gocron/v2"
)
@@ -23,13 +23,13 @@ var (
func parseDuration(s string) (time.Duration, error) {
interval, err := time.ParseDuration(s)
if err != nil {
- log.Warnf("Could not parse duration for sync interval: %v",
+ cclog.Warnf("Could not parse duration for sync interval: %v",
s)
return 0, err
}
if interval == 0 {
- log.Info("TaskManager: Sync interval is zero")
+ cclog.Info("TaskManager: Sync interval is zero")
}
return interval, nil
@@ -40,7 +40,7 @@ func Start() {
jobRepo = repository.GetJobRepository()
s, err = gocron.NewScheduler()
if err != nil {
- log.Abortf("Taskmanager Start: Could not create gocron scheduler.\nError: %s\n", err.Error())
+ cclog.Abortf("Taskmanager Start: Could not create gocron scheduler.\nError: %s\n", err.Error())
}
if config.Keys.StopJobsExceedingWalltime > 0 {
@@ -54,7 +54,7 @@ func Start() {
cfg.Retention.IncludeDB = true
if err := json.Unmarshal(config.Keys.Archive, &cfg); err != nil {
- log.Warn("Error while unmarshaling raw config json")
+ cclog.Warn("Error while unmarshaling raw config json")
}
switch cfg.Retention.Policy {
@@ -81,6 +81,7 @@ func Start() {
RegisterFootprintWorker()
RegisterUpdateDurationWorker()
+ RegisterCommitJobService()
s.Start()
}
diff --git a/internal/taskManager/updateDurationService.go b/internal/taskManager/updateDurationService.go
index 81d799e..70ec506 100644
--- a/internal/taskManager/updateDurationService.go
+++ b/internal/taskManager/updateDurationService.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package taskManager
@@ -8,7 +8,7 @@ import (
"time"
"github.com/ClusterCockpit/cc-backend/internal/config"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/go-co-op/gocron/v2"
)
@@ -20,14 +20,14 @@ func RegisterUpdateDurationWorker() {
frequency = "5m"
}
d, _ := time.ParseDuration(frequency)
- log.Infof("Register Duration Update service with %s interval", frequency)
+ cclog.Infof("Register Duration Update service with %s interval", frequency)
s.NewJob(gocron.DurationJob(d),
gocron.NewTask(
func() {
start := time.Now()
- log.Printf("Update duration started at %s", start.Format(time.RFC3339))
+ cclog.Printf("Update duration started at %s", start.Format(time.RFC3339))
jobRepo.UpdateDuration()
- log.Printf("Update duration is done and took %s", time.Since(start))
+ cclog.Printf("Update duration is done and took %s", time.Since(start))
}))
}
diff --git a/internal/taskManager/updateFootprintService.go b/internal/taskManager/updateFootprintService.go
index a220855..41c5837 100644
--- a/internal/taskManager/updateFootprintService.go
+++ b/internal/taskManager/updateFootprintService.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package taskManager
@@ -12,8 +12,8 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/metricdata"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/schema"
sq "github.com/Masterminds/squirrel"
"github.com/go-co-op/gocron/v2"
)
@@ -26,7 +26,7 @@ func RegisterFootprintWorker() {
frequency = "10m"
}
d, _ := time.ParseDuration(frequency)
- log.Infof("Register Footprint Update service with %s interval", frequency)
+ cclog.Infof("Register Footprint Update service with %s interval", frequency)
s.NewJob(gocron.DurationJob(d),
gocron.NewTask(
@@ -35,7 +35,7 @@ func RegisterFootprintWorker() {
c := 0
ce := 0
cl := 0
- log.Printf("Update Footprints started at %s", s.Format(time.RFC3339))
+ cclog.Printf("Update Footprints started at %s", s.Format(time.RFC3339))
for _, cluster := range archive.Clusters {
s_cluster := time.Now()
@@ -54,30 +54,26 @@ func RegisterFootprintWorker() {
repo, err := metricdata.GetMetricDataRepo(cluster.Name)
if err != nil {
- log.Errorf("no metric data repository configured for '%s'", cluster.Name)
+ cclog.Errorf("no metric data repository configured for '%s'", cluster.Name)
continue
}
pendingStatements := []sq.UpdateBuilder{}
for _, job := range jobs {
- log.Debugf("Prepare job %d", job.JobID)
+ cclog.Debugf("Prepare job %d", job.JobID)
cl++
s_job := time.Now()
jobStats, err := repo.LoadStats(job, allMetrics, context.Background())
if err != nil {
- log.Errorf("error wile loading job data stats for footprint update: %v", err)
+ cclog.Errorf("error wile loading job data stats for footprint update: %v", err)
ce++
continue
}
- jobMeta := &schema.JobMeta{
- BaseJob: job.BaseJob,
- StartTime: job.StartTime.Unix(),
- Statistics: make(map[string]schema.JobStatistics),
- }
+ job.Statistics = make(map[string]schema.JobStatistics)
for _, metric := range allMetrics {
avg, min, max := 0.0, 0.0, 0.0
@@ -95,7 +91,7 @@ func RegisterFootprintWorker() {
}
// Add values rounded to 2 digits: repo.LoadStats may return unrounded
- jobMeta.Statistics[metric] = schema.JobStatistics{
+ job.Statistics[metric] = schema.JobStatistics{
Unit: schema.Unit{
Prefix: archive.GetMetricConfig(job.Cluster, metric).Unit.Prefix,
Base: archive.GetMetricConfig(job.Cluster, metric).Unit.Base,
@@ -108,28 +104,28 @@ func RegisterFootprintWorker() {
// Build Statement per Job, Add to Pending Array
stmt := sq.Update("job")
- stmt, err = jobRepo.UpdateFootprint(stmt, jobMeta)
+ stmt, err = jobRepo.UpdateFootprint(stmt, job)
if err != nil {
- log.Errorf("update job (dbid: %d) statement build failed at footprint step: %s", job.ID, err.Error())
+ cclog.Errorf("update job (dbid: %d) statement build failed at footprint step: %s", job.ID, err.Error())
ce++
continue
}
stmt = stmt.Where("job.id = ?", job.ID)
pendingStatements = append(pendingStatements, stmt)
- log.Debugf("Job %d took %s", job.JobID, time.Since(s_job))
+ cclog.Debugf("Job %d took %s", job.JobID, time.Since(s_job))
}
t, err := jobRepo.TransactionInit()
if err != nil {
- log.Errorf("failed TransactionInit %v", err)
- log.Errorf("skipped %d transactions for cluster %s", len(pendingStatements), cluster.Name)
+ cclog.Errorf("failed TransactionInit %v", err)
+ cclog.Errorf("skipped %d transactions for cluster %s", len(pendingStatements), cluster.Name)
ce += len(pendingStatements)
} else {
for _, ps := range pendingStatements {
query, args, err := ps.ToSql()
if err != nil {
- log.Errorf("failed in ToSQL conversion: %v", err)
+ cclog.Errorf("failed in ToSQL conversion: %v", err)
ce++
} else {
// args...: Footprint-JSON, Energyfootprint-JSON, TotalEnergy, JobID
@@ -139,8 +135,8 @@ func RegisterFootprintWorker() {
}
jobRepo.TransactionEnd(t)
}
- log.Debugf("Finish Cluster %s, took %s", cluster.Name, time.Since(s_cluster))
+ cclog.Debugf("Finish Cluster %s, took %s", cluster.Name, time.Since(s_cluster))
}
- log.Printf("Updating %d (of %d; Skipped %d) Footprints is done and took %s", c, cl, ce, time.Since(s))
+ cclog.Printf("Updating %d (of %d; Skipped %d) Footprints is done and took %s", c, cl, ce, time.Since(s))
}))
}
diff --git a/internal/util/array.go b/internal/util/array.go
deleted file mode 100644
index 19bdb53..0000000
--- a/internal/util/array.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
-// Use of this source code is governed by a MIT-style
-// license that can be found in the LICENSE file.
-package util
-
-func Contains[T comparable](items []T, item T) bool {
- for _, v := range items {
- if v == item {
- return true
- }
- }
- return false
-}
diff --git a/internal/util/compress.go b/internal/util/compress.go
deleted file mode 100644
index 4a901ae..0000000
--- a/internal/util/compress.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
-// Use of this source code is governed by a MIT-style
-// license that can be found in the LICENSE file.
-package util
-
-import (
- "compress/gzip"
- "io"
- "os"
-
- "github.com/ClusterCockpit/cc-backend/pkg/log"
-)
-
-func CompressFile(fileIn string, fileOut string) error {
- originalFile, err := os.Open(fileIn)
- if err != nil {
- log.Errorf("CompressFile() error: %v", err)
- return err
- }
- defer originalFile.Close()
-
- gzippedFile, err := os.Create(fileOut)
-
- if err != nil {
- log.Errorf("CompressFile() error: %v", err)
- return err
- }
- defer gzippedFile.Close()
-
- gzipWriter := gzip.NewWriter(gzippedFile)
- defer gzipWriter.Close()
-
- _, err = io.Copy(gzipWriter, originalFile)
- if err != nil {
- log.Errorf("CompressFile() error: %v", err)
- return err
- }
- gzipWriter.Flush()
- if err := os.Remove(fileIn); err != nil {
- log.Errorf("CompressFile() error: %v", err)
- return err
- }
-
- return nil
-}
-
-func UncompressFile(fileIn string, fileOut string) error {
- gzippedFile, err := os.Open(fileIn)
- if err != nil {
- log.Errorf("UncompressFile() error: %v", err)
- return err
- }
- defer gzippedFile.Close()
-
- gzipReader, _ := gzip.NewReader(gzippedFile)
- defer gzipReader.Close()
-
- uncompressedFile, err := os.Create(fileOut)
- if err != nil {
- log.Errorf("UncompressFile() error: %v", err)
- return err
- }
- defer uncompressedFile.Close()
-
- _, err = io.Copy(uncompressedFile, gzipReader)
- if err != nil {
- log.Errorf("UncompressFile() error: %v", err)
- return err
- }
- if err := os.Remove(fileIn); err != nil {
- log.Errorf("UncompressFile() error: %v", err)
- return err
- }
-
- return nil
-}
diff --git a/internal/util/copy.go b/internal/util/copy.go
deleted file mode 100644
index c6896c4..0000000
--- a/internal/util/copy.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
-// Use of this source code is governed by a MIT-style
-// license that can be found in the LICENSE file.
-package util
-
-import (
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "path/filepath"
-)
-
-func CopyFile(src, dst string) (err error) {
- in, err := os.Open(src)
- if err != nil {
- return
- }
- defer in.Close()
-
- out, err := os.Create(dst)
- if err != nil {
- return
- }
- defer func() {
- if e := out.Close(); e != nil {
- err = e
- }
- }()
-
- _, err = io.Copy(out, in)
- if err != nil {
- return
- }
-
- err = out.Sync()
- if err != nil {
- return
- }
-
- si, err := os.Stat(src)
- if err != nil {
- return
- }
- err = os.Chmod(dst, si.Mode())
- if err != nil {
- return
- }
-
- return
-}
-
-func CopyDir(src string, dst string) (err error) {
- src = filepath.Clean(src)
- dst = filepath.Clean(dst)
-
- si, err := os.Stat(src)
- if err != nil {
- return err
- }
- if !si.IsDir() {
- return fmt.Errorf("source is not a directory")
- }
-
- _, err = os.Stat(dst)
- if err != nil && !os.IsNotExist(err) {
- return
- }
- if err == nil {
- return fmt.Errorf("destination already exists")
- }
-
- err = os.MkdirAll(dst, si.Mode())
- if err != nil {
- return
- }
-
- entries, err := ioutil.ReadDir(src)
- if err != nil {
- return
- }
-
- for _, entry := range entries {
- srcPath := filepath.Join(src, entry.Name())
- dstPath := filepath.Join(dst, entry.Name())
-
- if entry.IsDir() {
- err = CopyDir(srcPath, dstPath)
- if err != nil {
- return
- }
- } else {
- // Skip symlinks.
- if entry.Mode()&os.ModeSymlink != 0 {
- continue
- }
-
- err = CopyFile(srcPath, dstPath)
- if err != nil {
- return
- }
- }
- }
-
- return
-}
diff --git a/internal/util/diskUsage.go b/internal/util/diskUsage.go
deleted file mode 100644
index 53665c5..0000000
--- a/internal/util/diskUsage.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
-// Use of this source code is governed by a MIT-style
-// license that can be found in the LICENSE file.
-package util
-
-import (
- "os"
-
- "github.com/ClusterCockpit/cc-backend/pkg/log"
-)
-
-func DiskUsage(dirpath string) float64 {
- var size int64
-
- dir, err := os.Open(dirpath)
- if err != nil {
- log.Errorf("DiskUsage() error: %v", err)
- return 0
- }
- defer dir.Close()
-
- files, err := dir.Readdir(-1)
- if err != nil {
- log.Errorf("DiskUsage() error: %v", err)
- return 0
- }
-
- for _, file := range files {
- size += file.Size()
- }
-
- return float64(size) * 1e-6
-}
diff --git a/internal/util/fstat.go b/internal/util/fstat.go
deleted file mode 100644
index 54e1154..0000000
--- a/internal/util/fstat.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
-// Use of this source code is governed by a MIT-style
-// license that can be found in the LICENSE file.
-package util
-
-import (
- "errors"
- "os"
-
- "github.com/ClusterCockpit/cc-backend/pkg/log"
-)
-
-func CheckFileExists(filePath string) bool {
- _, err := os.Stat(filePath)
- return !errors.Is(err, os.ErrNotExist)
-}
-
-func GetFilesize(filePath string) int64 {
- fileInfo, err := os.Stat(filePath)
- if err != nil {
- log.Errorf("Error on Stat %s: %v", filePath, err)
- return 0
- }
- return fileInfo.Size()
-}
-
-func GetFilecount(path string) int {
- files, err := os.ReadDir(path)
- if err != nil {
- log.Errorf("Error on ReadDir %s: %v", path, err)
- return 0
- }
-
- return len(files)
-}
diff --git a/internal/util/statistics.go b/internal/util/statistics.go
deleted file mode 100644
index d75224f..0000000
--- a/internal/util/statistics.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
-// Use of this source code is governed by a MIT-style
-// license that can be found in the LICENSE file.
-package util
-
-import (
- "golang.org/x/exp/constraints"
-
- "fmt"
- "math"
- "sort"
-)
-
-func Min[T constraints.Ordered](a, b T) T {
- if a < b {
- return a
- }
- return b
-}
-
-func Max[T constraints.Ordered](a, b T) T {
- if a > b {
- return a
- }
- return b
-}
-
-func sortedCopy(input []float64) []float64 {
- sorted := make([]float64, len(input))
- copy(sorted, input)
- sort.Float64s(sorted)
- return sorted
-}
-
-func Mean(input []float64) (float64, error) {
- if len(input) == 0 {
- return math.NaN(), fmt.Errorf("input array is empty: %#v", input)
- }
- sum := 0.0
- for _, n := range input {
- sum += n
- }
- return sum / float64(len(input)), nil
-}
-
-func Median(input []float64) (median float64, err error) {
- c := sortedCopy(input)
- // Even numbers: add the two middle numbers, divide by two (use mean function)
- // Odd numbers: Use the middle number
- l := len(c)
- if l == 0 {
- return math.NaN(), fmt.Errorf("input array is empty: %#v", input)
- } else if l%2 == 0 {
- median, _ = Mean(c[l/2-1 : l/2+1])
- } else {
- median = c[l/2]
- }
- return median, nil
-}
diff --git a/internal/util/util_test.go b/internal/util/util_test.go
deleted file mode 100644
index d945c96..0000000
--- a/internal/util/util_test.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
-// Use of this source code is governed by a MIT-style
-// license that can be found in the LICENSE file.
-package util_test
-
-import (
- "fmt"
- "os"
- "path/filepath"
- "testing"
-
- "github.com/ClusterCockpit/cc-backend/internal/util"
-)
-
-func TestCheckFileExists(t *testing.T) {
- tmpdir := t.TempDir()
- if !util.CheckFileExists(tmpdir) {
- t.Fatal("expected true, got false")
- }
-
- filePath := filepath.Join(tmpdir, "version.txt")
-
- if err := os.WriteFile(filePath, []byte(fmt.Sprintf("%d", 1)), 0666); err != nil {
- t.Fatal(err)
- }
- if !util.CheckFileExists(filePath) {
- t.Fatal("expected true, got false")
- }
-
- filePath = filepath.Join(tmpdir, "version-test.txt")
- if util.CheckFileExists(filePath) {
- t.Fatal("expected false, got true")
- }
-}
-
-func TestGetFileSize(t *testing.T) {
- tmpdir := t.TempDir()
- filePath := filepath.Join(tmpdir, "data.json")
-
- if s := util.GetFilesize(filePath); s > 0 {
- t.Fatalf("expected 0, got %d", s)
- }
-
- if err := os.WriteFile(filePath, []byte(fmt.Sprintf("%d", 1)), 0666); err != nil {
- t.Fatal(err)
- }
- if s := util.GetFilesize(filePath); s == 0 {
- t.Fatal("expected not 0, got 0")
- }
-}
-
-func TestGetFileCount(t *testing.T) {
- tmpdir := t.TempDir()
-
- if c := util.GetFilecount(tmpdir); c != 0 {
- t.Fatalf("expected 0, got %d", c)
- }
-
- filePath := filepath.Join(tmpdir, "data-1.json")
- if err := os.WriteFile(filePath, []byte(fmt.Sprintf("%d", 1)), 0666); err != nil {
- t.Fatal(err)
- }
- filePath = filepath.Join(tmpdir, "data-2.json")
- if err := os.WriteFile(filePath, []byte(fmt.Sprintf("%d", 1)), 0666); err != nil {
- t.Fatal(err)
- }
- if c := util.GetFilecount(tmpdir); c != 2 {
- t.Fatalf("expected 2, got %d", c)
- }
-
- if c := util.GetFilecount(filePath); c != 0 {
- t.Fatalf("expected 0, got %d", c)
- }
-}
diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go
index cd457eb..f69cde3 100644
--- a/pkg/archive/archive.go
+++ b/pkg/archive/archive.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package archive
@@ -7,11 +7,12 @@ package archive
import (
"encoding/json"
"fmt"
+ "maps"
"sync"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/ClusterCockpit/cc-backend/pkg/lrucache"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/lrucache"
+ "github.com/ClusterCockpit/cc-lib/schema"
)
const Version uint64 = 2
@@ -23,7 +24,7 @@ type ArchiveBackend interface {
Exists(job *schema.Job) bool
- LoadJobMeta(job *schema.Job) (*schema.JobMeta, error)
+ LoadJobMeta(job *schema.Job) (*schema.Job, error)
LoadJobData(job *schema.Job) (schema.JobData, error)
@@ -31,9 +32,9 @@ type ArchiveBackend interface {
LoadClusterCfg(name string) (*schema.Cluster, error)
- StoreJobMeta(jobMeta *schema.JobMeta) error
+ StoreJobMeta(jobMeta *schema.Job) error
- ImportJob(jobMeta *schema.JobMeta, jobData *schema.JobData) error
+ ImportJob(jobMeta *schema.Job, jobData *schema.JobData) error
GetClusters() []string
@@ -51,7 +52,7 @@ type ArchiveBackend interface {
}
type JobContainer struct {
- Meta *schema.JobMeta
+ Meta *schema.Job
Data *schema.JobData
}
@@ -60,6 +61,7 @@ var (
cache *lrucache.Cache = lrucache.New(128 * 1024 * 1024)
ar ArchiveBackend
useArchive bool
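+ // mutex serializes the load-modify-store cycles in UpdateMetadata
+ // and UpdateTags.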
+ mutex sync.Mutex
)
func Init(rawConfig json.RawMessage, disableArchive bool) error {
@@ -73,7 +75,7 @@ func Init(rawConfig json.RawMessage, disableArchive bool) error {
}
if err = json.Unmarshal(rawConfig, &cfg); err != nil {
- log.Warn("Error while unmarshaling raw config json")
+ cclog.Warn("Error while unmarshaling raw config json")
return
}
@@ -89,10 +91,10 @@ func Init(rawConfig json.RawMessage, disableArchive bool) error {
var version uint64
version, err = ar.Init(rawConfig)
if err != nil {
- log.Errorf("Error while initializing archiveBackend: %s", err.Error())
+ cclog.Errorf("Error while initializing archiveBackend: %s", err.Error())
return
}
- log.Infof("Load archive version %d", version)
+ cclog.Infof("Load archive version %d", version)
err = initClusterConfig()
})
@@ -112,7 +114,7 @@ func LoadAveragesFromArchive(
) error {
metaFile, err := ar.LoadJobMeta(job)
if err != nil {
- log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
+ cclog.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
return err
}
@@ -135,7 +137,7 @@ func LoadStatsFromArchive(
data := make(map[string]schema.MetricStatistics, len(metrics))
metaFile, err := ar.LoadJobMeta(job)
if err != nil {
- log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
+ cclog.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
return data, err
}
@@ -162,10 +164,9 @@ func LoadScopedStatsFromArchive(
metrics []string,
scopes []schema.MetricScope,
) (schema.ScopedJobStats, error) {
-
data, err := ar.LoadJobStats(job)
if err != nil {
- log.Errorf("Error while loading job stats from archiveBackend: %s", err.Error())
+ cclog.Errorf("Error while loading job stats from archiveBackend: %s", err.Error())
return nil, err
}
@@ -175,7 +176,7 @@ func LoadScopedStatsFromArchive(
func GetStatistics(job *schema.Job) (map[string]schema.JobStatistics, error) {
metaFile, err := ar.LoadJobMeta(job)
if err != nil {
- log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
+ cclog.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
return nil, err
}
@@ -185,19 +186,20 @@ func GetStatistics(job *schema.Job) (map[string]schema.JobStatistics, error) {
// If the job is archived, find its `meta.json` file and override the Metadata
// in that JSON file. If the job is not archived, nothing is done.
func UpdateMetadata(job *schema.Job, metadata map[string]string) error {
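+ // Serialize the meta.json read-modify-write cycle so concurrent
+ // metadata updates cannot overwrite each other.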
+ mutex.Lock()
+ defer mutex.Unlock()
+
if job.State == schema.JobStateRunning || !useArchive {
return nil
}
jobMeta, err := ar.LoadJobMeta(job)
if err != nil {
- log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
+ cclog.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
return err
}
- for k, v := range metadata {
- jobMeta.MetaData[k] = v
- }
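+ // Merge the new metadata into the archived job, overwriting
+ // existing keys.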
+ maps.Copy(jobMeta.MetaData, metadata)
return ar.StoreJobMeta(jobMeta)
}
@@ -205,13 +207,16 @@ func UpdateMetadata(job *schema.Job, metadata map[string]string) error {
// If the job is archived, find its `meta.json` file and override the tags list
// in that JSON file. If the job is not archived, nothing is done.
func UpdateTags(job *schema.Job, tags []*schema.Tag) error {
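+ // Same locking as in UpdateMetadata: guard the meta.json
+ // read-modify-write cycle against concurrent writers.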
+ mutex.Lock()
+ defer mutex.Unlock()
+
if job.State == schema.JobStateRunning || !useArchive {
return nil
}
jobMeta, err := ar.LoadJobMeta(job)
if err != nil {
- log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
+ cclog.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
return err
}
diff --git a/pkg/archive/archive_test.go b/pkg/archive/archive_test.go
index ac00ea1..34ea831 100644
--- a/pkg/archive/archive_test.go
+++ b/pkg/archive/archive_test.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package archive_test
@@ -9,11 +9,10 @@ import (
"fmt"
"path/filepath"
"testing"
- "time"
- "github.com/ClusterCockpit/cc-backend/internal/util"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ "github.com/ClusterCockpit/cc-lib/schema"
+ "github.com/ClusterCockpit/cc-lib/util"
)
var jobs []*schema.Job
@@ -32,28 +31,28 @@ func setup(t *testing.T) archive.ArchiveBackend {
jobs[0] = &schema.Job{}
jobs[0].JobID = 1403244
jobs[0].Cluster = "emmy"
- jobs[0].StartTime = time.Unix(1608923076, 0)
+ jobs[0].StartTime = 1608923076
jobs[1] = &schema.Job{}
jobs[0].JobID = 1404397
jobs[0].Cluster = "emmy"
- jobs[0].StartTime = time.Unix(1609300556, 0)
+ jobs[0].StartTime = 1609300556
return archive.GetHandle()
}
-func TestCleanUp(t *testing.T) {
- a := setup(t)
- if !a.Exists(jobs[0]) {
- t.Error("Job does not exist")
- }
+// func TestCleanUp(t *testing.T) {
+// a := setup(t)
+// if !a.Exists(jobs[0]) {
+// t.Error("Job does not exist")
+// }
- a.CleanUp(jobs)
+// a.CleanUp(jobs)
- if a.Exists(jobs[0]) || a.Exists(jobs[1]) {
- t.Error("Jobs still exist")
- }
-}
+// if a.Exists(jobs[0]) || a.Exists(jobs[1]) {
+// t.Error("Jobs still exist")
+// }
+// }
// func TestCompress(t *testing.T) {
// a := setup(t)
diff --git a/pkg/archive/clusterConfig.go b/pkg/archive/clusterConfig.go
index d53941b..51b89b1 100644
--- a/pkg/archive/clusterConfig.go
+++ b/pkg/archive/clusterConfig.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package archive
@@ -8,8 +8,8 @@ import (
"errors"
"fmt"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/schema"
)
var (
@@ -27,7 +27,7 @@ func initClusterConfig() error {
cluster, err := ar.LoadClusterCfg(c)
if err != nil {
- log.Warnf("Error while loading cluster config for cluster '%v'", c)
+ cclog.Warnf("Error while loading cluster config for cluster '%v'", c)
return err
}
@@ -69,16 +69,18 @@ func initClusterConfig() error {
for _, sc := range cluster.SubClusters {
newMetric := &schema.MetricConfig{
- Unit: mc.Unit,
+ Metric: schema.Metric{
+ Name: mc.Name,
+ Unit: mc.Unit,
+ Peak: mc.Peak,
+ Normal: mc.Normal,
+ Caution: mc.Caution,
+ Alert: mc.Alert,
+ },
Energy: mc.Energy,
- Name: mc.Name,
Scope: mc.Scope,
Aggregation: mc.Aggregation,
- Peak: mc.Peak,
- Caution: mc.Caution,
- Alert: mc.Alert,
Timestep: mc.Timestep,
- Normal: mc.Normal,
LowerIsBetter: mc.LowerIsBetter,
}
@@ -167,6 +169,45 @@ func GetSubCluster(cluster, subcluster string) (*schema.SubCluster, error) {
return nil, fmt.Errorf("subcluster '%v' not found for cluster '%v', or cluster '%v' not configured", subcluster, cluster, cluster)
}
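+// GetMetricConfigSubCluster returns the effective metric thresholds for one
+// subcluster: subcluster-specific overrides take precedence, cluster-wide
+// defaults fill in the remaining metrics.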
+func GetMetricConfigSubCluster(cluster, subcluster string) map[string]*schema.Metric {
+ metrics := make(map[string]*schema.Metric)
+
+ for _, c := range Clusters {
+ if c.Name == cluster {
+ for _, m := range c.MetricConfig {
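+ // Prefer a subcluster-specific override if one is configured.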
+ for _, s := range m.SubClusters {
+ if s.Name == subcluster {
+ metrics[m.Name] = &schema.Metric{
+ Name: m.Name,
+ Unit: s.Unit,
+ Peak: s.Peak,
+ Normal: s.Normal,
+ Caution: s.Caution,
+ Alert: s.Alert,
+ }
+ break
+ }
+ }
+
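+ // No override found: fall back to the cluster-wide defaults.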
+ _, ok := metrics[m.Name]
+ if !ok {
+ metrics[m.Name] = &schema.Metric{
+ Name: m.Name,
+ Unit: m.Unit,
+ Peak: m.Peak,
+ Normal: m.Normal,
+ Caution: m.Caution,
+ Alert: m.Alert,
+ }
+ }
+ }
+ break
+ }
+ }
+
+ return metrics
+}
+
func GetMetricConfig(cluster, metric string) *schema.MetricConfig {
for _, c := range Clusters {
if c.Name == cluster {
@@ -182,7 +223,7 @@ func GetMetricConfig(cluster, metric string) *schema.MetricConfig {
// AssignSubCluster sets the `job.subcluster` property of the job based
// on its cluster and resources.
-func AssignSubCluster(job *schema.BaseJob) error {
+func AssignSubCluster(job *schema.Job) error {
cluster := GetCluster(job.Cluster)
if cluster == nil {
return fmt.Errorf("ARCHIVE/CLUSTERCONFIG > unkown cluster: %v", job.Cluster)
diff --git a/pkg/archive/clusterConfig_test.go b/pkg/archive/clusterConfig_test.go
index a73f22f..3613017 100644
--- a/pkg/archive/clusterConfig_test.go
+++ b/pkg/archive/clusterConfig_test.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package archive_test
diff --git a/pkg/archive/fsBackend.go b/pkg/archive/fsBackend.go
index 711b1f5..8f10360 100644
--- a/pkg/archive/fsBackend.go
+++ b/pkg/archive/fsBackend.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package archive
@@ -21,9 +21,9 @@ import (
"time"
"github.com/ClusterCockpit/cc-backend/internal/config"
- "github.com/ClusterCockpit/cc-backend/internal/util"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/schema"
+ "github.com/ClusterCockpit/cc-lib/util"
"github.com/santhosh-tekuri/jsonschema/v5"
)
@@ -53,28 +53,27 @@ func getDirectory(
rootPath,
job.Cluster,
lvl1, lvl2,
- strconv.FormatInt(job.StartTime.Unix(), 10))
+ strconv.FormatInt(job.StartTime, 10))
}
func getPath(
job *schema.Job,
rootPath string,
- file string) string {
-
+ file string,
+) string {
return filepath.Join(
getDirectory(job, rootPath), file)
}
-func loadJobMeta(filename string) (*schema.JobMeta, error) {
-
+func loadJobMeta(filename string) (*schema.Job, error) {
b, err := os.ReadFile(filename)
if err != nil {
- log.Errorf("loadJobMeta() > open file error: %v", err)
- return &schema.JobMeta{}, err
+ cclog.Errorf("loadJobMeta() > open file error: %v", err)
+ return nil, err
}
if config.Keys.Validate {
if err := schema.Validate(schema.Meta, bytes.NewReader(b)); err != nil {
- return &schema.JobMeta{}, fmt.Errorf("validate job meta: %v", err)
+ return nil, fmt.Errorf("validate job meta: %v", err)
}
}
@@ -83,9 +82,8 @@ func loadJobMeta(filename string) (*schema.JobMeta, error) {
func loadJobData(filename string, isCompressed bool) (schema.JobData, error) {
f, err := os.Open(filename)
-
if err != nil {
- log.Errorf("fsBackend LoadJobData()- %v", err)
+ cclog.Errorf("fsBackend LoadJobData()- %v", err)
return nil, err
}
defer f.Close()
@@ -93,7 +91,7 @@ func loadJobData(filename string, isCompressed bool) (schema.JobData, error) {
if isCompressed {
r, err := gzip.NewReader(f)
if err != nil {
- log.Errorf(" %v", err)
+ cclog.Errorf(" %v", err)
return nil, err
}
defer r.Close()
@@ -117,9 +115,8 @@ func loadJobData(filename string, isCompressed bool) (schema.JobData, error) {
func loadJobStats(filename string, isCompressed bool) (schema.ScopedJobStats, error) {
f, err := os.Open(filename)
-
if err != nil {
- log.Errorf("fsBackend LoadJobStats()- %v", err)
+ cclog.Errorf("fsBackend LoadJobStats()- %v", err)
return nil, err
}
defer f.Close()
@@ -127,7 +124,7 @@ func loadJobStats(filename string, isCompressed bool) (schema.ScopedJobStats, er
if isCompressed {
r, err := gzip.NewReader(f)
if err != nil {
- log.Errorf(" %v", err)
+ cclog.Errorf(" %v", err)
return nil, err
}
defer r.Close()
@@ -150,28 +147,27 @@ func loadJobStats(filename string, isCompressed bool) (schema.ScopedJobStats, er
}
func (fsa *FsArchive) Init(rawConfig json.RawMessage) (uint64, error) {
-
var config FsArchiveConfig
if err := json.Unmarshal(rawConfig, &config); err != nil {
- log.Warnf("Init() > Unmarshal error: %#v", err)
+ cclog.Warnf("Init() > Unmarshal error: %#v", err)
return 0, err
}
if config.Path == "" {
err := fmt.Errorf("Init() : empty config.Path")
- log.Errorf("Init() > config.Path error: %v", err)
+ cclog.Errorf("Init() > config.Path error: %v", err)
return 0, err
}
fsa.path = config.Path
b, err := os.ReadFile(filepath.Join(fsa.path, "version.txt"))
if err != nil {
- log.Warnf("fsBackend Init() - %v", err)
+ cclog.Warnf("fsBackend Init() - %v", err)
return 0, err
}
version, err := strconv.ParseUint(strings.TrimSuffix(string(b), "\n"), 10, 64)
if err != nil {
- log.Errorf("fsBackend Init()- %v", err)
+ cclog.Errorf("fsBackend Init()- %v", err)
return 0, err
}
@@ -181,7 +177,7 @@ func (fsa *FsArchive) Init(rawConfig json.RawMessage) (uint64, error) {
entries, err := os.ReadDir(fsa.path)
if err != nil {
- log.Errorf("Init() > ReadDir() error: %v", err)
+ cclog.Errorf("Init() > ReadDir() error: %v", err)
return 0, err
}
@@ -199,7 +195,7 @@ func (fsa *FsArchive) Info() {
fmt.Printf("Job archive %s\n", fsa.path)
clusters, err := os.ReadDir(fsa.path)
if err != nil {
- log.Fatalf("Reading clusters failed: %s", err.Error())
+ cclog.Fatalf("Reading clusters failed: %s", err.Error())
}
ci := make(map[string]*clusterInfo)
@@ -213,7 +209,7 @@ func (fsa *FsArchive) Info() {
ci[cc] = &clusterInfo{dateFirst: time.Now().Unix()}
lvl1Dirs, err := os.ReadDir(filepath.Join(fsa.path, cluster.Name()))
if err != nil {
- log.Fatalf("Reading jobs failed @ lvl1 dirs: %s", err.Error())
+ cclog.Fatalf("Reading jobs failed @ lvl1 dirs: %s", err.Error())
}
for _, lvl1Dir := range lvl1Dirs {
@@ -222,14 +218,14 @@ func (fsa *FsArchive) Info() {
}
lvl2Dirs, err := os.ReadDir(filepath.Join(fsa.path, cluster.Name(), lvl1Dir.Name()))
if err != nil {
- log.Fatalf("Reading jobs failed @ lvl2 dirs: %s", err.Error())
+ cclog.Fatalf("Reading jobs failed @ lvl2 dirs: %s", err.Error())
}
for _, lvl2Dir := range lvl2Dirs {
dirpath := filepath.Join(fsa.path, cluster.Name(), lvl1Dir.Name(), lvl2Dir.Name())
startTimeDirs, err := os.ReadDir(dirpath)
if err != nil {
- log.Fatalf("Reading jobs failed @ starttime dirs: %s", err.Error())
+ cclog.Fatalf("Reading jobs failed @ starttime dirs: %s", err.Error())
}
for _, startTimeDir := range startTimeDirs {
@@ -237,7 +233,7 @@ func (fsa *FsArchive) Info() {
ci[cc].numJobs++
startTime, err := strconv.ParseInt(startTimeDir.Name(), 10, 64)
if err != nil {
- log.Fatalf("Cannot parse starttime: %s", err.Error())
+ cclog.Fatalf("Cannot parse starttime: %s", err.Error())
}
ci[cc].dateFirst = util.Min(ci[cc].dateFirst, startTime)
ci[cc].dateLast = util.Max(ci[cc].dateLast, startTime)
@@ -276,14 +272,13 @@ func (fsa *FsArchive) Exists(job *schema.Job) bool {
}
func (fsa *FsArchive) Clean(before int64, after int64) {
-
if after == 0 {
after = math.MaxInt64
}
clusters, err := os.ReadDir(fsa.path)
if err != nil {
- log.Fatalf("Reading clusters failed: %s", err.Error())
+ cclog.Fatalf("Reading clusters failed: %s", err.Error())
}
for _, cluster := range clusters {
@@ -293,7 +288,7 @@ func (fsa *FsArchive) Clean(before int64, after int64) {
lvl1Dirs, err := os.ReadDir(filepath.Join(fsa.path, cluster.Name()))
if err != nil {
- log.Fatalf("Reading jobs failed @ lvl1 dirs: %s", err.Error())
+ cclog.Fatalf("Reading jobs failed @ lvl1 dirs: %s", err.Error())
}
for _, lvl1Dir := range lvl1Dirs {
@@ -302,33 +297,33 @@ func (fsa *FsArchive) Clean(before int64, after int64) {
}
lvl2Dirs, err := os.ReadDir(filepath.Join(fsa.path, cluster.Name(), lvl1Dir.Name()))
if err != nil {
- log.Fatalf("Reading jobs failed @ lvl2 dirs: %s", err.Error())
+ cclog.Fatalf("Reading jobs failed @ lvl2 dirs: %s", err.Error())
}
for _, lvl2Dir := range lvl2Dirs {
dirpath := filepath.Join(fsa.path, cluster.Name(), lvl1Dir.Name(), lvl2Dir.Name())
startTimeDirs, err := os.ReadDir(dirpath)
if err != nil {
- log.Fatalf("Reading jobs failed @ starttime dirs: %s", err.Error())
+ cclog.Fatalf("Reading jobs failed @ starttime dirs: %s", err.Error())
}
for _, startTimeDir := range startTimeDirs {
if startTimeDir.IsDir() {
startTime, err := strconv.ParseInt(startTimeDir.Name(), 10, 64)
if err != nil {
- log.Fatalf("Cannot parse starttime: %s", err.Error())
+ cclog.Fatalf("Cannot parse starttime: %s", err.Error())
}
if startTime < before || startTime > after {
if err := os.RemoveAll(filepath.Join(dirpath, startTimeDir.Name())); err != nil {
- log.Errorf("JobArchive Cleanup() error: %v", err)
+ cclog.Errorf("JobArchive Cleanup() error: %v", err)
}
}
}
}
if util.GetFilecount(dirpath) == 0 {
if err := os.Remove(dirpath); err != nil {
- log.Errorf("JobArchive Clean() error: %v", err)
+ cclog.Errorf("JobArchive Clean() error: %v", err)
}
}
}
@@ -342,16 +337,16 @@ func (fsa *FsArchive) Move(jobs []*schema.Job, path string) {
target := getDirectory(job, path)
if err := os.MkdirAll(filepath.Clean(filepath.Join(target, "..")), 0777); err != nil {
- log.Errorf("JobArchive Move MkDir error: %v", err)
+ cclog.Errorf("JobArchive Move MkDir error: %v", err)
}
if err := os.Rename(source, target); err != nil {
- log.Errorf("JobArchive Move() error: %v", err)
+ cclog.Errorf("JobArchive Move() error: %v", err)
}
parent := filepath.Clean(filepath.Join(source, ".."))
if util.GetFilecount(parent) == 0 {
if err := os.Remove(parent); err != nil {
- log.Errorf("JobArchive Move() error: %v", err)
+ cclog.Errorf("JobArchive Move() error: %v", err)
}
}
}
@@ -360,20 +355,24 @@ func (fsa *FsArchive) Move(jobs []*schema.Job, path string) {
func (fsa *FsArchive) CleanUp(jobs []*schema.Job) {
start := time.Now()
for _, job := range jobs {
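+ // Defensive: skip nil entries instead of panicking on a bad jobs slice.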
+ if job == nil {
+ cclog.Errorf("JobArchive Cleanup() error: job is nil")
+ continue
+ }
dir := getDirectory(job, fsa.path)
if err := os.RemoveAll(dir); err != nil {
- log.Errorf("JobArchive Cleanup() error: %v", err)
+ cclog.Errorf("JobArchive Cleanup() error: %v", err)
}
parent := filepath.Clean(filepath.Join(dir, ".."))
if util.GetFilecount(parent) == 0 {
if err := os.Remove(parent); err != nil {
- log.Errorf("JobArchive Cleanup() error: %v", err)
+ cclog.Errorf("JobArchive Cleanup() error: %v", err)
}
}
}
- log.Infof("Retention Service - Remove %d files in %s", len(jobs), time.Since(start))
+ cclog.Infof("Retention Service - Remove %d files in %s", len(jobs), time.Since(start))
}
func (fsa *FsArchive) Compress(jobs []*schema.Job) {
@@ -388,25 +387,24 @@ func (fsa *FsArchive) Compress(jobs []*schema.Job) {
}
}
- log.Infof("Compression Service - %d files took %s", cnt, time.Since(start))
+ cclog.Infof("Compression Service - %d files took %s", cnt, time.Since(start))
}
func (fsa *FsArchive) CompressLast(starttime int64) int64 {
-
filename := filepath.Join(fsa.path, "compress.txt")
b, err := os.ReadFile(filename)
if err != nil {
- log.Errorf("fsBackend Compress - %v", err)
+ cclog.Errorf("fsBackend Compress - %v", err)
os.WriteFile(filename, []byte(fmt.Sprintf("%d", starttime)), 0644)
return starttime
}
last, err := strconv.ParseInt(strings.TrimSuffix(string(b), "\n"), 10, 64)
if err != nil {
- log.Errorf("fsBackend Compress - %v", err)
+ cclog.Errorf("fsBackend Compress - %v", err)
return starttime
}
- log.Infof("fsBackend Compress - start %d last %d", starttime, last)
+ cclog.Infof("fsBackend Compress - start %d last %d", starttime, last)
os.WriteFile(filename, []byte(fmt.Sprintf("%d", starttime)), 0644)
return last
}
@@ -435,19 +433,18 @@ func (fsa *FsArchive) LoadJobStats(job *schema.Job) (schema.ScopedJobStats, erro
return loadJobStats(filename, isCompressed)
}
-func (fsa *FsArchive) LoadJobMeta(job *schema.Job) (*schema.JobMeta, error) {
+func (fsa *FsArchive) LoadJobMeta(job *schema.Job) (*schema.Job, error) {
filename := getPath(job, fsa.path, "meta.json")
return loadJobMeta(filename)
}
func (fsa *FsArchive) LoadClusterCfg(name string) (*schema.Cluster, error) {
-
b, err := os.ReadFile(filepath.Join(fsa.path, name, "cluster.json"))
if err != nil {
- log.Errorf("LoadClusterCfg() > open file error: %v", err)
+ cclog.Errorf("LoadClusterCfg() > open file error: %v", err)
// if config.Keys.Validate {
if err := schema.Validate(schema.ClusterCfg, bytes.NewReader(b)); err != nil {
- log.Warnf("Validate cluster config: %v\n", err)
+ cclog.Warnf("Validate cluster config: %v\n", err)
return &schema.Cluster{}, fmt.Errorf("validate cluster config: %v", err)
}
}
@@ -456,12 +453,11 @@ func (fsa *FsArchive) LoadClusterCfg(name string) (*schema.Cluster, error) {
}
func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer {
-
ch := make(chan JobContainer)
go func() {
clustersDir, err := os.ReadDir(fsa.path)
if err != nil {
- log.Fatalf("Reading clusters failed @ cluster dirs: %s", err.Error())
+ cclog.Fatalf("Reading clusters failed @ cluster dirs: %s", err.Error())
}
for _, clusterDir := range clustersDir {
@@ -470,7 +466,7 @@ func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer {
}
lvl1Dirs, err := os.ReadDir(filepath.Join(fsa.path, clusterDir.Name()))
if err != nil {
- log.Fatalf("Reading jobs failed @ lvl1 dirs: %s", err.Error())
+ cclog.Fatalf("Reading jobs failed @ lvl1 dirs: %s", err.Error())
}
for _, lvl1Dir := range lvl1Dirs {
@@ -481,21 +477,21 @@ func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer {
lvl2Dirs, err := os.ReadDir(filepath.Join(fsa.path, clusterDir.Name(), lvl1Dir.Name()))
if err != nil {
- log.Fatalf("Reading jobs failed @ lvl2 dirs: %s", err.Error())
+ cclog.Fatalf("Reading jobs failed @ lvl2 dirs: %s", err.Error())
}
for _, lvl2Dir := range lvl2Dirs {
dirpath := filepath.Join(fsa.path, clusterDir.Name(), lvl1Dir.Name(), lvl2Dir.Name())
startTimeDirs, err := os.ReadDir(dirpath)
if err != nil {
- log.Fatalf("Reading jobs failed @ starttime dirs: %s", err.Error())
+ cclog.Fatalf("Reading jobs failed @ starttime dirs: %s", err.Error())
}
for _, startTimeDir := range startTimeDirs {
if startTimeDir.IsDir() {
job, err := loadJobMeta(filepath.Join(dirpath, startTimeDir.Name(), "meta.json"))
if err != nil && !errors.Is(err, &jsonschema.ValidationError{}) {
- log.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error())
+ cclog.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error())
}
if loadMetricData {
@@ -509,10 +505,10 @@ func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer {
data, err := loadJobData(filename, isCompressed)
if err != nil && !errors.Is(err, &jsonschema.ValidationError{}) {
- log.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error())
+ cclog.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error())
}
ch <- JobContainer{Meta: job, Data: &data}
- log.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error())
+ cclog.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error())
} else {
ch <- JobContainer{Meta: job, Data: nil}
}
@@ -526,24 +522,18 @@ func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer {
return ch
}
-func (fsa *FsArchive) StoreJobMeta(jobMeta *schema.JobMeta) error {
-
- job := schema.Job{
- BaseJob: jobMeta.BaseJob,
- StartTime: time.Unix(jobMeta.StartTime, 0),
- StartTimeUnix: jobMeta.StartTime,
- }
- f, err := os.Create(getPath(&job, fsa.path, "meta.json"))
+func (fsa *FsArchive) StoreJobMeta(job *schema.Job) error {
+ f, err := os.Create(getPath(job, fsa.path, "meta.json"))
if err != nil {
- log.Error("Error while creating filepath for meta.json")
+ cclog.Error("Error while creating filepath for meta.json")
return err
}
- if err := EncodeJobMeta(f, jobMeta); err != nil {
- log.Error("Error while encoding job metadata to meta.json file")
+ if err := EncodeJobMeta(f, job); err != nil {
+ cclog.Error("Error while encoding job metadata to meta.json file")
return err
}
if err := f.Close(); err != nil {
- log.Warn("Error while closing meta.json file")
+ cclog.Warn("Error while closing meta.json file")
return err
}
@@ -555,67 +545,40 @@ func (fsa *FsArchive) GetClusters() []string {
}
func (fsa *FsArchive) ImportJob(
- jobMeta *schema.JobMeta,
- jobData *schema.JobData) error {
-
- job := schema.Job{
- BaseJob: jobMeta.BaseJob,
- StartTime: time.Unix(jobMeta.StartTime, 0),
- StartTimeUnix: jobMeta.StartTime,
- }
- dir := getPath(&job, fsa.path, "")
+ jobMeta *schema.Job,
+ jobData *schema.JobData,
+) error {
+ dir := getPath(jobMeta, fsa.path, "")
if err := os.MkdirAll(dir, 0777); err != nil {
- log.Error("Error while creating job archive path")
+ cclog.Error("Error while creating job archive path")
return err
}
f, err := os.Create(path.Join(dir, "meta.json"))
if err != nil {
- log.Error("Error while creating filepath for meta.json")
+ cclog.Error("Error while creating filepath for meta.json")
return err
}
if err := EncodeJobMeta(f, jobMeta); err != nil {
- log.Error("Error while encoding job metadata to meta.json file")
+ cclog.Error("Error while encoding job metadata to meta.json file")
return err
}
if err := f.Close(); err != nil {
- log.Warn("Error while closing meta.json file")
+ cclog.Warn("Error while closing meta.json file")
return err
}
- // var isCompressed bool = true
- // // TODO Use shortJob Config for check
- // if jobMeta.Duration < 300 {
- // isCompressed = false
- // f, err = os.Create(path.Join(dir, "data.json"))
- // } else {
- // f, err = os.Create(path.Join(dir, "data.json.gz"))
- // }
- // if err != nil {
- // return err
- // }
- //
- // if isCompressed {
- // if err := EncodeJobData(gzip.NewWriter(f), jobData); err != nil {
- // return err
- // }
- // } else {
- // if err := EncodeJobData(f, jobData); err != nil {
- // return err
- // }
- // }
-
f, err = os.Create(path.Join(dir, "data.json"))
if err != nil {
- log.Error("Error while creating filepath for data.json")
+ cclog.Error("Error while creating filepath for data.json")
return err
}
if err := EncodeJobData(f, jobData); err != nil {
- log.Error("Error while encoding job metricdata to data.json file")
+ cclog.Error("Error while encoding job metricdata to data.json file")
return err
}
if err := f.Close(); err != nil {
- log.Warn("Error while closing data.json file")
+ cclog.Warn("Error while closing data.json file")
}
return err
}
diff --git a/pkg/archive/fsBackend_test.go b/pkg/archive/fsBackend_test.go
index 9db68ed..7b1fe74 100644
--- a/pkg/archive/fsBackend_test.go
+++ b/pkg/archive/fsBackend_test.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package archive
@@ -9,10 +9,9 @@ import (
"fmt"
"path/filepath"
"testing"
- "time"
- "github.com/ClusterCockpit/cc-backend/internal/util"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ "github.com/ClusterCockpit/cc-lib/schema"
+ "github.com/ClusterCockpit/cc-lib/util"
)
func TestInitEmptyPath(t *testing.T) {
@@ -86,8 +85,11 @@ func TestLoadJobMeta(t *testing.T) {
t.Fatal(err)
}
- jobIn := schema.Job{BaseJob: schema.JobDefaults}
- jobIn.StartTime = time.Unix(1608923076, 0)
+ jobIn := schema.Job{
+ Exclusive: 1,
+ MonitoringStatus: schema.MonitoringStatusRunningOrArchiving,
+ }
+ jobIn.StartTime = 1608923076
jobIn.JobID = 1403244
jobIn.Cluster = "emmy"
@@ -114,8 +116,11 @@ func TestLoadJobData(t *testing.T) {
t.Fatal(err)
}
- jobIn := schema.Job{BaseJob: schema.JobDefaults}
- jobIn.StartTime = time.Unix(1608923076, 0)
+ jobIn := schema.Job{
+ Exclusive: 1,
+ MonitoringStatus: schema.MonitoringStatusRunningOrArchiving,
+ }
+ jobIn.StartTime = 1608923076
jobIn.JobID = 1403244
jobIn.Cluster = "emmy"
@@ -142,8 +147,11 @@ func BenchmarkLoadJobData(b *testing.B) {
var fsa FsArchive
fsa.Init(json.RawMessage(archiveCfg))
- jobIn := schema.Job{BaseJob: schema.JobDefaults}
- jobIn.StartTime = time.Unix(1608923076, 0)
+ jobIn := schema.Job{
+ Exclusive: 1,
+ MonitoringStatus: schema.MonitoringStatusRunningOrArchiving,
+ }
+ jobIn.StartTime = 1608923076
jobIn.JobID = 1403244
jobIn.Cluster = "emmy"
@@ -165,8 +173,11 @@ func BenchmarkLoadJobDataCompressed(b *testing.B) {
var fsa FsArchive
fsa.Init(json.RawMessage(archiveCfg))
- jobIn := schema.Job{BaseJob: schema.JobDefaults}
- jobIn.StartTime = time.Unix(1608923076, 0)
+ jobIn := schema.Job{
+ Exclusive: 1,
+ MonitoringStatus: schema.MonitoringStatusRunningOrArchiving,
+ }
+ jobIn.StartTime = 1608923076
jobIn.JobID = 1403244
jobIn.Cluster = "emmy"
diff --git a/pkg/archive/json.go b/pkg/archive/json.go
index 5201b74..f0665d4 100644
--- a/pkg/archive/json.go
+++ b/pkg/archive/json.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package archive
@@ -9,15 +9,15 @@ import (
"io"
"time"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+ "github.com/ClusterCockpit/cc-lib/schema"
)
func DecodeJobData(r io.Reader, k string) (schema.JobData, error) {
data := cache.Get(k, func() (value interface{}, ttl time.Duration, size int) {
var d schema.JobData
if err := json.NewDecoder(r).Decode(&d); err != nil {
- log.Warn("Error while decoding raw job data json")
+ cclog.Warn("Error while decoding raw job data json")
return err, 0, 1000
}
@@ -25,7 +25,7 @@ func DecodeJobData(r io.Reader, k string) (schema.JobData, error) {
})
if err, ok := data.(error); ok {
- log.Warn("Error in decoded job data set")
+ cclog.Warn("Error in decoded job data set")
return nil, err
}
@@ -69,10 +69,10 @@ func DecodeJobStats(r io.Reader, k string) (schema.ScopedJobStats, error) {
return nil, err
}
-func DecodeJobMeta(r io.Reader) (*schema.JobMeta, error) {
- var d schema.JobMeta
+func DecodeJobMeta(r io.Reader) (*schema.Job, error) {
+ var d schema.Job
if err := json.NewDecoder(r).Decode(&d); err != nil {
- log.Warn("Error while decoding raw job meta json")
+ cclog.Warn("Error while decoding raw job meta json")
return &d, err
}
@@ -84,7 +84,7 @@ func DecodeJobMeta(r io.Reader) (*schema.JobMeta, error) {
func DecodeCluster(r io.Reader) (*schema.Cluster, error) {
var c schema.Cluster
if err := json.NewDecoder(r).Decode(&c); err != nil {
- log.Warn("Error while decoding raw cluster json")
+ cclog.Warn("Error while decoding raw cluster json")
return &c, err
}
@@ -96,17 +96,17 @@ func DecodeCluster(r io.Reader) (*schema.Cluster, error) {
func EncodeJobData(w io.Writer, d *schema.JobData) error {
// Sanitize parameters
if err := json.NewEncoder(w).Encode(d); err != nil {
- log.Warn("Error while encoding new job data json")
+ cclog.Warn("Error while encoding new job data json")
return err
}
return nil
}
-func EncodeJobMeta(w io.Writer, d *schema.JobMeta) error {
+func EncodeJobMeta(w io.Writer, d *schema.Job) error {
// Sanitize parameters
if err := json.NewEncoder(w).Encode(d); err != nil {
- log.Warn("Error while encoding new job meta json")
+ cclog.Warn("Error while encoding new job meta json")
return err
}
diff --git a/pkg/archive/nodelist.go b/pkg/archive/nodelist.go
index 7700185..23ce8a4 100644
--- a/pkg/archive/nodelist.go
+++ b/pkg/archive/nodelist.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package archive
@@ -9,7 +9,7 @@ import (
"strconv"
"strings"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
)
type NodeList [][]interface {
@@ -51,7 +51,7 @@ func (nl *NodeList) PrintList() []string {
if inner["zeroPadded"] == 1 {
out = append(out, fmt.Sprintf("%s%0*d", prefix, inner["digits"], i))
} else {
- log.Error("node list: only zero-padded ranges are allowed")
+ cclog.Error("node list: only zero-padded ranges are allowed")
}
}
}
@@ -61,7 +61,7 @@ func (nl *NodeList) PrintList() []string {
}
func (nl *NodeList) NodeCount() int {
- var out int = 0
+ out := 0
for _, term := range *nl {
if len(term) == 1 { // If only String-Part in Term: Single Node Name -> add one
out += 1
@@ -129,7 +129,7 @@ type NLExprIntRange struct {
func (nle NLExprIntRange) consume(input string) (next string, ok bool) {
if !nle.zeroPadded || nle.digits < 1 {
- log.Error("only zero-padded ranges are allowed")
+ cclog.Error("only zero-padded ranges are allowed")
return "", false
}
@@ -160,7 +160,7 @@ func (nle NLExprIntRange) limits() []map[string]int {
m["start"] = int(nle.start)
m["end"] = int(nle.end)
m["digits"] = int(nle.digits)
- if nle.zeroPadded == true {
+ if nle.zeroPadded {
m["zeroPadded"] = 1
} else {
m["zeroPadded"] = 0
@@ -183,14 +183,15 @@ func ParseNodeList(raw string) (NodeList, error) {
rawterms := []string{}
prevterm := 0
for i := 0; i < len(raw); i++ {
- if raw[i] == '[' {
+ switch raw[i] {
+ case '[':
for i < len(raw) && raw[i] != ']' {
i++
}
if i == len(raw) {
return nil, fmt.Errorf("ARCHIVE/NODELIST > unclosed '['")
}
- } else if raw[i] == ',' {
+ case ',':
rawterms = append(rawterms, raw[prevterm:i])
prevterm = i + 1
}
diff --git a/pkg/archive/nodelist_test.go b/pkg/archive/nodelist_test.go
index 52aa812..f2747c8 100644
--- a/pkg/archive/nodelist_test.go
+++ b/pkg/archive/nodelist_test.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package archive
diff --git a/pkg/archive/s3Backend.go b/pkg/archive/s3Backend.go
index d8b06e7..8af644a 100644
--- a/pkg/archive/s3Backend.go
+++ b/pkg/archive/s3Backend.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package archive
diff --git a/pkg/log/log.go b/pkg/log/log.go
deleted file mode 100644
index ef14535..0000000
--- a/pkg/log/log.go
+++ /dev/null
@@ -1,220 +0,0 @@
-// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
-// Use of this source code is governed by a MIT-style
-// license that can be found in the LICENSE file.
-package log
-
-import (
- "fmt"
- "io"
- "log"
- "os"
-)
-
-// Provides a simple way of logging with different levels.
-// Time/Date are not logged because systemd adds
-// them for us (default; can be changed via the flag '--logdate true').
-//
-// Uses these prefixes: https://www.freedesktop.org/software/systemd/man/sd-daemon.html
-
-var (
- DebugWriter io.Writer = os.Stderr
- InfoWriter io.Writer = os.Stderr
- WarnWriter io.Writer = os.Stderr
- ErrWriter io.Writer = os.Stderr
- CritWriter io.Writer = os.Stderr
-)
-
-var (
- DebugPrefix string = "<7>[DEBUG] "
- InfoPrefix string = "<6>[INFO] "
- WarnPrefix string = "<4>[WARNING] "
- ErrPrefix string = "<3>[ERROR] "
- CritPrefix string = "<2>[CRITICAL] "
-)
-
-var (
- DebugLog *log.Logger = log.New(DebugWriter, DebugPrefix, log.LstdFlags)
- InfoLog *log.Logger = log.New(InfoWriter, InfoPrefix, log.LstdFlags|log.Lshortfile)
- WarnLog *log.Logger = log.New(WarnWriter, WarnPrefix, log.LstdFlags|log.Lshortfile)
- ErrLog *log.Logger = log.New(ErrWriter, ErrPrefix, log.LstdFlags|log.Llongfile)
- CritLog *log.Logger = log.New(CritWriter, CritPrefix, log.LstdFlags|log.Llongfile)
-)
-
-var loglevel string = "info"
-
-/* CONFIG */
-
-func Init(lvl string, logdate bool) {
- // Discard output for all writers below the selected loglevel; the selected level and above are always written.
- switch lvl {
- case "crit":
- ErrWriter = io.Discard
- fallthrough
- case "err":
- WarnWriter = io.Discard
- fallthrough
- case "warn":
- InfoWriter = io.Discard
- fallthrough
- case "info":
- DebugWriter = io.Discard
- case "debug":
- // Nothing to do...
- break
- default:
- fmt.Printf("pkg/log: Flag 'loglevel' has invalid value %#v\npkg/log: Will use default loglevel '%s'\n", lvl, loglevel)
- }
-
- if !logdate {
- DebugLog = log.New(DebugWriter, DebugPrefix, 0)
- InfoLog = log.New(InfoWriter, InfoPrefix, log.Lshortfile)
- WarnLog = log.New(WarnWriter, WarnPrefix, log.Lshortfile)
- ErrLog = log.New(ErrWriter, ErrPrefix, log.Llongfile)
- CritLog = log.New(CritWriter, CritPrefix, log.Llongfile)
- } else {
- DebugLog = log.New(DebugWriter, DebugPrefix, log.LstdFlags)
- InfoLog = log.New(InfoWriter, InfoPrefix, log.LstdFlags|log.Lshortfile)
- WarnLog = log.New(WarnWriter, WarnPrefix, log.LstdFlags|log.Lshortfile)
- ErrLog = log.New(ErrWriter, ErrPrefix, log.LstdFlags|log.Llongfile)
- CritLog = log.New(CritWriter, CritPrefix, log.LstdFlags|log.Llongfile)
- }
-
- loglevel = lvl
-}
-
-/* HELPER */
-
-func Loglevel() string {
- return loglevel
-}
-
-/* PRIVATE HELPER */
-
-// Return unformatted string
-func printStr(v ...interface{}) string {
- return fmt.Sprint(v...)
-}
-
-// Return formatted string
-func printfStr(format string, v ...interface{}) string {
- return fmt.Sprintf(format, v...)
-}
-
-/* PRINT */
-
-// Prints to STDOUT without string formatting; application continues.
-// Used for special cases not requiring log information like date or location.
-func Print(v ...interface{}) {
- fmt.Fprintln(os.Stdout, v...)
-}
-
-// Prints to STDOUT without string formatting; application exits with error code 0.
-// Used for exiting successfully with a message after an expected outcome, e.g. successful single-call application runs.
-func Exit(v ...interface{}) {
- fmt.Fprintln(os.Stdout, v...)
- os.Exit(0)
-}
-
-// Prints to STDOUT without string formatting; application exits with error code 1.
-// Used for terminating with a message after expected errors, e.g. wrong arguments or failures during init().
-func Abort(v ...interface{}) {
- fmt.Fprintln(os.Stdout, v...)
- os.Exit(1)
-}
-
-// Prints to DEBUG writer without string formatting; application continues.
-// Used for logging additional information, primarily for development.
-func Debug(v ...interface{}) {
- DebugLog.Output(2, printStr(v...))
-}
-
-// Prints to INFO writer without string formatting; application continues.
-// Used for logging additional information, e.g. notable returns or common fail-cases.
-func Info(v ...interface{}) {
- InfoLog.Output(2, printStr(v...))
-}
-
-// Prints to WARNING writer without string formatting; application continues.
-// Used for logging important information, e.g. uncommon edge-cases or administration related information.
-func Warn(v ...interface{}) {
- WarnLog.Output(2, printStr(v...))
-}
-
-// Prints to ERROR writer without string formatting; application continues.
-// Used for logging errors, but code still can return default(s) or nil.
-func Error(v ...interface{}) {
- ErrLog.Output(2, printStr(v...))
-}
-
-// Prints to CRITICAL writer without string formatting; application exits with error code 1.
-// Used for terminating on unexpected errors with date and code location.
-func Fatal(v ...interface{}) {
- CritLog.Output(2, printStr(v...))
- os.Exit(1)
-}
-
-// Prints to PANIC function without string formatting; application exits with panic.
-// Used for terminating on unexpected errors with stacktrace.
-func Panic(v ...interface{}) {
- panic(printStr(v...))
-}
-
-/* PRINT FORMAT*/
-
-// Prints to STDOUT with string formatting; application continues.
-// Used for special cases not requiring log information like date or location.
-func Printf(format string, v ...interface{}) {
- fmt.Fprintf(os.Stdout, format, v...)
-}
-
-// Prints to STDOUT with string formatting; application exits with error code 0.
-// Used for exiting successfully with a message after an expected outcome, e.g. successful single-call application runs.
-func Exitf(format string, v ...interface{}) {
- fmt.Fprintf(os.Stdout, format, v...)
- os.Exit(0)
-}
-
-// Prints to STDOUT with string formatting; application exits with error code 1.
-// Used for terminating with a message after expected errors, e.g. wrong arguments or failures during init().
-func Abortf(format string, v ...interface{}) {
- fmt.Fprintf(os.Stdout, format, v...)
- os.Exit(1)
-}
-
-// Prints to DEBUG writer with string formatting; application continues.
-// Used for logging additional information, primarily for development.
-func Debugf(format string, v ...interface{}) {
- DebugLog.Output(2, printfStr(format, v...))
-}
-
-// Prints to INFO writer with string formatting; application continues.
-// Used for logging additional information, e.g. notable returns or common fail-cases.
-func Infof(format string, v ...interface{}) {
- InfoLog.Output(2, printfStr(format, v...))
-}
-
-// Prints to WARNING writer with string formatting; application continues.
-// Used for logging important information, e.g. uncommon edge-cases or administration related information.
-func Warnf(format string, v ...interface{}) {
- WarnLog.Output(2, printfStr(format, v...))
-}
-
-// Prints to ERROR writer with string formatting; application continues.
-// Used for logging errors, but code still can return default(s) or nil.
-func Errorf(format string, v ...interface{}) {
- ErrLog.Output(2, printfStr(format, v...))
-}
-
-// Prints to CRITICAL writer with string formatting; application exits with error code 1.
-// Used for terminating on unexpected errors with date and code location.
-func Fatalf(format string, v ...interface{}) {
- CritLog.Output(2, printfStr(format, v...))
- os.Exit(1)
-}
-
-// Prints to PANIC function with string formatting; application exits with panic.
-// Used for terminating on unexpected errors with stacktrace.
-func Panicf(format string, v ...interface{}) {
- panic(printfStr(format, v...))
-}
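The level-gated logger removed here is assumed to live on in cc-lib with a compatible surface; `Warn`/`Error` are visible elsewhere in this diff, while the `Init` signature below is an assumption carried over from the deleted API:

```go
package main

import cclog "github.com/ClusterCockpit/cc-lib/ccLogger"

func main() {
	// Assumed to mirror the removed Init(lvl string, logdate bool).
	cclog.Init("info", false)
	cclog.Warn("writers below the level are discarded; this one is written")
}
```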
diff --git a/pkg/lrucache/README.md b/pkg/lrucache/README.md
deleted file mode 100644
index 855a185..0000000
--- a/pkg/lrucache/README.md
+++ /dev/null
@@ -1,124 +0,0 @@
-# In-Memory LRU Cache for Golang Applications
-
-This library can be embedded into your existing Go applications
-and play the role *Memcached* or *Redis* might play for others.
-It is inspired by [PHP Symfony's Cache Components](https://symfony.com/doc/current/components/cache/adapters/array_cache_adapter.html),
-having a similar API. This library cannot be used for persistence,
-is not yet thoroughly tested, and is a bit special in a few ways
-described below (especially with regard to memory usage/`size`).
-
-In addition to the interface described below, an `http.Handler` that can be used as middleware is provided as well.
-
-- Advantages:
- - Anything (`interface{}`) can be stored as value
- - As it lives in the application itself, no serialization or de-serialization is needed
- - As it lives in the application itself, no memory moving/networking is needed
- - The computation of a new value for a key does __not__ block the full cache (only the key)
-- Disadvantages:
- - You have to provide a size estimate for every value
- - __This size estimate should not change (i.e. values should not mutate)__
- - The cache can only be accessed by one application
-
-## Example
-
-```go
-// Go look at the godocs and ./cache_test.go for more documentation and examples
-
-maxMemory := 1000
-cache := lrucache.New(maxMemory)
-
-bar = cache.Get("foo", func () (value interface{}, ttl time.Duration, size int) {
- return "bar", 10 * time.Second, len("bar")
-}).(string)
-
-// bar == "bar"
-
-bar = cache.Get("foo", func () (value interface{}, ttl time.Duration, size int) {
- panic("will not be called")
-}).(string)
-```
-
-## Why does `cache.Get` take a function as argument?
-
-*Using the mechanism described below is optional; the second argument to `Get` can be `nil`, and there is a `Put` function as well.*
-
-Because this library is meant to be used by multi-threaded applications, where the following
-would result in the same data being fetched twice if both goroutines run in parallel:
-
-```go
-// This code shows what could happen with other cache libraries
-c := lrucache.New(MAX_CACHE_ENTRIES)
-
-for i := 0; i < 2; i++ {
- go func(){
- // This code will run twice in different goroutines
- // and could overlap. As `fetchData` probably does some
- // I/O and takes a long time, the probability of both
- // goroutines calling `fetchData` is very high!
- url := "http://example.com/foo"
- contents := c.Get(url)
- if contents == nil {
- contents = fetchData(url)
- c.Set(url, contents)
- }
-
- handleData(contents.([]byte))
- }()
-}
-
-```
-
-Here, if one wanted to make sure that only one of both goroutines fetches the data,
-the programmer would need to build his own synchronization. That would suck!
-
-```go
-c := lrucache.New(MAX_CACHE_SIZE)
-
-for i := 0; i < 2; i++ {
- go func(){
- url := "http://example.com/foo"
- contents := c.Get(url, func() (interface{}, time.Duration, int) {
- // This closure will only be called once!
- // If another goroutine calls `c.Get` while this closure
- // is still being executed, it will wait.
- buf := fetchData(url)
- return buf, 100 * time.Second, len(buf)
- })
-
- handleData(contents.([]byte))
- }()
-}
-
-```
-
-This is much better: fewer resources are wasted and synchronization is handled
-by the library. If the closure is called, the call happens synchronously; while
-it executes, all other cache keys can still be accessed without having to wait
-for it to finish.
-
-## How `Get` works
-
-The closure passed to `Get` will be called if the value asked for is not cached or
-expired. It should return the following values:
-
-- The value corresponding to that key and to be stored in the cache
-- The time to live for that value (how long until it expires and needs to be recomputed)
-- A size estimate
-
-When `maxMemory` is reached, cache entries need to be evicted. Theoretically,
-it would be possible to use reflection on every value placed in the cache
-to get its exact size in bytes. This would be very expensive and slow, though.
-Also, size can change. Instead of this library calculating the size in bytes, you, the user,
-have to provide a size for every value in whatever unit you like (as long as it is the same unit everywhere).
-
-Suggestions on what to use as size: `len(str)` for strings, `len(slice) * size_of_slice_type`, etc. It is possible
-to use `1` as the size for every entry; in that case, at most `maxMemory` entries will be in the cache at the same time.
-
-## Effects on GC
-
-Because of the way a garbage collector decides when to run ([explained in the
-runtime package](https://pkg.go.dev/runtime)), having large amounts of data
-sitting in your cache might increase the memory consumption of your process by
-two times the maximum size of the cache. You can decrease the *target
-percentage* to reduce the effect, but then you might have negative performance
-effects when your cache is not filled.
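The cache itself presumably moves to cc-lib along with the other extracted packages (the import path below is an assumption). The core `Get` contract from the README, as a compact sketch:

```go
package main

import (
	"fmt"
	"time"

	"github.com/ClusterCockpit/cc-lib/lrucache" // assumed new home of this package
)

func main() {
	c := lrucache.New(1000) // budget in caller-defined size units

	v := c.Get("answer", func() (interface{}, time.Duration, int) {
		return 42, 10 * time.Second, 1 // value, ttl, size estimate
	})
	fmt.Println(v.(int)) // 42; a second Get within the ttl never calls the closure
}
```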
diff --git a/pkg/lrucache/cache.go b/pkg/lrucache/cache.go
deleted file mode 100644
index 220c53b..0000000
--- a/pkg/lrucache/cache.go
+++ /dev/null
@@ -1,292 +0,0 @@
-// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
-// Use of this source code is governed by a MIT-style
-// license that can be found in the LICENSE file.
-package lrucache
-
-import (
- "sync"
- "time"
-)
-
-// Type of the closure that must be passed to `Get` to
-// compute the value in case it is not cached.
-//
-// The returned values are the computed value to be stored in the cache,
-// the duration until it expires, and a size estimate.
-type ComputeValue func() (value interface{}, ttl time.Duration, size int)
-
-type cacheEntry struct {
- key string
- value interface{}
-
- expiration time.Time
- size int
- waitingForComputation int
-
- next, prev *cacheEntry
-}
-
-type Cache struct {
- mutex sync.Mutex
- cond *sync.Cond
- maxmemory, usedmemory int
- entries map[string]*cacheEntry
- head, tail *cacheEntry
-}
-
-// Return a new instance of an LRU in-memory cache.
-// Read [the README](./README.md) for more information
-// on what is going on with `maxmemory`.
-func New(maxmemory int) *Cache {
- cache := &Cache{
- maxmemory: maxmemory,
- entries: map[string]*cacheEntry{},
- }
- cache.cond = sync.NewCond(&cache.mutex)
- return cache
-}
-
-// Return the cached value for key `key` or call `computeValue` and
-// store its return value in the cache. If called, the closure will be
-// called synchronously and __shall not call methods on the same cache__
-// or a deadlock might occur. If `computeValue` is nil, the cache is checked
-// and nil is returned if no entry is found. If another goroutine is currently
-// computing that value, the result is waited for.
-func (c *Cache) Get(key string, computeValue ComputeValue) interface{} {
- now := time.Now()
-
- c.mutex.Lock()
- if entry, ok := c.entries[key]; ok {
- // The expiration not being set is what shows us that
- // the computation of that value is still ongoing.
- for entry.expiration.IsZero() {
- entry.waitingForComputation += 1
- c.cond.Wait()
- entry.waitingForComputation -= 1
- }
-
- if now.After(entry.expiration) {
- if !c.evictEntry(entry) {
- if entry.expiration.IsZero() {
- panic("LRUCACHE/CACHE > cache entry that shoud have been waited for could not be evicted.")
- }
- c.mutex.Unlock()
- return entry.value
- }
- } else {
- if entry != c.head {
- c.unlinkEntry(entry)
- c.insertFront(entry)
- }
- c.mutex.Unlock()
- return entry.value
- }
- }
-
- if computeValue == nil {
- c.mutex.Unlock()
- return nil
- }
-
- entry := &cacheEntry{
- key: key,
- waitingForComputation: 1,
- }
-
- c.entries[key] = entry
-
- hasPaniced := true
- defer func() {
- if hasPaniced {
- c.mutex.Lock()
- delete(c.entries, key)
- entry.expiration = now
- entry.waitingForComputation -= 1
- }
- c.mutex.Unlock()
- }()
-
- c.mutex.Unlock()
- value, ttl, size := computeValue()
- c.mutex.Lock()
- hasPaniced = false
-
- entry.value = value
- entry.expiration = now.Add(ttl)
- entry.size = size
- entry.waitingForComputation -= 1
-
- // Only broadcast if other goroutines are actually waiting
- // for a result.
- if entry.waitingForComputation > 0 {
- // TODO: Have more than one condition variable so that there are
- // fewer unnecessary wakeups.
- c.cond.Broadcast()
- }
-
- c.usedmemory += size
- c.insertFront(entry)
-
- // Evict only entries with a size of more than zero.
- // This is the only loop in the implementation outside of the `Keys`
- // method.
- evictionCandidate := c.tail
- for c.usedmemory > c.maxmemory && evictionCandidate != nil {
- nextCandidate := evictionCandidate.prev
- if (evictionCandidate.size > 0 || now.After(evictionCandidate.expiration)) &&
- evictionCandidate.waitingForComputation == 0 {
- c.evictEntry(evictionCandidate)
- }
- evictionCandidate = nextCandidate
- }
-
- return value
-}
-
-// Put a new value in the cache. If another goroutine is calling `Get` and
-// computing the value, this function waits for the computation to be done
-// before it overwrites the value.
-func (c *Cache) Put(key string, value interface{}, size int, ttl time.Duration) {
- now := time.Now()
- c.mutex.Lock()
- defer c.mutex.Unlock()
-
- if entry, ok := c.entries[key]; ok {
- for entry.expiration.IsZero() {
- entry.waitingForComputation += 1
- c.cond.Wait()
- entry.waitingForComputation -= 1
- }
-
- c.usedmemory -= entry.size
- entry.expiration = now.Add(ttl)
- entry.size = size
- entry.value = value
- c.usedmemory += entry.size
-
- c.unlinkEntry(entry)
- c.insertFront(entry)
- return
- }
-
- entry := &cacheEntry{
- key: key,
- value: value,
- expiration: now.Add(ttl),
- }
- c.entries[key] = entry
- c.insertFront(entry)
-}
-
-// Remove the value at key `key` from the cache.
-// Return true if the key was in the cache and false
-// otherwise. It is possible that true is returned even
-// though the value already expired.
-// It is possible that false is returned even though the value
-// will show up in the cache if this function is called on a key
-// while that key is being computed.
-func (c *Cache) Del(key string) bool {
- c.mutex.Lock()
- defer c.mutex.Unlock()
-
- if entry, ok := c.entries[key]; ok {
- return c.evictEntry(entry)
- }
- return false
-}
-
-// Call f for every entry in the cache. Some sanity checks
-// and eviction of expired keys are done as well.
-// The cache is fully locked for the complete duration of this call!
-func (c *Cache) Keys(f func(key string, val interface{})) {
- c.mutex.Lock()
- defer c.mutex.Unlock()
-
- now := time.Now()
-
- size := 0
- for key, e := range c.entries {
- if key != e.key {
- panic("LRUCACHE/CACHE > key mismatch")
- }
-
- if now.After(e.expiration) {
- if c.evictEntry(e) {
- continue
- }
- }
-
- if e.prev != nil {
- if e.prev.next != e {
- panic("LRUCACHE/CACHE > list corrupted")
- }
- }
-
- if e.next != nil {
- if e.next.prev != e {
- panic("LRUCACHE/CACHE > list corrupted")
- }
- }
-
- size += e.size
- f(key, e.value)
- }
-
- if size != c.usedmemory {
- panic("LRUCACHE/CACHE > size calculations failed")
- }
-
- if c.head != nil {
- if c.tail == nil || c.head.prev != nil {
- panic("LRUCACHE/CACHE > head/tail corrupted")
- }
- }
-
- if c.tail != nil {
- if c.head == nil || c.tail.next != nil {
- panic("LRUCACHE/CACHE > head/tail corrupted")
- }
- }
-}
-
-func (c *Cache) insertFront(e *cacheEntry) {
- e.next = c.head
- c.head = e
-
- e.prev = nil
- if e.next != nil {
- e.next.prev = e
- }
-
- if c.tail == nil {
- c.tail = e
- }
-}
-
-func (c *Cache) unlinkEntry(e *cacheEntry) {
- if e == c.head {
- c.head = e.next
- }
- if e.prev != nil {
- e.prev.next = e.next
- }
- if e.next != nil {
- e.next.prev = e.prev
- }
- if e == c.tail {
- c.tail = e.prev
- }
-}
-
-func (c *Cache) evictEntry(e *cacheEntry) bool {
- if e.waitingForComputation != 0 {
- // panic("LRUCACHE/CACHE > cannot evict this entry as other goroutines need the value")
- return false
- }
-
- c.unlinkEntry(e)
- c.usedmemory -= e.size
- delete(c.entries, e.key)
- return true
-}
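Grounded in the implementation above: `Get` with a nil closure is a pure lookup and returns nil on a miss, and `Put` stores without computing. A sketch, under the same assumed import path as before:

```go
package main

import (
	"fmt"
	"time"

	"github.com/ClusterCockpit/cc-lib/lrucache" // assumed new home of this package
)

func main() {
	c := lrucache.New(100)
	c.Put("k", "v", len("v"), time.Minute)

	if v := c.Get("k", nil); v != nil {
		fmt.Println(v.(string)) // "v"
	}
	fmt.Println(c.Get("missing", nil)) // <nil>
}
```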
diff --git a/pkg/lrucache/cache_test.go b/pkg/lrucache/cache_test.go
deleted file mode 100644
index 8bff40e..0000000
--- a/pkg/lrucache/cache_test.go
+++ /dev/null
@@ -1,223 +0,0 @@
-// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
-// Use of this source code is governed by a MIT-style
-// license that can be found in the LICENSE file.
-package lrucache
-
-import (
- "sync"
- "sync/atomic"
- "testing"
- "time"
-)
-
-func TestBasics(t *testing.T) {
- cache := New(123)
-
- value1 := cache.Get("foo", func() (interface{}, time.Duration, int) {
- return "bar", 1 * time.Second, 0
- })
-
- if value1.(string) != "bar" {
- t.Error("cache returned wrong value")
- }
-
- value2 := cache.Get("foo", func() (interface{}, time.Duration, int) {
- t.Error("value should be cached")
- return "", 0, 0
- })
-
- if value2.(string) != "bar" {
- t.Error("cache returned wrong value")
- }
-
- existed := cache.Del("foo")
- if !existed {
- t.Error("delete did not work as expected")
- }
-
- value3 := cache.Get("foo", func() (interface{}, time.Duration, int) {
- return "baz", 1 * time.Second, 0
- })
-
- if value3.(string) != "baz" {
- t.Error("cache returned wrong value")
- }
-
- cache.Keys(func(key string, value interface{}) {
- if key != "foo" || value.(string) != "baz" {
- t.Error("cache corrupted")
- }
- })
-}
-
-func TestExpiration(t *testing.T) {
- cache := New(123)
-
- failIfCalled := func() (interface{}, time.Duration, int) {
- t.Error("Value should be cached!")
- return "", 0, 0
- }
-
- val1 := cache.Get("foo", func() (interface{}, time.Duration, int) {
- return "bar", 5 * time.Millisecond, 0
- })
- val2 := cache.Get("bar", func() (interface{}, time.Duration, int) {
- return "foo", 20 * time.Millisecond, 0
- })
-
- val3 := cache.Get("foo", failIfCalled).(string)
- val4 := cache.Get("bar", failIfCalled).(string)
-
- if val1 != val3 || val3 != "bar" || val2 != val4 || val4 != "foo" {
- t.Error("Wrong values returned")
- }
-
- time.Sleep(10 * time.Millisecond)
-
- val5 := cache.Get("foo", func() (interface{}, time.Duration, int) {
- return "baz", 0, 0
- })
- val6 := cache.Get("bar", failIfCalled)
-
- if val5.(string) != "baz" || val6.(string) != "foo" {
- t.Error("unexpected values")
- }
-
- cache.Keys(func(key string, val interface{}) {
- if key != "bar" || val.(string) != "foo" {
- t.Error("wrong value expired")
- }
- })
-
- time.Sleep(15 * time.Millisecond)
- cache.Keys(func(key string, val interface{}) {
- t.Error("cache should be empty now")
- })
-}
-
-func TestEviction(t *testing.T) {
- c := New(100)
- failIfCalled := func() (interface{}, time.Duration, int) {
- t.Error("Value should be cached!")
- return "", 0, 0
- }
-
- v1 := c.Get("foo", func() (interface{}, time.Duration, int) {
- return "bar", 1 * time.Second, 1000
- })
-
- v2 := c.Get("foo", func() (interface{}, time.Duration, int) {
- return "baz", 1 * time.Second, 1000
- })
-
- if v1.(string) != "bar" || v2.(string) != "baz" {
- t.Error("wrong values returned")
- }
-
- c.Keys(func(key string, val interface{}) {
- t.Error("cache should be empty now")
- })
-
- _ = c.Get("A", func() (interface{}, time.Duration, int) {
- return "a", 1 * time.Second, 50
- })
-
- _ = c.Get("B", func() (interface{}, time.Duration, int) {
- return "b", 1 * time.Second, 50
- })
-
- _ = c.Get("A", failIfCalled)
- _ = c.Get("B", failIfCalled)
- _ = c.Get("C", func() (interface{}, time.Duration, int) {
- return "c", 1 * time.Second, 50
- })
-
- _ = c.Get("B", failIfCalled)
- _ = c.Get("C", failIfCalled)
-
- v4 := c.Get("A", func() (interface{}, time.Duration, int) {
- return "evicted", 1 * time.Second, 25
- })
-
- if v4.(string) != "evicted" {
- t.Error("value should have been evicted")
- }
-
- c.Keys(func(key string, val interface{}) {
- if key != "A" && key != "C" {
- t.Errorf("'%s' was not expected", key)
- }
- })
-}
-
-// I know that this is a shitty test;
-// time is relative and unreliable.
-func TestConcurrency(t *testing.T) {
- c := New(100)
- var wg sync.WaitGroup
-
- numActions := 20000
- numThreads := 4
- wg.Add(numThreads)
-
- var concurrentModifications int32 = 0
-
- for i := 0; i < numThreads; i++ {
- go func() {
- for j := 0; j < numActions; j++ {
- _ = c.Get("key", func() (interface{}, time.Duration, int) {
- m := atomic.AddInt32(&concurrentModifications, 1)
- if m != 1 {
- t.Error("only one goroutine at a time should calculate a value for the same key")
- }
-
- time.Sleep(1 * time.Millisecond)
- atomic.AddInt32(&concurrentModifications, -1)
- return "value", 3 * time.Millisecond, 1
- })
- }
-
- wg.Done()
- }()
- }
-
- wg.Wait()
-
- c.Keys(func(key string, val interface{}) {})
-}
-
-func TestPanic(t *testing.T) {
- c := New(100)
-
- c.Put("bar", "baz", 3, 1*time.Minute)
-
- testpanic := func() {
- defer func() {
- if r := recover(); r != nil {
- if r.(string) != "oops" {
- t.Fatal("unexpected panic value")
- }
- }
- }()
-
- _ = c.Get("foo", func() (value interface{}, ttl time.Duration, size int) {
- panic("oops")
- })
-
- t.Fatal("should have paniced!")
- }
-
- testpanic()
-
- v := c.Get("bar", func() (value interface{}, ttl time.Duration, size int) {
- t.Fatal("should not be called!")
- return nil, 0, 0
- })
-
- if v.(string) != "baz" {
- t.Fatal("unexpected value")
- }
-
- testpanic()
-}
diff --git a/pkg/lrucache/handler.go b/pkg/lrucache/handler.go
deleted file mode 100644
index 90b7527..0000000
--- a/pkg/lrucache/handler.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
-// Use of this source code is governed by a MIT-style
-// license that can be found in the LICENSE file.
-package lrucache
-
-import (
- "bytes"
- "net/http"
- "strconv"
- "time"
-)
-
-// HttpHandler can be used as HTTP middleware in order to cache responses,
-// for example static assets. By default, the request's raw URI is used as the key and nothing else.
-// Results with a status code other than 200 are cached with a TTL of zero seconds,
-// so they are effectively re-fetched as soon as the current fetch is done and a new request
-// for that URI arrives.
-type HttpHandler struct {
- cache *Cache
- fetcher http.Handler
- defaultTTL time.Duration
-
- // Allows overriding the way the cache key is extracted
- // from the http request. The default is to use the RequestURI.
- CacheKey func(*http.Request) string
-}
-
-var _ http.Handler = (*HttpHandler)(nil)
-
-type cachedResponseWriter struct {
- w http.ResponseWriter
- statusCode int
- buf bytes.Buffer
-}
-
-type cachedResponse struct {
- headers http.Header
- statusCode int
- data []byte
- fetched time.Time
-}
-
-var _ http.ResponseWriter = (*cachedResponseWriter)(nil)
-
-func (crw *cachedResponseWriter) Header() http.Header {
- return crw.w.Header()
-}
-
-func (crw *cachedResponseWriter) Write(bytes []byte) (int, error) {
- return crw.buf.Write(bytes)
-}
-
-func (crw *cachedResponseWriter) WriteHeader(statusCode int) {
- crw.statusCode = statusCode
-}
-
-// Returns a new caching HttpHandler. If no entry in the cache is found or it was too old, `fetcher` is called with
-// a modified http.ResponseWriter and the response is stored in the cache. If `fetcher` sets the "Expires" header,
-// the ttl is set appropriately (otherwise, the default ttl passed as argument here is used).
-// `maxmemory` is given in bytes.
-func NewHttpHandler(maxmemory int, ttl time.Duration, fetcher http.Handler) *HttpHandler {
- return &HttpHandler{
- cache: New(maxmemory),
- defaultTTL: ttl,
- fetcher: fetcher,
- CacheKey: func(r *http.Request) string {
- return r.RequestURI
- },
- }
-}
-
-// gorilla/mux style middleware:
-func NewMiddleware(maxmemory int, ttl time.Duration) func(http.Handler) http.Handler {
- return func(next http.Handler) http.Handler {
- return NewHttpHandler(maxmemory, ttl, next)
- }
-}
-
-// Tries to serve a response to r from cache or calls next and stores the response to the cache for the next time.
-func (h *HttpHandler) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
- if r.Method != http.MethodGet {
- h.fetcher.ServeHTTP(rw, r) // pass non-GET requests straight through; calling h.ServeHTTP here would recurse forever
- return
- }
-
- cr := h.cache.Get(h.CacheKey(r), func() (interface{}, time.Duration, int) {
- crw := &cachedResponseWriter{
- w: rw,
- statusCode: 200,
- buf: bytes.Buffer{},
- }
-
- h.fetcher.ServeHTTP(crw, r)
-
- cr := &cachedResponse{
- headers: rw.Header().Clone(),
- statusCode: crw.statusCode,
- data: crw.buf.Bytes(),
- fetched: time.Now(),
- }
- cr.headers.Set("Content-Length", strconv.Itoa(len(cr.data)))
-
- ttl := h.defaultTTL
- if cr.statusCode != http.StatusOK {
- ttl = 0
- } else if cr.headers.Get("Expires") != "" {
- if expires, err := http.ParseTime(cr.headers.Get("Expires")); err == nil {
- ttl = time.Until(expires)
- }
- }
-
- return cr, ttl, len(cr.data)
- }).(*cachedResponse)
-
- for key, val := range cr.headers {
- rw.Header()[key] = val
- }
-
- cr.headers.Set("Age", strconv.Itoa(int(time.Since(cr.fetched).Seconds())))
-
- rw.WriteHeader(cr.statusCode)
- rw.Write(cr.data)
-}
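Usage sketch for the middleware deleted above, as its own comments describe it: GET responses are cached keyed on the raw RequestURI, and non-200 results get a zero TTL (import path again assumed):

```go
package main

import (
	"log"
	"net/http"
	"time"

	"github.com/ClusterCockpit/cc-lib/lrucache" // assumed new home of this package
)

func main() {
	assets := http.FileServer(http.Dir("./web/frontend"))
	// 16 MiB budget (maxmemory is in bytes here), 1 minute default TTL.
	cached := lrucache.NewHttpHandler(16*1024*1024, time.Minute, assets)
	log.Fatal(http.ListenAndServe("localhost:8080", cached))
}
```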
diff --git a/pkg/lrucache/handler_test.go b/pkg/lrucache/handler_test.go
deleted file mode 100644
index d908339..0000000
--- a/pkg/lrucache/handler_test.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
-// Use of this source code is governed by a MIT-style
-// license that can be found in the LICENSE file.
-package lrucache
-
-import (
- "bytes"
- "net/http"
- "net/http/httptest"
- "testing"
- "time"
-)
-
-func TestHandlerBasics(t *testing.T) {
- r := httptest.NewRequest(http.MethodGet, "/test1", nil)
- rw := httptest.NewRecorder()
- shouldBeCalled := true
-
- handler := NewHttpHandler(1000, time.Second, http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
- rw.Write([]byte("Hello World!"))
-
- if !shouldBeCalled {
- t.Fatal("fetcher expected to be called")
- }
- }))
-
- handler.ServeHTTP(rw, r)
-
- if rw.Code != 200 {
- t.Fatal("unexpected status code")
- }
-
- if !bytes.Equal(rw.Body.Bytes(), []byte("Hello World!")) {
- t.Fatal("unexpected body")
- }
-
- rw = httptest.NewRecorder()
- shouldBeCalled = false
- handler.ServeHTTP(rw, r)
-
- if rw.Code != 200 {
- t.Fatal("unexpected status code")
- }
-
- if !bytes.Equal(rw.Body.Bytes(), []byte("Hello World!")) {
- t.Fatal("unexpected body")
- }
-}
-
-// func TestHandlerExpiration(t *testing.T) {
-// r := httptest.NewRequest(http.MethodGet, "/test1", nil)
-// rw := httptest.NewRecorder()
-// i := 1
-// now := time.Now()
-
-// handler := NewHttpHandler(1000, 1*time.Second, http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
-// rw.Header().Set("Expires", now.Add(10*time.Millisecond).Format(http.TimeFormat))
-// rw.Write([]byte(strconv.Itoa(i)))
-// }))
-
-// handler.ServeHTTP(rw, r)
-// if !(rw.Body.String() == strconv.Itoa(1)) {
-// t.Fatal("unexpected body")
-// }
-
-// i += 1
-
-// time.Sleep(11 * time.Millisecond)
-// rw = httptest.NewRecorder()
-// handler.ServeHTTP(rw, r)
-// if !(rw.Body.String() == strconv.Itoa(1)) {
-// t.Fatal("unexpected body")
-// }
-// }
diff --git a/pkg/resampler/resampler.go b/pkg/resampler/resampler.go
deleted file mode 100644
index ebc7e88..0000000
--- a/pkg/resampler/resampler.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package resampler
-
-import (
- "errors"
- "fmt"
- "math"
-
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
-)
-
-func SimpleResampler(data []schema.Float, old_frequency int64, new_frequency int64) ([]schema.Float, int64, error) {
- if old_frequency == 0 || new_frequency == 0 || new_frequency <= old_frequency {
- return data, old_frequency, nil
- }
-
- if new_frequency%old_frequency != 0 {
- return nil, 0, errors.New("new sampling frequency should be multiple of the old frequency")
- }
-
- var step int = int(new_frequency / old_frequency)
- var new_data_length = len(data) / step
-
- if new_data_length == 0 || len(data) < 100 || new_data_length >= len(data) {
- return data, old_frequency, nil
- }
-
- new_data := make([]schema.Float, new_data_length)
-
- for i := 0; i < new_data_length; i++ {
- new_data[i] = data[i*step]
- }
-
- return new_data, new_frequency, nil
-}
-
-// Inspired by one of the algorithms from https://skemman.is/bitstream/1946/15343/3/SS_MSthesis.pdf
-// Adapted from https://github.com/haoel/downsampling/blob/master/core/lttb.go
-func LargestTriangleThreeBucket(data []schema.Float, old_frequency int, new_frequency int) ([]schema.Float, int, error) {
-
- if old_frequency == 0 || new_frequency == 0 || new_frequency <= old_frequency {
- return data, old_frequency, nil
- }
-
- if new_frequency%old_frequency != 0 {
- return nil, 0, fmt.Errorf("new sampling frequency %d should be a multiple of the old frequency %d", new_frequency, old_frequency)
- }
-
- var step int = int(new_frequency / old_frequency)
- var new_data_length = len(data) / step
-
- if new_data_length == 0 || len(data) < 100 || new_data_length >= len(data) {
- return data, old_frequency, nil
- }
-
- new_data := make([]schema.Float, 0, new_data_length)
-
- // Bucket size. Leave room for start and end data points
- bucketSize := float64(len(data)-2) / float64(new_data_length-2)
-
- new_data = append(new_data, data[0]) // Always add the first point
-
- // We keep three index pointers:
- // > bucketLow - the current bucket's beginning location
- // > bucketMiddle - the current bucket's ending location,
- //                  also the beginning location of the next bucket
- // > bucketHigh - the next bucket's ending location.
- bucketLow := 1
- bucketMiddle := int(math.Floor(bucketSize)) + 1
-
- var prevMaxAreaPoint int
-
- for i := 0; i < new_data_length-2; i++ {
-
- bucketHigh := int(math.Floor(float64(i+2)*bucketSize)) + 1
- if bucketHigh >= len(data)-1 {
- bucketHigh = len(data) - 2
- }
-
- // Calculate point average for next bucket (containing c)
- avgPointX, avgPointY := calculateAverageDataPoint(data[bucketMiddle:bucketHigh+1], int64(bucketMiddle))
-
- // Get the range for current bucket
- currBucketStart := bucketLow
- currBucketEnd := bucketMiddle
-
- // Point a
- pointX := prevMaxAreaPoint
- pointY := data[prevMaxAreaPoint]
-
- maxArea := -1.0
-
- var maxAreaPoint int
- flag_ := 0
- for ; currBucketStart < currBucketEnd; currBucketStart++ {
-
- area := calculateTriangleArea(schema.Float(pointX), pointY, avgPointX, avgPointY, schema.Float(currBucketStart), data[currBucketStart])
- if area > maxArea {
- maxArea = area
- maxAreaPoint = currBucketStart
- }
- if math.IsNaN(float64(avgPointY)) {
- flag_ = 1
- }
- }
-
- if flag_ == 1 {
- new_data = append(new_data, schema.NaN) // Pick this point from the bucket
-
- } else {
- new_data = append(new_data, data[maxAreaPoint]) // Pick this point from the bucket
- }
- prevMaxAreaPoint = maxAreaPoint // this maxArea point is the next bucket's prevMaxAreaPoint
-
- // Move to the next window
- bucketLow = bucketMiddle
- bucketMiddle = bucketHigh
- }
-
- new_data = append(new_data, data[len(data)-1]) // Always add last
-
- return new_data, new_frequency, nil
-}
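The removed resampler's contract, for reference: the target frequency must be an integer multiple of the source frequency, and series shorter than 100 points pass through unchanged. A sketch, assuming the package moves to cc-lib alongside `schema`:

```go
package main

import (
	"fmt"

	"github.com/ClusterCockpit/cc-lib/resampler" // assumed new home of this package
	"github.com/ClusterCockpit/cc-lib/schema"
)

func main() {
	data := make([]schema.Float, 600) // 600 samples at a 60s timestep
	out, freq, err := resampler.SimpleResampler(data, 60, 300)
	fmt.Println(len(out), freq, err) // 120 300 <nil>; every 5th sample kept
}
```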
diff --git a/pkg/resampler/util.go b/pkg/resampler/util.go
deleted file mode 100644
index 36d8bed..0000000
--- a/pkg/resampler/util.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package resampler
-
-import (
- "math"
-
- "github.com/ClusterCockpit/cc-backend/pkg/schema"
-)
-
-func calculateTriangleArea(paX, paY, pbX, pbY, pcX, pcY schema.Float) float64 {
- area := ((paX-pcX)*(pbY-paY) - (paX-pbX)*(pcY-paY)) * 0.5
- return math.Abs(float64(area))
-}
-
-func calculateAverageDataPoint(points []schema.Float, xStart int64) (avgX schema.Float, avgY schema.Float) {
- flag := 0
- for _, point := range points {
- avgX += schema.Float(xStart)
- avgY += point
- xStart++
- if math.IsNaN(float64(point)) {
- flag = 1
- }
- }
-
- l := schema.Float(len(points))
-
- avgX /= l
- avgY /= l
-
- if flag == 1 {
- return avgX, schema.NaN
- } else {
- return avgX, avgY
- }
-}
diff --git a/pkg/runtimeEnv/setup.go b/pkg/runtimeEnv/setup.go
index 4cacb18..e23a004 100644
--- a/pkg/runtimeEnv/setup.go
+++ b/pkg/runtimeEnv/setup.go
@@ -1,89 +1,20 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package runtimeEnv
import (
- "bufio"
- "errors"
"fmt"
"os"
"os/exec"
"os/user"
"strconv"
- "strings"
"syscall"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
)
-// Very simple and limited .env file reader.
-// All variable definitions found are directly
-// added to the process's environment.
-func LoadEnv(file string) error {
- f, err := os.Open(file)
- if err != nil {
- log.Error("Error while opening .env file")
- return err
- }
-
- defer f.Close()
- s := bufio.NewScanner(bufio.NewReader(f))
- for s.Scan() {
- line := s.Text()
- if strings.HasPrefix(line, "#") || len(line) == 0 {
- continue
- }
-
- if strings.Contains(line, "#") {
- return errors.New("'#' are only supported at the start of a line")
- }
-
- line = strings.TrimPrefix(line, "export ")
- parts := strings.SplitN(line, "=", 2)
- if len(parts) != 2 {
- return fmt.Errorf("RUNTIME/SETUP > unsupported line: %#v", line)
- }
-
- key := strings.TrimSpace(parts[0])
- val := strings.TrimSpace(parts[1])
- if strings.HasPrefix(val, "\"") {
- if !strings.HasSuffix(val, "\"") {
- return fmt.Errorf("RUNTIME/SETUP > unsupported line: %#v", line)
- }
-
- runes := []rune(val[1 : len(val)-1])
- sb := strings.Builder{}
- for i := 0; i < len(runes); i++ {
- if runes[i] == '\\' {
- i++
- switch runes[i] {
- case 'n':
- sb.WriteRune('\n')
- case 'r':
- sb.WriteRune('\r')
- case 't':
- sb.WriteRune('\t')
- case '"':
- sb.WriteRune('"')
- default:
- return fmt.Errorf("RUNTIME/SETUP > unsupported escape sequence in quoted string: backslash %#v", runes[i])
- }
- continue
- }
- sb.WriteRune(runes[i])
- }
-
- val = sb.String()
- }
-
- os.Setenv(key, val)
- }
-
- return s.Err()
-}
-
// Changes the process's user and group to those
// specified in the config.json. The Go runtime
// takes care of all threads (and not only the calling one)
@@ -92,13 +23,13 @@ func DropPrivileges(username string, group string) error {
if group != "" {
g, err := user.LookupGroup(group)
if err != nil {
- log.Warn("Error while looking up group")
+ cclog.Warn("Error while looking up group")
return err
}
gid, _ := strconv.Atoi(g.Gid)
if err := syscall.Setgid(gid); err != nil {
- log.Warn("Error while setting gid")
+ cclog.Warn("Error while setting gid")
return err
}
}
@@ -106,13 +37,13 @@ func DropPrivileges(username string, group string) error {
if username != "" {
u, err := user.Lookup(username)
if err != nil {
- log.Warn("Error while looking up user")
+ cclog.Warn("Error while looking up user")
return err
}
uid, _ := strconv.Atoi(u.Uid)
if err := syscall.Setuid(uid); err != nil {
- log.Warn("Error while setting uid")
+ cclog.Warn("Error while setting uid")
return err
}
}
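With `LoadEnv` gone, .env parsing presumably moves out of this repository (where it lands is not shown in this hunk); `DropPrivileges` stays. A usage sketch with placeholder user/group names:

```go
package main

import (
	"log"

	"github.com/ClusterCockpit/cc-backend/pkg/runtimeEnv"
)

func main() {
	// Start as root, bind privileged resources first, then drop.
	// "clustercockpit"/"clustercockpit" are hypothetical names.
	if err := runtimeEnv.DropPrivileges("clustercockpit", "clustercockpit"); err != nil {
		log.Fatal(err)
	}
}
```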
diff --git a/pkg/schema/cluster.go b/pkg/schema/cluster.go
deleted file mode 100644
index 322f308..0000000
--- a/pkg/schema/cluster.go
+++ /dev/null
@@ -1,249 +0,0 @@
-// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
-// Use of this source code is governed by a MIT-style
-// license that can be found in the LICENSE file.
-package schema
-
-import (
- "fmt"
- "strconv"
-)
-
-type Accelerator struct {
- ID string `json:"id"`
- Type string `json:"type"`
- Model string `json:"model"`
-}
-
-type Topology struct {
- Node []int `json:"node"`
- Socket [][]int `json:"socket"`
- MemoryDomain [][]int `json:"memoryDomain"`
- Die [][]*int `json:"die,omitempty"`
- Core [][]int `json:"core"`
- Accelerators []*Accelerator `json:"accelerators,omitempty"`
-}
-
-type MetricValue struct {
- Unit Unit `json:"unit"`
- Value float64 `json:"value"`
-}
-
-type SubCluster struct {
- Name string `json:"name"`
- Nodes string `json:"nodes"`
- ProcessorType string `json:"processorType"`
- Topology Topology `json:"topology"`
- FlopRateScalar MetricValue `json:"flopRateScalar"`
- FlopRateSimd MetricValue `json:"flopRateSimd"`
- MemoryBandwidth MetricValue `json:"memoryBandwidth"`
- MetricConfig []MetricConfig `json:"metricConfig,omitempty"`
- Footprint []string `json:"footprint,omitempty"`
- EnergyFootprint []string `json:"energyFootprint,omitempty"`
- SocketsPerNode int `json:"socketsPerNode"`
- CoresPerSocket int `json:"coresPerSocket"`
- ThreadsPerCore int `json:"threadsPerCore"`
-}
-
-type SubClusterConfig struct {
- Name string `json:"name"`
- Footprint string `json:"footprint,omitempty"`
- Energy string `json:"energy"`
- Peak float64 `json:"peak"`
- Normal float64 `json:"normal"`
- Caution float64 `json:"caution"`
- Alert float64 `json:"alert"`
- Remove bool `json:"remove"`
- LowerIsBetter bool `json:"lowerIsBetter"`
-}
-
-type MetricConfig struct {
- Unit Unit `json:"unit"`
- Energy string `json:"energy"`
- Name string `json:"name"`
- Scope MetricScope `json:"scope"`
- Aggregation string `json:"aggregation"`
- Footprint string `json:"footprint,omitempty"`
- SubClusters []*SubClusterConfig `json:"subClusters,omitempty"`
- Peak float64 `json:"peak"`
- Caution float64 `json:"caution"`
- Alert float64 `json:"alert"`
- Timestep int `json:"timestep"`
- Normal float64 `json:"normal"`
- LowerIsBetter bool `json:"lowerIsBetter"`
-}
-
-type Cluster struct {
- Name string `json:"name"`
- MetricConfig []*MetricConfig `json:"metricConfig"`
- SubClusters []*SubCluster `json:"subClusters"`
-}
-
-type ClusterSupport struct {
- Cluster string `json:"cluster"`
- SubClusters []string `json:"subclusters"`
-}
-
-type GlobalMetricListItem struct {
- Name string `json:"name"`
- Unit Unit `json:"unit"`
- Scope MetricScope `json:"scope"`
- Footprint string `json:"footprint,omitempty"`
- Availability []ClusterSupport `json:"availability"`
-}
-
-// Return a list of socket IDs given a list of hwthread IDs. Even if just one
-// hwthread is in that socket, add it to the list. If no hwthreads other than
-// those in the argument list are assigned to one of the sockets in the first
-// return value, return true as the second value. TODO: Optimize this, there
-// must be a more efficient way/algorithm.
-func (topo *Topology) GetSocketsFromHWThreads(
- hwthreads []int,
-) (sockets []int, exclusive bool) {
- socketsMap := map[int]int{}
- for _, hwthread := range hwthreads {
- for socket, hwthreadsInSocket := range topo.Socket {
- for _, hwthreadInSocket := range hwthreadsInSocket {
- if hwthread == hwthreadInSocket {
- socketsMap[socket] += 1
- }
- }
- }
- }
-
- exclusive = true
- hwthreadsPerSocket := len(topo.Node) / len(topo.Socket)
- sockets = make([]int, 0, len(socketsMap))
- for socket, count := range socketsMap {
- sockets = append(sockets, socket)
- exclusive = exclusive && count == hwthreadsPerSocket
- }
-
- return sockets, exclusive
-}
-
-// Return a list of socket IDs given a list of core IDs. Even if just one
-// core is in that socket, add it to the list. If no cores other than
-// those in the argument list are assigned to one of the sockets in the first
-// return value, return true as the second value. TODO: Optimize this, there
-// must be a more efficient way/algorithm.
-func (topo *Topology) GetSocketsFromCores(
- cores []int,
-) (sockets []int, exclusive bool) {
- socketsMap := map[int]int{}
- for _, core := range cores {
- for _, hwthreadInCore := range topo.Core[core] {
- for socket, hwthreadsInSocket := range topo.Socket {
- for _, hwthreadInSocket := range hwthreadsInSocket {
- if hwthreadInCore == hwthreadInSocket {
- socketsMap[socket] += 1
- }
- }
- }
- }
- }
-
- exclusive = true
- hwthreadsPerSocket := len(topo.Node) / len(topo.Socket)
- sockets = make([]int, 0, len(socketsMap))
- for socket, count := range socketsMap {
- sockets = append(sockets, socket)
- exclusive = exclusive && count == hwthreadsPerSocket
- }
-
- return sockets, exclusive
-}
-
-// Return a list of core IDs given a list of hwthread IDs. Even if just one
-// hwthread is in that core, add it to the list. If no hwthreads other than
-// those in the argument list are assigned to one of the cores in the first
-// return value, return true as the second value. TODO: Optimize this, there
-// must be a more efficient way/algorithm.
-func (topo *Topology) GetCoresFromHWThreads(
- hwthreads []int,
-) (cores []int, exclusive bool) {
- coresMap := map[int]int{}
- for _, hwthread := range hwthreads {
- for core, hwthreadsInCore := range topo.Core {
- for _, hwthreadInCore := range hwthreadsInCore {
- if hwthread == hwthreadInCore {
- coresMap[core] += 1
- }
- }
- }
- }
-
- exclusive = true
- hwthreadsPerCore := len(topo.Node) / len(topo.Core)
- cores = make([]int, 0, len(coresMap))
- for core, count := range coresMap {
- cores = append(cores, core)
- exclusive = exclusive && count == hwthreadsPerCore
- }
-
- return cores, exclusive
-}
-
-// Return a list of memory domain IDs given a list of hwthread IDs. Even if
-// just one hwthread is in that memory domain, add it to the list. If no
-// hwthreads other than those in the argument list are assigned to one of the
-// memory domains in the first return value, return true as the second value.
-// TODO: Optimize this, there must be a more efficient way/algorithm.
-func (topo *Topology) GetMemoryDomainsFromHWThreads(
- hwthreads []int,
-) (memDoms []int, exclusive bool) {
- memDomsMap := map[int]int{}
- for _, hwthread := range hwthreads {
- for memDom, hwthreadsInmemDom := range topo.MemoryDomain {
- for _, hwthreadInmemDom := range hwthreadsInmemDom {
- if hwthread == hwthreadInmemDom {
- memDomsMap[memDom] += 1
- }
- }
- }
- }
-
- exclusive = true
- hwthreadsPermemDom := len(topo.Node) / len(topo.MemoryDomain)
- memDoms = make([]int, 0, len(memDomsMap))
- for memDom, count := range memDomsMap {
- memDoms = append(memDoms, memDom)
- exclusive = exclusive && count == hwthreadsPermemDom
- }
-
- return memDoms, exclusive
-}
-
-// Temporary fix to convert back from int id to string id for accelerators
-func (topo *Topology) GetAcceleratorID(id int) (string, error) {
- if id < 0 {
- fmt.Printf("ID smaller than 0!\n")
- return topo.Accelerators[0].ID, nil
- } else if id < len(topo.Accelerators) {
- return topo.Accelerators[id].ID, nil
- } else {
- return "", fmt.Errorf("index %d out of range", id)
- }
-}
-
-// Return list of hardware (string) accelerator IDs
-func (topo *Topology) GetAcceleratorIDs() []string {
- accels := make([]string, 0)
- for _, accel := range topo.Accelerators {
- accels = append(accels, accel.ID)
- }
- return accels
-}
-
-// Outdated? Or: Return indices of accelerators in parent array?
-func (topo *Topology) GetAcceleratorIDsAsInt() ([]int, error) {
- accels := make([]int, 0)
- for _, accel := range topo.Accelerators {
- id, err := strconv.Atoi(accel.ID)
- if err != nil {
- return nil, err
- }
- accels = append(accels, id)
- }
- return accels, nil
-}
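The topology helpers deleted here all follow one pattern: map the given IDs up to their containers and report whether those containers are covered exclusively. A sketch, assuming `Topology` and `GetSocketsFromHWThreads` keep their shape in `cc-lib/schema` (the import swaps elsewhere in this diff suggest that, but it is not shown):

```go
package main

import (
	"fmt"

	"github.com/ClusterCockpit/cc-lib/schema" // assumed: type moved here unchanged
)

func main() {
	// Two sockets with two hwthreads each.
	topo := schema.Topology{
		Node:   []int{0, 1, 2, 3},
		Socket: [][]int{{0, 1}, {2, 3}},
	}

	s, excl := topo.GetSocketsFromHWThreads([]int{0, 1})
	fmt.Println(s, excl) // [0] true: socket 0 fully covered

	s, excl = topo.GetSocketsFromHWThreads([]int{0, 2})
	fmt.Println(s, excl) // both sockets touched, neither fully covered (order not guaranteed)
}
```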
diff --git a/pkg/schema/config.go b/pkg/schema/config.go
deleted file mode 100644
index 27d11be..0000000
--- a/pkg/schema/config.go
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
-// Use of this source code is governed by a MIT-style
-// license that can be found in the LICENSE file.
-package schema
-
-import (
- "encoding/json"
- "time"
-)
-
-type LdapConfig struct {
- Url string `json:"url"`
- UserBase string `json:"user_base"`
- SearchDN string `json:"search_dn"`
- UserBind string `json:"user_bind"`
- UserFilter string `json:"user_filter"`
- UserAttr string `json:"username_attr"`
- SyncInterval string `json:"sync_interval"` // Parsed using time.ParseDuration.
- SyncDelOldUsers bool `json:"sync_del_old_users"`
-
- // Should a non-existent user be added to the DB if the user exists in the LDAP directory
- SyncUserOnLogin bool `json:"syncUserOnLogin"`
-}
-
-type OpenIDConfig struct {
- Provider string `json:"provider"`
- SyncUserOnLogin bool `json:"syncUserOnLogin"`
- UpdateUserOnLogin bool `json:"updateUserOnLogin"`
-}
-
-type JWTAuthConfig struct {
- // Specifies for how long a JWT token shall be valid
- // as a string parsable by time.ParseDuration().
- MaxAge string `json:"max-age"`
-
- // Specifies which cookie should be checked for a JWT token (if no authorization header is present)
- CookieName string `json:"cookieName"`
-
- // Deny login for users not in database (but defined in JWT).
- // Ignore user roles defined in JWTs ('roles' claim), get them from db.
- ValidateUser bool `json:"validateUser"`
-
- // Specifies which issuer should be accepted when validating external JWTs ('iss' claim)
- TrustedIssuer string `json:"trustedIssuer"`
-
- // Should a non-existent user be added to the DB based on the information in the token
- SyncUserOnLogin bool `json:"syncUserOnLogin"`
-
- // Should an existing user be updated in the DB based on the information in the token
- UpdateUserOnLogin bool `json:"updateUserOnLogin"`
-}
-
-type IntRange struct {
- From int `json:"from"`
- To int `json:"to"`
-}
-
-type TimeRange struct {
- From *time.Time `json:"from"`
- To *time.Time `json:"to"`
- Range string `json:"range,omitempty"`
-}
-
-type FilterRanges struct {
- Duration *IntRange `json:"duration"`
- NumNodes *IntRange `json:"numNodes"`
- StartTime *TimeRange `json:"startTime"`
-}
-
-type ClusterConfig struct {
- Name string `json:"name"`
- FilterRanges *FilterRanges `json:"filterRanges"`
- MetricDataRepository json.RawMessage `json:"metricDataRepository"`
-}
-
-type Retention struct {
- Policy string `json:"policy"`
- Location string `json:"location"`
- Age int `json:"age"`
- IncludeDB bool `json:"includeDB"`
-}
-
-type ResampleConfig struct {
- // Array of resampling target resolutions, in seconds; Example: [600,300,60]
- Resolutions []int `json:"resolutions"`
- // Trigger next zoom level at less than this many visible datapoints
- Trigger int `json:"trigger"`
-}
-
-type CronFrequency struct {
- // Duration Update Worker [Defaults to '5m']
- DurationWorker string `json:"duration-worker"`
- // Metric-Footprint Update Worker [Defaults to '10m']
- FootprintWorker string `json:"footprint-worker"`
-}
-
-// Format of the configuration (file). See below for the defaults.
-type ProgramConfig struct {
- // Address where the http (or https) server will listen on (for example: 'localhost:80').
- Addr string `json:"addr"`
-
- // Addresses from which secured admin API endpoints can be reached, can be wildcard "*"
- ApiAllowedIPs []string `json:"apiAllowedIPs"`
-
- // Drop root permissions once .env was read and the port was taken.
- User string `json:"user"`
- Group string `json:"group"`
-
- // Disable authentication (for everything: API, Web-UI, ...)
- DisableAuthentication bool `json:"disable-authentication"`
-
- // If `embed-static-files` is true (default), the frontend files are directly
- // embedded into the Go binary and expected to be in web/frontend. Only if
- // it is false are the files in `static-files` served instead.
- EmbedStaticFiles bool `json:"embed-static-files"`
- StaticFiles string `json:"static-files"`
-
- // 'sqlite3' or 'mysql' (mysql will work for mariadb as well)
- DBDriver string `json:"db-driver"`
-
- // For sqlite3 a filename, for mysql a DSN in this format: https://github.com/go-sql-driver/mysql#dsn-data-source-name (Without query parameters!).
- DB string `json:"db"`
-
- // Config for job archive
- Archive json.RawMessage `json:"archive"`
-
- // Keep all metric data in the metric data repositories,
- // do not write to the job-archive.
- DisableArchive bool `json:"disable-archive"`
-
- // Validate json input against schema
- Validate bool `json:"validate"`
-
- // For LDAP Authentication and user synchronisation.
- LdapConfig *LdapConfig `json:"ldap"`
- JwtConfig *JWTAuthConfig `json:"jwts"`
- OpenIDConfig *OpenIDConfig `json:"oidc"`
-
- // If 0 or empty, the session does not expire!
- SessionMaxAge string `json:"session-max-age"`
-
- // If both those options are not empty, use HTTPS using those certificates.
- HttpsCertFile string `json:"https-cert-file"`
- HttpsKeyFile string `json:"https-key-file"`
-
- // If not the empty string and `addr` does not end in ":80",
- // redirect every request incoming at port 80 to that url.
- RedirectHttpTo string `json:"redirect-http-to"`
-
- // If overwritten, at least all the options in the defaults below must
- // be provided! Most options here can be overwritten by the user.
- UiDefaults map[string]interface{} `json:"ui-defaults"`
-
- // If exists, will enable dynamic zoom in frontend metric plots using the configured values
- EnableResampling *ResampleConfig `json:"enable-resampling"`
-
- // Where to store MachineState files
- MachineStateDir string `json:"machine-state-dir"`
-
- // If not zero, automatically mark jobs as stopped once they have been running X seconds longer than their walltime.
- StopJobsExceedingWalltime int `json:"stop-jobs-exceeding-walltime"`
-
- // Defines the duration X in seconds below which jobs are considered "short" and will be filtered out in specific views.
- ShortRunningJobsDuration int `json:"short-running-jobs-duration"`
-
- // Energy Mix CO2 Emission Constant [g/kWh]
- // If set, displays the estimated CO2 emission for a job based on the job's totalEnergy
- EmissionConstant int `json:"emission-constant"`
-
- // Frequency of cron job workers
- CronFrequency *CronFrequency `json:"cron-frequency"`
-
- // Array of Clusters
- Clusters []*ClusterConfig `json:"clusters"`
-}
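The JSON tags on the removed `ProgramConfig` document the config file format. A minimal sketch that decodes a config subset with those tags (the local struct exists for illustration only):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Illustrative subset of the removed ProgramConfig; tags match the deleted struct.
type programConfig struct {
	Addr          string   `json:"addr"`
	DBDriver      string   `json:"db-driver"`
	DB            string   `json:"db"`
	ApiAllowedIPs []string `json:"apiAllowedIPs"`
}

func main() {
	raw := []byte(`{
		"addr": "localhost:8080",
		"db-driver": "sqlite3",
		"db": "./var/job.db",
		"apiAllowedIPs": ["*"]
	}`)

	var cfg programConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg)
}
```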
diff --git a/pkg/schema/float.go b/pkg/schema/float.go
deleted file mode 100644
index e7d9857..0000000
--- a/pkg/schema/float.go
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
-// Use of this source code is governed by a MIT-style
-// license that can be found in the LICENSE file.
-package schema
-
-import (
- "errors"
- "io"
- "math"
- "strconv"
-
- "github.com/ClusterCockpit/cc-backend/pkg/log"
-)
-
-// A custom float type is used so that (Un)MarshalJSON and
-// (Un)MarshalGQL can be overloaded and NaN/null can be used.
-// The default behaviour of putting every nullable value behind
-// a pointer has a bigger overhead.
-type Float float64
-
-var NaN Float = Float(math.NaN())
-var nullAsBytes []byte = []byte("null")
-
-func (f Float) IsNaN() bool {
- return math.IsNaN(float64(f))
-}
-
-// NaN will be serialized to `null`.
-func (f Float) MarshalJSON() ([]byte, error) {
- if f.IsNaN() {
- return nullAsBytes, nil
- }
-
- return strconv.AppendFloat(make([]byte, 0, 10), float64(f), 'f', 2, 64), nil
-}
-
-// `null` will be deserialized to NaN.
-func (f *Float) UnmarshalJSON(input []byte) error {
- s := string(input)
- if s == "null" {
- *f = NaN
- return nil
- }
-
- val, err := strconv.ParseFloat(s, 64)
- if err != nil {
- log.Warn("Error while parsing custom float")
- return err
- }
- *f = Float(val)
- return nil
-}
-
-// UnmarshalGQL implements the graphql.Unmarshaler interface.
-func (f *Float) UnmarshalGQL(v interface{}) error {
- f64, ok := v.(float64)
- if !ok {
- return errors.New("invalid Float scalar")
- }
-
- *f = Float(f64)
- return nil
-}
-
-// MarshalGQL implements the graphql.Marshaler interface.
-// NaN will be serialized to `null`.
-func (f Float) MarshalGQL(w io.Writer) {
- if f.IsNaN() {
- w.Write(nullAsBytes)
- } else {
- w.Write(strconv.AppendFloat(make([]byte, 0, 10), float64(f), 'f', 2, 64))
- }
-}
-
-// Only used via the REST-API, not via GraphQL.
-// This uses far fewer allocations per series,
-// but it turns out that the resulting performance
-// gain is not that big.
-func (s *Series) MarshalJSON() ([]byte, error) {
- buf := make([]byte, 0, 512+len(s.Data)*8)
- buf = append(buf, `{"hostname":"`...)
- buf = append(buf, s.Hostname...)
- buf = append(buf, '"')
- if s.Id != nil {
- buf = append(buf, `,"id":"`...)
- buf = append(buf, *s.Id...)
- buf = append(buf, '"')
- }
- buf = append(buf, `,"statistics":{"min":`...)
- buf = strconv.AppendFloat(buf, s.Statistics.Min, 'f', 2, 64)
- buf = append(buf, `,"avg":`...)
- buf = strconv.AppendFloat(buf, s.Statistics.Avg, 'f', 2, 64)
- buf = append(buf, `,"max":`...)
- buf = strconv.AppendFloat(buf, s.Statistics.Max, 'f', 2, 64)
- buf = append(buf, '}')
- buf = append(buf, `,"data":[`...)
- for i := 0; i < len(s.Data); i++ {
- if i != 0 {
- buf = append(buf, ',')
- }
-
- if s.Data[i].IsNaN() {
- buf = append(buf, `null`...)
- } else {
- buf = strconv.AppendFloat(buf, float64(s.Data[i]), 'f', 2, 32)
- }
- }
- buf = append(buf, ']', '}')
- return buf, nil
-}
-
-func ConvertFloatToFloat64(s []Float) []float64 {
- fp := make([]float64, len(s))
-
- for i, val := range s {
- fp[i] = float64(val)
- }
-
- return fp
-}
-
-func GetFloat64ToFloat(s []float64) []Float {
- fp := make([]Float, len(s))
-
- for i, val := range s {
- fp[i] = Float(val)
- }
-
- return fp
-}
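
A standalone sketch (not part of this diff) of the serialization contract the removed Float type provided: NaN marshals to JSON null, so gaps in metric series stay representable without putting every value behind a pointer. The re-implementation below is hypothetical and only mirrors the two-decimal formatting shown above.

package main

import (
	"encoding/json"
	"fmt"
	"math"
	"strconv"
)

type Float float64

// NaN is encoded as null; all other values use fixed two-decimal formatting.
func (f Float) MarshalJSON() ([]byte, error) {
	if math.IsNaN(float64(f)) {
		return []byte("null"), nil
	}
	return strconv.AppendFloat(make([]byte, 0, 10), float64(f), 'f', 2, 64), nil
}

func main() {
	out, _ := json.Marshal([]Float{1.5, Float(math.NaN())})
	fmt.Println(string(out)) // [1.50,null]
}
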
diff --git a/pkg/schema/job.go b/pkg/schema/job.go
deleted file mode 100644
index 5e3110b..0000000
--- a/pkg/schema/job.go
+++ /dev/null
@@ -1,175 +0,0 @@
-// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
-// Use of this source code is governed by a MIT-style
-// license that can be found in the LICENSE file.
-package schema
-
-import (
- "errors"
- "fmt"
- "io"
- "time"
-)
-
-// BaseJob is the common part of the job metadata structs.
-//
-// Common subset of Job and JobMeta. Use one of those, not this type directly.
-type BaseJob struct {
- Cluster string `json:"cluster" db:"cluster" example:"fritz"`
- SubCluster string `json:"subCluster" db:"subcluster" example:"main"`
- Partition string `json:"partition,omitempty" db:"cluster_partition" example:"main"`
- Project string `json:"project" db:"project" example:"abcd200"`
- User string `json:"user" db:"hpc_user" example:"abcd100h"`
- State JobState `json:"jobState" db:"job_state" example:"completed" enums:"completed,failed,cancelled,stopped,timeout,out_of_memory"`
- Tags []*Tag `json:"tags,omitempty"`
- RawEnergyFootprint []byte `json:"-" db:"energy_footprint"`
- RawFootprint []byte `json:"-" db:"footprint"`
- RawMetaData []byte `json:"-" db:"meta_data"`
- RawResources []byte `json:"-" db:"resources"`
- Resources []*Resource `json:"resources"`
- EnergyFootprint map[string]float64 `json:"energyFootprint"`
- Footprint map[string]float64 `json:"footprint"`
- MetaData map[string]string `json:"metaData"`
- ConcurrentJobs JobLinkResultList `json:"concurrentJobs"`
- Energy float64 `json:"energy" db:"energy"`
- ArrayJobId int64 `json:"arrayJobId,omitempty" db:"array_job_id" example:"123000"`
- Walltime int64 `json:"walltime,omitempty" db:"walltime" example:"86400" minimum:"1"`
- JobID int64 `json:"jobId" db:"job_id" example:"123000"`
- Duration int32 `json:"duration" db:"duration" example:"43200" minimum:"1"`
- SMT int32 `json:"smt,omitempty" db:"smt" example:"4"`
- MonitoringStatus int32 `json:"monitoringStatus,omitempty" db:"monitoring_status" example:"1" minimum:"0" maximum:"3"`
- Exclusive int32 `json:"exclusive" db:"exclusive" example:"1" minimum:"0" maximum:"2"`
- NumAcc int32 `json:"numAcc,omitempty" db:"num_acc" example:"2" minimum:"1"`
- NumHWThreads int32 `json:"numHwthreads,omitempty" db:"num_hwthreads" example:"20" minimum:"1"`
- NumNodes int32 `json:"numNodes" db:"num_nodes" example:"2" minimum:"1"`
-}
-
-// Job struct type
-//
-// This type is used as the GraphQL interface and, via sqlx, as a database table row.
-//
-// Job model
-// @Description Information of a HPC job.
-type Job struct {
- StartTime time.Time `json:"startTime"`
- BaseJob
- ID int64 `json:"id" db:"id"`
- StartTimeUnix int64 `json:"-" db:"start_time" example:"1649723812"`
-}
-
-// JobMeta struct type
-//
-// When reading from the database or sending data via GraphQL, the start time
-// can be in the much more convenient time.Time type. In the `meta.json`
-// files, the start time is encoded as a unix epoch timestamp. This is why
-// the JobMeta struct exists: it contains all fields of the regular Job
-// struct, but "overwrites" the StartTime field with one of type int64.
-// Its ID field (*int64, `json:"id,omitempty"`) is never used in the
-// job-archive and is only available via the REST-API.
-
-type JobLink struct {
- ID int64 `json:"id"`
- JobID int64 `json:"jobId"`
-}
-
-type JobLinkResultList struct {
- Items []*JobLink `json:"items"`
- Count int `json:"count"`
-}
-
-// JobMeta model
-// @Description Meta data information of a HPC job.
-type JobMeta struct {
- ID *int64 `json:"id,omitempty"`
- Statistics map[string]JobStatistics `json:"statistics"`
- BaseJob
- StartTime int64 `json:"startTime" db:"start_time" example:"1649723812" minimum:"1"`
-}
-
-const (
- MonitoringStatusDisabled int32 = 0
- MonitoringStatusRunningOrArchiving int32 = 1
- MonitoringStatusArchivingFailed int32 = 2
- MonitoringStatusArchivingSuccessful int32 = 3
-)
-
-var JobDefaults BaseJob = BaseJob{
- Exclusive: 1,
- MonitoringStatus: MonitoringStatusRunningOrArchiving,
-}
-
-type Unit struct {
- Base string `json:"base"`
- Prefix string `json:"prefix,omitempty"`
-}
-
-// JobStatistics model
-// @Description Specification for job metric statistics.
-type JobStatistics struct {
- Unit Unit `json:"unit"`
- Avg float64 `json:"avg" example:"2500" minimum:"0"` // Job metric average
- Min float64 `json:"min" example:"2000" minimum:"0"` // Job metric minimum
- Max float64 `json:"max" example:"3000" minimum:"0"` // Job metric maximum
-}
-
-// Tag model
-// @Description Defines a tag using name and type.
-type Tag struct {
- Type string `json:"type" db:"tag_type" example:"Debug"`
- Name string `json:"name" db:"tag_name" example:"Testjob"`
- Scope string `json:"scope" db:"tag_scope" example:"global"`
- ID int64 `json:"id" db:"id"`
-}
-
-// Resource model
-// @Description A resource used by a job
-type Resource struct {
- Hostname string `json:"hostname"`
- Configuration string `json:"configuration,omitempty"`
- HWThreads []int `json:"hwthreads,omitempty"`
- Accelerators []string `json:"accelerators,omitempty"`
-}
-
-type JobState string
-
-const (
- JobStateRunning JobState = "running"
- JobStateCompleted JobState = "completed"
- JobStateFailed JobState = "failed"
- JobStateCancelled JobState = "cancelled"
- JobStateStopped JobState = "stopped"
- JobStateTimeout JobState = "timeout"
- JobStatePreempted JobState = "preempted"
- JobStateOutOfMemory JobState = "out_of_memory"
-)
-
-func (e *JobState) UnmarshalGQL(v interface{}) error {
- str, ok := v.(string)
- if !ok {
- return fmt.Errorf("SCHEMA/JOB > enums must be strings")
- }
-
- *e = JobState(str)
- if !e.Valid() {
- return errors.New("SCHEMA/JOB > invalid job state")
- }
-
- return nil
-}
-
-func (e JobState) MarshalGQL(w io.Writer) {
- fmt.Fprintf(w, "\"%s\"", e)
-}
-
-func (e JobState) Valid() bool {
- return e == JobStateRunning ||
- e == JobStateCompleted ||
- e == JobStateFailed ||
- e == JobStateCancelled ||
- e == JobStateStopped ||
- e == JobStateTimeout ||
- e == JobStatePreempted ||
- e == JobStateOutOfMemory
-}
diff --git a/pkg/schema/metrics.go b/pkg/schema/metrics.go
deleted file mode 100644
index fbb85e4..0000000
--- a/pkg/schema/metrics.go
+++ /dev/null
@@ -1,368 +0,0 @@
-// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
-// Use of this source code is governed by a MIT-style
-// license that can be found in the LICENSE file.
-package schema
-
-import (
- "fmt"
- "io"
- "math"
- "sort"
- "unsafe"
-
- "github.com/ClusterCockpit/cc-backend/internal/util"
-)
-
-type JobData map[string]map[MetricScope]*JobMetric
-type ScopedJobStats map[string]map[MetricScope][]*ScopedStats
-
-type JobMetric struct {
- StatisticsSeries *StatsSeries `json:"statisticsSeries,omitempty"`
- Unit Unit `json:"unit"`
- Series []Series `json:"series"`
- Timestep int `json:"timestep"`
-}
-
-type Series struct {
- Id *string `json:"id,omitempty"`
- Hostname string `json:"hostname"`
- Data []Float `json:"data"`
- Statistics MetricStatistics `json:"statistics"`
-}
-
-type ScopedStats struct {
- Hostname string `json:"hostname"`
- Id *string `json:"id,omitempty"`
- Data *MetricStatistics `json:"data"`
-}
-
-type MetricStatistics struct {
- Avg float64 `json:"avg"`
- Min float64 `json:"min"`
- Max float64 `json:"max"`
-}
-
-type StatsSeries struct {
- Percentiles map[int][]Float `json:"percentiles,omitempty"`
- Mean []Float `json:"mean"`
- Median []Float `json:"median"`
- Min []Float `json:"min"`
- Max []Float `json:"max"`
-}
-
-type MetricScope string
-
-const (
- MetricScopeInvalid MetricScope = "invalid_scope"
-
- MetricScopeNode MetricScope = "node"
- MetricScopeSocket MetricScope = "socket"
- MetricScopeMemoryDomain MetricScope = "memoryDomain"
- MetricScopeCore MetricScope = "core"
- MetricScopeHWThread MetricScope = "hwthread"
-
- MetricScopeAccelerator MetricScope = "accelerator"
-)
-
-var metricScopeGranularity map[MetricScope]int = map[MetricScope]int{
- MetricScopeNode: 10,
- MetricScopeSocket: 5,
- MetricScopeMemoryDomain: 4,
- MetricScopeCore: 3,
- MetricScopeHWThread: 2,
- /* Special case: Accelerator
- * -> No conversion possible if native scope is HWTHREAD
- * -> Therefore needs to be less than HWTHREAD, else max() would return an unhandled case
- * -> If nativeScope is accelerator, accelerator metrics are returned correctly
- */
- MetricScopeAccelerator: 1,
-
- MetricScopeInvalid: -1,
-}
-
-func (e *MetricScope) LT(other MetricScope) bool {
- a := metricScopeGranularity[*e]
- b := metricScopeGranularity[other]
- return a < b
-}
-
-func (e *MetricScope) LTE(other MetricScope) bool {
- a := metricScopeGranularity[*e]
- b := metricScopeGranularity[other]
- return a <= b
-}
-
-func (e *MetricScope) Max(other MetricScope) MetricScope {
- a := metricScopeGranularity[*e]
- b := metricScopeGranularity[other]
- if a > b {
- return *e
- }
- return other
-}
-
-func (e *MetricScope) UnmarshalGQL(v interface{}) error {
- str, ok := v.(string)
- if !ok {
- return fmt.Errorf("SCHEMA/METRICS > enums must be strings")
- }
-
- *e = MetricScope(str)
- if !e.Valid() {
- return fmt.Errorf("SCHEMA/METRICS > %s is not a valid MetricScope", str)
- }
- return nil
-}
-
-func (e MetricScope) MarshalGQL(w io.Writer) {
- fmt.Fprintf(w, "\"%s\"", e)
-}
-
-func (e MetricScope) Valid() bool {
- gran, ok := metricScopeGranularity[e]
- return ok && gran > 0
-}
-
-func (jd *JobData) Size() int {
- n := 128
- for _, scopes := range *jd {
- for _, metric := range scopes {
- if metric.StatisticsSeries != nil {
- n += len(metric.StatisticsSeries.Max)
- n += len(metric.StatisticsSeries.Mean)
- n += len(metric.StatisticsSeries.Median)
- n += len(metric.StatisticsSeries.Min)
- }
-
- for _, series := range metric.Series {
- n += len(series.Data)
- }
- }
- }
- return n * int(unsafe.Sizeof(Float(0)))
-}
-
-const smooth bool = false
-
-func (jm *JobMetric) AddStatisticsSeries() {
- if jm.StatisticsSeries != nil || len(jm.Series) < 4 {
- return
- }
-
- n, m := 0, len(jm.Series[0].Data)
- for _, series := range jm.Series {
- if len(series.Data) > n {
- n = len(series.Data)
- }
- if len(series.Data) < m {
- m = len(series.Data)
- }
- }
-
- // mean := make([]Float, n)
- min, median, max := make([]Float, n), make([]Float, n), make([]Float, n)
- i := 0
- for ; i < m; i++ {
- seriesCount := len(jm.Series)
- // ssum := 0.0
- smin, smed, smax := math.MaxFloat32, make([]float64, seriesCount), -math.MaxFloat32
- notnan := 0
- for j := 0; j < seriesCount; j++ {
- x := float64(jm.Series[j].Data[i])
- if math.IsNaN(x) {
- continue
- }
-
- notnan += 1
- // ssum += x
- smed[j] = x
- smin = math.Min(smin, x)
- smax = math.Max(smax, x)
- }
-
- if notnan < 3 {
- min[i] = NaN
- // mean[i] = NaN
- median[i] = NaN
- max[i] = NaN
- } else {
- min[i] = Float(smin)
- // mean[i] = Float(ssum / float64(notnan))
- max[i] = Float(smax)
-
- medianRaw, err := util.Median(smed)
- if err != nil {
- median[i] = NaN
- } else {
- median[i] = Float(medianRaw)
- }
- }
- }
-
- for ; i < n; i++ {
- min[i] = NaN
- // mean[i] = NaN
- median[i] = NaN
- max[i] = NaN
- }
-
- if smooth {
- for i := 2; i < len(median)-2; i++ {
- if min[i].IsNaN() {
- continue
- }
-
- min[i] = (min[i-2] + min[i-1] + min[i] + min[i+1] + min[i+2]) / 5
- max[i] = (max[i-2] + max[i-1] + max[i] + max[i+1] + max[i+2]) / 5
- // mean[i] = (mean[i-2] + mean[i-1] + mean[i] + mean[i+1] + mean[i+2]) / 5
- // Reduce Median further
- smoothRaw := []float64{float64(median[i-2]), float64(median[i-1]), float64(median[i]), float64(median[i+1]), float64(median[i+2])}
- smoothMedian, err := util.Median(smoothRaw)
- if err != nil {
- median[i] = NaN
- } else {
- median[i] = Float(smoothMedian)
- }
- }
- }
-
- jm.StatisticsSeries = &StatsSeries{Median: median, Min: min, Max: max} // Mean: mean
-}
-
-func (jd *JobData) AddNodeScope(metric string) bool {
- scopes, ok := (*jd)[metric]
- if !ok {
- return false
- }
-
- maxScope := MetricScopeInvalid
- for scope := range scopes {
- maxScope = maxScope.Max(scope)
- }
-
- if maxScope == MetricScopeInvalid || maxScope == MetricScopeNode {
- return false
- }
-
- jm := scopes[maxScope]
- hosts := make(map[string][]Series, 32)
- for _, series := range jm.Series {
- hosts[series.Hostname] = append(hosts[series.Hostname], series)
- }
-
- nodeJm := &JobMetric{
- Unit: jm.Unit,
- Timestep: jm.Timestep,
- Series: make([]Series, 0, len(hosts)),
- }
- for hostname, series := range hosts {
- min, sum, max := math.MaxFloat32, 0.0, -math.MaxFloat32
- for _, series := range series {
- sum += series.Statistics.Avg
- min = math.Min(min, series.Statistics.Min)
- max = math.Max(max, series.Statistics.Max)
- }
-
- n, m := 0, len(jm.Series[0].Data)
- for _, series := range jm.Series {
- if len(series.Data) > n {
- n = len(series.Data)
- }
- if len(series.Data) < m {
- m = len(series.Data)
- }
- }
-
- i, data := 0, make([]Float, len(series[0].Data))
- for ; i < m; i++ {
- x := Float(0.0)
- for _, series := range jm.Series {
- x += series.Data[i]
- }
- data[i] = x
- }
-
- for ; i < n; i++ {
- data[i] = NaN
- }
-
- nodeJm.Series = append(nodeJm.Series, Series{
- Hostname: hostname,
- Statistics: MetricStatistics{Min: min, Avg: sum / float64(len(series)), Max: max},
- Data: data,
- })
- }
-
- scopes[MetricScopeNode] = nodeJm
- return true
-}
-
-func (jd *JobData) RoundMetricStats() {
- // TODO: Make Digit-Precision Configurable? (Currently: Fixed to 2 Digits)
- for _, scopes := range *jd {
- for _, jm := range scopes {
- for index := range jm.Series {
- jm.Series[index].Statistics = MetricStatistics{
- Avg: (math.Round(jm.Series[index].Statistics.Avg*100) / 100),
- Min: (math.Round(jm.Series[index].Statistics.Min*100) / 100),
- Max: (math.Round(jm.Series[index].Statistics.Max*100) / 100),
- }
- }
- }
- }
-}
-
-func (jm *JobMetric) AddPercentiles(ps []int) bool {
- if jm.StatisticsSeries == nil {
- jm.AddStatisticsSeries()
- }
-
- if len(jm.Series) < 3 {
- return false
- }
-
- if jm.StatisticsSeries.Percentiles == nil {
- jm.StatisticsSeries.Percentiles = make(map[int][]Float, len(ps))
- }
-
- n := 0
- for _, series := range jm.Series {
- if len(series.Data) > n {
- n = len(series.Data)
- }
- }
-
- data := make([][]float64, n)
- for i := 0; i < n; i++ {
- vals := make([]float64, 0, len(jm.Series))
- for _, series := range jm.Series {
- if i < len(series.Data) {
- vals = append(vals, float64(series.Data[i]))
- }
- }
-
- sort.Float64s(vals)
- data[i] = vals
- }
-
- for _, p := range ps {
- if p < 1 || p > 99 {
- panic("SCHEMA/METRICS > invalid percentile")
- }
-
- if _, ok := jm.StatisticsSeries.Percentiles[p]; ok {
- continue
- }
-
- percentiles := make([]Float, n)
- for i := 0; i < n; i++ {
- sorted := data[i]
- percentiles[i] = Float(sorted[(len(sorted)*p)/100])
- }
-
- jm.StatisticsSeries.Percentiles[p] = percentiles
- }
-
- return true
-}
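
The granularity map above induces a total order on scopes (accelerator < hwthread < core < memoryDomain < socket < node). A small sketch, under the same pre-removal import-path assumption, of how LT and Max use that order:

package main

import (
	"fmt"

	"github.com/ClusterCockpit/cc-backend/pkg/schema"
)

func main() {
	core := schema.MetricScopeCore
	node := schema.MetricScopeNode

	// Core data is finer-grained than node data, so core < node.
	fmt.Println(core.LT(node))  // true
	fmt.Println(core.Max(node)) // node
}
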
diff --git a/pkg/schema/schemas/cluster.schema.json b/pkg/schema/schemas/cluster.schema.json
deleted file mode 100644
index c60c100..0000000
--- a/pkg/schema/schemas/cluster.schema.json
+++ /dev/null
@@ -1,339 +0,0 @@
-{
- "$schema": "http://json-schema.org/draft/2020-12/schema",
- "$id": "embedfs://cluster.schema.json",
- "title": "HPC cluster description",
- "description": "Meta data information of a HPC cluster",
- "type": "object",
- "properties": {
- "name": {
- "description": "The unique identifier of a cluster",
- "type": "string"
- },
- "metricConfig": {
- "description": "Metric specifications",
- "type": "array",
- "items": {
- "type": "object",
- "properties": {
- "name": {
- "description": "Metric name",
- "type": "string"
- },
- "unit": {
- "description": "Metric unit",
- "$ref": "embedfs://unit.schema.json"
- },
- "scope": {
- "description": "Native measurement resolution",
- "type": "string",
- "enum": [
- "node",
- "socket",
- "memoryDomain",
- "core",
- "hwthread",
- "accelerator"
- ]
- },
- "timestep": {
- "description": "Frequency of timeseries points in seconds",
- "type": "integer"
- },
- "aggregation": {
- "description": "How the metric is aggregated",
- "type": "string",
- "enum": [
- "sum",
- "avg"
- ]
- },
- "footprint": {
- "description": "Is it a footprint metric and what type",
- "type": "string",
- "enum": [
- "avg",
- "max",
- "min"
- ]
- },
- "energy": {
- "description": "Is it used to calculate job energy",
- "type": "string",
- "enum": [
- "power",
- "energy"
- ]
- },
- "lowerIsBetter": {
- "description": "Is lower better.",
- "type": "boolean"
- },
- "peak": {
- "description": "Metric peak threshold (Upper metric limit)",
- "type": "number"
- },
- "normal": {
- "description": "Metric normal threshold",
- "type": "number"
- },
- "caution": {
- "description": "Metric caution threshold (Suspicious but does not require immediate action)",
- "type": "number"
- },
- "alert": {
- "description": "Metric alert threshold (Requires immediate action)",
- "type": "number"
- },
- "subClusters": {
- "description": "Array of cluster hardware partition metric thresholds",
- "type": "array",
- "items": {
- "type": "object",
- "properties": {
- "name": {
- "description": "Hardware partition name",
- "type": "string"
- },
- "footprint": {
- "description": "Is it a footprint metric and what type. Overwrite global setting",
- "type": "string",
- "enum": [
- "avg",
- "max",
- "min"
- ]
- },
- "energy": {
- "description": "Is it used to calculate job energy. Overwrite global",
- "type": "string",
- "enum": [
- "power",
- "energy"
- ]
- },
- "lowerIsBetter": {
- "description": "Is lower better. Overwrite global",
- "type": "boolean"
- },
- "peak": {
- "description": "The maximum possible metric value",
- "type": "number"
- },
- "normal": {
- "description": "A common metric value level",
- "type": "number"
- },
- "caution": {
- "description": "Metric value requires attention",
- "type": "number"
- },
- "alert": {
- "description": "Metric value requiring immediate attention",
- "type": "number"
- },
- "remove": {
- "description": "Remove this metric for this subcluster",
- "type": "boolean"
- }
- },
- "required": [
- "name"
- ]
- }
- }
- },
- "required": [
- "name",
- "unit",
- "scope",
- "timestep",
- "aggregation",
- "peak",
- "normal",
- "caution",
- "alert"
- ]
- },
- "minItems": 1
- },
- "subClusters": {
- "description": "Array of cluster hardware partitions",
- "type": "array",
- "items": {
- "type": "object",
- "properties": {
- "name": {
- "description": "Hardware partition name",
- "type": "string"
- },
- "processorType": {
- "description": "Processor type",
- "type": "string"
- },
- "socketsPerNode": {
- "description": "Number of sockets per node",
- "type": "integer"
- },
- "coresPerSocket": {
- "description": "Number of cores per socket",
- "type": "integer"
- },
- "threadsPerCore": {
- "description": "Number of SMT threads per core",
- "type": "integer"
- },
- "flopRateScalar": {
- "description": "Theoretical node peak flop rate for scalar code in GFlops/s",
- "type": "object",
- "properties": {
- "unit": {
- "description": "Metric unit",
- "$ref": "embedfs://unit.schema.json"
- },
- "value": {
- "type": "number"
- }
- }
- },
- "flopRateSimd": {
- "description": "Theoretical node peak flop rate for SIMD code in GFlops/s",
- "type": "object",
- "properties": {
- "unit": {
- "description": "Metric unit",
- "$ref": "embedfs://unit.schema.json"
- },
- "value": {
- "type": "number"
- }
- }
- },
- "memoryBandwidth": {
- "description": "Theoretical node peak memory bandwidth in GB/s",
- "type": "object",
- "properties": {
- "unit": {
- "description": "Metric unit",
- "$ref": "embedfs://unit.schema.json"
- },
- "value": {
- "type": "number"
- }
- }
- },
- "nodes": {
- "description": "Node list expression",
- "type": "string"
- },
- "topology": {
- "description": "Node topology",
- "type": "object",
- "properties": {
- "node": {
- "description": "HwTread lists of node",
- "type": "array",
- "items": {
- "type": "integer"
- }
- },
- "socket": {
- "description": "HwTread lists of sockets",
- "type": "array",
- "items": {
- "type": "array",
- "items": {
- "type": "integer"
- }
- }
- },
- "memoryDomain": {
- "description": "HwTread lists of memory domains",
- "type": "array",
- "items": {
- "type": "array",
- "items": {
- "type": "integer"
- }
- }
- },
- "die": {
- "description": "HwTread lists of dies",
- "type": "array",
- "items": {
- "type": "array",
- "items": {
- "type": "integer"
- }
- }
- },
- "core": {
- "description": "HwTread lists of cores",
- "type": "array",
- "items": {
- "type": "array",
- "items": {
- "type": "integer"
- }
- }
- },
- "accelerators": {
- "type": "array",
- "description": "List of of accelerator devices",
- "items": {
- "type": "object",
- "properties": {
- "id": {
- "type": "string",
- "description": "The unique device id"
- },
- "type": {
- "type": "string",
- "description": "The accelerator type",
- "enum": [
- "Nvidia GPU",
- "AMD GPU",
- "Intel GPU"
- ]
- },
- "model": {
- "type": "string",
- "description": "The accelerator model"
- }
- },
- "required": [
- "id",
- "type",
- "model"
- ]
- }
- }
- },
- "required": [
- "node",
- "socket",
- "memoryDomain"
- ]
- }
- },
- "required": [
- "name",
- "nodes",
- "topology",
- "processorType",
- "socketsPerNode",
- "coresPerSocket",
- "threadsPerCore",
- "flopRateScalar",
- "flopRateSimd",
- "memoryBandwidth"
- ]
- },
- "minItems": 1
- }
- },
- "required": [
- "name",
- "metricConfig",
- "subClusters"
- ]
-}
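
For orientation, a hypothetical minimal cluster.json instance (all names and numbers invented) that satisfies the required fields of the schema removed above:

{
  "name": "testcluster",
  "metricConfig": [
    {
      "name": "mem_bw",
      "unit": { "base": "B/s", "prefix": "G" },
      "scope": "socket",
      "timestep": 60,
      "aggregation": "sum",
      "peak": 350,
      "normal": 100,
      "caution": 50,
      "alert": 10
    }
  ],
  "subClusters": [
    {
      "name": "main",
      "nodes": "node[01-04]",
      "processorType": "Intel Xeon",
      "socketsPerNode": 2,
      "coresPerSocket": 4,
      "threadsPerCore": 1,
      "flopRateScalar": { "unit": { "base": "F/s", "prefix": "G" }, "value": 14 },
      "flopRateSimd": { "unit": { "base": "F/s", "prefix": "G" }, "value": 112 },
      "memoryBandwidth": { "unit": { "base": "B/s", "prefix": "G" }, "value": 24 },
      "topology": {
        "node": [0, 1, 2, 3, 4, 5, 6, 7],
        "socket": [[0, 1, 2, 3], [4, 5, 6, 7]],
        "memoryDomain": [[0, 1, 2, 3], [4, 5, 6, 7]]
      }
    }
  ]
}
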
diff --git a/pkg/schema/schemas/config.schema.json b/pkg/schema/schemas/config.schema.json
deleted file mode 100644
index c844174..0000000
--- a/pkg/schema/schemas/config.schema.json
+++ /dev/null
@@ -1,498 +0,0 @@
-{
- "$schema": "http://json-schema.org/draft/2020-12/schema",
- "$id": "embedfs://config.schema.json",
- "title": "cc-backend configuration file schema",
- "type": "object",
- "properties": {
- "addr": {
- "description": "Address where the http (or https) server will listen on (for example: 'localhost:80').",
- "type": "string"
- },
- "apiAllowedIPs": {
- "description": "Addresses from which secured API endpoints can be reached",
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "user": {
- "description": "Drop root permissions once .env was read and the port was taken. Only applicable if using privileged port.",
- "type": "string"
- },
- "group": {
- "description": "Drop root permissions once .env was read and the port was taken. Only applicable if using privileged port.",
- "type": "string"
- },
- "disable-authentication": {
- "description": "Disable authentication (for everything: API, Web-UI, ...).",
- "type": "boolean"
- },
- "embed-static-files": {
- "description": "If all files in `web/frontend/public` should be served from within the binary itself (they are embedded) or not.",
- "type": "boolean"
- },
- "static-files": {
- "description": "Folder where static assets can be found, if embed-static-files is false.",
- "type": "string"
- },
- "db-driver": {
- "description": "sqlite3 or mysql (mysql will work for mariadb as well).",
- "type": "string",
- "enum": [
- "sqlite3",
- "mysql"
- ]
- },
- "db": {
- "description": "For sqlite3 a filename, for mysql a DSN in this format: https://github.com/go-sql-driver/mysql#dsn-data-source-name (Without query parameters!).",
- "type": "string"
- },
- "archive": {
- "description": "Configuration keys for job-archive",
- "type": "object",
- "properties": {
- "kind": {
- "description": "Backend type for job-archive",
- "type": "string",
- "enum": [
- "file",
- "s3"
- ]
- },
- "path": {
- "description": "Path to job archive for file backend",
- "type": "string"
- },
- "compression": {
- "description": "Setup automatic compression for jobs older than number of days",
- "type": "integer"
- },
- "retention": {
- "description": "Configuration keys for retention",
- "type": "object",
- "properties": {
- "policy": {
- "description": "Retention policy",
- "type": "string",
- "enum": [
- "none",
- "delete",
- "move"
- ]
- },
- "includeDB": {
- "description": "Also remove jobs from database",
- "type": "boolean"
- },
- "age": {
- "description": "Act on jobs with startTime older than age (in days)",
- "type": "integer"
- },
- "location": {
- "description": "The target directory for retention. Only applicable for retention move.",
- "type": "string"
- }
- },
- "required": [
- "policy"
- ]
- }
- },
- "required": [
- "kind"
- ]
- },
- "disable-archive": {
- "description": "Keep all metric data in the metric data repositories, do not write to the job-archive.",
- "type": "boolean"
- },
- "validate": {
- "description": "Validate all input json documents against json schema.",
- "type": "boolean"
- },
- "session-max-age": {
- "description": "Specifies for how long a session shall be valid as a string parsable by time.ParseDuration(). If 0 or empty, the session/token does not expire!",
- "type": "string"
- },
- "https-cert-file": {
- "description": "Filepath to SSL certificate. If also https-key-file is set use HTTPS using those certificates.",
- "type": "string"
- },
- "https-key-file": {
- "description": "Filepath to SSL key file. If also https-cert-file is set use HTTPS using those certificates.",
- "type": "string"
- },
- "redirect-http-to": {
- "description": "If not the empty string and addr does not end in :80, redirect every request incoming at port 80 to that url.",
- "type": "string"
- },
- "stop-jobs-exceeding-walltime": {
- "description": "If not zero, automatically mark jobs as stopped running X seconds longer than their walltime. Only applies if walltime is set for job.",
- "type": "integer"
- },
- "short-running-jobs-duration": {
- "description": "Do not show running jobs shorter than X seconds.",
- "type": "integer"
- },
- "emission-constant": {
- "description": ".",
- "type": "integer"
- },
- "cron-frequency": {
- "description": "Frequency of cron job workers.",
- "type": "object",
- "properties": {
- "duration-worker": {
- "description": "Duration Update Worker [Defaults to '5m']",
- "type": "string"
- },
- "footprint-worker": {
- "description": "Metric-Footprint Update Worker [Defaults to '10m']",
- "type": "string"
- }
- }
- },
- "enable-resampling": {
- "description": "Enable dynamic zoom in frontend metric plots.",
- "type": "object",
- "properties": {
- "trigger": {
- "description": "Trigger next zoom level at less than this many visible datapoints.",
- "type": "integer"
- },
- "resolutions": {
- "description": "Array of resampling target resolutions, in seconds.",
- "type": "array",
- "items": {
- "type": "integer"
- }
- }
- },
- "required": [
- "trigger",
- "resolutions"
- ]
- },
- "jwts": {
- "description": "For JWT token authentication.",
- "type": "object",
- "properties": {
- "max-age": {
- "description": "Configure how long a token is valid. As string parsable by time.ParseDuration()",
- "type": "string"
- },
- "cookieName": {
- "description": "Cookie that should be checked for a JWT token.",
- "type": "string"
- },
- "validateUser": {
- "description": "Deny login for users not in database (but defined in JWT). Overwrite roles in JWT with database roles.",
- "type": "boolean"
- },
- "trustedIssuer": {
- "description": "Issuer that should be accepted when validating external JWTs ",
- "type": "string"
- },
- "syncUserOnLogin": {
- "description": "Add non-existent user to DB at login attempt with values provided in JWT.",
- "type": "boolean"
- }
- },
- "required": [
- "max-age"
- ]
- },
- "oidc": {
- "provider": {
- "description": "",
- "type": "string"
- },
- "syncUserOnLogin": {
- "description": "",
- "type": "boolean"
- },
- "updateUserOnLogin": {
- "description": "",
- "type": "boolean"
- },
- "required": [
- "provider"
- ]
- },
- "ldap": {
- "description": "For LDAP Authentication and user synchronisation.",
- "type": "object",
- "properties": {
- "url": {
- "description": "URL of LDAP directory server.",
- "type": "string"
- },
- "user_base": {
- "description": "Base DN of user tree root.",
- "type": "string"
- },
- "search_dn": {
- "description": "DN for authenticating LDAP admin account with general read rights.",
- "type": "string"
- },
- "user_bind": {
- "description": "Expression used to authenticate users via LDAP bind. Must contain uid={username}.",
- "type": "string"
- },
- "user_filter": {
- "description": "Filter to extract users for syncing.",
- "type": "string"
- },
- "username_attr": {
- "description": "Attribute with full username. Default: gecos",
- "type": "string"
- },
- "sync_interval": {
- "description": "Interval used for syncing local user table with LDAP directory. Parsed using time.ParseDuration.",
- "type": "string"
- },
- "sync_del_old_users": {
- "description": "Delete obsolete users in database.",
- "type": "boolean"
- },
- "syncUserOnLogin": {
- "description": "Add non-existent user to DB at login attempt if user exists in Ldap directory",
- "type": "boolean"
- }
- },
- "required": [
- "url",
- "user_base",
- "search_dn",
- "user_bind",
- "user_filter"
- ]
- },
- "clusters": {
- "description": "Configuration for the clusters to be displayed.",
- "type": "array",
- "items": {
- "type": "object",
- "properties": {
- "name": {
- "description": "The name of the cluster.",
- "type": "string"
- },
- "metricDataRepository": {
- "description": "Type of the metric data repository for this cluster",
- "type": "object",
- "properties": {
- "kind": {
- "type": "string",
- "enum": [
- "influxdb",
- "prometheus",
- "cc-metric-store",
- "test"
- ]
- },
- "url": {
- "type": "string"
- },
- "token": {
- "type": "string"
- }
- },
- "required": [
- "kind",
- "url"
- ]
- },
- "filterRanges": {
- "description": "This option controls the slider ranges for the UI controls of numNodes, duration, and startTime.",
- "type": "object",
- "properties": {
- "numNodes": {
- "description": "UI slider range for number of nodes",
- "type": "object",
- "properties": {
- "from": {
- "type": "integer"
- },
- "to": {
- "type": "integer"
- }
- },
- "required": [
- "from",
- "to"
- ]
- },
- "duration": {
- "description": "UI slider range for duration",
- "type": "object",
- "properties": {
- "from": {
- "type": "integer"
- },
- "to": {
- "type": "integer"
- }
- },
- "required": [
- "from",
- "to"
- ]
- },
- "startTime": {
- "description": "UI slider range for start time",
- "type": "object",
- "properties": {
- "from": {
- "type": "string",
- "format": "date-time"
- },
- "to": {
- "type": "null"
- }
- },
- "required": [
- "from",
- "to"
- ]
- }
- },
- "required": [
- "numNodes",
- "duration",
- "startTime"
- ]
- }
- },
- "required": [
- "name",
- "metricDataRepository",
- "filterRanges"
- ],
- "minItems": 1
- }
- },
- "ui-defaults": {
- "description": "Default configuration for web UI",
- "type": "object",
- "properties": {
- "plot_general_colorBackground": {
- "description": "Color plot background according to job average threshold limits",
- "type": "boolean"
- },
- "plot_general_lineWidth": {
- "description": "Initial linewidth",
- "type": "integer"
- },
- "plot_list_jobsPerPage": {
- "description": "Jobs shown per page in job lists",
- "type": "integer"
- },
- "plot_view_plotsPerRow": {
- "description": "Number of plots per row in single job view",
- "type": "integer"
- },
- "plot_view_showPolarplot": {
- "description": "Option to toggle polar plot in single job view",
- "type": "boolean"
- },
- "plot_view_showRoofline": {
- "description": "Option to toggle roofline plot in single job view",
- "type": "boolean"
- },
- "plot_view_showStatTable": {
- "description": "Option to toggle the node statistic table in single job view",
- "type": "boolean"
- },
- "system_view_selectedMetric": {
- "description": "Initial metric shown in system view",
- "type": "string"
- },
- "job_view_showFootprint": {
- "description": "Option to toggle footprint ui in single job view",
- "type": "boolean"
- },
- "job_list_usePaging": {
- "description": "Option to switch from continous scroll to paging",
- "type": "boolean"
- },
- "analysis_view_histogramMetrics": {
- "description": "Metrics to show as job count histograms in analysis view",
- "type": "array",
- "items": {
- "type": "string",
- "minItems": 1
- }
- },
- "analysis_view_scatterPlotMetrics": {
- "description": "Initial scatter plto configuration in analysis view",
- "type": "array",
- "items": {
- "type": "array",
- "items": {
- "type": "string",
- "minItems": 2,
- "maxItems": 2
- },
- "minItems": 1
- }
- },
- "job_view_nodestats_selectedMetrics": {
- "description": "Initial metrics shown in node statistics table of single job view",
- "type": "array",
- "items": {
- "type": "string",
- "minItems": 1
- }
- },
- "job_view_selectedMetrics": {
- "description": "Initial metrics shown as plots in single job view",
- "type": "array",
- "items": {
- "type": "string",
- "minItems": 1
- }
- },
- "plot_general_colorscheme": {
- "description": "Initial color scheme",
- "type": "array",
- "items": {
- "type": "string",
- "minItems": 1
- }
- },
- "plot_list_selectedMetrics": {
- "description": "Initial metric plots shown in jobs lists",
- "type": "array",
- "items": {
- "type": "string",
- "minItems": 1
- }
- }
- },
- "required": [
- "plot_general_colorBackground",
- "plot_general_lineWidth",
- "plot_list_jobsPerPage",
- "plot_view_plotsPerRow",
- "plot_view_showPolarplot",
- "plot_view_showRoofline",
- "plot_view_showStatTable",
- "system_view_selectedMetric",
- "job_view_showFootprint",
- "job_list_usePaging",
- "analysis_view_histogramMetrics",
- "analysis_view_scatterPlotMetrics",
- "job_view_nodestats_selectedMetrics",
- "job_view_selectedMetrics",
- "plot_general_colorscheme",
- "plot_list_selectedMetrics"
- ]
- }
- },
- "required": [
- "jwts",
- "clusters",
- "apiAllowedIPs"
- ]
-}
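
Likewise, a hypothetical minimal config.json covering the three required top-level keys (jwts, clusters, apiAllowedIPs); every value here is a placeholder:

{
  "addr": "localhost:8080",
  "apiAllowedIPs": ["*"],
  "jwts": { "max-age": "2000h" },
  "clusters": [
    {
      "name": "testcluster",
      "metricDataRepository": {
        "kind": "cc-metric-store",
        "url": "http://localhost:8084"
      },
      "filterRanges": {
        "numNodes": { "from": 1, "to": 64 },
        "duration": { "from": 0, "to": 86400 },
        "startTime": { "from": "2023-01-01T00:00:00Z", "to": null }
      }
    }
  ]
}
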
diff --git a/pkg/schema/schemas/job-data.schema.json b/pkg/schema/schemas/job-data.schema.json
deleted file mode 100644
index c0c492b..0000000
--- a/pkg/schema/schemas/job-data.schema.json
+++ /dev/null
@@ -1,490 +0,0 @@
-{
- "$schema": "http://json-schema.org/draft/2020-12/schema",
- "$id": "embedfs://job-data.schema.json",
- "title": "Job metric data list",
- "description": "Collection of metric data of a HPC job",
- "type": "object",
- "properties": {
- "mem_used": {
- "description": "Memory capacity used",
- "type": "object",
- "properties": {
- "node": {
- "$ref": "embedfs://job-metric-data.schema.json"
- }
- },
- "required": [
- "node"
- ]
- },
- "flops_any": {
- "description": "Total flop rate with DP flops scaled up",
- "properties": {
- "node": {
- "$ref": "embedfs://job-metric-data.schema.json"
- },
- "socket": {
- "$ref": "embedfs://job-metric-data.schema.json"
- },
- "memoryDomain": {
- "$ref": "embedfs://job-metric-data.schema.json"
- },
- "core": {
- "$ref": "embedfs://job-metric-data.schema.json"
- },
- "hwthread": {
- "$ref": "embedfs://job-metric-data.schema.json"
- }
- },
- "minProperties": 1
- },
- "mem_bw": {
- "description": "Main memory bandwidth",
- "properties": {
- "node": {
- "$ref": "embedfs://job-metric-data.schema.json"
- },
- "socket": {
- "$ref": "embedfs://job-metric-data.schema.json"
- },
- "memoryDomain": {
- "$ref": "embedfs://job-metric-data.schema.json"
- }
- },
- "minProperties": 1
- },
- "net_bw": {
- "description": "Total fast interconnect network bandwidth",
- "type": "object",
- "properties": {
- "node": {
- "$ref": "embedfs://job-metric-data.schema.json"
- }
- },
- "required": [
- "node"
- ]
- },
- "ipc": {
- "description": "Instructions executed per cycle",
- "properties": {
- "node": {
- "$ref": "embedfs://job-metric-data.schema.json"
- },
- "socket": {
- "$ref": "embedfs://job-metric-data.schema.json"
- },
- "memoryDomain": {
- "$ref": "embedfs://job-metric-data.schema.json"
- },
- "core": {
- "$ref": "embedfs://job-metric-data.schema.json"
- },
- "hwthread": {
- "$ref": "embedfs://job-metric-data.schema.json"
- }
- },
- "minProperties": 1
- },
- "cpu_user": {
- "description": "CPU user active core utilization",
- "properties": {
- "node": {
- "$ref": "embedfs://job-metric-data.schema.json"
- },
- "socket": {
- "$ref": "embedfs://job-metric-data.schema.json"
- },
- "memoryDomain": {
- "$ref": "embedfs://job-metric-data.schema.json"
- },
- "core": {
- "$ref": "embedfs://job-metric-data.schema.json"
- },
- "hwthread": {
- "$ref": "embedfs://job-metric-data.schema.json"
- }
- },
- "minProperties": 1
- },
- "cpu_load": {
- "description": "CPU requested core utilization (load 1m)",
- "properties": {
- "node": {
- "$ref": "embedfs://job-metric-data.schema.json"
- }
- },
- "required": [
- "node"
- ]
- },
- "flops_dp": {
- "description": "Double precision flop rate",
- "properties": {
- "node": {
- "$ref": "embedfs://job-metric-data.schema.json"
- },
- "socket": {
- "$ref": "embedfs://job-metric-data.schema.json"
- },
- "memoryDomain": {
- "$ref": "embedfs://job-metric-data.schema.json"
- },
- "core": {
- "$ref": "embedfs://job-metric-data.schema.json"
- },
- "hwthread": {
- "$ref": "embedfs://job-metric-data.schema.json"
- }
- },
- "minProperties": 1
- },
- "flops_sp": {
- "description": "Single precision flops rate",
- "properties": {
- "node": {
- "$ref": "embedfs://job-metric-data.schema.json"
- },
- "socket": {
- "$ref": "embedfs://job-metric-data.schema.json"
- },
- "memoryDomain": {
- "$ref": "embedfs://job-metric-data.schema.json"
- },
- "core": {
- "$ref": "embedfs://job-metric-data.schema.json"
- },
- "hwthread": {
- "$ref": "embedfs://job-metric-data.schema.json"
- }
- },
- "minProperties": 1
- },
- "vectorization_ratio": {
- "description": "Fraction of arithmetic instructions using SIMD instructions",
- "properties": {
- "node": {
- "$ref": "embedfs://job-metric-data.schema.json"
- },
- "socket": {
- "$ref": "embedfs://job-metric-data.schema.json"
- },
- "memoryDomain": {
- "$ref": "embedfs://job-metric-data.schema.json"
- },
- "core": {
- "$ref": "embedfs://job-metric-data.schema.json"
- },
- "hwthread": {
- "$ref": "embedfs://job-metric-data.schema.json"
- }
- },
- "minProperties": 1
- },
- "cpu_power": {
- "description": "CPU power consumption",
- "properties": {
- "node": {
- "$ref": "embedfs://job-metric-data.schema.json"
- },
- "socket": {
- "$ref": "embedfs://job-metric-data.schema.json"
- }
- },
- "minProperties": 1
- },
- "mem_power": {
- "description": "Memory power consumption",
- "properties": {
- "node": {
- "$ref": "embedfs://job-metric-data.schema.json"
- },
- "socket": {
- "$ref": "embedfs://job-metric-data.schema.json"
- }
- },
- "minProperties": 1
- },
- "acc_utilization": {
- "description": "GPU utilization",
- "properties": {
- "accelerator": {
- "$ref": "embedfs://job-metric-data.schema.json"
- }
- },
- "required": [
- "accelerator"
- ]
- },
- "acc_mem_used": {
- "description": "GPU memory capacity used",
- "properties": {
- "accelerator": {
- "$ref": "embedfs://job-metric-data.schema.json"
- }
- },
- "required": [
- "accelerator"
- ]
- },
- "acc_power": {
- "description": "GPU power consumption",
- "properties": {
- "accelerator": {
- "$ref": "embedfs://job-metric-data.schema.json"
- }
- },
- "required": [
- "accelerator"
- ]
- },
- "clock": {
- "description": "Average core frequency",
- "properties": {
- "node": {
- "$ref": "embedfs://job-metric-data.schema.json"
- },
- "socket": {
- "$ref": "embedfs://job-metric-data.schema.json"
- },
- "memoryDomain": {
- "$ref": "embedfs://job-metric-data.schema.json"
- },
- "core": {
- "$ref": "embedfs://job-metric-data.schema.json"
- },
- "hwthread": {
- "$ref": "embedfs://job-metric-data.schema.json"
- }
- },
- "minProperties": 1
- },
- "eth_read_bw": {
- "description": "Ethernet read bandwidth",
- "properties": {
- "node": {
- "$ref": "embedfs://job-metric-data.schema.json"
- }
- },
- "required": [
- "node"
- ]
- },
- "eth_write_bw": {
- "description": "Ethernet write bandwidth",
- "properties": {
- "node": {
- "$ref": "embedfs://job-metric-data.schema.json"
- }
- },
- "required": [
- "node"
- ]
- },
- "filesystems": {
- "description": "Array of filesystems",
- "type": "array",
- "items": {
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "type": {
- "type": "string",
- "enum": [
- "nfs",
- "lustre",
- "gpfs",
- "nvme",
- "ssd",
- "hdd",
- "beegfs"
- ]
- },
- "read_bw": {
- "description": "File system read bandwidth",
- "properties": {
- "node": {
- "$ref": "embedfs://job-metric-data.schema.json"
- }
- },
- "required": [
- "node"
- ]
- },
- "write_bw": {
- "description": "File system write bandwidth",
- "properties": {
- "node": {
- "$ref": "embedfs://job-metric-data.schema.json"
- }
- },
- "required": [
- "node"
- ]
- },
- "read_req": {
- "description": "File system read requests",
- "properties": {
- "node": {
- "$ref": "embedfs://job-metric-data.schema.json"
- }
- },
- "required": [
- "node"
- ]
- },
- "write_req": {
- "description": "File system write requests",
- "properties": {
- "node": {
- "$ref": "embedfs://job-metric-data.schema.json"
- }
- },
- "required": [
- "node"
- ]
- },
- "inodes": {
- "description": "File system write requests",
- "properties": {
- "node": {
- "$ref": "embedfs://job-metric-data.schema.json"
- }
- },
- "required": [
- "node"
- ]
- },
- "accesses": {
- "description": "File system open and close",
- "properties": {
- "node": {
- "$ref": "embedfs://job-metric-data.schema.json"
- }
- },
- "required": [
- "node"
- ]
- },
- "fsync": {
- "description": "File system fsync",
- "properties": {
- "node": {
- "$ref": "embedfs://job-metric-data.schema.json"
- }
- },
- "required": [
- "node"
- ]
- },
- "create": {
- "description": "File system create",
- "properties": {
- "node": {
- "$ref": "embedfs://job-metric-data.schema.json"
- }
- },
- "required": [
- "node"
- ]
- },
- "open": {
- "description": "File system open",
- "properties": {
- "node": {
- "$ref": "embedfs://job-metric-data.schema.json"
- }
- },
- "required": [
- "node"
- ]
- },
- "close": {
- "description": "File system close",
- "properties": {
- "node": {
- "$ref": "embedfs://job-metric-data.schema.json"
- }
- },
- "required": [
- "node"
- ]
- },
- "seek": {
- "description": "File system seek",
- "properties": {
- "node": {
- "$ref": "embedfs://job-metric-data.schema.json"
- }
- },
- "required": [
- "node"
- ]
- }
- },
- "required": [
- "name",
- "type",
- "read_bw",
- "write_bw"
- ]
- },
- "minItems": 1
- },
- "ic_rcv_packets": {
- "description": "Network interconnect read packets",
- "properties": {
- "node": {
- "$ref": "embedfs://job-metric-data.schema.json"
- }
- },
- "required": [
- "node"
- ]
- },
- "ic_send_packets": {
- "description": "Network interconnect send packet",
- "properties": {
- "node": {
- "$ref": "embedfs://job-metric-data.schema.json"
- }
- },
- "required": [
- "node"
- ]
- },
- "ic_read_bw": {
- "description": "Network interconnect read bandwidth",
- "properties": {
- "node": {
- "$ref": "embedfs://job-metric-data.schema.json"
- }
- },
- "required": [
- "node"
- ]
- },
- "ic_write_bw": {
- "description": "Network interconnect write bandwidth",
- "properties": {
- "node": {
- "$ref": "embedfs://job-metric-data.schema.json"
- }
- },
- "required": [
- "node"
- ]
- }
- },
- "required": [
- "cpu_user",
- "cpu_load",
- "mem_used",
- "flops_any",
- "mem_bw",
- "net_bw",
- "filesystems"
- ]
-}
diff --git a/pkg/schema/schemas/job-meta.schema.json b/pkg/schema/schemas/job-meta.schema.json
deleted file mode 100644
index db7475c..0000000
--- a/pkg/schema/schemas/job-meta.schema.json
+++ /dev/null
@@ -1,351 +0,0 @@
-{
- "$schema": "http://json-schema.org/draft/2020-12/schema",
- "$id": "embedfs://job-meta.schema.json",
- "title": "Job meta data",
- "description": "Meta data information of a HPC job",
- "type": "object",
- "properties": {
- "jobId": {
- "description": "The unique identifier of a job",
- "type": "integer"
- },
- "user": {
- "description": "The unique identifier of a user",
- "type": "string"
- },
- "project": {
- "description": "The unique identifier of a project",
- "type": "string"
- },
- "cluster": {
- "description": "The unique identifier of a cluster",
- "type": "string"
- },
- "subCluster": {
- "description": "The unique identifier of a sub cluster",
- "type": "string"
- },
- "partition": {
- "description": "The Slurm partition to which the job was submitted",
- "type": "string"
- },
- "arrayJobId": {
- "description": "The unique identifier of an array job",
- "type": "integer"
- },
- "numNodes": {
- "description": "Number of nodes used",
- "type": "integer",
- "exclusiveMinimum": 0
- },
- "numHwthreads": {
- "description": "Number of HWThreads used",
- "type": "integer",
- "exclusiveMinimum": 0
- },
- "numAcc": {
- "description": "Number of accelerators used",
- "type": "integer",
- "exclusiveMinimum": 0
- },
- "exclusive": {
- "description": "Specifies how nodes are shared. 0 - Shared among multiple jobs of multiple users, 1 - Job exclusive, 2 - Shared among multiple jobs of same user",
- "type": "integer",
- "minimum": 0,
- "maximum": 2
- },
- "monitoringStatus": {
- "description": "State of monitoring system during job run",
- "type": "integer"
- },
- "smt": {
- "description": "SMT threads used by job",
- "type": "integer"
- },
- "walltime": {
- "description": "Requested walltime of job in seconds",
- "type": "integer",
- "exclusiveMinimum": 0
- },
- "jobState": {
- "description": "Final state of job",
- "type": "string",
- "enum": [
- "completed",
- "failed",
- "cancelled",
- "stopped",
- "out_of_memory",
- "timeout"
- ]
- },
- "startTime": {
- "description": "Start epoch time stamp in seconds",
- "type": "integer",
- "exclusiveMinimum": 0
- },
- "duration": {
- "description": "Duration of job in seconds",
- "type": "integer",
- "exclusiveMinimum": 0
- },
- "resources": {
- "description": "Resources used by job",
- "type": "array",
- "items": {
- "type": "object",
- "properties": {
- "hostname": {
- "type": "string"
- },
- "hwthreads": {
- "type": "array",
- "description": "List of OS processor ids",
- "items": {
- "type": "integer"
- }
- },
- "accelerators": {
- "type": "array",
- "description": "List of of accelerator device ids",
- "items": {
- "type": "string"
- }
- },
- "configuration": {
- "type": "string",
- "description": "The configuration options of the node"
- }
- },
- "required": [
- "hostname"
- ],
- "minItems": 1
- }
- },
- "metaData": {
- "description": "Additional information about the job",
- "type": "object",
- "properties": {
- "jobScript": {
- "type": "string",
- "description": "The batch script of the job"
- },
- "jobName": {
- "type": "string",
- "description": "Slurm Job name"
- },
- "slurmInfo": {
- "type": "string",
- "description": "Additional slurm infos as show by scontrol show job"
- }
- }
- },
- "tags": {
- "description": "List of tags",
- "type": "array",
- "items": {
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "type": {
- "type": "string"
- }
- },
- "required": [
- "name",
- "type"
- ]
- },
- "uniqueItems": true
- },
- "statistics": {
- "description": "Job statistic data",
- "type": "object",
- "properties": {
- "mem_used": {
- "description": "Memory capacity used (required)",
- "$ref": "embedfs://job-metric-statistics.schema.json"
- },
- "cpu_load": {
- "description": "CPU requested core utilization (load 1m) (required)",
- "$ref": "embedfs://job-metric-statistics.schema.json"
- },
- "flops_any": {
- "description": "Total flop rate with DP flops scaled up (required)",
- "$ref": "embedfs://job-metric-statistics.schema.json"
- },
- "mem_bw": {
- "description": "Main memory bandwidth (required)",
- "$ref": "embedfs://job-metric-statistics.schema.json"
- },
- "net_bw": {
- "description": "Total fast interconnect network bandwidth (required)",
- "$ref": "embedfs://job-metric-statistics.schema.json"
- },
- "file_bw": {
- "description": "Total file IO bandwidth (required)",
- "$ref": "embedfs://job-metric-statistics.schema.json"
- },
- "ipc": {
- "description": "Instructions executed per cycle",
- "$ref": "embedfs://job-metric-statistics.schema.json"
- },
- "cpu_user": {
- "description": "CPU user active core utilization",
- "$ref": "embedfs://job-metric-statistics.schema.json"
- },
- "flops_dp": {
- "description": "Double precision flop rate",
- "$ref": "embedfs://job-metric-statistics.schema.json"
- },
- "flops_sp": {
- "description": "Single precision flops rate",
- "$ref": "embedfs://job-metric-statistics.schema.json"
- },
- "rapl_power": {
- "description": "CPU power consumption",
- "$ref": "embedfs://job-metric-statistics.schema.json"
- },
- "acc_used": {
- "description": "GPU utilization",
- "$ref": "embedfs://job-metric-statistics.schema.json"
- },
- "acc_mem_used": {
- "description": "GPU memory capacity used",
- "$ref": "embedfs://job-metric-statistics.schema.json"
- },
- "acc_power": {
- "description": "GPU power consumption",
- "$ref": "embedfs://job-metric-statistics.schema.json"
- },
- "clock": {
- "description": "Average core frequency",
- "$ref": "embedfs://job-metric-statistics.schema.json"
- },
- "eth_read_bw": {
- "description": "Ethernet read bandwidth",
- "$ref": "embedfs://job-metric-statistics.schema.json"
- },
- "eth_write_bw": {
- "description": "Ethernet write bandwidth",
- "$ref": "embedfs://job-metric-statistics.schema.json"
- },
- "ic_rcv_packets": {
- "description": "Network interconnect read packets",
- "$ref": "embedfs://job-metric-statistics.schema.json"
- },
- "ic_send_packets": {
- "description": "Network interconnect send packet",
- "$ref": "embedfs://job-metric-statistics.schema.json"
- },
- "ic_read_bw": {
- "description": "Network interconnect read bandwidth",
- "$ref": "embedfs://job-metric-statistics.schema.json"
- },
- "ic_write_bw": {
- "description": "Network interconnect write bandwidth",
- "$ref": "embedfs://job-metric-statistics.schema.json"
- },
- "filesystems": {
- "description": "Array of filesystems",
- "type": "array",
- "items": {
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "type": {
- "type": "string",
- "enum": [
- "nfs",
- "lustre",
- "gpfs",
- "nvme",
- "ssd",
- "hdd",
- "beegfs"
- ]
- },
- "read_bw": {
- "description": "File system read bandwidth",
- "$ref": "embedfs://job-metric-statistics.schema.json"
- },
- "write_bw": {
- "description": "File system write bandwidth",
- "$ref": "embedfs://job-metric-statistics.schema.json"
- },
- "read_req": {
- "description": "File system read requests",
- "$ref": "embedfs://job-metric-statistics.schema.json"
- },
- "write_req": {
- "description": "File system write requests",
- "$ref": "embedfs://job-metric-statistics.schema.json"
- },
- "inodes": {
- "description": "File system write requests",
- "$ref": "embedfs://job-metric-statistics.schema.json"
- },
- "accesses": {
- "description": "File system open and close",
- "$ref": "embedfs://job-metric-statistics.schema.json"
- },
- "fsync": {
- "description": "File system fsync",
- "$ref": "embedfs://job-metric-statistics.schema.json"
- },
- "create": {
- "description": "File system create",
- "$ref": "embedfs://job-metric-statistics.schema.json"
- },
- "open": {
- "description": "File system open",
- "$ref": "embedfs://job-metric-statistics.schema.json"
- },
- "close": {
- "description": "File system close",
- "$ref": "embedfs://job-metric-statistics.schema.json"
- },
- "seek": {
- "description": "File system seek",
- "$ref": "embedfs://job-metric-statistics.schema.json"
- }
- },
- "required": [
- "name",
- "type",
- "read_bw",
- "write_bw"
- ]
- },
- "minItems": 1
- }
- },
- "required": [
- "cpu_user",
- "cpu_load",
- "mem_used",
- "flops_any",
- "mem_bw"
- ]
- }
- },
- "required": [
- "jobId",
- "user",
- "project",
- "cluster",
- "subCluster",
- "numNodes",
- "exclusive",
- "startTime",
- "jobState",
- "duration",
- "resources",
- "statistics"
- ]
-}
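
A hypothetical minimal meta.json instance covering only the required fields; the user and project values follow the example tags in the removed Go structs:

{
  "jobId": 123000,
  "user": "abcd100h",
  "project": "abcd200",
  "cluster": "testcluster",
  "subCluster": "main",
  "numNodes": 2,
  "exclusive": 1,
  "startTime": 1649723812,
  "duration": 43200,
  "jobState": "completed",
  "resources": [
    { "hostname": "node01" },
    { "hostname": "node02" }
  ],
  "statistics": {
    "cpu_user": { "unit": { "base": "" }, "avg": 80, "min": 50, "max": 95 },
    "cpu_load": { "unit": { "base": "" }, "avg": 7.5, "min": 4, "max": 8 },
    "mem_used": { "unit": { "base": "B", "prefix": "G" }, "avg": 30, "min": 20, "max": 40 },
    "flops_any": { "unit": { "base": "F/s", "prefix": "G" }, "avg": 100, "min": 0, "max": 180 },
    "mem_bw": { "unit": { "base": "B/s", "prefix": "G" }, "avg": 50, "min": 10, "max": 90 }
  }
}
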
diff --git a/pkg/schema/schemas/job-metric-data.schema.json b/pkg/schema/schemas/job-metric-data.schema.json
deleted file mode 100644
index ad499bf..0000000
--- a/pkg/schema/schemas/job-metric-data.schema.json
+++ /dev/null
@@ -1,216 +0,0 @@
-{
- "$schema": "http://json-schema.org/draft/2020-12/schema",
- "$id": "embedfs://job-metric-data.schema.json",
- "title": "Job metric data",
- "description": "Metric data of a HPC job",
- "type": "object",
- "properties": {
- "unit": {
- "description": "Metric unit",
- "$ref": "embedfs://unit.schema.json"
- },
- "timestep": {
- "description": "Measurement interval in seconds",
- "type": "integer"
- },
- "thresholds": {
- "description": "Metric thresholds for specific system",
- "type": "object",
- "properties": {
- "peak": {
- "type": "number"
- },
- "normal": {
- "type": "number"
- },
- "caution": {
- "type": "number"
- },
- "alert": {
- "type": "number"
- }
- }
- },
- "statisticsSeries": {
- "type": "object",
- "description": "Statistics series across topology",
- "properties": {
- "min": {
- "type": "array",
- "items": {
- "type": "number",
- "minimum": 0
- },
- "minItems": 3
- },
- "max": {
- "type": "array",
- "items": {
- "type": "number",
- "minimum": 0
- },
- "minItems": 3
- },
- "mean": {
- "type": "array",
- "items": {
- "type": "number",
- "minimum": 0
- },
- "minItems": 3
- },
- "percentiles": {
- "type": "object",
- "properties": {
- "10": {
- "type": "array",
- "items": {
- "type": "number",
- "minimum": 0
- },
- "minItems": 3
- },
- "20": {
- "type": "array",
- "items": {
- "type": "number",
- "minimum": 0
- },
- "minItems": 3
- },
- "30": {
- "type": "array",
- "items": {
- "type": "number",
- "minimum": 0
- },
- "minItems": 3
- },
- "40": {
- "type": "array",
- "items": {
- "type": "number",
- "minimum": 0
- },
- "minItems": 3
- },
- "50": {
- "type": "array",
- "items": {
- "type": "number",
- "minimum": 0
- },
- "minItems": 3
- },
- "60": {
- "type": "array",
- "items": {
- "type": "number",
- "minimum": 0
- },
- "minItems": 3
- },
- "70": {
- "type": "array",
- "items": {
- "type": "number",
- "minimum": 0
- },
- "minItems": 3
- },
- "80": {
- "type": "array",
- "items": {
- "type": "number",
- "minimum": 0
- },
- "minItems": 3
- },
- "90": {
- "type": "array",
- "items": {
- "type": "number",
- "minimum": 0
- },
- "minItems": 3
- },
- "25": {
- "type": "array",
- "items": {
- "type": "number",
- "minimum": 0
- },
- "minItems": 3
- },
- "75": {
- "type": "array",
- "items": {
- "type": "number",
- "minimum": 0
- },
- "minItems": 3
- }
- }
- }
- }
- },
- "series": {
- "type": "array",
- "items": {
- "type": "object",
- "properties": {
- "hostname": {
- "type": "string"
- },
- "id": {
- "type": "string"
- },
- "statistics": {
- "type": "object",
- "description": "Statistics across time dimension",
- "properties": {
- "avg": {
- "description": "Series average",
- "type": "number",
- "minimum": 0
- },
- "min": {
- "description": "Series minimum",
- "type": "number",
- "minimum": 0
- },
- "max": {
- "description": "Series maximum",
- "type": "number",
- "minimum": 0
- }
- },
- "required": [
- "avg",
- "min",
- "max"
- ]
- },
- "data": {
- "type": "array",
- "contains": {
- "type": "number",
- "minimum": 0
- },
- "minItems": 1
- }
- },
- "required": [
- "hostname",
- "statistics",
- "data"
- ]
- }
- }
- },
- "required": [
- "unit",
- "timestep",
- "series"
- ]
-}
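
And a hypothetical minimal instance of a single node-scope metric against this schema, with invented numbers:

{
  "unit": { "base": "B/s", "prefix": "G" },
  "timestep": 60,
  "series": [
    {
      "hostname": "node01",
      "statistics": { "avg": 42, "min": 10, "max": 70 },
      "data": [10, 42, 70]
    }
  ]
}
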
diff --git a/pkg/schema/schemas/job-metric-statistics.schema.json b/pkg/schema/schemas/job-metric-statistics.schema.json
deleted file mode 100644
index f753ed3..0000000
--- a/pkg/schema/schemas/job-metric-statistics.schema.json
+++ /dev/null
@@ -1,34 +0,0 @@
-{
- "$schema": "http://json-schema.org/draft/2020-12/schema",
- "$id": "embedfs://job-metric-statistics.schema.json",
- "title": "Job statistics",
- "description": "Format specification for job metric statistics",
- "type": "object",
- "properties": {
- "unit": {
- "description": "Metric unit",
- "$ref": "embedfs://unit.schema.json"
- },
- "avg": {
- "description": "Job metric average",
- "type": "number",
- "minimum": 0
- },
- "min": {
- "description": "Job metric minimum",
- "type": "number",
- "minimum": 0
- },
- "max": {
- "description": "Job metric maximum",
- "type": "number",
- "minimum": 0
- }
- },
- "required": [
- "unit",
- "avg",
- "min",
- "max"
- ]
-}
diff --git a/pkg/schema/schemas/unit.schema.json b/pkg/schema/schemas/unit.schema.json
deleted file mode 100644
index a8a2b4d..0000000
--- a/pkg/schema/schemas/unit.schema.json
+++ /dev/null
@@ -1,41 +0,0 @@
-{
- "$schema": "http://json-schema.org/draft/2020-12/schema",
- "$id": "embedfs://unit.schema.json",
- "title": "Metric unit",
- "description": "Format specification for job metric units",
- "type": "object",
- "properties": {
- "base": {
- "description": "Metric base unit",
- "type": "string",
- "enum": [
- "B",
- "F",
- "B/s",
- "F/s",
- "CPI",
- "IPC",
- "Hz",
- "W",
- "J",
- "°C",
- ""
- ]
- },
- "prefix": {
- "description": "Unit prefix",
- "type": "string",
- "enum": [
- "K",
- "M",
- "G",
- "T",
- "P",
- "E"
- ]
- }
- },
- "required": [
- "base"
- ]
-}
diff --git a/pkg/schema/user.go b/pkg/schema/user.go
deleted file mode 100644
index 9b62cfa..0000000
--- a/pkg/schema/user.go
+++ /dev/null
@@ -1,204 +0,0 @@
-// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
-// Use of this source code is governed by a MIT-style
-// license that can be found in the LICENSE file.
-package schema
-
-import (
- "fmt"
- "strings"
-)
-
-type Role int
-
-const (
- RoleAnonymous Role = iota
- RoleApi
- RoleUser
- RoleManager
- RoleSupport
- RoleAdmin
- RoleError
-)
-
-type AuthSource int
-
-const (
- AuthViaLocalPassword AuthSource = iota
- AuthViaLDAP
- AuthViaToken
- AuthViaOIDC
- AuthViaAll
-)
-
-type AuthType int
-
-const (
- AuthToken AuthType = iota
- AuthSession
-)
-
-type User struct {
- Username string `json:"username"`
- Password string `json:"-"`
- Name string `json:"name"`
- Email string `json:"email"`
- Roles []string `json:"roles"`
- Projects []string `json:"projects"`
- AuthType AuthType `json:"authType"`
- AuthSource AuthSource `json:"authSource"`
-}
-
-func (u *User) HasProject(project string) bool {
- for _, p := range u.Projects {
- if p == project {
- return true
- }
- }
- return false
-}
-
-func GetRoleString(roleInt Role) string {
- return [6]string{"anonymous", "api", "user", "manager", "support", "admin"}[roleInt]
-}
-
-func getRoleEnum(roleStr string) Role {
- switch strings.ToLower(roleStr) {
- case "admin":
- return RoleAdmin
- case "support":
- return RoleSupport
- case "manager":
- return RoleManager
- case "user":
- return RoleUser
- case "api":
- return RoleApi
- case "anonymous":
- return RoleAnonymous
- default:
- return RoleError
- }
-}
-
-func IsValidRole(role string) bool {
- return getRoleEnum(role) != RoleError
-}
-
-// Check if User has SPECIFIED role AND role is VALID
-func (u *User) HasValidRole(role string) (hasRole bool, isValid bool) {
- if IsValidRole(role) {
- for _, r := range u.Roles {
- if r == role {
- return true, true
- }
- }
- return false, true
- }
- return false, false
-}
-
-// Check if User has SPECIFIED role
-func (u *User) HasRole(role Role) bool {
- for _, r := range u.Roles {
- if r == GetRoleString(role) {
- return true
- }
- }
- return false
-}
-
-// Check if User has ANY of the listed roles
-func (u *User) HasAnyRole(queryroles []Role) bool {
- for _, ur := range u.Roles {
- for _, qr := range queryroles {
- if ur == GetRoleString(qr) {
- return true
- }
- }
- }
- return false
-}
-
-// Check if User has ALL of the listed roles
-func (u *User) HasAllRoles(queryroles []Role) bool {
- target := len(queryroles)
- matches := 0
- for _, ur := range u.Roles {
- for _, qr := range queryroles {
- if ur == GetRoleString(qr) {
- matches += 1
- break
- }
- }
- }
-
-	return matches == target
-}
-
-// Check if User has NONE of the listed roles
-func (u *User) HasNotRoles(queryroles []Role) bool {
- matches := 0
- for _, ur := range u.Roles {
- for _, qr := range queryroles {
- if ur == GetRoleString(qr) {
- matches += 1
- break
- }
- }
- }
-
-	return matches == 0
-}
-
-// Called by API endpoint '/roles/' from frontend: Only required for admin config -> Check Admin Role
-func GetValidRoles(user *User) ([]string, error) {
- var vals []string
- if user.HasRole(RoleAdmin) {
- for i := RoleApi; i < RoleError; i++ {
- vals = append(vals, GetRoleString(i))
- }
- return vals, nil
- }
-
- return vals, fmt.Errorf("%s: only admins are allowed to fetch a list of roles", user.Username)
-}
-
-// Called by routerConfig web.page setup in backend: Only requires known user
-func GetValidRolesMap(user *User) (map[string]Role, error) {
- named := make(map[string]Role)
- if user.HasNotRoles([]Role{RoleAnonymous}) {
- for i := RoleApi; i < RoleError; i++ {
- named[GetRoleString(i)] = i
- }
- return named, nil
- }
- return named, fmt.Errorf("only known users are allowed to fetch a list of roles")
-}
-
-// Find highest role
-func (u *User) GetAuthLevel() Role {
- if u.HasRole(RoleAdmin) {
- return RoleAdmin
- } else if u.HasRole(RoleSupport) {
- return RoleSupport
- } else if u.HasRole(RoleManager) {
- return RoleManager
- } else if u.HasRole(RoleUser) {
- return RoleUser
- } else if u.HasRole(RoleApi) {
- return RoleApi
- } else if u.HasRole(RoleAnonymous) {
- return RoleAnonymous
- } else {
- return RoleError
- }
-}
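The two-value return of HasValidRole above is easy to misuse, so a short usage sketch follows; the guard function itself is hypothetical, and only the User methods and Role constants come from the deleted file:

// Hypothetical guard composed from the removed helpers.
func mayManageProject(u *User, project string) bool {
	// First result: the user holds the role; second: the role name is valid.
	hasRole, roleIsValid := u.HasValidRole("manager")
	if roleIsValid && hasRole {
		return u.HasProject(project)
	}
	// Admins bypass the per-project membership check entirely.
	return u.HasRole(RoleAdmin)
}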
diff --git a/pkg/schema/user_test.go b/pkg/schema/user_test.go
deleted file mode 100644
index ce3ab3b..0000000
--- a/pkg/schema/user_test.go
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
-// Use of this source code is governed by a MIT-style
-// license that can be found in the LICENSE file.
-package schema
-
-import (
- "testing"
-)
-
-func TestHasValidRole(t *testing.T) {
- u := User{Username: "testuser", Roles: []string{"user"}}
-
- exists, _ := u.HasValidRole("user")
-
- if !exists {
- t.Fatalf(`User{Roles: ["user"]} -> HasValidRole("user"): EXISTS = %v, expected 'true'.`, exists)
- }
-}
-
-func TestHasNotValidRole(t *testing.T) {
- u := User{Username: "testuser", Roles: []string{"user"}}
-
- exists, _ := u.HasValidRole("manager")
-
- if exists {
- t.Fatalf(`User{Roles: ["user"]} -> HasValidRole("manager"): EXISTS = %v, expected 'false'.`, exists)
- }
-}
-
-func TestHasInvalidRole(t *testing.T) {
- u := User{Username: "testuser", Roles: []string{"user"}}
-
- _, valid := u.HasValidRole("invalid")
-
- if valid {
- t.Fatalf(`User{Roles: ["user"]} -> HasValidRole("invalid"): VALID = %v, expected 'false'.`, valid)
- }
-}
-
-func TestHasNotInvalidRole(t *testing.T) {
- u := User{Username: "testuser", Roles: []string{"user"}}
-
- _, valid := u.HasValidRole("user")
-
- if !valid {
- t.Fatalf(`User{Roles: ["user"]} -> HasValidRole("user"): VALID = %v, expected 'true'.`, valid)
- }
-}
-
-func TestHasRole(t *testing.T) {
- u := User{Username: "testuser", Roles: []string{"user"}}
-
- exists := u.HasRole(RoleUser)
-
- if !exists {
- t.Fatalf(`User{Roles: ["user"]} -> HasRole(RoleUser): EXISTS = %v, expected 'true'.`, exists)
- }
-}
-
-func TestHasNotRole(t *testing.T) {
- u := User{Username: "testuser", Roles: []string{"user"}}
-
- exists := u.HasRole(RoleManager)
-
- if exists {
- t.Fatalf(`User{Roles: ["user"]} -> HasRole(RoleManager): EXISTS = %v, expected 'false'.`, exists)
- }
-}
-
-func TestHasAnyRole(t *testing.T) {
- u := User{Username: "testuser", Roles: []string{"user", "manager"}}
-
- result := u.HasAnyRole([]Role{RoleManager, RoleSupport, RoleAdmin})
-
- if !result {
- t.Fatalf(`User{Roles: ["user", "manager"]} -> HasAnyRole([]Role{RoleManager, RoleSupport, RoleAdmin}): RESULT = %v, expected 'true'.`, result)
- }
-}
-
-func TestHasNotAnyRole(t *testing.T) {
- u := User{Username: "testuser", Roles: []string{"user", "manager"}}
-
- result := u.HasAnyRole([]Role{RoleSupport, RoleAdmin})
-
- if result {
- t.Fatalf(`User{Roles: ["user", "manager"]} -> HasAllRoles([]Role{RoleSupport, RoleAdmin}): RESULT = %v, expected 'false'.`, result)
- }
-}
-
-func TestHasAllRoles(t *testing.T) {
- u := User{Username: "testuser", Roles: []string{"user", "manager", "support"}}
-
- result := u.HasAllRoles([]Role{RoleUser, RoleManager, RoleSupport})
-
- if !result {
- t.Fatalf(`User{Roles: ["user", "manager", "support"]} -> HasAllRoles([]Role{RoleUser, RoleManager, RoleSupport}): RESULT = %v, expected 'true'.`, result)
- }
-}
-
-func TestHasNotAllRoles(t *testing.T) {
- u := User{Username: "testuser", Roles: []string{"user", "manager"}}
-
- result := u.HasAllRoles([]Role{RoleUser, RoleManager, RoleSupport})
-
- if result {
- t.Fatalf(`User{Roles: ["user", "manager"]} -> HasAllRoles([]Role{RoleUser, RoleManager, RoleSupport}): RESULT = %v, expected 'false'.`, result)
- }
-}
-
-func TestHasNotRoles(t *testing.T) {
- u := User{Username: "testuser", Roles: []string{"user", "manager"}}
-
- result := u.HasNotRoles([]Role{RoleSupport, RoleAdmin})
-
- if !result {
- t.Fatalf(`User{Roles: ["user", "manager"]} -> HasNotRoles([]Role{RoleSupport, RoleAdmin}): RESULT = %v, expected 'true'.`, result)
- }
-}
-
-func TestHasAllNotRoles(t *testing.T) {
- u := User{Username: "testuser", Roles: []string{"user", "manager"}}
-
- result := u.HasNotRoles([]Role{RoleUser, RoleManager})
-
- if result {
- t.Fatalf(`User{Roles: ["user", "manager"]} -> HasNotRoles([]Role{RoleUser, RoleManager}): RESULT = %v, expected 'false'.`, result)
- }
-}
diff --git a/pkg/schema/validate.go b/pkg/schema/validate.go
deleted file mode 100644
index 3511936..0000000
--- a/pkg/schema/validate.go
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
-// Use of this source code is governed by a MIT-style
-// license that can be found in the LICENSE file.
-package schema
-
-import (
- "embed"
- "encoding/json"
- "fmt"
- "io"
- "path/filepath"
- "strings"
-
- "github.com/ClusterCockpit/cc-backend/pkg/log"
- "github.com/santhosh-tekuri/jsonschema/v5"
-)
-
-type Kind int
-
-const (
- Meta Kind = iota + 1
- Data
- Config
- ClusterCfg
-)
-
-//go:embed schemas/*
-var schemaFiles embed.FS
-
-func Validate(k Kind, r io.Reader) (err error) {
- jsonschema.Loaders["embedfs"] = func(s string) (io.ReadCloser, error) {
- f := filepath.Join("schemas", strings.Split(s, "//")[1])
- return schemaFiles.Open(f)
- }
- var s *jsonschema.Schema
-
- switch k {
- case Meta:
- s, err = jsonschema.Compile("embedfs://job-meta.schema.json")
- case Data:
- s, err = jsonschema.Compile("embedfs://job-data.schema.json")
- case ClusterCfg:
- s, err = jsonschema.Compile("embedfs://cluster.schema.json")
- case Config:
- s, err = jsonschema.Compile("embedfs://config.schema.json")
- default:
-		return fmt.Errorf("SCHEMA/VALIDATE > unknown schema kind: %#v", k)
- }
-
- if err != nil {
- log.Errorf("Error while compiling json schema for kind '%#v'", k)
- return err
- }
-
- var v interface{}
- if err := json.NewDecoder(r).Decode(&v); err != nil {
- log.Warnf("Error while decoding raw json schema: %#v", err)
- return err
- }
-
- if err = s.Validate(v); err != nil {
- return fmt.Errorf("SCHEMA/VALIDATE > %#v", err)
- }
-
- return nil
-}
diff --git a/pkg/schema/validate_test.go b/pkg/schema/validate_test.go
deleted file mode 100644
index f4943f0..0000000
--- a/pkg/schema/validate_test.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
-// Use of this source code is governed by a MIT-style
-// license that can be found in the LICENSE file.
-package schema
-
-import (
- "bytes"
- "testing"
-)
-
-func TestValidateConfig(t *testing.T) {
- json := []byte(`{
- "jwts": {
- "max-age": "2m"
- },
- "apiAllowedIPs": [
- "*"
- ],
- "clusters": [
- {
- "name": "testcluster",
- "metricDataRepository": {
- "kind": "cc-metric-store",
- "url": "localhost:8082"},
- "filterRanges": {
- "numNodes": { "from": 1, "to": 64 },
- "duration": { "from": 0, "to": 86400 },
- "startTime": { "from": "2022-01-01T00:00:00Z", "to": null }
- }}]
-}`)
-
- if err := Validate(Config, bytes.NewReader(json)); err != nil {
- t.Errorf("Error is not nil! %v", err)
- }
-}
-
-func TestValidateJobMeta(t *testing.T) {
-}
-
-func TestValidateCluster(t *testing.T) {
- json := []byte(`{
- "name": "emmy",
- "subClusters": [
- {
- "name": "main",
- "processorType": "Intel IvyBridge",
- "socketsPerNode": 2,
- "coresPerSocket": 10,
- "threadsPerCore": 2,
- "flopRateScalar": {
- "unit": {
- "prefix": "G",
- "base": "F/s"
- },
- "value": 14
- },
- "flopRateSimd": {
- "unit": {
- "prefix": "G",
- "base": "F/s"
- },
- "value": 112
- },
- "memoryBandwidth": {
- "unit": {
- "prefix": "G",
- "base": "B/s"
- },
- "value": 24
- },
- "numberOfNodes": 70,
- "nodes": "w11[27-45,49-63,69-72]",
- "topology": {
- "node": [0,20,1,21,2,22,3,23,4,24,5,25,6,26,7,27,8,28,9,29,10,30,11,31,12,32,13,33,14,34,15,35,16,36,17,37,18,38,19,39],
- "socket": [
- [0,20,1,21,2,22,3,23,4,24,5,25,6,26,7,27,8,28,9,29],
- [10,30,11,31,12,32,13,33,14,34,15,35,16,36,17,37,18,38,19,39]
- ],
- "memoryDomain": [
- [0,20,1,21,2,22,3,23,4,24,5,25,6,26,7,27,8,28,9,29],
- [10,30,11,31,12,32,13,33,14,34,15,35,16,36,17,37,18,38,19,39]
- ],
- "core": [
- [0,20],[1,21],[2,22],[3,23],[4,24],[5,25],[6,26],[7,27],[8,28],[9,29],[10,30],[11,31],[12,32],[13,33],[14,34],[15,35],[16,36],[17,37],[18,38],[19,39]
- ]
- }
- }
- ],
- "metricConfig": [
- {
- "name": "cpu_load",
- "scope": "hwthread",
- "unit": {"base": ""},
- "aggregation": "avg",
- "timestep": 60,
- "peak": 4,
- "normal": 2,
- "caution": 1,
- "alert": 0.25
- }
- ]
-}`)
-
- if err := Validate(ClusterCfg, bytes.NewReader(json)); err != nil {
- t.Errorf("Error is not nil! %v", err)
- }
-}
diff --git a/tools/archive-manager/main.go b/tools/archive-manager/main.go
index 7c842ff..0cf5f98 100644
--- a/tools/archive-manager/main.go
+++ b/tools/archive-manager/main.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package main
@@ -13,7 +13,7 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
- "github.com/ClusterCockpit/cc-backend/pkg/log"
+ cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
)
func parseDate(in string) int64 {
@@ -22,7 +22,7 @@ func parseDate(in string) int64 {
if in != "" {
t, err := time.ParseInLocation(shortForm, in, loc)
if err != nil {
- log.Abortf("Archive Manager Main: Date parse failed with input: '%s'\nError: %s\n", in, err.Error())
+ cclog.Abortf("Archive Manager Main: Date parse failed with input: '%s'\nError: %s\n", in, err.Error())
}
return t.Unix()
}
@@ -46,18 +46,18 @@ func main() {
archiveCfg := fmt.Sprintf("{\"kind\": \"file\",\"path\": \"%s\"}", srcPath)
- log.Init(flagLogLevel, flagLogDateTime)
+ cclog.Init(flagLogLevel, flagLogDateTime)
config.Init(flagConfigFile)
if err := archive.Init(json.RawMessage(archiveCfg), false); err != nil {
- log.Fatal(err)
+ cclog.Fatal(err)
}
ar := archive.GetHandle()
if flagValidate {
config.Keys.Validate = true
for job := range ar.Iter(true) {
- log.Printf("Validate %s - %d\n", job.Meta.Cluster, job.Meta.JobID)
+ cclog.Printf("Validate %s - %d\n", job.Meta.Cluster, job.Meta.JobID)
}
os.Exit(0)
}
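The pkg/log to cc-lib switch above is an aliased-import swap with call sites renamed one-for-one. Below is a minimal sketch of the resulting initialization pattern; the flag wiring is an assumption made for this sketch, while the import alias, the Init(level, dateTime) signature, and the Printf call are taken from the hunks above:

package main

import (
	"flag"

	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
)

func main() {
	// Hypothetical flag setup; only the cclog calls below mirror the diff.
	flagLogLevel := flag.String("loglevel", "warn", "Sets the logging level")
	flagLogDateTime := flag.Bool("logdate", false, "Prefix log lines with date and time")
	flag.Parse()

	cclog.Init(*flagLogLevel, *flagLogDateTime)
	cclog.Printf("archive-manager started\n")
}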
diff --git a/tools/gen-keypair/main.go b/tools/gen-keypair/main.go
index ff9c5c3..6ed3e11 100644
--- a/tools/gen-keypair/main.go
+++ b/tools/gen-keypair/main.go
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package main
diff --git a/web/frontend/package-lock.json b/web/frontend/package-lock.json
index 0b31f40..51e358d 100644
--- a/web/frontend/package-lock.json
+++ b/web/frontend/package-lock.json
@@ -9,32 +9,31 @@
"version": "1.0.0",
"license": "MIT",
"dependencies": {
- "@rollup/plugin-replace": "^5.0.7",
- "@sveltestrap/sveltestrap": "^6.2.7",
- "@urql/svelte": "^4.2.2",
- "chart.js": "^4.4.6",
- "date-fns": "^2.30.0",
- "graphql": "^16.9.0",
- "mathjs": "^12.4.3",
- "svelte-chartjs": "^3.1.5",
- "uplot": "^1.6.31",
- "wonka": "^6.3.4"
+ "@rollup/plugin-replace": "^6.0.2",
+ "@sveltestrap/sveltestrap": "^7.1.0",
+ "@urql/svelte": "^4.2.3",
+ "chart.js": "^4.4.9",
+ "date-fns": "^4.1.0",
+ "graphql": "^16.11.0",
+ "mathjs": "^14.5.2",
+ "uplot": "^1.6.32",
+ "wonka": "^6.3.5"
},
"devDependencies": {
- "@rollup/plugin-commonjs": "^25.0.8",
- "@rollup/plugin-node-resolve": "^15.3.0",
+ "@rollup/plugin-commonjs": "^28.0.3",
+ "@rollup/plugin-node-resolve": "^16.0.1",
"@rollup/plugin-terser": "^0.4.4",
"@timohausmann/quadtree-js": "^1.2.6",
- "rollup": "^4.27.4",
+ "rollup": "^4.41.1",
"rollup-plugin-css-only": "^4.5.2",
"rollup-plugin-svelte": "^7.2.2",
- "svelte": "^4.2.19"
+ "svelte": "^5.33.14"
}
},
"node_modules/@0no-co/graphql.web": {
- "version": "1.0.11",
- "resolved": "https://registry.npmjs.org/@0no-co/graphql.web/-/graphql.web-1.0.11.tgz",
- "integrity": "sha512-xuSJ9WXwTmtngWkbdEoopMo6F8NLtjy84UNAMsAr5C3/2SgAL/dEU10TMqTIsipqPQ8HA/7WzeqQ9DEQxSvPPA==",
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/@0no-co/graphql.web/-/graphql.web-1.1.2.tgz",
+ "integrity": "sha512-N2NGsU5FLBhT8NZ+3l2YrzZSHITjNXNuDhC4iDiikv0IujaJ0Xc6xIxQZ/Ek3Cb+rgPjnLHYyJm11tInuJn+cw==",
"license": "MIT",
"peerDependencies": {
"graphql": "^14.0.0 || ^15.0.0 || ^16.0.0"
@@ -59,21 +58,18 @@
}
},
"node_modules/@babel/runtime": {
- "version": "7.27.0",
- "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.27.0.tgz",
- "integrity": "sha512-VtPOkrdPHZsKc/clNqyi9WUA8TINkZ4cGk63UUE3u4pmB2k+ZMQRDuIOagv8UVd6j7k0T3+RRIb7beKTebNbcw==",
+ "version": "7.27.6",
+ "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.27.6.tgz",
+ "integrity": "sha512-vbavdySgbTTrmFE+EsiqUTzlOr5bzlnJtUv9PynGCAKvfQqjIXbvFdumPM/GxMDfyuGMJaJAU6TO4zc1Jf1i8Q==",
"license": "MIT",
- "dependencies": {
- "regenerator-runtime": "^0.14.0"
- },
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@jridgewell/gen-mapping": {
- "version": "0.3.5",
- "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.5.tgz",
- "integrity": "sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg==",
+ "version": "0.3.8",
+ "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.8.tgz",
+ "integrity": "sha512-imAbBGkb+ebQyxKgzv5Hu2nmROxoDOXHh80evxdoXNOrvAnVx7zimzc1Oo5h9RlfV4vPXaE2iM5pOFbvOCClWA==",
"license": "MIT",
"dependencies": {
"@jridgewell/set-array": "^1.2.1",
@@ -146,21 +142,22 @@
}
},
"node_modules/@rollup/plugin-commonjs": {
- "version": "25.0.8",
- "resolved": "https://registry.npmjs.org/@rollup/plugin-commonjs/-/plugin-commonjs-25.0.8.tgz",
- "integrity": "sha512-ZEZWTK5n6Qde0to4vS9Mr5x/0UZoqCxPVR9KRUjU4kA2sO7GEUn1fop0DAwpO6z0Nw/kJON9bDmSxdWxO/TT1A==",
+ "version": "28.0.3",
+ "resolved": "https://registry.npmjs.org/@rollup/plugin-commonjs/-/plugin-commonjs-28.0.3.tgz",
+ "integrity": "sha512-pyltgilam1QPdn+Zd9gaCfOLcnjMEJ9gV+bTw6/r73INdvzf1ah9zLIJBm+kW7R6IUFIQ1YO+VqZtYxZNWFPEQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@rollup/pluginutils": "^5.0.1",
"commondir": "^1.0.1",
"estree-walker": "^2.0.2",
- "glob": "^8.0.3",
+ "fdir": "^6.2.0",
"is-reference": "1.2.1",
- "magic-string": "^0.30.3"
+ "magic-string": "^0.30.3",
+ "picomatch": "^4.0.2"
},
"engines": {
- "node": ">=14.0.0"
+ "node": ">=16.0.0 || 14 >= 14.17"
},
"peerDependencies": {
"rollup": "^2.68.0||^3.0.0||^4.0.0"
@@ -172,9 +169,9 @@
}
},
"node_modules/@rollup/plugin-node-resolve": {
- "version": "15.3.0",
- "resolved": "https://registry.npmjs.org/@rollup/plugin-node-resolve/-/plugin-node-resolve-15.3.0.tgz",
- "integrity": "sha512-9eO5McEICxMzJpDW9OnMYSv4Sta3hmt7VtBFz5zR9273suNOydOyq/FrGeGy+KsTRFm8w0SLVhzig2ILFT63Ag==",
+ "version": "16.0.1",
+ "resolved": "https://registry.npmjs.org/@rollup/plugin-node-resolve/-/plugin-node-resolve-16.0.1.tgz",
+ "integrity": "sha512-tk5YCxJWIG81umIvNkSod2qK5KyQW19qcBF/B78n1bjtOON6gzKoVeSzAE8yHCZEDmqkHKkxplExA8KzdJLJpA==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -197,9 +194,9 @@
}
},
"node_modules/@rollup/plugin-replace": {
- "version": "5.0.7",
- "resolved": "https://registry.npmjs.org/@rollup/plugin-replace/-/plugin-replace-5.0.7.tgz",
- "integrity": "sha512-PqxSfuorkHz/SPpyngLyg5GCEkOcee9M1bkxiVDr41Pd61mqP1PLOoDPbpl44SB2mQGKwV/In74gqQmGITOhEQ==",
+ "version": "6.0.2",
+ "resolved": "https://registry.npmjs.org/@rollup/plugin-replace/-/plugin-replace-6.0.2.tgz",
+ "integrity": "sha512-7QaYCf8bqF04dOy7w/eHmJeNExxTYwvKAmlSAH/EaWWUzbT0h5sbF6bktFoX/0F/0qwng5/dWFMyf3gzaM8DsQ==",
"license": "MIT",
"dependencies": {
"@rollup/pluginutils": "^5.0.1",
@@ -241,9 +238,9 @@
}
},
"node_modules/@rollup/pluginutils": {
- "version": "5.1.3",
- "resolved": "https://registry.npmjs.org/@rollup/pluginutils/-/pluginutils-5.1.3.tgz",
- "integrity": "sha512-Pnsb6f32CD2W3uCaLZIzDmeFyQ2b8UWMFI7xtwUezpcGBDVDW6y9XgAWIlARiGAo6eNF5FK5aQTr0LFyNyqq5A==",
+ "version": "5.1.4",
+ "resolved": "https://registry.npmjs.org/@rollup/pluginutils/-/pluginutils-5.1.4.tgz",
+ "integrity": "sha512-USm05zrsFxYLPdWWq+K3STlWiT/3ELn3RcV5hJMghpeAIhxfsUIg6mt12CBJBInWMV4VneoV7SfGv8xIwo2qNQ==",
"license": "MIT",
"dependencies": {
"@types/estree": "^1.0.0",
@@ -263,9 +260,9 @@
}
},
"node_modules/@rollup/rollup-android-arm-eabi": {
- "version": "4.27.4",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.27.4.tgz",
- "integrity": "sha512-2Y3JT6f5MrQkICUyRVCw4oa0sutfAsgaSsb0Lmmy1Wi2y7X5vT9Euqw4gOsCyy0YfKURBg35nhUKZS4mDcfULw==",
+ "version": "4.41.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.41.1.tgz",
+ "integrity": "sha512-NELNvyEWZ6R9QMkiytB4/L4zSEaBC03KIXEghptLGLZWJ6VPrL63ooZQCOnlx36aQPGhzuOMwDerC1Eb2VmrLw==",
"cpu": [
"arm"
],
@@ -277,9 +274,9 @@
]
},
"node_modules/@rollup/rollup-android-arm64": {
- "version": "4.27.4",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.27.4.tgz",
- "integrity": "sha512-wzKRQXISyi9UdCVRqEd0H4cMpzvHYt1f/C3CoIjES6cG++RHKhrBj2+29nPF0IB5kpy9MS71vs07fvrNGAl/iA==",
+ "version": "4.41.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.41.1.tgz",
+ "integrity": "sha512-DXdQe1BJ6TK47ukAoZLehRHhfKnKg9BjnQYUu9gzhI8Mwa1d2fzxA1aw2JixHVl403bwp1+/o/NhhHtxWJBgEA==",
"cpu": [
"arm64"
],
@@ -291,9 +288,9 @@
]
},
"node_modules/@rollup/rollup-darwin-arm64": {
- "version": "4.27.4",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.27.4.tgz",
- "integrity": "sha512-PlNiRQapift4LNS8DPUHuDX/IdXiLjf8mc5vdEmUR0fF/pyy2qWwzdLjB+iZquGr8LuN4LnUoSEvKRwjSVYz3Q==",
+ "version": "4.41.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.41.1.tgz",
+ "integrity": "sha512-5afxvwszzdulsU2w8JKWwY8/sJOLPzf0e1bFuvcW5h9zsEg+RQAojdW0ux2zyYAz7R8HvvzKCjLNJhVq965U7w==",
"cpu": [
"arm64"
],
@@ -305,9 +302,9 @@
]
},
"node_modules/@rollup/rollup-darwin-x64": {
- "version": "4.27.4",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.27.4.tgz",
- "integrity": "sha512-o9bH2dbdgBDJaXWJCDTNDYa171ACUdzpxSZt+u/AAeQ20Nk5x+IhA+zsGmrQtpkLiumRJEYef68gcpn2ooXhSQ==",
+ "version": "4.41.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.41.1.tgz",
+ "integrity": "sha512-egpJACny8QOdHNNMZKf8xY0Is6gIMz+tuqXlusxquWu3F833DcMwmGM7WlvCO9sB3OsPjdC4U0wHw5FabzCGZg==",
"cpu": [
"x64"
],
@@ -319,9 +316,9 @@
]
},
"node_modules/@rollup/rollup-freebsd-arm64": {
- "version": "4.27.4",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.27.4.tgz",
- "integrity": "sha512-NBI2/i2hT9Q+HySSHTBh52da7isru4aAAo6qC3I7QFVsuhxi2gM8t/EI9EVcILiHLj1vfi+VGGPaLOUENn7pmw==",
+ "version": "4.41.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.41.1.tgz",
+ "integrity": "sha512-DBVMZH5vbjgRk3r0OzgjS38z+atlupJ7xfKIDJdZZL6sM6wjfDNo64aowcLPKIx7LMQi8vybB56uh1Ftck/Atg==",
"cpu": [
"arm64"
],
@@ -333,9 +330,9 @@
]
},
"node_modules/@rollup/rollup-freebsd-x64": {
- "version": "4.27.4",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.27.4.tgz",
- "integrity": "sha512-wYcC5ycW2zvqtDYrE7deary2P2UFmSh85PUpAx+dwTCO9uw3sgzD6Gv9n5X4vLaQKsrfTSZZ7Z7uynQozPVvWA==",
+ "version": "4.41.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.41.1.tgz",
+ "integrity": "sha512-3FkydeohozEskBxNWEIbPfOE0aqQgB6ttTkJ159uWOFn42VLyfAiyD9UK5mhu+ItWzft60DycIN1Xdgiy8o/SA==",
"cpu": [
"x64"
],
@@ -347,9 +344,9 @@
]
},
"node_modules/@rollup/rollup-linux-arm-gnueabihf": {
- "version": "4.27.4",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.27.4.tgz",
- "integrity": "sha512-9OwUnK/xKw6DyRlgx8UizeqRFOfi9mf5TYCw1uolDaJSbUmBxP85DE6T4ouCMoN6pXw8ZoTeZCSEfSaYo+/s1w==",
+ "version": "4.41.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.41.1.tgz",
+ "integrity": "sha512-wC53ZNDgt0pqx5xCAgNunkTzFE8GTgdZ9EwYGVcg+jEjJdZGtq9xPjDnFgfFozQI/Xm1mh+D9YlYtl+ueswNEg==",
"cpu": [
"arm"
],
@@ -361,9 +358,9 @@
]
},
"node_modules/@rollup/rollup-linux-arm-musleabihf": {
- "version": "4.27.4",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.27.4.tgz",
- "integrity": "sha512-Vgdo4fpuphS9V24WOV+KwkCVJ72u7idTgQaBoLRD0UxBAWTF9GWurJO9YD9yh00BzbkhpeXtm6na+MvJU7Z73A==",
+ "version": "4.41.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.41.1.tgz",
+ "integrity": "sha512-jwKCca1gbZkZLhLRtsrka5N8sFAaxrGz/7wRJ8Wwvq3jug7toO21vWlViihG85ei7uJTpzbXZRcORotE+xyrLA==",
"cpu": [
"arm"
],
@@ -375,9 +372,9 @@
]
},
"node_modules/@rollup/rollup-linux-arm64-gnu": {
- "version": "4.27.4",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.27.4.tgz",
- "integrity": "sha512-pleyNgyd1kkBkw2kOqlBx+0atfIIkkExOTiifoODo6qKDSpnc6WzUY5RhHdmTdIJXBdSnh6JknnYTtmQyobrVg==",
+ "version": "4.41.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.41.1.tgz",
+ "integrity": "sha512-g0UBcNknsmmNQ8V2d/zD2P7WWfJKU0F1nu0k5pW4rvdb+BIqMm8ToluW/eeRmxCared5dD76lS04uL4UaNgpNA==",
"cpu": [
"arm64"
],
@@ -389,9 +386,9 @@
]
},
"node_modules/@rollup/rollup-linux-arm64-musl": {
- "version": "4.27.4",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.27.4.tgz",
- "integrity": "sha512-caluiUXvUuVyCHr5DxL8ohaaFFzPGmgmMvwmqAITMpV/Q+tPoaHZ/PWa3t8B2WyoRcIIuu1hkaW5KkeTDNSnMA==",
+ "version": "4.41.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.41.1.tgz",
+ "integrity": "sha512-XZpeGB5TKEZWzIrj7sXr+BEaSgo/ma/kCgrZgL0oo5qdB1JlTzIYQKel/RmhT6vMAvOdM2teYlAaOGJpJ9lahg==",
"cpu": [
"arm64"
],
@@ -402,10 +399,24 @@
"linux"
]
},
+ "node_modules/@rollup/rollup-linux-loongarch64-gnu": {
+ "version": "4.41.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loongarch64-gnu/-/rollup-linux-loongarch64-gnu-4.41.1.tgz",
+ "integrity": "sha512-bkCfDJ4qzWfFRCNt5RVV4DOw6KEgFTUZi2r2RuYhGWC8WhCA8lCAJhDeAmrM/fdiAH54m0mA0Vk2FGRPyzI+tw==",
+ "cpu": [
+ "loong64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
"node_modules/@rollup/rollup-linux-powerpc64le-gnu": {
- "version": "4.27.4",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-powerpc64le-gnu/-/rollup-linux-powerpc64le-gnu-4.27.4.tgz",
- "integrity": "sha512-FScrpHrO60hARyHh7s1zHE97u0KlT/RECzCKAdmI+LEoC1eDh/RDji9JgFqyO+wPDb86Oa/sXkily1+oi4FzJQ==",
+ "version": "4.41.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-powerpc64le-gnu/-/rollup-linux-powerpc64le-gnu-4.41.1.tgz",
+ "integrity": "sha512-3mr3Xm+gvMX+/8EKogIZSIEF0WUu0HL9di+YWlJpO8CQBnoLAEL/roTCxuLncEdgcfJcvA4UMOf+2dnjl4Ut1A==",
"cpu": [
"ppc64"
],
@@ -417,9 +428,23 @@
]
},
"node_modules/@rollup/rollup-linux-riscv64-gnu": {
- "version": "4.27.4",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.27.4.tgz",
- "integrity": "sha512-qyyprhyGb7+RBfMPeww9FlHwKkCXdKHeGgSqmIXw9VSUtvyFZ6WZRtnxgbuz76FK7LyoN8t/eINRbPUcvXB5fw==",
+ "version": "4.41.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.41.1.tgz",
+ "integrity": "sha512-3rwCIh6MQ1LGrvKJitQjZFuQnT2wxfU+ivhNBzmxXTXPllewOF7JR1s2vMX/tWtUYFgphygxjqMl76q4aMotGw==",
+ "cpu": [
+ "riscv64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-riscv64-musl": {
+ "version": "4.41.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.41.1.tgz",
+ "integrity": "sha512-LdIUOb3gvfmpkgFZuccNa2uYiqtgZAz3PTzjuM5bH3nvuy9ty6RGc/Q0+HDFrHrizJGVpjnTZ1yS5TNNjFlklw==",
"cpu": [
"riscv64"
],
@@ -431,9 +456,9 @@
]
},
"node_modules/@rollup/rollup-linux-s390x-gnu": {
- "version": "4.27.4",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.27.4.tgz",
- "integrity": "sha512-PFz+y2kb6tbh7m3A7nA9++eInGcDVZUACulf/KzDtovvdTizHpZaJty7Gp0lFwSQcrnebHOqxF1MaKZd7psVRg==",
+ "version": "4.41.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.41.1.tgz",
+ "integrity": "sha512-oIE6M8WC9ma6xYqjvPhzZYk6NbobIURvP/lEbh7FWplcMO6gn7MM2yHKA1eC/GvYwzNKK/1LYgqzdkZ8YFxR8g==",
"cpu": [
"s390x"
],
@@ -445,9 +470,9 @@
]
},
"node_modules/@rollup/rollup-linux-x64-gnu": {
- "version": "4.27.4",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.27.4.tgz",
- "integrity": "sha512-Ni8mMtfo+o/G7DVtweXXV/Ol2TFf63KYjTtoZ5f078AUgJTmaIJnj4JFU7TK/9SVWTaSJGxPi5zMDgK4w+Ez7Q==",
+ "version": "4.41.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.41.1.tgz",
+ "integrity": "sha512-cWBOvayNvA+SyeQMp79BHPK8ws6sHSsYnK5zDcsC3Hsxr1dgTABKjMnMslPq1DvZIp6uO7kIWhiGwaTdR4Og9A==",
"cpu": [
"x64"
],
@@ -459,9 +484,9 @@
]
},
"node_modules/@rollup/rollup-linux-x64-musl": {
- "version": "4.27.4",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.27.4.tgz",
- "integrity": "sha512-5AeeAF1PB9TUzD+3cROzFTnAJAcVUGLuR8ng0E0WXGkYhp6RD6L+6szYVX+64Rs0r72019KHZS1ka1q+zU/wUw==",
+ "version": "4.41.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.41.1.tgz",
+ "integrity": "sha512-y5CbN44M+pUCdGDlZFzGGBSKCA4A/J2ZH4edTYSSxFg7ce1Xt3GtydbVKWLlzL+INfFIZAEg1ZV6hh9+QQf9YQ==",
"cpu": [
"x64"
],
@@ -473,9 +498,9 @@
]
},
"node_modules/@rollup/rollup-win32-arm64-msvc": {
- "version": "4.27.4",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.27.4.tgz",
- "integrity": "sha512-yOpVsA4K5qVwu2CaS3hHxluWIK5HQTjNV4tWjQXluMiiiu4pJj4BN98CvxohNCpcjMeTXk/ZMJBRbgRg8HBB6A==",
+ "version": "4.41.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.41.1.tgz",
+ "integrity": "sha512-lZkCxIrjlJlMt1dLO/FbpZbzt6J/A8p4DnqzSa4PWqPEUUUnzXLeki/iyPLfV0BmHItlYgHUqJe+3KiyydmiNQ==",
"cpu": [
"arm64"
],
@@ -487,9 +512,9 @@
]
},
"node_modules/@rollup/rollup-win32-ia32-msvc": {
- "version": "4.27.4",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.27.4.tgz",
- "integrity": "sha512-KtwEJOaHAVJlxV92rNYiG9JQwQAdhBlrjNRp7P9L8Cb4Rer3in+0A+IPhJC9y68WAi9H0sX4AiG2NTsVlmqJeQ==",
+ "version": "4.41.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.41.1.tgz",
+ "integrity": "sha512-+psFT9+pIh2iuGsxFYYa/LhS5MFKmuivRsx9iPJWNSGbh2XVEjk90fmpUEjCnILPEPJnikAU6SFDiEUyOv90Pg==",
"cpu": [
"ia32"
],
@@ -501,9 +526,9 @@
]
},
"node_modules/@rollup/rollup-win32-x64-msvc": {
- "version": "4.27.4",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.27.4.tgz",
- "integrity": "sha512-3j4jx1TppORdTAoBJRd+/wJRGCPC0ETWkXOecJ6PPZLj6SptXkrXcNqdj0oclbKML6FkQltdz7bBA3rUSirZug==",
+ "version": "4.41.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.41.1.tgz",
+ "integrity": "sha512-Wq2zpapRYLfi4aKxf2Xff0tN+7slj2d4R87WEzqw7ZLsVvO5zwYCIuEGSZYiK41+GlwUo1HiR+GdkLEJnCKTCw==",
"cpu": [
"x64"
],
@@ -514,10 +539,19 @@
"win32"
]
},
+ "node_modules/@sveltejs/acorn-typescript": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/@sveltejs/acorn-typescript/-/acorn-typescript-1.0.5.tgz",
+ "integrity": "sha512-IwQk4yfwLdibDlrXVE04jTZYlLnwsTT2PIOQQGNLWfjavGifnk1JD1LcZjZaBTRcxZu2FfPfNLOE04DSu9lqtQ==",
+ "license": "MIT",
+ "peerDependencies": {
+ "acorn": "^8.9.0"
+ }
+ },
"node_modules/@sveltestrap/sveltestrap": {
- "version": "6.2.7",
- "resolved": "https://registry.npmjs.org/@sveltestrap/sveltestrap/-/sveltestrap-6.2.7.tgz",
- "integrity": "sha512-WwLLfAFUb42BGuRrf3Vbct30bQMzlEMMipN/MfxhjuLTmLQeW9muVJfPyvjtWS+mY+RjkSCoHvAp/ZobP1NLlQ==",
+ "version": "7.1.0",
+ "resolved": "https://registry.npmjs.org/@sveltestrap/sveltestrap/-/sveltestrap-7.1.0.tgz",
+ "integrity": "sha512-TpIx25kqLV+z+VD3yfqYayOI1IaCeWFbT0uqM6NfA4vQgDs9PjFwmjkU4YEAlV/ngs9e7xPmaRWE7lkrg4Miow==",
"license": "MIT",
"dependencies": {
"@popperjs/core": "^2.11.8"
@@ -534,9 +568,9 @@
"license": "MIT"
},
"node_modules/@types/estree": {
- "version": "1.0.6",
- "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.6.tgz",
- "integrity": "sha512-AYnb1nQyY49te+VRAVgmzfcgjYS91mY5P0TKUDCLEM+gNnA+3T6rWITXRLYCpahpqSQbN5cE+gHpnPyXjHWxcw==",
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz",
+ "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==",
"license": "MIT"
},
"node_modules/@types/resolve": {
@@ -547,9 +581,9 @@
"license": "MIT"
},
"node_modules/@urql/core": {
- "version": "5.0.8",
- "resolved": "https://registry.npmjs.org/@urql/core/-/core-5.0.8.tgz",
- "integrity": "sha512-1GOnUw7/a9bzkcM0+U8U5MmxW2A7FE5YquuEmcJzTtW5tIs2EoS4F2ITpuKBjRBbyRjZgO860nWFPo1m4JImGA==",
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/@urql/core/-/core-5.1.1.tgz",
+ "integrity": "sha512-aGh024z5v2oINGD/In6rAtVKTm4VmQ2TxKQBAtk2ZSME5dunZFcjltw4p5ENQg+5CBhZ3FHMzl0Oa+rwqiWqlg==",
"license": "MIT",
"dependencies": {
"@0no-co/graphql.web": "^1.0.5",
@@ -557,12 +591,12 @@
}
},
"node_modules/@urql/svelte": {
- "version": "4.2.2",
- "resolved": "https://registry.npmjs.org/@urql/svelte/-/svelte-4.2.2.tgz",
- "integrity": "sha512-6ntLGsWcnNtaMZVmFpePfFTSpYxYpznCAqnuvLDjt7Oa7YqHcFiyPnz7IIsiPD9VE6hZSi0+RwmRk5BMba/teQ==",
+ "version": "4.2.3",
+ "resolved": "https://registry.npmjs.org/@urql/svelte/-/svelte-4.2.3.tgz",
+ "integrity": "sha512-v3eArfymhdjaM5VQFp3QZxq9veYPadmDfX7ueid/kD4DlRplIycPakJ2FrKigh46SXa5mWqJ3QWuWyRKVu61sw==",
"license": "MIT",
"dependencies": {
- "@urql/core": "^5.0.0",
+ "@urql/core": "^5.1.1",
"wonka": "^6.3.2"
},
"peerDependencies": {
@@ -571,9 +605,9 @@
}
},
"node_modules/acorn": {
- "version": "8.14.0",
- "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.14.0.tgz",
- "integrity": "sha512-cl669nCJTZBsL97OF4kUQm5g5hC2uihk0NxY3WENAC0TYdILVkAyHymAntgxGkl7K+t0cXIrH5siy5S4XkFycA==",
+ "version": "8.14.1",
+ "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.14.1.tgz",
+ "integrity": "sha512-OvQ/2pUDKmgfCg++xsTX1wGxfTaszcHVcTctW4UJB4hibJx2HXxxO5UmVgyjMa+ZDsiaf5wWLXYpRWMmBI0QHg==",
"license": "MIT",
"bin": {
"acorn": "bin/acorn"
@@ -600,23 +634,6 @@
"node": ">= 0.4"
}
},
- "node_modules/balanced-match": {
- "version": "1.0.2",
- "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
- "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==",
- "dev": true,
- "license": "MIT"
- },
- "node_modules/brace-expansion": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
- "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "balanced-match": "^1.0.0"
- }
- },
"node_modules/buffer-from": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz",
@@ -625,9 +642,9 @@
"license": "MIT"
},
"node_modules/chart.js": {
- "version": "4.4.6",
- "resolved": "https://registry.npmjs.org/chart.js/-/chart.js-4.4.6.tgz",
- "integrity": "sha512-8Y406zevUPbbIBA/HRk33khEmQPk5+cxeflWE/2rx1NJsjVWMPw/9mSP9rxHP5eqi6LNoPBVMfZHxbwLSgldYA==",
+ "version": "4.4.9",
+ "resolved": "https://registry.npmjs.org/chart.js/-/chart.js-4.4.9.tgz",
+ "integrity": "sha512-EyZ9wWKgpAU0fLJ43YAEIF8sr5F2W3LqbS40ZJyHIner2lY14ufqv2VMp69MAiZ2rpwxEUxEhIH/0U3xyRynxg==",
"license": "MIT",
"dependencies": {
"@kurkle/color": "^0.3.0"
@@ -636,26 +653,13 @@
"pnpm": ">=8"
}
},
- "node_modules/code-red": {
- "version": "1.0.4",
- "resolved": "https://registry.npmjs.org/code-red/-/code-red-1.0.4.tgz",
- "integrity": "sha512-7qJWqItLA8/VPVlKJlFXU+NBlo/qyfs39aJcuMT/2ere32ZqvF5OSxgdM5xOfJJ7O429gg2HM47y8v9P+9wrNw==",
+ "node_modules/clsx": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz",
+ "integrity": "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==",
"license": "MIT",
- "dependencies": {
- "@jridgewell/sourcemap-codec": "^1.4.15",
- "@types/estree": "^1.0.1",
- "acorn": "^8.10.0",
- "estree-walker": "^3.0.3",
- "periscopic": "^3.1.0"
- }
- },
- "node_modules/code-red/node_modules/estree-walker": {
- "version": "3.0.3",
- "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz",
- "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==",
- "license": "MIT",
- "dependencies": {
- "@types/estree": "^1.0.0"
+ "engines": {
+ "node": ">=6"
}
},
"node_modules/commander": {
@@ -685,39 +689,20 @@
"url": "https://github.com/sponsors/rawify"
}
},
- "node_modules/css-tree": {
- "version": "2.3.1",
- "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-2.3.1.tgz",
- "integrity": "sha512-6Fv1DV/TYw//QF5IzQdqsNDjx/wc8TrMBZsqjL9eW01tWb7R7k/mq+/VXfJCl7SoD5emsJop9cOByJZfs8hYIw==",
- "license": "MIT",
- "dependencies": {
- "mdn-data": "2.0.30",
- "source-map-js": "^1.0.1"
- },
- "engines": {
- "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0"
- }
- },
"node_modules/date-fns": {
- "version": "2.30.0",
- "resolved": "https://registry.npmjs.org/date-fns/-/date-fns-2.30.0.tgz",
- "integrity": "sha512-fnULvOpxnC5/Vg3NCiWelDsLiUc9bRwAPs/+LfTLNvetFCtCTN+yQz15C/fs4AwX1R9K5GLtLfn8QW+dWisaAw==",
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/date-fns/-/date-fns-4.1.0.tgz",
+ "integrity": "sha512-Ukq0owbQXxa/U3EGtsdVBkR1w7KOQ5gIBqdH2hkvknzZPYvBxb/aa6E8L7tmjFtkwZBu3UXBbjIgPo/Ez4xaNg==",
"license": "MIT",
- "dependencies": {
- "@babel/runtime": "^7.21.0"
- },
- "engines": {
- "node": ">=0.11"
- },
"funding": {
- "type": "opencollective",
- "url": "https://opencollective.com/date-fns"
+ "type": "github",
+ "url": "https://github.com/sponsors/kossnocorp"
}
},
"node_modules/decimal.js": {
- "version": "10.4.3",
- "resolved": "https://registry.npmjs.org/decimal.js/-/decimal.js-10.4.3.tgz",
- "integrity": "sha512-VBBaLc1MgL5XpzgIP7ny5Z6Nx3UrRkIViUkPUdtl9aya5amy3De1gsUUSB1g3+3sExYNjCAsAznmukyxCb1GRA==",
+ "version": "10.5.0",
+ "resolved": "https://registry.npmjs.org/decimal.js/-/decimal.js-10.5.0.tgz",
+ "integrity": "sha512-8vDa8Qxvr/+d94hSh5P3IJwI5t8/c0KsMp+g8bNw9cY2icONa5aPfvKeieW1WlG0WQYwwhJ7mjui2xtiePQSXw==",
"license": "MIT"
},
"node_modules/deepmerge": {
@@ -736,31 +721,54 @@
"integrity": "sha512-nV5aVWW1K0wEiUIEdZ4erkGGH8mDxGyxSeqPzRNtWP7ataw+/olFObw7hujFWlVjNsaDFw5VZ5NzVSIqRgfTiw==",
"license": "MIT"
},
+ "node_modules/esm-env": {
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/esm-env/-/esm-env-1.2.2.tgz",
+ "integrity": "sha512-Epxrv+Nr/CaL4ZcFGPJIYLWFom+YeV1DqMLHJoEd9SYRxNbaFruBwfEX/kkHUJf55j2+TUbmDcmuilbP1TmXHA==",
+ "license": "MIT"
+ },
+ "node_modules/esrap": {
+ "version": "1.4.7",
+ "resolved": "https://registry.npmjs.org/esrap/-/esrap-1.4.7.tgz",
+ "integrity": "sha512-0ZxW6guTF/AeKeKi7he93lmgv7Hx7giD1tBrOeVqkqsZGQJd2/kfnL7LdIsr9FT/AtkBK9XeDTov+gxprBqdEg==",
+ "license": "MIT",
+ "dependencies": {
+ "@jridgewell/sourcemap-codec": "^1.4.15"
+ }
+ },
"node_modules/estree-walker": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-2.0.2.tgz",
"integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==",
"license": "MIT"
},
- "node_modules/fraction.js": {
- "version": "4.3.4",
- "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.3.4.tgz",
- "integrity": "sha512-pwiTgt0Q7t+GHZA4yaLjObx4vXmmdcS0iSJ19o8d/goUGgItX9UZWKWNnLHehxviD8wU2IWRsnR8cD5+yOJP2Q==",
+ "node_modules/fdir": {
+ "version": "6.4.5",
+ "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.4.5.tgz",
+ "integrity": "sha512-4BG7puHpVsIYxZUbiUE3RqGloLaSSwzYie5jvasC4LWuBWzZawynvYouhjbQKw2JuIGYdm0DzIxl8iVidKlUEw==",
+ "dev": true,
"license": "MIT",
- "engines": {
- "node": "*"
+ "peerDependencies": {
+ "picomatch": "^3 || ^4"
},
- "funding": {
- "type": "patreon",
- "url": "https://github.com/sponsors/rawify"
+ "peerDependenciesMeta": {
+ "picomatch": {
+ "optional": true
+ }
}
},
- "node_modules/fs.realpath": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
- "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==",
- "dev": true,
- "license": "ISC"
+ "node_modules/fraction.js": {
+ "version": "5.2.2",
+ "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-5.2.2.tgz",
+ "integrity": "sha512-uXBDv5knpYmv/2gLzWQ5mBHGBRk9wcKTeWu6GLTUEQfjCxO09uM/mHDrojlL+Q1mVGIIFo149Gba7od1XPgSzQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 12"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/rawify"
+ }
},
"node_modules/fsevents": {
"version": "2.3.3",
@@ -787,31 +795,10 @@
"url": "https://github.com/sponsors/ljharb"
}
},
- "node_modules/glob": {
- "version": "8.1.0",
- "resolved": "https://registry.npmjs.org/glob/-/glob-8.1.0.tgz",
- "integrity": "sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ==",
- "deprecated": "Glob versions prior to v9 are no longer supported",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "fs.realpath": "^1.0.0",
- "inflight": "^1.0.4",
- "inherits": "2",
- "minimatch": "^5.0.1",
- "once": "^1.3.0"
- },
- "engines": {
- "node": ">=12"
- },
- "funding": {
- "url": "https://github.com/sponsors/isaacs"
- }
- },
"node_modules/graphql": {
- "version": "16.9.0",
- "resolved": "https://registry.npmjs.org/graphql/-/graphql-16.9.0.tgz",
- "integrity": "sha512-GGTKBX4SD7Wdb8mqeDLni2oaRGYQWjWHGKPQ24ZMnUtKfcsVoiv4uX8+LJr1K6U5VW2Lu1BwJnj7uiori0YtRw==",
+ "version": "16.11.0",
+ "resolved": "https://registry.npmjs.org/graphql/-/graphql-16.11.0.tgz",
+ "integrity": "sha512-mS1lbMsxgQj6hge1XZ6p7GPhbrtFwUFYi3wRzXAC/FmYnyXMTvvI3td3rjmQ2u8ewXueaSvRPWaEcgVVOT9Jnw==",
"license": "MIT",
"engines": {
"node": "^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0"
@@ -830,29 +817,10 @@
"node": ">= 0.4"
}
},
- "node_modules/inflight": {
- "version": "1.0.6",
- "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
- "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==",
- "deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "once": "^1.3.0",
- "wrappy": "1"
- }
- },
- "node_modules/inherits": {
- "version": "2.0.4",
- "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
- "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==",
- "dev": true,
- "license": "ISC"
- },
"node_modules/is-core-module": {
- "version": "2.15.1",
- "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.15.1.tgz",
- "integrity": "sha512-z0vtXSwucUJtANQWldhbtbt7BnL0vxiFjIdDLAatwhDYty2bad6s+rijD6Ri4YuYJubLzIJLUidCh09e1djEVQ==",
+ "version": "2.16.1",
+ "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz",
+ "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -895,29 +863,29 @@
"license": "MIT"
},
"node_modules/magic-string": {
- "version": "0.30.14",
- "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.14.tgz",
- "integrity": "sha512-5c99P1WKTed11ZC0HMJOj6CDIue6F8ySu+bJL+85q1zBEIY8IklrJ1eiKC2NDRh3Ct3FcvmJPyQHb9erXMTJNw==",
+ "version": "0.30.17",
+ "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.17.tgz",
+ "integrity": "sha512-sNPKHvyjVf7gyjwS4xGTaW/mCnF8wnjtifKBEhxfZ7E/S8tQ0rssrwGNn6q8JH/ohItJfSQp9mBtQYuTlH5QnA==",
"license": "MIT",
"dependencies": {
"@jridgewell/sourcemap-codec": "^1.5.0"
}
},
"node_modules/mathjs": {
- "version": "12.4.3",
- "resolved": "https://registry.npmjs.org/mathjs/-/mathjs-12.4.3.tgz",
- "integrity": "sha512-oHdGPDbp7gO873xxG90RLq36IuicuKvbpr/bBG5g9c8Obm/VsKVrK9uoRZZHUodohzlnmCEqfDzbR3LH6m+aAQ==",
+ "version": "14.5.2",
+ "resolved": "https://registry.npmjs.org/mathjs/-/mathjs-14.5.2.tgz",
+ "integrity": "sha512-51U6hp7j4M4Rj+l+q2KbmXAV9EhQVQzUdw1wE67RnUkKKq5ibxdrl9Ky2YkSUEIc2+VU8/IsThZNu6QSHUoyTA==",
"license": "Apache-2.0",
"dependencies": {
- "@babel/runtime": "^7.24.4",
- "complex.js": "^2.1.1",
+ "@babel/runtime": "^7.26.10",
+ "complex.js": "^2.2.5",
"decimal.js": "^10.4.3",
"escape-latex": "^1.2.0",
- "fraction.js": "4.3.4",
+ "fraction.js": "^5.2.1",
"javascript-natural-sort": "^0.7.1",
"seedrandom": "^3.0.5",
"tiny-emitter": "^2.1.0",
- "typed-function": "^4.1.1"
+ "typed-function": "^4.2.1"
},
"bin": {
"mathjs": "bin/cli.js"
@@ -926,35 +894,6 @@
"node": ">= 18"
}
},
- "node_modules/mdn-data": {
- "version": "2.0.30",
- "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.30.tgz",
- "integrity": "sha512-GaqWWShW4kv/G9IEucWScBx9G1/vsFZZJUO+tD26M8J8z3Kw5RDQjaoZe03YAClgeS/SWPOcb4nkFBTEi5DUEA==",
- "license": "CC0-1.0"
- },
- "node_modules/minimatch": {
- "version": "5.1.6",
- "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz",
- "integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "brace-expansion": "^2.0.1"
- },
- "engines": {
- "node": ">=10"
- }
- },
- "node_modules/once": {
- "version": "1.4.0",
- "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
- "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "wrappy": "1"
- }
- },
"node_modules/path-parse": {
"version": "1.0.7",
"resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz",
@@ -962,35 +901,6 @@
"dev": true,
"license": "MIT"
},
- "node_modules/periscopic": {
- "version": "3.1.0",
- "resolved": "https://registry.npmjs.org/periscopic/-/periscopic-3.1.0.tgz",
- "integrity": "sha512-vKiQ8RRtkl9P+r/+oefh25C3fhybptkHKCZSPlcXiJux2tJF55GnEj3BVn4A5gKfq9NWWXXrxkHBwVPUfH0opw==",
- "license": "MIT",
- "dependencies": {
- "@types/estree": "^1.0.0",
- "estree-walker": "^3.0.0",
- "is-reference": "^3.0.0"
- }
- },
- "node_modules/periscopic/node_modules/estree-walker": {
- "version": "3.0.3",
- "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz",
- "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==",
- "license": "MIT",
- "dependencies": {
- "@types/estree": "^1.0.0"
- }
- },
- "node_modules/periscopic/node_modules/is-reference": {
- "version": "3.0.3",
- "resolved": "https://registry.npmjs.org/is-reference/-/is-reference-3.0.3.tgz",
- "integrity": "sha512-ixkJoqQvAP88E6wLydLGGqCJsrFUnqoH6HnaczB8XmDH1oaWU+xxdptvikTgaEhtZ53Ky6YXiBuUI2WXLMCwjw==",
- "license": "MIT",
- "dependencies": {
- "@types/estree": "^1.0.6"
- }
- },
"node_modules/picomatch": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz",
@@ -1013,34 +923,31 @@
"safe-buffer": "^5.1.0"
}
},
- "node_modules/regenerator-runtime": {
- "version": "0.14.1",
- "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz",
- "integrity": "sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==",
- "license": "MIT"
- },
"node_modules/resolve": {
- "version": "1.22.8",
- "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.8.tgz",
- "integrity": "sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==",
+ "version": "1.22.10",
+ "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.10.tgz",
+ "integrity": "sha512-NPRy+/ncIMeDlTAsuqwKIiferiawhefFJtkNSW0qZJEqMEb+qBt/77B/jGeeek+F0uOeN05CDa6HXbbIgtVX4w==",
"dev": true,
"license": "MIT",
"dependencies": {
- "is-core-module": "^2.13.0",
+ "is-core-module": "^2.16.0",
"path-parse": "^1.0.7",
"supports-preserve-symlinks-flag": "^1.0.0"
},
"bin": {
"resolve": "bin/resolve"
},
+ "engines": {
+ "node": ">= 0.4"
+ },
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/resolve.exports": {
- "version": "2.0.2",
- "resolved": "https://registry.npmjs.org/resolve.exports/-/resolve.exports-2.0.2.tgz",
- "integrity": "sha512-X2UW6Nw3n/aMgDVy+0rSqgHlv39WZAlZrXCdnbyEiKm17DSqHX4MmQMaST3FbeWR5FTuRcUwYAziZajji0Y7mg==",
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/resolve.exports/-/resolve.exports-2.0.3.tgz",
+ "integrity": "sha512-OcXjMsGdhL4XnbShKpAcSqPMzQoYkYyhbEaeSko47MjRP9NfEQMhZkXL1DoFlt9LWQn4YttrdnV6X2OiyzBi+A==",
"dev": true,
"license": "MIT",
"engines": {
@@ -1048,13 +955,13 @@
}
},
"node_modules/rollup": {
- "version": "4.27.4",
- "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.27.4.tgz",
- "integrity": "sha512-RLKxqHEMjh/RGLsDxAEsaLO3mWgyoU6x9w6n1ikAzet4B3gI2/3yP6PWY2p9QzRTh6MfEIXB3MwsOY0Iv3vNrw==",
+ "version": "4.41.1",
+ "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.41.1.tgz",
+ "integrity": "sha512-cPmwD3FnFv8rKMBc1MxWCwVQFxwf1JEmSX3iQXrRVVG15zerAIXRjMFVWnd5Q5QvgKF7Aj+5ykXFhUl+QGnyOw==",
"devOptional": true,
"license": "MIT",
"dependencies": {
- "@types/estree": "1.0.6"
+ "@types/estree": "1.0.7"
},
"bin": {
"rollup": "dist/bin/rollup"
@@ -1064,24 +971,26 @@
"npm": ">=8.0.0"
},
"optionalDependencies": {
- "@rollup/rollup-android-arm-eabi": "4.27.4",
- "@rollup/rollup-android-arm64": "4.27.4",
- "@rollup/rollup-darwin-arm64": "4.27.4",
- "@rollup/rollup-darwin-x64": "4.27.4",
- "@rollup/rollup-freebsd-arm64": "4.27.4",
- "@rollup/rollup-freebsd-x64": "4.27.4",
- "@rollup/rollup-linux-arm-gnueabihf": "4.27.4",
- "@rollup/rollup-linux-arm-musleabihf": "4.27.4",
- "@rollup/rollup-linux-arm64-gnu": "4.27.4",
- "@rollup/rollup-linux-arm64-musl": "4.27.4",
- "@rollup/rollup-linux-powerpc64le-gnu": "4.27.4",
- "@rollup/rollup-linux-riscv64-gnu": "4.27.4",
- "@rollup/rollup-linux-s390x-gnu": "4.27.4",
- "@rollup/rollup-linux-x64-gnu": "4.27.4",
- "@rollup/rollup-linux-x64-musl": "4.27.4",
- "@rollup/rollup-win32-arm64-msvc": "4.27.4",
- "@rollup/rollup-win32-ia32-msvc": "4.27.4",
- "@rollup/rollup-win32-x64-msvc": "4.27.4",
+ "@rollup/rollup-android-arm-eabi": "4.41.1",
+ "@rollup/rollup-android-arm64": "4.41.1",
+ "@rollup/rollup-darwin-arm64": "4.41.1",
+ "@rollup/rollup-darwin-x64": "4.41.1",
+ "@rollup/rollup-freebsd-arm64": "4.41.1",
+ "@rollup/rollup-freebsd-x64": "4.41.1",
+ "@rollup/rollup-linux-arm-gnueabihf": "4.41.1",
+ "@rollup/rollup-linux-arm-musleabihf": "4.41.1",
+ "@rollup/rollup-linux-arm64-gnu": "4.41.1",
+ "@rollup/rollup-linux-arm64-musl": "4.41.1",
+ "@rollup/rollup-linux-loongarch64-gnu": "4.41.1",
+ "@rollup/rollup-linux-powerpc64le-gnu": "4.41.1",
+ "@rollup/rollup-linux-riscv64-gnu": "4.41.1",
+ "@rollup/rollup-linux-riscv64-musl": "4.41.1",
+ "@rollup/rollup-linux-s390x-gnu": "4.41.1",
+ "@rollup/rollup-linux-x64-gnu": "4.41.1",
+ "@rollup/rollup-linux-x64-musl": "4.41.1",
+ "@rollup/rollup-win32-arm64-msvc": "4.41.1",
+ "@rollup/rollup-win32-ia32-msvc": "4.41.1",
+ "@rollup/rollup-win32-x64-msvc": "4.41.1",
"fsevents": "~2.3.2"
}
},
@@ -1146,6 +1055,13 @@
"url": "https://github.com/sponsors/jonschlinkert"
}
},
+ "node_modules/rollup/node_modules/@types/estree": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.7.tgz",
+ "integrity": "sha512-w28IoSUCJpidD/TGviZwwMJckNESJZXFu7NBZ5YJ4mEUnNraUn9Pm8HSZm/jDF1pDWYKspWE7oVphigUPRakIQ==",
+ "devOptional": true,
+ "license": "MIT"
+ },
"node_modules/safe-buffer": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
@@ -1200,15 +1116,6 @@
"node": ">=0.10.0"
}
},
- "node_modules/source-map-js": {
- "version": "1.2.1",
- "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz",
- "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==",
- "license": "BSD-3-Clause",
- "engines": {
- "node": ">=0.10.0"
- }
- },
"node_modules/source-map-support": {
"version": "0.5.21",
"resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz",
@@ -1234,47 +1141,28 @@
}
},
"node_modules/svelte": {
- "version": "4.2.19",
- "resolved": "https://registry.npmjs.org/svelte/-/svelte-4.2.19.tgz",
- "integrity": "sha512-IY1rnGr6izd10B0A8LqsBfmlT5OILVuZ7XsI0vdGPEvuonFV7NYEUK4dAkm9Zg2q0Um92kYjTpS1CAP3Nh/KWw==",
+ "version": "5.33.14",
+ "resolved": "https://registry.npmjs.org/svelte/-/svelte-5.33.14.tgz",
+ "integrity": "sha512-kRlbhIlMTijbFmVDQFDeKXPLlX1/ovXwV0I162wRqQhRcygaqDIcu1d/Ese3H2uI+yt3uT8E7ndgDthQv5v5BA==",
"license": "MIT",
"dependencies": {
- "@ampproject/remapping": "^2.2.1",
- "@jridgewell/sourcemap-codec": "^1.4.15",
- "@jridgewell/trace-mapping": "^0.3.18",
- "@types/estree": "^1.0.1",
- "acorn": "^8.9.0",
- "aria-query": "^5.3.0",
- "axobject-query": "^4.0.0",
- "code-red": "^1.0.3",
- "css-tree": "^2.3.1",
- "estree-walker": "^3.0.3",
- "is-reference": "^3.0.1",
+ "@ampproject/remapping": "^2.3.0",
+ "@jridgewell/sourcemap-codec": "^1.5.0",
+ "@sveltejs/acorn-typescript": "^1.0.5",
+ "@types/estree": "^1.0.5",
+ "acorn": "^8.12.1",
+ "aria-query": "^5.3.1",
+ "axobject-query": "^4.1.0",
+ "clsx": "^2.1.1",
+ "esm-env": "^1.2.1",
+ "esrap": "^1.4.6",
+ "is-reference": "^3.0.3",
"locate-character": "^3.0.0",
- "magic-string": "^0.30.4",
- "periscopic": "^3.1.0"
+ "magic-string": "^0.30.11",
+ "zimmerframe": "^1.1.2"
},
"engines": {
- "node": ">=16"
- }
- },
- "node_modules/svelte-chartjs": {
- "version": "3.1.5",
- "resolved": "https://registry.npmjs.org/svelte-chartjs/-/svelte-chartjs-3.1.5.tgz",
- "integrity": "sha512-ka2zh7v5FiwfAX1oMflZ0HkNkgjHjFqANgRyC+vNYXfxtx2ku68Zo+2KgbKeBH2nS1ThDqkIACPzGxy4T0UaoA==",
- "license": "MIT",
- "peerDependencies": {
- "chart.js": "^3.5.0 || ^4.0.0",
- "svelte": "^4.0.0"
- }
- },
- "node_modules/svelte/node_modules/estree-walker": {
- "version": "3.0.3",
- "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz",
- "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==",
- "license": "MIT",
- "dependencies": {
- "@types/estree": "^1.0.0"
+ "node": ">=18"
}
},
"node_modules/svelte/node_modules/is-reference": {
@@ -1287,14 +1175,14 @@
}
},
"node_modules/terser": {
- "version": "5.36.0",
- "resolved": "https://registry.npmjs.org/terser/-/terser-5.36.0.tgz",
- "integrity": "sha512-IYV9eNMuFAV4THUspIRXkLakHnV6XO7FEdtKjf/mDyrnqUg9LnlOn6/RwRvM9SZjR4GUq8Nk8zj67FzVARr74w==",
+ "version": "5.41.0",
+ "resolved": "https://registry.npmjs.org/terser/-/terser-5.41.0.tgz",
+ "integrity": "sha512-H406eLPXpZbAX14+B8psIuvIr8+3c+2hkuYzpMkoE0ij+NdsVATbA78vb8neA/eqrj7rywa2pIkdmWRsXW6wmw==",
"dev": true,
"license": "BSD-2-Clause",
"dependencies": {
"@jridgewell/source-map": "^0.3.3",
- "acorn": "^8.8.2",
+ "acorn": "^8.14.0",
"commander": "^2.20.0",
"source-map-support": "~0.5.20"
},
@@ -1321,23 +1209,22 @@
}
},
"node_modules/uplot": {
- "version": "1.6.31",
- "resolved": "https://registry.npmjs.org/uplot/-/uplot-1.6.31.tgz",
- "integrity": "sha512-sQZqSwVCbJGnFB4IQjQYopzj5CoTZJ4Br1fG/xdONimqgHmsacvCjNesdGDypNKFbrhLGIeshYhy89FxPF+H+w==",
+ "version": "1.6.32",
+ "resolved": "https://registry.npmjs.org/uplot/-/uplot-1.6.32.tgz",
+ "integrity": "sha512-KIMVnG68zvu5XXUbC4LQEPnhwOxBuLyW1AHtpm6IKTXImkbLgkMy+jabjLgSLMasNuGGzQm/ep3tOkyTxpiQIw==",
"license": "MIT"
},
"node_modules/wonka": {
- "version": "6.3.4",
- "resolved": "https://registry.npmjs.org/wonka/-/wonka-6.3.4.tgz",
- "integrity": "sha512-CjpbqNtBGNAeyNS/9W6q3kSkKE52+FjIj7AkFlLr11s/VWGUu6a2CdYSdGxocIhIVjaW/zchesBQUKPVU69Cqg==",
+ "version": "6.3.5",
+ "resolved": "https://registry.npmjs.org/wonka/-/wonka-6.3.5.tgz",
+ "integrity": "sha512-SSil+ecw6B4/Dm7Pf2sAshKQ5hWFvfyGlfPbEd6A14dOH6VDjrmbY86u6nZvy9omGwwIPFR8V41+of1EezgoUw==",
"license": "MIT"
},
- "node_modules/wrappy": {
- "version": "1.0.2",
- "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
- "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==",
- "dev": true,
- "license": "ISC"
+ "node_modules/zimmerframe": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/zimmerframe/-/zimmerframe-1.1.2.tgz",
+ "integrity": "sha512-rAbqEGa8ovJy4pyBxZM70hg4pE6gDgaQ0Sl9M3enG3I0d6H4XSAM3GeNGLKnsBpuijUow064sf7ww1nutC5/3w==",
+ "license": "MIT"
}
}
}
diff --git a/web/frontend/package.json b/web/frontend/package.json
index 389ffe6..b10e271 100644
--- a/web/frontend/package.json
+++ b/web/frontend/package.json
@@ -7,25 +7,24 @@
"dev": "rollup -c -w"
},
"devDependencies": {
- "@rollup/plugin-commonjs": "^25.0.8",
- "@rollup/plugin-node-resolve": "^15.3.0",
+ "@rollup/plugin-commonjs": "^28.0.3",
+ "@rollup/plugin-node-resolve": "^16.0.1",
"@rollup/plugin-terser": "^0.4.4",
"@timohausmann/quadtree-js": "^1.2.6",
- "rollup": "^4.27.4",
+ "rollup": "^4.41.1",
"rollup-plugin-css-only": "^4.5.2",
"rollup-plugin-svelte": "^7.2.2",
- "svelte": "^4.2.19"
+ "svelte": "^5.33.14"
},
"dependencies": {
- "@rollup/plugin-replace": "^5.0.7",
- "@sveltestrap/sveltestrap": "^6.2.7",
- "@urql/svelte": "^4.2.2",
- "chart.js": "^4.4.6",
- "date-fns": "^2.30.0",
- "graphql": "^16.9.0",
- "mathjs": "^12.4.3",
- "svelte-chartjs": "^3.1.5",
- "uplot": "^1.6.31",
- "wonka": "^6.3.4"
+ "@rollup/plugin-replace": "^6.0.2",
+ "@sveltestrap/sveltestrap": "^7.1.0",
+ "@urql/svelte": "^4.2.3",
+ "chart.js": "^4.4.9",
+ "date-fns": "^4.1.0",
+ "graphql": "^16.11.0",
+ "mathjs": "^14.5.2",
+ "uplot": "^1.6.32",
+ "wonka": "^6.3.5"
}
}
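
One side effect of this dependency refresh worth flagging: `svelte-chartjs` is dropped entirely, since its peer range (visible in the lock-file hunk above) only allows `svelte: ^4.0.0`. Charts presumably have to drive `chart.js` directly now. A minimal sketch of what such a wrapper could look like in Svelte 5 — a hypothetical component, not part of this patch:

```svelte
<script>
  import { onMount } from 'svelte';
  import Chart from 'chart.js/auto';

  // Mirrors the chart.js config object; prop names are illustrative only
  let { type = 'line', data, options = {} } = $props();

  let canvas; // bound to the element below
  let chart;

  onMount(() => {
    chart = new Chart(canvas, { type, data, options });
    return () => chart.destroy(); // clean up on unmount
  });

  // Push reactive input changes into the existing chart instance
  $effect(() => {
    if (chart) {
      chart.data = data;
      chart.update();
    }
  });
</script>

<canvas bind:this={canvas}></canvas>
```
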
diff --git a/web/frontend/rollup.config.mjs b/web/frontend/rollup.config.mjs
index 0e15105..a7b4732 100644
--- a/web/frontend/rollup.config.mjs
+++ b/web/frontend/rollup.config.mjs
@@ -6,13 +6,20 @@ import terser from '@rollup/plugin-terser';
import css from 'rollup-plugin-css-only';
const production = !process.env.ROLLUP_WATCH;
-// const production = false
const plugins = [
svelte({
compilerOptions: {
- // enable run-time checks when not in production
- dev: !production
+ // Enable run-time checks when not in production
+ dev: !production,
+ // Note: hydratable/immutable are legacy Svelte 4 compiler options (Svelte 5 deprecates/ignores them)
+ hydratable: true, // If using server-side rendering
+ immutable: true, // Optimize updates for immutable data
+ // As of sveltestrap 7.1.0, the warnings filtered below would otherwise be raised for imported sveltestrap components
+ warningFilter: (warning) => (
+ warning.code !== 'element_invalid_self_closing_tag' &&
+ warning.code !== 'a11y_interactive_supports_focus'
+ )
}
}),
@@ -23,7 +30,7 @@ const plugins = [
// https://github.com/rollup/plugins/tree/master/packages/commonjs
resolve({
browser: true,
- dedupe: ['svelte']
+ dedupe: ['svelte', '@sveltejs/kit'] // Ensure deduplication for Svelte 5
}),
commonjs(),
@@ -32,8 +39,10 @@ const plugins = [
production && terser(),
replace({
- "process.env.NODE_ENV": JSON.stringify("development"),
- preventAssignment: true
+ preventAssignment: true,
+ values: {
+ "process.env.NODE_ENV": JSON.stringify(production ? "production" : "development"),
+ }
})
];
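
The `replace()` change fixes a real bug: `process.env.NODE_ENV` was previously hard-coded to `"development"` even in production builds. With the corrected config it follows the build mode, so dev-only guards like the one below are constant-folded and stripped by terser in production bundles:

```js
// Dev-only diagnostics; eliminated from minified production builds
if (process.env.NODE_ENV !== "production") {
  console.debug("development build: extra runtime checks enabled");
}
```
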
diff --git a/web/frontend/src/Analysis.root.svelte b/web/frontend/src/Analysis.root.svelte
index 8b327a2..689b7a2 100644
--- a/web/frontend/src/Analysis.root.svelte
+++ b/web/frontend/src/Analysis.root.svelte
@@ -1,9 +1,9 @@
+ Properties:
+ - `filterPresets Object`: Optional predefined filter values
+-->
@@ -318,18 +348,20 @@
{:else if cluster}
av.name)}
- bind:metricsInHistograms
- bind:metricsInScatterplots
+ presetMetricsInHistograms={metricsInHistograms}
+ presetMetricsInScatterplots={metricsInScatterplots}
+ applyHistograms={(metrics) => metricsInHistograms = [...metrics]}
+ applyScatter={(metrics) => metricsInScatterplots = [...metrics]}
/>
{/if}
{
+ applyFilters={(detail) => {
jobFilters = detail.filters;
}}
/>
@@ -392,6 +424,7 @@
{$topQuery.error.message}
{:else}
+
+ {#snippet histoGridContent(item)}
+
+ {/snippet}
+
({
metric,
...binsFromFootprint(
@@ -547,17 +592,8 @@
),
}))}
itemsPerRow={ccconfig.plot_view_plotsPerRow}
- >
-
-
+ gridContent={histoGridContent}
+ />
@@ -575,9 +611,19 @@
+ {#snippet metricsGridContent(item)}
+
+ {/snippet}
+
({
m1,
f1: $footprintsQuery.data.footprints.metrics.find(
@@ -589,18 +635,8 @@
).data,
}))}
itemsPerRow={ccconfig.plot_view_plotsPerRow}
- >
-
-
+ gridContent={metricsGridContent}
+ />
{/if}
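
The hunks above show this patch's most common structural rewrite: Svelte 4 slot content becomes a Svelte 5 snippet, declared with `{#snippet ...}` and passed to the grid component as an ordinary prop. A stripped-down sketch of the pattern (the names follow the ones used in this patch, but the body is illustrative):

```svelte
<!-- Declare the snippet where the data lives ... -->
{#snippet histoGridContent(item)}
  <h4>{item.metric}</h4>
{/snippet}

<!-- ... and hand it over as a prop instead of slot children -->
<PlotGrid items={plotData} itemsPerRow={3} gridContent={histoGridContent} />
```

On the receiving side the grid renders it with `{@render gridContent(item)}`, as the grid hunk further down shows.
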
diff --git a/web/frontend/src/Config.root.svelte b/web/frontend/src/Config.root.svelte
index 126f92b..e8a2045 100644
--- a/web/frontend/src/Config.root.svelte
+++ b/web/frontend/src/Config.root.svelte
@@ -1,12 +1,13 @@
+ Properties:
+ - `isAdmin Bool!`: Whether the currently logged-in user has admin authority
+ - `isSupport Bool!`: Whether the currently logged-in user has support authority
+ - `isApi Bool!`: Whether the currently logged-in user has API authority
+ - `username String!`: Empty string if auth. is disabled, otherwise the username as string
+ - `ncontent String!`: The currently displayed message on the homescreen
+-->
{#if isAdmin}
-
+
Admin Options
@@ -31,7 +35,7 @@
{/if}
{#if isSupport || isAdmin}
-
+
Support Options
diff --git a/web/frontend/src/Header.svelte b/web/frontend/src/Header.svelte
index cf3e058..7a964c8 100644
--- a/web/frontend/src/Header.svelte
+++ b/web/frontend/src/Header.svelte
@@ -1,12 +1,13 @@
+ Properties:
+ - `username String`: Empty string if auth. is disabled, otherwise the username as string
+ - `authlevel Number`: The current user's authentication level
+ - `clusters [String]`: List of cluster names
+ - `subClusters [String]`: List of subCluster names
+ - `roles [Number]`: Enum containing available roles
+-->
+
- (isOpen = !isOpen)} />
+ (isOpen = !isOpen)} />
(isOpen = detail.isOpen)}
+ onupdate={({ detail }) => (isOpen = detail.isOpen)}
>
- {#if screenSize > 1500 || screenSize < 768}
+ {#if showMax || showBrg}
item.requiredRole <= authlevel)}
/>
- {:else if screenSize > 1300}
+
+ {:else if showMid}
{/if}
- {:else}
+
+ {:else if showSml}
{/if}
+
+ {:else}
+ Error: Unknown Window Size!
{/if}
-
+
\ No newline at end of file
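
Header.svelte condenses the two event-system changes that repeat through the rest of the patch: DOM event directives (`on:click`) become plain attributes (`onclick`), and component events that used to be dispatched via `createEventDispatcher` become callback props. A minimal sketch with hypothetical names:

```svelte
<!-- Child.svelte: the callback prop replaces dispatch('update', {...}) -->
<script>
  let { onupdate } = $props();
</script>
<button onclick={() => onupdate({ detail: { isOpen: true } })}>
  Toggle
</button>
```

```svelte
<!-- Parent usage keeps the familiar ({ detail }) destructuring -->
<Child onupdate={({ detail }) => (isOpen = detail.isOpen)} />
```
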
diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte
index 92d8bb2..d481436 100644
--- a/web/frontend/src/Job.root.svelte
+++ b/web/frontend/src/Job.root.svelte
@@ -1,18 +1,19 @@
+ Properties:
+ - `dbid Number`: The job's DB ID
+ - `username String`: Empty string if auth. is disabled, otherwise the username as string
+ - `authlevel Number`: The current user's authentication level
+ - `roles [Number]`: Enum containing available roles
+-->
@@ -115,11 +133,11 @@
{
+ applyFilters={(detail) => {
jobFilters = detail.filters;
}}
/>
@@ -137,9 +155,14 @@
changeSorting(e, "id")}
+ onclick={() => changeSorting("id")}
>
-
+ {#if sorting?.field == "id"}
+
+
+ {:else}
+
+ {/if}
{#if type == "USER"}
@@ -148,9 +171,13 @@
changeSorting(e, "name")}
+ onclick={() => changeSorting("name")}
>
-
+ {#if sorting?.field == "name"}
+
+ {:else}
+
+ {/if}
{/if}
@@ -159,9 +186,14 @@
changeSorting(e, "totalJobs")}
+ onclick={() => changeSorting("totalJobs")}
>
-
+ {#if sorting?.field == "totalJobs"}
+
+
+ {:else}
+
+ {/if}
@@ -169,9 +201,13 @@
changeSorting(e, "totalWalltime")}
+ onclick={() => changeSorting("totalWalltime")}
>
-
+ {#if sorting?.field == "totalWalltime"}
+
+ {:else}
+
+ {/if}
@@ -179,9 +215,13 @@
changeSorting(e, "totalCoreHours")}
+ onclick={() => changeSorting("totalCoreHours")}
>
-
+ {#if sorting?.field == "totalCoreHours"}
+
+ {:else}
+
+ {/if}
@@ -189,9 +229,13 @@
changeSorting(e, "totalAccHours")}
+ onclick={() => changeSorting("totalAccHours")}
>
-
+ {#if sorting?.field == "totalAccHours"}
+
+ {:else}
+
+ {/if}
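
Note the handler signature change in the sort headers above: `changeSorting(e, "id")` becomes `changeSorting("id")`, since the plain `onclick` attribute no longer needs the event for directive modifiers. A plausible implementation under the runes model — hypothetical, as the function body is not part of this hunk:

```js
// Assumed $state-backed sorting object driving the {#if sorting?.field == ...} icons
let sorting = $state({ field: "totalJobs", direction: "down" });

function changeSorting(field) {
  // Same field: flip direction; new field: sort descending first
  sorting = {
    field,
    direction:
      sorting.field === field && sorting.direction === "down" ? "up" : "down",
  };
}
```
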
diff --git a/web/frontend/src/Node.root.svelte b/web/frontend/src/Node.root.svelte
index 60ab404..8082c5a 100644
--- a/web/frontend/src/Node.root.svelte
+++ b/web/frontend/src/Node.root.svelte
@@ -1,15 +1,17 @@
+ Properties:
+ - `cluster String`: Currently selected cluster
+ - `hostname String`: Currently selected host (== node)
+ - `presetFrom Date?`: Custom Time Range selection 'from' [Default: null]
+ - `presetTo Date?`: Custom Time Range selection 'to' [Default: null]
+-->
@@ -146,7 +154,14 @@
-
+ {
+ from = newFrom;
+ to = newTo;
+ }}
+ />
@@ -172,7 +187,7 @@
{
+ onRefresh={() => {
const diff = Date.now() - to;
from = new Date(from.getTime() + diff);
to = new Date(to.getTime() + diff);
@@ -189,20 +204,8 @@
{:else if $nodeMetricsData.fetching || $initq.fetching}
{:else}
- ({
- ...m,
- disabled: checkMetricDisabled(
- m.name,
- cluster,
- $nodeMetricsData.data.nodeMetrics[0].subCluster,
- ),
- }))
- .sort((a, b) => a.name.localeCompare(b.name))}
- >
+
+ {#snippet gridContent(item)}
{item.name}
{systemUnits[item.name] ? "(" + systemUnits[item.name] + ")" : ""}
@@ -214,7 +217,7 @@
cluster={clusters.find((c) => c.name == cluster)}
subCluster={$nodeMetricsData.data.nodeMetrics[0].subCluster}
series={item.metric.series}
- forNode={true}
+ forNode
/>
{:else if item.disabled === true && item.metric}
No dataset returned for {item.name}
{/if}
-
+ {/snippet}
+
+ ({
+ ...m,
+ disabled: checkMetricDisabled(
+ m.name,
+ cluster,
+ $nodeMetricsData.data.nodeMetrics[0].subCluster,
+ ),
+ }))
+ .sort((a, b) => a.name.localeCompare(b.name))}
+ itemsPerRow={ccconfig.plot_view_plotsPerRow}
+ {gridContent}
+ />
{/if}
diff --git a/web/frontend/src/Status.root.svelte b/web/frontend/src/Status.root.svelte
index 86170d1..8cdd091 100644
--- a/web/frontend/src/Status.root.svelte
+++ b/web/frontend/src/Status.root.svelte
@@ -1,10 +1,10 @@
- Properties:
- - `cluster String`: The cluster to show status information for
- -->
-
@@ -334,7 +345,7 @@
(isHistogramSelectionOpen = true)}
+ onclick={() => (isHistogramSelectionOpen = true)}
>
Select Histograms
@@ -342,7 +353,7 @@
{
+ onRefresh={() => {
from = new Date(Date.now() - 5 * 60 * 1000);
to = new Date(Date.now());
}}
@@ -450,7 +461,7 @@
{#key $mainQuery.data.nodeMetrics}
{$topUserQuery.error.message}
{:else}
{$topProjectQuery.error.message}
{:else}
{#if selectedHistograms}
+
+ {#snippet gridContent(item)}
+
+ {/snippet}
+
{#key $mainQuery.data.stats[0].histMetrics}
-
-
+ {gridContent}
+ />
{/key}
{/if}
{/if}
{
+ selectedHistograms = [...newSelection];
+ }}
/>
diff --git a/web/frontend/src/Systems.root.svelte b/web/frontend/src/Systems.root.svelte
index 1589cac..f46dde4 100644
--- a/web/frontend/src/Systems.root.svelte
+++ b/web/frontend/src/Systems.root.svelte
@@ -1,12 +1,13 @@
+ Properties:
+ - `displayType String?`: The type of node display ['OVERVIEW' || 'LIST']
+ - `cluster String`: The cluster to show status information for [Default: null]
+ - `subCluster String`: The subCluster to show status information for [Default: null]
+ - `presetFrom Date?`: Custom Time Range selection 'from' [Default: null]
+ - `presetTo Date?`: Custom Time Range selection 'to' [Default: null]
+-->
@@ -108,7 +113,7 @@
(isMetricsSelectionOpen = true)}
+ onclick={() => (isMetricsSelectionOpen = true)}
>
{selectedMetrics.length} selected
@@ -139,12 +144,20 @@
placeholder="Filter hostname ..."
type="text"
bind:value={pendingHostnameFilter}
+ oninput={updateHostnameFilter}
/>
-
+ {
+ from = newFrom;
+ to = newTo;
+ }}
+ />
{#if displayNodeOverview}
@@ -153,26 +166,28 @@
Metric
- {#each systemMetrics as metric}
+ {#each systemMetrics as metric (metric.name)}
- {metric.name} {systemUnits[metric.name] ? "("+systemUnits[metric.name]+")" : ""}
+ {metric.name} {presetSystemUnits[metric.name] ? "("+presetSystemUnits[metric.name]+")" : ""}
+ {:else}
+ No available options
{/each}
{/if}
-
-
- {
- const diff = Date.now() - to;
- from = new Date(from.getTime() + diff);
- to = new Date(to.getTime() + diff);
- }}
- />
-
{/if}
+
+
+ {
+ const diff = Date.now() - to;
+ from = new Date(from.getTime() + diff);
+ to = new Date(to.getTime() + diff);
+ }}
+ />
+
@@ -185,20 +200,22 @@
{:else}
{#if displayNodeOverview}
-
+
{:else}
-
+
{/if}
{/if}
- {
- selectedMetrics = [...detail]
- }}
-/>
+{#if !displayNodeOverview}
+
+ selectedMetrics = [...newMetrics]
+ }
+ />
+{/if}
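
Two small Svelte features do the work in the Systems metric selector above: keyed `{#each}` iteration (`(metric.name)`) gives entries a stable identity across list updates, and the `{:else}` branch of an `{#each}` block renders exactly when the list is empty. In isolation:

```svelte
{#each systemMetrics as metric (metric.name)}
  <option value={metric.name}>{metric.name}</option>
{:else}
  <option disabled>No available options</option>
{/each}
```
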
diff --git a/web/frontend/src/Tags.root.svelte b/web/frontend/src/Tags.root.svelte
index 03311b4..21dbcf4 100644
--- a/web/frontend/src/Tags.root.svelte
+++ b/web/frontend/src/Tags.root.svelte
@@ -1,11 +1,11 @@
+ Properties:
+ - `username String!`: The user's username.
+ - `isAdmin Bool!`: Whether the user has admin authority.
+ - `presetTagmap Object!`: Map of accessible, app-wide tags, prefiltered in the backend.
+-->
@@ -96,7 +103,7 @@
removeTag(tag, tagType)}
+ onclick={() => removeTag(tag, tagType)}
>
diff --git a/web/frontend/src/User.root.svelte b/web/frontend/src/User.root.svelte
index 9b6e15a..c1f0fb8 100644
--- a/web/frontend/src/User.root.svelte
+++ b/web/frontend/src/User.root.svelte
@@ -1,10 +1,10 @@
+ Properties:
+ - `user Object`: The GraphQL user object
+ - `filterPresets Object`: Optional predefined filter values
+-->
@@ -129,13 +162,13 @@
- (isSortingOpen = true)}>
+ (isSortingOpen = true)}>
Sorting
(isMetricsSelectionOpen = true)}
+ onclick={() => (isMetricsSelectionOpen = true)}
>
Metrics
@@ -143,11 +176,11 @@
{
+ {filterPresets}
+ matchedJobs={matchedListJobs}
+ applyFilters={(detail) => {
jobFilters = [...detail.filters, { user: { eq: user.username } }];
selectedCluster = jobFilters[0]?.cluster
? jobFilters[0].cluster.eq
@@ -173,11 +206,11 @@
filterComponent.updateFilters(detail)}
+ setFilter={(filter) => filterComponent.updateFilters(filter)}
/>
- {
+ {
jobList.refreshJobs()
jobList.refreshAllMetrics()
}} />
@@ -269,7 +302,7 @@
outline
color="secondary"
class="w-100"
- on:click={() => (isHistogramSelectionOpen = true)}
+ onclick={() => (isHistogramSelectionOpen = true)}
>
Select Histograms
@@ -305,22 +338,25 @@
{:else}
+
+ {#snippet gridContent(item)}
+
+ {/snippet}
+
{#key $stats.data.jobsStatistics[0].histMetrics}
-
-
+ {gridContent}
+ />
{/key}
{/if}
{:else}
@@ -336,27 +372,39 @@
-
+
+ sorting = {...newSort}
+ }
+/>
+ metrics = [...newMetrics]
+ }
/>
{
+ selectedHistogramsBuffer[selectedCluster || 'all'] = [...newSelection];
+ }}
/>
diff --git a/web/frontend/src/analysis.entrypoint.js b/web/frontend/src/analysis.entrypoint.js
index 07c63f5..97e4739 100644
--- a/web/frontend/src/analysis.entrypoint.js
+++ b/web/frontend/src/analysis.entrypoint.js
@@ -1,9 +1,10 @@
+import { mount } from 'svelte';
import {} from './header.entrypoint.js'
import Analysis from './Analysis.root.svelte'
filterPresets.cluster = cluster
-new Analysis({
+mount(Analysis, {
target: document.getElementById('svelte-app'),
props: {
filterPresets: filterPresets,
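
All entrypoints migrate the same way: the Svelte 4 component constructor is replaced by the Svelte 5 `mount()` API. The full shape of the call (this entrypoint's closing lines are truncated above, so the tail here is reconstructed from the pattern):

```js
import { mount } from 'svelte';
import Analysis from './Analysis.root.svelte';

// Svelte 4: const app = new Analysis({ target, props });
// Svelte 5:
mount(Analysis, {
  target: document.getElementById('svelte-app'),
  props: {
    filterPresets: filterPresets,
  },
});
```

`mount()` returns the component's exports; tearing it down goes through `unmount()` from the same module.
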
diff --git a/web/frontend/src/analysis/PlotSelection.svelte b/web/frontend/src/analysis/PlotSelection.svelte
index 6a5e089..f4818b6 100644
--- a/web/frontend/src/analysis/PlotSelection.svelte
+++ b/web/frontend/src/analysis/PlotSelection.svelte
@@ -1,11 +1,13 @@
+ Properties:
+ - `availableMetrics [String]`: Available metrics in selected cluster
+ - `presetMetricsInHistograms [String]`: The latest selected metrics to display as histogram
+ - `presetMetricsInScatterplots [[String, String]]`: The latest selected metrics to display as scatterplot
+ - `applyHistograms Func`: The callback function to apply current histogramMetrics selection
+ - `applyScatter Func`: The callback function to apply current scatterMetrics selection
+-->
- (isHistogramConfigOpen = true)}>
+ (isHistogramConfigOpen = true)}>
Select Plots for Histograms
- (isScatterPlotConfigOpen = true)}>
+ (isScatterPlotConfigOpen = true)}>
Select Plots in Scatter Plots
@@ -78,11 +90,13 @@
type="checkbox"
bind:group={metricsInHistograms}
value={metric}
- on:change={() =>
+ onchange={() => {
updateConfiguration({
name: "analysis_view_histogramMetrics",
value: metricsInHistograms,
- })}
+ });
+ applyHistograms(metricsInHistograms);
+ }}
/>
{metric}
@@ -91,7 +105,7 @@
- (isHistogramConfigOpen = false)}>
+ (isHistogramConfigOpen = false)}>
Close
@@ -112,7 +126,7 @@
style="float: right;"
outline
color="danger"
- on:click={() => {
+ onclick={() => {
metricsInScatterplots = metricsInScatterplots.filter(
(p) => pair != p,
);
@@ -120,6 +134,7 @@
name: "analysis_view_scatterPlotMetrics",
value: metricsInScatterplots,
});
+ applyScatter(metricsInScatterplots);
}}
>
@@ -146,7 +161,7 @@
{
+ onclick={() => {
metricsInScatterplots = [
...metricsInScatterplots,
[selectedMetric1, selectedMetric2],
@@ -157,6 +172,7 @@
name: "analysis_view_scatterPlotMetrics",
value: metricsInScatterplots,
});
+ applyScatter(metricsInScatterplots);
}}
>
Add Plot
@@ -164,7 +180,7 @@
- (isScatterPlotConfigOpen = false)}>
+ (isScatterPlotConfigOpen = false)}>
Close
diff --git a/web/frontend/src/config.entrypoint.js b/web/frontend/src/config.entrypoint.js
index d2949f2..f9d8e45 100644
--- a/web/frontend/src/config.entrypoint.js
+++ b/web/frontend/src/config.entrypoint.js
@@ -1,7 +1,8 @@
+import { mount } from 'svelte';
import {} from './header.entrypoint.js'
import Config from './Config.root.svelte'
-new Config({
+mount(Config, {
target: document.getElementById('svelte-app'),
props: {
isAdmin: isAdmin,
diff --git a/web/frontend/src/config/AdminSettings.svelte b/web/frontend/src/config/AdminSettings.svelte
index dd53df4..1a426b8 100644
--- a/web/frontend/src/config/AdminSettings.svelte
+++ b/web/frontend/src/config/AdminSettings.svelte
@@ -1,6 +1,9 @@
+ @component Admin settings wrapper
+
+ Properties:
+ - `ncontent String`: The homepage notice content
+-->
-
+ getUserList()} />
-
+ getUserList()} bind:users />
-
+ getUserList()} />
-
+ getUserList()} />
diff --git a/web/frontend/src/config/SupportSettings.svelte b/web/frontend/src/config/SupportSettings.svelte
index 8207b8e..497119b 100644
--- a/web/frontend/src/config/SupportSettings.svelte
+++ b/web/frontend/src/config/SupportSettings.svelte
@@ -1,12 +1,12 @@
+ @component Support settings wrapper
+-->
diff --git a/web/frontend/src/config/UserSettings.svelte b/web/frontend/src/config/UserSettings.svelte
index 1b59e31..dade973 100644
--- a/web/frontend/src/config/UserSettings.svelte
+++ b/web/frontend/src/config/UserSettings.svelte
@@ -1,10 +1,10 @@
+ Properties:
+ - `username String!`: Empty string if auth. is disabled, otherwise the username as string
+ - `isApi Bool!`: Whether the currently logged-in user has API authority
+-->
- handleSettingSubmit(e)}/>
- handleSettingSubmit(e)}/>
- handleSettingSubmit(e)}/>
+ handleSettingSubmit(e, newSetting)}/>
+ handleSettingSubmit(e, newSetting)}/>
+ handleSettingSubmit(e, newSetting)}/>
diff --git a/web/frontend/src/config/admin/AddUser.svelte b/web/frontend/src/config/admin/AddUser.svelte
index 6c20d7a..2bcad3b 100644
--- a/web/frontend/src/config/admin/AddUser.svelte
+++ b/web/frontend/src/config/admin/AddUser.svelte
@@ -1,26 +1,29 @@
- Events:
- - `reload`: Trigger upstream reload of user list after user creation
- -->
-
@@ -59,7 +58,8 @@
method="post"
action="/config/users/"
class="card-body"
- on:submit|preventDefault={handleUserSubmit}
+ autocomplete="off"
+ onsubmit={(e) => handleUserSubmit(e)}
>
Create User
@@ -70,6 +70,7 @@
id="username"
name="username"
aria-describedby="usernameHelp"
+ autocomplete="username"
/>
Must be unique.
@@ -81,6 +82,7 @@
id="password"
name="password"
aria-describedby="passwordHelp"
+ autocomplete="new-password"
/>
Only API users are allowed to have a blank password. Users with a blank
@@ -109,6 +111,7 @@
id="name"
name="name"
aria-describedby="nameHelp"
+ autocomplete="name"
/>
Optional, can be blank.
@@ -120,6 +123,7 @@
id="email"
name="email"
aria-describedby="emailHelp"
+ autocomplete="email"
/>
Optional, can be blank.
@@ -153,13 +157,13 @@
{/each}
- Submit
- {#if displayMessage}
- {message.msg}
-
{/if}
+ Submit
+ {#if displayMessage}
+ {message.msg}
+ {/if}
diff --git a/web/frontend/src/config/admin/EditProject.svelte b/web/frontend/src/config/admin/EditProject.svelte
index 3c87d46..8a34264 100644
--- a/web/frontend/src/config/admin/EditProject.svelte
+++ b/web/frontend/src/config/admin/EditProject.svelte
@@ -1,21 +1,27 @@
+ Properties:
+ - `reloadUser Func`: The callback function to reload the user list
+-->
@@ -108,19 +112,17 @@
placeholder="project-id"
id="project-id"
/>
-
-
Add handleAddProject(e)}>Add
Remove handleRemoveProject(e)}>Remove
diff --git a/web/frontend/src/config/admin/EditRole.svelte b/web/frontend/src/config/admin/EditRole.svelte
index b8d12bd..4fccf97 100644
--- a/web/frontend/src/config/admin/EditRole.svelte
+++ b/web/frontend/src/config/admin/EditRole.svelte
@@ -1,26 +1,29 @@
+ Properties:
+ - `roles [String]!`: List of roles used in app as strings
+ - `reloadUser Func`: The callback function to reload the user list
+-->
@@ -113,19 +114,17 @@
>
{/each}
-
-
Add handleAddRole(e)}>Add
Remove handleRemoveRole(e)}>Remove
diff --git a/web/frontend/src/config/admin/NoticeEdit.svelte b/web/frontend/src/config/admin/NoticeEdit.svelte
index 325800b..7d1d93a 100644
--- a/web/frontend/src/config/admin/NoticeEdit.svelte
+++ b/web/frontend/src/config/admin/NoticeEdit.svelte
@@ -1,19 +1,28 @@
+ @component Admin edit notice content card
+
+ Properties:
+ - `ncontent String`: The homepage notice content
+-->
@@ -53,11 +49,11 @@
reloadUser()}
style="float: right;">Reload
-
+
@@ -71,13 +67,13 @@
- {#each userList as user}
+ {#each users as user}
Delete deleteUser(user.username)}>Delete
diff --git a/web/frontend/src/config/admin/ShowUsersRow.svelte b/web/frontend/src/config/admin/ShowUsersRow.svelte
index 25c3710..1ba658d 100644
--- a/web/frontend/src/config/admin/ShowUsersRow.svelte
+++ b/web/frontend/src/config/admin/ShowUsersRow.svelte
@@ -1,18 +1,24 @@
+ Properties:
+ - `user Object!`: User Object
+ - {username: String, name: String, roles: [String], projects: String, email: String}
+-->
-
-
-
\ No newline at end of file
diff --git a/web/frontend/src/config/user/PlotRenderOptions.svelte b/web/frontend/src/config/user/PlotRenderOptions.svelte
index 8a3a948..2f6af91 100644
--- a/web/frontend/src/config/user/PlotRenderOptions.svelte
+++ b/web/frontend/src/config/user/PlotRenderOptions.svelte
@@ -1,222 +1,222 @@
- Events:
- - `update-config, {selector: String, target: String}`: Trigger upstream update of the config option
- -->
-
-
-
-
-
-
- updateSetting("#line-width-form", "lw")}
+
+
+
+ updateSetting(e, {
+ selector: "#line-width-form",
+ target: "lw",
+ })}
+ >
+
+
-
-
- Line Width
-
- {#if displayMessage && message.target == "lw"}
-
-
- Update: {message.msg}
-
-
+ Line Width
+
+ {#if displayMessage && message.target == "lw"}
+
+
+ Update: {message.msg}
+
+
+ {/if}
+
+
+
+
Line Width
+
+
+ Width of the lines in the timeseries plots.
+
+
+ Submit
+
+
+
+
+
+
+
+ updateSetting(e, {
+ selector: "#plots-per-row-form",
+ target: "ppr",
+ })}
+ >
+
+
+ Plots per Row
+ {#if displayMessage && message.target == "ppr"}
+
+
+ Update: {message.msg}
+
+
+ {/if}
+
+
+
+
Plots per Row
+
+
+ How many plots to show next to each other on pages such as
+ /monitoring/job/, /monitoring/system/...
+
+
+ Submit
+
+
+
+
+
+
+
+ updateSetting(e, {
+ selector: "#backgrounds-form",
+ target: "bg",
+ })}
+ >
+
+
+ Colored Backgrounds
+ {#if displayMessage && message.target == "bg"}
+
+
+ Update: {message.msg}
+
+
+ {/if}
+
+
+
+
+
Submit
+
+
+
+ updateSetting(e, {
+ selector: "#colorblindmode-form",
+ target: "cbm",
+ })}
+ >
+
+
-
-
- Plots per Row
- {#if displayMessage && message.target == "ppr"}
- Update: {message.msg}
-
{/if}
-
-
-
-
Plots per Row
-
-
- How many plots to show next to each other on pages such as
- /monitoring/job/, /monitoring/system/...
+
Color Blind Mode
+ {#if displayMessage && message.target == "cbm"}
+
+
+ Update: {message.msg}
+
+ {/if}
+
+
+
+
+ {#if config?.plot_general_colorblindMode}
+
+ {:else}
+
+ {/if}
+ Yes
-
Submit
-
-
-
-
-
-
- updateSetting("#backgrounds-form", "bg")}
- >
-
-
- Colored Backgrounds
- {#if displayMessage && message.target == "bg"}
- Update: {message.msg}
-
{/if}
-
-
-
+ Submit
+
+
+
\ No newline at end of file
diff --git a/web/frontend/src/config/user/UserOptions.svelte b/web/frontend/src/config/user/UserOptions.svelte
index 0cdbe9f..49f3c17 100644
--- a/web/frontend/src/config/user/UserOptions.svelte
+++ b/web/frontend/src/config/user/UserOptions.svelte
@@ -1,84 +1,80 @@
- Events:
- - `update-config, {selector: String, target: String}`: Trigger upstream update of the config option
- -->
-
@@ -90,8 +86,10 @@
method="post"
action="/frontend/configuration/"
class="card-body"
- on:submit|preventDefault={() =>
- updateSetting("#paging-form", "pag")}
+ onsubmit={(e) => updateSetting(e, {
+ selector: "#paging-form",
+ target: "pag",
+ })}
>
- {#if config.job_list_usePaging}
+ {#if config?.job_list_usePaging}
{:else}
@@ -117,7 +115,7 @@
Paging with selectable count of jobs.
- {#if config.job_list_usePaging}
+ {#if config?.job_list_usePaging}
{:else}
@@ -133,41 +131,41 @@
{#if isApi}
-
-
- Generate JWT
- {#if jwt}
-
- Copy JWT to Clipboard
-
-
- Your token is displayed on the right. Press this button to copy it to the clipboard.
-
- {#if displayCheck}
-
- Copied!
-
- {/if}
- {:else}
-
- Generate JWT for '{username}'
-
-
- Generate a JSON Web Token for use with the ClusterCockpit REST-API endpoints.
-
- {/if}
-
-
+
+
+ Generate JWT
+ {#if jwt}
+ clipJwt()}>
+ Copy JWT to Clipboard
+
+
+ Your token is displayed on the right. Press this button to copy it to the clipboard.
+
+ {#if displayCheck}
+
+ Copied!
+
+ {/if}
+ {:else}
+ getUserJwt(username)}>
+ Generate JWT for '{username}'
+
+
+ Generate a JSON Web Token for use with the ClusterCockpit REST-API endpoints.
+
+ {/if}
+
+
-
-
- Display JWT
- {jwt ? jwt : 'Press "Gen. JWT" to request token ...'}
-
-
+
+
+ Display JWT
+ {jwt ? jwt : 'Press "Gen. JWT" to request token ...'}
+
+
{/if}
\ No newline at end of file
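
The doc-block headers across this patch change from "Events:" to "Properties:" for a reason: with runes, a component declares all of its inputs, callbacks included, in a single `$props()` destructuring instead of `export let` plus `createEventDispatcher`. A sketch of what a UserOptions-style script head looks like after the migration (names taken from the doc blocks above; the body is illustrative):

```js
// Svelte 4:
//   export let username;
//   export let isApi = false;
//   const dispatch = createEventDispatcher();

// Svelte 5: one declaration covers values and callbacks alike
let { username, isApi = false, handleSettingSubmit } = $props();
```
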
diff --git a/web/frontend/src/generic/Filters.svelte b/web/frontend/src/generic/Filters.svelte
index 74640ae..44fc408 100644
--- a/web/frontend/src/generic/Filters.svelte
+++ b/web/frontend/src/generic/Filters.svelte
@@ -1,22 +1,22 @@
+ Properties:
+ - `menuText String?`: Optional text to show in the dropdown menu [Default: null]
+ - `filterPresets Object?`: Optional predefined filter values [Default: {}]
+ - `disableClusterSelection Bool?`: Is the selection disabled [Default: false]
+ - `startTimeQuickSelect Bool?`: Render startTime quick selections [Default: false]
+ - `matchedJobs Number?`: Number of jobs matching the filter [Default: -2]
+ - `showFilter Bool?`: Whether the filter component should be rendered in addition to the total count info [Default: true]
+ - `applyFilters Func`: The callback function to apply current filter selection
+
+ Functions:
+ - `void updateFilters (additionalFilters: Object, force: Bool)`:
+ Handles new filters from nested components and triggers an upstream update.
+ 'additionalFilters' are usually merged into the existing selection, but can be forced to overwrite it instead.
+-->
+
+{#if $compareData.fetching}
+
+
+
+
+
+{:else if $compareData.error}
+
+
+ {$compareData.error.message}
+
+
+{:else}
+ {#key comparePlotData}
+
+
+
+
+
+ {#each metrics as m}
+
+
+
+
+
+ {/each}
+ {/key}
+
+
+
+
+{/if}
\ No newline at end of file
diff --git a/web/frontend/src/generic/JobList.svelte b/web/frontend/src/generic/JobList.svelte
index 89b8fad..dc6def2 100644
--- a/web/frontend/src/generic/JobList.svelte
+++ b/web/frontend/src/generic/JobList.svelte
@@ -1,20 +1,22 @@
+ Functions:
+ - `refreshJobs()`: Load jobs data with unchanged parameters and 'network-only' keyword
+ - `refreshAllMetrics()`: Trigger downstream refresh of all running jobs' metric data
+ - `queryJobs(filters?: [JobFilter])`: Load jobs data with new filters, starts from page 1
+-->
{#each items as item}
-
+
+ {@render gridContent(item)}
{/each}
diff --git a/web/frontend/src/generic/PlotTable.svelte b/web/frontend/src/generic/PlotTable.svelte
deleted file mode 100644
index 4bc0694..0000000
--- a/web/frontend/src/generic/PlotTable.svelte
+++ /dev/null
@@ -1,59 +0,0 @@
-
-
-
-
-
- {#each rows as row}
-
- {#each row as item (item)}
-
- {#if !isPlaceholder(item) && plotWidth > 0}
-
- {/if}
-
- {/each}
-
- {/each}
-
diff --git a/web/frontend/src/generic/filters/Cluster.svelte b/web/frontend/src/generic/filters/Cluster.svelte
index f886582..2d35e84 100644
--- a/web/frontend/src/generic/filters/Cluster.svelte
+++ b/web/frontend/src/generic/filters/Cluster.svelte
@@ -1,19 +1,16 @@
+ Properties:
+ - `isOpen Bool?`: Is this filter component opened [Bindable, Default: false]
+ - `presetCluster String?`: The latest selected cluster [Default: ""]
+ - `presetPartition String?`: The latest selected partition [Default: ""]
+ - `disableClusterSelection Bool?`: Is the selection disabled [Default: false]
+ - `setFilter Func`: The callback function to apply current filter selection
+-->
(isOpen = !isOpen)}>
@@ -45,13 +46,13 @@
Cluster
{#if disableClusterSelection}
Info: Cluster Selection Disabled in This View
- Selected Cluster: {cluster}
+ Selected Cluster: {presetCluster}
{:else}
((pendingCluster = null), (pendingPartition = null))}
+ onclick={() => ((pendingCluster = null), (pendingPartition = null))}
>
Any Cluster
@@ -59,7 +60,7 @@
(
+ onclick={() => (
(pendingCluster = cluster.name), (pendingPartition = null)
)}
>
@@ -75,14 +76,14 @@
(pendingPartition = null)}
+ onclick={() => (pendingPartition = null)}
>
Any Partition
- {#each clusters.find((c) => c.name == pendingCluster).partitions as partition}
+ {#each clusters?.find((c) => c.name == pendingCluster)?.partitions as partition}
(pendingPartition = partition)}
+ onclick={() => (pendingPartition = partition)}
>
{partition}
@@ -93,22 +94,22 @@
{
+ onclick={() => {
isOpen = false;
- cluster = pendingCluster;
- partition = pendingPartition;
- dispatch("set-filter", { cluster, partition });
+ setFilter({ cluster: pendingCluster, partition: pendingPartition });
}}>Close & Apply
- {
- isOpen = false;
- cluster = pendingCluster = null;
- partition = pendingPartition = null;
- dispatch("set-filter", { cluster, partition });
- }}>Reset
- (isOpen = false)}>Close
+ {#if !disableClusterSelection}
+ {
+ isOpen = false;
+ pendingCluster = null;
+ pendingPartition = null;
+ setFilter({ cluster: pendingCluster, partition: pendingPartition})
+ }}>Reset
+ {/if}
+ (isOpen = false)}>Close
diff --git a/web/frontend/src/generic/filters/Duration.svelte b/web/frontend/src/generic/filters/Duration.svelte
index 6a22b98..3056d9f 100644
--- a/web/frontend/src/generic/filters/Duration.svelte
+++ b/web/frontend/src/generic/filters/Duration.svelte
@@ -1,19 +1,14 @@
+ Properties:
+ - `isOpen Bool?`: Is this filter component opened [Bindable, Default: false]
+ - `presetDuration Object?`: Object containing the latest duration filter parameters
+ - Default: { lessThan: null, moreThan: null, from: null, to: null }
+ - `setFilter Func`: The callback function to apply current filter selection
+-->
(isOpen = !isOpen)}>
@@ -92,7 +112,7 @@
type="number"
min="0"
class="form-control"
- bind:value={pendingMoreThan.hours}
+ bind:value={moreState.hours}
disabled={moreDisabled}
/>
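
The rename `pendingMoreThan` → `moreState` in this last hunk fits the same runes migration: fields bound with `bind:value` now live in `$state` objects, so mutating `moreState.hours` stays reactive without reassigning the whole object. A hypothetical sketch of the declarations this implies — assumed, not shown in the hunk:

```js
// Deeply reactive duration buffers for the form inputs
let lessState = $state({ hours: null, mins: null });
let moreState = $state({ hours: null, mins: null });
```
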