Compare commits

...

11 Commits

15 changed files with 1238 additions and 1738 deletions

.github/dependabot.yml (new file)

@@ -0,0 +1,15 @@
# To get started with Dependabot version updates, you'll need to specify which
# package ecosystems to update and where the package manifests are located.
# Please see the documentation for all configuration options:
# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
version: 2
updates:
  - package-ecosystem: "gomod"
    directory: "/"
    schedule:
      interval: "weekly"
  - package-ecosystem: "npm"
    directory: "/web/frontend"
    schedule:
      interval: "weekly"

go.mod

@@ -21,6 +21,8 @@ require (
	github.com/influxdata/influxdb-client-go/v2 v2.14.0
	github.com/jmoiron/sqlx v1.4.0
	github.com/mattn/go-sqlite3 v1.14.24
+	github.com/minio/minio-go/v7 v7.0.63
+	github.com/pkg/errors v0.9.1
	github.com/prometheus/client_golang v1.21.0
	github.com/prometheus/common v0.62.0
	github.com/qustavo/sqlhooks/v2 v2.1.0
@@ -43,6 +45,7 @@ require (
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/cespare/xxhash/v2 v2.3.0 // indirect
	github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
+	github.com/dustin/go-humanize v1.0.1 // indirect
	github.com/felixge/httpsnoop v1.0.4 // indirect
	github.com/go-asn1-ber/asn1-ber v1.5.7 // indirect
	github.com/go-jose/go-jose/v4 v4.0.5 // indirect
@@ -62,9 +65,13 @@ require (
	github.com/josharian/intern v1.0.0 // indirect
	github.com/jpillora/backoff v1.0.0 // indirect
	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/klauspost/compress v1.17.11 // indirect
+	github.com/klauspost/cpuid/v2 v2.2.5 // indirect
	github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
	github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
	github.com/mailru/easyjson v0.9.0 // indirect
+	github.com/minio/md5-simd v1.1.2 // indirect
+	github.com/minio/sha256-simd v1.0.1 // indirect
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
	github.com/modern-go/reflect2 v1.0.2 // indirect
	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
@@ -73,7 +80,9 @@ require (
	github.com/prometheus/client_model v0.6.1 // indirect
	github.com/prometheus/procfs v0.15.1 // indirect
	github.com/robfig/cron/v3 v3.0.1 // indirect
+	github.com/rs/xid v1.5.0 // indirect
	github.com/russross/blackfriday/v2 v2.1.0 // indirect
+	github.com/sirupsen/logrus v1.9.3 // indirect
	github.com/sosodev/duration v1.3.1 // indirect
	github.com/swaggo/files v1.0.1 // indirect
	github.com/urfave/cli/v2 v2.27.5 // indirect
@@ -86,6 +95,7 @@ require (
	golang.org/x/text v0.23.0 // indirect
	golang.org/x/tools v0.30.0 // indirect
	google.golang.org/protobuf v1.36.5 // indirect
+	gopkg.in/ini.v1 v1.67.0 // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
	sigs.k8s.io/yaml v1.4.0 // indirect

go.sum

@@ -53,6 +53,8 @@ github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/go-asn1-ber/asn1-ber v1.5.7 h1:DTX+lbVTWaTw1hQ+PbZPlnDZPEIs0SS/GCZAl535dDk=
@@ -148,6 +150,9 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm
github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE=
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
+github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
+github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -165,6 +170,12 @@ github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsO
github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM=
github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
+github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
+github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
+github.com/minio/minio-go/v7 v7.0.63 h1:GbZ2oCvaUdgT5640WJOpyDhhDxvknAJU2/T3yurwcbQ=
+github.com/minio/minio-go/v7 v7.0.63/go.mod h1:Q6X7Qjb7WMhvG65qKf4gUgA5XaiSox74kR1uAEjxRS4=
+github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
+github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
@@ -205,12 +216,16 @@ github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc=
+github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 h1:lZUw3E0/J3roVtGQ+SCrUrg3ON6NgVqpn3+iol9aGu4=
github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY=
github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sosodev/duration v1.3.1 h1:qtHBDMQ6lvMQsL15g4aopM4HEfOaYuhWBw3NPTtlqq4=
github.com/sosodev/duration v1.3.1/go.mod h1:RQIBBX0+fMLc/D9+Jb/fwvVmo0eZvDDEERAikUR6SDg=
github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0=
@@ -220,6 +235,7 @@ github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpE
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
@@ -296,6 +312,7 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -341,6 +358,8 @@ google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojt
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
+gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=


@@ -162,7 +162,6 @@ func LoadScopedStatsFromArchive(
	metrics []string,
	scopes []schema.MetricScope,
) (schema.ScopedJobStats, error) {
-
	data, err := ar.LoadJobStats(job)
	if err != nil {
		log.Errorf("Error while loading job stats from archiveBackend: %s", err.Error())


@@ -5,9 +5,7 @@
package archive

import (
-	"bufio"
	"bytes"
-	"compress/gzip"
	"encoding/json"
	"errors"
	"fmt"
@@ -20,7 +18,6 @@ import (
	"text/tabwriter"
	"time"

-	"github.com/ClusterCockpit/cc-backend/internal/config"
	"github.com/ClusterCockpit/cc-backend/internal/util"
	"github.com/ClusterCockpit/cc-backend/pkg/log"
	"github.com/ClusterCockpit/cc-backend/pkg/schema"
@@ -59,14 +56,13 @@ func getDirectory(
func getPath(
	job *schema.Job,
	rootPath string,
-	file string) string {
+	file string,
+) string {
	return filepath.Join(
		getDirectory(job, rootPath), file)
}

func loadJobMeta(filename string) (*schema.JobMeta, error) {
-
	b, err := os.ReadFile(filename)
	if err != nil {
		log.Errorf("loadJobMeta() > open file error: %v", err)
@@ -83,7 +79,6 @@ func loadJobMeta(filename string) (*schema.JobMeta, error) {

func loadJobData(filename string, isCompressed bool) (schema.JobData, error) {
-
	f, err := os.Open(filename)
	if err != nil {
		log.Errorf("fsBackend LoadJobData()- %v", err)
		return nil, err
@@ -117,7 +112,6 @@ func loadJobData(filename string, isCompressed bool) (schema.JobData, error) {

func loadJobStats(filename string, isCompressed bool) (schema.ScopedJobStats, error) {
-
	f, err := os.Open(filename)
	if err != nil {
		log.Errorf("fsBackend LoadJobStats()- %v", err)
		return nil, err
@@ -150,7 +144,6 @@ func loadJobStats(filename string, isCompressed bool) (schema.ScopedJobStats, error) {
}

func (fsa *FsArchive) Init(rawConfig json.RawMessage) (uint64, error) {
-
	var config FsArchiveConfig
	if err := json.Unmarshal(rawConfig, &config); err != nil {
		log.Warnf("Init() > Unmarshal error: %#v", err)
@@ -276,7 +269,6 @@ func (fsa *FsArchive) Exists(job *schema.Job) bool {
}

func (fsa *FsArchive) Clean(before int64, after int64) {
-
	if after == 0 {
		after = math.MaxInt64
	}
@@ -392,7 +384,6 @@ func (fsa *FsArchive) Compress(jobs []*schema.Job) {
}

func (fsa *FsArchive) CompressLast(starttime int64) int64 {
-
	filename := filepath.Join(fsa.path, "compress.txt")
	b, err := os.ReadFile(filename)
	if err != nil {
@@ -412,7 +403,7 @@ func (fsa *FsArchive) CompressLast(starttime int64) int64 {
}

func (fsa *FsArchive) LoadJobData(job *schema.Job) (schema.JobData, error) {
-	var isCompressed bool = true
+	isCompressed := true
	filename := getPath(job, fsa.path, "data.json.gz")

	if !util.CheckFileExists(filename) {
@@ -420,7 +411,13 @@ func (fsa *FsArchive) LoadJobData(job *schema.Job) (schema.JobData, error) {
		isCompressed = false
	}

-	return loadJobData(filename, isCompressed)
+	f, err := os.Open(filename)
+	if err != nil {
+		log.Errorf("fsBackend LoadJobData()- %v", err)
+		return nil, err
+	}
+	defer f.Close()
+	return loadJobData(f, filename, isCompressed)
}

func (fsa *FsArchive) LoadJobStats(job *schema.Job) (schema.ScopedJobStats, error) {
@@ -437,26 +434,27 @@ func (fsa *FsArchive) LoadJobStats(job *schema.Job) (schema.ScopedJobStats, error) {
func (fsa *FsArchive) LoadJobMeta(job *schema.Job) (*schema.JobMeta, error) {
	filename := getPath(job, fsa.path, "meta.json")
-	return loadJobMeta(filename)
+	b, err := os.ReadFile(filename)
+	if err != nil {
+		log.Errorf("loadJobMeta() > open file error: %v", err)
+		return &schema.JobMeta{}, err
+	}
+	return loadJobMeta(b)
}

func (fsa *FsArchive) LoadClusterCfg(name string) (*schema.Cluster, error) {
	b, err := os.ReadFile(filepath.Join(fsa.path, name, "cluster.json"))
	if err != nil {
		log.Errorf("LoadClusterCfg() > open file error: %v", err)
-		// if config.Keys.Validate {
		if err := schema.Validate(schema.ClusterCfg, bytes.NewReader(b)); err != nil {
			log.Warnf("Validate cluster config: %v\n", err)
			return &schema.Cluster{}, fmt.Errorf("validate cluster config: %v", err)
		}
	}
-	// }
	return DecodeCluster(bytes.NewReader(b))
}

func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer {
	ch := make(chan JobContainer)

	go func() {
		clustersDir, err := os.ReadDir(fsa.path)
@@ -493,7 +491,11 @@ func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer {
			for _, startTimeDir := range startTimeDirs {
				if startTimeDir.IsDir() {
-					job, err := loadJobMeta(filepath.Join(dirpath, startTimeDir.Name(), "meta.json"))
+					b, err := os.ReadFile(filepath.Join(dirpath, startTimeDir.Name(), "meta.json"))
+					if err != nil {
+						log.Errorf("loadJobMeta() > open file error: %v", err)
+					}
+					job, err := loadJobMeta(b)
					if err != nil && !errors.Is(err, &jsonschema.ValidationError{}) {
						log.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error())
					}
@@ -507,7 +509,13 @@ func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer {
						isCompressed = false
					}

-					data, err := loadJobData(filename, isCompressed)
+					f, err := os.Open(filename)
+					if err != nil {
+						log.Errorf("fsBackend LoadJobData()- %v", err)
+					}
+					defer f.Close()
+
+					data, err := loadJobData(f, filename, isCompressed)
					if err != nil && !errors.Is(err, &jsonschema.ValidationError{}) {
						log.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error())
					}
@@ -527,7 +535,6 @@ func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer {
}

func (fsa *FsArchive) StoreJobMeta(jobMeta *schema.JobMeta) error {
-
	job := schema.Job{
		BaseJob:   jobMeta.BaseJob,
		StartTime: time.Unix(jobMeta.StartTime, 0),
@@ -556,8 +563,8 @@ func (fsa *FsArchive) GetClusters() []string {
func (fsa *FsArchive) ImportJob(
	jobMeta *schema.JobMeta,
-	jobData *schema.JobData) error {
+	jobData *schema.JobData,
+) error {
	job := schema.Job{
		BaseJob:   jobMeta.BaseJob,
		StartTime: time.Unix(jobMeta.StartTime, 0),
@@ -617,5 +624,6 @@ func (fsa *FsArchive) ImportJob(
	if err := f.Close(); err != nil {
		log.Warn("Error while closing data.json file")
	}
-	return err
+
+	return nil
}
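One detail in the new Iter code above is worth flagging: `defer f.Close()` inside the startTimeDir loop only runs when the surrounding goroutine exits, so every opened data file stays open until the whole archive walk finishes. A minimal sketch of the per-iteration alternative (names are illustrative, not from the patch):

```go
package main

import (
	"fmt"
	"os"
)

// processAll opens many files in one loop; closing explicitly at the end of
// each iteration releases the descriptor immediately, whereas a defer inside
// the loop would keep every file open until the function returns.
func processAll(paths []string) error {
	for _, p := range paths {
		f, err := os.Open(p)
		if err != nil {
			return err
		}
		_, workErr := f.Stat() // stand-in for the real per-file work
		f.Close()              // release before the next iteration
		if workErr != nil {
			return workErr
		}
	}
	return nil
}

func main() {
	fmt.Println(processAll(os.Args[1:]))
}
```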


@@ -7,6 +7,7 @@ package archive
import (
	"encoding/json"
	"fmt"
+	"os"
	"path/filepath"
	"testing"
	"time"
@@ -62,8 +63,12 @@ func TestLoadJobMetaInternal(t *testing.T) {
	if err != nil {
		t.Fatal(err)
	}

-	job, err := loadJobMeta("testdata/archive/emmy/1404/397/1609300556/meta.json")
+	b, err := os.ReadFile("testdata/archive/emmy/1404/397/1609300556/meta.json")
+	if err != nil {
+		t.Fatalf("loadJobMeta() > open file error: %v", err)
+	}
+	job, err := loadJobMeta(b)
	if err != nil {
		t.Fatal(err)
	}


@@ -0,0 +1,9 @@
CREATE TABLE IF NOT EXISTS "job" (
    id INTEGER PRIMARY KEY,
    clustername TEXT NOT NULL,
    job_id INTEGER NOT NULL,
    start_time INTEGER NOT NULL, -- Unix timestamp
    meta_data TEXT, -- JSON
    metric_data BLOB,
    UNIQUE ("job_id", "clustername", "start_time")
);
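A short sketch of how a row in this table might be written with the sqlx handle set up later in this diff; the upsert leans on the UNIQUE constraint above, and the helper name is hypothetical:

```go
package archive

import "github.com/jmoiron/sqlx"

// storeJobRow inserts or replaces one archived job; meta is the JSON-encoded
// job metadata, data the (possibly compressed) metric blob.
func storeJobRow(db *sqlx.DB, cluster string, jobID, startTime int64, meta string, data []byte) error {
	_, err := db.Exec(
		`INSERT INTO job (clustername, job_id, start_time, meta_data, metric_data)
		 VALUES (?, ?, ?, ?, ?)
		 ON CONFLICT ("job_id", "clustername", "start_time") DO UPDATE
		 SET meta_data = excluded.meta_data, metric_data = excluded.metric_data`,
		cluster, jobID, startTime, meta, data)
	return err
}
```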


@@ -4,10 +4,403 @@
// license that can be found in the LICENSE file.

package archive

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/ClusterCockpit/cc-backend/pkg/log"
	"github.com/ClusterCockpit/cc-backend/pkg/schema"
	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
	"github.com/pkg/errors"
)

type S3ArchiveConfig struct {
-	Path string `json:"filePath"`
	Endpoint        string `json:"endpoint"`
	AccessKeyID     string `json:"accessKeyID"`
	SecretAccessKey string `json:"secretAccessKey"`
	Bucket          string `json:"bucket"`
	UseSSL          bool   `json:"useSSL"`
}

type S3Archive struct {
-	path string
	client   *minio.Client
	bucket   string
	clusters []string
}
func (s3a *S3Archive) stat(object string) (*minio.ObjectInfo, error) {
objectStat, e := s3a.client.StatObject(context.Background(),
s3a.bucket,
object, minio.GetObjectOptions{})
if e != nil {
errResponse := minio.ToErrorResponse(e)
if errResponse.Code == "AccessDenied" {
return nil, errors.Wrap(e, "AccessDenied")
}
if errResponse.Code == "NoSuchBucket" {
return nil, errors.Wrap(e, "NoSuchBucket")
}
if errResponse.Code == "InvalidBucketName" {
return nil, errors.Wrap(e, "InvalidBucketName")
}
if errResponse.Code == "NoSuchKey" {
return nil, errors.Wrap(e, "NoSuchKey")
}
return nil, e
}
return &objectStat, nil
}
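Built on stat(), an Exists implementation (still only a commented-out stub further down) could look like this; the key layout follows getPath as used by the other loaders, and the method name is hypothetical:

```go
// exists reports whether the job's meta.json object is present in the bucket.
// Any stat error, including a wrapped NoSuchKey, is treated as "not there".
func (s3a *S3Archive) exists(job *schema.Job) bool {
	key := getPath(job, "./", "meta.json")
	_, err := s3a.stat(key)
	return err == nil
}
```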
func (s3a *S3Archive) Init(rawConfig json.RawMessage) (uint64, error) {
var config S3ArchiveConfig
var err error
if err = json.Unmarshal(rawConfig, &config); err != nil {
log.Warnf("Init() > Unmarshal error: %#v", err)
return 0, err
}
fmt.Printf("Endpoint: %s Bucket: %s\n", config.Endpoint, config.Bucket)
s3a.client, err = minio.New(config.Endpoint, &minio.Options{
Creds: credentials.NewStaticV4(config.AccessKeyID, config.SecretAccessKey, ""),
Secure: config.UseSSL,
})
if err != nil {
err = fmt.Errorf("Init() : Initialize minio client failed")
return 0, err
}
s3a.bucket = config.Bucket
found, err := s3a.client.BucketExists(context.Background(), s3a.bucket)
if err != nil {
err = fmt.Errorf("Init() : %v", err)
return 0, err
}
if found {
log.Infof("Bucket found.")
} else {
log.Infof("Bucket not found.")
}
r, err := s3a.client.GetObject(context.Background(),
s3a.bucket, "version.txt", minio.GetObjectOptions{})
if err != nil {
err = fmt.Errorf("Init() : Get version object failed")
return 0, err
}
defer r.Close()
b, err := io.ReadAll(r)
if err != nil {
log.Errorf("Init() : %v", err)
return 0, err
}
version, err := strconv.ParseUint(strings.TrimSuffix(string(b), "\n"), 10, 64)
if err != nil {
log.Errorf("Init() : %v", err)
return 0, err
}
if version != Version {
return 0, fmt.Errorf("unsupported version %d, need %d", version, Version)
}
for object := range s3a.client.ListObjects(
context.Background(),
s3a.bucket, minio.ListObjectsOptions{
Recursive: false,
}) {
if object.Err != nil {
log.Errorf("listObject: %v", object.Err)
}
if strings.HasSuffix(object.Key, "/") {
s3a.clusters = append(s3a.clusters, strings.TrimSuffix(object.Key, "/"))
}
}
	return version, nil
}
func (s3a *S3Archive) Info() {
fmt.Printf("Job archive %s\n", s3a.bucket)
var clusters []string
for object := range s3a.client.ListObjects(
context.Background(),
s3a.bucket, minio.ListObjectsOptions{
Recursive: false,
}) {
if object.Err != nil {
log.Errorf("listObject: %v", object.Err)
}
if strings.HasSuffix(object.Key, "/") {
clusters = append(clusters, object.Key)
}
}
	ci := make(map[string]*clusterInfo)
	for _, cluster := range clusters {
		ci[cluster] = &clusterInfo{dateFirst: time.Now().Unix()}
		for d := range s3a.client.ListObjects(
			context.Background(),
			s3a.bucket, minio.ListObjectsOptions{
				Recursive: true,
				Prefix:    cluster,
			}) {
			ci[cluster].diskSize += (float64(d.Size) * 1e-6)
		}
		fmt.Printf("cluster %s: %.2f MB\n", cluster, ci[cluster].diskSize)
	}
}
// func (s3a *S3Archive) Exists(job *schema.Job) bool {
// return true
// }
func (s3a *S3Archive) LoadJobMeta(job *schema.Job) (*schema.JobMeta, error) {
	filename := getPath(job, "/", "meta.json")
	log.Infof("LoadJobMeta() : %s", filename)
	r, err := s3a.client.GetObject(context.Background(),
		s3a.bucket, filename, minio.GetObjectOptions{})
	if err != nil {
		err = fmt.Errorf("LoadJobMeta() : Get meta.json object failed")
		return nil, err
	}
	defer r.Close()

	b, err := io.ReadAll(r)
	if err != nil {
		log.Errorf("LoadJobMeta() : %v", err)
		return nil, err
	}

	return loadJobMeta(b)
}
func (s3a *S3Archive) LoadJobData(job *schema.Job) (schema.JobData, error) {
	isCompressed := true
	key := getPath(job, "./", "data.json.gz")

	_, err := s3a.stat(key)
	if err != nil {
		// stat() wraps the minio error, so match the code prefix rather
		// than comparing against the full message
		if strings.HasPrefix(err.Error(), "NoSuchKey") {
			key = getPath(job, "./", "data.json")
			isCompressed = false
		}
	}

	r, err := s3a.client.GetObject(context.Background(),
		s3a.bucket, key, minio.GetObjectOptions{})
	if err != nil {
		err = fmt.Errorf("LoadJobData() : Get %s object failed", key)
		return nil, err
	}
	defer r.Close()

	return loadJobData(r, key, isCompressed)
}
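loadJobData (shared with the file backend) receives the raw object reader plus the isCompressed flag; presumably it wraps that reader roughly like this before JSON decoding. A minimal sketch under that assumption, with a helper name that is not from the patch:

```go
package archive

import (
	"bufio"
	"compress/gzip"
	"io"
)

// maybeGzipReader yields the decompressed stream for data.json.gz objects
// and a plain buffered reader for uncompressed data.json.
func maybeGzipReader(r io.Reader, isCompressed bool) (io.Reader, error) {
	if isCompressed {
		return gzip.NewReader(r)
	}
	return bufio.NewReader(r), nil
}
```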
func (s3a *S3Archive) LoadClusterCfg(name string) (*schema.Cluster, error) {
	key := filepath.Join("./", name, "cluster.json")
	r, err := s3a.client.GetObject(context.Background(),
		s3a.bucket, key, minio.GetObjectOptions{})
	if err != nil {
		err = fmt.Errorf("LoadClusterCfg() : Get cluster.json object failed")
		return nil, err
	}
	defer r.Close()

	return DecodeCluster(r)
}
func (s3a *S3Archive) Iter(loadMetricData bool) <-chan JobContainer {
ch := make(chan JobContainer)
go func() {
clusterDirs := s3a.client.ListObjects(context.Background(), s3a.bucket, minio.ListObjectsOptions{Recursive: false})
		for clusterDir := range clusterDirs {
			if clusterDir.Err != nil {
				log.Errorf("Iter() : listObjects %v", clusterDir.Err)
				close(ch)
				return
			}
			// cluster prefixes show up as zero-size "directory" entries
			if clusterDir.Size != 0 {
				continue
			}

			key := filepath.Join("", clusterDir.Key)
			lvl1Dirs := s3a.client.ListObjects(context.Background(), s3a.bucket, minio.ListObjectsOptions{Recursive: false, Prefix: key})
			for lvl1Dir := range lvl1Dirs {
				log.Infof("Iter() : %s", lvl1Dir.Key)
				ch <- JobContainer{Meta: nil, Data: nil}
			}
//
// for _, lvl1Dir := range lvl1Dirs {
// if !lvl1Dir.IsDir() {
// // Could be the cluster.json file
// continue
// }
//
// lvl2Dirs, err := os.ReadDir(filepath.Join(fsa.path, clusterDir.Name(), lvl1Dir.Name()))
// if err != nil {
// log.Fatalf("Reading jobs failed @ lvl2 dirs: %s", err.Error())
// }
//
// for _, lvl2Dir := range lvl2Dirs {
// dirpath := filepath.Join(fsa.path, clusterDir.Name(), lvl1Dir.Name(), lvl2Dir.Name())
// startTimeDirs, err := os.ReadDir(dirpath)
// if err != nil {
// log.Fatalf("Reading jobs failed @ starttime dirs: %s", err.Error())
// }
//
// for _, startTimeDir := range startTimeDirs {
// if startTimeDir.IsDir() {
// b, err := os.ReadFile(filepath.Join(dirpath, startTimeDir.Name(), "meta.json"))
// if err != nil {
// log.Errorf("loadJobMeta() > open file error: %v", err)
// }
// job, err := loadJobMeta(b)
// if err != nil && !errors.Is(err, &jsonschema.ValidationError{}) {
// log.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error())
// }
//
// if loadMetricData {
// var isCompressed bool = true
// filename := filepath.Join(dirpath, startTimeDir.Name(), "data.json.gz")
//
// if !util.CheckFileExists(filename) {
// filename = filepath.Join(dirpath, startTimeDir.Name(), "data.json")
// isCompressed = false
// }
//
// f, err := os.Open(filename)
// if err != nil {
// log.Errorf("fsBackend LoadJobData()- %v", err)
// }
// defer f.Close()
//
// data, err := loadJobData(f, filename, isCompressed)
// if err != nil && !errors.Is(err, &jsonschema.ValidationError{}) {
// log.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error())
// }
// ch <- JobContainer{Meta: job, Data: &data}
// log.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error())
// } else {
// ch <- JobContainer{Meta: job, Data: nil}
// }
// }
// }
// }
// }
}
close(ch)
}()
return ch
}
func (s3a *S3Archive) ImportJob(
jobMeta *schema.JobMeta,
jobData *schema.JobData,
) error {
job := schema.Job{
BaseJob: jobMeta.BaseJob,
StartTime: time.Unix(jobMeta.StartTime, 0),
StartTimeUnix: jobMeta.StartTime,
}
r, w := io.Pipe()
go func() {
defer w.Close()
if err := EncodeJobMeta(w, jobMeta); err != nil {
log.Error("Error while encoding job metadata to meta.json object")
}
}()
key := getPath(&job, "./", "meta.json")
_, e := s3a.client.PutObject(context.Background(),
s3a.bucket, key, r, -1, minio.PutObjectOptions{})
if e != nil {
log.Errorf("Put error %#v", e)
return e
}
r, w = io.Pipe()
go func() {
defer w.Close()
if err := EncodeJobData(w, jobData); err != nil {
log.Error("Error while encoding job metricdata to data.json object")
}
}()
key = getPath(&job, "./", "data.json")
_, e = s3a.client.PutObject(context.Background(),
s3a.bucket, key, r, -1, minio.PutObjectOptions{})
if e != nil {
log.Errorf("Put error %#v", e)
return e
}
return nil
}
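ImportJob streams the encoded JSON into PutObject through an io.Pipe with size -1, so the whole document never has to be buffered in memory. The pattern in isolation, using plain JSON encoding as a stand-in for EncodeJobMeta/EncodeJobData and a hypothetical helper name; CloseWithError propagates encoder failures to the upload side:

```go
// uploadJSON encodes v into the write end of a pipe from a goroutine while
// PutObject consumes the read end; size -1 selects streaming upload.
func uploadJSON(ctx context.Context, client *minio.Client, bucket, key string, v any) error {
	r, w := io.Pipe()
	go func() {
		// a non-nil encode error surfaces as a read error inside PutObject
		w.CloseWithError(json.NewEncoder(w).Encode(v))
	}()
	_, err := client.PutObject(ctx, bucket, key, r, -1, minio.PutObjectOptions{})
	return err
}
```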
func (s3a *S3Archive) StoreJobMeta(jobMeta *schema.JobMeta) error {
	job := schema.Job{
		BaseJob:       jobMeta.BaseJob,
		StartTime:     time.Unix(jobMeta.StartTime, 0),
		StartTimeUnix: jobMeta.StartTime,
	}

	// Stream the encoded metadata through a pipe, as in ImportJob: the
	// encoder must run in its own goroutine, otherwise writing to the pipe
	// blocks before PutObject ever starts reading.
	r, w := io.Pipe()
	go func() {
		defer w.Close()
		if err := EncodeJobMeta(w, jobMeta); err != nil {
			log.Error("Error while encoding job metadata to meta.json object")
		}
	}()

	key := getPath(&job, "./", "meta.json")
	if _, err := s3a.client.PutObject(context.Background(),
		s3a.bucket, key, r, -1, minio.PutObjectOptions{}); err != nil {
		log.Errorf("Put error %#v", err)
		return err
	}

	return nil
}
func (s3a *S3Archive) GetClusters() []string {
return s3a.clusters
}
//
// func (s3a *S3Archive) CleanUp(jobs []*schema.Job)
//
// func (s3a *S3Archive) Move(jobs []*schema.Job, path string)
//
// func (s3a *S3Archive) Clean(before int64, after int64)
//
// func (s3a *S3Archive) Compress(jobs []*schema.Job)
//
// func (s3a *S3Archive) CompressLast(starttime int64) int64
//


@@ -0,0 +1,153 @@
// Copyright (C) 2023 NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package archive
import (
"bytes"
"encoding/json"
"fmt"
"os"
"testing"
"time"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
)
func TestS3Init(t *testing.T) {
var s3a S3Archive
version, err := s3a.Init(json.RawMessage("{\"endpoint\":\"192.168.1.10:9100\",\"accessKeyID\":\"uACSaCN2Chiotpnr4bBS\",\"secretAccessKey\":\"MkEbBsFvMii1K5GreUriTJZxH359B1n28Au9Kaml\",\"bucket\":\"cc-archive\",\"useSSL\":false}"))
if err != nil {
t.Fatal(err)
}
if s3a.bucket != "cc-archive" {
t.Errorf("S3 bucket \ngot: %s \nwant: cc-archive", s3a.bucket)
}
	if version != 1 {
		t.Errorf("S3 archive version \ngot: %d \nwant: 1", version)
	}
if len(s3a.clusters) != 2 || s3a.clusters[0] != "alex" {
t.Fail()
}
}
func TestS3LoadJobMeta(t *testing.T) {
var s3a S3Archive
_, err := s3a.Init(json.RawMessage("{\"endpoint\":\"192.168.1.10:9100\",\"accessKeyID\":\"uACSaCN2Chiotpnr4bBS\",\"secretAccessKey\":\"MkEbBsFvMii1K5GreUriTJZxH359B1n28Au9Kaml\",\"bucket\":\"cc-archive\",\"useSSL\":false}"))
if err != nil {
t.Fatal(err)
}
jobIn := schema.Job{BaseJob: schema.JobDefaults}
jobIn.StartTime = time.Unix(1675954353, 0)
jobIn.JobID = 398764
jobIn.Cluster = "fritz"
job, err := s3a.LoadJobMeta(&jobIn)
if err != nil {
t.Fatal(err)
}
if job.JobID != 398764 {
t.Fail()
}
if int(job.NumNodes) != len(job.Resources) {
t.Fail()
}
if job.StartTime != 1675954353 {
t.Fail()
}
}
func TestS3LoadJobData(t *testing.T) {
var s3a S3Archive
_, err := s3a.Init(json.RawMessage("{\"endpoint\":\"192.168.1.10:9100\",\"accessKeyID\":\"uACSaCN2Chiotpnr4bBS\",\"secretAccessKey\":\"MkEbBsFvMii1K5GreUriTJZxH359B1n28Au9Kaml\",\"bucket\":\"cc-archive\",\"useSSL\":false}"))
if err != nil {
t.Fatal(err)
}
jobIn := schema.Job{BaseJob: schema.JobDefaults}
jobIn.StartTime = time.Unix(1675954353, 0)
jobIn.JobID = 398764
jobIn.Cluster = "fritz"
data, err := s3a.LoadJobData(&jobIn)
if err != nil {
t.Fatal(err)
}
for _, scopes := range data {
// fmt.Printf("Metric name: %s\n", name)
if _, exists := scopes[schema.MetricScopeNode]; !exists {
t.Fail()
}
}
}
func TestS3LoadCluster(t *testing.T) {
var s3a S3Archive
_, err := s3a.Init(json.RawMessage("{\"endpoint\":\"192.168.1.10:9100\",\"accessKeyID\":\"uACSaCN2Chiotpnr4bBS\",\"secretAccessKey\":\"MkEbBsFvMii1K5GreUriTJZxH359B1n28Au9Kaml\",\"bucket\":\"cc-archive\",\"useSSL\":false}"))
if err != nil {
t.Fatal(err)
}
cfg, err := s3a.LoadClusterCfg("fritz")
if err != nil {
t.Fatal(err)
}
if cfg.SubClusters[0].CoresPerSocket != 36 {
t.Fail()
}
}
func TestS3JobImport(t *testing.T) {
var s3a S3Archive
_, err := s3a.Init(json.RawMessage("{\"endpoint\":\"192.168.1.10:9100\",\"accessKeyID\":\"uACSaCN2Chiotpnr4bBS\",\"secretAccessKey\":\"MkEbBsFvMii1K5GreUriTJZxH359B1n28Au9Kaml\",\"bucket\":\"cc-archive\",\"useSSL\":false}"))
if err != nil {
t.Fatal(err)
}
raw, err := os.ReadFile("./testdata/archive/fritz/398/759/1675954289/meta.json")
if err != nil {
t.Fatal("Error while reading metadata file for import")
}
dec := json.NewDecoder(bytes.NewReader(raw))
dec.DisallowUnknownFields()
jobMeta := schema.JobMeta{BaseJob: schema.JobDefaults}
if err = dec.Decode(&jobMeta); err != nil {
t.Fatal("Error while decoding raw json metadata for import")
}
raw, err = os.ReadFile("./testdata/archive/fritz/398/759/1675954289/data.json")
if err != nil {
t.Fatal("Error while reading jobdata file for import")
}
dec = json.NewDecoder(bytes.NewReader(raw))
dec.DisallowUnknownFields()
jobData := schema.JobData{}
if err = dec.Decode(&jobData); err != nil {
t.Fatal("Error while decoding raw json jobdata for import")
}
	if err := s3a.ImportJob(&jobMeta, &jobData); err != nil {
		t.Fatal(err)
	}
}
func TestS3Iter(t *testing.T) {
var s3a S3Archive
_, err := s3a.Init(json.RawMessage("{\"endpoint\":\"192.168.1.10:9100\",\"accessKeyID\":\"uACSaCN2Chiotpnr4bBS\",\"secretAccessKey\":\"MkEbBsFvMii1K5GreUriTJZxH359B1n28Au9Kaml\",\"bucket\":\"cc-archive\",\"useSSL\":false}"))
if err != nil {
t.Fatal(err)
}
for jobContainer := range s3a.Iter(false) {
if jobContainer.Meta == nil {
fmt.Println("Is nil")
}
}
}
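These tests talk to a live MinIO instance whose endpoint and credentials are hardcoded, so they are integration tests rather than unit tests. A guard along these lines (the env var name is an assumption) would let them skip cleanly wherever no test bucket is reachable:

```go
// skipWithoutMinio skips integration tests unless an S3 test endpoint is set.
func skipWithoutMinio(t *testing.T) {
	t.Helper()
	if os.Getenv("CC_S3_TEST_ENDPOINT") == "" {
		t.Skip("no S3 test endpoint configured")
	}
}
```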


@@ -0,0 +1,261 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package archive
import (
"database/sql"
"embed"
"encoding/json"
"fmt"
"net/url"
"sync"
"time"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
"github.com/golang-migrate/migrate/v4"
"github.com/golang-migrate/migrate/v4/source/iofs"
"github.com/jmoiron/sqlx"
)
//go:embed migrations/*
var migrationFiles embed.FS
type SqliteArchiveConfig struct {
Path string `json:"filePath"`
}
type SqliteArchive struct {
path string
}
func getMigrateInstance(db string) (m *migrate.Migrate, err error) {
d, err := iofs.New(migrationFiles, "migrations/sqlite3")
if err != nil {
cclog.Fatal(err)
}
m, err = migrate.NewWithSourceInstance("iofs", d, fmt.Sprintf("sqlite3://%s?_foreign_keys=on", db))
if err != nil {
return m, err
}
return m, nil
}
func MigrateDB(db string) error {
m, err := getMigrateInstance(db)
if err != nil {
return err
}
v, dirty, err := m.Version()
if err != nil {
if err == migrate.ErrNilVersion {
cclog.Warn("Legacy database without version or missing database file!")
} else {
return err
}
}
if uint64(v) < Version {
cclog.Infof("unsupported database version %d, need %d.\nPlease backup your database file and run cc-backend -migrate-db", v, Version)
}
if dirty {
return fmt.Errorf("last migration to version %d has failed, please fix the db manually and force version with -force-db flag", Version)
}
if err := m.Up(); err != nil {
if err == migrate.ErrNoChange {
cclog.Info("DB already up to date!")
} else {
return err
}
}
m.Close()
return nil
}
func RevertDB(db string) error {
m, err := getMigrateInstance(db)
if err != nil {
return err
}
if err := m.Migrate(Version - 1); err != nil {
if err == migrate.ErrNoChange {
cclog.Info("DB already up to date!")
} else {
return err
}
}
m.Close()
return nil
}
func ForceDB(db string) error {
m, err := getMigrateInstance(db)
if err != nil {
return err
}
if err := m.Force(int(Version)); err != nil {
return err
}
m.Close()
return nil
}
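Taken together, the three helpers support the usual golang-migrate lifecycle; a hedged usage sketch (the wrapper name and recovery policy are illustrative, not from the patch):

```go
// ensureSchema brings the archive database up to the embedded migration
// version, forcing the recorded version only when a migration failed.
func ensureSchema(dbPath string) error {
	if err := MigrateDB(dbPath); err != nil {
		cclog.Warnf("migration failed, forcing version %d: %v", Version, err)
		return ForceDB(dbPath)
	}
	return nil
}
```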
var (
dbConnOnce sync.Once
dbConnInstance *DBConnection
)
type DBConnection struct {
DB *sqlx.DB
}
type DatabaseOptions struct {
URL string
MaxOpenConnections int
MaxIdleConnections int
ConnectionMaxLifetime time.Duration
ConnectionMaxIdleTime time.Duration
}
func setupSqlite(db *sql.DB) (err error) {
pragmas := []string{
// "journal_mode = WAL",
// "busy_timeout = 5000",
// "synchronous = NORMAL",
// "cache_size = 1000000000", // 1GB
// "foreign_keys = true",
"temp_store = memory",
// "mmap_size = 3000000000",
}
for _, pragma := range pragmas {
_, err = db.Exec("PRAGMA " + pragma)
if err != nil {
return err
}
}
return nil
}
func Connect(driver string, db string) {
var err error
var dbHandle *sqlx.DB
dbConnOnce.Do(func() {
opts := DatabaseOptions{
URL: db,
MaxOpenConnections: 4,
MaxIdleConnections: 4,
ConnectionMaxLifetime: time.Hour,
ConnectionMaxIdleTime: time.Hour,
}
// TODO: Have separate DB handles for Writes and Reads
// Optimize SQLite connection: https://kerkour.com/sqlite-for-servers
connectionUrlParams := make(url.Values)
connectionUrlParams.Add("_txlock", "immediate")
connectionUrlParams.Add("_journal_mode", "WAL")
connectionUrlParams.Add("_busy_timeout", "5000")
connectionUrlParams.Add("_synchronous", "NORMAL")
connectionUrlParams.Add("_cache_size", "1000000000")
connectionUrlParams.Add("_foreign_keys", "true")
opts.URL = fmt.Sprintf("file:%s?%s", opts.URL, connectionUrlParams.Encode())
dbHandle, err = sqlx.Open("sqlite3", opts.URL)
if err != nil {
cclog.Abortf("Job archive DB Connection: Could not connect to '%s' database with sqlx.Open().\nError: %s\n", driver, err.Error())
}
		err = setupSqlite(dbHandle.DB)
		if err != nil {
			cclog.Abortf("Job archive DB Connection: Setup Sqlite failed.\nError: %s\n", err.Error())
		}
dbHandle.SetMaxOpenConns(opts.MaxOpenConnections)
dbHandle.SetMaxIdleConns(opts.MaxIdleConnections)
dbHandle.SetConnMaxLifetime(opts.ConnectionMaxLifetime)
dbHandle.SetConnMaxIdleTime(opts.ConnectionMaxIdleTime)
dbConnInstance = &DBConnection{DB: dbHandle}
// err = checkDBVersion(driver, dbHandle.DB)
// if err != nil {
// cclog.Abortf("DB Connection: Failed DB version check.\nError: %s\n", err.Error())
// }
})
}
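Connect is guarded by sync.Once, so callers can invoke it unconditionally and then grab the pooled handle; a minimal usage sketch (the function name is hypothetical):

```go
// openArchiveDB initializes the singleton connection (first call wins) and
// returns the underlying sqlx handle for queries.
func openArchiveDB(path string) *sqlx.DB {
	Connect("sqlite3", path)
	return GetConnection().DB
}
```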
func GetConnection() *DBConnection {
if dbConnInstance == nil {
cclog.Fatalf("Database connection not initialized!")
}
return dbConnInstance
}
func (fsa *SqliteArchive) Init(rawConfig json.RawMessage) (uint64, error) {
	var config SqliteArchiveConfig
	if err := json.Unmarshal(rawConfig, &config); err != nil {
		cclog.Warnf("Init() > Unmarshal error: %#v", err)
		return 0, err
	}
	fsa.path = config.Path
	// TODO: open the database, run migrations, and read back the version
	return Version, nil
}

func (fsa *SqliteArchive) Info() {
	fmt.Printf("SQLITE Job archive\n")
}

// The remaining methods are stubs so the type satisfies the archive backend
// interface; they are not implemented yet.

func (fsa *SqliteArchive) Exists(job *schema.Job) bool {
	return false
}

func (fsa *SqliteArchive) Clean(before int64, after int64) {}

func (fsa *SqliteArchive) Move(jobs []*schema.Job, path string) {}

func (fsa *SqliteArchive) CleanUp(jobs []*schema.Job) {}

func (fsa *SqliteArchive) Compress(jobs []*schema.Job) {}

func (fsa *SqliteArchive) CompressLast(starttime int64) int64 {
	return 0
}

func (fsa *SqliteArchive) LoadJobData(job *schema.Job) (schema.JobData, error) {
	return nil, fmt.Errorf("LoadJobData() not implemented")
}

func (fsa *SqliteArchive) LoadJobStats(job *schema.Job) (schema.ScopedJobStats, error) {
	return nil, fmt.Errorf("LoadJobStats() not implemented")
}

func (fsa *SqliteArchive) LoadJobMeta(job *schema.Job) (*schema.Job, error) {
	return nil, fmt.Errorf("LoadJobMeta() not implemented")
}

func (fsa *SqliteArchive) LoadClusterCfg(name string) (*schema.Cluster, error) {
	return nil, fmt.Errorf("LoadClusterCfg() not implemented")
}

func (fsa *SqliteArchive) Iter(loadMetricData bool) <-chan JobContainer {
	ch := make(chan JobContainer)
	close(ch)
	return ch
}

func (fsa *SqliteArchive) StoreJobMeta(job *schema.Job) error {
	return fmt.Errorf("StoreJobMeta() not implemented")
}

func (fsa *SqliteArchive) GetClusters() []string {
	return nil
}

func (fsa *SqliteArchive) ImportJob(
	jobMeta *schema.Job,
	jobData *schema.JobData,
) error {
	return fmt.Errorf("ImportJob() not implemented")
}
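As a sketch of where the stubs are headed, LoadJobMeta against the job table created by the migration above could look like this; the column names come from that migration, while decoding the meta_data column straight into schema.Job is an assumption:

```go
// loadJobMetaSqlite fetches the JSON metadata for one job and decodes it.
func loadJobMetaSqlite(db *sqlx.DB, jobID int64, cluster string, startTime int64) (*schema.Job, error) {
	var metaData string
	if err := db.Get(&metaData,
		`SELECT meta_data FROM job WHERE job_id = ? AND clustername = ? AND start_time = ?`,
		jobID, cluster, startTime); err != nil {
		return nil, err
	}
	job := new(schema.Job)
	if err := json.Unmarshal([]byte(metaData), job); err != nil {
		return nil, err
	}
	return job, nil
}
```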

File diff suppressed because one or more lines are too long


@@ -0,0 +1 @@
{"jobId":398759,"user":"k106eb10","project":"k106eb","cluster":"fritz","subCluster":"main","partition":"singlenode","arrayJobId":0,"numNodes":1,"numHwthreads":72,"numAcc":0,"exclusive":1,"monitoringStatus":1,"smt":0,"jobState":"completed","duration":456,"walltime":86340,"resources":[{"hostname":"f1039"}],"metaData":{"jobName":"ams_pipeline","jobScript":"#!/bin/bash -l\n#SBATCH --job-name=ams_pipeline\n#SBATCH --time=23:59:00\n#SBATCH --partition=singlenode\n#SBATCH --ntasks=72\n#SBATCH --hint=multithread\n#SBATCH --chdir=/home/atuin/k106eb/k106eb10/ACE/Ni-Al/DFT/VASP_PBE_500_0.125_0.1_NM/AlNi/binaries/bulk/moreprototypesYury/gen_9_D1_3_Al4Ba/cfg/AlNi4\n#SBATCH --export=NONE\nunset SLURM_EXPORT_ENV\nuss=$(whoami)\nfind /dev/shm/ -user $uss -type f -mmin +30 -delete\ncd \"/home/atuin/k106eb/k106eb10/ACE/Ni-Al/DFT/VASP_PBE_500_0.125_0.1_NM/AlNi/binaries/bulk/moreprototypesYury/gen_9_D1_3_Al4Ba/cfg/AlNi4\"\nams_pipeline pipeline.json \u003e \"/home/atuin/k106eb/k106eb10/ACE/Ni-Al/DFT/VASP_PBE_500_0.125_0.1_NM/AlNi/binaries/bulk/moreprototypesYury/gen_9_D1_3_Al4Ba/cfg/AlNi4/ams_pipeline_job.sh.out\" 2\u003e \"/home/atuin/k106eb/k106eb10/ACE/Ni-Al/DFT/VASP_PBE_500_0.125_0.1_NM/AlNi/binaries/bulk/moreprototypesYury/gen_9_D1_3_Al4Ba/cfg/AlNi4/ams_pipeline_job.sh.err\"\n","slurmInfo":"\nJobId=398759 JobName=ams_pipeline\n UserId=k106eb10(210387) GroupId=80111\n Account=k106eb QOS=normal \n Requeue=False Restarts=0 BatchFlag=True \n TimeLimit=1439\n SubmitTime=2023-02-09T14:10:18\n Partition=singlenode \n NodeList=f1039\n NumNodes=1 NumCPUs=72 NumTasks=72 CPUs/Task=1\n NTasksPerNode:Socket:Core=0:None:None\n TRES_req=cpu=72,mem=250000M,node=1,billing=72\n TRES_alloc=cpu=72,node=1,billing=72\n Command=/home/atuin/k106eb/k106eb10/ACE/Ni-Al/DFT/VASP_PBE_500_0.125_0.1_NM/AlNi/binaries/bulk/moreprototypesYury/gen_9_D1_3_Al4Ba/cfg/AlNi4/ams_pipeline_job.sh\n WorkDir=/home/atuin/k106eb/k106eb10/ACE/Ni-Al/DFT/VASP_PBE_500_0.125_0.1_NM/AlNi/binaries/bulk/moreprototypesYury/gen_9_D1_3_Al4Ba/cfg/AlNi4\n StdErr=\n 
StdOut=ams_pipeline.o%j\n"},"startTime":1675954289,"statistics":{"clock":{"unit":{"base":"Hz","prefix":"M"},"avg":2353.432,"min":948.182,"max":2788.083},"cpu_load":{"unit":{"base":""},"avg":51.549,"min":11.01,"max":69.65},"cpu_power":{"unit":{"base":"W"},"avg":341.035,"min":97.175,"max":504.194},"cpu_user":{"unit":{"base":""},"avg":67.956,"min":0.07,"max":96.219},"flops_any":{"unit":{"base":"F/s","prefix":"G"},"avg":232.513,"min":0,"max":410.287},"flops_dp":{"unit":{"base":"F/s","prefix":"G"},"avg":116.255,"min":0,"max":205.143},"flops_sp":{"unit":{"base":"F/s","prefix":"G"},"avg":0.003,"min":0,"max":0.009},"ib_recv":{"unit":{"base":"B/s"},"avg":22674.825,"min":39.601,"max":47259.522},"ib_recv_pkts":{"unit":{"base":"packets/s"},"avg":321.484,"min":0.2,"max":680.887},"ib_xmit":{"unit":{"base":"B/s"},"avg":140.208,"min":39.601,"max":576.666},"ib_xmit_pkts":{"unit":{"base":"packets/s"},"avg":0.685,"min":0.2,"max":1.984},"ipc":{"unit":{"base":"IPC"},"avg":0.944,"min":0.21,"max":1.424},"mem_bw":{"unit":{"base":"B/s","prefix":"G"},"avg":39.231,"min":0.023,"max":86.676},"mem_power":{"unit":{"base":"W"},"avg":18.049,"min":7.256,"max":26.926},"mem_used":{"unit":{"base":"B","prefix":"G"},"avg":19.107,"min":3.866,"max":24.729},"nfs4_read":{"unit":{"base":"B/s","prefix":"M"},"avg":389.875,"min":0,"max":1390},"nfs4_total":{"unit":{"base":"B/s","prefix":"M"},"avg":5032.75,"min":956,"max":10098},"nfs4_write":{"unit":{"base":"B/s","prefix":"M"},"avg":21.125,"min":0,"max":41},"vectorization_ratio":{"unit":{"base":"%"},"avg":60.12,"min":0.028,"max":99.133}}}

Binary file not shown.


@@ -0,0 +1 @@
{"jobId":398760,"user":"k106eb10","project":"k106eb","cluster":"fritz","subCluster":"main","partition":"singlenode","arrayJobId":0,"numNodes":1,"numHwthreads":72,"numAcc":0,"exclusive":1,"monitoringStatus":1,"smt":0,"jobState":"completed","duration":424,"walltime":86340,"resources":[{"hostname":"f0726"}],"metaData":{"jobName":"ams_pipeline","jobScript":"#!/bin/bash -l\n#SBATCH --job-name=ams_pipeline\n#SBATCH --time=23:59:00\n#SBATCH --partition=singlenode\n#SBATCH --ntasks=72\n#SBATCH --hint=multithread\n#SBATCH --chdir=/home/atuin/k106eb/k106eb10/ACE/Ni-Al/DFT/VASP_PBE_500_0.125_0.1_NM/AlNi/binaries/bulk/moreprototypesYury/gen_58_D2_e_BaHg11/cfg/Ni3Al33\n#SBATCH --export=NONE\nunset SLURM_EXPORT_ENV\nuss=$(whoami)\nfind /dev/shm/ -user $uss -type f -mmin +30 -delete\ncd \"/home/atuin/k106eb/k106eb10/ACE/Ni-Al/DFT/VASP_PBE_500_0.125_0.1_NM/AlNi/binaries/bulk/moreprototypesYury/gen_58_D2_e_BaHg11/cfg/Ni3Al33\"\nams_pipeline pipeline.json \u003e \"/home/atuin/k106eb/k106eb10/ACE/Ni-Al/DFT/VASP_PBE_500_0.125_0.1_NM/AlNi/binaries/bulk/moreprototypesYury/gen_58_D2_e_BaHg11/cfg/Ni3Al33/ams_pipeline_job.sh.out\" 2\u003e \"/home/atuin/k106eb/k106eb10/ACE/Ni-Al/DFT/VASP_PBE_500_0.125_0.1_NM/AlNi/binaries/bulk/moreprototypesYury/gen_58_D2_e_BaHg11/cfg/Ni3Al33/ams_pipeline_job.sh.err\"\n","slurmInfo":"\nJobId=398760 JobName=ams_pipeline\n UserId=k106eb10(210387) GroupId=80111\n Account=k106eb QOS=normal \n Requeue=False Restarts=0 BatchFlag=True \n TimeLimit=1439\n SubmitTime=2023-02-09T14:10:18\n Partition=singlenode \n NodeList=f0726\n NumNodes=1 NumCPUs=72 NumTasks=72 CPUs/Task=1\n NTasksPerNode:Socket:Core=0:None:None\n TRES_req=cpu=72,mem=250000M,node=1,billing=72\n TRES_alloc=cpu=72,node=1,billing=72\n Command=/home/atuin/k106eb/k106eb10/ACE/Ni-Al/DFT/VASP_PBE_500_0.125_0.1_NM/AlNi/binaries/bulk/moreprototypesYury/gen_58_D2_e_BaHg11/cfg/Ni3Al33/ams_pipeline_job.sh\n WorkDir=/home/atuin/k106eb/k106eb10/ACE/Ni-Al/DFT/VASP_PBE_500_0.125_0.1_NM/AlNi/binaries/bulk/moreprototypesYury/gen_58_D2_e_BaHg11/cfg/Ni3Al33\n StdErr=\n 
StdOut=ams_pipeline.o%j\n"},"startTime":1675954305,"statistics":{"clock":{"unit":{"base":"Hz","prefix":"M"},"avg":2080.276,"min":177.875,"max":2999.808},"cpu_load":{"unit":{"base":""},"avg":51.85,"min":17.55,"max":70.19},"cpu_power":{"unit":{"base":"W"},"avg":895207.403,"min":99.623,"max":5813238.184},"cpu_user":{"unit":{"base":""},"avg":69.514,"min":0.141,"max":95.954},"flops_any":{"unit":{"base":"F/s","prefix":"G"},"avg":449583.761,"min":0,"max":3594180.285},"flops_dp":{"unit":{"base":"F/s","prefix":"G"},"avg":224791.879,"min":0,"max":1797090.142},"flops_sp":{"unit":{"base":"F/s","prefix":"G"},"avg":0.004,"min":0,"max":0.009},"ib_recv":{"unit":{"base":"B/s"},"avg":22280.656,"min":39.602,"max":48063.992},"ib_recv_pkts":{"unit":{"base":"packets/s"},"avg":315.311,"min":0.2,"max":685.046},"ib_xmit":{"unit":{"base":"B/s"},"avg":139.722,"min":39.131,"max":568.239},"ib_xmit_pkts":{"unit":{"base":"packets/s"},"avg":0.685,"min":0.183,"max":2.167},"ipc":{"unit":{"base":"IPC"},"avg":0.693,"min":0.169,"max":0.871},"mem_bw":{"unit":{"base":"B/s","prefix":"G"},"avg":277555.465,"min":0.023,"max":2219018.206},"mem_power":{"unit":{"base":"W"},"avg":73514.92,"min":7.599,"max":485466.144},"mem_used":{"unit":{"base":"B","prefix":"G"},"avg":12.971,"min":3.973,"max":21.517},"nfs4_read":{"unit":{"base":"B/s","prefix":"M"},"avg":413.25,"min":0,"max":1730},"nfs4_total":{"unit":{"base":"B/s","prefix":"M"},"avg":5063.875,"min":1102,"max":10701},"nfs4_write":{"unit":{"base":"B/s","prefix":"M"},"avg":17.875,"min":1,"max":45},"vectorization_ratio":{"unit":{"base":"%"},"avg":54.299,"min":0,"max":98.808}}}

File diff suppressed because it is too large