mirror of https://github.com/ClusterCockpit/cc-backend
synced 2024-12-26 13:29:05 +01:00

Introduce new golang job-archive backend

This commit is contained in:
parent 90ae832c2e
commit b7970585ea

README.md (227 changed lines)
@@ -1,226 +1,9 @@
Added (new README, 9 lines):

# Setup

* Edit ```./graph/schema.graphqls```
* Regenerate code: ```gqlgen generate```
* Implement callbacks in ```graph/schema.resolvers.go```

# Run server

* ```go run server.go```

Removed (old README, 226 lines):

# HPCJobDatabase

A standardized interface and reference implementation for HPC job data.
The DB and json schema specification is available in the [wiki](https://github.com/RRZE-HPC/HPCJobDatabase/wiki).

# Dependencies

* Getopt::Long
* Pod::Usage
* DateTime::Format::Strptime
* DBD::SQLite

# Setup

```
sqlite3 jobDB < initDB.sql
```

# Helper Scripts

For all scripts apart from `acQuery.pl` the advice *use the source Luke* holds.

Help text for acQuery:

```
Usage:
    acQuery.pl [options] -- <DB file>

Help Options:
    --help                      Show help text
    --man                       Show man page
    --hasprofile <true|false>   Only show jobs with timeseries metric data
    --mode <mode>               Set the operation mode
    --user <user_id>            Search for jobs of specific user
    --project <project_id>      Search for jobs of specific project
    --numnodes <from> <to>      Specify range for number of nodes of job
    --starttime <from> <to>     Specify range for start time of jobs
    --duration <from> <to>      Specify duration range of jobs
    --mem_used <from> <to>      Specify range for average main memory capacity of job
    --mem_bandwidth <from> <to> Specify range for average main memory bandwidth of job
    --flops_any <from> <to>     Specify range for average flop any rate of job

Options:
    --help
        Show a brief help information.

    --man
        Read the manual, with examples.

    --hasprofile [true|false]
        Only show jobs with or without timeseries metric data.

    --mode [ids|query|count|list|stat|perf]
        Specify output mode. Mode can be one of:

        ids   - Print list of job ids matching conditions. One job id per line.
        query - Print the query string and then exit.
        count - Only output the number of jobs matching the conditions. (Default mode)
        list  - Output a record of every job matching the conditions.
        stat  - Output job statistic for all jobs matching the conditions.
        perf  - Output job performance footprint statistic for all jobs matching the conditions.

    --user
        Search job for a specific user id.

    --project
        Search job for a specific project.

    --duration
        Specify condition for job duration. This option takes two arguments: If both arguments
        are positive integers the condition is duration between first argument and second
        argument. If the second argument is zero the condition is duration smaller than first
        argument. If the first argument is zero the condition is duration larger than second
        argument. Duration can be in seconds, minutes (append m) or hours (append h).

    --numnodes
        Specify condition for number of node range of job. This option takes two arguments:
        If both arguments are positive integers the condition is number of nodes between first
        argument and second argument. If the second argument is zero the condition is number of
        nodes smaller than first argument. If the first argument is zero the condition is number
        of nodes larger than second argument.

    --starttime
        Specify condition for the starttime of job. This option takes two arguments: If both
        arguments are positive integers the condition is start time between first argument and
        second argument. If the second argument is zero the condition is start time smaller than
        first argument. If the first argument is zero the condition is start time larger than
        second argument. Start time must be given as date in the following format: %d.%m.%Y/%H:%M.

    --mem_used
        Specify condition for average main memory capacity used by job. This option takes two
        arguments: If both arguments are positive integers the condition is memory used is between
        first argument and second argument. If the second argument is zero the condition is memory
        used is smaller than first argument. If the first argument is zero the condition is memory
        used is larger than second argument.

    --mem_bandwidth
        Specify condition for average main memory bandwidth used by job. This option takes two
        arguments: If both arguments are positive integers the condition is memory bandwidth is
        between first argument and second argument. If the second argument is zero the condition
        is memory bandwidth is smaller than first argument. If the first argument is zero the
        condition is memory bandwidth is larger than second argument.

    --flops_any
        Specify condition for average flops any of job. This option takes two arguments: If both
        arguments are positive integers the condition is flops any is between first argument and
        second argument. If the second argument is zero the condition is flops any is smaller than
        first argument. If the first argument is zero the condition is flops any is larger than
        second argument.
```

# Examples

Query jobs with conditions:

```
[HPCJobDatabase] ./acQuery.pl --duration 20h 24h --starttime 01.08.2018/12:00 01.03.2019/12:00
COUNT 6476
```

Query jobs from alternative database file (default is jobDB):

```
[HPCJobDatabase] ./acQuery.pl --project project_30 --starttime 01.08.2018/12:00 01.03.2019/12:00 -- jobDB-anon-emmy
COUNT 21560
```

Get job statistics output:

```
[HPCJobDatabase] ./acQuery.pl --project project_30 --mode stat --duration 0 20h --starttime 01.08.2018/12:00 01.03.2019/12:00 -- jobDB-anon-emmy
=================================
Job count: 747
Total walltime [h]: 16334
Total node hours [h]: 78966

Histogram: Number of nodes
nodes   count
1       54     ****
2       1
3       1
4       36     ****
5       522    *******
6       118    *****
7       15     ***

Histogram: Walltime
hours   count
20      250    ******
21      200    ******
22      114    *****
23      183    ******
```

Get job performance statistics:

```
[HPCJobDatabase] ./acQuery.pl --project project_30 --mode perf --duration 0 20h --numnodes 1 4 --starttime 01.08.2018/12:00 01.03.2019/12:00 -- jobDB-anon-emmy
=================================
Job count: 92
Jobs with performance profile: 48
Total walltime [h]: 2070
Total node hours [h]: 4332

Histogram: Mem used
Mem     count
2       3      **
3       4      **
18      2      *
19      3      **
20      2      *
21      1
22      2      *
23      5      **
24      2      *
25      1
26      1
27      3      **
29      1
30      2      *
31      1
34      1
35      1
36      1
41      1
42      2      *
43      2      *
44      1
49      1
50      2      *
51      1
52      1
53      1

Histogram: Memory bandwidth
BW      count
1       1
2       9      ***
3       1
4       1
5       4      **
6       2      *
7       10     ***
8       9      ***
9       11     ***

Histogram: Flops any
flops   count
1       3      **
2       1
3       4      **
4       3      **
5       9      ***
6       10     ***
7       11     ***
85      1
225     1
236     1
240     2      *
244     2      *
```
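The new README's run step refers to `server.go`, which is not included in this commit's diff. Purely as orientation, a minimal sketch of such an entry point, wired to the `NewRootResolvers` helper from `graph/resolver.go` below, might look like the following; the import prefix (`fossil.moebiusband.org/jobaccounting-backend`, matching the imports used in `resolver.go` rather than the module path in `go.mod`), the database file name, and the listen address are assumptions, not taken from the commit.

```go
package main

// Hypothetical sketch of server.go; not part of this commit.
import (
	"log"
	"net/http"

	"fossil.moebiusband.org/jobaccounting-backend/graph"           // assumed import prefix
	"fossil.moebiusband.org/jobaccounting-backend/graph/generated" // gqlgen output (see gqlgen.yml)

	"github.com/99designs/gqlgen/graphql/handler"
	"github.com/99designs/gqlgen/graphql/handler/playground"
	"github.com/gorilla/mux"
	"github.com/jmoiron/sqlx"
	_ "github.com/mattn/go-sqlite3"
)

func main() {
	// Open the SQLite job database; the file name is an assumption.
	db, err := sqlx.Open("sqlite3", "./job.db")
	if err != nil {
		log.Fatal(err)
	}

	// Plug the resolvers from graph/resolver.go into the generated executable schema.
	srv := handler.NewDefaultServer(generated.NewExecutableSchema(graph.NewRootResolvers(db)))

	r := mux.NewRouter()
	r.Handle("/", playground.Handler("GraphQL playground", "/query"))
	r.Handle("/query", srv)

	log.Fatal(http.ListenAndServe(":8080", r))
}
```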
acSync.pl (99 lines deleted)
@@ -1,99 +0,0 @@
#!/usr/bin/env perl
# =======================================================================================
#
# Author: Jan Eitzinger (je), jan.eitzinger@fau.de
# Copyright (c) 2019 RRZE, University Erlangen-Nuremberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# =======================================================================================

use strict;
use warnings;
use utf8;

use File::Slurp;
use Data::Dumper;
use JSON::MaybeXS qw(encode_json decode_json);
use DBI;

my $database = $ARGV[0];
my $basedir = $ARGV[1];

my %attr = (
    PrintError => 1,
    RaiseError => 1
);

my $dbh = DBI->connect(
    "DBI:SQLite:dbname=$database", "", "", \%attr)
    or die "Could not connect to database: $DBI::errstr";

my $sth_select_job = $dbh->prepare(qq{
    SELECT id, user_id, job_id, cluster_id,
    start_time, stop_time, duration, num_nodes
    FROM job
    WHERE job_id=?
    });

my $jobcount = 0;
my $wrongjobcount = 0;

opendir my $dh, $basedir or die "can't open directory: $!";
while ( readdir $dh ) {
    chomp;
    next if $_ eq '.' or $_ eq '..';

    my $jobID = $_;
    my $needsUpdate = 0;

    my $jobmeta_json = read_file("$basedir/$jobID/meta.json");
    my $job = decode_json $jobmeta_json;
    my @row = $dbh->selectrow_array($sth_select_job, undef, $jobID);

    if ( @row ) {

        $jobcount++;
        # print Dumper(@row);
        my $duration_diff = abs($job->{duration} - $row[6]);

        if ( $duration_diff > 120 ) {
            $needsUpdate = 1;
            # print "$jobID DIFF DURATION $duration_diff\n";
            # print "CC $row[4] - $row[5]\n";
            # print "DB $job->{start_time} - $job->{stop_time}\n"
        }

        if ( $row[7] != $job->{num_nodes} ){
            $needsUpdate = 1;
            # print "$jobID DIFF NODES $row[7] $job->{num_nodes}\n";
        }
    } else {
        print "$jobID NOT in DB!\n";
    }

    if ( $needsUpdate ){
        $wrongjobcount++;
        print "$jobID\n";
    }
}
closedir $dh or die "can't close directory: $!";
$dbh->disconnect;

print "$wrongjobcount of $jobcount need update\n";
anonDB.pl (153 lines deleted)
@@ -1,153 +0,0 @@
#!/usr/bin/env perl
# =======================================================================================
#
# Author: Jan Eitzinger (je), jan.eitzinger@fau.de
# Copyright (c) 2019 RRZE, University Erlangen-Nuremberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# =======================================================================================

use strict;
use warnings;
use utf8;

use File::Slurp;
use Data::Dumper;
use JSON::MaybeXS qw(encode_json decode_json);
use DBI;

my $database = $ARGV[0];
my $basedir = $ARGV[1];

my %attr = (
    PrintError => 1,
    RaiseError => 1
);

my $dbh = DBI->connect(
    "DBI:SQLite:dbname=$database", "", "", \%attr)
    or die "Could not connect to database: $DBI::errstr";

my $sth_select_all = $dbh->prepare(qq{
    SELECT id, user_id, project_id
    FROM job;
    });

my $sth_select_job = $dbh->prepare(qq{
    SELECT id, user_id, project_id
    FROM job
    WHERE job_id=?;
    });

my $sth_update_job = $dbh->prepare(qq{
    UPDATE job
    SET user_id = ?,
        project_id = ?
    WHERE id=?;
    });

my $user_index = 0; my $project_index = 0;
my %user_lookup; my %project_lookup;
my %user_group;
my %row;

# build lookups
$sth_select_all->execute;
$sth_select_all->bind_columns( \( @row{ @{$sth_select_all->{NAME_lc} } } ));

while ($sth_select_all->fetch) {
    my $user_id = $row{'user_id'};
    my $project_id = $row{'project_id'};

    if ( not exists $user_lookup{$user_id}) {
        $user_index++;
        $user_lookup{$user_id} = $user_index;
        $user_group{$user_id} = $project_id;
    }

    if ( not exists $project_lookup{$project_id}) {
        $project_index++;
        $project_lookup{$project_id} = $project_index;
    }
}

write_file("user-conversion.json", encode_json \%user_lookup);
write_file("project-conversion.json", encode_json \%project_lookup);
print "$user_index total users\n";
print "$project_index total projects\n";

# convert database
$sth_select_all->execute;
$sth_select_all->bind_columns( \( @row{ @{$sth_select_all->{NAME_lc} } } ));

while ($sth_select_all->fetch) {
    my $user_id = 'user_'.$user_lookup{$row{'user_id'}};
    my $project_id = 'project_'.$project_lookup{$row{'project_id'}};

    # print "$row{'id'}: $user_id - $project_id\n";

    $sth_update_job->execute(
        $user_id,
        $project_id,
        $row{'id'}
    );
}

open(my $fh, '<:encoding(UTF-8)', './jobIds.txt')
    or die "Could not open file $!";

# convert job meta file
while ( <$fh> ) {

    my $line = $_;
    my ($jobID, $path1, $path2) = split ' ', $line;

    my $json = read_file("$basedir/$path1/$path2/meta.json");
    my $job = decode_json $json;

    my $user = $job->{'user_id'};

    # if ( $user =~ /^user_.*/ ) {
    #     print "$jobID $user\n";
    # }

    my $project;

    if ( exists $user_lookup{$user}) {
        $project = $user_group{$user};
        $user = 'user_'.$user_lookup{$user};
    } else {
        die "$user not in lookup hash!\n";
    }

    if ( exists $project_lookup{$project}) {
        $project = 'project_'.$project_lookup{$project};
    } else {
        die "$project not in lookup hash!\n";
    }

    $job->{user_id} = $user;
    $job->{project_id} = $project;
    $json = encode_json $job;
    write_file("$basedir/$path1/$path2/meta.json", $json);
}
close $fh;

$dbh->disconnect;
Deleted file (67 lines):
@@ -1,67 +0,0 @@
#!/usr/bin/env perl
# =======================================================================================
#
# Author: Jan Eitzinger (je), jan.eitzinger@fau.de
# Copyright (c) 2019 RRZE, University Erlangen-Nuremberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# =======================================================================================

use strict;
use warnings;
use utf8;

my $basedir = $ARGV[0];
open(my $fh, '>:encoding(UTF-8)', 'jobIds.txt')
    or die "Could not open file $!";

opendir my $odh, $basedir or die "can't open directory: $!";
my $count = 0;

while ( readdir $odh ) {
    chomp;
    next if $_ eq '.' or $_ eq '..';

    my $jobID1 = $_;
    print "Open $jobID1\n";

    opendir my $idh, "$basedir/$jobID1" or die "can't open directory: $!";

    while ( readdir $idh ) {
        chomp;
        next if $_ eq '.' or $_ eq '..';
        my $jobID2 = $_;

        unless (-e "$basedir/$jobID1/$jobID2/data.json") {
            print "$basedir/$jobID1/$jobID2/ File Doesn't Exist!\n";
            # rmdir "$basedir/$jobID1/$jobID2";
            $count++;
        } else {
            print $fh "$jobID1$jobID2.eadm $jobID1 $jobID2\n";
        }

    }

    closedir $idh or die "can't close directory: $!";
}
closedir $odh or die "can't close directory: $!";
close $fh;

print "$count empty jobs!\n";
Deleted file (88 lines):
@@ -1,88 +0,0 @@
#!/usr/bin/env perl

use strict;
use warnings;
use utf8;

use File::Slurp;
use Data::Dumper;
use JSON::MaybeXS qw(encode_json decode_json);

my $jobDirectory = '../data';

sub gnuplotControl {
    my $jobID = shift;
    my $metricName = shift;
    my $numNodes = shift;
    my $unit = shift;

    my $gpMacros = <<"END";
set terminal png size 1400,768 enhanced font ,12
set output '$jobID-$metricName.png'
set xlabel 'runtime [s]'
set ylabel '[$unit]'
END

    $gpMacros .= "plot '$metricName.dat' u 2 w lines notitle";
    foreach my $col ( 3 ... $numNodes ){
        $gpMacros .= ", '$metricName.dat' u $col w lines notitle";
    }

    open(my $fh, '>:encoding(UTF-8)', './metric.plot')
        or die "Could not open file $!";
    print $fh $gpMacros;
    close $fh;

    system('gnuplot','metric.plot');
}

sub createPlot {
    my $jobID = shift;
    my $metricName = shift;
    my $metric = shift;
    my $unit = shift;

    my @lines;

    foreach my $node ( @$metric ) {
        my $i = 0;

        foreach my $val ( @{$node->{data}} ){
            $lines[$i++] .= " $val";
        }
    }

    open(my $fh, '>:encoding(UTF-8)', './'.$metricName.'.dat')
        or die "Could not open file $!";

    my $timestamp = 0;

    foreach my $line ( @lines ) {
        print $fh $timestamp.$line."\n";
        $timestamp += 60;
    }

    close $fh;
    gnuplotControl($jobID, $metricName, $#$metric + 2, $unit);
}

mkdir('./plots');
chdir('./plots');

while ( <> ) {
    my $jobID = $_;
    $jobID =~ s/\.eadm//;
    chomp $jobID;

    my $level1 = $jobID/1000;
    my $level2 = $jobID%1000;
    my $jobpath = sprintf("%s/%d/%03d", $jobDirectory, $level1, $level2);

    my $json = read_file($jobpath.'/data.json');
    my $data = decode_json $json;
    $json = read_file($jobpath.'/meta.json');
    my $meta = decode_json $json;

    createPlot($jobID, 'flops_any', $data->{flops_any}->{series}, $data->{flops_any}->{unit});
    createPlot($jobID, 'mem_bw', $data->{mem_bw}->{series}, $data->{mem_bw}->{unit});
}
go.mod (new file, 12 lines)
@@ -0,0 +1,12 @@
module github.com/moebiusband/cc-jobarchive

go 1.15

require (
    github.com/99designs/gqlgen v0.13.0
    github.com/gorilla/handlers v1.5.1
    github.com/gorilla/mux v1.6.1
    github.com/jmoiron/sqlx v1.3.1 // indirect
    github.com/mattn/go-sqlite3 v1.14.6
    github.com/vektah/gqlparser/v2 v2.1.0
)
go.sum (new file, 91 lines)
@@ -0,0 +1,91 @@
github.com/99designs/gqlgen v0.13.0 h1:haLTcUp3Vwp80xMVEg5KRNwzfUrgFdRmtBY8fuB8scA=
github.com/99designs/gqlgen v0.13.0/go.mod h1:NV130r6f4tpRWuAI+zsrSdooO/eWUv+Gyyoi3rEfXIk=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
github.com/agnivade/levenshtein v1.0.3 h1:M5ZnqLOoZR8ygVq0FfkXsNOKzMCk0xRiow0R5+5VkQ0=
github.com/agnivade/levenshtein v1.0.3/go.mod h1:4SFRZbbXWLF4MU1T9Qg0pGgH3Pjs+t6ie5efyrwRJXs=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgryski/trifles v0.0.0-20190318185328-a8d75aae118c/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA=
github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ=
github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/go-chi/chi v3.3.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gorilla/context v0.0.0-20160226214623-1ea25387ff6f/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4=
github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q=
github.com/gorilla/mux v1.6.1 h1:KOwqsTYZdeuMacU7CxjMNYEKeBvLbxW+psodrbcEa3A=
github.com/gorilla/mux v1.6.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/jmoiron/sqlx v1.3.1 h1:aLN7YINNZ7cYOPK3QC83dbM6KT0NMqVMw961TqrejlE=
github.com/jmoiron/sqlx v1.3.1/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/logrusorgru/aurora v0.0.0-20200102142835-e9ef32dff381/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
github.com/matryer/moq v0.0.0-20200106131100-75d0ddfc0007 h1:reVOUXwnhsYv/8UqjvhrMOu5CNT9UapHFLbQ2JcXsmg=
github.com/matryer/moq v0.0.0-20200106131100-75d0ddfc0007/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-sqlite3 v1.14.4 h1:4rQjbDxdu9fSgI/r3KN72G3c2goxknAqHHgPWWs8UlI=
github.com/mattn/go-sqlite3 v1.14.4/go.mod h1:WVKg1VTActs4Qso6iwGbiFih2UIHo0ENGwNd0Lj+XmI=
github.com/mattn/go-sqlite3 v1.14.6 h1:dNPt6NO46WmLVt2DLNpwczCmdV5boIZ6g/tlDrlRUbg=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mitchellh/mapstructure v0.0.0-20180203102830-a4e142e9c047 h1:zCoDWFD5nrJJVjbXiDZcVhOBSzKn3o9LgRLLMRNuru8=
github.com/mitchellh/mapstructure v0.0.0-20180203102830-a4e142e9c047/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/vfsgen v0.0.0-20180121065927-ffb13db8def0/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/urfave/cli/v2 v2.1.1 h1:Qt8FeAtxE/vfdrLmR3rxR6JRE0RoVmbXu8+6kZtYU4k=
github.com/urfave/cli/v2 v2.1.1/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ=
github.com/vektah/dataloaden v0.2.1-0.20190515034641-a19b9a6e7c9e h1:+w0Zm/9gaWpEAyDlU1eKOuk5twTjAjuevXqcJJw8hrg=
github.com/vektah/dataloaden v0.2.1-0.20190515034641-a19b9a6e7c9e/go.mod h1:/HUdMve7rvxZma+2ZELQeNh88+003LL7Pf/CZ089j8U=
github.com/vektah/gqlparser/v2 v2.1.0 h1:uiKJ+T5HMGGQM2kRKQ8Pxw8+Zq9qhhZhz/lieYvCMns=
github.com/vektah/gqlparser/v2 v2.1.0/go.mod h1:SyUiHgLATUR8BiYURfTirrTcGpcE+4XkV2se04Px1Ms=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190515012406-7d7faa4812bd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20200114235610-7ae403b6b589 h1:rjUrONFu4kLchcZTfp3/96bR8bW8dIa8uz3cR5n0cgM=
golang.org/x/tools v0.0.0-20200114235610-7ae403b6b589/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
sourcegraph.com/sourcegraph/appdash v0.0.0-20180110180208-2cc67fd64755/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
sourcegraph.com/sourcegraph/appdash-data v0.0.0-20151005221446-73f23eafcf67/go.mod h1:L5q+DGLGOQFpo1snNEkLOJT2d1YTW66rWNzatr3He1k=
gqlgen.yml (new file, 60 lines)
@@ -0,0 +1,60 @@
# Where are all the schema files located? globs are supported eg src/**/*.graphqls
schema:
  - graph/*.graphqls

# Where should the generated server code go?
exec:
  filename: graph/generated/generated.go
  package: generated

# Uncomment to enable federation
# federation:
#   filename: graph/generated/federation.go
#   package: generated

# Where should any generated models go?
model:
  filename: graph/model/models_gen.go
  package: model

# Where should the resolver implementations go?
resolver:
  layout: follow-schema
  dir: graph
  package: graph

# Optional: turn on use `gqlgen:"fieldName"` tags in your models
# struct_tag: json

# Optional: turn on to use []Thing instead of []*Thing
# omit_slice_element_pointers: false

# Optional: set to speed up generation time by not performing a final validation pass.
# skip_validation: true

# gqlgen will search for any type names in the schema in these go packages
# if they match it will use them, otherwise it will generate them.
autobind:
  - "fossil.moebiusband.org/jobaccounting-backend/graph/model"

# This section declares type mapping between the GraphQL and go type systems
#
# The first line in each type will be used as defaults for resolver arguments and
# modelgen, the others will be allowed when binding to fields. Configure them to
# your liking
models:
  ID:
    model:
      - github.com/99designs/gqlgen/graphql.ID
      - github.com/99designs/gqlgen/graphql.Int
      - github.com/99designs/gqlgen/graphql.Int64
      - github.com/99designs/gqlgen/graphql.Int32
  Int:
    model:
      - github.com/99designs/gqlgen/graphql.Int
      - github.com/99designs/gqlgen/graphql.Int64
      - github.com/99designs/gqlgen/graphql.Int32
  Job:
    model: "fossil.moebiusband.org/jobaccounting-backend/graph/model.Job"
  Timestamp:
    model: "fossil.moebiusband.org/jobaccounting-backend/graph/model.Timestamp"
graph/model/models.go (new file, 25 lines)
@@ -0,0 +1,25 @@
package model

import (
    "time"
)

type Job struct {
    ID           string    `json:"id"`
    JobID        string    `json:"jobId" db:"job_id"`
    UserID       string    `json:"userId" db:"user_id"`
    ProjectID    string    `json:"projectId" db:"project_id"`
    ClusterID    string    `json:"clusterId" db:"cluster_id"`
    StartTime    time.Time `json:"startTime" db:"start_time"`
    Duration     int       `json:"duration" db:"duration"`
    Walltime     *int      `json:"walltime" db:"walltime"`
    Jobstate     *string   `json:"jobstate" db:"job_state"`
    NumNodes     int       `json:"numNodes" db:"num_nodes"`
    NodeList     string    `json:"nodelist" db:"node_list"`
    HasProfile   bool      `json:"hasProfile" db:"has_profile"`
    MemUsed_max  *float64  `json:"memUsedMax" db:"mem_used_max"`
    FlopsAny_avg *float64  `json:"flopsAnyAvg" db:"flops_any_avg"`
    MemBw_avg    *float64  `json:"memBwAvg" db:"mem_bw_avg"`
    NetBw_avg    *float64  `json:"netBwAvg" db:"net_bw_avg"`
    FileBw_avg   *float64  `json:"fileBwAvg" db:"file_bw_avg"`
}
graph/resolver.go (new file, 248 lines)
@@ -0,0 +1,248 @@
package graph

//go:generate go run github.com/99designs/gqlgen
import (
    "context"
    "fmt"
    "log"
    "strings"

    "fossil.moebiusband.org/jobaccounting-backend/graph/generated"
    "fossil.moebiusband.org/jobaccounting-backend/graph/model"
    "github.com/jmoiron/sqlx"
)

type Resolver struct {
    DB *sqlx.DB
}

func NewRootResolvers(db *sqlx.DB) generated.Config {
    c := generated.Config{
        Resolvers: &Resolver{
            DB: db,
        },
    }

    return c
}

// Helper functions
func addStringCondition(conditions []string, field string, input *model.StringInput) []string {
    if input.Eq != nil {
        conditions = append(conditions, fmt.Sprintf("%s='%s'", field, *input.Eq))
    }
    if input.StartsWith != nil {
        conditions = append(conditions, fmt.Sprintf("%s LIKE '%s%%'", field, *input.StartsWith))
    }
    if input.Contains != nil {
        conditions = append(conditions, fmt.Sprintf("%s LIKE '%%%s%%'", field, *input.Contains))
    }
    if input.EndsWith != nil {
        conditions = append(conditions, fmt.Sprintf("%s LIKE '%%%s'", field, *input.EndsWith))
    }

    return conditions
}

func addIntCondition(conditions []string, field string, input *model.IntRange) []string {
    conditions = append(conditions, fmt.Sprintf("%s BETWEEN %d AND %d", field, input.From, input.To))
    return conditions
}

func addTimeCondition(conditions []string, field string, input *model.TimeRange) []string {
    conditions = append(conditions, fmt.Sprintf("%s BETWEEN %d AND %d", field, input.From.Unix(), input.To.Unix()))
    return conditions
}

func buildQueryConditions(filterList *model.JobFilterList) string {
    var conditions []string

    for _, condition := range filterList.List {
        if condition.JobID != nil {
            conditions = addStringCondition(conditions, `job_id`, condition.JobID)
        }
        if condition.UserID != nil {
            conditions = addStringCondition(conditions, `user_id`, condition.UserID)
        }
        if condition.ProjectID != nil {
            conditions = addStringCondition(conditions, `project_id`, condition.ProjectID)
        }
        if condition.ClusterID != nil {
            conditions = addStringCondition(conditions, `cluster_id`, condition.ClusterID)
        }
        if condition.StartTime != nil {
            conditions = addTimeCondition(conditions, `start_time`, condition.StartTime)
        }
        if condition.Duration != nil {
            conditions = addIntCondition(conditions, `duration`, condition.Duration)
        }
        if condition.NumNodes != nil {
            conditions = addIntCondition(conditions, `num_nodes`, condition.NumNodes)
        }
    }

    return strings.Join(conditions, " AND ")
}

// Queries

func (r *queryResolver) JobByID(
    ctx context.Context,
    jobID string) (*model.Job, error) {
    var job model.Job
    qstr := `SELECT * from job `
    qstr += fmt.Sprintf("WHERE id=%s", jobID)

    row := r.DB.QueryRowx(qstr)
    err := row.StructScan(&job)
    if err != nil {
        return nil, err
    }

    return &job, nil
}

func (r *queryResolver) Jobs(
    ctx context.Context,
    filterList *model.JobFilterList,
    page *model.PageRequest,
    orderBy *model.OrderByInput) (*model.JobResultList, error) {

    var jobs []*model.Job
    var limit, offset int
    var qc, ob string

    if page != nil {
        limit = *page.Limit
        offset = *page.Offset
    } else {
        limit = 20
        offset = 0
    }

    if filterList != nil {
        qc = buildQueryConditions(filterList)

        if qc != "" {
            qc = `WHERE ` + qc
        }
    }

    if orderBy != nil {
        ob = fmt.Sprintf("ORDER BY %s %s", orderBy.Field, *orderBy.Order)
    }

    qstr := `SELECT * `
    qstr += fmt.Sprintf("FROM job %s %s LIMIT %d OFFSET %d", qc, ob, limit, offset)
    log.Printf("%s", qstr)

    rows, err := r.DB.Queryx(qstr)
    if err != nil {
        return nil, err
    }
    defer rows.Close()

    for rows.Next() {
        var job model.Job
        err := rows.StructScan(&job)
        if err != nil {
            fmt.Println(err)
        }
        jobs = append(jobs, &job)
    }

    var count int
    qstr = fmt.Sprintf("SELECT COUNT(*) FROM job %s", qc)
    row := r.DB.QueryRow(qstr)
    err = row.Scan(&count)
    if err != nil {
        return nil, err
    }

    returnValue := model.JobResultList{
        jobs,
        &offset, &limit,
        &count}

    return &returnValue, nil
}

func (r *queryResolver) JobsStatistics(
    ctx context.Context,
    filterList *model.JobFilterList) (*model.JobsStatistics, error) {
    var qc string

    if filterList != nil {
        qc = buildQueryConditions(filterList)

        if qc != "" {
            qc = `WHERE ` + qc
        }
    }

    // TODO Change current node hours to core hours
    qstr := `SELECT COUNT(*), SUM(duration)/3600, SUM(duration*num_nodes)/3600 `
    qstr += fmt.Sprintf("FROM job %s ", qc)
    log.Printf("%s", qstr)

    var stats model.JobsStatistics
    row := r.DB.QueryRow(qstr)
    err := row.Scan(&stats.TotalJobs, &stats.TotalWalltime, &stats.TotalCoreHours)
    if err != nil {
        return nil, err
    }

    qstr = `SELECT COUNT(*) `
    qstr += fmt.Sprintf("FROM job %s AND duration < 120", qc)
    log.Printf("%s", qstr)
    row = r.DB.QueryRow(qstr)
    err = row.Scan(&stats.ShortJobs)
    if err != nil {
        return nil, err
    }

    var histogram []*model.HistoPoint
    // Node histogram
    qstr = `SELECT num_nodes, COUNT(*) `
    qstr += fmt.Sprintf("FROM job %s GROUP BY 1", qc)
    log.Printf("%s", qstr)

    rows, err := r.DB.Query(qstr)
    if err != nil {
        return nil, err
    }
    defer rows.Close()

    for rows.Next() {
        var point model.HistoPoint
        rows.Scan(&point.Count, &point.Value)
        histogram = append(histogram, &point)
    }
    stats.HistNumNodes = histogram

    // Walltime histogram
    qstr = `SELECT duration/3600, COUNT(*) `
    qstr += fmt.Sprintf("FROM job %s GROUP BY 1", qc)
    log.Printf("%s", qstr)

    rows, err = r.DB.Query(qstr)
    if err != nil {
        return nil, err
    }
    defer rows.Close()

    histogram = nil

    for rows.Next() {
        var point model.HistoPoint
        rows.Scan(&point.Count, &point.Value)
        histogram = append(histogram, &point)
    }
    stats.HistWalltime = histogram

    return &stats, nil
}

func (r *Resolver) Query() generated.QueryResolver { return &queryResolver{r} }

type queryResolver struct{ *Resolver }
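To make the filter translation in `buildQueryConditions` above concrete, here is a small, hypothetical test sketch (not part of the commit). It assumes the gqlgen-generated types in `graph/model` expose exactly the fields used by the resolver (`List`, `UserID`, `NumNodes`, `StartsWith`, `From`, `To`) and the import prefix used in `resolver.go`.

```go
package graph

// Hypothetical test sketch illustrating the WHERE-clause builder above.
import (
	"testing"

	"fossil.moebiusband.org/jobaccounting-backend/graph/model" // assumed import prefix
)

func TestBuildQueryConditions(t *testing.T) {
	prefix := "user_1"
	filters := &model.JobFilterList{
		List: []*model.JobFilter{
			{
				UserID:   &model.StringInput{StartsWith: &prefix},
				NumNodes: &model.IntRange{From: 1, To: 4},
			},
		},
	}

	// Conditions are appended in field order and joined with " AND ".
	got := buildQueryConditions(filters)
	want := "user_id LIKE 'user_1%' AND num_nodes BETWEEN 1 AND 4"
	if got != want {
		t.Errorf("got %q, want %q", got, want)
	}
}
```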
graph/schema.graphqls (new file, 121 lines)
@@ -0,0 +1,121 @@
type Job {
    id: ID!
    jobId: String!
    userId: String!
    projectId: String!
    clusterId: String!
    startTime: Time!
    duration: Int!
    numNodes: Int!
}

type Query {
    jobById(jobId: String!): Job
    jobs(filter: JobFilterList, page: PageRequest, order: OrderByInput): JobResultList!
    jobsStatistics(filter: JobFilterList): JobsStatistics!
}

type Mutation {
    startJob(input: StartJobInput!): Job!
    stopJob(input: StopJobInput!): Job!
    addJob(input: AddJobInput!): Job!
}

input StartJobInput {
    jobId: String!
    userId: String!
    projectId: String!
    clusterId: String!
    startTime: Time!
    numNodes: Int!
}

input StopJobInput {
    stopTime: Time!
}

input AddJobInput {
    jobId: String!
    userId: String!
    projectId: String!
    clusterId: String!
    startTime: Time!
    duration: Int!
    numNodes: Int!
}

input JobFilterList {
    list: [JobFilter]
}

input JobFilter {
    jobId: StringInput
    userId: StringInput
    projectId: StringInput
    clusterId: StringInput
    duration: IntRange
    numNodes: IntRange
    startTime: TimeRange
    hasProfile: Boolean
}

input OrderByInput {
    field: String!
    order: SortDirectionEnum = ASC
}

enum SortDirectionEnum {
    DESC
    ASC
}

input StringInput {
    eq: String
    contains: String
    startsWith: String
    endsWith: String
}

input IntRange {
    from: Int!
    to: Int!
}

input FloatRange {
    from: Float!
    to: Float!
}

input TimeRange {
    from: Time!
    to: Time!
}

type JobResultList {
    items: [Job]!
    offset: Int
    limit: Int
    count: Int
}

type HistoPoint {
    count: Int!
    value: Int!
}

type JobsStatistics {
    totalJobs: Int!
    shortJobs: Int!
    totalWalltime: Int!
    totalCoreHours: Int!
    histWalltime: [HistoPoint]!
    histNumNodes: [HistoPoint]!
}

input PageRequest {
    itensPerPage: Int
    page: Int
}

scalar Time
graph/schema.resolvers.go
Normal file
4
graph/schema.resolvers.go
Normal file
@ -0,0 +1,4 @@
|
|||||||
|
package graph
|
||||||
|
|
||||||
|
// This file will be automatically regenerated based on the schema, any resolver implementations
|
||||||
|
// will be copied through when generating and any unknown code will be moved to the end.
|
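As a hedged illustration of the regeneration behaviour described in the comment above: after `gqlgen generate`, this file typically receives one stub per schema field that is not yet implemented elsewhere, roughly of the following shape. The exact generated names are assumptions; only the schema fields (`startJob`, `StartJobInput`) come from this commit.

```go
package graph

// Hypothetical example of a regenerated stub; not part of this commit.
import (
	"context"
	"fmt"

	"fossil.moebiusband.org/jobaccounting-backend/graph/generated" // assumed import prefix
	"fossil.moebiusband.org/jobaccounting-backend/graph/model"
)

// StartJob is an unimplemented stub that gqlgen would generate for the Mutation.startJob field.
func (r *mutationResolver) StartJob(ctx context.Context, input model.StartJobInput) (*model.Job, error) {
	panic(fmt.Errorf("not implemented"))
}

// Mutation returns the generated.MutationResolver implementation.
func (r *Resolver) Mutation() generated.MutationResolver { return &mutationResolver{r} }

type mutationResolver struct{ *Resolver }
```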
jobTag.pl (141 lines deleted)
@@ -1,141 +0,0 @@
#!/usr/bin/env perl
# =======================================================================================
#
# Author: Jan Eitzinger (je), jan.eitzinger@fau.de
# Copyright (c) 2019 RRZE, University Erlangen-Nuremberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# =======================================================================================

use strict;
use warnings;
use utf8;

use DBI;

my $database = 'jobDB';

my %attr = (
    PrintError => 1,
    RaiseError => 1
);

my $dbh = DBI->connect(
    "DBI:SQLite:dbname=$database", "", "", \%attr)
    or die "Could not connect to database: $DBI::errstr";

my $sth_select_tagged_jobs = $dbh->prepare(qq{
    SELECT j.*
    FROM job j
    JOIN jobtag jt ON j.id = jt.job_id
    JOIN tag t ON jt.tag_id = t.id
    WHERE t.name = ?
    });

my $sth_select_job_tags = $dbh->prepare(qq{
    SELECT t.*
    FROM tag t
    JOIN jobtag jt ON t.id = jt.tag_id
    JOIN job j ON jt.job_id = j.id
    WHERE j.job_id = ?
    });

my $sth_select_job = $dbh->prepare(qq{
    SELECT id
    FROM job
    WHERE job_id=?
    });

my $sth_select_tag = $dbh->prepare(qq{
    SELECT id
    FROM tag
    WHERE name=?
    });

my $sth_insert_tag = $dbh->prepare(qq{
    INSERT INTO tag(type,name)
    VALUES(?,?)
    });

my $sth_job_add_tag = $dbh->prepare(qq{
    INSERT INTO jobtag(job_id,tag_id)
    VALUES(?,?)
    });

my $sth_job_has_tag = $dbh->prepare(qq{
    SELECT id FROM job
    WHERE job_id=? AND tag_id=?
    });

my $CMD = $ARGV[0];
my $JOB_ID = $ARGV[1];
my $TAG_NAME = $ARGV[2];

my ($jid, $tid);

# check if job exists
my @row = $dbh->selectrow_array($sth_select_job, undef, $JOB_ID);

if ( @row ) {
    $jid = $row[0];
} else {
    die "Job does not exist: $JOB_ID!\n";
}

# check if tag already exists
@row = $dbh->selectrow_array($sth_select_tag, undef, $TAG_NAME);

if ( @row ) {
    $tid = $row[0];
} else {
    print "Insert new tag: $TAG_NAME!\n";

    $sth_insert_tag->execute('pathologic', $TAG_NAME);
}

if ( $CMD eq 'ADD' ) {
    @row = $dbh->selectrow_array($sth_job_has_tag, undef, $jid, $tid);
    if ( @row ) {
        die "Job already tagged!\n";
    } else {
        print "Adding tag $TAG_NAME to job $JOB_ID!\n";
        $sth_job_add_tag($jid, $tid);
    }
}
elsif ( $CMD eq 'RM' ) {
    # elsif...
}
elsif ( $CMD eq 'LST' ) {
    $sth_select_job_tags->execute;
    my ($id, $type, $name);

    while(($id,$type,$name) = $sth->fetchrow()){
        print("$id, $type, $name\n");
    }
    $sth_select_job_tags->finish();
}
elsif ( $CMD eq 'LSJ' ) {
    # elsif...
}
else {
    die "Unknown command: $CMD!\n";
}

$dbh->disconnect();
Deleted file (78 lines):
@@ -1,78 +0,0 @@
#=======================================================================================
#
# Author: Jan Eitzinger (je), jan.eitzinger@fau.de
# Copyright (c) 2019 RRZE, University Erlangen-Nuremberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#=======================================================================================
TAG = CLANG
#CONFIGURE BUILD SYSTEM
TARGET    = jobTagger-$(TAG)
BUILD_DIR = ./$(TAG)
SRC_DIR   = ./src
MAKE_DIR  = ./
Q         ?= @

#DO NOT EDIT BELOW
include $(MAKE_DIR)/include_$(TAG).mk

VPATH     = $(SRC_DIR)
ASM       = $(patsubst $(SRC_DIR)/%.c, $(BUILD_DIR)/%.s,$(wildcard $(SRC_DIR)/*.c))
OBJ       = $(patsubst $(SRC_DIR)/%.c, $(BUILD_DIR)/%.o,$(wildcard $(SRC_DIR)/*.c))
CPPFLAGS := $(CPPFLAGS) $(DEFINES) $(OPTIONS) $(INCLUDES)

${TARGET}: $(BUILD_DIR) $(OBJ)
	@echo "===> LINKING $(TARGET)"
	$(Q)${LINKER} ${LFLAGS} -o $(TARGET) $(OBJ) $(LIBS)

asm: $(BUILD_DIR) $(ASM)

$(BUILD_DIR)/%.o: %.c
	@echo "===> COMPILE $@"
	$(CC) -c $(CPPFLAGS) $(CFLAGS) $< -o $@
	$(Q)$(GCC) $(CPPFLAGS) -MT $(@:.d=.o) -MM $< > $(BUILD_DIR)/$*.d

$(BUILD_DIR)/%.s: %.c
	@echo "===> GENERATE ASM $@"
	$(CC) -S $(CPPFLAGS) $(CFLAGS) $< -o $@

tags:
	@echo "===> GENERATE TAGS"
	$(Q)ctags -R

$(BUILD_DIR):
	@mkdir $(BUILD_DIR)

ifeq ($(findstring $(MAKECMDGOALS),clean),)
-include $(OBJ:.o=.d)
endif

.PHONY: clean distclean

clean:
	@echo "===> CLEAN"
	@rm -rf $(BUILD_DIR)
	@rm -f tags

distclean: clean
	@echo "===> DIST CLEAN"
	@rm -f $(TARGET)
	@rm -f tags
@ -1,10 +0,0 @@
|
|||||||
CC = clang
GCC = gcc
LINKER = $(CC)

OPENMP = -fopenmp
CFLAGS = -Ofast -std=c99 $(OPENMP)
LFLAGS = $(OPENMP)
DEFINES = -D_GNU_SOURCE -DJSMN_PARENT_LINKS
INCLUDES =
LIBS =
@ -1,13 +0,0 @@
|
|||||||
CC = gcc
GCC = gcc
LINKER = $(CC)

ifeq ($(ENABLE_OPENMP),true)
OPENMP = -fopenmp
endif

CFLAGS = -Ofast -ffreestanding -std=c99 $(OPENMP)
LFLAGS = $(OPENMP)
DEFINES = -D_GNU_SOURCE
INCLUDES =
LIBS =
@ -1,13 +0,0 @@
|
|||||||
CC = icc
GCC = gcc
LINKER = $(CC)

ifeq ($(ENABLE_OPENMP),true)
OPENMP = -qopenmp
endif

CFLAGS = -qopt-report -Ofast -xHost -std=c99 -ffreestanding $(OPENMP)
LFLAGS = $(OPENMP)
DEFINES = -D_GNU_SOURCE
INCLUDES =
LIBS =
@ -1,88 +0,0 @@
|
|||||||
/*
|
|
||||||
* =======================================================================================
|
|
||||||
*
|
|
||||||
* Author: Jan Eitzinger (je), jan.eitzinger@fau.de
|
|
||||||
* Copyright (c) 2019 RRZE, University Erlangen-Nuremberg
|
|
||||||
*
|
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
* of this software and associated documentation files (the "Software"), to deal
|
|
||||||
* in the Software without restriction, including without limitation the rights
|
|
||||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
* copies of the Software, and to permit persons to whom the Software is
|
|
||||||
* furnished to do so, subject to the following conditions:
|
|
||||||
*
|
|
||||||
* The above copyright notice and this permission notice shall be included in all
|
|
||||||
* copies or substantial portions of the Software.
|
|
||||||
*
|
|
||||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
* SOFTWARE.
|
|
||||||
*
|
|
||||||
* =======================================================================================
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifdef __linux__
#ifdef _OPENMP
#include <stdlib.h>
#include <stdio.h>
#include <sched.h>
#include <unistd.h>
#include <sys/types.h>
#include <pthread.h>
#include <sys/syscall.h>

#define MAX_NUM_THREADS 128
#define gettid() syscall(SYS_gettid)

static int
getProcessorID(cpu_set_t* cpu_set)
{
    int processorId;

    for ( processorId = 0; processorId < MAX_NUM_THREADS; processorId++ )
    {
        if ( CPU_ISSET(processorId,cpu_set) )
        {
            break;
        }
    }
    return processorId;
}

int
affinity_getProcessorId()
{
    cpu_set_t cpu_set;
    CPU_ZERO(&cpu_set);
    sched_getaffinity(gettid(),sizeof(cpu_set_t), &cpu_set);

    return getProcessorID(&cpu_set);
}

void
affinity_pinThread(int processorId)
{
    cpu_set_t cpuset;
    pthread_t thread;

    thread = pthread_self();
    CPU_ZERO(&cpuset);
    CPU_SET(processorId, &cpuset);
    pthread_setaffinity_np(thread, sizeof(cpu_set_t), &cpuset);
}

void
affinity_pinProcess(int processorId)
{
    cpu_set_t cpuset;

    CPU_ZERO(&cpuset);
    CPU_SET(processorId, &cpuset);
    sched_setaffinity(0, sizeof(cpu_set_t), &cpuset);
}
#endif /*_OPENMP*/
#endif /*__linux__*/
@ -1,35 +0,0 @@
|
|||||||
/*
|
|
||||||
* =======================================================================================
|
|
||||||
*
|
|
||||||
* Author: Jan Eitzinger (je), jan.eitzinger@fau.de
|
|
||||||
* Copyright (c) 2019 RRZE, University Erlangen-Nuremberg
|
|
||||||
*
|
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
* of this software and associated documentation files (the "Software"), to deal
|
|
||||||
* in the Software without restriction, including without limitation the rights
|
|
||||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
* copies of the Software, and to permit persons to whom the Software is
|
|
||||||
* furnished to do so, subject to the following conditions:
|
|
||||||
*
|
|
||||||
* The above copyright notice and this permission notice shall be included in all
|
|
||||||
* copies or substantial portions of the Software.
|
|
||||||
*
|
|
||||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
* SOFTWARE.
|
|
||||||
*
|
|
||||||
* =======================================================================================
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef AFFINITY_H
#define AFFINITY_H

extern int affinity_getProcessorId();
extern void affinity_pinProcess(int);
extern void affinity_pinThread(int);

#endif /*AFFINITY_H*/
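The two affinity files above expose a small pinning API (affinity_getProcessorId, affinity_pinThread, affinity_pinProcess). A minimal usage sketch, not part of the original sources, that pins each OpenMP thread to the core matching its thread id:

```c
/* Illustrative only: pin every OpenMP thread to the core with its thread id,
 * then report where it actually runs. Assumes the affinity.h/affinity.c pair above
 * and a Linux build with -fopenmp. */
#include <stdio.h>
#include <omp.h>
#include "affinity.h"

int main(void)
{
    #pragma omp parallel
    {
        int tid = omp_get_thread_num();
        affinity_pinThread(tid);                 /* bind this thread to core <tid> */
        printf("thread %d now runs on core %d\n",
               tid, affinity_getProcessorId());  /* read the placement back */
    }
    return 0;
}
```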
@ -1,58 +0,0 @@
|
|||||||
/*
|
|
||||||
* =======================================================================================
|
|
||||||
*
|
|
||||||
* Author: Jan Eitzinger (je), jan.eitzinger@fau.de
|
|
||||||
* Copyright (c) 2019 RRZE, University Erlangen-Nuremberg
|
|
||||||
*
|
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
* of this software and associated documentation files (the "Software"), to deal
|
|
||||||
* in the Software without restriction, including without limitation the rights
|
|
||||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
* copies of the Software, and to permit persons to whom the Software is
|
|
||||||
* furnished to do so, subject to the following conditions:
|
|
||||||
*
|
|
||||||
* The above copyright notice and this permission notice shall be included in all
|
|
||||||
* copies or substantial portions of the Software.
|
|
||||||
*
|
|
||||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
* SOFTWARE.
|
|
||||||
*
|
|
||||||
* =======================================================================================
|
|
||||||
*/
|
|
||||||
|
|
||||||
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>

void* allocate (int alignment, size_t bytesize)
{
    int errorCode;
    void* ptr;

    errorCode = posix_memalign(&ptr, alignment, bytesize);

    if (errorCode) {
        if (errorCode == EINVAL) {
            fprintf(stderr,
                    "Error: Alignment parameter is not a power of two\n");
            exit(EXIT_FAILURE);
        }
        if (errorCode == ENOMEM) {
            fprintf(stderr,
                    "Error: Insufficient memory to fulfill the request\n");
            exit(EXIT_FAILURE);
        }
    }

    if (ptr == NULL) {
        fprintf(stderr, "Error: posix_memalign failed!\n");
        exit(EXIT_FAILURE);
    }

    return ptr;
}
@ -1,33 +0,0 @@
|
|||||||
/*
|
|
||||||
* =======================================================================================
|
|
||||||
*
|
|
||||||
* Author: Jan Eitzinger (je), jan.eitzinger@fau.de
|
|
||||||
* Copyright (c) 2019 RRZE, University Erlangen-Nuremberg
|
|
||||||
*
|
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
* of this software and associated documentation files (the "Software"), to deal
|
|
||||||
* in the Software without restriction, including without limitation the rights
|
|
||||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
* copies of the Software, and to permit persons to whom the Software is
|
|
||||||
* furnished to do so, subject to the following conditions:
|
|
||||||
*
|
|
||||||
* The above copyright notice and this permission notice shall be included in all
|
|
||||||
* copies or substantial portions of the Software.
|
|
||||||
*
|
|
||||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
* SOFTWARE.
|
|
||||||
*
|
|
||||||
* =======================================================================================
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef __ALLOCATE_H_
#define __ALLOCATE_H_

#include <stddef.h>   /* size_t */

extern void* allocate (int alignment, size_t bytesize);

#endif
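allocate() above is a thin posix_memalign wrapper that prints a diagnostic and exits on failure. A short, illustrative usage sketch (buffer size and alignment are made-up example values):

```c
/* Illustrative only: request a 64-byte aligned buffer via the allocate() wrapper above. */
#include <stdio.h>
#include <stdlib.h>
#include "allocate.h"

int main(void)
{
    size_t n = 1024;
    double* buffer = (double*) allocate(64, n * sizeof(double)); /* exits on failure */

    for (size_t i = 0; i < n; i++) {
        buffer[i] = (double) i;
    }

    printf("buffer[10] = %.1f\n", buffer[10]);
    free(buffer); /* memory from posix_memalign is released with free() */
    return 0;
}
```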
@ -1,468 +0,0 @@
|
|||||||
/*
|
|
||||||
* MIT License
|
|
||||||
*
|
|
||||||
* Copyright (c) 2010 Serge Zaitsev
|
|
||||||
*
|
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
* of this software and associated documentation files (the "Software"), to deal
|
|
||||||
* in the Software without restriction, including without limitation the rights
|
|
||||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
* copies of the Software, and to permit persons to whom the Software is
|
|
||||||
* furnished to do so, subject to the following conditions:
|
|
||||||
*
|
|
||||||
* The above copyright notice and this permission notice shall be included in
|
|
||||||
* all copies or substantial portions of the Software.
|
|
||||||
*
|
|
||||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
* SOFTWARE.
|
|
||||||
*/
|
|
||||||
#ifndef JSMN_H
|
|
||||||
#define JSMN_H
|
|
||||||
|
|
||||||
#include <stddef.h>
|
|
||||||
|
|
||||||
#ifdef __cplusplus
|
|
||||||
extern "C" {
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#ifdef JSMN_STATIC
|
|
||||||
#define JSMN_API static
|
|
||||||
#else
|
|
||||||
#define JSMN_API extern
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/**
|
|
||||||
* JSON type identifier. Basic types are:
|
|
||||||
* o Object
|
|
||||||
* o Array
|
|
||||||
* o String
|
|
||||||
* o Other primitive: number, boolean (true/false) or null
|
|
||||||
*/
|
|
||||||
typedef enum {
|
|
||||||
JSMN_UNDEFINED = 0,
|
|
||||||
JSMN_OBJECT = 1,
|
|
||||||
JSMN_ARRAY = 2,
|
|
||||||
JSMN_STRING = 3,
|
|
||||||
JSMN_PRIMITIVE = 4
|
|
||||||
} jsmntype_t;
|
|
||||||
|
|
||||||
enum jsmnerr {
|
|
||||||
/* Not enough tokens were provided */
|
|
||||||
JSMN_ERROR_NOMEM = -1,
|
|
||||||
/* Invalid character inside JSON string */
|
|
||||||
JSMN_ERROR_INVAL = -2,
|
|
||||||
/* The string is not a full JSON packet, more bytes expected */
|
|
||||||
JSMN_ERROR_PART = -3
|
|
||||||
};
|
|
||||||
|
|
||||||
/**
|
|
||||||
* JSON token description.
|
|
||||||
* type type (object, array, string etc.)
|
|
||||||
* start start position in JSON data string
|
|
||||||
* end end position in JSON data string
|
|
||||||
*/
|
|
||||||
typedef struct {
|
|
||||||
jsmntype_t type;
|
|
||||||
int start;
|
|
||||||
int end;
|
|
||||||
int size;
|
|
||||||
#ifdef JSMN_PARENT_LINKS
|
|
||||||
int parent;
|
|
||||||
#endif
|
|
||||||
} jsmntok_t;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* JSON parser. Contains an array of token blocks available. Also stores
|
|
||||||
* the string being parsed now and current position in that string.
|
|
||||||
*/
|
|
||||||
typedef struct {
|
|
||||||
unsigned int pos; /* offset in the JSON string */
|
|
||||||
unsigned int toknext; /* next token to allocate */
|
|
||||||
int toksuper; /* superior token node, e.g. parent object or array */
|
|
||||||
} jsmn_parser;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Create JSON parser over an array of tokens
|
|
||||||
*/
|
|
||||||
JSMN_API void jsmn_init(jsmn_parser *parser);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Run JSON parser. It parses a JSON data string into and array of tokens, each
|
|
||||||
* describing
|
|
||||||
* a single JSON object.
|
|
||||||
*/
|
|
||||||
JSMN_API int jsmn_parse(jsmn_parser *parser, const char *js, const size_t len,
|
|
||||||
jsmntok_t *tokens, const unsigned int num_tokens);
|
|
||||||
|
|
||||||
#ifndef JSMN_HEADER
|
|
||||||
/**
|
|
||||||
* Allocates a fresh unused token from the token pool.
|
|
||||||
*/
|
|
||||||
static jsmntok_t *jsmn_alloc_token(jsmn_parser *parser, jsmntok_t *tokens,
|
|
||||||
const size_t num_tokens) {
|
|
||||||
jsmntok_t *tok;
|
|
||||||
if (parser->toknext >= num_tokens) {
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
tok = &tokens[parser->toknext++];
|
|
||||||
tok->start = tok->end = -1;
|
|
||||||
tok->size = 0;
|
|
||||||
#ifdef JSMN_PARENT_LINKS
|
|
||||||
tok->parent = -1;
|
|
||||||
#endif
|
|
||||||
return tok;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Fills token type and boundaries.
|
|
||||||
*/
|
|
||||||
static void jsmn_fill_token(jsmntok_t *token, const jsmntype_t type,
|
|
||||||
const int start, const int end) {
|
|
||||||
token->type = type;
|
|
||||||
token->start = start;
|
|
||||||
token->end = end;
|
|
||||||
token->size = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Fills next available token with JSON primitive.
|
|
||||||
*/
|
|
||||||
static int jsmn_parse_primitive(jsmn_parser *parser, const char *js,
|
|
||||||
const size_t len, jsmntok_t *tokens,
|
|
||||||
const size_t num_tokens) {
|
|
||||||
jsmntok_t *token;
|
|
||||||
int start;
|
|
||||||
|
|
||||||
start = parser->pos;
|
|
||||||
|
|
||||||
for (; parser->pos < len && js[parser->pos] != '\0'; parser->pos++) {
|
|
||||||
switch (js[parser->pos]) {
|
|
||||||
#ifndef JSMN_STRICT
|
|
||||||
/* In strict mode primitive must be followed by "," or "}" or "]" */
|
|
||||||
case ':':
|
|
||||||
#endif
|
|
||||||
case '\t':
|
|
||||||
case '\r':
|
|
||||||
case '\n':
|
|
||||||
case ' ':
|
|
||||||
case ',':
|
|
||||||
case ']':
|
|
||||||
case '}':
|
|
||||||
goto found;
|
|
||||||
}
|
|
||||||
if (js[parser->pos] < 32 || js[parser->pos] >= 127) {
|
|
||||||
parser->pos = start;
|
|
||||||
return JSMN_ERROR_INVAL;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
#ifdef JSMN_STRICT
|
|
||||||
/* In strict mode primitive must be followed by a comma/object/array */
|
|
||||||
parser->pos = start;
|
|
||||||
return JSMN_ERROR_PART;
|
|
||||||
#endif
|
|
||||||
|
|
||||||
found:
|
|
||||||
if (tokens == NULL) {
|
|
||||||
parser->pos--;
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
token = jsmn_alloc_token(parser, tokens, num_tokens);
|
|
||||||
if (token == NULL) {
|
|
||||||
parser->pos = start;
|
|
||||||
return JSMN_ERROR_NOMEM;
|
|
||||||
}
|
|
||||||
jsmn_fill_token(token, JSMN_PRIMITIVE, start, parser->pos);
|
|
||||||
#ifdef JSMN_PARENT_LINKS
|
|
||||||
token->parent = parser->toksuper;
|
|
||||||
#endif
|
|
||||||
parser->pos--;
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Fills next token with JSON string.
|
|
||||||
*/
|
|
||||||
static int jsmn_parse_string(jsmn_parser *parser, const char *js,
|
|
||||||
const size_t len, jsmntok_t *tokens,
|
|
||||||
const size_t num_tokens) {
|
|
||||||
jsmntok_t *token;
|
|
||||||
|
|
||||||
int start = parser->pos;
|
|
||||||
|
|
||||||
parser->pos++;
|
|
||||||
|
|
||||||
/* Skip starting quote */
|
|
||||||
for (; parser->pos < len && js[parser->pos] != '\0'; parser->pos++) {
|
|
||||||
char c = js[parser->pos];
|
|
||||||
|
|
||||||
/* Quote: end of string */
|
|
||||||
if (c == '\"') {
|
|
||||||
if (tokens == NULL) {
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
token = jsmn_alloc_token(parser, tokens, num_tokens);
|
|
||||||
if (token == NULL) {
|
|
||||||
parser->pos = start;
|
|
||||||
return JSMN_ERROR_NOMEM;
|
|
||||||
}
|
|
||||||
jsmn_fill_token(token, JSMN_STRING, start + 1, parser->pos);
|
|
||||||
#ifdef JSMN_PARENT_LINKS
|
|
||||||
token->parent = parser->toksuper;
|
|
||||||
#endif
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Backslash: Quoted symbol expected */
|
|
||||||
if (c == '\\' && parser->pos + 1 < len) {
|
|
||||||
int i;
|
|
||||||
parser->pos++;
|
|
||||||
switch (js[parser->pos]) {
|
|
||||||
/* Allowed escaped symbols */
|
|
||||||
case '\"':
|
|
||||||
case '/':
|
|
||||||
case '\\':
|
|
||||||
case 'b':
|
|
||||||
case 'f':
|
|
||||||
case 'r':
|
|
||||||
case 'n':
|
|
||||||
case 't':
|
|
||||||
break;
|
|
||||||
/* Allows escaped symbol \uXXXX */
|
|
||||||
case 'u':
|
|
||||||
parser->pos++;
|
|
||||||
for (i = 0; i < 4 && parser->pos < len && js[parser->pos] != '\0';
|
|
||||||
i++) {
|
|
||||||
/* If it isn't a hex character we have an error */
|
|
||||||
if (!((js[parser->pos] >= 48 && js[parser->pos] <= 57) || /* 0-9 */
|
|
||||||
(js[parser->pos] >= 65 && js[parser->pos] <= 70) || /* A-F */
|
|
||||||
(js[parser->pos] >= 97 && js[parser->pos] <= 102))) { /* a-f */
|
|
||||||
parser->pos = start;
|
|
||||||
return JSMN_ERROR_INVAL;
|
|
||||||
}
|
|
||||||
parser->pos++;
|
|
||||||
}
|
|
||||||
parser->pos--;
|
|
||||||
break;
|
|
||||||
/* Unexpected symbol */
|
|
||||||
default:
|
|
||||||
parser->pos = start;
|
|
||||||
return JSMN_ERROR_INVAL;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
parser->pos = start;
|
|
||||||
return JSMN_ERROR_PART;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Parse JSON string and fill tokens.
|
|
||||||
*/
|
|
||||||
JSMN_API int jsmn_parse(jsmn_parser *parser, const char *js, const size_t len,
|
|
||||||
jsmntok_t *tokens, const unsigned int num_tokens) {
|
|
||||||
int r;
|
|
||||||
int i;
|
|
||||||
jsmntok_t *token;
|
|
||||||
int count = parser->toknext;
|
|
||||||
|
|
||||||
for (; parser->pos < len && js[parser->pos] != '\0'; parser->pos++) {
|
|
||||||
char c;
|
|
||||||
jsmntype_t type;
|
|
||||||
|
|
||||||
c = js[parser->pos];
|
|
||||||
switch (c) {
|
|
||||||
case '{':
|
|
||||||
case '[':
|
|
||||||
count++;
|
|
||||||
if (tokens == NULL) {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
token = jsmn_alloc_token(parser, tokens, num_tokens);
|
|
||||||
if (token == NULL) {
|
|
||||||
return JSMN_ERROR_NOMEM;
|
|
||||||
}
|
|
||||||
if (parser->toksuper != -1) {
|
|
||||||
jsmntok_t *t = &tokens[parser->toksuper];
|
|
||||||
#ifdef JSMN_STRICT
|
|
||||||
/* In strict mode an object or array can't become a key */
|
|
||||||
if (t->type == JSMN_OBJECT) {
|
|
||||||
return JSMN_ERROR_INVAL;
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
t->size++;
|
|
||||||
#ifdef JSMN_PARENT_LINKS
|
|
||||||
token->parent = parser->toksuper;
|
|
||||||
#endif
|
|
||||||
}
|
|
||||||
token->type = (c == '{' ? JSMN_OBJECT : JSMN_ARRAY);
|
|
||||||
token->start = parser->pos;
|
|
||||||
parser->toksuper = parser->toknext - 1;
|
|
||||||
break;
|
|
||||||
case '}':
|
|
||||||
case ']':
|
|
||||||
if (tokens == NULL) {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
type = (c == '}' ? JSMN_OBJECT : JSMN_ARRAY);
|
|
||||||
#ifdef JSMN_PARENT_LINKS
|
|
||||||
if (parser->toknext < 1) {
|
|
||||||
return JSMN_ERROR_INVAL;
|
|
||||||
}
|
|
||||||
token = &tokens[parser->toknext - 1];
|
|
||||||
for (;;) {
|
|
||||||
if (token->start != -1 && token->end == -1) {
|
|
||||||
if (token->type != type) {
|
|
||||||
return JSMN_ERROR_INVAL;
|
|
||||||
}
|
|
||||||
token->end = parser->pos + 1;
|
|
||||||
parser->toksuper = token->parent;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
if (token->parent == -1) {
|
|
||||||
if (token->type != type || parser->toksuper == -1) {
|
|
||||||
return JSMN_ERROR_INVAL;
|
|
||||||
}
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
token = &tokens[token->parent];
|
|
||||||
}
|
|
||||||
#else
|
|
||||||
for (i = parser->toknext - 1; i >= 0; i--) {
|
|
||||||
token = &tokens[i];
|
|
||||||
if (token->start != -1 && token->end == -1) {
|
|
||||||
if (token->type != type) {
|
|
||||||
return JSMN_ERROR_INVAL;
|
|
||||||
}
|
|
||||||
parser->toksuper = -1;
|
|
||||||
token->end = parser->pos + 1;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
/* Error if unmatched closing bracket */
|
|
||||||
if (i == -1) {
|
|
||||||
return JSMN_ERROR_INVAL;
|
|
||||||
}
|
|
||||||
for (; i >= 0; i--) {
|
|
||||||
token = &tokens[i];
|
|
||||||
if (token->start != -1 && token->end == -1) {
|
|
||||||
parser->toksuper = i;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
break;
|
|
||||||
case '\"':
|
|
||||||
r = jsmn_parse_string(parser, js, len, tokens, num_tokens);
|
|
||||||
if (r < 0) {
|
|
||||||
return r;
|
|
||||||
}
|
|
||||||
count++;
|
|
||||||
if (parser->toksuper != -1 && tokens != NULL) {
|
|
||||||
tokens[parser->toksuper].size++;
|
|
||||||
}
|
|
||||||
break;
|
|
||||||
case '\t':
|
|
||||||
case '\r':
|
|
||||||
case '\n':
|
|
||||||
case ' ':
|
|
||||||
break;
|
|
||||||
case ':':
|
|
||||||
parser->toksuper = parser->toknext - 1;
|
|
||||||
break;
|
|
||||||
case ',':
|
|
||||||
if (tokens != NULL && parser->toksuper != -1 &&
|
|
||||||
tokens[parser->toksuper].type != JSMN_ARRAY &&
|
|
||||||
tokens[parser->toksuper].type != JSMN_OBJECT) {
|
|
||||||
#ifdef JSMN_PARENT_LINKS
|
|
||||||
parser->toksuper = tokens[parser->toksuper].parent;
|
|
||||||
#else
|
|
||||||
for (i = parser->toknext - 1; i >= 0; i--) {
|
|
||||||
if (tokens[i].type == JSMN_ARRAY || tokens[i].type == JSMN_OBJECT) {
|
|
||||||
if (tokens[i].start != -1 && tokens[i].end == -1) {
|
|
||||||
parser->toksuper = i;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
}
|
|
||||||
break;
|
|
||||||
#ifdef JSMN_STRICT
|
|
||||||
/* In strict mode primitives are: numbers and booleans */
|
|
||||||
case '-':
|
|
||||||
case '0':
|
|
||||||
case '1':
|
|
||||||
case '2':
|
|
||||||
case '3':
|
|
||||||
case '4':
|
|
||||||
case '5':
|
|
||||||
case '6':
|
|
||||||
case '7':
|
|
||||||
case '8':
|
|
||||||
case '9':
|
|
||||||
case 't':
|
|
||||||
case 'f':
|
|
||||||
case 'n':
|
|
||||||
/* And they must not be keys of the object */
|
|
||||||
if (tokens != NULL && parser->toksuper != -1) {
|
|
||||||
const jsmntok_t *t = &tokens[parser->toksuper];
|
|
||||||
if (t->type == JSMN_OBJECT ||
|
|
||||||
(t->type == JSMN_STRING && t->size != 0)) {
|
|
||||||
return JSMN_ERROR_INVAL;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
#else
|
|
||||||
/* In non-strict mode every unquoted value is a primitive */
|
|
||||||
default:
|
|
||||||
#endif
|
|
||||||
r = jsmn_parse_primitive(parser, js, len, tokens, num_tokens);
|
|
||||||
if (r < 0) {
|
|
||||||
return r;
|
|
||||||
}
|
|
||||||
count++;
|
|
||||||
if (parser->toksuper != -1 && tokens != NULL) {
|
|
||||||
tokens[parser->toksuper].size++;
|
|
||||||
}
|
|
||||||
break;
|
|
||||||
|
|
||||||
#ifdef JSMN_STRICT
|
|
||||||
/* Unexpected char in strict mode */
|
|
||||||
default:
|
|
||||||
return JSMN_ERROR_INVAL;
|
|
||||||
#endif
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (tokens != NULL) {
|
|
||||||
for (i = parser->toknext - 1; i >= 0; i--) {
|
|
||||||
/* Unmatched opened object or array */
|
|
||||||
if (tokens[i].start != -1 && tokens[i].end == -1) {
|
|
||||||
return JSMN_ERROR_PART;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return count;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates a new parser based over a given buffer with an array of tokens
|
|
||||||
* available.
|
|
||||||
*/
|
|
||||||
JSMN_API void jsmn_init(jsmn_parser *parser) {
|
|
||||||
parser->pos = 0;
|
|
||||||
parser->toknext = 0;
|
|
||||||
parser->toksuper = -1;
|
|
||||||
}
|
|
||||||
|
|
||||||
#endif /* JSMN_HEADER */
|
|
||||||
|
|
||||||
#ifdef __cplusplus
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#endif /* JSMN_H */
|
|
@ -1,304 +0,0 @@
|
|||||||
/*
|
|
||||||
* =======================================================================================
|
|
||||||
*
|
|
||||||
* Author: Jan Eitzinger (je), jan.eitzinger@fau.de
|
|
||||||
* Copyright (c) 2019 RRZE, University Erlangen-Nuremberg
|
|
||||||
*
|
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
* of this software and associated documentation files (the "Software"), to deal
|
|
||||||
* in the Software without restriction, including without limitation the rights
|
|
||||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
* copies of the Software, and to permit persons to whom the Software is
|
|
||||||
* furnished to do so, subject to the following conditions:
|
|
||||||
*
|
|
||||||
* The above copyright notice and this permission notice shall be included in all
|
|
||||||
* copies or substantial portions of the Software.
|
|
||||||
*
|
|
||||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
* SOFTWARE.
|
|
||||||
*
|
|
||||||
* =======================================================================================
|
|
||||||
*/
|
|
||||||
|
|
||||||
#include <stdlib.h>
|
|
||||||
#include <stdio.h>
|
|
||||||
#include <unistd.h>
|
|
||||||
#include <limits.h>
|
|
||||||
#include <float.h>
|
|
||||||
#include <string.h>
|
|
||||||
#include <sys/stat.h>
|
|
||||||
#include <sys/mman.h>
|
|
||||||
#include <fcntl.h>
|
|
||||||
|
|
||||||
#ifdef _OPENMP
|
|
||||||
#include <omp.h>
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#include "jsmn.h"
|
|
||||||
#include "timing.h"
|
|
||||||
#include "allocate.h"
|
|
||||||
#include "affinity.h"
|
|
||||||
|
|
||||||
#define HLINE "----------------------------------------------------------------------------\n"
|
|
||||||
|
|
||||||
#ifndef MIN
|
|
||||||
#define MIN(x,y) ((x)<(y)?(x):(y))
|
|
||||||
#endif
|
|
||||||
#ifndef MAX
|
|
||||||
#define MAX(x,y) ((x)>(y)?(x):(y))
|
|
||||||
#endif
|
|
||||||
#ifndef ABS
|
|
||||||
#define ABS(a) ((a) >= 0 ? (a) : -(a))
|
|
||||||
#endif
|
|
||||||
|
|
||||||
char* json_fetch(char* filepath)
|
|
||||||
{
|
|
||||||
int fd = open(filepath, O_RDONLY);
|
|
||||||
if ( fd == -1) {
|
|
||||||
perror("Cannot open input file"); exit(1);
|
|
||||||
}
|
|
||||||
int len = lseek(fd, 0, SEEK_END);
|
|
||||||
void *data = mmap(0, len, PROT_READ, MAP_PRIVATE, fd, 0);
|
|
||||||
|
|
||||||
return (char*) data;
|
|
||||||
}
|
|
||||||
|
|
||||||
jsmntok_t * json_tokenise(char *js)
|
|
||||||
{
|
|
||||||
jsmn_parser parser;
|
|
||||||
jsmn_init(&parser);
|
|
||||||
|
|
||||||
unsigned int n = 4096;
|
|
||||||
jsmntok_t *tokens = malloc(sizeof(jsmntok_t) * n);
|
|
||||||
|
|
||||||
int ret = jsmn_parse(&parser, js, strlen(js), tokens, n);
|
|
||||||
|
|
||||||
while (ret == JSMN_ERROR_NOMEM)
|
|
||||||
{
|
|
||||||
n = n * 2 + 1;
|
|
||||||
tokens = realloc(tokens, sizeof(jsmntok_t) * n);
|
|
||||||
ret = jsmn_parse(&parser, js, strlen(js), tokens, n);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (ret == JSMN_ERROR_INVAL) {
|
|
||||||
printf("jsmn_parse: invalid JSON string");
|
|
||||||
exit(EXIT_SUCCESS);
|
|
||||||
}
|
|
||||||
if (ret == JSMN_ERROR_PART) {
|
|
||||||
printf("jsmn_parse: truncated JSON string");
|
|
||||||
exit(EXIT_SUCCESS);
|
|
||||||
}
|
|
||||||
|
|
||||||
return tokens;
|
|
||||||
}
|
|
||||||
|
|
||||||
int json_token_streq(char* js, jsmntok_t* t, char* s)
|
|
||||||
{
|
|
||||||
return (strncmp(js + t->start, s, t->end - t->start) == 0
|
|
||||||
&& strlen(s) == (size_t) (t->end - t->start));
|
|
||||||
}
|
|
||||||
|
|
||||||
char* json_token_tostr(char* js, jsmntok_t* t)
|
|
||||||
{
|
|
||||||
js[t->end] = '\0';
|
|
||||||
return js + t->start;
|
|
||||||
}
|
|
||||||
|
|
||||||
void print_token(jsmntok_t* t)
|
|
||||||
{
|
|
||||||
char* type;
|
|
||||||
|
|
||||||
switch ( t->type ){
|
|
||||||
case JSMN_STRING:
|
|
||||||
type = "STRING";
|
|
||||||
break;
|
|
||||||
case JSMN_OBJECT:
|
|
||||||
type = "OBJECT";
|
|
||||||
break;
|
|
||||||
case JSMN_ARRAY:
|
|
||||||
type = "ARRAY";
|
|
||||||
break;
|
|
||||||
case JSMN_PRIMITIVE:
|
|
||||||
type = "PRIMITIVE";
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
printf("%s: S%d E%d C%d\n", type, t->start, t->end, t->size);
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
int main (int argc, char** argv)
|
|
||||||
{
|
|
||||||
char* filepath;
|
|
||||||
|
|
||||||
if ( argc > 1 ) {
|
|
||||||
filepath = argv[1];
|
|
||||||
} else {
|
|
||||||
printf("Usage: %s <filepath>\n",argv[0]);
|
|
||||||
exit(EXIT_SUCCESS);
|
|
||||||
}
|
|
||||||
|
|
||||||
char* js = json_fetch(filepath);
|
|
||||||
jsmntok_t* tokens = json_tokenise(js);
|
|
||||||
|
|
||||||
typedef enum {
|
|
||||||
START,
|
|
||||||
METRIC, METRIC_OBJECT,
|
|
||||||
SERIES, NODE_ARRAY,
|
|
||||||
NODE_OBJECT,
|
|
||||||
DATA,
|
|
||||||
SKIP,
|
|
||||||
STOP
|
|
||||||
} parse_state;
|
|
||||||
|
|
||||||
parse_state state = START;
size_t object_tokens = 0;
size_t node_tokens = 0;
size_t skip_tokens = 0;
size_t metrics = 0;
size_t nodes = 0;
size_t elements = 0;
|
|
||||||
|
|
||||||
for (size_t i = 0, j = 1; j > 0; i++, j--)
|
|
||||||
{
|
|
||||||
jsmntok_t* t = &tokens[i];
|
|
||||||
|
|
||||||
if (t->type == JSMN_ARRAY || t->type == JSMN_OBJECT){
|
|
||||||
j += t->size;
|
|
||||||
}
|
|
||||||
print_token(t);
|
|
||||||
|
|
||||||
switch (state)
|
|
||||||
{
|
|
||||||
case START:
|
|
||||||
if (t->type != JSMN_OBJECT){
|
|
||||||
printf("Invalid response: root element must be object.");
|
|
||||||
exit(EXIT_SUCCESS);
|
|
||||||
}
|
|
||||||
|
|
||||||
state = METRIC;
|
|
||||||
break;
|
|
||||||
|
|
||||||
case METRIC:
|
|
||||||
if (t->type != JSMN_STRING){
|
|
||||||
printf("Invalid response: metric key must be a string.");
|
|
||||||
exit(EXIT_SUCCESS);
|
|
||||||
}
|
|
||||||
|
|
||||||
printf("METRIC\n");
|
|
||||||
state = METRIC_OBJECT;
|
|
||||||
object_tokens = t->size;
|
|
||||||
break;
|
|
||||||
|
|
||||||
case METRIC_OBJECT:
|
|
||||||
printf("METRIC OBJECT %lu\n", object_tokens);
|
|
||||||
object_tokens--;
|
|
||||||
|
|
||||||
if (t->type == JSMN_STRING && json_token_streq(js, t, "series")) {
|
|
||||||
state = SERIES;
|
|
||||||
} else {
|
|
||||||
state = SKIP;
|
|
||||||
if (t->type == JSMN_ARRAY || t->type == JSMN_OBJECT) {
|
|
||||||
skip_tokens = t->size;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Last object value
|
|
||||||
if (object_tokens == 0) {
|
|
||||||
state = METRIC;
|
|
||||||
}
|
|
||||||
|
|
||||||
break;
|
|
||||||
|
|
||||||
case SKIP:
|
|
||||||
skip_tokens--;
|
|
||||||
|
|
||||||
printf("SKIP\n");
|
|
||||||
if (t->type == JSMN_ARRAY || t->type == JSMN_OBJECT) {
|
|
||||||
skip_tokens += t->size;
|
|
||||||
}
|
|
||||||
|
|
||||||
break;
|
|
||||||
|
|
||||||
case SERIES:
|
|
||||||
if (t->type != JSMN_ARRAY) {
|
|
||||||
printf("Unknown series value: expected array.");
|
|
||||||
}
|
|
||||||
|
|
||||||
printf("SERIES\n");
|
|
||||||
nodes = t->size;
|
|
||||||
state = NODE_ARRAY;
|
|
||||||
|
|
||||||
if (nodes == 0) {
|
|
||||||
state = METRIC_OBJECT;
|
|
||||||
}
|
|
||||||
|
|
||||||
break;
|
|
||||||
|
|
||||||
case NODE_ARRAY:
|
|
||||||
nodes--;
|
|
||||||
|
|
||||||
printf("NODE_ARRAY\n");
|
|
||||||
node_tokens = t->size;
|
|
||||||
state = NODE_OBJECT;
|
|
||||||
|
|
||||||
// Last node object
|
|
||||||
if (nodes == 0) {
|
|
||||||
state = STOP;
|
|
||||||
}
|
|
||||||
|
|
||||||
break;
|
|
||||||
|
|
||||||
case NODE_OBJECT:
|
|
||||||
node_tokens--;
|
|
||||||
|
|
||||||
printf("NODE_OBJECT\n");
|
|
||||||
// Keys are odd-numbered tokens within the object
|
|
||||||
if (node_tokens % 2 == 1)
|
|
||||||
{
|
|
||||||
if (t->type == JSMN_STRING && json_token_streq(js, t, "data")) {
|
|
||||||
state = DATA;
|
|
||||||
} else {
|
|
||||||
state = SKIP;
|
|
||||||
if (t->type == JSMN_ARRAY || t->type == JSMN_OBJECT) {
|
|
||||||
skip_tokens = t->size;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Last object value
|
|
||||||
if (node_tokens == 0) {
|
|
||||||
state = NODE_ARRAY;
|
|
||||||
}
|
|
||||||
|
|
||||||
break;
|
|
||||||
|
|
||||||
case DATA:
|
|
||||||
if (t->type != JSMN_ARRAY && t->type != JSMN_STRING) {
|
|
||||||
printf("Unknown data value: expected string or array.");
|
|
||||||
}
|
|
||||||
if (t->type == JSMN_ARRAY) {
|
|
||||||
elements = t->size;
|
|
||||||
printf("%lu elements\n", elements );
|
|
||||||
state = SKIP;
|
|
||||||
skip_tokens = elements;
|
|
||||||
}
|
|
||||||
|
|
||||||
break;
|
|
||||||
|
|
||||||
case STOP:
|
|
||||||
// Just consume the tokens
|
|
||||||
break;
|
|
||||||
|
|
||||||
default:
|
|
||||||
printf("Invalid state %u", state);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
free(tokens);
|
|
||||||
return (EXIT_SUCCESS);
|
|
||||||
}
|
|
@ -1,48 +0,0 @@
|
|||||||
/*
|
|
||||||
* =======================================================================================
|
|
||||||
*
|
|
||||||
* Author: Jan Eitzinger (je), jan.eitzinger@fau.de
|
|
||||||
* Copyright (c) 2019 RRZE, University Erlangen-Nuremberg
|
|
||||||
*
|
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
* of this software and associated documentation files (the "Software"), to deal
|
|
||||||
* in the Software without restriction, including without limitation the rights
|
|
||||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
* copies of the Software, and to permit persons to whom the Software is
|
|
||||||
* furnished to do so, subject to the following conditions:
|
|
||||||
*
|
|
||||||
* The above copyright notice and this permission notice shall be included in all
|
|
||||||
* copies or substantial portions of the Software.
|
|
||||||
*
|
|
||||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
* SOFTWARE.
|
|
||||||
*
|
|
||||||
* =======================================================================================
|
|
||||||
*/
|
|
||||||
|
|
||||||
#include <stdlib.h>
#include <time.h>

double getTimeStamp()
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (double)ts.tv_sec + (double)ts.tv_nsec * 1.e-9;
}

double getTimeResolution()
{
    struct timespec ts;
    clock_getres(CLOCK_MONOTONIC, &ts);
    return (double)ts.tv_sec + (double)ts.tv_nsec * 1.e-9;
}

double getTimeStamp_()
{
    return getTimeStamp();
}
@ -1,35 +0,0 @@
|
|||||||
/*
|
|
||||||
* =======================================================================================
|
|
||||||
*
|
|
||||||
* Author: Jan Eitzinger (je), jan.eitzinger@fau.de
|
|
||||||
* Copyright (c) 2019 RRZE, University Erlangen-Nuremberg
|
|
||||||
*
|
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
* of this software and associated documentation files (the "Software"), to deal
|
|
||||||
* in the Software without restriction, including without limitation the rights
|
|
||||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
* copies of the Software, and to permit persons to whom the Software is
|
|
||||||
* furnished to do so, subject to the following conditions:
|
|
||||||
*
|
|
||||||
* The above copyright notice and this permission notice shall be included in all
|
|
||||||
* copies or substantial portions of the Software.
|
|
||||||
*
|
|
||||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
* SOFTWARE.
|
|
||||||
*
|
|
||||||
* =======================================================================================
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef __TIMING_H_
#define __TIMING_H_

extern double getTimeStamp();
extern double getTimeResolution();
extern double getTimeStamp_();

#endif
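The timing helpers above return seconds from the monotonic clock as plain doubles. An illustrative sketch of timing a code region with them (the summation loop is only a placeholder workload):

```c
/* Illustrative only: time a region with the monotonic-clock helpers above. */
#include <stdio.h>
#include "timing.h"

int main(void)
{
    double sum = 0.0;
    double start = getTimeStamp();   /* seconds since an arbitrary monotonic origin */

    for (int i = 0; i < 100000000; i++) {
        sum += (double) i;
    }

    double end = getTimeStamp();
    printf("sum = %e, elapsed = %.6f s (clock resolution %.2e s)\n",
           sum, end - start, getTimeResolution());
    return 0;
}
```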
@ -1,50 +0,0 @@
|
|||||||
{
|
|
||||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
|
||||||
"title": "HPC Cluster description",
"description": "Metadata information of an HPC cluster",
|
|
||||||
"type": "object",
|
|
||||||
"properties":{
|
|
||||||
"cluster_id": {
|
|
||||||
"description": "The unique identifier of a cluster",
|
|
||||||
"type": "string"
|
|
||||||
},
|
|
||||||
"processor_type": {
|
|
||||||
"description": "Processor type",
|
|
||||||
"type": "string"
|
|
||||||
},
|
|
||||||
"sockets_per_node": {
|
|
||||||
"description": "Number of sockets per node",
|
|
||||||
"type": "integer"
|
|
||||||
},
|
|
||||||
"cores_per_socket": {
|
|
||||||
"description": "Number of cores per socket",
|
|
||||||
"type": "integer"
|
|
||||||
},
|
|
||||||
"threads_per_core": {
|
|
||||||
"description": "Number of SMT threads per core",
|
|
||||||
"type": "integer"
|
|
||||||
},
|
|
||||||
"flop_rate_scalar": {
"description": "Theoretical node peak flop rate for scalar code in GFlops/s",
|
|
||||||
"type": "integer"
|
|
||||||
},
|
|
||||||
"flop_rate_simd": {
"description": "Theoretical node peak flop rate for SIMD code in GFlops/s",
|
|
||||||
"type": "integer"
|
|
||||||
},
|
|
||||||
"memory_bandwidth": {
"description": "Theoretical node peak memory bandwidth in GB/s",
|
|
||||||
"type": "integer"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"required":[
|
|
||||||
"cluster_id",
|
|
||||||
"processor_type",
|
|
||||||
"sockets_per_node",
|
|
||||||
"cores_per_socket",
|
|
||||||
"threads_per_core",
|
|
||||||
"flop_rate_scalar",
|
|
||||||
"flop_rate_simd",
|
|
||||||
"memory_bandwidth"
|
|
||||||
]
|
|
||||||
}
|
|
@ -1,135 +0,0 @@
|
|||||||
{
|
|
||||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
|
||||||
"title": "Job metric data",
"description": "Metadata information of an HPC job",
|
|
||||||
"type": "object",
|
|
||||||
"properties": {
|
|
||||||
"mem_used": {
|
|
||||||
"description": "Memory capacity used (required)",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-metric-data.schema.json"
|
|
||||||
},
|
|
||||||
"flops_any": {
|
|
||||||
"description": "Total flop rate with DP flops scaled up (required)",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-metric-data.schema.json"
|
|
||||||
},
|
|
||||||
"mem_bw": {
|
|
||||||
"description": "Main memory bandwidth (required)",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-metric-data.schema.json"
|
|
||||||
},
|
|
||||||
"net_bw": {
|
|
||||||
"description": "Total fast interconnect network bandwidth (required)",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-metric-data.schema.json"
|
|
||||||
},
|
|
||||||
"file_bw": {
|
|
||||||
"description": "Total file IO bandwidth (required)",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-metric-data.schema.json"
|
|
||||||
},
|
|
||||||
"ipc": {
|
|
||||||
"description": "Instructions executed per cycle",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-metric-data.schema.json"
|
|
||||||
},
|
|
||||||
"cpu_used": {
|
|
||||||
"description": "CPU core utilization",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-metric-data.schema.json"
|
|
||||||
},
|
|
||||||
"flops_dp": {
|
|
||||||
"description": "Double precision flop rate",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-metric-data.schema.json"
|
|
||||||
},
|
|
||||||
"flops_sp": {
"description": "Single precision flop rate",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-metric-data.schema.json"
|
|
||||||
},
|
|
||||||
"rapl_power": {
|
|
||||||
"description": "CPU power consumption",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-metric-data.schema.json"
|
|
||||||
},
|
|
||||||
"gpu_used": {
|
|
||||||
"description": "GPU utilization",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-metric-data.schema.json"
|
|
||||||
},
|
|
||||||
"gpu_mem_used": {
|
|
||||||
"description": "GPU memory capacity used",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-metric-data.schema.json"
|
|
||||||
},
|
|
||||||
"gpu_power": {
|
|
||||||
"description": "GPU power consumption",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-metric-data.schema.json"
|
|
||||||
},
|
|
||||||
"clock": {
|
|
||||||
"description": "Average core frequency",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-metric-data.schema.json"
|
|
||||||
},
|
|
||||||
"eth_read_bw": {
|
|
||||||
"description": "Ethernet read bandwidth",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-metric-data.schema.json"
|
|
||||||
},
|
|
||||||
"eth_write_bw": {
|
|
||||||
"description": "Ethernet write bandwidth",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-metric-data.schema.json"
|
|
||||||
},
|
|
||||||
"lustre_read_bw": {
|
|
||||||
"description": "Lustre read bandwidth",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-metric-data.schema.json"
|
|
||||||
},
|
|
||||||
"lustre_write_bw": {
|
|
||||||
"description": "Lustre write bandwidth",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-metric-data.schema.json"
|
|
||||||
},
|
|
||||||
"lustre_read_req": {
|
|
||||||
"description": "Lustre read requests",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-metric-data.schema.json"
|
|
||||||
},
|
|
||||||
"lustre_write_req": {
|
|
||||||
"description": "Lustre write requests",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-metric-data.schema.json"
|
|
||||||
},
|
|
||||||
"lustre_inodes": {
|
|
||||||
"description": "Lustre inodes used",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-metric-data.schema.json"
|
|
||||||
},
|
|
||||||
"lustre_accesses": {
|
|
||||||
"description": "Lustre open and close",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-metric-data.schema.json"
|
|
||||||
},
|
|
||||||
"lustre_fsync": {
|
|
||||||
"description": "Lustre fsync",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-metric-data.schema.json"
|
|
||||||
},
|
|
||||||
"lustre_create": {
|
|
||||||
"description": "Lustre create",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-metric-data.schema.json"
|
|
||||||
},
|
|
||||||
"lustre_open": {
|
|
||||||
"description": "Lustre open",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-metric-data.schema.json"
|
|
||||||
},
|
|
||||||
"lustre_close": {
|
|
||||||
"description": "Lustre close",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-metric-data.schema.json"
|
|
||||||
},
|
|
||||||
"lustre_seek": {
|
|
||||||
"description": "Lustre seek",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-metric-data.schema.json"
|
|
||||||
},
|
|
||||||
"ib_read_bw": {
|
|
||||||
"description": "Infiniband read bandwidth",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-metric-data.schema.json"
|
|
||||||
},
|
|
||||||
"ib_write_bw": {
|
|
||||||
"description": "Infiniband write bandwidth",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-metric-data.schema.json"
|
|
||||||
},
|
|
||||||
"ib_congestion": {
|
|
||||||
"description": "Infiniband congestion",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-metric-data.schema.json"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"required": [
|
|
||||||
"mem_used",
|
|
||||||
"flops_any",
|
|
||||||
"mem_bw",
|
|
||||||
"net_bw",
|
|
||||||
"file_bw"
|
|
||||||
]
|
|
||||||
}
|
|
@ -1,238 +0,0 @@
|
|||||||
{
|
|
||||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
|
||||||
"title": "Job meta data",
"description": "Metadata information of an HPC job",
|
|
||||||
"type": "object",
|
|
||||||
"properties": {
|
|
||||||
"job_id": {
|
|
||||||
"description": "The unique identifier of a job",
|
|
||||||
"type": "string"
|
|
||||||
},
|
|
||||||
"user_id": {
|
|
||||||
"description": "The unique identifier of a user",
|
|
||||||
"type": "string"
|
|
||||||
},
|
|
||||||
"project_id": {
|
|
||||||
"description": "The unique identifier of a project",
|
|
||||||
"type": "string"
|
|
||||||
},
|
|
||||||
"cluster_id": {
|
|
||||||
"description": "The unique identifier of a cluster",
|
|
||||||
"type": "string"
|
|
||||||
},
|
|
||||||
"num_nodes": {
|
|
||||||
"description": "Number of nodes used",
|
|
||||||
"type": "integer",
|
|
||||||
"exclusiveMinimum": 0
|
|
||||||
},
|
|
||||||
"exclusive": {
|
|
||||||
"description": "Job uses only exclusive nodes",
|
|
||||||
"type": "boolean"
|
|
||||||
},
|
|
||||||
"walltime": {
|
|
||||||
"description": "Requested walltime of job in seconds",
|
|
||||||
"type": "integer",
|
|
||||||
"exclusiveMinimum": 0
|
|
||||||
},
|
|
||||||
"job_state": {
|
|
||||||
"description": "Final state of job",
|
|
||||||
"type": "string",
|
|
||||||
"enum": [
|
|
||||||
"completed",
|
|
||||||
"failed",
|
|
||||||
"canceled",
|
|
||||||
"timeout"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"start_time": {
|
|
||||||
"description": "Start epoch time stamp in seconds",
|
|
||||||
"type": "integer",
|
|
||||||
"exclusiveMinimum": 0
|
|
||||||
},
|
|
||||||
"stop_time": {
|
|
||||||
"description": "Stop epoch time stamp in seconds",
|
|
||||||
"type": "integer",
|
|
||||||
"exclusiveMinimum": 0
|
|
||||||
},
|
|
||||||
"duration": {
|
|
||||||
"description": "Duration of job in seconds",
|
|
||||||
"type": "integer",
|
|
||||||
"exclusiveMinimum": 0
|
|
||||||
},
|
|
||||||
"nodes": {
|
|
||||||
"description": "List of nodes",
|
|
||||||
"type": "array",
|
|
||||||
"items": {
|
|
||||||
"type": "string"
|
|
||||||
},
|
|
||||||
"minItems": 1,
|
|
||||||
"uniqueItems": true
|
|
||||||
},
|
|
||||||
"tags": {
|
|
||||||
"description": "List of tags",
|
|
||||||
"type": "array",
|
|
||||||
"items": {
|
|
||||||
"type": "object",
|
|
||||||
"properties": {
|
|
||||||
"name": {
|
|
||||||
"type": "string"
|
|
||||||
},
|
|
||||||
"type": {
|
|
||||||
"type": "string"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"required": [
|
|
||||||
"name",
|
|
||||||
"type"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"uniqueItems": true
|
|
||||||
},
|
|
||||||
"statistics": {
|
|
||||||
"description": "Job statistic data",
|
|
||||||
"type": "object",
|
|
||||||
"properties": {
|
|
||||||
"mem_used": {
|
|
||||||
"description": "Memory capacity used (required)",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-statistic.schema.json"
|
|
||||||
},
|
|
||||||
"flops_any": {
|
|
||||||
"description": "Total flop rate with DP flops scaled up (required)",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-statistic.schema.json"
|
|
||||||
},
|
|
||||||
"mem_bw": {
|
|
||||||
"description": "Main memory bandwidth (required)",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-statistic.schema.json"
|
|
||||||
},
|
|
||||||
"net_bw": {
|
|
||||||
"description": "Total fast interconnect network bandwidth (required)",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-statistic.schema.json"
|
|
||||||
},
|
|
||||||
"file_bw": {
|
|
||||||
"description": "Total file IO bandwidth (required)",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-statistic.schema.json"
|
|
||||||
},
|
|
||||||
"ipc": {
|
|
||||||
"description": "Instructions executed per cycle",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-statistic.schema.json"
|
|
||||||
},
|
|
||||||
"cpu_used": {
|
|
||||||
"description": "CPU core utilization",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-statistic.schema.json"
|
|
||||||
},
|
|
||||||
"flops_dp": {
|
|
||||||
"description": "Double precision flop rate",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-statistic.schema.json"
|
|
||||||
},
|
|
||||||
"flops_sp": {
"description": "Single precision flop rate",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-statistic.schema.json"
|
|
||||||
},
|
|
||||||
"rapl_power": {
|
|
||||||
"description": "CPU power consumption",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-statistic.schema.json"
|
|
||||||
},
|
|
||||||
"gpu_used": {
|
|
||||||
"description": "GPU utilization",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-statistic.schema.json"
|
|
||||||
},
|
|
||||||
"gpu_mem_used": {
|
|
||||||
"description": "GPU memory capacity used",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-statistic.schema.json"
|
|
||||||
},
|
|
||||||
"gpu_power": {
|
|
||||||
"description": "GPU power consumption",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-statistic.schema.json"
|
|
||||||
},
|
|
||||||
"clock": {
|
|
||||||
"description": "Average core frequency",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-statistic.schema.json"
|
|
||||||
},
|
|
||||||
"eth_read_bw": {
|
|
||||||
"description": "Ethernet read bandwidth",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-statistic.schema.json"
|
|
||||||
},
|
|
||||||
"eth_write_bw": {
|
|
||||||
"description": "Ethernet write bandwidth",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-statistic.schema.json"
|
|
||||||
},
|
|
||||||
"lustre_read_bw": {
|
|
||||||
"description": "Lustre read bandwidth",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-statistic.schema.json"
|
|
||||||
},
|
|
||||||
"lustre_write_bw": {
|
|
||||||
"description": "Lustre write bandwidth",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-statistic.schema.json"
|
|
||||||
},
|
|
||||||
"lustre_read_req": {
|
|
||||||
"description": "Lustre read requests",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-statistic.schema.json"
|
|
||||||
},
|
|
||||||
"lustre_write_req": {
|
|
||||||
"description": "Lustre write requests",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-statistic.schema.json"
|
|
||||||
},
|
|
||||||
"lustre_inodes": {
|
|
||||||
"description": "Lustre inodes used",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-statistic.schema.json"
|
|
||||||
},
|
|
||||||
"lustre_accesses": {
|
|
||||||
"description": "Lustre open and close",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-statistic.schema.json"
|
|
||||||
},
|
|
||||||
"lustre_fsync": {
|
|
||||||
"description": "Lustre fsync",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-statistic.schema.json"
|
|
||||||
},
|
|
||||||
"lustre_create": {
|
|
||||||
"description": "Lustre create",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-statistic.schema.json"
|
|
||||||
},
|
|
||||||
"lustre_open": {
|
|
||||||
"description": "Lustre open",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-statistic.schema.json"
|
|
||||||
},
|
|
||||||
"lustre_close": {
|
|
||||||
"description": "Lustre close",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-statistic.schema.json"
|
|
||||||
},
|
|
||||||
"lustre_seek": {
|
|
||||||
"description": "Lustre seek",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-statistic.schema.json"
|
|
||||||
},
|
|
||||||
"ib_read_bw": {
|
|
||||||
"description": "Infiniband read bandwidth",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-statistic.schema.json"
|
|
||||||
},
|
|
||||||
"ib_write_bw": {
|
|
||||||
"description": "Infiniband write bandwidth",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-statistic.schema.json"
|
|
||||||
},
|
|
||||||
"ib_congestion": {
|
|
||||||
"description": "Infiniband congestion",
|
|
||||||
"#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-statistic.schema.json"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"required": [
|
|
||||||
"mem_used",
|
|
||||||
"flops_any",
|
|
||||||
"mem_bw",
|
|
||||||
"net_bw",
|
|
||||||
"file_bw"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"required": [
|
|
||||||
"job_id",
|
|
||||||
"user_id",
|
|
||||||
"project_id",
|
|
||||||
"cluster_id",
|
|
||||||
"num_nodes",
|
|
||||||
"start_time",
|
|
||||||
"stop_time",
|
|
||||||
"duration",
|
|
||||||
"nodes",
|
|
||||||
"tags",
|
|
||||||
"statistics"
|
|
||||||
]
|
|
||||||
}
|
|
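For illustration, a `statistics` block that satisfies the required keys above could look like the following sketch. The metric values are invented; each entry follows the referenced job-statistic sub-schema with `unit`, `avg`, `min`, and `max`:

```
{
  "mem_used":  { "unit": { "base_unit": "B",   "prefix": "G" }, "avg": 42.1,  "min": 38.0, "max": 47.3 },
  "flops_any": { "unit": { "base_unit": "F/s", "prefix": "G" }, "avg": 12.5,  "min": 0.0,  "max": 98.2 },
  "mem_bw":    { "unit": { "base_unit": "B/s", "prefix": "G" }, "avg": 55.0,  "min": 10.4, "max": 78.1 },
  "net_bw":    { "unit": { "base_unit": "B/s", "prefix": "G" }, "avg": 1.2,   "min": 0.1,  "max": 3.4 },
  "file_bw":   { "unit": { "base_unit": "B/s", "prefix": "M" }, "avg": 250.0, "min": 0.0,  "max": 800.0 }
}
```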
@ -1,83 +0,0 @@
{
  "$schema": "http://json-schema.org/draft-07/schema#",
  "title": "Job metric data",
  "description": "Metric data of a HPC job",
  "type": "object",
  "properties": {
    "unit": {
      "description": "",
      "type": "string"
    },
    "scope": {
      "description": "",
      "type": "string",
      "enum": [ "node", "cpu", "socket" ]
    },
    "timestep": {
      "description": "Measurement interval in seconds",
      "type": "integer"
    },
    "series": {
      "description": "",
      "type": "array",
      "items": {
        "type": "object",
        "properties": {
          "node_id": { "type": "string" },
          "id": { "type": "integer" },
          "statistics": {
            "type": "object",
            "properties": {
              "avg": { "description": "Series average", "type": "number", "minimum": 0 },
              "min": { "description": "Series minimum", "type": "number", "minimum": 0 },
              "max": { "description": "Series maximum", "type": "number", "minimum": 0 }
            },
            "required": [ "avg", "min", "max" ]
          },
          "data": {
            "type": "array",
            "items": { "type": "number", "minimum": 0 },
            "minItems": 1
          }
        },
        "required": [ "node_id", "data" ]
      }
    }
  },
  "required": [ "unit", "scope", "timestep", "series" ]
}
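A matching metric-data document might read as follows; the node IDs, values, and number of samples are purely illustrative. Note that `unit` is a plain string in this schema, unlike the job statistics schema below, which references the unit sub-schema:

```
{
  "unit": "GB/s",
  "scope": "node",
  "timestep": 60,
  "series": [
    { "node_id": "e0101", "statistics": { "avg": 41.3, "min": 10.2, "max": 55.3 }, "data": [ 40.1, 44.0, 39.8 ] },
    { "node_id": "e0102", "statistics": { "avg": 38.7, "min":  9.8, "max": 51.0 }, "data": [ 37.5, 39.9, 38.7 ] }
  ]
}
```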
@ -1,33 +0,0 @@
{
  "$schema": "http://json-schema.org/draft-07/schema#",
  "title": "Job statistics",
  "description": "Format specification for job metric statistics",
  "type": "object",
  "properties": {
    "unit": {
      "description": "Metric unit",
      "#ref": "https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/unit.schema.json"
    },
    "avg": { "description": "Job metric average", "type": "number", "minimum": 0 },
    "min": { "description": "Job metric minimum", "type": "number", "minimum": 0 },
    "max": { "description": "Job metric maximum", "type": "number", "minimum": 0 }
  },
  "required": [ "unit", "avg", "min", "max" ]
}
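A single job metric statistic conforming to this schema might look like this (values invented for illustration):

```
{ "unit": { "base_unit": "F/s", "prefix": "G" }, "avg": 12.5, "min": 0.0, "max": 98.2 }
```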
@ -1,36 +0,0 @@
{
  "$schema": "http://json-schema.org/draft-07/schema#",
  "title": "Metric unit",
  "description": "Format specification for job metric units",
  "type": "object",
  "properties": {
    "base_unit": {
      "description": "Metric base unit",
      "type": "string",
      "enum": [ "B", "F", "B/s", "F/s", "CPI", "IPC", "Hz" ]
    },
    "prefix": {
      "description": "Unit prefix",
      "type": "string",
      "enum": [ "K", "M", "G", "T", "P", "E" ]
    }
  },
  "required": [ "base_unit" ]
}
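For example, a bandwidth measured in GB/s decomposes into base unit and prefix as:

```
{ "base_unit": "B/s", "prefix": "G" }
```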
36 restruct.pl
@ -1,36 +0,0 @@
#!/usr/bin/env perl

use strict;
use warnings;
use utf8;

use File::Copy;

my $trunk = '/home/jan/prg/HPCJobDatabase';
my $basedir = $ARGV[0];
my $destdir = $ARGV[1];


opendir my $dh, $basedir or die "can't open directory: $!";
while ( readdir $dh ) {
    use integer;
    chomp;
    next if $_ eq '.' or $_ eq '..';

    my $jobID = $_;
    my $srcPath = "$trunk/$basedir/$jobID";
    $jobID =~ s/\.eadm//;

    my $level1 = $jobID/1000;
    my $level2 = $jobID%1000;

    my $dstPath = sprintf("%s/%s/%d/%03d", $trunk, $destdir, $level1, $level2);
    # print "COPY from $srcPath to $dstPath\n";
    # print "$trunk/$destdir/$level1\n";
    if (not -d "$trunk/$destdir/$level1") {
        mkdir "$trunk/$destdir/$level1";
    }

    move($srcPath, $dstPath);
}
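As a worked example of the resulting layout (job ID and directory names are hypothetical): a directory entry `4711815.eadm` under the base directory is placed using integer division and remainder by 1000, i.e. `4711815/1000 = 4711` and `4711815 % 1000 = 815`, and therefore ends up at:

```
<destdir>/4711/815
```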
43 server.go Normal file
@ -0,0 +1,43 @@
package main

import (
	"log"
	"net/http"
	"os"

	"github.com/99designs/gqlgen/graphql/handler"
	"github.com/99designs/gqlgen/graphql/playground"
	"github.com/gorilla/handlers"
	"github.com/gorilla/mux"
	"github.com/jmoiron/sqlx"
	_ "github.com/mattn/go-sqlite3"
	"github.com/moebiusband/cc-jobarchive/generated"
	"github.com/moebiusband/cc-jobarchive/graph"
)

const defaultPort = "8080"

func main() {
	port := os.Getenv("PORT")
	if port == "" {
		port = defaultPort
	}

	// Open the SQLite job database that backs the GraphQL resolvers.
	db, err := sqlx.Open("sqlite3", "./job.db")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	r := mux.NewRouter()
	loggedRouter := handlers.LoggingHandler(os.Stdout, r)
	srv := handler.NewDefaultServer(generated.NewExecutableSchema(generated.Config{Resolvers: &graph.Resolver{DB: db}}))
	r.HandleFunc("/", playground.Handler("GraphQL playground", "/query"))
	r.Handle("/query", srv)

	log.Printf("connect to http://localhost:%s/ for GraphQL playground", port)
	// Bind to the configured port (PORT environment variable, or defaultPort).
	log.Fatal(http.ListenAndServe("127.0.0.1:"+port,
		handlers.CORS(handlers.AllowedHeaders([]string{"X-Requested-With", "Content-Type", "Authorization"}),
			handlers.AllowedMethods([]string{"GET", "POST", "HEAD", "OPTIONS"}),
			handlers.AllowedOrigins([]string{"*"}))(loggedRouter)))
}
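To sanity-check a locally running instance, something along the following lines should work. This is a minimal sketch, assuming the server listens on the default port 8080; the introspection query is generic GraphQL and does not depend on the types defined in graph/schema.graphqls:

```
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
	"strings"
)

func main() {
	// Standard GraphQL introspection query; valid against any schema.
	body := `{"query": "{ __schema { queryType { name } } }"}`

	// POST to the /query endpoint registered in server.go.
	resp, err := http.Post("http://localhost:8080/query", "application/json", strings.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	out, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out)) // expected shape: {"data":{"__schema":{"queryType":{...}}}}
}
```

Schema-specific queries would of course depend on what is declared in ./graph/schema.graphqls and implemented in graph/schema.resolvers.go.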
143 syncDB.pl
@ -1,143 +0,0 @@
#!/usr/bin/env perl
# =======================================================================================
#
# Author: Jan Eitzinger (je), jan.eitzinger@fau.de
# Copyright (c) 2019 RRZE, University Erlangen-Nuremberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# =======================================================================================

use strict;
use warnings;
use utf8;

use File::Slurp;
use Data::Dumper;
use JSON::MaybeXS qw(encode_json decode_json);
use DBI;

my $database = $ARGV[0];
my $basedir = $ARGV[1];

my %attr = (
    PrintError => 1,
    RaiseError => 1
);

my $dbh = DBI->connect(
    "DBI:SQLite:dbname=$database", "", "", \%attr)
    or die "Could not connect to database: $DBI::errstr";

my $sth_select_job = $dbh->prepare(qq{
    SELECT id, user_id, job_id, cluster_id,
    start_time, stop_time, duration, num_nodes,
    has_profile
    FROM job
    WHERE job_id=?
    });

my $sth_update_job = $dbh->prepare(qq{
    UPDATE job
    SET has_profile = ?,
        mem_used_max = ?,
        flops_any_avg = ?,
        mem_bw_avg = ?
    WHERE id=?;
    });

my ($TS, $TE);
my $counter = 0;

open(my $fh, '<:encoding(UTF-8)', './jobIds.txt')
    or die "Could not open file $!";
$TS = time();

while ( <$fh> ) {

    my ($jobID, $path1, $path2) = split ' ', $_;
    $counter++;

    my $jobmeta_json = read_file("$basedir/$path1/$path2/meta.json");
    my $job = decode_json $jobmeta_json;
    my @row = $dbh->selectrow_array($sth_select_job, undef, $jobID);
    my ($db_id, $db_user_id, $db_job_id, $db_cluster_id, $db_start_time, $db_stop_time, $db_duration, $db_num_nodes, $db_has_profile);

    # print Dumper($job);

    if ( @row ) {
        ($db_id,
         $db_user_id,
         $db_job_id,
         $db_cluster_id,
         $db_start_time,
         $db_stop_time,
         $db_duration,
         $db_num_nodes,
         $db_has_profile) = @row;

        if ($db_has_profile == 0) {

            my $stats = $job->{statistics};

            if ( $job->{user_id} ne $db_user_id ) {
                print "jobID $jobID $job->{user_id} $db_user_id\n";
                $job->{user_id} = $db_user_id;
            }

            # if ( $job->{start_time} != $db_start_time ) {
            #     print "start $jobID $job->{start_time} $db_start_time\n";
            # }
            # if ( $job->{stop_time} != $db_stop_time ) {
            #     print "stop $jobID $job->{stop_time} $db_stop_time\n";
            # }
            if ( $job->{duration} != $db_duration ) {
                my $difference = $job->{duration} - $db_duration;
                if ( abs($difference) > 120 ) {
                    print "####duration $jobID $job->{duration} $db_duration $difference\n";
                }
            }

            if ( $job->{num_nodes} != $db_num_nodes ) {
                print "####num nodes $jobID $job->{num_nodes} $db_num_nodes\n";
            }

            $sth_update_job->execute(
                1,
                $stats->{mem_used}->{max},
                $stats->{flops_any}->{avg},
                $stats->{mem_bw}->{avg},
                $db_id
            );
        }
    } else {
        print "$jobID NOT in DB!\n";
    }

    if ( $counter == 100 ) {
        $TE = time();
        my $rate = $counter/($TE-$TS);
        $counter = 0;
        print "Processing $rate jobs per second\n";
        $TS = $TE;
    }
}
$dbh->disconnect;
close $fh;
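The script expects one whitespace-separated record per line in ./jobIds.txt: the job ID followed by the two directory levels below the base directory that hold the job's meta.json. A hypothetical line (values invented for illustration) would therefore look like:

```
4711815 4711 815
```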
@ -1,74 +0,0 @@
#!/usr/bin/env perl
# =======================================================================================
#
# Author: Jan Eitzinger (je), jan.eitzinger@fau.de
# Copyright (c) 2019 RRZE, University Erlangen-Nuremberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# =======================================================================================

use strict;
use warnings;
use utf8;

use File::Slurp;
use Data::Dumper;
use JSON::MaybeXS qw(encode_json decode_json);

# Default to ./data if no base directory is given on the command line.
my $basedir = $ARGV[0] // './data';

my ($TS, $TE);
my $counter = 0;

open(my $fhn, '>:encoding(UTF-8)', './jobIds-tagged.txt')
    or die "Could not open file $!";
open(my $fh, '<:encoding(UTF-8)', './jobIds.txt')
    or die "Could not open file $!";
$TS = time();

while ( <$fh> ) {

    my $line = $_;
    # Split on the literal dot, i.e. "<jobID>.<system>".
    my ($jobID, $system) = split /\./, $line;
    $counter++;

    # my $json = read_file($jobDirectory.'/data.json');
    # my $data = decode_json $json;
    my $json = read_file($jobDirectory.'/meta.json');
    my $meta = decode_json $json;

    my $footprint = $meta->{statistics};

    if ( $footprint->{flops_any}->{max} < 2.0 and $footprint->{mem_bw}->{max} < 2.0 ){
        print $fhn "$jobID\n";
    }

    if ( $counter == 20 ) {
        $TE = time();
        my $rate = $counter/($TE-$TS);
        $counter = 0;
        print "Processing $rate jobs per second\n";
        $TS = $TE;
    }
}
close $fh;
close $fhn;
226 utils/README.md Normal file
@ -0,0 +1,226 @@
# HPCJobDatabase
A standardized interface and reference implementation for HPC job data.
The DB and json schema specification is available in the [wiki](https://github.com/RRZE-HPC/HPCJobDatabase/wiki).

# Dependencies

* Getopt::Long
* Pod::Usage
* DateTime::Format::Strptime
* DBD::SQLite

# Setup

```
sqlite3 jobDB < initDB.sql
```

# Helper Scripts

For all scripts apart from `acQuery.pl` the advice *use the source Luke* holds.

Help text for acQuery:
```
Usage:
   acQuery.pl [options] -- <DB file>

Help Options:
   --help                      Show help text
   --man                       Show man page
   --hasprofile <true|false>   Only show jobs with timeseries metric data
   --mode <mode>               Set the operation mode
   --user <user_id>            Search for jobs of specific user
   --project <project_id>      Search for jobs of specific project
   --numnodes <from> <to>      Specify range for number of nodes of job
   --starttime <from> <to>     Specify range for start time of jobs
   --duration <from> <to>      Specify duration range of jobs
   --mem_used <from> <to>      Specify range for average main memory capacity of job
   --mem_bandwidth <from> <to> Specify range for average main memory bandwidth of job
   --flops_any <from> <to>     Specify range for average flop any rate of job

Options:
    --help   Show a brief help information.

    --man   Read the manual, with examples.

    --hasprofile [true|false]   Only show jobs with or without timeseries
    metric data.

    --mode [ids|query|count|list|stat|perf]   Specify output mode. Mode can be
    one of:

        ids - Print list of job ids matching conditions. One job id per
        line.

        query - Print the query string and then exit.

        count - Only output the number of jobs matching the conditions.
        (Default mode)

        list - Output a record of every job matching the conditions.

        stat - Output job statistics for all jobs matching the
        conditions.

        perf - Output job performance footprint statistics for all jobs
        matching the conditions.

    --user   Search jobs for a specific user id.

    --project   Search jobs for a specific project.

    --duration   Specify condition for job duration. This option takes two
    arguments: If both arguments are positive integers the condition is
    duration between first argument and second argument. If the second
    argument is zero the condition is duration smaller than first argument. If
    the first argument is zero the condition is duration larger than second
    argument. Duration can be in seconds, minutes (append m) or hours
    (append h).

    --numnodes   Specify condition for number of node range of job. This
    option takes two arguments: If both arguments are positive integers the
    condition is number of nodes between first argument and second argument.
    If the second argument is zero the condition is number of nodes smaller
    than first argument. If the first argument is zero the condition is number
    of nodes larger than second argument.

    --starttime   Specify condition for the starttime of job. This option
    takes two arguments: If both arguments are positive integers the
    condition is start time between first argument and second argument. If
    the second argument is zero the condition is start time smaller than first
    argument. If the first argument is zero the condition is start time larger
    than second argument. Start time must be given as date in the following
    format: %d.%m.%Y/%H:%M.

    --mem_used   Specify condition for average main memory capacity used by
    job. This option takes two arguments: If both arguments are positive
    integers the condition is memory used between first argument and
    second argument. If the second argument is zero the condition is memory
    used smaller than first argument. If the first argument is zero the
    condition is memory used larger than second argument.

    --mem_bandwidth   Specify condition for average main memory bandwidth used
    by job. This option takes two arguments: If both arguments are positive
    integers the condition is memory bandwidth between first argument and
    second argument. If the second argument is zero the condition is memory
    bandwidth smaller than first argument. If the first argument is zero the
    condition is memory bandwidth larger than second argument.

    --flops_any   Specify condition for average flops any of job. This option
    takes two arguments: If both arguments are positive integers the
    condition is flops any between first argument and second argument. If
    the second argument is zero the condition is flops any smaller than first
    argument. If the first argument is zero the condition is flops any larger
    than second argument.
```

# Examples

Query jobs with conditions:

```
[HPCJobDatabase] ./acQuery.pl --duration 20h 24h --starttime 01.08.2018/12:00 01.03.2019/12:00
COUNT 6476
```

Query jobs from alternative database file (default is jobDB):

```
[HPCJobDatabase] ./acQuery.pl --project project_30 --starttime 01.08.2018/12:00 01.03.2019/12:00 -- jobDB-anon-emmy
COUNT 21560
```

Get job statistics output:

```
[HPCJobDatabase] ./acQuery.pl --project project_30 --mode stat --duration 0 20h --starttime 01.08.2018/12:00 01.03.2019/12:00 -- jobDB-anon-emmy
=================================
Job count: 747
Total walltime [h]: 16334
Total node hours [h]: 78966

Histogram: Number of nodes
nodes  count
1      54   ****
2      1
3      1
4      36   ****
5      522  *******
6      118  *****
7      15   ***

Histogram: Walltime
hours  count
20     250  ******
21     200  ******
22     114  *****
23     183  ******
```

Get job performance statistics:

```
[HPCJobDatabase] ./acQuery.pl --project project_30 --mode perf --duration 0 20h --numnodes 1 4 --starttime 01.08.2018/12:00 01.03.2019/12:00 -- jobDB-anon-emmy
=================================
Job count: 92
Jobs with performance profile: 48
Total walltime [h]: 2070
Total node hours [h]: 4332

Histogram: Mem used
Mem    count
2      3    **
3      4    **
18     2    *
19     3    **
20     2    *
21     1
22     2    *
23     5    **
24     2    *
25     1
26     1
27     3    **
29     1
30     2    *
31     1
34     1
35     1
36     1
41     1
42     2    *
43     2    *
44     1
49     1
50     2    *
51     1
52     1
53     1

Histogram: Memory bandwidth
BW     count
1      1
2      9    ***
3      1
4      1
5      4    **
6      2    *
7      10   ***
8      9    ***
9      11   ***

Histogram: Flops any
flops  count
1      3    **
2      1
3      4    **
4      3    **
5      9    ***
6      10   ***
7      11   ***
85     1
225    1
236    1
240    2    *
244    2    *
```
@ -2,7 +2,7 @@
 # =======================================================================================
 #
 # Author: Jan Eitzinger (je), jan.eitzinger@fau.de
-# Copyright (c) 2019 RRZE, University Erlangen-Nuremberg
+# Copyright (c) 2020 RRZE, University Erlangen-Nuremberg
 #
 # Permission is hereby granted, free of charge, to any person obtaining a copy
 # of this software and associated documentation files (the "Software"), to deal
@ -31,6 +31,7 @@ use utf8;
 use Data::Dumper;
 use Getopt::Long;
 use Pod::Usage;
+use DateTime;
 use DateTime::Format::Strptime;
 use DBI;
 
@ -50,6 +51,7 @@ my $man = 0;
 my $hasprofile = '';
 my $mode = 'count';
 my $user = '';
+my $jobID = '';
 my $project = '';
 my @numnodes;
 my @starttime;
@ -64,6 +66,7 @@ GetOptions (
     'hasprofile=s' => \$hasprofile,
     'mode=s' => \$mode,
     'user=s' => \$user,
+    'job=s' => \$jobID,
     'project=s' => \$project,
     'numnodes=i{2}' => \@numnodes,
     'starttime=s{2}' => \@starttime,
@ -249,14 +252,17 @@ sub printJobStat {
 
 sub printJob {
     my $job = shift;
+    my $durationHours = sprintf("%.2f", $job->{duration}/3600);
+    my $startDatetime = DateTime->from_epoch(epoch=>$job->{start_time}, time_zone => 'Europe/Berlin',);
+    my $stopDatetime = DateTime->from_epoch(epoch=>$job->{stop_time}, time_zone => 'Europe/Berlin',);
 
     my $jobString = <<"END_JOB";
 =================================
 JobId: $job->{job_id}
 UserId: $job->{user_id}
 Number of nodes: $job->{num_nodes}
-From $job->{start_time} to $job->{stop_time}
-Duration $job->{duration}
+From $startDatetime to $stopDatetime
+Duration $durationHours hours
 END_JOB
 
     print $jobString;
@ -265,6 +271,17 @@ END_JOB
 pod2usage(1) if $help;
 pod2usage(-verbose => 2) if $man;
 
+if ( $jobID ) {
+    my $sth = $dbh->prepare("SELECT * FROM job WHERE job_id=\'$jobID\'");
+    $sth->execute;
+    my %row;
+    $sth->bind_columns( \( @row{ @{$sth->{NAME_lc} } } ));
+    while ($sth->fetch) {
+        printJob(\%row);
+    }
+    exit;
+}
+
 # build query conditions
 if ( $user ) {
     push @conditions, "user_id=\'$user\'";
@ -274,7 +291,6 @@ if ( $project ) {
     push @conditions, "project_id=\'$project\'";
 }
 
-
 if ( @numnodes ) {
     ($add, $from, $to) = processRange($numnodes[0], $numnodes[1]);
     buildCondition('num_nodes');
@ -384,6 +400,7 @@ acQuery.pl - Wrapper script to access sqlite job database.
     --man Show man page
     --hasprofile <true|false> Only show jobs with timerseries metric data
     --mode <mode> Set the operation mode
+    --job <job_id> Search for a specific job
     --user <user_id> Search for jobs of specific user
     --project <project_id> Search for jobs of specific project
     --numnodes <from> <to> Specify range for number of nodes of job
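With the new `--job` option a single job can be looked up directly and printed via printJob; a hypothetical invocation (job ID and database file invented for illustration) would be:

```
[HPCJobDatabase] ./acQuery.pl --job 2181042 -- jobDB-anon-emmy
```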