Cleaning, stabilizing and testing

This commit is contained in:
Aditya Ujeniya
2025-07-04 11:22:03 +02:00
parent 60a346dec1
commit 1ec3c7d80f
12 changed files with 28 additions and 202 deletions

View File

@@ -1,5 +1,7 @@
#!/bin/bash
set -euo pipefail
cd scripts
# Check if required perl modules are installed
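The check itself is below the fold of this hunk; as a reference, a minimal sketch of such a module check (module list taken from the Perl importer deleted below; loop and error message are illustrative) could look like:

for mod in File::Slurp Cpanel::JSON::XS Sort::Versions REST::Client Time::Piece; do
    perl -M"$mod" -e1 2>/dev/null || { echo "Missing perl module: $mod" >&2; exit 1; }
done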

View File

@@ -1,100 +0,0 @@
#!/usr/bin/env perl
use strict;
use warnings;
use utf8;
use File::Path qw( make_path rmtree );
use Cpanel::JSON::XS qw( decode_json encode_json );
use File::Slurp;
use Data::Dumper;
use Time::Piece;
use Sort::Versions;
use REST::Client;
### INFLUXDB
my $newCheckpoints = './data/cc-metric-store/checkpoints';
my @CheckpClusters;
my $verbose = 1;
my $restClient = REST::Client->new();
$restClient->setHost('http://localhost:8086'); # Adapt port here!
$restClient->addHeader('Authorization', "Token 74008ea2a8dad5e6f856838a90c6392e"); # compare .env file
$restClient->addHeader('Content-Type', 'text/plain; charset=utf-8');
$restClient->addHeader('Accept', 'application/json');
$restClient->getUseragent()->ssl_opts(SSL_verify_mode => 0); # Temporary: Disable Cert Check
$restClient->getUseragent()->ssl_opts(verify_hostname => 0); # Temporary: Disable Cert Check
# Get clusters by cc-metric-store/$subfolder
opendir my $dhc, $newCheckpoints or die "can't open directory: $!";
while ( readdir $dhc ) {
    chomp; next if $_ eq '.' or $_ eq '..' or $_ eq 'job-archive';
    my $cluster = $_;
    push @CheckpClusters, $cluster;
}
# start to read checkpoints for influx
foreach my $cluster ( @CheckpClusters ) {
    print "Starting to read updated checkpoint-files into influx for $cluster\n";

    opendir my $dhLevel1, "$newCheckpoints/$cluster" or die "can't open directory: $!";
    while ( readdir $dhLevel1 ) {
        chomp; next if $_ eq '.' or $_ eq '..';
        my $level1 = $_;

        if ( -d "$newCheckpoints/$cluster/$level1" ) {
            my $nodeSource = "$newCheckpoints/$cluster/$level1/";
            my @files = read_dir($nodeSource);
            my $length = @files;
            if (!@files || $length != 14) { # needs 14 files == 7 days worth of data
                next;
            }
            my @sortedFiles = sort { versioncmp($a,$b) } @files; # sort alphanumerically: _Really_ start with index == 0 == 1609459200.json
            my $nodeMeasurement;

            foreach my $file (@sortedFiles) {
                # print "$file\n";
                my $rawstr = read_file("$nodeSource/$file");
                my $json = decode_json($rawstr);
                my $fileMeasurement;

                foreach my $metric (keys %{$json->{metrics}}) {
                    my $start    = $json->{metrics}->{$metric}->{start};
                    my $timestep = $json->{metrics}->{$metric}->{frequency};
                    my $data     = $json->{metrics}->{$metric}->{data};
                    my $length   = @$data;
                    my $measurement;

                    while (my ($index, $value) = each(@$data)) {
                        if ( defined $value ) { # skip JSON nulls, but keep legitimate zero values
                            my $timestamp = $start + ($timestep * $index);
                            $measurement .= "$metric,cluster=$cluster,hostname=$level1,type=node value=".$value." $timestamp"."\n";
                        }
                    }
                    # Use v2 API for Influx2
                    if ($measurement) {
                        # print "Adding: #VALUES $length KEY $metric"."\n";
                        $fileMeasurement .= $measurement;
                    }
                }
                if ($fileMeasurement) {
                    $nodeMeasurement .= $fileMeasurement;
                }
            }

            next unless $nodeMeasurement; # nothing to write for this node

            $restClient->POST("/api/v2/write?org=ClusterCockpit&bucket=ClusterCockpit&precision=s", "$nodeMeasurement"); # compare .env for bucket and org
            my $responseCode = $restClient->responseCode();
            if ( $responseCode eq '204') {
                if ( $verbose ) {
                    print "INFLUX API WRITE: CLUSTER $cluster HOST $level1"."\n";
                };
            } else {
                if ( $responseCode ne '422' ) { # Exclude High Frequency Error 422 - Temporary!
                    my $response = $restClient->responseContent();
                    print "INFLUX API WRITE ERROR CODE ".$responseCode.": ".$response."\n";
                };
            };
        }
    }
}
print "Done for influx\n";

View File

@@ -1,65 +0,0 @@
#!/bin/bash
set -euo pipefail
NEW_CHECKPOINTS='../data/cc-metric-store/checkpoints'
VERBOSE=1
INFLUX_HOST='http://0.0.0.0:8181'
HEADERS=(
    -H "Content-Type: text/plain; charset=utf-8"
    -H "Accept: application/json"
)
checkp_clusters=()
while IFS= read -r -d '' dir; do
    checkp_clusters+=("$(basename "$dir")")
done < <(find "$NEW_CHECKPOINTS" -mindepth 1 -maxdepth 1 -type d \! -name 'job-archive' -print0)
for cluster in "${checkp_clusters[@]}"; do
    echo "Starting to read updated checkpoint-files into influx for $cluster"

    while IFS= read -r -d '' level1_dir; do
        level1=$(basename "$level1_dir")
        node_source="$NEW_CHECKPOINTS/$cluster/$level1"
        mapfile -t files < <(find "$node_source" -type f -name '*.json' | sort -V)

        # if [[ ${#files[@]} -ne 14 ]]; then
        #     continue
        # fi

        node_measurement=""
        for file in "${files[@]}"; do
            rawstr=$(<"$file")

            while IFS= read -r metric; do
                start=$(jq -r ".metrics[\"$metric\"].start" <<<"$rawstr")
                timestep=$(jq -r ".metrics[\"$metric\"].frequency" <<<"$rawstr")

                while IFS= read -r index_value; do
                    index=${index_value%%:*}
                    value=${index_value#*:}
                    if [[ -n "$value" && "$value" != "null" ]]; then
                        timestamp=$((start + (timestep * index)))
                        node_measurement+="$metric,cluster=$cluster,hostname=$level1,type=node value=$value $timestamp\n"
                    fi
                done < <(jq -r ".metrics[\"$metric\"].data | to_entries | map(\"\(.key):\(.value // \"null\")\") | .[]" <<<"$rawstr")
            done < <(jq -r '.metrics | keys[]' <<<"$rawstr")
        done

        if [[ -n "$node_measurement" ]]; then
            # send the accumulated line-protocol lines in chunks of up to 1000 lines per write request
            while mapfile -t -n 1000 chunk_lines && ((${#chunk_lines[@]})); do
                chunk=$(printf '%s\n' "${chunk_lines[@]}")
                response_code=$(curl -s -o /dev/null -w "%{http_code}" "${HEADERS[@]}" --data-binary "$chunk" "$INFLUX_HOST/api/v2/write?bucket=mydb&precision=s")
                if [[ "$response_code" == "204" ]]; then
                    [[ "$VERBOSE" -eq 1 ]] && echo "INFLUX API WRITE: CLUSTER $cluster HOST $level1"
                elif [[ "$response_code" != "422" ]]; then
                    echo "INFLUX API WRITE ERROR CODE $response_code"
                fi
            done < <(printf '%b' "$node_measurement")
        fi

        echo "Done for: $node_source"
    done < <(find "$NEW_CHECKPOINTS/$cluster" -mindepth 1 -maxdepth 1 -type d -print0)
done
echo "Done for influx"

View File

@@ -1,5 +1,7 @@
#!/bin/bash -l
set -euo pipefail
sudo apt-get update
sudo apt-get upgrade -f -y
@@ -33,7 +35,7 @@ sudo cpan Time::Piece
sudo cpan Sort::Versions
sudo groupadd docker
sudo usermod -aG docker ubuntu
sudo usermod -aG docker $USER
sudo shutdown -r now
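Docker group membership only takes effect in a new login session, hence the reboot. After logging back in, a quick smoke test (illustrative, not part of the script) could be:

id -nG "$USER" | grep -qw docker && docker run --rm hello-world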

View File

@@ -1,12 +0,0 @@
#!/bin/bash
echo "Will run prerequisites 'apt install python3-pip' and 'pip install sqlite3-to-mysql'"
sudo apt install python3-pip
pip install sqlite3-to-mysql
echo "'sqlite3mysql' requires running DB container, will fail otherwise."
# -f FILE -d DBNAME -u USER -h HOST -P PORT
~/.local/bin/sqlite3mysql -f job.db -d ClusterCockpit -u root --mysql-password root -h localhost -P 3306
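To sanity-check the import afterwards, something like the following could be run against the DB container (127.0.0.1 forces a TCP connection instead of the local socket; the job table name is assumed from the ClusterCockpit schema):

mysql -h 127.0.0.1 -P 3306 -u root -proot -e 'SHOW TABLES; SELECT COUNT(*) FROM job;' ClusterCockpit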