5 Commits

Author          SHA1         Message                                           Date
Erik Fabrizzi   d494228d77   alg benchmarking runner and processor             2025-11-05 12:08:34 +01:00
Erik Fabrizzi   79bc443bcb   templating algsupport                             2025-10-31 14:08:04 +01:00
                da15851c5c   some comment                                      2025-10-31 12:33:27 +01:00
                80e0419083   node pinning templates                            2025-10-31 12:28:48 +01:00
                c3a1bef8b0   Restored PMI flag when submitting multinode job   2025-06-02 13:56:49 +02:00
4 changed files with 378 additions and 3 deletions

launch_alg_bench.py (Executable file, 203 additions)

@@ -0,0 +1,203 @@
import os
import subprocess
from datetime import datetime

################ HELPER FUNCTIONS ################
def load_template(template_path: str) -> str:
    """Read a batch-script template from disk and return it as a string."""
    with open(template_path, "r") as handle:
        return handle.read()

def write_batch(batch_fpath: str, batch_content: str):
    """Write a rendered batch script to disk."""
    with open(batch_fpath, "w") as handle:
        _ = handle.write(batch_content)

################### SETUP DIRS ###################
output_dir = os.getcwd() + "/output/"
err_dir = os.getcwd() + "/error/"
batch_files_dir = os.getcwd() + "/batchs/"
data_dir = os.getcwd() + "/data/"
for directory in (output_dir, err_dir, data_dir, batch_files_dir):
    if not os.path.isdir(directory):
        os.mkdir(directory)

################ GLOBAL DEFAULTS #################
mpi1_bin = "/home/hpc/ihpc/ihpc136h/workspace/mpi-benchmark-tool/bin/IMB-MPI1"
default_parameter = {
    "time_stamp": datetime.now().strftime("%y_%m_%d_%H-%M-%S"),
    "job_name": "",
    "output_dir": os.getcwd() + "/output/",
    "err_dir": os.getcwd() + "/error/",
    "data_dir": os.getcwd() + "/data/",
    "n_procs": 18,
    "off_cache_flag": "",
    "bin": mpi1_bin,
    "n_nodes": 1
}
# Intel MPI collective algorithms selectable via the I_MPI_ADJUST_* environment
# variables; the 1-based position in each 'algs' list is the value to export.
algs_dic = [
    {'name': "Allgather",
     'flag': "I_MPI_ADJUST_ALLGATHER",
     'algs': [
         "Recursive doubling",
         "Bruck`s",
         "Ring",
         "Topology aware Gatherv + Bcast",
         "Knomial",
     ]},
    {'name': "Allreduce",
     'flag': "I_MPI_ADJUST_ALLREDUCE",
     'algs': [
         "Recursive doubling",
         "Rabenseifner`s",
         "Reduce + Bcast",
         "Topology aware Reduce + Bcast",
         "Binomial gather + scatter",
         "Topology aware binominal gather + scatter",
         "Shumilin`s ring",
         "Ring",
         "Knomial",
         "Topology aware SHM-based flat",
         "Topology aware SHM-based Knomial",
         "Topology aware SHM-based Knary",
     ]},
    {'name': "Alltoall",
     'flag': "I_MPI_ADJUST_ALLTOALL",
     'algs': [
         "Bruck`s",
         "Isend/Irecv + waitall",
         "Pair wise exchange",
         "Plum`s",
     ]},
    {'name': "Barrier",
     'flag': "I_MPI_ADJUST_BARRIER",
     'algs': [
         "Dissemination",
         "Recursive doubling",
         "Topology aware dissemination",
         "Topology aware recursive doubling",
         "Binominal gather + scatter",
         "Topology aware binominal gather + scatter",
         "Topology aware SHM-based flat",
         "Topology aware SHM-based Knomial",
         "Topology aware SHM-based Knary",
     ]},
    {'name': "Bcast",
     'flag': "I_MPI_ADJUST_BCAST",
     'algs': [
         "Binomial",
         "Recursive doubling",
         "Ring",
         "Topology aware binomial",
         "Topology aware recursive doubling",
         "Topology aware ring",
         "Shumilin`s",
         "Knomial",
         "Topology aware SHM-based flat",
         "Topology aware SHM-based Knomial",
         "Topology aware SHM-based Knary",
         "NUMA aware SHM-based (SSE4.2)",
         "NUMA aware SHM-based (AVX2)",
         "NUMA aware SHM-based (AVX512)",
     ]},
    {'name': "Gather",
     'flag': "I_MPI_ADJUST_GATHER",
     'algs': [
         "Binomial",
         "Topology aware binomial",
         "Shumilin`s",
         "Binomial with segmentation",
     ]},
    {'name': "Reduce_scatter",
     'flag': "I_MPI_ADJUST_REDUCE_SCATTER",
     'algs': [
         "Recursive halving",
         "Pair wise exchange",
         "Recursive doubling",
         "Reduce + Scatterv",
         "Topology aware Reduce + Scatterv",
     ]},
    {'name': "Reduce",
     'flag': "I_MPI_ADJUST_REDUCE",
     'algs': [
         "Shumilin`s",
         "Binomial",
         "Topology aware Shumilin`s",
         "Topology aware binomial",
         "Rabenseifner`s",
         "Topology aware Rabenseifner`s",
         "Knomial",
         "Topology aware SHM-based flat",
         "Topology aware SHM-based Knomial",
         "Topology aware SHM-based Knary",
         "Topology aware SHM-based binomial",
     ]},
    {'name': "Scatter",
     'flag': "I_MPI_ADJUST_SCATTER",
     'algs': [
         "Binomial",
         "Topology aware binomial",
         "Shumilin`s",
     ]},
]
log = ""
############## MULTIPLE-NODE LAUNCH ##############
off_cache_flags = [
    "-off_cache -1",
    "-off_cache 50",
    ""
]
ndcnt = [2, 3, 4, 5, 6, 7, 8, 9, 10]
proc_per_node = 72
multiple_node_parameter = dict(default_parameter)
multiple_node_template = load_template("./templates/multinode_algs.template")
for flag in off_cache_flags:
    multiple_node_parameter["off_cache_flag"] = flag
    for n_nodes in ndcnt:
        n_procs = n_nodes * proc_per_node
        multiple_node_parameter["n_procs"] = int(n_procs)
        multiple_node_parameter["n_nodes"] = n_nodes
        for alg_conf in algs_dic:
            collective = alg_conf['name']
            multiple_node_parameter["job_name"] = collective
            multiple_node_parameter["alg_flag"] = alg_conf['flag']
            algs = alg_conf["algs"]
            # I_MPI_ADJUST_* numbers algorithms from 1, so enumerate from 1
            # rather than 0 (0 falls back to the library's default selection).
            for idx, alg in enumerate(algs, start=1):
                multiple_node_parameter["alg_name"] = alg
                multiple_node_parameter["alg_idx"] = idx
                batch_file = os.path.join(
                    batch_files_dir,
                    f"{collective}_{alg.strip().replace('`', '').replace(' ', '_').replace('/', '_')}.sh")
                write_batch(batch_file,
                            multiple_node_template.format(**multiple_node_parameter))
                result = subprocess.run(["sbatch", batch_file],
                                        capture_output=True, text=True)
                log += f"#{collective} {n_procs}" + "\n"
                log += "\tSTDOUT:" + result.stdout + "\n"
                log += "\tSTDERR:" + result.stderr + "\n"
print(log)
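
Each generated script pins one Intel MPI algorithm through the exported I_MPI_ADJUST_* variable. The following sketch is hypothetical (not part of this commit): it renders a single batch script to stdout instead of submitting it, which is handy for checking the template substitution before launching hundreds of jobs. All parameter values below are illustrative only.

from datetime import datetime

# Illustrative values only; keys mirror those used by the launcher above.
params = {
    "time_stamp": datetime.now().strftime("%y_%m_%d_%H-%M-%S"),
    "job_name": "Allreduce",
    "output_dir": "./output/",
    "err_dir": "./error/",
    "data_dir": "./data/",
    "n_procs": 144,                          # 2 nodes x 72 processes per node
    "n_nodes": 2,
    "off_cache_flag": "-off_cache -1",
    "bin": "/path/to/IMB-MPI1",              # placeholder binary path
    "alg_flag": "I_MPI_ADJUST_ALLREDUCE",
    "alg_idx": 8,                            # 8 = Ring in the Allreduce list above
    "alg_name": "Ring",
}

with open("./templates/multinode_algs.template") as handle:
    print(handle.read().format(**params))    # dry run: print instead of sbatch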

postprocess_data_algs.py (Executable file, 112 additions)

@@ -0,0 +1,112 @@
import pandas as pd
import os
data_markers = {
    "block_separator": "#----------------------------------------------------------------",
    "benchmark_type": "# Benchmarking",
    "processes_num": "# #processes = ",
    "min_bytelen": "# Minimum message length in bytes",
    "max_bytelen": "# Maximum message length in bytes",
    "mpi_datatype": "# MPI_Datatype :",
    "mpi_red_datatype": "# MPI_Datatype for reductions :",
    "mpi_red_op": "# MPI_Op",
    "end_of_table": "# All processes entering MPI_Finalize",
    "creation_time": "# CREATION_TIME :",
    "n_nodes": "# N_NODES :",
    "off_cache_flag": "# OFF_CACHE_FLAG :",
    "algorithm": "# ALGORITHM :"
}
column_names = [
    "benchmark_type",
    "proc_num",
    "msg_size_bytes",
    "repetitions",
    "t_min_usec",
    "t_max_usec",
    "t_avg_usec",
    "mpi_datatype",
    "mpi_red_datatype",
    "mpi_red_op",
    "creation_time",
    "n_nodes",
    "off_cache_flag",
    "algorithm"
]
data = list()
for file in os.listdir("data/"):
    with open("data/" + file, 'r') as f:
        lines = f.readlines()
    # Per-file parser state and metadata defaults.
    past_preheader = False
    in_header = False
    in_body = False
    btype = "NA"
    proc_num = "NA"
    mpi_datatype = "NA"
    mpi_red_datatype = "NA"
    mpi_red_op = "NA"
    creation_time = "NA"
    n_nodes = "NA"
    off_cache_flag = "NA"
    algorithm = "NA"
    for line in lines:
        # Separator lines toggle between header and body sections of the IMB output.
        if data_markers["block_separator"] in line:
            if in_header and not past_preheader:
                past_preheader = True
            elif in_header and past_preheader:
                in_body = True
            in_header = not in_header
            continue
        # Global MPI metadata printed between the preheader and the first table.
        if not in_header and not in_body and past_preheader:
            if data_markers["mpi_datatype"] in line:
                mpi_datatype = line.split()[-1]
            elif data_markers["mpi_red_datatype"] in line:
                mpi_red_datatype = line.split()[-1]
            elif data_markers["mpi_red_op"] in line:
                mpi_red_op = line.split()[-1]
        # Custom metadata echoed by the batch script before IMB starts.
        if not in_header and not in_body and not past_preheader:
            if data_markers["n_nodes"] in line:
                n_nodes = line.split()[-1]
            if data_markers["creation_time"] in line:
                creation_time = line.split()[-1]
            if data_markers["off_cache_flag"] in line:
                off_cache_flag = line.split(":")[-1].strip()
                if off_cache_flag == "":
                    off_cache_flag = "NA"
                else:
                    off_cache_flag = off_cache_flag.replace("-off_cache", "")
            if data_markers["algorithm"] in line:
                algorithm = line.split(":")[-1].strip()
        # Per-benchmark header: benchmark name and process count.
        if past_preheader and in_header:
            if data_markers["benchmark_type"] in line:
                btype = line.split()[2]
            if data_markers["processes_num"] in line:
                proc_num = int(line.split()[3])
        # Table body: one row per message size.
        if in_body:
            # Check the end-of-table marker before the generic comment skip,
            # otherwise the break is never reached.
            if data_markers["end_of_table"] in line:
                break
            if "#" in line or "".join(line.split()) == "":
                continue
            if "int-overflow" in line:
                continue
            if "out-of-mem" in line:
                continue
            data.append(
                [btype, proc_num]
                + [int(s) if s.isdigit() else float(s) for s in line.split()]
                + [
                    mpi_datatype,
                    mpi_red_datatype,
                    mpi_red_op,
                    creation_time,
                    n_nodes,
                    off_cache_flag,
                    algorithm
                ])
df = pd.DataFrame(data, columns=column_names)
df.to_csv("data.csv", index=False)
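
With data.csv in place, a short pandas query can rank algorithms per collective and message size. The sketch below is not part of this commit and the aggregation choice is only an illustration; it uses the column names defined above.

import pandas as pd

df = pd.read_csv("data.csv")

# Mean average latency per collective / algorithm / message size,
# then keep the fastest algorithm for each (collective, message size) pair.
avg = (df.groupby(["benchmark_type", "algorithm", "msg_size_bytes"])["t_avg_usec"]
         .mean()
         .reset_index())
best = avg.loc[avg.groupby(["benchmark_type", "msg_size_bytes"])["t_avg_usec"].idxmin()]
print(best.sort_values(["benchmark_type", "msg_size_bytes"]).to_string(index=False))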

Modified multinode batch template (25 additions, 3 deletions)

@@ -3,13 +3,36 @@
#SBATCH --output={output_dir}{job_name}_{n_procs}.out
#SBATCH --error={err_dir}{job_name}_{n_procs}.err
#SBATCH --nodes={n_nodes}
#SBATCH --time=00:10:00
#SBATCH --nodelist=f01[01-64]
#SBATCH --time=00:30:00
#SBATCH --export=NONE
# Switch Help Table
# SwitchName=fswibl01 Level=0 LinkSpeed=1 Nodes=f01[01-64]
# SwitchName=fswibl02 Level=0 LinkSpeed=1 Nodes=f02[01-64]
# SwitchName=fswibl03 Level=0 LinkSpeed=1 Nodes=f03[01-64]
# SwitchName=fswibl04 Level=0 LinkSpeed=1 Nodes=f04[01-64]
# SwitchName=fswibl05 Level=0 LinkSpeed=1 Nodes=f05[01-64]
# SwitchName=fswibl06 Level=0 LinkSpeed=1 Nodes=f06[01-64]
# SwitchName=fswibl07 Level=0 LinkSpeed=1 Nodes=f01[65-88],f02[65-88]
# SwitchName=fswibl08 Level=0 LinkSpeed=1 Nodes=f03[65-88],f04[65-88],fritz[1-2]
# SwitchName=fswibl09 Level=0 LinkSpeed=1 Nodes=f05[65-88],f06[65-88],fritz[3-4],fviz1
# SwitchName=fswibl10 Level=0 LinkSpeed=1 Nodes=f07[01-64]
# SwitchName=fswibl11 Level=0 LinkSpeed=1 Nodes=f08[01-64]
# SwitchName=fswibl12 Level=0 LinkSpeed=1 Nodes=f09[01-64]
# SwitchName=fswibl13 Level=0 LinkSpeed=1 Nodes=f10[01-64]
unset SLURM_EXPORT_ENV
module load intel intelmpi
# Enable tuned collectives
export I_MPI_TUNING=on
export I_MPI_TUNING_MODE=auto # or 'collectives'
# Options: 0=auto, 1=recursive doubling, 2=ring, 3=binomial tree, 4=scatter-allgather
export I_MPI_COLL_ALLGATHER=2
export I_MPI_COLL_GATHER=2
OUTPUT_FILENAME="{data_dir}/{job_name}_$SLURM_JOB_ID.dat"
@@ -17,6 +40,5 @@ echo "# CREATION_TIME : {time_stamp}" > $OUTPUT_FILENAME
echo "# N_NODES : {n_nodes}" >> $OUTPUT_FILENAME
echo "# OFF_CACHE_FLAG : {off_cache_flag}">> $OUTPUT_FILENAME
srun --cpu-freq=2000000-2000000:performance -N {n_nodes} -n{n_procs} {bin} {job_name} -npmin {n_procs} {off_cache_flag} >> $OUTPUT_FILENAME
srun --cpu-freq=2000000-2000000:performance -N {n_nodes} -n{n_procs} {bin} {job_name} -npmin {n_procs} {off_cache_flag} -mem 2 -time 60 >> $OUTPUT_FILENAME

templates/multinode_algs.template (New file, 38 additions)

@@ -0,0 +1,38 @@
#!/bin/bash -l
#SBATCH --job-name={job_name}_{n_procs}_{alg_idx}
#SBATCH --output={output_dir}{job_name}_{n_procs}.out
#SBATCH --error={err_dir}{job_name}_{n_procs}.err
#SBATCH --nodes={n_nodes}
#SBATCH --nodelist=f01[01-64]
#SBATCH --time=00:30:00
#SBATCH --export=NONE
# SwitchName=fswibl01 Level=0 LinkSpeed=1 Nodes=f01[01-64]
# SwitchName=fswibl02 Level=0 LinkSpeed=1 Nodes=f02[01-64]
# SwitchName=fswibl03 Level=0 LinkSpeed=1 Nodes=f03[01-64]
# SwitchName=fswibl04 Level=0 LinkSpeed=1 Nodes=f04[01-64]
# SwitchName=fswibl05 Level=0 LinkSpeed=1 Nodes=f05[01-64]
# SwitchName=fswibl06 Level=0 LinkSpeed=1 Nodes=f06[01-64]
# SwitchName=fswibl07 Level=0 LinkSpeed=1 Nodes=f01[65-88],f02[65-88]
# SwitchName=fswibl08 Level=0 LinkSpeed=1 Nodes=f03[65-88],f04[65-88],fritz[1-2]
# SwitchName=fswibl09 Level=0 LinkSpeed=1 Nodes=f05[65-88],f06[65-88],fritz[3-4],fviz1
# SwitchName=fswibl10 Level=0 LinkSpeed=1 Nodes=f07[01-64]
# SwitchName=fswibl11 Level=0 LinkSpeed=1 Nodes=f08[01-64]
# SwitchName=fswibl12 Level=0 LinkSpeed=1 Nodes=f09[01-64]
# SwitchName=fswibl13 Level=0 LinkSpeed=1 Nodes=f10[01-64]
unset SLURM_EXPORT_ENV
module load intel intelmpi
export {alg_flag}={alg_idx}
OUTPUT_FILENAME="{data_dir}/{job_name}_$SLURM_JOB_ID.dat"
echo "# CREATION_TIME : {time_stamp}" > $OUTPUT_FILENAME
echo "# N_NODES : {n_nodes}" >> $OUTPUT_FILENAME
echo "# OFF_CACHE_FLAG : {off_cache_flag}">> $OUTPUT_FILENAME
echo "# ALGORITHM : {alg_name}">> $OUTPUT_FILENAME
srun --cpu-freq=2000000-2000000:performance -N {n_nodes} -n{n_procs} {bin} {job_name} -npmin {n_procs} {off_cache_flag} -mem 2 -time 60 >> $OUTPUT_FILENAME
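
Because str.format raises a KeyError for any {placeholder} without a matching key, a small standalone check (hypothetical, not part of this commit) can list the fields this template expects before the launcher fills it:

import string

def template_fields(path: str) -> set[str]:
    # Collect the {name} placeholders that str.format will substitute.
    with open(path) as handle:
        return {field for _, field, _, _ in string.Formatter().parse(handle.read()) if field}

print(sorted(template_fields("./templates/multinode_algs.template")))
# Expected, per the launcher above: alg_flag, alg_idx, alg_name, bin, data_dir,
# err_dir, job_name, n_nodes, n_procs, off_cache_flag, output_dir, time_stamp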