Unix port based slurmrestd service

This commit is contained in:
Aditya Ujeniya 2024-10-24 14:54:40 +02:00
parent f574568d76
commit 0c1f6b446e
11 changed files with 219 additions and 41 deletions

curl_slurmrestd.sh Executable file
View File

@ -0,0 +1,3 @@
SLURM_JWT="eyJhbGciOiJSUzI1NiIsICJ0eXAiOiJKV1QifQ.eyJpc3MiOiJzbHVybSJ9.dzAHf1Ojoa149uRCCWY1eP3vDyCIZCOZ3h554R-KJJ8-OP0CJ0ymvSkFISLcYcyd9vVKmaYdSN3tWEF6bNZEmyX7G560i1MbkNFvhkhNVSPLKEKNPs38h5ra3ZlTlLlxAlDzXRAAn6UEEgKdm5vx4Jhec7ptaRL_zeSFpTS5fJPc0QE1Cm7e7nU39-9e8l4WU4KpRMxT6ANFm22_G4-mSA-AgCAvKQFzj2FInKsXDUTGlliNJuAgFxf-9LQxoeAknOQhEqcTXii_yBy9DNcT03pdNcAu5Ru4_qlX62vroInU_eh5mWQyiUdXN9Wj_OfMmfLoYFkJeUFYexBMZnSBgg"
curl -X 'GET' -v 'http://localhost:6820/slurm/v0.0.39/ping' -H "X-SLURM-USER-NAME:slurm" -H "X-SLURM-USER-TOKEN:$SLURM_JWT"
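The token above is a hard-coded example and will eventually expire. A minimal alternative sketch, assuming auth/jwt is active on the controller and the compose service is named slurmctld: request a fresh token via scontrol and reuse it for the same call.
# scontrol prints "SLURM_JWT=..." which can be exported directly
export $(docker exec slurmctld scontrol token username=slurm lifespan=3600)
curl -s -X 'GET' 'http://localhost:6820/slurm/v0.0.39/ping' \
  -H "X-SLURM-USER-NAME:slurm" -H "X-SLURM-USER-TOKEN:$SLURM_JWT"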

View File

@ -91,6 +91,7 @@ services:
volumes:
- ${DATADIR}/slurm/home:/home
- ${DATADIR}/slurm/secret:/.secret
- ${DATADIR}/slurm/tmp:/tmp:rw
- ./slurm/database/slurmdbd.conf:/home/config/slurmdbd.conf
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
@ -109,6 +110,7 @@ services:
volumes:
- ${DATADIR}/slurm/home:/home
- ${DATADIR}/slurm/secret:/.secret
- ${DATADIR}/slurm/tmp:/tmp:rw
- ./slurm/worker/cgroup.conf:/home/config/cgroup.conf
- ./slurm/controller/slurm.conf:/home/config/slurm.conf
- /etc/timezone:/etc/timezone:ro
@ -121,12 +123,16 @@
hostname: slurmrestd
build:
context: ./slurm/rest
args:
uid_u: ${UID_U}
gid_g: ${GID_G}
depends_on:
- slurmctld
privileged: true
volumes:
- ${DATADIR}/slurm/home:/home
- ${DATADIR}/slurm/secret:/.secret
- ${DATADIR}/slurm/tmp:/tmp:rw
- ./slurm/controller/slurm.conf:/home/config/slurm.conf
- ./slurm/rest/slurmrestd.conf:/home/config/slurmrestd.conf
- /etc/timezone:/etc/timezone:ro
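The new uid_u/gid_g build args are fed from UID_U/GID_G, which the deployment script exports (see below). A hedged sketch of rebuilding just the REST service by hand, outside that script:
# Assumption: run from the cc-docker checkout; UID_U/GID_G are the values the compose file expects
export UID_U=$(id -u) GID_G=$(id -g)
docker-compose build slurmrestd
docker-compose up -d slurmrestd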

View File

@ -1,21 +1,24 @@
#!/bin/bash
echo ""
echo "|--------------------------------------------------------------------------------------|"
echo "| Welcome to the cc-docker automatic deployment script. |"
echo "| Make sure you have sudo rights to run docker services. |"
echo "| This script assumes that your user is added to the docker group, which means |"
echo "| that docker commands do not explicitly require the 'sudo' keyword to run. |"
echo "| You can set this up with the following commands: |"
echo "| |"
echo "| > sudo groupadd docker |"
echo "| > sudo usermod -aG docker $USER |"
echo "| |"
echo "| This will add your user to the docker group, so that all docker commands |"
echo "| run without requiring the 'sudo' keyword. |"
echo "|--------------------------------------------------------------------------------------|"
echo ""
export UID_U=$(id -u $USER)
export GID_G=$(id -g $USER)
# Check cc-backend, touch job.db if exists
if [ ! -d cc-backend ]; then
echo "'cc-backend' not yet prepared! Please clone cc-backend repository before starting this script."
@ -98,6 +101,15 @@ docker-compose build
docker-compose up -d
echo ""
echo "|--------------------------------------------------------------------------------------|"
echo "| Check logs for each slurm service by using these commands: |"
echo "| docker-compose logs slurmctld |"
echo "| docker-compose logs slurmdbd |"
echo "| docker-compose logs slurmrestd |"
echo "| docker-compose logs node01 |"
echo "|======================================================================================|"
echo "| Setup complete, containers are up by default: Shut down with 'docker-compose down'. |"
echo "| Use './cc-backend/cc-backend -server' to start cc-backend. |"
echo "| Use scripts in /scripts to load data into influx or mariadb. |"
echo "|--------------------------------------------------------------------------------------|"
echo ""

View File

@ -19,7 +19,7 @@ RUN yum install -y munge munge-libs rng-tools \
openssh-server openssh-clients dbus-devel \
pam-devel numactl numactl-devel hwloc sudo \
lua readline-devel ncurses-devel man2html \
autoconf automake json-c-devel libjwt-devel \
libibmad libibumad rpm-build perl-ExtUtils-MakeMaker.noarch rpm-build make wget
RUN dnf --enablerepo=powertools install -y munge-devel rrdtool-devel lua-devel hwloc-devel mariadb-server mariadb-devel

View File

@ -4,6 +4,8 @@ set -e
# Determine the system architecture dynamically
ARCH=$(uname -m)
SLURM_VERSION="24.05.3"
SLURM_JWT=daemon
SLURMRESTD_SECURITY=disable_user_check
_delete_secrets() {
if [ -f /.secret/munge.key ]; then
@ -11,6 +13,9 @@ _delete_secrets() {
sudo rm -rf /.secret/munge.key
sudo rm -rf /.secret/worker-secret.tar.gz
sudo rm -rf /.secret/setup-worker-ssh.sh
sudo rm -rf /.secret/jwt.key
sudo rm -rf /.secret/jwt_public.key
sudo rm -rf /.secret/jwt_token.txt
echo "Done removing secrets"
ls /.secret/
@ -88,6 +93,31 @@ _copy_secrets() {
rm -f /home/worker/setup-worker-ssh.sh
}
_openssl_jwt_key() {
cd /.secret
openssl rand -base64 32 > jwt.key
# openssl genpkey -algorithm RSA -out jwt.key -pkeyopt rsa_keygen_bits:2048
# openssl rsa -pubout -in jwt.key -out jwt_public.key
cd ..
}
_generate_jwt_token() {
KEY=$(cat /etc/config/jwt.key)
NOW=$(date +%s)
IAT="${NOW}"
EXP=$((${NOW} + 3600000))
# HS256 header and payload, base64url-encoded without padding
HEADER_RAW='{"alg":"HS256","typ":"JWT"}'
HEADER=$(echo -n "${HEADER_RAW}" | openssl base64 | tr -d '=' | tr '/+' '_-' | tr -d '\n')
PAYLOAD_RAW='{"iss":"slurm","iat":'${IAT}',"exp":'${EXP}'}'
PAYLOAD=$(echo -n "${PAYLOAD_RAW}" | openssl base64 | tr -d '=' | tr '/+' '_-' | tr -d '\n')
HEADER_PAYLOAD="${HEADER}"."${PAYLOAD}"
# HS256 is HMAC-SHA256 with the shared jwt.key, not an RSA signature
SIGNATURE=$(echo -n "${HEADER_PAYLOAD}" | openssl dgst -sha256 -hmac "${KEY}" -binary | openssl base64 | tr -d '=' | tr '/+' '_-' | tr -d '\n')
JWT="${HEADER_PAYLOAD}"."${SIGNATURE}"
echo "${JWT}" >/.secret/jwt_token.txt
chmod 777 /.secret/jwt_token.txt
}
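A minimal verification sketch for the generated token, assuming the paths used above: recompute the HS256 signature over header.payload with the shared jwt.key and compare it to the token's third segment (the two printed lines should match).
TOKEN=$(cat /.secret/jwt_token.txt)
KEY=$(cat /etc/config/jwt.key)
# re-sign header.payload with the same HMAC key
echo -n "$(echo "${TOKEN}" | cut -d. -f1-2)" | openssl dgst -sha256 -hmac "${KEY}" -binary | openssl base64 | tr -d '=' | tr '/+' '_-' | tr -d '\n'; echo
# signature segment stored in the token
echo "${TOKEN}" | cut -d. -f3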
# run slurmctld
_slurmctld() {
cd /root/rpmbuild/RPMS/$ARCH
@ -105,19 +135,22 @@ _slurmctld() {
echo ""
mkdir -p /var/spool/slurm/ctld /var/spool/slurm/d /var/log/slurm /etc/slurm /var/run/slurm/d /var/run/slurm/ctld /var/lib/slurm/d /var/lib/slurm/ctld
chown -R slurm: /var/spool/slurm/ctld /var/spool/slurm/d /var/log/slurm /var/spool /var/lib /var/run/slurm/d /var/run/slurm/ctld /var/lib/slurm/d /var/lib/slurm/ctld
mkdir -p /etc/config
chown -R slurm: /etc/config
touch /var/log/slurmctld.log
chown -R slurm: /var/log/slurmctld.log
touch /var/log/slurmd.log
chown -R slurm: /var/log/slurmd.log
touch /var/lib/slurm/d/job_state
chown -R slurm: /var/lib/slurm/d/job_state
touch /var/lib/slurm/d/fed_mgr_state
chown -R slurm: /var/lib/slurm/d/fed_mgr_state
touch /var/run/slurm/d/slurmctld.pid
chown -R slurm: /var/run/slurm/d/slurmctld.pid
touch /var/run/slurm/d/slurmd.pid
chown -R slurm: /var/run/slurm/d/slurmd.pid
if [[ ! -f /home/config/slurm.conf ]]; then
echo "### Missing slurm.conf ###"
@ -129,6 +162,19 @@ _slurmctld() {
chmod 600 /etc/slurm/slurm.conf
fi
_openssl_jwt_key
if [ ! -f /.secret/jwt.key ]; then
echo "### Missing jwt.key ###"
exit 1
else
cp /.secret/jwt.key /etc/config/jwt.key
chown slurm: /etc/config/jwt.key
chmod 0400 /etc/config/jwt.key
fi
_generate_jwt_token
sudo yum install -y nc
sudo yum install -y procps
sudo yum install -y iputils
@ -149,6 +195,7 @@ _slurmctld() {
### main ###
_delete_secrets
_sshd_host
_ssh_worker
_munge_start
_copy_secrets

View File

@ -22,6 +22,8 @@ MpiDefault=none
SlurmctldPidFile=/var/run/slurm/d/slurmctld.pid
SlurmdPidFile=/var/run/slurm/d/slurmd.pid
ProctrackType=proctrack/linuxproc
AuthAltTypes=auth/jwt
AuthAltParameters=jwt_key=/etc/config/jwt.key
#PluginDir=
#CacheGroups=0
#FirstJobId=
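A quick way to confirm the controller picked up the new AuthAlt settings, assuming the compose service is named slurmctld: with auth/jwt active, slurmctld can mint tokens itself.
# Sketch: should print "SLURM_JWT=..." when auth/jwt is configured correctly
docker exec slurmctld scontrol token username=slurm lifespan=3600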

View File

@ -4,7 +4,7 @@ set -e
# Determine the system architecture dynamically
ARCH=$(uname -m)
SLURM_VERSION="24.05.3"
SLURM_JWT=daemon
SLURM_ACCT_DB_SQL=/slurm_acct_db.sql
# start sshd server
@ -57,7 +57,11 @@ _slurmdbd() {
slurm-perlapi-$SLURM_VERSION*.$ARCH.rpm \
slurm-slurmdbd-$SLURM_VERSION*.$ARCH.rpm
mkdir -p /var/spool/slurm/d /var/log/slurm /etc/slurm
chown -R slurm: /var/spool/slurm/d /var/log/slurm
mkdir -p /etc/config
chown -R slurm: /etc/config
if [[ ! -f /home/config/slurmdbd.conf ]]; then
echo "### Missing slurmdbd.conf ###"
exit
@ -67,8 +71,26 @@ _slurmdbd() {
chown slurm: /etc/slurm/slurmdbd.conf
chmod 600 /etc/slurm/slurmdbd.conf
fi
echo "Starting slurmdbd"
echo -n "checking for jwt.key"
while [ ! -f /.secret/jwt.key ]; do
echo -n "."
sleep 1
done
cp /.secret/jwt.key /etc/config/jwt.key
chown slurm: /etc/config/jwt.key
chmod 0400 /etc/config/jwt.key
echo ""
sudo yum install -y nc
sudo yum install -y procps
sudo yum install -y iputils
cp /etc/slurm/slurmdbd.conf /.secret/slurmdbd.conf
echo "Starting slurmdbd"
/usr/sbin/slurmdbd -Dvv
echo "Started slurmdbd"
}
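Once slurmdbd is up and a token is available, the accounting endpoints exposed through slurmrestd should answer as well; a hedged example against the dbv0.0.39 API used elsewhere in this commit:
# Assumes $SLURM_JWT holds a valid token (e.g. from scontrol token)
curl -s 'http://localhost:6820/slurmdb/v0.0.39/jobs' \
  -H "X-SLURM-USER-NAME:slurm" -H "X-SLURM-USER-TOKEN:$SLURM_JWT"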

View File

@ -14,7 +14,8 @@
# Authentication info
AuthType=auth/munge
#AuthInfo=/var/run/munge/munge.socket.2
# AuthAltTypes=auth/jwt
AuthAltParameters=jwt_key=/etc/config/jwt.key
# slurmDBD info
DbdAddr=slurmdbd
DbdHost=slurmdbd

View File

@ -1,10 +1,15 @@
FROM clustercockpit/slurm.base:24.05.3
LABEL org.opencontainers.image.authors="jan.eitzinger@fau.de"
ARG uid_u
ARG gid_g
ENV uid_u=${uid_u}
ENV gid_g=${gid_g}
# clean up
RUN rm -f /root/rpmbuild/RPMS/slurm-*.rpm \
&& yum clean all \
&& rm -rf /var/cache/yum
COPY docker-entrypoint.sh /docker-entrypoint.sh
ENTRYPOINT /docker-entrypoint.sh $uid_u $gid_g

View File

@ -4,6 +4,18 @@ set -e
# Determine the system architecture dynamically
ARCH=$(uname -m)
SLURM_VERSION="24.05.3"
SLURMRESTD="/tmp/slurmrestd.socket"
# SLURM_JWT=daemon
uid_u="${1:-}"
gid_g="${2:-}"
echo Your container args are: "$@"
# Change the uid
# usermod -u "${uid_u}" slurm
# Change the gid
# groupmod -g "${gid_g}" slurm
# start sshd server
_sshd_host() {
@ -14,7 +26,6 @@ _sshd_host() {
/usr/sbin/sshd
}
# start munge and generate key
# start munge using existing key
_munge_start_using_key() {
if [ ! -f /.secret/munge.key ]; then
@ -37,6 +48,48 @@ _munge_start_using_key() {
remunge
}
_enable_slurmrestd() {
cd /tmp
mkdir -p statesave
dd if=/dev/random of=/tmp/statesave/jwt_hs256.key bs=32 count=1
chown slurm:slurm /tmp/statesave/jwt_hs256.key
chmod 0600 /tmp/statesave/jwt_hs256.key
chown slurm:slurm /tmp/statesave
chmod 0755 /tmp/statesave
cat >/usr/lib/systemd/system/slurmrestd.service <<EOF
[Unit]
Description=Slurm REST daemon
After=network-online.target slurmctld.service
Wants=network-online.target
ConditionPathExists=/etc/slurm/slurm.conf

[Service]
Type=simple
EnvironmentFile=-/etc/sysconfig/slurmrestd
EnvironmentFile=-/etc/default/slurmrestd
# slurmrestd should not run as root or the slurm user.
# Please either use the -u and -g options in /etc/sysconfig/slurmrestd or
# /etc/default/slurmrestd, or explicitly set User and Group in this file
# to an unprivileged user to run as.
User=slurm
Restart=always
RestartSec=5
# Group=
# Default to listening on both the unix socket and the slurmrestd TCP port
ExecStart=/usr/sbin/slurmrestd -f /etc/config/slurmrestd.conf -a rest_auth/jwt \$SLURMRESTD_OPTIONS -vvvvvv -s dbv0.0.39,v0.0.39 unix:$SLURMRESTD 0.0.0.0:6820
# /usr/sbin/slurmrestd -f /etc/config/slurmrestd.conf -vvvvvv -a rest_auth/jwt -s dbv0.0.39,v0.0.39 -u slurm unix:$SLURMRESTD 0.0.0.0:6820
# Enable auth/jwt by default; comment out the line to disable it for slurmrestd
Environment="SLURM_JWT=daemon"
ExecReload=/bin/kill -HUP \$MAINPID

[Install]
WantedBy=multi-user.target
EOF
}
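The generated unit is not activated in this setup (the _enable_slurmrestd call further down stays commented out and the daemon is started in the foreground instead); on a systemd-based host it would be enabled roughly like this sketch:
# Sketch, only relevant on a host that actually runs systemd
systemctl daemon-reload
systemctl enable --now slurmrestd.service
systemctl status slurmrestd.service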
# run slurmrestd
_slurmrestd() {
cd /root/rpmbuild/RPMS/$ARCH
@ -46,20 +99,23 @@
slurm-torque-$SLURM_VERSION*.$ARCH.rpm \
slurm-slurmctld-$SLURM_VERSION*.$ARCH.rpm \
slurm-slurmrestd-$SLURM_VERSION*.$ARCH.rpm
echo -n "checking for slurmdbd.conf"
while [ ! -f /.secret/slurmdbd.conf ]; do
echo -n "."
sleep 1
done
echo ""
# mkdir -p /var/spool/slurm/ctld /var/spool/slurm/d /var/log/slurm /etc/slurm
# chown -R slurm: /var/spool/slurm/ctld /var/spool/slurm/d /var/log/slurm
mkdir -p /etc/config /var/spool/slurm /var/spool/slurm/restd /var/spool/slurm/restd/rest /var/run/slurm
chown -R slurm: /etc/config /var/spool/slurm /var/spool/slurm/restd /var/spool/slurm/restd/rest /var/run/slurm
chmod 755 /var/run/slurm
touch /var/log/slurmrestd.log
chown slurm: /var/log/slurmrestd.log
chown slurm: /tmp
chmod 777 /tmp
if [[ ! -f /home/config/slurmrestd.conf ]]; then
echo "### Missing slurmrestd.conf ###"
exit
@ -67,11 +123,31 @@
echo "### use provided slurmrestd.conf ###"
cp /home/config/slurmrestd.conf /etc/config/slurmrestd.conf
cp /home/config/slurm.conf /etc/config/slurm.conf
fi
echo -n "checking for jwt.key"
while [ ! -f /.secret/jwt.key ]; do
echo -n "."
sleep 1
done
sudo yum install -y nc
sudo yum install -y procps
sudo yum install -y iputils
cp /.secret/jwt.key /etc/config/jwt.key
chown slurm: /etc/config/jwt.key
chmod 0400 /etc/config/jwt.key
echo ""
sleep 2s
echo "Starting slurmrestd"
# _enable_slurmrestd
# sudo ln -s /usr/lib/systemd/system/slurmrestd.service /etc/systemd/system/multi-user.target.wants/slurmrestd.service
/usr/sbin/slurmrestd -f /etc/config/slurmrestd.conf -vvvvvv -s dbv0.0.39,v0.0.39 -u slurm unix:$SLURMRESTD 0.0.0.0:6820
echo "Started slurmrestd"
}
### main ###
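Besides TCP port 6820, slurmrestd now also listens on the unix socket /tmp/slurmrestd.socket, which sits on the shared /tmp volume added in the compose file; a hedged check directly against the socket:
# Assumes $SLURM_JWT holds a valid token and the socket path is visible where curl runs
curl -s --unix-socket /tmp/slurmrestd.socket 'http://localhost/slurm/v0.0.39/ping' \
  -H "X-SLURM-USER-NAME:slurm" -H "X-SLURM-USER-TOKEN:$SLURM_JWT"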

View File

@ -78,6 +78,10 @@ _slurmd() {
fi
echo "found slurm.conf"
sudo yum install -y nc
sudo yum install -y procps
sudo yum install -y iputils
mkdir -p /var/spool/slurm/d /etc/slurm /var/run/slurm/d /var/log/slurm
chown slurm: /var/spool/slurm/d /var/run/slurm/d /var/log/slurm
cp /home/config/cgroup.conf /etc/slurm/cgroup.conf