
Slurm 是一个开源的、高度可扩展的工作负载调度器,专为高性能计算集群设计。它的名字是 Simple Linux Utility for Resource Management 的缩写,但其功能远不止“简单”二字。在现代HPC系统中,Slurm 扮演着“集群大脑”或“数字神经中枢”的角色,核心作用涵盖资源分配、作业调度与队列管理。
Slurm 是学术界和工业界构建HPC环境时事实上的标准调度系统,被广泛应用于高校超算中心、国家实验室以及企业研发集群等典型场景。
其重要性体现在统一的资源管理、公平高效的作业调度以及完善的作业记账与审计能力上。
部署 Munge 时,需确保管理节点与计算(登录)节点上 munge 用户的 UID 和 GID 完全一致,且所有节点都需要安装 Munge:
# Create the munge user/group with a fixed uid/gid (must match on every node).
groupadd -g 1108 munge
useradd -m -c "Munge Uid 'N' Gid Emporium" -d /var/lib/munge -u 1108 -g munge -s /sbin/nologin munge

# Install an entropy daemon so key generation does not block waiting on /dev/random.
dnf install -y rng-tools
# rngd -r /dev/urandom   # optional one-off foreground run

# Edit the rngd service unit as follows:
nano /usr/lib/systemd/system/rngd.service
# In the [Service] section set:
#   ExecStart=/sbin/rngd -f -r /dev/urandom
systemctl daemon-reload
systemctl start rngd
systemctl enable rngd

# Install MUNGE (EPEL provides the packages on Rocky Linux 9).
dnf install epel-release -y
dnf install munge munge-libs munge-devel -y
# NOTE: if munge-devel is not in the official Rocky 9 repos, fetch it from:
# https://rockylinux.pkgs.org/9/rockylinux-devel-x86_64/munge-devel-0.5.13-13.el9.x86_64.rpm.html

# Generate the shared key on the management node, then copy it to every node.
/usr/sbin/create-munge-key -r
dd if=/dev/urandom bs=1 count=1024 > /etc/munge/munge.key
scp -p /etc/munge/munge.key root@login01:/etc/munge/
scp -p /etc/munge/munge.key root@compute01:/etc/munge/
scp -p /etc/munge/munge.key root@dcv01:/etc/munge/

# On ALL nodes: set ownership/permissions and start the daemon.
chown munge: /etc/munge/munge.key
chmod 400 /etc/munge/munge.key
systemctl start munge
systemctl enable munge

# Verify that each node can authenticate against the control node.
munge -n
munge -n | unmunge
munge -n | ssh compute01 unmunge
remunge

# Install MariaDB for the Slurm accounting backend (run on the management node):
dnf -y install mariadb-server
systemctl start mariadb
systemctl enable mariadb

# Generate a random root password and record it — otherwise it is lost forever.
ROOT_PASS=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16)
mysql -e "CREATE USER root IDENTIFIED BY '${ROOT_PASS}'"
echo "MariaDB root password: ${ROOT_PASS}"   # store this somewhere safe

# Create the accounting database and the slurm DB user.
# (The original article typed these SQL statements at an interactive mysql
# prompt; a heredoc makes the step scriptable.)
mysql -uroot -p"${ROOT_PASS}" -e 'create database slurm_hpc_db'
mysql -uroot -p"${ROOT_PASS}" <<'SQL'
create user slurm;
grant all on slurm_hpc_db.* TO 'slurm'@'localhost' identified by '123456' with grant option;
flush privileges;
SQL

# Create the slurm system group with a fixed gid (must match on every node).
groupadd -g 1109 slurm
# Create the slurm user with a fixed uid (must match on every node).
useradd -m -c "Slurm manager" -d /var/lib/slurm -u 1109 -g slurm -s /bin/bash slurm

# Build dependencies for compiling the Slurm RPMs.
dnf install gcc gcc-c++ readline-devel perl-ExtUtils-MakeMaker pam-devel rpm-build mariadb-devel python3 -y
# NOTE: if mariadb-devel is not in the official Rocky 9 repos, search for the
# package on https://rockylinux.pkgs.org/ (the original article mistakenly
# linked the munge-devel package page here).

# Download the source tarball and build binary RPMs from it.
wget https://download.schedmd.com/slurm/slurm-25.05.2.tar.bz2
dnf install rpm-build -y
rpmbuild -ta --nodeps slurm-25.05.2.tar.bz2
mkdir -p /root/rpmbuild/RPMS/

# scp cannot create the parent path on the remote side — create it first.
ssh root@login01 mkdir -p /root/rpmbuild/RPMS/
ssh root@compute01 mkdir -p /root/rpmbuild/RPMS/
scp -r /root/rpmbuild/RPMS/x86_64 root@login01:/root/rpmbuild/RPMS/x86_64
scp -r /root/rpmbuild/RPMS/x86_64 root@compute01:/root/rpmbuild/RPMS/x86_64
cd /root/rpmbuild/RPMS/x86_64/
# Install the freshly built RPMs (run on every node, from the RPMS/x86_64 dir).
dnf localinstall slurm-*

# Copy the sample configuration files into place.
cp /etc/slurm/cgroup.conf.example /etc/slurm/cgroup.conf
cp /etc/slurm/slurm.conf.example /etc/slurm/slurm.conf
cp /etc/slurm/slurmdbd.conf.example /etc/slurm/slurmdbd.conf   # slurmdbd.conf is optional on nodes that do not run slurmdbd

# Distribute the configuration to the other nodes.
scp -r /etc/slurm/*.conf root@login01:/etc/slurm/
scp -r /etc/slurm/*.conf root@compute01:/etc/slurm/

# Create the runtime directories Slurm expects, owned by the slurm user.
# (-p makes the step idempotent if a directory already exists.)
mkdir -p /var/spool/slurmd
chown slurm: /var/spool/slurmd
mkdir -p /var/log/slurm
chown slurm: /var/log/slurm
mkdir -p /var/spool/slurmctld
chown slurm: /var/spool/slurmctld
# NOTE(review): /var/run is a tmpfs cleared at boot — consider a tmpfiles.d
# entry so /var/run/slurm is recreated automatically after a reboot.
mkdir -p /var/run/slurm
chown slurm: /var/run/slurm

# On the management node:
systemctl start slurmdbd
systemctl enable slurmdbd
systemctl start slurmctld
systemctl enable slurmctld

# On all nodes:
systemctl start slurmd
systemctl enable slurmd

# 1. slurmdbd startup error (1):
#    slurmdbd: fatal: slurmdbd.conf file /etc/slurm/slurmdbd.conf should be 600 is 644 acc... others
# Fix — use the full path so the command works from any directory
# (the original used a bare relative "slurmdbd.conf"):
chmod 600 /etc/slurm/slurmdbd.conf
systemctl restart slurmdbd

# 2. slurmdbd startup error (2):
#    slurmdbd: fatal: slurmdbd.conf not owned by SlurmUser root!=slurm
# Fix:
chown slurm: /etc/slurm/slurmdbd.conf
systemctl restart slurmdbd

# ---- /etc/slurm/slurm.conf ----
ClusterName=cluster
SlurmctldHost=manage01
ProctrackType=proctrack/cgroup
ReturnToService=1
SlurmctldPidFile=/var/run/slurm/slurmctld.pid
SlurmctldPort=6817
SlurmdPidFile=/var/run/slurm/slurmd.pid
SlurmdPort=6818
SlurmdSpoolDir=/var/spool/slurmd
SlurmUser=slurm
StateSaveLocation=/var/spool/slurmctld
SwitchType=switch/none
TaskPlugin=task/affinity
InactiveLimit=0
KillWait=30
MinJobAge=300
SlurmctldTimeout=120
SlurmdTimeout=300
Waittime=0
# Scheduling: backfill scheduler with per-core consumable resources.
SchedulerType=sched/backfill
SelectType=select/cons_tres
SelectTypeParameters=CR_Core
# Accounting via slurmdbd on the management node.
AccountingStorageEnforce=associations,limits,qos
AccountingStorageHost=manage01
AccountingStoragePass=/var/run/munge/munge.socket.2
AccountingStoragePort=6819
AccountingStorageType=accounting_storage/slurmdbd
# Job completion records written directly to MySQL.
JobCompHost=localhost
JobCompLoc=slurm_hpc_db
JobCompPass=123456
JobCompPort=3306
JobCompType=jobcomp/mysql
JobCompUser=slurm
JobAcctGatherFrequency=30
JobAcctGatherType=jobacct_gather/linux
SlurmctldDebug=info
SlurmctldLogFile=/var/log/slurm/slurmctld.log
SlurmdDebug=info
SlurmdLogFile=/var/log/slurm/slurmd.log
# HPC NODES
# NOTE(review): Procs is an alias for CPUs — Procs=1 contradicts CPUs=8 on
# every line below; verify against the actual hardware (slurmd -C prints the
# correct values). RealMemory=200 (MB) also looks low for an 8-CPU node.
NodeName=manage01 NodeAddr=192.168.1.100 CPUs=8 CoresPerSocket=1 ThreadsPerCore=1 RealMemory=200 Procs=1 State=UNKNOWN
NodeName=login01 NodeAddr=192.168.1.101 CPUs=8 CoresPerSocket=1 ThreadsPerCore=1 RealMemory=200 Procs=1 State=UNKNOWN
NodeName=compute01 NodeAddr=192.168.1.102 CPUs=8 CoresPerSocket=1 ThreadsPerCore=1 RealMemory=1024 Procs=1 State=UNKNOWN
NodeName=dcv01 NodeAddr=192.168.1.12 CPUs=8 CoresPerSocket=4 ThreadsPerCore=1 RealMemory=4096 Procs=1 State=UNKNOWN
# PARTITIONS
PartitionName=compute Nodes=compute01 Default=YES MaxTime=INFINITE State=UP
PartitionName=dcv Nodes=dcv01 Default=NO MaxTime=INFINITE State=UP
PartitionName=debug Nodes=dcv01,compute01,login01,manage01 Default=NO MaxTime=INFINITE State=UP

# ---- /etc/slurm/slurmdbd.conf ----
# Authentication info
AuthType=auth/munge
AuthInfo=/var/run/munge/munge.socket.2
# slurmDBD info
DbdAddr=localhost
DbdHost=localhost
SlurmUser=slurm
DebugLevel=verbose
#DefaultQOS=normal,standby
LogFile=/var/log/slurm/slurmdbd.log
PidFile=/var/run/slurm/slurmdbd.pid
#PluginDir=/usr/lib/slurm
#PrivateData=accounts,users,usage,jobs
# Database info — must match the MariaDB user/password/database created earlier.
StorageType=accounting_storage/mysql
StoragePass=123456
StorageUser=slurm
StorageLoc=slurm_hpc_db

# ---- /etc/slurm/cgroup.conf ----
CgroupMountpoint=/sys/fs/cgroup
# Constrain jobs to their allocated cores, devices, RAM and swap.
ConstrainCores=yes
ConstrainDevices=yes
ConstrainRAMSpace=yes
ConstrainSwapSpace=yes

Slurm 是现代HPC集群不可或缺的核心中间件。通过本教程,您将学习如何在 Rocky Linux 9 系统上从源码编译并安装配置这一强大的调度系统,为构建您自己的高性能计算环境奠定坚实的基础。
原创声明:本文系作者授权腾讯云开发者社区发表,未经许可,不得转载。
如有侵权,请联系 cloudcommunity@tencent.com 删除。
原创声明:本文系作者授权腾讯云开发者社区发表,未经许可,不得转载。
如有侵权,请联系 cloudcommunity@tencent.com 删除。