Exemples de scripts SLURM
job séquentiel
#!/bin/bash
# Sequential job: one task on one core in the 'test' partition.
#SBATCH --job-name=job_seq
#SBATCH --output=job.%j.out
#SBATCH --error=job.%j.err
#SBATCH --partition=test
#SBATCH --ntasks=1
#SBATCH --mem=100M
#SBATCH -t 00:15:00

# Print the allocated node(s). Quoted: nodelists such as node[01-04]
# contain glob characters and could otherwise be expanded by the shell.
echo "${SLURM_NODELIST}"

# Start from a clean module environment, then load the compiler toolchain.
module purge
module load GCC/4.9.2

./executable
job MPI
#!/bin/bash
# MPI job: 4 nodes x 5 tasks/node = 20 MPI ranks, 1 CPU per rank.
#SBATCH --job-name=job_mpi
#SBATCH --output=job.%j.out
#SBATCH --error=job.%j.err
#SBATCH --partition=haswell
#SBATCH --nodes=4
#SBATCH --ntasks-per-node=5
#SBATCH --cpus-per-task=1
#SBATCH --mem-per-cpu=500M
#SBATCH -t 00:15:00

# Print the allocated node(s). Quoted: nodelists such as node[01-04]
# contain glob characters and could otherwise be expanded by the shell.
echo "${SLURM_NODELIST}"

# Clean module environment, then load the MPI stack matching the build.
module purge
module load OpenMPI/1.8.4-GCC-4.9.2

# Launch one MPI rank per Slurm task (quoted to protect the expansion).
mpirun -np "${SLURM_NTASKS}" ./executable < input.dat > res.out
job OpenMP
#!/bin/bash
# OpenMP job: a single task spanning 16 threads on one node.
#SBATCH --job-name=job_omp
#SBATCH --output=job.%j.out
#SBATCH --error=job.%j.err
#SBATCH --partition=haswell
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=16
#SBATCH --mem=1G
#SBATCH -t 00:15:00

# Print the allocated node(s). Quoted: nodelists such as node[01-04]
# contain glob characters and could otherwise be expanded by the shell.
echo "${SLURM_NODELIST}"

# Clean module environment, then load the toolchain.
module purge
module load foss/2018b

# Match the OpenMP thread count to the CPUs Slurm allocated to the task.
export OMP_NUM_THREADS="${SLURM_CPUS_PER_TASK}"

./executable < input.dat > res.out
job Hybride MPI/OpenMP
#!/bin/bash
# Hybrid MPI/OpenMP job: 4 nodes x 8 tasks/node = 32 MPI ranks,
# each rank running 2 OpenMP threads.
#SBATCH --job-name=job_hybrid
#SBATCH --output=job.%j.out
#SBATCH --error=job.%j.err
#SBATCH --partition=haswell
#SBATCH --nodes=4
#SBATCH --ntasks-per-node=8
#SBATCH --cpus-per-task=2
#SBATCH --mem=1G
#SBATCH -t 00:15:00

# Print the allocated node(s). Quoted: nodelists such as node[01-04]
# contain glob characters and could otherwise be expanded by the shell.
echo "${SLURM_NODELIST}"

# Clean module environment, then load the toolchain.
module purge
module load foss/2018b

# One OpenMP thread per CPU allocated to each task.
export OMP_NUM_THREADS="${SLURM_CPUS_PER_TASK}"
echo "${OMP_NUM_THREADS}"

# Launch one MPI rank per Slurm task (quoted to protect the expansion).
mpirun -np "${SLURM_NTASKS}" ./executable
job GPU
#!/bin/bash
# GPU job: 2 Quadro RTX 6000 GPUs on one node, one task per GPU.
# NOTE: the shebang line was missing in the original example; without it
# sbatch cannot determine the interpreter and the #SBATCH lines may be
# ignored, so it is required here as in the other examples.
#SBATCH --job-name=job_gpu
#SBATCH --output=job.%j.out
#SBATCH --error=job.%j.err
#SBATCH --partition=gpu
#SBATCH --gres=gpu:quadro_rtx_6000:2
#SBATCH --nodes=1
#SBATCH --mem=180G
#SBATCH --ntasks-per-node=2
#SBATCH --cpus-per-task=1
#SBATCH -t 12:00:00

# Print the allocated node(s). Quoted: nodelists such as node[01-04]
# contain glob characters and could otherwise be expanded by the shell.
echo "${SLURM_NODELIST}"

# Clean module environment, then load the CUDA-enabled toolchain.
module purge
module load fosscuda/2020b

python Pytorch.py
job interactif
# Interactive session: 1 node, 2 tasks, 1G memory in the 'test' partition;
# --pty attaches a pseudo-terminal running your login shell ("$SHELL" is
# quoted so a path containing spaces cannot be word-split).
srun -N 1 -n 2 -p test --mem=1G --pty "$SHELL"