Sample Slurm Submit Scripts

Sample Script

This first script uses the SLURM_JOBID environment variable to create a job-specific directory in which to store the results of the job.


#!/bin/bash
#SBATCH --job-name=mclz             # Job name
#SBATCH --partition=batch           # Partition (queue) name
#SBATCH --nodes=1                   # Number of nodes
#SBATCH --ntasks-per-node=1         # Number of tasks to run on each node
#SBATCH --mem-per-cpu=200mb         # Memory per processor
#SBATCH --time=24:00:00             # Time limit hrs:min:sec
#SBATCH --output=mclz.%j.out        # Standard output log
#SBATCH --error=mclz.%j.err         # Standard error log

JOBDIR=${SLURM_JOBID}
mkdir $JOBDIR

export PATH=./bin:$PATH
time srun ./bin/mclz.sh -n -I C -Z 6 -A He -L TA -N "TA"

cp c6+he* $JOBDIR
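
Assuming the script above is saved as mclz.sbatch (the filename is arbitrary), it can be submitted and monitored as follows; the job directory and the mclz.<jobid>.out/.err logs are named after the job ID that sbatch reports.

sbatch mclz.sbatch            # Prints: Submitted batch job <jobid>
squeue -u $USER               # Check the state of your queued and running jobs
ls <jobid>                    # After the job finishes, the copied results are in the job-ID directory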

Sample OpenMPI Script


#!/bin/sh 
#SBATCH --nodes=5                      # Number of nodes
#SBATCH --ntasks-per-node=6            # Number of tasks per node
#SBATCH --mem=20                       # Memory per node (MB)
#SBATCH -p batch                       # Partition to use
#SBATCH --output=example.%j.out        # Standard output log
#SBATCH --error=example.%j.err         # Standard error log

export PMIX_MCA_psec=native
export OMPI_MCA_btl="tcp,self"
srun centmpi
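
This script has srun launch nodes x ntasks-per-node = 30 MPI ranks of the centmpi executable, which must exist in the submit directory beforehand. As a sketch only, assuming a single C source file centmpi.c (hypothetical name) and site-provided gcc/OpenMPI modules, the executable could be built with:

module load gcc openmpi               # Module names vary by site; check 'module avail'
mpicc -O2 -o centmpi centmpi.c        # mpicc is the OpenMPI C compiler wrapper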

Sample Intel MPI Script


#!/bin/bash
#SBATCH --job-name=water0
#SBATCH --partition=batch             # Partition (queue) name
#SBATCH --nodes=1                     # Number of nodes
#SBATCH --ntasks=2                    # Number of MPI ranks
#SBATCH --ntasks-per-node=2           # Number of tasks to run on each node
#SBATCH --cpus-per-task=1             # Number of cores per MPI rank
#SBATCH --ntasks-per-core=1           # Number of tasks to run on each core
#SBATCH --output=water0-%j.out        # Standard output log
#SBATCH --error=water0-%j.err         # Standard error log

export PMIX_MCA_psec=native           # Required for munge to work
#export OMPI_MCA_btl="tcp,self"        # Required for OpenMPI

## The three module commands below must be run before compiling the code.
## They also need to be in effect when this job runs: either run them on the
## command line before submitting, or remove the '###' to uncomment them here.
###module purge
###module load intel/2022.1.0
###module load impi/2021.6.0

unset I_MPI_PMI_LIBRARY               # When using impi and mpirun

mpirun -np 2 ./water0_MPI.exe
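
The water0_MPI.exe executable must be compiled with the Intel toolchain before this script is submitted. A minimal sketch, assuming a single C source file water0_MPI.c (hypothetical name) and the same modules the script expects:

module purge
module load intel/2022.1.0
module load impi/2021.6.0
mpiicc -O2 -o water0_MPI.exe water0_MPI.c     # mpiicc is the Intel MPI wrapper for the Intel C compiler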


Sample OpenMP/Intel MPI Hybrid Script


#!/bin/bash
#SBATCH --job-name=test1
#SBATCH --partition=batch             # Partition (queue) name
#SBATCH --nodes=1                     # Number of nodes
#SBATCH --ntasks=4                    # Number of MPI ranks
#SBATCH --ntasks-per-node=4           # Number of tasks to run on each node
#SBATCH --output=test1-%j.out         # Standard output log
#SBATCH --error=test1-%j.err          # Standard error log

# This code is compiled with the Intel MPI compilers, so swap out the default gcc/OpenMPI modules.
# You can also run these three module commands from the command line before submitting your job;
# if you do, comment them out here.
module purge
module load intel/2022.1.0
module load impi/2021.6.0

export PMIX_MCA_psec=native           # Required for munge to work
#export OMPI_MCA_btl="tcp,self"        # Required for OpenMPI
unset I_MPI_PMI_LIBRARY               # When using impi and mpirun

export OMP_NUM_THREADS=4
export OMP_SCHEDULE="DYNAMIC,32"

JOBDIR=${SLURM_JOBID}
mkdir $JOBDIR

cp vrrmm_OHHe $JOBDIR
cp vrrmm.in $JOBDIR
cp *.dat $JOBDIR
cd $JOBDIR

mpirun -np $SLURM_NTASKS ./vrrmm_OHHe     # One MPI rank per Slurm task; each rank runs OMP_NUM_THREADS OpenMP threads
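
The hybrid executable vrrmm_OHHe has to be built with both MPI and OpenMP support. A sketch, assuming a Fortran source file vrrmm.f90 (hypothetical name) and the Intel modules loaded above:

mpiifort -O2 -qopenmp -o vrrmm_OHHe vrrmm.f90   # -qopenmp enables OpenMP in the Intel compilers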
