#!/bin/bash
#!
#! Example SLURM job script for Peta4-Skylake (Skylake CPUs, OPA)
#! Last updated: Thu 21 Jul 6:20:00 GMT 2022
#!
#!#############################################################
#!#### Modify the options in this section as appropriate ######
#!#############################################################
#! sbatch directives begin here ###############################
#! Name of the job:
#SBATCH -J REDACTED
#! Which project should be charged:
#SBATCH -A REDACTED-SL3-CPU
#! How many whole nodes should be allocated?
#SBATCH --nodes=1
#! How many (MPI) tasks will there be in total? (<= nodes*32)
#! The skylake/skylake-himem nodes have 32 CPUs (cores) each.
#SBATCH --ntasks=1
#SBATCH --ntasks-per-node=1
#! How many CPUs per task? (the partition is set further below):
#SBATCH --cpus-per-task=20
#! How much wallclock time will be required?
#SBATCH --time=12:00:00
#! What types of email messages do you wish to receive?
#SBATCH --mail-type=ALL
#! Uncomment this to prevent the job from being requeued (e.g. if
#! interrupted by node failure or system downtime):
##SBATCH --no-requeue
#! For 6GB per CPU, set "-p skylake"; for 12GB per CPU, set "-p skylake-himem":
#SBATCH -p icelake-himem
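#! The logging further down refers to $SLURM_ARRAY_TASK_ID, which is only set for
#! array jobs. A minimal sketch of running one task per subject would be to
#! uncomment a directive like the following (the 101-110 range is an assumption,
#! not taken from this job):
##SBATCH --array=101-110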
#! sbatch directives end here (put any additional directives above this line)
#! Notes:
#! Charging is determined by core number*walltime.
#! The --ntasks value refers to the number of tasks to be launched by SLURM only. This
#! usually equates to the number of MPI tasks launched. Reduce this from nodes*32 if
#! demanded by memory requirements, or if OMP_NUM_THREADS>1.
#! Each task is allocated 1 core by default, and each core is allocated 5980MB (skylake)
#! and 12030MB (skylake-himem). If this is insufficient, also specify
#! --cpus-per-task and/or --mem (the latter specifies MB per node).
#! Number of nodes and tasks per node allocated by SLURM (do not change):
numnodes=$SLURM_JOB_NUM_NODES
numtasks=$SLURM_NTASKS
mpi_tasks_per_node=$(echo "$SLURM_TASKS_PER_NODE" | sed -e 's/^\([0-9][0-9]*\).*$/\1/')
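#! For illustration only (example values, not this job's): with 2 nodes and 16
#! tasks per node SLURM typically sets SLURM_TASKS_PER_NODE to "16(x2)", and the
#! sed above keeps just the leading number, i.e. mpi_tasks_per_node=16.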
#! ############################################################
#! Modify the settings below to specify the application's environment, location
#! and launch method:
#! Optionally modify the environment seen by the application
#! (note that SLURM reproduces the environment at submission irrespective of ~/.bashrc):
. /etc/profile.d/modules.sh # Leave this line (enables the module command)
module purge                # Removes all modules still loaded
module load rhel8/default-icl # REQUIRED - loads the basic environment
#! Insert additional module load commands after this line if needed:
#! Application settings (dataset directory, output directory, licence file, subject):
DATASET_DIR="openpain.org/subacute_longitudinal_study/"
OUTPUT_DIR="output/"
LICENSE="license.txt"
SUBJECT=101
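# For an array job, the subject label could instead be derived from the task ID,
# e.g. SUBJECT=${SLURM_ARRAY_TASK_ID} (this assumes the subject labels match the
# array indices; here it is simply hard-coded to 101).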
# Compose the command line
export SINGULARITYENV_TEMPLATEFLOW_HOME=$HOME/.cache/templateflow
cmd="singularity run --cleanenv fmriprep-22.0.0.simg $DATASET_DIR $OUTPUT_DIR participant --participant-label $SUBJECT --nthreads 16 --verbose --fs-license-file $LICENSE --mem_mb 30000 --omp-nthreads 8 --use-aroma"
# Setup done, run the command
echo Running task ${SLURM_ARRAY_TASK_ID}
echo Commandline: $cmd
eval $cmd
exitcode=$?
# Output results to a tab-separated table
printf 'sub-%s\t%s\t%s\n' "$SUBJECT" "${SLURM_ARRAY_TASK_ID}" "$exitcode" \
    >> "${SLURM_JOB_NAME}.${SLURM_ARRAY_JOB_ID}.tsv"
echo Finished task ${SLURM_ARRAY_TASK_ID} with exit code $exitcode
exit $exitcode
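#! Note: everything below this exit is the remaining tail of the cluster's
#! template script; it is never reached by this job.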
#! Work directory (i.e. where the job will run):
# workdir="$SLURM_SUBMIT_DIR" # The value of SLURM_SUBMIT_DIR sets workdir to the directory
#                             # in which sbatch is run.
#! Are you using OpenMP (NB this is unrelated to OpenMPI)? If so increase this
#! safe value to no more than 32:
# export OMP_NUM_THREADS=1
#! Number of MPI tasks to be started by the application per node and in total (do not change):
# np=$((numnodes * mpi_tasks_per_node))
#! The following variables define a sensible pinning strategy for Intel MPI tasks -
#! this should be suitable for both pure MPI and hybrid MPI/OpenMP jobs:
# export I_MPI_PIN_DOMAIN=omp:compact # Domains are $OMP_NUM_THREADS cores in size
# export I_MPI_PIN_ORDER=scatter # Adjacent domains have minimal sharing of caches/sockets
#! Notes:
#! 1. These variables influence Intel MPI only.
#! 2. Domains are non-overlapping sets of cores which map 1-1 to MPI tasks.
#! 3. I_MPI_PIN_PROCESSOR_LIST is ignored if I_MPI_PIN_DOMAIN is set.
#! 4. If MPI tasks perform better when sharing caches/sockets, try I_MPI_PIN_ORDER=compact.
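#! Worked illustration (example numbers only, not this job's settings): on a
#! 32-core node with OMP_NUM_THREADS=8, omp:compact gives four 8-core domains,
#! hence 4 MPI tasks per node, and I_MPI_PIN_ORDER=scatter places neighbouring
#! domains on different sockets where possible.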
#! Uncomment one choice for CMD below (add mpirun/mpiexec options if necessary):
#! Choose this for a MPI code (possibly using OpenMP) using Intel MPI.
# CMD="mpirun -ppn $mpi_tasks_per_node -np $np $application $options"
# CMD="${SINGULARITY_CMD}"
#! Choose this for a pure shared-memory OpenMP parallel program on a single node:
#! (OMP_NUM_THREADS threads will be created):
#CMD="$application $options"
#! Choose this for a MPI code (possibly using OpenMP) using OpenMPI:
#CMD="mpirun -npernode $mpi_tasks_per_node -np $np $application $options"
###############################################################
### You should not have to change anything below this line ####
###############################################################
cd $workdir
echo -e "Changed directory to `pwd`.\n"
JOBID=$SLURM_JOB_ID
echo -e "JobID: $JOBID\n======"
echo "Time: `date`"
echo "Running on master node: `hostname`"
echo "Current directory: `pwd`"
if [ "$SLURM_JOB_NODELIST" ]; then
    #! Create a machine file:
    export NODEFILE=`generate_pbs_nodefile`
    cat $NODEFILE | uniq > machine.file.$JOBID
    echo -e "\nNodes allocated:\n================"
    echo `cat machine.file.$JOBID | sed -e 's/\..*$//g'`
fi
echo -e "\nnumtasks=$numtasks, numnodes=$numnodes, mpi_tasks_per_node=$mpi_tasks_per_node (OMP_NUM_THREADS=$OMP_NUM_THREADS)"
echo -e "\nExecuting command:\n==================\n$CMD\n"
eval $CMD