Slurm script fMRIPrep

#!/bin/bash
#!
#! Example SLURM job script for Peta4-Skylake (Skylake CPUs, OPA),
#! adapted here to run fMRIPrep on the icelake-himem partition.
#! Last updated: Thu 21 Jul 6:20:00 GMT 2022
#!

#!#############################################################
#!#### Modify the options in this section as appropriate ######
#!#############################################################

#! sbatch directives begin here ###############################
#! Name of the job:
#SBATCH -J REDACTED
#! Which project should be charged:
#SBATCH -A REDACTED-SL3-CPU
#! How many whole nodes should be allocated?
#SBATCH --nodes=1
#! How many (MPI) tasks will there be in total? (<= nodes * cores per node)
#! (The original template's skylake/skylake-himem nodes have 32 CPUs (cores) each.)
#SBATCH --ntasks=1
#SBATCH --ntasks-per-node=1
#! fMRIPrep is multi-threaded, so the single task is given several CPUs:
#SBATCH --cpus-per-task=20
#! How much wallclock time will be required?
#SBATCH --time=12:00:00
#! What types of email messages do you wish to receive?
#SBATCH --mail-type=ALL
#! Uncomment this to prevent the job from being requeued (e.g. if
#! interrupted by node failure or system downtime):
##SBATCH --no-requeue

#! Which partition? This job requests the high-memory Ice Lake nodes
#! (the original template offered "-p skylake" for 6GB per CPU or
#! "-p skylake-himem" for 12GB per CPU):
#SBATCH -p icelake-himem

#! sbatch directives end here (put any additional directives above this line)
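
#! How to submit (a sketch, not executed by the job itself): assuming this
#! file is saved as fmriprep_slurm.sh (a hypothetical name), it would be
#! submitted from a login node with
#!     sbatch fmriprep_slurm.sh
#! or, if the array sketch further below is enabled, with e.g.
#!     sbatch --array=0-3 fmriprep_slurm.sh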

#! Notes:
#! Charging is determined by core number*walltime.
#! The --ntasks value refers to the number of tasks to be launched by SLURM only. This
#! usually equates to the number of MPI tasks launched. Reduce this from nodes*32 if
#! demanded by memory requirements, or if OMP_NUM_THREADS>1.
#! Each task is allocated 1 core by default, and each core is allocated 5980MB (skylake)
#! and 12030MB (skylake-himem). If this is insufficient, also specify
#! --cpus-per-task and/or --mem (the latter specifies MB per node).

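#! As the note above says, memory can also be requested explicitly with --mem.
#! A hedged, commented-out example (60000 MB is an illustrative figure, not a
#! measured requirement for this dataset):
##SBATCH --mem=60000
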
#! Number of nodes and tasks per node allocated by SLURM (do not change):
numnodes=$SLURM_JOB_NUM_NODES
numtasks=$SLURM_NTASKS
mpi_tasks_per_node=$(echo "$SLURM_TASKS_PER_NODE" | sed -e 's/^\([0-9][0-9]*\).*$/\1/')
#! ############################################################
#! Modify the settings below to specify the application's environment, location
#! and launch method:


#! Optionally modify the environment seen by the application
#! (note that SLURM reproduces the environment at submission irrespective of ~/.bashrc):
. /etc/profile.d/modules.sh                # Leave this line (enables the module command)
module purge                               # Removes all modules still loaded
module load rhel8/default-icl              # REQUIRED - loads the basic environment

#! Insert additional module load commands after this line if needed:

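#! A hedged, commented-out example of such an additional module (the module
#! name is hypothetical; only load it if singularity/apptainer is not already
#! available in the default environment):
## module load singularity
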
#! Paths to the BIDS dataset, the output directory and the FreeSurfer
#! license file (relative to the directory the job runs in):
DATASET_DIR="openpain.org/subacute_longitudinal_study/"
OUTPUT_DIR="output/"
LICENSE="license.txt"

#! BIDS participant label to preprocess:
SUBJECT=101

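#! A sketch of deriving SUBJECT from the array task ID instead, so a single
#! submission covers several participants (the labels below are placeholders,
#! and this requires submitting with --array, e.g. "sbatch --array=0-3 ..."):
## SUBJECTS=(101 102 103 104)
## SUBJECT=${SUBJECTS[$SLURM_ARRAY_TASK_ID]}
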
# Make the TemplateFlow cache in the home directory visible inside the container
export SINGULARITYENV_TEMPLATEFLOW_HOME=$HOME/.cache/templateflow
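
#! If the compute nodes have no internet access, the TemplateFlow cache may
#! need to be populated beforehand on a login node. A hedged sketch, assuming
#! the templateflow Python package is installed there (MNI152NLin2009cAsym is
#! fMRIPrep's default output space; --use-aroma typically also needs
#! MNI152NLin6Asym):
##   python -c "from templateflow import api; [api.get(t) for t in ('MNI152NLin2009cAsym', 'MNI152NLin6Asym')]"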

# Compose the fMRIPrep command line
cmd="singularity run --cleanenv fmriprep-22.0.0.simg $DATASET_DIR $OUTPUT_DIR participant --participant-label $SUBJECT --nthreads 16 --verbose --fs-license-file $LICENSE --mem_mb 30000 --omp-nthreads 8 --use-aroma"

# Setup done, run the command
# (SLURM_ARRAY_TASK_ID and SLURM_ARRAY_JOB_ID are only set when the job is
#  submitted as an array job; otherwise they expand to empty strings.)
echo "Running task ${SLURM_ARRAY_TASK_ID}"
echo "Commandline: $cmd"
eval $cmd
exitcode=$?


# Output results to a table
echo "sub-$SUBJECT   ${SLURM_ARRAY_TASK_ID}    $exitcode" \
      >> ${SLURM_JOB_NAME}.${SLURM_ARRAY_JOB_ID}.tsv
echo "Finished task ${SLURM_ARRAY_TASK_ID} with exit code $exitcode"
exit $exitcode
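
#! NOTE: the script exits above, so everything from here on is the unmodified
#! generic tail of the original template and is never executed (in particular,
#! $workdir and $CMD are never set for this job).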

#! Work directory (i.e. where the job will run):
# workdir="$SLURM_SUBMIT_DIR"  # The value of SLURM_SUBMIT_DIR sets workdir to the directory
                               # in which sbatch is run.

#! Are you using OpenMP (NB this is unrelated to OpenMPI)? If so increase this
#! safe value to no more than 32:
# export OMP_NUM_THREADS=1

#! Number of MPI tasks to be started by the application per node and in total (do not change):
# np=$[${numnodes}*${mpi_tasks_per_node}]

#! The following variables define a sensible pinning strategy for Intel MPI tasks -
#! this should be suitable for both pure MPI and hybrid MPI/OpenMP jobs:
# export I_MPI_PIN_DOMAIN=omp:compact # Domains are $OMP_NUM_THREADS cores in size
# export I_MPI_PIN_ORDER=scatter # Adjacent domains have minimal sharing of caches/sockets
#! Notes:
#! 1. These variables influence Intel MPI only.
#! 2. Domains are non-overlapping sets of cores which map 1-1 to MPI tasks.
#! 3. I_MPI_PIN_PROCESSOR_LIST is ignored if I_MPI_PIN_DOMAIN is set.
#! 4. If MPI tasks perform better when sharing caches/sockets, try I_MPI_PIN_ORDER=compact.


#! Uncomment one choice for CMD below (add mpirun/mpiexec options if necessary):

#! Choose this for an MPI code (possibly using OpenMP) using Intel MPI.
# CMD="mpirun -ppn $mpi_tasks_per_node -np $np $application $options"
# CMD="${SINGULARITY_CMD}"

#! Choose this for a pure shared-memory OpenMP parallel program on a single node:
#! (OMP_NUM_THREADS threads will be created):
#CMD="$application $options"

#! Choose this for an MPI code (possibly using OpenMP) using OpenMPI:
#CMD="mpirun -npernode $mpi_tasks_per_node -np $np $application $options"


###############################################################
### You should not have to change anything below this line ####
###############################################################

cd $workdir
echo -e "Changed directory to `pwd`.\n"

JOBID=$SLURM_JOB_ID

echo -e "JobID: $JOBID\n======"
echo "Time: `date`"
echo "Running on master node: `hostname`"
echo "Current directory: `pwd`"

if [ "$SLURM_JOB_NODELIST" ]; then
        #! Create a machine file:
        export NODEFILE=`generate_pbs_nodefile`
        cat $NODEFILE | uniq > machine.file.$JOBID
        echo -e "\nNodes allocated:\n================"
        echo `cat machine.file.$JOBID | sed -e 's/\..*$//g'`
fi

echo -e "\nnumtasks=$numtasks, numnodes=$numnodes, mpi_tasks_per_node=$mpi_tasks_per_node (OMP_NUM_THREADS=$OMP_NUM_THREADS)"

echo -e "\nExecuting command:\n==================\n$CMD\n"

eval $CMD