#!/bin/bash -l
#
# Sections of this script that can/should be edited are delimited by a
# [EDIT] tag.  All Slurm job options are denoted by a line that starts
# with "#SBATCH " followed by flags that would otherwise be passed on
# the command line.  Slurm job options can easily be disabled in a
# script by inserting a space in the prefix, e.g. "# SLURM " and
# reenabled by deleting that space.
#
# This is a batch job template for a program using multiple processor
# cores/threads on a single node.  This includes programs with OpenMP
# parallelism or explicit threading via the pthreads library.
#
# NOTE: #SBATCH directives are only honored by Slurm when each one is
#       at the start of its own line, before the first executable
#       command — do not collapse or reflow this header.
#
# Do not alter the --nodes/--ntasks options!
#SBATCH --nodes=1
#SBATCH --ntasks=1
#
# [EDIT] Indicate the number of processor cores/threads to be used
#        by the job:
#
#SBATCH --cpus-per-task=4
#
# [EDIT] All jobs have memory limits imposed.  The default is 1 GB per
#        CPU allocated to the job.  The default can be overridden either
#        with a per-node value (--mem) or a per-CPU value (--mem-per-cpu)
#        with unitless values in MB and the suffixes K|M|G|T denoting
#        kibi, mebi, gibi, and tebibyte units.  Delete the space between
#        the "#" and the word SBATCH to enable one of them:
#
# SBATCH --mem=8G
# SBATCH --mem-per-cpu=1024M
#
# .... more options not used ....
#
# [EDIT] It can be helpful to provide a descriptive (terse) name for
#        the job (be sure to use quotes if there's whitespace in the
#        name):
#
#SBATCH --job-name=dgels-ex
#
# [EDIT] The partition determines which nodes can be used and with what
#        maximum runtime limits, etc.  Partition limits can be displayed
#        with the "sinfo --summarize" command.
#
# SBATCH --partition=standard
#
#        To run with priority-access to resources owned by your workgroup,
#        use the "_workgroup_" partition:
#
#SBATCH --partition=_workgroup_
#
# [EDIT] The maximum runtime for the job; a single integer is interpreted
#        as a number of minutes, otherwise use the format
#
#          d-hh:mm:ss
#
#        Jobs default to the default runtime limit of the chosen partition
#        if this option is omitted.
#
#SBATCH --time=0-02:00:00
#
#        You can also provide a minimum acceptable runtime so the scheduler
#        may be able to run your job sooner.  If you do not provide a
#        value, it will be set to match the maximum runtime limit (discussed
#        above).
#
# SBATCH --time-min=0-01:00:00
#
# .... more options not used ....
#
# Do standard OpenMP environment setup (sets OMP_* variables to match the
# Slurm allocation; provided by the cluster's template library):
#
. /opt/shared/slurm/templates/libexec/openmp.sh
#
# [EDIT] Execute your OpenMP/threaded program using the srun command:
#
echo "--- Set environment ---"
# vpkg_require loads the Intel compiler/MKL runtime into the environment
# (cluster-local VALET command; requires the login shell from "bash -l").
vpkg_require intel
echo ""
echo "--- Run Test with $SLURM_CPUS_PER_TASK threads ---"
# Limit MKL's internal threading to the number of CPUs Slurm allocated.
export MKL_NUM_THREADS="$SLURM_CPUS_PER_TASK"
# The executable and its input/output files share the job name:
# ./dgels-ex reads dgels-ex.d on stdin; expansions are quoted so a
# job name containing whitespace cannot word-split the command line.
time ./"${SLURM_JOB_NAME}" < "${SLURM_JOB_NAME}.d"
echo ""
echo "--- Compare Results ---"
# Show the reference results for manual comparison with the run above.
cat "${SLURM_JOB_NAME}.r"