Sample batch job submission scripts on SapSlurm
====Serial (single-processor) Job====
Sample job submission script (sub.sh) to run an R program called add.R using a single core:
<pre class="gscript">
#!/bin/bash
#SBATCH --job-name=testserial         # Job name
#SBATCH --partition=batch             # Partition (queue) name
#SBATCH --ntasks=1                    # Run on a single CPU
#SBATCH --mem=1gb                     # Job memory request
#SBATCH --time=02:00:00               # Time limit hrs:min:sec
#SBATCH --output=testserial.%j.out    # Standard output log
#SBATCH --error=testserial.%j.err     # Standard error log
#SBATCH --mail-type=END,FAIL          # Mail events (NONE, BEGIN, END, FAIL, ALL)
#SBATCH --mail-user=username@uga.edu  # Where to send mail

cd $SLURM_SUBMIT_DIR

module load R/3.6.2-foss-2019b

R CMD BATCH add.R
</pre>
In this sample script, the standard output and error of the job will be saved into files called testserial.%j.out and testserial.%j.err, where %j will be automatically replaced by the job ID of the job.
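Assuming the script above is saved as sub.sh in the same directory as add.R, the job can be submitted and checked with the standard Slurm commands sketched below (the job ID used with sacct is the number printed by sbatch at submission time):

<pre class="gscript">
sbatch sub.sh       # submit the job; Slurm prints the assigned job ID
squeue -u $USER     # list your pending and running jobs
sacct -j 12345      # replace 12345 with the job ID to see accounting info after the job ends
</pre>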
====MPI Job====
Sample job submission script (sub.sh) to run an OpenMPI application. In this example the job requests 16 cores, divided equally across 2 nodes (8 cores per node); the binary is called mympi.exe:
<pre class="gscript">
#!/bin/bash
#SBATCH --job-name=mpitest            # Job name
#SBATCH --partition=batch             # Partition (queue) name
#SBATCH --nodes=2                     # Number of nodes
#SBATCH --ntasks=16                   # Number of MPI ranks
#SBATCH --ntasks-per-node=8           # How many tasks on each node
#SBATCH --cpus-per-task=1             # Number of cores per MPI rank
#SBATCH --mem-per-cpu=600mb           # Memory per processor
#SBATCH --time=02:00:00               # Time limit hrs:min:sec
#SBATCH --output=mpitest.%j.out       # Standard output log
#SBATCH --error=mpitest.%j.err        # Standard error log
#SBATCH --mail-type=END,FAIL          # Mail events (NONE, BEGIN, END, FAIL, ALL)
#SBATCH --mail-user=username@uga.edu  # Where to send mail

cd $SLURM_SUBMIT_DIR

module load OpenMPI/3.1.4-GCC-8.3.0

mpirun ./mympi.exe
</pre>
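The mympi.exe binary is assumed to have been built beforehand with the same OpenMPI module that the script loads; a minimal sketch of that step is shown below (the source file name mympi.c is only an illustration):

<pre class="gscript">
# Compile the MPI program with the same OpenMPI module the job script loads
module load OpenMPI/3.1.4-GCC-8.3.0
mpicc -O2 -o mympi.exe mympi.c
</pre>

When OpenMPI is built with Slurm support, mpirun started inside the job picks up the number of ranks and the node list from the allocation, which is why no -np argument or host file is given in the script.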
====OpenMP (Multi-Thread) Job====
Sample job submission script (sub.sh) to run a program that uses OpenMP with 6 threads. Set --ntasks=1 and set --cpus-per-task to the number of threads you wish to use. The name of the binary in this example is a.out.
<pre class="gscript">
#!/bin/bash
#SBATCH --job-name=mctest             # Job name
#SBATCH --partition=batch             # Partition (queue) name
#SBATCH --ntasks=1                    # Run a single task
#SBATCH --cpus-per-task=6             # Number of CPU cores per task
#SBATCH --mem=4gb                     # Job memory request
#SBATCH --time=02:00:00               # Time limit hrs:min:sec
#SBATCH --output=mctest.%j.out        # Standard output log
#SBATCH --error=mctest.%j.err         # Standard error log
#SBATCH --mail-type=END,FAIL          # Mail events (NONE, BEGIN, END, FAIL, ALL)
#SBATCH --mail-user=username@uga.edu  # Where to send mail

cd $SLURM_SUBMIT_DIR

export OMP_NUM_THREADS=6

module load foss/2019b                # load the appropriate module file, e.g. foss/2019b

time ./a.out
</pre>
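A common variation, not part of the original example, is to derive the thread count from the Slurm request instead of hard-coding it, so the two values cannot drift apart; the hybrid example further down uses the same idea:

<pre class="gscript">
# Let the OpenMP thread count follow the --cpus-per-task request
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
</pre>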
====High Memory Job====
Sample job submission script (sub.sh) to run a Velvet application that needs to use 50GB of memory and 4 threads:
<pre class="gscript">
#!/bin/bash
#SBATCH --job-name=highmemtest        # Job name
#SBATCH --partition=highmem           # Partition (queue) name
#SBATCH --ntasks=1                    # Run a single task
#SBATCH --cpus-per-task=4             # Number of CPU cores per task
#SBATCH --mem=50gb                    # Job memory request
#SBATCH --time=02:00:00               # Time limit hrs:min:sec
#SBATCH --output=highmemtest.%j.out   # Standard output log
#SBATCH --error=highmemtest.%j.err    # Standard error log
#SBATCH --mail-type=END,FAIL          # Mail events (NONE, BEGIN, END, FAIL, ALL)
#SBATCH --mail-user=username@uga.edu  # Where to send mail

cd $SLURM_SUBMIT_DIR

export OMP_NUM_THREADS=4

module load Velvet

velvetg [options]
</pre>
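To see how much memory a job actually used, and so tune the --mem request for future runs, the Slurm accounting database can be queried after the job finishes; MaxRSS reports the peak resident memory of the job steps:

<pre class="gscript">
# Replace 12345 with the job ID; MaxRSS shows the peak memory used
sacct -j 12345 --format=JobID,JobName,MaxRSS,Elapsed,State
</pre>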
====Hybrid MPI/OpenMP Job====
Sample job submission script (sub.sh) to run a hybrid parallel job that uses 8 MPI processes with OpenMPI (4 per node on 2 nodes), where each MPI process runs 3 OpenMP threads:
<pre class="gscript">
#!/bin/bash
#SBATCH --job-name=hybridtest         # Job name
#SBATCH --partition=batch             # Partition (queue) name
#SBATCH --nodes=2                     # Number of nodes
#SBATCH --ntasks=8                    # Number of MPI ranks
#SBATCH --ntasks-per-node=4           # Number of MPI ranks per node
#SBATCH --cpus-per-task=3             # Number of OpenMP threads for each MPI process/rank
#SBATCH --mem-per-cpu=2000mb          # Per processor memory request
#SBATCH --time=2-00:00:00             # Walltime in hh:mm:ss or d-hh:mm:ss (2 days in the example)
#SBATCH --output=hybridtest.%j.out    # Standard output log
#SBATCH --error=hybridtest.%j.err     # Standard error log
#SBATCH --mail-type=END,FAIL          # Mail events (NONE, BEGIN, END, FAIL, ALL)
#SBATCH --mail-user=username@uga.edu  # Where to send mail

cd $SLURM_SUBMIT_DIR

export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK

module load foss/2019b                # load the appropriate module file, e.g. foss/2019b (provides OpenMPI)

mpirun ./myhybridprog.exe
</pre>
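With this request the job uses 2 nodes × 4 ranks × 3 threads = 24 cores in total. As an optional diagnostic, not part of the original example, a line such as the following can be placed before the mpirun call to confirm how many tasks Slurm placed on each node:

<pre class="gscript">
# Optional sanity check: count how many MPI tasks landed on each node
srun -n $SLURM_NTASKS hostname | sort | uniq -c
</pre>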
====Array job====
Sample job submission script (sub.sh) to submit an array job with 10 elements. In this example, each array job element will run the a.out binary using an input file called input_0, input_1, ..., input_9.
<pre class="gscript">
#!/bin/bash
#SBATCH --job-name=arrayjobtest       # Job name
#SBATCH --partition=batch             # Partition (queue) name
#SBATCH --ntasks=1                    # Run a single task
#SBATCH --mem=1gb                     # Job Memory
#SBATCH --time=10:00:00               # Time limit hrs:min:sec
#SBATCH --output=array_%A-%a.out      # Standard output log
#SBATCH --error=array_%A-%a.err       # Standard error log
#SBATCH --array=0-9                   # Array range

cd $SLURM_SUBMIT_DIR

module load foss/2019b                # load any needed module files, e.g. foss/2019b

time ./a.out < input_$SLURM_ARRAY_TASK_ID
</pre>
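Slurm's --array syntax also accepts a throttle, which is useful when the number of simultaneously running elements should be limited, for example to avoid overloading a shared file system:

<pre class="gscript">
#SBATCH --array=0-9%2                 # run all 10 elements, but at most 2 at a time
</pre>

Individual elements can also be resubmitted from the command line, e.g. sbatch --array=3,7 sub.sh reruns only elements 3 and 7.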
====GPU/CUDA====
Sample script to run Amber on a GPU node using one node, 2 CPU cores, and 1 GPU card:
<pre class="gscript">
#!/bin/bash
#SBATCH --job-name=amber              # Job name
#SBATCH --partition=gpu_p             # Partition (queue) name
#SBATCH --gres=gpu:1                  # Requests one GPU device
#SBATCH --ntasks=1                    # Run a single task
#SBATCH --cpus-per-task=2             # Number of CPU cores per task
#SBATCH --mem=40gb                    # Job memory request
#SBATCH --time=10:00:00               # Time limit hrs:min:sec
#SBATCH --output=amber.%j.out         # Standard output log
#SBATCH --error=amber.%j.err          # Standard error log
#SBATCH --mail-type=END,FAIL          # Mail events (NONE, BEGIN, END, FAIL, ALL)
#SBATCH --mail-user=username@uga.edu  # Where to send mail

cd $SLURM_SUBMIT_DIR

ml Amber/18-fosscuda-2018b-AmberTools-18-patchlevel-10-8

mpiexec $AMBERHOME/bin/pmemd.cuda -O -i ./prod.in -o prod_c4-23.out -p ./dimerFBP_GOL.prmtop -c ./restart.rst -r prod.rst -x prod.mdcrd
</pre>
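As an optional check, not part of the original example, the lines below can be added after the cd command to record which GPU Slurm assigned to the job; both commands are standard on CUDA-equipped nodes:

<pre class="gscript">
# Optional: record the GPU(s) visible to this job in the output log
echo "CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES"
nvidia-smi
</pre>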