#!/bin/bash
#
# Slurm batch script: run seaice.R on node-local scratch, then copy the
# results back to $HOME and clean up.  Submit with:  sbatch <this file>

#SBATCH --time=00:10:00         # Walltime
#SBATCH --nodes=1               # Use 1 node (code is not multi-node parallelized)
#SBATCH --ntasks=1              # We only run one R instance = 1 task
#SBATCH --cpus-per-task=12      # Number of threads we want to run on
#SBATCH --account=owner-guest
#SBATCH --partition=ember-guest
#SBATCH -o slurm-%j.out-%N      # stdout file named by job id + node name
#SBATCH --mail-type=ALL
#SBATCH --job-name=seaIce
# NOTE: Slurm does NOT expand environment variables inside #SBATCH directives,
# so a line like "--mail-user=$USER@utah.edu" passes the literal string
# "$USER@utah.edu" as the address.  With --mail-user omitted, Slurm defaults
# to notifying the submitting user, which is what we want here.

# Abort on the first failed command instead of blundering on; the EXIT trap
# below still copies partial results back and removes the scratch directory.
set -e -o pipefail

export FILENAME=seaice.R
export SCR_DIR="/scratch/general/lustre/${USER}/${SLURM_JOBID}"
export WORK_DIR="${HOME}/TestBench/R/SeaIce"

# Load R (version 3.3.2)
module load R

# Take advantage of all the threads (linear algebra).
# $SLURM_CPUS_ON_NODE returns the actual number of cores on the node
# rather than $SLURM_JOB_CPUS_PER_NODE, which returns what --cpus-per-task asks for.
# NOTE(review): on a *shared* node this oversubscribes cores belonging to other
# jobs; consider $SLURM_CPUS_PER_TASK if the partition is not node-exclusive.
export OMP_NUM_THREADS="${SLURM_CPUS_ON_NODE}"

# Copy results over + clean up, whether the job succeeds or fails.
# The ${VAR:?} guard aborts instead of letting an empty SCR_DIR turn the
# cleanup into "rm -rf /*".
cleanup() {
  cd "$WORK_DIR" || return 1
  cp -pR "${SCR_DIR:?}"/* . || true
  rm -rf "${SCR_DIR:?}"
}

# Create scratch & copy everything over to scratch.
mkdir -p "$SCR_DIR"
cd "$SCR_DIR"
cp -p "$WORK_DIR"/* .
trap cleanup EXIT

# Run the R script in batch.
Rscript "$FILENAME" > "${SLURM_JOBID}.out"

echo "End of program at $(date)"