#!/bin/bash
#
#----------------------------------------------------------------
# Running multiple independent jobs as a SLURM job array
#----------------------------------------------------------------
#
# Define options for how SLURM should run the job
#----------------------------------------------------------------
#
#SBATCH --job-name=arr_wd_cool
#SBATCH --output=array_%A_wd_cool_%a.log
# %A and %a are placeholders for the job ID and the array task ID, respectively.
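# (illustration only: for job ID 123456 and array task 3, the log file would be
# named array_123456_wd_cool_3.log)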
#
#Number of CPU cores to use within one node
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=6
#
#Define the number of hours the job should run.
#Maximum runtime is limited to 10 days, i.e. 240 hours
#SBATCH --time=1:30:00
#
#Define the amount of RAM used by your job in gigabytes (here given per CPU core)
#In shared-memory applications this memory is shared among multiple CPUs
#SBATCH --mem-per-cpu=3G
#
#Send emails when the job starts, finishes, or exits with an error
#SBATCH [email protected]
#SBATCH --mail-type=ALL
#
#Do not requeue the job in case it fails.
#SBATCH --no-requeue
#
#Do not export the local environment to the compute nodes
#SBATCH --export=NONE
unset SLURM_EXPORT_ENV
#
#Set the number of OpenMP threads from the SLURM internal variable
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
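# A defensive alternative (a sketch, commented out; assumes you may test this
# script outside a SLURM allocation, where SLURM_CPUS_PER_TASK is unset):
# export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK:-1}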
# load the respective software module(s) you intend to use
#----------------------------------------------------------------
module purge
module load mesastar/23.05.1
module load python/3.10
# Define the sequence of jobs to run as you would in a bash script.
# Use the variable $SLURM_ARRAY_TASK_ID to select task-specific behaviour
# in each instance of the script execution (see the example submission below).
#----------------------------------------------------------------
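# Example submission (a sketch, not part of the batch script itself; the task
# range 1-20 is an assumption and should match the number of mod* directories
# under ./co_wd_models/):
#   sbatch --array=1-20 run_cooling_above3.8.sh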
# FILL IN THE BLANKS __________ !!!
# Path to this working directory. All array tasks will be placed in the ./tasks/ directory
# !!! Always put the absolute path here to be completely certain everything works !!!
WORK_DIR=/nfs/scistore18/bugnegrp/leinramh/mesa_array_job_template
# BASE template directory on which to build the MESA run.
# If you have a personal template, you can give its absolute path
# here instead.
BASE_MESA_DIR=$MESA_DIR/star/test_suite/wd_cool_0.6M
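# Optional sanity check (a sketch; aborting early on a missing template is an
# assumption, not part of the original workflow):
# if [ ! -d "$BASE_MESA_DIR" ]; then
#     echo "MESA template not found: $BASE_MESA_DIR" >&2
#     exit 1
# fi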
# Pre-setup of the run
#----------------------------------------------------------------
# Set up the directory specific to this array task
TASK_NAME=task_$SLURM_ARRAY_TASK_ID
TASK_DIR=$WORK_DIR/tasks/cool_wd/$TASK_NAME
# Create the specific task directory in ./tasks/ as a copy of the chosen MESA template
# (mkdir -p ensures the parent ./tasks/cool_wd/ directory exists; cp does not create it)
mkdir -p "$TASK_DIR"
cp -a "$BASE_MESA_DIR"/. "$TASK_DIR"/
# Add the star_job.defaults file to the specific task folder as well as the caches folder structure
# bash $WORK_DIR/helper_scripts/caches.sh $WORK_DIR $TASK_DIR
cd $TASK_DIR
mkdir $TASK_DIR/caches
export MESA_CACHES_DIR=$TASK_DIR/caches
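# Giving every array task its own cache directory keeps concurrently running
# tasks from writing to a shared MESA cache at the same time.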
# Any additional work needed before running MESA goes here
# -----------------------------------------------------------------------------
cp $WORK_DIR/co_wd_models/mod$SLURM_ARRAY_TASK_ID/co_wd_settled.mod $TASK_DIR/
cp $WORK_DIR/non_default_files/profile_columns.list $TASK_DIR/
cp $WORK_DIR/non_default_files/inlist_wd_cool_0.6M $TASK_DIR/
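# Optional check (a sketch; the file name follows the cp command above, and
# aborting here is an assumption rather than part of the original workflow):
# if [ ! -f "$TASK_DIR/co_wd_settled.mod" ]; then
#     echo "Missing starting model for task $SLURM_ARRAY_TASK_ID" >&2
#     exit 1
# fi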
# Build and run MESA in the task directory
# -----------------------------------------------------------------------------
# Build serially, then run MESA under srun so the CPU binding applies to the actual run
./clean && ./mk && srun --cpu_bind=verbose ./rn > screenlog.log
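# Example follow-up once the job is running (a sketch; the paths follow the
# directory layout and redirection above, task_1 is illustrative, and LOGS/ is
# the usual MESA output directory as configured in the inlist):
#   tail -f $WORK_DIR/tasks/cool_wd/task_1/screenlog.log
#   ls $WORK_DIR/tasks/cool_wd/task_1/LOGS/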