forked from maxsonBraunLab/cutTag-pipeline
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathrun_pipeline_singularity.sh
executable file
·43 lines (33 loc) · 1.5 KB
/
run_pipeline_singularity.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
#!/usr/bin/bash
#SBATCH --time 24:00:00
#SBATCH --partition exacloud
#SBATCH --job-name run_pipeline
#SBATCH --output=jobs/run_pipeline_%j.log

# Wrapper script for running the cutTag pipeline as a batch job using Singularity.
# Make sure to do the following before running this script:
#   - be on an interactive/compute node so that the Singularity module can be activated
#   - conda activate the snakemake environment
#   - check that the slurm_singularity profile for snakemake is set up
#   - check that a "jobs" folder exists in the main pipeline directory (if not, mkdir jobs)
#   - add correct paths to indices and fastq folders below
# To run this wrapper, do: sbatch run_pipeline_singularity.sh

# Fail fast: abort on command errors, unset variables (e.g. CONDA_PREFIX_1 when
# no conda env is active), and failures anywhere in a pipeline.
set -euo pipefail

# set folder paths
indices_folder="/home/groups/MaxsonLab/indices"
conda_folder="${CONDA_PREFIX_1}/envs"
conda_pkgs_folder="${CONDA_PREFIX_1}/pkgs"
fastq_folder="" # add absolute path to folder containing original fastq files (not the symlinks)

# An empty fastq_folder would silently produce a malformed --bind spec with a
# trailing comma; refuse to run until the user fills it in.
if [[ -z "$fastq_folder" ]]; then
    printf 'ERROR: fastq_folder is empty. Edit %s and set it to the absolute path of the original fastq files.\n' "$0" >&2
    exit 1
fi

# set the number of jobs to run at a time (no spaces)
num_jobs=100

# module load singularity before running snakemake
module load /etc/modulefiles/singularity/current

# run snakemake pipeline
# Note: if a separate Snakemake slurm profile for Singularity exists
# (e.g. slurm_singularity), you can use it instead of the default slurm profile.
snakemake -j "$num_jobs" \
    --verbose \
    --use-conda \
    --use-singularity \
    --singularity-args "--bind ${indices_folder},${conda_folder},${conda_pkgs_folder},${fastq_folder}" \
    --profile slurm \
    --conda-prefix "$conda_folder" \
    --cluster-config cluster.yaml