From 1912a72754fee32274a806691e566e67d9912584 Mon Sep 17 00:00:00 2001
From: Marco Barilari
Date: Wed, 3 Jul 2024 10:38:48 +0200
Subject: [PATCH] debug and improve how to run mriqc

---
 doc/cpp_mriqc.slurm       | 61 +++++++++++++++++----------------------
 doc/cpp_mriqc_group.slurm | 46 +++++++++++++++++------------
 doc/run_mriqc.md          | 12 +++-----
 3 files changed, 58 insertions(+), 61 deletions(-)

diff --git a/doc/cpp_mriqc.slurm b/doc/cpp_mriqc.slurm
index 12020b6..6ece5d4 100644
--- a/doc/cpp_mriqc.slurm
+++ b/doc/cpp_mriqc.slurm
@@ -4,41 +4,26 @@
 #SBATCH --time=4:00:00 # hh:mm:ss
 #SBATCH --ntasks=1
-#SBATCH --cpus-per-task=9
+#SBATCH --cpus-per-task=4
 #SBATCH --mem-per-cpu=10000 # megabytes
-#SBATCH --partition=batch,debug
+#SBATCH --partition=batch
 #SBATCH --mail-user=marco.barilari@uclouvain.be
 #SBATCH --mail-type=ALL
-#SBATCH --output=/home/ucl/irsp/marcobar/jobs_report/mriqc_job-%j.txt
+#SBATCH --output=/home/ucl/cosy/marcobar/jobs_report/mriqc_job-%j.txt
 #SBATCH --comment=project-name

-#export OMP_NUM_THREADS=9
-#export MKL_NUM_THREADS=9
+#export OMP_NUM_THREADS=4
+#export MKL_NUM_THREADS=4

-## CPP MRIqc script for CECI cluster v0.2.0
+## CPP MRIqc script for CECI cluster v0.3.0
 #
 # written by CPP people
 #
 # Submission command for Lemaitre4
 #
-# sbatch cpp_mriqc.slurm
-#
-# examples:
-# - 1 subject
-# sbatch cpp_mriqc.slurm sub-01
-#
-# - all subjects
-# sbatch cpp_mriqc.slurm ''
-#
-# - multiple subjects
-# sbatch cpp_mriqc.slurm 'sub-01 sub-02'
-#
-# - submit all the subjects (1 per job) all at once
-# read subj list to submit each to a job
-# !!! to run from within `raw` folder
-# ls -d sub* | xargs -n1 -I{} sbatch path/to/cpp_mriqc.slurm {}
+# sbatch cpp_mriqc.slurm sub-01

 # create jobs_report folder in case it does not exist
 mkdir -p $HOME/jobs_report/
@@ -55,22 +40,30 @@ module --force purge

 subjID=$1

 # "latest" or provide specific version number
-MRIQC_VERSION="23.1.0"
-
-# set username to locate scratch folder
-ceci_username="marcobar"
+MRIQC_VERSION="24.0.0"

+# cluster paths
 path_to_singularity_image="$HOME/tools/containers/images/bids/bids-mriqc--${MRIQC_VERSION}.sing"
-bids_dir="$HOME/mriqc_trial/raw"
-output_dir="$HOME/mriqc_trial/derivatives/mriqc"
+scratch_dir=$GLOBALSCRATCH
+
+# data paths
+root_dir="$HOME/path-to-project-yoda-folder"
+bids_dir="$root_dir/inputs/raw"
+output_dir="$root_dir/outputs/derivatives/mriqc"
+
+# make the scratch work folder: it has no space limit and MRIqc can keep intermediate files there, so a crashed job does not start again from zero
+mkdir -p "${scratch_dir}"/work-mriqc

 # create output folder in case it does not exist
 mkdir -p "${output_dir}"

 singularity run --cleanenv \
-    -B "${bids_dir}":/data \
-    -B "${output_dir}":/out \
-    "${path_to_singularity_image}" \
-    /data \
-    /out \
-    participant --participant-label "${subjID}"
+    -B "${scratch_dir}":/scratch_dir \
+    -B "${bids_dir}":/bids_dir \
+    -B "${output_dir}":/output \
+    "${path_to_singularity_image}" \
+    /bids_dir \
+    /output \
+    participant --participant-label "${subjID}" \
+    --work-dir /scratch_dir/work-mriqc/"${subjID}" \
+    --verbose-reports
diff --git a/doc/cpp_mriqc_group.slurm b/doc/cpp_mriqc_group.slurm
index 949fcb9..e21db8d 100644
--- a/doc/cpp_mriqc_group.slurm
+++ b/doc/cpp_mriqc_group.slurm
@@ -1,23 +1,23 @@
 #!/bin/bash

 #SBATCH --job-name=MRIqc
-#SBATCH --time=9:00:00 # hh:mm:ss
+#SBATCH --time=4:00:00 # hh:mm:ss
 #SBATCH --ntasks=1
-#SBATCH --cpus-per-task=9
+#SBATCH --cpus-per-task=4
 #SBATCH --mem-per-cpu=10000 # megabytes
-#SBATCH --partition=batch,debug
+#SBATCH --partition=batch
 #SBATCH --mail-user=marco.barilari@uclouvain.be
 #SBATCH --mail-type=ALL
-#SBATCH --output=/home/ucl/irsp/marcobar/jobs_report/mriqc_job-%j.txt
+#SBATCH --output=/home/ucl/cosy/marcobar/jobs_report/mriqc_job-%j.txt
 #SBATCH --comment=project-name

-#export OMP_NUM_THREADS=9
-#export MKL_NUM_THREADS=9
+#export OMP_NUM_THREADS=4
+#export MKL_NUM_THREADS=4

-## CPP MRIqc script for CECI cluster
+## CPP MRIqc script for CECI cluster v0.3.0
 #
 # written by CPP people
 #
@@ -38,22 +38,30 @@ set -e -x -u -o pipefail

 module --force purge

 # "latest" or provide specific version number
-MRIQC_VERSION="23.1.0"
-
-# set username to locate scratch folder
-ceci_username="marcobar"
+MRIQC_VERSION="24.0.0"

+# cluster paths
 path_to_singularity_image="$HOME/tools/containers/images/bids/bids-mriqc--${MRIQC_VERSION}.sing"
-bids_dir="$HOME/mriqc_trial/raw"
-output_dir="$HOME/mriqc_trial/derivatives/mriqc"
+scratch_dir=$GLOBALSCRATCH
+
+# data paths
+root_dir="$HOME/path-to-project-yoda-folder"
+bids_dir="$root_dir/inputs/raw"
+output_dir="$root_dir/outputs/derivatives/mriqc"
+
+# make the scratch work folder: it has no space limit and MRIqc can keep intermediate files there, so a crashed job does not start again from zero
+mkdir -p "${scratch_dir}"/work-mriqc

 # create mriqc output folder in case it does not exist
 mkdir -p "${output_dir}"

 singularity run --cleanenv \
-    -B "${bids_dir}":/data \
-    -B "${output_dir}":/out \
-    "${path_to_singularity_image}" \
-    /data \
-    /out \
-    group
+    -B "${scratch_dir}":/scratch_dir \
+    -B "${bids_dir}":/bids_dir \
+    -B "${output_dir}":/output \
+    "${path_to_singularity_image}" \
+    /bids_dir \
+    /output \
+    --work-dir /scratch_dir/work-mriqc \
+    --verbose-reports \
+    group
diff --git a/doc/run_mriqc.md b/doc/run_mriqc.md
index 21a07e6..562559b 100644
--- a/doc/run_mriqc.md
+++ b/doc/run_mriqc.md
@@ -4,10 +4,6 @@ Written by CPP lab people

 To contribute see [here](https://cpp-lln-lab.github.io/CPP_HPC/contributing/)

-!!! Warning
-
-    Space problem since the `work` folder is not set in the script. Marco is working on it.
-
 ## General tips
@@ -43,7 +39,7 @@ cd path/to/containers

 datalad update --merge
 ```

-Depending on the cluster “unlock” is needed or not. No need for `lemaitre3`. !! TO CHECK ON LEMAITRE4
+Depending on the cluster, “unlock” may or may not be needed. It is not needed on `lemaitre4`.

 ```bash
 datalad unlock containers/images/bids/bids-mriqc--24.0.0.sing
@@ -65,7 +61,7 @@ Content of the `cpp_mriqc.slurm` file (download and edit from [here](cpp_mriqc.s

 !!! Warning

     1. Read the MRIqc documentation to know what you are doing and how the arguments of the run call affect the results
-    2. All the paths and email are set afte Marco's users for demosntration.
+    2. All the paths and email are set after Marco's user for demonstration. Change them to match your own user.
     3. Edit the scripts with the info you need to make them run for your user. Go from top to bottom of the script and do not overlook the first "commented" chunk: it is not a real comment section (check the email and job report path, the data paths, the `username`, etc.).

 ```bash
@@ -99,7 +95,7 @@ Content of the `cpp_mriqc_group.slurm` file (download and edit from [here](cpp_m

 !!! Warning

     1. Read the MRIqc documentation to know what you are doing and how the arguments of the run call affect the results
-    2. All the paths and email are set afte Marco's users for demosntration.
+    2. All the paths and email are set after Marco's user for demonstration. Change them to match your own user.
     3. Edit the scripts with the info you need to make them run for your user. Go from top to bottom of the script and do not overlook the first "commented" chunk: it is not a real comment section (check the email and job report path, the data paths, the `username`, etc.).

 ```bash
@@ -122,6 +118,6 @@ sbatch cpp_mriqc_group.slurm

 ### check your job

-see [here](https://github.com/cpp-lln-lab/CPP_HPC/cluster_code_snippets/#check-your-running-jobs)
+see [here](https://cpp-lln-lab.github.io/CPP_HPC/cluster_code_snippets/#check-your-running-jobs)

 To contribute see [here](https://cpp-lln-lab.github.io/CPP_HPC/contributing/)
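For reference, the participant-level script is still submitted one subject per job. A minimal sketch of the submission pattern that used to live in the `cpp_mriqc.slurm` header is shown below; it assumes you run it from inside the BIDS `raw` folder and that `path/to/cpp_mriqc.slurm` is replaced with the actual location of the script, and it uses the standard Slurm `squeue` command to check on the submitted jobs.

```bash
# submit one MRIqc participant job per subject
# (run from inside the BIDS `raw` folder; adjust path/to/cpp_mriqc.slurm)
ls -d sub* | xargs -n1 -I{} sbatch path/to/cpp_mriqc.slurm {}

# list your queued and running jobs to check the submissions
squeue -u "$USER"
```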