From 255fd20cce10808fa2edf18b54b5a60ba0133445 Mon Sep 17 00:00:00 2001
From: Marco Barilari <marco.barilari@uclouvain.be>
Date: Wed, 3 Jul 2024 10:38:36 +0200
Subject: [PATCH 1/2] debug and improve how to run fmriprep

---
 doc/cpp_fmriprep.slurm | 21 +++++++++++++++------
 doc/run_fmriprep.md    |  9 ++++-----
 2 files changed, 19 insertions(+), 11 deletions(-)

diff --git a/doc/cpp_fmriprep.slurm b/doc/cpp_fmriprep.slurm
index 1e836fe..c8135e8 100644
--- a/doc/cpp_fmriprep.slurm
+++ b/doc/cpp_fmriprep.slurm
@@ -10,14 +10,14 @@
 #SBATCH --mail-user=marco.barilari@uclouvain.be
 #SBATCH --mail-type=ALL
 
-#SBATCH --output=/home/ucl/irsp/marcobar/jobs_report/fmriprep_job-%j.txt
+#SBATCH --output=/home/ucl/cosy/marcobar/jobs_report/fmriprep_job-%j.txt
 
 #SBATCH --comment=project-name
 
 #export OMP_NUM_THREADS=4
 #export MKL_NUM_THREADS=4
 
-## CPP frmiprep script for CECI cluster v0.2.0
+## CPP fmriprep script for CECI cluster v0.3.0
 #
 # written by CPP people
 #
@@ -70,11 +70,15 @@ ceci_username="marcobar"
 # set fmriprep arguments
 nb_dummy_scans=0
 
+# cluster paths
 path_to_singularity_image="$HOME/tools/containers/images/bids/bids-fmriprep--${FMRIPREP_VERSION}.sing"
-scratch_dir="/scratch/users/m/a/${ceci_username}"
-bids_dir="$HOME/fmriprep_trial/raw"
-output_dir="$HOME/fmriprep_trial/derivatives/fmriprep"
-freesurfer_license_folder="$HOME/tools/freesurfer_license"
+scratch_dir=$GLOBALSCRATCH
+freesurfer_license_folder="$HOME/tools"
+
+# data paths
+root_dir="$HOME/path-to-project-yoda-folder"
+bids_dir="$root_dir/inputs/raw"
+output_dir="$root_dir/outputs/derivatives/fmriprep"
 
 # make the scratch folder; there is no space limit there and fmriprep can keep its working files so that a crashed job does not restart from zero
 mkdir -p "${scratch_dir}"/work-fmriprep
@@ -99,3 +103,8 @@ singularity run --cleanenv \
     --notrack \
     --skip_bids_validation \
     --stop-on-first-crash
+
+
+# more useful options to keep in mind:
+#
+# --fs-no-reconall # skip freesurfer segmentation
\ No newline at end of file
diff --git a/doc/run_fmriprep.md b/doc/run_fmriprep.md
index dce05fa..30ca7b5 100644
--- a/doc/run_fmriprep.md
+++ b/doc/run_fmriprep.md
@@ -15,7 +15,8 @@
 
 ## Prepare to run fmriprep on the cluster
 
-- have your data on the cluster
+- have your data on the cluster and unlock it if it is managed by datalad
+- get your `freesurfer` license (user specific) for free [here](https://surfer.nmr.mgh.harvard.edu/registration.html) and move it to the cluster at `~/tools`
 - install datalad on your user (see [here](https://github.com/cpp-lln-lab/CPP_HPC/install_datalad))
 - get the fmriprep singularity image as follows:
 
@@ -38,14 +39,12 @@ cd path/to/containers
 datalad update --merge
 ``````
 
-Depending on the cluster “unlock” is needed or not. No need for `lemaitre3`. !!! TO CHECK ON LEMAITRE4
+Depending on the cluster, “unlock” may or may not be needed. It is not needed on `lemaitre4`.
 
 ```bash
 datalad unlock containers/images/bids/bids-fmriprep--24.0.0.sing
 ```
 
-- get your `freesurfer` license (user specific) for free [here](https://surfer.nmr.mgh.harvard.edu/registration.html) and move it to the cluster
-
 ## Submit a fmriprep job via a `slurm` script
 
 - pros:
@@ -60,7 +59,7 @@ Content of the `cpp_fmriprep.slurm` file (download and edit from [here](cpp_fmri
 
 !!! Warning
 
     1. Read the fmriprep documentation to know what you are doing and how the arguments of the run call affect the results
-    2. All the paths and email are set afte Marco's users for demosntration.
+    2. All the paths and the email are set for Marco's user for demonstration. Change them for your user.
     3. Edit the scripts with the info you need to make them run for your user, from top to bottom of the script; do not overlook the first "commented" chunk because it is not a really commented section (check the email and job report path, the data paths, the `username`, etc.).
 
 ```bash
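
Usage sketch for the script above (not part of the patch; `path/to/` and the subject label are placeholders, and this assumes `cpp_fmriprep.slurm`, like the mriqc script, takes the subject label as its first argument):

```bash
# submit a fmriprep job for one subject
sbatch path/to/cpp_fmriprep.slurm sub-01

# from within the raw/ folder: one job per subject, all at once
ls -d sub* | xargs -n1 -I{} sbatch path/to/cpp_fmriprep.slurm {}
```
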
From 1912a72754fee32274a806691e566e67d9912584 Mon Sep 17 00:00:00 2001
From: Marco Barilari <marco.barilari@uclouvain.be>
Date: Wed, 3 Jul 2024 10:38:48 +0200
Subject: [PATCH 2/2] debug and improve how to run mriqc

---
 doc/cpp_mriqc.slurm       | 61 +++++++++++++++++----------------------
 doc/cpp_mriqc_group.slurm | 46 +++++++++++++++++------------
 doc/run_mriqc.md          | 12 +++-----
 3 files changed, 58 insertions(+), 61 deletions(-)

diff --git a/doc/cpp_mriqc.slurm b/doc/cpp_mriqc.slurm
index 12020b6..6ece5d4 100644
--- a/doc/cpp_mriqc.slurm
+++ b/doc/cpp_mriqc.slurm
@@ -4,41 +4,26 @@
 
 #SBATCH --time=4:00:00 # hh:mm:ss
 #SBATCH --ntasks=1
-#SBATCH --cpus-per-task=9
+#SBATCH --cpus-per-task=4
 #SBATCH --mem-per-cpu=10000 # megabytes
-#SBATCH --partition=batch,debug
+#SBATCH --partition=batch
 
 #SBATCH --mail-user=marco.barilari@uclouvain.be
 #SBATCH --mail-type=ALL
 
-#SBATCH --output=/home/ucl/irsp/marcobar/jobs_report/mriqc_job-%j.txt
+#SBATCH --output=/home/ucl/cosy/marcobar/jobs_report/mriqc_job-%j.txt
 
 #SBATCH --comment=project-name
 
-#export OMP_NUM_THREADS=9
-#export MKL_NUM_THREADS=9
+#export OMP_NUM_THREADS=4
+#export MKL_NUM_THREADS=4
 
-## CPP MRIqc script for CECI cluster v0.2.0
+## CPP MRIqc script for CECI cluster v0.3.0
 #
 # written by CPP people
 #
 # Submission command for Lemaitre4
 #
-# sbatch cpp_mriqc.slurm
-#
-# examples:
-# - 1 subject
-# sbatch cpp_mriqc.slurm sub-01
-#
-# - all subjects
-# sbatch cpp_mriqc.slurm ''
-#
-# - multiple subjects
-# sbatch cpp_mriqc.slurm 'sub-01 sub-02'
-#
-# - submit all the subjects (1 per job) all at once
-# read subj list to submit each to a job
-# !!! to run from within `raw` folder
-# ls -d sub* | xargs -n1 -I{} sbatch path/to/cpp_mriqc.slurm {}
+# sbatch cpp_mriqc.slurm sub-01
 
 # create jobs_report folder in case it doesn't exist
 mkdir -p $HOME/jobs_report/
@@ -55,22 +40,30 @@ module --force purge
 
 subjID=$1
 
 # "latest" or provide a specific version number
-MRIQC_VERSION="23.1.0"
-
-# set username to locate scratch folder
-ceci_username="marcobar"
+MRIQC_VERSION="24.0.0"
 
+# cluster paths
 path_to_singularity_image="$HOME/tools/containers/images/bids/bids-mriqc--${MRIQC_VERSION}.sing"
-bids_dir="$HOME/mriqc_trial/raw"
-output_dir="$HOME/mriqc_trial/derivatives/mriqc"
+scratch_dir=$GLOBALSCRATCH
+
+# data paths
+root_dir="$HOME/path-to-project-yoda-folder"
+bids_dir="$root_dir/inputs/raw"
+output_dir="$root_dir/outputs/derivatives/mriqc"
+
+# make the scratch folder; there is no space limit there and mriqc can keep its working files so that a crashed job does not restart from zero
+mkdir -p "${scratch_dir}"/work-mriqc
 
 # create output folder in case it does not exist
 mkdir -p "${output_dir}"
 
 singularity run --cleanenv \
-    -B "${bids_dir}":/data \
-    -B "${output_dir}":/out \
-    "${path_to_singularity_image}" \
-    /data \
-    /out \
-    participant --participant-label "${subjID}"
+    -B "${scratch_dir}":/scratch_dir \
+    -B "${bids_dir}":/bids_dir \
+    -B "${output_dir}":/output \
+    "${path_to_singularity_image}" \
+    /bids_dir \
+    /output \
+    participant --participant-label "${subjID}" \
+    --work-dir /scratch_dir/work-mriqc/"${subjID}" \
+    --verbose-reports
diff --git a/doc/cpp_mriqc_group.slurm b/doc/cpp_mriqc_group.slurm
index 949fcb9..e21db8d 100644
--- a/doc/cpp_mriqc_group.slurm
+++ b/doc/cpp_mriqc_group.slurm
@@ -1,23 +1,23 @@
 #!/bin/bash
 
 #SBATCH --job-name=MRIqc
 
-#SBATCH --time=9:00:00 # hh:mm:ss
+#SBATCH --time=4:00:00 # hh:mm:ss
 #SBATCH --ntasks=1
-#SBATCH --cpus-per-task=9
+#SBATCH --cpus-per-task=4
 #SBATCH --mem-per-cpu=10000 # megabytes
-#SBATCH --partition=batch,debug
+#SBATCH --partition=batch
 
 #SBATCH --mail-user=marco.barilari@uclouvain.be
 #SBATCH --mail-type=ALL
 
-#SBATCH --output=/home/ucl/irsp/marcobar/jobs_report/mriqc_job-%j.txt
+#SBATCH --output=/home/ucl/cosy/marcobar/jobs_report/mriqc_job-%j.txt
 
 #SBATCH --comment=project-name
 
-#export OMP_NUM_THREADS=9
-#export MKL_NUM_THREADS=9
+#export OMP_NUM_THREADS=4
+#export MKL_NUM_THREADS=4
 
-## CPP MRIqc script for CECI cluster
+## CPP MRIqc script for CECI cluster v0.3.0
 #
 # written by CPP people
 #
@@ -38,22 +38,30 @@ set -e -x -u -o pipefail
 
 module --force purge
 
 # "latest" or provide a specific version number
-MRIQC_VERSION="23.1.0"
-
-# set username to locate scratch folder
-ceci_username="marcobar"
+MRIQC_VERSION="24.0.0"
 
+# cluster paths
 path_to_singularity_image="$HOME/tools/containers/images/bids/bids-mriqc--${MRIQC_VERSION}.sing"
-bids_dir="$HOME/mriqc_trial/raw"
-output_dir="$HOME/mriqc_trial/derivatives/mriqc"
+scratch_dir=$GLOBALSCRATCH
+
+# data paths
+root_dir="$HOME/path-to-project-yoda-folder"
+bids_dir="$root_dir/inputs/raw"
+output_dir="$root_dir/outputs/derivatives/mriqc"
+
+# make the scratch folder; there is no space limit there and mriqc can keep its working files so that a crashed job does not restart from zero
+mkdir -p "${scratch_dir}"/work-mriqc
 
 # create mriqc output folder in case it doesn't exist
 mkdir -p "${output_dir}"
 
 singularity run --cleanenv \
-    -B "${bids_dir}":/data \
-    -B "${output_dir}":/out \
-    "${path_to_singularity_image}" \
-    /data \
-    /out \
-    group
+    -B "${scratch_dir}":/scratch_dir \
+    -B "${bids_dir}":/bids_dir \
+    -B "${output_dir}":/output \
"${path_to_singularity_image}" \ + /bids_dir \ + /output \ + --work-dir /scratch_dir/work-mriqc/"${subjID}" \ + --verbose-reports \ + group diff --git a/doc/run_mriqc.md b/doc/run_mriqc.md index 21a07e6..562559b 100644 --- a/doc/run_mriqc.md +++ b/doc/run_mriqc.md @@ -4,10 +4,6 @@ Written by CPP lab people To contribute see [here](https://cpp-lln-lab.github.io/CPP_HPC/contributing/) -!!! Warning - - Space problem since the `work` folder is not set in the script. Marco is working on it. - ## General tips @@ -43,7 +39,7 @@ cd path/to/containers datald update --merge `````` -Depending on the cluster “unlock” is needed or not. No need for `lemaitre3`. !! TO CHECK ON LEMAITRE4 +Depending on the cluster “unlock” is needed or not. No need for `lemaitre4`. ```bash datalad unlock containers/images/bids/bids-mriqc--24.0.0.sing @@ -65,7 +61,7 @@ Content of the `cpp_mriqc.slurm` file (download and edit from [here](cpp_mriqc.s !!! Warning 1. Read the MRIqc documentation to know what you are doing and how the arguments of the run call effects the results - 2. All the paths and email are set afte Marco's users for demosntration. + 2. All the paths and email are set afte Marco's users for demosntration. Change them for your user. 3. Edit the scripts with the info you need to make it run for your user from top to buttom of the script, do not over look the first "commented" chunk cause it is not a real commented section (check the email and job report path, data paths and the `username` etc.). ```bash @@ -99,7 +95,7 @@ Content of the `cpp_mriqc_group.slurm` file (download and edit from [here](cpp_m !!! Warning 1. Read the MRIqc documentation to know what you are doing and how the arguments of the run call effects the results - 2. All the paths and email are set afte Marco's users for demosntration. + 2. All the paths and email are set afte Marco's users for demosntration. Change them for your user. 3. Edit the scripts with the info you need to make it run for your user from top to buttom of the script, do not over look the first "commented" chunk cause it is not a real commented section (check the email and job report path, data paths and the `username` etc.). ```bash @@ -122,6 +118,6 @@ sbatch cpp_mriqc_group.slurm ### check your job -see [here](https://github.com/cpp-lln-lab/CPP_HPC/cluster_code_snippets/#check-your-running-jobs) +see [here](https://github.com/cpp-lln-lab.github.io/CPP_HPC/cluster_code_snippets/#check-your-running-jobs) To contribute see [here](https://cpp-lln-lab.github.io/CPP_HPC/contributing/)