Merge pull request #36 from marcobarilari/main
ENH and debugged fmriprep and mriqc for lemaitre4
marcobarilari authored Jul 3, 2024
2 parents 550546c + 1912a72 commit b58d55d
Showing 5 changed files with 77 additions and 72 deletions.
21 changes: 15 additions & 6 deletions doc/cpp_fmriprep.slurm
@@ -10,14 +10,14 @@

#SBATCH --mail-user=marco.barilari@uclouvain.be
#SBATCH --mail-type=ALL
#SBATCH --output=/home/ucl/irsp/marcobar/jobs_report/fmriprep_job-%j.txt
#SBATCH --output=/home/ucl/cosy/marcobar/jobs_report/fmriprep_job-%j.txt

#SBATCH --comment=project-name

#export OMP_NUM_THREADS=4
#export MKL_NUM_THREADS=4

## CPP fmriprep script for CECI cluster v0.2.0
## CPP fmriprep script for CECI cluster v0.3.0
#
# written by CPP people
#
@@ -70,11 +70,15 @@ ceci_username="marcobar"
# set fmriprep arguments
nb_dummy_scans=0

# cluster paths
path_to_singularity_image="$HOME/tools/containers/images/bids/bids-fmriprep--${FMRIPREP_VERSION}.sing"
scratch_dir="/scratch/users/m/a/${ceci_username}"
bids_dir="$HOME/fmriprep_trial/raw"
output_dir="$HOME/fmriprep_trial/derivatives/fmriprep"
freesurfer_license_folder="$HOME/tools/freesurfer_license"
scratch_dir=$GLOBALSCRATCH
freesurfer_license_folder="$HOME/tools"

# data paths
root_dir="$HOME/path-to-project-yoda-folder"
bids_dir="$root_dir/inputs/raw"
output_dir="$root_dir/outputs/derivatives/fmriprep"

# make the scratch folder; there is no space limit here and fmriprep can store intermediate files so a crashed job does not restart from zero
mkdir -p "${scratch_dir}"/work-fmriprep
@@ -99,3 +103,8 @@ singularity run --cleanenv \
--notrack \
--skip_bids_validation \
--stop-on-first-crash


# more useful options to keep in mind:
#
# --fs-no-reconall # skip freesurfer segmentation
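A submission sketch for this script (assumption: `cpp_fmriprep.slurm` takes the BIDS subject label as its first argument, as `cpp_mriqc.slurm` does):

```bash
# one subject (assumes the script reads the subject label from $1)
sbatch cpp_fmriprep.slurm sub-01

# one job per subject; run from within the raw folder
ls -d sub* | xargs -n1 -I{} sbatch path/to/cpp_fmriprep.slurm {}
```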
61 changes: 27 additions & 34 deletions doc/cpp_mriqc.slurm
@@ -4,41 +4,26 @@
#SBATCH --time=4:00:00 # hh:mm:ss

#SBATCH --ntasks=1
#SBATCH --cpus-per-task=9
#SBATCH --cpus-per-task=4
#SBATCH --mem-per-cpu=10000 # megabytes
#SBATCH --partition=batch,debug
#SBATCH --partition=batch

#SBATCH --mail-user=marco.barilari@uclouvain.be
#SBATCH --mail-type=ALL
#SBATCH --output=/home/ucl/irsp/marcobar/jobs_report/mriqc_job-%j.txt
#SBATCH --output=/home/ucl/cosy/marcobar/jobs_report/mriqc_job-%j.txt

#SBATCH --comment=project-name

#export OMP_NUM_THREADS=9
#export MKL_NUM_THREADS=9
#export OMP_NUM_THREADS=4
#export MKL_NUM_THREADS=4

## CPP MRIqc script for CECI cluster v0.2.0
## CPP MRIqc script for CECI cluster v0.3.0
#
# written by CPP people
#
# Submission command for Lemaitre4
#
# sbatch cpp_mriqc.slurm <subjID>
#
# examples:
# - 1 subject
# sbatch cpp_mriqc.slurm sub-01
#
# - all subjects
# sbatch cpp_mriqc.slurm ''
#
# - multiple subjects
# sbatch cpp_mriqc.slurm 'sub-01 sub-02'
#
# - submit all the subjects (1 per job) all at once
# read subj list to submit each to a job
# !!! to run from within `raw` folder
# ls -d sub* | xargs -n1 -I{} sbatch path/to/cpp_mriqc.slurm {}
# sbatch cpp_mriqc_group.slurm

# create the jobs_report folder in case it does not exist
mkdir -p $HOME/jobs_report/
@@ -55,22 +40,30 @@ module --force purge
subjID=$1

# "latest" or procide specific version number
MRIQC_VERSION="23.1.0"

# set username to locate scratch folder
ceci_username="marcobar"
MRIQC_VERSION="24.0.0"

# cluster paths
path_to_singularity_image="$HOME/tools/containers/images/bids/bids-mriqc--${MRIQC_VERSION}.sing"
bids_dir="$HOME/mriqc_trial/raw"
output_dir="$HOME/mriqc_trial/derivatives/mriqc"
scratch_dir=$GLOBALSCRATCH

# data paths
root_dir="$HOME/path-to-project-yoda-folder"
bids_dir="$root_dir/inputs/raw"
output_dir="$root_dir/outputs/derivatives/mriqc"

# make the scratch folder; there is no space limit here and MRIqc can store intermediate files so a crashed job does not restart from zero
mkdir -p "${scratch_dir}"/work-mriqc

# create the output folder in case it does not exist
mkdir -p "${output_dir}"

singularity run --cleanenv \
-B "${bids_dir}":/data \
-B "${output_dir}":/out \
"${path_to_singularity_image}" \
/data \
/out \
participant --participant-label "${subjID}"
-B "${scratch_dir}":/scratch_dir \
-B "${bids_dir}":/bids_dir \
-B "${output_dir}":/output \
"${path_to_singularity_image}" \
/bids_dir \
/output \
participant --participant-label "${subjID}" \
--work-dir /scratch_dir/work-mriqc/"${subjID}" \
--verbose-reports
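The worked examples removed from the header comment above remain a useful usage reference; a sketch based on them:

```bash
# one subject
sbatch cpp_mriqc.slurm sub-01

# all subjects in a single job
sbatch cpp_mriqc.slurm ''

# multiple subjects in a single job
sbatch cpp_mriqc.slurm 'sub-01 sub-02'

# one job per subject; run from within the raw folder
ls -d sub* | xargs -n1 -I{} sbatch path/to/cpp_mriqc.slurm {}
```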
46 changes: 27 additions & 19 deletions doc/cpp_mriqc_group.slurm
@@ -1,23 +1,23 @@
#!/bin/bash

#SBATCH --job-name=MRIqc
#SBATCH --time=9:00:00 # hh:mm:ss
#SBATCH --time=4:00:00 # hh:mm:ss

#SBATCH --ntasks=1
#SBATCH --cpus-per-task=9
#SBATCH --cpus-per-task=4
#SBATCH --mem-per-cpu=10000 # megabytes
#SBATCH --partition=batch,debug
#SBATCH --partition=batch

#SBATCH --mail-user=marco.barilari@uclouvain.be
#SBATCH --mail-type=ALL
#SBATCH --output=/home/ucl/irsp/marcobar/jobs_report/mriqc_job-%j.txt
#SBATCH --output=/home/ucl/cosy/marcobar/jobs_report/mriqc_job-%j.txt

#SBATCH --comment=project-name

#export OMP_NUM_THREADS=9
#export MKL_NUM_THREADS=9
#export OMP_NUM_THREADS=4
#export MKL_NUM_THREADS=4

## CPP MRIqc script for CECI cluster
## CPP MRIqc script for CECI cluster v0.3.0
#
# written by CPP people
#
@@ -38,22 +38,30 @@ set -e -x -u -o pipefail
module --force purge

# "latest" or procide specific version number
MRIQC_VERSION="23.1.0"

# set username to locate scratch folder
ceci_username="marcobar"
MRIQC_VERSION="24.0.0"

# cluster paths
path_to_singularity_image="$HOME/tools/containers/images/bids/bids-mriqc--${MRIQC_VERSION}.sing"
bids_dir="$HOME/mriqc_trial/raw"
output_dir="$HOME/mriqc_trial/derivatives/mriqc"
scratch_dir=$GLOBALSCRATCH

# data paths
root_dir="$HOME/path-to-project-yoda-folder"
bids_dir="$root_dir/inputs/raw"
output_dir="$root_dir/outputs/derivatives/mriqc"

# make the scratch folder; there is no space limit here and MRIqc can store intermediate files so a crashed job does not restart from zero
mkdir -p "${scratch_dir}"/work-mriqc

# create the mriqc output folder in case it does not exist
mkdir -p "${output_dir}"

singularity run --cleanenv \
-B "${bids_dir}":/data \
-B "${output_dir}":/out \
"${path_to_singularity_image}" \
/data \
/out \
group
-B "${scratch_dir}":/scratch_dir \
-B "${bids_dir}":/bids_dir \
-B "${output_dir}":/output \
"${path_to_singularity_image}" \
/bids_dir \
/output \
--work-dir /scratch_dir/work-mriqc \
--verbose-reports \
group
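The group step aggregates the participant-level outputs, so the intended call order is a sketch like:

```bash
# after all participant-level jobs have finished:
sbatch cpp_mriqc_group.slurm
```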
9 changes: 4 additions & 5 deletions doc/run_fmriprep.md
@@ -15,7 +15,8 @@ To contribute see [here](https://cpp-lln-lab.github.io/CPP_HPC/contributing/)

## Prepare to run fmriprep on the cluster

- have your data on the cluster
- have your data on the cluster and unlock them if they are managed by datalad
- get your `freesurfer` license (user specific) for free [here](https://surfer.nmr.mgh.harvard.edu/registration.html) and move it to the cluster at `~/tools`
- install datalad for your user (see [here](https://github.com/cpp-lln-lab/CPP_HPC/install_datalad))
- get the fmriprep singularity image as follows:

@@ -38,14 +39,12 @@
```bash
cd path/to/containers
datalad update --merge
```

Depending on the cluster “unlock” is needed or not. No need for `lemaitre3`. !!! TO CHECK ON LEMAITRE4
Depending on the cluster, “unlock” may or may not be needed; it is not needed on `lemaitre4`.

```bash
datalad unlock containers/images/bids/bids-fmriprep--24.0.0.sing
```
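If the image file itself is annexed, fetching its content usually has to happen before unlocking; a minimal sketch, assuming the containers folder is a datalad dataset laid out like ReproNim/containers:

```bash
cd path/to/containers
# fetch the annexed image content before unlocking it
datalad get images/bids/bids-fmriprep--24.0.0.sing
```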

- get your `freesurfer` license (user specific) for free [here](https://surfer.nmr.mgh.harvard.edu/registration.html) and move it to the cluster

## Submit a fmriprep job via a `slurm` script

- pros:
@@ -60,7 +59,7 @@
Content of the `cpp_fmriprep.slurm` file (download and edit from [here](cpp_fmriprep.slurm)):
!!! Warning

1. Read the fmriprep documentation to know what you are doing and how the arguments of the run call affect the results
2. All the paths and the email are set to Marco's user for demonstration.
2. All the paths and the email are set to Marco's user for demonstration. Change them for your user.
3. Edit the script from top to bottom with the info needed to run it for your user; do not overlook the first "commented" chunk, because it is not a real comment section (check the email and job-report path, the data paths, the `username`, etc.).
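Concretely, the user-specific lines to edit sit in the `#SBATCH` header and the path section; Marco's values from the diff are shown here for illustration, replace them with your own:

```bash
#SBATCH --mail-user=marco.barilari@uclouvain.be                           # your email
#SBATCH --output=/home/ucl/cosy/marcobar/jobs_report/fmriprep_job-%j.txt  # your jobs_report path

# project root on the cluster (YODA layout)
root_dir="$HOME/path-to-project-yoda-folder"
```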
```bash
# full script content: see doc/cpp_fmriprep.slurm above
```
12 changes: 4 additions & 8 deletions doc/run_mriqc.md
@@ -4,10 +4,6 @@ Written by CPP lab people

To contribute see [here](https://cpp-lln-lab.github.io/CPP_HPC/contributing/)

!!! Warning

Space problem since the `work` folder is not set in the script. Marco is working on it.


## General tips

@@ -43,7 +39,7 @@
```bash
cd path/to/containers
datalad update --merge
```

Depending on the cluster “unlock” is needed or not. No need for `lemaitre3`. !! TO CHECK ON LEMAITRE4
Depending on the cluster, “unlock” may or may not be needed; it is not needed on `lemaitre4`.

```bash
datalad unlock containers/images/bids/bids-mriqc--24.0.0.sing
```
@@ -65,7 +61,7 @@
Content of the `cpp_mriqc.slurm` file (download and edit from [here](cpp_mriqc.slurm)):
!!! Warning

1. Read the MRIqc documentation to know what you are doing and how the arguments of the run call affect the results
2. All the paths and the email are set to Marco's user for demonstration.
2. All the paths and the email are set to Marco's user for demonstration. Change them for your user.
3. Edit the script from top to bottom with the info needed to run it for your user; do not overlook the first "commented" chunk, because it is not a real comment section (check the email and job-report path, the data paths, the `username`, etc.).
```bash
# full script content: see doc/cpp_mriqc.slurm above
```

@@ -99,7 +95,7 @@
Content of the `cpp_mriqc_group.slurm` file (download and edit from [here](cpp_mriqc_group.slurm)):
!!! Warning
1. Read the MRIqc documentation to know what you are doing and how the arguments of the run call affect the results
2. All the paths and the email are set to Marco's user for demonstration.
2. All the paths and the email are set to Marco's user for demonstration. Change them for your user.
3. Edit the script from top to bottom with the info needed to run it for your user; do not overlook the first "commented" chunk, because it is not a real comment section (check the email and job-report path, the data paths, the `username`, etc.).

```bash
# full script content: see doc/cpp_mriqc_group.slurm above
```

@@ -122,6 +118,6 @@ sbatch cpp_mriqc_group.slurm

### check your job

see [here](https://github.com/cpp-lln-lab/CPP_HPC/cluster_code_snippets/#check-your-running-jobs)
see [here](https://cpp-lln-lab.github.io/CPP_HPC/cluster_code_snippets/#check-your-running-jobs)
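A quick sketch using standard SLURM commands (not specific to these scripts):

```bash
# list your queued and running jobs
squeue -u "$USER"

# summary of a finished job; replace <jobid> with the number from the job report
sacct -j <jobid> --format=JobID,State,Elapsed,MaxRSS
```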

To contribute see [here](https://cpp-lln-lab.github.io/CPP_HPC/contributing/)
