From 72de7d683d40781b6eb287d60c6b6c1ed392bf39 Mon Sep 17 00:00:00 2001
From: diersk
Date: Tue, 30 Apr 2024 18:46:05 +0200
Subject: [PATCH 01/25] Updated version
---
VERSION | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/VERSION b/VERSION
index e9307ca..7121511 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.0.2
+2.1.0-beta
From 3e4679188504b3c9f7a4184fb8fa65f078dff4b0 Mon Sep 17 00:00:00 2001
From: diersk
Date: Fri, 3 May 2024 19:43:10 +0200
Subject: [PATCH 02/25] Implemented status file and skip-existing option
---
fsqc/fsqcMain.py | 1328 +++++++++++++++++++++++++++-------------------
1 file changed, 770 insertions(+), 558 deletions(-)
diff --git a/fsqc/fsqcMain.py b/fsqc/fsqcMain.py
index 2177299..697808e 100644
--- a/fsqc/fsqcMain.py
+++ b/fsqc/fsqcMain.py
@@ -200,7 +200,8 @@ def get_help(print_help=True, return_help=False):
[--fornix] [--fornix-html] [--hypothalamus]
[--hypothalamus-html] [--hippocampus]
[--hippocampus-html] [--hippocampus-label ]
- [--shape] [--outlier] [--fastsurfer] [-h]
+ [--shape] [--outlier] [--fastsurfer]
+ [--exit-on-error] [--skip-existing] [-h]
required arguments:
--subjects_dir
@@ -243,6 +244,9 @@ def get_help(print_help=True, return_help=False):
--exit-on-error terminate the program when encountering an error;
otherwise, try to continue with the next module or
case
+  --skip-existing skip processing for a given case if output
+  already exists, even if it was created with different
+  parameters or settings
getting help:
-h, --help display this help message and exit
@@ -616,6 +620,14 @@ def _parse_arguments():
action="store_true",
required=False,
)
+ optional.add_argument(
+ "--skip-existing",
+ dest="skip_existing",
+ help="skip processing if output already exists",
+ default=False,
+ action="store_true",
+ required=False,
+ )
expert = parser.add_argument_group("expert arguments")
expert.add_argument(
@@ -729,6 +741,7 @@ def _parse_arguments():
argsDict["outlier_table"] = args.outlier_table
argsDict["fastsurfer"] = args.fastsurfer
argsDict["exit_on_error"] = args.exit_on_error
+ argsDict["skip_existing"] = args.skip_existing
#
return argsDict
@@ -1476,6 +1489,7 @@ def _do_fsqc(argsDict):
import time
import numpy as np
+ import pandas as pd
from fsqc.checkCCSize import checkCCSize
from fsqc.checkContrast import checkContrast
@@ -1553,698 +1567,865 @@ def _do_fsqc(argsDict):
metricsDict.update({subject: {"subject": subject}})
statusDict.update({subject: {"subject": subject}})
+ # ----------------------------------------------------------------------
+ # check for existing statusfile
+
+ # check / create subject-specific status_outdir
+ status_outdir = os.path.join(argsDict["output_dir"], "status", subject)
+ if not os.path.isdir(status_outdir):
+ os.makedirs(status_outdir)
+
+ # if it already exists, read statusfile
+ if os.path.exists(os.path.join(status_outdir, "status.txt")):
+ statusDict[subject] = dict(pd.read_csv(os.path.join(status_outdir, "status.txt"), sep=":", header=None, comment="#", names=["module", "status"]).to_dict(orient="split")['data'])
+
+ # note:
+ # 0: OK
+ # 1: Failed
+ # 2: Not done
+ # 3: Skipped
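+        # example of a statusfile (hypothetical contents; "sub-01" is an
+        # illustrative subject ID): one "module:status" pair per line, as
+        # written at the end of the subject loop, e.g.
+        #   subject:sub-01
+        #   metrics:0
+        #   shape:3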
+
# ----------------------------------------------------------------------
# compute core metrics
- # set status
- metrics_ok = True
+ # check / create subject-specific metrics_outdir
+ metrics_outdir = os.path.join(argsDict["output_dir"], "metrics", subject)
+ if not os.path.isdir(metrics_outdir):
+ os.makedirs(metrics_outdir)
- # get WM and GM SNR for orig.mgz
- try:
- wm_snr_orig, gm_snr_orig = checkSNR(
- argsDict["subjects_dir"],
- subject,
- SNR_AMOUT_EROSION,
- ref_image="orig.mgz",
- aparc_image=aparc_image,
- )
+ #
+ metrics_status = 0
+ if argsDict["skip_existing"] is True:
+ if subject in statusDict.keys():
+ if statusDict[subject]["metrics"] == 0 or statusDict[subject]["metrics"] == 3:
+ metrics_status = 3
+ logging.info("Skipping metrics computation for " + subject)
+ else:
+ logging.info("Not skipping metrics computation for " + subject + ", because statusfile did not indicate ok or skipped")
+ else:
+ logging.info("Not skipping metrics computation for " + subject + ", because no statusfile was found")
- except Exception as e:
- logging.error("ERROR: SNR computation failed for " + subject)
- logging.error("Reason: " + str(e))
- wm_snr_orig = np.nan
- gm_snr_orig = np.nan
- metrics_ok = False
- if argsDict["exit_on_error"] is True:
- raise
+ if metrics_status == 0:
- # get WM and GM SNR for norm.mgz
- try:
- wm_snr_norm, gm_snr_norm = checkSNR(
- argsDict["subjects_dir"],
- subject,
- SNR_AMOUT_EROSION,
- ref_image="norm.mgz",
- aparc_image=aparc_image,
- )
+ # get WM and GM SNR for orig.mgz
+ try:
+ wm_snr_orig, gm_snr_orig = checkSNR(
+ argsDict["subjects_dir"],
+ subject,
+ SNR_AMOUT_EROSION,
+ ref_image="orig.mgz",
+ aparc_image=aparc_image,
+ )
- except Exception as e:
- logging.error("ERROR: SNR computation failed for " + subject)
- logging.error("Reason: " + str(e))
- wm_snr_norm = np.nan
- gm_snr_norm = np.nan
- metrics_ok = False
- if argsDict["exit_on_error"] is True:
- raise
+ except Exception as e:
+ logging.error("ERROR: SNR computation failed for " + subject)
+ logging.error("Reason: " + str(e))
+ wm_snr_orig = np.nan
+ gm_snr_orig = np.nan
+ metrics_status = 1
+ if argsDict["exit_on_error"] is True:
+ raise
- # check CC size
- try:
- cc_size = checkCCSize(argsDict["subjects_dir"], subject)
+ # get WM and GM SNR for norm.mgz
+ try:
+ wm_snr_norm, gm_snr_norm = checkSNR(
+ argsDict["subjects_dir"],
+ subject,
+ SNR_AMOUT_EROSION,
+ ref_image="norm.mgz",
+ aparc_image=aparc_image,
+ )
- except Exception as e:
- logging.error("ERROR: CC size computation failed for " + subject)
- logging.error("Reason: " + str(e))
- cc_size = np.nan
- metrics_ok = False
- if argsDict["exit_on_error"] is True:
- raise
+ except Exception as e:
+ logging.error("ERROR: SNR computation failed for " + subject)
+ logging.error("Reason: " + str(e))
+ wm_snr_norm = np.nan
+ gm_snr_norm = np.nan
+ metrics_status = 1
+ if argsDict["exit_on_error"] is True:
+ raise
- # check topology
- try:
- (
- holes_lh,
- holes_rh,
- defects_lh,
- defects_rh,
- topo_lh,
- topo_rh,
- ) = checkTopology(argsDict["subjects_dir"], subject)
+ # check CC size
+ try:
+ cc_size = checkCCSize(argsDict["subjects_dir"], subject)
- except Exception as e:
- logging.error("ERROR: Topology check failed for " + subject)
- logging.error("Reason: " + str(e))
- holes_lh = np.nan
- holes_rh = np.nan
- defects_lh = np.nan
- defects_rh = np.nan
- topo_lh = np.nan
- topo_rh = np.nan
- metrics_ok = False
- if argsDict["exit_on_error"] is True:
- raise
+ except Exception as e:
+ logging.error("ERROR: CC size computation failed for " + subject)
+ logging.error("Reason: " + str(e))
+ cc_size = np.nan
+ metrics_status = 1
+ if argsDict["exit_on_error"] is True:
+ raise
- # check contrast
- try:
- con_snr_lh, con_snr_rh = checkContrast(argsDict["subjects_dir"], subject)
+ # check topology
+ try:
+ (
+ holes_lh,
+ holes_rh,
+ defects_lh,
+ defects_rh,
+ topo_lh,
+ topo_rh,
+ ) = checkTopology(argsDict["subjects_dir"], subject)
- except Exception as e:
- logging.error("ERROR: Contrast check failed for " + subject)
- logging.error("Reason: " + str(e))
- con_snr_lh = np.nan
- con_snr_rh = np.nan
- metrics_ok = False
- if argsDict["exit_on_error"] is True:
- raise
+ except Exception as e:
+ logging.error("ERROR: Topology check failed for " + subject)
+ logging.error("Reason: " + str(e))
+ holes_lh = np.nan
+ holes_rh = np.nan
+ defects_lh = np.nan
+ defects_rh = np.nan
+ topo_lh = np.nan
+ topo_rh = np.nan
+ metrics_status = 1
+ if argsDict["exit_on_error"] is True:
+ raise
- # check rotation
- try:
- rot_tal_x, rot_tal_y, rot_tal_z = checkRotation(
- argsDict["subjects_dir"], subject
+ # check contrast
+ try:
+ con_snr_lh, con_snr_rh = checkContrast(argsDict["subjects_dir"], subject)
+
+ except Exception as e:
+ logging.error("ERROR: Contrast check failed for " + subject)
+ logging.error("Reason: " + str(e))
+ con_snr_lh = np.nan
+ con_snr_rh = np.nan
+ metrics_status = 1
+ if argsDict["exit_on_error"] is True:
+ raise
+
+ # check rotation
+ try:
+ rot_tal_x, rot_tal_y, rot_tal_z = checkRotation(
+ argsDict["subjects_dir"], subject
+ )
+
+ except Exception as e:
+ logging.error("ERROR: Rotation failed for " + subject)
+ logging.error("Reason: " + str(e))
+ rot_tal_x = np.nan
+ rot_tal_y = np.nan
+ rot_tal_z = np.nan
+ metrics_status = 1
+ if argsDict["exit_on_error"] is True:
+ raise
+
+ # store data
+ metricsDict[subject].update(
+ {
+ "wm_snr_orig": wm_snr_orig,
+ "gm_snr_orig": gm_snr_orig,
+ "wm_snr_norm": wm_snr_norm,
+ "gm_snr_norm": gm_snr_norm,
+ "cc_size": cc_size,
+ "holes_lh": holes_lh,
+ "holes_rh": holes_rh,
+ "defects_lh": defects_lh,
+ "defects_rh": defects_rh,
+ "topo_lh": topo_lh,
+ "topo_rh": topo_rh,
+ "con_snr_lh": con_snr_lh,
+ "con_snr_rh": con_snr_rh,
+ "rot_tal_x": rot_tal_x,
+ "rot_tal_y": rot_tal_y,
+ "rot_tal_z": rot_tal_z,
+ }
)
- except Exception as e:
- logging.error("ERROR: Rotation failed for " + subject)
- logging.error("Reason: " + str(e))
- rot_tal_x = np.nan
- rot_tal_y = np.nan
- rot_tal_z = np.nan
- metrics_ok = False
- if argsDict["exit_on_error"] is True:
- raise
+ # write to file
+ pd.DataFrame(metricsDict[subject], index=[subject]).to_csv(os.path.join(argsDict["output_dir"], "metrics", subject, "metrics.csv"))
- # store data
- metricsDict[subject].update(
- {
- "wm_snr_orig": wm_snr_orig,
- "gm_snr_orig": gm_snr_orig,
- "wm_snr_norm": wm_snr_norm,
- "gm_snr_norm": gm_snr_norm,
- "cc_size": cc_size,
- "holes_lh": holes_lh,
- "holes_rh": holes_rh,
- "defects_lh": defects_lh,
- "defects_rh": defects_rh,
- "topo_lh": topo_lh,
- "topo_rh": topo_rh,
- "con_snr_lh": con_snr_lh,
- "con_snr_rh": con_snr_rh,
- "rot_tal_x": rot_tal_x,
- "rot_tal_y": rot_tal_y,
- "rot_tal_z": rot_tal_z,
- }
- )
+        # note that the metrics module is always run unless skipped,
+        # so metrics_status == 2 ("not done") cannot occur here.
# store data
- statusDict[subject].update({"metrics": metrics_ok})
+ statusDict[subject].update({"metrics": metrics_status})
# ----------------------------------------------------------------------
# run optional modules: shape analysis
if argsDict["shape"] is True:
- #
- try:
- # message
- print("-----------------------------")
- print("Running brainPrint analysis ...")
- print("")
- from pathlib import Path
+ shape_status = 0
+ if argsDict["skip_existing"] is True:
+ if subject in statusDict.keys():
+ if statusDict[subject]["shape"] == 0 or statusDict[subject]["shape"] == 3:
+ shape_status = 3
+ logging.info("Skipping shape computation for " + subject)
+ else:
+ logging.info("Not skipping shape computation for " + subject + ", because statusfile did not indicate ok or skipped")
+ else:
+ logging.info("Not skipping shape computation for " + subject + ", because no statusfile was found")
- # compute brainprint (will also compute shapeDNA)
- import brainprint
+ if shape_status == 0:
- # check / create subject-specific brainprint_outdir
- brainprint_outdir = Path(
- os.path.join(argsDict["output_dir"], "brainprint", subject)
- )
+ #
+ try:
+ # message
+ print("-----------------------------")
+ print("Running brainPrint analysis ...")
+ print("")
- # run brainPrint
- evMat, evecMat, dstMat = brainprint.brainprint.run_brainprint(
- subjects_dir=argsDict["subjects_dir"],
- subject_id=subject,
- destination=brainprint_outdir,
- keep_eigenvectors=SHAPE_EVEC,
- skip_cortex=SHAPE_SKIPCORTEX,
- num=SHAPE_NUM,
- norm=SHAPE_NORM,
- reweight=SHAPE_REWEIGHT,
- asymmetry=SHAPE_ASYMMETRY,
- )
+ from pathlib import Path
- # get a subset of the brainprint results
- distDict = {subject: dstMat}
+ # compute brainprint (will also compute shapeDNA)
+ import brainprint
- # return
- shape_ok = True
+ # check / create subject-specific brainprint_outdir
+ brainprint_outdir = Path(
+ os.path.join(argsDict["output_dir"], "brainprint", subject)
+ )
- #
- except Exception as e:
- distDict = {subject: []}
- logging.error("ERROR: the shape module failed for subject " + subject)
- logging.error("Reason: " + str(e))
- shape_ok = False
- if argsDict["exit_on_error"] is True:
- raise
+ # run brainPrint
+ evMat, evecMat, dstMat = brainprint.brainprint.run_brainprint(
+ subjects_dir=argsDict["subjects_dir"],
+ subject_id=subject,
+ destination=brainprint_outdir,
+ keep_eigenvectors=SHAPE_EVEC,
+ skip_cortex=SHAPE_SKIPCORTEX,
+ num=SHAPE_NUM,
+ norm=SHAPE_NORM,
+ reweight=SHAPE_REWEIGHT,
+ asymmetry=SHAPE_ASYMMETRY,
+ )
- # store data
- metricsDict[subject].update(distDict[subject])
+ # get a subset of the brainprint results
+ distDict = {subject: dstMat}
- # store data
- statusDict[subject].update({"shape": shape_ok})
+ # return
+ shape_status = 0
+
+ #
+ except Exception as e:
+ distDict = {subject: []}
+ logging.error("ERROR: the shape module failed for subject " + subject)
+ logging.error("Reason: " + str(e))
+ shape_status = 1
+ if argsDict["exit_on_error"] is True:
+ raise
+
+ # store data
+ metricsDict[subject].update(distDict[subject])
+
+ else:
+ shape_status = 2
+
+ # store data
+ statusDict[subject].update({"shape": shape_status})
# ----------------------------------------------------------------------
# run optional modules: screenshots
if argsDict["screenshots"] is True or argsDict["screenshots_html"] is True:
- #
- try:
- # message
- print("-----------------------------")
- print("Creating screenshots ...")
- print("")
-
- # check / create subject-specific screenshots_outdir
- screenshots_outdir = os.path.join(
- argsDict["output_dir"], "screenshots", subject
- )
- if not os.path.isdir(screenshots_outdir):
- os.makedirs(screenshots_outdir)
- outfile = os.path.join(screenshots_outdir, subject + ".png")
-
- # re-initialize
- screenshots_base_subj = list()
- screenshots_overlay_subj = list()
- screenshots_surf_subj = list()
-
- # check screenshots_base
- if argsDict["screenshots_base"][0] == "default":
- screenshots_base_subj = argsDict["screenshots_base"]
- logging.info("Using default for screenshot base image")
- elif os.path.isfile(argsDict["screenshots_base"][0]):
- screenshots_base_subj = argsDict["screenshots_base"]
- logging.info(
- "Using "
- + screenshots_base_subj[0]
- + " as screenshot base image"
- )
- elif os.path.isfile(
- os.path.join(
- argsDict["subjects_dir"],
- subject,
- "mri",
- argsDict["screenshots_base"][0],
- )
- ):
- screenshots_base_subj = [
- os.path.join(
- argsDict["subjects_dir"],
- subject,
- "mri",
- argsDict["screenshots_base"][0],
- )
- ]
- logging.info(
- "Using "
- + screenshots_base_subj[0]
- + " as screenshot base image"
- )
+
+ screenshots_status = 0
+ if argsDict["skip_existing"] is True:
+ if subject in statusDict.keys():
+ if statusDict[subject]["screenshots"] == 0 or statusDict[subject]["screenshots"] == 3:
+ screenshots_status = 3
+ logging.info("Skipping screenshots computation for " + subject)
+ else:
+ logging.info("Not skipping screenshots computation for " + subject + ", because statusfile did not indicate ok or skipped")
else:
- raise FileNotFoundError(
- "ERROR: cannot find the screenshots base file "
- + argsDict["screenshots_base"][0]
- )
+ logging.info("Not skipping screenshots computation for " + subject + ", because no statusfile was found")
- # check screenshots_overlay
- if argsDict["screenshots_overlay"] is not None:
- if argsDict["screenshots_overlay"][0] == "default":
- screenshots_overlay_subj = argsDict["screenshots_overlay"]
- logging.info("Using default for screenshot overlay image")
- elif os.path.isfile(argsDict["screenshots_overlay"][0]):
- screenshots_overlay_subj = argsDict["screenshots_overlay"]
+ if screenshots_status == 0:
+
+ #
+ try:
+ # message
+ print("-----------------------------")
+ print("Creating screenshots ...")
+ print("")
+
+ # check / create subject-specific screenshots_outdir
+ screenshots_outdir = os.path.join(
+ argsDict["output_dir"], "screenshots", subject
+ )
+ if not os.path.isdir(screenshots_outdir):
+ os.makedirs(screenshots_outdir)
+ outfile = os.path.join(screenshots_outdir, subject + ".png")
+
+ # re-initialize
+ screenshots_base_subj = list()
+ screenshots_overlay_subj = list()
+ screenshots_surf_subj = list()
+
+ # check screenshots_base
+ if argsDict["screenshots_base"][0] == "default":
+ screenshots_base_subj = argsDict["screenshots_base"]
+ logging.info("Using default for screenshot base image")
+ elif os.path.isfile(argsDict["screenshots_base"][0]):
+ screenshots_base_subj = argsDict["screenshots_base"]
logging.info(
"Using "
- + screenshots_overlay_subj[0]
- + " as screenshot overlay image"
+ + screenshots_base_subj[0]
+ + " as screenshot base image"
)
elif os.path.isfile(
os.path.join(
argsDict["subjects_dir"],
subject,
"mri",
- argsDict["screenshots_overlay"][0],
+ argsDict["screenshots_base"][0],
)
):
- screenshots_overlay_subj = [
+ screenshots_base_subj = [
os.path.join(
argsDict["subjects_dir"],
subject,
"mri",
- argsDict["screenshots_overlay"][0],
+ argsDict["screenshots_base"][0],
)
]
logging.info(
"Using "
- + screenshots_overlay_subj[0]
- + " as screenshot overlay image"
+ + screenshots_base_subj[0]
+ + " as screenshot base image"
)
else:
raise FileNotFoundError(
- "ERROR: cannot find the screenshots overlay file "
- + argsDict["screenshots_overlay"][0]
+ "ERROR: cannot find the screenshots base file "
+ + argsDict["screenshots_base"][0]
)
- else:
- screenshots_overlay_subj = argsDict["screenshots_overlay"]
-
- # check screenshots_surf
- if argsDict["screenshots_surf"] is not None:
- for screenshots_surf_i in argsDict["screenshots_surf"]:
- if screenshots_surf_i == "default":
- logging.info("Using default for screenshot surface")
- elif os.path.isfile(screenshots_surf_i):
+
+ # check screenshots_overlay
+ if argsDict["screenshots_overlay"] is not None:
+ if argsDict["screenshots_overlay"][0] == "default":
+ screenshots_overlay_subj = argsDict["screenshots_overlay"]
+ logging.info("Using default for screenshot overlay image")
+ elif os.path.isfile(argsDict["screenshots_overlay"][0]):
+ screenshots_overlay_subj = argsDict["screenshots_overlay"]
logging.info(
- "Using " + screenshots_surf_i + " as screenshot surface"
+ "Using "
+ + screenshots_overlay_subj[0]
+ + " as screenshot overlay image"
)
elif os.path.isfile(
os.path.join(
argsDict["subjects_dir"],
subject,
- "surf",
- screenshots_surf_i,
+ "mri",
+ argsDict["screenshots_overlay"][0],
)
):
- screenshots_surf_i = os.path.join(
- argsDict["subjects_dir"],
- subject,
- "surf",
- screenshots_surf_i,
- )
+ screenshots_overlay_subj = [
+ os.path.join(
+ argsDict["subjects_dir"],
+ subject,
+ "mri",
+ argsDict["screenshots_overlay"][0],
+ )
+ ]
logging.info(
- "Using " + screenshots_surf_i + " as screenshot surface"
+ "Using "
+ + screenshots_overlay_subj[0]
+ + " as screenshot overlay image"
)
else:
raise FileNotFoundError(
- "ERROR: cannot find the screenshots surface file "
- + screenshots_surf_i
+ "ERROR: cannot find the screenshots overlay file "
+ + argsDict["screenshots_overlay"][0]
)
- screenshots_surf_subj.append(screenshots_surf_i)
- else:
- screenshots_surf_subj = None
-
- # process
- createScreenshots(
- SUBJECT=subject,
- SUBJECTS_DIR=argsDict["subjects_dir"],
- OUTFILE=outfile,
- INTERACTIVE=False,
- BASE=screenshots_base_subj,
- OVERLAY=screenshots_overlay_subj,
- SURF=screenshots_surf_subj,
- VIEWS=argsDict["screenshots_views"],
- LAYOUT=argsDict["screenshots_layout"],
- ORIENTATION=argsDict["screenshots_orientation"],
- )
+ else:
+ screenshots_overlay_subj = argsDict["screenshots_overlay"]
- # return
- screenshots_ok = True
+ # check screenshots_surf
+ if argsDict["screenshots_surf"] is not None:
+ for screenshots_surf_i in argsDict["screenshots_surf"]:
+ if screenshots_surf_i == "default":
+ logging.info("Using default for screenshot surface")
+ elif os.path.isfile(screenshots_surf_i):
+ logging.info(
+ "Using " + screenshots_surf_i + " as screenshot surface"
+ )
+ elif os.path.isfile(
+ os.path.join(
+ argsDict["subjects_dir"],
+ subject,
+ "surf",
+ screenshots_surf_i,
+ )
+ ):
+ screenshots_surf_i = os.path.join(
+ argsDict["subjects_dir"],
+ subject,
+ "surf",
+ screenshots_surf_i,
+ )
+ logging.info(
+ "Using " + screenshots_surf_i + " as screenshot surface"
+ )
+ else:
+ raise FileNotFoundError(
+ "ERROR: cannot find the screenshots surface file "
+ + screenshots_surf_i
+ )
+ screenshots_surf_subj.append(screenshots_surf_i)
+ else:
+ screenshots_surf_subj = None
+
+ # process
+ createScreenshots(
+ SUBJECT=subject,
+ SUBJECTS_DIR=argsDict["subjects_dir"],
+ OUTFILE=outfile,
+ INTERACTIVE=False,
+ BASE=screenshots_base_subj,
+ OVERLAY=screenshots_overlay_subj,
+ SURF=screenshots_surf_subj,
+ VIEWS=argsDict["screenshots_views"],
+ LAYOUT=argsDict["screenshots_layout"],
+ ORIENTATION=argsDict["screenshots_orientation"],
+ )
- #
- except Exception as e:
- logging.error("ERROR: screenshots module failed for subject " + subject)
- logging.error("Reason: " + str(e))
- screenshots_ok = False
- if argsDict["exit_on_error"] is True:
- raise
+ # return
+ screenshots_status = 0
- # store data
- if screenshots_ok:
- imagesScreenshotsDict[subject] = outfile
- else:
- imagesScreenshotsDict[subject] = []
+ #
+ except Exception as e:
+ logging.error("ERROR: screenshots module failed for subject " + subject)
+ logging.error("Reason: " + str(e))
+ screenshots_status = 1
+ if argsDict["exit_on_error"] is True:
+ raise
- # store data
- statusDict[subject].update({"screenshots": screenshots_ok})
+ # store data
+ if screenshots_status == 0: # TODO: need outfile even for status 3
+ imagesScreenshotsDict[subject] = outfile
+ else:
+ imagesScreenshotsDict[subject] = []
+
+ else:
+ screenshots_status = 2
+
+ # store data
+ statusDict[subject].update({"screenshots": screenshots_status})
# ----------------------------------------------------------------------
# run optional modules: surface plots
if argsDict["surfaces"] is True or argsDict["surfaces_html"] is True:
- #
- try:
- # message
- print("-----------------------------")
- print("Creating surface plots ...")
- print("")
-
- # check / create subject-specific surfaces_outdir
- surfaces_outdir = os.path.join(
- argsDict["output_dir"], "surfaces", subject
- )
- if not os.path.isdir(surfaces_outdir):
- os.makedirs(surfaces_outdir)
-
- # process
- createSurfacePlots(
- SUBJECT=subject,
- SUBJECTS_DIR=argsDict["subjects_dir"],
- SURFACES_OUTDIR=surfaces_outdir,
- VIEWS=argsDict["surfaces_views"],
- FASTSURFER=argsDict["fastsurfer"],
- )
- # return
- surfaces_ok = True
- #
- except Exception as e:
- logging.error("ERROR: surfaces module failed for subject " + subject)
- logging.error("Reason: " + str(e))
- surfaces_ok = False
- if argsDict["exit_on_error"] is True:
- raise
+ surfaces_status = 0
+ if argsDict["skip_existing"] is True:
+ if subject in statusDict.keys():
+ if statusDict[subject]["surfaces"] == 0 or statusDict[subject]["surfaces"] == 3:
+ surfaces_status = 3
+ logging.info("Skipping surfaces computation for " + subject)
+ else:
+ logging.info("Not skipping surfaces computation for " + subject + ", because statusfile did not indicate ok or skipped")
+ else:
+ logging.info("Not skipping surfaces computation for " + subject + ", because no statusfile was found")
- # store data
- if surfaces_ok:
- imagesSurfacesDict[subject] = surfaces_outdir
- else:
- imagesSurfacesDict[subject] = []
+ if surfaces_status == 0:
- # store data
- statusDict[subject].update({"surfaces": surfaces_ok})
+ #
+ try:
+ # message
+ print("-----------------------------")
+ print("Creating surface plots ...")
+ print("")
+
+ # check / create subject-specific surfaces_outdir
+ surfaces_outdir = os.path.join(
+ argsDict["output_dir"], "surfaces", subject
+ )
+ if not os.path.isdir(surfaces_outdir):
+ os.makedirs(surfaces_outdir)
+
+ # process
+ createSurfacePlots(
+ SUBJECT=subject,
+ SUBJECTS_DIR=argsDict["subjects_dir"],
+ SURFACES_OUTDIR=surfaces_outdir,
+ VIEWS=argsDict["surfaces_views"],
+ FASTSURFER=argsDict["fastsurfer"],
+ )
+ # return
+ surfaces_status = 0
+
+ #
+ except Exception as e:
+ logging.error("ERROR: surfaces module failed for subject " + subject)
+ logging.error("Reason: " + str(e))
+ surfaces_status = 1
+ if argsDict["exit_on_error"] is True:
+ raise
+
+ # store data
+ if surfaces_status == 0: # TODO: need outfile even for status 3
+ imagesSurfacesDict[subject] = surfaces_outdir
+ else:
+ imagesSurfacesDict[subject] = []
+
+ else:
+ surfaces_status = 2
+
+ # store data
+ statusDict[subject].update({"surfaces": surfaces_status})
# ----------------------------------------------------------------------
# run optional modules: skullstrip
if argsDict["skullstrip"] is True or argsDict["skullstrip_html"] is True:
- #
- try:
- # message
- print("-----------------------------")
- print("Creating skullstrip evaluation ...")
- print("")
-
- # check / create subject-specific skullstrip_outdir
- skullstrip_outdir = os.path.join(
- argsDict["output_dir"], "skullstrip", subject
- )
- if not os.path.isdir(skullstrip_outdir):
- os.makedirs(skullstrip_outdir)
- outfile = os.path.join(skullstrip_outdir, subject + ".png")
-
- # re-initialize
- skullstrip_base_subj = list()
- skullstrip_overlay_subj = list()
-
- # check skullstrip_base
- if os.path.isfile(
- os.path.join(argsDict["subjects_dir"], subject, "mri", "orig.mgz")
- ):
- skullstrip_base_subj = [
- os.path.join(
- argsDict["subjects_dir"], subject, "mri", "orig.mgz"
- )
- ]
- logging.info("Using " + "orig.mgz" + " as skullstrip base image")
+
+ skullstrip_status = 0
+ if argsDict["skip_existing"] is True:
+ if subject in statusDict.keys():
+ if statusDict[subject]["skullstrip"] == 0 or statusDict[subject]["skullstrip"] == 3:
+ skullstrip_status = 3
+ logging.info("Skipping skullstrip computation for " + subject)
+ else:
+ logging.info("Not skipping skullstrip computation for " + subject + ", because statusfile did not indicate ok or skipped")
else:
- raise FileNotFoundError(
- "ERROR: cannot find the skullstrip base file " + "orig.mgz"
- )
+ logging.info("Not skipping skullstrip computation for " + subject + ", because no statusfile was found")
+
+ if skullstrip_status == 0:
- # check skullstrip_overlay
- if os.path.isfile(
- os.path.join(
- argsDict["subjects_dir"], subject, "mri", "brainmask.mgz"
+ #
+ try:
+ # message
+ print("-----------------------------")
+ print("Creating skullstrip evaluation ...")
+ print("")
+
+ # check / create subject-specific skullstrip_outdir
+ skullstrip_outdir = os.path.join(
+ argsDict["output_dir"], "skullstrip", subject
)
- ):
- skullstrip_overlay_subj = [
+ if not os.path.isdir(skullstrip_outdir):
+ os.makedirs(skullstrip_outdir)
+ outfile = os.path.join(skullstrip_outdir, subject + ".png")
+
+ # re-initialize
+ skullstrip_base_subj = list()
+ skullstrip_overlay_subj = list()
+
+ # check skullstrip_base
+ if os.path.isfile(
+ os.path.join(argsDict["subjects_dir"], subject, "mri", "orig.mgz")
+ ):
+ skullstrip_base_subj = [
+ os.path.join(
+ argsDict["subjects_dir"], subject, "mri", "orig.mgz"
+ )
+ ]
+ logging.info("Using " + "orig.mgz" + " as skullstrip base image")
+ else:
+ raise FileNotFoundError(
+ "ERROR: cannot find the skullstrip base file " + "orig.mgz"
+ )
+
+ # check skullstrip_overlay
+ if os.path.isfile(
os.path.join(
argsDict["subjects_dir"], subject, "mri", "brainmask.mgz"
)
- ]
- logging.info(
- "Using " + "brainmask.mgz" + " as skullstrip overlay image"
- )
- else:
- raise FileNotFoundError(
- "ERROR: cannot find the skullstrip overlay file "
- + "brainmask.mgz"
+ ):
+ skullstrip_overlay_subj = [
+ os.path.join(
+ argsDict["subjects_dir"], subject, "mri", "brainmask.mgz"
+ )
+ ]
+ logging.info(
+ "Using " + "brainmask.mgz" + " as skullstrip overlay image"
+ )
+ else:
+ raise FileNotFoundError(
+ "ERROR: cannot find the skullstrip overlay file "
+ + "brainmask.mgz"
+ )
+
+ # process
+ createScreenshots(
+ SUBJECT=subject,
+ SUBJECTS_DIR=argsDict["subjects_dir"],
+ OUTFILE=outfile,
+ INTERACTIVE=False,
+ BASE=skullstrip_base_subj,
+ OVERLAY=skullstrip_overlay_subj,
+ SURF=None,
+ VIEWS=argsDict["screenshots_views"],
+ LAYOUT=argsDict["screenshots_layout"],
+ BINARIZE=True,
+ ORIENTATION=argsDict["screenshots_orientation"],
)
- # process
- createScreenshots(
- SUBJECT=subject,
- SUBJECTS_DIR=argsDict["subjects_dir"],
- OUTFILE=outfile,
- INTERACTIVE=False,
- BASE=skullstrip_base_subj,
- OVERLAY=skullstrip_overlay_subj,
- SURF=None,
- VIEWS=argsDict["screenshots_views"],
- LAYOUT=argsDict["screenshots_layout"],
- BINARIZE=True,
- ORIENTATION=argsDict["screenshots_orientation"],
- )
+ # return
+ skullstrip_status = 0
- # return
- skullstrip_ok = True
+ #
+ except Exception as e:
+ logging.error("ERROR: skullstrip module failed for subject " + subject)
+ logging.error("Reason: " + str(e))
+ skullstrip_status = 1
+ if argsDict["exit_on_error"] is True:
+ raise
- #
- except Exception as e:
- logging.error("ERROR: skullstrip module failed for subject " + subject)
- logging.error("Reason: " + str(e))
- skullstrip_ok = False
- if argsDict["exit_on_error"] is True:
- raise
+ # store data
+ if skullstrip_status == 0: # TODO: need outfile even for status 3
+ imagesSkullstripDict[subject] = outfile
+ else:
+ imagesSkullstripDict[subject] = []
- # store data
- if skullstrip_ok:
- imagesSkullstripDict[subject] = outfile
- else:
- imagesSkullstripDict[subject] = []
+ else:
+ skullstrip_status = 2
- # store data
- statusDict[subject].update({"skullstrip": skullstrip_ok})
+ # store data
+ statusDict[subject].update({"skullstrip": skullstrip_status})
# ----------------------------------------------------------------------
# run optional modules: fornix
if argsDict["fornix"] is True or argsDict["fornix_html"] is True:
- #
- try:
- # message
- print("-----------------------------")
- print("Checking fornix segmentation ...")
- print("")
-
- # check / create subject-specific fornix_outdir
- fornix_outdir = os.path.join(argsDict["output_dir"], "fornix", subject)
- if not os.path.isdir(fornix_outdir):
- os.makedirs(fornix_outdir)
- fornix_screenshot_outfile = os.path.join(fornix_outdir, "cc.png")
-
- # process
- fornixShapeOutput = evaluateFornixSegmentation(
- SUBJECT=subject,
- SUBJECTS_DIR=argsDict["subjects_dir"],
- OUTPUT_DIR=fornix_outdir,
- CREATE_SCREENSHOT=FORNIX_SCREENSHOT,
- SCREENSHOTS_OUTFILE=fornix_screenshot_outfile,
- RUN_SHAPEDNA=FORNIX_SHAPE,
- N_EIGEN=FORNIX_N_EIGEN,
- )
- # create a dictionary from fornix shape output
- fornixShapeDict = {
- subject: dict(
- zip(
- map("fornixShapeEV{:0>3}".format, range(FORNIX_N_EIGEN)),
- fornixShapeOutput,
- )
+ fornix_status = 0
+ if argsDict["skip_existing"] is True:
+ if subject in statusDict.keys():
+ if statusDict[subject]["fornix"] == 0 or statusDict[subject]["fornix"] == 3:
+ fornix_status = 3
+ logging.info("Skipping fornix computation for " + subject)
+ else:
+ logging.info("Not skipping fornix computation for " + subject + ", because statusfile did not indicate ok or skipped")
+ else:
+ logging.info("Not skipping fornix computation for " + subject + ", because no statusfile was found")
+
+ if fornix_status == 0:
+
+ #
+ try:
+ # message
+ print("-----------------------------")
+ print("Checking fornix segmentation ...")
+ print("")
+
+ # check / create subject-specific fornix_outdir
+ fornix_outdir = os.path.join(argsDict["output_dir"], "fornix", subject)
+ if not os.path.isdir(fornix_outdir):
+ os.makedirs(fornix_outdir)
+ fornix_screenshot_outfile = os.path.join(fornix_outdir, "cc.png")
+
+ # process
+ fornixShapeOutput = evaluateFornixSegmentation(
+ SUBJECT=subject,
+ SUBJECTS_DIR=argsDict["subjects_dir"],
+ OUTPUT_DIR=fornix_outdir,
+ CREATE_SCREENSHOT=FORNIX_SCREENSHOT,
+ SCREENSHOTS_OUTFILE=fornix_screenshot_outfile,
+ RUN_SHAPEDNA=FORNIX_SHAPE,
+ N_EIGEN=FORNIX_N_EIGEN,
)
- }
-
- # return
- fornix_ok = True
- #
- except Exception as e:
- fornixShapeDict = {
- subject: dict(
- zip(
- map("fornixShapeEV{:0>3}".format, range(FORNIX_N_EIGEN)),
- np.full(FORNIX_N_EIGEN, np.nan),
+ # create a dictionary from fornix shape output
+ fornixShapeDict = {
+ subject: dict(
+ zip(
+ map("fornixShapeEV{:0>3}".format, range(FORNIX_N_EIGEN)),
+ fornixShapeOutput,
+ )
)
- )
- }
- logging.error("ERROR: fornix module failed for subject " + subject)
- logging.error("Reason: " + str(e))
- fornix_ok = False
- if argsDict["exit_on_error"] is True:
- raise
+ }
- # store data
- if FORNIX_SHAPE:
- metricsDict[subject].update(fornixShapeDict[subject])
+ # return
+ fornix_status = 0
- # store data
- if FORNIX_SCREENSHOT and fornix_ok:
- imagesFornixDict[subject] = fornix_screenshot_outfile
- else:
- imagesFornixDict[subject] = []
+ #
+ except Exception as e:
+ fornixShapeDict = {
+ subject: dict(
+ zip(
+ map("fornixShapeEV{:0>3}".format, range(FORNIX_N_EIGEN)),
+ np.full(FORNIX_N_EIGEN, np.nan),
+ )
+ )
+ }
+ logging.error("ERROR: fornix module failed for subject " + subject)
+ logging.error("Reason: " + str(e))
+ fornix_status = 1
+ if argsDict["exit_on_error"] is True:
+ raise
+
+ # store data
+ if FORNIX_SHAPE:
+ metricsDict[subject].update(fornixShapeDict[subject])
+
+ # store data
+ if FORNIX_SCREENSHOT and fornix_status == 0: # TODO: need outfile even for status 3
+ imagesFornixDict[subject] = fornix_screenshot_outfile
+ else:
+ imagesFornixDict[subject] = []
- # store data
- statusDict[subject].update({"fornix": fornix_ok})
+ else:
+ fornix_status = 2
+
+ # store data
+ statusDict[subject].update({"fornix": fornix_status})
# ----------------------------------------------------------------------
# run optional modules: hypothalamus
if argsDict["hypothalamus"] is True or argsDict["hypothalamus_html"] is True:
- #
- try:
- # message
- print("-----------------------------")
- print("Checking hypothalamus segmentation ...")
- print("")
-
- # check / create subject-specific hypothalamus_outdir
- hypothalamus_outdir = os.path.join(
- argsDict["output_dir"], "hypothalamus", subject
- )
- if not os.path.isdir(hypothalamus_outdir):
- os.makedirs(hypothalamus_outdir)
- hypothalamus_screenshot_outfile = os.path.join(
- hypothalamus_outdir, "hypothalamus.png"
- )
- # process
- evaluateHypothalamicSegmentation(
- SUBJECT=subject,
- SUBJECTS_DIR=argsDict["subjects_dir"],
- OUTPUT_DIR=hypothalamus_outdir,
- CREATE_SCREENSHOT=HYPOTHALAMUS_SCREENSHOT,
- SCREENSHOTS_OUTFILE=hypothalamus_screenshot_outfile,
- SCREENSHOTS_ORIENTATION=argsDict["screenshots_orientation"],
- )
+ hypothalamus_status = 0
+ if argsDict["skip_existing"] is True:
+ if subject in statusDict.keys():
+ if statusDict[subject]["hypothalamus"] == 0 or statusDict[subject]["hypothalamus"] == 3:
+ hypothalamus_status = 3
+ logging.info("Skipping hypothalamus computation for " + subject)
+ else:
+ logging.info("Not skipping hypothalamus computation for " + subject + ", because statusfile did not indicate ok or skipped")
+ else:
+ logging.info("Not skipping hypothalamus computation for " + subject + ", because no statusfile was found")
- # return
- hypothalamus_ok = True
+ if hypothalamus_status == 0:
- #
- except Exception as e:
- logging.error(
- "ERROR: hypothalamus module failed for subject " + subject
- )
- logging.error("Reason: " + str(e))
- hypothalamus_ok = False
- if argsDict["exit_on_error"] is True:
- raise
+ #
+ try:
+ # message
+ print("-----------------------------")
+ print("Checking hypothalamus segmentation ...")
+ print("")
- # store data
- if HYPOTHALAMUS_SCREENSHOT and hypothalamus_ok:
- imagesHypothalamusDict[subject] = hypothalamus_screenshot_outfile
- else:
- imagesHypothalamusDict[subject] = []
+ # check / create subject-specific hypothalamus_outdir
+ hypothalamus_outdir = os.path.join(
+ argsDict["output_dir"], "hypothalamus", subject
+ )
+ if not os.path.isdir(hypothalamus_outdir):
+ os.makedirs(hypothalamus_outdir)
+ hypothalamus_screenshot_outfile = os.path.join(
+ hypothalamus_outdir, "hypothalamus.png"
+ )
- # store data
- statusDict[subject].update({"hypothalamus": hypothalamus_ok})
+ # process
+ evaluateHypothalamicSegmentation(
+ SUBJECT=subject,
+ SUBJECTS_DIR=argsDict["subjects_dir"],
+ OUTPUT_DIR=hypothalamus_outdir,
+ CREATE_SCREENSHOT=HYPOTHALAMUS_SCREENSHOT,
+ SCREENSHOTS_OUTFILE=hypothalamus_screenshot_outfile,
+ SCREENSHOTS_ORIENTATION=argsDict["screenshots_orientation"],
+ )
+
+ # return
+ hypothalamus_status = 0
+
+ #
+ except Exception as e:
+ logging.error(
+ "ERROR: hypothalamus module failed for subject " + subject
+ )
+ logging.error("Reason: " + str(e))
+ hypothalamus_status = 1
+ if argsDict["exit_on_error"] is True:
+ raise
+
+ # store data
+ if HYPOTHALAMUS_SCREENSHOT and hypothalamus_status == 0: # TODO: need outfile even for status 3
+ imagesHypothalamusDict[subject] = hypothalamus_screenshot_outfile
+ else:
+ imagesHypothalamusDict[subject] = []
+
+ else:
+ hypothalamus_status = 2
+
+ # store data
+ statusDict[subject].update({"hypothalamus": hypothalamus_status})
# ----------------------------------------------------------------------
# run optional modules: hippocampus
if argsDict["hippocampus"] is True or argsDict["hippocampus_html"] is True:
- #
- try:
- # message
- print("-----------------------------")
- print("Checking hippocampus segmentation ...")
- print("")
-
- # check / create subject-specific hippocampus_outdir
- hippocampus_outdir = os.path.join(
- argsDict["output_dir"], "hippocampus", subject
- )
- if not os.path.isdir(hippocampus_outdir):
- os.makedirs(hippocampus_outdir)
- hippocampus_screenshot_outfile_left = os.path.join(
- hippocampus_outdir, "hippocampus-left.png"
- )
- hippocampus_screenshot_outfile_right = os.path.join(
- hippocampus_outdir, "hippocampus-right.png"
- )
- # process left
- evaluateHippocampalSegmentation(
- SUBJECT=subject,
- SUBJECTS_DIR=argsDict["subjects_dir"],
- OUTPUT_DIR=hippocampus_outdir,
- CREATE_SCREENSHOT=HIPPOCAMPUS_SCREENSHOT,
- SCREENSHOTS_OUTFILE=hippocampus_screenshot_outfile_left,
- SCREENSHOTS_ORIENTATION=argsDict["screenshots_orientation"],
- HEMI="lh",
- LABEL=argsDict["hippocampus_label"],
- )
- evaluateHippocampalSegmentation(
- SUBJECT=subject,
- SUBJECTS_DIR=argsDict["subjects_dir"],
- OUTPUT_DIR=hippocampus_outdir,
- CREATE_SCREENSHOT=HIPPOCAMPUS_SCREENSHOT,
- SCREENSHOTS_OUTFILE=hippocampus_screenshot_outfile_right,
- SCREENSHOTS_ORIENTATION=argsDict["screenshots_orientation"],
- HEMI="rh",
- LABEL=argsDict["hippocampus_label"],
- )
+ hippocampus_status = 0
+ if argsDict["skip_existing"] is True:
+ if subject in statusDict.keys():
+ if statusDict[subject]["hippocampus"] == 0 or statusDict[subject]["hippocampus"] == 3:
+ hippocampus_status = 3
+ logging.info("Skipping hippocampus computation for " + subject)
+ else:
+ logging.info("Not skipping hippocampus computation for " + subject + ", because statusfile did not indicate ok or skipped")
+ else:
+ logging.info("Not skipping hippocampus computation for " + subject + ", because no statusfile was found")
- # return
- hippocampus_ok = True
+ if hippocampus_status == 0:
- #
- except Exception as e:
- logging.error("ERROR: hippocampus module failed for subject " + subject)
- logging.error("Reason: " + str(e))
- hippocampus_ok = False
- if argsDict["exit_on_error"] is True:
- raise
+ #
+ try:
+ # message
+ print("-----------------------------")
+ print("Checking hippocampus segmentation ...")
+ print("")
- # store data
- if HIPPOCAMPUS_SCREENSHOT and hippocampus_ok:
- imagesHippocampusLeftDict[subject] = hippocampus_screenshot_outfile_left
- imagesHippocampusRightDict[
- subject
- ] = hippocampus_screenshot_outfile_right
- else:
- imagesHippocampusLeftDict[subject] = []
- imagesHippocampusRightDict[subject] = []
+ # check / create subject-specific hippocampus_outdir
+ hippocampus_outdir = os.path.join(
+ argsDict["output_dir"], "hippocampus", subject
+ )
+ if not os.path.isdir(hippocampus_outdir):
+ os.makedirs(hippocampus_outdir)
+ hippocampus_screenshot_outfile_left = os.path.join(
+ hippocampus_outdir, "hippocampus-left.png"
+ )
+ hippocampus_screenshot_outfile_right = os.path.join(
+ hippocampus_outdir, "hippocampus-right.png"
+ )
- # store data
- statusDict[subject].update({"hippocampus": hippocampus_ok})
+ # process left
+ evaluateHippocampalSegmentation(
+ SUBJECT=subject,
+ SUBJECTS_DIR=argsDict["subjects_dir"],
+ OUTPUT_DIR=hippocampus_outdir,
+ CREATE_SCREENSHOT=HIPPOCAMPUS_SCREENSHOT,
+ SCREENSHOTS_OUTFILE=hippocampus_screenshot_outfile_left,
+ SCREENSHOTS_ORIENTATION=argsDict["screenshots_orientation"],
+ HEMI="lh",
+ LABEL=argsDict["hippocampus_label"],
+ )
+ evaluateHippocampalSegmentation(
+ SUBJECT=subject,
+ SUBJECTS_DIR=argsDict["subjects_dir"],
+ OUTPUT_DIR=hippocampus_outdir,
+ CREATE_SCREENSHOT=HIPPOCAMPUS_SCREENSHOT,
+ SCREENSHOTS_OUTFILE=hippocampus_screenshot_outfile_right,
+ SCREENSHOTS_ORIENTATION=argsDict["screenshots_orientation"],
+ HEMI="rh",
+ LABEL=argsDict["hippocampus_label"],
+ )
+
+ # return
+ hippocampus_status = 0
+
+ #
+ except Exception as e:
+ logging.error("ERROR: hippocampus module failed for subject " + subject)
+ logging.error("Reason: " + str(e))
+ hippocampus_status = 1
+ if argsDict["exit_on_error"] is True:
+ raise
+
+ # store data
+ if HIPPOCAMPUS_SCREENSHOT and hippocampus_status == 0: # TODO: need outfile even for status 3
+ imagesHippocampusLeftDict[subject] = hippocampus_screenshot_outfile_left
+ imagesHippocampusRightDict[
+ subject
+ ] = hippocampus_screenshot_outfile_right
+ else:
+ imagesHippocampusLeftDict[subject] = []
+ imagesHippocampusRightDict[subject] = []
+
+ else:
+ hippocampus_status = 2
+
+ # store data
+ statusDict[subject].update({"hippocampus": hippocampus_status})
+
+ # --------------------------------------------------------------------------
+ # write statusfile
+ # 0: OK
+ # 1: Failed
+ # 2: Not done
+ # 3: Skipped
+ pd.DataFrame(statusDict[subject], index=[subject]).T.to_csv(os.path.join(argsDict["output_dir"], "status", subject, "status.txt"), header=False, sep=":")
# --------------------------------------------------------------------------
# message
@@ -2315,7 +2496,7 @@ def _do_fsqc(argsDict):
)
# return
- # outlier_ok = True
+ # outlier_status = 0
#
except Exception as e:
@@ -2880,6 +3061,32 @@ def foo(exctype, value, tb):
logging.error("Reason: " + str(e))
raise
+    # check if mandatory status subdirectory exists or can be created
+ if os.path.isdir(os.path.join(argsDict["output_dir"], "status")):
+ logging.info("Found status directory " + os.path.join(argsDict["output_dir"], "status"))
+ else:
+ try:
+ os.mkdir(os.path.join(argsDict["output_dir"], "status"))
+ except Exception as e:
+ logging.error(
+ "ERROR: cannot create status directory " + os.path.join(argsDict["output_dir"], "status")
+ )
+ logging.error("Reason: " + str(e))
+ raise
+
+    # check if mandatory metrics subdirectory exists or can be created
+ if os.path.isdir(os.path.join(argsDict["output_dir"], "metrics")):
+ logging.info("Found metrics directory " + os.path.join(argsDict["output_dir"], "metrics"))
+ else:
+ try:
+ os.mkdir(os.path.join(argsDict["output_dir"], "metrics"))
+ except Exception as e:
+ logging.error(
+ "ERROR: cannot create metrics directory " + os.path.join(argsDict["output_dir"], "metrics")
+ )
+ logging.error("Reason: " + str(e))
+ raise
+
# check if logfile can be written in output directory
try:
testfile = tempfile.TemporaryFile(dir=argsDict["output_dir"])
@@ -2944,6 +3151,7 @@ def run_fsqc(
outlier_table=None,
fastsurfer=False,
exit_on_error=False,
+ skip_existing=False,
logfile=None,
):
"""
@@ -3026,6 +3234,9 @@ def run_fsqc(
exit_on_error : bool, default: False
Exit on error. If False, a warning is thrown and the analysis
continues.
+ skip_existing : bool, default: False
+        Skip processing for a given case if output already exists, even if it
+        was created with different parameters or settings.
logfile : str, default: None
Specify a custom location for the logfile. Default location is the
output directory.
@@ -3072,6 +3283,7 @@ def run_fsqc(
argsDict["outlier_table"] = outlier_table
argsDict["fastsurfer"] = fastsurfer
argsDict["exit_on_error"] = exit_on_error
+ argsDict["skip_existing"] = skip_existing
argsDict["logfile"] = logfile
elif (argsDict is not None) and (
From ea28383d83c9266503831c8b94e789d112131864 Mon Sep 17 00:00:00 2001
From: diersk
Date: Fri, 3 May 2024 19:44:58 +0200
Subject: [PATCH 03/25] Updated documentation
---
DESCRIPTION.md | 6 +++++-
README.md | 6 +++++-
doc/Usage.rst | 3 +++
3 files changed, 13 insertions(+), 2 deletions(-)
diff --git a/DESCRIPTION.md b/DESCRIPTION.md
index f24c37c..f170491 100644
--- a/DESCRIPTION.md
+++ b/DESCRIPTION.md
@@ -150,7 +150,8 @@ run_fsqc --subjects_dir --output_dir
[--fornix] [--fornix-html] [--hippocampus]
[--hippocampus-html] [--hippocampus-label ... ]
[--hypothalamus] [--hypothalamus-html] [--shape]
- [--outlier] [--fastsurfer] [-h] [--more-help]
+ [--outlier] [--fastsurfer] [--exit-on-error]
+ [--skip-existing] [-h] [--more-help]
[...]
@@ -194,6 +195,9 @@ optional arguments:
--exit-on-error terminate the program when encountering an error;
otherwise, try to continue with the next module or
case
+  --skip-existing skip processing for a given case if output
+  already exists, even if it was created with different
+  parameters or settings
getting help:
-h, --help display this help message and exit
diff --git a/README.md b/README.md
index 4f4322d..f7c1d64 100644
--- a/README.md
+++ b/README.md
@@ -222,7 +222,8 @@ run_fsqc --subjects_dir --output_dir
[--fornix] [--fornix-html] [--hippocampus]
[--hippocampus-html] [--hippocampus-label ... ]
[--hypothalamus] [--hypothalamus-html] [--shape]
- [--outlier] [--fastsurfer] [-h] [--more-help]
+ [--outlier] [--fastsurfer] [--exit-on-error]
+ [--skip-existing] [-h] [--more-help]
[...]
@@ -266,6 +267,9 @@ optional arguments:
--exit-on-error terminate the program when encountering an error;
otherwise, try to continue with the next module or
case
+  --skip-existing skip processing for a given case if output
+  already exists, even if it was created with different
+  parameters or settings
getting help:
-h, --help display this help message and exit
diff --git a/doc/Usage.rst b/doc/Usage.rst
index c4b57d2..dc30d39 100644
--- a/doc/Usage.rst
+++ b/doc/Usage.rst
@@ -91,6 +91,9 @@ As a Command Line Tool
--exit-on-error
Terminate the program when encountering an error; otherwise, try to continue with the next module or case
+ --skip-existing
+      Skip processing for a given case if output already exists, even if it was created with different parameters or settings
+
Getting Help:
-------------
-h, --help
From 9b8a9b60dcd77d4a8de24b151e125e113313e916 Mon Sep 17 00:00:00 2001
From: diersk
Date: Fri, 17 May 2024 18:29:35 +0200
Subject: [PATCH 04/25] Updated skip-existing option
---
fsqc/fsqcMain.py | 221 +++++++++++++++++++++++++----------------------
1 file changed, 117 insertions(+), 104 deletions(-)
diff --git a/fsqc/fsqcMain.py b/fsqc/fsqcMain.py
index 697808e..40cf168 100644
--- a/fsqc/fsqcMain.py
+++ b/fsqc/fsqcMain.py
@@ -1601,9 +1601,9 @@ def _do_fsqc(argsDict):
metrics_status = 3
logging.info("Skipping metrics computation for " + subject)
else:
- logging.info("Not skipping metrics computation for " + subject + ", because statusfile did not indicate ok or skipped")
+ logging.info("Not skipping metrics computation for " + subject + ": statusfile did not indicate ok or skipped")
else:
- logging.info("Not skipping metrics computation for " + subject + ", because no statusfile was found")
+ logging.info("Not skipping metrics computation for " + subject + ": no statusfile was found")
if metrics_status == 0:
@@ -1753,9 +1753,9 @@ def _do_fsqc(argsDict):
shape_status = 3
logging.info("Skipping shape computation for " + subject)
else:
- logging.info("Not skipping shape computation for " + subject + ", because statusfile did not indicate ok or skipped")
+ logging.info("Not skipping shape computation for " + subject + ": statusfile did not indicate ok or skipped")
else:
- logging.info("Not skipping shape computation for " + subject + ", because no statusfile was found")
+ logging.info("Not skipping shape computation for " + subject + ": no statusfile was found")
if shape_status == 0:
@@ -1805,7 +1805,7 @@ def _do_fsqc(argsDict):
raise
# store data
- metricsDict[subject].update(distDict[subject])
+ metricsDict[subject].update(distDict[subject]) # TODO: what if shape_status==3? write to and read from disk?
else:
shape_status = 2
@@ -1818,6 +1818,7 @@ def _do_fsqc(argsDict):
if argsDict["screenshots"] is True or argsDict["screenshots_html"] is True:
+ # determine status
screenshots_status = 0
if argsDict["skip_existing"] is True:
if subject in statusDict.keys():
@@ -1825,10 +1826,19 @@ def _do_fsqc(argsDict):
screenshots_status = 3
logging.info("Skipping screenshots computation for " + subject)
else:
- logging.info("Not skipping screenshots computation for " + subject + ", because statusfile did not indicate ok or skipped")
+ logging.info("Not skipping screenshots computation for " + subject + ": statusfile did not indicate ok or skipped")
else:
- logging.info("Not skipping screenshots computation for " + subject + ", because no statusfile was found")
+ logging.info("Not skipping screenshots computation for " + subject + ": no statusfile was found")
+ # check / create subject-specific screenshots_outdir
+ screenshots_outdir = os.path.join(
+ argsDict["output_dir"], "screenshots", subject
+ )
+ if not os.path.isdir(screenshots_outdir):
+ os.makedirs(screenshots_outdir)
+ outfile = os.path.join(screenshots_outdir, subject + ".png")
+
+ #
if screenshots_status == 0:
#
@@ -1838,14 +1848,6 @@ def _do_fsqc(argsDict):
print("Creating screenshots ...")
print("")
- # check / create subject-specific screenshots_outdir
- screenshots_outdir = os.path.join(
- argsDict["output_dir"], "screenshots", subject
- )
- if not os.path.isdir(screenshots_outdir):
- os.makedirs(screenshots_outdir)
- outfile = os.path.join(screenshots_outdir, subject + ".png")
-
# re-initialize
screenshots_base_subj = list()
screenshots_overlay_subj = list()
@@ -1990,11 +1992,11 @@ def _do_fsqc(argsDict):
if argsDict["exit_on_error"] is True:
raise
- # store data
- if screenshots_status == 0: # TODO: need outfile even for status 3
- imagesScreenshotsDict[subject] = outfile
- else:
- imagesScreenshotsDict[subject] = []
+ # store data
+ if screenshots_status == 0 or screenshots_status == 3:
+ imagesScreenshotsDict[subject] = outfile
+ else:
+ imagesScreenshotsDict[subject] = []
else:
screenshots_status = 2
@@ -2007,6 +2009,7 @@ def _do_fsqc(argsDict):
if argsDict["surfaces"] is True or argsDict["surfaces_html"] is True:
+ # determine status
surfaces_status = 0
if argsDict["skip_existing"] is True:
if subject in statusDict.keys():
@@ -2014,10 +2017,18 @@ def _do_fsqc(argsDict):
surfaces_status = 3
logging.info("Skipping surfaces computation for " + subject)
else:
- logging.info("Not skipping surfaces computation for " + subject + ", because statusfile did not indicate ok or skipped")
+ logging.info("Not skipping surfaces computation for " + subject + ": statusfile did not indicate ok or skipped")
else:
- logging.info("Not skipping surfaces computation for " + subject + ", because no statusfile was found")
+ logging.info("Not skipping surfaces computation for " + subject + ": no statusfile was found")
+
+ # check / create subject-specific surfaces_outdir
+ surfaces_outdir = os.path.join(
+ argsDict["output_dir"], "surfaces", subject
+ )
+ if not os.path.isdir(surfaces_outdir):
+ os.makedirs(surfaces_outdir)
+ #
if surfaces_status == 0:
#
@@ -2027,13 +2038,6 @@ def _do_fsqc(argsDict):
print("Creating surface plots ...")
print("")
- # check / create subject-specific surfaces_outdir
- surfaces_outdir = os.path.join(
- argsDict["output_dir"], "surfaces", subject
- )
- if not os.path.isdir(surfaces_outdir):
- os.makedirs(surfaces_outdir)
-
# process
createSurfacePlots(
SUBJECT=subject,
@@ -2053,11 +2057,11 @@ def _do_fsqc(argsDict):
if argsDict["exit_on_error"] is True:
raise
- # store data
- if surfaces_status == 0: # TODO: need outfile even for status 3
- imagesSurfacesDict[subject] = surfaces_outdir
- else:
- imagesSurfacesDict[subject] = []
+ # store data
+ if surfaces_status == 0 or surfaces_status == 3:
+ imagesSurfacesDict[subject] = surfaces_outdir
+ else:
+ imagesSurfacesDict[subject] = []
else:
surfaces_status = 2
@@ -2070,6 +2074,7 @@ def _do_fsqc(argsDict):
if argsDict["skullstrip"] is True or argsDict["skullstrip_html"] is True:
+ # determine status
skullstrip_status = 0
if argsDict["skip_existing"] is True:
if subject in statusDict.keys():
@@ -2077,10 +2082,20 @@ def _do_fsqc(argsDict):
skullstrip_status = 3
logging.info("Skipping skullstrip computation for " + subject)
else:
- logging.info("Not skipping skullstrip computation for " + subject + ", because statusfile did not indicate ok or skipped")
+ logging.info("Not skipping skullstrip computation for " + subject + ": statusfile did not indicate ok or skipped")
else:
- logging.info("Not skipping skullstrip computation for " + subject + ", because no statusfile was found")
+ logging.info("Not skipping skullstrip computation for " + subject + ": no statusfile was found")
+
+ # check / create subject-specific skullstrip_outdir
+ skullstrip_outdir = os.path.join(
+ argsDict["output_dir"], "skullstrip", subject
+ )
+ if not os.path.isdir(skullstrip_outdir):
+ os.makedirs(skullstrip_outdir)
+ outfile = os.path.join(skullstrip_outdir, subject + ".png")
+
+ #
if skullstrip_status == 0:
#
@@ -2090,14 +2105,6 @@ def _do_fsqc(argsDict):
print("Creating skullstrip evaluation ...")
print("")
- # check / create subject-specific skullstrip_outdir
- skullstrip_outdir = os.path.join(
- argsDict["output_dir"], "skullstrip", subject
- )
- if not os.path.isdir(skullstrip_outdir):
- os.makedirs(skullstrip_outdir)
- outfile = os.path.join(skullstrip_outdir, subject + ".png")
-
# re-initialize
skullstrip_base_subj = list()
skullstrip_overlay_subj = list()
@@ -2163,11 +2170,11 @@ def _do_fsqc(argsDict):
if argsDict["exit_on_error"] is True:
raise
- # store data
- if skullstrip_status == 0: # TODO: need outfile even for status 3
- imagesSkullstripDict[subject] = outfile
- else:
- imagesSkullstripDict[subject] = []
+ # store data
+ if skullstrip_status == 0 or skullstrip_status == 3:
+ imagesSkullstripDict[subject] = outfile
+ else:
+ imagesSkullstripDict[subject] = []
else:
skullstrip_status = 2
@@ -2180,6 +2187,7 @@ def _do_fsqc(argsDict):
if argsDict["fornix"] is True or argsDict["fornix_html"] is True:
+ # determine status
fornix_status = 0
if argsDict["skip_existing"] is True:
if subject in statusDict.keys():
@@ -2187,10 +2195,17 @@ def _do_fsqc(argsDict):
fornix_status = 3
logging.info("Skipping fornix computation for " + subject)
else:
- logging.info("Not skipping fornix computation for " + subject + ", because statusfile did not indicate ok or skipped")
+ logging.info("Not skipping fornix computation for " + subject + ": statusfile did not indicate ok or skipped")
else:
- logging.info("Not skipping fornix computation for " + subject + ", because no statusfile was found")
+ logging.info("Not skipping fornix computation for " + subject + ": no statusfile was found")
+
+ # check / create subject-specific fornix_outdir
+ fornix_outdir = os.path.join(argsDict["output_dir"], "fornix", subject)
+ if not os.path.isdir(fornix_outdir):
+ os.makedirs(fornix_outdir)
+ fornix_screenshot_outfile = os.path.join(fornix_outdir, "cc.png")
+ #
if fornix_status == 0:
#
@@ -2200,12 +2215,6 @@ def _do_fsqc(argsDict):
print("Checking fornix segmentation ...")
print("")
- # check / create subject-specific fornix_outdir
- fornix_outdir = os.path.join(argsDict["output_dir"], "fornix", subject)
- if not os.path.isdir(fornix_outdir):
- os.makedirs(fornix_outdir)
- fornix_screenshot_outfile = os.path.join(fornix_outdir, "cc.png")
-
# process
fornixShapeOutput = evaluateFornixSegmentation(
SUBJECT=subject,
@@ -2248,13 +2257,13 @@ def _do_fsqc(argsDict):
# store data
if FORNIX_SHAPE:
- metricsDict[subject].update(fornixShapeDict[subject])
+ metricsDict[subject].update(fornixShapeDict[subject]) # TODO: what if fornix_status==3? write to and read from disk?
- # store data
- if FORNIX_SCREENSHOT and fornix_status == 0: # TODO: need outfile even for status 3
- imagesFornixDict[subject] = fornix_screenshot_outfile
- else:
- imagesFornixDict[subject] = []
+ # store data
+ if FORNIX_SCREENSHOT and (fornix_status == 0 or fornix_status == 3):
+ imagesFornixDict[subject] = fornix_screenshot_outfile
+ else:
+ imagesFornixDict[subject] = []
else:
fornix_status = 2
@@ -2267,6 +2276,7 @@ def _do_fsqc(argsDict):
if argsDict["hypothalamus"] is True or argsDict["hypothalamus_html"] is True:
+ # determine status
hypothalamus_status = 0
if argsDict["skip_existing"] is True:
if subject in statusDict.keys():
@@ -2274,10 +2284,21 @@ def _do_fsqc(argsDict):
hypothalamus_status = 3
logging.info("Skipping hypothalamus computation for " + subject)
else:
- logging.info("Not skipping hypothalamus computation for " + subject + ", because statusfile did not indicate ok or skipped")
+ logging.info("Not skipping hypothalamus computation for " + subject + ": statusfile did not indicate ok or skipped")
else:
- logging.info("Not skipping hypothalamus computation for " + subject + ", because no statusfile was found")
+ logging.info("Not skipping hypothalamus computation for " + subject + ": no statusfile was found")
+
+ # check / create subject-specific hypothalamus_outdir
+ hypothalamus_outdir = os.path.join(
+ argsDict["output_dir"], "hypothalamus", subject
+ )
+ if not os.path.isdir(hypothalamus_outdir):
+ os.makedirs(hypothalamus_outdir)
+ hypothalamus_screenshot_outfile = os.path.join(
+ hypothalamus_outdir, "hypothalamus.png"
+ )
+ #
if hypothalamus_status == 0:
#
@@ -2287,16 +2308,6 @@ def _do_fsqc(argsDict):
print("Checking hypothalamus segmentation ...")
print("")
- # check / create subject-specific hypothalamus_outdir
- hypothalamus_outdir = os.path.join(
- argsDict["output_dir"], "hypothalamus", subject
- )
- if not os.path.isdir(hypothalamus_outdir):
- os.makedirs(hypothalamus_outdir)
- hypothalamus_screenshot_outfile = os.path.join(
- hypothalamus_outdir, "hypothalamus.png"
- )
-
# process
evaluateHypothalamicSegmentation(
SUBJECT=subject,
@@ -2320,11 +2331,11 @@ def _do_fsqc(argsDict):
if argsDict["exit_on_error"] is True:
raise
- # store data
- if HYPOTHALAMUS_SCREENSHOT and hypothalamus_status == 0: # TODO: need outfile even for status 3
- imagesHypothalamusDict[subject] = hypothalamus_screenshot_outfile
- else:
- imagesHypothalamusDict[subject] = []
+ # store data
+ if HYPOTHALAMUS_SCREENSHOT and (hypothalamus_status == 0 or hypothalamus_status == 3):
+ imagesHypothalamusDict[subject] = hypothalamus_screenshot_outfile
+ else:
+ imagesHypothalamusDict[subject] = []
else:
hypothalamus_status = 2
@@ -2337,6 +2348,7 @@ def _do_fsqc(argsDict):
if argsDict["hippocampus"] is True or argsDict["hippocampus_html"] is True:
+ # determine status
hippocampus_status = 0
if argsDict["skip_existing"] is True:
if subject in statusDict.keys():
@@ -2344,10 +2356,24 @@ def _do_fsqc(argsDict):
hippocampus_status = 3
logging.info("Skipping hippocampus computation for " + subject)
else:
- logging.info("Not skipping hippocampus computation for " + subject + ", because statusfile did not indicate ok or skipped")
+ logging.info("Not skipping hippocampus computation for " + subject + ": statusfile did not indicate ok or skipped")
else:
- logging.info("Not skipping hippocampus computation for " + subject + ", because no statusfile was found")
+ logging.info("Not skipping hippocampus computation for " + subject + ": no statusfile was found")
+
+ # check / create subject-specific hippocampus_outdir
+ hippocampus_outdir = os.path.join(
+ argsDict["output_dir"], "hippocampus", subject
+ )
+ if not os.path.isdir(hippocampus_outdir):
+ os.makedirs(hippocampus_outdir)
+ hippocampus_screenshot_outfile_left = os.path.join(
+ hippocampus_outdir, "hippocampus-left.png"
+ )
+ hippocampus_screenshot_outfile_right = os.path.join(
+ hippocampus_outdir, "hippocampus-right.png"
+ )
+ #
if hippocampus_status == 0:
#
@@ -2357,19 +2383,6 @@ def _do_fsqc(argsDict):
print("Checking hippocampus segmentation ...")
print("")
- # check / create subject-specific hippocampus_outdir
- hippocampus_outdir = os.path.join(
- argsDict["output_dir"], "hippocampus", subject
- )
- if not os.path.isdir(hippocampus_outdir):
- os.makedirs(hippocampus_outdir)
- hippocampus_screenshot_outfile_left = os.path.join(
- hippocampus_outdir, "hippocampus-left.png"
- )
- hippocampus_screenshot_outfile_right = os.path.join(
- hippocampus_outdir, "hippocampus-right.png"
- )
-
# process left
evaluateHippocampalSegmentation(
SUBJECT=subject,
@@ -2403,15 +2416,15 @@ def _do_fsqc(argsDict):
if argsDict["exit_on_error"] is True:
raise
- # store data
- if HIPPOCAMPUS_SCREENSHOT and hippocampus_status == 0: # TODO: need outfile even for status 3
- imagesHippocampusLeftDict[subject] = hippocampus_screenshot_outfile_left
- imagesHippocampusRightDict[
- subject
- ] = hippocampus_screenshot_outfile_right
- else:
- imagesHippocampusLeftDict[subject] = []
- imagesHippocampusRightDict[subject] = []
+ # store data
+ if HIPPOCAMPUS_SCREENSHOT and (hippocampus_status == 0 or hippocampus_status == 3):
+ imagesHippocampusLeftDict[subject] = hippocampus_screenshot_outfile_left
+ imagesHippocampusRightDict[
+ subject
+ ] = hippocampus_screenshot_outfile_right
+ else:
+ imagesHippocampusLeftDict[subject] = []
+ imagesHippocampusRightDict[subject] = []
else:
hippocampus_status = 2
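For orientation: the per-module status codes used throughout this series are 0 (OK), 1 (failed), 2 (not done) and 3 (skipped), and a screenshot or overlay is reused whenever the status is 0 or 3, i.e. whenever usable output already exists on disk. A minimal sketch of that convention (illustrative only, not part of the patches):

    from enum import IntEnum

    class ModuleStatus(IntEnum):
        OK = 0        # module ran successfully in this run
        FAILED = 1    # module raised an error
        NOT_DONE = 2  # module was not requested
        SKIPPED = 3   # --skip-existing found previous output and reused it

    def has_usable_output(status: int) -> bool:
        # mirrors the "status == 0 or status == 3" checks in fsqcMain.py
        return status in (ModuleStatus.OK, ModuleStatus.SKIPPED)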
From 81462831d5598f484d5bafbab447ebca1621330f Mon Sep 17 00:00:00 2001
From: diersk
Date: Fri, 17 May 2024 21:25:57 +0200
Subject: [PATCH 05/25] Optionally write out fornix shape metrics
---
fsqc/evaluateFornixSegmentation.py | 15 +++++++++++++--
1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/fsqc/evaluateFornixSegmentation.py b/fsqc/evaluateFornixSegmentation.py
index 35e4b97..5480db3 100644
--- a/fsqc/evaluateFornixSegmentation.py
+++ b/fsqc/evaluateFornixSegmentation.py
@@ -13,6 +13,7 @@ def evaluateFornixSegmentation(
SCREENSHOTS_OUTFILE=[],
RUN_SHAPEDNA=True,
N_EIGEN=15,
+ WRITE_EIGEN=True,
):
"""
Evaluate potential missegmentation of the fornix.
@@ -46,6 +47,8 @@ def evaluateFornixSegmentation(
Whether to run shape analysis.
N_EIGEN : int, optional (default: 30)
Number of Eigenvalues for shape analysis.
+ WRITE_EIGEN : bool, optional (default: True)
+ Whether to write a CSV file with the eigenvalues (or NaNs) to the output directory.
Returns
-------
@@ -61,6 +64,7 @@ def evaluateFornixSegmentation(
import nibabel as nb
import numpy as np
+ import pandas as pd
from fsqc.createScreenshots import createScreenshots
from fsqc.fsqcUtils import applyTransform, binarizeImage
@@ -187,10 +191,17 @@ def evaluateFornixSegmentation(
d["Eigenvectors"] = evec
# return
- return d["Eigenvalues"]
+ out = d["Eigenvalues"]
else:
out = np.empty(N_EIGEN)
out[:] = np.nan
- return out
+ # write output
+ if WRITE_EIGEN is True:
+ pd.DataFrame(out).transpose().to_csv(os.path.join(OUTPUT_DIR, SUBJECT + ".fornix.csv"), na_rep="NA", index=False)
+
+ # --------------------------------------------------------------------------
+ # return
+
+ return out
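For reference, the CSV written when WRITE_EIGEN is enabled holds a single row of N_EIGEN values, with missing entries encoded as "NA"; the skip-existing read-back added in the next patch relies on exactly this layout. A minimal round-trip sketch (illustrative only; the temporary path and subject name are hypothetical):

    import os
    import tempfile

    import numpy as np
    import pandas as pd

    # hypothetical eigenvalue vector (15 entries, matching FORNIX_N_EIGEN)
    out = np.full(15, np.nan)
    outfile = os.path.join(tempfile.gettempdir(), "subject01.fornix.csv")

    # write: one row, integer column labels 0..14, NaNs written as "NA"
    pd.DataFrame(out).transpose().to_csv(outfile, na_rep="NA", index=False)

    # read back the first (and only) row, as done later for fornix_status == 3
    restored = np.array(pd.read_csv(outfile))[0]
    assert restored.shape == (15,)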
From 9146fb16614123d3934f4720edb985e5b5a7c229 Mon Sep 17 00:00:00 2001
From: diersk
Date: Mon, 27 May 2024 15:55:18 +0200
Subject: [PATCH 06/25] Added group-only, no-group options
---
doc/Usage.rst | 6 +
fsqc/fsqcMain.py | 2477 ++++++++++++++++++++++++----------------------
2 files changed, 1295 insertions(+), 1188 deletions(-)
diff --git a/doc/Usage.rst b/doc/Usage.rst
index dc30d39..7a31c02 100644
--- a/doc/Usage.rst
+++ b/doc/Usage.rst
@@ -94,6 +94,12 @@ As a Command Line Tool
--skip-existing
skips processing for a given case if output already exists, even with possibly different parameters or settings
+ --no-group
+ run script in subject-level mode; computes individual files and statistics, but does not create group-level summaries
+
+ --group-only
+ run script in group mode; creates group-level summaries from previously computed subject-level results, and therefore needs to be run on an output directory that already contains such results
+
Getting Help:
-------------
-h, --help
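An illustrative two-stage invocation of the new options (a sketch only; the run_fsqc entry point name and the directory paths are assumptions, not part of this patch):

    # stage 1: subject-level processing, no group-level summaries
    run_fsqc --subjects_dir /data/subjects --output_dir /data/fsqc --no-group

    # stage 2: group-level summaries from the existing subject-level results
    run_fsqc --subjects_dir /data/subjects --output_dir /data/fsqc --group-only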
diff --git a/fsqc/fsqcMain.py b/fsqc/fsqcMain.py
index 40cf168..d3b4569 100644
--- a/fsqc/fsqcMain.py
+++ b/fsqc/fsqcMain.py
@@ -201,6 +201,7 @@ def get_help(print_help=True, return_help=False):
[--hypothalamus-html] [--hippocampus]
[--hippocampus-html] [--hippocampus-label ]
[--shape] [--outlier] [--fastsurfer]
+ [--no-group] [--group-only]
[--exit-on-error] [--skip-existing] [-h]
required arguments:
@@ -241,6 +242,13 @@ def get_help(print_help=True, return_help=False):
--outlier-table specify normative values (only in conjunction with
--outlier)
--fastsurfer use FastSurfer instead of FreeSurfer output
+ --no-group run script in subject-level mode; computes
+ individual files and statistics, but does not
+ create group-level summaries
+ --group-only run script in group mode; creates group-level
+ summaries from previously computed subject-level
+ results; needs to be run on an output directory
+ that already contains such results
--exit-on-error terminate the program when encountering an error;
otherwise, try to continue with the next module or
case
@@ -612,6 +620,22 @@ def _parse_arguments():
action="store_true",
required=False,
)
+ optional.add_argument(
+ "--no-group",
+ dest="no_group",
+ help="run script in subject-level mode",
+ default=False,
+ action="store_true",
+ required=False,
+ )
+ optional.add_argument(
+ "--group-only",
+ dest="group_only",
+ help="run script in group mode",
+ default=False,
+ action="store_true",
+ required=False,
+ )
optional.add_argument(
"--exit-on-error",
dest="exit_on_error",
@@ -740,6 +764,8 @@ def _parse_arguments():
argsDict["outlier"] = args.outlier
argsDict["outlier_table"] = args.outlier_table
argsDict["fastsurfer"] = args.fastsurfer
+ argsDict["no_group"] = args.no_group
+ argsDict["group_only"] = args.group_only
argsDict["exit_on_error"] = args.exit_on_error
argsDict["skip_existing"] = args.skip_existing
@@ -831,6 +857,12 @@ def _check_arguments(argsDict):
logging.info("Found subject " + subject)
argsDict["subjects"].extend([subject])
+ # check if only one of no_group and group_only is true
+ if argsDict["no_group"] is True and argsDict["group_only"] is True:
+ raise ValueError(
+ "ERROR: Use either --no-group or --group-only (but not both)."
+ )
+
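As a design note, the same constraint could alternatively be enforced at parse time via argparse's mutually exclusive groups; a minimal sketch (illustrative only, not how this patch implements it):

    import argparse

    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group()
    group.add_argument("--no-group", dest="no_group", action="store_true", default=False)
    group.add_argument("--group-only", dest="group_only", action="store_true", default=False)

    # argparse itself rejects combining the two flags with a usage error
    args = parser.parse_args(["--no-group"])
    assert args.no_group and not args.group_only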
# check if screenshots subdirectory exists or can be created and is writable
if argsDict["screenshots"] is True or argsDict["screenshots_html"] is True:
if os.path.isdir(os.path.join(argsDict["output_dir"], "screenshots")):
@@ -1491,6 +1523,8 @@ def _do_fsqc(argsDict):
import numpy as np
import pandas as pd
+ from pathlib import Path
+
from fsqc.checkCCSize import checkCCSize
from fsqc.checkContrast import checkContrast
from fsqc.checkRotation import checkRotation
@@ -1510,6 +1544,7 @@ def _do_fsqc(argsDict):
FORNIX_SCREENSHOT = True
FORNIX_SHAPE = False
FORNIX_N_EIGEN = 15
+ FORNIX_WRITE_EIGEN = True
HYPOTHALAMUS_SCREENSHOT = True
HIPPOCAMPUS_SCREENSHOT = True
OUTLIER_N_MIN = 5
@@ -1543,1476 +1578,1532 @@ def _do_fsqc(argsDict):
# create status dict
statusDict = dict()
- # loop through the specified subjects
- for subject in argsDict["subjects"]:
- #
- logging.info(
- "Starting fsqc for subject "
- + subject
- + " at "
- + time.strftime("%Y-%m-%d %H:%M %Z", time.localtime(time.time())),
- )
+ # --------------------------------------------------------------------------
+ # subject-level processing
- # ----------------------------------------------------------------------
- # set images
+ if argsDict["group_only"] is False:
- if argsDict["fastsurfer"] is True:
- aparc_image = "aparc.DKTatlas+aseg.deep.mgz"
- else:
- aparc_image = "aparc+aseg.mgz"
+ # loop through the specified subjects
+ for subject in argsDict["subjects"]:
+ #
+ logging.info(
+ "Starting fsqc for subject "
+ + subject
+ + " at "
+ + time.strftime("%Y-%m-%d %H:%M %Z", time.localtime(time.time())),
+ )
- # ----------------------------------------------------------------------
- # add subject to dictionary
+ # ----------------------------------------------------------------------
+ # set images
- metricsDict.update({subject: {"subject": subject}})
- statusDict.update({subject: {"subject": subject}})
+ if argsDict["fastsurfer"] is True:
+ aparc_image = "aparc.DKTatlas+aseg.deep.mgz"
+ else:
+ aparc_image = "aparc+aseg.mgz"
- # ----------------------------------------------------------------------
- # check for existing statusfile
+ # ----------------------------------------------------------------------
+ # add subject to dictionary
- # check / create subject-specific status_outdir
- status_outdir = os.path.join(argsDict["output_dir"], "status", subject)
- if not os.path.isdir(status_outdir):
- os.makedirs(status_outdir)
+ metricsDict.update({subject: {"subject": subject}})
+ statusDict.update({subject: {"subject": subject}})
- # if it already exists, read statusfile
- if os.path.exists(os.path.join(status_outdir, "status.txt")):
- statusDict[subject] = dict(pd.read_csv(os.path.join(status_outdir, "status.txt"), sep=":", header=None, comment="#", names=["module", "status"]).to_dict(orient="split")['data'])
+ # ----------------------------------------------------------------------
+ # check for existing statusfile
- # note:
- # 0: OK
- # 1: Failed
- # 2: Not done
- # 3: Skipped
+ # check / create subject-specific status_outdir
+ status_outdir = os.path.join(argsDict["output_dir"], "status", subject)
+ if not os.path.isdir(status_outdir):
+ os.makedirs(status_outdir)
- # ----------------------------------------------------------------------
- # compute core metrics
+ # if it already exists, read statusfile
+ if os.path.exists(os.path.join(status_outdir, "status.txt")):
+ status_dict = dict(pd.read_csv(os.path.join(status_outdir, "status.txt"), sep=":", header=None, comment="#", names=["module", "status"], dtype=str).to_dict(orient="split")['data'])
+ for x in ['metrics', 'shape', 'screenshots', 'surfaces', 'skullstrip', 'fornix', 'hypothalamus', 'hippocampus']:
+ status_dict[x] = int(status_dict[x])
+ statusDict[subject] = status_dict
- # check / create subject-specific metrics_outdir
- metrics_outdir = os.path.join(argsDict["output_dir"], "metrics", subject)
- if not os.path.isdir(metrics_outdir):
- os.makedirs(metrics_outdir)
+ # note:
+ # 0: OK
+ # 1: Failed
+ # 2: Not done
+ # 3: Skipped
- #
- metrics_status = 0
- if argsDict["skip_existing"] is True:
- if subject in statusDict.keys():
- if statusDict[subject]["metrics"] == 0 or statusDict[subject]["metrics"] == 3:
- metrics_status = 3
- logging.info("Skipping metrics computation for " + subject)
+ # ----------------------------------------------------------------------
+ # compute core metrics
+
+ # check / create subject-specific metrics_outdir
+ metrics_outdir = os.path.join(argsDict["output_dir"], "metrics", subject)
+ if not os.path.isdir(metrics_outdir):
+ os.makedirs(metrics_outdir)
+
+ #
+ metrics_status = 0
+ if argsDict["skip_existing"] is True:
+ if subject in statusDict.keys():
+ if statusDict[subject]["metrics"] == 0 or statusDict[subject]["metrics"] == 3:
+ metrics_status = 3
+ logging.info("Skipping metrics computation for " + subject)
+ else:
+ logging.info("Not skipping metrics computation for " + subject + ": statusfile did not indicate ok or skipped")
else:
- logging.info("Not skipping metrics computation for " + subject + ": statusfile did not indicate ok or skipped")
- else:
- logging.info("Not skipping metrics computation for " + subject + ": no statusfile was found")
+ logging.info("Not skipping metrics computation for " + subject + ": no statusfile was found")
- if metrics_status == 0:
+ if metrics_status == 0:
- # get WM and GM SNR for orig.mgz
- try:
- wm_snr_orig, gm_snr_orig = checkSNR(
- argsDict["subjects_dir"],
- subject,
- SNR_AMOUT_EROSION,
- ref_image="orig.mgz",
- aparc_image=aparc_image,
- )
+ # get WM and GM SNR for orig.mgz
+ try:
+ wm_snr_orig, gm_snr_orig = checkSNR(
+ argsDict["subjects_dir"],
+ subject,
+ SNR_AMOUT_EROSION,
+ ref_image="orig.mgz",
+ aparc_image=aparc_image,
+ )
- except Exception as e:
- logging.error("ERROR: SNR computation failed for " + subject)
- logging.error("Reason: " + str(e))
- wm_snr_orig = np.nan
- gm_snr_orig = np.nan
- metrics_status = 1
- if argsDict["exit_on_error"] is True:
- raise
+ except Exception as e:
+ logging.error("ERROR: SNR computation failed for " + subject)
+ logging.error("Reason: " + str(e))
+ wm_snr_orig = np.nan
+ gm_snr_orig = np.nan
+ metrics_status = 1
+ if argsDict["exit_on_error"] is True:
+ raise
- # get WM and GM SNR for norm.mgz
- try:
- wm_snr_norm, gm_snr_norm = checkSNR(
- argsDict["subjects_dir"],
- subject,
- SNR_AMOUT_EROSION,
- ref_image="norm.mgz",
- aparc_image=aparc_image,
- )
+ # get WM and GM SNR for norm.mgz
+ try:
+ wm_snr_norm, gm_snr_norm = checkSNR(
+ argsDict["subjects_dir"],
+ subject,
+ SNR_AMOUT_EROSION,
+ ref_image="norm.mgz",
+ aparc_image=aparc_image,
+ )
- except Exception as e:
- logging.error("ERROR: SNR computation failed for " + subject)
- logging.error("Reason: " + str(e))
- wm_snr_norm = np.nan
- gm_snr_norm = np.nan
- metrics_status = 1
- if argsDict["exit_on_error"] is True:
- raise
+ except Exception as e:
+ logging.error("ERROR: SNR computation failed for " + subject)
+ logging.error("Reason: " + str(e))
+ wm_snr_norm = np.nan
+ gm_snr_norm = np.nan
+ metrics_status = 1
+ if argsDict["exit_on_error"] is True:
+ raise
- # check CC size
- try:
- cc_size = checkCCSize(argsDict["subjects_dir"], subject)
+ # check CC size
+ try:
+ cc_size = checkCCSize(argsDict["subjects_dir"], subject)
- except Exception as e:
- logging.error("ERROR: CC size computation failed for " + subject)
- logging.error("Reason: " + str(e))
- cc_size = np.nan
- metrics_status = 1
- if argsDict["exit_on_error"] is True:
- raise
+ except Exception as e:
+ logging.error("ERROR: CC size computation failed for " + subject)
+ logging.error("Reason: " + str(e))
+ cc_size = np.nan
+ metrics_status = 1
+ if argsDict["exit_on_error"] is True:
+ raise
- # check topology
- try:
- (
- holes_lh,
- holes_rh,
- defects_lh,
- defects_rh,
- topo_lh,
- topo_rh,
- ) = checkTopology(argsDict["subjects_dir"], subject)
+ # check topology
+ try:
+ (
+ holes_lh,
+ holes_rh,
+ defects_lh,
+ defects_rh,
+ topo_lh,
+ topo_rh,
+ ) = checkTopology(argsDict["subjects_dir"], subject)
- except Exception as e:
- logging.error("ERROR: Topology check failed for " + subject)
- logging.error("Reason: " + str(e))
- holes_lh = np.nan
- holes_rh = np.nan
- defects_lh = np.nan
- defects_rh = np.nan
- topo_lh = np.nan
- topo_rh = np.nan
- metrics_status = 1
- if argsDict["exit_on_error"] is True:
- raise
+ except Exception as e:
+ logging.error("ERROR: Topology check failed for " + subject)
+ logging.error("Reason: " + str(e))
+ holes_lh = np.nan
+ holes_rh = np.nan
+ defects_lh = np.nan
+ defects_rh = np.nan
+ topo_lh = np.nan
+ topo_rh = np.nan
+ metrics_status = 1
+ if argsDict["exit_on_error"] is True:
+ raise
- # check contrast
- try:
- con_snr_lh, con_snr_rh = checkContrast(argsDict["subjects_dir"], subject)
+ # check contrast
+ try:
+ con_snr_lh, con_snr_rh = checkContrast(argsDict["subjects_dir"], subject)
- except Exception as e:
- logging.error("ERROR: Contrast check failed for " + subject)
- logging.error("Reason: " + str(e))
- con_snr_lh = np.nan
- con_snr_rh = np.nan
- metrics_status = 1
- if argsDict["exit_on_error"] is True:
- raise
+ except Exception as e:
+ logging.error("ERROR: Contrast check failed for " + subject)
+ logging.error("Reason: " + str(e))
+ con_snr_lh = np.nan
+ con_snr_rh = np.nan
+ metrics_status = 1
+ if argsDict["exit_on_error"] is True:
+ raise
- # check rotation
- try:
- rot_tal_x, rot_tal_y, rot_tal_z = checkRotation(
- argsDict["subjects_dir"], subject
- )
+ # check rotation
+ try:
+ rot_tal_x, rot_tal_y, rot_tal_z = checkRotation(
+ argsDict["subjects_dir"], subject
+ )
- except Exception as e:
- logging.error("ERROR: Rotation failed for " + subject)
- logging.error("Reason: " + str(e))
- rot_tal_x = np.nan
- rot_tal_y = np.nan
- rot_tal_z = np.nan
- metrics_status = 1
- if argsDict["exit_on_error"] is True:
- raise
+ except Exception as e:
+ logging.error("ERROR: Rotation failed for " + subject)
+ logging.error("Reason: " + str(e))
+ rot_tal_x = np.nan
+ rot_tal_y = np.nan
+ rot_tal_z = np.nan
+ metrics_status = 1
+ if argsDict["exit_on_error"] is True:
+ raise
- # store data
- metricsDict[subject].update(
- {
- "wm_snr_orig": wm_snr_orig,
- "gm_snr_orig": gm_snr_orig,
- "wm_snr_norm": wm_snr_norm,
- "gm_snr_norm": gm_snr_norm,
- "cc_size": cc_size,
- "holes_lh": holes_lh,
- "holes_rh": holes_rh,
- "defects_lh": defects_lh,
- "defects_rh": defects_rh,
- "topo_lh": topo_lh,
- "topo_rh": topo_rh,
- "con_snr_lh": con_snr_lh,
- "con_snr_rh": con_snr_rh,
- "rot_tal_x": rot_tal_x,
- "rot_tal_y": rot_tal_y,
- "rot_tal_z": rot_tal_z,
- }
- )
+ # store data
+ metricsDict[subject].update(
+ {
+ "wm_snr_orig": wm_snr_orig,
+ "gm_snr_orig": gm_snr_orig,
+ "wm_snr_norm": wm_snr_norm,
+ "gm_snr_norm": gm_snr_norm,
+ "cc_size": cc_size,
+ "holes_lh": holes_lh,
+ "holes_rh": holes_rh,
+ "defects_lh": defects_lh,
+ "defects_rh": defects_rh,
+ "topo_lh": topo_lh,
+ "topo_rh": topo_rh,
+ "con_snr_lh": con_snr_lh,
+ "con_snr_rh": con_snr_rh,
+ "rot_tal_x": rot_tal_x,
+ "rot_tal_y": rot_tal_y,
+ "rot_tal_z": rot_tal_z,
+ }
+ )
- # write to file
- pd.DataFrame(metricsDict[subject], index=[subject]).to_csv(os.path.join(argsDict["output_dir"], "metrics", subject, "metrics.csv"))
+ # write to file
+ pd.DataFrame(metricsDict[subject], index=[subject]).to_csv(os.path.join(argsDict["output_dir"], "metrics", subject, "metrics.csv"))
- # note that we cannot "not do" the metrics module, only skipping is possible.
- # hence no metrics_status == 2 possible.
+ # note that we cannot "not do" the metrics module, only skipping is possible.
+ # hence no metrics_status == 2 possible.
- # store data
- statusDict[subject].update({"metrics": metrics_status})
+ # store data
+ statusDict[subject].update({"metrics": metrics_status})
- # ----------------------------------------------------------------------
- # run optional modules: shape analysis
+ # ----------------------------------------------------------------------
+ # run optional modules: shape analysis
- if argsDict["shape"] is True:
+ if argsDict["shape"] is True:
- shape_status = 0
- if argsDict["skip_existing"] is True:
- if subject in statusDict.keys():
- if statusDict[subject]["shape"] == 0 or statusDict[subject]["shape"] == 3:
- shape_status = 3
- logging.info("Skipping shape computation for " + subject)
+ # determine status
+ shape_status = 0
+ if argsDict["skip_existing"] is True:
+ if subject in statusDict.keys():
+ if statusDict[subject]["shape"] == 0 or statusDict[subject]["shape"] == 3:
+ shape_status = 3
+ logging.info("Skipping shape computation for " + subject)
+ else:
+ logging.info("Not skipping shape computation for " + subject + ": statusfile did not indicate ok or skipped")
else:
- logging.info("Not skipping shape computation for " + subject + ": statusfile did not indicate ok or skipped")
- else:
- logging.info("Not skipping shape computation for " + subject + ": no statusfile was found")
+ logging.info("Not skipping shape computation for " + subject + ": no statusfile was found")
- if shape_status == 0:
+ # check / create subject-specific brainprint_outdir
+ brainprint_outdir = Path(
+ os.path.join(argsDict["output_dir"], "brainprint", subject)
+ )
#
- try:
- # message
- print("-----------------------------")
- print("Running brainPrint analysis ...")
- print("")
+ if shape_status == 0:
+
+ #
+ try:
+ # message
+ print("-----------------------------")
+ print("Running brainPrint analysis ...")
+ print("")
+
+ # compute brainprint (will also compute shapeDNA)
+ import brainprint
+
+ # run brainPrint
+ evMat, evecMat, dstMat = brainprint.brainprint.run_brainprint(
+ subjects_dir=argsDict["subjects_dir"],
+ subject_id=subject,
+ destination=brainprint_outdir,
+ keep_eigenvectors=SHAPE_EVEC,
+ skip_cortex=SHAPE_SKIPCORTEX,
+ num=SHAPE_NUM,
+ norm=SHAPE_NORM,
+ reweight=SHAPE_REWEIGHT,
+ asymmetry=SHAPE_ASYMMETRY,
+ )
- from pathlib import Path
+ # get a subset of the brainprint results
+ distDict = {subject: dstMat}
- # compute brainprint (will also compute shapeDNA)
- import brainprint
+ # return
+ shape_status = 0
- # check / create subject-specific brainprint_outdir
- brainprint_outdir = Path(
- os.path.join(argsDict["output_dir"], "brainprint", subject)
- )
+ #
+ except Exception as e:
+ distDict = {subject: []}
+ logging.error("ERROR: the shape module failed for subject " + subject)
+ logging.error("Reason: " + str(e))
+ shape_status = 1
+ if argsDict["exit_on_error"] is True:
+ raise
- # run brainPrint
- evMat, evecMat, dstMat = brainprint.brainprint.run_brainprint(
- subjects_dir=argsDict["subjects_dir"],
- subject_id=subject,
- destination=brainprint_outdir,
- keep_eigenvectors=SHAPE_EVEC,
- skip_cortex=SHAPE_SKIPCORTEX,
- num=SHAPE_NUM,
- norm=SHAPE_NORM,
- reweight=SHAPE_REWEIGHT,
- asymmetry=SHAPE_ASYMMETRY,
- )
+ elif shape_status == 3:
- # get a subset of the brainprint results
+ # read results from previous run
+ dstMat = pd.read_csv(brainprint_outdir / (subject + ".brainprint.asymmetry.csv")).to_dict(orient="index")[0]
distDict = {subject: dstMat}
- # return
- shape_status = 0
-
- #
- except Exception as e:
- distDict = {subject: []}
- logging.error("ERROR: the shape module failed for subject " + subject)
- logging.error("Reason: " + str(e))
- shape_status = 1
- if argsDict["exit_on_error"] is True:
- raise
-
# store data
- metricsDict[subject].update(distDict[subject]) # TODO: what if shape_status==3? write to and read from disk?
+ metricsDict[subject].update(distDict[subject])
- else:
- shape_status = 2
+ else:
+ shape_status = 2
- # store data
- statusDict[subject].update({"shape": shape_status})
+ # store data
+ statusDict[subject].update({"shape": shape_status})
- # ----------------------------------------------------------------------
- # run optional modules: screenshots
+ # ----------------------------------------------------------------------
+ # run optional modules: screenshots
- if argsDict["screenshots"] is True or argsDict["screenshots_html"] is True:
+ if argsDict["screenshots"] is True or argsDict["screenshots_html"] is True:
- # determine status
- screenshots_status = 0
- if argsDict["skip_existing"] is True:
- if subject in statusDict.keys():
- if statusDict[subject]["screenshots"] == 0 or statusDict[subject]["screenshots"] == 3:
- screenshots_status = 3
- logging.info("Skipping screenshots computation for " + subject)
+ # determine status
+ screenshots_status = 0
+ if argsDict["skip_existing"] is True:
+ if subject in statusDict.keys():
+ if statusDict[subject]["screenshots"] == 0 or statusDict[subject]["screenshots"] == 3:
+ screenshots_status = 3
+ logging.info("Skipping screenshots computation for " + subject)
+ else:
+ logging.info("Not skipping screenshots computation for " + subject + ": statusfile did not indicate ok or skipped")
else:
- logging.info("Not skipping screenshots computation for " + subject + ": statusfile did not indicate ok or skipped")
- else:
- logging.info("Not skipping screenshots computation for " + subject + ": no statusfile was found")
-
- # check / create subject-specific screenshots_outdir
- screenshots_outdir = os.path.join(
- argsDict["output_dir"], "screenshots", subject
- )
- if not os.path.isdir(screenshots_outdir):
- os.makedirs(screenshots_outdir)
- outfile = os.path.join(screenshots_outdir, subject + ".png")
+ logging.info("Not skipping screenshots computation for " + subject + ": no statusfile was found")
- #
- if screenshots_status == 0:
+ # check / create subject-specific screenshots_outdir
+ screenshots_outdir = os.path.join(
+ argsDict["output_dir"], "screenshots", subject
+ )
+ if not os.path.isdir(screenshots_outdir):
+ os.makedirs(screenshots_outdir)
+ outfile = os.path.join(screenshots_outdir, subject + ".png")
#
- try:
- # message
- print("-----------------------------")
- print("Creating screenshots ...")
- print("")
-
- # re-initialize
- screenshots_base_subj = list()
- screenshots_overlay_subj = list()
- screenshots_surf_subj = list()
-
- # check screenshots_base
- if argsDict["screenshots_base"][0] == "default":
- screenshots_base_subj = argsDict["screenshots_base"]
- logging.info("Using default for screenshot base image")
- elif os.path.isfile(argsDict["screenshots_base"][0]):
- screenshots_base_subj = argsDict["screenshots_base"]
- logging.info(
- "Using "
- + screenshots_base_subj[0]
- + " as screenshot base image"
- )
- elif os.path.isfile(
- os.path.join(
- argsDict["subjects_dir"],
- subject,
- "mri",
- argsDict["screenshots_base"][0],
- )
- ):
- screenshots_base_subj = [
- os.path.join(
- argsDict["subjects_dir"],
- subject,
- "mri",
- argsDict["screenshots_base"][0],
- )
- ]
- logging.info(
- "Using "
- + screenshots_base_subj[0]
- + " as screenshot base image"
- )
- else:
- raise FileNotFoundError(
- "ERROR: cannot find the screenshots base file "
- + argsDict["screenshots_base"][0]
- )
-
- # check screenshots_overlay
- if argsDict["screenshots_overlay"] is not None:
- if argsDict["screenshots_overlay"][0] == "default":
- screenshots_overlay_subj = argsDict["screenshots_overlay"]
- logging.info("Using default for screenshot overlay image")
- elif os.path.isfile(argsDict["screenshots_overlay"][0]):
- screenshots_overlay_subj = argsDict["screenshots_overlay"]
+ if screenshots_status == 0:
+
+ #
+ try:
+ # message
+ print("-----------------------------")
+ print("Creating screenshots ...")
+ print("")
+
+ # re-initialize
+ screenshots_base_subj = list()
+ screenshots_overlay_subj = list()
+ screenshots_surf_subj = list()
+
+ # check screenshots_base
+ if argsDict["screenshots_base"][0] == "default":
+ screenshots_base_subj = argsDict["screenshots_base"]
+ logging.info("Using default for screenshot base image")
+ elif os.path.isfile(argsDict["screenshots_base"][0]):
+ screenshots_base_subj = argsDict["screenshots_base"]
logging.info(
"Using "
- + screenshots_overlay_subj[0]
- + " as screenshot overlay image"
+ + screenshots_base_subj[0]
+ + " as screenshot base image"
)
elif os.path.isfile(
os.path.join(
argsDict["subjects_dir"],
subject,
"mri",
- argsDict["screenshots_overlay"][0],
+ argsDict["screenshots_base"][0],
)
):
- screenshots_overlay_subj = [
+ screenshots_base_subj = [
os.path.join(
argsDict["subjects_dir"],
subject,
"mri",
- argsDict["screenshots_overlay"][0],
+ argsDict["screenshots_base"][0],
)
]
logging.info(
"Using "
- + screenshots_overlay_subj[0]
- + " as screenshot overlay image"
+ + screenshots_base_subj[0]
+ + " as screenshot base image"
)
else:
raise FileNotFoundError(
- "ERROR: cannot find the screenshots overlay file "
- + argsDict["screenshots_overlay"][0]
+ "ERROR: cannot find the screenshots base file "
+ + argsDict["screenshots_base"][0]
)
- else:
- screenshots_overlay_subj = argsDict["screenshots_overlay"]
-
- # check screenshots_surf
- if argsDict["screenshots_surf"] is not None:
- for screenshots_surf_i in argsDict["screenshots_surf"]:
- if screenshots_surf_i == "default":
- logging.info("Using default for screenshot surface")
- elif os.path.isfile(screenshots_surf_i):
+
+ # check screenshots_overlay
+ if argsDict["screenshots_overlay"] is not None:
+ if argsDict["screenshots_overlay"][0] == "default":
+ screenshots_overlay_subj = argsDict["screenshots_overlay"]
+ logging.info("Using default for screenshot overlay image")
+ elif os.path.isfile(argsDict["screenshots_overlay"][0]):
+ screenshots_overlay_subj = argsDict["screenshots_overlay"]
logging.info(
- "Using " + screenshots_surf_i + " as screenshot surface"
+ "Using "
+ + screenshots_overlay_subj[0]
+ + " as screenshot overlay image"
)
elif os.path.isfile(
os.path.join(
argsDict["subjects_dir"],
subject,
- "surf",
- screenshots_surf_i,
+ "mri",
+ argsDict["screenshots_overlay"][0],
)
):
- screenshots_surf_i = os.path.join(
- argsDict["subjects_dir"],
- subject,
- "surf",
- screenshots_surf_i,
- )
+ screenshots_overlay_subj = [
+ os.path.join(
+ argsDict["subjects_dir"],
+ subject,
+ "mri",
+ argsDict["screenshots_overlay"][0],
+ )
+ ]
logging.info(
- "Using " + screenshots_surf_i + " as screenshot surface"
+ "Using "
+ + screenshots_overlay_subj[0]
+ + " as screenshot overlay image"
)
else:
raise FileNotFoundError(
- "ERROR: cannot find the screenshots surface file "
- + screenshots_surf_i
+ "ERROR: cannot find the screenshots overlay file "
+ + argsDict["screenshots_overlay"][0]
)
- screenshots_surf_subj.append(screenshots_surf_i)
- else:
- screenshots_surf_subj = None
-
- # process
- createScreenshots(
- SUBJECT=subject,
- SUBJECTS_DIR=argsDict["subjects_dir"],
- OUTFILE=outfile,
- INTERACTIVE=False,
- BASE=screenshots_base_subj,
- OVERLAY=screenshots_overlay_subj,
- SURF=screenshots_surf_subj,
- VIEWS=argsDict["screenshots_views"],
- LAYOUT=argsDict["screenshots_layout"],
- ORIENTATION=argsDict["screenshots_orientation"],
- )
+ else:
+ screenshots_overlay_subj = argsDict["screenshots_overlay"]
- # return
- screenshots_status = 0
+ # check screenshots_surf
+ if argsDict["screenshots_surf"] is not None:
+ for screenshots_surf_i in argsDict["screenshots_surf"]:
+ if screenshots_surf_i == "default":
+ logging.info("Using default for screenshot surface")
+ elif os.path.isfile(screenshots_surf_i):
+ logging.info(
+ "Using " + screenshots_surf_i + " as screenshot surface"
+ )
+ elif os.path.isfile(
+ os.path.join(
+ argsDict["subjects_dir"],
+ subject,
+ "surf",
+ screenshots_surf_i,
+ )
+ ):
+ screenshots_surf_i = os.path.join(
+ argsDict["subjects_dir"],
+ subject,
+ "surf",
+ screenshots_surf_i,
+ )
+ logging.info(
+ "Using " + screenshots_surf_i + " as screenshot surface"
+ )
+ else:
+ raise FileNotFoundError(
+ "ERROR: cannot find the screenshots surface file "
+ + screenshots_surf_i
+ )
+ screenshots_surf_subj.append(screenshots_surf_i)
+ else:
+ screenshots_surf_subj = None
+
+ # process
+ createScreenshots(
+ SUBJECT=subject,
+ SUBJECTS_DIR=argsDict["subjects_dir"],
+ OUTFILE=outfile,
+ INTERACTIVE=False,
+ BASE=screenshots_base_subj,
+ OVERLAY=screenshots_overlay_subj,
+ SURF=screenshots_surf_subj,
+ VIEWS=argsDict["screenshots_views"],
+ LAYOUT=argsDict["screenshots_layout"],
+ ORIENTATION=argsDict["screenshots_orientation"],
+ )
- #
- except Exception as e:
- logging.error("ERROR: screenshots module failed for subject " + subject)
- logging.error("Reason: " + str(e))
- screenshots_status = 1
- if argsDict["exit_on_error"] is True:
- raise
+ # return
+ screenshots_status = 0
- # store data
- if screenshots_status == 0 or screenshots_status == 3:
- imagesScreenshotsDict[subject] = outfile
- else:
- imagesScreenshotsDict[subject] = []
+ #
+ except Exception as e:
+ logging.error("ERROR: screenshots module failed for subject " + subject)
+ logging.error("Reason: " + str(e))
+ screenshots_status = 1
+ if argsDict["exit_on_error"] is True:
+ raise
- else:
- screenshots_status = 2
+ # store data
+ if screenshots_status == 0 or screenshots_status == 3:
+ imagesScreenshotsDict[subject] = outfile
+ else:
+ imagesScreenshotsDict[subject] = []
- # store data
- statusDict[subject].update({"screenshots": screenshots_status})
+ else:
+ screenshots_status = 2
- # ----------------------------------------------------------------------
- # run optional modules: surface plots
+ # store data
+ statusDict[subject].update({"screenshots": screenshots_status})
- if argsDict["surfaces"] is True or argsDict["surfaces_html"] is True:
+ # ----------------------------------------------------------------------
+ # run optional modules: surface plots
- # determine status
- surfaces_status = 0
- if argsDict["skip_existing"] is True:
- if subject in statusDict.keys():
- if statusDict[subject]["surfaces"] == 0 or statusDict[subject]["surfaces"] == 3:
- surfaces_status = 3
- logging.info("Skipping surfaces computation for " + subject)
- else:
- logging.info("Not skipping surfaces computation for " + subject + ": statusfile did not indicate ok or skipped")
- else:
- logging.info("Not skipping surfaces computation for " + subject + ": no statusfile was found")
+ if argsDict["surfaces"] is True or argsDict["surfaces_html"] is True:
- # check / create subject-specific surfaces_outdir
- surfaces_outdir = os.path.join(
- argsDict["output_dir"], "surfaces", subject
- )
- if not os.path.isdir(surfaces_outdir):
- os.makedirs(surfaces_outdir)
+ # determine status
+ surfaces_status = 0
+ if argsDict["skip_existing"] is True:
+ if subject in statusDict.keys():
+ if statusDict[subject]["surfaces"] == 0 or statusDict[subject]["surfaces"] == 3:
+ surfaces_status = 3
+ logging.info("Skipping surfaces computation for " + subject)
+ else:
+ logging.info("Not skipping surfaces computation for " + subject + ": statusfile did not indicate ok or skipped")
+ else:
+ logging.info("Not skipping surfaces computation for " + subject + ": no statusfile was found")
- #
- if surfaces_status == 0:
+ # check / create subject-specific surfaces_outdir
+ surfaces_outdir = os.path.join(
+ argsDict["output_dir"], "surfaces", subject
+ )
+ if not os.path.isdir(surfaces_outdir):
+ os.makedirs(surfaces_outdir)
#
- try:
- # message
- print("-----------------------------")
- print("Creating surface plots ...")
- print("")
-
- # process
- createSurfacePlots(
- SUBJECT=subject,
- SUBJECTS_DIR=argsDict["subjects_dir"],
- SURFACES_OUTDIR=surfaces_outdir,
- VIEWS=argsDict["surfaces_views"],
- FASTSURFER=argsDict["fastsurfer"],
- )
- # return
- surfaces_status = 0
+ if surfaces_status == 0:
+
+ #
+ try:
+ # message
+ print("-----------------------------")
+ print("Creating surface plots ...")
+ print("")
+
+ # process
+ createSurfacePlots(
+ SUBJECT=subject,
+ SUBJECTS_DIR=argsDict["subjects_dir"],
+ SURFACES_OUTDIR=surfaces_outdir,
+ VIEWS=argsDict["surfaces_views"],
+ FASTSURFER=argsDict["fastsurfer"],
+ )
+ # return
+ surfaces_status = 0
- #
- except Exception as e:
- logging.error("ERROR: surfaces module failed for subject " + subject)
- logging.error("Reason: " + str(e))
- surfaces_status = 1
- if argsDict["exit_on_error"] is True:
- raise
+ #
+ except Exception as e:
+ logging.error("ERROR: surfaces module failed for subject " + subject)
+ logging.error("Reason: " + str(e))
+ surfaces_status = 1
+ if argsDict["exit_on_error"] is True:
+ raise
- # store data
- if surfaces_status == 0 or surfaces_status == 3:
- imagesSurfacesDict[subject] = surfaces_outdir
- else:
- imagesSurfacesDict[subject] = []
+ # store data
+ if surfaces_status == 0 or surfaces_status == 3:
+ imagesSurfacesDict[subject] = surfaces_outdir
+ else:
+ imagesSurfacesDict[subject] = []
- else:
- surfaces_status = 2
+ else:
+ surfaces_status = 2
- # store data
- statusDict[subject].update({"surfaces": surfaces_status})
+ # store data
+ statusDict[subject].update({"surfaces": surfaces_status})
- # ----------------------------------------------------------------------
- # run optional modules: skullstrip
+ # ----------------------------------------------------------------------
+ # run optional modules: skullstrip
- if argsDict["skullstrip"] is True or argsDict["skullstrip_html"] is True:
+ if argsDict["skullstrip"] is True or argsDict["skullstrip_html"] is True:
- # determine status
- skullstrip_status = 0
- if argsDict["skip_existing"] is True:
- if subject in statusDict.keys():
- if statusDict[subject]["skullstrip"] == 0 or statusDict[subject]["skullstrip"] == 3:
- skullstrip_status = 3
- logging.info("Skipping skullstrip computation for " + subject)
+ # determine status
+ skullstrip_status = 0
+ if argsDict["skip_existing"] is True:
+ if subject in statusDict.keys():
+ if statusDict[subject]["skullstrip"] == 0 or statusDict[subject]["skullstrip"] == 3:
+ skullstrip_status = 3
+ logging.info("Skipping skullstrip computation for " + subject)
+ else:
+ logging.info("Not skipping skullstrip computation for " + subject + ": statusfile did not indicate ok or skipped")
else:
- logging.info("Not skipping skullstrip computation for " + subject + ": statusfile did not indicate ok or skipped")
- else:
- logging.info("Not skipping skullstrip computation for " + subject + ": no statusfile was found")
-
+ logging.info("Not skipping skullstrip computation for " + subject + ": no statusfile was found")
- # check / create subject-specific skullstrip_outdir
- skullstrip_outdir = os.path.join(
- argsDict["output_dir"], "skullstrip", subject
- )
- if not os.path.isdir(skullstrip_outdir):
- os.makedirs(skullstrip_outdir)
- outfile = os.path.join(skullstrip_outdir, subject + ".png")
- #
- if skullstrip_status == 0:
+ # check / create subject-specific skullstrip_outdir
+ skullstrip_outdir = os.path.join(
+ argsDict["output_dir"], "skullstrip", subject
+ )
+ if not os.path.isdir(skullstrip_outdir):
+ os.makedirs(skullstrip_outdir)
+ outfile = os.path.join(skullstrip_outdir, subject + ".png")
#
- try:
- # message
- print("-----------------------------")
- print("Creating skullstrip evaluation ...")
- print("")
-
- # re-initialize
- skullstrip_base_subj = list()
- skullstrip_overlay_subj = list()
-
- # check skullstrip_base
- if os.path.isfile(
- os.path.join(argsDict["subjects_dir"], subject, "mri", "orig.mgz")
- ):
- skullstrip_base_subj = [
- os.path.join(
- argsDict["subjects_dir"], subject, "mri", "orig.mgz"
+ if skullstrip_status == 0:
+
+ #
+ try:
+ # message
+ print("-----------------------------")
+ print("Creating skullstrip evaluation ...")
+ print("")
+
+ # re-initialize
+ skullstrip_base_subj = list()
+ skullstrip_overlay_subj = list()
+
+ # check skullstrip_base
+ if os.path.isfile(
+ os.path.join(argsDict["subjects_dir"], subject, "mri", "orig.mgz")
+ ):
+ skullstrip_base_subj = [
+ os.path.join(
+ argsDict["subjects_dir"], subject, "mri", "orig.mgz"
+ )
+ ]
+ logging.info("Using " + "orig.mgz" + " as skullstrip base image")
+ else:
+ raise FileNotFoundError(
+ "ERROR: cannot find the skullstrip base file " + "orig.mgz"
)
- ]
- logging.info("Using " + "orig.mgz" + " as skullstrip base image")
- else:
- raise FileNotFoundError(
- "ERROR: cannot find the skullstrip base file " + "orig.mgz"
- )
- # check skullstrip_overlay
- if os.path.isfile(
- os.path.join(
- argsDict["subjects_dir"], subject, "mri", "brainmask.mgz"
- )
- ):
- skullstrip_overlay_subj = [
+ # check skullstrip_overlay
+ if os.path.isfile(
os.path.join(
argsDict["subjects_dir"], subject, "mri", "brainmask.mgz"
)
- ]
- logging.info(
- "Using " + "brainmask.mgz" + " as skullstrip overlay image"
- )
- else:
- raise FileNotFoundError(
- "ERROR: cannot find the skullstrip overlay file "
- + "brainmask.mgz"
+ ):
+ skullstrip_overlay_subj = [
+ os.path.join(
+ argsDict["subjects_dir"], subject, "mri", "brainmask.mgz"
+ )
+ ]
+ logging.info(
+ "Using " + "brainmask.mgz" + " as skullstrip overlay image"
+ )
+ else:
+ raise FileNotFoundError(
+ "ERROR: cannot find the skullstrip overlay file "
+ + "brainmask.mgz"
+ )
+
+ # process
+ createScreenshots(
+ SUBJECT=subject,
+ SUBJECTS_DIR=argsDict["subjects_dir"],
+ OUTFILE=outfile,
+ INTERACTIVE=False,
+ BASE=skullstrip_base_subj,
+ OVERLAY=skullstrip_overlay_subj,
+ SURF=None,
+ VIEWS=argsDict["screenshots_views"],
+ LAYOUT=argsDict["screenshots_layout"],
+ BINARIZE=True,
+ ORIENTATION=argsDict["screenshots_orientation"],
)
- # process
- createScreenshots(
- SUBJECT=subject,
- SUBJECTS_DIR=argsDict["subjects_dir"],
- OUTFILE=outfile,
- INTERACTIVE=False,
- BASE=skullstrip_base_subj,
- OVERLAY=skullstrip_overlay_subj,
- SURF=None,
- VIEWS=argsDict["screenshots_views"],
- LAYOUT=argsDict["screenshots_layout"],
- BINARIZE=True,
- ORIENTATION=argsDict["screenshots_orientation"],
- )
+ # return
+ skullstrip_status = 0
- # return
- skullstrip_status = 0
+ #
+ except Exception as e:
+ logging.error("ERROR: skullstrip module failed for subject " + subject)
+ logging.error("Reason: " + str(e))
+ skullstrip_status = 1
+ if argsDict["exit_on_error"] is True:
+ raise
- #
- except Exception as e:
- logging.error("ERROR: skullstrip module failed for subject " + subject)
- logging.error("Reason: " + str(e))
- skullstrip_status = 1
- if argsDict["exit_on_error"] is True:
- raise
+ # store data
+ if skullstrip_status == 0 or skullstrip_status == 3:
+ imagesSkullstripDict[subject] = outfile
+ else:
+ imagesSkullstripDict[subject] = []
- # store data
- if skullstrip_status == 0 or skullstrip_status == 3:
- imagesSkullstripDict[subject] = outfile
else:
- imagesSkullstripDict[subject] = []
-
- else:
- skullstrip_status = 2
+ skullstrip_status = 2
- # store data
- statusDict[subject].update({"skullstrip": skullstrip_status})
+ # store data
+ statusDict[subject].update({"skullstrip": skullstrip_status})
- # ----------------------------------------------------------------------
- # run optional modules: fornix
+ # ----------------------------------------------------------------------
+ # run optional modules: fornix
- if argsDict["fornix"] is True or argsDict["fornix_html"] is True:
+ if argsDict["fornix"] is True or argsDict["fornix_html"] is True:
- # determine status
- fornix_status = 0
- if argsDict["skip_existing"] is True:
- if subject in statusDict.keys():
- if statusDict[subject]["fornix"] == 0 or statusDict[subject]["fornix"] == 3:
- fornix_status = 3
- logging.info("Skipping fornix computation for " + subject)
+ # determine status
+ fornix_status = 0
+ if argsDict["skip_existing"] is True:
+ if subject in statusDict.keys():
+ if statusDict[subject]["fornix"] == 0 or statusDict[subject]["fornix"] == 3:
+ fornix_status = 3
+ logging.info("Skipping fornix computation for " + subject)
+ else:
+ logging.info("Not skipping fornix computation for " + subject + ": statusfile did not indicate ok or skipped")
else:
- logging.info("Not skipping fornix computation for " + subject + ": statusfile did not indicate ok or skipped")
- else:
- logging.info("Not skipping fornix computation for " + subject + ": no statusfile was found")
-
- # check / create subject-specific fornix_outdir
- fornix_outdir = os.path.join(argsDict["output_dir"], "fornix", subject)
- if not os.path.isdir(fornix_outdir):
- os.makedirs(fornix_outdir)
- fornix_screenshot_outfile = os.path.join(fornix_outdir, "cc.png")
+ logging.info("Not skipping fornix computation for " + subject + ": no statusfile was found")
- #
- if fornix_status == 0:
+ # check / create subject-specific fornix_outdir
+ fornix_outdir = os.path.join(argsDict["output_dir"], "fornix", subject)
+ if not os.path.isdir(fornix_outdir):
+ os.makedirs(fornix_outdir)
+ fornix_screenshot_outfile = os.path.join(fornix_outdir, "cc.png")
#
- try:
- # message
- print("-----------------------------")
- print("Checking fornix segmentation ...")
- print("")
-
- # process
- fornixShapeOutput = evaluateFornixSegmentation(
- SUBJECT=subject,
- SUBJECTS_DIR=argsDict["subjects_dir"],
- OUTPUT_DIR=fornix_outdir,
- CREATE_SCREENSHOT=FORNIX_SCREENSHOT,
- SCREENSHOTS_OUTFILE=fornix_screenshot_outfile,
- RUN_SHAPEDNA=FORNIX_SHAPE,
- N_EIGEN=FORNIX_N_EIGEN,
- )
+ if fornix_status == 0:
+
+ #
+ try:
+ # message
+ print("-----------------------------")
+ print("Checking fornix segmentation ...")
+ print("")
+
+ # process
+ fornixShapeOutput = evaluateFornixSegmentation(
+ SUBJECT=subject,
+ SUBJECTS_DIR=argsDict["subjects_dir"],
+ OUTPUT_DIR=fornix_outdir,
+ CREATE_SCREENSHOT=FORNIX_SCREENSHOT,
+ SCREENSHOTS_OUTFILE=fornix_screenshot_outfile,
+ RUN_SHAPEDNA=FORNIX_SHAPE,
+ N_EIGEN=FORNIX_N_EIGEN,
+ WRITE_EIGEN=FORNIX_WRITE_EIGEN,
+ )
- # create a dictionary from fornix shape output
- fornixShapeDict = {
- subject: dict(
- zip(
- map("fornixShapeEV{:0>3}".format, range(FORNIX_N_EIGEN)),
- fornixShapeOutput,
+ # create a dictionary from fornix shape output
+ fornixShapeDict = {
+ subject: dict(
+ zip(
+ map("fornixShapeEV{:0>3}".format, range(FORNIX_N_EIGEN)),
+ fornixShapeOutput,
+ )
)
- )
- }
+ }
- # return
- fornix_status = 0
+ # return
+ fornix_status = 0
- #
- except Exception as e:
- fornixShapeDict = {
- subject: dict(
- zip(
- map("fornixShapeEV{:0>3}".format, range(FORNIX_N_EIGEN)),
- np.full(FORNIX_N_EIGEN, np.nan),
+ #
+ except Exception as e:
+ fornixShapeDict = {
+ subject: dict(
+ zip(
+ map("fornixShapeEV{:0>3}".format, range(FORNIX_N_EIGEN)),
+ np.full(FORNIX_N_EIGEN, np.nan),
+ )
)
- )
- }
- logging.error("ERROR: fornix module failed for subject " + subject)
- logging.error("Reason: " + str(e))
- fornix_status = 1
- if argsDict["exit_on_error"] is True:
- raise
+ }
+ logging.error("ERROR: fornix module failed for subject " + subject)
+ logging.error("Reason: " + str(e))
+ fornix_status = 1
+ if argsDict["exit_on_error"] is True:
+ raise
+
+ # store data
+ if FORNIX_SHAPE:
+ metricsDict[subject].update(fornixShapeDict[subject])
+
+ elif fornix_status == 3:
+
+ if FORNIX_SHAPE:
+ # read results from previous run
+ fornixShapeOutput = np.array(pd.read_csv(os.path.join(fornix_outdir, subject + ".fornix.csv")))[0]
+ fornixShapeDict = {subject: dict(zip(map("fornixShapeEV{:0>3}".format, range(FORNIX_N_EIGEN)),fornixShapeOutput))}
+ metricsDict[subject].update(fornixShapeDict[subject])
# store data
- if FORNIX_SHAPE:
- metricsDict[subject].update(fornixShapeDict[subject]) # TODO: what if fornix_status==3? write to and read from disk?
+ if FORNIX_SCREENSHOT and (fornix_status == 0 or fornix_status == 3):
+ imagesFornixDict[subject] = fornix_screenshot_outfile
+ else:
+ imagesFornixDict[subject] = []
- # store data
- if FORNIX_SCREENSHOT and (fornix_status == 0 or fornix_status == 3):
- imagesFornixDict[subject] = fornix_screenshot_outfile
else:
- imagesFornixDict[subject] = []
-
- else:
- fornix_status = 2
+ fornix_status = 2
- # store data
- statusDict[subject].update({"fornix": fornix_status})
+ # store data
+ statusDict[subject].update({"fornix": fornix_status})
- # ----------------------------------------------------------------------
- # run optional modules: hypothalamus
+ # ----------------------------------------------------------------------
+ # run optional modules: hypothalamus
- if argsDict["hypothalamus"] is True or argsDict["hypothalamus_html"] is True:
+ if argsDict["hypothalamus"] is True or argsDict["hypothalamus_html"] is True:
- # determine status
- hypothalamus_status = 0
- if argsDict["skip_existing"] is True:
- if subject in statusDict.keys():
- if statusDict[subject]["hypothalamus"] == 0 or statusDict[subject]["hypothalamus"] == 3:
- hypothalamus_status = 3
- logging.info("Skipping hypothalamus computation for " + subject)
+ # determine status
+ hypothalamus_status = 0
+ if argsDict["skip_existing"] is True:
+ if subject in statusDict.keys():
+ if statusDict[subject]["hypothalamus"] == 0 or statusDict[subject]["hypothalamus"] == 3:
+ hypothalamus_status = 3
+ logging.info("Skipping hypothalamus computation for " + subject)
+ else:
+ logging.info("Not skipping hypothalamus computation for " + subject + ": statusfile did not indicate ok or skipped")
else:
- logging.info("Not skipping hypothalamus computation for " + subject + ": statusfile did not indicate ok or skipped")
- else:
- logging.info("Not skipping hypothalamus computation for " + subject + ": no statusfile was found")
-
- # check / create subject-specific hypothalamus_outdir
- hypothalamus_outdir = os.path.join(
- argsDict["output_dir"], "hypothalamus", subject
- )
- if not os.path.isdir(hypothalamus_outdir):
- os.makedirs(hypothalamus_outdir)
- hypothalamus_screenshot_outfile = os.path.join(
- hypothalamus_outdir, "hypothalamus.png"
- )
+ logging.info("Not skipping hypothalamus computation for " + subject + ": no statusfile was found")
- #
- if hypothalamus_status == 0:
+ # check / create subject-specific hypothalamus_outdir
+ hypothalamus_outdir = os.path.join(
+ argsDict["output_dir"], "hypothalamus", subject
+ )
+ if not os.path.isdir(hypothalamus_outdir):
+ os.makedirs(hypothalamus_outdir)
+ hypothalamus_screenshot_outfile = os.path.join(
+ hypothalamus_outdir, "hypothalamus.png"
+ )
#
- try:
- # message
- print("-----------------------------")
- print("Checking hypothalamus segmentation ...")
- print("")
-
- # process
- evaluateHypothalamicSegmentation(
- SUBJECT=subject,
- SUBJECTS_DIR=argsDict["subjects_dir"],
- OUTPUT_DIR=hypothalamus_outdir,
- CREATE_SCREENSHOT=HYPOTHALAMUS_SCREENSHOT,
- SCREENSHOTS_OUTFILE=hypothalamus_screenshot_outfile,
- SCREENSHOTS_ORIENTATION=argsDict["screenshots_orientation"],
- )
+ if hypothalamus_status == 0:
+
+ #
+ try:
+ # message
+ print("-----------------------------")
+ print("Checking hypothalamus segmentation ...")
+ print("")
+
+ # process
+ evaluateHypothalamicSegmentation(
+ SUBJECT=subject,
+ SUBJECTS_DIR=argsDict["subjects_dir"],
+ OUTPUT_DIR=hypothalamus_outdir,
+ CREATE_SCREENSHOT=HYPOTHALAMUS_SCREENSHOT,
+ SCREENSHOTS_OUTFILE=hypothalamus_screenshot_outfile,
+ SCREENSHOTS_ORIENTATION=argsDict["screenshots_orientation"],
+ )
- # return
- hypothalamus_status = 0
+ # return
+ hypothalamus_status = 0
- #
- except Exception as e:
- logging.error(
- "ERROR: hypothalamus module failed for subject " + subject
- )
- logging.error("Reason: " + str(e))
- hypothalamus_status = 1
- if argsDict["exit_on_error"] is True:
- raise
+ #
+ except Exception as e:
+ logging.error(
+ "ERROR: hypothalamus module failed for subject " + subject
+ )
+ logging.error("Reason: " + str(e))
+ hypothalamus_status = 1
+ if argsDict["exit_on_error"] is True:
+ raise
- # store data
- if HYPOTHALAMUS_SCREENSHOT and (hypothalamus_status == 0 or hypothalamus_status == 3):
- imagesHypothalamusDict[subject] = hypothalamus_screenshot_outfile
- else:
- imagesHypothalamusDict[subject] = []
+ # store data
+ if HYPOTHALAMUS_SCREENSHOT and (hypothalamus_status == 0 or hypothalamus_status == 3):
+ imagesHypothalamusDict[subject] = hypothalamus_screenshot_outfile
+ else:
+ imagesHypothalamusDict[subject] = []
- else:
- hypothalamus_status = 2
+ else:
+ hypothalamus_status = 2
- # store data
- statusDict[subject].update({"hypothalamus": hypothalamus_status})
+ # store data
+ statusDict[subject].update({"hypothalamus": hypothalamus_status})
- # ----------------------------------------------------------------------
- # run optional modules: hippocampus
+ # ----------------------------------------------------------------------
+ # run optional modules: hippocampus
- if argsDict["hippocampus"] is True or argsDict["hippocampus_html"] is True:
+ if argsDict["hippocampus"] is True or argsDict["hippocampus_html"] is True:
- # determine status
- hippocampus_status = 0
- if argsDict["skip_existing"] is True:
- if subject in statusDict.keys():
- if statusDict[subject]["hippocampus"] == 0 or statusDict[subject]["hippocampus"] == 3:
- hippocampus_status = 3
- logging.info("Skipping hippocampus computation for " + subject)
+ # determine status
+ hippocampus_status = 0
+ if argsDict["skip_existing"] is True:
+ if subject in statusDict.keys():
+ if statusDict[subject]["hippocampus"] == 0 or statusDict[subject]["hippocampus"] == 3:
+ hippocampus_status = 3
+ logging.info("Skipping hippocampus computation for " + subject)
+ else:
+ logging.info("Not skipping hippocampus computation for " + subject + ": statusfile did not indicate ok or skipped")
else:
- logging.info("Not skipping hippocampus computation for " + subject + ": statusfile did not indicate ok or skipped")
- else:
- logging.info("Not skipping hippocampus computation for " + subject + ": no statusfile was found")
+ logging.info("Not skipping hippocampus computation for " + subject + ": no statusfile was found")
- # check / create subject-specific hippocampus_outdir
- hippocampus_outdir = os.path.join(
- argsDict["output_dir"], "hippocampus", subject
- )
- if not os.path.isdir(hippocampus_outdir):
- os.makedirs(hippocampus_outdir)
- hippocampus_screenshot_outfile_left = os.path.join(
- hippocampus_outdir, "hippocampus-left.png"
- )
- hippocampus_screenshot_outfile_right = os.path.join(
- hippocampus_outdir, "hippocampus-right.png"
- )
-
- #
- if hippocampus_status == 0:
+ # check / create subject-specific hippocampus_outdir
+ hippocampus_outdir = os.path.join(
+ argsDict["output_dir"], "hippocampus", subject
+ )
+ if not os.path.isdir(hippocampus_outdir):
+ os.makedirs(hippocampus_outdir)
+ hippocampus_screenshot_outfile_left = os.path.join(
+ hippocampus_outdir, "hippocampus-left.png"
+ )
+ hippocampus_screenshot_outfile_right = os.path.join(
+ hippocampus_outdir, "hippocampus-right.png"
+ )
#
- try:
- # message
- print("-----------------------------")
- print("Checking hippocampus segmentation ...")
- print("")
-
- # process left
- evaluateHippocampalSegmentation(
- SUBJECT=subject,
- SUBJECTS_DIR=argsDict["subjects_dir"],
- OUTPUT_DIR=hippocampus_outdir,
- CREATE_SCREENSHOT=HIPPOCAMPUS_SCREENSHOT,
- SCREENSHOTS_OUTFILE=hippocampus_screenshot_outfile_left,
- SCREENSHOTS_ORIENTATION=argsDict["screenshots_orientation"],
- HEMI="lh",
- LABEL=argsDict["hippocampus_label"],
- )
- evaluateHippocampalSegmentation(
- SUBJECT=subject,
- SUBJECTS_DIR=argsDict["subjects_dir"],
- OUTPUT_DIR=hippocampus_outdir,
- CREATE_SCREENSHOT=HIPPOCAMPUS_SCREENSHOT,
- SCREENSHOTS_OUTFILE=hippocampus_screenshot_outfile_right,
- SCREENSHOTS_ORIENTATION=argsDict["screenshots_orientation"],
- HEMI="rh",
- LABEL=argsDict["hippocampus_label"],
- )
+ if hippocampus_status == 0:
+
+ #
+ try:
+ # message
+ print("-----------------------------")
+ print("Checking hippocampus segmentation ...")
+ print("")
+
+ # process left
+ evaluateHippocampalSegmentation(
+ SUBJECT=subject,
+ SUBJECTS_DIR=argsDict["subjects_dir"],
+ OUTPUT_DIR=hippocampus_outdir,
+ CREATE_SCREENSHOT=HIPPOCAMPUS_SCREENSHOT,
+ SCREENSHOTS_OUTFILE=hippocampus_screenshot_outfile_left,
+ SCREENSHOTS_ORIENTATION=argsDict["screenshots_orientation"],
+ HEMI="lh",
+ LABEL=argsDict["hippocampus_label"],
+ )
+ evaluateHippocampalSegmentation(
+ SUBJECT=subject,
+ SUBJECTS_DIR=argsDict["subjects_dir"],
+ OUTPUT_DIR=hippocampus_outdir,
+ CREATE_SCREENSHOT=HIPPOCAMPUS_SCREENSHOT,
+ SCREENSHOTS_OUTFILE=hippocampus_screenshot_outfile_right,
+ SCREENSHOTS_ORIENTATION=argsDict["screenshots_orientation"],
+ HEMI="rh",
+ LABEL=argsDict["hippocampus_label"],
+ )
- # return
- hippocampus_status = 0
+ # return
+ hippocampus_status = 0
- #
- except Exception as e:
- logging.error("ERROR: hippocampus module failed for subject " + subject)
- logging.error("Reason: " + str(e))
- hippocampus_status = 1
- if argsDict["exit_on_error"] is True:
- raise
+ #
+ except Exception as e:
+ logging.error("ERROR: hippocampus module failed for subject " + subject)
+ logging.error("Reason: " + str(e))
+ hippocampus_status = 1
+ if argsDict["exit_on_error"] is True:
+ raise
- # store data
- if HIPPOCAMPUS_SCREENSHOT and (hippocampus_status == 0 or hippocampus_status == 3):
- imagesHippocampusLeftDict[subject] = hippocampus_screenshot_outfile_left
- imagesHippocampusRightDict[
- subject
- ] = hippocampus_screenshot_outfile_right
- else:
- imagesHippocampusLeftDict[subject] = []
- imagesHippocampusRightDict[subject] = []
+ # store data
+ if HIPPOCAMPUS_SCREENSHOT and (hippocampus_status == 0 or hippocampus_status == 3):
+ imagesHippocampusLeftDict[subject] = hippocampus_screenshot_outfile_left
+ imagesHippocampusRightDict[
+ subject
+ ] = hippocampus_screenshot_outfile_right
+ else:
+ imagesHippocampusLeftDict[subject] = []
+ imagesHippocampusRightDict[subject] = []
- else:
- hippocampus_status = 2
+ else:
+ hippocampus_status = 2
- # store data
- statusDict[subject].update({"hippocampus": hippocampus_status})
+ # store data
+ statusDict[subject].update({"hippocampus": hippocampus_status})
- # --------------------------------------------------------------------------
- # write statusfile
- # 0: OK
- # 1: Failed
- # 2: Not done
- # 3: Skipped
- pd.DataFrame(statusDict[subject], index=[subject]).T.to_csv(os.path.join(argsDict["output_dir"], "status", subject, "status.txt"), header=False, sep=":")
+ # --------------------------------------------------------------------------
+ # write statusfile
+ # 0: OK
+ # 1: Failed
+ # 2: Not done
+ # 3: Skipped
+ pd.DataFrame(statusDict[subject], index=[subject]).T.to_csv(os.path.join(argsDict["output_dir"], "status", subject, "status.txt"), header=False, sep=":")
- # --------------------------------------------------------------------------
- # message
- logging.info(
- "Finished subject "
- + subject
- + " at "
- + time.strftime("%Y-%m-%d %H:%M %Z", time.localtime(time.time()))
- )
+ # --------------------------------------------------------------------------
+ # message
+ logging.info(
+ "Finished subject "
+ + subject
+ + " at "
+ + time.strftime("%Y-%m-%d %H:%M %Z", time.localtime(time.time()))
+ )
# --------------------------------------------------------------------------
# run optional modules: outlier detection
- if argsDict["outlier"] is True:
+ if argsDict["no_group"] is False: # todo: what input is required for group_only?
+
#
- try:
+ if argsDict["outlier"] is True:
+
# message
- print("---------------------------------------")
- print("Running outlier detection module ...")
- print("")
+ logging.info("Running outlier detection")
- # determine outlier-table and get data
- if argsDict["outlier_table"] is None:
- outlierDict = outlierTable()
- else:
- outlierDict = dict()
- with open(argsDict["outlier_table"], newline="") as csvfile:
- outlierCsv = csv.DictReader(csvfile, delimiter=",")
- for row in outlierCsv:
- outlierDict.update(
- {
- row["label"]: {
- "lower": float(row["lower"]),
- "upper": float(row["upper"]),
+ #
+ try:
+ # message
+ print("---------------------------------------")
+ print("Running outlier detection module ...")
+ print("")
+
+ # determine outlier-table and get data
+ if argsDict["outlier_table"] is None:
+ outlierDict = outlierTable()
+ else:
+ outlierDict = dict()
+ with open(argsDict["outlier_table"], newline="") as csvfile:
+ outlierCsv = csv.DictReader(csvfile, delimiter=",")
+ for row in outlierCsv:
+ outlierDict.update(
+ {
+ row["label"]: {
+ "lower": float(row["lower"]),
+ "upper": float(row["upper"]),
+ }
}
- }
- )
-
- # process
- outlier_outdir = os.path.join(argsDict["output_dir"], "outliers")
- (
- n_outlier_sample_nonpar,
- n_outlier_sample_param,
- n_outlier_norms,
- ) = outlierDetection(
- argsDict["subjects"],
- argsDict["subjects_dir"],
- outlier_outdir,
- outlierDict,
- min_no_subjects=OUTLIER_N_MIN,
- hypothalamus=argsDict["hypothalamus"],
- hippocampus=argsDict["hippocampus"],
- hippocampus_label=argsDict["hippocampus_label"],
- fastsurfer=argsDict["fastsurfer"],
- )
+ )
- # create a dictionary from outlier module output
- outlierDict = dict()
- for subject in argsDict["subjects"]:
- outlierDict.update(
- {
- subject: {
- "n_outlier_sample_nonpar": n_outlier_sample_nonpar[subject],
- "n_outlier_sample_param": n_outlier_sample_param[subject],
- "n_outlier_norms": n_outlier_norms[subject],
- }
- }
+ # process
+ outlier_outdir = os.path.join(argsDict["output_dir"], "outliers")
+ (
+ n_outlier_sample_nonpar,
+ n_outlier_sample_param,
+ n_outlier_norms,
+ ) = outlierDetection(
+ argsDict["subjects"],
+ argsDict["subjects_dir"],
+ outlier_outdir,
+ outlierDict,
+ min_no_subjects=OUTLIER_N_MIN,
+ hypothalamus=argsDict["hypothalamus"],
+ hippocampus=argsDict["hippocampus"],
+ hippocampus_label=argsDict["hippocampus_label"],
+ fastsurfer=argsDict["fastsurfer"],
)
+ # create a dictionary from outlier module output
+ outlierDict = dict()
+ for subject in argsDict["subjects"]:
+ outlierDict.update(
+ {
+ subject: {
+ "n_outlier_sample_nonpar": n_outlier_sample_nonpar[subject],
+ "n_outlier_sample_param": n_outlier_sample_param[subject],
+ "n_outlier_norms": n_outlier_norms[subject],
+ }
+ }
+ )
- # return
- # outlier_status = 0
+ # return
+ outlier_status = 0
- #
- except Exception as e:
- # create a dictionary from outlier module output
- outlierDict = dict()
- for subject in argsDict["subjects"]:
- outlierDict.update(
- {
- subject: {
- "n_outlier_sample_nonpar": np.nan,
- "n_outlier_sample_param": np.nan,
- "n_outlier_norms": np.nan,
+ #
+ except Exception as e:
+ # create a dictionary from outlier module output
+ outlierDict = dict()
+ for subject in argsDict["subjects"]:
+ outlierDict.update(
+ {
+ subject: {
+ "n_outlier_sample_nonpar": np.nan,
+ "n_outlier_sample_param": np.nan,
+ "n_outlier_norms": np.nan,
+ }
}
- }
- )
-
- logging.error("ERROR: outlier module failed")
- logging.error("Reason: " + str(e))
- if argsDict["exit_on_error"] is True:
- raise
+ )
- # store data
- for subject in argsDict["subjects"]:
- metricsDict[subject].update(outlierDict[subject])
+ logging.error("ERROR: outlier module failed")
+ logging.error("Reason: " + str(e))
+ if argsDict["exit_on_error"] is True:
+ raise
- # message
- logging.info("Done")
+ # store data
+ for subject in argsDict["subjects"]:
+ if argsDict["group_only"] is True:
+ metricsDict.update({subject: {"subject": subject}})
+ metricsDict[subject].update(outlierDict[subject])
# --------------------------------------------------------------------------
# generate output
- metricsFieldnames = ["subject"]
-
- # we pre-specify the fieldnames because we want to have this particular order
- metricsFieldnames.extend(
- [
- "wm_snr_orig",
- "gm_snr_orig",
- "wm_snr_norm",
- "gm_snr_norm",
- "cc_size",
- "holes_lh",
- "holes_rh",
- "defects_lh",
- "defects_rh",
- "topo_lh",
- "topo_rh",
- "con_snr_lh",
- "con_snr_rh",
- "rot_tal_x",
- "rot_tal_y",
- "rot_tal_z",
- ]
- )
+ if argsDict["no_group"] is True:
- # collect other keys; need to iterate over subjects, because not all of them
- # necessarily have the same set of keys
- if argsDict["shape"] is True:
- shapeKeys = list()
- for subject in distDict.keys():
- if len(distDict[subject]) > 0:
- shapeKeys = list(np.unique(shapeKeys + list(distDict[subject].keys())))
- metricsFieldnames.extend(shapeKeys)
+ logging.info("Not generating group output")
- if (
- argsDict["fornix"] is True or argsDict["fornix_html"] is True
- ) and FORNIX_SHAPE is True:
- fornixKeys = list()
- for subject in fornixShapeDict.keys():
- if len(fornixShapeDict[subject]) > 0:
- fornixKeys = list(
- np.unique(fornixKeys + list(fornixShapeDict[subject].keys()))
- )
- metricsFieldnames.extend(sorted(fornixKeys))
+ else:
- if argsDict["outlier"] is True:
- outlierKeys = list()
- for subject in outlierDict.keys():
- if len(outlierDict[subject]) > 0:
- outlierKeys = list(
- np.unique(outlierKeys + list(outlierDict[subject].keys()))
- )
- metricsFieldnames.extend(sorted(outlierKeys))
-
- # determine output file names
- path_data_file = os.path.join(argsDict["output_dir"], "fsqc-results.csv")
- path_html_file = os.path.join(argsDict["output_dir"], "fsqc-results.html")
-
- # write csv
- with open(path_data_file, "w") as datafile:
- csvwriter = csv.DictWriter(
- datafile,
- fieldnames=metricsFieldnames,
- delimiter=",",
- quotechar='"',
- quoting=csv.QUOTE_MINIMAL,
+ #
+ logging.info("Generating group output")
+
+ #
+ metricsFieldnames = ["subject"]
+
+ # we pre-specify the fieldnames because we want to have this particular order
+ metricsFieldnames.extend(
+ [
+ "wm_snr_orig",
+ "gm_snr_orig",
+ "wm_snr_norm",
+ "gm_snr_norm",
+ "cc_size",
+ "holes_lh",
+ "holes_rh",
+ "defects_lh",
+ "defects_rh",
+ "topo_lh",
+ "topo_rh",
+ "con_snr_lh",
+ "con_snr_rh",
+ "rot_tal_x",
+ "rot_tal_y",
+ "rot_tal_z",
+ ]
)
- csvwriter.writeheader()
- for subject in sorted(list(metricsDict.keys())):
- csvwriter.writerow(metricsDict[subject])
- # generate html output
- if (
- (argsDict["screenshots_html"] is True)
- or (argsDict["surfaces_html"] is True)
- or (argsDict["skullstrip_html"] is True)
- or (argsDict["fornix_html"] is True)
- or (argsDict["hypothalamus_html"] is True)
- or (argsDict["hippocampus_html"] is True)
- ):
- with open(path_html_file, "w") as htmlfile:
- print("", file=htmlfile)
- print("", file=htmlfile)
- print("fsqc screenshots ", file=htmlfile)
- print("", file=htmlfile)
- print(
- ")",
- file=htmlfile,
- )
- print('', file=htmlfile)
-
- # screenshots
- if argsDict["screenshots_html"] is True:
- print('Screenshots ', file=htmlfile)
- for subject in sorted(list(imagesScreenshotsDict.keys())):
- print(
- 'Subject ' + subject + " ",
- file=htmlfile,
+ # check if data needs to be read from disk
+ if argsDict['group_only'] is True:
+ for subject in argsDict["subjects"]:
+ # metricsDict may (or not) be populated from previous outlier module
+ if not subject in metricsDict.keys():
+ metricsDict.update({subject: {"subject": subject}})
+ metricsDict[subject] = metricsDict[subject] | pd.read_csv(os.path.join(argsDict["output_dir"], "metrics", subject, "metrics.csv"), dtype={'Unnamed: 0':str, 'subject':str}).set_index('Unnamed: 0').to_dict(orient="index")[subject]
+ #
+ if argsDict["shape"] is True:
+ dstMat = pd.read_csv(Path(os.path.join(argsDict["output_dir"], "brainprint", subject) ) / (subject + ".brainprint.asymmetry.csv")).to_dict(orient="index")[0]
+ distDict = {subject: dstMat}
+ metricsDict[subject].update(distDict[subject])
+ #
+ if (argsDict["fornix"] is True or argsDict["fornix_html"] is True) and FORNIX_SHAPE is True:
+ fornixShapeOutput = np.array(pd.read_csv(os.path.join(argsDict["output_dir"], "fornix", subject, subject + ".fornix.csv")))[0]
+ fornixShapeDict = {subject: dict(zip(map("fornixShapeEV{:0>3}".format, range(FORNIX_N_EIGEN)),fornixShapeOutput))}
+ metricsDict[subject].update(fornixShapeDict[subject])
+
+ # collect other keys; need to iterate over subjects, because not all of them
+ # necessarily have the same set of keys
+ if argsDict["shape"] is True:
+ shapeKeys = list()
+ for subject in distDict.keys():
+ if len(distDict[subject]) > 0:
+ shapeKeys = list(np.unique(shapeKeys + list(distDict[subject].keys())))
+ metricsFieldnames.extend(shapeKeys)
+
+ #
+ if (argsDict["fornix"] is True or argsDict["fornix_html"] is True) and FORNIX_SHAPE is True:
+ fornixKeys = list()
+ for subject in fornixShapeDict.keys():
+ if len(fornixShapeDict[subject]) > 0:
+ fornixKeys = list(
+ np.unique(fornixKeys + list(fornixShapeDict[subject].keys()))
)
- if imagesScreenshotsDict[
- subject
- ]: # should be False for empty string or empty list
- if os.path.isfile(
- os.path.join(
- argsDict["output_dir"],
- "screenshots",
- subject,
- os.path.basename(imagesScreenshotsDict[subject]),
- )
- ):
- print(
- ' 0:
+ outlierKeys = list(
+ np.unique(outlierKeys + list(outlierDict[subject].keys()))
+ )
+ metricsFieldnames.extend(sorted(outlierKeys))
+
+ # determine output file names
+ path_data_file = os.path.join(argsDict["output_dir"], "fsqc-results.csv")
+ path_html_file = os.path.join(argsDict["output_dir"], "fsqc-results.html")
+
+ # write csv
+ with open(path_data_file, "w") as datafile:
+ csvwriter = csv.DictWriter(
+ datafile,
+ fieldnames=metricsFieldnames,
+ delimiter=",",
+ quotechar='"',
+ quoting=csv.QUOTE_MINIMAL,
+ )
+ csvwriter.writeheader()
+ for subject in sorted(list(metricsDict.keys())):
+ csvwriter.writerow(metricsDict[subject])
+
+ # generate html output
+ if (
+ (argsDict["screenshots_html"] is True)
+ or (argsDict["surfaces_html"] is True)
+ or (argsDict["skullstrip_html"] is True)
+ or (argsDict["fornix_html"] is True)
+ or (argsDict["hypothalamus_html"] is True)
+ or (argsDict["hippocampus_html"] is True)
+ ):
+ with open(path_html_file, "w") as htmlfile:
+ print("", file=htmlfile)
+ print("", file=htmlfile)
+ print("fsqc screenshots ", file=htmlfile)
+ print("", file=htmlfile)
+ print(
+ ")",
+ file=htmlfile,
+ )
+ print('', file=htmlfile)
+
+ # screenshots
+ if argsDict["screenshots_html"] is True:
+ print('Screenshots ', file=htmlfile)
+ for subject in sorted(list(imagesScreenshotsDict.keys())):
+ print(
+ 'Subject ' + subject + " ",
+ file=htmlfile,
+ )
+ if imagesScreenshotsDict[
+ subject
+ ]: # should be False for empty string or empty list
+ if os.path.isfile(
+ os.path.join(
+ argsDict["output_dir"],
"screenshots",
subject,
os.path.basename(imagesScreenshotsDict[subject]),
)
- + '">'
- + ' '
+ + '
',
+ file=htmlfile,
)
- + '" '
- + 'alt="Image for subject '
- + subject
- + '" style="width:75vw;min_width:200px;">
',
- file=htmlfile,
- )
- # skullstrip
- if argsDict["skullstrip_html"] is True:
- print('Skullstrip ', file=htmlfile)
- for subject in sorted(list(imagesSkullstripDict.keys())):
- print(
- 'Subject ' + subject + " ",
- file=htmlfile,
- )
- if imagesSkullstripDict[
- subject
- ]: # should be False for empty string or empty list
- if os.path.isfile(
- os.path.join(
- argsDict["output_dir"],
- "skullstrip",
- subject,
- os.path.basename(imagesSkullstripDict[subject]),
- )
- ):
- print(
- 'Skullstrip', file=htmlfile)
+ for subject in sorted(list(imagesSkullstripDict.keys())):
+ print(
+ 'Subject ' + subject + " ",
+ file=htmlfile,
+ )
+ if imagesSkullstripDict[
+ subject
+ ]: # should be False for empty string or empty list
+ if os.path.isfile(
+ os.path.join(
+ argsDict["output_dir"],
"skullstrip",
subject,
os.path.basename(imagesSkullstripDict[subject]),
)
- + '">'
- + ' '
+ + '
',
+ file=htmlfile,
)
- + '" '
- + 'alt="Image for subject '
- + subject
- + '" style="width:75vw;min_width:200px;">',
- file=htmlfile,
- )
- # surfaces
- if argsDict["surfaces_html"] is True:
- print('Surfaces ', file=htmlfile)
- for subject in sorted(list(imagesSurfacesDict.keys())):
- print(
- 'Subject ' + subject + " ",
- file=htmlfile,
- )
- if imagesSurfacesDict[
- subject
- ]: # should be False for empty string or empty list
- # Produce first all plots for pial then for inflated surface.
- # Each view contains a left and right hemispheric plot.
- _views_per_row = 2
-
- from PIL import Image
-
- filepath = os.path.join(
- argsDict["output_dir"],
- "surfaces",
- subject,
- f'lh.pial.{argsDict["surfaces_views"][0]}.png',
+ # surfaces
+ if argsDict["surfaces_html"] is True:
+ print('Surfaces ', file=htmlfile)
+ for subject in sorted(list(imagesSurfacesDict.keys())):
+ print(
+ 'Subject ' + subject + " ",
+ file=htmlfile,
)
- img = Image.open(filepath)
- width, height = img.size
- width *= 2 * _views_per_row + 0.1
+ if imagesSurfacesDict[
+ subject
+ ]: # should be False for empty string or empty list
+ # Produce first all plots for pial then for inflated surface.
+ # Each view contains a left and right hemispheric plot.
+ _views_per_row = 2
- print("", file=htmlfile)
+ from PIL import Image
+
+ filepath = os.path.join(
+ argsDict["output_dir"],
+ "surfaces",
+ subject,
+ f'lh.pial.{argsDict["surfaces_views"][0]}.png',
+ )
+ img = Image.open(filepath)
+ width, height = img.size
+ width *= 2 * _views_per_row + 0.1
+
+ print("
", file=htmlfile)
+ print(
+ f'
',
+ file=htmlfile,
+ )
+ print("
", file=htmlfile)
+ for i, v in enumerate(argsDict["surfaces_views"], start=1):
+ if os.path.isfile(
+ os.path.join(
+ argsDict["output_dir"],
+ "surfaces",
+ subject,
+ f"lh.pial.{v}.png",
+ )
+ ):
+ print(
+ ''
+ + ' ',
+ file=htmlfile,
+ )
+ if os.path.isfile(
+ os.path.join(
+ argsDict["output_dir"],
+ "surfaces",
+ subject,
+ f"rh.pial.{v}.png",
+ )
+ ):
+ print(
+ ''
+ + ' ',
+ file=htmlfile,
+ )
+ if i % _views_per_row == 0:
+ print("
", file=htmlfile)
+ print("
", file=htmlfile)
+ for i, v in enumerate(argsDict["surfaces_views"], start=1):
+ if os.path.isfile(
+ os.path.join(
+ argsDict["output_dir"],
+ "surfaces",
+ subject,
+ f"lh.inflated.{v}.png",
+ )
+ ):
+ print(
+ ''
+ + ' ',
+ file=htmlfile,
+ )
+ if os.path.isfile(
+ os.path.join(
+ argsDict["output_dir"],
+ "surfaces",
+ subject,
+ f"rh.inflated.{v}.png",
+ )
+ ):
+ print(
+ ''
+ + ' ',
+ file=htmlfile,
+ )
+ if i % _views_per_row == 0:
+ print("
", file=htmlfile)
+ print("
", file=htmlfile)
+ print("
", file=htmlfile)
+ print("", file=htmlfile)
+
+ # fornix
+ if argsDict["fornix_html"] is True:
+ print('Fornix ', file=htmlfile)
+ for subject in sorted(list(imagesFornixDict.keys())):
print(
- f'',
+ '
Subject ' + subject + " ",
file=htmlfile,
)
- print("
", file=htmlfile)
- for i, v in enumerate(argsDict["surfaces_views"], start=1):
+ if imagesFornixDict[
+ subject
+ ]: # should be False for empty string or empty list
if os.path.isfile(
os.path.join(
argsDict["output_dir"],
- "surfaces",
+ "fornix",
subject,
- f"lh.pial.{v}.png",
+ os.path.basename(imagesFornixDict[subject]),
)
):
print(
- ''
+ ' ',
+ + '" style="width:75vw;min_width:200px;">
',
file=htmlfile,
)
+
+ # hypothalamus
+ if argsDict["hypothalamus_html"] is True:
+ print('
Hypothalamus ', file=htmlfile)
+ for subject in sorted(list(imagesHypothalamusDict.keys())):
+ print(
+ '
Subject ' + subject + " ",
+ file=htmlfile,
+ )
+ if imagesHypothalamusDict[
+ subject
+ ]: # should be False for empty string or empty list
if os.path.isfile(
os.path.join(
argsDict["output_dir"],
- "surfaces",
+ "hypothalamus",
subject,
- f"rh.pial.{v}.png",
+ os.path.basename(imagesHypothalamusDict[subject]),
)
):
print(
- '
'
+ ' ',
+ + '" style="width:75vw;min_width:200px;">',
file=htmlfile,
)
- if i % _views_per_row == 0:
- print("
", file=htmlfile)
- print("
", file=htmlfile)
- for i, v in enumerate(argsDict["surfaces_views"], start=1):
+
+ # hippocampus
+ if argsDict["hippocampus_html"] is True:
+ print('
hippocampus ', file=htmlfile)
+ for subject in sorted(list(imagesHippocampusLeftDict.keys())):
+ print(
+ '
Subject ' + subject + " ",
+ file=htmlfile,
+ )
+ if imagesHippocampusLeftDict[
+ subject
+ ]: # should be False for empty string or empty list
if os.path.isfile(
os.path.join(
argsDict["output_dir"],
- "surfaces",
+ "hippocampus",
subject,
- f"lh.inflated.{v}.png",
+ os.path.basename(imagesHippocampusLeftDict[subject]),
)
):
print(
- '
'
+ ' ',
+ + '" style="width:75vw;min_width:200px;">',
file=htmlfile,
)
+ if imagesHippocampusRightDict[
+ subject
+ ]: # should be False for empty string or empty list
if os.path.isfile(
os.path.join(
argsDict["output_dir"],
- "surfaces",
+ "hippocampus",
subject,
- f"rh.inflated.{v}.png",
+ os.path.basename(imagesHippocampusRightDict[subject]),
)
):
print(
- '
'
+ ' ',
+ + '" style="width:75vw;min_width:200px;">',
file=htmlfile,
)
- if i % _views_per_row == 0:
- print("
", file=htmlfile)
- print("
", file=htmlfile)
- print("
", file=htmlfile)
- print("", file=htmlfile)
-
- # fornix
- if argsDict["fornix_html"] is True:
- print('Fornix ', file=htmlfile)
- for subject in sorted(list(imagesFornixDict.keys())):
- print(
- 'Subject ' + subject + " ",
- file=htmlfile,
- )
- if imagesFornixDict[
- subject
- ]: # should be False for empty string or empty list
- if os.path.isfile(
- os.path.join(
- argsDict["output_dir"],
- "fornix",
- subject,
- os.path.basename(imagesFornixDict[subject]),
- )
- ):
- print(
- ''
- + '
',
- file=htmlfile,
- )
- # hypothalamus
- if argsDict["hypothalamus_html"] is True:
- print('Hypothalamus ', file=htmlfile)
- for subject in sorted(list(imagesHypothalamusDict.keys())):
- print(
- 'Subject ' + subject + " ",
- file=htmlfile,
- )
- if imagesHypothalamusDict[
- subject
- ]: # should be False for empty string or empty list
- if os.path.isfile(
- os.path.join(
- argsDict["output_dir"],
- "hypothalamus",
- subject,
- os.path.basename(imagesHypothalamusDict[subject]),
- )
- ):
- print(
- ''
- + '
',
- file=htmlfile,
- )
-
- # hippocampus
- if argsDict["hippocampus_html"] is True:
- print('hippocampus ', file=htmlfile)
- for subject in sorted(list(imagesHippocampusLeftDict.keys())):
- print(
- 'Subject ' + subject + " ",
- file=htmlfile,
- )
- if imagesHippocampusLeftDict[
- subject
- ]: # should be False for empty string or empty list
- if os.path.isfile(
- os.path.join(
- argsDict["output_dir"],
- "hippocampus",
- subject,
- os.path.basename(imagesHippocampusLeftDict[subject]),
- )
- ):
- print(
- ''
- + '
',
- file=htmlfile,
- )
- if imagesHippocampusRightDict[
- subject
- ]: # should be False for empty string or empty list
- if os.path.isfile(
- os.path.join(
- argsDict["output_dir"],
- "hippocampus",
- subject,
- os.path.basename(imagesHippocampusRightDict[subject]),
- )
- ):
- print(
- ''
- + '
',
- file=htmlfile,
- )
-
- #
- print("", file=htmlfile)
- print("", file=htmlfile)
+ #
+ print("", file=htmlfile)
+ print("", file=htmlfile)
# ------------------------------------------------------------------------------
@@ -3163,6 +3254,8 @@ def run_fsqc(
outlier=False,
outlier_table=None,
fastsurfer=False,
+ no_group=False,
+ group_only=False,
exit_on_error=False,
skip_existing=False,
logfile=None,
@@ -3244,6 +3337,12 @@ def run_fsqc(
Specify custom norms table for outlier analysis.
fastsurfer : bool, default: False
Use FastSurfer instead of FreeSurfer input.
+ no_group : bool, default: False
+ run script in subject-level mode. will compute individual files and
+ statistics, but not create group-level summaries.
+ group-only : bool, default: False
+ run script in group mode. will create group-level summaries from
+ existing inputs.
exit_on_error : bool, default: False
Exit on error. If False, a warning is thrown and the analysis
continues.
@@ -3295,6 +3394,8 @@ def run_fsqc(
argsDict["outlier"] = outlier
argsDict["outlier_table"] = outlier_table
argsDict["fastsurfer"] = fastsurfer
+ argsDict["no_group"] = no_group
+ argsDict["group_only"] = group_only
argsDict["exit_on_error"] = exit_on_error
argsDict["skip_existing"] = skip_existing
argsDict["logfile"] = logfile
From 021c0aeade3876964136671cc18c9f53c53719e1 Mon Sep 17 00:00:00 2001
From: diersk
Date: Fri, 7 Jun 2024 21:07:28 +0200
Subject: [PATCH 07/25] Updated documentation
---
CHANGES.md | 5 +++++
README.md | 9 +++++++--
doc/Usage.rst | 32 ++++++++++++++++----------------
3 files changed, 28 insertions(+), 18 deletions(-)
diff --git a/CHANGES.md b/CHANGES.md
index df445d0..9ffd35c 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -3,6 +3,11 @@
This is a document summarizing the changes that are associated with (major) updates and releases. Priority is given to changes that are relevant to the user, and those that introduce new features or break compatibility with prior versions.
+## Version 2.1.0
+
+- Added group-only, no-group options
+- Added status file and skip-existing option
+
## Version 2.0.2
- This fixes an issue with the outlier module, which did not run for FastSurfer output due to incorrect expectations for names of stats files.
diff --git a/README.md b/README.md
index f7c1d64..e3c737a 100644
--- a/README.md
+++ b/README.md
@@ -264,12 +264,17 @@ optional arguments:
--outlier-table specify normative values (only in conjunction with
--outlier)
--fastsurfer use FastSurfer instead of FreeSurfer output
+ --no_group run script in subject-level mode. will compute
+ individual files and statistics, but not create
+ group-level summaries.
+ --group-only run script in group mode. will create group-level
+ summaries from existing inputs
--exit-on-error terminate the program when encountering an error;
otherwise, try to continue with the next module or
case
--skip-existing skips processing for a given case if output
- already exists, even with possibly different
- parameters or settings.
+ already exists, even with possibly different
+ parameters or settings
getting help:
-h, --help display this help message and exit
diff --git a/doc/Usage.rst b/doc/Usage.rst
index 7a31c02..2235f6a 100644
--- a/doc/Usage.rst
+++ b/doc/Usage.rst
@@ -6,8 +6,8 @@ As a Command Line Tool
.. code-block:: sh
- run_fsqc
- --subjects_dir
+ run_fsqc
+ --subjects_dir
--output_dir
[--subjects SubjectID]
[--subjects-file ] [--screenshots]
@@ -22,7 +22,7 @@ As a Command Line Tool
Required Arguments:
-------------------
--subjects_dir
- Subjects directory with a set of Freesurfer- or
+ Subjects directory with a set of Freesurfer- or
Fastsurfer-processed individual datasets.
--output_dir
@@ -111,34 +111,34 @@ As a Command Line Tool
---------------
--screenshots_base
Filename of an image that should be used instead of
- norm.mgz as the base image for the screenshots. Can be
- an individual file (which would not be appropriate for
- multi-subject analysis) or can be a file without
+ norm.mgz as the base image for the screenshots. Can be
+ an individual file (which would not be appropriate for
+ multi-subject analysis) or can be a file without
pathname and with the same filename across subjects within the 'mri'
- subdirectory of an individual FreeSurfer results directory
+ subdirectory of an individual FreeSurfer results directory
(which would be appropriate for multi-subject analysis).
--screenshots_overlay
- Path to an image that should be used instead of aseg.mgz
- as the overlay image for the screenshots can also be none.
- Can be an individual file (which would not be appropriate
+ Path to an image that should be used instead of aseg.mgz
+ as the overlay image for the screenshots can also be none.
+ Can be an individual file (which would not be appropriate
for multi-subject analysis) or can be a file without pathname
and with the same filename across subjects within the 'mri' subdirectory
- of an individual FreeSurfer results directory
+ of an individual FreeSurfer results directory
(which would be appropriate for multi-subject analysis).
--screenshots_surf [ ...]
- One or more surface files that should be used instead of
+ One or more surface files that should be used instead of
[lr]h.white and [lr]h.pial; can also be none.
- Can be one or more individual file(s) (which would not
+ Can be one or more individual file(s) (which would not
be appropriate for multi-subject analysis) or
- can be a (list of) file(s) without pathname and with the same
+ can be a (list of) file(s) without pathname and with the same
filename across subjects within the 'surf'
- subdirectory of an individual FreeSurfer results directory
+ subdirectory of an individual FreeSurfer results directory
(which would be appropriate for multi-subject analysis).
--screenshots_views [ ...]
- One or more views to use for the screenshots in the form of
+ One or more views to use for the screenshots in the form of
x= y= and/or z=.
Order does not matter. Default views are x=-10 x=10 y=0 z=0.
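The --skip-existing behaviour documented in this patch follows one per-module pattern in fsqcMain.py: a module is skipped only when the subject's status file marks it as previously OK (0) or skipped (3); otherwise it is recomputed. A condensed sketch of that decision, with an illustrative function name that does not exist in the code:

```python
# Condensed sketch of the per-module skip decision used with --skip-existing.
# The function name is illustrative; the logic follows the module blocks in fsqcMain.py.
import logging


def determine_status(module, subject, status_dict, skip_existing):
    """Return 3 (skip) if a previous run completed or skipped the module, else 0."""
    if not skip_existing:
        return 0
    if not status_dict:
        logging.info(
            "Not skipping %s computation for %s: no statusfile was found", module, subject
        )
        return 0
    if status_dict.get(module) in (0, 3):
        logging.info("Skipping %s computation for %s", module, subject)
        return 3
    logging.info(
        "Not skipping %s computation for %s: statusfile did not indicate ok or skipped",
        module,
        subject,
    )
    return 0
```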
From 8be21ce11e0ed2a6791347e4cf5a2c504c2548b3 Mon Sep 17 00:00:00 2001
From: diersk
Date: Thu, 13 Jun 2024 18:03:46 +0200
Subject: [PATCH 08/25] Updated documentation
---
CHANGES.md | 4 ++--
README.md | 2 +-
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/CHANGES.md b/CHANGES.md
index 9ffd35c..daf54b1 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -5,8 +5,8 @@ This is a document summarizing the changes that are associated with (major) upda
## Version 2.1.0
-- Added group-only, no-group options
-- Added status file and skip-existing option
+- Added group-only, no-group options to allow for running the scripts at the individual or group level only. Default is to run at both levels.
+- Added status file and skip-existing option to allow for incremental updates of a given output directory; additional cases or additional modules will be added. Existing ones will not be recomputed.
## Version 2.0.2
diff --git a/README.md b/README.md
index e3c737a..95a6052 100644
--- a/README.md
+++ b/README.md
@@ -404,7 +404,7 @@ Call `help(fsqc.run_fsqc)` for further usage info and additional options.
### As a Docker image
-We provide a configuration files that can be used to create a Docker or
+We provide configuration files that can be used to create a Docker or
Singularity image for the fsqc scripts. Documentation can be found on the
[Docker](docker/Docker.md) and [Singularity](singularity/Singularity.md) pages.
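As a usage illustration of the incremental workflow this changelog entry describes: run the subject-level pass first (optionally with skip_existing, so previously finished cases are not recomputed), then rebuild the group-level summaries in a second, group-only pass. This is a sketch assuming the run_fsqc keyword arguments introduced above; the paths are placeholders. A later patch in this series makes skip_existing and group_only mutually exclusive, which the example respects.

```python
# Sketch of the incremental two-pass workflow (paths are placeholders).
from fsqc import run_fsqc

# Pass 1: subject-level processing only; cases already marked OK or skipped in
# their status files are not recomputed.
run_fsqc(
    subjects_dir="/data/subjects",
    output_dir="/data/fsqc-output",
    no_group=True,
    skip_existing=True,
)

# Pass 2: rebuild the group-level CSV/HTML summaries from the existing
# per-subject outputs.
run_fsqc(
    subjects_dir="/data/subjects",
    output_dir="/data/fsqc-output",
    group_only=True,
)
```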
From 34bbc64eb36331ffcb263e28de45f6893b2846ff Mon Sep 17 00:00:00 2001
From: diersk
Date: Thu, 13 Jun 2024 22:33:16 +0200
Subject: [PATCH 09/25] Updated skip-existing, statusfile functionality
---
fsqc/fsqcMain.py | 34 +++++++++++++++++++++++-----------
1 file changed, 23 insertions(+), 11 deletions(-)
diff --git a/fsqc/fsqcMain.py b/fsqc/fsqcMain.py
index d3b4569..9adb853 100644
--- a/fsqc/fsqcMain.py
+++ b/fsqc/fsqcMain.py
@@ -863,6 +863,12 @@ def _check_arguments(argsDict):
"ERROR: Use either --no-group or --group-only (but not both)."
)
+ # skip-existing cannot be used with group-only
+ if argsDict["skip_existing"] is True and argsDict["group_only"] is True:
+ raise ValueError(
+            "ERROR: Use either --skip-existing or --group-only (but not both)."
+ )
+
# check if screenshots subdirectory exists or can be created and is writable
if argsDict["screenshots"] is True or argsDict["screenshots_html"] is True:
if os.path.isdir(os.path.join(argsDict["output_dir"], "screenshots")):
@@ -1616,6 +1622,7 @@ def _do_fsqc(argsDict):
os.makedirs(status_outdir)
# if it already exists, read statusfile
+ status_dict = dict()
if os.path.exists(os.path.join(status_outdir, "status.txt")):
status_dict = dict(pd.read_csv(os.path.join(status_outdir, "status.txt"), sep=":", header=None, comment="#", names=["module", "status"], dtype=str).to_dict(orient="split")['data'])
for x in ['metrics', 'shape', 'screenshots', 'surfaces', 'skullstrip', 'fornix', 'hypothalamus', 'hippocampus']:
@@ -1639,7 +1646,7 @@ def _do_fsqc(argsDict):
#
metrics_status = 0
if argsDict["skip_existing"] is True:
- if subject in statusDict.keys():
+ if len(status_dict)>0:
if statusDict[subject]["metrics"] == 0 or statusDict[subject]["metrics"] == 3:
metrics_status = 3
logging.info("Skipping metrics computation for " + subject)
@@ -1778,6 +1785,9 @@ def _do_fsqc(argsDict):
# write to file
pd.DataFrame(metricsDict[subject], index=[subject]).to_csv(os.path.join(argsDict["output_dir"], "metrics", subject, "metrics.csv"))
+ elif metrics_status == 3:
+ metricsDict[subject] = metricsDict[subject] | pd.read_csv(os.path.join(metrics_outdir, "metrics.csv"), dtype={'Unnamed: 0':str, 'subject':str}).set_index('Unnamed: 0').to_dict(orient="index")[subject]
+
# note that we cannot "not do" the metrics module, only skipping is possible.
# hence no metrics_status == 2 possible.
@@ -1792,7 +1802,7 @@ def _do_fsqc(argsDict):
# determine status
shape_status = 0
if argsDict["skip_existing"] is True:
- if subject in statusDict.keys():
+ if len(status_dict)>0:
if statusDict[subject]["shape"] == 0 or statusDict[subject]["shape"] == 3:
shape_status = 3
logging.info("Skipping shape computation for " + subject)
@@ -1870,7 +1880,7 @@ def _do_fsqc(argsDict):
# determine status
screenshots_status = 0
if argsDict["skip_existing"] is True:
- if subject in statusDict.keys():
+ if len(status_dict)>0:
if statusDict[subject]["screenshots"] == 0 or statusDict[subject]["screenshots"] == 3:
screenshots_status = 3
logging.info("Skipping screenshots computation for " + subject)
@@ -2061,7 +2071,7 @@ def _do_fsqc(argsDict):
# determine status
surfaces_status = 0
if argsDict["skip_existing"] is True:
- if subject in statusDict.keys():
+ if len(status_dict)>0:
if statusDict[subject]["surfaces"] == 0 or statusDict[subject]["surfaces"] == 3:
surfaces_status = 3
logging.info("Skipping surfaces computation for " + subject)
@@ -2126,7 +2136,7 @@ def _do_fsqc(argsDict):
# determine status
skullstrip_status = 0
if argsDict["skip_existing"] is True:
- if subject in statusDict.keys():
+ if len(status_dict)>0:
if statusDict[subject]["skullstrip"] == 0 or statusDict[subject]["skullstrip"] == 3:
skullstrip_status = 3
logging.info("Skipping skullstrip computation for " + subject)
@@ -2239,7 +2249,7 @@ def _do_fsqc(argsDict):
# determine status
fornix_status = 0
if argsDict["skip_existing"] is True:
- if subject in statusDict.keys():
+ if len(status_dict)>0:
if statusDict[subject]["fornix"] == 0 or statusDict[subject]["fornix"] == 3:
fornix_status = 3
logging.info("Skipping fornix computation for " + subject)
@@ -2337,7 +2347,7 @@ def _do_fsqc(argsDict):
# determine status
hypothalamus_status = 0
if argsDict["skip_existing"] is True:
- if subject in statusDict.keys():
+ if len(status_dict)>0:
if statusDict[subject]["hypothalamus"] == 0 or statusDict[subject]["hypothalamus"] == 3:
hypothalamus_status = 3
logging.info("Skipping hypothalamus computation for " + subject)
@@ -2409,7 +2419,7 @@ def _do_fsqc(argsDict):
# determine status
hippocampus_status = 0
if argsDict["skip_existing"] is True:
- if subject in statusDict.keys():
+ if len(status_dict)>0:
if statusDict[subject]["hippocampus"] == 0 or statusDict[subject]["hippocampus"] == 3:
hippocampus_status = 3
logging.info("Skipping hippocampus computation for " + subject)
@@ -2510,7 +2520,7 @@ def _do_fsqc(argsDict):
# --------------------------------------------------------------------------
# run optional modules: outlier detection
- if argsDict["no_group"] is False: # todo: what input is required for group_only?
+ if argsDict["no_group"] is False:
#
if argsDict["outlier"] is True:
@@ -2598,7 +2608,7 @@ def _do_fsqc(argsDict):
# store data
for subject in argsDict["subjects"]:
if argsDict["group_only"] is True:
- metricsDict.update({subject: {"subject": subject}})
+ metricsDict.update({subject: {"subject": subject}})
metricsDict[subject].update(outlierDict[subject])
# --------------------------------------------------------------------------
@@ -2638,7 +2648,9 @@ def _do_fsqc(argsDict):
]
)
- # check if data needs to be read from disk
+ # check if data needs to be read from disk; note that skip-existing is
+ # mutually exclusive with group-only; in case of skip-existing, data
+ # that is already present will have been read earlier already
if argsDict['group_only'] is True:
for subject in argsDict["subjects"]:
# metricsDict may (or not) be populated from previous outlier module
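The group-only branch shown above reassembles per-subject metrics from the metrics.csv files written during earlier subject-level runs. A stripped-down sketch of that read-and-merge step, with the pandas chain from the patch unpacked for readability (function and variable names are illustrative):

```python
# Sketch of the group-only metrics merge; the read_csv/set_index/to_dict chain
# follows the patch above, the function name is illustrative.
import os

import pandas as pd


def load_subject_metrics(output_dir, subject):
    """Read metrics/<subject>/metrics.csv back into a flat {metric: value} dict."""
    df = pd.read_csv(
        os.path.join(output_dir, "metrics", subject, "metrics.csv"),
        dtype={"Unnamed: 0": str, "subject": str},
    )
    # The first (unnamed) column holds the subject ID that was used as row index.
    return df.set_index("Unnamed: 0").to_dict(orient="index")[subject]


# In the patch this is merged into metricsDict[subject], which may already hold
# outlier counts from the outlier module:
#   metricsDict[subject] = metricsDict[subject] | load_subject_metrics(output_dir, subject)
```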
From 475d5effec799b430f3e4e76fb024a63183825d3 Mon Sep 17 00:00:00 2001
From: diersk
Date: Fri, 14 Jun 2024 17:14:42 +0200
Subject: [PATCH 10/25] Updated group-only functionality
---
fsqc/fsqcMain.py | 25 +++++++++++++++++++------
1 file changed, 19 insertions(+), 6 deletions(-)
diff --git a/fsqc/fsqcMain.py b/fsqc/fsqcMain.py
index 9adb853..eee6864 100644
--- a/fsqc/fsqcMain.py
+++ b/fsqc/fsqcMain.py
@@ -2668,6 +2668,23 @@ def _do_fsqc(argsDict):
fornixShapeDict = {subject: dict(zip(map("fornixShapeEV{:0>3}".format, range(FORNIX_N_EIGEN)),fornixShapeOutput))}
metricsDict[subject].update(fornixShapeDict[subject])
+ # check if other dictionaries need to be populated
+ if argsDict['group_only'] is True:
+ for subject in argsDict["subjects"]:
+ if argsDict["screenshots_html"] is True:
+ imagesScreenshotsDict[subject] = os.path.join(argsDict["output_dir"], "screenshots", subject, subject + ".png")
+ if argsDict["surfaces_html"] is True:
+ imagesSurfacesDict[subject] = os.path.join(argsDict["output_dir"], "surfaces", subject)
+ if argsDict["skullstrip_html"] is True:
+ imagesSkullstripDict[subject] = os.path.join(argsDict["output_dir"], "skullstrip", subject, subject + ".png")
+ if argsDict["fornix_html"] is True:
+ imagesFornixDict[subject] = os.path.join(argsDict["output_dir"], "fornix", subject, "cc.png")
+ if argsDict["hypothalamus_html"] is True:
+ imagesHypothalamusDict[subject] = os.path.join(argsDict["output_dir"], "hypothalamus", subject, "hypothalamus.png")
+ if argsDict["hippocampus_html"] is True:
+ imagesHippocampusLeftDict[subject] = os.path.join(argsDict["output_dir"], "hippocampus", subject, "hippocampus-left.png")
+ imagesHippocampusRightDict[subject] = os.path.join(argsDict["output_dir"], "hippocampus", subject, "hippocampus-right.png")
+
# collect other keys; need to iterate over subjects, because not all of them
# necessarily have the same set of keys
if argsDict["shape"] is True:
@@ -2682,9 +2699,7 @@ def _do_fsqc(argsDict):
fornixKeys = list()
for subject in fornixShapeDict.keys():
if len(fornixShapeDict[subject]) > 0:
- fornixKeys = list(
- np.unique(fornixKeys + list(fornixShapeDict[subject].keys()))
- )
+ fornixKeys = list(np.unique(fornixKeys + list(fornixShapeDict[subject].keys())))
metricsFieldnames.extend(sorted(fornixKeys))
#
@@ -2692,9 +2707,7 @@ def _do_fsqc(argsDict):
outlierKeys = list()
for subject in outlierDict.keys():
if len(outlierDict[subject]) > 0:
- outlierKeys = list(
- np.unique(outlierKeys + list(outlierDict[subject].keys()))
- )
+ outlierKeys = list(np.unique(outlierKeys + list(outlierDict[subject].keys())))
metricsFieldnames.extend(sorted(outlierKeys))
# determine output file names
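In group-only mode the HTML report is rebuilt from images that earlier subject-level runs have already written; the patch above simply points the image dictionaries at those expected locations. For orientation, a sketch of the per-subject layout it assumes (directory and file names as in the patch, function name illustrative):

```python
# Expected per-subject image locations assumed by the group-only HTML pass
# (directory and file names as in the patch above; function name illustrative).
import os


def expected_images(output_dir, subject):
    return {
        "screenshots": os.path.join(output_dir, "screenshots", subject, subject + ".png"),
        # surfaces is a directory holding one PNG per hemisphere/surface/view
        "surfaces": os.path.join(output_dir, "surfaces", subject),
        "skullstrip": os.path.join(output_dir, "skullstrip", subject, subject + ".png"),
        "fornix": os.path.join(output_dir, "fornix", subject, "cc.png"),
        "hypothalamus": os.path.join(output_dir, "hypothalamus", subject, "hypothalamus.png"),
        "hippocampus-left": os.path.join(output_dir, "hippocampus", subject, "hippocampus-left.png"),
        "hippocampus-right": os.path.join(output_dir, "hippocampus", subject, "hippocampus-right.png"),
    }
```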
From 9fbcbbc7150a76e4a1f46029102b5d4770daad37 Mon Sep 17 00:00:00 2001
From: diersk
Date: Thu, 25 Jul 2024 17:26:30 +0200
Subject: [PATCH 11/25] Fixed brainprint, lapy versions
---
requirements.txt | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/requirements.txt b/requirements.txt
index 66e81d3..cde1a48 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
-brainprint>=0.4.0
-lapy>=1.0.0
+brainprint==0.4.0
+lapy>=1.0.0,<2
kaleido
matplotlib
nibabel
From 7dde46bcc8df459b56cc0b0ae9e7fc4aa84195ae Mon Sep 17 00:00:00 2001
From: diersk
Date: Thu, 25 Jul 2024 17:36:05 +0200
Subject: [PATCH 12/25] Updated version
---
VERSION | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/VERSION b/VERSION
index 7121511..7ec1d6d 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.1.0-beta
+2.1.0
From bb0363b2b4468a31d612b4982e395eed6c8db94f Mon Sep 17 00:00:00 2001
From: diersk
Date: Thu, 25 Jul 2024 17:46:45 +0200
Subject: [PATCH 13/25] Updated readme, documentation
---
README.md | 7 ++++---
doc/Requirements.rst | 2 +-
doc/Usage.rst | 18 ++++++++++--------
3 files changed, 15 insertions(+), 12 deletions(-)
diff --git a/README.md b/README.md
index 95a6052..6f08483 100644
--- a/README.md
+++ b/README.md
@@ -222,7 +222,8 @@ run_fsqc --subjects_dir --output_dir
[--fornix] [--fornix-html] [--hippocampus]
[--hippocampus-html] [--hippocampus-label ... ]
[--hypothalamus] [--hypothalamus-html] [--shape]
- [--outlier] [--fastsurfer] [exit-on-error]
+ [--outlier] [--fastsurfer] [--no-group]
+ [--group-only] [--exit-on-error]
[--skip-existing] [-h] [--more-help]
[...]
@@ -264,7 +265,7 @@ optional arguments:
--outlier-table specify normative values (only in conjunction with
--outlier)
--fastsurfer use FastSurfer instead of FreeSurfer output
- --no_group run script in subject-level mode. will compute
+ --no-group run script in subject-level mode. will compute
individual files and statistics, but not create
group-level summaries.
--group-only run script in group mode. will create group-level
@@ -476,7 +477,7 @@ ___
all required packages will be installed automatically and manual installation
as detailed above will not be necessary.
-- This software has been tested on Ubuntu 20.04.
+- This software has been tested on Ubuntu 20.04 and 22.04.
- A working [FreeSurfer](https://freesurfer.net) installation (version 6 or
newer) is required for running the 'shape' module of this toolbox. Also make
diff --git a/doc/Requirements.rst b/doc/Requirements.rst
index f285f2a..8ce08a8 100644
--- a/doc/Requirements.rst
+++ b/doc/Requirements.rst
@@ -14,7 +14,7 @@ Requirements
```
- If installing the toolbox as a Python package or if using the Docker image, all required packages will be installed automatically, and manual installation as detailed above will not be necessary.
-- This software has been tested on Ubuntu 20.04.
+- This software has been tested on Ubuntu 20.04 and Ubuntu 22.04.
 - A working `FreeSurfer <https://freesurfer.net>`_ installation (version 6 or newer) is required for running the 'shape' module of this toolbox. Also, make sure that FreeSurfer is sourced (i.e., FREESURFER_HOME is set as an environment variable) before running an analysis.
diff --git a/doc/Usage.rst b/doc/Usage.rst
index 2235f6a..b4411e1 100644
--- a/doc/Usage.rst
+++ b/doc/Usage.rst
@@ -9,14 +9,16 @@ As a Command Line Tool
run_fsqc
--subjects_dir
--output_dir
- [--subjects SubjectID]
+ [--subjects SubjectID [SubjectID ...]]
[--subjects-file ] [--screenshots]
[--screenshots-html] [--surfaces] [--surfaces-html]
[--skullstrip] [--skullstrip-html]
[--fornix] [--fornix-html] [--hippocampus]
[--hippocampus-html] [--hippocampus-label ... ]
[--hypothalamus] [--hypothalamus-html] [--shape]
- [--outlier] [--fastsurfer] [-h] [--more-help]
+ [--outlier] [--fastsurfer] [--no-group]
+ [--group-only] [--exit-on-error]
+ [--skip-existing] [-h] [--more-help]
[...]
Required Arguments:
@@ -88,18 +90,18 @@ As a Command Line Tool
--fastsurfer
Use FastSurfer instead of FreeSurfer output
- --exit-on-error
- Terminate the program when encountering an error; otherwise, try to continue with the next module or case
-
- --skip-existing
- skips processing for a given case if output already exists, even with possibly different parameters or settings
-
--no-group
run script in subject-level mode. will compute individual files and statistics, but not create group-level summaries.
--group-only
run script in group mode. will create group-level summaries from existing inputs. needs to be run on output directory with already existing results.
+ --exit-on-error
+ Terminate the program when encountering an error; otherwise, try to continue with the next module or case
+
+ --skip-existing
+ skips processing for a given case if output already exists, even with possibly different parameters or settings
+
Getting Help:
-------------
-h, --help
From 982f3155959501c366ab4518465cea06b14bec07 Mon Sep 17 00:00:00 2001
From: diersk
Date: Thu, 25 Jul 2024 18:13:18 +0200
Subject: [PATCH 14/25] Updated docstring
---
fsqc/fsqcMain.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/fsqc/fsqcMain.py b/fsqc/fsqcMain.py
index eee6864..c93aeea 100644
--- a/fsqc/fsqcMain.py
+++ b/fsqc/fsqcMain.py
@@ -3363,10 +3363,10 @@ def run_fsqc(
fastsurfer : bool, default: False
Use FastSurfer instead of FreeSurfer input.
no_group : bool, default: False
- run script in subject-level mode. will compute individual files and
+        Run script in subject-level mode. Will compute individual files and
statistics, but not create group-level summaries.
- group-only : bool, default: False
- run script in group mode. will create group-level summaries from
+ group_only : bool, default: False
+        Run script in group mode. Will create group-level summaries from
existing inputs.
exit_on_error : bool, default: False
Exit on error. If False, a warning is thrown and the analysis
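The two modes documented in this docstring are mutually exclusive, and group_only additionally excludes skip_existing (see the _check_arguments hunks earlier in this series). A minimal sketch of those checks, assuming an argsDict with the same keys as in the patch:

```python
# Sketch of the mode checks added to _check_arguments earlier in this series
# (same argsDict keys as in the patch).
def check_mode_arguments(argsDict):
    # subject-level-only and group-only modes cannot be combined
    if argsDict["no_group"] is True and argsDict["group_only"] is True:
        raise ValueError("ERROR: Use either --no-group or --group-only (but not both).")
    # --skip-existing relies on subject-level processing, so it cannot be
    # combined with --group-only either
    if argsDict["skip_existing"] is True and argsDict["group_only"] is True:
        raise ValueError(
            "ERROR: Use either --skip-existing or --group-only (but not both)."
        )
```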
From 53ab662ecb7ab10630f900bde7421c28a1508d41 Mon Sep 17 00:00:00 2001
From: diersk
Date: Thu, 25 Jul 2024 18:19:02 +0200
Subject: [PATCH 15/25] Applying ruff changes
---
fsqc/fsqcMain.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/fsqc/fsqcMain.py b/fsqc/fsqcMain.py
index c93aeea..d4fedd2 100644
--- a/fsqc/fsqcMain.py
+++ b/fsqc/fsqcMain.py
@@ -2583,7 +2583,7 @@ def _do_fsqc(argsDict):
)
# return
- outlier_status = 0
+ # outlier_status = 0 # not used currently
#
except Exception as e:
@@ -2654,7 +2654,7 @@ def _do_fsqc(argsDict):
if argsDict['group_only'] is True:
for subject in argsDict["subjects"]:
# metricsDict may (or not) be populated from previous outlier module
- if not subject in metricsDict.keys():
+ if subject not in metricsDict.keys():
metricsDict.update({subject: {"subject": subject}})
metricsDict[subject] = metricsDict[subject] | pd.read_csv(os.path.join(argsDict["output_dir"], "metrics", subject, "metrics.csv"), dtype={'Unnamed: 0':str, 'subject':str}).set_index('Unnamed: 0').to_dict(orient="index")[subject]
#
From c00a8c8173b0bb3a2b2a72d39e63bd7733eae480 Mon Sep 17 00:00:00 2001
From: diersk
Date: Thu, 25 Jul 2024 18:21:54 +0200
Subject: [PATCH 16/25] Applying isort changes
---
fsqc/fsqcMain.py | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/fsqc/fsqcMain.py b/fsqc/fsqcMain.py
index d4fedd2..3431d6e 100644
--- a/fsqc/fsqcMain.py
+++ b/fsqc/fsqcMain.py
@@ -1525,12 +1525,11 @@ def _do_fsqc(argsDict):
import logging
import os
import time
+ from pathlib import Path
import numpy as np
import pandas as pd
- from pathlib import Path
-
from fsqc.checkCCSize import checkCCSize
from fsqc.checkContrast import checkContrast
from fsqc.checkRotation import checkRotation
From 943b255a033db5bc0e9b1b15835ac9c74744ba79 Mon Sep 17 00:00:00 2001
From: diersk
Date: Thu, 25 Jul 2024 18:25:54 +0200
Subject: [PATCH 17/25] Applying black changes
---
fsqc/evaluateFornixSegmentation.py | 4 +-
fsqc/fsqcMain.py | 509 ++++++++++++++++++++++-------
2 files changed, 388 insertions(+), 125 deletions(-)
diff --git a/fsqc/evaluateFornixSegmentation.py b/fsqc/evaluateFornixSegmentation.py
index 5480db3..ce4ca7a 100644
--- a/fsqc/evaluateFornixSegmentation.py
+++ b/fsqc/evaluateFornixSegmentation.py
@@ -199,7 +199,9 @@ def evaluateFornixSegmentation(
# write output
if WRITE_EIGEN is True:
- pd.DataFrame(out).transpose().to_csv(os.path.join(OUTPUT_DIR, SUBJECT + ".fornix.csv"), na_rep="NA", index=False)
+ pd.DataFrame(out).transpose().to_csv(
+ os.path.join(OUTPUT_DIR, SUBJECT + ".fornix.csv"), na_rep="NA", index=False
+ )
# --------------------------------------------------------------------------
# return
diff --git a/fsqc/fsqcMain.py b/fsqc/fsqcMain.py
index 3431d6e..95fe631 100644
--- a/fsqc/fsqcMain.py
+++ b/fsqc/fsqcMain.py
@@ -859,9 +859,7 @@ def _check_arguments(argsDict):
# check if only one of no_group and group_only is true
if argsDict["no_group"] is True and argsDict["group_only"] is True:
- raise ValueError(
- "ERROR: Use either --no-group or --group-only (but not both)."
- )
+ raise ValueError("ERROR: Use either --no-group or --group-only (but not both).")
# skip-existing cannot be used with group-only
if argsDict["skip_existing"] is True and argsDict["group_only"] is True:
@@ -1587,7 +1585,6 @@ def _do_fsqc(argsDict):
# subject-level processing
if argsDict["group_only"] is False:
-
# loop through the specified subjects
for subject in argsDict["subjects"]:
#
@@ -1623,8 +1620,26 @@ def _do_fsqc(argsDict):
# if it already exists, read statusfile
status_dict = dict()
if os.path.exists(os.path.join(status_outdir, "status.txt")):
- status_dict = dict(pd.read_csv(os.path.join(status_outdir, "status.txt"), sep=":", header=None, comment="#", names=["module", "status"], dtype=str).to_dict(orient="split")['data'])
- for x in ['metrics', 'shape', 'screenshots', 'surfaces', 'skullstrip', 'fornix', 'hypothalamus', 'hippocampus']:
+ status_dict = dict(
+ pd.read_csv(
+ os.path.join(status_outdir, "status.txt"),
+ sep=":",
+ header=None,
+ comment="#",
+ names=["module", "status"],
+ dtype=str,
+ ).to_dict(orient="split")["data"]
+ )
+ for x in [
+ "metrics",
+ "shape",
+ "screenshots",
+ "surfaces",
+ "skullstrip",
+ "fornix",
+ "hypothalamus",
+ "hippocampus",
+ ]:
status_dict[x] = int(status_dict[x])
statusDict[subject] = status_dict
@@ -1645,17 +1660,27 @@ def _do_fsqc(argsDict):
#
metrics_status = 0
if argsDict["skip_existing"] is True:
- if len(status_dict)>0:
- if statusDict[subject]["metrics"] == 0 or statusDict[subject]["metrics"] == 3:
+ if len(status_dict) > 0:
+ if (
+ statusDict[subject]["metrics"] == 0
+ or statusDict[subject]["metrics"] == 3
+ ):
metrics_status = 3
logging.info("Skipping metrics computation for " + subject)
else:
- logging.info("Not skipping metrics computation for " + subject + ": statusfile did not indicate ok or skipped")
+ logging.info(
+ "Not skipping metrics computation for "
+ + subject
+ + ": statusfile did not indicate ok or skipped"
+ )
else:
- logging.info("Not skipping metrics computation for " + subject + ": no statusfile was found")
+ logging.info(
+ "Not skipping metrics computation for "
+ + subject
+ + ": no statusfile was found"
+ )
if metrics_status == 0:
-
# get WM and GM SNR for orig.mgz
try:
wm_snr_orig, gm_snr_orig = checkSNR(
@@ -1732,7 +1757,9 @@ def _do_fsqc(argsDict):
# check contrast
try:
- con_snr_lh, con_snr_rh = checkContrast(argsDict["subjects_dir"], subject)
+ con_snr_lh, con_snr_rh = checkContrast(
+ argsDict["subjects_dir"], subject
+ )
except Exception as e:
logging.error("ERROR: Contrast check failed for " + subject)
@@ -1782,10 +1809,22 @@ def _do_fsqc(argsDict):
)
# write to file
- pd.DataFrame(metricsDict[subject], index=[subject]).to_csv(os.path.join(argsDict["output_dir"], "metrics", subject, "metrics.csv"))
+ pd.DataFrame(metricsDict[subject], index=[subject]).to_csv(
+ os.path.join(
+ argsDict["output_dir"], "metrics", subject, "metrics.csv"
+ )
+ )
elif metrics_status == 3:
- metricsDict[subject] = metricsDict[subject] | pd.read_csv(os.path.join(metrics_outdir, "metrics.csv"), dtype={'Unnamed: 0':str, 'subject':str}).set_index('Unnamed: 0').to_dict(orient="index")[subject]
+ metricsDict[subject] = (
+ metricsDict[subject]
+ | pd.read_csv(
+ os.path.join(metrics_outdir, "metrics.csv"),
+ dtype={"Unnamed: 0": str, "subject": str},
+ )
+ .set_index("Unnamed: 0")
+ .to_dict(orient="index")[subject]
+ )
# note that we cannot "not do" the metrics module, only skipping is possible.
# hence no metrics_status == 2 possible.
@@ -1797,18 +1836,28 @@ def _do_fsqc(argsDict):
# run optional modules: shape analysis
if argsDict["shape"] is True:
-
# determine status
shape_status = 0
if argsDict["skip_existing"] is True:
- if len(status_dict)>0:
- if statusDict[subject]["shape"] == 0 or statusDict[subject]["shape"] == 3:
+ if len(status_dict) > 0:
+ if (
+ statusDict[subject]["shape"] == 0
+ or statusDict[subject]["shape"] == 3
+ ):
shape_status = 3
logging.info("Skipping shape computation for " + subject)
else:
- logging.info("Not skipping shape computation for " + subject + ": statusfile did not indicate ok or skipped")
+ logging.info(
+ "Not skipping shape computation for "
+ + subject
+ + ": statusfile did not indicate ok or skipped"
+ )
else:
- logging.info("Not skipping shape computation for " + subject + ": no statusfile was found")
+ logging.info(
+ "Not skipping shape computation for "
+ + subject
+ + ": no statusfile was found"
+ )
# check / create subject-specific brainprint_outdir
brainprint_outdir = Path(
@@ -1817,7 +1866,6 @@ def _do_fsqc(argsDict):
#
if shape_status == 0:
-
#
try:
# message
@@ -1850,16 +1898,19 @@ def _do_fsqc(argsDict):
#
except Exception as e:
distDict = {subject: []}
- logging.error("ERROR: the shape module failed for subject " + subject)
+ logging.error(
+ "ERROR: the shape module failed for subject " + subject
+ )
logging.error("Reason: " + str(e))
shape_status = 1
if argsDict["exit_on_error"] is True:
raise
elif shape_status == 3:
-
# read results from previous run
- dstMat = pd.read_csv(brainprint_outdir / (subject + ".brainprint.asymmetry.csv")).to_dict(orient="index")[0]
+ dstMat = pd.read_csv(
+ brainprint_outdir / (subject + ".brainprint.asymmetry.csv")
+ ).to_dict(orient="index")[0]
distDict = {subject: dstMat}
# store data
@@ -1875,18 +1926,30 @@ def _do_fsqc(argsDict):
# run optional modules: screenshots
if argsDict["screenshots"] is True or argsDict["screenshots_html"] is True:
-
# determine status
screenshots_status = 0
if argsDict["skip_existing"] is True:
- if len(status_dict)>0:
- if statusDict[subject]["screenshots"] == 0 or statusDict[subject]["screenshots"] == 3:
+ if len(status_dict) > 0:
+ if (
+ statusDict[subject]["screenshots"] == 0
+ or statusDict[subject]["screenshots"] == 3
+ ):
screenshots_status = 3
- logging.info("Skipping screenshots computation for " + subject)
+ logging.info(
+ "Skipping screenshots computation for " + subject
+ )
else:
- logging.info("Not skipping screenshots computation for " + subject + ": statusfile did not indicate ok or skipped")
+ logging.info(
+ "Not skipping screenshots computation for "
+ + subject
+ + ": statusfile did not indicate ok or skipped"
+ )
else:
- logging.info("Not skipping screenshots computation for " + subject + ": no statusfile was found")
+ logging.info(
+ "Not skipping screenshots computation for "
+ + subject
+ + ": no statusfile was found"
+ )
# check / create subject-specific screenshots_outdir
screenshots_outdir = os.path.join(
@@ -1898,7 +1961,6 @@ def _do_fsqc(argsDict):
#
if screenshots_status == 0:
-
#
try:
# message
@@ -1952,10 +2014,16 @@ def _do_fsqc(argsDict):
# check screenshots_overlay
if argsDict["screenshots_overlay"] is not None:
if argsDict["screenshots_overlay"][0] == "default":
- screenshots_overlay_subj = argsDict["screenshots_overlay"]
- logging.info("Using default for screenshot overlay image")
+ screenshots_overlay_subj = argsDict[
+ "screenshots_overlay"
+ ]
+ logging.info(
+ "Using default for screenshot overlay image"
+ )
elif os.path.isfile(argsDict["screenshots_overlay"][0]):
- screenshots_overlay_subj = argsDict["screenshots_overlay"]
+ screenshots_overlay_subj = argsDict[
+ "screenshots_overlay"
+ ]
logging.info(
"Using "
+ screenshots_overlay_subj[0]
@@ -1997,7 +2065,9 @@ def _do_fsqc(argsDict):
logging.info("Using default for screenshot surface")
elif os.path.isfile(screenshots_surf_i):
logging.info(
- "Using " + screenshots_surf_i + " as screenshot surface"
+ "Using "
+ + screenshots_surf_i
+ + " as screenshot surface"
)
elif os.path.isfile(
os.path.join(
@@ -2014,7 +2084,9 @@ def _do_fsqc(argsDict):
screenshots_surf_i,
)
logging.info(
- "Using " + screenshots_surf_i + " as screenshot surface"
+ "Using "
+ + screenshots_surf_i
+ + " as screenshot surface"
)
else:
raise FileNotFoundError(
@@ -2044,7 +2116,9 @@ def _do_fsqc(argsDict):
#
except Exception as e:
- logging.error("ERROR: screenshots module failed for subject " + subject)
+ logging.error(
+ "ERROR: screenshots module failed for subject " + subject
+ )
logging.error("Reason: " + str(e))
screenshots_status = 1
if argsDict["exit_on_error"] is True:
@@ -2066,18 +2140,28 @@ def _do_fsqc(argsDict):
# run optional modules: surface plots
if argsDict["surfaces"] is True or argsDict["surfaces_html"] is True:
-
# determine status
surfaces_status = 0
if argsDict["skip_existing"] is True:
- if len(status_dict)>0:
- if statusDict[subject]["surfaces"] == 0 or statusDict[subject]["surfaces"] == 3:
+ if len(status_dict) > 0:
+ if (
+ statusDict[subject]["surfaces"] == 0
+ or statusDict[subject]["surfaces"] == 3
+ ):
surfaces_status = 3
logging.info("Skipping surfaces computation for " + subject)
else:
- logging.info("Not skipping surfaces computation for " + subject + ": statusfile did not indicate ok or skipped")
+ logging.info(
+ "Not skipping surfaces computation for "
+ + subject
+ + ": statusfile did not indicate ok or skipped"
+ )
else:
- logging.info("Not skipping surfaces computation for " + subject + ": no statusfile was found")
+ logging.info(
+ "Not skipping surfaces computation for "
+ + subject
+ + ": no statusfile was found"
+ )
# check / create subject-specific surfaces_outdir
surfaces_outdir = os.path.join(
@@ -2088,7 +2172,6 @@ def _do_fsqc(argsDict):
#
if surfaces_status == 0:
-
#
try:
# message
@@ -2109,7 +2192,9 @@ def _do_fsqc(argsDict):
#
except Exception as e:
- logging.error("ERROR: surfaces module failed for subject " + subject)
+ logging.error(
+ "ERROR: surfaces module failed for subject " + subject
+ )
logging.error("Reason: " + str(e))
surfaces_status = 1
if argsDict["exit_on_error"] is True:
@@ -2131,19 +2216,30 @@ def _do_fsqc(argsDict):
# run optional modules: skullstrip
if argsDict["skullstrip"] is True or argsDict["skullstrip_html"] is True:
-
# determine status
skullstrip_status = 0
if argsDict["skip_existing"] is True:
- if len(status_dict)>0:
- if statusDict[subject]["skullstrip"] == 0 or statusDict[subject]["skullstrip"] == 3:
+ if len(status_dict) > 0:
+ if (
+ statusDict[subject]["skullstrip"] == 0
+ or statusDict[subject]["skullstrip"] == 3
+ ):
skullstrip_status = 3
- logging.info("Skipping skullstrip computation for " + subject)
+ logging.info(
+ "Skipping skullstrip computation for " + subject
+ )
else:
- logging.info("Not skipping skullstrip computation for " + subject + ": statusfile did not indicate ok or skipped")
+ logging.info(
+ "Not skipping skullstrip computation for "
+ + subject
+ + ": statusfile did not indicate ok or skipped"
+ )
else:
- logging.info("Not skipping skullstrip computation for " + subject + ": no statusfile was found")
-
+ logging.info(
+ "Not skipping skullstrip computation for "
+ + subject
+ + ": no statusfile was found"
+ )
# check / create subject-specific skullstrip_outdir
skullstrip_outdir = os.path.join(
@@ -2155,7 +2251,6 @@ def _do_fsqc(argsDict):
#
if skullstrip_status == 0:
-
#
try:
# message
@@ -2169,32 +2264,45 @@ def _do_fsqc(argsDict):
# check skullstrip_base
if os.path.isfile(
- os.path.join(argsDict["subjects_dir"], subject, "mri", "orig.mgz")
+ os.path.join(
+ argsDict["subjects_dir"], subject, "mri", "orig.mgz"
+ )
):
skullstrip_base_subj = [
os.path.join(
argsDict["subjects_dir"], subject, "mri", "orig.mgz"
)
]
- logging.info("Using " + "orig.mgz" + " as skullstrip base image")
+ logging.info(
+ "Using " + "orig.mgz" + " as skullstrip base image"
+ )
else:
raise FileNotFoundError(
- "ERROR: cannot find the skullstrip base file " + "orig.mgz"
+ "ERROR: cannot find the skullstrip base file "
+ + "orig.mgz"
)
# check skullstrip_overlay
if os.path.isfile(
os.path.join(
- argsDict["subjects_dir"], subject, "mri", "brainmask.mgz"
+ argsDict["subjects_dir"],
+ subject,
+ "mri",
+ "brainmask.mgz",
)
):
skullstrip_overlay_subj = [
os.path.join(
- argsDict["subjects_dir"], subject, "mri", "brainmask.mgz"
+ argsDict["subjects_dir"],
+ subject,
+ "mri",
+ "brainmask.mgz",
)
]
logging.info(
- "Using " + "brainmask.mgz" + " as skullstrip overlay image"
+ "Using "
+ + "brainmask.mgz"
+ + " as skullstrip overlay image"
)
else:
raise FileNotFoundError(
@@ -2222,7 +2330,9 @@ def _do_fsqc(argsDict):
#
except Exception as e:
- logging.error("ERROR: skullstrip module failed for subject " + subject)
+ logging.error(
+ "ERROR: skullstrip module failed for subject " + subject
+ )
logging.error("Reason: " + str(e))
skullstrip_status = 1
if argsDict["exit_on_error"] is True:
@@ -2244,18 +2354,28 @@ def _do_fsqc(argsDict):
# run optional modules: fornix
if argsDict["fornix"] is True or argsDict["fornix_html"] is True:
-
# determine status
fornix_status = 0
if argsDict["skip_existing"] is True:
- if len(status_dict)>0:
- if statusDict[subject]["fornix"] == 0 or statusDict[subject]["fornix"] == 3:
+ if len(status_dict) > 0:
+ if (
+ statusDict[subject]["fornix"] == 0
+ or statusDict[subject]["fornix"] == 3
+ ):
fornix_status = 3
logging.info("Skipping fornix computation for " + subject)
else:
- logging.info("Not skipping fornix computation for " + subject + ": statusfile did not indicate ok or skipped")
+ logging.info(
+ "Not skipping fornix computation for "
+ + subject
+ + ": statusfile did not indicate ok or skipped"
+ )
else:
- logging.info("Not skipping fornix computation for " + subject + ": no statusfile was found")
+ logging.info(
+ "Not skipping fornix computation for "
+ + subject
+ + ": no statusfile was found"
+ )
# check / create subject-specific fornix_outdir
fornix_outdir = os.path.join(argsDict["output_dir"], "fornix", subject)
@@ -2265,7 +2385,6 @@ def _do_fsqc(argsDict):
#
if fornix_status == 0:
-
#
try:
# message
@@ -2289,7 +2408,10 @@ def _do_fsqc(argsDict):
fornixShapeDict = {
subject: dict(
zip(
- map("fornixShapeEV{:0>3}".format, range(FORNIX_N_EIGEN)),
+ map(
+ "fornixShapeEV{:0>3}".format,
+ range(FORNIX_N_EIGEN),
+ ),
fornixShapeOutput,
)
)
@@ -2303,12 +2425,17 @@ def _do_fsqc(argsDict):
fornixShapeDict = {
subject: dict(
zip(
- map("fornixShapeEV{:0>3}".format, range(FORNIX_N_EIGEN)),
+ map(
+ "fornixShapeEV{:0>3}".format,
+ range(FORNIX_N_EIGEN),
+ ),
np.full(FORNIX_N_EIGEN, np.nan),
)
)
}
- logging.error("ERROR: fornix module failed for subject " + subject)
+ logging.error(
+ "ERROR: fornix module failed for subject " + subject
+ )
logging.error("Reason: " + str(e))
fornix_status = 1
if argsDict["exit_on_error"] is True:
@@ -2319,11 +2446,24 @@ def _do_fsqc(argsDict):
metricsDict[subject].update(fornixShapeDict[subject])
elif fornix_status == 3:
-
if FORNIX_SHAPE:
# read results from previous run
- fornixShapeOutput = np.array(pd.read_csv(os.path.join(fornix_outdir, subject + ".fornix.csv")))[0]
- fornixShapeDict = {subject: dict(zip(map("fornixShapeEV{:0>3}".format, range(FORNIX_N_EIGEN)),fornixShapeOutput))}
+ fornixShapeOutput = np.array(
+ pd.read_csv(
+ os.path.join(fornix_outdir, subject + ".fornix.csv")
+ )
+ )[0]
+ fornixShapeDict = {
+ subject: dict(
+ zip(
+ map(
+ "fornixShapeEV{:0>3}".format,
+ range(FORNIX_N_EIGEN),
+ ),
+ fornixShapeOutput,
+ )
+ )
+ }
metricsDict[subject].update(fornixShapeDict[subject])
# store data
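Aside: the per-eigenvalue column names used when the fornix shape results are computed or, as above, read back from a previous run are built by mapping a bound str.format over an index range. A minimal standalone sketch with made-up values (not fsqc code):
    n_eigen = 4
    eigenvalues = [0.0, 1.7, 3.2, 5.9]  # illustrative numbers only
    names = map("fornixShapeEV{:0>3}".format, range(n_eigen))
    row = dict(zip(names, eigenvalues))
    # row == {'fornixShapeEV000': 0.0, 'fornixShapeEV001': 1.7,
    #         'fornixShapeEV002': 3.2, 'fornixShapeEV003': 5.9}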
@@ -2341,19 +2481,34 @@ def _do_fsqc(argsDict):
# ----------------------------------------------------------------------
# run optional modules: hypothalamus
- if argsDict["hypothalamus"] is True or argsDict["hypothalamus_html"] is True:
-
+ if (
+ argsDict["hypothalamus"] is True
+ or argsDict["hypothalamus_html"] is True
+ ):
# determine status
hypothalamus_status = 0
if argsDict["skip_existing"] is True:
- if len(status_dict)>0:
- if statusDict[subject]["hypothalamus"] == 0 or statusDict[subject]["hypothalamus"] == 3:
+ if len(status_dict) > 0:
+ if (
+ statusDict[subject]["hypothalamus"] == 0
+ or statusDict[subject]["hypothalamus"] == 3
+ ):
hypothalamus_status = 3
- logging.info("Skipping hypothalamus computation for " + subject)
+ logging.info(
+ "Skipping hypothalamus computation for " + subject
+ )
else:
- logging.info("Not skipping hypothalamus computation for " + subject + ": statusfile did not indicate ok or skipped")
+ logging.info(
+ "Not skipping hypothalamus computation for "
+ + subject
+ + ": statusfile did not indicate ok or skipped"
+ )
else:
- logging.info("Not skipping hypothalamus computation for " + subject + ": no statusfile was found")
+ logging.info(
+ "Not skipping hypothalamus computation for "
+ + subject
+ + ": no statusfile was found"
+ )
# check / create subject-specific hypothalamus_outdir
hypothalamus_outdir = os.path.join(
@@ -2367,7 +2522,6 @@ def _do_fsqc(argsDict):
#
if hypothalamus_status == 0:
-
#
try:
# message
@@ -2399,7 +2553,9 @@ def _do_fsqc(argsDict):
raise
# store data
- if HYPOTHALAMUS_SCREENSHOT and (hypothalamus_status == 0 or hypothalamus_status == 3):
+ if HYPOTHALAMUS_SCREENSHOT and (
+ hypothalamus_status == 0 or hypothalamus_status == 3
+ ):
imagesHypothalamusDict[subject] = hypothalamus_screenshot_outfile
else:
imagesHypothalamusDict[subject] = []
@@ -2414,18 +2570,30 @@ def _do_fsqc(argsDict):
# run optional modules: hippocampus
if argsDict["hippocampus"] is True or argsDict["hippocampus_html"] is True:
-
# determine status
hippocampus_status = 0
if argsDict["skip_existing"] is True:
- if len(status_dict)>0:
- if statusDict[subject]["hippocampus"] == 0 or statusDict[subject]["hippocampus"] == 3:
+ if len(status_dict) > 0:
+ if (
+ statusDict[subject]["hippocampus"] == 0
+ or statusDict[subject]["hippocampus"] == 3
+ ):
hippocampus_status = 3
- logging.info("Skipping hippocampus computation for " + subject)
+ logging.info(
+ "Skipping hippocampus computation for " + subject
+ )
else:
- logging.info("Not skipping hippocampus computation for " + subject + ": statusfile did not indicate ok or skipped")
+ logging.info(
+ "Not skipping hippocampus computation for "
+ + subject
+ + ": statusfile did not indicate ok or skipped"
+ )
else:
- logging.info("Not skipping hippocampus computation for " + subject + ": no statusfile was found")
+ logging.info(
+ "Not skipping hippocampus computation for "
+ + subject
+ + ": no statusfile was found"
+ )
# check / create subject-specific hippocampus_outdir
hippocampus_outdir = os.path.join(
@@ -2442,7 +2610,6 @@ def _do_fsqc(argsDict):
#
if hippocampus_status == 0:
-
#
try:
# message
@@ -2477,15 +2644,21 @@ def _do_fsqc(argsDict):
#
except Exception as e:
- logging.error("ERROR: hippocampus module failed for subject " + subject)
+ logging.error(
+ "ERROR: hippocampus module failed for subject " + subject
+ )
logging.error("Reason: " + str(e))
hippocampus_status = 1
if argsDict["exit_on_error"] is True:
raise
# store data
- if HIPPOCAMPUS_SCREENSHOT and (hippocampus_status == 0 or hippocampus_status == 3):
- imagesHippocampusLeftDict[subject] = hippocampus_screenshot_outfile_left
+ if HIPPOCAMPUS_SCREENSHOT and (
+ hippocampus_status == 0 or hippocampus_status == 3
+ ):
+ imagesHippocampusLeftDict[
+ subject
+ ] = hippocampus_screenshot_outfile_left
imagesHippocampusRightDict[
subject
] = hippocampus_screenshot_outfile_right
@@ -2505,7 +2678,11 @@ def _do_fsqc(argsDict):
# 1: Failed
# 2: Not done
# 3: Skipped
- pd.DataFrame(statusDict[subject], index=[subject]).T.to_csv(os.path.join(argsDict["output_dir"], "status", subject, "status.txt"), header=False, sep=":")
+ pd.DataFrame(statusDict[subject], index=[subject]).T.to_csv(
+ os.path.join(argsDict["output_dir"], "status", subject, "status.txt"),
+ header=False,
+ sep=":",
+ )
# --------------------------------------------------------------------------
# message
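For orientation, the status.txt written above is a plain colon-separated list of module:status pairs (0 OK, 1 failed, 2 not done, 3 skipped). A minimal sketch of the round trip that --skip-existing relies on; the path and module names are illustrative, and the read side is shown with plain Python rather than pandas:
    import os
    import pandas as pd
    statuses = {"surfaces": 0, "skullstrip": 1, "fornix": 3}
    outdir = "/tmp/fsqc_status_demo"
    os.makedirs(outdir, exist_ok=True)
    statusfile = os.path.join(outdir, "status.txt")
    # write one "module:status" pair per line, as in the to_csv call above
    pd.DataFrame(statuses, index=["demo"]).T.to_csv(statusfile, header=False, sep=":")
    # on a later run, parse the file and skip a module if it ended OK (0) or skipped (3)
    parsed = {}
    with open(statusfile) as f:
        for line in f:
            module, status = line.strip().split(":")
            parsed[module] = int(status)
    skip_surfaces = parsed["surfaces"] in (0, 3)  # True: surfaces can be skipped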
@@ -2520,10 +2697,8 @@ def _do_fsqc(argsDict):
# run optional modules: outlier detection
if argsDict["no_group"] is False:
-
#
if argsDict["outlier"] is True:
-
# message
logging.info("Running outlier detection")
@@ -2574,8 +2749,12 @@ def _do_fsqc(argsDict):
outlierDict.update(
{
subject: {
- "n_outlier_sample_nonpar": n_outlier_sample_nonpar[subject],
- "n_outlier_sample_param": n_outlier_sample_param[subject],
+ "n_outlier_sample_nonpar": n_outlier_sample_nonpar[
+ subject
+ ],
+ "n_outlier_sample_param": n_outlier_sample_param[
+ subject
+ ],
"n_outlier_norms": n_outlier_norms[subject],
}
}
@@ -2614,11 +2793,9 @@ def _do_fsqc(argsDict):
# generate output
if argsDict["no_group"] is True:
-
logging.info("Not generating group output")
else:
-
#
logging.info("Generating group output")
@@ -2650,39 +2827,97 @@ def _do_fsqc(argsDict):
# check if data needs to be read from disk; note that skip-existing is
# mutually exclusive with group-only; in case of skip-existing, data
# that is already present will have been read earlier already
- if argsDict['group_only'] is True:
+ if argsDict["group_only"] is True:
for subject in argsDict["subjects"]:
# metricsDict may (or not) be populated from previous outlier module
if subject not in metricsDict.keys():
metricsDict.update({subject: {"subject": subject}})
- metricsDict[subject] = metricsDict[subject] | pd.read_csv(os.path.join(argsDict["output_dir"], "metrics", subject, "metrics.csv"), dtype={'Unnamed: 0':str, 'subject':str}).set_index('Unnamed: 0').to_dict(orient="index")[subject]
+ metricsDict[subject] = (
+ metricsDict[subject]
+ | pd.read_csv(
+ os.path.join(
+ argsDict["output_dir"], "metrics", subject, "metrics.csv"
+ ),
+ dtype={"Unnamed: 0": str, "subject": str},
+ )
+ .set_index("Unnamed: 0")
+ .to_dict(orient="index")[subject]
+ )
#
if argsDict["shape"] is True:
- dstMat = pd.read_csv(Path(os.path.join(argsDict["output_dir"], "brainprint", subject) ) / (subject + ".brainprint.asymmetry.csv")).to_dict(orient="index")[0]
+ dstMat = pd.read_csv(
+ Path(
+ os.path.join(argsDict["output_dir"], "brainprint", subject)
+ )
+ / (subject + ".brainprint.asymmetry.csv")
+ ).to_dict(orient="index")[0]
distDict = {subject: dstMat}
metricsDict[subject].update(distDict[subject])
#
- if (argsDict["fornix"] is True or argsDict["fornix_html"] is True) and FORNIX_SHAPE is True:
- fornixShapeOutput = np.array(pd.read_csv(os.path.join(argsDict["output_dir"], "fornix", subject, subject + ".fornix.csv")))[0]
- fornixShapeDict = {subject: dict(zip(map("fornixShapeEV{:0>3}".format, range(FORNIX_N_EIGEN)),fornixShapeOutput))}
+ if (
+ argsDict["fornix"] is True or argsDict["fornix_html"] is True
+ ) and FORNIX_SHAPE is True:
+ fornixShapeOutput = np.array(
+ pd.read_csv(
+ os.path.join(
+ argsDict["output_dir"],
+ "fornix",
+ subject,
+ subject + ".fornix.csv",
+ )
+ )
+ )[0]
+ fornixShapeDict = {
+ subject: dict(
+ zip(
+ map(
+ "fornixShapeEV{:0>3}".format, range(FORNIX_N_EIGEN)
+ ),
+ fornixShapeOutput,
+ )
+ )
+ }
metricsDict[subject].update(fornixShapeDict[subject])
# check if other dictionaries need to be populated
- if argsDict['group_only'] is True:
+ if argsDict["group_only"] is True:
for subject in argsDict["subjects"]:
if argsDict["screenshots_html"] is True:
- imagesScreenshotsDict[subject] = os.path.join(argsDict["output_dir"], "screenshots", subject, subject + ".png")
+ imagesScreenshotsDict[subject] = os.path.join(
+ argsDict["output_dir"], "screenshots", subject, subject + ".png"
+ )
if argsDict["surfaces_html"] is True:
- imagesSurfacesDict[subject] = os.path.join(argsDict["output_dir"], "surfaces", subject)
+ imagesSurfacesDict[subject] = os.path.join(
+ argsDict["output_dir"], "surfaces", subject
+ )
if argsDict["skullstrip_html"] is True:
- imagesSkullstripDict[subject] = os.path.join(argsDict["output_dir"], "skullstrip", subject, subject + ".png")
+ imagesSkullstripDict[subject] = os.path.join(
+ argsDict["output_dir"], "skullstrip", subject, subject + ".png"
+ )
if argsDict["fornix_html"] is True:
- imagesFornixDict[subject] = os.path.join(argsDict["output_dir"], "fornix", subject, "cc.png")
+ imagesFornixDict[subject] = os.path.join(
+ argsDict["output_dir"], "fornix", subject, "cc.png"
+ )
if argsDict["hypothalamus_html"] is True:
- imagesHypothalamusDict[subject] = os.path.join(argsDict["output_dir"], "hypothalamus", subject, "hypothalamus.png")
+ imagesHypothalamusDict[subject] = os.path.join(
+ argsDict["output_dir"],
+ "hypothalamus",
+ subject,
+ "hypothalamus.png",
+ )
if argsDict["hippocampus_html"] is True:
- imagesHippocampusLeftDict[subject] = os.path.join(argsDict["output_dir"], "hippocampus", subject, "hippocampus-left.png")
- imagesHippocampusRightDict[subject] = os.path.join(argsDict["output_dir"], "hippocampus", subject, "hippocampus-right.png")
+ imagesHippocampusLeftDict[subject] = os.path.join(
+ argsDict["output_dir"],
+ "hippocampus",
+ subject,
+ "hippocampus-left.png",
+ )
+ imagesHippocampusRightDict[subject] = os.path.join(
+ argsDict["output_dir"],
+ "hippocampus",
+ subject,
+ "hippocampus-right.png",
+ )
# collect other keys; need to iterate over subjects, because not all of them
# necessarily have the same set of keys
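The merge above uses the dict-union operator to fold each subject's metrics.csv back into metricsDict, with the values read from disk winning on duplicate keys; the operator needs Python 3.9+, which lines up with the requires-python bump in a later patch of this series. A standalone sketch with purely illustrative keys:
    in_memory = {"subject": "demo", "metric_a": 14.2}
    from_disk = {"metric_a": 14.2, "metric_b": 9.8}
    merged = in_memory | from_disk
    # merged == {'subject': 'demo', 'metric_a': 14.2, 'metric_b': 9.8}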
@@ -2690,15 +2925,21 @@ def _do_fsqc(argsDict):
shapeKeys = list()
for subject in distDict.keys():
if len(distDict[subject]) > 0:
- shapeKeys = list(np.unique(shapeKeys + list(distDict[subject].keys())))
+ shapeKeys = list(
+ np.unique(shapeKeys + list(distDict[subject].keys()))
+ )
metricsFieldnames.extend(shapeKeys)
#
- if (argsDict["fornix"] is True or argsDict["fornix_html"] is True) and FORNIX_SHAPE is True:
+ if (
+ argsDict["fornix"] is True or argsDict["fornix_html"] is True
+ ) and FORNIX_SHAPE is True:
fornixKeys = list()
for subject in fornixShapeDict.keys():
if len(fornixShapeDict[subject]) > 0:
- fornixKeys = list(np.unique(fornixKeys + list(fornixShapeDict[subject].keys())))
+ fornixKeys = list(
+ np.unique(fornixKeys + list(fornixShapeDict[subject].keys()))
+ )
metricsFieldnames.extend(sorted(fornixKeys))
#
@@ -2706,7 +2947,9 @@ def _do_fsqc(argsDict):
outlierKeys = list()
for subject in outlierDict.keys():
if len(outlierDict[subject]) > 0:
- outlierKeys = list(np.unique(outlierKeys + list(outlierDict[subject].keys())))
+ outlierKeys = list(
+ np.unique(outlierKeys + list(outlierDict[subject].keys()))
+ )
metricsFieldnames.extend(sorted(outlierKeys))
# determine output file names
@@ -2770,14 +3013,18 @@ def _do_fsqc(argsDict):
+ os.path.join(
"screenshots",
subject,
- os.path.basename(imagesScreenshotsDict[subject]),
+ os.path.basename(
+ imagesScreenshotsDict[subject]
+ ),
)
+ '">'
+ ' '
+ '
Date: Thu, 25 Jul 2024 18:30:56 +0200
Subject: [PATCH 18/25] Applying codespell changes
---
CODE_OF_CONDUCT.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
index 15c9ce1..a8ec1db 100644
--- a/CODE_OF_CONDUCT.md
+++ b/CODE_OF_CONDUCT.md
@@ -6,7 +6,7 @@ In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, sex characteristics, gender identity and expression,
-level of experience, education, socio-economic status, nationality, personal
+level of experience, education, socioeconomic status, nationality, personal
appearance, race, religion, or sexual identity and orientation.
## Our Standards
From fcbba4f5d9a7259eb7999f3ade5b26230cf67c43 Mon Sep 17 00:00:00 2001
From: Martin Reuter
Date: Fri, 6 Sep 2024 12:16:42 +0200
Subject: [PATCH 19/25] Bump action versions and Python to 3.10, drop Python 3.8 builds and add 3.12
---
.github/workflows/build.yml | 7 +++----
.github/workflows/code-style.yml | 8 ++++----
.github/workflows/doc.yml | 8 ++++----
.github/workflows/publish.yml | 8 ++++----
4 files changed, 15 insertions(+), 16 deletions(-)
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 2d25c7e..c8a15fe 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -15,7 +15,7 @@ jobs:
fail-fast: false
matrix:
os: [ubuntu, macos, windows]
- python-version: [3.8, 3.9, "3.10", "3.11"]
+ python-version: ["3.9", "3.10", "3.11", "3.12"]
name: ${{ matrix.os }} - py${{ matrix.python-version }}
runs-on: ${{ matrix.os }}-latest
defaults:
@@ -23,12 +23,11 @@ jobs:
shell: bash
steps:
- name: Checkout repository
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
- name: Setup Python ${{ matrix.python-version }}
- uses: actions/setup-python@v4
+ uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- #architecture: 'x64'
- name: Install dependencies
run: |
python -m pip install --progress-bar off --upgrade pip setuptools wheel
diff --git a/.github/workflows/code-style.yml b/.github/workflows/code-style.yml
index 2bf1b11..662d0b6 100644
--- a/.github/workflows/code-style.yml
+++ b/.github/workflows/code-style.yml
@@ -14,11 +14,11 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
- uses: actions/checkout@v3
- - name: Setup Python 3.9
- uses: actions/setup-python@v4
+ uses: actions/checkout@v4
+ - name: Setup Python 3.10
+ uses: actions/setup-python@v5
with:
- python-version: '3.9'
+ python-version: '3.10'
architecture: 'x64'
- name: Install dependencies
run: |
diff --git a/.github/workflows/doc.yml b/.github/workflows/doc.yml
index 2547d0d..fefda7e 100644
--- a/.github/workflows/doc.yml
+++ b/.github/workflows/doc.yml
@@ -20,10 +20,10 @@ jobs:
uses: actions/checkout@v4
with:
path: ./main
- - name: Setup Python 3.9
- uses: actions/setup-python@v4
+ - name: Setup Python 3.10
+ uses: actions/setup-python@v5
with:
- python-version: 3.9
+ python-version: '3.10'
architecture: 'x64'
- name: Install package
run: |
@@ -56,7 +56,7 @@ jobs:
name: doc-dev
path: ./doc-dev
- name: Deploy dev documentation
- uses: peaceiris/actions-gh-pages@v3
+ uses: peaceiris/actions-gh-pages@v4
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: ./doc-dev
diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml
index 1f81b4a..a301568 100644
--- a/.github/workflows/publish.yml
+++ b/.github/workflows/publish.yml
@@ -10,11 +10,11 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
- uses: actions/checkout@v3
- - name: Setup Python 3.9
- uses: actions/setup-python@v4
+ uses: actions/checkout@v4
+ - name: Setup Python 3.10
+ uses: actions/setup-python@v5
with:
- python-version: '3.9'
+ python-version: '3.10'
architecture: 'x64'
- name: Install dependencies
run: |
From 917d982b4769b39360ea4d6b54aeaccad9907586 Mon Sep 17 00:00:00 2001
From: Martin Reuter
Date: Fri, 6 Sep 2024 12:24:15 +0200
Subject: [PATCH 20/25] add more rules to ruff and drop isort and black
---
.github/workflows/code-style.yml | 7 -----
pyproject.toml | 45 ++++++++++++--------------------
2 files changed, 16 insertions(+), 36 deletions(-)
diff --git a/.github/workflows/code-style.yml b/.github/workflows/code-style.yml
index 662d0b6..5b2e35c 100644
--- a/.github/workflows/code-style.yml
+++ b/.github/workflows/code-style.yml
@@ -26,13 +26,6 @@ jobs:
python -m pip install --progress-bar off .[style]
- name: Run Ruff
run: ruff check .
- - name: Run isort
- uses: isort/isort-action@master
- - name: Run black
- uses: psf/black@stable
- with:
- options: "--check --verbose"
- version: "23.10.1"
- name: Run codespell
uses: codespell-project/actions-codespell@master
with:
diff --git a/pyproject.toml b/pyproject.toml
index 493e34b..f02b54b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -6,7 +6,7 @@ build-backend = 'setuptools.build_meta'
name = 'fsqc'
description = 'Quality control scripts for FastSurfer and FreeSurfer structural MRI data'
license = {file = 'LICENSE'}
-requires-python = '>=3.8'
+requires-python = '>=3.9'
authors = [
{name = 'Kersten Diers', email = 'kersten.diers@dzne.de'},
{name = 'Martin Reuter', email = 'martin.reuter@dzne.de'}
@@ -25,10 +25,10 @@ classifiers = [
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 3 :: Only',
- 'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: 3.11',
+ 'Programming Language :: Python :: 3.12',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Science/Research',
@@ -58,9 +58,7 @@ doc = [
]
style = [
'bibclean',
- 'black',
'codespell',
- 'isort',
'pydocstyle[toml]',
'ruff',
]
@@ -100,29 +98,6 @@ dependencies = {file = 'requirements.txt'}
include = ['fsqc', 'fsqc.cli', 'fsqc.commands', 'fsqc.utils']
exclude = ['docker', 'singularity']
-[tool.black]
-line-length = 88
-target-version = ['py38']
-include = '\.pyi?$'
-extend-exclude = '''
-(
- __pycache__
- | \.github
- | setup.py
-)
-'''
-
-[tool.isort]
-profile = 'black'
-multi_line_output = 3
-line_length = 88
-py_version = 38
-extend_skip_glob = [
- 'setup.py',
- 'data/*',
- 'examples/*',
-]
-
[tool.pydocstyle]
convention = 'numpy'
ignore-decorators = '(copy_doc|property|.*setter|.*getter|pyqtSlot|Slot)'
@@ -133,15 +108,27 @@ add_ignore = 'D100,D104,D107'
[tool.ruff]
line-length = 88
extend-exclude = [
+ ".github",
"doc",
+ "docker",
"setup.py",
+ "singularity",
+]
+
+[tool.ruff.lint]
+# https://docs.astral.sh/ruff/linter/#rule-selection
+select = [
+ "E", # pycodestyle
+ "F", # Pyflakes
+ "UP", # pyupgrade
+ "B", # flake8-bugbear
+ "I", # isort
+ # "SIM", # flake8-simplify
]
-ignore = ["E501"] # line too long (black will do that)
[tool.ruff.per-file-ignores]
"__init__.py" = ["F401"]
-
[tool.pytest.ini_options]
minversion = '6.0'
filterwarnings = []
From 5fc3c0da661f6910aa1792be122cd2e9ff479a6c Mon Sep 17 00:00:00 2001
From: Martin Reuter
Date: Fri, 6 Sep 2024 13:03:07 +0200
Subject: [PATCH 21/25] fix ruff B errors
---
fsqc/checkCCSize.py | 2 +-
fsqc/checkContrast.py | 16 +++--
fsqc/checkRotation.py | 14 ++--
fsqc/checkSNR.py | 13 ++--
fsqc/checkTopology.py | 5 +-
fsqc/createScreenshots.py | 49 ++++++++------
fsqc/evaluateFornixSegmentation.py | 12 ++--
fsqc/evaluateHippocampalSegmentation.py | 11 ++--
fsqc/evaluateHypothalamicSegmentation.py | 11 ++--
fsqc/fsqcMain.py | 83 +++++++++++++++---------
fsqc/fsqcUtils.py | 6 +-
fsqc/utils/_config.py | 6 +-
fsqc/utils/_imports.py | 4 +-
pyproject.toml | 2 +-
14 files changed, 141 insertions(+), 93 deletions(-)
diff --git a/fsqc/checkCCSize.py b/fsqc/checkCCSize.py
index 2ed9df3..cfbc94d 100644
--- a/fsqc/checkCCSize.py
+++ b/fsqc/checkCCSize.py
@@ -70,7 +70,7 @@ def checkCCSize(subjects_dir, subject):
relative_cc = sum_cc / intracranial_volume
logging.info(
- "Relative size of the corpus callosum is " + "{:.4}".format(relative_cc)
+ "Relative size of the corpus callosum is " + f"{relative_cc:.4}"
)
# Return
diff --git a/fsqc/checkContrast.py b/fsqc/checkContrast.py
index 30f917d..f3816dd 100644
--- a/fsqc/checkContrast.py
+++ b/fsqc/checkContrast.py
@@ -49,12 +49,14 @@ def checkContrast(subjects_dir, subject):
# Check if files exist
path_pct_lh = os.path.join(subjects_dir, subject, "surf", "lh.w-g.pct.mgh")
if not os.path.exists(path_pct_lh):
- warnings.warn("WARNING: could not find " + path_pct_lh + ", returning NaNs")
+ warnings.warn("WARNING: could not find " + path_pct_lh + ", returning NaNs",
+ stacklevel = 2)
return numpy.nan
path_pct_rh = os.path.join(subjects_dir, subject, "surf", "rh.w-g.pct.mgh")
if not os.path.exists(path_pct_rh):
- warnings.warn("WARNING: could not find " + path_pct_rh + ", returning NaNs")
+ warnings.warn("WARNING: could not find " + path_pct_rh + ", returning NaNs",
+ stacklevel = 2)
return numpy.nan
path_label_cortex_lh = os.path.join(
@@ -62,7 +64,8 @@ def checkContrast(subjects_dir, subject):
)
if not os.path.exists(path_label_cortex_lh):
warnings.warn(
- "WARNING: could not find " + path_label_cortex_lh + ", returning NaNs"
+ "WARNING: could not find " + path_label_cortex_lh + ", returning NaNs",
+ stacklevel = 2
)
return numpy.nan
@@ -71,7 +74,8 @@ def checkContrast(subjects_dir, subject):
)
if not os.path.exists(path_label_cortex_rh):
warnings.warn(
- "WARNING: could not find " + path_label_cortex_rh + ", returning NaNs"
+ "WARNING: could not find " + path_label_cortex_rh + ", returning NaNs",
+ stacklevel = 2
)
return numpy.nan
@@ -91,14 +95,14 @@ def checkContrast(subjects_dir, subject):
con_lh_std = numpy.std(con_lh)
con_lh_snr = con_lh_mean / con_lh_std
logging.info(
- "WM/GM contrast SNR for the left hemisphere: " + "{:.4}".format(con_lh_snr)
+ "WM/GM contrast SNR for the left hemisphere: " + f"{con_lh_snr:.4}"
)
con_rh_mean = numpy.mean(con_rh)
con_rh_std = numpy.std(con_rh)
con_rh_snr = con_rh_mean / con_rh_std
logging.info(
- "WM/GM contrast SNR for the right hemisphere: " + "{:.4}".format(con_rh_snr)
+ "WM/GM contrast SNR for the right hemisphere: " + f"{con_rh_snr:.4}"
)
# Return
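The stacklevel=2 argument added throughout this patch makes each warning point at the code that called the check function rather than at the warnings.warn line itself (one of the flake8-bugbear checks enabled in the previous patch flags warn calls without an explicit stacklevel). A standalone illustration, not fsqc code:
    import warnings
    def check_file(path):
        # with the default stacklevel=1 the warning is attributed to this line;
        # stacklevel=2 attributes it to the caller of check_file()
        warnings.warn("could not find " + path + ", returning NaNs", stacklevel=2)
        return None
    def run_subject():
        return check_file("/no/such/file.mgh")
    run_subject()  # UserWarning is reported at the "return check_file(...)" line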
diff --git a/fsqc/checkRotation.py b/fsqc/checkRotation.py
index a1f807f..9eb7f48 100644
--- a/fsqc/checkRotation.py
+++ b/fsqc/checkRotation.py
@@ -50,7 +50,8 @@ def checkRotation(subjects_dir, subject):
if importlib.util.find_spec("transforms3d") is None:
warnings.warn(
- "WARNING: 'transforms3d' package required for running this script, returning NaNs."
+ "WARNING: 'transforms3d' package required for running this script, returning NaNs.",
+ stacklevel = 2
)
return np.nan, np.nan, np.nan
else:
@@ -64,12 +65,13 @@ def checkRotation(subjects_dir, subject):
warnings.warn(
"WARNING: could not open "
+ os.path.join(subjects_dir, subject, "mri", "transforms", "talairach.lta")
- + ", returning NaNs."
+ + ", returning NaNs.",
+ stacklevel = 2
)
return np.nan, np.nan, np.nan
with open(
- os.path.join(subjects_dir, subject, "mri", "transforms", "talairach.lta"), "r"
+ os.path.join(subjects_dir, subject, "mri", "transforms", "talairach.lta")
) as datafile:
lines = datafile.readlines()
@@ -102,11 +104,11 @@ def checkRotation(subjects_dir, subject):
logging.info(
"Found Talairach rotation angles: x = "
- + "{:.3}".format(rot_x)
+ + f"{rot_x:.3}"
+ ", y = "
- + "{:.3}".format(rot_y)
+ + f"{rot_y:.3}"
+ ", z = "
- + "{:.3}".format(rot_z)
+ + f"{rot_z:.3}"
+ " radians.",
)
diff --git a/fsqc/checkSNR.py b/fsqc/checkSNR.py
index 703d1cd..134f25f 100644
--- a/fsqc/checkSNR.py
+++ b/fsqc/checkSNR.py
@@ -77,7 +77,8 @@ def checkSNR(
norm_data = norm.get_fdata()
else:
warnings.warn(
- "WARNING: could not open " + path_reference_image + ", returning NaNs."
+ "WARNING: could not open " + path_reference_image + ", returning NaNs.",
+ stacklevel = 2
)
return np.nan, np.nan
@@ -86,7 +87,8 @@ def checkSNR(
aseg = nib.load(path_aseg)
data_aseg = aseg.get_fdata()
else:
- warnings.warn("WARNING: could not open " + path_aseg + ", returning NaNs.")
+ warnings.warn("WARNING: could not open " + path_aseg + ", returning NaNs.",
+ stacklevel = 2)
return np.nan, np.nan
path_aparc_aseg = os.path.join(subjects_dir, subject, "mri", aparc_image)
@@ -95,7 +97,8 @@ def checkSNR(
data_aparc_aseg = inseg.get_fdata()
else:
warnings.warn(
- "WARNING: could not open " + path_aparc_aseg + ", returning NaNs."
+ "WARNING: could not open " + path_aparc_aseg + ", returning NaNs.",
+ stacklevel = 2
)
return np.nan, np.nan
@@ -122,7 +125,7 @@ def checkSNR(
signal_wm_mean = np.mean(signal_wm)
signal_wm_std = np.std(signal_wm)
wm_snr = signal_wm_mean / signal_wm_std
- logging.info("White matter signal to noise ratio: " + "{:.4}".format(wm_snr))
+ logging.info("White matter signal to noise ratio: " + f"{wm_snr:.4}")
# Process gray matter image
@@ -143,7 +146,7 @@ def checkSNR(
signal_gm_mean = np.mean(signal_gm)
signal_gm_std = np.std(signal_gm)
gm_snr = signal_gm_mean / signal_gm_std
- logging.info("Gray matter signal to noise ratio: " + "{:.4}".format(gm_snr))
+ logging.info("Gray matter signal to noise ratio: " + f"{gm_snr:.4}")
# Return
return wm_snr, gm_snr
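The str.format-to-f-string rewrites in this file are purely cosmetic; the format spec is unchanged, so the output is identical. A quick check (the value is made up):
    wm_snr = 17.34567
    assert "{:.4}".format(wm_snr) == f"{wm_snr:.4}" == "17.35"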
diff --git a/fsqc/checkTopology.py b/fsqc/checkTopology.py
index ebf715a..b74d8dd 100644
--- a/fsqc/checkTopology.py
+++ b/fsqc/checkTopology.py
@@ -61,10 +61,11 @@ def checkTopology(subjects_dir, subject):
path_log_file = os.path.join(subjects_dir, subject, "scripts", "recon-all.log")
if os.path.exists(path_log_file):
- with open(path_log_file, "r") as logfile:
+ with open(path_log_file) as logfile:
lines_log_file = logfile.read().splitlines()
else:
- warnings.warn("WARNING: could not find " + path_log_file + ", returning NaNs.")
+ warnings.warn("WARNING: could not find " + path_log_file + ", returning NaNs.",
+ stacklevel = 2)
return np.nan, np.nan, np.nan, np.nan, np.nan, np.nan
# Initialize
diff --git a/fsqc/createScreenshots.py b/fsqc/createScreenshots.py
index a338bcc..63b253b 100644
--- a/fsqc/createScreenshots.py
+++ b/fsqc/createScreenshots.py
@@ -12,16 +12,16 @@ def createScreenshots(
OUTFILE,
INTERACTIVE=True,
LAYOUT=None,
- BASE=["default"],
- OVERLAY=["default"],
+ BASE="default",
+ OVERLAY="default",
LABELS=None,
- SURF=["default"],
- SURFCOLOR=["default"],
- VIEWS=["default"],
+ SURF="default",
+ SURFCOLOR="default",
+ VIEWS="default",
XLIM=None,
YLIM=None,
BINARIZE=False,
- ORIENTATION=["radiological"],
+ ORIENTATION=None,
):
"""
Function to create screenshots.
@@ -39,17 +39,19 @@ def createScreenshots(
LAYOUT : str, optional
The layout, default is None.
BASE : list, optional
- The base, default is ["default"].
+ The base, default is "default".
+ Load norm.mgz as default.
OVERLAY : list, optional
- The overlay, default is ["default"].
+ The overlay, default is "default".
+ Load aseg.mgz as default.
LABELS : None or str, optional
The labels, default is None.
SURF : list, optional
- The surface, default is ["default"].
+ The surface, default is "default".
SURFCOLOR : list, optional
- The surface color, default is ["default"].
+ The surface color, default is "default".
VIEWS : list, optional
- The views, default is ["default"].
+ The views, default is "default".
XLIM : None or list, optional
The x limits, default is None.
YLIM : None or list, optional
@@ -57,13 +59,14 @@ def createScreenshots(
BINARIZE : bool, optional
Flag for binarization, default is False.
ORIENTATION : list, optional
- The orientation, default is ["radiological"].
+ The orientation, default is None.
+        Will use ["radiological"] by default.
Notes
-----
- BASE, VIEWS must be lists, can be ["default"].
+ BASE, VIEWS must be lists, can be "default".
- OVERLAY, SURF, SURFCOLOR can be lists or None, can be ["default"].
+ OVERLAY, SURF, SURFCOLOR can be lists, None, or "default".
XLIM, YLIM can be lists of list two-element numeric lists or None; if given,
length must match length of VIEWS. x and y refer to final image dimensions,
@@ -92,6 +95,9 @@ def computeLayout(n):
import nibabel as nb
import numpy as np
+ if ORIENTATION is None:
+ ORIENTATION = ["radiological"]
+
if not INTERACTIVE:
matplotlib.use("Agg")
@@ -115,14 +121,14 @@ def computeLayout(n):
# -----------------------------------------------------------------------------
# import image data
- if BASE == ["default"]:
+ if BASE == "default":
norm = nb.load(os.path.join(SUBJECTS_DIR, SUBJECT, "mri", "norm.mgz"))
else:
norm = nb.load(BASE[0])
if OVERLAY is None:
aseg = None
- elif OVERLAY == ["default"]:
+ elif OVERLAY == "default":
aseg = nb.load(os.path.join(SUBJECTS_DIR, SUBJECT, "mri", "aseg.mgz"))
else:
aseg = nb.load(OVERLAY[0])
@@ -130,7 +136,7 @@ def computeLayout(n):
# -----------------------------------------------------------------------------
# import surface data
- if SURF == ["default"]:
+ if SURF == "default":
surflist = [
os.path.join(SUBJECTS_DIR, SUBJECT, "surf", "lh.white"),
os.path.join(SUBJECTS_DIR, SUBJECT, "surf", "rh.white"),
@@ -146,9 +152,9 @@ def computeLayout(n):
for i in range(len(surflist)):
surf.append(nb.freesurfer.io.read_geometry(surflist[i], read_metadata=True))
- if SURFCOLOR == ["default"] and SURF == ["default"]:
+ if SURFCOLOR == "default" and SURF == "default":
surfcolor = ["yellow", "yellow", "red", "red"]
- elif SURFCOLOR == ["default"] and SURF != ["default"]:
+ elif SURFCOLOR == "default" and SURF != "default":
surfcolor = ["yellow"] * len(surf)
else:
surfcolor = SURFCOLOR
@@ -211,7 +217,7 @@ def computeLayout(n):
# -----------------------------------------------------------------------------
# determine VIEWS
- if VIEWS == ["default"]:
+ if VIEWS == "default":
CutsRRAS = [("x", -10), ("x", 10), ("y", 0), ("z", 0)]
else:
CutsRRAS = VIEWS
@@ -678,7 +684,8 @@ def computeLayout(n):
sortIdx = np.delete(sortIdx, findIdx[0, 0])
elif findIdx.shape[0] > 1:
warnings.warn(
- "WARNING: a problem occurred with the surface overlays"
+ "WARNING: a problem occurred with the surface overlays",
+ stacklevel = 2
)
# now final plot
axs[axsx, axsy].plot(
diff --git a/fsqc/evaluateFornixSegmentation.py b/fsqc/evaluateFornixSegmentation.py
index ce4ca7a..28c0494 100644
--- a/fsqc/evaluateFornixSegmentation.py
+++ b/fsqc/evaluateFornixSegmentation.py
@@ -10,7 +10,7 @@ def evaluateFornixSegmentation(
SUBJECTS_DIR,
OUTPUT_DIR,
CREATE_SCREENSHOT=True,
- SCREENSHOTS_OUTFILE=[],
+ SCREENSHOTS_OUTFILE=None,
RUN_SHAPEDNA=True,
N_EIGEN=15,
WRITE_EIGEN=True,
@@ -41,7 +41,7 @@ def evaluateFornixSegmentation(
The output directory.
CREATE_SCREENSHOT : bool, optional (default: True)
Whether to create screenshots.
- SCREENSHOTS_OUTFILE : str or list, optional (default: [])
+ SCREENSHOTS_OUTFILE : str or list, optional (default: None)
File or list of files for screenshots.
RUN_SHAPEDNA : bool, optional (default: True)
Whether to run shape analysis.
@@ -80,7 +80,7 @@ def evaluateFornixSegmentation(
warnings.warn(
"WARNING: could not find "
+ os.path.join(SUBJECTS_DIR, SUBJECT, "mri", "transforms", "cc_up.lta")
- + ", returning NaNs"
+ + ", returning NaNs", stacklevel = 2
)
out = np.empty(N_EIGEN)
@@ -92,7 +92,7 @@ def evaluateFornixSegmentation(
warnings.warn(
"WARNING: could not find "
+ os.path.join(SUBJECTS_DIR, SUBJECT, "mri", "aseg.mgz")
- + ", returning NaNs"
+ + ", returning NaNs", stacklevel = 2
)
out = np.empty(N_EIGEN)
@@ -104,7 +104,7 @@ def evaluateFornixSegmentation(
warnings.warn(
"WARNING: could not find "
+ os.path.join(SUBJECTS_DIR, SUBJECT, "mri", "norm.mgz")
- + ", returning NaNs"
+ + ", returning NaNs", stacklevel = 2
)
out = np.empty(N_EIGEN)
@@ -112,7 +112,7 @@ def evaluateFornixSegmentation(
return out
- if not SCREENSHOTS_OUTFILE:
+ if SCREENSHOTS_OUTFILE is None:
SCREENSHOTS_OUTFILE = os.path.join(OUTPUT_DIR, "cc.png")
# --------------------------------------------------------------------------
diff --git a/fsqc/evaluateHippocampalSegmentation.py b/fsqc/evaluateHippocampalSegmentation.py
index c1dc10b..914842b 100644
--- a/fsqc/evaluateHippocampalSegmentation.py
+++ b/fsqc/evaluateHippocampalSegmentation.py
@@ -10,8 +10,8 @@ def evaluateHippocampalSegmentation(
SUBJECTS_DIR,
OUTPUT_DIR,
CREATE_SCREENSHOT=True,
- SCREENSHOTS_OUTFILE=[],
- SCREENSHOTS_ORIENTATION=["radiological"],
+ SCREENSHOTS_OUTFILE=None,
+ SCREENSHOTS_ORIENTATION=None,
HEMI="lh",
LABEL="T1.v21",
):
@@ -36,7 +36,7 @@ def evaluateHippocampalSegmentation(
The output directory.
CREATE_SCREENSHOT : bool, optional, default: True
Whether to create screenshots.
- SCREENSHOTS_OUTFILE : str or list, optional, default: []
+ SCREENSHOTS_OUTFILE : str or list, optional, default: None
File or list of files for screenshots.
SCREENSHOTS_ORIENTATION : str or list, optional, default: ["radiological"]
Orientation or list of orientations for screenshots.
@@ -68,6 +68,9 @@ def evaluateHippocampalSegmentation(
from fsqc.createScreenshots import createScreenshots
from fsqc.fsqcUtils import binarizeImage
+ if SCREENSHOTS_ORIENTATION is None:
+ SCREENSHOTS_ORIENTATION = ["radiological"]
+
# --------------------------------------------------------------------------
# check files
@@ -101,7 +104,7 @@ def evaluateHippocampalSegmentation(
raise ValueError("File not found")
- if not SCREENSHOTS_OUTFILE:
+ if SCREENSHOTS_OUTFILE is None:
SCREENSHOTS_OUTFILE = os.path.join(OUTPUT_DIR, "hippocampus.png")
# --------------------------------------------------------------------------
diff --git a/fsqc/evaluateHypothalamicSegmentation.py b/fsqc/evaluateHypothalamicSegmentation.py
index a50b01e..533b6c5 100644
--- a/fsqc/evaluateHypothalamicSegmentation.py
+++ b/fsqc/evaluateHypothalamicSegmentation.py
@@ -11,8 +11,8 @@ def evaluateHypothalamicSegmentation(
SUBJECTS_DIR,
OUTPUT_DIR,
CREATE_SCREENSHOT=True,
- SCREENSHOTS_OUTFILE=[],
- SCREENSHOTS_ORIENTATION=["radiological"],
+ SCREENSHOTS_OUTFILE=None,
+ SCREENSHOTS_ORIENTATION=None,
):
"""
Evaluate potential missegmentation of the hypothalamus.
@@ -35,7 +35,7 @@ def evaluateHypothalamicSegmentation(
The output directory.
CREATE_SCREENSHOT : bool, optional, default: True
Whether to create screenshots.
- SCREENSHOTS_OUTFILE : str or list, optional, default: []
+ SCREENSHOTS_OUTFILE : str or list, optional, default: None
File or list of files for screenshots.
SCREENSHOTS_ORIENTATION : str or list, optional, default: ["radiological"]
Orientation or list of orientations for screenshots.
@@ -63,6 +63,9 @@ def evaluateHypothalamicSegmentation(
from fsqc.createScreenshots import createScreenshots
from fsqc.fsqcUtils import binarizeImage
+ if SCREENSHOTS_ORIENTATION is None:
+ SCREENSHOTS_ORIENTATION = ["radiological"]
+
# --------------------------------------------------------------------------
# check files
@@ -88,7 +91,7 @@ def evaluateHypothalamicSegmentation(
raise ValueError("File not found")
- if not SCREENSHOTS_OUTFILE:
+ if SCREENSHOTS_OUTFILE is None:
SCREENSHOTS_OUTFILE = os.path.join(OUTPUT_DIR, "hypothalamus.png")
# --------------------------------------------------------------------------
diff --git a/fsqc/fsqcMain.py b/fsqc/fsqcMain.py
index 95fe631..ce9c161 100644
--- a/fsqc/fsqcMain.py
+++ b/fsqc/fsqcMain.py
@@ -1197,7 +1197,8 @@ def _check_arguments(argsDict):
argsDict["subjects_dir"], subject, "stats", "aseg.stats"
)
if not os.path.isfile(path_check):
- warnings.warn("Could not find " + path_check + " for subject " + subject)
+ warnings.warn("Could not find " + path_check + " for subject " + subject,
+ stacklevel = 2)
subjects_to_remove.extend([subject])
# -files: surf/[lr]h.w-g.pct.mgh, label/[lr]h.cortex.label
@@ -1205,28 +1206,32 @@ def _check_arguments(argsDict):
argsDict["subjects_dir"], subject, "surf", "lh.w-g.pct.mgh"
)
if not os.path.isfile(path_check):
- warnings.warn("Could not find " + path_check + " for subject " + subject)
+ warnings.warn("Could not find " + path_check + " for subject " + subject,
+ stacklevel = 2)
subjects_to_remove.extend([subject])
path_check = os.path.join(
argsDict["subjects_dir"], subject, "surf", "rh.w-g.pct.mgh"
)
if not os.path.isfile(path_check):
- warnings.warn("Could not find " + path_check + " for subject " + subject)
+ warnings.warn("Could not find " + path_check + " for subject " + subject,
+ stacklevel = 2)
subjects_to_remove.extend([subject])
path_check = os.path.join(
argsDict["subjects_dir"], subject, "label", "lh.cortex.label"
)
if not os.path.isfile(path_check):
- warnings.warn("Could not find " + path_check + " for subject " + subject)
+ warnings.warn("Could not find " + path_check + " for subject " + subject,
+ stacklevel = 2)
subjects_to_remove.extend([subject])
path_check = os.path.join(
argsDict["subjects_dir"], subject, "label", "rh.cortex.label"
)
if not os.path.isfile(path_check):
- warnings.warn("Could not find " + path_check + " for subject " + subject)
+ warnings.warn("Could not find " + path_check + " for subject " + subject,
+ stacklevel = 2)
subjects_to_remove.extend([subject])
# -files: mri/transforms/talairach.lta
@@ -1234,19 +1239,22 @@ def _check_arguments(argsDict):
argsDict["subjects_dir"], subject, "mri", "transforms", "talairach.lta"
)
if not os.path.isfile(path_check):
- warnings.warn("Could not find " + path_check + " for subject " + subject)
+ warnings.warn("Could not find " + path_check + " for subject " + subject,
+ stacklevel = 2)
subjects_to_remove.extend([subject])
# -files: mri/norm.mgz, mri/aseg.mgz, mri/aparc+aseg.mgz for FreeSurfer
# -files: mri/norm.mgz, mri/aseg.mgz, mri/aparc.DKTatlas+aseg.deep.mgz for FastSurfer
path_check = os.path.join(argsDict["subjects_dir"], subject, "mri", "norm.mgz")
if not os.path.isfile(path_check):
- warnings.warn("Could not find " + path_check + " for subject " + subject)
+ warnings.warn("Could not find " + path_check + " for subject " + subject,
+ stacklevel = 2)
subjects_to_remove.extend([subject])
path_check = os.path.join(argsDict["subjects_dir"], subject, "mri", "aseg.mgz")
if not os.path.isfile(path_check):
- warnings.warn("Could not find " + path_check + " for subject " + subject)
+ warnings.warn("Could not find " + path_check + " for subject " + subject,
+ stacklevel = 2)
subjects_to_remove.extend([subject])
if argsDict["fastsurfer"] is True:
@@ -1258,7 +1266,8 @@ def _check_arguments(argsDict):
argsDict["subjects_dir"], subject, "mri", "aparc+aseg.mgz"
)
if not os.path.isfile(path_check):
- warnings.warn("Could not find " + path_check + " for subject " + subject)
+ warnings.warn("Could not find " + path_check + " for subject " + subject,
+ stacklevel = 2)
subjects_to_remove.extend([subject])
# -files: scripts/recon-all.log
@@ -1266,7 +1275,8 @@ def _check_arguments(argsDict):
argsDict["subjects_dir"], subject, "scripts", "recon-all.log"
)
if not os.path.isfile(path_check):
- warnings.warn("Could not find " + path_check + " for subject " + subject)
+ warnings.warn("Could not find " + path_check + " for subject " + subject,
+ stacklevel = 2)
subjects_to_remove.extend([subject])
# check screenshots
@@ -1279,7 +1289,8 @@ def _check_arguments(argsDict):
)
if not os.path.isfile(path_check):
warnings.warn(
- "Could not find " + path_check + " for subject " + subject
+ "Could not find " + path_check + " for subject " + subject,
+ stacklevel = 2
)
subjects_to_remove.extend([subject])
@@ -1288,7 +1299,8 @@ def _check_arguments(argsDict):
)
if not os.path.isfile(path_check):
warnings.warn(
- "Could not find " + path_check + " for subject " + subject
+ "Could not find " + path_check + " for subject " + subject,
+ stacklevel = 2
)
subjects_to_remove.extend([subject])
@@ -1297,7 +1309,8 @@ def _check_arguments(argsDict):
)
if not os.path.isfile(path_check):
warnings.warn(
- "Could not find " + path_check + " for subject " + subject
+ "Could not find " + path_check + " for subject " + subject,
+ stacklevel = 2
)
subjects_to_remove.extend([subject])
@@ -1306,7 +1319,8 @@ def _check_arguments(argsDict):
)
if not os.path.isfile(path_check):
warnings.warn(
- "Could not find " + path_check + " for subject " + subject
+ "Could not find " + path_check + " for subject " + subject,
+ stacklevel = 2
)
subjects_to_remove.extend([subject])
@@ -1319,7 +1333,8 @@ def _check_arguments(argsDict):
)
if not os.path.isfile(path_check):
warnings.warn(
- "Could not find " + path_check + " for subject " + subject
+ "Could not find " + path_check + " for subject " + subject,
+ stacklevel = 2
)
subjects_to_remove.extend([subject])
@@ -1328,7 +1343,8 @@ def _check_arguments(argsDict):
)
if not os.path.isfile(path_check):
warnings.warn(
- "Could not find " + path_check + " for subject " + subject
+ "Could not find " + path_check + " for subject " + subject,
+ stacklevel = 2
)
subjects_to_remove.extend([subject])
@@ -1337,7 +1353,8 @@ def _check_arguments(argsDict):
)
if not os.path.isfile(path_check):
warnings.warn(
- "Could not find " + path_check + " for subject " + subject
+ "Could not find " + path_check + " for subject " + subject,
+ stacklevel = 2
)
subjects_to_remove.extend([subject])
@@ -1346,7 +1363,8 @@ def _check_arguments(argsDict):
)
if not os.path.isfile(path_check):
warnings.warn(
- "Could not find " + path_check + " for subject " + subject
+ "Could not find " + path_check + " for subject " + subject,
+ stacklevel = 2
)
subjects_to_remove.extend([subject])
@@ -1363,7 +1381,8 @@ def _check_arguments(argsDict):
)
if not os.path.isfile(path_check):
warnings.warn(
- "Could not find " + path_check + " for subject " + subject
+ "Could not find " + path_check + " for subject " + subject,
+ stacklevel = 2
)
subjects_to_remove.extend([subject])
@@ -1380,7 +1399,8 @@ def _check_arguments(argsDict):
)
if not os.path.isfile(path_check):
warnings.warn(
- "Could not find " + path_check + " for subject " + subject
+ "Could not find " + path_check + " for subject " + subject,
+ stacklevel = 2
)
subjects_to_remove.extend([subject])
@@ -1406,7 +1426,8 @@ def _check_arguments(argsDict):
)
if not os.path.isfile(path_check):
warnings.warn(
- "Could not find " + path_check + " for subject " + subject
+ "Could not find " + path_check + " for subject " + subject,
+ stacklevel = 2
)
subjects_to_remove.extend([subject])
@@ -1415,7 +1436,8 @@ def _check_arguments(argsDict):
)
if not os.path.isfile(path_check):
warnings.warn(
- "Could not find " + path_check + " for subject " + subject
+ "Could not find " + path_check + " for subject " + subject,
+ stacklevel = 2
)
subjects_to_remove.extend([subject])
@@ -1427,7 +1449,8 @@ def _check_arguments(argsDict):
)
if not os.path.isfile(path_check):
warnings.warn(
- "Could not find " + path_check + " for subject " + subject
+ "Could not find " + path_check + " for subject " + subject,
+ stacklevel = 2
)
subjects_to_remove.extend([subject])
@@ -1453,13 +1476,9 @@ def _check_packages():
"""
import importlib.util
- import sys
import packaging.version
- if sys.version_info <= (3, 8):
- raise RuntimeError("ERROR: Python version must be 3.8 or greater\n")
-
if importlib.util.find_spec("skimage") is None:
raise ImportError(
"ERROR: the 'skimage' package is required for running this script, please install.\n"
@@ -3523,10 +3542,10 @@ def run_fsqc(
screenshots_surf="default",
screenshots_views="default",
screenshots_layout=None,
- screenshots_orientation=["radiological"],
+ screenshots_orientation=None,
surfaces=False,
surfaces_html=False,
- surfaces_views=["left", "right", "superior", "inferior"],
+ surfaces_views=None,
skullstrip=False,
skullstrip_html=False,
fornix=False,
@@ -3643,6 +3662,12 @@ def run_fsqc(
dict
A dictionary of input arguments and processing directives.
"""
+ # set defauls here to avoid mutable datastructures for default argument B006
+ if screenshots_orientation is None:
+ screenshots_orientation = ["radiological"]
+ if surfaces_views is None:
+ surfaces_views = ["left", "right", "superior", "inferior"]
+
# create argsDict
if argsDict is None and (subjects_dir is None or output_dir is None):
raise ValueError(
diff --git a/fsqc/fsqcUtils.py b/fsqc/fsqcUtils.py
index c8d1ad0..89cca3b 100644
--- a/fsqc/fsqcUtils.py
+++ b/fsqc/fsqcUtils.py
@@ -36,7 +36,7 @@ def importMGH(filename):
logging.captureWarnings(True)
if not os.path.exists(filename):
- warnings.warn("WARNING: could not find " + filename + ", returning NaNs")
+ warnings.warn("WARNING: could not find " + filename + ", returning NaNs", stacklevel = 2)
return numpy.nan
fp = open(filename, "rb")
@@ -70,7 +70,7 @@ def importMGH(filename):
unused_space_size = unused_space_size - USED_SPACE_SIZE
- for i in range(unused_space_size):
+ for _i in range(unused_space_size):
struct.unpack(">b", fp.read(charsize))[0]
nv = ndim1 * ndim2 * ndim3 * nframes
@@ -220,7 +220,7 @@ def readLTA(file):
import numpy as np
- with open(file, "r") as f:
+ with open(file) as f:
lta = f.readlines()
d = dict()
i = 0
diff --git a/fsqc/utils/_config.py b/fsqc/utils/_config.py
index d18a31a..3b2b5a3 100644
--- a/fsqc/utils/_config.py
+++ b/fsqc/utils/_config.py
@@ -3,7 +3,7 @@
import sys
from functools import partial
from importlib.metadata import requires, version
-from typing import IO, Callable, List, Optional
+from typing import IO, Callable, Optional
import psutil
@@ -68,7 +68,7 @@ def sys_info(fid: Optional[IO] = None, developer: bool = False):
_list_dependencies_info(out, ljust, dependencies)
-def _list_dependencies_info(out: Callable, ljust: int, dependencies: List[str]):
+def _list_dependencies_info(out: Callable, ljust: int, dependencies: list[str]):
"""List dependencies names and versions.
Parameters
@@ -77,7 +77,7 @@ def _list_dependencies_info(out: Callable, ljust: int, dependencies: List[str]):
output function
ljust : int
length of returned string
- dependencies : List[str]
+ dependencies : list[str]
list of dependencies
"""
diff --git a/fsqc/utils/_imports.py b/fsqc/utils/_imports.py
index 4bfd727..60856cc 100644
--- a/fsqc/utils/_imports.py
+++ b/fsqc/utils/_imports.py
@@ -51,12 +51,12 @@ def import_optional_dependency(
try:
module = importlib.import_module(name)
- except ImportError:
+ except ImportError as err:
if raise_error:
raise ImportError(
f"Missing optional dependency '{install_name}'. {extra} "
f"Use pip or conda to install {install_name}."
- )
+ ) from err
else:
return None
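The change above re-raises with "from err" so the original ImportError is kept as the new exception's __cause__ and both appear in the traceback. A minimal standalone sketch (the module name is deliberately bogus):
    def load_optional(name):
        try:
            return __import__(name)
        except ImportError as err:
            raise ImportError(f"Missing optional dependency '{name}'.") from err
    try:
        load_optional("definitely_not_installed_pkg")
    except ImportError as exc:
        assert exc.__cause__ is not None  # the original error is chained, not lost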
diff --git a/pyproject.toml b/pyproject.toml
index f02b54b..589533c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -126,7 +126,7 @@ select = [
# "SIM", # flake8-simplify
]
-[tool.ruff.per-file-ignores]
+[tool.ruff.lint.per-file-ignores]
"__init__.py" = ["F401"]
[tool.pytest.ini_options]
From 5354516bca8186cdf59aeff8f2ac4ff9d9f7d1f9 Mon Sep 17 00:00:00 2001
From: Martin Reuter
Date: Fri, 6 Sep 2024 13:05:03 +0200
Subject: [PATCH 22/25] ignore line length for now
---
pyproject.toml | 2 ++
1 file changed, 2 insertions(+)
diff --git a/pyproject.toml b/pyproject.toml
index 589533c..289aef8 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -114,6 +114,8 @@ extend-exclude = [
"setup.py",
"singularity",
]
+ignore = ["E501"] # line too long (should be enforced soon)
+
[tool.ruff.lint]
# https://docs.astral.sh/ruff/linter/#rule-selection
From 7232038be14f838c54d169cecd4d1efda92c6e99 Mon Sep 17 00:00:00 2001
From: Martin Reuter
Date: Fri, 6 Sep 2024 13:07:21 +0200
Subject: [PATCH 23/25] fix typo
---
fsqc/fsqcMain.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/fsqc/fsqcMain.py b/fsqc/fsqcMain.py
index ce9c161..74f6c9d 100644
--- a/fsqc/fsqcMain.py
+++ b/fsqc/fsqcMain.py
@@ -3662,7 +3662,7 @@ def run_fsqc(
dict
A dictionary of input arguments and processing directives.
"""
- # set defauls here to avoid mutable datastructures for default argument B006
+ # set defaults here to avoid mutable datastructures for default argument B006
if screenshots_orientation is None:
screenshots_orientation = ["radiological"]
if surfaces_views is None:
From 9abb85bebb42f88cda31138e98acb1ae2f8d188a Mon Sep 17 00:00:00 2001
From: diersk
Date: Tue, 15 Oct 2024 14:58:08 +0200
Subject: [PATCH 24/25] Fixes mutable default argument issues
---
fsqc/createScreenshots.py | 53 +++---
fsqc/evaluateFornixSegmentation.py | 4 +-
fsqc/evaluateHippocampalSegmentation.py | 25 ++-
fsqc/evaluateHypothalamicSegmentation.py | 19 +--
fsqc/fsqcMain.py | 201 ++++++++++++-----------
5 files changed, 147 insertions(+), 155 deletions(-)
diff --git a/fsqc/createScreenshots.py b/fsqc/createScreenshots.py
index 63b253b..7a5a409 100644
--- a/fsqc/createScreenshots.py
+++ b/fsqc/createScreenshots.py
@@ -11,7 +11,7 @@ def createScreenshots(
SUBJECTS_DIR,
OUTFILE,
INTERACTIVE=True,
- LAYOUT=None,
+ LAYOUT="default",
BASE="default",
OVERLAY="default",
LABELS=None,
@@ -21,7 +21,7 @@ def createScreenshots(
XLIM=None,
YLIM=None,
BINARIZE=False,
- ORIENTATION=None,
+ ORIENTATION="radiological",
):
"""
Function to create screenshots.
@@ -36,20 +36,23 @@ def createScreenshots(
The output file path.
INTERACTIVE : bool, optional
Flag for interactive mode, default is True.
- LAYOUT : str, optional
- The layout, default is None.
- BASE : list, optional
+ LAYOUT : list, optional
+ The layout, default is "default".
+ BASE : str, optional
The base, default is "default".
Load norm.mgz as default.
- OVERLAY : list, optional
+ OVERLAY : str, optional
The overlay, default is "default".
Load aseg.mgz as default.
+ Can be None.
LABELS : None or str, optional
The labels, default is None.
SURF : list, optional
The surface, default is "default".
+ Can be None.
SURFCOLOR : list, optional
The surface color, default is "default".
+ Can be None.
VIEWS : list, optional
The views, default is "default".
XLIM : None or list, optional
@@ -58,15 +61,14 @@ def createScreenshots(
The y limits, default is None.
BINARIZE : bool, optional
Flag for binarization, default is False.
- ORIENTATION : list, optional
- The orientation, default is None.
-        Will use ["radiological"] by default.
+ ORIENTATION : str, optional
+ The orientation, default is "radiological".
Notes
-----
- BASE, VIEWS must be lists, can be "default".
+ LAYOUT, VIEWS can be lists or "default".
- OVERLAY, SURF, SURFCOLOR can be lists, None, or "default".
+ SURF, SURFCOLOR can be lists, None, or "default".
XLIM, YLIM can be lists of list two-element numeric lists or None; if given,
length must match length of VIEWS. x and y refer to final image dimensions,
@@ -95,9 +97,6 @@ def computeLayout(n):
import nibabel as nb
import numpy as np
- if ORIENTATION is None:
- ORIENTATION = ["radiological"]
-
if not INTERACTIVE:
matplotlib.use("Agg")
@@ -124,14 +123,14 @@ def computeLayout(n):
if BASE == "default":
norm = nb.load(os.path.join(SUBJECTS_DIR, SUBJECT, "mri", "norm.mgz"))
else:
- norm = nb.load(BASE[0])
+ norm = nb.load(BASE)
if OVERLAY is None:
aseg = None
elif OVERLAY == "default":
aseg = nb.load(os.path.join(SUBJECTS_DIR, SUBJECT, "mri", "aseg.mgz"))
else:
- aseg = nb.load(OVERLAY[0])
+ aseg = nb.load(OVERLAY)
# -----------------------------------------------------------------------------
# import surface data
@@ -146,19 +145,21 @@ def computeLayout(n):
else:
surflist = SURF
- surf = list()
-
- if surflist is not None:
- for i in range(len(surflist)):
- surf.append(nb.freesurfer.io.read_geometry(surflist[i], read_metadata=True))
-
- if SURFCOLOR == "default" and SURF == "default":
+ if SURF is None:
+ surfcolor = None
+ elif SURFCOLOR == "default" and SURF == "default":
surfcolor = ["yellow", "yellow", "red", "red"]
elif SURFCOLOR == "default" and SURF != "default":
- surfcolor = ["yellow"] * len(surf)
+ surfcolor = ["yellow"] * len(surflist)
else:
surfcolor = SURFCOLOR
+ surf = list()
+
+ if surflist is not None and surfcolor is not None:
+ for i in range(len(surflist)):
+ surf.append(nb.freesurfer.io.read_geometry(surflist[i], read_metadata=True))
+
# -----------------------------------------------------------------------------
# import colortable, compute auxiliary variables, and transform to matplotlib
# colortable
@@ -378,7 +379,7 @@ def computeLayout(n):
plt.ioff()
# compute layout
- if LAYOUT is None:
+ if LAYOUT == "default":
myLayout = computeLayout(len(CutsRRAS))
else:
myLayout = LAYOUT
@@ -561,7 +562,7 @@ def computeLayout(n):
axs[axsx, axsy].set_ylim(YLIM[p])
# determine left-right orientation for coronal and axial views
- if ORIENTATION == ["radiological"]:
+ if ORIENTATION == "radiological":
if CutsRRAS[p][0] == "y" or CutsRRAS[p][0] == "z":
axs[axsx, axsy].invert_xaxis()
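Background for the signature changes in this file and the ones below: a mutable default argument (ruff/bugbear rule B006, cited in the earlier fsqcMain.py comment) is created once at function definition time and shared between calls, which is why the defaults are switched to immutable sentinels such as "default" and None. A minimal illustration, unrelated to the actual fsqc signatures:
    def collect_bad(item, bucket=[]):     # B006: one list shared by every call
        bucket.append(item)
        return bucket
    def collect_good(item, bucket=None):  # sentinel default, fresh list per call
        if bucket is None:
            bucket = []
        bucket.append(item)
        return bucket
    print(collect_bad("a"), collect_bad("b"))    # ['a', 'b'] ['a', 'b'] -- shared
    print(collect_good("a"), collect_good("b"))  # ['a'] ['b']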
diff --git a/fsqc/evaluateFornixSegmentation.py b/fsqc/evaluateFornixSegmentation.py
index 28c0494..87360c9 100644
--- a/fsqc/evaluateFornixSegmentation.py
+++ b/fsqc/evaluateFornixSegmentation.py
@@ -158,8 +158,8 @@ def evaluateFornixSegmentation(
INTERACTIVE=False,
VIEWS=[("x", x_coord - 1), ("x", x_coord), ("x", x_coord + 1)],
LAYOUT=(1, 3),
- BASE=[os.path.join(OUTPUT_DIR, "normCCup.mgz")],
- OVERLAY=[os.path.join(OUTPUT_DIR, "cc.mgz")],
+ BASE=os.path.join(OUTPUT_DIR, "normCCup.mgz"),
+ OVERLAY=os.path.join(OUTPUT_DIR, "cc.mgz"),
SURF=None,
OUTFILE=SCREENSHOTS_OUTFILE,
)
diff --git a/fsqc/evaluateHippocampalSegmentation.py b/fsqc/evaluateHippocampalSegmentation.py
index 914842b..6cd5714 100644
--- a/fsqc/evaluateHippocampalSegmentation.py
+++ b/fsqc/evaluateHippocampalSegmentation.py
@@ -11,7 +11,7 @@ def evaluateHippocampalSegmentation(
OUTPUT_DIR,
CREATE_SCREENSHOT=True,
SCREENSHOTS_OUTFILE=None,
- SCREENSHOTS_ORIENTATION=None,
+ SCREENSHOTS_ORIENTATION="radiological",
HEMI="lh",
LABEL="T1.v21",
):
@@ -38,8 +38,8 @@ def evaluateHippocampalSegmentation(
Whether to create screenshots.
SCREENSHOTS_OUTFILE : str or list, optional, default: None
File or list of files for screenshots.
- SCREENSHOTS_ORIENTATION : str or list, optional, default: ["radiological"]
- Orientation or list of orientations for screenshots.
+ SCREENSHOTS_ORIENTATION : str, optional, default: "radiological"
+ Orientation for screenshots.
HEMI : str, optional, default: "lh"
Hemisphere to evaluate, either 'lh' or 'rh'.
LABEL : str, optional, default: "T1.v21"
@@ -68,9 +68,6 @@ def evaluateHippocampalSegmentation(
from fsqc.createScreenshots import createScreenshots
from fsqc.fsqcUtils import binarizeImage
- if SCREENSHOTS_ORIENTATION is None:
- SCREENSHOTS_ORIENTATION = ["radiological"]
-
# --------------------------------------------------------------------------
# check files
@@ -177,15 +174,13 @@ def evaluateHippocampalSegmentation(
INTERACTIVE=False,
VIEWS=[("x", ctr_tkr_x0), ("y", ctr_tkr_y0), ("z", ctr_tkr_z0)],
LAYOUT=(1, 3),
- BASE=[os.path.join(SUBJECTS_DIR, SUBJECT, "mri", "norm.mgz")],
- OVERLAY=[
- os.path.join(
- SUBJECTS_DIR,
- SUBJECT,
- "mri",
- HEMI + ".hippoAmygLabels-" + LABEL + ".FSvoxelSpace.mgz",
- )
- ],
+ BASE=os.path.join(SUBJECTS_DIR, SUBJECT, "mri", "norm.mgz"),
+ OVERLAY=os.path.join(
+ SUBJECTS_DIR,
+ SUBJECT,
+ "mri",
+ HEMI + ".hippoAmygLabels-" + LABEL + ".FSvoxelSpace.mgz",
+ ),
SURF=None,
OUTFILE=SCREENSHOTS_OUTFILE,
ORIENTATION=SCREENSHOTS_ORIENTATION,
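
A usage sketch for the updated interface: SCREENSHOTS_ORIENTATION is now a plain string defaulting to "radiological". The leading SUBJECT and SUBJECTS_DIR parameters are not visible in this hunk and are assumed from the function body; paths are placeholders. The hypothalamic module below follows the same convention.

```python
from fsqc.evaluateHippocampalSegmentation import evaluateHippocampalSegmentation

# Sketch: pass the orientation as a string, or simply rely on the new default.
evaluateHippocampalSegmentation(
    SUBJECT="sub-01",                                    # assumed parameter name
    SUBJECTS_DIR="/data/subjects",                       # assumed parameter name
    OUTPUT_DIR="/data/fsqc/hippocampus/sub-01",
    CREATE_SCREENSHOT=True,
    SCREENSHOTS_OUTFILE="/data/fsqc/hippocampus/sub-01/lh.png",
    SCREENSHOTS_ORIENTATION="radiological",              # was None / ["radiological"]
    HEMI="lh",
    LABEL="T1.v21",
)
```
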
diff --git a/fsqc/evaluateHypothalamicSegmentation.py b/fsqc/evaluateHypothalamicSegmentation.py
index 533b6c5..01da4d7 100644
--- a/fsqc/evaluateHypothalamicSegmentation.py
+++ b/fsqc/evaluateHypothalamicSegmentation.py
@@ -12,7 +12,7 @@ def evaluateHypothalamicSegmentation(
OUTPUT_DIR,
CREATE_SCREENSHOT=True,
SCREENSHOTS_OUTFILE=None,
- SCREENSHOTS_ORIENTATION=None,
+ SCREENSHOTS_ORIENTATION="radiological",
):
"""
Evaluate potential missegmentation of the hypothalamus.
@@ -37,8 +37,8 @@ def evaluateHypothalamicSegmentation(
Whether to create screenshots.
SCREENSHOTS_OUTFILE : str or list, optional, default: None
File or list of files for screenshots.
- SCREENSHOTS_ORIENTATION : str or list, optional, default: ["radiological"]
- Orientation or list of orientations for screenshots.
+ SCREENSHOTS_ORIENTATION : str, optional, default: "radiological"
+ Orientation for screenshots.
Returns
-------
@@ -63,9 +63,6 @@ def evaluateHypothalamicSegmentation(
from fsqc.createScreenshots import createScreenshots
from fsqc.fsqcUtils import binarizeImage
- if SCREENSHOTS_ORIENTATION is None:
- SCREENSHOTS_ORIENTATION = ["radiological"]
-
# --------------------------------------------------------------------------
# check files
@@ -249,12 +246,10 @@ def evaluateHypothalamicSegmentation(
("z", ctr_tkr_z1),
],
LAYOUT=(1, 9),
- BASE=[os.path.join(SUBJECTS_DIR, SUBJECT, "mri", "norm.mgz")],
- OVERLAY=[
- os.path.join(
- SUBJECTS_DIR, SUBJECT, "mri", "hypothalamic_subunits_seg.v1.mgz"
- )
- ],
+ BASE=os.path.join(SUBJECTS_DIR, SUBJECT, "mri", "norm.mgz"),
+ OVERLAY=os.path.join(
+ SUBJECTS_DIR, SUBJECT, "mri", "hypothalamic_subunits_seg.v1.mgz"
+ ),
SURF=None,
OUTFILE=SCREENSHOTS_OUTFILE,
ORIENTATION=SCREENSHOTS_ORIENTATION,
diff --git a/fsqc/fsqcMain.py b/fsqc/fsqcMain.py
index 74f6c9d..d43e7ca 100644
--- a/fsqc/fsqcMain.py
+++ b/fsqc/fsqcMain.py
@@ -683,7 +683,7 @@ def _parse_arguments():
"--screenshots_views",
dest="screenshots_views",
help="view specification for screenshots",
- default=["x=-10", "x=10", "y=0", "z=0"],
+ default="default",
nargs="+",
metavar="",
required=False,
@@ -692,7 +692,7 @@ def _parse_arguments():
"--screenshots_layout",
dest="screenshots_layout",
help="layout for screenshots",
- default=["1", "4"],
+ default="default",
nargs=2,
metavar="",
required=False,
@@ -701,7 +701,7 @@ def _parse_arguments():
"--screenshots_orientation",
dest="screenshots_orientation",
help=argparse.SUPPRESS,
- default=["radiological"],
+ default="radiological",
nargs=1,
metavar="",
required=False,
@@ -899,27 +899,19 @@ def _check_arguments(argsDict):
logging.error("Reason: " + str(e))
raise
- # check screenshots_base
- argsDict["screenshots_base"] = [argsDict["screenshots_base"]]
-
- # check screenshots_overlay (this is either 'default' or 'none' or a single file or a list; further checks prior to execution of the screenshots module)
+ # check screenshots_overlay (this is either 'default' or 'none' or a single file or a single generic filename; further checks prior to execution of the screenshots module)
if argsDict["screenshots_overlay"].lower() == "none":
argsDict["screenshots_overlay"] = None
logging.info("Found screenshot overlays set to None")
- else:
- argsDict["screenshots_overlay"] = [argsDict["screenshots_overlay"]]
# check screenshots_surf (this is either 'default' or 'none' or a single file or a list; further checks prior to execution of the screenshots module)
if not isinstance(argsDict["screenshots_surf"], list):
- argsDict["screenshots_surf"] = [argsDict["screenshots_surf"]]
- if argsDict["screenshots_surf"][0].lower() == "none":
- argsDict["screenshots_surf"] = None
- logging.info("Found screenshot surfaces set to None")
+ if argsDict["screenshots_surf"].lower() == "none":
+ argsDict["screenshots_surf"] = None
+ logging.info("Found screenshot surfaces set to None")
# check if screenshots_views argument can be evaluated
- if argsDict["screenshots_views"] == "default":
- argsDict["screenshots_views"] = [argsDict["screenshots_views"]]
- else:
+ if isinstance(argsDict["screenshots_views"], list):
for x in argsDict["screenshots_views"]:
isXYZ = (
x.split("=")[0] == "x"
@@ -950,7 +942,7 @@ def _check_arguments(argsDict):
]
# check screenshots_layout
- if argsDict["screenshots_layout"] is not None:
+ if argsDict["screenshots_layout"] != "default":
if all([x.isdigit() for x in argsDict["screenshots_layout"]]):
argsDict["screenshots_layout"] = [
int(x) for x in argsDict["screenshots_layout"]
@@ -961,16 +953,16 @@ def _check_arguments(argsDict):
)
# check screenshots_orientation
- if argsDict["screenshots_orientation"] != ["neurological"] and argsDict[
+ if argsDict["screenshots_orientation"] != "neurological" and argsDict[
"screenshots_orientation"
- ] != ["radiological"]:
+ ] != "radiological":
raise TypeError(
"ERROR: screenshots_orientation argument must be either 'neurological' or 'radiological'."
)
else:
logging.info(
"Found screenshot orientation set to "
- + argsDict["screenshots_orientation"][0]
+ + argsDict["screenshots_orientation"]
)
# check if skullstrip subdirectory exists or can be created and is writable
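
In short, _check_arguments now leaves the screenshot settings as plain strings (or genuine lists) instead of wrapping them in one-element lists. A sketch of typical post-check values when the parser defaults are used (keys as above, values illustrative only):

```python
# Illustrative only: expected shape of the screenshot entries after _check_arguments
# when the command-line defaults are left untouched.
argsDict_example = {
    "screenshots_base": "default",              # a single string, no longer ["default"]
    "screenshots_overlay": "default",           # "default", a filename, or None
    "screenshots_surf": "default",              # "default", None, a filename, or a list of filenames
    "screenshots_views": "default",             # or a validated list built from entries like "x=-10"
    "screenshots_layout": "default",            # or a list of two ints, e.g. [1, 4]
    "screenshots_orientation": "radiological",  # "radiological" or "neurological"
}
```
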
@@ -1282,7 +1274,7 @@ def _check_arguments(argsDict):
# check screenshots
if (
argsDict["screenshots"] is True or argsDict["screenshots_html"] is True
- ) and argsDict["screenshots_surf"] == ["default"]:
+ ) and argsDict["screenshots_surf"] == "default":
# -files: surf/[lr]h.white (optional), surf/[lr]h.pial (optional)
path_check = os.path.join(
argsDict["subjects_dir"], subject, "surf", "lh.white"
@@ -1945,6 +1937,7 @@ def _do_fsqc(argsDict):
# run optional modules: screenshots
if argsDict["screenshots"] is True or argsDict["screenshots_html"] is True:
+
# determine status
screenshots_status = 0
if argsDict["skip_existing"] is True:
@@ -1993,14 +1986,14 @@ def _do_fsqc(argsDict):
screenshots_surf_subj = list()
# check screenshots_base
- if argsDict["screenshots_base"][0] == "default":
+ if argsDict["screenshots_base"] == "default":
screenshots_base_subj = argsDict["screenshots_base"]
logging.info("Using default for screenshot base image")
- elif os.path.isfile(argsDict["screenshots_base"][0]):
+ elif os.path.isfile(argsDict["screenshots_base"]):
screenshots_base_subj = argsDict["screenshots_base"]
logging.info(
"Using "
- + screenshots_base_subj[0]
+ + screenshots_base_subj
+ " as screenshot base image"
)
elif os.path.isfile(
@@ -2008,44 +2001,42 @@ def _do_fsqc(argsDict):
argsDict["subjects_dir"],
subject,
"mri",
- argsDict["screenshots_base"][0],
+ argsDict["screenshots_base"],
)
):
- screenshots_base_subj = [
- os.path.join(
- argsDict["subjects_dir"],
- subject,
- "mri",
- argsDict["screenshots_base"][0],
- )
- ]
+ screenshots_base_subj = os.path.join(
+ argsDict["subjects_dir"],
+ subject,
+ "mri",
+ argsDict["screenshots_base"],
+ )
logging.info(
"Using "
- + screenshots_base_subj[0]
+ + screenshots_base_subj
+ " as screenshot base image"
)
else:
raise FileNotFoundError(
"ERROR: cannot find the screenshots base file "
- + argsDict["screenshots_base"][0]
+ + argsDict["screenshots_base"]
)
# check screenshots_overlay
if argsDict["screenshots_overlay"] is not None:
- if argsDict["screenshots_overlay"][0] == "default":
+ if argsDict["screenshots_overlay"] == "default":
screenshots_overlay_subj = argsDict[
"screenshots_overlay"
]
logging.info(
"Using default for screenshot overlay image"
)
- elif os.path.isfile(argsDict["screenshots_overlay"][0]):
+ elif os.path.isfile(argsDict["screenshots_overlay"]):
screenshots_overlay_subj = argsDict[
"screenshots_overlay"
]
logging.info(
"Using "
- + screenshots_overlay_subj[0]
+ + screenshots_overlay_subj
+ " as screenshot overlay image"
)
elif os.path.isfile(
@@ -2053,66 +2044,80 @@ def _do_fsqc(argsDict):
argsDict["subjects_dir"],
subject,
"mri",
- argsDict["screenshots_overlay"][0],
+ argsDict["screenshots_overlay"],
)
):
- screenshots_overlay_subj = [
- os.path.join(
- argsDict["subjects_dir"],
- subject,
- "mri",
- argsDict["screenshots_overlay"][0],
- )
- ]
+ screenshots_overlay_subj = os.path.join(
+ argsDict["subjects_dir"],
+ subject,
+ "mri",
+ argsDict["screenshots_overlay"],
+ )
logging.info(
"Using "
- + screenshots_overlay_subj[0]
+ + screenshots_overlay_subj
+ " as screenshot overlay image"
)
else:
raise FileNotFoundError(
"ERROR: cannot find the screenshots overlay file "
- + argsDict["screenshots_overlay"][0]
+ + argsDict["screenshots_overlay"]
)
else:
screenshots_overlay_subj = argsDict["screenshots_overlay"]
# check screenshots_surf
if argsDict["screenshots_surf"] is not None:
- for screenshots_surf_i in argsDict["screenshots_surf"]:
- if screenshots_surf_i == "default":
+ if isinstance(argsDict["screenshots_surf"], str):
+ if argsDict["screenshots_surf"] == "default":
+ screenshots_surf_subj = "default"
logging.info("Using default for screenshot surface")
- elif os.path.isfile(screenshots_surf_i):
- logging.info(
- "Using "
- + screenshots_surf_i
- + " as screenshot surface"
- )
- elif os.path.isfile(
- os.path.join(
- argsDict["subjects_dir"],
- subject,
- "surf",
- screenshots_surf_i,
- )
- ):
- screenshots_surf_i = os.path.join(
- argsDict["subjects_dir"],
- subject,
- "surf",
- screenshots_surf_i,
- )
- logging.info(
- "Using "
- + screenshots_surf_i
- + " as screenshot surface"
- )
else:
- raise FileNotFoundError(
- "ERROR: cannot find the screenshots surface file "
- + screenshots_surf_i
- )
- screenshots_surf_subj.append(screenshots_surf_i)
+ if os.path.isfile(argsDict["screenshots_surf"]):
+ logging.info(
+ "Using "
+ + argsDict["screenshots_surf"]
+ + " as screenshot surface"
+ )
+ screenshots_surf_subj = [argsDict["screenshots_surf"]]
+ else:
+ raise FileNotFoundError(
+ "ERROR: cannot find the screenshots surface file "
+ + argsDict["screenshots_surf"]
+ )
+ elif isinstance(argsDict["screenshots_surf"], list):
+ for screenshots_surf_i in argsDict["screenshots_surf"]:
+ if os.path.isfile(screenshots_surf_i):
+ logging.info(
+ "Using "
+ + screenshots_surf_i
+ + " as screenshot surface"
+ )
+ elif os.path.isfile(
+ os.path.join(
+ argsDict["subjects_dir"],
+ subject,
+ "surf",
+ screenshots_surf_i,
+ )
+ ):
+ screenshots_surf_i = os.path.join(
+ argsDict["subjects_dir"],
+ subject,
+ "surf",
+ screenshots_surf_i,
+ )
+ logging.info(
+ "Using "
+ + screenshots_surf_i
+ + " as screenshot surface"
+ )
+ else:
+ raise FileNotFoundError(
+ "ERROR: cannot find the screenshots surface file "
+ + screenshots_surf_i
+ )
+ screenshots_surf_subj.append(screenshots_surf_i)
else:
screenshots_surf_subj = None
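
The rewritten block distinguishes the literal "default", a single surface file given as a string (wrapped into a one-element list), and a list of files whose relative names are resolved against the subject's surf/ directory. A condensed sketch of that lookup order (the helper name is illustrative, not part of the patch):

```python
import os

def _resolve_surface(entry, subjects_dir, subject):
    """Illustrative helper mirroring the lookup above: use the path as given if it
    exists, otherwise fall back to <subjects_dir>/<subject>/surf/<entry>."""
    if os.path.isfile(entry):
        return entry
    candidate = os.path.join(subjects_dir, subject, "surf", entry)
    if os.path.isfile(candidate):
        return candidate
    raise FileNotFoundError("cannot find the screenshots surface file " + entry)

# e.g. ["lh.white", "rh.pial"] -> absolute paths inside the subject's surf/ folder
# screenshots_surf_subj = [_resolve_surface(s, subjects_dir, subject) for s in surf_list]
```
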
@@ -2287,11 +2292,9 @@ def _do_fsqc(argsDict):
argsDict["subjects_dir"], subject, "mri", "orig.mgz"
)
):
- skullstrip_base_subj = [
- os.path.join(
- argsDict["subjects_dir"], subject, "mri", "orig.mgz"
- )
- ]
+ skullstrip_base_subj = os.path.join(
+ argsDict["subjects_dir"], subject, "mri", "orig.mgz"
+ )
logging.info(
"Using " + "orig.mgz" + " as skullstrip base image"
)
@@ -2310,14 +2313,12 @@ def _do_fsqc(argsDict):
"brainmask.mgz",
)
):
- skullstrip_overlay_subj = [
- os.path.join(
- argsDict["subjects_dir"],
- subject,
- "mri",
- "brainmask.mgz",
- )
- ]
+ skullstrip_overlay_subj = os.path.join(
+ argsDict["subjects_dir"],
+ subject,
+ "mri",
+ "brainmask.mgz",
+ )
logging.info(
"Using "
+ "brainmask.mgz"
@@ -3541,8 +3542,8 @@ def run_fsqc(
screenshots_overlay="default",
screenshots_surf="default",
screenshots_views="default",
- screenshots_layout=None,
- screenshots_orientation=None,
+ screenshots_layout="default",
+ screenshots_orientation="radiological",
surfaces=False,
surfaces_html=False,
surfaces_views=None,
@@ -3593,15 +3594,15 @@ def run_fsqc(
Filename for base image for screenshots.
screenshots_overlay : str, default: "default"
Filename for overlay image for screenshots.
- screenshots_surf : list of str, default: "default"
+ screenshots_surf : (list of) str, default: "default"
        Filename or list of filenames of surface files to include in screenshots.
- screenshots_views : list of str, default: "default"
+ screenshots_views : (list of) str, default: "default"
        "default" or a list of parameters to set the views of the screenshots.
Example: ['x=0', 'x=-10', 'x=10', 'y=20', 'z=0'].
- screenshots_layout : list of int, default: None
+ screenshots_layout : str or list of int, default: "default"
Layout describing rows and columns of the screenshots.
Example: [1, 4] (one row, four columns).
- screenshots_orientation : list of str, default: ["radiological"]
+ screenshots_orientation : str, default: "radiological"
Orientation of screenshots. Either "radiological" or "neurological".
surfaces : bool, default: False
Create screenshots of pial and inflated surfaces.
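
Taken together, the defaults of run_fsqc now mirror the new string conventions. The sketch below shows a call relying on them; the import path and the keyword names other than the screenshot settings (subjects_dir, output_dir, screenshots, skip_existing) are assumptions for illustration, and the directories are placeholders.

```python
from fsqc.fsqcMain import run_fsqc  # import path assumed for this sketch

# Sketch: the screenshot settings default to strings, so they could be omitted;
# they are spelled out here only to show the new values.
run_fsqc(
    subjects_dir="/data/subjects",             # assumed keyword
    output_dir="/data/fsqc",                   # assumed keyword
    screenshots=True,                          # assumed keyword
    screenshots_base="default",
    screenshots_overlay="default",
    screenshots_surf="default",
    screenshots_views="default",
    screenshots_layout="default",              # previously None
    screenshots_orientation="radiological",    # previously ["radiological"]
    skip_existing=True,                        # assumed keyword; reuse results recorded in the status files
)
```
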
From 9d126fab5b3ebb59528375e40caf5b7d1517dd16 Mon Sep 17 00:00:00 2001
From: diersk
Date: Tue, 15 Oct 2024 15:11:49 +0200
Subject: [PATCH 25/25] Fixes typo
---
singularity/Singularity.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/singularity/Singularity.md b/singularity/Singularity.md
index a6b1d9f..fa3d829 100644
--- a/singularity/Singularity.md
+++ b/singularity/Singularity.md
@@ -10,7 +10,7 @@ cd /home/user/my_singlarity_images
singularity build fsqc-latest.sif docker://deepmi/fsqcdocker:latest
```
-Singularity Images are saved as `.sif` files. Here the _/homer/user/my_singlarity_images_ is the path where you want your file saved. You can change _deepmi/fsqc:latest_ with any tag provided in our [Dockerhub](https://hub.docker.com/r/deepmi/fsqcdocker/tags).
+Singularity images are saved as `.sif` files. Here, _/home/user/my_singlarity_images_ is the path where you want your file saved. You can replace _deepmi/fsqcdocker:latest_ with any tag provided in our [Dockerhub](https://hub.docker.com/r/deepmi/fsqcdocker/tags).
If you want to use a locally available image that you created yourself, instead run: