
Commit

[pre-commit.ci] pre-commit autoupdate (#38)
* [pre-commit.ci] pre-commit autoupdate

updates:
- [github.com/astral-sh/ruff-pre-commit: v0.3.5 → v0.5.0](astral-sh/ruff-pre-commit@v0.3.5...v0.5.0)
- [github.com/codespell-project/codespell: v2.2.6 → v2.3.0](codespell-project/codespell@v2.2.6...v2.3.0)

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* rm architecture constraint

* fix ruff

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: mscheltienne <mathieu.scheltienne@fcbg.ch>
pre-commit-ci[bot] and mscheltienne authored Jul 2, 2024
1 parent 9725cff commit 8931105
Showing 5 changed files with 77 additions and 53 deletions.
1 change: 0 additions & 1 deletion .github/workflows/pytest.yaml
@@ -32,7 +32,6 @@ jobs:
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
architecture: 'x64'
- name: Install package
run: |
python -m pip install --progress-bar off --upgrade pip setuptools
4 changes: 2 additions & 2 deletions .pre-commit-config.yaml
@@ -9,7 +9,7 @@ repos:
files: eeg_flow

- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.3.5
rev: v0.5.0
hooks:
- id: ruff
name: ruff linter
@@ -20,7 +20,7 @@ repos:
files: eeg_flow

- repo: https://github.com/codespell-project/codespell
rev: v2.2.6
rev: v2.3.0
hooks:
- id: codespell
args: [--write-changes]
6 changes: 3 additions & 3 deletions eeg_flow/tasks/__init__.py
@@ -7,10 +7,10 @@
)
from .convert_xdf_to_fiff import convert_xdf_to_fiff # noqa: F401
from .create_behavioral_metadata import create_behavioral_metadata # noqa: F401
from .epochs_evoked import ( # noqa: F401
from .epochs_evoked import ( # noqa: F401
create_epochs_evoked_and_behavioral_metadata,
response_to_CSD
)
response_to_CSD,
)
from .ica_decomposition import ( # noqa: F401
apply_ica,
compare_labels,
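The tasks/__init__.py hunk above is purely cosmetic output from the updated ruff hooks: two spaces before the inline "# noqa" comment and a trailing comma after the last imported name. A self-contained sketch of the same layout on a standard-library import (the eeg_flow names themselves only resolve inside the package):

# Illustrative only: the multi-line import layout ruff's formatter typically
# settles on, mirroring the change to eeg_flow/tasks/__init__.py above.
from collections import (  # noqa: F401
    Counter,
    OrderedDict,  # trailing comma keeps one name per line when reformatting
)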
111 changes: 68 additions & 43 deletions eeg_flow/tasks/epochs_evoked.py
@@ -39,10 +39,11 @@

_TOO_QUICK_THRESHOLD: float = 0.2


def response_to_CSD(
participant: str,
group: str,
task: str,
task: str,
run: int,
BC_response: str,
*,
@@ -66,12 +67,14 @@ def response_to_CSD(
derivatives_folder_root, participant, group, task, run
)

# lock the output derivative files
# lock the output derivative files
# create locks
#"400200resp","2000resp","400200resp_CSD","2000resp_CSD"
# "400200resp","2000resp","400200resp_CSD","2000resp_CSD"
derivatives = [
derivatives_folder / f"{fname_stem}_step8b_responselocked-{BC_response}_CSD-cleaned-epo.fif",
derivatives_folder / f"{fname_stem}_step8b_responselocked-{BC_response}_CSD-ave.fif",
derivatives_folder
/ f"{fname_stem}_step8b_responselocked-{BC_response}_CSD-cleaned-epo.fif",
derivatives_folder
/ f"{fname_stem}_step8b_responselocked-{BC_response}_CSD-ave.fif",
]

locks = lock_files(*derivatives, timeout=timeout)
@@ -82,23 +85,30 @@

# load previous steps (normal epoching)
epochs = read_epochs(
derivatives_folder / f"{fname_stem}_step8_responselocked-{BC_response}-cleaned-epo.fif",
derivatives_folder
/ f"{fname_stem}_step8_responselocked-{BC_response}-cleaned-epo.fif",
)
epochs_CSD = compute_current_source_density(
epochs.interpolate_bads(), stiffness=3, n_legendre_terms=15, verbose=100
)
epochs_CSD = compute_current_source_density(epochs.interpolate_bads(), stiffness=3, n_legendre_terms=15, verbose=100)

evoked = read_evokeds(
derivatives_folder / f"{fname_stem}_step8_responselocked-{BC_response}-ave.fif",
derivatives_folder
/ f"{fname_stem}_step8_responselocked-{BC_response}-ave.fif",
)[0]
evoked_CSD = compute_current_source_density(evoked.interpolate_bads(), stiffness=3, n_legendre_terms=15, verbose=100)

evoked_CSD = compute_current_source_density(
evoked.interpolate_bads(), stiffness=3, n_legendre_terms=15, verbose=100
)

# save epochs, drop-log and evoked files
if epochs_CSD is not None and evoked_CSD is not None:
epochs_CSD.save(
derivatives_folder
/ f"{fname_stem}_step8b_responselocked-{BC_response}_CSD-cleaned-epo.fif"
/ f"{fname_stem}_step8b_responselocked-{BC_response}_CSD-cleaned-epo.fif" # noqa: E501
)
evoked_CSD.save(
derivatives_folder / f"{fname_stem}_step8b_responselocked-{BC_response}_CSD-ave.fif"
derivatives_folder
/ f"{fname_stem}_step8b_responselocked-{BC_response}_CSD-ave.fif"
)

except FileNotFoundError:
@@ -181,16 +191,21 @@ def create_epochs_evoked_and_behavioral_metadata(
derivatives_folder / f"{fname_stem}_step8_responselocked-ave.fif",
]

window_response_bc = ["400200resp","2000resp"]
window_response_bc = ["400200resp", "2000resp"]
for window in window_response_bc:
derivatives.extend([derivatives_folder / f"{fname_stem}_step8_responselocked-{window}-cleaned-epo.fif",
derivatives_folder
/ "plots"
/ f"{fname_stem}_step8_responselocked-{window}-epochs-rejected.svg",
derivatives_folder
/ f"{fname_stem}_step8_responselocked-{window}-cleaned-epo-drop-log.csv",
derivatives_folder / f"{fname_stem}_step8_responselocked-{window}-ave.fif",
])
derivatives.extend(
[
derivatives_folder
/ f"{fname_stem}_step8_responselocked-{window}-cleaned-epo.fif",
derivatives_folder
/ "plots"
/ f"{fname_stem}_step8_responselocked-{window}-epochs-rejected.svg",
derivatives_folder
/ f"{fname_stem}_step8_responselocked-{window}-cleaned-epo-drop-log.csv", # noqa: E501
derivatives_folder
/ f"{fname_stem}_step8_responselocked-{window}-ave.fif",
]
)

locks = lock_files(*derivatives, timeout=timeout)

@@ -217,21 +232,18 @@ def create_epochs_evoked_and_behavioral_metadata(
fig_drops_response,
count_stim_before_response,
count_stim_after_response,

epochs_response_400200resp,
evoked_response_400200resp,
drop_reasons_response_400200resp,
fig_drops_response_400200resp,
count_stim_before_response_400200resp,
count_stim_after_response_400200resp,

epochs_response_2000resp,
evoked_response_2000resp,
drop_reasons_response_2000resp,
fig_drops_response_2000resp,
count_stim_before_response_2000resp,
count_stim_after_response_2000resp,

) = _create_epochs_evoked_and_behavioral_metadata(raw)

# save epochs, drop-log and evoked files
@@ -290,14 +302,18 @@ def create_epochs_evoked_and_behavioral_metadata(
",Total,Rejected,Bad,PTP\n"
f"Response,{count_stim_before_response[64]},{count_stim_before_response[64] - count_stim_after_response[64]},{drop_reasons_response['response']['bad_segment']},{drop_reasons_response['response']['ptp']}\n" # noqa: E501
)

if epochs_response_400200resp is not None and evoked_response_400200resp is not None:

if (
epochs_response_400200resp is not None
and evoked_response_400200resp is not None
):
epochs_response_400200resp.save(
derivatives_folder
/ f"{fname_stem}_step8_responselocked-400200resp-cleaned-epo.fif"
)
evoked_response_400200resp.save(
derivatives_folder / f"{fname_stem}_step8_responselocked-400200resp-ave.fif"
derivatives_folder
/ f"{fname_stem}_step8_responselocked-400200resp-ave.fif"
)
fig_drops_response_400200resp.get_axes()[0].set_title(
f"{fname_stem}: {fig_drops.get_axes()[0].get_title()}"
@@ -310,20 +326,24 @@ def create_epochs_evoked_and_behavioral_metadata(
)
with open(
derivatives_folder
/ f"{fname_stem}_step8_responselocked-400200resp-cleaned-epo-drop-log.csv",
/ f"{fname_stem}_step8_responselocked-400200resp-cleaned-epo-drop-log.csv", # noqa: E501
"w",
) as file:
file.write(
",Total,Rejected,Bad,PTP\n"
f"Response,{count_stim_before_response_400200resp[64]},{count_stim_before_response_400200resp[64] - count_stim_after_response_400200resp[64]},{drop_reasons_response_400200resp['response']['bad_segment']},{drop_reasons_response_400200resp['response']['ptp']}\n" # noqa: E501
)
if epochs_response_2000resp is not None and evoked_response_2000resp is not None:
if (
epochs_response_2000resp is not None
and evoked_response_2000resp is not None
):
epochs_response_2000resp.save(
derivatives_folder
/ f"{fname_stem}_step8_responselocked-2000resp-cleaned-epo.fif"
)
evoked_response_2000resp.save(
derivatives_folder / f"{fname_stem}_step8_responselocked-2000resp-ave.fif"
derivatives_folder
/ f"{fname_stem}_step8_responselocked-2000resp-ave.fif"
)
fig_drops_response_2000resp.get_axes()[0].set_title(
f"{fname_stem}: {fig_drops.get_axes()[0].get_title()}"
@@ -336,13 +356,13 @@ def create_epochs_evoked_and_behavioral_metadata(
)
with open(
derivatives_folder
/ f"{fname_stem}_step8_responselocked-2000resp-cleaned-epo-drop-log.csv",
/ f"{fname_stem}_step8_responselocked-2000resp-cleaned-epo-drop-log.csv", # noqa: E501
"w",
) as file:
file.write(
",Total,Rejected,Bad,PTP\n"
f"Response,{count_stim_before_response_2000resp[64]},{count_stim_before_response_2000resp[64] - count_stim_after_response_2000resp[64]},{drop_reasons_response_2000resp['response']['bad_segment']},{drop_reasons_response_2000resp['response']['ptp']}\n" # noqa: E501
)
)

except FileNotFoundError:
logger.error(
@@ -393,14 +413,12 @@ def _create_epochs_evoked_and_behavioral_metadata(
plt.Figure,
Counter,
Counter,

Optional[Epochs],
Optional[Evoked],
dict[str, dict[str, int]],
plt.Figure,
Counter,
Counter,

Optional[Epochs],
Optional[Evoked],
dict[str, dict[str, int]],
@@ -487,7 +505,7 @@ def _create_epochs_evoked_and_behavioral_metadata(
fig_drops_response,
) = _drop_bad_epochs(epochs_response, events_response, reject, response=True)

#redo for other baseline correction
# redo for other baseline correction
epochs_response_400200resp = Epochs(
raw=raw,
tmin=-0.4,
Expand All @@ -507,9 +525,11 @@ def _create_epochs_evoked_and_behavioral_metadata(
count_stim_after_response_400200resp,
drop_reasons_response_400200resp,
fig_drops_response_400200resp,
) = _drop_bad_epochs(epochs_response_400200resp, events_response, reject, response=True)
) = _drop_bad_epochs(
epochs_response_400200resp, events_response, reject, response=True
)

#redo for other baseline correction
# redo for other baseline correction
epochs_response_2000resp = Epochs(
raw=raw,
tmin=-0.2,
Expand All @@ -529,7 +549,9 @@ def _create_epochs_evoked_and_behavioral_metadata(
count_stim_after_response_2000resp,
drop_reasons_response_2000resp,
fig_drops_response_2000resp,
) = _drop_bad_epochs(epochs_response_2000resp, events_response, reject, response=True)
) = _drop_bad_epochs(
epochs_response_2000resp, events_response, reject, response=True
)

else:
metadata = None
@@ -575,22 +597,25 @@ def _create_epochs_evoked_and_behavioral_metadata(
fig_drops_response,
count_stim_before_response,
count_stim_after_response,

epochs_response_400200resp,
None if epochs_response_400200resp is None else epochs_response_400200resp.average(),
None
if epochs_response_400200resp is None
else epochs_response_400200resp.average(),
drop_reasons_response_400200resp,
fig_drops_response_400200resp,
count_stim_before_response_400200resp,
count_stim_after_response_400200resp,

epochs_response_2000resp,
None if epochs_response_2000resp is None else epochs_response_2000resp.average(),
None
if epochs_response_2000resp is None
else epochs_response_2000resp.average(),
drop_reasons_response_2000resp,
fig_drops_response_2000resp,
count_stim_before_response_2000resp,
count_stim_after_response_2000resp,
count_stim_after_response_2000resp,
)


def _make_metadata(
events: NDArray[+ScalarIntType],
events_id: dict[str, int],
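Most of the epochs_evoked.py hunks above are line wrapping and comment spacing for the stricter ruff release; the CSD computation itself is unchanged. For reference, a minimal, self-contained sketch of that step with MNE-Python, using the same spline settings as the pipeline but hypothetical placeholder file names:

# Minimal sketch of the CSD step from response_to_CSD, assuming MNE-Python is
# installed; the file names are hypothetical placeholders, not the pipeline's
# real derivatives.
from mne import read_epochs
from mne.preprocessing import compute_current_source_density

epochs = read_epochs("sub-01_step8_responselocked-cleaned-epo.fif")
# Interpolate bad channels first, then re-express the signal as a current
# source density estimate.
epochs_csd = compute_current_source_density(
    epochs.interpolate_bads(), stiffness=3, n_legendre_terms=15
)
epochs_csd.save("sub-01_step8b_responselocked_CSD-cleaned-epo.fif")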
8 changes: 4 additions & 4 deletions eeg_flow/utils/_checks.py
@@ -40,7 +40,7 @@ def ensure_int(item: Any, item_name: Optional[str] = None) -> int:
raise TypeError
item = int(operator.index(item))
except TypeError:
item_name = "Item" if item_name is None else "'%s'" % item_name
item_name = "Item" if item_name is None else f"'{item_name}'"
raise TypeError(f"{item_name} must be an integer, got {type(item)} instead.")

return item
@@ -119,7 +119,7 @@ def check_type(item: Any, types: tuple, item_name: Optional[str] = None) -> None
else:
type_name[-1] = "or " + type_name[-1]
type_name = ", ".join(type_name)
item_name = "Item" if item_name is None else "'%s'" % item_name
item_name = "Item" if item_name is None else f"'{item_name}'"
raise TypeError(
f"{item_name} must be an instance of {type_name}, got {type(item)} instead."
)
@@ -150,15 +150,15 @@ def check_value(
When the value of the item is not one of the valid options.
"""
if item not in allowed_values:
item_name = "" if item_name is None else " '%s'" % item_name
item_name = "" if item_name is None else f" '{item_name}'"
extra = "" if extra is None else " " + extra
msg = (
"Invalid value for the{item_name} parameter{extra}. "
"{options}, but got {item!r} instead."
)
allowed_values = tuple(allowed_values) # e.g., if a dict was given
if len(allowed_values) == 1:
options = "The only allowed value is %s" % repr(allowed_values[0])
options = f"The only allowed value is {repr(allowed_values[0])}"
elif len(allowed_values) == 2:
options = (
f"Allowed values are {repr(allowed_values[0])} "
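The _checks.py hunks are part of the "fix ruff" cleanup noted in the commit message: %-style string interpolation rewritten as f-strings. A minimal illustration of the rewrite, with a placeholder message rather than the module's exact wording:

# Illustration of the %-format to f-string rewrite applied in _checks.py;
# the message text is a placeholder, not the module's exact wording.
item_name = "timeout"

old_style = "Invalid value for the '%s' parameter." % item_name
new_style = f"Invalid value for the '{item_name}' parameter."

assert old_style == new_style  # identical output, clearer at the call site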
