From 5f8e353cc261fd5e761eae1592d68403de47448e Mon Sep 17 00:00:00 2001 From: BalzaniEdoardo Date: Fri, 1 Nov 2024 14:57:25 -0400 Subject: [PATCH 1/3] linted tests --- .github/workflows/main.yml | 3 +- tests/__init__.py | 1 + tests/mock.py | 8 +- tests/test_abstract_tsd.py | 29 +- tests/test_cnmfe.py | 6 +- tests/test_config.py | 151 +++-- tests/test_correlograms.py | 7 +- tests/test_decoding.py | 12 +- tests/test_filtering.py | 374 ++++++++--- tests/test_folder.py | 45 +- tests/test_interval_set.py | 263 +++++--- tests/test_jitted.py | 203 +++--- tests/test_lazy_loading.py | 156 +++-- tests/test_misc.py | 21 +- tests/test_neurosuite.py | 6 +- tests/test_non_numpy_array.py | 44 +- tests/test_npz_file.py | 79 ++- tests/test_numpy_compatibility.py | 198 +++--- tests/test_nwb.py | 143 +++-- tests/test_perievent.py | 131 ++-- tests/test_phy.py | 6 +- tests/test_power_spectral_density.py | 48 +- tests/test_randomize.py | 45 +- tests/test_spike_trigger_average.py | 105 ++-- tests/test_suite2p.py | 8 +- tests/test_time_index.py | 41 +- tests/test_time_series.py | 898 +++++++++++++++++---------- tests/test_time_units.py | 22 +- tests/test_ts_group.py | 408 +++++++----- tests/test_tuning_curves.py | 813 +++++++++++++++++++----- tests/test_utils.py | 25 +- 31 files changed, 2830 insertions(+), 1469 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index e6f62781..8c4f5482 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -31,7 +31,8 @@ jobs: black --check pynapple isort --check pynapple --profile black flake8 pynapple --max-complexity 10 - + black --check tests + isort --check tests --profile black test: needs: lint runs-on: ${{ matrix.os }} diff --git a/tests/__init__.py b/tests/__init__.py index 55f838c8..5638ca52 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1,2 +1,3 @@ """Unit test package for pynapple.""" + from . import mock diff --git a/tests/mock.py b/tests/mock.py index bd09021f..ea8fcc77 100644 --- a/tests/mock.py +++ b/tests/mock.py @@ -1,6 +1,6 @@ """Test configuration script.""" -import numpy as np +import numpy as np class MockArray: @@ -19,8 +19,8 @@ def __init__(self, data): A list of data elements that the MockArray will contain. """ self.data = np.asarray(data) - self.shape = self.data.shape # Simplified shape attribute - self.dtype = 'float64' # Simplified dtype; in real scenarios, this should be more dynamic + self.shape = self.data.shape # Simplified shape attribute + self.dtype = "float64" # Simplified dtype; in real scenarios, this should be more dynamic self.ndim = self.data.ndim # Simplified ndim for a 1-dimensional array def __getitem__(self, index): @@ -46,4 +46,4 @@ def __len__(self): """ Returns the length of the mock array. 
""" - return len(self.data) \ No newline at end of file + return len(self.data) diff --git a/tests/test_abstract_tsd.py b/tests/test_abstract_tsd.py index 7c275d79..3a184dd3 100644 --- a/tests/test_abstract_tsd.py +++ b/tests/test_abstract_tsd.py @@ -1,10 +1,11 @@ -import pynapple as nap import numpy as np import pandas as pd import pytest -from pynapple.core.time_series import _BaseTsd + +import pynapple as nap from pynapple.core.base_class import _Base from pynapple.core.time_index import TsIndex +from pynapple.core.time_series import _BaseTsd class MyClass(_BaseTsd): @@ -21,6 +22,7 @@ def __str__(self): def __repr__(self): return "In repr" + class MyClass2(_Base): def __getitem__(self, key): @@ -35,6 +37,7 @@ def __str__(self): def __repr__(self): return "In repr" + def test_create_atsd(): a = MyClass(t=np.arange(10), d=np.arange(10)) @@ -43,12 +46,12 @@ def test_create_atsd(): assert hasattr(a, "values") assert hasattr(a, "time_support") - assert np.isclose(a.rate, 10/9) + assert np.isclose(a.rate, 10 / 9) assert isinstance(a.index, nap.TsIndex) try: assert isinstance(a.values, np.ndarray) except: - assert nap.core.utils.is_array_like(a.values) + assert nap.core.utils.is_array_like(a.values) assert isinstance(a.time_support, nap.IntervalSet) assert hasattr(a, "t") @@ -76,6 +79,7 @@ def test_create_atsd(): np.testing.assert_array_equal(a.values, b.values) np.testing.assert_array_equal(a.index.values, b.index.values) + def test_create_ats(): a = MyClass2(t=np.arange(10)) @@ -85,14 +89,15 @@ def test_create_ats(): assert hasattr(a, "time_support") assert hasattr(a, "shape") - assert np.isclose(a.rate, 10/9) - assert isinstance(a.index, nap.TsIndex) + assert np.isclose(a.rate, 10 / 9) + assert isinstance(a.index, nap.TsIndex) assert isinstance(a.time_support, nap.IntervalSet) assert a.shape == a.index.shape assert hasattr(a, "t") assert a[0] == 0 + def test_create_ats_from_tsindex(): a = MyClass2(t=TsIndex(np.arange(10))) @@ -102,12 +107,13 @@ def test_create_ats_from_tsindex(): assert hasattr(a, "time_support") assert hasattr(a, "shape") - assert np.isclose(a.rate, 10/9) - assert isinstance(a.index, nap.TsIndex) + assert np.isclose(a.rate, 10 / 9) + assert isinstance(a.index, nap.TsIndex) assert isinstance(a.time_support, nap.IntervalSet) assert a.shape == a.index.shape - assert hasattr(a, "t") + assert hasattr(a, "t") + @pytest.mark.filterwarnings("ignore") def test_create_ats_from_number(): @@ -118,7 +124,7 @@ def test_create_ats_from_number(): assert hasattr(a, "index") assert hasattr(a, "time_support") assert hasattr(a, "shape") - + def test_methods(): a = MyClass(t=[], d=[]) @@ -143,6 +149,3 @@ def test_methods(): assert hasattr(a, "convolve") assert hasattr(a, "smooth") assert hasattr(a, "interpolate") - - - diff --git a/tests/test_cnmfe.py b/tests/test_cnmfe.py index 5dc31c60..8f361059 100644 --- a/tests/test_cnmfe.py +++ b/tests/test_cnmfe.py @@ -6,11 +6,13 @@ """Tests of CNMFE loaders for `pynapple` package.""" -import pynapple as nap +import warnings + import numpy as np import pandas as pd import pytest -import warnings + +import pynapple as nap @pytest.mark.filterwarnings("ignore") diff --git a/tests/test_config.py b/tests/test_config.py index b668ba0b..b2b20eb7 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -1,12 +1,11 @@ +import warnings from contextlib import nullcontext as does_not_raise +import numpy as np import pytest import pynapple as nap -import numpy as np - -import warnings class MockArray: """ @@ -25,8 +24,8 @@ def __init__(self, data): A list of 
data elements that the MockArray will contain. """ self.data = np.asarray(data) - self.shape = self.data.shape # Simplified shape attribute - self.dtype = 'float64' # Simplified dtype; in real scenarios, this should be more dynamic + self.shape = self.data.shape # Simplified shape attribute + self.dtype = "float64" # Simplified dtype; in real scenarios, this should be more dynamic self.ndim = self.data.ndim # Simplified ndim for a 1-dimensional array def __getitem__(self, index): @@ -56,6 +55,7 @@ def __len__(self): """ return len(self.data) + ################################## # Test for backend ################################## @@ -65,36 +65,45 @@ def test_change_backend(): assert nap.core.utils.get_backend() == "numba" assert nap.nap_config.backend == "numba" - with pytest.raises(AssertionError, match="Options for backend are 'jax' or 'numba'"): + with pytest.raises( + AssertionError, match="Options for backend are 'jax' or 'numba'" + ): nap.nap_config.set_backend("blabla") # For local tests. - # Should not be installed for github actions + # Should not be installed for github actions # try: # import pynajax - + # nap.nap_config.set_backend("jax") # assert nap.core.utils.get_backend() == "jax" # assert nap.nap_config.backend == "jax" - # except ModuleNotFoundError: - # with warnings.catch_warnings(record=True) as w: - # nap.nap_config.set_backend("jax") + # except ModuleNotFoundError: + # with warnings.catch_warnings(record=True) as w: + # nap.nap_config.set_backend("jax") + + # assert str(w[0].message) == 'Package pynajax is not found. Falling back to numba backend. To use the jax backend for pynapple, please install pynajax' + # assert nap.core.utils.get_backend() == "numba" + # assert nap.nap_config.backend == "numba" + - # assert str(w[0].message) == 'Package pynajax is not found. Falling back to numba backend. 
To use the jax backend for pynapple, please install pynajax' - # assert nap.core.utils.get_backend() == "numba" - # assert nap.nap_config.backend == "numba" - ################################## # Tests for warnings ################################## -@pytest.mark.parametrize("param, expectation", - [ - (True, does_not_raise()), - (False, does_not_raise()), - (1, pytest.raises(ValueError, - match="suppress_conversion_warnings must be a boolean value")) - ]) +@pytest.mark.parametrize( + "param, expectation", + [ + (True, does_not_raise()), + (False, does_not_raise()), + ( + 1, + pytest.raises( + ValueError, match="suppress_conversion_warnings must be a boolean value" + ), + ), + ], +) def test_config_setter_input_validity(param, expectation): """Test setting suppress_conversion_warnings with various inputs to validate type checking.""" with expectation: @@ -114,22 +123,43 @@ def test_config_restore_default(): assert not nap.nap_config.suppress_conversion_warnings -@pytest.mark.parametrize("cls, t, d, conf, expectation", - [ - (nap.Ts, [0, 1], None, True, does_not_raise()), - (nap.Ts, [0, 1], None, False, - pytest.warns(UserWarning, match=f"Converting 't' to numpy.array.")), - (nap.Tsd, [0, 1], [0, 1], True, does_not_raise()), - (nap.Tsd, [0, 1], [0, 1], False, - pytest.warns(UserWarning, match=f"Converting 't' to numpy.array.")), - (nap.TsdFrame, [0, 1], [[0], [1]], True, does_not_raise()), - (nap.TsdFrame, [0, 1], [[0], [1]], False, - pytest.warns(UserWarning, match=f"Converting 't' to numpy.array.")), - (nap.TsdTensor, [0, 1], [[[0]], [[1]]], True, does_not_raise()), - (nap.TsdTensor, [0, 1], [[[0]], [[1]]], False, - pytest.warns(UserWarning, match=f"Converting 't' to numpy.array.")), - - ]) +@pytest.mark.parametrize( + "cls, t, d, conf, expectation", + [ + (nap.Ts, [0, 1], None, True, does_not_raise()), + ( + nap.Ts, + [0, 1], + None, + False, + pytest.warns(UserWarning, match=f"Converting 't' to numpy.array."), + ), + (nap.Tsd, [0, 1], [0, 1], True, does_not_raise()), + ( + nap.Tsd, + [0, 1], + [0, 1], + False, + pytest.warns(UserWarning, match=f"Converting 't' to numpy.array."), + ), + (nap.TsdFrame, [0, 1], [[0], [1]], True, does_not_raise()), + ( + nap.TsdFrame, + [0, 1], + [[0], [1]], + False, + pytest.warns(UserWarning, match=f"Converting 't' to numpy.array."), + ), + (nap.TsdTensor, [0, 1], [[[0]], [[1]]], True, does_not_raise()), + ( + nap.TsdTensor, + [0, 1], + [[[0]], [[1]]], + False, + pytest.warns(UserWarning, match=f"Converting 't' to numpy.array."), + ), + ], +) def test_config_supress_warning_t(cls, t, d, conf, expectation): """Test if the restore_defaults method correctly resets suppress_conversion_warnings to its default.""" nap.nap_config.suppress_conversion_warnings = conf @@ -142,19 +172,36 @@ def test_config_supress_warning_t(cls, t, d, conf, expectation): finally: nap.nap_config.restore_defaults() -@pytest.mark.parametrize("cls, t, d, conf, expectation", - [ - (nap.Tsd, [0, 1], [0, 1], True, does_not_raise()), - (nap.Tsd, [0, 1], [0, 1], False, - pytest.warns(UserWarning, match=f"Converting 'd' to numpy.array.")), - (nap.TsdFrame, [0, 1], [[0], [1]], True, does_not_raise()), - (nap.TsdFrame, [0, 1], [[0], [1]], False, - pytest.warns(UserWarning, match=f"Converting 'd' to numpy.array.")), - (nap.TsdTensor, [0, 1], [[[0]], [[1]]], True, does_not_raise()), - (nap.TsdTensor, [0, 1], [[[0]], [[1]]], False, - pytest.warns(UserWarning, match=f"Converting 'd' to numpy.array.")), - - ]) + +@pytest.mark.parametrize( + "cls, t, d, conf, expectation", + [ + (nap.Tsd, [0, 1], [0, 
1], True, does_not_raise()), + ( + nap.Tsd, + [0, 1], + [0, 1], + False, + pytest.warns(UserWarning, match=f"Converting 'd' to numpy.array."), + ), + (nap.TsdFrame, [0, 1], [[0], [1]], True, does_not_raise()), + ( + nap.TsdFrame, + [0, 1], + [[0], [1]], + False, + pytest.warns(UserWarning, match=f"Converting 'd' to numpy.array."), + ), + (nap.TsdTensor, [0, 1], [[[0]], [[1]]], True, does_not_raise()), + ( + nap.TsdTensor, + [0, 1], + [[[0]], [[1]]], + False, + pytest.warns(UserWarning, match=f"Converting 'd' to numpy.array."), + ), + ], +) def test_config_supress_warning_d(cls, t, d, conf, expectation): """Test if the restore_defaults method correctly resets suppress_conversion_warnings to its default.""" nap.nap_config.suppress_conversion_warnings = conf @@ -166,4 +213,4 @@ def test_config_supress_warning_d(cls, t, d, conf, expectation): def test_get_time_index_precision(): - assert nap.nap_config.time_index_precision == 9 \ No newline at end of file + assert nap.nap_config.time_index_precision == 9 diff --git a/tests/test_correlograms.py b/tests/test_correlograms.py index 7445452e..d5d1f762 100644 --- a/tests/test_correlograms.py +++ b/tests/test_correlograms.py @@ -1,10 +1,13 @@ """Tests of correlograms for `pynapple` package.""" -import pynapple as nap +from itertools import combinations + import numpy as np import pandas as pd import pytest -from itertools import combinations + +import pynapple as nap + def test_cross_correlogram(): t1 = np.array([0]) diff --git a/tests/test_decoding.py b/tests/test_decoding.py index f181badf..594fcf8f 100644 --- a/tests/test_decoding.py +++ b/tests/test_decoding.py @@ -7,11 +7,12 @@ """Tests of decoding for `pynapple` package.""" -import pynapple as nap import numpy as np import pandas as pd import pytest +import pynapple as nap + def get_testing_set_1d(): feature = nap.Tsd(t=np.arange(0, 100, 1), d=np.repeat(np.arange(0, 2), 50)) @@ -37,6 +38,7 @@ def test_decode_1d(): tmp[0:50, 1] = 0.0 np.testing.assert_array_almost_equal(proba.values, tmp) + def test_decode_1d_with_dict(): feature, group, tc, ep = get_testing_set_1d() group = dict(group) @@ -52,6 +54,7 @@ def test_decode_1d_with_dict(): tmp[0:50, 1] = 0.0 np.testing.assert_array_almost_equal(proba.values, tmp) + def test_decode_1d_with_feature(): feature, group, tc, ep = get_testing_set_1d() decoded, proba = nap.decode_1d(tc, group, ep, bin_size=1, feature=feature) @@ -66,18 +69,21 @@ def test_decode_1d_with_feature(): tmp[0:50, 1] = 0.0 np.testing.assert_array_almost_equal(proba.values, tmp) + def test_decode_1d_with_wrong_feature(): feature, group, tc, ep = get_testing_set_1d() with pytest.raises(RuntimeError) as e_info: - nap.decode_1d(tc, group, ep, bin_size=1, feature=[1,2,3]) + nap.decode_1d(tc, group, ep, bin_size=1, feature=[1, 2, 3]) assert str(e_info.value) == "Unknown format for feature in decode_1d" + def test_decode_1d_with_time_units(): feature, group, tc, ep = get_testing_set_1d() for t, tu in zip([1, 1e3, 1e6], ["s", "ms", "us"]): decoded, proba = nap.decode_1d(tc, group, ep, 1.0 * t, time_units=tu) np.testing.assert_array_almost_equal(feature.values, decoded.values) + def test_decoded_1d_raise_errors(): feature, group, tc, ep = get_testing_set_1d() with pytest.raises(Exception) as e_info: @@ -137,6 +143,7 @@ def test_decode_2d(): tmp[51:100:2, 1] = 1 np.testing.assert_array_almost_equal(proba[:, :, 1], tmp) + def test_decode_2d_with_dict(): features, group, tc, ep, xy = get_testing_set_2d() group = dict(group) @@ -157,6 +164,7 @@ def test_decode_2d_with_dict(): tmp[51:100:2, 1] = 
1 np.testing.assert_array_almost_equal(proba[:, :, 1], tmp) + def test_decode_2d_with_feature(): features, group, tc, ep, xy = get_testing_set_2d() decoded, proba = nap.decode_2d(tc, group, ep, 1, xy) diff --git a/tests/test_filtering.py b/tests/test_filtering.py index 19461760..81e9a4ae 100644 --- a/tests/test_filtering.py +++ b/tests/test_filtering.py @@ -1,11 +1,13 @@ -import pytest -import pynapple as nap -import numpy as np -from scipy import signal -import pandas as pd import warnings from contextlib import nullcontext as does_not_raise +import numpy as np +import pandas as pd +import pytest +from scipy import signal + +import pynapple as nap + # @pytest.fixture def sample_data(): @@ -33,9 +35,12 @@ def compare_scipy(tsd, ep, order, freq, fs, btype): out_sci = np.concatenate(out_sci, axis=0) return out_sci + def compare_sinc(tsd, ep, transition_bandwidth, freq, fs, ftype): - kernel = nap.process.filtering._get_windowed_sinc_kernel(freq, ftype, fs, transition_bandwidth) + kernel = nap.process.filtering._get_windowed_sinc_kernel( + freq, ftype, fs, transition_bandwidth + ) return tsd.convolve(kernel, ep).d @@ -45,10 +50,21 @@ def compare_sinc(tsd, ep, transition_bandwidth, freq, fs, ftype): @pytest.mark.parametrize("transition_bandwidth", [0.02]) @pytest.mark.parametrize("shape", [(5000,), (5000, 2), (5000, 2, 3)]) @pytest.mark.parametrize("sampling_frequency", [None, 5000.0]) -@pytest.mark.parametrize("ep", [nap.IntervalSet(start=[0], end=[1]), nap.IntervalSet(start=[0, 0.5], end=[0.4, 1]), ]) -def test_low_pass(freq, mode, order, transition_bandwidth, shape, sampling_frequency, ep): +@pytest.mark.parametrize( + "ep", + [ + nap.IntervalSet(start=[0], end=[1]), + nap.IntervalSet(start=[0, 0.5], end=[0.4, 1]), + ], +) +def test_low_pass( + freq, mode, order, transition_bandwidth, shape, sampling_frequency, ep +): t = np.linspace(0, 1, shape[0]) - y = np.squeeze(np.cos(np.pi * 2 * 80 * t).reshape(-1, *[1] * (len(shape) - 1)) + np.random.normal(size=shape)) + y = np.squeeze( + np.cos(np.pi * 2 * 80 * t).reshape(-1, *[1] * (len(shape) - 1)) + + np.random.normal(size=shape) + ) if len(shape) == 1: tsd = nap.Tsd(t, y, time_support=ep) @@ -59,14 +75,22 @@ def test_low_pass(freq, mode, order, transition_bandwidth, shape, sampling_frequ if sampling_frequency is not None and sampling_frequency != tsd.rate: sampling_frequency = tsd.rate - out = nap.apply_lowpass_filter(tsd, freq, fs=sampling_frequency, mode=mode, order=order, - transition_bandwidth=transition_bandwidth) + out = nap.apply_lowpass_filter( + tsd, + freq, + fs=sampling_frequency, + mode=mode, + order=order, + transition_bandwidth=transition_bandwidth, + ) if mode == "butter": out_sci = compare_scipy(tsd, ep, order, freq, tsd.rate, "lowpass") np.testing.assert_array_almost_equal(out.d, out_sci) if mode == "sinc": - out_sinc = compare_sinc(tsd, ep, transition_bandwidth, freq, tsd.rate, "lowpass") + out_sinc = compare_sinc( + tsd, ep, transition_bandwidth, freq, tsd.rate, "lowpass" + ) np.testing.assert_array_almost_equal(out.d, out_sinc) assert isinstance(out, type(tsd)) @@ -82,10 +106,21 @@ def test_low_pass(freq, mode, order, transition_bandwidth, shape, sampling_frequ @pytest.mark.parametrize("transition_bandwidth", [0.02]) @pytest.mark.parametrize("shape", [(5000,), (5000, 2), (5000, 2, 3)]) @pytest.mark.parametrize("sampling_frequency", [None, 5000.0]) -@pytest.mark.parametrize("ep", [nap.IntervalSet(start=[0], end=[1]), nap.IntervalSet(start=[0, 0.5], end=[0.4, 1]), ]) -def test_high_pass(freq, mode, order, transition_bandwidth, 
shape, sampling_frequency, ep): +@pytest.mark.parametrize( + "ep", + [ + nap.IntervalSet(start=[0], end=[1]), + nap.IntervalSet(start=[0, 0.5], end=[0.4, 1]), + ], +) +def test_high_pass( + freq, mode, order, transition_bandwidth, shape, sampling_frequency, ep +): t = np.linspace(0, 1, shape[0]) - y = np.squeeze(np.cos(np.pi * 2 * 80 * t).reshape(-1, *[1] * (len(shape) - 1)) + np.random.normal(size=shape)) + y = np.squeeze( + np.cos(np.pi * 2 * 80 * t).reshape(-1, *[1] * (len(shape) - 1)) + + np.random.normal(size=shape) + ) if len(shape) == 1: tsd = nap.Tsd(t, y, time_support=ep) @@ -96,11 +131,19 @@ def test_high_pass(freq, mode, order, transition_bandwidth, shape, sampling_freq if sampling_frequency is not None and sampling_frequency != tsd.rate: sampling_frequency = tsd.rate - out = nap.apply_highpass_filter(tsd, freq, fs=sampling_frequency, mode=mode, order=order, - transition_bandwidth=transition_bandwidth) + out = nap.apply_highpass_filter( + tsd, + freq, + fs=sampling_frequency, + mode=mode, + order=order, + transition_bandwidth=transition_bandwidth, + ) if mode == "sinc": - out_sinc = compare_sinc(tsd, ep, transition_bandwidth, freq, tsd.rate, "highpass") + out_sinc = compare_sinc( + tsd, ep, transition_bandwidth, freq, tsd.rate, "highpass" + ) np.testing.assert_array_almost_equal(out.d, out_sinc) if mode == "butter": @@ -120,10 +163,21 @@ def test_high_pass(freq, mode, order, transition_bandwidth, shape, sampling_freq @pytest.mark.parametrize("transition_bandwidth", [0.02]) @pytest.mark.parametrize("shape", [(5000,), (5000, 2), (5000, 2, 3)]) @pytest.mark.parametrize("sampling_frequency", [None, 5000.0]) -@pytest.mark.parametrize("ep", [nap.IntervalSet(start=[0], end=[1]), nap.IntervalSet(start=[0, 0.5], end=[0.4, 1]), ]) -def test_bandpass(freq, mode, order, transition_bandwidth, shape, sampling_frequency, ep): +@pytest.mark.parametrize( + "ep", + [ + nap.IntervalSet(start=[0], end=[1]), + nap.IntervalSet(start=[0, 0.5], end=[0.4, 1]), + ], +) +def test_bandpass( + freq, mode, order, transition_bandwidth, shape, sampling_frequency, ep +): t = np.linspace(0, 1, shape[0]) - y = np.squeeze(np.cos(np.pi * 2 * 80 * t).reshape(-1, *[1] * (len(shape) - 1)) + np.random.normal(size=shape)) + y = np.squeeze( + np.cos(np.pi * 2 * 80 * t).reshape(-1, *[1] * (len(shape) - 1)) + + np.random.normal(size=shape) + ) if len(shape) == 1: tsd = nap.Tsd(t, y, time_support=ep) @@ -134,11 +188,19 @@ def test_bandpass(freq, mode, order, transition_bandwidth, shape, sampling_frequ if sampling_frequency is not None and sampling_frequency != tsd.rate: sampling_frequency = tsd.rate - out = nap.apply_bandpass_filter(tsd, freq, fs=sampling_frequency, mode=mode, order=order, - transition_bandwidth=transition_bandwidth) + out = nap.apply_bandpass_filter( + tsd, + freq, + fs=sampling_frequency, + mode=mode, + order=order, + transition_bandwidth=transition_bandwidth, + ) if mode == "sinc": - out_sinc = compare_sinc(tsd, ep, transition_bandwidth, freq, tsd.rate, "bandpass") + out_sinc = compare_sinc( + tsd, ep, transition_bandwidth, freq, tsd.rate, "bandpass" + ) np.testing.assert_array_almost_equal(out.d, out_sinc) if mode == "butter": @@ -158,10 +220,21 @@ def test_bandpass(freq, mode, order, transition_bandwidth, shape, sampling_frequ @pytest.mark.parametrize("transition_bandwidth", [0.02]) @pytest.mark.parametrize("shape", [(5000,), (5000, 2), (5000, 2, 3)]) @pytest.mark.parametrize("sampling_frequency", [None, 5000.0]) -@pytest.mark.parametrize("ep", [nap.IntervalSet(start=[0], end=[1]), 
nap.IntervalSet(start=[0, 0.5], end=[0.4, 1]), ]) -def test_bandstop(freq, mode, order, transition_bandwidth, shape, sampling_frequency, ep): +@pytest.mark.parametrize( + "ep", + [ + nap.IntervalSet(start=[0], end=[1]), + nap.IntervalSet(start=[0, 0.5], end=[0.4, 1]), + ], +) +def test_bandstop( + freq, mode, order, transition_bandwidth, shape, sampling_frequency, ep +): t = np.linspace(0, 1, shape[0]) - y = np.squeeze(np.cos(np.pi * 2 * 80 * t).reshape(-1, *[1] * (len(shape) - 1)) + np.random.normal(size=shape)) + y = np.squeeze( + np.cos(np.pi * 2 * 80 * t).reshape(-1, *[1] * (len(shape) - 1)) + + np.random.normal(size=shape) + ) if len(shape) == 1: tsd = nap.Tsd(t, y, time_support=ep) @@ -172,53 +245,177 @@ def test_bandstop(freq, mode, order, transition_bandwidth, shape, sampling_frequ if sampling_frequency is not None and sampling_frequency != tsd.rate: sampling_frequency = tsd.rate - out = nap.apply_bandstop_filter(tsd, freq, fs=sampling_frequency, mode=mode, order=order, - transition_bandwidth=transition_bandwidth) + out = nap.apply_bandstop_filter( + tsd, + freq, + fs=sampling_frequency, + mode=mode, + order=order, + transition_bandwidth=transition_bandwidth, + ) if mode == "sinc": - out_sinc = compare_sinc(tsd, ep, transition_bandwidth, freq, tsd.rate, "bandstop") + out_sinc = compare_sinc( + tsd, ep, transition_bandwidth, freq, tsd.rate, "bandstop" + ) np.testing.assert_array_almost_equal(out.d, out_sinc) if mode == "butter": out_sci = compare_scipy(tsd, ep, order, freq, tsd.rate, "bandstop") np.testing.assert_array_almost_equal(out.d, out_sci) - assert isinstance(out, type(tsd)) assert np.all(out.t == tsd.t) assert np.all(out.time_support == tsd.time_support) if isinstance(tsd, nap.TsdFrame): assert np.all(tsd.columns == out.columns) + ######################################################################## # Errors ######################################################################## -@pytest.mark.parametrize("func, freq", [ - (nap.apply_lowpass_filter, 10), - (nap.apply_highpass_filter, 10), - (nap.apply_bandpass_filter, [10, 20]), - (nap.apply_bandstop_filter, [10, 20]), -]) -@pytest.mark.parametrize("data, fs, mode, order, transition_bandwidth, expected_exception", [ - (sample_data(), None, "butter", "a", 0.02, pytest.raises(ValueError,match="Invalid value for 'order': Parameter 'order' should be of type int")), - ("invalid_data", None, "butter", 4, 0.02, pytest.raises(ValueError,match="Invalid value: invalid_data. First argument should be of type Tsd, TsdFrame or TsdTensor")), - (sample_data(), None, "invalid_mode", 4, 0.02, pytest.raises(ValueError,match="Unrecognized filter mode. Choose either 'butter' or 'sinc'")), - (sample_data(), "invalid_fs", "butter", 4, 0.02, pytest.raises(ValueError,match="Invalid value for 'fs'. Parameter 'fs' should be of type float or int")), - (sample_data(), None, "sinc", 4, "a", pytest.raises(ValueError,match="Invalid value for 'transition_bandwidth'. 
'transition_bandwidth' should be of type float")), - (sample_data_with_nan(), None, "sinc", 4, 0.02, pytest.raises(ValueError,match="The input signal contains NaN values, which are not supported for filtering")), - (sample_data_with_nan(), None, "butter", 4, 0.02, pytest.raises(ValueError, match="The input signal contains NaN values, which are not supported for filtering")) - -]) -def test_compute_filtered_signal_raise_errors(func, freq, data, fs, mode, order, transition_bandwidth, expected_exception): +@pytest.mark.parametrize( + "func, freq", + [ + (nap.apply_lowpass_filter, 10), + (nap.apply_highpass_filter, 10), + (nap.apply_bandpass_filter, [10, 20]), + (nap.apply_bandstop_filter, [10, 20]), + ], +) +@pytest.mark.parametrize( + "data, fs, mode, order, transition_bandwidth, expected_exception", + [ + ( + sample_data(), + None, + "butter", + "a", + 0.02, + pytest.raises( + ValueError, + match="Invalid value for 'order': Parameter 'order' should be of type int", + ), + ), + ( + "invalid_data", + None, + "butter", + 4, + 0.02, + pytest.raises( + ValueError, + match="Invalid value: invalid_data. First argument should be of type Tsd, TsdFrame or TsdTensor", + ), + ), + ( + sample_data(), + None, + "invalid_mode", + 4, + 0.02, + pytest.raises( + ValueError, + match="Unrecognized filter mode. Choose either 'butter' or 'sinc'", + ), + ), + ( + sample_data(), + "invalid_fs", + "butter", + 4, + 0.02, + pytest.raises( + ValueError, + match="Invalid value for 'fs'. Parameter 'fs' should be of type float or int", + ), + ), + ( + sample_data(), + None, + "sinc", + 4, + "a", + pytest.raises( + ValueError, + match="Invalid value for 'transition_bandwidth'. 'transition_bandwidth' should be of type float", + ), + ), + ( + sample_data_with_nan(), + None, + "sinc", + 4, + 0.02, + pytest.raises( + ValueError, + match="The input signal contains NaN values, which are not supported for filtering", + ), + ), + ( + sample_data_with_nan(), + None, + "butter", + 4, + 0.02, + pytest.raises( + ValueError, + match="The input signal contains NaN values, which are not supported for filtering", + ), + ), + ], +) +def test_compute_filtered_signal_raise_errors( + func, freq, data, fs, mode, order, transition_bandwidth, expected_exception +): with expected_exception: - func(data, freq, fs=fs, mode=mode, order=order, transition_bandwidth=transition_bandwidth) - -@pytest.mark.parametrize("func, freq, expected_exception", [ - (nap.apply_lowpass_filter, "a", pytest.raises(ValueError,match=r"lowpass filter require a single number. a provided instead.")), - (nap.apply_highpass_filter, "b", pytest.raises(ValueError,match=r"highpass filter require a single number. b provided instead.")), - (nap.apply_bandpass_filter, [10, "b"], pytest.raises(ValueError,match="bandpass filter require a tuple of two numbers. \[10, 'b'\] provided instead.")), - (nap.apply_bandstop_filter, [10, 20, 30], pytest.raises(ValueError,match=r"bandstop filter require a tuple of two numbers. \[10, 20, 30\] provided instead.")) -]) + func( + data, + freq, + fs=fs, + mode=mode, + order=order, + transition_bandwidth=transition_bandwidth, + ) + + +@pytest.mark.parametrize( + "func, freq, expected_exception", + [ + ( + nap.apply_lowpass_filter, + "a", + pytest.raises( + ValueError, + match=r"lowpass filter require a single number. a provided instead.", + ), + ), + ( + nap.apply_highpass_filter, + "b", + pytest.raises( + ValueError, + match=r"highpass filter require a single number. 
b provided instead.", + ), + ), + ( + nap.apply_bandpass_filter, + [10, "b"], + pytest.raises( + ValueError, + match="bandpass filter require a tuple of two numbers. \[10, 'b'\] provided instead.", + ), + ), + ( + nap.apply_bandstop_filter, + [10, 20, 30], + pytest.raises( + ValueError, + match=r"bandstop filter require a tuple of two numbers. \[10, 20, 30\] provided instead.", + ), + ), + ], +) def test_compute_filtered_signal_bad_freq(func, freq, expected_exception): with expected_exception: func(sample_data(), freq) @@ -238,33 +435,45 @@ def test_filtering_nyquist_edge_case(nyquist_fraction, order): np.testing.assert_allclose(out.t, data.t) np.testing.assert_allclose(out.time_support, data.time_support) + ################################################################# # Test windowedsinc kernel + @pytest.mark.parametrize("tb", [0.2, 0.3]) def test_get_odd_kernel(tb): - kernel = nap.process.filtering._get_windowed_sinc_kernel(1, "lowpass", 4, transition_bandwidth=tb) - assert len(kernel)%2 != 0 - -@pytest.mark.parametrize("filter_type, expected_exception", [ - ("a", pytest.raises(ValueError)), -]) + kernel = nap.process.filtering._get_windowed_sinc_kernel( + 1, "lowpass", 4, transition_bandwidth=tb + ) + assert len(kernel) % 2 != 0 + + +@pytest.mark.parametrize( + "filter_type, expected_exception", + [ + ("a", pytest.raises(ValueError)), + ], +) def test_get_kernel_error(filter_type, expected_exception): with expected_exception: nap.process.filtering._get_windowed_sinc_kernel(1, filter_type, 4) + def test_get__error(): - with pytest.raises(TypeError, match=r"apply_lowpass_filter\(\) missing 1 required positional argument: 'data'"): + with pytest.raises( + TypeError, + match=r"apply_lowpass_filter\(\) missing 1 required positional argument: 'data'", + ): nap.apply_lowpass_filter(cutoff=0.25) def test_compare_sinc_kernel(): kernel = nap.process.filtering._get_windowed_sinc_kernel(1, "lowpass", 4) - x = np.arange(-(len(kernel)//2), 1+len(kernel)//2) + x = np.arange(-(len(kernel) // 2), 1 + len(kernel) // 2) with warnings.catch_warnings(): warnings.simplefilter("ignore") - kernel2 = np.sin(2*np.pi*x*0.25)/x#(2*np.pi*x*0.25) - kernel2[len(kernel)//2] = 0.25*2*np.pi + kernel2 = np.sin(2 * np.pi * x * 0.25) / x # (2*np.pi*x*0.25) + kernel2[len(kernel) // 2] = 0.25 * 2 * np.pi kernel2 = kernel2 kernel2 = kernel2 * np.blackman(len(kernel2)) kernel2 /= kernel2.sum() @@ -275,27 +484,42 @@ def test_compare_sinc_kernel(): ikernel2[len(ikernel2) // 2] = 1.0 + ikernel2[len(kernel2) // 2] np.testing.assert_allclose(ikernel, ikernel2) -@pytest.mark.parametrize("cutoff, fs, filter_type, mode, order, tb", [ - (250, 1000, "lowpass", "butter", 4, 0.02), - (250, 1000, "lowpass", "sinc", 4, 0.02), -]) + +@pytest.mark.parametrize( + "cutoff, fs, filter_type, mode, order, tb", + [ + (250, 1000, "lowpass", "butter", 4, 0.02), + (250, 1000, "lowpass", "sinc", 4, 0.02), + ], +) def test_get_filter_frequency_response(cutoff, fs, filter_type, mode, order, tb): output = nap.get_filter_frequency_response(cutoff, fs, filter_type, mode, order, tb) assert isinstance(output, pd.Series) if mode == "butter": - sos = nap.process.filtering._get_butter_coefficients(cutoff, filter_type, fs, order) + sos = nap.process.filtering._get_butter_coefficients( + cutoff, filter_type, fs, order + ) w, h = signal.sosfreqz(sos, worN=1024, fs=fs) np.testing.assert_array_almost_equal(w, output.index.values) np.testing.assert_array_almost_equal(np.abs(h), output.values) if mode == "sinc": - kernel = 
nap.process.filtering._get_windowed_sinc_kernel(cutoff, filter_type, fs, tb) + kernel = nap.process.filtering._get_windowed_sinc_kernel( + cutoff, filter_type, fs, tb + ) fft_result = np.fft.fft(kernel) fft_result = np.fft.fftshift(fft_result) fft_freq = np.fft.fftfreq(n=len(kernel), d=1 / fs) fft_freq = np.fft.fftshift(fft_freq) - np.testing.assert_array_almost_equal(fft_freq[fft_freq >= 0], output.index.values) - np.testing.assert_array_almost_equal(np.abs(fft_result[fft_freq >= 0]), output.values) + np.testing.assert_array_almost_equal( + fft_freq[fft_freq >= 0], output.index.values + ) + np.testing.assert_array_almost_equal( + np.abs(fft_result[fft_freq >= 0]), output.values + ) + def test_get_filter_frequency_response_error(): - with pytest.raises(ValueError, match="Unrecognized filter mode. Choose either 'butter' or 'sinc'"): + with pytest.raises( + ValueError, match="Unrecognized filter mode. Choose either 'butter' or 'sinc'" + ): nap.get_filter_frequency_response(250, 1000, "lowpass", "a", 4, 0.02) diff --git a/tests/test_folder.py b/tests/test_folder.py index 4037eb71..caab88c9 100644 --- a/tests/test_folder.py +++ b/tests/test_folder.py @@ -6,19 +6,21 @@ """Tests of IO folder functions""" -import pynapple as nap +import json +import shutil +import warnings +from pathlib import Path + import numpy as np import pandas as pd import pytest -import warnings -from pathlib import Path -import shutil -import json + +import pynapple as nap # look for tests folder path = Path(__file__).parent -if path.name == 'pynapple': - path = path / "tests" +if path.name == "pynapple": + path = path / "tests" path = path / "npzfilestest" # Recursively remove the folder: @@ -27,25 +29,29 @@ # Populate the folder data = { - "tsd":nap.Tsd(t=np.arange(100), d=np.arange(100)), - "ts":nap.Ts(t=np.sort(np.random.rand(10)*100)), - "tsdframe":nap.TsdFrame(t=np.arange(100), d=np.random.rand(100,10)), - "tsgroup":nap.TsGroup({ + "tsd": nap.Tsd(t=np.arange(100), d=np.arange(100)), + "ts": nap.Ts(t=np.sort(np.random.rand(10) * 100)), + "tsdframe": nap.TsdFrame(t=np.arange(100), d=np.random.rand(100, 10)), + "tsgroup": nap.TsGroup( + { 0: nap.Ts(t=np.arange(0, 200)), 1: nap.Ts(t=np.arange(0, 200, 0.5), time_units="s"), 2: nap.Ts(t=np.arange(0, 300, 0.2), time_units="s"), - }), - "iset":nap.IntervalSet(start=np.array([0.0, 5.0]), end=np.array([1.0, 6.0])) - } + } + ), + "iset": nap.IntervalSet(start=np.array([0.0, 5.0]), end=np.array([1.0, 6.0])), +} for k, d in data.items(): - d.save(path / (k+".npz")) + d.save(path / (k + ".npz")) + @pytest.mark.parametrize("path", [path]) def test_load_folder(path): folder = nap.Folder(path) assert isinstance(folder, nap.Folder) + @pytest.mark.parametrize("path", [path]) def test_get_item(path): folder = nap.Folder(path) @@ -55,20 +61,23 @@ def test_get_item(path): assert type(data[k]) == type(d) assert type(folder.data[k]) == type(d) + ################################################################## folder = nap.Folder(path) + @pytest.mark.parametrize("folder", [folder]) def test_expand(folder): assert folder.expand() == None assert folder.view == None + @pytest.mark.parametrize("folder", [folder]) def test_save(folder): tsd2 = nap.Tsd(t=np.arange(10), d=np.arange(10)) folder.save("tsd2", tsd2, "Test description") - assert isinstance(folder['tsd2'], nap.Tsd) + assert isinstance(folder["tsd2"], nap.Tsd) files = [f.name for f in path.iterdir()] assert "tsd2.json" in files @@ -79,6 +88,7 @@ def test_save(folder): assert "info" in metadata.keys() assert "Test description" == 
metadata["info"] + # @pytest.mark.parametrize("folder", [folder]) # def test_metadata(folder): # tsd2 = nap.Tsd(t=np.arange(10), d=np.arange(10)) @@ -86,11 +96,12 @@ def test_save(folder): # folder.metadata("tsd2") # folder.doc("tsd2") + @pytest.mark.parametrize("path", [path]) def test_load(path): folder = nap.Folder(path) for k in data.keys(): - assert isinstance(folder.data[k], nap.NPZFile) + assert isinstance(folder.data[k], nap.NPZFile) folder.load() for k in data.keys(): assert type(folder[k]) == type(data[k]) diff --git a/tests/test_interval_set.py b/tests/test_interval_set.py index ec7105d0..23030b9b 100644 --- a/tests/test_interval_set.py +++ b/tests/test_interval_set.py @@ -1,12 +1,15 @@ """Tests for IntervalSet of `pynapple` package.""" -import pynapple as nap +import warnings +from pathlib import Path + import numpy as np import pandas as pd import pytest -import warnings + +import pynapple as nap + from .mock import MockArray -from pathlib import Path def test_create_iset(): @@ -17,10 +20,11 @@ def test_create_iset(): np.testing.assert_array_almost_equal(start, ep.start) np.testing.assert_array_almost_equal(end, ep.end) + def test_iset_properties(): start = [0, 10, 16, 25] end = [5, 15, 20, 40] - ep = nap.IntervalSet(start=start, end=end) + ep = nap.IntervalSet(start=start, end=end) assert isinstance(ep.starts, nap.Ts) assert isinstance(ep.ends, nap.Ts) np.testing.assert_array_almost_equal(np.array(start), ep.starts.index) @@ -30,6 +34,7 @@ def test_iset_properties(): assert ep.ndim == ep.values.ndim assert ep.size == ep.values.size + def test_iset_centers(): start = np.array([0, 10, 16, 25]) end = np.array([5, 15, 20, 40]) @@ -37,21 +42,23 @@ def test_iset_centers(): center_ts = ep.get_intervals_center() assert isinstance(center_ts, nap.Ts) - np.testing.assert_array_almost_equal(center_ts.index, start + (end-start)/2) + np.testing.assert_array_almost_equal(center_ts.index, start + (end - start) / 2) alpha = np.random.rand() center_ts = ep.get_intervals_center(alpha) assert isinstance(center_ts, nap.Ts) - np.testing.assert_array_almost_equal(center_ts.index, start + (end-start)*alpha) + np.testing.assert_array_almost_equal(center_ts.index, start + (end - start) * alpha) with pytest.raises(RuntimeError): ep.get_intervals_center({}) + def test_create_iset_from_scalars(): ep = nap.IntervalSet(start=0, end=10) np.testing.assert_approx_equal(ep.start[0], 0) np.testing.assert_approx_equal(ep.end[0], 10) + def test_create_iset_from_iset(): start = np.array([0, 10, 16, 25]) end = np.array([5, 15, 20, 40]) @@ -60,12 +67,14 @@ def test_create_iset_from_iset(): np.testing.assert_array_almost_equal(ep.start, ep2.start) np.testing.assert_array_almost_equal(ep.end, ep2.end) + def test_create_iset_from_df(): df = pd.DataFrame(data=[[16, 100]], columns=["start", "end"]) ep = nap.IntervalSet(df) np.testing.assert_array_almost_equal(df.start.values, ep.start) np.testing.assert_array_almost_equal(df.end.values, ep.end) + def test_create_iset_from_mock_array(): start = np.array([0, 200]) end = np.array([100, 300]) @@ -73,12 +82,19 @@ def test_create_iset_from_mock_array(): with warnings.catch_warnings(record=True) as w: ep = nap.IntervalSet(MockArray(start), MockArray(end)) - assert str(w[0].message) == "Converting 'start' to numpy.array. The provided array was of type 'MockArray'." - assert str(w[1].message) == "Converting 'end' to numpy.array. The provided array was of type 'MockArray'." - + assert ( + str(w[0].message) + == "Converting 'start' to numpy.array. 
The provided array was of type 'MockArray'." + ) + assert ( + str(w[1].message) + == "Converting 'end' to numpy.array. The provided array was of type 'MockArray'." + ) + np.testing.assert_array_almost_equal(ep.start, start) np.testing.assert_array_almost_equal(ep.end, end) + def test_create_iset_from_tuple(): start = 0 end = 5 @@ -87,6 +103,7 @@ def test_create_iset_from_tuple(): np.testing.assert_array_almost_equal(start, ep.start[0]) np.testing.assert_array_almost_equal(end, ep.end[0]) + def test_create_iset_from_tuple_iter(): start = [0, 10, 16, 25] end = [5, 15, 20, 40] @@ -96,13 +113,21 @@ def test_create_iset_from_tuple_iter(): np.testing.assert_array_almost_equal(start, ep.start) np.testing.assert_array_almost_equal(end, ep.end) -def test_create_iset_from_unknown_format(): + +def test_create_iset_from_unknown_format(): with pytest.raises(RuntimeError) as e: nap.IntervalSet(start="abc", end=[1, 2]) - assert str(e.value) == "Unknown format for start. Accepted formats are numpy.ndarray, list, tuple or any array-like objects." + assert ( + str(e.value) + == "Unknown format for start. Accepted formats are numpy.ndarray, list, tuple or any array-like objects." + ) with pytest.raises(RuntimeError) as e: - nap.IntervalSet(start=[1,2], end="abc") - assert str(e.value) == "Unknown format for end. Accepted formats are numpy.ndarray, list, tuple or any array-like objects." + nap.IntervalSet(start=[1, 2], end="abc") + assert ( + str(e.value) + == "Unknown format for end. Accepted formats are numpy.ndarray, list, tuple or any array-like objects." + ) + def test_create_iset_from_s(): start = np.array([0, 10, 16, 25]) @@ -127,87 +152,96 @@ def test_create_iset_from_us(): np.testing.assert_array_almost_equal(start * 1e-6, ep.start) np.testing.assert_array_almost_equal(end * 1e-6, ep.end) + def test_modify_iset(): start = np.around(np.array([0, 10, 16], dtype=np.float64), 9) end = np.around(np.array([5, 15, 20], dtype=np.float64), 9) - ep = nap.IntervalSet(start=start,end=end) + ep = nap.IntervalSet(start=start, end=end) with pytest.raises(RuntimeError) as e: - ep[0,0] = 1 - assert str(e.value) == "IntervalSet is immutable. Starts and ends have been already sorted." + ep[0, 0] = 1 + assert ( + str(e.value) + == "IntervalSet is immutable. Starts and ends have been already sorted." + ) + def test_get_iset(): start = np.array([0, 10, 16], dtype=np.float64) end = np.array([5, 15, 20], dtype=np.float64) - ep = nap.IntervalSet(start=start,end=end) + ep = nap.IntervalSet(start=start, end=end) - assert isinstance(ep['start'], np.ndarray) - assert isinstance(ep['end'], np.ndarray) - np.testing.assert_array_almost_equal(ep['start'], start) - np.testing.assert_array_almost_equal(ep['end'], end) + assert isinstance(ep["start"], np.ndarray) + assert isinstance(ep["end"], np.ndarray) + np.testing.assert_array_almost_equal(ep["start"], start) + np.testing.assert_array_almost_equal(ep["end"], end) with pytest.raises(IndexError) as e: - ep['a'] + ep["a"] assert str(e.value) == "Unknown string argument. 
Should be 'start' or 'end'" # Get a new IntervalSet ep2 = ep[0] assert isinstance(ep2, nap.IntervalSet) - np.testing.assert_array_almost_equal(ep2, np.array([[0., 5.]])) + np.testing.assert_array_almost_equal(ep2, np.array([[0.0, 5.0]])) ep2 = ep[0:2] assert isinstance(ep2, nap.IntervalSet) np.testing.assert_array_almost_equal(ep2, ep.values[0:2]) - ep2 = ep[[0,2]] + ep2 = ep[[0, 2]] assert isinstance(ep2, nap.IntervalSet) - np.testing.assert_array_almost_equal(ep2, ep.values[[0,2]]) + np.testing.assert_array_almost_equal(ep2, ep.values[[0, 2]]) - ep2 = ep[0:2,:] + ep2 = ep[0:2, :] assert isinstance(ep2, nap.IntervalSet) np.testing.assert_array_almost_equal(ep2, ep.values[0:2]) - ep2 = ep[0:2,0:2] + ep2 = ep[0:2, 0:2] assert isinstance(ep2, nap.IntervalSet) np.testing.assert_array_almost_equal(ep2, ep.values[0:2]) - ep2 = ep[:,0] + ep2 = ep[:, 0] np.testing.assert_array_almost_equal(ep2, ep.start) - ep2 = ep[:,1] + ep2 = ep[:, 1] np.testing.assert_array_almost_equal(ep2, ep.end) with pytest.raises(IndexError) as e: - ep[:,0,3] - assert str(e.value) == "too many indices for IntervalSet: IntervalSet is 2-dimensional" + ep[:, 0, 3] + assert ( + str(e.value) == "too many indices for IntervalSet: IntervalSet is 2-dimensional" + ) + def test_get_iset_with_series(): start = np.array([0, 10, 16], dtype=np.float64) end = np.array([5, 15, 20], dtype=np.float64) - ep = nap.IntervalSet(start=start,end=end) + ep = nap.IntervalSet(start=start, end=end) bool_series = pd.Series([True, False, True]) ep2 = ep[bool_series] assert isinstance(ep2, nap.IntervalSet) - np.testing.assert_array_almost_equal(ep2.values, ep[[0,2]].values) + np.testing.assert_array_almost_equal(ep2.values, ep[[0, 2]].values) + def test_iset_loc(): start = np.array([0, 10, 16], dtype=np.float64) end = np.array([5, 15, 20], dtype=np.float64) - ep = nap.IntervalSet(start=start,end=end) + ep = nap.IntervalSet(start=start, end=end) np.testing.assert_array_almost_equal(ep.loc[0], ep.values[0]) assert isinstance(ep.loc[[0]], nap.IntervalSet) np.testing.assert_array_almost_equal(ep.loc[[0]], ep[0]) - np.testing.assert_array_almost_equal(ep.loc['start'], start) - np.testing.assert_array_almost_equal(ep.loc['end'], end) + np.testing.assert_array_almost_equal(ep.loc["start"], start) + np.testing.assert_array_almost_equal(ep.loc["end"], end) def test_array_ufunc(): start = np.array([0, 10, 16], dtype=np.float64) end = np.array([5, 15, 20], dtype=np.float64) - ep = nap.IntervalSet(start=start,end=end) + ep = nap.IntervalSet(start=start, end=end) with warnings.catch_warnings(record=True) as w: out = np.exp(ep) @@ -215,27 +249,29 @@ def test_array_ufunc(): np.testing.assert_array_almost_equal(out, np.exp(ep.values)) with warnings.catch_warnings(record=True) as w: - out = ep*2 + out = ep * 2 assert str(w[0].message) == "Converting IntervalSet to numpy.array" - np.testing.assert_array_almost_equal(out, ep.values*2) + np.testing.assert_array_almost_equal(out, ep.values * 2) with warnings.catch_warnings(record=True) as w: out = ep + ep assert str(w[0].message) == "Converting IntervalSet to numpy.array" - np.testing.assert_array_almost_equal(out, ep.values*2) + np.testing.assert_array_almost_equal(out, ep.values * 2) # test warning from contextlib import nullcontext as does_not_raise + nap.nap_config.suppress_conversion_warnings = True with does_not_raise(): np.exp(ep) nap.nap_config.suppress_conversion_warnings = False + def test_array_func(): start = np.array([0, 10, 16], dtype=np.float64) end = np.array([5, 15, 20], dtype=np.float64) - ep = 
nap.IntervalSet(start=start,end=end) + ep = nap.IntervalSet(start=start, end=end) with warnings.catch_warnings(record=True) as w: out = np.vstack((ep, ep)) @@ -249,12 +285,14 @@ def test_array_func(): # test warning from contextlib import nullcontext as does_not_raise + nap.nap_config.suppress_conversion_warnings = True with does_not_raise(): out = np.ravel(ep) nap.nap_config.suppress_conversion_warnings = False + def test_timespan(): start = [0, 10, 16, 25] end = [5, 15, 20, 40] @@ -277,17 +315,29 @@ def test_tot_length(): def test_as_units(): ep = nap.IntervalSet(start=0, end=100) - df = pd.DataFrame(data=np.array([[0.0, 100.0]]), columns=["start", "end"], dtype=np.float64) - np.testing.assert_array_almost_equal(df.values, ep.as_units("s").values.astype(np.float64)) - np.testing.assert_array_almost_equal(df * 1e3, ep.as_units("ms").values.astype(np.float64)) + df = pd.DataFrame( + data=np.array([[0.0, 100.0]]), columns=["start", "end"], dtype=np.float64 + ) + np.testing.assert_array_almost_equal( + df.values, ep.as_units("s").values.astype(np.float64) + ) + np.testing.assert_array_almost_equal( + df * 1e3, ep.as_units("ms").values.astype(np.float64) + ) tmp = df * 1e6 - np.testing.assert_array_almost_equal(tmp.values, ep.as_units("us").values.astype(np.float64)) + np.testing.assert_array_almost_equal( + tmp.values, ep.as_units("us").values.astype(np.float64) + ) + def test_as_dataframe(): ep = nap.IntervalSet(start=0, end=100) - df = pd.DataFrame(data=np.array([[0.0, 100.0]]), columns=["start", "end"], dtype=np.float64) + df = pd.DataFrame( + data=np.array([[0.0, 100.0]]), columns=["start", "end"], dtype=np.float64 + ) np.testing.assert_array_almost_equal(df.values, ep.as_dataframe().values) + def test_intersect(): ep = nap.IntervalSet(start=[0, 30], end=[10, 70]) ep2 = nap.IntervalSet(start=40, end=100) @@ -315,11 +365,10 @@ def test_set_diff(): def test_in_interval(): ep = nap.IntervalSet(start=[0, 30], end=[10, 70]) - tsd = nap.Ts(t=np.array([5, 20, 50, 100])) + tsd = nap.Ts(t=np.array([5, 20, 50, 100])) tmp = ep.in_interval(tsd) - np.testing.assert_array_almost_equal( - tmp, np.array([0.0, np.nan, 1.0, np.nan]) - ) + np.testing.assert_array_almost_equal(tmp, np.array([0.0, np.nan, 1.0, np.nan])) + def test_drop_short_intervals(): ep = nap.IntervalSet(start=np.array([0, 10, 16, 25]), end=np.array([5, 15, 20, 40])) @@ -349,7 +398,9 @@ def test_merge_close_intervals(): ep = nap.IntervalSet(start=np.array([0, 10, 16]), end=np.array([5, 15, 20])) ep2 = nap.IntervalSet(start=np.array([0, 10]), end=np.array([5, 20])) np.testing.assert_array_almost_equal(ep.merge_close_intervals(4.0), ep2) - np.testing.assert_array_almost_equal(ep.merge_close_intervals(4.0, time_units="s"), ep2) + np.testing.assert_array_almost_equal( + ep.merge_close_intervals(4.0, time_units="s"), ep2 + ) np.testing.assert_array_almost_equal( ep.merge_close_intervals(4.0 * 1e3, time_units="ms"), ep2 ) @@ -357,6 +408,7 @@ def test_merge_close_intervals(): ep.merge_close_intervals(4.0 * 1e6, time_units="us"), ep2 ) + def test_merge_close_intervals_empty(): ep = nap.IntervalSet(start=np.array([]), end=np.array([])) ep = ep.merge_close_intervals(1) @@ -368,10 +420,11 @@ def test_jitfix_iset(): ends = np.array([5, 15, 20]) ep, to_warn = nap.core._jitted_functions._jitfix_iset(starts, ends) - np.testing.assert_array_almost_equal(starts, ep[:,0]) - np.testing.assert_array_almost_equal(ends, ep[:,1]) + np.testing.assert_array_almost_equal(starts, ep[:, 0]) + np.testing.assert_array_almost_equal(ends, ep[:, 1]) 
np.testing.assert_array_almost_equal(to_warn, np.zeros(4)) + def test_jitfix_iset_error0(): start = np.around(np.array([0, 10, 15], dtype=np.float64), 9) end = np.around(np.array([10, 15, 20], dtype=np.float64), 9) @@ -380,13 +433,16 @@ def test_jitfix_iset_error0(): end[1:] -= 1e-6 - np.testing.assert_array_almost_equal(start, ep[:,0]) - np.testing.assert_array_almost_equal(end, ep[:,1]) + np.testing.assert_array_almost_equal(start, ep[:, 0]) + np.testing.assert_array_almost_equal(end, ep[:, 1]) np.testing.assert_array_equal(to_warn, np.array([True, False, False, False])) with warnings.catch_warnings(record=True) as w: nap.IntervalSet(start=start, end=end) - assert str(w[0].message) == "Some starts and ends are equal. Removing 1 microsecond!" + assert ( + str(w[0].message) == "Some starts and ends are equal. Removing 1 microsecond!" + ) + def test_jitfix_iset_error1(): """ @@ -397,14 +453,15 @@ def test_jitfix_iset_error1(): ep, to_warn = nap.core._jitted_functions._jitfix_iset(start, end) - np.testing.assert_array_almost_equal(start[[0,2]], ep[:,0]) - np.testing.assert_array_almost_equal(end[[0,2]], ep[:,1]) + np.testing.assert_array_almost_equal(start[[0, 2]], ep[:, 0]) + np.testing.assert_array_almost_equal(end[[0, 2]], ep[:, 1]) np.testing.assert_array_equal(to_warn, np.array([False, True, False, False])) with warnings.catch_warnings(record=True) as w: nap.IntervalSet(start=start, end=end) assert str(w[0].message) == "Some ends precede the relative start. Dropping them!" + def test_jitfix_iset_error2(): """ Some starts precede the previous end. Joining them! @@ -414,14 +471,15 @@ def test_jitfix_iset_error2(): ep, to_warn = nap.core._jitted_functions._jitfix_iset(start, end) - np.testing.assert_array_almost_equal(start[[0,2]], ep[:,0]) - np.testing.assert_array_almost_equal(end[[1,2]], ep[:,1]) + np.testing.assert_array_almost_equal(start[[0, 2]], ep[:, 0]) + np.testing.assert_array_almost_equal(end[[1, 2]], ep[:, 1]) np.testing.assert_array_equal(to_warn, np.array([False, False, True, False])) with warnings.catch_warnings(record=True) as w: nap.IntervalSet(start=start, end=end) assert str(w[0].message) == "Some starts precede the previous end. Joining them!" 
+ def test_jitfix_iset_error3(): """ Some epochs have no duration @@ -431,14 +489,15 @@ def test_jitfix_iset_error3(): ep, to_warn = nap.core._jitted_functions._jitfix_iset(start, end) - np.testing.assert_array_almost_equal(start[[0,2]], ep[:,0]) - np.testing.assert_array_almost_equal(end[[0,2]], ep[:,1]) + np.testing.assert_array_almost_equal(start[[0, 2]], ep[:, 0]) + np.testing.assert_array_almost_equal(end[[0, 2]], ep[:, 1]) np.testing.assert_array_equal(to_warn, np.array([False, False, False, True])) with warnings.catch_warnings(record=True) as w: nap.IntervalSet(start=start, end=end) assert str(w[0].message) == "Some epochs have no duration" + def test_jitfix_iset_random(): for i in range(10): np.random.seed(42) @@ -448,98 +507,108 @@ def test_jitfix_iset_random(): ep, to_warn = nap.core._jitted_functions._jitfix_iset(start, end) if len(ep): - assert np.all(ep[:,1] - ep[:,0] > 0) - assert np.all(np.diff(ep.flatten())>0) + assert np.all(ep[:, 1] - ep[:, 0] > 0) + assert np.all(np.diff(ep.flatten()) > 0) + def test_raise_warning(): start = np.around(np.array([0, 15, 16], dtype=np.float64), 9) end = np.around(np.array([5, 15, 20], dtype=np.float64), 9) with pytest.warns(UserWarning, match=r"Some epochs have no duration"): - nap.IntervalSet(start=start,end=end) + nap.IntervalSet(start=start, end=end) + def test_iset_wrong_columns(): df = pd.DataFrame(data=[[16, 100]], columns=["start", "endssss"]) - + with pytest.raises(Exception) as e_info: nap.IntervalSet(df) + def test_iset_diff_length(): with pytest.raises(Exception) as e_info: nap.IntervalSet(start=np.array([0, 10, 16]), end=np.array([5, 15, 20, 40])) assert str(e_info.value) == "Starts end ends are not of the same length" - + def test_sort_starts(): start = np.around(np.array([10, 0, 16], dtype=np.float64), 9) end = np.around(np.array([5, 15, 20], dtype=np.float64), 9) with pytest.warns(UserWarning, match=r"start is not sorted. Sorting it."): - ep = nap.IntervalSet(start=start,end=end) - np.testing.assert_array_almost_equal(np.sort(start), ep.values[:,0]) + ep = nap.IntervalSet(start=start, end=end) + np.testing.assert_array_almost_equal(np.sort(start), ep.values[:, 0]) + def test_sort_ends(): start = np.around(np.array([0, 10, 16], dtype=np.float64), 9) end = np.around(np.array([15, 5, 20], dtype=np.float64), 9) with pytest.warns(UserWarning, match=r"end is not sorted. Sorting it."): - ep = nap.IntervalSet(start=start,end=end) - np.testing.assert_array_almost_equal(np.sort(end), ep.values[:,1]) + ep = nap.IntervalSet(start=start, end=end) + np.testing.assert_array_almost_equal(np.sort(end), ep.values[:, 1]) + def test_repr_(): start = np.around(np.array([0, 10, 16], dtype=np.float64), 9) end = np.around(np.array([5, 15, 20], dtype=np.float64), 9) - ep = nap.IntervalSet(start=start,end=end) + ep = nap.IntervalSet(start=start, end=end) assert isinstance(ep.__repr__(), str) + def test_str_(): start = np.around(np.array([0, 10, 16], dtype=np.float64), 9) end = np.around(np.array([5, 15, 20], dtype=np.float64), 9) - ep = nap.IntervalSet(start=start,end=end) + ep = nap.IntervalSet(start=start, end=end) assert isinstance(ep.__str__(), str) + def test_save_npz(): start = np.around(np.array([0, 10, 16], dtype=np.float64), 9) end = np.around(np.array([5, 15, 20], dtype=np.float64), 9) - ep = nap.IntervalSet(start=start,end=end) + ep = nap.IntervalSet(start=start, end=end) with pytest.raises(TypeError) as e: ep.save(dict) with pytest.raises(RuntimeError) as e: - ep.save('./') - assert str(e.value) == "Invalid filename input. 
{} is directory.".format(Path("./").resolve()) + ep.save("./") + assert str(e.value) == "Invalid filename input. {} is directory.".format( + Path("./").resolve() + ) - fake_path = './fake/path' + fake_path = "./fake/path" with pytest.raises(RuntimeError) as e: - ep.save(fake_path+'/file.npz') + ep.save(fake_path + "/file.npz") assert str(e.value) == "Path {} does not exist.".format(Path(fake_path).resolve()) ep.save("ep.npz") - assert "ep.npz" in [f.name for f in Path('.').iterdir()] + assert "ep.npz" in [f.name for f in Path(".").iterdir()] ep.save("ep2") - assert "ep2.npz" in [f.name for f in Path('.').iterdir()] + assert "ep2.npz" in [f.name for f in Path(".").iterdir()] with np.load("ep.npz") as file: keys = list(file.keys()) - assert 'start' in keys - assert 'end' in keys + assert "start" in keys + assert "end" in keys - np.testing.assert_array_almost_equal(file['start'], start) - np.testing.assert_array_almost_equal(file['end'], end) + np.testing.assert_array_almost_equal(file["start"], start) + np.testing.assert_array_almost_equal(file["end"], end) - # Cleaning + # Cleaning Path("ep.npz").unlink() - Path("ep2.npz").unlink() + Path("ep2.npz").unlink() + def test_split(): np.random.seed(0) start = np.round(np.random.uniform(0, 10)) end = np.round(np.random.uniform(90, 100)) - tmp = np.linspace(start, end, 100) + tmp = np.linspace(start, end, 100) interval_size = np.round(tmp[1] - tmp[0], 9) with warnings.catch_warnings(): - warnings.simplefilter("ignore") + warnings.simplefilter("ignore") ep0 = nap.IntervalSet(tmp[0:-1], tmp[1:]) ep = nap.IntervalSet(tmp[0], tmp[-1]) ep1 = ep.split(interval_size) @@ -547,9 +616,9 @@ def test_split(): # Test with a smaller epochs start = np.hstack((tmp[0:-1], np.array([200]))) - end = np.hstack((tmp[1:], np.array([200+0.9*interval_size]))) + end = np.hstack((tmp[1:], np.array([200 + 0.9 * interval_size]))) with warnings.catch_warnings(): - warnings.simplefilter("ignore") + warnings.simplefilter("ignore") ep2 = nap.IntervalSet(start, end) ep = nap.IntervalSet([start[0], 200], end[-2:]) @@ -560,23 +629,21 @@ def test_split(): ep = nap.IntervalSet([], []) assert len(ep.split(1)) == 0 + def test_split_errors(): start = [0, 10, 16, 25] end = [5, 15, 20, 40] ep = nap.IntervalSet(start=start, end=end) - with pytest.raises(IOError, match="Argument interval_size should of type float or int"): - ep.split('a') + with pytest.raises( + IOError, match="Argument interval_size should of type float or int" + ): + ep.split("a") with pytest.raises(IOError) as e: ep.split(0) assert str(e.value) == "Argument interval_size should be strictly larger than 0" - + with pytest.raises(IOError) as e: ep.split(1, time_units=1) assert str(e.value) == "Argument time_units should be of type str" - - - - - diff --git a/tests/test_jitted.py b/tests/test_jitted.py index 5b8a4aa1..61867b86 100644 --- a/tests/test_jitted.py +++ b/tests/test_jitted.py @@ -1,39 +1,37 @@ """Tests of jitted core functions for `pynapple` package.""" -import pynapple as nap +import warnings + import numpy as np import pandas as pd import pytest -import warnings + +import pynapple as nap def get_example_dataset(n=100): with warnings.catch_warnings(): - warnings.simplefilter("ignore") + warnings.simplefilter("ignore") starts = np.sort(np.random.uniform(0, 1000, n)) - ep = nap.IntervalSet( - start = starts, - end = starts + np.random.uniform(1, 10, n) - ) - tsd = nap.Tsd(t=np.sort(np.random.uniform(0, 1000, n*2)), d = np.random.rand(n*2)) - ts = nap.Ts(t=np.sort(np.random.uniform(0, 1000, n*2))) - tsdframe = 
nap.TsdFrame(t=np.sort(np.random.uniform(0, 1000, n*2)), d = np.random.rand(n*2,3)) + ep = nap.IntervalSet(start=starts, end=starts + np.random.uniform(1, 10, n)) + tsd = nap.Tsd( + t=np.sort(np.random.uniform(0, 1000, n * 2)), d=np.random.rand(n * 2) + ) + ts = nap.Ts(t=np.sort(np.random.uniform(0, 1000, n * 2))) + tsdframe = nap.TsdFrame( + t=np.sort(np.random.uniform(0, 1000, n * 2)), d=np.random.rand(n * 2, 3) + ) return (ep, ts, tsd, tsdframe) + def get_example_isets(n=100): with warnings.catch_warnings(): - warnings.simplefilter("ignore") + warnings.simplefilter("ignore") starts = np.sort(np.random.uniform(0, 1000, n)) - ep1 = nap.IntervalSet( - start = starts, - end = starts + np.random.uniform(1, 10, n) - ) + ep1 = nap.IntervalSet(start=starts, end=starts + np.random.uniform(1, 10, n)) starts = np.sort(np.random.uniform(0, 1000, n)) - ep2 = nap.IntervalSet( - start = starts, - end = starts + np.random.uniform(1, 10, n) - ) + ep2 = nap.IntervalSet(start=starts, end=starts + np.random.uniform(1, 10, n)) return ep1, ep2 @@ -58,98 +56,126 @@ def restrict(ep, tsd): ix3 = np.floor(ix3 / 2) ix3[np.isnan(ix3[:, 0]), 0] = ix3[np.isnan(ix3[:, 0]), 1] - ix = ix3[:,0] + ix = ix3[:, 0] idx = ~np.isnan(ix) if not hasattr(tsd, "values"): return pd.Series(index=tsd.index[idx], dtype="object") else: return pd.Series(index=tsd.index[idx], data=tsd.values[idx]) - + + def test_jitrestrict(): - for i in range(100): + for i in range(100): ep, ts, tsd, tsdframe = get_example_dataset() tsd2 = restrict(ep, tsd) ix = nap.core._jitted_functions.jitrestrict(tsd.index, ep.start, ep.end) - tsd3 = pd.Series(index=tsd.index[ix], data=tsd.values[ix]) + tsd3 = pd.Series(index=tsd.index[ix], data=tsd.values[ix]) np.testing.assert_array_almost_equal(tsd2.values, tsd3.values) np.testing.assert_array_almost_equal(tsd2.index.values, tsd3.index.values) + def test_jitrestrict_with_count(): for i in range(100): ep, ts, tsd, tsdframe = get_example_dataset() tsd2 = restrict(ep, tsd) - ix, count = nap.core._jitted_functions.jitrestrict_with_count(tsd.index, ep.start, ep.end) + ix, count = nap.core._jitted_functions.jitrestrict_with_count( + tsd.index, ep.start, ep.end + ) tsd3 = pd.Series(index=tsd.index[ix], data=tsd.values[ix]) np.testing.assert_array_almost_equal(tsd2.values, tsd3.values) np.testing.assert_array_almost_equal(tsd2.index.values, tsd3.index.values) - bins = ep.values.ravel() - ix = np.array(pd.cut(tsd.index, bins, labels=np.arange(len(bins) - 1, dtype=np.float64))) - ix2 = np.array(pd.cut(tsd.index,bins,labels=np.arange(len(bins) - 1, dtype=np.float64),right=False,)) + ix = np.array( + pd.cut(tsd.index, bins, labels=np.arange(len(bins) - 1, dtype=np.float64)) + ) + ix2 = np.array( + pd.cut( + tsd.index, + bins, + labels=np.arange(len(bins) - 1, dtype=np.float64), + right=False, + ) + ) ix3 = np.vstack((ix, ix2)).T ix3[np.floor(ix3 / 2) * 2 != ix3] = np.nan ix3 = np.floor(ix3 / 2) ix3[np.isnan(ix3[:, 0]), 0] = ix3[np.isnan(ix3[:, 0]), 1] - ix = ix3[:,0] - count2 = np.array([np.sum(ix==j) for j in range(len(ep))]) + ix = ix3[:, 0] + count2 = np.array([np.sum(ix == j) for j in range(len(ep))]) np.testing.assert_array_equal(count, count2) + def test_jitthreshold(): for i in range(100): ep, ts, tsd, tsdframe = get_example_dataset() thr = np.random.rand() - t, d, s, e = nap.core._jitted_functions.jitthreshold(tsd.index, tsd.values, ep.start, ep.end, thr) + t, d, s, e = nap.core._jitted_functions.jitthreshold( + tsd.index, tsd.values, ep.start, ep.end, thr + ) assert len(t) == np.sum(tsd.values > thr) assert len(d) 
== np.sum(tsd.values > thr) np.testing.assert_array_equal(d, tsd.values[tsd.values > thr]) - t, d, s, e = nap.core._jitted_functions.jitthreshold(tsd.index, tsd.values, ep.start, ep.end, thr, "below") + t, d, s, e = nap.core._jitted_functions.jitthreshold( + tsd.index, tsd.values, ep.start, ep.end, thr, "below" + ) assert len(t) == np.sum(tsd.values < thr) assert len(d) == np.sum(tsd.values < thr) np.testing.assert_array_equal(d, tsd.values[tsd.values < thr]) - t, d, s, e = nap.core._jitted_functions.jitthreshold(tsd.index, tsd.values, ep.start, ep.end, thr, "aboveequal") - + t, d, s, e = nap.core._jitted_functions.jitthreshold( + tsd.index, tsd.values, ep.start, ep.end, thr, "aboveequal" + ) + assert len(t) == np.sum(tsd.values >= thr) assert len(d) == np.sum(tsd.values >= thr) np.testing.assert_array_equal(d, tsd.values[tsd.values >= thr]) - t, d, s, e = nap.core._jitted_functions.jitthreshold(tsd.index, tsd.values, ep.start, ep.end, thr, "belowequal") + t, d, s, e = nap.core._jitted_functions.jitthreshold( + tsd.index, tsd.values, ep.start, ep.end, thr, "belowequal" + ) assert len(t) == np.sum(tsd.values <= thr) assert len(d) == np.sum(tsd.values <= thr) np.testing.assert_array_equal(d, tsd.values[tsd.values <= thr]) - # with warnings.catch_warnings(record=True) as w: # new_ep = nap.IntervalSet(start=s, end=e) # new_tsd = restrict(new_ep, tsd) + def test_jitvalue_from(): for i in range(100): ep, ts, tsd, tsdframe = get_example_dataset() - t, d = nap.core._core_functions._value_from(ts.t, tsd.t, tsd.d, ep.start, ep.end) - + t, d = nap.core._core_functions._value_from( + ts.t, tsd.t, tsd.d, ep.start, ep.end + ) + tsd3 = pd.Series(index=t, data=d) tsd2 = [] for j in ep.index: ix = ts.restrict(ep[j]).index if len(ix): - tsd2.append(tsd.restrict(ep[j]).as_series().reindex(ix, method="nearest").fillna(0.0)) - + tsd2.append( + tsd.restrict(ep[j]) + .as_series() + .reindex(ix, method="nearest") + .fillna(0.0) + ) + tsd2 = pd.concat(tsd2) - + np.testing.assert_array_almost_equal(tsd2.values, tsd3.values) np.testing.assert_array_almost_equal(tsd2.index.values, tsd3.index.values) @@ -162,21 +188,23 @@ def test_jitcount(): starts = ep.start ends = ep.end bin_size = 1.0 - t, d = nap.core._jitted_functions.jitcount(time_array, starts, ends, bin_size, np.int64) - tsd3 = nap.Tsd(t=t, d=d, time_support = ep) + t, d = nap.core._jitted_functions.jitcount( + time_array, starts, ends, bin_size, np.int64 + ) + tsd3 = nap.Tsd(t=t, d=d, time_support=ep) tsd2 = [] for j in ep.index: - bins = np.arange(ep[j,0], ep[j,1]+1.0, 1.0) - idx = np.digitize(ts.restrict(ep[j]).index, bins)-1 - tmp = np.array([np.sum(idx==j) for j in range(len(bins)-1)]) - tmp = nap.Tsd(t = bins[0:-1] + np.diff(bins)/2, d = tmp) - tmp = tmp.restrict(ep[j]) + bins = np.arange(ep[j, 0], ep[j, 1] + 1.0, 1.0) + idx = np.digitize(ts.restrict(ep[j]).index, bins) - 1 + tmp = np.array([np.sum(idx == j) for j in range(len(bins) - 1)]) + tmp = nap.Tsd(t=bins[0:-1] + np.diff(bins) / 2, d=tmp) + tmp = tmp.restrict(ep[j]) tsd2.append(tmp.as_series()) tsd2 = pd.concat(tsd2) - + np.testing.assert_array_almost_equal(tsd2.values, tsd3.values) np.testing.assert_array_almost_equal(tsd2.index.values, tsd3.index.values) @@ -190,22 +218,24 @@ def test_jitbin(): starts = ep.start ends = ep.end bin_size = 1.0 - t, d = nap.core._jitted_functions.jitbin_array(time_array, data_array, starts, ends, bin_size) + t, d = nap.core._jitted_functions.jitbin_array( + time_array, data_array, starts, ends, bin_size + ) # tsd3 = nap.Tsd(t=t, d=d, time_support = ep) tsd3 = 
pd.Series(index=t, data=d) tsd3 = tsd3.fillna(0.0) tsd2 = [] - for j in ep.index: - bins = np.arange(ep[j,0], ep[j,1]+1.0, 1.0) + for j in ep.index: + bins = np.arange(ep[j, 0], ep[j, 1] + 1.0, 1.0) aa = tsd.restrict(ep[j]) - tmp = np.zeros((len(bins)-1)) + tmp = np.zeros((len(bins) - 1)) if len(aa): - idx = np.digitize(aa.index, bins)-1 + idx = np.digitize(aa.index, bins) - 1 for k in np.unique(idx): - tmp[k] = np.mean(aa.values[idx==k]) - - tmp = nap.Tsd(t = bins[0:-1] + np.diff(bins)/2, d = tmp) + tmp[k] = np.mean(aa.values[idx == k]) + + tmp = nap.Tsd(t=bins[0:-1] + np.diff(bins) / 2, d=tmp) tmp = tmp.restrict(ep[j]) # pd.testing.assert_series_equal(tmp, tsd3.restrict(ep.loc[[j]])) @@ -215,7 +245,7 @@ def test_jitbin(): tsd2 = pd.concat(tsd2) # tsd2 = nap.Tsd(tsd2) tsd2 = tsd2.fillna(0.0) - + np.testing.assert_array_almost_equal(tsd2.values, tsd3.values) np.testing.assert_array_almost_equal(tsd2.index.values, tsd3.index.values) @@ -229,23 +259,24 @@ def test_jitbin_array(): starts = ep.start ends = ep.end bin_size = 1.0 - t, d = nap.core._jitted_functions.jitbin_array(time_array, data_array, starts, ends, bin_size) + t, d = nap.core._jitted_functions.jitbin_array( + time_array, data_array, starts, ends, bin_size + ) tsd3 = pd.DataFrame(index=t, data=d) tsd3 = tsd3.fillna(0.0) # tsd3 = nap.TsdFrame(tsd3, time_support = ep) - tsd2 = [] for j in ep.index: - bins = np.arange(ep[j,0], ep[j,1]+1.0, 1.0) + bins = np.arange(ep[j, 0], ep[j, 1] + 1.0, 1.0) aa = tsdframe.restrict(ep[j]) - tmp = np.zeros((len(bins)-1, tsdframe.shape[1])) + tmp = np.zeros((len(bins) - 1, tsdframe.shape[1])) if len(aa): - idx = np.digitize(aa.index, bins)-1 + idx = np.digitize(aa.index, bins) - 1 for k in np.unique(idx): - tmp[k] = np.mean(aa.values[idx==k], 0) - - tmp = nap.TsdFrame(t = bins[0:-1] + np.diff(bins)/2, d = tmp) + tmp[k] = np.mean(aa.values[idx == k], 0) + + tmp = nap.TsdFrame(t=bins[0:-1] + np.diff(bins) / 2, d=tmp) tmp = tmp.restrict(ep[j]) # pd.testing.assert_series_equal(tmp, tsd3.restrict(ep.loc[[j]])) @@ -256,16 +287,18 @@ def test_jitbin_array(): # tsd2 = nap.TsdFrame(tsd2) np.testing.assert_array_almost_equal(tsd3.values, tsd2.values) - np.testing.assert_array_almost_equal(tsd3.index.values, tsd2.index.values) + np.testing.assert_array_almost_equal(tsd3.index.values, tsd2.index.values) + def test_jitintersect(): for i in range(10): ep1, ep2 = get_example_isets() - s, e = nap.core._jitted_functions.jitintersect(ep1.start, ep1.end, ep2.start, ep2.end) + s, e = nap.core._jitted_functions.jitintersect( + ep1.start, ep1.end, ep2.start, ep2.end + ) ep3 = nap.IntervalSet(s, e) - i_sets = [ep1, ep2] n_sets = len(i_sets) time1 = [i_set["start"] for i_set in i_sets] @@ -273,10 +306,12 @@ def test_jitintersect(): time1.extend(time2) time = np.hstack(time1) - start_end = np.hstack(( + start_end = np.hstack( + ( np.ones(len(time) // 2, dtype=np.int32), -1 * np.ones(len(time) // 2, dtype=np.int32), - )) + ) + ) df = pd.DataFrame({"time": time, "start_end": start_end}) df.sort_values(by="time", inplace=True) @@ -290,14 +325,16 @@ def test_jitintersect(): np.testing.assert_array_almost_equal(ep3, ep4) + def test_jitunion(): for i in range(10): ep1, ep2 = get_example_isets() - s, e = nap.core._jitted_functions.jitunion(ep1.start, ep1.end, ep2.start, ep2.end) + s, e = nap.core._jitted_functions.jitunion( + ep1.start, ep1.end, ep2.start, ep2.end + ) ep3 = nap.IntervalSet(s, e) - i_sets = [ep1, ep2] time = np.hstack( [i_set["start"] for i_set in i_sets] + [i_set["end"] for i_set in i_sets] @@ -314,20 +351,23 @@ 
def test_jitunion(): df.sort_values(by="time", inplace=True) df.reset_index(inplace=True, drop=True) df["cumsum"] = df["start_end"].cumsum() - ix_stop = (df["cumsum"] == 0).to_numpy().nonzero()[0] + ix_stop = (df["cumsum"] == 0).to_numpy().nonzero()[0] ix_start = np.hstack((0, ix_stop[:-1] + 1)) start = df["time"][ix_start] stop = df["time"][ix_stop] ep4 = nap.IntervalSet(start, stop) - + np.testing.assert_array_almost_equal(ep3, ep4) - + + def test_jitdiff(): for i in range(10): ep1, ep2 = get_example_isets() - s, e = nap.core._jitted_functions.jitdiff(ep1.start, ep1.end, ep2.start, ep2.end) + s, e = nap.core._jitted_functions.jitdiff( + ep1.start, ep1.end, ep2.start, ep2.end + ) ep3 = nap.IntervalSet(s, e) i_sets = (ep1, ep2) @@ -359,9 +399,10 @@ def test_jitdiff(): idx = start != end ep4 = nap.IntervalSet(start[idx], end[idx]) - + np.testing.assert_array_almost_equal(ep3, ep4) + def test_jitunion_isets(): for i in range(10): ep1, ep2 = get_example_isets() @@ -371,7 +412,6 @@ def test_jitunion_isets(): ep6 = nap.core.ts_group._union_intervals(i_sets) - time = np.hstack( [i_set["start"] for i_set in i_sets] + [i_set["end"] for i_set in i_sets] ) @@ -387,13 +427,13 @@ def test_jitunion_isets(): df.sort_values(by="time", inplace=True) df.reset_index(inplace=True, drop=True) df["cumsum"] = df["start_end"].cumsum() - ix_stop = (df["cumsum"] == 0).to_numpy().nonzero()[0] + ix_stop = (df["cumsum"] == 0).to_numpy().nonzero()[0] ix_start = np.hstack((0, ix_stop[:-1] + 1)) start = df["time"][ix_start] stop = df["time"][ix_stop] ep5 = nap.IntervalSet(start, stop) - + np.testing.assert_array_almost_equal(ep5, ep6) @@ -404,7 +444,7 @@ def test_jitin_interval(): inep = nap.core._jitted_functions.jitin_interval(tsd.index, ep.start, ep.end) inep[np.isnan(inep)] = -1 - bins = ep.values.ravel() + bins = ep.values.ravel() ix = np.array( pd.cut(tsd.index, bins, labels=np.arange(len(bins) - 1, dtype=np.float64)) ) @@ -424,4 +464,3 @@ def test_jitin_interval(): inep2[np.isnan(inep2)] = -1 np.testing.assert_array_equal(inep, inep2) - diff --git a/tests/test_lazy_loading.py b/tests/test_lazy_loading.py index b140bf0e..9695497d 100644 --- a/tests/test_lazy_loading.py +++ b/tests/test_lazy_loading.py @@ -12,23 +12,25 @@ import pynapple as nap - - @pytest.mark.parametrize( "time, data, expectation", [ (np.arange(12), np.arange(12), does_not_raise()), - (np.arange(12), "not_an_array", pytest.raises(TypeError, match="Data should be array-like")) - ] + ( + np.arange(12), + "not_an_array", + pytest.raises(TypeError, match="Data should be array-like"), + ), + ], ) def test_lazy_load_hdf5_is_array(time, data, expectation, tmp_path): - file_path = tmp_path / Path('data.h5') + file_path = tmp_path / Path("data.h5") # try: - with h5py.File(file_path, 'w') as f: - f.create_dataset('data', data=data) - with h5py.File(file_path, 'r') as h5_data: + with h5py.File(file_path, "w") as f: + f.create_dataset("data", data=data) + with h5py.File(file_path, "r") as h5_data: with expectation: - nap.Tsd(t=time, d=h5_data['data'], load_array=False) + nap.Tsd(t=time, d=h5_data["data"], load_array=False) # finally: # # delete file # if file_path.exists(): @@ -39,16 +41,16 @@ def test_lazy_load_hdf5_is_array(time, data, expectation, tmp_path): "time, data", [ (np.arange(12), np.arange(12)), - ] + ], ) @pytest.mark.parametrize("convert_flag", [True, False]) def test_lazy_load_hdf5_is_array(time, data, convert_flag, tmp_path): - file_path = tmp_path / Path('data.h5') + file_path = tmp_path / Path("data.h5") # try: - with h5py.File(file_path, 
'w') as f: - f.create_dataset('data', data=data) + with h5py.File(file_path, "w") as f: + f.create_dataset("data", data=data) # get the tsd - h5_data = h5py.File(file_path, 'r')["data"] + h5_data = h5py.File(file_path, "r")["data"] tsd = nap.Tsd(t=time, d=h5_data, load_array=convert_flag) if convert_flag: assert not isinstance(tsd.d, h5py.Dataset) @@ -62,19 +64,19 @@ def test_lazy_load_hdf5_is_array(time, data, convert_flag, tmp_path): @pytest.mark.parametrize("time, data", [(np.arange(12), np.arange(12))]) @pytest.mark.parametrize("cls", [nap.Tsd, nap.TsdFrame, nap.TsdTensor]) -@pytest.mark.parametrize("func", [np.exp, lambda x: x*2]) -def test_lazy_load_hdf5_apply_func(time, data, func,cls, tmp_path): +@pytest.mark.parametrize("func", [np.exp, lambda x: x * 2]) +def test_lazy_load_hdf5_apply_func(time, data, func, cls, tmp_path): """Apply a unary function to a lazy loaded array.""" - file_path = tmp_path / Path('data.h5') + file_path = tmp_path / Path("data.h5") # try: if cls is nap.TsdFrame: data = data[:, None] elif cls is nap.TsdTensor: data = data[:, None, None] - with h5py.File(file_path, 'w') as f: - f.create_dataset('data', data=data) + with h5py.File(file_path, "w") as f: + f.create_dataset("data", data=data) # get the tsd - h5_data = h5py.File(file_path, 'r')["data"] + h5_data = h5py.File(file_path, "r")["data"] # lazy load and apply function res = func(cls(t=time, d=h5_data, load_array=False)) assert isinstance(res, cls) @@ -96,31 +98,34 @@ def test_lazy_load_hdf5_apply_func(time, data, func,cls, tmp_path): ("convolve", [np.ones(3)]), ("smooth", [2]), ("dropna", [True]), - ("value_from", [nap.Tsd(t=np.linspace(0, 12, 20), d=np.random.normal(size=20))]), + ( + "value_from", + [nap.Tsd(t=np.linspace(0, 12, 20), d=np.random.normal(size=20))], + ), ("copy", []), - ("get", [2, 7]) - ] + ("get", [2, 7]), + ], ) def test_lazy_load_hdf5_apply_method(time, data, method_name, args, cls, tmp_path): - file_path = tmp_path / Path('data.h5') + file_path = tmp_path / Path("data.h5") # try: if cls is nap.TsdFrame: data = data[:, None] elif cls is nap.TsdTensor: data = data[:, None, None] - with h5py.File(file_path, 'w') as f: - f.create_dataset('data', data=data) + with h5py.File(file_path, "w") as f: + f.create_dataset("data", data=data) # get the tsd - h5_data = h5py.File(file_path, 'r')["data"] + h5_data = h5py.File(file_path, "r")["data"] # lazy load and apply function tsd = cls(t=time, d=h5_data, load_array=False) func = getattr(tsd, method_name) out = func(*args) assert not isinstance(out.d, h5py.Dataset) # finally: - # # delete file - # if file_path.exists(): - # file_path.unlink() + # # delete file + # if file_path.exists(): + # file_path.unlink() @pytest.mark.parametrize("time, data", [(np.arange(12), np.arange(12))]) @@ -129,17 +134,19 @@ def test_lazy_load_hdf5_apply_method(time, data, method_name, args, cls, tmp_pat [ ("threshold", [3], nap.Tsd), ("as_series", [], pd.Series), - ("as_units", ['ms'], pd.Series), - ("to_tsgroup", [], nap.TsGroup) - ] + ("as_units", ["ms"], pd.Series), + ("to_tsgroup", [], nap.TsGroup), + ], ) -def test_lazy_load_hdf5_apply_method_tsd_specific(time, data, method_name, args, expected_out_type, tmp_path): - file_path = tmp_path / Path('data.h5') +def test_lazy_load_hdf5_apply_method_tsd_specific( + time, data, method_name, args, expected_out_type, tmp_path +): + file_path = tmp_path / Path("data.h5") # try: - with h5py.File(file_path, 'w') as f: - f.create_dataset('data', data=data) + with h5py.File(file_path, "w") as f: + f.create_dataset("data", 
data=data) # get the tsd - h5_data = h5py.File(file_path, 'r')["data"] + h5_data = h5py.File(file_path, "r")["data"] # lazy load and apply function tsd = nap.Tsd(t=time, d=h5_data, load_array=False) func = getattr(tsd, method_name) @@ -155,15 +162,17 @@ def test_lazy_load_hdf5_apply_method_tsd_specific(time, data, method_name, args, "method_name, args, expected_out_type", [ ("as_dataframe", [], pd.DataFrame), - ] + ], ) -def test_lazy_load_hdf5_apply_method_tsdframe_specific(time, data, method_name, args, expected_out_type, tmp_path): - file_path = tmp_path / Path('data.h5') +def test_lazy_load_hdf5_apply_method_tsdframe_specific( + time, data, method_name, args, expected_out_type, tmp_path +): + file_path = tmp_path / Path("data.h5") # try: - with h5py.File(file_path, 'w') as f: - f.create_dataset('data', data=data[:, None]) + with h5py.File(file_path, "w") as f: + f.create_dataset("data", data=data[:, None]) # get the tsd - h5_data = h5py.File(file_path, 'r')["data"] + h5_data = h5py.File(file_path, "r")["data"] # lazy load and apply function tsd = nap.TsdFrame(t=time, d=h5_data, load_array=False) func = getattr(tsd, method_name) @@ -175,13 +184,13 @@ def test_lazy_load_hdf5_apply_method_tsdframe_specific(time, data, method_name, def test_lazy_load_hdf5_tsdframe_loc(tmp_path): - file_path = tmp_path / Path('data.h5') + file_path = tmp_path / Path("data.h5") data = np.arange(10).reshape(5, 2) # try: - with h5py.File(file_path, 'w') as f: - f.create_dataset('data', data=data) + with h5py.File(file_path, "w") as f: + f.create_dataset("data", data=data) # get the tsd - h5_data = h5py.File(file_path, 'r')["data"] + h5_data = h5py.File(file_path, "r")["data"] # lazy load and apply function tsd = nap.TsdFrame(t=np.arange(data.shape[0]), d=h5_data, load_array=False).loc[1] assert isinstance(tsd, nap.Tsd) @@ -192,18 +201,23 @@ def test_lazy_load_hdf5_tsdframe_loc(tmp_path): # if file_path.exists(): # file_path.unlink() + @pytest.mark.parametrize( "lazy", [ (True), (False), - ] + ], ) def test_lazy_load_nwb(lazy): try: - nwb = nap.NWBFile("tests/nwbfilestest/basic/pynapplenwb/A2929-200711.nwb", lazy_loading=lazy) + nwb = nap.NWBFile( + "tests/nwbfilestest/basic/pynapplenwb/A2929-200711.nwb", lazy_loading=lazy + ) except: - nwb = nap.NWBFile("nwbfilestest/basic/pynapplenwb/A2929-200711.nwb", lazy_loading=lazy) + nwb = nap.NWBFile( + "nwbfilestest/basic/pynapplenwb/A2929-200711.nwb", lazy_loading=lazy + ) assert isinstance(nwb["z"].d, h5py.Dataset) is lazy nwb.close() @@ -214,25 +228,31 @@ def test_lazy_load_nwb(lazy): [ (True), (False), - ] + ], ) def test_lazy_load_function(lazy): try: - nwb = nap.load_file("tests/nwbfilestest/basic/pynapplenwb/A2929-200711.nwb", lazy_loading=lazy) + nwb = nap.load_file( + "tests/nwbfilestest/basic/pynapplenwb/A2929-200711.nwb", lazy_loading=lazy + ) except: - nwb = nap.load_file("nwbfilestest/basic/pynapplenwb/A2929-200711.nwb", lazy_loading=lazy) + nwb = nap.load_file( + "nwbfilestest/basic/pynapplenwb/A2929-200711.nwb", lazy_loading=lazy + ) assert isinstance(nwb["z"].d, h5py.Dataset) is lazy nwb.close() @pytest.mark.parametrize("data", [np.ones(10), np.ones((10, 2)), np.ones((10, 2, 2))]) -def test_lazy_load_nwb_no_warnings(data, tmp_path): # tmp_path is a default fixture creating a temporary folder - file_path = tmp_path / Path('data.h5') +def test_lazy_load_nwb_no_warnings( + data, tmp_path +): # tmp_path is a default fixture creating a temporary folder + file_path = tmp_path / Path("data.h5") # try: - with h5py.File(file_path, 'w') as f: - 
f.create_dataset('data', data=data) + with h5py.File(file_path, "w") as f: + f.create_dataset("data", data=data) time_series = mock_TimeSeries(name="TimeSeries", data=f["data"]) nwbfile = mock_NWBFile() nwbfile.add_acquisition(time_series) @@ -242,7 +262,7 @@ def test_lazy_load_nwb_no_warnings(data, tmp_path): # tmp_path is a default fix tsd = nwb["TimeSeries"] tsd.count(0.1) assert isinstance(tsd.d, h5py.Dataset) - + if len(w): if not str(w[0].message).startswith("Converting 'd' to"): raise RuntimeError @@ -256,30 +276,28 @@ def test_tsgroup_no_warnings(tmp_path): # default fixture n_units = 2 # try: for k in range(n_units): - file_path = tmp_path / Path(f'data_{k}.h5') - with h5py.File(file_path, 'w') as f: - f.create_dataset('spks', data=np.sort(np.random.uniform(0, 10, size=20))) - with warnings.catch_warnings(record=True) as w: + file_path = tmp_path / Path(f"data_{k}.h5") + with h5py.File(file_path, "w") as f: + f.create_dataset("spks", data=np.sort(np.random.uniform(0, 10, size=20))) + with warnings.catch_warnings(record=True) as w: nwbfile = mock_NWBFile() for k in range(n_units): - file_path = tmp_path / Path(f'data_{k}.h5') - spike_times = h5py.File(file_path, "r")['spks'] + file_path = tmp_path / Path(f"data_{k}.h5") + spike_times = h5py.File(file_path, "r")["spks"] nwbfile.add_unit(spike_times=spike_times) - + nwb = nap.NWBFile(nwbfile) tsgroup = nwb["units"] tsgroup.count(0.1) - + if len(w): if not str(w[0].message).startswith("Converting 'd' to"): raise RuntimeError - # finally: # for k in range(n_units): # file_path = Path(f'data_{k}.h5') # if file_path.exists(): # file_path.unlink() - diff --git a/tests/test_misc.py b/tests/test_misc.py index 61c12b56..83d87c60 100644 --- a/tests/test_misc.py +++ b/tests/test_misc.py @@ -6,18 +6,20 @@ """Tests of IO misc functions""" -import pynapple as nap +import shutil +import warnings +from pathlib import Path + import numpy as np import pandas as pd import pytest -import warnings -from pathlib import Path -import shutil + +import pynapple as nap # look for tests folder path = Path(__file__).parent -if path.name == 'pynapple': - path = path / "tests" +if path.name == "pynapple": + path = path / "tests" path = path / "npzfilestest" # Recursively remove the folder: @@ -30,7 +32,7 @@ @pytest.mark.parametrize("path", [path]) def test_load_file(path): - tsd = nap.Tsd(t=np.arange(100), d=np.arange(100)) + tsd = nap.Tsd(t=np.arange(100), d=np.arange(100)) file_path = path / "tsd.npz" tsd.save(file_path) tsd2 = nap.load_file(file_path) @@ -42,6 +44,7 @@ def test_load_file(path): # file_path.unlink() + @pytest.mark.parametrize("path", [path]) def test_load_file_filenotfound(path): with pytest.raises(FileNotFoundError) as e: @@ -49,6 +52,7 @@ def test_load_file_filenotfound(path): assert str(e.value) == "File themissingfile.npz does not exist" + @pytest.mark.parametrize("path", [path]) def test_load_wrong_format(path): file_path = path / "test.npy" @@ -59,14 +63,15 @@ def test_load_wrong_format(path): assert str(e.value) == "File format not supported" # file_path.unlink() + @pytest.mark.parametrize("path", [path]) def test_load_folder(path): folder = nap.load_folder(path) assert isinstance(folder, nap.io.Folder) + def test_load_folder_foldernotfound(): with pytest.raises(FileNotFoundError) as e: nap.load_folder("MissingFolder") assert str(e.value) == "Folder MissingFolder does not exist" - diff --git a/tests/test_neurosuite.py b/tests/test_neurosuite.py index ae1b9a09..de055634 100644 --- a/tests/test_neurosuite.py +++ b/tests/test_neurosuite.py 
@@ -6,11 +6,13 @@ """Tests of neurosuite loader for `pynapple` package.""" -import pynapple as nap +import warnings + import numpy as np import pandas as pd import pytest -import warnings + +import pynapple as nap @pytest.mark.filterwarnings("ignore") diff --git a/tests/test_non_numpy_array.py b/tests/test_non_numpy_array.py index 7553e661..a5f80d8b 100644 --- a/tests/test_non_numpy_array.py +++ b/tests/test_non_numpy_array.py @@ -18,7 +18,8 @@ class TestTsArray: ( "abc", pytest.raises( - RuntimeError, match="Unknown format for t. Accepted formats are numpy.ndarray, list, tuple or any array-like objects." + RuntimeError, + match="Unknown format for t. Accepted formats are numpy.ndarray, list, tuple or any array-like objects.", ), ), ], @@ -31,9 +32,7 @@ def test_ts_init(self, time, expectation): @pytest.mark.filterwarnings("ignore") @pytest.mark.parametrize( "time, expectation", - [ - (MockArray(np.array([1, 2, 3])), does_not_raise()) - ], + [(MockArray(np.array([1, 2, 3])), does_not_raise())], ) def test_ts_type(self, time, expectation): """Verify that the time attribute 't' of a Ts object is stored as a numpy.ndarray.""" @@ -70,16 +69,18 @@ class TestTsdArray: MockArray([1, 2, 3]), "abc", pytest.raises( - RuntimeError, match="Unknown format for d. Accepted formats are numpy.ndarray, list, tuple or any array-like objects." + RuntimeError, + match="Unknown format for d. Accepted formats are numpy.ndarray, list, tuple or any array-like objects.", ), ), ( "abc", - MockArray([1, 2, 3]), + MockArray([1, 2, 3]), pytest.raises( - RuntimeError, match="Unknown format for t. Accepted formats are numpy.ndarray, list, tuple or any array-like objects." + RuntimeError, + match="Unknown format for t. Accepted formats are numpy.ndarray, list, tuple or any array-like objects.", ), - ), + ), ], ) def test_tsd_init(self, time, data, expectation): @@ -108,9 +109,9 @@ def test_tsd_type_d(self, time, data, expectation): [ (np.array([1, 2, 3]), np.array([1, 2, 3]), does_not_raise()), ( - MockArray(np.array([1, 2, 3])), - np.array([1, 2, 3]), - does_not_raise(), + MockArray(np.array([1, 2, 3])), + np.array([1, 2, 3]), + does_not_raise(), ), ], ) @@ -149,7 +150,8 @@ class TestTsdFrameArray: MockArray([1, 2, 3]), "abc", pytest.raises( - RuntimeError, match="Unknown format for d. Accepted formats are numpy.ndarray, list, tuple or any array-like objects." + RuntimeError, + match="Unknown format for d. Accepted formats are numpy.ndarray, list, tuple or any array-like objects.", ), ), ], @@ -180,9 +182,9 @@ def test_tsdframe_type(self, time, data, expectation): [ (np.array([1, 2, 3]), np.array([[1], [2], [3]]), does_not_raise()), ( - MockArray(np.array([1, 2, 3])), - np.array([[1], [2], [3]]), - does_not_raise(), + MockArray(np.array([1, 2, 3])), + np.array([[1], [2], [3]]), + does_not_raise(), ), ], ) @@ -224,7 +226,10 @@ class TestTsdTensorArray: ( MockArray([1, 2, 3]), "abc", - pytest.raises(RuntimeError, match="Unknown format for d. Accepted formats are numpy.ndarray, list, tuple or any array-like objects."), + pytest.raises( + RuntimeError, + match="Unknown format for d. 
Accepted formats are numpy.ndarray, list, tuple or any array-like objects.", + ), ), ], ) @@ -258,9 +263,9 @@ def test_tsdtensor_type_d(self, time, data, expectation): [ (np.array([1, 2, 3]), np.array([[[1]], [[2]], [[3]]]), does_not_raise()), ( - MockArray(np.array([1, 2, 3])), - np.array([[[1]], [[2]], [[3]]]), - does_not_raise(), + MockArray(np.array([1, 2, 3])), + np.array([[[1]], [[2]], [[3]]]), + does_not_raise(), ), ], ) @@ -286,4 +291,3 @@ def test_tsdtensor_warn(self, data, expectation): if nap.nap_config.backend == "numba": with expectation: nap.TsdTensor(t=np.ravel(np.array(data)), d=data) - diff --git a/tests/test_npz_file.py b/tests/test_npz_file.py index f9de9158..8e0207ac 100644 --- a/tests/test_npz_file.py +++ b/tests/test_npz_file.py @@ -6,18 +6,20 @@ """Tests of NPZ file functions""" -import pynapple as nap +import shutil +import warnings +from pathlib import Path + import numpy as np import pandas as pd import pytest -import warnings -from pathlib import Path -import shutil + +import pynapple as nap # look for tests folder path = Path(__file__).parent -if path.name == 'pynapple': - path = path / "tests" +if path.name == "pynapple": + path = path / "tests" path = path / "npzfilestest" # Recursively remove the folder: @@ -30,22 +32,25 @@ # Populate the folder data = { - "tsd":nap.Tsd(t=np.arange(100), d=np.arange(100)), - "ts":nap.Ts(t=np.sort(np.random.rand(10)*100)), - "tsdframe":nap.TsdFrame(t=np.arange(100), d=np.random.rand(100,10)), - "tsgroup":nap.TsGroup({ + "tsd": nap.Tsd(t=np.arange(100), d=np.arange(100)), + "ts": nap.Ts(t=np.sort(np.random.rand(10) * 100)), + "tsdframe": nap.TsdFrame(t=np.arange(100), d=np.random.rand(100, 10)), + "tsgroup": nap.TsGroup( + { 0: nap.Ts(t=np.arange(0, 200)), 1: nap.Ts(t=np.arange(0, 200, 0.5), time_units="s"), 2: nap.Ts(t=np.arange(0, 300, 0.2), time_units="s"), - }), - "iset":nap.IntervalSet(start=np.array([0.0, 5.0]), end=np.array([1.0, 6.0])) - } + } + ), + "iset": nap.IntervalSet(start=np.array([0.0, 5.0]), end=np.array([1.0, 6.0])), +} for k, d in data.items(): - d.save(path / (k+".npz")) + d.save(path / (k + ".npz")) + @pytest.mark.parametrize("path", [path]) def test_init(path): - tsd = nap.Tsd(t=np.arange(100), d=np.arange(100)) + tsd = nap.Tsd(t=np.arange(100), d=np.arange(100)) file_path = path / "tsd.npz" tsd.save(file_path) file = nap.NPZFile(file_path) @@ -53,73 +58,81 @@ def test_init(path): assert isinstance(file.type, np.str_) assert file.type == "Tsd" + @pytest.mark.parametrize("path", [path]) -@pytest.mark.parametrize("k", ['tsd', 'ts', 'tsdframe', 'tsgroup', 'iset']) +@pytest.mark.parametrize("k", ["tsd", "ts", "tsdframe", "tsgroup", "iset"]) def test_load(path, k): - file_path = path / (k+".npz") + file_path = path / (k + ".npz") file = nap.NPZFile(file_path) tmp = file.load() assert isinstance(tmp, type(data[k])) + @pytest.mark.parametrize("path", [path]) -@pytest.mark.parametrize("k", ['tsgroup']) +@pytest.mark.parametrize("k", ["tsgroup"]) def test_load_tsgroup(path, k): - file_path = path / (k+".npz") + file_path = path / (k + ".npz") file = nap.NPZFile(file_path) tmp = file.load() assert isinstance(tmp, type(data[k])) assert tmp.keys() == data[k].keys() assert np.all(tmp._metadata == data[k]._metadata) assert np.all(tmp[neu] == data[k][neu] for neu in tmp.keys()) - np.testing.assert_array_almost_equal(tmp.time_support.values, data[k].time_support.values) + np.testing.assert_array_almost_equal( + tmp.time_support.values, data[k].time_support.values + ) @pytest.mark.parametrize("path", [path]) 
-@pytest.mark.parametrize("k", ['tsd']) +@pytest.mark.parametrize("k", ["tsd"]) def test_load_tsd(path, k): - file_path = path / (k+".npz") + file_path = path / (k + ".npz") file = nap.NPZFile(file_path) tmp = file.load() assert isinstance(tmp, type(data[k])) assert np.all(tmp.d == data[k].d) assert np.all(tmp.t == data[k].t) - np.testing.assert_array_almost_equal(tmp.time_support.values, data[k].time_support.values) + np.testing.assert_array_almost_equal( + tmp.time_support.values, data[k].time_support.values + ) @pytest.mark.parametrize("path", [path]) -@pytest.mark.parametrize("k", ['ts']) +@pytest.mark.parametrize("k", ["ts"]) def test_load_ts(path, k): - file_path = path / (k+".npz") + file_path = path / (k + ".npz") file = nap.NPZFile(file_path) tmp = file.load() assert isinstance(tmp, type(data[k])) assert np.all(tmp.t == data[k].t) - np.testing.assert_array_almost_equal(tmp.time_support.values, data[k].time_support.values) - + np.testing.assert_array_almost_equal( + tmp.time_support.values, data[k].time_support.values + ) @pytest.mark.parametrize("path", [path]) -@pytest.mark.parametrize("k", ['tsdframe']) +@pytest.mark.parametrize("k", ["tsdframe"]) def test_load_tsdframe(path, k): - file_path = path / (k+".npz") + file_path = path / (k + ".npz") file = nap.NPZFile(file_path) tmp = file.load() assert isinstance(tmp, type(data[k])) assert np.all(tmp.t == data[k].t) - np.testing.assert_array_almost_equal(tmp.time_support.values, data[k].time_support.values) + np.testing.assert_array_almost_equal( + tmp.time_support.values, data[k].time_support.values + ) assert np.all(tmp.columns == data[k].columns) assert np.all(tmp.d == data[k].d) - @pytest.mark.parametrize("path", [path]) def test_load_non_npz(path): file_path = path / "random.npz" tmp = np.random.rand(100) np.savez(file_path, a=tmp) - file = nap.NPZFile(file_path) + file = nap.NPZFile(file_path) assert file.type == "npz" a = file.load() assert isinstance(a, np.lib.npyio.NpzFile) - np.testing.assert_array_equal(tmp, a['a']) + np.testing.assert_array_equal(tmp, a["a"]) diff --git a/tests/test_numpy_compatibility.py b/tests/test_numpy_compatibility.py index 7b1b3777..c4323147 100644 --- a/tests/test_numpy_compatibility.py +++ b/tests/test_numpy_compatibility.py @@ -1,25 +1,30 @@ -import pynapple as nap import numpy as np +import numpy.core.umath as _umath import pytest from numpy import ufunc as _ufunc -import numpy.core.umath as _umath +import pynapple as nap -ufuncs = {k:obj for k, obj in _umath.__dict__.items() if isinstance(obj, _ufunc)} +ufuncs = {k: obj for k, obj in _umath.__dict__.items() if isinstance(obj, _ufunc)} tsd = nap.TsdTensor(t=np.arange(100), d=np.random.rand(100, 5, 3), time_units="s") # tsd = nap.TsdFrame(t=np.arange(100), d=np.random.randn(100, 6)) -tsd.d[tsd.values>0.9] = np.nan +tsd.d[tsd.values > 0.9] = np.nan @pytest.mark.parametrize( "tsd", [ nap.Tsd(t=np.arange(100), d=np.random.rand(100), time_units="s"), - nap.TsdFrame(t=np.arange(100), d=np.random.rand(100, 5), time_units="s", columns=['a', 'b', 'c', 'd', 'e']), - nap.TsdTensor(t=np.arange(100), d=np.random.rand(100, 5, 3), time_units="s") + nap.TsdFrame( + t=np.arange(100), + d=np.random.rand(100, 5), + time_units="s", + columns=["a", "b", "c", "d", "e"], + ), + nap.TsdTensor(t=np.arange(100), d=np.random.rand(100, 5, 3), time_units="s"), ], ) class Test_Time_Series_1: @@ -29,27 +34,39 @@ def test_ufuncs(self, tsd): for ufunc in ufuncs.values(): print(ufunc) # Bit-twiddling functions - if ufunc.__name__ in ['bitwise_and', 'bitwise_or', 'bitwise_xor', 
- 'invert', 'left_shift', 'right_shift', 'isnat', 'gcd', 'lcm', - 'ldexp', 'arccosh']: + if ufunc.__name__ in [ + "bitwise_and", + "bitwise_or", + "bitwise_xor", + "invert", + "left_shift", + "right_shift", + "isnat", + "gcd", + "lcm", + "ldexp", + "arccosh", + ]: a.append(ufunc.__name__) pass - elif ufunc.__name__ in ['matmul', 'dot']: + elif ufunc.__name__ in ["matmul", "dot"]: break if tsd.ndim > 1: x = np.random.rand(*tsd.shape[1:]).T out = ufunc(tsd, x) assert isinstance(out, tsd.__class__) np.testing.assert_array_almost_equal(out.index, tsd.index) - np.testing.assert_array_almost_equal(out.values, ufunc(tsd.values, x)) + np.testing.assert_array_almost_equal( + out.values, ufunc(tsd.values, x) + ) with pytest.raises(TypeError): ufunc(tsd, tsd) a.append(ufunc.__name__) - elif ufunc.__name__ in ['logaddexp', 'logaddexp2', 'true_divide']: + elif ufunc.__name__ in ["logaddexp", "logaddexp2", "true_divide"]: x = np.random.rand(*tsd.shape) out = ufunc(tsd, x) assert isinstance(out, tsd.__class__) @@ -75,7 +92,7 @@ def test_ufuncs(self, tsd): np.testing.assert_array_almost_equal(out.index, tsd.index) np.testing.assert_array_almost_equal(out.values, ufunc(tsd.values, 1)) - # Testing with array + # Testing with array x = np.random.rand(*tsd.shape) out = ufunc(tsd, x) assert isinstance(out, tsd.__class__) @@ -93,7 +110,9 @@ def test_ufuncs(self, tsd): out = ufunc(tsd, 0.2, 0.6) assert isinstance(out, tsd.__class__) np.testing.assert_array_almost_equal(out.index, tsd.index) - np.testing.assert_array_almost_equal(out.values, ufunc(tsd.values, 0.2, 0.6)) + np.testing.assert_array_almost_equal( + out.values, ufunc(tsd.values, 0.2, 0.6) + ) a.append(ufunc.__name__) @@ -108,7 +127,7 @@ def test_funcs(self, tsd): assert tsd.shape == tsd.values.shape - if tsd.ndim>1: + if tsd.ndim > 1: a = np.reshape(tsd, (np.prod(tsd.shape), 1)) assert isinstance(a, np.ndarray) @@ -116,7 +135,9 @@ def test_funcs(self, tsd): a = np.reshape(tsd, (tsd.shape[0], np.prod(tsd.shape[1:]))) assert isinstance(a, nap.TsdFrame) np.testing.assert_array_almost_equal(a.index, tsd.index) - np.testing.assert_array_almost_equal(a.values, np.reshape(tsd.values, (tsd.shape[0], np.prod(tsd.shape[1:])))) + np.testing.assert_array_almost_equal( + a.values, np.reshape(tsd.values, (tsd.shape[0], np.prod(tsd.shape[1:]))) + ) a = np.ravel(tsd.values) np.testing.assert_array_almost_equal(a, np.ravel(tsd)) @@ -135,9 +156,9 @@ def test_funcs(self, tsd): assert isinstance(a, np.ndarray) if tsd.nap_class == "TsdFrame": - a = np.column_stack((tsd[:,0],tsd[:,1])) + a = np.column_stack((tsd[:, 0], tsd[:, 1])) assert isinstance(a, nap.TsdFrame) - np.testing.assert_array_almost_equal(a.values, tsd.values[:,0:2]) + np.testing.assert_array_almost_equal(a.values, tsd.values[:, 0:2]) a = np.isnan(tsd) assert isinstance(a, tsd.__class__) @@ -149,7 +170,9 @@ def test_attributes(self, tsd): with pytest.raises(AttributeError) as e_info: tsd.blabla() - assert str(e_info.value) == "Time series object does not have the attribute blabla" + assert ( + str(e_info.value) == "Time series object does not have the attribute blabla" + ) def test_split(self, tsd): a = np.split(tsd, 4) @@ -159,7 +182,7 @@ def test_split(self, tsd): np.testing.assert_array_almost_equal(a[i].values, b[i]) np.testing.assert_array_almost_equal(a[i].index, c[i]) - if tsd.ndim>1: + if tsd.ndim > 1: a = np.split(tsd, 1, 1) assert isinstance(a, list) @@ -169,11 +192,11 @@ def test_split(self, tsd): for i in range(4): np.testing.assert_array_almost_equal(a[i].values, b[i]) 
np.testing.assert_array_almost_equal(a[i].index, c[i]) - if tsd.ndim>1: + if tsd.ndim > 1: a = np.array_split(tsd, 1, 1) assert isinstance(a, list) - if tsd.ndim>1: + if tsd.ndim > 1: a = np.vsplit(tsd, 4) b = np.vsplit(tsd.values, 4) c = np.split(tsd.index, 4) @@ -222,17 +245,17 @@ def test_operators(self, tsd): a = tsd // 0.5 assert isinstance(a, tsd.__class__) np.testing.assert_array_almost_equal(tsd.index, a.index) - assert np.all(a.values == (v // 0.5)) + assert np.all(a.values == (v // 0.5)) a = tsd % 0.5 assert isinstance(a, tsd.__class__) np.testing.assert_array_almost_equal(tsd.index, a.index) assert np.all(a.values == (v % 0.5)) - a = tsd ** 0.5 + a = tsd**0.5 assert isinstance(a, tsd.__class__) np.testing.assert_array_almost_equal(tsd.index, a.index) - np.testing.assert_array_almost_equal(a.values, v**0.5) + np.testing.assert_array_almost_equal(a.values, v**0.5) a = tsd > 0.5 assert isinstance(a, tsd.__class__) @@ -252,8 +275,8 @@ def test_operators(self, tsd): a = tsd <= 0.5 assert isinstance(a, tsd.__class__) np.testing.assert_array_almost_equal(tsd.index, a.index) - assert np.all(a.values == (v <= 0.5)) - + assert np.all(a.values == (v <= 0.5)) + a = tsd == 0.5 assert isinstance(a, tsd.__class__) np.testing.assert_array_almost_equal(tsd.index, a.index) @@ -262,7 +285,7 @@ def test_operators(self, tsd): a = tsd != 0.5 assert isinstance(a, tsd.__class__) np.testing.assert_array_almost_equal(tsd.index, a.index) - assert np.all(a.values == (v != 0.5)) + assert np.all(a.values == (v != 0.5)) def test_slice(self, tsd): a = tsd[0:10] @@ -271,52 +294,52 @@ def test_slice(self, tsd): np.testing.assert_array_almost_equal(tsd.values[0:10], a.values) if tsd.nap_class == "TsdTensor": - a = tsd[:,0:2,2:4] + a = tsd[:, 0:2, 2:4] assert isinstance(a, nap.TsdTensor) np.testing.assert_array_almost_equal(tsd.index, a.index) - np.testing.assert_array_almost_equal(tsd.values[:,0:2,2:4], a.values) + np.testing.assert_array_almost_equal(tsd.values[:, 0:2, 2:4], a.values) - a = tsd[:,0] + a = tsd[:, 0] assert isinstance(a, nap.TsdFrame) np.testing.assert_array_almost_equal(tsd.index, a.index) - np.testing.assert_array_almost_equal(tsd.values[:,0], a.values) - - a = tsd[:,0,0] + np.testing.assert_array_almost_equal(tsd.values[:, 0], a.values) + + a = tsd[:, 0, 0] assert isinstance(a, nap.Tsd) np.testing.assert_array_almost_equal(tsd.index, a.index) - np.testing.assert_array_almost_equal(tsd.values[:,0,0], a.values) + np.testing.assert_array_almost_equal(tsd.values[:, 0, 0], a.values) if tsd.nap_class == "TsdFrame": - a = tsd[:,0] + a = tsd[:, 0] assert isinstance(a, nap.Tsd) np.testing.assert_array_almost_equal(tsd.index, a.index) - np.testing.assert_array_almost_equal(tsd.values[:,0], a.values) + np.testing.assert_array_almost_equal(tsd.values[:, 0], a.values) - a = tsd.loc['a'] + a = tsd.loc["a"] assert isinstance(a, nap.Tsd) np.testing.assert_array_almost_equal(tsd.index, a.index) - np.testing.assert_array_almost_equal(tsd.values[:,0], a.values) + np.testing.assert_array_almost_equal(tsd.values[:, 0], a.values) - a = tsd.loc[['a', 'c']] + a = tsd.loc[["a", "c"]] assert isinstance(a, nap.TsdFrame) np.testing.assert_array_almost_equal(tsd.index, a.index) - np.testing.assert_array_almost_equal(tsd.values[:,[0,2]], a.values) + np.testing.assert_array_almost_equal(tsd.values[:, [0, 2]], a.values) def test_sorting(self, tsd): with pytest.raises(TypeError): np.sort(tsd) with pytest.raises(TypeError): - np.lexsort(tsd) + np.lexsort(tsd) with pytest.raises(TypeError): np.sort_complex(tsd) with 
pytest.raises(TypeError): - np.partition(tsd) + np.partition(tsd) with pytest.raises(TypeError): - np.argpartition(tsd) + np.argpartition(tsd) def test_searching(self, tsd): for func in [np.argmax, np.nanargmax, np.argmin, np.nanargmin]: @@ -324,18 +347,20 @@ def test_searching(self, tsd): a = func(tsd) assert a == func(tsd.values) - if tsd.ndim>1: + if tsd.ndim > 1: a = func(tsd, 1) - if a.ndim == 1: assert isinstance(a, nap.Tsd) - if a.ndim == 2: assert isinstance(a, nap.TsdFrame) + if a.ndim == 1: + assert isinstance(a, nap.Tsd) + if a.ndim == 2: + assert isinstance(a, nap.TsdFrame) np.testing.assert_array_equal(a.values, func(tsd.values, 1)) np.testing.assert_array_almost_equal(a.index, tsd.index) - + for func in [np.argwhere]: a = func(tsd) np.testing.assert_array_equal(a, func(tsd.values)) - a = np.where(tsd>0.5) + a = np.where(tsd > 0.5) assert isinstance(a, tuple) def test_statistics(self, tsd): @@ -344,15 +369,25 @@ def test_statistics(self, tsd): a = np.percentile(tsd, 50) assert a == np.percentile(tsd.values, 50) - if tsd.ndim>1: + if tsd.ndim > 1: a = np.percentile(tsd, 50, 1) assert isinstance(a, (nap.Tsd, nap.TsdFrame, nap.TsdTensor)) - for func in [np.median, np.average, np.mean, np.std, np.var, np.nanmedian, np.nanmean, np.nanstd, np.nanvar]: + for func in [ + np.median, + np.average, + np.mean, + np.std, + np.var, + np.nanmedian, + np.nanmean, + np.nanstd, + np.nanvar, + ]: a = np.percentile(tsd, 50) assert a == np.percentile(tsd.values, 50) - if tsd.ndim>1: + if tsd.ndim > 1: a = np.percentile(tsd, 50, 1) assert isinstance(a, (nap.Tsd, nap.TsdFrame, nap.TsdTensor)) @@ -364,19 +399,19 @@ def test_statistics(self, tsd): a = np.cov(tsd) assert isinstance(a, nap.TsdFrame) np.testing.assert_array_almost_equal(a.index, tsd.index) - - a = np.correlate(tsd[:,0], tsd[:,1]) - b = np.correlate(tsd[:,0].values, tsd[:,1].values) + + a = np.correlate(tsd[:, 0], tsd[:, 1]) + b = np.correlate(tsd[:, 0].values, tsd[:, 1].values) assert isinstance(a, np.ndarray) np.testing.assert_array_almost_equal(a, b) - a = np.correlate(tsd[:,0], tsd[:,1], 'same') - b = np.correlate(tsd[:,0].values, tsd[:,1].values, 'same') + a = np.correlate(tsd[:, 0], tsd[:, 1], "same") + b = np.correlate(tsd[:, 0].values, tsd[:, 1].values, "same") assert isinstance(a, nap.Tsd) np.testing.assert_array_almost_equal(a, b) - a = np.correlate(tsd[:,0], tsd[:,1], 'full') - b = np.correlate(tsd[:,0].values, tsd[:,1].values, 'full') + a = np.correlate(tsd[:, 0], tsd[:, 1], "full") + b = np.correlate(tsd[:, 0].values, tsd[:, 1].values, "full") assert isinstance(a, np.ndarray) np.testing.assert_array_almost_equal(a, b) @@ -389,15 +424,20 @@ def test_statistics(self, tsd): def test_concatenate(self, tsd): - with pytest.raises(RuntimeError, match=r"The order of the time series indexes should be strictly increasing and non overlapping."): + with pytest.raises( + RuntimeError, + match=r"The order of the time series indexes should be strictly increasing and non overlapping.", + ): np.concatenate((tsd, tsd), 0) - tsd2 = tsd.__class__(t=tsd.index+150, d=tsd.values) + tsd2 = tsd.__class__(t=tsd.index + 150, d=tsd.values) a = np.concatenate((tsd, tsd2)) assert isinstance(a, tsd.__class__) assert len(a) == len(tsd) + len(tsd2) - np.testing.assert_array_almost_equal(a.values, np.concatenate((tsd.values, tsd2.values))) + np.testing.assert_array_almost_equal( + a.values, np.concatenate((tsd.values, tsd2.values)) + ) time_support = nap.IntervalSet(start=[0, 150], end=[99, 249]) np.testing.assert_array_almost_equal(time_support.values, 
a.time_support.values) @@ -420,40 +460,42 @@ def test_concatenate(self, tsd): out = np.concatenate((tsd, tsd), 1) assert isinstance(out, tsd.__class__) np.testing.assert_array_almost_equal( - out.values, - np.concatenate((tsd.values, tsd.values), 1) - ) + out.values, np.concatenate((tsd.values, tsd.values), 1) + ) out = np.concatenate((tsd.values, tsd), 1) assert isinstance(out, tsd.__class__) - np.testing.assert_array_almost_equal(out.values,np.concatenate((tsd.values, tsd.values), 1)) - np.testing.assert_array_almost_equal(tsd.index.values,out.index.values) + np.testing.assert_array_almost_equal( + out.values, np.concatenate((tsd.values, tsd.values), 1) + ) + np.testing.assert_array_almost_equal(tsd.index.values, out.index.values) - msg = 'Time indexes and time supports are not all equals up to pynapple precision. Returning numpy array!' - with pytest.warns(match = msg): + msg = "Time indexes and time supports are not all equals up to pynapple precision. Returning numpy array!" + with pytest.warns(match=msg): out = np.concatenate((tsd, tsd2), 1) assert isinstance(out, np.ndarray) - iset = nap.IntervalSet(start = 0, end = 500) + iset = nap.IntervalSet(start=0, end=500) msg = "Time indexes are not all equals up to pynapple precision. Returning numpy array!" - with pytest.warns(match=msg): + with pytest.warns(match=msg): out = np.concatenate((tsd.restrict(iset), tsd2.restrict(iset)), 1) msg = "Time supports are not all equals up to pynapple precision. Returning numpy array!" - with pytest.warns(match=msg): - out = np.concatenate((tsd, tsd.restrict(iset)), 1) + with pytest.warns(match=msg): + out = np.concatenate((tsd, tsd.restrict(iset)), 1) if tsd.ndim == 3: out = np.concatenate((tsd, tsd), 2) assert isinstance(out, tsd.__class__) np.testing.assert_array_almost_equal( - out.values, - np.concatenate((tsd.values, tsd.values), 2) - ) + out.values, np.concatenate((tsd.values, tsd.values), 2) + ) out = np.concatenate((tsd.values, tsd), 2) assert isinstance(out, tsd.__class__) - np.testing.assert_array_almost_equal(out.values,np.concatenate((tsd.values, tsd.values), 2)) - np.testing.assert_array_almost_equal(tsd.index.values,out.index.values) + np.testing.assert_array_almost_equal( + out.values, np.concatenate((tsd.values, tsd.values), 2) + ) + np.testing.assert_array_almost_equal(tsd.index.values, out.index.values) def test_fft(self, tsd): with pytest.raises(TypeError): diff --git a/tests/test_nwb.py b/tests/test_nwb.py index a438aa04..c3fefc0e 100644 --- a/tests/test_nwb.py +++ b/tests/test_nwb.py @@ -6,15 +6,16 @@ """Tests of nwb reading for `pynapple` package.""" -import pynapple as nap +import warnings + import numpy as np import pandas as pd -import pytest -import warnings import pynwb +import pytest from pynwb.testing.mock.file import mock_NWBFile from pynwb.testing.mock.utils import name_generator_registry +import pynapple as nap ############################################################ # DEPRECATED PART ########################################## @@ -59,7 +60,7 @@ def test_time_support(self, data): @pytest.mark.filterwarnings("ignore") def test_nwb_meta_info(self, data): - from pynwb import NWBFile, NWBHDF5IO + from pynwb import NWBHDF5IO, NWBFile io = NWBHDF5IO(data.nwbfilepath, "r") nwbfile = io.read() @@ -99,19 +100,21 @@ def test_NWBFile_wrong_input(): with pytest.raises(TypeError): nap.NWBFile(1) + def test_wrong_key(): nwbfile = mock_NWBFile() - nwb = nap.NWBFile(nwbfile) + nwb = nap.NWBFile(nwbfile) with pytest.raises(KeyError): nwb["a"] def test_failed_to_build(): from 
pynwb.file import Subject + nwbfile = mock_NWBFile(subject=Subject(subject_id="mouse1")) - nwb = nap.NWBFile(nwbfile) - for oid, obj in nwbfile.objects.items(): - nwb.key_to_id[obj.name] = oid + nwb = nap.NWBFile(nwbfile) + for oid, obj in nwbfile.objects.items(): + nwb.key_to_id[obj.name] = oid nwb[obj.name] = {"id": oid, "type": "Tsd"} with pytest.warns(UserWarning) as record: @@ -120,7 +123,8 @@ def test_failed_to_build(): record[0].message.args[0] == "Failed to build Tsd.\n Returning the NWB object for manual inspection" ) - + + def test_add_TimeSeries(): from pynwb.testing.mock.base import mock_TimeSeries @@ -139,7 +143,9 @@ def test_add_TimeSeries(): # Tsd with timestamps name_generator_registry.clear() nwbfile = mock_NWBFile() - time_series = mock_TimeSeries(timestamps=np.arange(10), data=np.arange(10), rate=None) + time_series = mock_TimeSeries( + timestamps=np.arange(10), data=np.arange(10), rate=None + ) nwbfile.add_acquisition(time_series) nwb = nap.NWBFile(nwbfile) assert len(nwb) == 1 @@ -149,11 +155,10 @@ def test_add_TimeSeries(): np.testing.assert_array_almost_equal(tsd.index, np.arange(10)) np.testing.assert_array_almost_equal(tsd.values, np.arange(10)) - # TsdFrame name_generator_registry.clear() nwbfile = mock_NWBFile() - time_series = mock_TimeSeries(data=np.zeros((10,3))) + time_series = mock_TimeSeries(data=np.zeros((10, 3))) nwbfile.add_acquisition(time_series) nwb = nap.NWBFile(nwbfile) assert len(nwb) == 1 @@ -166,10 +171,10 @@ def test_add_TimeSeries(): def test_add_SpatialSeries(): from pynwb.testing.mock.behavior import ( - mock_SpatialSeries, + mock_CompassDirection, mock_Position, mock_PupilTracking, - mock_CompassDirection, + mock_SpatialSeries, ) for name, Series in zip( @@ -187,25 +192,25 @@ def test_add_SpatialSeries(): # Test for 2d and 3d name_generator_registry.clear() nwbfile = mock_NWBFile() - nwbfile.add_acquisition(mock_SpatialSeries(data=np.zeros((4,2)))) + nwbfile.add_acquisition(mock_SpatialSeries(data=np.zeros((4, 2)))) nwb = nap.NWBFile(nwbfile) assert len(nwb) == 1 assert "SpatialSeries" in nwb.keys() or "TimeSeries" in nwb.keys() data = nwb[list(nwb.keys())[0]] assert isinstance(data, nap.TsdFrame) - np.testing.assert_array_equal(data.values,np.zeros((4, 2))) - np.testing.assert_array_equal(data.columns, ['x', 'y']) + np.testing.assert_array_equal(data.values, np.zeros((4, 2))) + np.testing.assert_array_equal(data.columns, ["x", "y"]) name_generator_registry.clear() nwbfile = mock_NWBFile() - nwbfile.add_acquisition(mock_SpatialSeries(data=np.zeros((4,3)))) + nwbfile.add_acquisition(mock_SpatialSeries(data=np.zeros((4, 3)))) nwb = nap.NWBFile(nwbfile) assert len(nwb) == 1 assert "SpatialSeries" in nwb.keys() or "TimeSeries" in nwb.keys() data = nwb[list(nwb.keys())[0]] assert isinstance(data, nap.TsdFrame) - np.testing.assert_array_equal(data.values,np.zeros((4, 3))) - np.testing.assert_array_equal(data.columns, ['x', 'y', 'z']) + np.testing.assert_array_equal(data.values, np.zeros((4, 3))) + np.testing.assert_array_equal(data.columns, ["x", "y", "z"]) def test_add_Device(): @@ -219,8 +224,8 @@ def test_add_Device(): def test_add_Ecephys(): from pynwb.testing.mock.ecephys import ( - mock_ElectrodeGroup, mock_ElectricalSeries, + mock_ElectrodeGroup, mock_SpikeEventSeries, ) @@ -245,7 +250,9 @@ def test_add_Ecephys(): np.testing.assert_array_almost_equal( data.index, obj.starting_time + np.arange(obj.num_samples) / obj.rate ) - np.testing.assert_array_almost_equal(data.columns.values, obj.electrodes["id"][:]) + 
np.testing.assert_array_almost_equal( + data.columns.values, obj.electrodes["id"][:] + ) # Try ElectrialSeries without channel mapping name_generator_registry.clear() @@ -261,8 +268,9 @@ def test_add_Ecephys(): np.testing.assert_array_almost_equal( data.index, obj.starting_time + np.arange(obj.num_samples) / obj.rate ) - np.testing.assert_array_almost_equal(data.columns.values, np.arange(obj.data.shape[1])) - + np.testing.assert_array_almost_equal( + data.columns.values, np.arange(obj.data.shape[1]) + ) name_generator_registry.clear() nwbfile = mock_NWBFile() @@ -275,18 +283,20 @@ def test_add_Ecephys(): obj = nwbfile.acquisition["SpikeEventSeries"] np.testing.assert_array_almost_equal(data.values, obj.data[:]) np.testing.assert_array_almost_equal(data.index, obj.timestamps[:]) - np.testing.assert_array_almost_equal(data.columns.values, obj.electrodes["id"][:]) + np.testing.assert_array_almost_equal( + data.columns.values, obj.electrodes["id"][:] + ) def test_add_Icephys(): try: from pynwb.testing.mock.icephys import ( - mock_IntracellularElectrode, - mock_VoltageClampStimulusSeries, - mock_VoltageClampSeries, mock_CurrentClampSeries, mock_CurrentClampStimulusSeries, + mock_IntracellularElectrode, mock_IZeroClampSeries, + mock_VoltageClampSeries, + mock_VoltageClampStimulusSeries, ) with warnings.catch_warnings(): @@ -335,8 +345,8 @@ def test_add_Icephys(): def test_add_Ogen(): from pynwb.testing.mock.ogen import ( - mock_OptogeneticStimulusSite, mock_OptogeneticSeries, + mock_OptogeneticStimulusSite, ) name_generator_registry.clear() @@ -363,15 +373,16 @@ def test_add_Ogen(): def test_add_Ophys(): try: from pynwb.testing.mock.ophys import ( + mock_DfOverF, + mock_Fluorescence, + mock_ImageSegmentation, mock_ImagingPlane, mock_OnePhotonSeries, - mock_TwoPhotonSeries, mock_PlaneSegmentation, - mock_ImageSegmentation, mock_RoiResponseSeries, - mock_DfOverF, - mock_Fluorescence, + mock_TwoPhotonSeries, ) + with warnings.catch_warnings(): warnings.simplefilter("ignore") @@ -407,7 +418,9 @@ def test_add_Ophys(): data.index, obj.starting_time + np.arange(obj.num_samples) / obj.rate, ) - np.testing.assert_array_almost_equal(data.columns.values, obj.rois["id"][:]) + np.testing.assert_array_almost_equal( + data.columns.values, obj.rois["id"][:] + ) except: pass # some issues with pynwb version @@ -516,60 +529,70 @@ def test_add_Units(): spks = {} alpha = np.random.randint(0, 10, n_units) for n_units_per_shank in range(n_units): - spike_times = np.where(np.random.rand((res * duration)) < (firing_rate / res))[0] / res - nwbfile.add_unit(spike_times=spike_times, quality="good", alpha=alpha[n_units_per_shank]) + spike_times = ( + np.where(np.random.rand((res * duration)) < (firing_rate / res))[0] / res + ) + nwbfile.add_unit( + spike_times=spike_times, quality="good", alpha=alpha[n_units_per_shank] + ) spks[n_units_per_shank] = spike_times nwb_tsgroup = nap.NWBFile(nwbfile) assert len(nwb_tsgroup) == 1 assert "units" in nwb_tsgroup.keys() - - data = nwb_tsgroup['units'] + + data = nwb_tsgroup["units"] assert isinstance(data, nap.TsGroup) assert len(data) == n_units for n in data.keys(): np.testing.assert_array_almost_equal(data[n].index, spks[n]) - np.testing.assert_array_equal(data._metadata["quality"].values, np.array(["good"]*n_units)) + np.testing.assert_array_equal( + data._metadata["quality"].values, np.array(["good"] * n_units) + ) np.testing.assert_array_equal(data._metadata["alpha"].values, alpha) def test_add_Timestamps(): - from pynwb.misc import AnnotationSeries from pynwb.core import 
DynamicTable, VectorData + from pynwb.misc import AnnotationSeries nwbfile = mock_NWBFile() - nwbfile.add_acquisition(AnnotationSeries("test_ts", data=np.array(["test"]*100), timestamps=np.arange(100))) + nwbfile.add_acquisition( + AnnotationSeries( + "test_ts", data=np.array(["test"] * 100), timestamps=np.arange(100) + ) + ) nwb = nap.NWBFile(nwbfile) assert len(nwb) == 1 assert "test_ts" in nwb.keys() - data = nwb['test_ts'] + data = nwb["test_ts"] assert isinstance(data, nap.Ts) assert len(data) == 100 np.testing.assert_array_almost_equal(data.index, np.arange(100)) - # One ts only - nwbfile = mock_NWBFile() + # One ts only + nwbfile = mock_NWBFile() test_ts = DynamicTable( name="test_ts", description="Test Timestamps", colnames=[ "ts_times", - ], + ], columns=[ VectorData( name="ts_times", data=np.arange(10), description="Test", - ), + ), ], - ) + ) nwbfile.add_acquisition(test_ts) - + nwb = nap.NWBFile(nwbfile) assert len(nwb) == 1 assert "test_ts" in nwb.keys() - data = nwb['test_ts'] + data = nwb["test_ts"] assert isinstance(data, nap.Ts) assert len(data) == 10 np.testing.assert_array_almost_equal(data.index, np.arange(10)) @@ -579,31 +602,27 @@ def test_add_Timestamps(): test_ts = DynamicTable( name="test_ts", description="Test Timestamps", - colnames=[ - "ts_times", - "ts2_times" - ], + colnames=["ts_times", "ts2_times"], columns=[ VectorData( name="ts_times", data=np.arange(10), description="Test", - ), + ), VectorData( name="ts2_times", - data=np.arange(10)+1, + data=np.arange(10) + 1, description="Test", - ), - ], - ) + ), + ], + ) nwbfile.add_acquisition(test_ts) - + nwb = nap.NWBFile(nwbfile) assert len(nwb) == 1 assert "test_ts" in nwb.keys() - data = nwb['test_ts'] + data = nwb["test_ts"] assert isinstance(data, dict) assert len(data) == 2 for i, k in enumerate(data.keys()): - np.testing.assert_array_almost_equal(data[k].index, np.arange(10)+i) - + np.testing.assert_array_almost_equal(data[k].index, np.arange(10) + i) diff --git a/tests/test_perievent.py b/tests/test_perievent.py index 761e99cd..9118b871 100644 --- a/tests/test_perievent.py +++ b/tests/test_perievent.py @@ -7,11 +7,12 @@ """Tests of perievent for `pynapple` package.""" -import pynapple as nap import numpy as np import pandas as pd import pytest +import pynapple as nap + def test_align_tsd(): tsd = nap.Ts(t=np.arange(100)) @@ -23,6 +24,7 @@ def test_align_tsd(): for i, j in zip(peth.keys(), np.arange(0, 100, 10)): np.testing.assert_array_almost_equal(peth[i].index, np.arange(-10, 10)) + def test_compute_perievent_with_tsd(): tsd = nap.Tsd(t=np.arange(100), d=np.arange(100)) tref = nap.Ts(t=np.arange(10, 100, 10)) @@ -30,13 +32,12 @@ def test_compute_perievent_with_tsd(): assert isinstance(peth, nap.TsGroup) assert len(peth) == len(tref) - np.testing.assert_array_almost_equal( - peth.get_info("ref_times").values, tref.index - ) + np.testing.assert_array_almost_equal(peth.get_info("ref_times").values, tref.index) for i, j in zip(peth.keys(), np.arange(0, 100, 10)): np.testing.assert_array_almost_equal(peth[i].index, np.arange(-10, 10)) np.testing.assert_array_almost_equal(peth[i].values, np.arange(j, j + 20)) + def test_compute_perievent_minmax(): tsd = nap.Tsd(t=np.arange(100), d=np.arange(100)) tref = nap.Ts(t=np.arange(10, 100, 10)) @@ -44,26 +45,25 @@ def test_compute_perievent_minmax(): assert isinstance(peth, nap.TsGroup) assert len(peth) == len(tref) - np.testing.assert_array_almost_equal( - peth.get_info("ref_times").values, tref.index - ) + 
np.testing.assert_array_almost_equal(peth.get_info("ref_times").values, tref.index) for i, j in zip(peth.keys(), np.arange(0, 100, 10)): np.testing.assert_array_almost_equal(peth[i].index, np.arange(-10, 10)) np.testing.assert_array_almost_equal(peth[i].values, np.arange(j, j + 20)) + def test_compute_perievent_raise_error(): tsd = nap.Ts(t=np.arange(100)) tref = nap.Ts(t=np.arange(10, 100, 10)) with pytest.raises(AssertionError) as e_info: - nap.compute_perievent(tsd, [0,1,2], minmax=(-10, 10)) + nap.compute_perievent(tsd, [0, 1, 2], minmax=(-10, 10)) assert str(e_info.value) == "tref should be a Ts or Tsd object." with pytest.raises(AssertionError) as e_info: - nap.compute_perievent([0,1,2], tref, minmax=(-10, 10)) + nap.compute_perievent([0, 1, 2], tref, minmax=(-10, 10)) assert str(e_info.value) == "data should be a Ts, Tsd or TsGroup." with pytest.raises(AssertionError) as e_info: - nap.compute_perievent(tsd, tref, minmax={0:1}) + nap.compute_perievent(tsd, tref, minmax={0: 1}) assert str(e_info.value) == "minmax should be a tuple or int or float." with pytest.raises(AssertionError) as e_info: @@ -71,9 +71,10 @@ def test_compute_perievent_raise_error(): assert str(e_info.value) == "time_unit should be a str." with pytest.raises(AssertionError) as e_info: - nap.compute_perievent(tsd, tref, minmax=10, time_unit='a') + nap.compute_perievent(tsd, tref, minmax=10, time_unit="a") assert str(e_info.value) == "time_unit should be 's', 'ms' or 'us'" + def test_compute_perievent_with_tsgroup(): tsgroup = nap.TsGroup( {0: nap.Ts(t=np.arange(0, 100)), 1: nap.Ts(t=np.arange(0, 200))} @@ -89,9 +90,8 @@ def test_compute_perievent_with_tsgroup(): peth[i].get_info("ref_times").values, tref.index ) for j, k in zip(peth[i].keys(), np.arange(0, 100, 10)): - np.testing.assert_array_almost_equal( - peth[i][j].index, np.arange(-10, 10) - ) + np.testing.assert_array_almost_equal(peth[i][j].index, np.arange(-10, 10)) + def test_compute_perievent_time_units(): tsd = nap.Tsd(t=np.arange(100), d=np.arange(100)) @@ -108,8 +108,8 @@ def test_compute_perievent_time_units(): def test_compute_perievent_continuous(): tsd = nap.Tsd(t=np.arange(100), d=np.arange(100)) tref = nap.Ts(t=np.array([20, 60])) - minmax=(-5, 10) - + minmax = (-5, 10) + # time_array = tsd.t # data_array = tsd.d # time_target_array = tref.t @@ -120,81 +120,95 @@ def test_compute_perievent_continuous(): # idx1 = -np.arange(0, window[0] + binsize, binsize)[::-1][:-1] # idx2 = np.arange(0, window[1] + binsize, binsize)[1:] # time_idx = np.hstack((idx1, np.zeros(1), idx2)) - # windowsize = np.array([idx1.shape[0], idx2.shape[0]]) + # windowsize = np.array([idx1.shape[0], idx2.shape[0]]) - pe = nap.compute_perievent_continuous(tsd, tref, minmax=minmax) + pe = nap.compute_perievent_continuous(tsd, tref, minmax=minmax) assert isinstance(pe, nap.TsdFrame) assert pe.shape[1] == len(tref) - np.testing.assert_array_almost_equal(pe.index.values, np.arange(minmax[0], minmax[-1]+1)) - tmp = np.array([np.arange(t+minmax[0], t+minmax[1]+1) for t in tref.t]).T + np.testing.assert_array_almost_equal( + pe.index.values, np.arange(minmax[0], minmax[-1] + 1) + ) + tmp = np.array([np.arange(t + minmax[0], t + minmax[1] + 1) for t in tref.t]).T np.testing.assert_array_almost_equal(pe.values, tmp) - minmax=(5, 10) - pe = nap.compute_perievent_continuous(tsd, tref, minmax=minmax) + minmax = (5, 10) + pe = nap.compute_perievent_continuous(tsd, tref, minmax=minmax) np.testing.assert_array_almost_equal(pe.values, tmp) tsd = nap.Tsd(t=np.arange(100), d=np.arange(100)) tref = 
nap.Ts(t=np.array([20, 60])) - minmax=5 - pe = nap.compute_perievent_continuous(tsd, tref, minmax=minmax) + minmax = 5 + pe = nap.compute_perievent_continuous(tsd, tref, minmax=minmax) assert isinstance(pe, nap.TsdFrame) assert pe.shape[1] == len(tref) - np.testing.assert_array_almost_equal(pe.index.values, np.arange(-minmax, minmax+1)) - tmp = np.array([np.arange(t-minmax, t+minmax+1) for t in tref.t]).T + np.testing.assert_array_almost_equal( + pe.index.values, np.arange(-minmax, minmax + 1) + ) + tmp = np.array([np.arange(t - minmax, t + minmax + 1) for t in tref.t]).T np.testing.assert_array_almost_equal(pe.values, tmp) tsd = nap.TsdFrame(t=np.arange(100), d=np.random.randn(100, 3)) tref = nap.Ts(t=np.array([20, 60])) - minmax=(-5, 10) - pe = nap.compute_perievent_continuous(tsd, tref, minmax=minmax) + minmax = (-5, 10) + pe = nap.compute_perievent_continuous(tsd, tref, minmax=minmax) assert isinstance(pe, nap.TsdTensor) assert pe.d.ndim == 3 assert pe.shape[1:] == (len(tref), tsd.shape[1]) - np.testing.assert_array_almost_equal(pe.index.values, np.arange(minmax[0], minmax[-1]+1)) + np.testing.assert_array_almost_equal( + pe.index.values, np.arange(minmax[0], minmax[-1] + 1) + ) tmp = np.zeros(pe.shape) - for i,t in enumerate(tref.t): + for i, t in enumerate(tref.t): idx = np.where(tsd.t == t)[0][0] - tmp[:,i,:] = tsd.values[idx+minmax[0]:idx+minmax[1]+1] + tmp[:, i, :] = tsd.values[idx + minmax[0] : idx + minmax[1] + 1] np.testing.assert_array_almost_equal(pe.values, tmp) tsd = nap.TsdTensor(t=np.arange(100), d=np.random.randn(100, 3, 4)) tref = nap.Ts(t=np.array([20, 60])) - minmax=(-5, 10) - pe = nap.compute_perievent_continuous(tsd, tref, minmax=minmax) + minmax = (-5, 10) + pe = nap.compute_perievent_continuous(tsd, tref, minmax=minmax) assert isinstance(pe, nap.TsdTensor) assert pe.d.ndim == 4 assert pe.shape[1:] == (len(tref), *tsd.shape[1:]) - np.testing.assert_array_almost_equal(pe.index.values, np.arange(minmax[0], minmax[-1]+1)) + np.testing.assert_array_almost_equal( + pe.index.values, np.arange(minmax[0], minmax[-1] + 1) + ) tmp = np.zeros(pe.shape) - for i,t in enumerate(tref.t): + for i, t in enumerate(tref.t): idx = np.where(tsd.t == t)[0][0] - tmp[:,i,:] = tsd.values[idx+minmax[0]:idx+minmax[1]+1] + tmp[:, i, :] = tsd.values[idx + minmax[0] : idx + minmax[1] + 1] np.testing.assert_array_almost_equal(pe.values, tmp) def test_compute_perievent_continuous_time_units(): tsd = nap.Tsd(t=np.arange(100), d=np.arange(100)) - tref = nap.Ts(t=np.array([20, 60])) + tref = nap.Ts(t=np.array([20, 60])) minmax = (-5, 10) for tu, fa in zip(["s", "ms", "us"], [1, 1e3, 1e6]): - pe = nap.compute_perievent_continuous(tsd, tref, minmax=(minmax[0] * fa, minmax[1] * fa), time_unit=tu) - np.testing.assert_array_almost_equal(pe.index.values, np.arange(minmax[0], minmax[1]+1)) - tmp = np.array([np.arange(t+minmax[0], t+minmax[1]+1) for t in tref.t]).T + pe = nap.compute_perievent_continuous( + tsd, tref, minmax=(minmax[0] * fa, minmax[1] * fa), time_unit=tu + ) + np.testing.assert_array_almost_equal( + pe.index.values, np.arange(minmax[0], minmax[1] + 1) + ) + tmp = np.array([np.arange(t + minmax[0], t + minmax[1] + 1) for t in tref.t]).T np.testing.assert_array_almost_equal(pe.values, tmp) def test_compute_perievent_continuous_with_ep(): tsd = nap.Tsd(t=np.arange(100), d=np.arange(100)) tref = nap.Ts(t=np.array([10, 50, 80])) - minmax=(-5, 10) - ep = nap.IntervalSet(start = [0, 60], end = [40, 99]) + minmax = (-5, 10) + ep = nap.IntervalSet(start=[0, 60], end=[40, 99]) pe = 
nap.compute_perievent_continuous(tsd, tref, minmax=minmax, ep=ep) - - assert pe.shape[1] == len(tref)-1 - tmp = np.array([np.arange(t+minmax[0], t+minmax[1]+1) for t in tref.restrict(ep).t]).T + + assert pe.shape[1] == len(tref) - 1 + tmp = np.array( + [np.arange(t + minmax[0], t + minmax[1] + 1) for t in tref.restrict(ep).t] + ).T np.testing.assert_array_almost_equal(pe.values, tmp) - + tref = ep.starts # time_array = tsd.t @@ -207,21 +221,20 @@ def test_compute_perievent_continuous_with_ep(): # idx1 = -np.arange(0, window[0] + binsize, binsize)[::-1][:-1] # idx2 = np.arange(0, window[1] + binsize, binsize)[1:] # time_idx = np.hstack((idx1, np.zeros(1), idx2)) - # windowsize = np.array([idx1.shape[0], idx2.shape[0]]) - + # windowsize = np.array([idx1.shape[0], idx2.shape[0]]) pe = nap.compute_perievent_continuous(tsd, tref, minmax=minmax, ep=ep) - tmp = np.array([np.arange(t, t+minmax[1]+1) for t in tref.restrict(ep).t]).T - np.testing.assert_array_almost_equal(pe.values[abs(minmax[0]):], tmp) + tmp = np.array([np.arange(t, t + minmax[1] + 1) for t in tref.restrict(ep).t]).T + np.testing.assert_array_almost_equal(pe.values[abs(minmax[0]) :], tmp) tref = ep.ends pe = nap.compute_perievent_continuous(tsd, tref, minmax=minmax, ep=ep) - tmp = np.array([np.arange(t+minmax[0], t+1) for t in tref.restrict(ep).t]).T - np.testing.assert_array_almost_equal(pe.values[:-abs(minmax[1])], tmp) + tmp = np.array([np.arange(t + minmax[0], t + 1) for t in tref.restrict(ep).t]).T + np.testing.assert_array_almost_equal(pe.values[: -abs(minmax[1])], tmp) - ep = nap.IntervalSet(start = [100], end = [200]) + ep = nap.IntervalSet(start=[100], end=[200]) tref = nap.Ts(t=np.array([120, 150, 180])) - pe = nap.compute_perievent_continuous(tsd, tref, minmax=minmax, ep=ep) + pe = nap.compute_perievent_continuous(tsd, tref, minmax=minmax, ep=ep) assert np.all(np.isnan(pe.values)) @@ -229,15 +242,15 @@ def test_compute_perievent_continuous_raise_error(): tsd = nap.Tsd(t=np.arange(100), d=np.arange(100)) tref = nap.Ts(t=np.arange(10, 100, 10)) with pytest.raises(AssertionError) as e_info: - nap.compute_perievent_continuous(tsd, [0,1,2], minmax=(-10, 10)) + nap.compute_perievent_continuous(tsd, [0, 1, 2], minmax=(-10, 10)) assert str(e_info.value) == "tref should be a Ts or Tsd object." with pytest.raises(AssertionError) as e_info: - nap.compute_perievent_continuous([0,1,2], tref, minmax=(-10, 10)) + nap.compute_perievent_continuous([0, 1, 2], tref, minmax=(-10, 10)) assert str(e_info.value) == "data should be a Tsd, TsdFrame or TsdTensor." with pytest.raises(AssertionError) as e_info: - nap.compute_perievent_continuous(tsd, tref, minmax={0:1}) + nap.compute_perievent_continuous(tsd, tref, minmax={0: 1}) assert str(e_info.value) == "minmax should be a tuple or int or float." with pytest.raises(AssertionError) as e_info: @@ -245,9 +258,9 @@ def test_compute_perievent_continuous_raise_error(): assert str(e_info.value) == "time_unit should be a str." with pytest.raises(AssertionError) as e_info: - nap.compute_perievent_continuous(tsd, tref, minmax=10, time_unit='a') + nap.compute_perievent_continuous(tsd, tref, minmax=10, time_unit="a") assert str(e_info.value) == "time_unit should be 's', 'ms' or 'us'" with pytest.raises(AssertionError) as e_info: - nap.compute_perievent_continuous(tsd, tref, minmax=10, ep='a') + nap.compute_perievent_continuous(tsd, tref, minmax=10, ep="a") assert str(e_info.value) == "ep should be an IntervalSet object." 
diff --git a/tests/test_phy.py b/tests/test_phy.py index c9c9b5a7..8427cd0e 100644 --- a/tests/test_phy.py +++ b/tests/test_phy.py @@ -6,11 +6,13 @@ """Tests of phy loader for `pynapple` package.""" -import pynapple as nap +import warnings + import numpy as np import pandas as pd import pytest -import warnings + +import pynapple as nap @pytest.mark.filterwarnings("ignore") diff --git a/tests/test_power_spectral_density.py b/tests/test_power_spectral_density.py index 92427e98..1a701306 100644 --- a/tests/test_power_spectral_density.py +++ b/tests/test_power_spectral_density.py @@ -8,7 +8,6 @@ import pynapple as nap - ############################################################ # Test for power_spectral_density ############################################################ @@ -146,15 +145,18 @@ def test_compute_power_spectral_density_raise_errors(sig, kwargs, expectation): ############################################################ -def get_signal_and_output(f=2, fs=1000, duration=100, interval_size=10, overlap = 0.25): +def get_signal_and_output(f=2, fs=1000, duration=100, interval_size=10, overlap=0.25): t = np.arange(0, duration, 1 / fs) d = np.cos(2 * np.pi * f * t) sig = nap.Tsd(t=t, d=d, time_support=nap.IntervalSet(0, 100)) tmp = [] - slice = (0, int(fs*interval_size)+1) + slice = (0, int(fs * interval_size) + 1) while slice[1] < len(d): - tmp.append(d[slice[0]:slice[1]]) - new_slice = (slice[1]-int(fs*interval_size*overlap)-1, slice[1]+int(fs*interval_size*(1-overlap))) + tmp.append(d[slice[0] : slice[1]]) + new_slice = ( + slice[1] - int(fs * interval_size * overlap) - 1, + slice[1] + int(fs * interval_size * (1 - overlap)), + ) slice = new_slice tmp = np.transpose(np.array(tmp)) @@ -275,15 +277,15 @@ def test_compute_mean_power_spectral_density(): ), ), ( - get_signal_and_output()[0], - 10, - {"overlap": "a"}, - pytest.raises( - TypeError, - match=re.escape( - "Invalid type. Parameter overlap must be of type ." - ), + get_signal_and_output()[0], + 10, + {"overlap": "a"}, + pytest.raises( + TypeError, + match=re.escape( + "Invalid type. Parameter overlap must be of type ." ), + ), ), ( get_signal_and_output()[0], @@ -313,21 +315,17 @@ def test_compute_mean_power_spectral_density(): {"overlap": -0.1}, pytest.raises( ValueError, - match=re.escape( - "Overlap should be in intervals [0.0, 1.0)." - ), + match=re.escape("Overlap should be in intervals [0.0, 1.0)."), ), ), ( - get_signal_and_output()[0], - 10, - {"overlap": 1.1}, - pytest.raises( - ValueError, - match=re.escape( - "Overlap should be in intervals [0.0, 1.0)." 
- ), - ), + get_signal_and_output()[0], + 10, + {"overlap": 1.1}, + pytest.raises( + ValueError, + match=re.escape("Overlap should be in intervals [0.0, 1.0)."), + ), ), ], ) diff --git a/tests/test_randomize.py b/tests/test_randomize.py index 2af421d5..5a365073 100644 --- a/tests/test_randomize.py +++ b/tests/test_randomize.py @@ -1,16 +1,17 @@ """Tests of randomize for `pynapple` package.""" -import pynapple as nap import numpy as np import pandas as pd import pytest +import pynapple as nap + def test_shift_ts(): ts = nap.Ts(t=np.arange(0, 100)) - shift_ts = nap.randomize.shift_timestamps(ts,min_shift=0.1,max_shift=0.2) + shift_ts = nap.randomize.shift_timestamps(ts, min_shift=0.1, max_shift=0.2) - assert isinstance(shift_ts,nap.Ts) + assert isinstance(shift_ts, nap.Ts) assert len(ts) == len(shift_ts) assert (ts.time_support.values == shift_ts.time_support.values).all() @@ -19,13 +20,15 @@ def test_shift_tsgroup(): tsgroup = nap.TsGroup( {0: nap.Ts(t=np.arange(0, 100)), 1: nap.Ts(t=np.arange(0, 200))} ) - shift_tsgroup = nap.randomize.shift_timestamps(tsgroup,min_shift=0.1,max_shift=0.2) + shift_tsgroup = nap.randomize.shift_timestamps( + tsgroup, min_shift=0.1, max_shift=0.2 + ) - assert isinstance(shift_tsgroup,nap.TsGroup) + assert isinstance(shift_tsgroup, nap.TsGroup) assert len(tsgroup) == len(shift_tsgroup) assert (tsgroup.time_support.values == shift_tsgroup.time_support.values).all() - for j,k in zip(tsgroup.keys(),shift_tsgroup.keys()): + for j, k in zip(tsgroup.keys(), shift_tsgroup.keys()): assert j == k assert len(tsgroup[j]) == len(shift_tsgroup[k]) @@ -35,7 +38,7 @@ def test_shuffle_intervals_ts(): shuff_ts = nap.randomize.shuffle_ts_intervals(ts) assert len(ts) == len(shuff_ts) - assert isinstance(shuff_ts,nap.Ts) + assert isinstance(shuff_ts, nap.Ts) assert ts.time_support.values == pytest.approx(shuff_ts.time_support.values) assert np.diff(ts.times()) == pytest.approx(np.diff(shuff_ts.times())) @@ -46,21 +49,25 @@ def test_resample_tsgroup(): ) shuff_tsgroup = nap.randomize.shuffle_ts_intervals(tsgroup) - assert isinstance(shuff_tsgroup,nap.TsGroup) + assert isinstance(shuff_tsgroup, nap.TsGroup) assert len(tsgroup) == len(shuff_tsgroup) - assert tsgroup.time_support.values == pytest.approx(shuff_tsgroup.time_support.values) + assert tsgroup.time_support.values == pytest.approx( + shuff_tsgroup.time_support.values + ) - for j,k in zip(tsgroup.keys(),shuff_tsgroup.keys()): + for j, k in zip(tsgroup.keys(), shuff_tsgroup.keys()): assert j == k assert len(tsgroup[j]) == len(shuff_tsgroup[k]) - assert np.diff(tsgroup[j].times()) == pytest.approx(np.diff(shuff_tsgroup[k].times())) + assert np.diff(tsgroup[j].times()) == pytest.approx( + np.diff(shuff_tsgroup[k].times()) + ) def test_jitter_ts(): ts = nap.Ts(t=np.arange(0, 100)) - jitter_ts = nap.randomize.jitter_timestamps(ts,max_jitter=0.1) + jitter_ts = nap.randomize.jitter_timestamps(ts, max_jitter=0.1) - assert isinstance(jitter_ts,nap.Ts) + assert isinstance(jitter_ts, nap.Ts) assert len(ts) == len(jitter_ts) @@ -68,12 +75,12 @@ def test_jitter_tsgroup(): tsgroup = nap.TsGroup( {0: nap.Ts(t=np.arange(0, 100)), 1: nap.Ts(t=np.arange(0, 200))} ) - jitter_tsgroup = nap.randomize.jitter_timestamps(tsgroup,max_jitter=0.1) + jitter_tsgroup = nap.randomize.jitter_timestamps(tsgroup, max_jitter=0.1) - assert isinstance(jitter_tsgroup,nap.TsGroup) + assert isinstance(jitter_tsgroup, nap.TsGroup) assert len(tsgroup) == len(jitter_tsgroup) - for j,k in zip(tsgroup.keys(),jitter_tsgroup.keys()): + for j, k in zip(tsgroup.keys(), 
jitter_tsgroup.keys()): assert j == k assert len(tsgroup[j]) == len(jitter_tsgroup[k]) @@ -83,7 +90,7 @@ def test_resample_ts(): resampled_ts = nap.randomize.resample_timestamps(ts) assert len(ts) == len(resampled_ts) - assert isinstance(resampled_ts,nap.Ts) + assert isinstance(resampled_ts, nap.Ts) assert (ts.time_support.values == resampled_ts.time_support.values).all() @@ -93,10 +100,10 @@ def test_resample_tsgroup(): ) resampled_tsgroup = nap.randomize.resample_timestamps(tsgroup) - assert isinstance(resampled_tsgroup,nap.TsGroup) + assert isinstance(resampled_tsgroup, nap.TsGroup) assert len(tsgroup) == len(resampled_tsgroup) assert (tsgroup.time_support.values == resampled_tsgroup.time_support.values).all() - for j,k in zip(tsgroup.keys(),resampled_tsgroup.keys()): + for j, k in zip(tsgroup.keys(), resampled_tsgroup.keys()): assert j == k assert len(tsgroup[j]) == len(resampled_tsgroup[k]) diff --git a/tests/test_spike_trigger_average.py b/tests/test_spike_trigger_average.py index ae8c5dfb..e0616cc5 100644 --- a/tests/test_spike_trigger_average.py +++ b/tests/test_spike_trigger_average.py @@ -2,10 +2,12 @@ """Tests of spike trigger average for `pynapple` package.""" -import pynapple as nap import numpy as np import pandas as pd import pytest + +import pynapple as nap + # from matplotlib.pyplot import * @@ -14,14 +16,12 @@ def test_compute_spike_trigger_average_tsd(): d = np.zeros(int(101 / 0.01)) t1 = np.arange(1, 100) x = np.arange(100, 10000, 100) - d[x] = 1.0 - feature = nap.Tsd( - t=np.arange(0, 101, 0.01), d=d, time_support=ep - ) + d[x] = 1.0 + feature = nap.Tsd(t=np.arange(0, 101, 0.01), d=d, time_support=ep) spikes = nap.TsGroup( {0: nap.Ts(t1), 1: nap.Ts(t1 - 0.1), 2: nap.Ts(t1 + 0.2)}, time_support=ep ) - + sta = nap.compute_event_trigger_average(spikes, feature, 0.2, (0.6, 0.6), ep) output = np.zeros((7, 3)) @@ -33,15 +33,14 @@ def test_compute_spike_trigger_average_tsd(): assert sta.shape == output.shape np.testing.assert_array_almost_equal(sta.values, output) + def test_compute_spike_trigger_average_tsdframe(): ep = nap.IntervalSet(0, 100) - d = np.zeros((int(101 / 0.01),1)) + d = np.zeros((int(101 / 0.01), 1)) x = np.arange(100, 10000, 100) d[x] = 1.0 - feature = nap.TsdFrame( - t=np.arange(0, 101, 0.01), d=d, time_support=ep - ) - t1 = np.arange(1, 100) + feature = nap.TsdFrame(t=np.arange(0, 101, 0.01), d=d, time_support=ep) + t1 = np.arange(1, 100) spikes = nap.TsGroup( {0: nap.Ts(t1), 1: nap.Ts(t1 - 0.1), 2: nap.Ts(t1 + 0.2)}, time_support=ep ) @@ -57,15 +56,14 @@ def test_compute_spike_trigger_average_tsdframe(): assert sta.shape == (*output.shape, 1) np.testing.assert_array_almost_equal(sta.values, np.expand_dims(output, 2)) + def test_compute_spike_trigger_average_tsdtensor(): ep = nap.IntervalSet(0, 100) - d=np.zeros((int(101 / 0.01),1,1)) + d = np.zeros((int(101 / 0.01), 1, 1)) x = np.arange(100, 10000, 100) d[x] = 1.0 - feature = nap.TsdTensor( - t=np.arange(0, 101, 0.01), d=d, time_support=ep - ) - t1 = np.arange(1, 100) + feature = nap.TsdTensor(t=np.arange(0, 101, 0.01), d=d, time_support=ep) + t1 = np.arange(1, 100) spikes = nap.TsGroup( {0: nap.Ts(t1), 1: nap.Ts(t1 - 0.1), 2: nap.Ts(t1 + 0.2)}, time_support=ep ) @@ -81,15 +79,14 @@ def test_compute_spike_trigger_average_tsdtensor(): assert sta.shape == output.shape np.testing.assert_array_almost_equal(sta.values, output) + def test_compute_spike_trigger_average_random_feature(): ep = nap.IntervalSet(0, 100) feature = nap.Tsd( t=np.arange(0, 100, 0.001), d=np.random.randn(100000), time_support=ep ) t1 = 
np.sort(np.random.uniform(0, 100, 1000)) - spikes = nap.TsGroup( - {0: nap.Ts(t1)}, time_support=ep - ) + spikes = nap.TsGroup({0: nap.Ts(t1)}, time_support=ep) group = spikes binsize = 0.1 @@ -104,27 +101,27 @@ def test_compute_spike_trigger_average_random_feature(): count = group.count(binsize, ep) tmp = feature.bin_average(binsize, ep) from scipy.linalg import hankel + # Build the Hankel matrix n_p = len(idx1) n_f = len(idx2) pad_tmp = np.pad(tmp.values, (n_p, n_f)) - offset_tmp = hankel(pad_tmp, pad_tmp[-(n_p + n_f + 1) :])[0 : len(tmp)] + offset_tmp = hankel(pad_tmp, pad_tmp[-(n_p + n_f + 1) :])[0 : len(tmp)] sta2 = np.dot(offset_tmp.T, count.values) - sta2 = sta2 / np.sum(count.values, 0) + sta2 = sta2 / np.sum(count.values, 0) np.testing.assert_array_almost_equal(sta.values, sta2) + def test_compute_spike_trigger_average_add_nan(): ep = nap.IntervalSet(0, 110) - d=np.zeros(int(110 / 0.01)) + d = np.zeros(int(110 / 0.01)) x = np.arange(100, 10000, 100) d[x] = 1.0 - d[-1001:] = np.nan - feature = nap.Tsd( - t=np.arange(0, 110, 0.01), d=d, time_support=ep - ) + d[-1001:] = np.nan + feature = nap.Tsd(t=np.arange(0, 110, 0.01), d=d, time_support=ep) t1 = np.arange(1, 100) - + spikes = nap.TsGroup( {0: nap.Ts(t1), 1: nap.Ts(t1 - 0.1), 2: nap.Ts(t1 + 0.2)}, time_support=ep ) @@ -140,14 +137,13 @@ def test_compute_spike_trigger_average_add_nan(): assert sta.shape == output.shape np.testing.assert_array_almost_equal(sta.values, output) + def test_compute_spike_trigger_average_raise_error(): ep = nap.IntervalSet(0, 101) - d=np.zeros(int(101 / 0.01)) - x = np.arange(100, 10000, 100)+1 + d = np.zeros(int(101 / 0.01)) + x = np.arange(100, 10000, 100) + 1 d[x] = 1.0 - feature = nap.Tsd( - t=np.arange(0, 101, 0.01), d=d , time_support=ep - ) + feature = nap.Tsd(t=np.arange(0, 101, 0.01), d=d, time_support=ep) t1 = np.arange(1, 101) + 0.01 spikes = nap.TsGroup( {0: nap.Ts(t1), 1: nap.Ts(t1 - 0.1), 2: nap.Ts(t1 + 0.2)}, time_support=ep @@ -164,13 +160,17 @@ def test_compute_spike_trigger_average_raise_error(): with pytest.raises(Exception) as e_info: nap.compute_event_trigger_average(spikes, feature, "0.1", (0.5, 0.5), ep) assert str(e_info.value) == "binsize should be int or float." - + with pytest.raises(Exception) as e_info: - nap.compute_event_trigger_average(spikes, feature, 0.1, (0.5, 0.5), ep, time_unit=1) + nap.compute_event_trigger_average( + spikes, feature, 0.1, (0.5, 0.5), ep, time_unit=1 + ) assert str(e_info.value) == "time_unit should be a str." with pytest.raises(Exception) as e_info: - nap.compute_event_trigger_average(spikes, feature, 0.1, (0.5, 0.5), ep, time_unit="a") + nap.compute_event_trigger_average( + spikes, feature, 0.1, (0.5, 0.5), ep, time_unit="a" + ) assert str(e_info.value) == "time_unit should be 's', 'ms' or 'us'" with pytest.raises(Exception) as e_info: @@ -178,11 +178,11 @@ def test_compute_spike_trigger_average_raise_error(): assert str(e_info.value) == "windowsize should be a tuple of 2 elements (-t, +t)" with pytest.raises(Exception) as e_info: - nap.compute_event_trigger_average(spikes, feature, 0.1, ('a', 'b'), ep) + nap.compute_event_trigger_average(spikes, feature, 0.1, ("a", "b"), ep) assert str(e_info.value) == "windowsize should be a tuple of int/float" with pytest.raises(Exception) as e_info: - nap.compute_event_trigger_average(spikes, feature, 0.1, (0.5, 0.5), [1,2,3]) + nap.compute_event_trigger_average(spikes, feature, 0.1, (0.5, 0.5), [1, 2, 3]) assert str(e_info.value) == "ep should be an IntervalSet object." 
@@ -192,9 +192,9 @@ def test_compute_spike_trigger_average_time_unit(): d = np.zeros(int(101 / 0.01)) t1 = np.arange(1, 100) for i in range(len(t1)): - d[t==t1[i]] = 1.0 + d[t == t1[i]] = 1.0 feature = nap.Tsd(t=t, d=d, time_support=ep) - + spikes = nap.TsGroup( {0: nap.Ts(t1), 1: nap.Ts(t1 - 0.1), 2: nap.Ts(t1 + 0.2)}, time_support=ep ) @@ -217,6 +217,7 @@ def test_compute_spike_trigger_average_time_unit(): assert sta.shape == output.shape np.testing.assert_array_almost_equal(sta.values, output) + @pytest.mark.filterwarnings("ignore") def test_compute_spike_trigger_average_no_windows(): ep = nap.IntervalSet(0, 100) @@ -240,28 +241,32 @@ def test_compute_spike_trigger_average_no_windows(): def test_compute_spike_trigger_average_multiple_epochs(): - ep = nap.IntervalSet(start = [0, 200], end=[100,300]) + ep = nap.IntervalSet(start=[0, 200], end=[100, 300]) feature = nap.Tsd( - t=np.hstack((np.arange(0, 100, 0.001), np.arange(200, 300, 0.001))), + t=np.hstack((np.arange(0, 100, 0.001), np.arange(200, 300, 0.001))), d=np.hstack((np.random.randn(100000), np.random.randn(100000))), - time_support=ep + time_support=ep, ) - t1 = np.hstack((np.sort(np.random.uniform(0, 100, 1000)), np.sort(np.random.uniform(200, 300, 1000)))) - spikes = nap.TsGroup( - {0: nap.Ts(t1)}, time_support=ep + t1 = np.hstack( + ( + np.sort(np.random.uniform(0, 100, 1000)), + np.sort(np.random.uniform(200, 300, 1000)), + ) ) + spikes = nap.TsGroup({0: nap.Ts(t1)}, time_support=ep) group = spikes binsize = 0.1 windowsize = (1.0, 1.0) sta = nap.compute_event_trigger_average(spikes, feature, binsize, windowsize, ep) - + start, end = windowsize idx1 = -np.arange(0, start + binsize, binsize)[::-1][:-1] idx2 = np.arange(0, end + binsize, binsize)[1:] time_idx = np.hstack((idx1, np.zeros(1), idx2)) from scipy.linalg import hankel + n_p = len(idx1) n_f = len(idx2) @@ -269,14 +274,14 @@ def test_compute_spike_trigger_average_multiple_epochs(): for i in range(2): count = group.count(binsize, ep[i]) tmp = feature.bin_average(binsize, ep[i]) - + # Build the Hankel matrix pad_tmp = np.pad(tmp.values, (n_p, n_f)) - offset_tmp = hankel(pad_tmp, pad_tmp[-(n_p + n_f + 1) :])[0 : len(tmp)] + offset_tmp = hankel(pad_tmp, pad_tmp[-(n_p + n_f + 1) :])[0 : len(tmp)] stai = np.dot(offset_tmp.T, count.values) stai = stai / np.sum(count.values, 0) sta2.append(stai) - sta2 = np.hstack(sta2).mean(1) + sta2 = np.hstack(sta2).mean(1) - np.testing.assert_array_almost_equal(sta.values[:,0], sta2) \ No newline at end of file + np.testing.assert_array_almost_equal(sta.values[:, 0], sta2) diff --git a/tests/test_suite2p.py b/tests/test_suite2p.py index 1aeeecf4..b4924826 100644 --- a/tests/test_suite2p.py +++ b/tests/test_suite2p.py @@ -6,12 +6,14 @@ """Tests of Suite2P loader for `pynappple` package.""" -import pynapple as nap +import os +import warnings + import numpy as np import pandas as pd import pytest -import warnings -import os + +import pynapple as nap path = "nwbfilestest/suite2p/suite2p/plane0" diff --git a/tests/test_time_index.py b/tests/test_time_index.py index 711d7d64..a85bbdc8 100644 --- a/tests/test_time_index.py +++ b/tests/test_time_index.py @@ -5,38 +5,53 @@ # @Last Modified time: 2024-02-26 13:14:41 """Tests of time units for `pynapple` package.""" -import pynapple as nap -import numpy as np -import pandas as pd -import pytest # from pynapple.core.time_units import format_timestamps, return_timestamps, sort_timestamps # from pynapple.core.time_index import TsIndex import warnings +import numpy as np +import pandas as pd +import 
pytest + +import pynapple as nap + + def test_format_timestamps(): t = np.random.rand(100) np.testing.assert_array_almost_equal(t, nap.TsIndex.format_timestamps(t)) - np.testing.assert_array_almost_equal(t/1e3, nap.TsIndex.format_timestamps(t, 'ms')) - np.testing.assert_array_almost_equal(t/1e6, nap.TsIndex.format_timestamps(t, 'us')) + np.testing.assert_array_almost_equal( + t / 1e3, nap.TsIndex.format_timestamps(t, "ms") + ) + np.testing.assert_array_almost_equal( + t / 1e6, nap.TsIndex.format_timestamps(t, "us") + ) with pytest.raises(ValueError, match=r"unrecognized time units type"): - nap.TsIndex.format_timestamps(t, 'aaaa') + nap.TsIndex.format_timestamps(t, "aaaa") + def test_return_timestamps(): t = np.random.rand(100) np.testing.assert_array_almost_equal(t, nap.TsIndex.return_timestamps(t)) - np.testing.assert_array_almost_equal(t*1e3, nap.TsIndex.return_timestamps(t, 'ms')) - np.testing.assert_array_almost_equal(t*1e6, nap.TsIndex.return_timestamps(t, 'us')) + np.testing.assert_array_almost_equal( + t * 1e3, nap.TsIndex.return_timestamps(t, "ms") + ) + np.testing.assert_array_almost_equal( + t * 1e6, nap.TsIndex.return_timestamps(t, "us") + ) with pytest.raises(ValueError, match="unrecognized time units type"): - nap.TsIndex.return_timestamps(t, units='aaaa') + nap.TsIndex.return_timestamps(t, units="aaaa") + def test_sort_timestamps(): t = np.random.rand(100) - np.testing.assert_array_almost_equal(np.sort(t), nap.TsIndex.sort_timestamps(t, False)) + np.testing.assert_array_almost_equal( + np.sort(t), nap.TsIndex.sort_timestamps(t, False) + ) with warnings.catch_warnings(record=True) as w: nap.TsIndex.sort_timestamps(t, True) @@ -50,8 +65,8 @@ def test_TsIndex(): np.testing.assert_array_equal(a.to_numpy(), np.arange(10)) np.testing.assert_array_equal(a.in_units("s"), np.arange(10)) - np.testing.assert_array_equal(a.in_units("ms"), np.arange(10)*1e3) - np.testing.assert_array_equal(a.in_units("us"), np.arange(10)*1e6) + np.testing.assert_array_equal(a.in_units("ms"), np.arange(10) * 1e3) + np.testing.assert_array_equal(a.in_units("us"), np.arange(10) * 1e6) with pytest.raises(RuntimeError, match=r"TsIndex object is not mutable."): a[0] = 1 diff --git a/tests/test_time_series.py b/tests/test_time_series.py index ec0736a2..9556df86 100755 --- a/tests/test_time_series.py +++ b/tests/test_time_series.py @@ -1,11 +1,12 @@ """Tests of time series for `pynapple` package.""" import pickle +from contextlib import nullcontext as does_not_raise +from pathlib import Path + import numpy as np import pandas as pd import pytest -from pathlib import Path -from contextlib import nullcontext as does_not_raise import pynapple as nap @@ -19,21 +20,27 @@ def test_create_tsd(): tsd = nap.Tsd(t=np.arange(100), d=np.arange(100)) assert isinstance(tsd, nap.Tsd) + def test_create_empty_tsd(): tsd = nap.Tsd(t=np.array([]), d=np.array([])) assert len(tsd) == 0 + @pytest.mark.filterwarnings("ignore") def test_create_tsd_from_number(): tsd = nap.Tsd(t=1, d=2) + def test_create_tsdframe(): tsdframe = nap.TsdFrame(t=np.arange(100), d=np.random.rand(100, 4)) assert isinstance(tsdframe, nap.TsdFrame) - tsdframe = nap.TsdFrame(t=np.arange(100), d=np.random.rand(100, 4), columns=['a', 'b', 'c', 'd']) + tsdframe = nap.TsdFrame( + t=np.arange(100), d=np.random.rand(100, 4), columns=["a", "b", "c", "d"] + ) assert isinstance(tsdframe, nap.TsdFrame) - assert np.all(tsdframe.columns == np.array(['a', 'b', 'c', 'd'])) + assert np.all(tsdframe.columns == np.array(["a", "b", "c", "d"])) + 
@pytest.mark.filterwarnings("ignore") def test_create_empty_tsdframe(): @@ -44,10 +51,12 @@ def test_create_empty_tsdframe(): with pytest.raises(AssertionError): tsdframe = nap.TsdFrame(t=np.arange(100)) + def test_create_1d_tsdframe(): tsdframe = nap.TsdFrame(t=np.arange(100), d=np.random.rand(100)) assert isinstance(tsdframe, nap.TsdFrame) + def test_create_ts(): ts = nap.Ts(t=np.arange(100)) assert isinstance(ts, nap.Ts) @@ -104,12 +113,14 @@ def test_create_ts_wrong_units(): with pytest.raises(ValueError): nap.Ts(t=a, time_units="min") + def test_create_tsd_wrong_units(): a = np.random.randint(0, 1000, 100) a.sort() with pytest.raises(ValueError): nap.Tsd(t=a, d=a, time_units="min") + def test_create_tsdframe_wrong_units(): a = np.random.randint(0, 1000, 100) d = np.random.rand(100, 3) @@ -160,28 +171,37 @@ def test_create_tsdframe_with_time_support(): np.testing.assert_array_almost_equal(tsdframe.time_support.start, ep.start) np.testing.assert_array_almost_equal(tsdframe.time_support.end, ep.end) + @pytest.mark.filterwarnings("ignore") def test_create_tsdtensor(): tsd = nap.TsdTensor(t=np.arange(100), d=np.random.rand(100, 3, 2)) assert isinstance(tsd, nap.TsdTensor) tsd = nap.TsdTensor(t=list(np.arange(100)), d=list(np.random.rand(100, 3, 2))) - assert isinstance(tsd, nap.TsdTensor) + assert isinstance(tsd, nap.TsdTensor) + def test_create_empty_tsd(): - tsd = nap.TsdTensor(t=np.array([]), d=np.empty(shape=(0,2,3))) + tsd = nap.TsdTensor(t=np.array([]), d=np.empty(shape=(0, 2, 3))) assert len(tsd) == 0 + @pytest.mark.filterwarnings("ignore") def test_raise_error_tsdtensor_init(): - with pytest.raises(RuntimeError, match=r"Unknown format for d. Accepted formats are numpy.ndarray, list, tuple or any array-like objects."): + with pytest.raises( + RuntimeError, + match=r"Unknown format for d. Accepted formats are numpy.ndarray, list, tuple or any array-like objects.", + ): nap.TsdTensor(t=np.arange(100), d=None) # with pytest.raises(AssertionError, match=r"Data should have more than 2 dimensions. 
If ndim < 3, use TsdFrame or Tsd object"): # nap.TsdTensor(t=np.arange(100), d=np.random.rand(100, 10)) - with pytest.raises(AssertionError):#, match=r"Length of values (10) does not match length of index (100)"): - nap.TsdTensor(t=np.arange(100), d=np.random.rand(10, 10,3)) + with pytest.raises( + AssertionError + ): # , match=r"Length of values (10) does not match length of index (100)"): + nap.TsdTensor(t=np.arange(100), d=np.random.rand(10, 10, 3)) + def test_index_error(): tsd = nap.Tsd(t=np.arange(100), d=np.arange(100)) @@ -194,6 +214,7 @@ def test_index_error(): with pytest.raises(IndexError): ts[1000] + def test_find_support(): tsd = nap.Tsd(t=np.arange(100), d=np.arange(100)) ep = tsd.find_support(1.0) @@ -204,12 +225,13 @@ def test_find_support(): tsd = nap.Tsd(t=t, d=np.arange(20)) ep = tsd.find_support(1.0) np.testing.assert_array_equal(ep.start, np.array([0.0, 20.0])) - np.testing.assert_array_equal(ep.end, np.array([9.0+1e-6, 29+1e-6])) + np.testing.assert_array_equal(ep.end, np.array([9.0 + 1e-6, 29 + 1e-6])) + def test_properties(): t = np.arange(100) - d = np.random.rand(100).astype(np.float32) # to match pynajax - tsd = nap.Tsd(t=t, d = d) + d = np.random.rand(100).astype(np.float32) # to match pynajax + tsd = nap.Tsd(t=t, d=d) assert hasattr(tsd, "t") assert hasattr(tsd, "d") @@ -230,6 +252,7 @@ def test_properties(): with pytest.raises(RuntimeError): tsd.rate = 0 + def test_base_tsd_class(): class DummyTsd(nap.core.time_series._BaseTsd): def __init__(self, t, d): @@ -244,12 +267,12 @@ def __getitem__(self, key): try: assert isinstance(tsd.values, np.ndarray) except AssertionError: - assert nap.core.utils.is_array_like(tsd.values) # for pynajax + assert nap.core.utils.is_array_like(tsd.values) # for pynajax assert isinstance(tsd.__repr__(), str) with pytest.raises((IndexError, TypeError)): - tsd['a'] + tsd["a"] #################################################### @@ -269,12 +292,8 @@ def test_as_units(self, tsd): if hasattr(tsd, "as_units"): tmp2 = tsd.index np.testing.assert_array_almost_equal(tsd.as_units("s").index, tmp2) - np.testing.assert_array_almost_equal( - tsd.as_units("ms").index, tmp2 * 1e3 - ) - np.testing.assert_array_almost_equal( - tsd.as_units("us").index, tmp2 * 1e6 - ) + np.testing.assert_array_almost_equal(tsd.as_units("ms").index, tmp2 * 1e3) + np.testing.assert_array_almost_equal(tsd.as_units("us").index, tmp2 * 1e6) # np.testing.assert_array_almost_equal(tsd.as_units(units="a").index.values, tmp2) def test_rate(self, tsd): @@ -311,15 +330,18 @@ def test_value_from_tsd(self, tsd): np.testing.assert_array_almost_equal(tsd2.values[::10], tsd3.values) def test_value_from_tsdframe(self, tsd): - tsdframe = nap.TsdFrame(t=np.arange(0, 100, 0.1), d=np.random.rand(1000,3)) + tsdframe = nap.TsdFrame(t=np.arange(0, 100, 0.1), d=np.random.rand(1000, 3)) tsdframe2 = tsd.value_from(tsdframe) assert len(tsd) == len(tsdframe2) np.testing.assert_array_almost_equal(tsdframe.values[::10], tsdframe2.values) def test_value_from_value_error(self, tsd): - with pytest.raises(AssertionError, match=r"First argument should be an instance of Tsd, TsdFrame or TsdTensor"): + with pytest.raises( + AssertionError, + match=r"First argument should be an instance of Tsd, TsdFrame or TsdTensor", + ): tsd.value_from(np.arange(10)) - + def test_value_from_with_restrict(self, tsd): ep = nap.IntervalSet(start=0, end=50, time_units="s") tsd2 = nap.Tsd(t=np.arange(0, 100, 0.1), d=np.random.rand(1000)) @@ -329,7 +351,7 @@ def test_value_from_with_restrict(self, tsd): 
tsd2.restrict(ep).values[::10], tsd3.values ) - tsdframe = nap.TsdFrame(t=np.arange(0, 100, 0.1), d=np.random.rand(1000,2)) + tsdframe = nap.TsdFrame(t=np.arange(0, 100, 0.1), d=np.random.rand(1000, 2)) tsdframe2 = tsd.value_from(tsdframe, ep) assert len(tsd.restrict(ep)) == len(tsdframe2) np.testing.assert_array_almost_equal( @@ -340,7 +362,7 @@ def test_restrict(self, tsd): ep = nap.IntervalSet(start=0, end=50) assert len(tsd.restrict(ep)) == 51 - def test_restrict_error(self, tsd): + def test_restrict_error(self, tsd): with pytest.raises(AssertionError, match=r"Argument should be IntervalSet"): tsd.restrict([0, 1]) @@ -388,13 +410,15 @@ def test_dropna(self, tsd): np.testing.assert_array_equal(tsd.values, new_tsd.values) tmp = np.random.rand(*tsd.shape) - tmp[tmp>0.9] = np.nan + tmp[tmp > 0.9] = np.nan tsd = tsd.__class__(t=tsd.t, d=tmp) - + new_tsd = tsd.dropna() assert not np.all(np.isnan(new_tsd.values)) - tokeep = np.array([~np.any(np.isnan(tsd[i])) for i in range(len(tsd))]) - np.testing.assert_array_equal(tsd.index.values[tokeep], new_tsd.index.values) + tokeep = np.array([~np.any(np.isnan(tsd[i])) for i in range(len(tsd))]) + np.testing.assert_array_equal( + tsd.index.values[tokeep], new_tsd.index.values + ) np.testing.assert_array_equal(tsd.values[tokeep], new_tsd.values) newtsd2 = tsd.restrict(new_tsd.time_support) @@ -402,11 +426,13 @@ def test_dropna(self, tsd): np.testing.assert_array_equal(newtsd2.values, new_tsd.values) new_tsd = tsd.dropna(update_time_support=False) - np.testing.assert_array_equal(tsd.index.values[tokeep], new_tsd.index.values) + np.testing.assert_array_equal( + tsd.index.values[tokeep], new_tsd.index.values + ) np.testing.assert_array_equal(tsd.values[tokeep], new_tsd.values) np.testing.assert_array_equal(new_tsd.time_support, tsd.time_support) - tsd = tsd.__class__(t=tsd.t, d=np.ones(tsd.shape)*np.nan) + tsd = tsd.__class__(t=tsd.t, d=np.ones(tsd.shape) * np.nan) new_tsd = tsd.dropna() assert len(new_tsd) == 0 assert len(new_tsd.time_support) == 0 @@ -415,26 +441,31 @@ def test_convolve_raise_errors(self, tsd): if not isinstance(tsd, nap.Ts): with pytest.raises(IOError) as e_info: - tsd.convolve([1,2,3]) - assert str(e_info.value) == "Input should be a numpy array (or jax array if pynajax is installed)." + tsd.convolve([1, 2, 3]) + assert ( + str(e_info.value) + == "Input should be a numpy array (or jax array if pynajax is installed)." + ) with pytest.raises(IOError) as e_info: tsd.convolve(np.array([])) assert str(e_info.value) == "Input array is length 0" with pytest.raises(IOError) as e_info: - tsd.convolve(np.ones(3), trim='a') - assert str(e_info.value) == "Unknow argument. trim should be 'both', 'left' or 'right'." + tsd.convolve(np.ones(3), trim="a") + assert ( + str(e_info.value) + == "Unknow argument. trim should be 'both', 'left' or 'right'." + ) with pytest.raises(IOError) as e_info: - tsd.convolve(np.ones((2,3,4))) + tsd.convolve(np.ones((2, 3, 4))) assert str(e_info.value) == "Array should be 1 or 2 dimension." 
with pytest.raises(IOError) as e_info: - tsd.convolve(np.ones(3), ep=[1,2,3,4]) + tsd.convolve(np.ones(3), ep=[1, 2, 3, 4]) assert str(e_info.value) == "ep should be an object of type IntervalSet" - def test_convolve_1d_kernel(self, tsd): array = np.random.randn(10) if not isinstance(tsd, nap.Ts): @@ -442,38 +473,38 @@ def test_convolve_1d_kernel(self, tsd): tmp = tsd.values.reshape(tsd.shape[0], -1) tmp2 = np.zeros_like(tmp) for i in range(tmp.shape[-1]): - tmp2[:,i] = np.convolve(tmp[:,i], array, mode='full')[4:-5] + tmp2[:, i] = np.convolve(tmp[:, i], array, mode="full")[4:-5] np.testing.assert_array_almost_equal( - tmp2, - tsd2.values.reshape(tsd2.shape[0], -1) - ) + tmp2, tsd2.values.reshape(tsd2.shape[0], -1) + ) - ep = nap.IntervalSet(start=[0, 60], end=[40,100]) + ep = nap.IntervalSet(start=[0, 60], end=[40, 100]) tsd3 = tsd.convolve(array, ep) - + tmp3 = [] for i in range(len(ep)): tmp2 = tsd.restrict(ep[i]).values - tmp2 = np.array(tmp2.reshape(tmp2.shape[0], -1)) # for pynajax + tmp2 = np.array(tmp2.reshape(tmp2.shape[0], -1)) # for pynajax for j in range(tmp2.shape[-1]): - tmp2[:,j] = np.convolve(tmp2[:,j], array, mode='full')[4:-5] + tmp2[:, j] = np.convolve(tmp2[:, j], array, mode="full")[4:-5] tmp3.append(tmp2) np.testing.assert_array_almost_equal( - tmp2, - tsd3.restrict(ep[i]).values.reshape(tmp2.shape[0], -1) - ) + tmp2, tsd3.restrict(ep[i]).values.reshape(tmp2.shape[0], -1) + ) # Trim - for trim, sl in zip(['left', 'both', 'right'], [slice(9,None),slice(4,-5),slice(None,-9)]): + for trim, sl in zip( + ["left", "both", "right"], + [slice(9, None), slice(4, -5), slice(None, -9)], + ): tsd2 = tsd.convolve(array, trim=trim) tmp = tsd.values.reshape(tsd.shape[0], -1) tmp2 = np.zeros_like(tmp) for i in range(tmp.shape[-1]): - tmp2[:,i] = np.convolve(tmp[:,i], array, mode='full')[sl] + tmp2[:, i] = np.convolve(tmp[:, i], array, mode="full")[sl] np.testing.assert_array_almost_equal( - tmp2, - tsd2.values.reshape(tsd2.shape[0], -1) - ) + tmp2, tsd2.values.reshape(tsd2.shape[0], -1) + ) def test_convolve_2d_kernel(self, tsd): array = np.random.randn(10, 3) @@ -481,22 +512,24 @@ def test_convolve_2d_kernel(self, tsd): # no epochs tsd2 = tsd.convolve(array) tmp = tsd.values.reshape(tsd.shape[0], -1) - + output = [] for i in range(tmp.shape[1]): for j in range(array.shape[1]): output.append( - np.convolve(tmp[:,i], array[:,j], mode='full')[4:-5] - ) + np.convolve(tmp[:, i], array[:, j], mode="full")[4:-5] + ) - output = np.array(output).T - np.testing.assert_array_almost_equal(output,tsd2.values.reshape(tsd.shape[0], -1)) + output = np.array(output).T + np.testing.assert_array_almost_equal( + output, tsd2.values.reshape(tsd.shape[0], -1) + ) # epochs - ep = nap.IntervalSet(start=[0, 60], end=[40,100]) + ep = nap.IntervalSet(start=[0, 60], end=[40, 100]) tsd2 = tsd.convolve(array, ep) - + for k in range(len(ep)): tmp = tsd.restrict(ep[k]) tmp2 = tmp.values.reshape(tmp.shape[0], -1) @@ -504,73 +537,79 @@ def test_convolve_2d_kernel(self, tsd): for i in range(tmp2.shape[1]): for j in range(array.shape[1]): output.append( - np.convolve(tmp2[:,i], array[:,j], mode='full')[4:-5] - ) - output = np.array(output).T + np.convolve(tmp2[:, i], array[:, j], mode="full")[4:-5] + ) + output = np.array(output).T np.testing.assert_array_almost_equal( - output,tsd2.restrict(ep[k]).values.reshape(tmp.shape[0], -1) - ) + output, tsd2.restrict(ep[k]).values.reshape(tmp.shape[0], -1) + ) def test_smooth(self, tsd): - if not isinstance(tsd, nap.Ts): + if not isinstance(tsd, nap.Ts): from scipy import 
signal + tsd2 = tsd.smooth(1, size_factor=10) tmp = tsd.values.reshape(tsd.shape[0], -1) tmp2 = np.zeros_like(tmp) std = int(tsd.rate * 1) - M = std*11 + M = std * 11 window = signal.windows.gaussian(M, std=std) - window = window / window.sum() + window = window / window.sum() for i in range(tmp.shape[-1]): - tmp2[:,i] = np.convolve(tmp[:,i], window, mode='full')[M//2:1-M//2-1] + tmp2[:, i] = np.convolve(tmp[:, i], window, mode="full")[ + M // 2 : 1 - M // 2 - 1 + ] np.testing.assert_array_almost_equal( - tmp2, - tsd2.values.reshape(tsd2.shape[0], -1) - ) + tmp2, tsd2.values.reshape(tsd2.shape[0], -1) + ) - tsd2 = tsd.smooth(1000, time_units='ms') - np.testing.assert_array_almost_equal(tmp2, - tsd2.values.reshape(tsd2.shape[0], -1)) + tsd2 = tsd.smooth(1000, time_units="ms") + np.testing.assert_array_almost_equal( + tmp2, tsd2.values.reshape(tsd2.shape[0], -1) + ) - tsd2 = tsd.smooth(1000000, time_units='us') - np.testing.assert_array_almost_equal(tmp2, - tsd2.values.reshape(tsd2.shape[0], -1)) + tsd2 = tsd.smooth(1000000, time_units="us") + np.testing.assert_array_almost_equal( + tmp2, tsd2.values.reshape(tsd2.shape[0], -1) + ) tsd2 = tsd.smooth(1, size_factor=200, norm=False) tmp = tsd.values.reshape(tsd.shape[0], -1) tmp2 = np.zeros_like(tmp) std = int(tsd.rate * 1) - M = std*201 - window = signal.windows.gaussian(M, std=std) + M = std * 201 + window = signal.windows.gaussian(M, std=std) for i in range(tmp.shape[-1]): - tmp2[:,i] = np.convolve(tmp[:,i], window, mode='full')[M//2:1-M//2-1] + tmp2[:, i] = np.convolve(tmp[:, i], window, mode="full")[ + M // 2 : 1 - M // 2 - 1 + ] np.testing.assert_array_almost_equal( - tmp2, - tsd2.values.reshape(tsd2.shape[0], -1) - ) + tmp2, tsd2.values.reshape(tsd2.shape[0], -1) + ) - tsd2 = tsd.smooth(1, windowsize = 10, norm=False) + tsd2 = tsd.smooth(1, windowsize=10, norm=False) tmp = tsd.values.reshape(tsd.shape[0], -1) tmp2 = np.zeros_like(tmp) std = int(tsd.rate * 1) M = int(tsd.rate * 11) - window = signal.windows.gaussian(M, std=std) + window = signal.windows.gaussian(M, std=std) for i in range(tmp.shape[-1]): - tmp2[:,i] = np.convolve(tmp[:,i], window, mode='full')[M//2:1-M//2-1] + tmp2[:, i] = np.convolve(tmp[:, i], window, mode="full")[ + M // 2 : 1 - M // 2 - 1 + ] np.testing.assert_array_almost_equal( - tmp2, - tsd2.values.reshape(tsd2.shape[0], -1) - ) + tmp2, tsd2.values.reshape(tsd2.shape[0], -1) + ) def test_smooth_raise_error(self, tsd): if not isinstance(tsd, nap.Ts): with pytest.raises(IOError) as e_info: - tsd.smooth('a') + tsd.smooth("a") assert str(e_info.value) == "std should be type int or float" with pytest.raises(IOError) as e_info: - tsd.smooth(1, size_factor='b') + tsd.smooth(1, size_factor="b") assert str(e_info.value) == "size_factor should be of type int" with pytest.raises(IOError) as e_info: @@ -578,13 +617,14 @@ def test_smooth_raise_error(self, tsd): assert str(e_info.value) == "norm should be of type boolean" with pytest.raises(IOError) as e_info: - tsd.smooth(1, time_units = 0) + tsd.smooth(1, time_units=0) assert str(e_info.value) == "time_units should be of type str" with pytest.raises(IOError) as e_info: - tsd.smooth(1, windowsize='a') + tsd.smooth(1, windowsize="a") assert str(e_info.value) == "windowsize should be type int or float" + #################################################### # Test for tsd #################################################### @@ -604,15 +644,13 @@ def test__getitems__(self, tsd): assert isinstance(a, nap.Tsd) np.testing.assert_array_almost_equal(a.index, b.index) 
np.testing.assert_array_almost_equal(a.values, b.values) - np.testing.assert_array_almost_equal( - a.time_support, tsd.time_support - ) + np.testing.assert_array_almost_equal(a.time_support, tsd.time_support) def test_count(self, tsd): count = tsd.count(1) assert len(count) == 99 np.testing.assert_array_almost_equal(count.index, np.arange(0.5, 99, 1)) - + count = tsd.count(bin_size=1) assert len(count) == 99 np.testing.assert_array_almost_equal(count.index, np.arange(0.5, 99, 1)) @@ -621,16 +659,15 @@ def test_count(self, tsd): assert len(count) == 99 assert count.dtype == np.dtype(np.int16) - def test_count_time_units(self, tsd): - for b, tu in zip([1, 1e3, 1e6],['s', 'ms', 'us']): - count = tsd.count(b, time_units = tu) + for b, tu in zip([1, 1e3, 1e6], ["s", "ms", "us"]): + count = tsd.count(b, time_units=tu) assert len(count) == 99 np.testing.assert_array_almost_equal(count.index, np.arange(0.5, 99, 1)) - + count = tsd.count(b, tu) assert len(count) == 99 - np.testing.assert_array_almost_equal(count.index, np.arange(0.5, 99, 1)) + np.testing.assert_array_almost_equal(count.index, np.arange(0.5, 99, 1)) def test_count_with_ep(self, tsd): ep = nap.IntervalSet(start=0, end=100) @@ -656,24 +693,23 @@ def test_count_with_ep_only(self, tsd): assert len(count) == 1 np.testing.assert_array_almost_equal(count.values, np.array([100])) - - def test_count_errors(self, tsd): + def test_count_errors(self, tsd): with pytest.raises(ValueError): - tsd.count(bin_size = {}) + tsd.count(bin_size={}) with pytest.raises(ValueError): - tsd.count(ep = {}) + tsd.count(ep={}) with pytest.raises(ValueError): - tsd.count(time_units = {}) + tsd.count(time_units={}) def test_bin_average(self, tsd): meantsd = tsd.bin_average(10) assert len(meantsd) == 10 np.testing.assert_array_almost_equal(meantsd.index, np.arange(5, 100, 10)) - bins = np.arange(tsd.time_support.start[0], tsd.time_support.end[0]+1, 10) + bins = np.arange(tsd.time_support.start[0], tsd.time_support.end[0] + 1, 10) idx = np.digitize(tsd.index, bins) - tmp = np.array([np.mean(tsd.values[idx==i]) for i in np.unique(idx)]) + tmp = np.array([np.mean(tsd.values[idx == i]) for i in np.unique(idx)]) np.testing.assert_array_almost_equal(meantsd.values, tmp) def test_bin_average_with_ep(self, tsd): @@ -681,10 +717,10 @@ def test_bin_average_with_ep(self, tsd): meantsd = tsd.bin_average(10, ep) assert len(meantsd) == 4 np.testing.assert_array_almost_equal(meantsd.index, np.arange(5, 40, 10)) - bins = np.arange(ep.start[0], ep.end[0]+10, 10) + bins = np.arange(ep.start[0], ep.end[0] + 10, 10) # tsd = tsd.restrict(ep) idx = np.digitize(tsd.index, bins) - tmp = np.array([np.mean(tsd.values[idx==i]) for i in np.unique(idx)]) + tmp = np.array([np.mean(tsd.values[idx == i]) for i in np.unique(idx)]) np.testing.assert_array_almost_equal(meantsd.values, tmp[0:-1]) def test_threshold(self, tsd): @@ -705,7 +741,9 @@ def test_threshold_time_support(self, tsd): with pytest.raises(ValueError) as e_info: tsd.threshold(0.5, "bla") - assert str(e_info.value) == "Method {} for thresholding is not accepted.".format("bla") + assert str( + e_info.value + ) == "Method {} for thresholding is not accepted.".format("bla") def test_data(self, tsd): np.testing.assert_array_almost_equal(tsd.values, tsd.data()) @@ -723,8 +761,8 @@ def test_to_tsgroup(self, tsd): d = [] group = {} for i in range(3): - t.append(np.sort(np.random.rand(10)*100)) - d.append(np.ones(10)*i) + t.append(np.sort(np.random.rand(10) * 100)) + d.append(np.ones(10) * i) group[i] = nap.Ts(t=t[-1]) times = 
np.array(t).flatten() @@ -741,95 +779,102 @@ def test_to_tsgroup(self, tsd): np.testing.assert_array_almost_equal(np.arange(3), tsgroup.index) for i in range(3): np.testing.assert_array_almost_equal(tsgroup[i].index, t[i]) - + def test_save_npz(self, tsd): with pytest.raises(TypeError) as e: tsd.save(dict) with pytest.raises(RuntimeError) as e: - tsd.save('./') - assert str(e.value) == "Invalid filename input. {} is directory.".format(Path("./").resolve()) + tsd.save("./") + assert str(e.value) == "Invalid filename input. {} is directory.".format( + Path("./").resolve() + ) - fake_path = './fake/path' + fake_path = "./fake/path" with pytest.raises(RuntimeError) as e: - tsd.save(fake_path+'/file.npz') - assert str(e.value) == "Path {} does not exist.".format(Path(fake_path).resolve()) + tsd.save(fake_path + "/file.npz") + assert str(e.value) == "Path {} does not exist.".format( + Path(fake_path).resolve() + ) tsd.save("tsd.npz") - assert "tsd.npz" in [f.name for f in Path('.').iterdir()] + assert "tsd.npz" in [f.name for f in Path(".").iterdir()] tsd.save("tsd2") - assert "tsd2.npz" in [f.name for f in Path('.').iterdir()] + assert "tsd2.npz" in [f.name for f in Path(".").iterdir()] file = np.load("tsd.npz") keys = list(file.keys()) - assert 't' in keys - assert 'd' in keys - assert 'start' in keys - assert 'end' in keys + assert "t" in keys + assert "d" in keys + assert "start" in keys + assert "end" in keys - np.testing.assert_array_almost_equal(file['t'], tsd.index) - np.testing.assert_array_almost_equal(file['d'], tsd.values) - np.testing.assert_array_almost_equal(file['start'], tsd.time_support.start) - np.testing.assert_array_almost_equal(file['end'], tsd.time_support.end) + np.testing.assert_array_almost_equal(file["t"], tsd.index) + np.testing.assert_array_almost_equal(file["d"], tsd.values) + np.testing.assert_array_almost_equal(file["start"], tsd.time_support.start) + np.testing.assert_array_almost_equal(file["end"], tsd.time_support.end) # Path("tsd.npz").unlink() # Path("tsd2.npz").unlink() def test_interpolate(self, tsd): - + y = np.arange(0, 1001) tsd = nap.Tsd(t=np.arange(0, 101), d=y[0::10]) # Ts - ts = nap.Ts(t=y/10) + ts = nap.Ts(t=y / 10) tsd2 = tsd.interpolate(ts) np.testing.assert_array_almost_equal(tsd2.values, y) # Tsd - ts = nap.Tsd(t=y/10, d=np.zeros_like(y)) + ts = nap.Tsd(t=y / 10, d=np.zeros_like(y)) tsd2 = tsd.interpolate(ts) np.testing.assert_array_almost_equal(tsd2.values, y) # TsdFrame - ts = nap.TsdFrame(t=y/10, d=np.zeros((len(y), 2))) + ts = nap.TsdFrame(t=y / 10, d=np.zeros((len(y), 2))) tsd2 = tsd.interpolate(ts) np.testing.assert_array_almost_equal(tsd2.values, y) - + with pytest.raises(IOError) as e: tsd.interpolate([0, 1, 2]) - assert str(e.value) == "First argument should be an instance of Ts, Tsd, TsdFrame or TsdTensor" + assert ( + str(e.value) + == "First argument should be an instance of Ts, Tsd, TsdFrame or TsdTensor" + ) with pytest.raises(IOError) as e: - tsd.interpolate(ts, left='a') + tsd.interpolate(ts, left="a") assert str(e.value) == "Argument left should be of type float or int" with pytest.raises(IOError) as e: - tsd.interpolate(ts, right='a') + tsd.interpolate(ts, right="a") assert str(e.value) == "Argument right should be of type float or int" with pytest.raises(IOError) as e: - tsd.interpolate(ts, ep=[1,2,3,4]) + tsd.interpolate(ts, ep=[1, 2, 3, 4]) assert str(e.value) == "ep should be an object of type IntervalSet" # Right left ep = nap.IntervalSet(start=0, end=5) - tsd = nap.Tsd(t=np.arange(1,4), d=np.arange(3), time_support=ep) + 
tsd = nap.Tsd(t=np.arange(1, 4), d=np.arange(3), time_support=ep) ts = nap.Ts(t=np.arange(0, 5)) tsd2 = tsd.interpolate(ts, left=1234) assert float(tsd2.values[0]) == 1234.0 tsd2 = tsd.interpolate(ts, right=4321) assert float(tsd2.values[-1]) == 4321.0 - def test_interpolate_with_ep(self, tsd): + def test_interpolate_with_ep(self, tsd): y = np.arange(0, 1001) - tsd = nap.Tsd(t=np.arange(0, 101), d=y[0::10]) - ts = nap.Ts(t=y/10) + tsd = nap.Tsd(t=np.arange(0, 101), d=y[0::10]) + ts = nap.Ts(t=y / 10) ep = nap.IntervalSet(start=np.arange(0, 100, 20), end=np.arange(10, 110, 20)) tsd2 = tsd.interpolate(ts, ep) - tmp = ts.restrict(ep).index*10 + tmp = ts.restrict(ep).index * 10 np.testing.assert_array_almost_equal(tmp, tsd2.values) # Empty ep @@ -837,6 +882,7 @@ def test_interpolate_with_ep(self, tsd): tsd2 = tsd.interpolate(ts, ep) assert len(tsd2) == 0 + #################################################### # Test for tsdframe #################################################### @@ -851,33 +897,53 @@ def test_as_dataframe(self, tsdframe): assert isinstance(tsdframe.as_dataframe(), pd.DataFrame) def test_horizontal_slicing(self, tsdframe): - assert isinstance(tsdframe[:,0], nap.Tsd) - np.testing.assert_array_almost_equal(tsdframe[:,0].values, tsdframe.values[:,0]) - assert isinstance(tsdframe[:,0].time_support, nap.IntervalSet) - np.testing.assert_array_almost_equal(tsdframe.time_support, tsdframe[:,0].time_support) - - assert isinstance(tsdframe[:,[0,2]], nap.TsdFrame) - np.testing.assert_array_almost_equal(tsdframe.values[:,[0,2]], tsdframe[:,[0,2]].values) - assert isinstance(tsdframe[:,[0,2]].time_support, nap.IntervalSet) - np.testing.assert_array_almost_equal(tsdframe.time_support, tsdframe[:,[0,2]].time_support) + assert isinstance(tsdframe[:, 0], nap.Tsd) + np.testing.assert_array_almost_equal( + tsdframe[:, 0].values, tsdframe.values[:, 0] + ) + assert isinstance(tsdframe[:, 0].time_support, nap.IntervalSet) + np.testing.assert_array_almost_equal( + tsdframe.time_support, tsdframe[:, 0].time_support + ) + + assert isinstance(tsdframe[:, [0, 2]], nap.TsdFrame) + np.testing.assert_array_almost_equal( + tsdframe.values[:, [0, 2]], tsdframe[:, [0, 2]].values + ) + assert isinstance(tsdframe[:, [0, 2]].time_support, nap.IntervalSet) + np.testing.assert_array_almost_equal( + tsdframe.time_support, tsdframe[:, [0, 2]].time_support + ) def test_vertical_slicing(self, tsdframe): assert isinstance(tsdframe[0:10], nap.TsdFrame) - np.testing.assert_array_almost_equal(tsdframe.values[0:10], tsdframe[0:10].values) + np.testing.assert_array_almost_equal( + tsdframe.values[0:10], tsdframe[0:10].values + ) assert isinstance(tsdframe[0:10].time_support, nap.IntervalSet) - np.testing.assert_array_almost_equal(tsdframe[0:10].time_support, tsdframe.time_support) + np.testing.assert_array_almost_equal( + tsdframe[0:10].time_support, tsdframe.time_support + ) def test_str_indexing(self, tsdframe): - tsdframe = nap.TsdFrame(t=np.arange(100), d=np.random.rand(100, 3), time_units="s", columns=['a', 'b', 'c']) - np.testing.assert_array_almost_equal(tsdframe.values[:,0], tsdframe['a'].values) - np.testing.assert_array_almost_equal(tsdframe.values[:,[0,2]], tsdframe[['a', 'c']].values) + tsdframe = nap.TsdFrame( + t=np.arange(100), + d=np.random.rand(100, 3), + time_units="s", + columns=["a", "b", "c"], + ) + np.testing.assert_array_almost_equal( + tsdframe.values[:, 0], tsdframe["a"].values + ) + np.testing.assert_array_almost_equal( + tsdframe.values[:, [0, 2]], tsdframe[["a", "c"]].values + ) with 
pytest.raises(Exception): - tsdframe['d'] + tsdframe["d"] with pytest.raises(Exception): - tsdframe[['d', 'e']] - + tsdframe[["d", "e"]] def test_operators(self, tsdframe): v = tsdframe.values @@ -905,17 +971,17 @@ def test_operators(self, tsdframe): a = tsdframe // 0.5 assert isinstance(a, nap.TsdFrame) np.testing.assert_array_almost_equal(tsdframe.index, a.index) - assert np.all(a.values == (v // 0.5)) + assert np.all(a.values == (v // 0.5)) a = tsdframe % 0.5 assert isinstance(a, nap.TsdFrame) np.testing.assert_array_almost_equal(tsdframe.index, a.index) assert np.all(a.values == (v % 0.5)) - a = tsdframe ** 0.5 + a = tsdframe**0.5 assert isinstance(a, nap.TsdFrame) np.testing.assert_array_almost_equal(tsdframe.index, a.index) - np.testing.assert_array_almost_equal(a.values, v ** 0.5) + np.testing.assert_array_almost_equal(a.values, v**0.5) a = tsdframe > 0.5 assert isinstance(a, nap.TsdFrame) @@ -935,9 +1001,11 @@ def test_operators(self, tsdframe): a = tsdframe <= 0.5 assert isinstance(a, nap.TsdFrame) np.testing.assert_array_almost_equal(tsdframe.index, a.index) - assert np.all(a.values == (v <= 0.5)) + assert np.all(a.values == (v <= 0.5)) - tsdframe = nap.TsdFrame(t=np.arange(10), d=np.tile(np.arange(10)[:,np.newaxis], 3)) + tsdframe = nap.TsdFrame( + t=np.arange(10), d=np.tile(np.arange(10)[:, np.newaxis], 3) + ) v = tsdframe.values a = tsdframe == 5 assert isinstance(a, nap.TsdFrame) @@ -964,7 +1032,9 @@ def test_bin_average(self, tsdframe): meantsd = tsdframe.bin_average(10) assert len(meantsd) == 10 np.testing.assert_array_almost_equal(meantsd.index, np.arange(5, 100, 10)) - bins = np.arange(tsdframe.time_support.start[0], tsdframe.time_support.end[0]+1, 10) + bins = np.arange( + tsdframe.time_support.start[0], tsdframe.time_support.end[0] + 1, 10 + ) tmp = tsdframe.as_dataframe().groupby(np.digitize(tsdframe.index, bins)).mean() np.testing.assert_array_almost_equal(meantsd.values, tmp.values) @@ -973,89 +1043,108 @@ def test_bin_average_with_ep(self, tsdframe): meantsd = tsdframe.bin_average(10, ep) assert len(meantsd) == 4 np.testing.assert_array_almost_equal(meantsd.index, np.arange(5, 40, 10)) - bins = np.arange(ep.start[0], ep.end[0]+10, 10) + bins = np.arange(ep.start[0], ep.end[0] + 10, 10) tsdframe = tsdframe.restrict(ep) tmp = tsdframe.as_dataframe().groupby(np.digitize(tsdframe.index, bins)).mean() - np.testing.assert_array_almost_equal(meantsd.values, tmp.loc[np.arange(1,5)].values) + np.testing.assert_array_almost_equal( + meantsd.values, tmp.loc[np.arange(1, 5)].values + ) def test_save_npz(self, tsdframe): with pytest.raises(TypeError) as e: tsdframe.save(dict) with pytest.raises(RuntimeError) as e: - tsdframe.save('./') - assert str(e.value) == "Invalid filename input. {} is directory.".format(Path("./").resolve()) + tsdframe.save("./") + assert str(e.value) == "Invalid filename input. 
{} is directory.".format( + Path("./").resolve() + ) - fake_path = './fake/path' + fake_path = "./fake/path" with pytest.raises(RuntimeError) as e: - tsdframe.save(fake_path+'/file.npz') - assert str(e.value) == "Path {} does not exist.".format(Path(fake_path).resolve()) + tsdframe.save(fake_path + "/file.npz") + assert str(e.value) == "Path {} does not exist.".format( + Path(fake_path).resolve() + ) tsdframe.save("tsdframe.npz") - assert "tsdframe.npz" in [f.name for f in Path('.').iterdir()] + assert "tsdframe.npz" in [f.name for f in Path(".").iterdir()] tsdframe.save("tsdframe2") - assert "tsdframe2.npz" in [f.name for f in Path('.').iterdir()] + assert "tsdframe2.npz" in [f.name for f in Path(".").iterdir()] file = np.load("tsdframe.npz") keys = list(file.keys()) - assert 't' in keys - assert 'd' in keys - assert 'start' in keys - assert 'end' in keys - assert 'columns' in keys - - np.testing.assert_array_almost_equal(file['t'], tsdframe.index) - np.testing.assert_array_almost_equal(file['d'], tsdframe.values) - np.testing.assert_array_almost_equal(file['start'], tsdframe.time_support.start) - np.testing.assert_array_almost_equal(file['end'], tsdframe.time_support.end) - np.testing.assert_array_almost_equal(file['columns'], tsdframe.columns) + assert "t" in keys + assert "d" in keys + assert "start" in keys + assert "end" in keys + assert "columns" in keys + + np.testing.assert_array_almost_equal(file["t"], tsdframe.index) + np.testing.assert_array_almost_equal(file["d"], tsdframe.values) + np.testing.assert_array_almost_equal(file["start"], tsdframe.time_support.start) + np.testing.assert_array_almost_equal(file["end"], tsdframe.time_support.end) + np.testing.assert_array_almost_equal(file["columns"], tsdframe.columns) # Path("tsdframe.npz").unlink() # Path("tsdframe2.npz").unlink() def test_interpolate(self, tsdframe): - + y = np.arange(0, 1001) - data_stack = np.stack([np.arange(0, 1001),]*4).T + data_stack = np.stack( + [ + np.arange(0, 1001), + ] + * 4 + ).T tsdframe = nap.TsdFrame(t=np.arange(0, 101), d=data_stack[0::10, :]) # Ts - ts = nap.Ts(t=y/10) + ts = nap.Ts(t=y / 10) tsdframe2 = tsdframe.interpolate(ts) np.testing.assert_array_almost_equal(tsdframe2.values, data_stack) # Tsd - ts = nap.Tsd(t=y/10, d=np.zeros_like(y)) + ts = nap.Tsd(t=y / 10, d=np.zeros_like(y)) tsdframe2 = tsdframe.interpolate(ts) np.testing.assert_array_almost_equal(tsdframe2.values, data_stack) # TsdFrame - ts = nap.TsdFrame(t=y/10, d=np.zeros((len(y), 2))) + ts = nap.TsdFrame(t=y / 10, d=np.zeros((len(y), 2))) tsdframe2 = tsdframe.interpolate(ts) np.testing.assert_array_almost_equal(tsdframe2.values, data_stack) - + with pytest.raises(IOError) as e: tsdframe.interpolate([0, 1, 2]) - assert str(e.value) == "First argument should be an instance of Ts, Tsd, TsdFrame or TsdTensor" + assert ( + str(e.value) + == "First argument should be an instance of Ts, Tsd, TsdFrame or TsdTensor" + ) # Right left ep = nap.IntervalSet(start=0, end=5) - tsdframe = nap.Tsd(t=np.arange(1,4), d=np.arange(3), time_support=ep) + tsdframe = nap.Tsd(t=np.arange(1, 4), d=np.arange(3), time_support=ep) ts = nap.Ts(t=np.arange(0, 5)) tsdframe2 = tsdframe.interpolate(ts, left=1234) assert float(tsdframe2.values[0]) == 1234.0 tsdframe2 = tsdframe.interpolate(ts, right=4321) assert float(tsdframe2.values[-1]) == 4321.0 - def test_interpolate_with_ep(self, tsdframe): + def test_interpolate_with_ep(self, tsdframe): y = np.arange(0, 1001) - data_stack = np.stack([np.arange(0, 1001),]*4).T + data_stack = np.stack( + [ + np.arange(0, 
1001), + ] + * 4 + ).T - tsdframe = nap.TsdFrame(t=np.arange(0, 101), d=data_stack[0::10, :]) - ts = nap.Ts(t=y/10) + tsdframe = nap.TsdFrame(t=np.arange(0, 101), d=data_stack[0::10, :]) + ts = nap.Ts(t=y / 10) ep = nap.IntervalSet(start=np.arange(0, 100, 20), end=np.arange(10, 110, 20)) tsdframe2 = tsdframe.interpolate(ts, ep) tmp = ts.restrict(ep).index * 10 @@ -1071,12 +1160,18 @@ def test_interpolate_with_ep(self, tsdframe): def test_convolve_keep_columns(self, tsdframe): array = np.random.randn(10) - tsdframe = nap.TsdFrame(t=np.arange(100), d=np.random.rand(100, 3), time_units="s", columns=['a', 'b', 'c']) + tsdframe = nap.TsdFrame( + t=np.arange(100), + d=np.random.rand(100, 3), + time_units="s", + columns=["a", "b", "c"], + ) tsd2 = tsdframe.convolve(array) assert isinstance(tsd2, nap.TsdFrame) np.testing.assert_array_equal(tsd2.columns, tsdframe.columns) + #################################################### # Test for ts #################################################### @@ -1090,7 +1185,7 @@ class Test_Time_Series_4: def test_repr_(self, ts): # assert pd.Series(ts).fillna("").__repr__() == ts.__repr__() - assert isinstance(ts.__repr__(), str) + assert isinstance(ts.__repr__(), str) def test_str_(self, ts): # assert pd.Series(ts).fillna("").__str__() == ts.__str__() @@ -1101,30 +1196,34 @@ def test_save_npz(self, ts): ts.save(dict) with pytest.raises(RuntimeError) as e: - ts.save('./') - assert str(e.value) == "Invalid filename input. {} is directory.".format(Path("./").resolve()) + ts.save("./") + assert str(e.value) == "Invalid filename input. {} is directory.".format( + Path("./").resolve() + ) - fake_path = './fake/path' + fake_path = "./fake/path" with pytest.raises(RuntimeError) as e: - ts.save(fake_path+'/file.npz') - assert str(e.value) == "Path {} does not exist.".format(Path(fake_path).resolve()) + ts.save(fake_path + "/file.npz") + assert str(e.value) == "Path {} does not exist.".format( + Path(fake_path).resolve() + ) ts.save("ts.npz") - assert "ts.npz" in [f.name for f in Path('.').iterdir()] + assert "ts.npz" in [f.name for f in Path(".").iterdir()] ts.save("ts2") - assert "ts2.npz" in [f.name for f in Path('.').iterdir()] + assert "ts2.npz" in [f.name for f in Path(".").iterdir()] file = np.load("ts.npz") keys = list(file.keys()) - assert 't' in keys - assert 'start' in keys - assert 'end' in keys + assert "t" in keys + assert "start" in keys + assert "end" in keys - np.testing.assert_array_almost_equal(file['t'], ts.index) - np.testing.assert_array_almost_equal(file['start'], ts.time_support.start) - np.testing.assert_array_almost_equal(file['end'], ts.time_support.end) + np.testing.assert_array_almost_equal(file["t"], ts.index) + np.testing.assert_array_almost_equal(file["start"], ts.time_support.start) + np.testing.assert_array_almost_equal(file["end"], ts.time_support.end) # Path("ts.npz").unlink() # Path("ts2.npz").unlink() @@ -1147,17 +1246,17 @@ def test_get(self, ts): assert isinstance(ts[0], nap.Ts) assert len(ts[0]) == 1 assert len(ts[0:10]) == 10 - assert len(ts[[0,2,5]]) == 3 + assert len(ts[[0, 2, 5]]) == 3 - np.testing.assert_array_equal(ts[[0,2,5]].index, np.array([0, 2, 5])) + np.testing.assert_array_equal(ts[[0, 2, 5]].index, np.array([0, 2, 5])) - assert len(ts[0:10,0]) == 10 + assert len(ts[0:10, 0]) == 10 def test_count(self, ts): count = ts.count(1) assert len(count) == 99 np.testing.assert_array_almost_equal(count.index, np.arange(0.5, 99, 1)) - + count = ts.count(bin_size=1) assert len(count) == 99 
np.testing.assert_array_almost_equal(count.index, np.arange(0.5, 99, 1)) @@ -1165,17 +1264,16 @@ def test_count(self, ts): count = ts.count(bin_size=1, dtype=np.int16) assert len(count) == 99 assert count.dtype == np.dtype(np.int16) - def test_count_time_units(self, ts): - for b, tu in zip([1, 1e3, 1e6],['s', 'ms', 'us']): - count = ts.count(b, time_units = tu) + for b, tu in zip([1, 1e3, 1e6], ["s", "ms", "us"]): + count = ts.count(b, time_units=tu) assert len(count) == 99 np.testing.assert_array_almost_equal(count.index, np.arange(0.5, 99, 1)) - + count = ts.count(b, tu) assert len(count) == 99 - np.testing.assert_array_almost_equal(count.index, np.arange(0.5, 99, 1)) + np.testing.assert_array_almost_equal(count.index, np.arange(0.5, 99, 1)) def test_count_with_ep(self, ts): ep = nap.IntervalSet(start=0, end=100) @@ -1201,15 +1299,15 @@ def test_count_with_ep_only(self, ts): assert len(count) == 1 np.testing.assert_array_almost_equal(count.values, np.array([100])) - def test_count_errors(self, ts): + def test_count_errors(self, ts): with pytest.raises(ValueError): - ts.count(bin_size = {}) + ts.count(bin_size={}) with pytest.raises(ValueError): - ts.count(ep = {}) + ts.count(ep={}) with pytest.raises(ValueError): - ts.count(time_units = {}) + ts.count(time_units={}) @pytest.mark.parametrize( "dtype, expectation", @@ -1222,7 +1320,7 @@ def test_count_errors(self, ts): (np.float32, does_not_raise()), (np.float64, does_not_raise()), (1, pytest.raises(ValueError, match=f"1 is not a valid numpy dtype")), - ] + ], ) def test_count_dtype(self, dtype, expectation, ts): with expectation: @@ -1230,13 +1328,14 @@ def test_count_dtype(self, dtype, expectation, ts): if dtype: assert np.issubdtype(count.dtype, dtype) + #################################################### # Test for tsdtensor #################################################### @pytest.mark.parametrize( "tsdtensor", [ - nap.TsdTensor(t=np.arange(100), d=np.random.rand(100, 3,2), time_units="s"), + nap.TsdTensor(t=np.arange(100), d=np.random.rand(100, 3, 2), time_units="s"), ], ) class Test_Time_Series_5: @@ -1245,21 +1344,33 @@ def test_return_ndarray(self, tsdtensor): np.testing.assert_array_equal(tsdtensor[0], tsdtensor.values[0]) def test_horizontal_slicing(self, tsdtensor): - assert isinstance(tsdtensor[:,0], nap.TsdFrame) - np.testing.assert_array_almost_equal(tsdtensor[:,0].values, tsdtensor.values[:,0]) - assert isinstance(tsdtensor[:,0].time_support, nap.IntervalSet) - np.testing.assert_array_almost_equal(tsdtensor.time_support, tsdtensor[:,0].time_support) - - assert isinstance(tsdtensor[:,[0,2]], nap.TsdTensor) - np.testing.assert_array_almost_equal(tsdtensor.values[:,[0,2]], tsdtensor[:,[0,2]].values) - assert isinstance(tsdtensor[:,[0,2]].time_support, nap.IntervalSet) - np.testing.assert_array_almost_equal(tsdtensor.time_support, tsdtensor[:,[0,2]].time_support) + assert isinstance(tsdtensor[:, 0], nap.TsdFrame) + np.testing.assert_array_almost_equal( + tsdtensor[:, 0].values, tsdtensor.values[:, 0] + ) + assert isinstance(tsdtensor[:, 0].time_support, nap.IntervalSet) + np.testing.assert_array_almost_equal( + tsdtensor.time_support, tsdtensor[:, 0].time_support + ) + + assert isinstance(tsdtensor[:, [0, 2]], nap.TsdTensor) + np.testing.assert_array_almost_equal( + tsdtensor.values[:, [0, 2]], tsdtensor[:, [0, 2]].values + ) + assert isinstance(tsdtensor[:, [0, 2]].time_support, nap.IntervalSet) + np.testing.assert_array_almost_equal( + tsdtensor.time_support, tsdtensor[:, [0, 2]].time_support + ) def 
test_vertical_slicing(self, tsdtensor): assert isinstance(tsdtensor[0:10], nap.TsdTensor) - np.testing.assert_array_almost_equal(tsdtensor.values[0:10], tsdtensor[0:10].values) + np.testing.assert_array_almost_equal( + tsdtensor.values[0:10], tsdtensor[0:10].values + ) assert isinstance(tsdtensor[0:10].time_support, nap.IntervalSet) - np.testing.assert_array_almost_equal(tsdtensor[0:10].time_support, tsdtensor.time_support) + np.testing.assert_array_almost_equal( + tsdtensor[0:10].time_support, tsdtensor.time_support + ) def test_operators(self, tsdtensor): v = tsdtensor.values @@ -1287,17 +1398,17 @@ def test_operators(self, tsdtensor): a = tsdtensor // 0.5 assert isinstance(a, nap.TsdTensor) np.testing.assert_array_almost_equal(tsdtensor.index, a.index) - assert np.all(a.values == (v // 0.5)) + assert np.all(a.values == (v // 0.5)) a = tsdtensor % 0.5 assert isinstance(a, nap.TsdTensor) np.testing.assert_array_almost_equal(tsdtensor.index, a.index) assert np.all(a.values == (v % 0.5)) - a = tsdtensor ** 0.5 + a = tsdtensor**0.5 assert isinstance(a, nap.TsdTensor) np.testing.assert_array_almost_equal(tsdtensor.index, a.index) - np.testing.assert_array_almost_equal(a.values, v ** 0.5) + np.testing.assert_array_almost_equal(a.values, v**0.5) a = tsdtensor > 0.5 assert isinstance(a, nap.TsdTensor) @@ -1317,9 +1428,11 @@ def test_operators(self, tsdtensor): a = tsdtensor <= 0.5 assert isinstance(a, nap.TsdTensor) np.testing.assert_array_almost_equal(tsdtensor.index, a.index) - assert np.all(a.values == (v <= 0.5)) + assert np.all(a.values == (v <= 0.5)) - tsdtensor = nap.TsdTensor(t=np.arange(10), d=np.atleast_3d(np.tile(np.arange(10)[:,np.newaxis], 3))) + tsdtensor = nap.TsdTensor( + t=np.arange(10), d=np.atleast_3d(np.tile(np.arange(10)[:, np.newaxis], 3)) + ) v = tsdtensor.values a = tsdtensor == 5 assert isinstance(a, nap.TsdTensor) @@ -1346,11 +1459,13 @@ def test_bin_average(self, tsdtensor): meantsd = tsdtensor.bin_average(10) assert len(meantsd) == 10 np.testing.assert_array_almost_equal(meantsd.index, np.arange(5, 100, 10)) - bins = np.arange(tsdtensor.time_support.start[0], tsdtensor.time_support.end[0]+1, 10) + bins = np.arange( + tsdtensor.time_support.start[0], tsdtensor.time_support.end[0] + 1, 10 + ) idx = np.digitize(tsdtensor.index, bins) tmp = [] for i in np.unique(idx): - tmp.append(np.mean(tsdtensor.values[idx==i],0)) + tmp.append(np.mean(tsdtensor.values[idx == i], 0)) tmp = np.array(tmp) np.testing.assert_array_almost_equal(meantsd.values, tmp) @@ -1359,82 +1474,110 @@ def test_save_npz(self, tsdtensor): tsdtensor.save(dict) with pytest.raises(RuntimeError) as e: - tsdtensor.save('./') - assert str(e.value) == "Invalid filename input. {} is directory.".format(Path("./").resolve()) + tsdtensor.save("./") + assert str(e.value) == "Invalid filename input. 
{} is directory.".format( + Path("./").resolve() + ) - fake_path = './fake/path' + fake_path = "./fake/path" with pytest.raises(RuntimeError) as e: - tsdtensor.save(fake_path+'/file.npz') - assert str(e.value) == "Path {} does not exist.".format(Path(fake_path).resolve()) + tsdtensor.save(fake_path + "/file.npz") + assert str(e.value) == "Path {} does not exist.".format( + Path(fake_path).resolve() + ) tsdtensor.save("tsdtensor.npz") - assert "tsdtensor.npz" in [f.name for f in Path('.').iterdir()] + assert "tsdtensor.npz" in [f.name for f in Path(".").iterdir()] tsdtensor.save("tsdtensor2") - assert "tsdtensor2.npz" in [f.name for f in Path('.').iterdir()] + assert "tsdtensor2.npz" in [f.name for f in Path(".").iterdir()] file = np.load("tsdtensor.npz") keys = list(file.keys()) - assert 't' in keys - assert 'd' in keys - assert 'start' in keys - assert 'end' in keys + assert "t" in keys + assert "d" in keys + assert "start" in keys + assert "end" in keys - np.testing.assert_array_almost_equal(file['t'], tsdtensor.index) - np.testing.assert_array_almost_equal(file['d'], tsdtensor.values) - np.testing.assert_array_almost_equal(file['start'], tsdtensor.time_support.start) - np.testing.assert_array_almost_equal(file['end'], tsdtensor.time_support.end) + np.testing.assert_array_almost_equal(file["t"], tsdtensor.index) + np.testing.assert_array_almost_equal(file["d"], tsdtensor.values) + np.testing.assert_array_almost_equal( + file["start"], tsdtensor.time_support.start + ) + np.testing.assert_array_almost_equal(file["end"], tsdtensor.time_support.end) # Path("tsdtensor.npz").unlink() # Path("tsdtensor2.npz").unlink() def test_interpolate(self, tsdtensor): - + y = np.arange(0, 1001) - data_stack = np.stack([np.stack([np.arange(0, 1001),] * 4)] * 3).T + data_stack = np.stack( + [ + np.stack( + [ + np.arange(0, 1001), + ] + * 4 + ) + ] + * 3 + ).T - tsdtensor = nap.TsdTensor(t=np.arange(0, 101), d= data_stack[0::10, ...]) + tsdtensor = nap.TsdTensor(t=np.arange(0, 101), d=data_stack[0::10, ...]) # Ts - ts = nap.Ts(t = y / 10) + ts = nap.Ts(t=y / 10) tsdtensor2 = tsdtensor.interpolate(ts) np.testing.assert_array_almost_equal(tsdtensor2.values, data_stack) # Tsd - ts = nap.Tsd(t=y/10, d=np.zeros_like(y)) + ts = nap.Tsd(t=y / 10, d=np.zeros_like(y)) tsdtensor2 = tsdtensor.interpolate(ts) np.testing.assert_array_almost_equal(tsdtensor2.values, data_stack) # TsdFrame - ts = nap.TsdFrame(t=y/10, d=np.zeros((len(y), 2))) + ts = nap.TsdFrame(t=y / 10, d=np.zeros((len(y), 2))) tsdtensor2 = tsdtensor.interpolate(ts) np.testing.assert_array_almost_equal(tsdtensor2.values, data_stack) - + with pytest.raises(IOError) as e: tsdtensor.interpolate([0, 1, 2]) - assert str(e.value) == "First argument should be an instance of Ts, Tsd, TsdFrame or TsdTensor" + assert ( + str(e.value) + == "First argument should be an instance of Ts, Tsd, TsdFrame or TsdTensor" + ) # Right left ep = nap.IntervalSet(start=0, end=5) - tsdtensor = nap.Tsd(t=np.arange(1,4), d=np.arange(3), time_support=ep) + tsdtensor = nap.Tsd(t=np.arange(1, 4), d=np.arange(3), time_support=ep) ts = nap.Ts(t=np.arange(0, 5)) tsdtensor2 = tsdtensor.interpolate(ts, left=1234) assert float(tsdtensor2.values[0]) == 1234.0 tsdtensor2 = tsdtensor.interpolate(ts, right=4321) assert float(tsdtensor2.values[-1]) == 4321.0 - def test_interpolate_with_ep(self, tsdtensor): + def test_interpolate_with_ep(self, tsdtensor): y = np.arange(0, 1001) - data_stack = np.stack([np.stack([np.arange(0, 1001),] * 4),] * 3).T + data_stack = np.stack( + [ + np.stack( + [ + 
np.arange(0, 1001), + ] + * 4 + ), + ] + * 3 + ).T + + tsdtensor = nap.TsdTensor(t=np.arange(0, 101), d=data_stack[::10, ...]) - tsdtensor = nap.TsdTensor(t=np.arange(0, 101), d=data_stack[::10, ...]) - ts = nap.Ts(t=y / 10) - ep = nap.IntervalSet(start=np.arange(0, 100, 20), end=np.arange(10, 110, 20)) - tsdframe2 = tsdtensor.interpolate(ts, ep) + tsdframe2 = tsdtensor.interpolate(ts, ep) tmp = ts.restrict(ep).index * 10 np.testing.assert_array_almost_equal(tmp, tsdframe2.values[:, 0, 0]) @@ -1444,14 +1587,20 @@ def test_interpolate_with_ep(self, tsdtensor): tsdframe2 = tsdtensor.interpolate(ts, ep) assert len(tsdframe2) == 0 -@pytest.mark.parametrize("obj", - [ - nap.Tsd(t=np.arange(10), d=np.random.rand(10), time_units="s"), - nap.TsdFrame( - t=np.arange(10), d=np.random.rand(10, 3), time_units="s", columns=["a","b","c"] - ), - nap.TsdTensor(t=np.arange(10), d=np.random.rand(10, 3, 2), time_units="s"), - ]) + +@pytest.mark.parametrize( + "obj", + [ + nap.Tsd(t=np.arange(10), d=np.random.rand(10), time_units="s"), + nap.TsdFrame( + t=np.arange(10), + d=np.random.rand(10, 3), + time_units="s", + columns=["a", "b", "c"], + ), + nap.TsdTensor(t=np.arange(10), d=np.random.rand(10, 3, 2), time_units="s"), + ], +) def test_pickling(obj): """Test that pikling works as expected.""" # pickle and unpickle ts_group @@ -1477,16 +1626,72 @@ def test_pickling(obj): "start, end, mode, n_points, expectation", [ (1, 3, "closest_t", None, does_not_raise()), - (None, 3, "closest_t", None, pytest.raises(ValueError, match="'start' must be an int or a float")), - (2, "a", "closest_t", None, pytest.raises(ValueError, match="'end' must be an int or a float. Type provided instead!")), - (1, 3, "closest_t", "a", pytest.raises(TypeError, match="'n_points' must be of type int or None. Type provided instead!")), - (1, None, "closest_t", 1, pytest.raises(ValueError, match="'n_points' can be used only when 'end' is specified!")), - (1, 3, "banana", None, pytest.raises(ValueError, match="'mode' only accepts 'before_t', 'after_t', 'closest_t' or 'restrict'.")), - (3, 1, "closest_t", None, pytest.raises(ValueError, match="'start' should not precede 'end'")), - (1, 3, "restrict", 1, pytest.raises(ValueError, match="Fixing the number of time points is incompatible with 'restrict' mode.")), - (1., 3., "closest_t", None, does_not_raise()), - (1., None, "closest_t", None, does_not_raise()), - ] + ( + None, + 3, + "closest_t", + None, + pytest.raises(ValueError, match="'start' must be an int or a float"), + ), + ( + 2, + "a", + "closest_t", + None, + pytest.raises( + ValueError, + match="'end' must be an int or a float. Type provided instead!", + ), + ), + ( + 1, + 3, + "closest_t", + "a", + pytest.raises( + TypeError, + match="'n_points' must be of type int or None. Type provided instead!", + ), + ), + ( + 1, + None, + "closest_t", + 1, + pytest.raises( + ValueError, match="'n_points' can be used only when 'end' is specified!" 
+ ), + ), + ( + 1, + 3, + "banana", + None, + pytest.raises( + ValueError, + match="'mode' only accepts 'before_t', 'after_t', 'closest_t' or 'restrict'.", + ), + ), + ( + 3, + 1, + "closest_t", + None, + pytest.raises(ValueError, match="'start' should not precede 'end'"), + ), + ( + 1, + 3, + "restrict", + 1, + pytest.raises( + ValueError, + match="Fixing the number of time points is incompatible with 'restrict' mode.", + ), + ), + (1.0, 3.0, "closest_t", None, does_not_raise()), + (1.0, None, "closest_t", None, does_not_raise()), + ], ) def test_get_slice_raise_errors(start, end, mode, n_points, expectation): ts = nap.Ts(t=np.array([1, 2, 3, 4])) @@ -1556,16 +1761,19 @@ def test_get_slice_raise_errors(start, end, mode, n_points, expectation): (4, 4, "restrict", slice(3, 4), np.array([4])), (4, 5, "restrict", slice(3, 4), np.array([4])), (0, 1, "restrict", slice(0, 1), np.array([1])), - - ] + ], +) +@pytest.mark.parametrize( + "ts", + [ + nap.Ts(t=np.array([1, 2, 3, 4])), + nap.Tsd(t=np.array([1, 2, 3, 4]), d=np.array([1, 2, 3, 4])), + nap.TsdFrame(t=np.array([1, 2, 3, 4]), d=np.array([1, 2, 3, 4])[:, None]), + nap.TsdTensor( + t=np.array([1, 2, 3, 4]), d=np.array([1, 2, 3, 4])[:, None, None] + ), + ], ) -@pytest.mark.parametrize("ts", - [ - nap.Ts(t=np.array([1, 2, 3, 4])), - nap.Tsd(t=np.array([1, 2, 3, 4]), d=np.array([1, 2, 3, 4])), - nap.TsdFrame(t=np.array([1, 2, 3, 4]), d=np.array([1, 2, 3, 4])[:, None]), - nap.TsdTensor(t=np.array([1, 2, 3, 4]), d=np.array([1, 2, 3, 4])[:, None, None]) - ]) def test_get_slice_value(start, end, mode, expected_slice, expected_array, ts): out_slice = ts._get_slice(start, end=end, mode=mode) out_array = ts.t[out_slice] @@ -1593,7 +1801,7 @@ def test_get_slice_vs_get_random_val_start_end_value(): def test_get_slice_vs_get_random_val_start_value(): np.random.seed(123) ts = nap.Ts(np.linspace(0.2, 0.8, 100)) - starts = np.random.uniform(0, 1, size=(10000, )) + starts = np.random.uniform(0, 1, size=(10000,)) for start in starts: out_slice = ts.get_slice(start=start, end=None) @@ -1602,24 +1810,21 @@ def test_get_slice_vs_get_random_val_start_value(): assert np.all(out_get.t == out_ts.t) - @pytest.mark.parametrize( "end, n_points, expectation", [ (1, 3, does_not_raise()), (None, 3, pytest.raises(ValueError, match="'n_points' can be used only when")), - - ] + ], ) -@pytest.mark.parametrize("time_unit", ["s", "ms", "us"]) -@pytest.mark.parametrize("mode", ["closest_t", "before_t", "after_t"]) +@pytest.mark.parametrize("time_unit", ["s", "ms", "us"]) +@pytest.mark.parametrize("mode", ["closest_t", "before_t", "after_t"]) def test_get_slice_n_points(end, n_points, expectation, time_unit, mode): ts = nap.Ts(t=np.array([1, 2, 3, 4])) with expectation: ts._get_slice(1, end, n_points=n_points, mode=mode) - @pytest.mark.parametrize( "start, end, n_points, mode, expected_slice, expected_array", [ @@ -1647,52 +1852,61 @@ def test_get_slice_n_points(end, n_points, expectation, time_unit, mode): (1, 6.6, 2, "after_t", slice(0, 6, 3), np.array([1, 4])), (1, 6.6, 2, "before_t", slice(0, 4, 2), np.array([1, 3])), (1, 6.6, 2, "closest_t", slice(0, 6, 3), np.array([1, 4])), - ] + ], +) +@pytest.mark.parametrize( + "ts", + [ + nap.Ts(t=np.arange(1, 10)), + nap.Tsd(t=np.arange(1, 10), d=np.arange(1, 10)), + nap.TsdFrame(t=np.arange(1, 10), d=np.arange(1, 10)[:, None]), + nap.TsdTensor(t=np.arange(1, 10), d=np.arange(1, 10)[:, None, None]), + ], ) -@pytest.mark.parametrize("ts", - [ - nap.Ts(t=np.arange(1, 10)), - nap.Tsd(t=np.arange(1, 10), d=np.arange(1, 10)), - 
nap.TsdFrame(t=np.arange(1, 10), d=np.arange(1, 10)[:, None]), - nap.TsdTensor(t=np.arange(1, 10), d=np.arange(1, 10)[:, None, None]) - ]) -def test_get_slice_value_step(start, end, n_points, mode, expected_slice, expected_array, ts): +def test_get_slice_value_step( + start, end, n_points, mode, expected_slice, expected_array, ts +): out_slice = ts._get_slice(start, end=end, mode=mode, n_points=n_points) out_array = ts.t[out_slice] assert out_slice == expected_slice assert np.all(out_array == expected_array) + @pytest.mark.parametrize( "start, end, expected_slice, expected_array", [ (1, 3, slice(0, 3), np.array([1, 2, 3])), - (1, 2.7, slice(0, 2), np.array([1, 2])), - (1, 2.4, slice(0, 2), np.array([1, 2])), - (1.1, 3, slice(1, 3), np.array([2, 3])), - (1.6, 3, slice(1, 3), np.array([2, 3])), - (1.6, 1.8, slice(1, 1), np.array([])), - (1.4, 1.6, slice(1, 1), np.array([])), - (3, 3, slice(2, 3), np.array([3])), - (0, 3, slice(0, 3), np.array([1, 2, 3])), - (0, 4, slice(0, 4), np.array([1, 2, 3, 4])), - (4, 4, slice(3, 4), np.array([4])), - (4, 5, slice(3, 4), np.array([4])), - (0, 1, slice(0, 1), np.array([1])), + (1, 2.7, slice(0, 2), np.array([1, 2])), + (1, 2.4, slice(0, 2), np.array([1, 2])), + (1.1, 3, slice(1, 3), np.array([2, 3])), + (1.6, 3, slice(1, 3), np.array([2, 3])), + (1.6, 1.8, slice(1, 1), np.array([])), + (1.4, 1.6, slice(1, 1), np.array([])), + (3, 3, slice(2, 3), np.array([3])), + (0, 3, slice(0, 3), np.array([1, 2, 3])), + (0, 4, slice(0, 4), np.array([1, 2, 3, 4])), + (4, 4, slice(3, 4), np.array([4])), + (4, 5, slice(3, 4), np.array([4])), + (0, 1, slice(0, 1), np.array([1])), (0, None, slice(0, 1), np.array([1])), (1, None, slice(0, 1), np.array([1])), (4, None, slice(3, 4), np.array([4])), (5, None, slice(3, 4), np.array([4])), (-1, 0, slice(0, 0), np.array([])), (5, 6, slice(4, 4), np.array([])), - ] + ], +) +@pytest.mark.parametrize( + "ts", + [ + nap.Ts(t=np.array([1, 2, 3, 4])), + nap.Tsd(t=np.array([1, 2, 3, 4]), d=np.array([1, 2, 3, 4])), + nap.TsdFrame(t=np.array([1, 2, 3, 4]), d=np.array([1, 2, 3, 4])[:, None]), + nap.TsdTensor( + t=np.array([1, 2, 3, 4]), d=np.array([1, 2, 3, 4])[:, None, None] + ), + ], ) -@pytest.mark.parametrize("ts", - [ - nap.Ts(t=np.array([1, 2, 3, 4])), - nap.Tsd(t=np.array([1, 2, 3, 4]), d=np.array([1, 2, 3, 4])), - nap.TsdFrame(t=np.array([1, 2, 3, 4]), d=np.array([1, 2, 3, 4])[:, None]), - nap.TsdTensor(t=np.array([1, 2, 3, 4]), d=np.array([1, 2, 3, 4])[:, None, None]) - ]) def test_get_slice_public(start, end, expected_slice, expected_array, ts): out_slice = ts.get_slice(start, end=end) out_array = ts.t[out_slice] diff --git a/tests/test_time_units.py b/tests/test_time_units.py index 997187f1..3ad99f2e 100644 --- a/tests/test_time_units.py +++ b/tests/test_time_units.py @@ -5,33 +5,38 @@ # @Last Modified time: 2023-09-21 15:59:54 """Tests of time units for `pynapple` package.""" -import pynapple as nap +import warnings + import numpy as np import pandas as pd import pytest + +import pynapple as nap # from pynapple.core.time_units import format_timestamps, return_timestamps, sort_timestamps from pynapple.core.time_index import TsIndex -import warnings + def test_format_timestamps(): t = np.random.rand(100) np.testing.assert_array_almost_equal(t, TsIndex.format_timestamps(t)) - np.testing.assert_array_almost_equal(t/1e3, TsIndex.format_timestamps(t, 'ms')) - np.testing.assert_array_almost_equal(t/1e6, TsIndex.format_timestamps(t, 'us')) + np.testing.assert_array_almost_equal(t / 1e3, TsIndex.format_timestamps(t, "ms")) + 
np.testing.assert_array_almost_equal(t / 1e6, TsIndex.format_timestamps(t, "us")) with pytest.raises(ValueError, match=r"unrecognized time units type"): - TsIndex.format_timestamps(t, 'aaaa') + TsIndex.format_timestamps(t, "aaaa") + def test_return_timestamps(): t = np.random.rand(100) np.testing.assert_array_almost_equal(t, TsIndex.return_timestamps(t)) - np.testing.assert_array_almost_equal(t/1e3, TsIndex.return_timestamps(t, 'ms')) - np.testing.assert_array_almost_equal(t/1e6, TsIndex.return_timestamps(t, 'us')) + np.testing.assert_array_almost_equal(t / 1e3, TsIndex.return_timestamps(t, "ms")) + np.testing.assert_array_almost_equal(t / 1e6, TsIndex.return_timestamps(t, "us")) with pytest.raises(ValueError, match=r"unrecognized time units type"): - TsIndex.return_timestamps(t, 'aaaa') + TsIndex.return_timestamps(t, "aaaa") + def test_return_timestamps(): t = np.random.rand(100) @@ -41,4 +46,3 @@ def test_return_timestamps(): with warnings.catch_warnings(record=True) as w: TsIndex.sort_timestamps(t, True) assert str(w[0].message) == "timestamps are not sorted" - diff --git a/tests/test_ts_group.py b/tests/test_ts_group.py index 31f54107..85a90e43 100644 --- a/tests/test_ts_group.py +++ b/tests/test_ts_group.py @@ -1,16 +1,14 @@ - - """Tests of ts group for `pynapple` package.""" import pickle import warnings from collections import UserDict from contextlib import nullcontext as does_not_raise +from pathlib import Path import numpy as np import pandas as pd import pytest -from pathlib import Path import pynapple as nap @@ -63,17 +61,27 @@ def test_create_ts_group_from_invalid(self): @pytest.mark.parametrize( "test_dict, expectation", [ - ({"1": nap.Ts(np.arange(10)), "2": nap.Ts(np.arange(10))}, does_not_raise()), + ( + {"1": nap.Ts(np.arange(10)), "2": nap.Ts(np.arange(10))}, + does_not_raise(), + ), ({"1": nap.Ts(np.arange(10)), 2: nap.Ts(np.arange(10))}, does_not_raise()), - ({"1": nap.Ts(np.arange(10)), 1: nap.Ts(np.arange(10))}, - pytest.raises(ValueError, match="Two dictionary keys contain the same integer")), - ({"1.": nap.Ts(np.arange(10)), 2: nap.Ts(np.arange(10))}, - pytest.raises(ValueError, match="All keys must be convertible")), + ( + {"1": nap.Ts(np.arange(10)), 1: nap.Ts(np.arange(10))}, + pytest.raises( + ValueError, match="Two dictionary keys contain the same integer" + ), + ), + ( + {"1.": nap.Ts(np.arange(10)), 2: nap.Ts(np.arange(10))}, + pytest.raises(ValueError, match="All keys must be convertible"), + ), ({-1: nap.Ts(np.arange(10)), 1: nap.Ts(np.arange(10))}, does_not_raise()), - ({1.5: nap.Ts(np.arange(10)), 1: nap.Ts(np.arange(10))}, - pytest.raises(ValueError, match="All keys must have integer value")) - - ] + ( + {1.5: nap.Ts(np.arange(10)), 1: nap.Ts(np.arange(10))}, + pytest.raises(ValueError, match="All keys must have integer value"), + ), + ], ) def test_initialize_from_dict(self, test_dict, expectation): with expectation: @@ -84,21 +92,25 @@ def test_initialize_from_dict(self, test_dict, expectation): [ nap.TsGroup({"1": nap.Ts(np.arange(10)), "2": nap.Ts(np.arange(10))}), nap.TsGroup({"1": nap.Ts(np.arange(10)), 2: nap.Ts(np.arange(10))}), - nap.TsGroup({-1: nap.Ts(np.arange(10)), 1: nap.Ts(np.arange(10))}) - - ] + nap.TsGroup({-1: nap.Ts(np.arange(10)), 1: nap.Ts(np.arange(10))}), + ], ) def test_metadata_len_match(self, tsgroup): assert len(tsgroup._metadata) == len(tsgroup) def test_create_ts_group_from_array(self): with warnings.catch_warnings(record=True) as w: - nap.TsGroup({ - 0: np.arange(0, 200), - 1: np.arange(0, 200, 0.5), - 2: np.arange(0, 300, 
0.2),
-            })
-        assert str(w[0].message) == "Elements should not be passed as <class 'numpy.ndarray'>. Default time units is seconds when creating the Ts object."
+            nap.TsGroup(
+                {
+                    0: np.arange(0, 200),
+                    1: np.arange(0, 200, 0.5),
+                    2: np.arange(0, 300, 0.2),
+                }
+            )
+        assert (
+            str(w[0].message)
+            == "Elements should not be passed as <class 'numpy.ndarray'>. Default time units is seconds when creating the Ts object."
+        )
 
     def test_create_ts_group_with_time_support(self, group):
         ep = nap.IntervalSet(start=0, end=100)
@@ -111,39 +123,50 @@ def test_create_ts_group_with_time_support(self, group):
 
     def test_create_ts_group_with_empty_time_support(self):
         with pytest.raises(RuntimeError) as e_info:
-            tmp = nap.TsGroup({
-                0: nap.Ts(t=np.array([])),
-                1: nap.Ts(t=np.array([])),
-                2: nap.Ts(t=np.array([])),
-            })
-        assert str(e_info.value) == "Union of time supports is empty. Consider passing a time support as argument."
+            tmp = nap.TsGroup(
+                {
+                    0: nap.Ts(t=np.array([])),
+                    1: nap.Ts(t=np.array([])),
+                    2: nap.Ts(t=np.array([])),
+                }
+            )
+        assert (
+            str(e_info.value)
+            == "Union of time supports is empty. Consider passing a time support as argument."
+        )
 
     def test_create_ts_group_with_bypass_check(self):
         tmp = {
             0: nap.Ts(t=np.arange(0, 100)),
             1: nap.Ts(t=np.arange(0, 200, 0.5), time_units="s"),
-            2: nap.Ts(t=np.arange(0, 300, 0.2), time_units="s")
+            2: nap.Ts(t=np.arange(0, 300, 0.2), time_units="s"),
         }
-        tsgroup = nap.TsGroup(tmp, time_support = nap.IntervalSet(0, 100), bypass_check=True)
+        tsgroup = nap.TsGroup(
+            tmp, time_support=nap.IntervalSet(0, 100), bypass_check=True
+        )
         for i in tmp.keys():
             np.testing.assert_array_almost_equal(tmp[i].index, tsgroup[i].index)
 
         tmp = {
             0: nap.Ts(t=np.arange(0, 100)),
             1: nap.Ts(t=np.arange(0, 200, 0.5), time_units="s"),
-            2: nap.Ts(t=np.arange(0, 300, 0.2), time_units="s")
+            2: nap.Ts(t=np.arange(0, 300, 0.2), time_units="s"),
         }
         tsgroup = nap.TsGroup(tmp, bypass_check=True)
         for i in tmp.keys():
-            np.testing.assert_array_almost_equal(tmp[i].index, tsgroup[i].index)
+            np.testing.assert_array_almost_equal(tmp[i].index, tsgroup[i].index)
 
     def test_create_ts_group_with_metainfo(self, group):
         sr_info = pd.Series(index=[0, 1, 2], data=[0, 0, 0], name="sr")
         ar_info = np.ones(3) * 1
         tsgroup = nap.TsGroup(group, sr=sr_info, ar=ar_info)
         assert tsgroup._metadata.shape == (3, 3)
-        np.testing.assert_array_almost_equal(tsgroup._metadata["sr"].values, sr_info.values)
-        np.testing.assert_array_almost_equal(tsgroup._metadata["sr"].index.values, sr_info.index.values)
+        np.testing.assert_array_almost_equal(
+            tsgroup._metadata["sr"].values, sr_info.values
+        )
+        np.testing.assert_array_almost_equal(
+            tsgroup._metadata["sr"].index.values, sr_info.index.values
+        )
         np.testing.assert_array_almost_equal(tsgroup._metadata["ar"].values, ar_info)
 
     def test_add_metainfo(self, group):
@@ -151,8 +174,8 @@ def test_add_metainfo(self, group):
         df_info = pd.DataFrame(index=[0, 1, 2], data=[0, 0, 0], columns=["df"])
         sr_info = pd.Series(index=[0, 1, 2], data=[1, 1, 1], name="sr")
         ar_info = np.ones(3) * 3
-        lt_info = [3,4,5]
-        tu_info = (6,8,3)
+        lt_info = [3, 4, 5]
+        tu_info = (6, 8, 3)
         tsgroup.set_info(df_info, sr=sr_info, ar=ar_info, lt=lt_info, tu=tu_info)
         assert tsgroup._metadata.shape == (3, 6)
         pd.testing.assert_series_equal(tsgroup._metadata["df"], df_info["df"])
@@ -210,20 +233,20 @@ def test_keys(self, group):
 
     def test_rates_property(self, group):
         tsgroup = nap.TsGroup(group)
-        pd.testing.assert_series_equal(tsgroup.rates, tsgroup._metadata['rate'])
+        pd.testing.assert_series_equal(tsgroup.rates, tsgroup._metadata["rate"])
 
     def 
test_items(self, group): tsgroup = nap.TsGroup(group) items = tsgroup.items() assert isinstance(items, list) - for i,it in items: + for i, it in items: pd.testing.assert_series_equal(tsgroup[i].as_series(), it.as_series()) def test_items(self, group): tsgroup = nap.TsGroup(group) values = tsgroup.values() assert isinstance(values, list) - for i,it in enumerate(values): + for i, it in enumerate(values): pd.testing.assert_series_equal(tsgroup[i].as_series(), it.as_series()) def test_slicing(self, group): @@ -341,8 +364,8 @@ def test_count_with_ep(self, group): def test_count_time_units(self, group): ep = nap.IntervalSet(start=0, end=100) - tsgroup = nap.TsGroup(group, time_support =ep) - for b, tu in zip([1, 1e3, 1e6],['s', 'ms', 'us']): + tsgroup = nap.TsGroup(group, time_support=ep) + for b, tu in zip([1, 1e3, 1e6], ["s", "ms", "us"]): count = tsgroup.count(b, time_units=tu) np.testing.assert_array_almost_equal( count.loc[0].values[0:-1].flatten(), np.ones(len(count) - 1) @@ -367,13 +390,13 @@ def test_count_time_units(self, group): def test_count_errors(self, group): tsgroup = nap.TsGroup(group) with pytest.raises(ValueError): - tsgroup.count(bin_size = {}) + tsgroup.count(bin_size={}) with pytest.raises(ValueError): - tsgroup.count(ep = {}) + tsgroup.count(ep={}) with pytest.raises(ValueError): - tsgroup.count(time_units = {}) + tsgroup.count(time_units={}) def test_get_interval(self, group): tsgroup = nap.TsGroup(group) @@ -381,17 +404,19 @@ def test_get_interval(self, group): assert isinstance(tsgroup2, nap.TsGroup) - assert all(map(lambda x: len(x[0]) == x[1], zip(tsgroup2.values(), [11, 21, 51]))) + assert all( + map(lambda x: len(x[0]) == x[1], zip(tsgroup2.values(), [11, 21, 51])) + ) for a, b in zip(tsgroup.values(), tsgroup2.values()): np.testing.assert_array_almost_equal( - a.t[np.searchsorted(a.t, 10):np.searchsorted(a.t, 20, 'right')], - b.t) + a.t[np.searchsorted(a.t, 10) : np.searchsorted(a.t, 20, "right")], b.t + ) np.testing.assert_array_almost_equal( tsgroup.time_support.values, tsgroup2.time_support.values, - ) + ) tsgroup3 = tsgroup.get(10) @@ -448,9 +473,9 @@ def test_repr_(self, group): from tabulate import tabulate tsgroup = nap.TsGroup(group) - tsgroup.set_info(abc = ['a']*len(tsgroup)) - tsgroup.set_info(bbb = [1]*len(tsgroup)) - tsgroup.set_info(ccc = [np.pi]*len(tsgroup)) + tsgroup.set_info(abc=["a"] * len(tsgroup)) + tsgroup.set_info(bbb=[1] * len(tsgroup)) + tsgroup.set_info(ccc=[np.pi] * len(tsgroup)) cols = tsgroup._metadata.columns.drop("rate") headers = ["Index", "rate"] + [c for c in cols] @@ -470,16 +495,16 @@ def round_if_float(x): assert tabulate(lines, headers=headers) == tsgroup.__repr__() def test_str_(self, group): - tsgroup = nap.TsGroup(group) + tsgroup = nap.TsGroup(group) assert tsgroup.__str__() == tsgroup.__repr__() - - def test_to_tsd(self, group): + + def test_to_tsd(self, group): t = [] d = [] group = {} for i in range(3): - t.append(np.sort(np.random.rand(10)*100)) - d.append(np.ones(10)*i) + t.append(np.sort(np.random.rand(10) * 100)) + d.append(np.ones(10) * i) group[i] = nap.Ts(t=t[-1]) times = np.array(t).flatten() @@ -495,31 +520,41 @@ def test_to_tsd(self, group): np.testing.assert_array_almost_equal(tsd.index, times) np.testing.assert_array_almost_equal(tsd.values, data) - alpha=np.random.randn(3) + alpha = np.random.randn(3) tsgroup.set_info(alpha=alpha) tsd2 = tsgroup.to_tsd("alpha") np.testing.assert_array_almost_equal(tsd2.index, times) - np.testing.assert_array_almost_equal(tsd2.values, np.array([alpha[int(i)] for i in data])) 
+ np.testing.assert_array_almost_equal( + tsd2.values, np.array([alpha[int(i)] for i in data]) + ) tsd3 = tsgroup.to_tsd(alpha) np.testing.assert_array_almost_equal(tsd3.index, times) - np.testing.assert_array_almost_equal(tsd3.values, np.array([alpha[int(i)] for i in data])) + np.testing.assert_array_almost_equal( + tsd3.values, np.array([alpha[int(i)] for i in data]) + ) - beta=pd.Series(index=np.arange(3), data=np.random.randn(3)) + beta = pd.Series(index=np.arange(3), data=np.random.randn(3)) tsd4 = tsgroup.to_tsd(beta) np.testing.assert_array_almost_equal(tsd4.index, times) - np.testing.assert_array_almost_equal(tsd4.values, np.array([beta[int(i)] for i in data])) + np.testing.assert_array_almost_equal( + tsd4.values, np.array([beta[int(i)] for i in data]) + ) def test_to_tsd_runtime_errors(self, group): tsgroup = nap.TsGroup(group) - + with pytest.raises(Exception) as e_info: - tsgroup.to_tsd(pd.Series(index=np.arange(len(tsgroup)+1), data=np.arange(len(tsgroup)+1))) + tsgroup.to_tsd( + pd.Series( + index=np.arange(len(tsgroup) + 1), data=np.arange(len(tsgroup) + 1) + ) + ) assert str(e_info.value) == "Index are not equals" with pytest.raises(Exception) as e_info: - tsgroup.to_tsd(np.arange(len(tsgroup)+1)) + tsgroup.to_tsd(np.arange(len(tsgroup) + 1)) assert str(e_info.value) == "Values is not the same length." with pytest.raises(Exception) as e_info: @@ -528,46 +563,60 @@ def test_to_tsd_runtime_errors(self, group): with pytest.raises(Exception) as e_info: tsgroup.to_tsd(dict) - assert str(e_info.value) == """Unknown argument format. Must be pandas.Series, numpy.ndarray or a string from one of the following values : [rate]""" + assert ( + str(e_info.value) + == """Unknown argument format. Must be pandas.Series, numpy.ndarray or a string from one of the following values : [rate]""" + ) tsgroup.set_info(alpha=np.random.rand(len(tsgroup))) with pytest.raises(Exception) as e_info: tsgroup.to_tsd(dict) - assert str(e_info.value) == """Unknown argument format. Must be pandas.Series, numpy.ndarray or a string from one of the following values : [rate, alpha]""" + assert ( + str(e_info.value) + == """Unknown argument format. Must be pandas.Series, numpy.ndarray or a string from one of the following values : [rate, alpha]""" + ) def test_save_npz(self, group): group = { - 0: nap.Tsd(t=np.arange(0, 20), d = np.random.rand(20)), + 0: nap.Tsd(t=np.arange(0, 20), d=np.random.rand(20)), 1: nap.Tsd(t=np.arange(0, 20, 0.5), d=np.random.rand(40)), - 2: nap.Tsd(t=np.arange(0, 10, 0.2), d=np.random.rand(50)) + 2: nap.Tsd(t=np.arange(0, 10, 0.2), d=np.random.rand(50)), } - tsgroup = nap.TsGroup(group, meta = np.arange(len(group), dtype=np.int64), meta2 = np.array(['a', 'b', 'c'])) + tsgroup = nap.TsGroup( + group, + meta=np.arange(len(group), dtype=np.int64), + meta2=np.array(["a", "b", "c"]), + ) with pytest.raises(TypeError) as e: tsgroup.save(dict) with pytest.raises(RuntimeError) as e: - tsgroup.save('./') - assert str(e.value) == "Invalid filename input. {} is directory.".format(Path("./").resolve()) + tsgroup.save("./") + assert str(e.value) == "Invalid filename input. 
{} is directory.".format( + Path("./").resolve() + ) - fake_path = './fake/path' + fake_path = "./fake/path" with pytest.raises(RuntimeError) as e: - tsgroup.save(fake_path+'/file.npz') - assert str(e.value) == "Path {} does not exist.".format(Path(fake_path).resolve()) + tsgroup.save(fake_path + "/file.npz") + assert str(e.value) == "Path {} does not exist.".format( + Path(fake_path).resolve() + ) tsgroup.save("tsgroup.npz") - assert "tsgroup.npz" in [f.name for f in Path('.').iterdir()] + assert "tsgroup.npz" in [f.name for f in Path(".").iterdir()] tsgroup.save("tsgroup2") - assert "tsgroup2.npz" in [f.name for f in Path('.').iterdir()] + assert "tsgroup2.npz" in [f.name for f in Path(".").iterdir()] file = np.load("tsgroup.npz") - keys = list(file.keys()) - for k in ['t', 'd', 'start', 'end', 'index', 'meta', 'meta2']: + keys = list(file.keys()) + for k in ["t", "d", "start", "end", "index", "meta", "meta2"]: assert k in keys times = [] @@ -575,7 +624,7 @@ def test_save_npz(self, group): data = [] for n in group.keys(): times.append(group[n].index) - index.append(np.ones(len(group[n]))*n) + index.append(np.ones(len(group[n])) * n) data.append(group[n].values) times = np.hstack(times) index = np.hstack(index) @@ -585,23 +634,27 @@ def test_save_npz(self, group): data = data[idx] index = index[idx] - np.testing.assert_array_almost_equal(file['start'], tsgroup.time_support.start) - np.testing.assert_array_almost_equal(file['end'], tsgroup.time_support.end) - np.testing.assert_array_almost_equal(file['t'], times) - np.testing.assert_array_almost_equal(file['d'], data) - np.testing.assert_array_almost_equal(file['index'], index) - np.testing.assert_array_almost_equal(file['meta'], np.arange(len(group), dtype=np.int64)) - assert np.all(file['meta2']==np.array(['a', 'b', 'c'])) + np.testing.assert_array_almost_equal(file["start"], tsgroup.time_support.start) + np.testing.assert_array_almost_equal(file["end"], tsgroup.time_support.end) + np.testing.assert_array_almost_equal(file["t"], times) + np.testing.assert_array_almost_equal(file["d"], data) + np.testing.assert_array_almost_equal(file["index"], index) + np.testing.assert_array_almost_equal( + file["meta"], np.arange(len(group), dtype=np.int64) + ) + assert np.all(file["meta2"] == np.array(["a", "b", "c"])) file.close() - tsgroup3 = nap.TsGroup({ - 0: nap.Ts(t=np.arange(0, 20)), - }) + tsgroup3 = nap.TsGroup( + { + 0: nap.Ts(t=np.arange(0, 20)), + } + ) tsgroup3.save("tsgroup3") with np.load("tsgroup3.npz") as file: - assert 'd' not in list(file.keys()) - np.testing.assert_array_almost_equal(file['t'], tsgroup3[0].index) + assert "d" not in list(file.keys()) + np.testing.assert_array_almost_equal(file["t"], tsgroup3[0].index) Path("tsgroup.npz").unlink() Path("tsgroup2.npz").unlink() @@ -618,9 +671,12 @@ def test_save_npz(self, group): ([False, True, True], does_not_raise()), (True, does_not_raise()), (4, pytest.raises(KeyError, match="Key 4 not in group index.")), - ([3, 4], pytest.raises(KeyError, match= r"Key \[3, 4\] not in group index.")), - ([2, 3], pytest.raises(KeyError, match= r"Key \[3\] not in group index.")) - ] + ( + [3, 4], + pytest.raises(KeyError, match=r"Key \[3, 4\] not in group index."), + ), + ([2, 3], pytest.raises(KeyError, match=r"Key \[3\] not in group index.")), + ], ) def test_indexing_type(self, group, keys, expectation): ts_group = nap.TsGroup(group) @@ -635,9 +691,15 @@ def test_indexing_type(self, group, keys, expectation): ("__1", does_not_raise()), (1, pytest.raises(ValueError, match="Metadata keys must be 
strings")), (1.1, pytest.raises(ValueError, match="Metadata keys must be strings")), - (np.arange(1), pytest.raises(ValueError, match="Metadata keys must be strings")), - (np.arange(2), pytest.raises(ValueError, match="Metadata keys must be strings")) - ] + ( + np.arange(1), + pytest.raises(ValueError, match="Metadata keys must be strings"), + ), + ( + np.arange(2), + pytest.raises(ValueError, match="Metadata keys must be strings"), + ), + ], ) def test_setitem_metadata_key(self, group, name, expectation): group = nap.TsGroup(group) @@ -653,9 +715,15 @@ def test_setitem_metadata_key(self, group, name, expectation): ((1, 2, 3), does_not_raise()), (1, pytest.raises(TypeError, match="Metadata columns provided must be")), (1.1, pytest.raises(TypeError, match="Metadata columns provided must be")), - (np.arange(1), pytest.raises(RuntimeError, match="Array is not the same length")), - (np.arange(2), pytest.raises(RuntimeError, match="Array is not the same length")) - ] + ( + np.arange(1), + pytest.raises(RuntimeError, match="Array is not the same length"), + ), + ( + np.arange(2), + pytest.raises(RuntimeError, match="Array is not the same length"), + ), + ], ) def test_setitem_metadata_key(self, group, val, expectation): group = nap.TsGroup(group) @@ -707,7 +775,7 @@ def test_getitem_metadata(self, ts_group): np.array([True, False], dtype=bool), np.array([False, True], dtype=bool), np.array([True, True], dtype=bool), - ] + ], ) def test_getitem_bool_indexing(self, bool_idx, ts_group): out = ts_group[bool_idx] @@ -715,7 +783,7 @@ def test_getitem_bool_indexing(self, bool_idx, ts_group): assert len(out) == sum(bool_idx) idx = np.where(bool_idx)[0] if len(idx) == 1: - slc = slice(idx[0], idx[0]+1) + slc = slice(idx[0], idx[0] + 1) else: slc = slice(0, 2) assert all(out.keys()[i] == ts_group.keys()[slc][i] for i in range(len(idx))) @@ -724,19 +792,19 @@ def test_getitem_bool_indexing(self, bool_idx, ts_group): assert np.all(out[[key]].rates == ts_group.rates[[key]]) assert np.all(out[[key]].meta == ts_group.meta[[key]]) assert np.all(out[key].t == ts_group[key].t) - + @pytest.mark.parametrize( - "idx", + "idx", [ - [1], - [2], - [1, 2], - [2, 1], - np.array([1]), - np.array([2]), - np.array([1, 2]), - np.array([2, 1]) - ] + [1], + [2], + [1, 2], + [2, 1], + np.array([1]), + np.array([2]), + np.array([1, 2]), + np.array([2, 1]), + ], ) def test_getitem_int_indexing(self, idx, ts_group): out = ts_group[idx] @@ -748,13 +816,12 @@ def test_getitem_int_indexing(self, idx, ts_group): for k in idx: assert np.all(out[k].t == ts_group[k].t) - def test_getitem_metadata_direct(self, ts_group): - assert np.all(ts_group.rates == np.array([10/9, 5/9])) + assert np.all(ts_group.rates == np.array([10 / 9, 5 / 9])) def test_getitem_key_error(self, ts_group): with pytest.raises(KeyError, match="Key nonexistent not in group index."): - _ = ts_group['nonexistent'] + _ = ts_group["nonexistent"] def test_getitem_attribute_error(self, ts_group): with pytest.raises(AttributeError, match="'TsGroup' object has no attribute"): @@ -763,17 +830,26 @@ def test_getitem_attribute_error(self, ts_group): @pytest.mark.parametrize( "bool_idx, expectation", [ - (np.ones((3,), dtype=bool), pytest.raises(IndexError, match="Boolean index length must be equal")), - (np.ones((2, 1), dtype=bool), pytest.raises(IndexError, match="Only 1-dimensional boolean indices")), - (np.array(True), pytest.raises(IndexError, match="Only 1-dimensional boolean indices")) - ] + ( + np.ones((3,), dtype=bool), + pytest.raises(IndexError, match="Boolean index 
length must be equal"), + ), + ( + np.ones((2, 1), dtype=bool), + pytest.raises(IndexError, match="Only 1-dimensional boolean indices"), + ), + ( + np.array(True), + pytest.raises(IndexError, match="Only 1-dimensional boolean indices"), + ), + ], ) def test_getitem_boolean_fail(self, ts_group, bool_idx, expectation): with expectation: out = ts_group[bool_idx] def test_merge_complete(self, ts_group): - with pytest.raises(TypeError, match="Input at positions(.*)are not TsGroup!"): + with pytest.raises(TypeError, match="Input at positions(.*)are not TsGroup!"): nap.TsGroup.merge_group(ts_group, str, dict) ts_group2 = nap.TsGroup( @@ -782,7 +858,7 @@ def test_merge_complete(self, ts_group): 4: nap.Ts(t=np.arange(20)), }, time_support=ts_group.time_support, - meta=np.array([12, 13]) + meta=np.array([12, 13]), ) merged = ts_group.merge(ts_group2) assert len(merged) == 4 @@ -791,14 +867,21 @@ def test_merge_complete(self, ts_group): np.testing.assert_equal(merged.metadata_columns, ts_group.metadata_columns) @pytest.mark.parametrize( - 'col_name, ignore_metadata, expectation', - [ - ('meta', False, does_not_raise()), - ('meta', True, does_not_raise()), - ('wrong_name', False, pytest.raises(ValueError, match="TsGroup at position 2 has different metadata columns.*")), - ('wrong_name', True, does_not_raise()) - ] - ) + "col_name, ignore_metadata, expectation", + [ + ("meta", False, does_not_raise()), + ("meta", True, does_not_raise()), + ( + "wrong_name", + False, + pytest.raises( + ValueError, + match="TsGroup at position 2 has different metadata columns.*", + ), + ), + ("wrong_name", True, does_not_raise()), + ], + ) def test_merge_metadata(self, ts_group, col_name, ignore_metadata, expectation): metadata = pd.DataFrame([12, 13], index=[3, 4], columns=[col_name]) ts_group2 = nap.TsGroup( @@ -807,52 +890,66 @@ def test_merge_metadata(self, ts_group, col_name, ignore_metadata, expectation): 4: nap.Ts(t=np.arange(20)), }, time_support=ts_group.time_support, - **metadata - ) + **metadata, + ) with expectation: merged = ts_group.merge(ts_group2, ignore_metadata=ignore_metadata) - + if ignore_metadata: - assert merged.metadata_columns[0] == 'rate' - elif col_name == 'meta': + assert merged.metadata_columns[0] == "rate" + elif col_name == "meta": np.testing.assert_equal(merged.metadata_columns, ts_group.metadata_columns) @pytest.mark.parametrize( - 'index, reset_index, expectation', + "index, reset_index, expectation", [ - (np.array([1, 2]), False, pytest.raises(ValueError, match="TsGroup at position 2 has overlapping keys.*")), + ( + np.array([1, 2]), + False, + pytest.raises( + ValueError, match="TsGroup at position 2 has overlapping keys.*" + ), + ), (np.array([1, 2]), True, does_not_raise()), (np.array([3, 4]), False, does_not_raise()), - (np.array([3, 4]), True, does_not_raise()) - ] + (np.array([3, 4]), True, does_not_raise()), + ], ) def test_merge_index(self, ts_group, index, reset_index, expectation): ts_group2 = nap.TsGroup( dict(zip(index, [nap.Ts(t=np.arange(15)), nap.Ts(t=np.arange(20))])), time_support=ts_group.time_support, - meta=np.array([12, 13]) + meta=np.array([12, 13]), ) with expectation: merged = ts_group.merge(ts_group2, reset_index=reset_index) - + if reset_index: assert np.all(merged.keys() == np.arange(4)) elif np.all(index == np.array([3, 4])): assert np.all(merged.keys() == np.array([1, 2, 3, 4])) @pytest.mark.parametrize( - 'time_support, reset_time_support, expectation', + "time_support, reset_time_support, expectation", [ (None, False, does_not_raise()), - (None, True, 
does_not_raise()), - (nap.IntervalSet(start=0, end=1), False, - pytest.raises(ValueError, match="TsGroup at position 2 has different time support.*")), - (nap.IntervalSet(start=0, end=1), True, does_not_raise()) - ] + (None, True, does_not_raise()), + ( + nap.IntervalSet(start=0, end=1), + False, + pytest.raises( + ValueError, + match="TsGroup at position 2 has different time support.*", + ), + ), + (nap.IntervalSet(start=0, end=1), True, does_not_raise()), + ], ) - def test_merge_time_support(self, ts_group, time_support, reset_time_support, expectation): + def test_merge_time_support( + self, ts_group, time_support, reset_time_support, expectation + ): if time_support is None: time_support = ts_group.time_support @@ -862,17 +959,17 @@ def test_merge_time_support(self, ts_group, time_support, reset_time_support, ex 4: nap.Ts(t=np.arange(20)), }, time_support=time_support, - meta=np.array([12, 13]) + meta=np.array([12, 13]), ) with expectation: merged = ts_group.merge(ts_group2, reset_time_support=reset_time_support) - + if reset_time_support: np.testing.assert_array_almost_equal( ts_group.time_support.as_units("s").to_numpy(), - merged.time_support.as_units("s").to_numpy() - ) + merged.time_support.as_units("s").to_numpy(), + ) def test_pickling(ts_group): @@ -894,7 +991,12 @@ def test_pickling(ts_group): assert np.all(unpickled_obj._metadata.columns == ts_group._metadata.columns) # Ensure that the Ts are the same - assert all([np.all(ts_group[key].t == unpickled_obj[key].t) for key in unpickled_obj.keys()]) + assert all( + [ + np.all(ts_group[key].t == unpickled_obj[key].t) + for key in unpickled_obj.keys() + ] + ) # Ensure time support is the same assert np.all(ts_group.time_support == unpickled_obj.time_support) @@ -911,7 +1013,7 @@ def test_pickling(ts_group): (np.float32, does_not_raise()), (np.float64, does_not_raise()), (1, pytest.raises(ValueError, match=f"1 is not a valid numpy dtype")), - ] + ], ) def test_count_dtype(dtype, expectation, ts_group, ts_group_one_group): with expectation: diff --git a/tests/test_tuning_curves.py b/tests/test_tuning_curves.py index 9610c0cf..bb54891a 100644 --- a/tests/test_tuning_curves.py +++ b/tests/test_tuning_curves.py @@ -1,121 +1,420 @@ """Tests of tuning curves for `pynapple` package.""" + from contextlib import nullcontext as does_not_raise -import pynapple as nap + import numpy as np import pandas as pd import pytest +import pynapple as nap + + ######################## # Type Error ######################## def get_group(): return nap.TsGroup({0: nap.Ts(t=np.arange(0, 100))}) + + def get_feature(): return nap.Tsd( - t=np.arange(0, 100, 0.1), d=np.arange(0, 100, 0.1) % 1.0, - time_support = nap.IntervalSet(0, 100) - ) + t=np.arange(0, 100, 0.1), + d=np.arange(0, 100, 0.1) % 1.0, + time_support=nap.IntervalSet(0, 100), + ) + + def get_features(): tmp = np.vstack( (np.repeat(np.arange(0, 100), 10), np.tile(np.arange(0, 100), 10)) ).T return nap.TsdFrame( - t=np.arange(0, 200, 0.1), d=np.vstack((tmp, tmp[::-1])), - time_support = nap.IntervalSet(0, 200) - ) + t=np.arange(0, 200, 0.1), + d=np.vstack((tmp, tmp[::-1])), + time_support=nap.IntervalSet(0, 200), + ) + + def get_ep(): return nap.IntervalSet(start=0, end=50) + + def get_tsdframe(): return nap.TsdFrame(t=np.arange(0, 100), d=np.ones((100, 2))) -@pytest.mark.parametrize("group, dict_ep, expected_exception", [ - ("a", {0:nap.IntervalSet(start=0, end=50),1:nap.IntervalSet(start=50, end=100)}, pytest.raises(TypeError,match="group should be a TsGroup.")), - (get_group(), "a", 
pytest.raises(TypeError,match="dict_ep should be a dictionary of IntervalSet")), - (get_group(), {0:"a",1:nap.IntervalSet(start=50, end=100)}, pytest.raises(TypeError,match="dict_ep argument should contain only IntervalSet.")), -]) + +@pytest.mark.parametrize( + "group, dict_ep, expected_exception", + [ + ( + "a", + { + 0: nap.IntervalSet(start=0, end=50), + 1: nap.IntervalSet(start=50, end=100), + }, + pytest.raises(TypeError, match="group should be a TsGroup."), + ), + ( + get_group(), + "a", + pytest.raises( + TypeError, match="dict_ep should be a dictionary of IntervalSet" + ), + ), + ( + get_group(), + {0: "a", 1: nap.IntervalSet(start=50, end=100)}, + pytest.raises( + TypeError, match="dict_ep argument should contain only IntervalSet." + ), + ), + ], +) def test_compute_discrete_tuning_curves_errors(group, dict_ep, expected_exception): with expected_exception: nap.compute_discrete_tuning_curves(group, dict_ep) -@pytest.mark.parametrize("group, feature, nb_bins, ep, minmax, expected_exception", [ - ("a",get_feature(), 10, get_ep(), (0, 1), "group should be a TsGroup."), - (get_group(),"a", 10, get_ep(), (0, 1), r"feature should be a Tsd \(or TsdFrame with 1 column only\)"), - (get_group(),get_feature(), "a", get_ep(), (0, 1), r"nb_bins should be of type int \(or tuple with \(int, int\) for 2D tuning curves\)."), - (get_group(),get_feature(), 10, "a", (0, 1), r"ep should be an IntervalSet"), - (get_group(),get_feature(), 10, get_ep(), 1, r"minmax should be a tuple\/list of 2 numbers"), -]) -def test_compute_1d_tuning_curves_errors(group, feature, nb_bins, ep, minmax, expected_exception): + +@pytest.mark.parametrize( + "group, feature, nb_bins, ep, minmax, expected_exception", + [ + ("a", get_feature(), 10, get_ep(), (0, 1), "group should be a TsGroup."), + ( + get_group(), + "a", + 10, + get_ep(), + (0, 1), + r"feature should be a Tsd \(or TsdFrame with 1 column only\)", + ), + ( + get_group(), + get_feature(), + "a", + get_ep(), + (0, 1), + r"nb_bins should be of type int \(or tuple with \(int, int\) for 2D tuning curves\).", + ), + (get_group(), get_feature(), 10, "a", (0, 1), r"ep should be an IntervalSet"), + ( + get_group(), + get_feature(), + 10, + get_ep(), + 1, + r"minmax should be a tuple\/list of 2 numbers", + ), + ], +) +def test_compute_1d_tuning_curves_errors( + group, feature, nb_bins, ep, minmax, expected_exception +): with pytest.raises(TypeError, match=expected_exception): nap.compute_1d_tuning_curves(group, feature, nb_bins, ep, minmax) -@pytest.mark.parametrize("group, features, nb_bins, ep, minmax, expected_exception", [ - ("a",get_features(), 10, get_ep(), (0, 1), "group should be a TsGroup."), - (get_group(),"a", 10, get_ep(), (0, 1), r"features should be a TsdFrame with 2 columns"), - (get_group(),get_features(), "a", get_ep(), (0, 1), r"nb_bins should be of type int \(or tuple with \(int, int\) for 2D tuning curves\)."), - (get_group(),get_features(), 10, "a", (0, 1), r"ep should be an IntervalSet"), - (get_group(),get_features(), 10, get_ep(), 1, r"minmax should be a tuple\/list of 2 numbers"), -]) -def test_compute_2d_tuning_curves_errors(group, features, nb_bins, ep, minmax, expected_exception): + +@pytest.mark.parametrize( + "group, features, nb_bins, ep, minmax, expected_exception", + [ + ("a", get_features(), 10, get_ep(), (0, 1), "group should be a TsGroup."), + ( + get_group(), + "a", + 10, + get_ep(), + (0, 1), + r"features should be a TsdFrame with 2 columns", + ), + ( + get_group(), + get_features(), + "a", + get_ep(), + (0, 1), + r"nb_bins should be 
of type int \(or tuple with \(int, int\) for 2D tuning curves\).", + ), + (get_group(), get_features(), 10, "a", (0, 1), r"ep should be an IntervalSet"), + ( + get_group(), + get_features(), + 10, + get_ep(), + 1, + r"minmax should be a tuple\/list of 2 numbers", + ), + ], +) +def test_compute_2d_tuning_curves_errors( + group, features, nb_bins, ep, minmax, expected_exception +): with pytest.raises(TypeError, match=expected_exception): nap.compute_2d_tuning_curves(group, features, nb_bins, ep, minmax) -@pytest.mark.parametrize("tc, feature, ep, minmax, bitssec, expected_exception", [ - ("a", get_feature(), get_ep(), (0, 1), True, "Argument tc should be of type pandas.DataFrame or numpy.ndarray"), - (pd.DataFrame(),"a", get_ep(), (0, 1), True, r"feature should be a Tsd \(or TsdFrame with 1 column only\)"), - (pd.DataFrame(), get_feature(), "a", (0, 1), True, r"ep should be an IntervalSet"), - (pd.DataFrame(), get_feature(), get_ep(), 1, True, r"minmax should be a tuple\/list of 2 numbers"), - (pd.DataFrame(), get_feature(), get_ep(), (0,1), "a", r"Argument bitssec should be of type bool"), -]) -def test_compute_1d_mutual_info_errors(tc, feature, ep, minmax, bitssec, expected_exception): + +@pytest.mark.parametrize( + "tc, feature, ep, minmax, bitssec, expected_exception", + [ + ( + "a", + get_feature(), + get_ep(), + (0, 1), + True, + "Argument tc should be of type pandas.DataFrame or numpy.ndarray", + ), + ( + pd.DataFrame(), + "a", + get_ep(), + (0, 1), + True, + r"feature should be a Tsd \(or TsdFrame with 1 column only\)", + ), + ( + pd.DataFrame(), + get_feature(), + "a", + (0, 1), + True, + r"ep should be an IntervalSet", + ), + ( + pd.DataFrame(), + get_feature(), + get_ep(), + 1, + True, + r"minmax should be a tuple\/list of 2 numbers", + ), + ( + pd.DataFrame(), + get_feature(), + get_ep(), + (0, 1), + "a", + r"Argument bitssec should be of type bool", + ), + ], +) +def test_compute_1d_mutual_info_errors( + tc, feature, ep, minmax, bitssec, expected_exception +): with pytest.raises(TypeError, match=expected_exception): nap.compute_1d_mutual_info(tc, feature, ep, minmax, bitssec) -@pytest.mark.parametrize("dict_tc, features, ep, minmax, bitssec, expected_exception", [ - ("a", get_features(), get_ep(), (0, 1), True, "Argument dict_tc should be a dictionary of numpy.ndarray or numpy.ndarray"), - ({0:np.zeros((2,2))},"a", get_ep(), (0, 1), True, r"features should be a TsdFrame with 2 columns"), - ({0:np.zeros((2,2))}, get_features(), "a", (0, 1), True, r"ep should be an IntervalSet"), - ({0:np.zeros((2,2))}, get_features(), get_ep(), 1, True, r"minmax should be a tuple\/list of 2 numbers"), - ({0:np.zeros((2,2))}, get_features(), get_ep(), (0,1), "a", r"Argument bitssec should be of type bool"), -]) -def test_compute_2d_mutual_info_errors(dict_tc, features, ep, minmax, bitssec, expected_exception): + +@pytest.mark.parametrize( + "dict_tc, features, ep, minmax, bitssec, expected_exception", + [ + ( + "a", + get_features(), + get_ep(), + (0, 1), + True, + "Argument dict_tc should be a dictionary of numpy.ndarray or numpy.ndarray", + ), + ( + {0: np.zeros((2, 2))}, + "a", + get_ep(), + (0, 1), + True, + r"features should be a TsdFrame with 2 columns", + ), + ( + {0: np.zeros((2, 2))}, + get_features(), + "a", + (0, 1), + True, + r"ep should be an IntervalSet", + ), + ( + {0: np.zeros((2, 2))}, + get_features(), + get_ep(), + 1, + True, + r"minmax should be a tuple\/list of 2 numbers", + ), + ( + {0: np.zeros((2, 2))}, + get_features(), + get_ep(), + (0, 1), + "a", + r"Argument bitssec 
should be of type bool", + ), + ], +) +def test_compute_2d_mutual_info_errors( + dict_tc, features, ep, minmax, bitssec, expected_exception +): with pytest.raises(TypeError, match=expected_exception): nap.compute_2d_mutual_info(dict_tc, features, ep, minmax, bitssec) -@pytest.mark.parametrize("tsdframe, feature, nb_bins, ep, minmax, expected_exception", [ - ("a",get_feature(), 10, get_ep(), (0, 1), "Argument tsdframe should be of type Tsd or TsdFrame."), - (get_tsdframe(),"a", 10, get_ep(), (0, 1), r"feature should be a Tsd \(or TsdFrame with 1 column only\)"), - (get_tsdframe(),get_feature(), "a", get_ep(), (0, 1), r"nb_bins should be of type int \(or tuple with \(int, int\) for 2D tuning curves\)."), - (get_tsdframe(),get_feature(), 10, "a", (0, 1), r"ep should be an IntervalSet"), - (get_tsdframe(),get_feature(), 10, get_ep(), 1, r"minmax should be a tuple\/list of 2 numbers"), -]) -def test_compute_1d_tuning_curves_continuous_errors(tsdframe, feature, nb_bins, ep, minmax, expected_exception): + +@pytest.mark.parametrize( + "tsdframe, feature, nb_bins, ep, minmax, expected_exception", + [ + ( + "a", + get_feature(), + 10, + get_ep(), + (0, 1), + "Argument tsdframe should be of type Tsd or TsdFrame.", + ), + ( + get_tsdframe(), + "a", + 10, + get_ep(), + (0, 1), + r"feature should be a Tsd \(or TsdFrame with 1 column only\)", + ), + ( + get_tsdframe(), + get_feature(), + "a", + get_ep(), + (0, 1), + r"nb_bins should be of type int \(or tuple with \(int, int\) for 2D tuning curves\).", + ), + ( + get_tsdframe(), + get_feature(), + 10, + "a", + (0, 1), + r"ep should be an IntervalSet", + ), + ( + get_tsdframe(), + get_feature(), + 10, + get_ep(), + 1, + r"minmax should be a tuple\/list of 2 numbers", + ), + ], +) +def test_compute_1d_tuning_curves_continuous_errors( + tsdframe, feature, nb_bins, ep, minmax, expected_exception +): with pytest.raises(TypeError, match=expected_exception): nap.compute_1d_tuning_curves_continuous(tsdframe, feature, nb_bins, ep, minmax) -@pytest.mark.parametrize("tsdframe, features, nb_bins, ep, minmax, expected_exception", [ - ("a",get_features(), 10, get_ep(), (0, 1), "Argument tsdframe should be of type Tsd or TsdFrame."), - (get_tsdframe(),"a", 10, get_ep(), (0, 1), r"features should be a TsdFrame with 2 columns"), - (get_tsdframe(),get_features(), "a", get_ep(), (0, 1), r"nb_bins should be of type int \(or tuple with \(int, int\) for 2D tuning curves\)."), - (get_tsdframe(),get_features(), 10, "a", (0, 1), r"ep should be an IntervalSet"), - (get_tsdframe(),get_features(), 10, get_ep(), 1, r"minmax should be a tuple\/list of 2 numbers"), -]) -def test_compute_2d_tuning_curves_continuous_errors(tsdframe, features, nb_bins, ep, minmax, expected_exception): + +@pytest.mark.parametrize( + "tsdframe, features, nb_bins, ep, minmax, expected_exception", + [ + ( + "a", + get_features(), + 10, + get_ep(), + (0, 1), + "Argument tsdframe should be of type Tsd or TsdFrame.", + ), + ( + get_tsdframe(), + "a", + 10, + get_ep(), + (0, 1), + r"features should be a TsdFrame with 2 columns", + ), + ( + get_tsdframe(), + get_features(), + "a", + get_ep(), + (0, 1), + r"nb_bins should be of type int \(or tuple with \(int, int\) for 2D tuning curves\).", + ), + ( + get_tsdframe(), + get_features(), + 10, + "a", + (0, 1), + r"ep should be an IntervalSet", + ), + ( + get_tsdframe(), + get_features(), + 10, + get_ep(), + 1, + r"minmax should be a tuple\/list of 2 numbers", + ), + ], +) +def test_compute_2d_tuning_curves_continuous_errors( + tsdframe, features, nb_bins, ep, minmax, 
expected_exception +): with pytest.raises(TypeError, match=expected_exception): nap.compute_2d_tuning_curves_continuous(tsdframe, features, nb_bins, ep, minmax) + ######################## # ValueError test ######################## -@pytest.mark.parametrize("func, args, minmax, expected", [ - (nap.compute_1d_tuning_curves, (get_group(), get_feature(), 10), (0,1,2), "minmax should be of length 2."), - (nap.compute_1d_tuning_curves_continuous, (get_tsdframe(), get_feature(), 10), (0,1,2), "minmax should be of length 2."), - (nap.compute_2d_tuning_curves, (get_group(), get_features(), 10), (0,1,2), "minmax should be of length 4."), - (nap.compute_2d_tuning_curves_continuous, (get_tsdframe(), get_features(), 10), (0,1,2), "minmax should be of length 4."), - (nap.compute_2d_tuning_curves, (get_group(), nap.TsdFrame(t=np.arange(10),d=np.ones((10,3))), 10), (0,1), "features should have 2 columns only."), - (nap.compute_1d_tuning_curves, (get_group(), nap.TsdFrame(t=np.arange(10),d=np.ones((10,3))), 10), (0,1), r"feature should be a Tsd \(or TsdFrame with 1 column only\)"), - (nap.compute_2d_tuning_curves, (get_group(), get_features(), (0, 1, 2)), (0,1,2,3), r"nb_bins should be of type int \(or tuple with \(int, int\) for 2D tuning curves\)."), - (nap.compute_2d_tuning_curves_continuous, (get_tsdframe(), get_features(), (0, 1, 2)), (0,1,2,3), r"nb_bins should be of type int \(or tuple with \(int, int\) for 2D tuning curves\)."), -]) +@pytest.mark.parametrize( + "func, args, minmax, expected", + [ + ( + nap.compute_1d_tuning_curves, + (get_group(), get_feature(), 10), + (0, 1, 2), + "minmax should be of length 2.", + ), + ( + nap.compute_1d_tuning_curves_continuous, + (get_tsdframe(), get_feature(), 10), + (0, 1, 2), + "minmax should be of length 2.", + ), + ( + nap.compute_2d_tuning_curves, + (get_group(), get_features(), 10), + (0, 1, 2), + "minmax should be of length 4.", + ), + ( + nap.compute_2d_tuning_curves_continuous, + (get_tsdframe(), get_features(), 10), + (0, 1, 2), + "minmax should be of length 4.", + ), + ( + nap.compute_2d_tuning_curves, + (get_group(), nap.TsdFrame(t=np.arange(10), d=np.ones((10, 3))), 10), + (0, 1), + "features should have 2 columns only.", + ), + ( + nap.compute_1d_tuning_curves, + (get_group(), nap.TsdFrame(t=np.arange(10), d=np.ones((10, 3))), 10), + (0, 1), + r"feature should be a Tsd \(or TsdFrame with 1 column only\)", + ), + ( + nap.compute_2d_tuning_curves, + (get_group(), get_features(), (0, 1, 2)), + (0, 1, 2, 3), + r"nb_bins should be of type int \(or tuple with \(int, int\) for 2D tuning curves\).", + ), + ( + nap.compute_2d_tuning_curves_continuous, + (get_tsdframe(), get_features(), (0, 1, 2)), + (0, 1, 2, 3), + r"nb_bins should be of type int \(or tuple with \(int, int\) for 2D tuning curves\).", + ), + ], +) def test_compute_tuning_curves_value_error(func, args, minmax, expected): with pytest.raises(ValueError, match=expected): func(*args, minmax=minmax) @@ -124,28 +423,48 @@ def test_compute_tuning_curves_value_error(func, args, minmax, expected): ######################## # Normal test ######################## -@pytest.mark.parametrize("group", [ - get_group() -]) -@pytest.mark.parametrize("dict_ep", [ - { 0:nap.IntervalSet(start=0, end=50), 1:nap.IntervalSet(start=50, end=100)}, - { "0":nap.IntervalSet(start=0, end=50), "1":nap.IntervalSet(start=50, end=100)} -]) +@pytest.mark.parametrize("group", [get_group()]) +@pytest.mark.parametrize( + "dict_ep", + [ + {0: nap.IntervalSet(start=0, end=50), 1: nap.IntervalSet(start=50, end=100)}, + { + "0": 
nap.IntervalSet(start=0, end=50), + "1": nap.IntervalSet(start=50, end=100), + }, + ], +) def test_compute_discrete_tuning_curves(group, dict_ep): tc = nap.compute_discrete_tuning_curves(group, dict_ep) assert len(tc) == 2 assert list(tc.columns) == list(group.keys()) assert list(tc.index.values) == list(dict_ep.keys()) - np.testing.assert_almost_equal(tc.iloc[0,0], 51/50) - np.testing.assert_almost_equal(tc.iloc[1,0], 1) + np.testing.assert_almost_equal(tc.iloc[0, 0], 51 / 50) + np.testing.assert_almost_equal(tc.iloc[1, 0], 1) + @pytest.mark.filterwarnings("ignore") -@pytest.mark.parametrize("args, kwargs, expected", [ - ((get_group(), get_feature(), 10), {}, np.array([10.0]+[0.0]*9)[:,None]), - ((get_group(), get_feature(), 10), {"ep":get_ep()}, np.array([10.0]+[0.0]*9)[:,None]), - ((get_group(), get_feature(), 10), {"minmax":(0, 0.9)}, np.array([10.0]+[0.0]*9)[:,None]), - ((get_group(), get_feature(), 20), {"minmax":(0, 1.9)}, np.array([10.0]+[0.0]*9+[np.nan]*10)[:,None]) -]) +@pytest.mark.parametrize( + "args, kwargs, expected", + [ + ((get_group(), get_feature(), 10), {}, np.array([10.0] + [0.0] * 9)[:, None]), + ( + (get_group(), get_feature(), 10), + {"ep": get_ep()}, + np.array([10.0] + [0.0] * 9)[:, None], + ), + ( + (get_group(), get_feature(), 10), + {"minmax": (0, 0.9)}, + np.array([10.0] + [0.0] * 9)[:, None], + ), + ( + (get_group(), get_feature(), 20), + {"minmax": (0, 1.9)}, + np.array([10.0] + [0.0] * 9 + [np.nan] * 10)[:, None], + ), + ], +) def test_compute_1d_tuning_curves(args, kwargs, expected): tc = nap.compute_1d_tuning_curves(*args, **kwargs) # Columns @@ -157,19 +476,35 @@ def test_compute_1d_tuning_curves(args, kwargs, expected): tmp = np.linspace(kwargs["minmax"][0], kwargs["minmax"][1], args[2] + 1) else: tmp = np.linspace(np.min(args[1]), np.max(args[1]), args[2] + 1) - np.testing.assert_almost_equal(tmp[0:-1] + np.diff(tmp)/2, tc.index.values) + np.testing.assert_almost_equal(tmp[0:-1] + np.diff(tmp) / 2, tc.index.values) # Array np.testing.assert_almost_equal(tc.values, expected) + @pytest.mark.filterwarnings("ignore") -@pytest.mark.parametrize("args, kwargs, expected", [ - ((get_group(), get_features(), 10), {}, np.ones((10,10))*0.5), - ((get_group(), get_features(), (10,10)), {}, np.ones((10,10))*0.5), - ((get_group(), get_features(), 10), {"ep":nap.IntervalSet(0, 400)}, np.ones((10,10))*0.25), - ((get_group(), get_features(), 10), {"minmax":(0, 100, 0, 100)}, np.ones((10,10))*0.5), - ((get_group(), get_features(), 10), {"minmax":(0, 200, 0, 100)}, np.vstack((np.ones((5,10))*0.5,np.ones((5,10))*np.nan))), -]) +@pytest.mark.parametrize( + "args, kwargs, expected", + [ + ((get_group(), get_features(), 10), {}, np.ones((10, 10)) * 0.5), + ((get_group(), get_features(), (10, 10)), {}, np.ones((10, 10)) * 0.5), + ( + (get_group(), get_features(), 10), + {"ep": nap.IntervalSet(0, 400)}, + np.ones((10, 10)) * 0.25, + ), + ( + (get_group(), get_features(), 10), + {"minmax": (0, 100, 0, 100)}, + np.ones((10, 10)) * 0.5, + ), + ( + (get_group(), get_features(), 10), + {"minmax": (0, 200, 0, 100)}, + np.vstack((np.ones((5, 10)) * 0.5, np.ones((5, 10)) * np.nan)), + ), + ], +) def test_compute_2d_tuning_curves(args, kwargs, expected): tc, xy = nap.compute_2d_tuning_curves(*args, **kwargs) assert isinstance(tc, dict) @@ -187,11 +522,11 @@ def test_compute_2d_tuning_curves(args, kwargs, expected): tmp1 = np.linspace(kwargs["minmax"][0], kwargs["minmax"][1], nb_bins[0] + 1) tmp2 = np.linspace(kwargs["minmax"][2], kwargs["minmax"][3], nb_bins[1] + 1) else: - tmp1 = 
np.linspace(np.min(args[1][:,0]), np.max(args[1][:,0]), nb_bins[0] + 1) - tmp2 = np.linspace(np.min(args[1][:,1]), np.max(args[1][:,1]), nb_bins[1] + 1) + tmp1 = np.linspace(np.min(args[1][:, 0]), np.max(args[1][:, 0]), nb_bins[0] + 1) + tmp2 = np.linspace(np.min(args[1][:, 1]), np.max(args[1][:, 1]), nb_bins[1] + 1) - np.testing.assert_almost_equal(tmp1[0:-1] + np.diff(tmp1)/2, xy[0]) - np.testing.assert_almost_equal(tmp2[0:-1] + np.diff(tmp2)/2, xy[1]) + np.testing.assert_almost_equal(tmp1[0:-1] + np.diff(tmp1) / 2, xy[0]) + np.testing.assert_almost_equal(tmp2[0:-1] + np.diff(tmp2) / 2, xy[1]) # Values for i in tc.keys(): @@ -200,23 +535,51 @@ def test_compute_2d_tuning_curves(args, kwargs, expected): @pytest.mark.filterwarnings("ignore") -@pytest.mark.parametrize("args, kwargs, expected", [ - ((pd.DataFrame(index=np.arange(0, 2), data=np.array([0, 10])), - nap.Tsd(t=np.arange(100), d=np.tile(np.arange(2), 50)),), - {}, np.array([[1.]])), - ((pd.DataFrame(index=np.arange(0, 2), data=np.array([0, 10])), - nap.Tsd(t=np.arange(100), d=np.tile(np.arange(2), 50)),), - {"bitssec":True}, np.array([[5.]])), - ((pd.DataFrame(index=np.arange(0, 2), data=np.array([0, 10])), - nap.Tsd(t=np.arange(100), d=np.tile(np.arange(2), 50)),), - {"ep": nap.IntervalSet(start=0, end=49)}, np.array([[1.]])), - ((pd.DataFrame(index=np.arange(0, 2), data=np.array([0, 10])), - nap.Tsd(t=np.arange(100), d=np.tile(np.arange(2), 50)),), - {"minmax": (0, 1)}, np.array([[1.]])), - ((np.array([[0],[10]]), - nap.Tsd(t=np.arange(100), d=np.tile(np.arange(2), 50)),), - {"minmax": (0, 1)}, np.array([[1.]])), -]) +@pytest.mark.parametrize( + "args, kwargs, expected", + [ + ( + ( + pd.DataFrame(index=np.arange(0, 2), data=np.array([0, 10])), + nap.Tsd(t=np.arange(100), d=np.tile(np.arange(2), 50)), + ), + {}, + np.array([[1.0]]), + ), + ( + ( + pd.DataFrame(index=np.arange(0, 2), data=np.array([0, 10])), + nap.Tsd(t=np.arange(100), d=np.tile(np.arange(2), 50)), + ), + {"bitssec": True}, + np.array([[5.0]]), + ), + ( + ( + pd.DataFrame(index=np.arange(0, 2), data=np.array([0, 10])), + nap.Tsd(t=np.arange(100), d=np.tile(np.arange(2), 50)), + ), + {"ep": nap.IntervalSet(start=0, end=49)}, + np.array([[1.0]]), + ), + ( + ( + pd.DataFrame(index=np.arange(0, 2), data=np.array([0, 10])), + nap.Tsd(t=np.arange(100), d=np.tile(np.arange(2), 50)), + ), + {"minmax": (0, 1)}, + np.array([[1.0]]), + ), + ( + ( + np.array([[0], [10]]), + nap.Tsd(t=np.arange(100), d=np.tile(np.arange(2), 50)), + ), + {"minmax": (0, 1)}, + np.array([[1.0]]), + ), + ], +) def test_compute_1d_mutual_info(args, kwargs, expected): tc = args[0] feature = args[1] @@ -229,23 +592,66 @@ def test_compute_1d_mutual_info(args, kwargs, expected): @pytest.mark.filterwarnings("ignore") -@pytest.mark.parametrize("args, kwargs, expected", [ - (({0: np.array([[0, 1], [0, 0]])}, - nap.TsdFrame(t=np.arange(100), d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T) - ), {}, np.array([[2.0]]) ), - ((np.array([[[0, 1], [0, 0]]]), - nap.TsdFrame(t=np.arange(100), d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T) - ), {}, np.array([[2.0]])), - (({0: np.array([[0, 1], [0, 0]])}, - nap.TsdFrame(t=np.arange(100), d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T) - ), {"bitssec":True}, np.array([[0.5]])), - (({0: np.array([[0, 1], [0, 0]])}, - nap.TsdFrame(t=np.arange(100), d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T) - ), {"ep": nap.IntervalSet(start=0, end=7)}, np.array([[2.0]])), - (({0: np.array([[0, 1], [0, 0]])}, - nap.TsdFrame(t=np.arange(100), 
d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T) - ), {"minmax": (0,1,0,1)}, np.array([[2.0]])), -]) +@pytest.mark.parametrize( + "args, kwargs, expected", + [ + ( + ( + {0: np.array([[0, 1], [0, 0]])}, + nap.TsdFrame( + t=np.arange(100), + d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T, + ), + ), + {}, + np.array([[2.0]]), + ), + ( + ( + np.array([[[0, 1], [0, 0]]]), + nap.TsdFrame( + t=np.arange(100), + d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T, + ), + ), + {}, + np.array([[2.0]]), + ), + ( + ( + {0: np.array([[0, 1], [0, 0]])}, + nap.TsdFrame( + t=np.arange(100), + d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T, + ), + ), + {"bitssec": True}, + np.array([[0.5]]), + ), + ( + ( + {0: np.array([[0, 1], [0, 0]])}, + nap.TsdFrame( + t=np.arange(100), + d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T, + ), + ), + {"ep": nap.IntervalSet(start=0, end=7)}, + np.array([[2.0]]), + ), + ( + ( + {0: np.array([[0, 1], [0, 0]])}, + nap.TsdFrame( + t=np.arange(100), + d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T, + ), + ), + {"minmax": (0, 1, 0, 1)}, + np.array([[2.0]]), + ), + ], +) def test_compute_2d_mutual_info(args, kwargs, expected): dict_tc = args[0] features = args[1] @@ -258,14 +664,41 @@ def test_compute_2d_mutual_info(args, kwargs, expected): @pytest.mark.filterwarnings("ignore") -@pytest.mark.parametrize("args, kwargs, expected", [ - ((get_tsdframe(), get_feature(), 10), {}, np.vstack((np.ones((1,2)), np.zeros((9,2))))), - ((get_tsdframe(), get_feature()[:,np.newaxis], 10), {}, np.vstack((np.ones((1,2)), np.zeros((9,2))))), - ((get_tsdframe()[:,0], get_feature(), 10), {}, np.vstack((np.ones((1,1)), np.zeros((9,1))))), - ((get_tsdframe(), get_feature(), 10), {"ep":get_ep()}, np.vstack((np.ones((1,2)), np.zeros((9,2))))), - ((get_tsdframe(), get_feature(), 10), {"minmax":(0, 0.9)}, np.vstack((np.ones((1,2)), np.zeros((9,2))))), - ((get_tsdframe(), get_feature(), 20), {"minmax":(0, 1.9)}, np.vstack((np.ones((1,2)), np.zeros((9,2)), np.ones((10,2))*np.nan))), -]) +@pytest.mark.parametrize( + "args, kwargs, expected", + [ + ( + (get_tsdframe(), get_feature(), 10), + {}, + np.vstack((np.ones((1, 2)), np.zeros((9, 2)))), + ), + ( + (get_tsdframe(), get_feature()[:, np.newaxis], 10), + {}, + np.vstack((np.ones((1, 2)), np.zeros((9, 2)))), + ), + ( + (get_tsdframe()[:, 0], get_feature(), 10), + {}, + np.vstack((np.ones((1, 1)), np.zeros((9, 1)))), + ), + ( + (get_tsdframe(), get_feature(), 10), + {"ep": get_ep()}, + np.vstack((np.ones((1, 2)), np.zeros((9, 2)))), + ), + ( + (get_tsdframe(), get_feature(), 10), + {"minmax": (0, 0.9)}, + np.vstack((np.ones((1, 2)), np.zeros((9, 2)))), + ), + ( + (get_tsdframe(), get_feature(), 20), + {"minmax": (0, 1.9)}, + np.vstack((np.ones((1, 2)), np.zeros((9, 2)), np.ones((10, 2)) * np.nan)), + ), + ], +) def test_compute_1d_tuning_curves_continuous(args, kwargs, expected): tsdframe, feature, nb_bins = args tc = nap.compute_1d_tuning_curves_continuous(tsdframe, feature, nb_bins, **kwargs) @@ -278,33 +711,84 @@ def test_compute_1d_tuning_curves_continuous(args, kwargs, expected): tmp = np.linspace(kwargs["minmax"][0], kwargs["minmax"][1], nb_bins + 1) else: tmp = np.linspace(np.min(args[1]), np.max(args[1]), nb_bins + 1) - np.testing.assert_almost_equal(tmp[0:-1] + np.diff(tmp)/2, tc.index.values) + np.testing.assert_almost_equal(tmp[0:-1] + np.diff(tmp) / 2, tc.index.values) # Array np.testing.assert_almost_equal(tc.values, expected) - -@pytest.mark.parametrize("tsdframe, nb_bins, kwargs, expected", [ - 
(nap.TsdFrame(t=np.arange(0, 100), d=np.hstack((np.ones((100, 1)), np.ones((100, 1)) * 2))), - 1, {}, {0: np.array([[1.]]), 1: np.array([[2.]])}), - (nap.TsdFrame(t=np.arange(0, 100), d=np.hstack((np.ones((100, 1)), np.ones((100, 1)) * 2)), columns=["x", "y"]), - 2, {}, {'x': np.array([[1,0],[0,0]]), 'y': np.array([[2,0],[0,0]])}), - (nap.Tsd(t=np.arange(0, 100), d=np.hstack((np.ones((100, )) * 2))), - 2, {}, {0: np.array([[2, 0], [0, 0]])}), - (nap.TsdFrame(t=np.arange(0, 100), d=np.hstack((np.ones((100, 1)), np.ones((100, 1)) * 2))), - (1,2), {}, {0: np.array([[1., 0.0]]), 1: np.array([[2., 0.0]])}), - (nap.TsdFrame(t=np.arange(0, 100), d=np.hstack((np.ones((100, 1)), np.ones((100, 1)) * 2))), - 1, {"ep":get_ep()}, {0: np.array([[1.]]), 1: np.array([[2.]])}), - (nap.TsdFrame(t=np.arange(0, 100), d=np.hstack((np.ones((100, 1)), np.ones((100, 1)) * 2))), - 1, {"minmax": (0, 1, 0, 1)}, {0: np.array([[1.]]), 1: np.array([[2.]])}), - (nap.TsdFrame(t=np.arange(0, 100), d=np.hstack((np.ones((100, 1)), np.ones((100, 1)) * 2))), - (1, 3), {"minmax": (0, 1, 0, 3)}, {0: np.array([[1., 1., np.nan]]), 1: np.array([[2., 2., np.nan]])}), -]) +@pytest.mark.parametrize( + "tsdframe, nb_bins, kwargs, expected", + [ + ( + nap.TsdFrame( + t=np.arange(0, 100), + d=np.hstack((np.ones((100, 1)), np.ones((100, 1)) * 2)), + ), + 1, + {}, + {0: np.array([[1.0]]), 1: np.array([[2.0]])}, + ), + ( + nap.TsdFrame( + t=np.arange(0, 100), + d=np.hstack((np.ones((100, 1)), np.ones((100, 1)) * 2)), + columns=["x", "y"], + ), + 2, + {}, + {"x": np.array([[1, 0], [0, 0]]), "y": np.array([[2, 0], [0, 0]])}, + ), + ( + nap.Tsd(t=np.arange(0, 100), d=np.hstack((np.ones((100,)) * 2))), + 2, + {}, + {0: np.array([[2, 0], [0, 0]])}, + ), + ( + nap.TsdFrame( + t=np.arange(0, 100), + d=np.hstack((np.ones((100, 1)), np.ones((100, 1)) * 2)), + ), + (1, 2), + {}, + {0: np.array([[1.0, 0.0]]), 1: np.array([[2.0, 0.0]])}, + ), + ( + nap.TsdFrame( + t=np.arange(0, 100), + d=np.hstack((np.ones((100, 1)), np.ones((100, 1)) * 2)), + ), + 1, + {"ep": get_ep()}, + {0: np.array([[1.0]]), 1: np.array([[2.0]])}, + ), + ( + nap.TsdFrame( + t=np.arange(0, 100), + d=np.hstack((np.ones((100, 1)), np.ones((100, 1)) * 2)), + ), + 1, + {"minmax": (0, 1, 0, 1)}, + {0: np.array([[1.0]]), 1: np.array([[2.0]])}, + ), + ( + nap.TsdFrame( + t=np.arange(0, 100), + d=np.hstack((np.ones((100, 1)), np.ones((100, 1)) * 2)), + ), + (1, 3), + {"minmax": (0, 1, 0, 3)}, + {0: np.array([[1.0, 1.0, np.nan]]), 1: np.array([[2.0, 2.0, np.nan]])}, + ), + ], +) def test_compute_2d_tuning_curves_continuous(tsdframe, nb_bins, kwargs, expected): features = nap.TsdFrame( t=np.arange(100), d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T ) - tc, xy = nap.compute_2d_tuning_curves_continuous(tsdframe, features, nb_bins, **kwargs) + tc, xy = nap.compute_2d_tuning_curves_continuous( + tsdframe, features, nb_bins, **kwargs + ) # Keys if hasattr(tsdframe, "columns"): @@ -319,14 +803,17 @@ def test_compute_2d_tuning_curves_continuous(tsdframe, nb_bins, kwargs, expected tmp1 = np.linspace(kwargs["minmax"][0], kwargs["minmax"][1], nb_bins[0] + 1) tmp2 = np.linspace(kwargs["minmax"][2], kwargs["minmax"][3], nb_bins[1] + 1) else: - tmp1 = np.linspace(np.min(features[:,0]), np.max(features[:,0]), nb_bins[0] + 1) - tmp2 = np.linspace(np.min(features[:,1]), np.max(features[:,1]), nb_bins[1] + 1) + tmp1 = np.linspace( + np.min(features[:, 0]), np.max(features[:, 0]), nb_bins[0] + 1 + ) + tmp2 = np.linspace( + np.min(features[:, 1]), np.max(features[:, 1]), nb_bins[1] + 1 + ) - 
np.testing.assert_almost_equal(tmp1[0:-1] + np.diff(tmp1)/2, xy[0]) - np.testing.assert_almost_equal(tmp2[0:-1] + np.diff(tmp2)/2, xy[1]) + np.testing.assert_almost_equal(tmp1[0:-1] + np.diff(tmp1) / 2, xy[0]) + np.testing.assert_almost_equal(tmp2[0:-1] + np.diff(tmp2) / 2, xy[1]) # Values for i in tc.keys(): assert tc[i].shape == nb_bins np.testing.assert_almost_equal(tc[i], expected[i]) - diff --git a/tests/test_utils.py b/tests/test_utils.py index 71c06027..9947626f 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -1,20 +1,23 @@ """Tests of utils for `pynapple` package.""" -import pynapple as nap import numpy as np import pandas as pd import pytest +import pynapple as nap + + def test_get_backend(): - assert nap.core.utils.get_backend() in ["numba", "jax"] + assert nap.core.utils.get_backend() in ["numba", "jax"] + def test_is_array_like(): - assert nap.core.utils.is_array_like(np.ones(3)) - assert nap.core.utils.is_array_like(np.array([])) - assert not nap.core.utils.is_array_like([1,2,3]) - assert not nap.core.utils.is_array_like(1) - assert not nap.core.utils.is_array_like('a') - assert not nap.core.utils.is_array_like(True) - assert not nap.core.utils.is_array_like((1,2,3)) - assert not nap.core.utils.is_array_like({0:1}) - assert not nap.core.utils.is_array_like(np.array(0)) \ No newline at end of file + assert nap.core.utils.is_array_like(np.ones(3)) + assert nap.core.utils.is_array_like(np.array([])) + assert not nap.core.utils.is_array_like([1, 2, 3]) + assert not nap.core.utils.is_array_like(1) + assert not nap.core.utils.is_array_like("a") + assert not nap.core.utils.is_array_like(True) + assert not nap.core.utils.is_array_like((1, 2, 3)) + assert not nap.core.utils.is_array_like({0: 1}) + assert not nap.core.utils.is_array_like(np.array(0)) From 4b2db8fee15c12951eb78797e1a090da4b30de36 Mon Sep 17 00:00:00 2001 From: BalzaniEdoardo Date: Fri, 1 Nov 2024 15:01:16 -0400 Subject: [PATCH 2/3] linted x2 --- tests/test_time_units.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test_time_units.py b/tests/test_time_units.py index 3ad99f2e..6fc68aee 100644 --- a/tests/test_time_units.py +++ b/tests/test_time_units.py @@ -12,6 +12,7 @@ import pytest import pynapple as nap + # from pynapple.core.time_units import format_timestamps, return_timestamps, sort_timestamps from pynapple.core.time_index import TsIndex From f4a4ab48ef74d2ad775d0c4bc2d251567cb39837 Mon Sep 17 00:00:00 2001 From: Guillaume Viejo Date: Wed, 6 Nov 2024 14:52:55 -0500 Subject: [PATCH 3/3] linting --- tests/test_jitted.py | 3 --- tests/test_metadata.py | 11 +++++---- tests/test_npz_file.py | 1 - tests/test_time_series.py | 47 ++++++++++++++++++++++++--------------- tox.ini | 2 ++ 5 files changed, 36 insertions(+), 28 deletions(-) diff --git a/tests/test_jitted.py b/tests/test_jitted.py index 807d04af..2cdbd8e5 100644 --- a/tests/test_jitted.py +++ b/tests/test_jitted.py @@ -364,7 +364,6 @@ def test_jitintersect(): pd.testing.assert_frame_equal(ep3._metadata, ep4._metadata) - def test_jitunion(): for i in range(10): ep1, ep2 = get_example_isets() @@ -405,7 +404,6 @@ def test_jitdiff(): ep1, ep2 = get_example_isets() ep1.set_info(label1=np.arange(len(ep1))) - s, e, m = nap.core._jitted_functions.jitdiff( ep1.start, ep1.end, ep2.start, ep2.end ) @@ -460,7 +458,6 @@ def test_jitdiff(): pd.testing.assert_frame_equal(ep3._metadata, ep4._metadata) - def test_jitunion_isets(): for i in range(10): ep1, ep2 = get_example_isets() diff --git a/tests/test_metadata.py b/tests/test_metadata.py index 
bbcf3319..44d7c6ef 100644 --- a/tests/test_metadata.py +++ b/tests/test_metadata.py @@ -1,16 +1,15 @@ """Tests for metadata in IntervalSet, TsdFrame, and TsGroup""" -from numbers import Number import inspect - - import pickle +import warnings +from contextlib import nullcontext as does_not_raise +from numbers import Number +from pathlib import Path + import numpy as np import pandas as pd import pytest -from pathlib import Path -from contextlib import nullcontext as does_not_raise -import warnings import pynapple as nap diff --git a/tests/test_npz_file.py b/tests/test_npz_file.py index 23f725e2..d809efb8 100644 --- a/tests/test_npz_file.py +++ b/tests/test_npz_file.py @@ -101,7 +101,6 @@ def test_load(path, k): pd.testing.assert_frame_equal(tmp._metadata, data[k]._metadata) - @pytest.mark.parametrize("path", [path]) @pytest.mark.parametrize("k", ["tsgroup", "tsgroup_minfo"]) def test_load_tsgroup(path, k): diff --git a/tests/test_time_series.py b/tests/test_time_series.py index 681adf3c..a53612fd 100755 --- a/tests/test_time_series.py +++ b/tests/test_time_series.py @@ -1,9 +1,8 @@ """Tests of time series for `pynapple` package.""" -from numbers import Number - import pickle from contextlib import nullcontext as does_not_raise +from numbers import Number from pathlib import Path import numpy as np @@ -762,15 +761,19 @@ def test_threshold_time_support(self, tsd): def test_slice_with_int_tsd(self, tsd): tsd = tsd.copy() array_slice = np.arange(10, dtype=int) - tsd_index = nap.Tsd(t=tsd.index[:len(array_slice)], d=array_slice) - + tsd_index = nap.Tsd(t=tsd.index[: len(array_slice)], d=array_slice) + with pytest.raises(ValueError) as e: indexed = tsd[tsd_index] - assert str(e.value) == "When indexing with a Tsd, it must contain boolean values" + assert ( + str(e.value) == "When indexing with a Tsd, it must contain boolean values" + ) with pytest.raises(ValueError) as e: tsd[tsd_index] = 0 - assert str(e.value) == "When indexing with a Tsd, it must contain boolean values" + assert ( + str(e.value) == "When indexing with a Tsd, it must contain boolean values" + ) def test_slice_with_bool_tsd(self, tsd): @@ -1796,11 +1799,13 @@ def test_interpolate_with_ep(self, tsdtensor): def test_indexing_with_boolean_tsd(self, tsdtensor): # Create a boolean Tsd for indexing - index_tsd = nap.Tsd(t=tsdtensor.t, d=np.random.choice([True, False], size=len(tsdtensor))) - + index_tsd = nap.Tsd( + t=tsdtensor.t, d=np.random.choice([True, False], size=len(tsdtensor)) + ) + # Test indexing result = tsdtensor[index_tsd] - + assert isinstance(result, nap.TsdTensor) assert len(result) == index_tsd.d.sum() np.testing.assert_array_equal(result.t, tsdtensor.t[index_tsd.d]) @@ -1808,11 +1813,13 @@ def test_indexing_with_boolean_tsd(self, tsdtensor): def test_indexing_with_boolean_tsd_and_additional_slicing(self, tsdtensor): # Create a boolean Tsd for indexing - index_tsd = nap.Tsd(t=tsdtensor.t, d=np.random.choice([True, False], size=len(tsdtensor))) - + index_tsd = nap.Tsd( + t=tsdtensor.t, d=np.random.choice([True, False], size=len(tsdtensor)) + ) + # Test indexing with additional dimension slicing result = tsdtensor[index_tsd, 1] - + assert isinstance(result, nap.TsdFrame) assert len(result) == index_tsd.d.sum() np.testing.assert_array_equal(result.t, tsdtensor.t[index_tsd.d]) @@ -1821,19 +1828,24 @@ def test_indexing_with_boolean_tsd_and_additional_slicing(self, tsdtensor): def test_indexing_with_non_boolean_tsd_raises_error(self, tsdtensor): # Create a non-boolean Tsd index_tsd = nap.Tsd(t=np.array([10, 20, 30, 40]), 
d=np.array([1, 2, 3, 0])) - + # Test indexing with non-boolean Tsd should raise an error - with pytest.raises(ValueError, match="When indexing with a Tsd, it must contain boolean values"): + with pytest.raises( + ValueError, match="When indexing with a Tsd, it must contain boolean values" + ): tsdtensor[index_tsd] def test_indexing_with_mismatched_boolean_tsd_raises_error(self, tsdtensor): # Create a boolean Tsd with mismatched length - index_tsd = nap.Tsd(t=np.arange(len(tsdtensor) + 5), d=np.random.choice([True, False], size=len(tsdtensor) + 5)) - + index_tsd = nap.Tsd( + t=np.arange(len(tsdtensor) + 5), + d=np.random.choice([True, False], size=len(tsdtensor) + 5), + ) + # Test indexing with mismatched boolean Tsd should raise an error with pytest.raises(IndexError, match="boolean index did not match"): tsdtensor[index_tsd] - + def test_setitem_with_boolean_tsd(self, tsdtensor): # Create a boolean Tsd for indexing index_tsd = nap.Tsd( @@ -1857,7 +1869,6 @@ def test_setitem_with_boolean_tsd(self, tsdtensor): ) - @pytest.mark.parametrize( "obj", [ diff --git a/tox.ini b/tox.ini index e0a8a5ee..e6ad4d7a 100644 --- a/tox.ini +++ b/tox.ini @@ -11,7 +11,9 @@ extras = dev commands = black --check pynapple + black --check tests isort --check pynapple --profile black + isort --check tests --profile black flake8 pynapple --max-complexity 10 coverage run --source=pynapple --branch -m pytest tests/ coverage report -m
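
Note: the lint commands added to tox.ini in this series can also be run by hand before
pushing. A minimal local sketch, assuming black, isort, and flake8 are installed in the
development environment (these invocations mirror the config above and are not part of
the patch itself):

    # formatting and import-order checks on both the package and the test suite
    black --check pynapple tests
    isort --check pynapple tests --profile black
    # style/complexity check on the package, mirroring the existing configuration
    flake8 pynapple --max-complexity 10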