diff --git a/.gitignore b/.gitignore index 894a44c..eaeecb8 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,7 @@ +# Specific to this project +/neurotic/version.py +/neurotic/example/example-data/ + # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] diff --git a/README.rst b/README.rst index fb59d45..1c2b43a 100644 --- a/README.rst +++ b/README.rst @@ -1 +1,48 @@ neurotic +======== + +*Curate, visualize, and annotate your behavioral ephys data using Python* + +.. image:: https://img.shields.io/pypi/v/neurotic.svg + :target: https://pypi.org/project/neurotic/ + :alt: PyPI project + +.. image:: https://img.shields.io/badge/github-source_code-blue.svg + :target: https://github.com/jpgill86/neurotic + :alt: GitHub source code + +Installation +------------ + +Because **neurotic** depends on some pre-release changes in a couple libraries, +pip cannot automatically fetch all dependencies during normal setup. The file +``requirements.txt`` is provided for installing the correct versions of +dependencies. Both **neurotic** and its dependencies can be installed using +these commands:: + + git clone https://github.com/jpgill86/neurotic + cd neurotic + pip install -r requirements.txt + pip install . + +If you get an error while installing PyAV, try this:: + + conda install -c conda-forge av + +and then attempt to install the dependencies in ``requirements.txt`` again. + +Getting started +--------------- + +Launch the standalone app from the command line:: + + neurotic + +A simple example is provided. Select the "example dataset", download the +associated data, and then click launch. + +Questions and support +--------------------- + +Please post any questions, problems, comments, or suggestions in the `GitHub +issue tracker `_. diff --git a/neurotic/__init__.py b/neurotic/__init__.py new file mode 100644 index 0000000..9e93d87 --- /dev/null +++ b/neurotic/__init__.py @@ -0,0 +1,12 @@ +# -*- coding: utf-8 -*- +""" +Curate, visualize, and annotate your behavioral ephys data using Python +""" + +from .version import version as __version__ +from .version import git_revision as __git_revision__ + +from .datasets import * +from .gui import * +from .scripts import * +from .utils import * diff --git a/neurotic/__main__.py b/neurotic/__main__.py new file mode 100644 index 0000000..6d6d132 --- /dev/null +++ b/neurotic/__main__.py @@ -0,0 +1,8 @@ +# -*- coding: utf-8 -*- +""" + +""" + +from .scripts import launch_standalone + +launch_standalone() diff --git a/neurotic/datasets/__init__.py b/neurotic/datasets/__init__.py new file mode 100644 index 0000000..b3df35b --- /dev/null +++ b/neurotic/datasets/__init__.py @@ -0,0 +1,9 @@ +# -*- coding: utf-8 -*- +""" + +""" + +from ..datasets.ftpauth import * +from ..datasets.download import * +from ..datasets.metadata import * +from ..datasets.data import * diff --git a/neurotic/datasets/data.py b/neurotic/datasets/data.py new file mode 100644 index 0000000..7c23e47 --- /dev/null +++ b/neurotic/datasets/data.py @@ -0,0 +1,456 @@ +# -*- coding: utf-8 -*- +""" +Import raw data, annotations, and spike sorting results as Neo objects +""" + +import numpy as np +import pandas as pd +import quantities as pq +import elephant +import neo + +from ..datasets.metadata import abs_path +from neo.test.generate_datasets import fake_neo + +def LoadAndPrepareData(metadata, lazy=False, signal_group_mode='split-all', fake_data_for_testing = False): + """ + + """ + + # read in the electrophysiology data + blk = ReadDataFile(metadata, lazy, signal_group_mode) + # blk = 
CreateNeoBlockExample() + + # apply filters to signals if not using lazy loading of signals + if not lazy: + blk = ApplyFilters(metadata, blk) + + # read in annotations + annotations_dataframe = ReadAnnotationsFile(metadata) + blk.segments[0].epochs += CreateNeoEpochsFromDataframe(annotations_dataframe, metadata, abs_path(metadata, 'annotations_file')) + blk.segments[0].events += CreateNeoEventsFromDataframe(annotations_dataframe, metadata, abs_path(metadata, 'annotations_file')) + + # read in epoch encoder file + epoch_encoder_dataframe = ReadEpochEncoderFile(metadata) + blk.segments[0].epochs += CreateNeoEpochsFromDataframe(epoch_encoder_dataframe, metadata, abs_path(metadata, 'epoch_encoder_file')) + blk.segments[0].events += CreateNeoEventsFromDataframe(epoch_encoder_dataframe, metadata, abs_path(metadata, 'epoch_encoder_file')) + + # classify spikes by amplitude if not using lazy loading of signals + if not lazy: + blk.segments[0].spiketrains += RunAmplitudeDiscriminators(metadata, blk) + + # read in spikes identified by spike sorting using tridesclous + t_start = blk.segments[0].analogsignals[0].t_start + t_stop = blk.segments[0].analogsignals[0].t_stop + sampling_period = blk.segments[0].analogsignals[0].sampling_period + spikes_dataframe = ReadSpikesFile(metadata, blk) + blk.segments[0].spiketrains += CreateNeoSpikeTrainsFromDataframe(spikes_dataframe, metadata, t_start, t_stop, sampling_period) + + if fake_data_for_testing: + # load fake data as a demo + blk.segments[0].epochs += [fake_neo('Epoch') for _ in range(5)] + blk.segments[0].events += [fake_neo('Event') for _ in range(5)] + blk.segments[0].spiketrains += [fake_neo('SpikeTrain') for _ in range(5)] + + return blk + +def ReadDataFile(metadata, lazy=False, signal_group_mode='split-all'): + """ + + """ + + # read in the electrophysiology data + # - signal_group_mode='split-all' ensures every channel gets its own + # AnalogSignal, which is important for indexing in EphyviewerConfigurator + io = neo.io.get_io(abs_path(metadata, 'data_file')) + blk = io.read_block(lazy=lazy, signal_group_mode=signal_group_mode) + + # load all proxy objects except analog signals + if lazy: + for i in range(len(blk.segments[0].epochs)): + epoch = blk.segments[0].epochs[i] + if hasattr(epoch, 'load'): + blk.segments[0].epochs[i] = epoch.load() + + for i in range(len(blk.segments[0].events)): + event = blk.segments[0].events[i] + if hasattr(event, 'load'): + blk.segments[0].events[i] = event.load() + + for i in range(len(blk.segments[0].spiketrains)): + spiketrain = blk.segments[0].spiketrains[i] + if hasattr(spiketrain, 'load'): + blk.segments[0].spiketrains[i] = spiketrain.load() + + # convert byte labels to Unicode strings + for epoch in blk.segments[0].epochs: + epoch.labels = epoch.labels.astype('U') + + for event in blk.segments[0].events: + event.labels = event.labels.astype('U') + + return blk + +def ReadAnnotationsFile(metadata): + """ + + """ + + if metadata['annotations_file'] is None: + + return None + + else: + + # data types for each column in the file + dtypes = { + 'Start (s)': float, + 'End (s)': float, + 'Type': str, + 'Label': str, + } + + # parse the file and create a dataframe + df = pd.read_csv(abs_path(metadata, 'annotations_file'), dtype = dtypes) + + # increment row labels by 2 so they match the source file + # which is 1-indexed and has a header + df.index += 2 + + # discard entries with missing or negative start times + bad_start = df['Start (s)'].isnull() | (df['Start (s)'] < 0) + if bad_start.any(): + print('NOTE: These 
rows will be discarded because their Start times are missing or negative:') + print(df[bad_start]) + df = df[~bad_start] + + # discard entries with end time preceding start time + bad_end = df['End (s)'] < df['Start (s)'] + if bad_end.any(): + print('NOTE: These rows will be discarded because their End times precede their Start times:') + print(df[bad_end]) + df = df[~bad_end] + + # compute durations + df.insert( + column = 'Duration (s)', + value = df['End (s)'] - df['Start (s)'], + loc = 2, # insert after 'End (s)' + ) + + # replace some NaNs + df.fillna({ + 'Duration (s)': 0, + 'Type': 'Other', + 'Label': '', + }, inplace = True) + + # sort entries by time + df.sort_values([ + 'Start (s)', + 'Duration (s)', + ], inplace = True) + + # return the dataframe + return df + +def ReadEpochEncoderFile(metadata): + """ + + """ + + if metadata['epoch_encoder_file'] is None: + + return None + + else: + + # data types for each column in the file + dtypes = { + 'Start (s)': float, + 'End (s)': float, + 'Type': str, + } + + # parse the file and create a dataframe + df = pd.read_csv(abs_path(metadata, 'epoch_encoder_file'), dtype = dtypes) + + # increment row labels by 2 so they match the source file + # which is 1-indexed and has a header + df.index += 2 + + # discard entries with missing or negative start times + bad_start = df['Start (s)'].isnull() | (df['Start (s)'] < 0) + if bad_start.any(): + print('NOTE: These rows will be discarded because their Start times are missing or negative:') + print(df[bad_start]) + df = df[~bad_start] + + # discard entries with end time preceding start time + bad_end = df['End (s)'] < df['Start (s)'] + if bad_end.any(): + print('NOTE: These rows will be discarded because their End times precede their Start times:') + print(df[bad_end]) + df = df[~bad_end] + + # compute durations + df.insert( + column = 'Duration (s)', + value = df['End (s)'] - df['Start (s)'], + loc = 2, # insert after 'End (s)' + ) + + # replace some NaNs + df.fillna({ + 'Duration (s)': 0, + 'Type': 'Other', + }, inplace = True) + + # sort entries by time + df.sort_values([ + 'Start (s)', + 'Duration (s)', + ], inplace = True) + + # add 'Label' column to indicate where these epochs came from + df.insert( + column = 'Label', + value = '(from epoch encoder file)', + loc = 4, # insert after 'Type' + ) + + # return the dataframe + return df + +def ReadSpikesFile(metadata, blk): + """ + Read in spikes identified by spike sorting with tridesclous. 
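+
+    The file is expected to be a headerless CSV with two columns: the sample
+    index of each spike and the integer cluster label assigned by the spike
+    sorter. Rows with negative cluster labels are discarded, and clusters are
+    optionally merged (with unlisted clusters dropped) according to
+    metadata['tridesclous_merge'].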
+ """ + + if metadata['tridesclous_file'] is None or metadata['tridesclous_channels'] is None: + + return None + + else: + + # parse the file and create a dataframe + df = pd.read_csv(abs_path(metadata, 'tridesclous_file'), names = ['index', 'label']) + + # drop clusters with negative labels + df = df[df['label'] >= 0] + + if metadata['tridesclous_merge']: + # merge some clusters and drop all others + new_labels = [] + for clusters_to_merge in metadata['tridesclous_merge']: + new_label = clusters_to_merge[0] + new_labels.append(new_label) + df.loc[df['label'].isin(clusters_to_merge), 'label'] = new_label + df = df[df['label'].isin(new_labels)] + + # return the dataframe + return df + +def CreateNeoEpochsFromDataframe(dataframe, metadata, file_origin): + """ + + """ + + epochs_list = [] + + if dataframe is not None: + + # keep only rows with a positive duration + dataframe = dataframe[dataframe['Duration (s)'] > 0] + + # group epochs by type + for type_name, df in dataframe.groupby('Type'): + + # create a Neo Epoch for each type + epoch = neo.Epoch( + name = type_name, + file_origin = file_origin, + times = df['Start (s)'].values * pq.s, + durations = df['Duration (s)'].values * pq.s, + labels = df['Label'].values, + ) + + epochs_list.append(epoch) + + # return the list of Neo Epochs + return epochs_list + +def CreateNeoEventsFromDataframe(dataframe, metadata, file_origin): + """ + + """ + + events_list = [] + + if dataframe is not None: + + # group events by type + for type_name, df in dataframe.groupby('Type'): + + # create a Neo Event for each type + event = neo.Event( + name = type_name, + file_origin = file_origin, + times = df['Start (s)'].values * pq.s, + labels = df['Label'].values, + ) + + events_list.append(event) + + # return the list of Neo Events + return events_list + +def CreateNeoSpikeTrainsFromDataframe(dataframe, metadata, t_start, t_stop, sampling_period): + """ + + """ + + spiketrain_list = [] + + if dataframe is not None: + + # group spikes by cluster label + for spike_label, df in dataframe.groupby('label'): + + # look up the channels that this unit was found on + channels = metadata['tridesclous_channels'][spike_label] + + # create a Neo SpikeTrain for each cluster label + st = neo.SpikeTrain( + name = str(spike_label), + file_origin = abs_path(metadata, 'tridesclous_file'), + channels = channels, # custom annotation + amplitude = None, # custom annotation + times = t_start + sampling_period * df['index'].values, + t_start = t_start, + t_stop = t_stop, + ) + + spiketrain_list.append(st) + + return spiketrain_list + +def ApplyFilters(metadata, blk): + """ + + """ + + if metadata['filters'] is not None: + + signalNameToIndex = {sig.name:i for i, sig in enumerate(blk.segments[0].analogsignals)} + + for sig_filter in metadata['filters']: + + index = signalNameToIndex.get(sig_filter['channel'], None) + if index is None: + + print('Warning: skipping filter with channel name {} because channel was not found!'.format(sig_filter['channel'])) + + else: + + high = sig_filter.get('highpass', None) + low = sig_filter.get('lowpass', None) + if high: + high *= pq.Hz + if low: + low *= pq.Hz + blk.segments[0].analogsignals[index] = elephant.signal_processing.butter( # may raise a FutureWarning + signal = blk.segments[0].analogsignals[index], + highpass_freq = high, + lowpass_freq = low, + ) + + return blk + +def RunAmplitudeDiscriminators(metadata, blk): + """ + + """ + + spiketrain_list = [] + + if metadata['amplitude_discriminators'] is not None: + + signalNameToIndex = {sig.name:i 
for i, sig in enumerate(blk.segments[0].analogsignals)} + epochs = blk.segments[0].epochs + + # classify spikes by amplitude + for discriminator in metadata['amplitude_discriminators']: + + index = signalNameToIndex.get(discriminator['channel'], None) + if index is None: + + print('Warning: skipping amplitude discriminator with channel name {} because channel was not found!'.format(discriminator['channel'])) + + else: + + sig = blk.segments[0].analogsignals[index] + st = DetectSpikes(sig, discriminator, epochs) + spiketrain_list.append(st) + + return spiketrain_list + + +def DetectSpikes(sig, discriminator, epochs): + """ + + """ + + assert sig.name == discriminator['channel'], 'sig name "{}" does not match amplitude discriminator channel "{}"'.format(sig.name, discriminator['channel']) + + min_threshold = min(discriminator['amplitude']) + max_threshold = max(discriminator['amplitude']) + if min_threshold >= 0 and max_threshold > 0: + sign = 'above' + elif min_threshold < 0 and max_threshold <= 0: + sign = 'below' + else: + raise ValueError('amplitude discriminator must have two nonnegative thresholds or two nonpositive thresholds: {}'.format(discriminator)) + + spikes_crossing_min = elephant.spike_train_generation.peak_detection(sig, min_threshold*pq.uV, sign, 'raw') + spikes_crossing_max = elephant.spike_train_generation.peak_detection(sig, max_threshold*pq.uV, sign, 'raw') + if sign == 'above': + spikes_between_min_and_max = np.setdiff1d(spikes_crossing_min, spikes_crossing_max) + elif sign == 'below': + spikes_between_min_and_max = np.setdiff1d(spikes_crossing_max, spikes_crossing_min) + else: + raise ValueError('sign should be "above" or "below": {}'.format(sign)) + + st = neo.SpikeTrain( + name = discriminator['name'], + channels = [discriminator['channel']], # custom annotation + amplitude = discriminator['amplitude'], # custom annotation + times = spikes_between_min_and_max * pq.s, + t_start = sig.t_start, + t_stop = sig.t_stop, + ) + + if 'epoch' in discriminator: + + time_masks = [] + if isinstance(discriminator['epoch'], str): + # search for matching epochs + ep = next((ep for ep in epochs if ep.name == discriminator['epoch']), None) + if ep is not None: + # select spike times that fall within each epoch + for t_start, duration in zip(ep.times, ep.durations): + t_stop = t_start + duration + time_masks.append((t_start <= st) & (st < t_stop)) + else: + # no matching epochs found + time_masks.append([False] * len(st)) + else: + # may eventually implement lists of ordered pairs, but + # for now raise an error + raise ValueError('amplitude discriminator epoch could not be handled: {}'.format(discriminator['epoch'])) + + # select the subset of spikes that fall within the epoch + # windows + st = st[np.any(time_masks, axis=0)] + + return st diff --git a/neurotic/datasets/download.py b/neurotic/datasets/download.py new file mode 100644 index 0000000..90ce2a8 --- /dev/null +++ b/neurotic/datasets/download.py @@ -0,0 +1,183 @@ +# -*- coding: utf-8 -*- +""" + +""" + +import os +import urllib +from getpass import getpass +import numpy as np +from tqdm.auto import tqdm + +from urllib.request import HTTPBasicAuthHandler, HTTPPasswordMgrWithDefaultRealm +from ..datasets.ftpauth import FTPBasicAuthHandler + + +# install HTTP and FTP authentication handlers, the latter of which also adds +# reliable file size retrieval before downloading +_max_bad_login_attempts = 3 +_http_auth_handler = HTTPBasicAuthHandler(HTTPPasswordMgrWithDefaultRealm()) +_ftp_auth_handler = FTPBasicAuthHandler() +_opener = 
urllib.request.build_opener(_http_auth_handler, _ftp_auth_handler) +urllib.request.install_opener(_opener) + + +def _auth_needed(url): + """ + Determine whether authentication is needed by attempting to make a + connection + """ + + # escape spaces and other unsafe characters + url = urllib.parse.quote(url, safe='/:') + + try: + # try to connect + with urllib.request.urlopen(url) as dist: + # if connection was successful, additional auth is not needed + return False + + except urllib.error.HTTPError as e: + + if e.code == 401: + # unauthorized + return True + + elif e.code == 403: + # forbidden + raise + + else: + raise + + except urllib.error.URLError as e: + + # special case for ftp errors + if e.reason.startswith("ftp error: error_perm('"): + reason = e.reason[23:-2] + code = int(reason[:3]) + + if code in [530, 553]: + # unauthorized + return True + + else: + raise + + else: + # http server unresponsive or server refused + raise + + +def _authenticate(url): + """ + Perform HTTP or FTP authentication + """ + + # escape spaces and other unsafe characters + url = urllib.parse.quote(url, safe='/:') + + bad_login_attempts = 0 + while True: + try: + # try to connect + with urllib.request.urlopen(url) as dist: + # if connection was successful, authentication is done + return True + + except urllib.error.HTTPError as e: + + if e.code == 401: + # unauthorized -- will try to authenticate with handler + handler = _http_auth_handler + + elif e.code == 403: + # forbidden + raise + + else: + raise + + except urllib.error.URLError as e: + + # special case for ftp errors + if e.reason.startswith("ftp error: error_perm('"): + reason = e.reason[23:-2] + code = int(reason[:3]) + + if code in [530, 553]: + # unauthorized -- will try to authenticate with handler + handler = _ftp_auth_handler + + else: + raise + + else: + # http server unresponsive or server refused + raise + + + if bad_login_attempts >= _max_bad_login_attempts: + print('Unauthorized: Aborting login') + return False + else: + if bad_login_attempts == 0: + print('Authentication required') + else: + print(f'Failed login ({bad_login_attempts} of ' + f'{_max_bad_login_attempts}): Bad login credentials, or ' + f'else user {user} does not have permission to access ' + f'{url}') + bad_login_attempts += 1 + + netloc = urllib.parse.urlparse(url).netloc + host, port = urllib.parse.splitport(netloc) + user = input(f'User name on {host}: ') + if not user: + print('No user given, aborting login') + return False + passwd = getpass('Password: ') + handler.add_password(None, netloc, user, passwd) + +def _download(url, local_file, bytes_per_chunk=1024*8, show_progress=True): + """ + Download after authenticating if necessary + """ + + auth_needed = _auth_needed(url) + if auth_needed: + authenticated = _authenticate(url) + + if not auth_needed or (auth_needed and authenticated): + + # create the containing directory if necessary + if not os.path.exists(os.path.dirname(local_file)): + os.makedirs(os.path.dirname(local_file)) + + with urllib.request.urlopen(urllib.parse.quote(url, safe='/:')) as dist: + with open(local_file, 'wb') as f: + file_size_in_bytes = int(dist.headers['Content-Length']) + num_chunks = int(np.ceil(file_size_in_bytes/bytes_per_chunk)) + if show_progress: + pbar = tqdm(total=num_chunks*bytes_per_chunk, unit='B', unit_scale=True) + while True: + chunk = dist.read(bytes_per_chunk) + if chunk: + f.write(chunk) + if show_progress: + pbar.update(bytes_per_chunk) + else: + break + if show_progress: + pbar.close() + + +def safe_download(url, 
local_file, **kwargs): + """ + Download unless the file already exists locally + """ + if not os.path.exists(local_file): + print(f'Downloading {os.path.basename(local_file)}') + _download(url, local_file, **kwargs) + else: + print(f'Skipping {os.path.basename(local_file)} (already exists)') diff --git a/neurotic/datasets/ftpauth.py b/neurotic/datasets/ftpauth.py new file mode 100644 index 0000000..ea37252 --- /dev/null +++ b/neurotic/datasets/ftpauth.py @@ -0,0 +1,168 @@ +# -*- coding: utf-8 -*- +""" + +""" + +import ftplib +import urllib +from urllib.request import FTPHandler, HTTPPasswordMgr +from urllib.parse import splitport, splituser, unquote + + +class FTPBasicAuthHandler(FTPHandler): + """ + This subclass of urllib.request.FTPHandler implements basic authentication + management for FTP connections. Like HTTPBasicAuthHandler, this handler for + FTP connections has a password manager that it checks for login credentials + before connecting to a server. + + This subclass also ensures that file size is included in the response + header, which can fail for some FTP servers if the original FTPHandler is + used. + + This handler can be installed globally in a Python session so that calls + to urllib.request.urlopen('ftp://...') will use it automatically: + + >>> handler = FTPBasicAuthHandler() + >>> handler.add_password(None, uri, user, passwd) # realm must be None + >>> opener = urllib.request.build_opener(handler) + >>> urllib.request.install_opener(opener) + """ + + def __init__(self, password_mgr=None): + """ + + """ + + if password_mgr is None: + password_mgr = HTTPPasswordMgr() + self.passwd = password_mgr + self.add_password = self.passwd.add_password + return super().__init__() + + def ftp_open(self, req): + """ + When ftp requests are made using this handler, this function gets + called at some point, and it in turn calls the connect_ftp method. In + this subclass's reimplementation of connect_ftp, the FQDN of the + request's host is needed for looking up login credentials in the + password manager. However, by the time connect_ftp is called, that + information has been stripped away, and the host argument passed to + connect_ftp contains only the host's IP address instead of the FQDN. + This reimplementation of ftp_open, which is little more than a + copy-and-paste from the superclass's implementation, captures the + original host FQDN before it is replaced with the IP address and saves + it for later use. + + This reimplementation also ensures that the file size appears in the + response header by querying for it directly. For some FTP servers the + original implementation should handle this (retrlen should contain the + file size). However, for others this can fail silently due to the + server response not matching an anticipated regular expression. 
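+
+        Note that the file size is obtained here by querying the server
+        directly with the FTP SIZE command. Servers that do not support SIZE
+        may cause this request to fail where the original implementation
+        would simply have omitted the Content-length header.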
+ """ + + import sys + import email + import socket + from urllib.error import URLError + from urllib.parse import splitattr, splitpasswd, splitvalue + from urllib.response import addinfourl + + #################################################### + # COPIED FROM FTPHandler.ftp_open (PYTHON 3.6.6) # + # WITH JUST A FEW ADDITIONS # + #################################################### + + import ftplib + import mimetypes + host = req.host + if not host: + raise URLError('ftp error: no host given') + host, port = splitport(host) + if port is None: + port = ftplib.FTP_PORT + else: + port = int(port) + + # username/password handling + user, host = splituser(host) + if user: + user, passwd = splitpasswd(user) + else: + passwd = None + host = unquote(host) + user = user or '' + passwd = passwd or '' + + ############################################ + # DIFFERENT FROM FTPHandler.ftp_open + # save the host FQDN for later + self.last_req_host = host + ############################################ + try: + host = socket.gethostbyname(host) + except OSError as msg: + raise URLError(msg) + path, attrs = splitattr(req.selector) + dirs = path.split('/') + dirs = list(map(unquote, dirs)) + dirs, file = dirs[:-1], dirs[-1] + if dirs and not dirs[0]: + dirs = dirs[1:] + try: + fw = self.connect_ftp(user, passwd, host, port, dirs, req.timeout) + type = file and 'I' or 'D' + for attr in attrs: + attr, value = splitvalue(attr) + if attr.lower() == 'type' and \ + value in ('a', 'A', 'i', 'I', 'd', 'D'): + type = value.upper() + ############################################ + # DIFFERENT FROM FTPHandler.ftp_open + size = fw.ftp.size(file) + ############################################ + fp, retrlen = fw.retrfile(file, type) + headers = "" + mtype = mimetypes.guess_type(req.full_url)[0] + if mtype: + headers += "Content-type: %s\n" % mtype + if retrlen is not None and retrlen >= 0: + headers += "Content-length: %d\n" % retrlen + ############################################ + # DIFFERENT FROM FTPHandler.ftp_open + elif size is not None and size >= 0: + headers += "Content-length: %d\n" % size + ############################################ + headers = email.message_from_string(headers) + return addinfourl(fp, headers, req.full_url) + except ftplib.all_errors as exp: + exc = URLError('ftp error: %r' % exp) + raise exc.with_traceback(sys.exc_info()[2]) + + def connect_ftp(self, user, passwd, host, port, dirs, timeout): + """ + Unless authentication credentials are provided in the request URL + (ftp://user:passwd@host/path), this method will be called with empty + user and passwd arguments. In that case, this reimplementation of + connect_ftp checks the password manager for credentials matching the + last_req_host (the host argument will be an IP address instead of the + FQDN and is thereby useless if the password manager is keyed by FQDN). + """ + + if not user and not passwd: + user, passwd = self.passwd.find_user_password(None, self.last_req_host) + return super().connect_ftp(user, passwd, host, port, dirs, timeout) + + +def setup(): + """ + Install FTPBasicAuthHandler as the global default FTP handler + + Note that install_opener will remove all other non-default handlers + installed in a different opener, such as an HTTPBasicAuthHandler. 
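+
+    A minimal usage sketch, assuming a server that requires a login (the
+    host and credentials below are placeholders):
+
+    >>> handler = setup()
+    >>> handler.add_password(None, 'ftp.example.com', 'user', 'passwd')
+    >>> urllib.request.urlopen('ftp://ftp.example.com/path/to/file')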
+ """ + + handler = FTPBasicAuthHandler() + opener = urllib.request.build_opener(handler) + urllib.request.install_opener(opener) + return handler diff --git a/neurotic/datasets/metadata.py b/neurotic/datasets/metadata.py new file mode 100644 index 0000000..80547c4 --- /dev/null +++ b/neurotic/datasets/metadata.py @@ -0,0 +1,601 @@ +# -*- coding: utf-8 -*- +""" +Import metadata about experimental data +""" + +import os +import urllib +import yaml +import ipywidgets +from IPython.display import HTML + +from ..datasets.download import safe_download + + +def abs_path(metadata, file): + """ + Convert the relative path of file to an absolute path using data_dir + """ + if metadata[file] is None: + return None + else: + return os.path.normpath(os.path.join(metadata['data_dir'], metadata[file])) + + +def abs_url(metadata, file): + """ + Convert the relative path of file to a full URL using remote_data_dir + """ + if metadata[file] is None or metadata['remote_data_dir'] is None: + return None + else: + file_path = metadata[file].replace(os.sep, '/') + url = '/'.join([metadata['remote_data_dir'], file_path]) + # url = urllib.parse.unquote(url) + # url = urllib.parse.quote(url, safe='/:') + return url + + +def is_url(url): + """ + Returns True only if the parameter begins with the form :// + """ + try: + result = urllib.parse.urlparse(url) + return all([result.scheme, result.netloc]) + except Exception: + return False + + +def download_file(metadata, file): + """ + Download a file + """ + + if not is_url(metadata['remote_data_dir']): + print('metadata[remote_data_dir] is not a full URL') + return + + if metadata[file]: + + # create directories if necessary + if not os.path.exists(os.path.dirname(abs_path(metadata, file))): + os.makedirs(os.path.dirname(abs_path(metadata, file))) + + # download the file only if it does not already exist + safe_download(abs_url(metadata, file), abs_path(metadata, file)) + + +def DownloadAllDataFiles(metadata): + """ + Download all files associated with metadata + """ + + if not is_url(metadata['remote_data_dir']): + print('metadata[remote_data_dir] is not a full URL') + return + + for file in [k for k in metadata if k.endswith('_file')]: + download_file(metadata, file) + + +def _defaults_for_key(key): + """ + + """ + + defaults = { + # store the key with the metadata + 'key': key, + + # description of data set + 'description': None, + + # the path of the directory containing the data on the local system + # - this may be an absolute or relative path, but not None since data + # must be located locally + # - if it is a relative path, it will be interpreted by LoadMetadata as + # relative to local_data_root and will be converted to an absolute + # path + 'data_dir': None, + + # the path of the directory containing the data on a remote server + # - this may be a full URL or a relative path, or None if there exists + # no remote data store + # - if it is a relative path, it will be interpreted by LoadMetadata as + # relative to remote_data_root and will be converted to a full URL + 'remote_data_dir': None, + + # the ephys data file + # - path relative to data_dir and remote_data_dir + 'data_file': None, + + # digital filters to apply before analysis and plotting + # 0 <= highpass <= lowpass < sample_rate/2 + # - e.g. [{'channel': 'Channel A', 'highpass': 0, 'lowpass': 50}, ...] 
+ 'filters': None, + + # the annotations file + # - path relative to data_dir and remote_data_dir + 'annotations_file': None, + + # the epoch encoder file + # - path relative to data_dir and remote_data_dir + 'epoch_encoder_file': None, + + # list of labels for epoch encoder + 'epoch_encoder_possible_labels': ['Type 1', 'Type 2', 'Type 3'], + + # list of dicts giving name, channel, amplitude window, epoch window for each unit + # - e.g. [{'name': 'Unit X', 'channel': 'Channel A', 'amplitude': [75, 150], 'epoch': 'Type 1'}, ...] + 'amplitude_discriminators': None, + + # the output file of a tridesclous spike sorting analysis + # - path relative to data_dir and remote_data_dir + 'tridesclous_file': None, + + # dict mapping spike ids to lists of channel indices + # - e.g. {0: ['Channel A'], 1: ['Channel A'], ...} to indicate clusters 0 and 1 are both on channel A + # - e.g. {0: ['Channel A', 'Channel B'], ...} to indicate cluster 0 is on both channels A and B + 'tridesclous_channels': None, + + # list of lists of spike ids specifying how to merge clusters + # - e.g. [[0, 1, 2], [3, 4]] to merge clusters 1 and 2 into 0, merge 4 into 3, and discard all others + # - e.g. [[0], [1], [2], [3], [4]] to keep clusters 0-4 as they are and discard all others + 'tridesclous_merge': None, + + # the video file + # - path relative to data_dir and remote_data_dir + 'video_file': None, + + # the video time offset in seconds + 'video_offset': None, + + # list of ordered pairs specifying times and durations that the ephys + # data collection was paused while the video continued recording + # - e.g. [[60, 10], [120, 10], [240, 10]] for three 10-second pauses + # occurring at times 1:00, 2:00, 3:00 according to the daq, which + # would correspond to times 1:00, 2:10, 3:20 according to the video + 'video_jumps': None, + + # a factor to multiply the video frame rate by to correct for async + # error that accumulates over time at a constant rate + # - a value less than 1 will decrease the frame rate and shift video + # events to later times + # - a value greater than 1 will increase the frame rate and shift video + # events to earlier times + # - a good estimate can be obtained by taking the amount of time + # between two events in the video and dividing by the amount of time + # between the same two events in the data + 'video_rate_correction': None, + + # list the channels in the order they should be plotted + # - e.g. [{'channel': 'Channel A', 'ylabel': 'My channel', 'ylim': [-120, 120], 'units': 'uV'}, ...] + 'plots': None, + + # amount of time in seconds to plot initially + 't_width': 40, + } + + return defaults + +def LoadMetadata(file = 'metadata.yml', local_data_root = None, remote_data_root = None): + """ + Read metadata stored in a YAML file about available collections of data, + assign defaults to missing parameters, and resolve absolute paths for local + data stores and full URLs for remote data stores. + + `local_data_root` must be an absolute or relative path on the local system, + or None. If it is a relative path, it is relative to the current working + directory. If it is None, its value defaults to the directory containing + `file`. + + `remote_data_root` must be a full URL or None. If it is None, `file` will + be checked for a fallback value. "remote_data_root" may be provided in the + YAML file as a special entry with no child properties. Any non-None value + passed to this function will override the value provided in the file. 
If + both are unspecified, it is assumed that no remote data store exists. + + The "data_dir" property must be provided for every data set in `file` and + specifies the directory on the local system containing the data files. + "data_dir" may be an absolute path or a relative path with respect to + `local_data_root`. If it is a relative path, it will be converted to an + absolute path. + + The "remote_data_dir" property is optional for every data set in `file` and + specifies the directory on a remote server containing the data files. + "remote_data_dir" may be a full URL or a relative path with respect to + `remote_data_root`. If it is a relative path, it will be converted to a + full URL. + + File paths (e.g., "data_file", "video_file") are assumed to be relative to + both "data_dir" and "remote_data_dir" (i.e., the local and remote data + stores mirror one another) and can be resolved with `abs_path` or + `abs_url`. + """ + + assert file is not None, 'metadata file must be specified' + assert os.path.exists(file), 'metadata file "{}" cannot be found'.format(file) + + # local_data_root defaults to the directory containing file + if local_data_root is None: + local_data_root = os.path.dirname(file) + + # load metadata from file + with open(file) as f: + md = yaml.safe_load(f) + + # remove special entry "remote_data_root" from the dict if it exists + remote_data_root_from_file = md.pop('remote_data_root', None) + + # use remote_data_root passed to function preferentially + if remote_data_root is not None: + if not is_url(remote_data_root): + raise ValueError('"remote_data_root" passed to function is not a full URL: "{}"'.format(remote_data_root)) + else: + # use the value passed to the function + pass + elif remote_data_root_from_file is not None: + if not is_url(remote_data_root_from_file): + raise ValueError('"remote_data_root" provided in file is not a full URL: "{}"'.format(remote_data_root_from_file)) + else: + # use the value provided in the file + remote_data_root = remote_data_root_from_file + else: + # both potential sources of remote_data_root are None + pass + + # iterate over all data sets + for key in md: + + assert type(md[key]) is dict, 'File "{}" may be formatted incorrectly, especially beginning with entry "{}"'.format(file, key) + + # fill in missing metadata with default values + defaults = _defaults_for_key(key) + for k in defaults: + md[key].setdefault(k, defaults[k]) + + # determine the absolute path of the local data directory + if md[key]['data_dir'] is not None: + # data_dir is either an absolute path already or is specified + # relative to local_data_root + if os.path.isabs(md[key]['data_dir']): + dir = md[key]['data_dir'] + else: + dir = os.path.abspath(os.path.join(local_data_root, md[key]['data_dir'])) + else: + # data_dir is a required property + raise ValueError('"data_dir" missing for "{}"'.format(key)) + md[key]['data_dir'] = os.path.normpath(dir) + + # determine the full URL to the remote data directory + if md[key]['remote_data_dir'] is not None: + # remote_data_dir is either a full URL already or is specified + # relative to remote_data_root + if is_url(md[key]['remote_data_dir']): + url = md[key]['remote_data_dir'] + elif is_url(remote_data_root): + url = '/'.join([remote_data_root, md[key]['remote_data_dir']]) + else: + url = None + else: + # there is no remote data store + url = None + md[key]['remote_data_dir'] = url + + return md + +def selector_labels(all_metadata): + """ + + """ + + # indicate presence of local data files with symbols + 
has_local_data = {} + for key, metadata in all_metadata.items(): + filenames = [k for k in metadata if k.endswith('_file') and metadata[k] is not None] + files_exist = [os.path.exists(abs_path(metadata, file)) for file in filenames] + if all(files_exist): + has_local_data[key] = '◆' + elif any(files_exist): + has_local_data[key] = '⬖' + else: + has_local_data[key] = '◇' + + # indicate lack of video_offset with an exclamation point unless there is + # no video_file + has_video_offset = {} + for key, metadata in all_metadata.items(): + if metadata['video_offset'] is None and metadata['video_file'] is not None: + has_video_offset[key] = '!' + else: + has_video_offset[key] = ' ' + + # create display text for the selector from keys and descriptions + longest_key_length = max([len(k) for k in all_metadata.keys()]) + labels = [ + has_local_data[k] + + has_video_offset[k] + + ' ' + + k.ljust(longest_key_length + 4) + + str(all_metadata[k]['description'] + if all_metadata[k]['description'] else '') + + for k in all_metadata.keys()] + + return labels + + +class MetadataManager(): + """ + A class for managing metadata. + + A metadata file can be specified at initialization, in which case it is + read immediately. The file contents are stored in `all_metadata`. + + >>> metadata = MetadataManager(file='metadata.yml') + >>> print(metadata.all_metadata) + + File contents can be reloaded after they have been changed, or after + changing `file`, using the `load` method. + + >>> metadata = MetadataManager() + >>> metadata.file = 'metadata.yml' + >>> metadata.load() + + A particular metadata set contained within the file can be selected at + initialization with `initial_selection` or later using the `select` method. + After making a selection, the selected metadata set is accessible at + `metadata.selected_metadata`, e.g. + + >>> metadata = MetadataManager(file='metadata.yml') + >>> metadata.select('Data Set 5') + >>> print(metadata.selected_metadata['data_file']) + + A compact indexing method is implemented that allows the selected metadata + set to be accessed directly, e.g. + + >>> print(metadata['data_file']) + + This allows the MetadataManager to be passed to functions expecting a + simple dictionary corresponding to a single metadata set, and the selected + metadata set will be used automatically. + + Files associated with the selected metadata set can be downloaded + individually or all together, e.g. + + >>> metadata.download('video_file') + + or + + >>> metadata.download_all_data_files() + + The absolute path to a local file or the full URL to a remote file + associated with the selected metadata set can be resolved with the + `abs_path` and `abs_url` methods, e.g. + + >>> print(metadata.abs_path('data_file')) + >>> print(metadata.abs_url('data_file')) + """ + + def __init__(self, file=None, local_data_root=None, remote_data_root=None, initial_selection=None): + """ + Initialize a new MetadataManager. + """ + + self.file = file + self.local_data_root = local_data_root + self.remote_data_root = remote_data_root + + self.all_metadata = None + self._selection = None + if self.file is not None: + self.load() + if initial_selection is not None: + self.select(initial_selection) + + def load(self): + """ + Read the metadata file. + """ + self.all_metadata = LoadMetadata(self.file, self.local_data_root, self.remote_data_root) + if self._selection not in self.all_metadata: + self._selection = None + + def select(self, selection): + """ + Select a metadata set. 
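+
+        A ValueError is raised if `selection` is not found in the file, and
+        a reminder is printed if no metadata file has been loaded yet.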
+ """ + if self.all_metadata is None: + print('load metadata before selecting') + elif selection not in self.all_metadata: + raise ValueError('{} was not found in {}'.format(selection, self.file)) + else: + self._selection = selection + + @property + def selected_metadata(self): + """ + The access point for the selected metadata set. + """ + if self._selection is None: + return None + else: + return self.all_metadata[self._selection] + + def abs_path(self, file): + """ + Convert the relative path of file to an absolute path using data_dir. + """ + return abs_path(self.selected_metadata, file) + + def abs_url(self, file): + """ + Convert the relative path of file to a full URL using remote_data_dir. + """ + return abs_url(self.selected_metadata, file) + + def download(self, file): + """ + Download a file associated with the selected metadata set. + """ + download_file(self.selected_metadata, file) + + def download_all_data_files(self): + """ + Download all files associated with the selected metadata set. + """ + DownloadAllDataFiles(self.selected_metadata) + + def __iter__(self, *args): + """ + Pass-through method for using __iter__ on the selected metadata set. + """ + return self.selected_metadata.__iter__(*args) + + def __getitem__(self, *args): + """ + Pass-through method for using __getitem__ on the selected metadata set. + """ + return self.selected_metadata.__getitem__(*args) + + def __setitem__(self, *args): + """ + Pass-through method for using __setitem__ on the selected metadata set. + """ + return self.selected_metadata.__setitem__(*args) + + def __delitem__(self, *args): + """ + Pass-through method for using __delitem__ on the selected metadata set. + """ + return self.selected_metadata.__delitem__(*args) + + def get(self, *args): + """ + Pass-through method for using get() on the selected metadata set. + """ + return self.selected_metadata.get(*args) + + def setdefault(self, *args): + """ + Pass-through method for using setdefault() on the selected metadata set. + """ + return self.selected_metadata.setdefault(*args) + + +class MetadataSelector(MetadataManager, ipywidgets.VBox): + """ + Interactive list box for Jupyter notebooks that allows the user to select + which metadata set they would like to work with. + + >>> metadata = MetadataSelector(file='metadata.yml') + >>> display(metadata) + + After clicking on an item in the list, the selected metadata set is + accessible at `metadata.selected_metadata`, e.g. + + >>> metadata.selected_metadata['data_file'] + + A compact indexing method is implemented that allows the selected metadata + set to be accessed directly, e.g. + + >>> metadata['data_file'] + + This allows the MetadataSelector to be passed to functions expecting a + simple dictionary corresponding to a single metadata set, and the selected + metadata set will be used automatically. + """ + + def __init__(self, file=None, local_data_root=None, remote_data_root=None, initial_selection=None): + """ + Initialize a new MetadataSelector. 
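+
+        Accepts the same arguments as MetadataManager. If `file` is given,
+        the metadata file is read immediately and the selector list is
+        populated.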
+ """ + + # load the metadata and set the initial selection + MetadataManager.__init__( + self, + file=file, + local_data_root=local_data_root, + remote_data_root=remote_data_root, + initial_selection=initial_selection) + + # initialize the Jupyter widget container + ipywidgets.VBox.__init__(self) + + # create the selector widget + self.selector = ipywidgets.Select() + + if self.all_metadata is not None: + # create display text for the selector from keys and descriptions + self.selector.options = zip(selector_labels(self.all_metadata), self.all_metadata.keys()) + + # set initial selection + if self._selection is None: + self.selector.value = list(self.all_metadata)[0] + else: + self.selector.value = self._selection + + # use monospace font for items in the selector + self.selector.add_class('metadata-selector') + try: + display(HTML('')) + except NameError: + # likely operating outside Jupyter notebook + pass + + # set other selector display options + self.selector.description = 'Data set:' + self.selector.rows = 20 + self.selector.layout = ipywidgets.Layout(width = '99%') + self.selector.style = {'description_width': 'initial'} + + # configure the _on_select function to be called whenever the selection changes + self.selector.observe(self._on_select, names = 'value') + if self.all_metadata is not None: + self._on_select({'new': self.selector.value}) # run now on initial selection + + # create the reload button + self.reload_button = ipywidgets.Button(icon='refresh', description='Reload', layout=ipywidgets.Layout(height='auto'), disabled=False) + self.reload_button.on_click(self._on_reload_clicked) + + # create the download button + self.download_button = ipywidgets.Button(icon='download', description='Download', layout=ipywidgets.Layout(height='auto'), disabled=False) + self.download_button.on_click(self._on_download_clicked) + + # populate the box + # self.children = [self.selector, self.reload_button, self.download_button] + self.children = [self.selector, self.reload_button] + + def _on_select(self, change): + """ + Run each time the selection changes. + """ + self._selection = self.selector.value + + # warn if video_offset is not set + if self.selected_metadata['video_file'] is not None and self.selected_metadata['video_offset'] is None: + print('Warning: Video sync may be incorrect! video_offset not set for {}'.format(self._selection)) + + def _on_reload_clicked(self, button): + """ + Run each time the reload button is clicked. + """ + self.load() + + if self.all_metadata is not None: + # remember the current selection + old_selection = self._selection + + # changing the options triggers the selection to change + self.selector.options = zip(selector_labels(self.all_metadata), self.all_metadata.keys()) + + # reselect the original selection if it still exists + if old_selection in self.all_metadata: + self.selector.value = old_selection + + def _on_download_clicked(self, button): + """ + Run each time the download button is clicked. 
+ """ + self.download_all_data_files() diff --git a/neurotic/example/metadata.yml b/neurotic/example/metadata.yml new file mode 100644 index 0000000..60a7be0 --- /dev/null +++ b/neurotic/example/metadata.yml @@ -0,0 +1,64 @@ +################################################################################ +# plots: list the channels in the order they should be plotted +# - required parameters: +# channel string specifying name of the channel to be plotted +# note: channel names in AxoGraph files are assumed unique +# - optional parameters: +# units string specifying units to convert signal to +# default: uV for voltages, mN for forces +# ylabel the label for the y-axis +# default: channel name stored in AxoGraph file +# ylim the plot range for the y-axis +# default: [-120,120] for voltages, [-10,300] for forces +################################################################################ + + +example dataset: + description: This is an example data set + + # these data are a subset of 2018-06-21_IN-VIVO_JG-08 002 + data_dir: example-data + remote_data_dir: https://github.com/jpgill86/analysis/raw/master/example/example-data + data_file: data.axgx + video_file: video.mp4 + annotations_file: annotations.csv + epoch_encoder_file: epoch-encoder.csv + + video_offset: 640.3 + epoch_encoder_possible_labels: [force, large hump, small hump, B38] + amplitude_discriminators: + - name: I2 + channel: I2 + amplitude: [2, 75] + - name: B8a/b + channel: RN + amplitude: [5, 50] + - name: B3 (50-100 uV) + channel: BN2 + amplitude: [50, 100] + - name: B6/B9 ? (26-45 uV) + channel: BN2 + amplitude: [26, 45] + - name: B38 ? (17-26 uV) + channel: BN2 + amplitude: [17, 26] + epoch: B38 + - name: B4/B5 + channel: BN3 + amplitude: [75, 150] + filters: + - channel: I2 + lowpass: 100 + - channel: Force + lowpass: 50 + plots: + - channel: I2 + ylim: [-30, 30] + - channel: RN + ylim: [-60, 60] + - channel: BN2 + ylim: [-120, 120] + - channel: BN3 + ylim: [-150, 150] + - channel: Force + ylim: [-10, 300] diff --git a/neurotic/gui/__init__.py b/neurotic/gui/__init__.py new file mode 100644 index 0000000..453a4f3 --- /dev/null +++ b/neurotic/gui/__init__.py @@ -0,0 +1,9 @@ +# -*- coding: utf-8 -*- +""" + +""" + +from ..gui.epochencoder import * +from ..gui.icons import * +from ..gui.config import * +from ..gui.standalone import * diff --git a/neurotic/gui/config.py b/neurotic/gui/config.py new file mode 100644 index 0000000..fa613bd --- /dev/null +++ b/neurotic/gui/config.py @@ -0,0 +1,605 @@ +# -*- coding: utf-8 -*- +""" + +""" + +import os +from collections import OrderedDict +from functools import wraps + +import numpy as np +import quantities as pq +import ephyviewer +import ipywidgets + +from ..datasets import abs_path +from ..utils import EstimateVideoJumpTimes, NeoEpochToDataFrame +from ..gui.epochencoder import MyWritableEpochSource + +pq.mN = pq.UnitQuantity('millinewton', pq.N/1e3, symbol = 'mN'); # define millinewton + +def _fix_FrameGrabber_set_file(set_file_func): + """ + For some video files, ephyviewer sets the incorrect start_time for its + FrameGrabber when the set_file method is called. This monkey patch works + around the problem by resetting start_time to 0 after set_file is called. 
+ """ + @wraps(set_file_func) + def wrapper(*args, **kwargs): + result = set_file_func(*args, **kwargs) + frame_grabber = args[0] + frame_grabber.start_time = 0 + return result + return wrapper +ephyviewer.FrameGrabber.set_file = _fix_FrameGrabber_set_file(ephyviewer.FrameGrabber.set_file) + +def defaultKeepSignal(sig): + """ + + """ + return (not sig.name.startswith('Analog Input #')) and (sig.name != 'Clock') + +def defaultUnits(sig): + """ + + """ + + mapping = { + 'V': 'uV', # convert voltages to microvolts + 'N': 'mN', # convert forces to millinewtons + } + mapping = {pq.Quantity(1, k).dimensionality.simplified: v for k, v in mapping.items()} + return mapping.get(sig.units.dimensionality.simplified, sig.units) + +def defaultYLim(sig): + """ + + """ + + mapping = { + 'V': [-120, 120], # plot range for voltages + 'N': [ -10, 300], # plot range for forces + } + mapping = {pq.Quantity(1, k).dimensionality.simplified: v for k, v in mapping.items()} + return mapping.get(sig.units.dimensionality.simplified, [-1, 1]) + +def setDefaultsForPlots(metadata, blk): + """ + + """ + + sigs = blk.segments[0].analogsignals + signalNameToIndex = {sig.name:i for i, sig in enumerate(sigs)} + + if metadata['plots'] is None: + metadata['plots'] = [{'channel': sig.name} for sig in sigs if defaultKeepSignal(sig)] + + plots = [] + for plot in metadata['plots']: + index = signalNameToIndex.get(plot['channel'], None) + if index is None: + print('Warning: removing plot with channel name "{}" because channel was not found in blk!'.format(plot['channel'])) + else: + plot['index'] = index + plot.setdefault('units', defaultUnits(sigs[index])) + plot.setdefault('ylim', defaultYLim(sigs[index])) + plot.setdefault('ylabel', sigs[index].name) + plots.append(plot) + metadata['plots'] = plots + + return metadata['plots'] + +class EphyviewerConfigurator(ipywidgets.HBox): + """ + + """ + + _toggle_button_defaults = OrderedDict([ + ('traces', {'value': True, 'icon': 'line-chart', 'description': 'Traces'}), + ('traces_rauc', {'value': False, 'icon': 'area-chart', 'description': 'RAUC'}), + # ('freqs', {'value': False, 'icon': 'wifi', 'description': 'Frequencies'}), + ('spike_trains', {'value': True, 'icon': 'barcode', 'description': 'Spike Trains'}), + ('epochs', {'value': True, 'icon': 'align-left', 'description': 'Read-Only Epochs'}), + ('epoch_encoder', {'value': True, 'icon': 'align-left', 'description': 'Epoch Encoder'}), + ('video', {'value': True, 'icon': 'youtube-play', 'description': 'Video'}), + ('event_list', {'value': True, 'icon': 'list', 'description': 'Events'}), + ('data_frame', {'value': False, 'icon': 'table', 'description': 'Annotation Table'}), + ]) + + themes = {} + themes['original'] = None # special keyword to use ephyviewer's defaults + themes['light'] = { + 'cmap': 'Dark2', # dark traces + 'background_color': '#F0F0F0', # light gray + 'vline_color': '#000000AA', # transparent black + 'label_fill_color': '#DDDDDDDD', # transparent light gray + } + themes['dark'] = { + 'cmap': 'Accent', # light traces + 'background_color': 'k', # black + 'vline_color': '#FFFFFFAA', # transparent white + 'label_fill_color': '#222222DD', # transparent dark gray + } + + def __init__(self, metadata, blk, rauc_sigs = None, lazy = False): + """ + + """ + + self.metadata = metadata + self.blk = blk + self.rauc_sigs = rauc_sigs + self.lazy = lazy + + # initialize the box + ipywidgets.HBox.__init__(self) + + # create buttons for controlling which elements to show + self.controls = OrderedDict() + for name, kwargs in 
self._toggle_button_defaults.items(): + self.controls[name] = ipywidgets.ToggleButton(**kwargs) + controls_vbox = ipywidgets.VBox(list(self.controls.values())) + + # permanently disable controls for which inputs are missing + if not self.rauc_sigs: + self.controls['traces_rauc'].value = False + self.controls['traces_rauc'].disabled = True + self.controls['traces_rauc'].tooltip = 'Cannot enable because rauc_sigs is empty' + if not self.blk.segments[0].spiketrains: + self.controls['spike_trains'].value = False + self.controls['spike_trains'].disabled = True + self.controls['spike_trains'].tooltip = 'Cannot enable because there are no spike trains' + if not [ep for ep in self.blk.segments[0].epochs if ep.size > 0 and '(from epoch encoder file)' not in ep.labels]: + self.controls['epochs'].value = False + self.controls['epochs'].disabled = True + self.controls['epochs'].tooltip = 'Cannot enable because there are no read-only epochs' + self.controls['data_frame'].value = False + self.controls['data_frame'].disabled = True + self.controls['data_frame'].tooltip = 'Cannot enable because there are no read-only epochs' + if not [ev for ev in self.blk.segments[0].events if ev.size > 0]: + self.controls['event_list'].value = False + self.controls['event_list'].disabled = True + self.controls['event_list'].tooltip = 'Cannot enable because there are no read-only epochs or events' + if not self.metadata['epoch_encoder_file']: + self.controls['epoch_encoder'].value = False + self.controls['epoch_encoder'].disabled = True + self.controls['epoch_encoder'].tooltip = 'Cannot enable because epoch_encoder_file is not set' + if not self.metadata['video_file']: + self.controls['video'].value = False + self.controls['video'].disabled = True + self.controls['video'].tooltip = 'Cannot enable because video_file is not set' + + # create the launch button + self.launch_button = ipywidgets.Button(icon='rocket', description='Launch', layout=ipywidgets.Layout(height='auto')) + self.launch_button.on_click(self._on_launch_clicked) + + # populate the box + self.children = [controls_vbox, self.launch_button] + + # warn about potential video sync problems + if metadata['video_file'] is not None and metadata['video_offset'] is None: + print('WARNING: Your video will likely be out of sync with your') + print('data because video_offset is unspecified! Consider adding') + print('it to your metadata.') + if metadata['video_file'] is not None and metadata['video_jumps'] is None: + approx_video_jumps = EstimateVideoJumpTimes(blk) + if approx_video_jumps: + print('WARNING: It seems that AxoGraph was paused at least once') + print('during data acquisition, but video_jumps is unspecified.') + print('This will cause your video and data to get out of sync.') + print('Consider adding the following to your metadata:') + print(' video_jumps:') + for t, dur in approx_video_jumps: + print(f' - [{t}, {dur}]') + print('Each ordered pair specifies the timing of a pause and') + print('approximately how long the pause lasted in seconds. The') + print('pause durations are only rough estimates +/- a second! 
You') + print('should refine them by inspecting the video to make sure') + print('your sync is accurate!') + + def is_enabled(self, name): + """ + + """ + if name in self.controls: + return self.controls[name].value + else: + return False + + def enable(self, name): + """ + + """ + self.controls[name].value = True + + def disable(self, name): + """ + + """ + self.controls[name].value = False + + def enable_all(self): + """ + + """ + for name in self.controls: + if not self.controls[name].disabled: + self.enable(name) + + def disable_all(self): + """ + + """ + for name in self.controls: + self.disable(name) + + def _on_launch_clicked(self, button): + """ + + """ + self.launch_ephyviewer() + + def launch_ephyviewer(self, theme='light', support_increased_line_width=False): + """ + + """ + + app = ephyviewer.mkQApp() + win = self.create_ephyviewer_window(theme=theme, support_increased_line_width=support_increased_line_width) + win.show() + app.exec_() + + def create_ephyviewer_window(self, theme='light', support_increased_line_width=False): + """ + + """ + + ######################################################################## + # DATA SOURCES + + seg = self.blk.segments[0] + sigs = seg.analogsignals + + sources = {'signal': [], 'epoch': [], 'event': [], 'spike': []} + sources['epoch'].append(ephyviewer.NeoEpochSource(seg.epochs)) + sources['event'].append(ephyviewer.NeoEventSource(seg.events)) + sources['spike'].append(ephyviewer.NeoSpikeTrainSource(seg.spiketrains)) + + # filter epoch encoder data out of read-only epoch and event lists + # so they are not presented multiple times, and remove empty channels + sources['epoch'][0].all = [ep for ep in sources['epoch'][0].all if len(ep['time']) > 0 and '(from epoch encoder file)' not in ep['label']] + sources['event'][0].all = [ev for ev in sources['event'][0].all if len(ev['time']) > 0 and '(from epoch encoder file)' not in ev['label']] + + ######################################################################## + # WINDOW + + # create a window that will be populated with viewers + win = ephyviewer.MainViewer( + # settings_name='test2', # remember settings (e.g. 
xsize) between sessions + show_auto_scale = False, + global_xsize_zoom = True, + play_interval = 0.1, # refresh period in seconds + ) + win.setWindowTitle(self.metadata['key']) + win.setWindowIcon(ephyviewer.QT.QIcon(':/soundwave.png')) + + ######################################################################## + # PREPARE TRACE PARAMETERS + + setDefaultsForPlots(self.metadata, self.blk) + + plotNameToIndex = {p['channel']:i for i, p in enumerate(self.metadata['plots'])} + + ######################################################################## + # PREPARE SCATTER PLOT PARAMETERS + + if not self.lazy: + all_times = sigs[0].times.rescale('s').magnitude # assuming all AnalogSignals have the same sampling rate and start time + spike_indices = {} + spike_channels = {} + for st in seg.spiketrains: + if 'channels' in st.annotations: + c = [] + for channel in st.annotations['channels']: + index = plotNameToIndex.get(channel, None) + if index is None: + print('Note: Spike train {} will not be plotted on channel {} because that channel isn\'t being plotted'.format(st.name, channel)) + else: + c.append(index) + if c: + spike_channels[st.name] = c + spike_indices[st.name] = np.where(np.isin(all_times, st.times.magnitude))[0] + + ######################################################################## + # TRACES WITH SCATTER PLOTS + + if self.is_enabled('traces'): + + if self.lazy: + import neo + neorawioclass = neo.rawio.get_rawio_class(abs_path(self.metadata, 'data_file')) + neorawio = neorawioclass(abs_path(self.metadata, 'data_file')) + neorawio.parse_header() + + # Intan-specific tricks + if type(neorawio) is neo.rawio.IntanRawIO: + # dirty trick for getting ungrouped channels into a single source + neorawio.header['signal_channels']['group_id'] = 0 + + # prepare to append custom channel names stored in data file to ylabels + custom_channel_names = {c['native_channel_name']: c['custom_channel_name'] for c in neorawio._ordered_channels} + + channel_indexes = [p['index'] for p in self.metadata['plots']] + sources['signal'].append(ephyviewer.AnalogSignalFromNeoRawIOSource(neorawio, channel_indexes)) + + # modify loaded channel names to use ylabels + for i, p in enumerate(self.metadata['plots']): + + ylabel = p['ylabel'] + + # Intan-specific tricks + if type(neorawio) is neo.rawio.IntanRawIO: + # append custom channel names stored in data file to ylabels + if custom_channel_names[p['channel']] != ylabel: + ylabel += ' ({})'.format(custom_channel_names[p['channel']]) + + sources['signal'][-1].channels['name'][i] = ylabel + + # TODO support scatter + else: + sources['signal'].append(ephyviewer.AnalogSignalSourceWithScatter( + signals = np.concatenate([sigs[p['index']].magnitude for p in self.metadata['plots']], axis = 1), + sample_rate = sigs[0].sampling_rate, # assuming all AnalogSignals have the same sampling rate + t_start = sigs[0].t_start, # assuming all AnalogSignals start at the same time + channel_names = [p['ylabel'] for p in self.metadata['plots']], + scatter_indexes = spike_indices, + scatter_channels = spike_channels, + )) + + # useOpenGL=True eliminates the extremely poor performance associated + # with TraceViewer's line_width > 1.0, but it also degrades overall + # performance somewhat and is reportedly unstable + if support_increased_line_width: + useOpenGL = True + line_width = 2.0 + else: + useOpenGL = None + line_width = 1.0 + + trace_view = ephyviewer.TraceViewer(source = sources['signal'][0], name = 'signals', useOpenGL = useOpenGL) + + win.add_view(trace_view) + + 
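+            # scatter_size controls the size of the spike markers overlaid on
+            # the traces (only applicable when the source includes scatter
+            # data, i.e., when lazy loading is off)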
trace_view.params['scatter_size'] = 5 + trace_view.params['line_width'] = line_width + trace_view.params['display_labels'] = True + trace_view.params['antialias'] = True + + # set the theme + if theme != 'original': + trace_view.params['background_color'] = self.themes[theme]['background_color'] + trace_view.params['vline_color'] = self.themes[theme]['vline_color'] + trace_view.params['label_fill_color'] = self.themes[theme]['label_fill_color'] + trace_view.params_controller.combo_cmap.setCurrentText(self.themes[theme]['cmap']) + trace_view.params_controller.on_automatic_color() + + # adjust plot range, scaling, and positioning + trace_view.params['ylim_max'] = 0.5 + trace_view.params['ylim_min'] = -trace_view.source.nb_channel + 0.5 + trace_view.params['scale_mode'] = 'by_channel' + for i, p in enumerate(self.metadata['plots']): + sig_units = sigs[p['index']].units + units_ratio = (pq.Quantity(1, p['units'])/pq.Quantity(1, sig_units)).simplified + assert units_ratio.dimensionality.string == 'dimensionless', f"Channel \"{p['channel']}\" has units {sig_units} and cannot be converted to {p['units']}" + ylim_span = np.ptp(p['ylim'] * units_ratio.magnitude) + ylim_center = np.mean(p['ylim'] * units_ratio.magnitude) + trace_view.by_channel_params['ch{}'.format(i), 'gain'] = 1/ylim_span # rescale [ymin,ymax] across a unit + trace_view.by_channel_params['ch{}'.format(i), 'offset'] = -i - ylim_center/ylim_span # center [ymin,ymax] within the unit + + ######################################################################## + # TRACES OF RAUC + + if self.is_enabled('traces_rauc') and self.rauc_sigs is not None: + + sig_rauc_source = ephyviewer.InMemoryAnalogSignalSource( + signals = np.concatenate([self.rauc_sigs[p['index']].as_array() for p in self.metadata['plots']], axis = 1), + sample_rate = self.rauc_sigs[0].sampling_rate, # assuming all AnalogSignals have the same sampling rate + t_start = self.rauc_sigs[0].t_start, # assuming all AnalogSignals start at the same time + channel_names = [p['ylabel'] + ' RAUC' for p in self.metadata['plots']], + ) + sources['signal_rauc'] = [sig_rauc_source] + + trace_rauc_view = ephyviewer.TraceViewer(source = sources['signal_rauc'][0], name = 'signals rauc') + + if 'signals' in win.viewers: + win.add_view(trace_rauc_view, tabify_with = 'signals') + else: + win.add_view(trace_rauc_view) + + trace_rauc_view.params['line_width'] = line_width + trace_rauc_view.params['display_labels'] = True + trace_rauc_view.params['display_offset'] = True + trace_rauc_view.params['antialias'] = True + + # set the theme + if theme != 'original': + trace_rauc_view.params['background_color'] = self.themes[theme]['background_color'] + trace_rauc_view.params['vline_color'] = self.themes[theme]['vline_color'] + trace_rauc_view.params['label_fill_color'] = self.themes[theme]['label_fill_color'] + trace_rauc_view.params_controller.combo_cmap.setCurrentText(self.themes[theme]['cmap']) + trace_rauc_view.params_controller.on_automatic_color() + + # adjust plot range + trace_rauc_view.params['ylim_max'] = 0.5 + trace_rauc_view.params['ylim_min'] = -trace_rauc_view.source.nb_channel + 0.5 + trace_rauc_view.params['scale_mode'] = 'by_channel' + for i, p in enumerate(self.metadata['plots']): + ylim_span = np.median(self.rauc_sigs[p['index']].magnitude) * 10 + ylim_center = ylim_span / 2 + trace_rauc_view.by_channel_params['ch{}'.format(i), 'gain'] = 1/ylim_span # rescale [ymin,ymax] across a unit + trace_rauc_view.by_channel_params['ch{}'.format(i), 'offset'] = -i - ylim_center/ylim_span # 
center [ymin,ymax] within the unit + + ######################################################################## + # FREQUENCY (EXPERIMENTAL AND COMPUTATIONALLY EXPENSIVE!) + + if self.is_enabled('freqs'): + + freq_view = ephyviewer.TimeFreqViewer(source = trace_view.source, name = 'timefreqs') + + freq_view.params['scale_mode'] = 'by_channel' + freq_view.params['nb_column'] = 1 + freq_view.params['colormap'] = 'gray' + freq_view.params.param('timefreq')['deltafreq'] = 100 + freq_view.params.param('timefreq')['f_start'] = 1 + freq_view.params.param('timefreq')['f_stop'] = 1500 + + freq_view.by_channel_params['ch0', 'visible'] = False + freq_view.by_channel_params['ch1', 'visible'] = True + freq_view.by_channel_params['ch2', 'visible'] = True + freq_view.by_channel_params['ch3', 'visible'] = True + freq_view.by_channel_params['ch4', 'visible'] = False + + # freq_view.params.param('timefreq')['normalisation'] = 1.5 + freq_view.by_channel_params['ch1', 'clim'] = 3 + freq_view.by_channel_params['ch2', 'clim'] = 5 + freq_view.by_channel_params['ch3', 'clim'] = 10 + + if 'signals' in win.viewers: + win.add_view(freq_view, tabify_with = 'signals') + elif 'signals rauc' in win.viewers: + win.add_view(freq_view, tabify_with = 'signals rauc') + else: + win.add_view(freq_view) + + ######################################################################## + # SPIKE TRAINS + + if self.is_enabled('spike_trains') and sources['spike'][0].nb_channel > 0: + + spike_train_view = ephyviewer.SpikeTrainViewer(source = sources['spike'][0], name = 'spiketrains') + win.add_view(spike_train_view) + + # set the theme + if theme != 'original': + spike_train_view.params['background_color'] = self.themes[theme]['background_color'] + spike_train_view.params['vline_color'] = self.themes[theme]['vline_color'] + spike_train_view.params['label_fill_color'] = self.themes[theme]['label_fill_color'] + spike_train_view.params_controller.combo_cmap.setCurrentText(self.themes[theme]['cmap']) + spike_train_view.params_controller.on_automatic_color() + + ######################################################################## + # EPOCHS + + if self.is_enabled('epochs') and sources['epoch'][0].nb_channel > 0: + + epoch_view = ephyviewer.EpochViewer(source = sources['epoch'][0], name = 'epochs') + win.add_view(epoch_view) + + # set the theme + if theme != 'original': + epoch_view.params['background_color'] = self.themes[theme]['background_color'] + epoch_view.params['vline_color'] = self.themes[theme]['vline_color'] + epoch_view.params['label_fill_color'] = self.themes[theme]['label_fill_color'] + epoch_view.params_controller.combo_cmap.setCurrentText(self.themes[theme]['cmap']) + epoch_view.params_controller.on_automatic_color() + + ######################################################################## + # EPOCH ENCODER + + if self.is_enabled('epoch_encoder') and self.metadata['epoch_encoder_file'] is not None: + + writable_epoch_source = MyWritableEpochSource( + filename = abs_path(self.metadata, 'epoch_encoder_file'), + possible_labels = self.metadata['epoch_encoder_possible_labels'], + ) + + epoch_encoder = ephyviewer.EpochEncoder(source = writable_epoch_source, name = 'epoch encoder') + epoch_encoder.params['exclusive_mode'] = False + win.add_view(epoch_encoder) + + # set the theme + if theme != 'original': + epoch_encoder.params['background_color'] = self.themes[theme]['background_color'] + epoch_encoder.params['vline_color'] = self.themes[theme]['vline_color'] + epoch_encoder.params['label_fill_color'] = 
self.themes[theme]['label_fill_color'] + # TODO add support for combo_cmap + + ######################################################################## + # VIDEO + + if self.is_enabled('video') and self.metadata['video_file'] is not None: + + video_source = ephyviewer.MultiVideoFileSource(video_filenames = [abs_path(self.metadata, 'video_file')]) + if self.metadata['video_offset'] is not None: + video_source.t_starts[0] += self.metadata['video_offset'] + video_source.t_stops[0] += self.metadata['video_offset'] + video_source._t_start = max(min(video_source.t_starts), 0) + video_source._t_stop = max(video_source.t_stops) + + # correct for videos that report frame rates that are too fast or + # too slow compared to the clock on the data acquisition system + if self.metadata['video_rate_correction'] is not None: + video_source.rates[0] *= self.metadata['video_rate_correction'] + + if self.metadata['video_jumps'] is not None: + + # create an unmodified video_times vector with evenly spaced times + video_times = np.arange(video_source.nb_frames[0])/video_source.rates[0] + video_source.t_starts[0] + + # insert repeating times at pause_start to fill pause_duration + # so that that section of the video is skipped over + for pause_start, pause_duration in self.metadata['video_jumps']: + pause_start_index = np.searchsorted(video_times, pause_start) + pause_fill = video_times[pause_start_index] * np.ones(int(np.round(pause_duration*video_source.rates[0]))) + video_times = np.insert(video_times, pause_start_index, pause_fill) + video_times = video_times[:video_source.nb_frames[0]] + + # add the modified video_times to the video_source + video_source.video_times = [video_times] + video_source.t_starts[0] = min(video_times) + video_source.t_stops[0] = max(video_times) + video_source._t_start = max(min(video_source.t_starts), 0) + video_source._t_stop = max(video_source.t_stops) + + video_view = ephyviewer.VideoViewer(source = video_source, name = 'video') + if theme != 'original': + video_view.graphiclayout.setBackground(self.themes[theme]['background_color']) + win.add_view(video_view, location = 'bottom', orientation = 'horizontal') + + ######################################################################## + # EVENTS + + if self.is_enabled('event_list') and sources['event'][0].nb_channel > 0: + + event_list = ephyviewer.EventList(source = sources['event'][0], name = 'events') + if 'video' in win.viewers: + win.add_view(event_list, split_with = 'video') + else: + win.add_view(event_list, location = 'bottom', orientation = 'horizontal') + + ######################################################################## + # DATAFRAME + + annotations_dataframe = NeoEpochToDataFrame(seg.epochs, exclude_epoch_encoder_epochs=True) + if self.is_enabled('data_frame') and len(annotations_dataframe) > 0: + + data_frame_view = ephyviewer.DataFrameView(source = annotations_dataframe, name = 'table') + if 'events' in win.viewers: + win.add_view(data_frame_view, tabify_with = 'events') + elif 'video' in win.viewers: + win.add_view(data_frame_view, split_with = 'video') + else: + win.add_view(data_frame_view, location = 'bottom', orientation = 'horizontal') + + ######################################################################## + # FINAL TOUCHES + + # select first tabs + for widget in win.children(): + if isinstance(widget, ephyviewer.PyQt5.QtWidgets.QTabBar): + widget.setCurrentIndex(0) + + # set amount of time shown initially + win.set_xsize(self.metadata['t_width']) # seconds + + return win diff --git 
a/neurotic/gui/epochencoder.py b/neurotic/gui/epochencoder.py new file mode 100644 index 0000000..b8c99c4 --- /dev/null +++ b/neurotic/gui/epochencoder.py @@ -0,0 +1,74 @@ +# -*- coding: utf-8 -*- +""" + +""" + +import os +import shutil +import numpy as np +import pandas as pd +from ephyviewer import WritableEpochSource + +class MyWritableEpochSource(WritableEpochSource): + """ + + """ + + def __init__(self, filename, possible_labels, color_labels=None, channel_name='', backup=True): + """ + + """ + + self.filename = filename + self.backup = backup + + WritableEpochSource.__init__(self, epoch=None, possible_labels=possible_labels, color_labels=color_labels, channel_name=channel_name) + + def load(self): + """ + Returns a dictionary containing the data for an epoch. + + Data is loaded from the CSV file if it exists; otherwise the superclass + implementation in WritableEpochSource.load() is called to create an + empty dictionary with the correct keys and types. + + The method returns a dictionary containing the loaded data in this form: + + { 'time': np.array, 'duration': np.array, 'label': np.array, 'name': string } + """ + + if os.path.exists(self.filename): + # if file already exists, load previous epoch + df = pd.read_csv(self.filename, index_col=None, dtype={ + 'Start (s)': 'float64', + 'End (s)': 'float64', + 'Type': 'U'}) + epoch = {'time': df['Start (s)'].values, + 'duration': df['End (s)'].values - df['Start (s)'].values, + 'label': df['Type'].values, + 'name': self.channel_name} + else: + # if file does NOT already exist, use superclass method for creating + # an empty dictionary + epoch = super().load() + + return epoch + + def save(self): + """ + + """ + + # if file already exists, make a backup copy first + if self.backup and os.path.exists(self.filename): + backup_filename = self.filename.split('.') + backup_filename.insert(-1, 'bck') + backup_filename = '.'.join(backup_filename) + shutil.copy2(self.filename, backup_filename) + + df = pd.DataFrame() + df['Start (s)'] = np.round(self.ep_times, 6) # round to nearest microsecond + df['End (s)'] = np.round(self.ep_times + self.ep_durations, 6) # round to nearest microsecond + df['Type'] = self.ep_labels + df.sort_values(['Start (s)', 'End (s)', 'Type'], inplace=True) + df.to_csv(self.filename, index=False) diff --git a/neurotic/gui/icons/__init__.py b/neurotic/gui/icons/__init__.py new file mode 100644 index 0000000..9208796 --- /dev/null +++ b/neurotic/gui/icons/__init__.py @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +""" + +""" + +from ...gui.icons import icons diff --git a/neurotic/gui/icons/icons.py b/neurotic/gui/icons/icons.py new file mode 100644 index 0000000..2c3435b --- /dev/null +++ b/neurotic/gui/icons/icons.py @@ -0,0 +1,330 @@ +# -*- coding: utf-8 -*- + +# Resource object code +# +# Created by: The Resource Compiler for PyQt5 (Qt v5.12.2) +# +# WARNING! All changes made in this file will be lost! 
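+# Importing this module registers the embedded icon data with Qt's resource
+# system (via qInitResources() at the bottom of the file), which is what lets
+# the GUI refer to the window icon as ':/soundwave.png'.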
+ +from PyQt5 import QtCore + +qt_resource_data = b"\ +\x00\x00\x11\x8b\ +\x89\ +\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\ +\x00\x00\x5a\x00\x00\x00\x5a\x08\x06\x00\x00\x00\x38\xa8\x41\x02\ +\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\ +\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0e\xc4\x00\x00\x0e\xc4\ +\x01\x95\x2b\x0e\x1b\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\ +\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\ +\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x11\x08\x49\x44\ +\x41\x54\x78\x9c\xed\x9d\x79\x78\x54\xe5\xbd\xc7\xbf\xef\x99\x25\ +\x61\x92\xc9\x4c\x36\x12\x42\xb6\x09\x61\x02\x09\x6b\x48\x21\x1b\ +\xab\xb6\xb8\x63\x1f\x5a\xae\xb2\x53\xaf\x5a\xd4\x7a\xb9\xed\xe3\ +\xed\x23\xda\x2b\x7d\xda\x72\xad\x55\x54\x94\x16\xb4\x8a\x0f\xab\ +\xa2\x7d\x44\x8a\x08\x8a\x20\x66\x61\x29\xb2\x49\x48\x86\xec\x21\ +\x3b\x59\x66\x32\x93\x49\x32\x33\xe7\xbc\xf7\x8f\x04\xc8\xc9\x39\ +\x33\x99\xe5\x4c\x48\xb0\x9f\x7f\x60\x7e\xef\xf6\xe3\xc7\x39\xef\ +\xf2\x7b\x7f\xef\x7b\x08\x86\x11\x67\x67\x6c\x53\x38\xda\xd4\x13\ +\x59\x70\x7a\x06\x44\x4f\x81\x09\x00\x92\x01\x04\x03\x08\x02\x10\ +\xda\xf7\x27\x00\x74\x02\x68\x07\xd0\x09\x4a\xcd\x20\xa4\x8c\x00\ +\x06\x0e\xf4\x2a\x21\x30\x28\x43\x3b\x4b\x32\xbe\x7b\xd2\x7e\x7b\ +\xfe\x25\x42\xc8\xed\x6c\x7c\x1f\xf6\xc9\xe2\xc7\xd9\xa7\x71\x2c\ +\x72\x41\x90\x03\xe0\x27\x00\x34\x12\x55\x6f\x05\x50\x48\x09\x0a\ +\x18\x20\x5f\x11\x6a\x39\x71\x3b\x0d\x7f\x5b\x0c\x9d\x9f\xbc\x27\ +\x4d\xc6\xd1\x15\x94\x62\x0d\x80\xd1\x43\xd4\x6c\x3b\x80\x8f\x19\ +\x42\x76\x66\x56\x2c\xcd\x1f\xa2\x36\x6f\x32\x64\x86\x3e\x9b\xb4\ +\x4f\x63\xe7\x6c\x4f\x52\x82\xd5\x00\x99\x38\x54\xed\x8a\x43\x8b\ +\x01\x66\x3b\x23\xe3\xb6\x65\x96\x2d\xef\x18\x8a\x16\xfd\x6e\xe8\ +\xb3\xfa\x3d\x11\x3d\x0e\xfa\x0c\xa1\x78\x16\xbd\x7d\xec\x70\xc2\ +\x4c\x80\xed\x32\xaa\xf8\xbf\x99\x55\x4b\x1a\xfd\xd9\x90\xdf\x0c\ +\x9d\x9f\xf2\x9e\x9a\xd8\x47\xbd\x48\x28\x7d\x1a\xb7\x06\xb0\xe1\ +\x8a\x05\x94\xbe\xdd\x13\xa4\xfc\xd3\xfc\xa2\x25\x16\x7f\x34\xe0\ +\x17\x43\x9f\xd4\xed\x79\x90\x82\xbe\x0d\x20\xde\x1f\xf5\xfb\x91\ +\x7a\x00\xcf\x67\x57\x2e\xdb\x21\x75\xc5\x92\x1a\xfa\x74\xc2\x0e\ +\x1d\xcb\xc8\xfe\x06\x60\xa1\x94\xf5\x0e\x3d\xf4\x30\xa1\xb2\xb5\ +\x59\x55\x8f\x56\x49\x55\xa3\x64\x86\x2e\x48\xdc\xbd\x88\x10\x6c\ +\xc7\xf0\xeb\x87\xbd\xa5\x83\x80\x3c\x9e\x55\xb9\x74\x9f\x14\x95\ +\xf9\x6c\xe8\x43\xc9\x87\x02\x42\xd9\xf6\x57\x28\xf0\x2b\x29\xea\ +\x1b\x86\xbc\xa3\x51\x29\x7e\x95\x56\xb4\xc4\xe6\x4b\x25\x3e\x19\ +\xa6\x60\xdc\x8e\xd1\x84\x93\xfd\x13\xc0\x4c\x5f\xea\x19\xee\x10\ +\xe0\x94\x42\x41\x1e\xcc\xb8\xba\xb4\xc5\x87\x3a\xbc\xa3\x40\xb7\ +\x3b\x81\x00\x47\x00\xa4\x78\x5b\xc7\x88\x82\xa2\x9c\xca\x65\x0b\ +\x73\xca\x1e\x29\xf7\xa6\x38\xe3\x4d\xa1\xfc\xe4\x3d\x69\x04\xc8\ +\xc7\x0f\xc5\xc8\x00\x40\x30\x8e\xb0\x6c\x5e\x9e\x6e\xe7\x54\xef\ +\x8a\x7b\x48\x61\xd2\xae\xc9\xa0\xe4\x04\xee\x9c\x41\xcf\x53\xda\ +\x28\xc3\xce\xcd\x29\x5f\x79\xd9\x93\x42\x1e\x19\xfa\xcc\xb8\xbd\ +\x71\x76\x8e\x2b\x20\x40\x9c\x67\xba\xdd\x71\xd4\x13\xca\xe4\x78\ +\x32\xfd\x73\xbb\xeb\x38\xab\xdf\x13\xe1\xe0\xb8\xaf\xfe\x6d\x64\ +\x00\x40\x0c\x25\xdc\x57\xa7\x74\x7b\xa2\xdc\x2d\xe0\x96\xa1\x0f\ +\x25\x1f\x0a\xb0\xd9\xe9\xe7\xf8\x21\xf5\xc9\x83\x93\xcc\x81\x7e\ +\x5a\x94\xb6\x4f\xe9\x4e\x66\xb7\x0c\xad\x65\xdb\x5f\xc5\x1d\x3e\ +\x85\xf3\x92\x2c\x53\xa7\xfd\x65\x77\x32\x0e\xda\x47\xf7\xf9\x2d\ +\x3e\x73\x27\xef\x0f\x14\x0a\x8e\x2e\xce\xae\x5e\xfe\xa9\xab\x4c\ +\x2e\x8d\xd7\xe7\xbb\x38\x07\x40\x2b\xa9\x6a\x77\x1e\xed\x14\x98\ 
+\x9e\x53\xb9\xac\xda\x59\x06\x97\x5d\x07\x47\x64\x6f\xe3\xdf\x46\ +\x76\x87\x50\x02\x6c\x73\x95\xc1\xa9\xa1\x0b\x75\xbb\x7f\x4e\x09\ +\xee\x93\x5e\xa7\xa1\x81\xc8\x08\x02\x62\x83\xa0\xd2\x6b\xa1\x08\ +\x0b\x18\x8a\x26\x17\x16\x26\xec\xfa\xa9\x53\x7d\xc4\x84\xf9\x29\ +\xef\xa9\x19\xfb\xa8\x62\x50\x3a\xd6\x7f\x7a\xf9\x07\x45\x44\x20\ +\x62\x9f\x99\x84\x88\x87\x12\x21\xd7\xde\x9a\x10\x58\x4b\x4d\x68\ +\xfe\xb0\x0c\x8d\xbb\x4a\x41\xed\x9c\xbf\x9a\xaf\x09\xb2\xb2\xa9\ +\x53\x9b\x56\x76\x0e\x4c\x10\x7d\xa2\x89\x7d\xd4\x8b\x23\xd1\xc8\ +\xea\x8c\x48\x4c\xfd\xe2\x3e\x44\xaf\xd4\xf3\x8c\x0c\x00\xaa\xf1\ +\x1a\x24\xfe\x6e\x06\xa6\x1c\xbc\x17\xa3\x92\x42\xfc\xa5\x42\xbc\ +\x45\xc5\x3c\x2f\x96\x20\x78\xa2\xcf\xea\xf7\x44\xd8\xec\xb4\x0a\ +\xc3\x7f\xfb\x89\x87\x7a\x7a\x04\x52\x77\x2e\x00\xa3\x92\x0f\x9a\ +\xd7\x61\xb4\xa1\xe8\xd1\xa3\xb0\x1a\x8c\xfe\x50\xc5\x0c\xbb\x22\ +\x31\xbb\x76\x49\x5b\x7f\xa1\xe0\x89\xee\xb1\x73\xeb\x30\xc2\x8c\ +\xac\x08\x0b\x80\x7e\x4b\xae\xc0\xc8\x0e\xa3\x0d\x5d\x65\x1d\xa0\ +\x2c\xe5\xc9\xe5\x5a\x25\x26\xbe\x3f\x0f\x72\x8d\x5b\x6b\x0d\x4f\ +\x51\x43\x69\xfb\xaf\x81\x42\xde\x13\x7d\x2a\x79\x57\x08\xc7\x92\ +\x6a\x8c\xb0\x99\xc6\xb8\x97\x67\x61\xf4\x92\x71\x3c\x59\xfd\xdf\ +\x8b\x51\xf3\x97\x8b\xa0\x76\x0e\x8a\xc8\x40\x24\xbe\x90\x8e\x88\ +\x87\x12\x79\x79\xda\x8e\x5c\x83\x61\x6d\x9e\x3f\x54\x32\x29\x89\ +\x22\x21\xa3\x62\x89\xe9\x86\x80\xf7\x44\x53\x07\x7e\x89\x11\x66\ +\xe4\xa0\xd4\x50\x44\x2e\x4e\xe2\xc9\xea\xb7\x5d\x41\xf5\xc6\xf3\ +\x37\x07\x3d\xfb\xf5\x6e\x94\xae\x2b\x44\xc3\x07\x06\x5e\xbe\xb0\ +\x85\x71\x88\x78\x30\xc1\x1f\x6a\x69\x6c\xd4\xfe\x78\x7f\x01\xdf\ +\xd0\x84\xac\xf1\x47\xab\xfe\x24\xe1\xf9\xe9\x20\xb2\x5b\x2f\x66\ +\x67\x51\x3b\x6a\x36\x5d\x12\xcd\x5b\xbd\xf1\x3c\xcc\xe7\xf9\x9b\ +\x24\x71\xbf\x99\x0a\xa2\xf0\xca\x2d\xef\x1a\x8a\xd5\xfd\x7f\xde\ +\x6c\xa1\x50\xb7\x7b\x26\x7a\x83\x0a\x47\x0c\x41\xa9\xa1\xd0\xe4\ +\x44\xf3\x64\x55\x7f\x3a\x07\xca\x52\xa8\xc6\x6b\x10\x7a\xd7\x58\ +\x84\x2d\x8c\x43\xc8\x8f\x46\x43\x16\xac\x00\x75\x70\x28\x5d\x57\ +\x08\xae\x87\xbd\x99\x3f\x30\x3e\x18\x51\x8f\x24\x4b\xaf\x1c\x41\ +\xda\xc9\x84\x9d\xe9\x37\x7e\xf6\x1f\x3d\x56\x48\xdf\x9a\x7f\x89\ +\x5e\xa9\xe7\xfd\xee\xbc\xdc\x06\x4d\x66\x14\xc6\xbf\x9e\x0d\x65\ +\xd4\x28\x5e\x1a\x65\x29\x4c\x85\x8d\xa8\xdf\x56\x8c\xa6\xdd\xa5\ +\x18\xf3\x8b\x5b\xcf\xd4\xd8\xb5\xa9\x68\xda\x5b\x06\xea\x90\x76\ +\x7e\x4d\x65\xcc\x4a\x00\xe7\x80\xbe\xc1\x70\x1f\xf6\xc9\x62\x75\ +\xf6\x06\x00\x91\xce\x0a\x85\xcc\x1a\x8d\xe0\xa9\xe1\xb0\xb7\x74\ +\xc3\x72\xa1\x15\x5d\x15\x43\x12\xb2\xe6\x14\xb9\x56\x89\x19\x85\ +\x3f\x05\x13\x28\xbb\x25\xa4\x70\xcb\xf5\x65\xca\x6f\x84\x26\x3b\ +\x0a\x60\x6e\x65\xbe\xfa\x6c\x01\x5a\x0f\x3a\x75\x55\x78\x4b\x53\ +\x56\x65\x69\x0c\xc1\x06\x4e\x0e\x00\x63\x93\x6c\xe9\xa0\xc4\xa9\ +\x91\xc3\xef\x8f\x87\xfe\xad\x5c\x9e\xcc\x7c\xae\x05\xd5\x1b\xcf\ +\xc1\x7c\xce\xeb\x8d\x61\x9f\x08\xbf\x3f\x81\x6f\x64\xc0\x6d\xff\ +\xa2\x26\x37\x5a\x20\x1b\xb3\x3a\xc5\x1f\x86\x8e\x3a\x3d\x6e\xfc\ +\x74\x94\xe3\xbb\xde\x3e\x9a\x23\x0b\x5c\xe5\xd6\xce\x8d\x11\xc8\ +\xd4\xe9\x11\x48\xfb\xf0\x6e\x44\x2d\xf5\x43\xff\xe6\x06\xe1\xf7\ +\xb9\x8e\x36\x63\xad\x0e\x18\xf3\x1a\xd0\x76\xe4\x1a\xac\x57\x07\ +\x5f\x98\xa8\xd3\x23\xa0\x4a\x91\x7e\xc2\xc5\x71\x98\x0f\xf4\xf5\ +\xd1\x84\xf4\xfe\x70\x86\x29\xbf\x11\xa3\x7f\x96\x24\x90\x13\x39\ +\x83\xa4\x3f\xce\x84\x4c\xa5\x40\xfd\xdf\x8b\x25\x57\xd2\x19\x8a\ +\xc8\x40\x84\xcc\x74\x1e\x56\xdd\xb4\xa7\x0c\x35\xaf\x5d\x84\xa3\ +\xbd\xe7\xa6\x2c\x78\x72\x18\x74\x7f\x9c\x89\xe0\xc9\x61\x4e\xcb\ +\x45\x3e\x9c\x88\xea\x3f\x5f\x90\x54\x57\x10\x2c\x00\xf0\x2a\x29\ 
+\x4a\xdb\xa7\x34\x59\xed\x6d\x18\x64\x35\xa8\x9d\x33\x06\x21\x99\ +\x51\x50\x4f\x0b\x47\x48\x26\x7f\xab\x8c\xb2\x14\x25\x8f\x9f\x80\ +\xf1\x9b\x7a\x69\x95\x74\x42\xf4\x0a\x3d\x74\xbf\xcf\x10\x4d\xab\ +\x79\xe5\x02\xea\xb6\x5e\x11\x4d\x63\x94\x0c\xf4\x5b\xe7\x20\x74\ +\x9e\xf0\x0d\x05\x00\x5b\x83\x15\xdf\xcd\xfe\x0c\xe0\xa8\x68\xba\ +\x97\x58\x94\x61\x96\x30\xd9\x72\xf5\xa2\x54\x80\x08\x96\x8c\x03\ +\xe9\xae\xb6\xc0\x54\xd0\x88\xeb\xff\xa8\x44\x57\xa9\x09\x9a\xdc\ +\x68\x30\x01\xbd\x7d\x24\x61\x08\xc2\x7f\x1c\x0b\xe3\xb7\x0d\xb0\ +\x37\x77\x49\xa9\xa4\x28\x71\xeb\x26\x23\x30\x51\x2d\x90\x37\xbc\ +\x5f\x82\x6b\x9b\x2e\x81\x51\xc9\x31\x2a\x29\x04\xaa\x14\x2d\x02\ +\xe3\x83\xa1\x08\x0d\x00\xb5\x73\x60\x2d\x0e\xb4\x1d\xb9\x06\x6d\ +\xee\x18\x28\xa3\x55\x82\xf2\x32\xb5\x02\xe6\xd3\xcd\xe8\xa9\x15\ +\x38\xdf\x7c\x41\x69\xb7\x05\x7c\x24\xa7\x14\x29\x9e\xee\x51\xb5\ +\x1e\xaa\x81\xbd\xa5\x1b\xa9\x3b\x17\xdc\x9c\xec\x33\x2a\x39\xf4\ +\x6f\xe5\xe0\xd2\x03\x87\xc1\x76\xfa\xef\xa8\x08\x13\x20\x43\xc8\ +\x2c\x61\xb7\xe1\x68\xef\x81\xc3\x68\xc3\xa4\x4f\x7e\x02\xf5\xb4\ +\x70\xde\x8c\xe2\x06\x3d\xb5\x9d\x68\x3b\x5a\x8b\xba\xad\x57\x90\ +\xfc\x5a\x16\x64\x22\x0e\xa8\x88\x87\x13\x61\x3a\xd9\x24\xad\xce\ +\x76\x4e\xcf\x30\x60\xbc\xda\xd9\xee\x38\xd3\x8c\xea\x57\xf8\xfd\ +\x59\x60\x82\x1a\xba\x0d\x33\x24\x51\xce\x19\xea\x1f\x45\x82\x19\ +\x25\x34\x90\x3c\x34\x00\x71\xbf\x9e\x02\x75\x7a\x84\xa8\x91\x01\ +\x20\x20\x36\x08\x63\x56\xa7\x20\x65\x4b\x2e\xc0\x8a\x77\x0f\xe1\ +\xf7\xc6\x0b\x67\x33\xbe\x22\xc3\x04\x86\x82\x7a\x1d\x42\xd0\xf8\ +\x81\x01\xd6\x12\xfe\x88\x1e\xb9\x38\x89\xb7\x18\x90\x1a\xed\x9c\ +\x31\xbe\x57\xc2\x10\xc8\xd4\x0a\xd1\x24\x59\xb0\x02\xa1\x77\x49\ +\xec\x8a\xa7\x4c\x0a\x03\x40\x38\x9d\x70\xb7\x3c\x4b\x51\xb5\xf1\ +\x9c\x40\x9e\xb8\x7e\xfa\xa0\xd3\x2f\x6f\x71\xcb\xd0\x14\xe8\xae\ +\x34\xa3\xe3\x4c\x33\x4c\x27\x9b\x60\x2d\x31\x7a\xb4\xab\x12\xb9\ +\x28\xd1\x7b\x05\xc5\x49\x92\x03\xf0\x69\xbb\xc1\x94\xdf\x08\xcb\ +\xc5\x56\x04\x4f\x0d\xbf\x25\x64\x08\x92\xff\x92\x89\xae\x8a\x0e\ +\xc1\x13\xef\x0b\xca\x68\x15\x54\x7a\xe7\x73\xdd\xae\xb2\x0e\x34\ +\xbc\x5f\x82\xd6\xc3\x35\x70\x18\xf9\xe1\xcc\x8c\x4a\x8e\xd0\x05\ +\x63\x31\xf6\x97\xa9\x08\x4a\x75\x1d\x36\xa8\x9d\x1b\x03\xb9\x56\ +\x29\xa8\xc3\x7b\xa8\x86\x01\x20\x1c\xbe\x3d\x44\x6c\x3a\xc5\x8c\ +\x92\x23\x65\xeb\x1c\x49\x9d\xeb\xda\xd9\x4e\x9e\x66\x0a\x5c\x7b\ +\xf3\x7b\x5c\xbc\xf7\x73\x34\x7d\x58\x26\x6a\x20\xce\xea\x40\xeb\ +\xc1\x6a\x7c\xbf\xe8\x30\x6a\x5e\xbb\xd8\xbb\x5c\x77\x02\x51\x30\ +\x52\xbf\x91\x6a\x49\x0c\xdd\x7e\xb4\x16\xb6\x46\xab\x40\x1e\x18\ +\x1f\x8c\xe4\xd7\xb3\x9d\x0e\x4e\x9e\xa2\x9d\x2b\x34\x34\x6b\x75\ +\xc0\xb0\xf6\x5b\xd4\xbe\xf9\xbd\x60\x27\x45\x0c\xca\x52\xd4\x6d\ +\x29\x42\xf9\xfa\xd3\x2e\xe7\xcb\x11\x12\x76\x1f\x54\x2a\x43\x53\ +\x96\xa2\xf9\x23\xf1\xf8\xec\xd0\x79\x31\x88\xff\xef\x29\xbe\x36\ +\x01\xed\xbc\x18\x84\xfd\x38\x96\x27\xe3\xba\x1c\x28\x5e\x7d\x1c\ +\x6d\x5f\xd6\x7a\x5c\x5f\xf3\x47\xe5\x28\x7f\xfe\x8c\xd3\x27\x3b\ +\x24\x63\x34\x02\x62\xa4\xd9\xd1\x23\x7d\x86\x96\x84\xe6\x4f\x2a\ +\x9c\x2a\x3d\xf6\xa9\x34\xc1\x36\x92\x27\x84\xcc\x1a\x8d\x94\xbf\ +\xce\xe6\x39\xe8\xa9\x9d\x83\xe1\xa9\x3c\x98\xcf\x5e\xf7\xba\xde\ +\xe6\x8f\xcb\x51\xb7\xb5\x48\x3c\x91\x00\x91\x3f\xf7\x7a\x9e\x20\ +\x80\x01\x60\x96\xa2\xa2\x9e\xba\x4e\x74\x9c\x6d\x16\x4f\x24\x40\ +\xf2\xab\x99\x08\xbf\xd7\xf3\x7e\x2f\xfc\x9e\x38\x4c\x7c\x6f\x9e\ +\x60\x6e\xdb\xf6\x65\x2d\x8c\x27\x1a\xbc\x51\x95\xc7\xb5\x4d\x97\ +\x60\xfc\x56\xbc\x9e\xa8\xff\x18\xc7\xdb\xbd\xf1\x16\x0a\x98\x25\ +\x33\x34\x00\xb4\x1c\x70\xee\x66\x24\x72\x06\xfa\xb7\x72\x30\xf6\ 
+\xe9\x34\xb7\x94\x27\x72\x06\x71\xbf\x9e\x02\xfd\x96\xd9\xa2\x21\ +\x04\xc6\x13\xd2\xf8\x55\x28\x4b\x51\xba\xae\x00\xdd\x35\xc2\x03\ +\xb3\xca\x68\x15\x42\xef\x8e\x15\x29\xe5\x19\x44\x6a\x43\xb7\x1e\ +\xaa\x71\x3d\x5f\x65\x08\xe2\x7f\x33\x15\x93\x0f\xdc\x83\xf0\x07\ +\x12\x44\x97\xc0\xf2\xd0\x00\x44\x2d\x1b\x8f\x69\x47\xee\x47\xec\ +\x33\x93\xc4\x7d\xcc\x14\x30\xe6\xf9\xfe\x34\xdf\xc0\x61\xb4\xe1\ +\xea\xda\x3c\x70\x5d\x0e\x41\xda\xd8\x27\x53\xa5\x68\xc2\x2c\xa7\ +\x80\x49\xaa\x78\x5c\x47\x7b\x0f\x8c\x27\xea\x07\x7d\x0a\x82\x26\ +\x86\x42\xbf\x39\x07\xd4\xce\xa1\xab\xa2\x03\xf6\xb6\x1e\x30\x0a\ +\x06\x8a\xc8\x40\x04\x26\x0c\x3e\x36\x5b\x0d\x46\xd8\x9a\xa4\x75\ +\x5e\x75\x16\xb7\xa3\xec\xb9\x53\xd0\x6f\xce\xe1\xcd\x92\x82\xa7\ +\x85\x43\x93\x13\x0d\x53\x81\x4f\x67\xf2\x8d\x0c\x01\x2a\x7c\xd6\ +\xb2\x1f\xd7\x3f\xab\x72\x3b\x2f\x51\x30\x50\xa5\x68\xa1\xc9\x8a\ +\x82\x3a\x23\xd2\x2d\x23\x03\xd2\x75\x1b\x03\x69\x3d\x54\x83\xaa\ +\x8d\xe7\x05\xf2\xc4\x17\xd2\x7d\xea\xab\x29\x41\x05\x43\x00\xc3\ +\xe0\x59\xdd\xa7\xfd\xeb\x3a\xb0\x16\x71\xef\x1d\x6b\xb6\x4b\xe2\ +\xeb\x75\x36\x78\x49\x41\xc3\xfb\x25\xb8\xfe\x0f\xfe\xb3\xa7\x9a\ +\xa0\x45\xf4\x6a\xef\x4f\x95\x10\x10\x03\xc3\x81\x5e\xf5\x55\xb9\ +\xfe\x70\xdd\x2c\x5a\x0f\x5f\x13\x4f\x94\x11\x18\xd6\xe6\xa1\x61\ +\xbb\xc1\xb9\xcf\x97\xa3\x30\x7f\x77\x1d\xd7\x36\x5d\xe2\xed\x90\ +\xdc\x4c\xb6\x3a\x60\xfe\xce\xfb\x29\x9d\x3b\x54\xbc\x74\x16\x5d\ +\xe5\xfc\xcd\xe7\x84\xdf\x4e\x83\x26\x5b\xb8\xd7\xe8\x16\x2c\x67\ +\x20\x7d\xe7\x06\xc5\x23\x4e\xbc\x44\x93\x15\x85\xd4\xdd\x77\x89\ +\xa6\x99\xcf\xb7\xe0\xca\xb2\xaf\xc1\x75\xb3\x50\x84\x05\x20\x20\ +\x36\xb8\xd7\x93\xc6\x51\xd8\x5b\x7b\xd0\x5d\x63\x06\xd7\xcd\x42\ +\xf7\x52\x06\xa2\x57\xe9\x05\xe5\xdb\x8f\xd6\xa2\xe4\x89\x6f\xa5\ +\x54\x57\x94\xa0\x49\x61\x98\xf4\xe1\xdd\xbc\x19\x8f\xa3\xc3\x86\ +\x8a\xf5\x67\xd0\x7a\xa8\xc6\xa3\xba\x38\x19\x99\xe4\xf6\x56\x96\ +\x47\x30\x04\x33\xf2\x17\x89\xee\x62\x00\xbd\x8e\xa8\x92\xc7\x4f\ +\xf0\x02\x59\xfa\x33\xe6\xb1\x09\x48\x7c\x21\x5d\x34\xad\xe2\x85\ +\x33\x68\xda\x5b\x26\x99\xaa\xae\x08\x9d\x17\x83\x94\x77\xe7\x0a\ +\xfa\xe7\x8e\xd3\xcd\xb0\x5c\x68\x81\xe9\x54\x93\x3b\x73\x79\xb3\ +\x32\xcc\x12\x2e\xfb\xeb\xf5\x8f\xd9\xc7\x42\x17\xcf\x41\xef\xb5\ +\x67\xd2\x40\x01\x65\xe4\x28\xa8\x67\x88\x47\x30\x04\xc6\x07\x43\ +\x3b\x37\x06\xd6\x2b\xed\xbc\xd9\x83\x22\x2c\x00\xba\xff\xcd\xc0\ +\xd8\xb5\x69\xe2\xd5\xb2\x14\xe5\xbf\x3d\x0d\xae\x5b\xfc\x3f\x48\ +\x6a\xba\xab\x7a\xdf\xae\x81\xce\xac\x80\xd8\x20\xa8\x33\x22\x11\ +\xf9\xb0\x0e\xdd\x95\x66\x97\xe1\xbf\x84\xe2\xeb\x59\xc5\x6b\x76\ +\xf6\xee\x82\x53\x7a\x8c\x12\x22\xe9\x65\x26\xd7\xf7\x57\x22\xe6\ +\x09\xe7\x77\x54\x05\x4f\x0e\xc3\xe4\xfd\x0b\x61\x2d\x35\xa1\xbb\ +\xa2\x03\xb2\x60\x05\xd4\xe9\x11\xa2\xbb\x27\x37\x30\x15\x36\x8a\ +\xf6\xdb\xfe\xa4\xfe\xdd\x62\xb0\x9d\x76\xe8\x36\x64\x80\xc8\x85\ +\x1e\x0b\x4d\x4e\x14\x5a\x0e\x54\x39\x2d\xcf\x11\x7a\x0c\xe8\x0b\ +\x37\x60\x19\xd9\x31\x86\x4a\x1b\x0e\x65\x2d\x31\xc2\x7c\xbe\x05\ +\xea\xe9\x11\x2e\xf3\xa9\xc6\x6b\xa0\x1a\xef\xde\x55\x77\xad\x07\ +\x3d\xeb\x1b\xa5\xa2\x69\x4f\x19\xac\xc5\x46\x24\x3c\x3f\x1d\xea\ +\x8c\x7e\x6f\x29\x05\x8c\xc7\x5d\x4f\x35\x19\x8e\x1e\x07\xfa\x0c\ +\x5d\x5f\x21\x3b\x1f\xab\xe3\xae\xc3\x45\x48\x98\x37\x34\x7e\x60\ +\xe0\x1b\x9a\xa3\x30\x15\x36\x89\x46\x0a\x09\x18\x10\xde\xc5\x5a\ +\x1d\x68\xfd\xe2\xf6\x18\x1a\xe8\x1d\xc4\x2f\x2f\xf9\x0a\x81\x3a\ +\x35\xd4\xd3\x22\xa0\x88\x08\x84\xe5\x42\x2b\x3a\xfe\xe5\xc4\xbf\ +\x03\x00\xa0\x8d\x99\xd5\xe5\x17\x80\xbe\x68\xd2\x25\x58\xc2\x12\ +\x60\xaf\xd4\xca\xb5\x7e\x71\x8d\xbf\x82\x63\x08\xac\x06\x23\xae\ 
+\x2c\x3f\x06\x63\x5e\x83\xa8\xff\xd8\xd6\x68\xc5\xb5\xd7\x2f\xa1\ +\xb3\x88\x77\x32\x01\x2d\x9f\x56\x3a\x9d\x9f\x0f\x25\xdd\x95\x66\ +\x5c\xff\xb4\x12\xf5\xef\x16\x0f\x62\x64\x00\x94\xec\x25\xd8\xc0\ +\x01\xfd\xa2\x49\x59\xc2\xec\x64\x28\xf7\xac\x94\x4a\x51\x07\x87\ +\xba\xad\x45\xd0\xbd\x74\x2b\xd8\x25\x6a\xf9\x78\x34\xec\xb8\x8a\ +\xe2\x55\xc7\x21\x0b\x56\x40\x35\x41\x0b\x45\x78\x20\xa8\x8d\x45\ +\x77\x8d\x05\x5d\x15\x1d\xd0\x64\x45\x23\x6e\x80\x0f\xbb\x71\x57\ +\xa9\x94\xaa\x0d\x0d\x94\xd9\x79\xe3\xaf\xbc\x79\x4b\xa1\x6e\xd7\ +\x15\xa9\x6f\x59\x24\x0a\x06\xd3\x8f\x3e\x80\x80\xb8\xe0\x9b\x32\ +\xf3\xb9\x16\x14\x3d\x72\x54\x34\x4c\x96\x51\x32\x98\xbc\xff\x1e\ +\xa8\x26\xdc\xda\x1b\x34\x9e\x68\x40\xf1\x9a\xe3\x52\xaa\xe5\x7f\ +\x28\x8a\xb2\xab\x96\x4d\xba\xf1\x73\xc0\x30\xca\x6c\x97\xbc\x3d\ +\x3b\x87\x9a\x57\x2f\xf2\x64\xea\xf4\x08\x24\xac\x9f\x2e\x9a\x3f\ +\xfe\xb9\x69\x3c\x23\x53\x96\xa2\x5a\x64\xa7\x7d\xb8\x43\x09\xe5\ +\xd9\x92\x67\x68\x46\xc6\x6d\x43\xef\x65\xa9\x92\xd2\xf2\xcf\x6a\ +\x41\x48\xec\x98\xd5\x29\x18\xff\x46\x36\x94\xa3\x7b\x03\xc6\x19\ +\x25\x83\xc4\xdf\xcd\xc0\x98\xc7\xf8\x31\x21\x4d\xbb\x4b\x61\x2d\ +\x35\x61\x84\xd1\x46\x95\x3d\xef\xf4\x17\x08\x5c\x52\x27\x75\xbb\ +\xff\x40\x81\x17\xa5\x6e\x59\xae\x55\x62\xea\xa1\xfb\x04\xab\x45\ +\xca\x52\xf4\xd4\x58\x20\x0f\x0f\x80\x3c\x84\xbf\x63\xde\x53\xdb\ +\x89\x4b\x0f\x7e\x01\x87\x49\xaa\x6d\xff\xa1\x81\x50\x6c\xc8\xaa\ +\x5a\xf6\xfb\xfe\x32\xc1\x0c\x9c\xb1\x39\xde\x00\x20\xf9\xfd\x9c\ +\x0e\xa3\x0d\xc5\x6b\xbe\x11\x2c\x38\x88\x8c\x20\x50\xa7\x16\x18\ +\x99\xb5\xd8\x61\x78\x2a\x6f\xc4\x19\x19\x40\x87\x83\xc3\xe6\x81\ +\x42\x81\xa1\x67\xd5\xad\x6a\x05\xa5\x6f\xfb\x43\x03\xab\xc1\x88\ +\xef\x7f\xf6\xe5\xa0\x5d\x81\xad\xa9\x0b\x57\x56\x1e\x43\xe7\xe5\ +\x36\x97\xf9\x86\x29\x6f\xce\xae\x59\x26\xe8\x7e\x45\xbd\xd9\xc7\ +\xd3\xf6\x05\x2b\xad\xf6\x2b\xfe\xba\x3f\x89\x28\x18\x44\x2d\x4d\ +\x46\xd4\x23\xc9\xbc\x28\x7b\x7b\x5b\x0f\x5a\xf6\x57\xa2\x76\x4b\ +\xd1\x90\x2f\xb5\x25\xa2\x5a\xd9\x63\x49\xcd\xa8\x7f\x52\x10\xe4\ +\xe2\x74\xdb\xa0\x20\x69\xcf\x62\x42\xe9\x27\xfe\xd5\xab\xb7\xef\ +\x56\x46\x8e\x82\xc3\x62\x87\xbd\xb9\xcb\xad\x20\x98\xe1\x0a\xa5\ +\x78\x38\xa7\x6a\xd9\x67\x62\x69\x2e\xf7\x67\x4e\x26\xee\xfe\x7c\ +\x24\xdf\xd9\x31\xc4\x1c\xc9\xae\x5c\x76\x8f\xb3\xc4\x41\x02\x68\ +\x98\xa7\x01\xf8\xe5\x0a\x80\x3b\x8c\x36\x8e\xb0\x4f\xb8\xca\xe0\ +\xd2\xd0\x59\x55\x8f\x56\x81\xd0\x15\x70\x19\x12\xf8\x83\x87\x82\ +\x92\xc7\x72\x2b\x56\xba\xf4\x78\x0d\x1a\x12\x96\x5d\xb1\xfc\x20\ +\x01\xde\x92\x4e\xaf\x3b\x0b\x02\xb2\x29\xbb\x6a\xe9\xfe\xc1\xf2\ +\xb9\x15\x7b\x17\xa2\x52\x3c\x47\x80\x53\xbe\xab\x75\xc7\x51\xa0\ +\x08\x33\x8b\xde\x38\x33\x10\xb7\x83\x15\xfa\x6e\xa6\xc9\xc3\x08\ +\x3b\x98\xef\x47\x4a\x29\xc3\xe6\xe6\x94\xaf\x1c\xc4\x57\xda\x8b\ +\x47\x51\x21\xa7\x92\x77\xc5\x72\x2c\x29\xc0\xc8\xfb\x48\x82\xb4\ +\x10\x52\x47\x29\xcd\x71\x75\xcf\xdd\x40\x3c\x0a\xdb\xcd\x2c\x5b\ +\x5e\x4b\x19\xf6\x7e\x00\x23\x72\xc9\x26\x11\x6d\x1c\x83\x85\x9e\ +\x18\x19\xf0\xe2\xa2\xee\x9c\xf2\x95\x97\x19\x96\xce\xa6\x80\x93\ +\x28\x99\x3b\x9a\x06\x16\xdc\x82\xdc\xb2\xa5\x4e\x82\xaa\x9d\xe3\ +\x55\x20\x7a\x66\xcd\xf2\x2b\x00\x66\x03\x28\xf1\xa6\xfc\xc8\x84\ +\x16\x73\x84\xcd\x9c\x5d\xb9\xe2\xe2\xe0\x79\x85\xf8\x14\x48\xda\ +\x37\x40\x1e\x00\x90\xe5\x4b\x3d\xc3\x1e\x4a\x0b\x65\x76\xf6\xa1\ +\x59\x75\xab\x5a\xbd\xad\xc2\xa7\xa3\x15\x19\x57\x97\xb6\xf4\x24\ +\xc4\xcc\x21\x04\x7f\xc6\x9d\xb9\xa8\xa1\x04\xd8\xac\x09\x52\xce\ +\xf7\xc5\xc8\x80\x84\x57\x15\x9f\xd4\xed\x7c\x88\xf6\x6e\x85\x39\ +\xbf\xa7\x61\x64\x61\xa2\xa0\xff\x99\x53\xb9\x5c\x12\xc7\x9a\x64\ 
+\x87\x85\xb2\x2a\x57\x1c\xa0\x40\x3a\xa1\x38\x24\x55\x9d\xb7\x0f\ +\xfa\x39\x47\xd8\x29\x52\x19\x19\xf0\xef\x47\xc9\x36\x03\x48\xf4\ +\x47\xfd\x7e\x83\x90\x3a\x50\xba\x7e\xd8\x7f\x94\xac\x3f\x17\xa3\ +\x76\x04\x59\x54\xb2\xf5\xa4\xf7\xd3\x4e\x3e\x9f\x65\xf4\x33\x66\ +\x00\x6f\x28\x7b\x2c\x2f\x8b\x39\xed\xa5\xc0\xef\xd7\xc9\x9f\x4a\ +\xde\x15\xc2\x3a\xc8\x5a\x42\xf0\x3f\x18\x7e\xfd\x77\x07\x21\xf8\ +\x1b\xb5\x29\x5e\x19\x78\x69\xab\xd4\x0c\xd9\xbd\xfd\x7d\xf7\x9e\ +\x3e\x01\x8a\xd5\x20\x10\x8f\xcb\x1d\x2a\x28\x8a\x28\xa1\xdb\xa9\ +\xb2\xe7\x9d\x5c\xc3\x63\x92\x9d\x4a\x73\xc5\x6d\xfe\xb8\x2f\x5d\ +\x05\x10\x2f\xcf\x2b\x78\x4c\x1b\x80\x4f\xee\xf8\x8f\xfb\x8a\x41\ +\xb1\x81\x29\x48\xd6\x4f\x64\x58\x9a\x03\xe0\x6e\x48\xfb\xb9\xea\ +\x4e\x00\x27\x29\xc5\x51\x99\x0c\x47\x67\x95\x97\x9e\xbf\x11\x70\ +\x78\x3b\x18\x56\x9f\xfc\x38\x3e\xef\xb8\x5c\x51\xdb\x90\x22\xe3\ +\x68\x0a\x05\x4d\xa1\x20\x7a\x42\x31\x9e\x80\x04\x53\x50\x35\x7a\ +\x6f\x02\xbe\x11\xc4\x67\x01\x60\x24\x20\x66\x0a\x6a\xa1\x04\xa5\ +\x04\xc4\x40\x40\xaf\xb2\x0c\x31\xd8\x63\xc7\x18\xe6\x7f\x33\x5f\ +\x78\x42\xf3\x36\xf1\xff\xd6\x78\x32\xd0\xdf\x94\xee\x4b\x00\x00\ +\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\ +" + +qt_resource_name = b"\ +\x00\x0d\ +\x0c\x20\xc1\x07\ +\x00\x73\ +\x00\x6f\x00\x75\x00\x6e\x00\x64\x00\x77\x00\x61\x00\x76\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\ +" + +qt_resource_struct_v1 = b"\ +\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\ +\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\ +" + +qt_resource_struct_v2 = b"\ +\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\ +\x00\x00\x00\x00\x00\x00\x00\x00\ +\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\ +\x00\x00\x01\x6b\x27\xcc\x1a\x06\ +" + +qt_version = [int(v) for v in QtCore.qVersion().split('.')] +if qt_version < [5, 8, 0]: + rcc_version = 1 + qt_resource_struct = qt_resource_struct_v1 +else: + rcc_version = 2 + qt_resource_struct = qt_resource_struct_v2 + +def qInitResources(): + QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data) + +def qCleanupResources(): + QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data) + +qInitResources() diff --git a/neurotic/gui/icons/icons.qrc b/neurotic/gui/icons/icons.qrc new file mode 100644 index 0000000..e7128d5 --- /dev/null +++ b/neurotic/gui/icons/icons.qrc @@ -0,0 +1,5 @@ + + + img/soundwave.png + + diff --git a/neurotic/gui/icons/img/attribution.txt b/neurotic/gui/icons/img/attribution.txt new file mode 100644 index 0000000..d7a23ee --- /dev/null +++ b/neurotic/gui/icons/img/attribution.txt @@ -0,0 +1,9 @@ +Soundwave Icon +-------------- + +Original: +Creative Commons Attribution CCBY +Soundwave by Maxim Kulikov from the Noun Project +https://thenounproject.com/icon/1786374/ + +Modified by Jeffrey Gill diff --git a/neurotic/gui/icons/img/soundwave.ico b/neurotic/gui/icons/img/soundwave.ico new file mode 100644 index 0000000..2f83af4 Binary files /dev/null and b/neurotic/gui/icons/img/soundwave.ico differ diff --git a/neurotic/gui/icons/img/soundwave.png b/neurotic/gui/icons/img/soundwave.png new file mode 100644 index 0000000..177e4d3 Binary files /dev/null and b/neurotic/gui/icons/img/soundwave.png differ diff --git a/neurotic/gui/icons/img/soundwave.svg b/neurotic/gui/icons/img/soundwave.svg new file mode 100644 index 0000000..0d2e582 --- /dev/null +++ b/neurotic/gui/icons/img/soundwave.svg @@ -0,0 +1,83 @@ + +image/svg+xml \ No newline at end of file diff --git 
a/neurotic/gui/icons/make_resources.py b/neurotic/gui/icons/make_resources.py new file mode 100644 index 0000000..c0116c2 --- /dev/null +++ b/neurotic/gui/icons/make_resources.py @@ -0,0 +1,8 @@ +# -*- coding: utf-8 -*- +""" + +""" + +import os + +os.popen('pyrcc5 icons.qrc -o icons.py') diff --git a/neurotic/gui/standalone.py b/neurotic/gui/standalone.py new file mode 100644 index 0000000..4a9c793 --- /dev/null +++ b/neurotic/gui/standalone.py @@ -0,0 +1,310 @@ +# -*- coding: utf-8 -*- +""" + +""" + +import os +import pkg_resources + +import quantities as pq +import elephant +from ephyviewer import QT + +from ..datasets import MetadataManager, LoadAndPrepareData, selector_labels +from ..gui.config import EphyviewerConfigurator + + +class MetadataSelectorQt(MetadataManager, QT.QListWidget): + """ + + """ + + def __init__(self): + """ + + """ + + MetadataManager.__init__(self) + QT.QListWidget.__init__(self) + + self.setSelectionMode(QT.QListWidget.SingleSelection) + self.setStyleSheet('font: 9pt Courier;') + + self.currentRowChanged.connect(self._on_select) + + def _on_select(self, currentRow): + """ + + """ + + if currentRow >= 0: + self._selection = list(self.all_metadata)[currentRow] + else: + self._selection = None + + def load(self): + """ + + """ + + # remember the current selection + old_selection = self._selection + + try: + MetadataManager.load(self) + except AssertionError as e: + print('Bad metadata file!', e) + + if self.all_metadata is not None: + + # clear and repopulate the list, + # which triggers the selection to change + self.clear() + for label in selector_labels(self.all_metadata): + QT.QListWidgetItem(label, self) + + if old_selection in self.all_metadata: + # reselect the original selection if it still exists + self.setCurrentRow(list(self.all_metadata).index(old_selection)) + else: + # otherwise select the first item + self.setCurrentRow(0) + + +class DownloadWorker(QT.QObject): + """ + + """ + + download_finished = QT.pyqtSignal() + + def __init__(self, metadata_selector): + """ + + """ + + QT.QObject.__init__(self) + + self.metadata_selector = metadata_selector + + def download(self): + """ + + """ + + self.metadata_selector.download_all_data_files() + self.download_finished.emit() + + +class DataExplorer(QT.QMainWindow): + """ + + """ + + request_download = QT.pyqtSignal() + + def __init__(self, lazy=True, theme='light', support_increased_line_width=False): + """ + + """ + + QT.QMainWindow.__init__(self) + + self.setWindowIcon(QT.QIcon(':/soundwave.png')) + + self.setWindowTitle('Data Explorer') + self.resize(600, 300) + + # lazy loading using Neo RawIO + self.lazy = lazy + + # available themes are 'light', 'dark', and 'original' + self.theme = theme + + # support_increased_line_width=True eliminates the extremely poor + # performance associated with TraceViewer's line_width > 1.0, but it + # also degrades overall performance somewhat and uses a mode of + # pyqtgraph that is reportedly unstable + self.support_increased_line_width = support_increased_line_width + + # windows are appended to this list so that they persist after the + # function that spawned them returns + self.windows = [] + + # metadata selector + self.metadata_selector = MetadataSelectorQt() + self.setCentralWidget(self.metadata_selector) + + # create a worker thread for downloading data + self.download_thread = QT.QThread() + self.download_worker = DownloadWorker(self.metadata_selector) + self.download_worker.moveToThread(self.download_thread) + 
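+        # because the worker object lives on download_thread, emitting
+        # request_download runs the download on that thread via a queued
+        # signal-slot connection, keeping the GUI responsive during downloads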
self.request_download.connect(self.download_worker.download) + self.download_worker.download_finished.connect(self.on_download_finished) + + # construct the menus + self.create_menus() + + # open example metadata file + self.metadata_selector.file = pkg_resources.resource_filename('neurotic', 'example/metadata.yml') + self.metadata_selector.load() + + def create_menus(self): + """ + + """ + + self.file_menu = self.menuBar().addMenu(self.tr('&File')) + + do_open_metadata = QT.QAction('&Open metadata', self, shortcut = 'Ctrl+O') + do_open_metadata.triggered.connect(self.open_metadata) + self.file_menu.addAction(do_open_metadata) + + do_reload_metadata = QT.QAction('&Reload metadata', self, shortcut = 'Ctrl+R') + do_reload_metadata.triggered.connect(self.metadata_selector.load) + self.file_menu.addAction(do_reload_metadata) + + self.do_download_data = QT.QAction('&Download data', self, shortcut = 'Ctrl+D') + self.do_download_data.triggered.connect(self.download_files) + self.file_menu.addAction(self.do_download_data) + + do_launch = QT.QAction('&Launch', self, shortcut = 'Return') + do_launch.triggered.connect(self.launch) + self.file_menu.addAction(do_launch) + + self.options_menu = self.menuBar().addMenu(self.tr('&Options')) + + do_toggle_lazy = QT.QAction('&Fast loading (disables filters, spike detection, RAUC)', self) + do_toggle_lazy.setCheckable(True) + do_toggle_lazy.setChecked(self.lazy) + do_toggle_lazy.triggered.connect(self.toggle_lazy) + self.options_menu.addAction(do_toggle_lazy) + + do_toggle_support_increased_line_width = QT.QAction('&Thick traces (worse performance)', self) + do_toggle_support_increased_line_width.setCheckable(True) + do_toggle_support_increased_line_width.setChecked(self.support_increased_line_width) + do_toggle_support_increased_line_width.triggered.connect(self.toggle_support_increased_line_width) + self.options_menu.addAction(do_toggle_support_increased_line_width) + + self.theme_menu = self.menuBar().addMenu(self.tr('&Theme')) + self.theme_group = QT.QActionGroup(self.theme_menu) + + do_select_light_theme = self.theme_menu.addAction('&Light theme') + do_select_light_theme.setCheckable(True) + do_select_light_theme.triggered.connect(self.select_light_theme) + self.theme_group.addAction(do_select_light_theme) + + do_select_dark_theme = self.theme_menu.addAction('&Dark theme') + do_select_dark_theme.setCheckable(True) + do_select_dark_theme.triggered.connect(self.select_dark_theme) + self.theme_group.addAction(do_select_dark_theme) + + do_select_original_theme = self.theme_menu.addAction('&Original theme') + do_select_original_theme.setCheckable(True) + do_select_original_theme.triggered.connect(self.select_original_theme) + self.theme_group.addAction(do_select_original_theme) + + if self.theme == 'light': + do_select_light_theme.setChecked(True) + elif self.theme == 'dark': + do_select_dark_theme.setChecked(True) + elif self.theme == 'original': + do_select_original_theme.setChecked(True) + else: + raise ValueError('theme "{}" is unrecognized'.format(self.theme)) + + def open_metadata(self): + """ + + """ + + file, _ = QT.QFileDialog.getOpenFileName( + parent=self, + caption='Open metadata', + directory=None, + filter='YAML files (*.yml *.yaml)') + + if file: + self.metadata_selector.file = file + self.metadata_selector.load() + + def download_files(self): + """ + + """ + + self.download_thread.start() + self.request_download.emit() + self.do_download_data.setText('&Download in progress!') + self.do_download_data.setEnabled(False) + + def 
on_download_finished(self): + """ + + """ + + self.download_thread.quit() + self.metadata_selector.load() + self.do_download_data.setText('&Download data') + self.do_download_data.setEnabled(True) + + def launch(self): + """ + + """ + + metadata = self.metadata_selector.selected_metadata + + try: + + blk = LoadAndPrepareData(metadata, lazy=self.lazy) + + rauc_sigs = [] + if not self.lazy: + for sig in blk.segments[0].analogsignals: + rauc = elephant.signal_processing.rauc(sig, bin_duration = 0.1*pq.s) + rauc.name = sig.name + ' RAUC' + rauc_sigs.append(rauc) + + ephyviewer_config = EphyviewerConfigurator(metadata, blk, rauc_sigs, self.lazy) + ephyviewer_config.enable_all() + + win = ephyviewer_config.create_ephyviewer_window(theme=self.theme, support_increased_line_width=self.support_increased_line_width) + self.windows.append(win) + win.show() + + except FileNotFoundError as e: + + print('Some files were not found locally and may need to be downloaded') + print(e) + return + + def toggle_lazy(self, checked): + """ + + """ + self.lazy = checked + + def toggle_support_increased_line_width(self, checked): + """ + + """ + self.support_increased_line_width = checked + + def select_light_theme(self): + """ + + """ + self.theme = 'light' + + def select_dark_theme(self): + """ + + """ + self.theme = 'dark' + + def select_original_theme(self): + """ + + """ + self.theme = 'original' diff --git a/neurotic/scripts.py b/neurotic/scripts.py new file mode 100644 index 0000000..9042e3c --- /dev/null +++ b/neurotic/scripts.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- +""" + +""" + +def launch_standalone(): + """ + + """ + + from ephyviewer import mkQApp + from .gui.standalone import DataExplorer + + app = mkQApp() + win = DataExplorer() + win.show() + print('Ready') + app.exec_() diff --git a/neurotic/utils.py b/neurotic/utils.py new file mode 100644 index 0000000..4c434d2 --- /dev/null +++ b/neurotic/utils.py @@ -0,0 +1,171 @@ +# -*- coding: utf-8 -*- +""" +Tools for working with Neo objects +""" + +import re +import numpy as np +import pandas as pd +import neo +import elephant +from pylttb import lttb + +class CausalAlphaKernel(elephant.kernels.AlphaKernel): + """ + This modified version of elephant.kernels.AlphaKernel shifts time such that + convolution of the kernel with spike trains (as in + elephant.statistics.instantaneous_rate) results in alpha functions that + begin rising at the spike time, not before. The entire area of the kernel + comes after the spike, rather than half before and half after, as in + AlphaKernel. Consequently, CausalAlphaKernel can be used in causal filters. + + Derived from: + """ + __doc__ += elephant.kernels.AlphaKernel.__doc__ + + def median_index(self, t): + """ + In CausalAlphaKernel, "median_index" is a misnomer. Instead of returning + the index into t that gives half area above and half below (median), it + returns the index for the first non-negative time, which always + corresponds to the start of the rise phase of the alpha function. This + hack ensures that, when the kernel is convolved with a spike train, the + entire alpha function is located to the right of each spike time. 
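+
+        For example, if t is [-2, -1, 0, 1, 2] (in seconds), this method
+        returns 2, the index of the first non-negative time, rather than the
+        index at which half of the kernel's area has accumulated.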
+ + Overrides the following: + """ + return np.nonzero(t >= 0)[0].min() + median_index.__doc__ += elephant.kernels.AlphaKernel.median_index.__doc__ + +def DownsampleNeoSignal(sig, decimation_factor): + """ + + """ + + assert sig.shape[1] == 1, 'Can only downsample single-channel signals' + + num_points = int(sig.shape[0] / decimation_factor) + + x_downsampled, y_downsampled = lttb( + x=sig.times.magnitude, + y=sig.magnitude[:, 0], + threshold=num_points, + ) + + sig_downsampled = neo.IrregularlySampledSignal( + times=x_downsampled, + signal=y_downsampled, + units=sig.units, + time_units=sig.times.units, + ) + return sig_downsampled + +def NeoEpochToDataFrame(neo_epochs, exclude_epoch_encoder_epochs=False): + """ + + """ + + dtypes = { + 'Start (s)': float, + 'End (s)': float, + 'Duration (s)': float, + 'Type': str, + 'Label': str, + } + columns = list(dtypes.keys()) + df = pd.DataFrame(columns=columns) + for ep in neo_epochs: + if not exclude_epoch_encoder_epochs or '(from epoch encoder file)' not in ep.labels: + data = np.array([ep.times, ep.times+ep.durations, ep.durations, [ep.name]*len(ep), ep.labels]).T + df = df.append(pd.DataFrame(data, columns=columns), ignore_index=True) + return df.astype(dtype=dtypes).sort_values(['Start (s)', 'End (s)', 'Type', 'Label']).reset_index(drop=True) + +def BehaviorsDataFrame(neo_epochs, behavior_query, subepoch_queries): + """ + + """ + + # filter epochs to obtain the behaviors + df = NeoEpochToDataFrame(neo_epochs) + df = df.rename(columns={'Start (s)': 'Start', 'End (s)': 'End', 'Duration (s)': 'Duration'}) + df = df.query(behavior_query) + df = df.rename(columns={'Start': 'Start (s)', 'End': 'End (s)', 'Duration': 'Duration (s)'}) + + # add defaults for subepoch columns + for col_prefix in subepoch_queries: + df[col_prefix+' start (s)'] = np.nan + df[col_prefix+' end (s)'] = np.nan + df[col_prefix+' duration (s)'] = np.nan + df[col_prefix+' type'] = '' + df[col_prefix+' label'] = '' + + # for every behavior, identify other epochs that belong to it + df2 = NeoEpochToDataFrame(neo_epochs) + df2 = df2.rename(columns={'Start (s)': 'Start', 'End (s)': 'End', 'Duration (s)': 'Duration'}) + for i in df.index: + + # these variables are accessible within queries when the + # @ symbol appears before their names, e.g. @behavior_start + behavior_start = df.loc[i, 'Start (s)'] + behavior_end = df.loc[i, 'End (s)'] + behavior_duration = df.loc[i, 'Duration (s)'] + behavior_type = df.loc[i, 'Type'] + behavior_label = df.loc[i, 'Label'] + + for col_prefix, query in subepoch_queries.items(): + df3 = df2.query(query) + if len(df3) == 0: + pass + elif len(df3) == 1: + matching_epoch = df3.iloc[0] + df.loc[i, col_prefix+' start (s)'] = matching_epoch['Start'] + df.loc[i, col_prefix+' end (s)'] = matching_epoch['End'] + df.loc[i, col_prefix+' duration (s)'] = matching_epoch['Duration'] + df.loc[i, col_prefix+' type'] = matching_epoch['Type'] + df.loc[i, col_prefix+' label'] = matching_epoch['Label'] + else: + raise Exception(f'More than one epoch was found for the behavior spanning [{behavior_start}, {behavior_end}] that matches this query: {query}') + + return df + +def EstimateVideoJumpTimes(blk): + """ + Estimate how much time to skip in video playback if AxoGraph was temporarily + paused during data acquisition while the video continued to record. Returns + a list of ordered pairs suitable for the video_jumps metadata parameter. 
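+    For example, a return value of [[1234.5, 60]] would indicate that
+    acquisition was paused at 1234.5 seconds and that the video kept recording
+    for roughly 60 more seconds.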
The + returned stop times are exact, but pause durations have only whole-second + precision and should be manually refined by inspecting the video before + using. + """ + + if 'notes' not in blk.annotations: + return None + else: + + # obtain approximate start and stop times according to AxoGraph notes + note_start_times = np.array([0], dtype=np.int) + note_stop_times = np.array([], dtype=np.int) + for note_line in blk.annotations['notes'].split('\n'): + m = re.match('\d\d\d: Start at (\d*) s', note_line) + if m: + note_start_times = np.append(note_start_times, int(m.group(1))) + m = re.match('\d\d\d: Stop at (\d*) s', note_line) + if m: + note_stop_times = np.append(note_stop_times, int(m.group(1))) + + # calculate approximate pause durations + pause_durations = note_start_times[1:]-note_stop_times[:-1] + + # obtain exact stop times (AxoGraph time, not video time) + event_stop_times = np.array([], dtype=np.float) + ev = blk.segments[0].events[0] + for time, label in zip(ev.times, ev.labels): + if label == 'Stop': + event_stop_times = np.append(event_stop_times, time.magnitude) + + # pair stop times with pause durations + video_jumps = [] + for t, dur in zip(event_stop_times[:-1], pause_durations): + video_jumps.append([t, dur]) + + return video_jumps diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..b838405 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,12 @@ +av +elephant>=0.6.2 +git+https://github.com/jpgill86/ephyviewer.git@experimental +ipywidgets +git+https://github.com/jpgill86/python-neo.git@axographrawio +numpy +pandas +pylttb +pyqt5 +pyyaml +quantities +tqdm diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..1ab8a3e --- /dev/null +++ b/setup.py @@ -0,0 +1,103 @@ +# -*- coding: utf-8 -*- +""" + +""" + +import os +import subprocess +from setuptools import setup, find_packages + + +# Change version number here, not in neurotic/version.py, which is generated +# by this script. Try to follow recommended versioning guidelines at semver.org. +MAJOR = 0 # increment for backwards-incompatible changes +MINOR = 1 # increment for backwards-compatible feature additions +MICRO = 0 # increment for backwards-compatible bug fixes +IS_RELEASED = False # determines whether version will be marked as development +VERSION = f'{MAJOR}.{MINOR}.{MICRO}' + +# Try to fetch the git revision number from the .git directory if it exists. +if os.path.exists('.git'): + try: + out = subprocess.Popen(['git', 'rev-parse', 'HEAD'], + stdout=subprocess.PIPE).communicate()[0] + GIT_REVISION = out.strip().decode('ascii') + except OSError: + GIT_REVISION = 'unknown' +# If the .git directory is absent (perhaps because this is a source distro), +# try to fetch the rev number from neurotic/version.py where it may have been +# stored during packaging. +elif os.path.exists('neurotic/version.py'): + try: + v = {} + with open('neurotic/version.py', 'r') as f: + exec(f.read(), v) + GIT_REVISION = v['git_revision'] + except ImportError: + raise ImportError('Unable to import git_revision. Try removing ' \ + 'neurotic/version.py and the build directory ' \ + 'before building.') +else: + GIT_REVISION = 'unknown' + +# If this is not a release version, mark it as a development build/distro and +# tag it with the git revision number. +if not IS_RELEASED: + VERSION += '.dev0+' + GIT_REVISION[:7] + +# Write the version string to a file that will be included with the +# build/distro. This makes the string accessible to the package via +# neurotic.__version__. 
The git revision is also written in case a source +# distro is being built, so that it can be fetched later during installation. +with open('neurotic/version.py', 'w') as f: + try: + f.write('"""THIS FILE WAS GENERATED BY SETUP.PY DURING BUILDING/PACKAGING"""\n') + f.write(f'version = \'{VERSION}\'\n') + f.write(f'git_revision = \'{GIT_REVISION}\'\n') + finally: + f.close() + +# Read in the README to serve as the long_description, which will be presented +# on pypi.org as the project description. +with open('README.rst', 'r') as f: + README = f.read() + +install_requires = [ + # unreleased versions are needed so install from requirements.txt instead + # TODO permanent fix? + # 'av', + # 'elephant>=0.6.2', + # 'ephyviewer', # TODO require latest + # 'ipywidgets', + # 'neo', # TODO require >0.7.1 for AxographRawIO + # 'numpy', + # 'pandas', + # 'pylttb', + # 'pyqt5', + # 'pyyaml', + # 'quantities', + # 'tqdm', +] + +setup( + name = 'neurotic', + version = VERSION, + description = 'Curate, visualize, and annotate your behavioral ephys data using Python', + packages = find_packages(), + package_data = {'neurotic': ['example/metadata.yml']}, + install_requires = install_requires, + entry_points = {'console_scripts': ['neurotic=neurotic.scripts:launch_standalone']}, + long_description = README, + author = 'Jeffrey Gill', + author_email = 'jeffrey.p.gill@gmail.com', + license = 'MIT', + url = 'https://github.com/jpgill86/neurotic', + classifiers = [ + 'Development Status :: 3 - Alpha', + 'Intended Audience :: Science/Research', + 'License :: OSI Approved :: MIT License', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3', + 'Topic :: Scientific/Engineering', + ], +)
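+
+# For reference, the generated neurotic/version.py will contain something like
+# the following (the revision hash shown here is hypothetical):
+#
+#     """THIS FILE WAS GENERATED BY SETUP.PY DURING BUILDING/PACKAGING"""
+#     version = '0.1.0.dev0+abc1234'
+#     git_revision = 'abc1234ef0123456789abcdef0123456789abcde'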