Skip to content

Commit

Permalink
0.8.2
Browse files Browse the repository at this point in the history
  • Loading branch information
ray306 committed Mar 5, 2018
1 parent e0065ce commit bba4f72
Show file tree
Hide file tree
Showing 37 changed files with 259 additions and 3,240 deletions.
2 changes: 2 additions & 0 deletions .gitattributes
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
# Auto detect text files and perform LF normalization
* text=auto
99 changes: 99 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,99 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
4 changes: 1 addition & 3 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,3 +1 @@
## Concise Agile Flexible EEG (Cafe) data analysis toolbox

Cafe provides simple, flexible and powerful methods that can be used to directly test neural and psychological hypotheses based on topographic responses. These multivariate methods can investigate effects in the dimensions of response magnitude and topographic patterns separately using data in the sensor space, therefore enable assessing neural sources and its dynamics without sophisticated localization. Python based algorithms provide concise and extendable features of Cafe. Users of all levels can benefit from Cafe and obtain a straightforward solution to efficiently handle and process EEG data and a complete pipeline from raw data to publication.
# EasyEEG
2 changes: 1 addition & 1 deletion __init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,4 +25,4 @@ def reload(module=None):
importlib.reload(graph.figure_group)
importlib.reload(graph.figure_unit)

print('Cafe loaded')
print('EasyEEG loaded.')
Binary file removed __pycache__/__init__.cpython-35.pyc
Binary file not shown.
Binary file removed __pycache__/__init__.cpython-36.pyc
Binary file not shown.
Binary file removed __pycache__/default.cpython-35.pyc
Binary file not shown.
Binary file removed __pycache__/default.cpython-36.pyc
Binary file not shown.
Binary file removed __pycache__/group.cpython-35.pyc
Binary file not shown.
Binary file removed __pycache__/group.cpython-36.pyc
Binary file not shown.
Binary file removed __pycache__/parameter.cpython-35.pyc
Binary file not shown.
Binary file removed __pycache__/parameter.cpython-36.pyc
Binary file not shown.
Binary file removed __pycache__/structure.cpython-36.pyc
Binary file not shown.
1 change: 1 addition & 0 deletions algorithms/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
structure.Extracted_epochs.Spectrum = Spectrum
structure.Extracted_epochs.Time_frequency = Time_frequency
structure.Extracted_epochs.topography = topography
structure.Extracted_epochs.frequency_topography = frequency_topography
structure.Extracted_epochs.significant_channels_count = significant_channels_count
structure.Extracted_epochs.clustering = clustering
structure.Extracted_epochs.tanova = tanova
Expand Down
Binary file modified algorithms/__pycache__/__init__.cpython-36.pyc
Binary file not shown.
Binary file modified algorithms/__pycache__/basic.cpython-36.pyc
Binary file not shown.
Binary file modified algorithms/__pycache__/cosine_distance_models.cpython-36.pyc
Binary file not shown.
Binary file modified algorithms/__pycache__/erp.cpython-36.pyc
Binary file not shown.
Binary file modified algorithms/__pycache__/spectrum.cpython-36.pyc
Binary file not shown.
Binary file modified algorithms/__pycache__/topo.cpython-36.pyc
Binary file not shown.
15 changes: 12 additions & 3 deletions algorithms/basic.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
from ..default import *
from tqdm import tqdm
from ..statistics import stats_methods
import numexpr as ne

# todo: group single or not, 2*2 or not
# def check_availability(data,single_value_level=[],complex_value_level=[]):
Expand All @@ -19,13 +20,15 @@
# if len(values)==1:
# raise Exception(f'level "{l}" should have more than one value, but {values} in there.')

def check_availability(data, level, unique_value_count):
def check_availability(data, level, condition):
if level == 'time_group':
values = list(data.columns.get_level_values(level).unique())
else:
values = list(data.index.get_level_values(level).unique())
if len(values)!=unique_value_count:
raise Exception(f'level "{level}"\'s unique_value_count should be {unique_value_count} , but {values} in there.')
target = len(values)
if not ne.evaluate(str(target)+condition):
raise Exception(
f'level "{level}"\'s unique_value_count should {condition} , but {values} in there.')

def shuffle_on_level(df, level, within_subject=True, inplace=True):
raw = list(zip(df.index.get_level_values('subject'),df.index.labels[df.index.names.index(level)],df.index.get_level_values('trial')))
Expand Down Expand Up @@ -138,6 +141,12 @@ def sampling(data,step_size='1ms',win_size='1ms',sample='mean'):
return data

'map and apply'
# divide into units, convert the units, then combine them
def convert(df, unit, func):
    """Apply `func` to each `unit`-level group of `df` and recombine.

    Averages `df` down to the index levels named in `unit`, applies `func`
    to every group at that level, and concatenates the transformed pieces
    back into one DataFrame.

    Parameters:
        df: pandas DataFrame with a MultiIndex containing the `unit` levels.
        unit: level name or list of level names defining one "unit".
        func: callable taking one group DataFrame and returning a DataFrame.

    Returns:
        pandas DataFrame — concatenation of `func`'s outputs, in group order.
    """
    # Collapse all index levels except those in `unit` by averaging.
    # NOTE(review): `mean(level=...)` was removed in pandas 2.0 — this
    # targets the 2018-era pandas API; confirm before upgrading pandas.
    df_t = df.mean(level=unit)
    # Transform each unit independently, then stitch the results together.
    converted_list = [func(data) for name, data in df_t.groupby(level=unit)]
    return pd.concat(converted_list)

# def roll_on_levels(df, func, arguments_dict=dict(), levels='time', prograssbar=True):
# col_level = df.columns.names[1]
# df = df.stack(col_level)
Expand Down
51 changes: 0 additions & 51 deletions algorithms/cosine_distance_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,20 +20,6 @@ def sub_func(group_data, shuffle=500, within_subject=True):
pvalue = stats_methods.get_pvalue_from_distribution(result_real, dist_baseline)
return pvalue, result_real

# def sub_func4(group_data):
# result_real = []
# baseline = []
# for subject_group_id,subject_group_data in group_data.groupby(level='subject'):
# result_real.append(calc_cosD(subject_group_data))
# dist_baseline = []
# for _ in range(shuffle):
# shuffle_on_level(group_data, 'condition_group')
# dist_baseline.append(calc_cosD(group_data))
# baseline.append(np.mean(dist_baseline))

# pvalue = scipy.stats.ttest_rel(result_real, baseline)[1]
# return pvalue, result_real

def tanova(self,step_size='1ms',win_size='1ms',sample='mean',shuffle=500,mode=1,parallel=False):
# with the decorator, we can just focus on case data instead of batch/collection data
@self.iter('all')
Expand Down Expand Up @@ -78,40 +64,3 @@ def calc(case_raw_data):

default_plot_params = dict(title='cosine_distance_dynamics', plot_type=['direct','waveform'], x_title='time', y_title='distance', color="Set1", style='darkgrid')
return structure.Analyzed_data('cosine distance dynamics', cosine_distance_collection, default_plot_params=default_plot_params)


# def Topo_CosD(data,container,step='1ms',err_style='ci_band',win='5ms',sample='mean', sig_limit=0):

# def calc(batch_data):

# def sub_task(scene_data):
# scene_data = mean_axis(scene_data,'trial')
# # sampling along time axis
# if step!='1ms':
# scene_data = point_sample(scene_data,step)
# elif win!='1ms':
# scene_data = window_sample(scene_data,win,sample)

# distance = process.row_roll(scene_data, row=['subject','condition','time'], column=['channel'], func=calc_cosD)

# return distance['p'].unstack('time')

# map_result = [(scene_name,sub_task(scene_data)) for scene_name,scene_data in batch_data]

# result = pd.concat([result for name,result in map_result])
# result.sort_index(inplace=True)
# result = result.reindex([name for name,result in map_result],level='condition') # keep the condition order consistent with how it was defined

# return result

# # group the data
# container_data = group.extract(data,container,'Topograph')
# # calculate
# diff_data = [(title,calc(batch_data)) for title,batch_data in container_data]
# diff_stat_data = [None for i in diff_data]

# # plot
# note = ['Time(ms)','Distance',[]]
# plot_put.block(diff_data,note,err_style,diff_stat_data,win,sig_limit=0)

# return diff_data, diff_stat_data
4 changes: 2 additions & 2 deletions algorithms/spectrum.py
Original file line number Diff line number Diff line change
Expand Up @@ -66,10 +66,10 @@ def to_tf(case_raw_data):
if freq_span[0]==0:
widths = freqs+0.001
else:
widths = widths
widths = freqs
# index = pd.MultiIndex.from_tuples([(*name,freq) for freq in freqs[::-1]],
# names=['condition_group','channel_group','frequency'])

cwt_result = signal.cwt(np.array(data)[0], signal.ricker, widths=widths)
cwt_result = pd.DataFrame(cwt_result,index=freqs[::-1],columns=data.columns)

Expand Down
72 changes: 69 additions & 3 deletions algorithms/topo.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,13 +2,19 @@
from .. import structure
from .basic import *
from ..statistics import stats_methods
from scipy import signal

def topography(self, step_size='1ms', win_size='1ms', sample='mean', sig_limit=0.05):
@self.iter('average')
def to_topo(case_raw_data):
case_raw_data = sampling(case_raw_data, step_size, win_size, sample)

data_with_subject = case_raw_data.mean(level=['subject','condition_group','channel'])
check_availability(case_raw_data, 'channel', '>1')

data_with_subject = case_raw_data.mean(
level=['subject', 'condition_group', 'channel'])

data_with_subject = sampling(
data_with_subject, step_size, win_size, sample)

if len(data_with_subject.index.get_level_values('condition_group').unique()) == 2:
condition_groups_data = [data for idx,data in data_with_subject[0].groupby(level='condition_group')]

Expand All @@ -34,7 +40,67 @@ def to_topo(case_raw_data):
default_plot_params = dict(title='Topography',plot_type=['matrix','topograph'], zlim=minmax, color=plt.cm.jet, cbar_title='uV',
chan_locs=self.info['xy_locs'], sig_limit=sig_limit, x_title='time', y_title='condition_group')
return structure.Analyzed_data('Topography', topo_collection, default_plot_params=default_plot_params)

def frequency_topography(self, step_size='1ms', win_size='1ms', sample='mean', sig_limit=0.05, target=10):
    """Topography of spectral power at a target frequency or frequency band.

    Runs a Ricker continuous wavelet transform per condition/channel unit,
    averages the power over the requested frequencies, and packages the
    resulting topographies (plus a paired t-test when exactly two condition
    groups are present) for plotting.

    Parameters:
        step_size, win_size, sample: temporal resampling settings forwarded
            to `sampling()`.
        sig_limit: significance threshold stored in the plot defaults.
        target: a single number (one frequency) or a list of two numbers
            expanded into a band via `np.arange(low, high)`.

    Returns:
        structure.Analyzed_data holding the topography collection and
        default plotting parameters.

    Raises:
        Exception: if `target` is neither a number nor a two-number list.
    """
    # Normalize `target` into a float ndarray of CWT widths.  The original
    # code built a plain list for the scalar case, so `freqs += 0.001`
    # raised TypeError when target == 0; the band case produced an int
    # ndarray, on which in-place float addition also fails under NumPy's
    # casting rules.  A float ndarray handles both forms uniformly.
    if isinstance(target, (int, float)):
        freqs = np.asarray([target], dtype=float)
    elif type(target) is list and len(target) == 2 \
            and isinstance(target[0], (int, float)) and isinstance(target[1], (int, float)):
        freqs = np.arange(target[0], target[1], dtype=float)
    else:
        raise Exception(
            'Unsupported value for "target". The value should be a number, or a list of two numbers')

    if freqs[0] == 0:
        # A Ricker-wavelet width of exactly 0 is degenerate; nudge upward.
        freqs = freqs + 0.001

    def cwt(data):
        # One unit's time series -> wavelet power averaged over `freqs`,
        # wrapped back into a DataFrame with the unit's index/columns.
        cwt_result = signal.cwt(
            np.array(data)[0], signal.ricker, widths=freqs).mean(axis=0)
        cwt_result = pd.DataFrame(
            [cwt_result], index=data.index, columns=data.columns)
        return cwt_result

    @self.iter('average')
    def to_topo(case_raw_data):
        # Grand-average power per condition group and channel.
        data_without_subject = convert(
            case_raw_data, ['condition_group', 'channel'], cwt)
        data_without_subject = sampling(data_without_subject, step_size, win_size, sample)

        if len(data_without_subject.index.get_level_values('condition_group').unique()) == 2:
            # Exactly two condition groups: keep the subject level so a
            # paired t-test can be run per time point and channel.
            data_with_subject = convert(
                case_raw_data, ['subject', 'condition_group', 'channel'], cwt)
            data_with_subject = sampling(
                data_with_subject, step_size, win_size, sample)

            condition_groups_data = [
                data for idx, data in data_with_subject[0].groupby(level='condition_group')]

            # Difference topography between the two condition groups.
            topo_result = condition_groups_data[0].mean(
                level='channel') - condition_groups_data[1].mean(level='channel')
            # re-add level 'condition_group' in index
            recover_index(topo_result, data_with_subject, 'condition_group')
            stats_result = roll_on_levels_and_compare(data_with_subject, stats_methods.t_test, levels=['time', 'channel'],
                                                      between='condition_group', in_group='subject', prograssbar=False)
            # [0] is ugly
            result = pd.concat([topo_result, stats_result[0]],
                               keys=['Amp', 'p_val'], axis=1)

        else:
            result = data_without_subject
            result.columns = pd.MultiIndex.from_product(
                [['Amp'], result[0].columns], names=['', 'time'])

        return result

    topo_collection = to_topo()

    # Shared color scale spanning every topography in the collection.
    minmax = [(t.min().min(), t.max().max()) for t in topo_collection]
    minmax = (np.array(minmax).min(), np.array(minmax).max())

    default_plot_params = dict(title='Topography', plot_type=['matrix', 'topograph'], zlim=minmax, color=plt.cm.jet, cbar_title='Power',
                               chan_locs=self.info['xy_locs'], sig_limit=sig_limit, x_title='time', y_title='condition_group')
    return structure.Analyzed_data('Topography', topo_collection, default_plot_params=default_plot_params)

def significant_channels_count(self, step_size='1ms', win_size='1ms', sample='mean', sig_limit=0.05):
@self.iter('average')
def to_signif(case_raw_data):
Expand Down
13 changes: 11 additions & 2 deletions changelog.md
Original file line number Diff line number Diff line change
@@ -1,11 +1,20 @@
# Future works:
- correct()
- Better support for parallel cores
- Make the "extract()" more flexible

# 0.8.3 (todo)
- (todo) Make the "extract()" more flexible

# 0.8.2 (2018-03-05)
- Removed 'time_group' index which are unnecessary in Analyzed_Result
- Refine X axis in figure
- Improved the compatibility of save() and load() of Analyzed_Result
- New algorithm 'frequency_topography()'. Now we can calculate the topography of frequency.
- New method 'convert()' in module 'Basic'

# 0.8.1 (2018-03-01)
- MASSIVE amount of changes
- Altered the package name to "Cafe"
- Altered the package name to "easyEEG"

# 0.8 (2017-12-25)
- First public release. Merry Xmas!
Binary file modified graph/__pycache__/figure_unit.cpython-36.pyc
Binary file not shown.
Loading

0 comments on commit bba4f72

Please sign in to comment.