"""
This is the main script of the paper.
It performs a parcellation of the brain volume using various methods
and derives the average parcel signal for different contrasts.
Author: Bertrand Thirion, 2012--2014
"""
import numpy as np
import os
from nibabel import load
from nilearn import datasets
from nilearn.input_data import NiftiMasker
from group_parcellation import (
    make_parcels, reproducibility_selection, parcel_selection,
    parcel_cv, rate_atlas)

###############################################################################
# Get the data
contrasts = ['horizontal vs vertical checkerboard',
             'left vs right button press',
             'button press vs calculation and sentence listening/reading',
             'auditory processing vs visual processing',
             'auditory&visual calculation vs sentences',
             'sentence reading vs checkerboard']

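# Grey-matter mask restricting the analysis to in-brain voxels; the mask image
# also provides the affine passed to make_parcels (presumably so that the
# parcellation images can be written back in the same space).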
nifti_masker = NiftiMasker('mask_GM_forFunc.nii')
grp_mask = load(nifti_masker.mask_img).get_data()
affine = load(nifti_masker.mask_img).get_affine()
# Create the data matrix
n_contrasts, n_subjects = len(contrasts), 40
subjects = ['S%02d' % i for i in range(n_subjects)]
n_voxels = grp_mask.sum()
X = np.zeros((n_voxels, n_contrasts, n_subjects))
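# X[v, c, s] holds the masked activation of voxel v for contrast c in subject s.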
for nc, contrast in enumerate(contrasts):
    imgs = datasets.fetch_localizer_contrasts(
        [contrast], n_subjects=n_subjects,
        data_dir='/tmp/')['cmaps']
    X[:, nc, :] = nifti_masker.fit_transform(imgs).T

# Refine the mask: keep only voxels with fewer than 100 zero (missing) values
# across the n_contrasts * n_subjects images
second_mask = (X == 0).sum(1).sum(1) < 100
grp_mask[grp_mask > 0] = second_mask
X = X[second_mask]
# write directory
write_dir = os.path.join(os.getcwd(), 'results')
if not os.path.exists(write_dir):
    os.mkdir(write_dir)

###############################################################################
# Which analyses shall we run in the present experiment?
do_parcel = True
do_parcel_selection = False
do_parcel_reproducibility = False
do_parcel_cv = False
do_atlas = False
do_atlas_comparison = False
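# Numbers of parcels explored in the selection, cross-validation and
# reproducibility experiments.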
k_range = [10, 20, 30, 40, 50, 70, 100, 150, 200, 300, 400, 500, 700, 1000,
           1500, 2000, 3000, 5000, 7000, 10000]

if do_parcel_cv:
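    # Cross-validation of the spectral parcellation across the parcel numbers
    # in k_range (parcel_cv is defined in the group_parcellation module).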
    method = 'spectral'
    print method
    llr = parcel_cv(
        X, grp_mask, write_dir=write_dir,
        method=method, n_folds=.2, k_range=k_range)

if do_parcel_selection:
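    # Model selection for the k-means parcellation: log-likelihood criterion
    # ('ll') and BIC are computed for every size in k_range.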
    method = 'kmeans'
    print method
    llr, bic = parcel_selection(
        X, grp_mask, write_dir=write_dir, method=method, k_range=k_range,
        criterion='ll')

if do_parcel:
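    # Default analysis: build 158-parcel parcellations with each method and
    # report their goodness of fit (log-likelihood and BIC).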
    for method in ['ward', 'kmeans', 'geometric', 'spectral']:
        print method
        ll, bic = make_parcels(
            X, grp_mask, contrasts, affine, subjects,
            write_dir=write_dir, method=method, n_clusters=158)
        print ll, bic

if do_atlas:
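    # Rate pre-existing atlases on the same data, using their labels restricted
    # to the group mask.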
    atlases = ['sri24_3mm.nii', 'HarvardOxford-labels-3mm-slpit.nii']
    for atlas in atlases:
        labels = load(atlas).get_data()[grp_mask > 0]
        rate_atlas(X, labels, write_dir=write_dir)

if do_parcel_reproducibility:
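    # Reproducibility of the spectral parcellation, estimated over niter=5
    # iterations for each size in k_range.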
    method = 'spectral'
    print method
    r_score = reproducibility_selection(
        X, grp_mask, niter=5, method=method, k_range=k_range,
        write_dir=write_dir)

if do_atlas_comparison:
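    # Compare the data-driven parcellations with the Harvard-Oxford atlas:
    # 30 bootstrap draws over subjects, fitting each method and rating the
    # atlas on the resampled data.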
    atlas = load('HarvardOxford-labels-3mm-slpit.nii')
    labels = atlas.get_data()[grp_mask > 0]
    ll = {'ward': [], 'kmeans': [], 'geometric': [], 'spectral': [], 'atlas': []}
    for i in range(30):
        bootstrap_sample = (
            n_subjects * np.random.rand(n_subjects)).astype(np.int)
        X_ = X[:, :, bootstrap_sample]
        for method in ['ward', 'kmeans', 'geometric', 'spectral']:
            print method
            ll_, bic = make_parcels(
                X_, grp_mask, contrasts, affine, subjects,
                write_dir=write_dir, method=method, n_clusters=158)
            ll[method].append(ll_)
        ll_, _ = rate_atlas(X_, labels, write_dir=write_dir)
        ll['atlas'].append(ll_)