'''
Train and fine-tune decoders. This should work with other datasets as long as they
are stored in the same xarray format (the load paths below will also need to be
updated).
'''
import os, time
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # specify which GPU to use
from run_nn_models import run_nn_models
from transfer_learn_nn import transfer_learn_nn
from model_utils import unseen_modality_test, diff_specs, ntrain_combine_df, frac_combine_df
from transfer_learn_nn_eeg import transfer_learn_nn_eeg
t_start = time.time()
##################USER-DEFINED PARAMETERS##################
# Where data will be saved: rootpath + dataset + '/'
rootpath = '.../'
dataset = 'move_rest_ecog'
# Data load paths
ecog_lp = rootpath + 'ecog_dataset/' # ECoG data load path
ecog_roi_proj_lp = ecog_lp + 'proj_mat/' # ECoG projection matrix path
### Tailored decoder params (within participant) ###
n_folds_tail = 3 # number of folds (per participant)
spec_meas_tail = ['power', 'power_log', 'relative_power', 'phase', 'freqslide']
hyps_tail = {'F1' : 20, 'dropoutRate' : 0.693, 'kernLength' : 64,
             'kernLength_sep' : 56, 'dropoutType' : 'SpatialDropout2D',
             'D' : 2, 'n_estimators' : 240, 'max_depth' : 9}
hyps_tail['F2'] = hyps_tail['F1'] * hyps_tail['D'] # F2 = F1 * D
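# Note (added for clarity): F1, D, F2, kernLength, kernLength_sep, dropoutRate, and
# dropoutType are HTNet/EEGNet hyperparameters, while n_estimators and max_depth are
# most likely consumed by the random forest ('rf') decoder. The same split applies to
# hyps_same below.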
epochs_tail = 300
patience_tail = 30
### Same modality decoder params (across participants) ###
n_folds_same = 36 # number of total folds
spec_meas_same = ['power', 'power_log', 'relative_power', 'phase', 'freqslide']
hyps_same = {'F1' : 19, 'dropoutRate' : 0.342, 'kernLength' : 24,
             'kernLength_sep' : 88, 'dropoutType' : 'Dropout',
             'D' : 2, 'n_estimators' : 240, 'max_depth' : 6}
hyps_same['F2'] = hyps_same['F1'] * hyps_same['D'] # F2 = F1 * D
epochs_same = 300
patience_same = 20
### Unseen modality testing params (across participants) ###
eeg_lp = rootpath + 'eeg_dataset/' # path to EEG xarray data
eeg_roi_proj_lp = eeg_lp+'proj_mat/' # path to EEG projection matrix
### Fine-tune same modality decoders ###
model_type_finetune = 'eegnet_hilb' # NN model type to fine-tune (must be either 'eegnet_hilb' or 'eegnet')
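# Lightweight guard (added here, not in the original script): fail fast if the
# fine-tuning model type is not one of the two supported options noted above.
assert model_type_finetune in ('eegnet_hilb', 'eegnet'), \
    "model_type_finetune must be 'eegnet_hilb' or 'eegnet'"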
layers_to_finetune = ['all', ['conv2d','batch_normalization'],
                      ['batch_normalization','depthwise_conv2d','batch_normalization_1'],
                      ['separable_conv2d','batch_normalization_2']]
# Options: 'all' - allow entire model to be retrained
# ['conv2d','batch_normalization']
# ['batch_normalization','depthwise_conv2d','batch_normalization_1']
# ['separable_conv2d','batch_normalization_2']
# None - transfer learning of new last 3 layers
sp_finetune = [rootpath + dataset + '/tf_all_per/',
               rootpath + dataset + '/tf_per_1dconv/',
               rootpath + dataset + '/tf_depth_per/',
               rootpath + dataset + '/tf_sep_per/'] # where to save output (should match layers_to_finetune)
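# Optional consistency check (not in the original script): the fine-tuning loops
# below index sp_finetune with the same j used for layers_to_finetune, so the two
# lists must line up one-to-one.
assert len(sp_finetune) == len(layers_to_finetune), \
    'sp_finetune must have one save path per entry in layers_to_finetune'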
# How much train/val data to use, either by number of trials or percentage of available data
use_per_vals = True #if True, use percentage values (otherwise, use number of trials)
per_train_trials = [.17,.33,.5,0.67]
per_val_trials = [.08,.17,.25,0.33]
n_train_trials = [16,34,66,100]
n_val_trials = [8,16,34,50]
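# Note: each train amount is paired with the validation amount at the same index in
# the fine-tuning loops below. With use_per_vals = True, the pairs sum to roughly
# 0.25, 0.50, 0.75, and 1.0 of the available data (0.17+0.08, 0.33+0.17, 0.50+0.25,
# 0.67+0.33); with trial counts, the totals are 24, 50, 100, and 150 trials.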
### Train same modality decoders with different numbers of training participants ###
max_train_parts = 10 # use 1--max_train_parts training participants
n_val_parts = 1 # number of validation participants to use
##################USER-DEFINED PARAMETERS##################
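# Optional setup checks (a minimal sketch, not part of the original pipeline): warn if
# a load path is missing, create the top-level save directory, and peek at one
# participant file to confirm it is in the expected xarray format. The '*.nc' pattern
# assumes the data are stored as netCDF files; adjust if your files differ.
import glob
for _lp in [ecog_lp, ecog_roi_proj_lp, eeg_lp, eeg_roi_proj_lp]:
    if not os.path.isdir(_lp):
        print('Warning: load path not found: ' + _lp)
if not os.path.exists(rootpath + dataset + '/'):
    os.makedirs(rootpath + dataset + '/')
_example_files = glob.glob(ecog_lp + '*.nc')
if _example_files:
    import xarray as xr
    print(xr.open_dataset(_example_files[0]))  # inspect dims/coords before training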
#### Tailored decoder training ####
for s,val in enumerate(spec_meas_tail):
    do_log = True if val == 'power_log' else False
    compute_val = 'power' if val == 'power_log' else val
    single_sp = rootpath + dataset + '/single_sbjs_' + val + '/'
    combined_sbjs = False
    if not os.path.exists(single_sp):
        os.makedirs(single_sp)
    if s==0:
        models = ['eegnet_hilb','eegnet','rf','riemann'] # fit all decoder types
    else:
        models = ['eegnet_hilb'] # avoid fitting non-HTNet models again
    run_nn_models(single_sp, n_folds_tail, combined_sbjs, ecog_lp, ecog_roi_proj_lp, test_day = 'last', do_log=do_log,
                  epochs=epochs_tail, patience=patience_tail, models=models, compute_val=compute_val,
                  F1 = hyps_tail['F1'], dropoutRate = hyps_tail['dropoutRate'], kernLength = hyps_tail['kernLength'],
                  kernLength_sep = hyps_tail['kernLength_sep'], dropoutType = hyps_tail['dropoutType'],
                  D = hyps_tail['D'], F2 = hyps_tail['F2'], n_estimators = hyps_tail['n_estimators'], max_depth = hyps_tail['max_depth'])
#### Same modality training ####
for s,val in enumerate(spec_meas_same):
    do_log = True if val == 'power_log' else False
    compute_val = 'power' if val == 'power_log' else val
    multi_sp = rootpath + dataset + '/combined_sbjs_' + val + '/'
    if not os.path.exists(multi_sp):
        os.makedirs(multi_sp)
    combined_sbjs = True
    if s==0:
        models = ['eegnet_hilb','eegnet','rf','riemann'] # fit all decoder types
    else:
        models = ['eegnet_hilb'] # avoid fitting non-HTNet models again
    run_nn_models(multi_sp, n_folds_same, combined_sbjs, ecog_lp, ecog_roi_proj_lp, test_day = 'last', do_log=do_log,
                  epochs=epochs_same, patience=patience_same, models=models, compute_val=compute_val,
                  F1 = hyps_same['F1'], dropoutRate = hyps_same['dropoutRate'], kernLength = hyps_same['kernLength'],
                  kernLength_sep = hyps_same['kernLength_sep'], dropoutType = hyps_same['dropoutType'],
                  D = hyps_same['D'], F2 = hyps_same['F2'], n_estimators = hyps_same['n_estimators'], max_depth = hyps_same['max_depth'])
#### Unseen modality testing ####
for s,val in enumerate(spec_meas_same):
    if s==0:
        models = ['eegnet_hilb','eegnet','rf','riemann'] # test all decoder types
    else:
        models = ['eegnet_hilb'] # avoid re-testing non-HTNet models
    for mod_curr in models:
        unseen_modality_test(eeg_lp, eeg_roi_proj_lp, rootpath + dataset + '/',
                             pow_type = val, model_type = mod_curr)
#### Same modality fine-tuning ####
spec_meas = 'power'
for j,curr_layer in enumerate(layers_to_finetune):
    # Create save directory if it does not exist already
    if not os.path.exists(sp_finetune[j]):
        os.makedirs(sp_finetune[j])
    # Fine-tune with each amount of train/val data
    if curr_layer==layers_to_finetune[-1]:
        single_sub = True
    else:
        single_sub = False
    lp_finetune = rootpath + dataset + '/combined_sbjs_'+spec_meas+'/'
    if use_per_vals:
        for i in range(len(per_train_trials)):
            transfer_learn_nn(lp_finetune, sp_finetune[j],
                              model_type = model_type_finetune, layers_to_finetune = curr_layer,
                              use_per_vals = use_per_vals, per_train_trials = per_train_trials[i],
                              per_val_trials = per_val_trials[i], single_sub = single_sub, epochs=epochs_same, patience=patience_same)
    else:
        for i in range(len(n_train_trials)):
            transfer_learn_nn(lp_finetune, sp_finetune[j],
                              model_type = model_type_finetune, layers_to_finetune = curr_layer,
                              use_per_vals = use_per_vals, n_train_trials = n_train_trials[i],
                              n_val_trials = n_val_trials[i], single_sub = single_sub, epochs=epochs_same, patience=patience_same)
#### Unseen modality fine-tuning ####
spec_meas = 'relative_power'
for j,curr_layer in enumerate(layers_to_finetune):
    sp_finetune_eeg = sp_finetune[j][:-1]+'_eeg/'
    # Create save directory if it does not exist already
    if not os.path.exists(sp_finetune_eeg):
        os.makedirs(sp_finetune_eeg)
    # Fine-tune with each amount of train/val data
    if curr_layer==layers_to_finetune[-1]:
        single_sub = True
    else:
        single_sub = False
    lp_finetune = rootpath + dataset + '/combined_sbjs_'+spec_meas+'/'
    if use_per_vals:
        for i in range(len(per_train_trials)):
            transfer_learn_nn_eeg(lp_finetune, sp_finetune_eeg, eeg_lp,
                                  model_type = model_type_finetune, layers_to_finetune = curr_layer,
                                  use_per_vals = use_per_vals, per_train_trials = per_train_trials[i],
                                  per_val_trials = per_val_trials[i], single_sub = single_sub, epochs=epochs_same, patience=patience_same)
    else:
        for i in range(len(n_train_trials)):
            transfer_learn_nn_eeg(lp_finetune, sp_finetune_eeg, eeg_lp,
                                  model_type = model_type_finetune, layers_to_finetune = curr_layer,
                                  use_per_vals = use_per_vals, n_train_trials = n_train_trials[i],
                                  n_val_trials = n_val_trials[i], single_sub = single_sub, epochs=epochs_same, patience=patience_same)
#### Training same modality decoders with different numbers of training participants ####
for i in range(max_train_parts):
    sp_curr = rootpath + dataset + '/combined_sbjs_ntra'+str(i+1)+'/'
    combined_sbjs = True
    if not os.path.exists(sp_curr):
        os.makedirs(sp_curr)
    run_nn_models(sp_curr, n_folds_same, combined_sbjs, ecog_lp, ecog_roi_proj_lp, test_day = 'last', do_log=False,
                  epochs=epochs_same, patience=patience_same, models=['eegnet_hilb','eegnet','rf','riemann'], compute_val='power',
                  n_val = n_val_parts, n_train = i + 1, F1 = hyps_same['F1'], dropoutRate = hyps_same['dropoutRate'],
                  kernLength = hyps_same['kernLength'], kernLength_sep = hyps_same['kernLength_sep'], dropoutType = hyps_same['dropoutType'],
                  D = hyps_same['D'], F2 = hyps_same['F2'], n_estimators = hyps_same['n_estimators'], max_depth = hyps_same['max_depth'])
# Combine results into dataframes
ntrain_combine_df(rootpath + dataset)
frac_combine_df(rootpath + dataset, ecog_roi_proj_lp)
#### Pre-compute difference spectrograms for ECoG and EEG datasets ####
diff_specs(rootpath + dataset + '/combined_sbjs_power/', ecog_lp, ecog = True)
diff_specs(rootpath + dataset + '/combined_sbjs_power/', eeg_lp, ecog = False)
print('Elapsed time: %.1f seconds' % (time.time() - t_start))