"""Time-series Generative Adversarial Networks (TimeGAN) Codebase.
Reference: Jinsung Yoon, Daniel Jarrett, Mihaela van der Schaar,
"Time-series Generative Adversarial Networks,"
Neural Information Processing Systems (NeurIPS), 2019.
Paper link: https://papers.nips.cc/paper/8789-time-series-generative-adversarial-networks
-----------------------------
main_timegan.py
(1) Import data
(2) Generate synthetic data
(3) Evaluate the performances in three ways
- Visualization (t-SNE, PCA)
- Discriminative score
- Predictive score
"""

## Necessary packages
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import numpy as np
import warnings
warnings.filterwarnings("ignore")

# 1. TimeGAN model
from timegan import timegan
# 2. Data loading
from data_loading import real_data_loading, sine_data_generation
# 3. Metrics
from metrics.discriminative_metrics import discriminative_score_metrics
from metrics.predictive_metrics import predictive_score_metrics
from metrics.visualization_metrics import visualization


def main(args):
  """Main function for TimeGAN experiments.

  Args:
    - data_name: sine, stock, energy, or bg_level
    - seq_len: sequence length
    - num_patients: number of patients used when loading real data
    - Network parameters (should be optimized for different datasets)
      - module: gru, lstm, or lstmLN
      - hidden_dim: hidden dimensions
      - num_layer: number of layers
    - iteration: number of training iterations
    - batch_size: the number of samples in each batch
    - metric_iteration: number of iterations for metric computation

  Returns:
    - ori_data: original data
    - generated_data: generated synthetic data
    - metric_results: discriminative and predictive scores
  """
  ## Data loading
  if args.data_name in ['stock', 'energy', 'bg_level']:
    ori_data = real_data_loading(args.data_name, args.seq_len, args.num_patients)
  elif args.data_name == 'sine':
    # Set number of samples and its dimensions
    no, dim = 10000, 5
    _, ori_data = sine_data_generation(no, args.seq_len, dim)

  print(args.data_name + ' dataset is ready.')

  ## Synthetic data generation by TimeGAN
  # Set network parameters
  parameters = dict()
  parameters['module'] = args.module
  parameters['hidden_dim'] = args.hidden_dim
  parameters['num_layer'] = args.num_layer
  parameters['iterations'] = args.iteration
  parameters['batch_size'] = args.batch_size

  generated_data = timegan(ori_data, parameters)
  print('Finish Synthetic Data Generation')
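
  # generated_data is expected to follow the same [no, seq_len, dim] layout as
  # ori_data, so the metrics below can compare the two sets of sequences directly.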

  ## Performance metrics
  # Output initialization
  metric_results = dict()

  # 1. Discriminative score
  discriminative_score = list()
  for _ in range(args.metric_iteration):
    temp_disc = discriminative_score_metrics(ori_data, generated_data)
    discriminative_score.append(temp_disc)

  metric_results['discriminative'] = np.mean(discriminative_score)

  # 2. Predictive score
  predictive_score = list()
  for _ in range(args.metric_iteration):
    temp_pred = predictive_score_metrics(ori_data, generated_data)
    predictive_score.append(temp_pred)

  metric_results['predictive'] = np.mean(predictive_score)
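
  # Per the TimeGAN paper, lower is better for both metrics: the discriminative
  # score is |0.5 - accuracy| of a post-hoc real-vs-synthetic classifier, and the
  # predictive score is the MAE of a one-step-ahead predictor trained on the
  # synthetic data and evaluated on the original data (train-on-synthetic,
  # test-on-real).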

  # 3. Visualization (PCA and t-SNE)
  from matplotlib import pyplot as plt

  '''def inverse_min_max_scaler(data, denominator, min_val):
    """Invert the min-max normalization applied during data loading.

    Args:
      - data: normalized data

    Returns:
      - data: data rescaled back to its original range
    """
    data = data * (denominator + 1e-7)
    data = data + min_val
    return data, denominator, np.min(data, 0)'''

  # o_data = np.squeeze(ori_data)
  # ori_data = inverse_min_max_scaler(ori_data, denominator, min_val)
  g_data = np.squeeze(generated_data)
  # generated_data = inverse_min_max_scaler(generated_data, denominator, min_val)
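
  # The saves below assume the './result' output directory already exists;
  # creating it here keeps the script from failing on a fresh checkout.
  import os
  os.makedirs('./result', exist_ok=True)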
  np.savetxt('./result/generated_data.csv', g_data, delimiter=",")

  # Plot the first 20 generated and original samples, one figure per sample
  # with every feature dimension overlaid.
  for i in range(20):
    for j in range(generated_data.shape[-1]):
      graph_data = generated_data[i, :, j]
      # print(graph_data.shape)
      plt.title('generated data')
      plt.plot(graph_data)
    plt.savefig('./result/generated_{}.png'.format(i))
    plt.clf()

  for i in range(20):
    for j in range(ori_data.shape[-1]):
      graph_data = ori_data[i, :, j]
      # print(graph_data.shape)
      plt.title('original data')
      plt.plot(graph_data)
    plt.savefig('./result/ori_{}.png'.format(i))
    plt.clf()

  visualization(ori_data, generated_data, 'pca')
  visualization(ori_data, generated_data, 'tsne')

  ## Print discriminative and predictive scores
  print(metric_results)

  return ori_data, generated_data, metric_results


if __name__ == '__main__':
  # Inputs for the main function
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--data_name',
      choices=['sine', 'stock', 'bg_level', 'energy'],
      default='bg_level',
      type=str)
  parser.add_argument(
      '--seq_len',
      help='sequence length',
      default=24,
      type=int)
  parser.add_argument(
      '--module',
      choices=['gru', 'lstm', 'lstmLN'],
      default='gru',
      type=str)
  parser.add_argument(
      '--hidden_dim',
      help='hidden state dimensions (should be optimized)',
      default=24,
      type=int)
  parser.add_argument(
      '--num_layer',
      help='number of layers (should be optimized)',
      default=3,
      type=int)
  parser.add_argument(
      '--iteration',
      help='training iterations (should be optimized)',
      default=50000,
      type=int)
  parser.add_argument(
      '--batch_size',
      help='the number of samples in mini-batch (should be optimized)',
      default=128,
      type=int)
  parser.add_argument(
      '--metric_iteration',
      help='iterations of the metric computation',
      default=10,
      type=int)
  parser.add_argument(
      '--num_patients',
      help='number of patients for training',
      default=18,
      type=int)

  args = parser.parse_args()

  # Call the main function
  ori_data, generated_data, metrics = main(args)