# main.py (forked from AIRI-Institute/sensorscan)
import logging

import hydra
import numpy as np
import pandas as pd
from fddbenchmark import FDDEvaluator

from models import pca_kmeans, st_catgan, convae, sensorscan, sensordbscan
from utils import weighted_max_occurence, print_clustering, print_fdd, label_assignment
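
# Example invocation (hypothetical config names; the actual YAML files in
# configs/ define model, dataset, classes, and step_size):
#   python main.py --config-name sensorscan dataset=small_tep classes=all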
@hydra.main(version_base=None, config_path="configs")
def main(cfg):
    # Dispatch to the clustering model selected in the config.
    if cfg.model == 'pca_kmeans':
        train_pred, train_label, test_pred, test_label = pca_kmeans.run(cfg)
    elif cfg.model == 'st_catgan':
        train_pred, train_label, test_pred, test_label = st_catgan.run(cfg)
    elif cfg.model == 'convae':
        train_pred, train_label, test_pred, test_label = convae.run(cfg)
    elif cfg.model == 'sensorscan':
        train_pred, train_label, test_pred, test_label = sensorscan.run(cfg)
    elif cfg.model == 'sensordbscan':
        train_pred, train_label, test_pred, test_label = sensordbscan.run(cfg)
    else:
        raise NotImplementedError(f'Got unknown model: {cfg.model}')
    # Number of fault types per dataset; n_types feeds the
    # weighted_max_occurence matching kept for reference below.
    if cfg.classes == 'all':
        if cfg.dataset in ('small_tep', 'rieth_tep'):
            n_types = 21
        elif cfg.dataset == 'reinartz_tep':
            n_types = 29
        else:
            raise NotImplementedError(f'Got unknown dataset: {cfg.dataset}')
    else:
        n_types = len(cfg.classes)
    logging.info(
        f'Got {np.unique(train_pred).shape[0]} clusters on the train set '
        f'and {np.unique(test_pred).shape[0]} clusters on the test set'
    )
    logging.info('Calculating clustering metrics')
    evaluator = FDDEvaluator(step_size=cfg.step_size)
    metrics = evaluator.evaluate(test_label, test_pred)
    print_clustering(metrics, logging)
    logging.info('Creating label matching')
    # Alternative matching strategy, kept for reference:
    # label_matching = weighted_max_occurence(test_label, test_pred, n_types)
    if test_pred.min() < 0:
        # Shift cluster ids so that DBSCAN's noise label (-1) becomes a valid index.
        test_pred += 1
    label_matching = label_assignment(test_label, test_pred)
    # Map each predicted cluster id to its matched fault type.
    test_pred = pd.Series(label_matching[test_pred], index=test_pred.index)
    logging.info('Calculating FDD metrics')
    evaluator = FDDEvaluator(step_size=cfg.step_size)
    metrics = evaluator.evaluate(test_label, test_pred)
    print_fdd(metrics, logging)
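
# A minimal sketch of what a label_assignment-style matching can look like:
# Hungarian matching between predicted clusters and true labels on their
# co-occurrence counts. This is an illustration only, not necessarily the
# exact implementation of utils.label_assignment; it assumes 0-based
# integer labels and requires scipy.
def _hungarian_label_assignment(labels, preds):
    from scipy.optimize import linear_sum_assignment
    labels = np.asarray(labels)
    preds = np.asarray(preds)
    n = max(labels.max(), preds.max()) + 1
    # overlap[i, j] counts samples placed in cluster i with true label j.
    overlap = np.zeros((n, n), dtype=int)
    np.add.at(overlap, (preds, labels), 1)
    # Choose the cluster-to-label mapping that maximizes total overlap.
    rows, cols = linear_sum_assignment(overlap, maximize=True)
    matching = np.zeros(n, dtype=int)
    matching[rows] = cols
    return matching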

if __name__ == '__main__':
    main()