-
Notifications
You must be signed in to change notification settings - Fork 40
/
Copy pathopt.py
65 lines (58 loc) · 4.15 KB
/
opt.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
import argparse
def parse_args():
    """Parse command-line arguments for the transfer-learning fault-diagnosis trainer.

    Returns:
        argparse.Namespace: all training/optimizer/scheduler/IO options listed below.
    """
    def _str2bool(value):
        # argparse's `type=bool` is a trap: bool('False') == True because any
        # non-empty string is truthy. Accept the usual spellings explicitly.
        return str(value).strip().lower() in ('true', '1', 'yes', 'y')

    def _tradeoff_term(value):
        # Each trade-off term is either an integer or the literal string 'exp'
        # (per the --tradeoff help text); keep 'exp' as-is, convert the rest.
        try:
            return int(value)
        except ValueError:
            return value

    parser = argparse.ArgumentParser(description='From https://github.com/Feaxure-fresh/TL-Fault-Diagnosis-Library')
    # basic parameters
    parser.add_argument('--model_name', type=str, default='CNN',
                        help='Name of the model (in ./models directory)')
    parser.add_argument('--source', type=str, default='CWRU_0',
                        help='Source data, separated by "," (select specific conditions of the dataset with name_number, such as CWRU_0)')
    parser.add_argument('--target', type=str, default='CWRU_1',
                        help='Target data (select specific conditions of the dataset with name_number, such as CWRU_0)')
    parser.add_argument('--data_dir', type=str, default="./datasets",
                        help='Directory of the datasets')
    parser.add_argument('--train_mode', type=str, default='single_source',
                        choices=['single_source', 'source_combine', 'multi_source'],
                        help='Training mode (select correctly before training)')
    # NOTE: the original help string used '' inside a single-quoted literal,
    # which Python concatenates away; double quotes keep the literal visible.
    parser.add_argument('--cuda_device', type=str, default='0',
                        help="Allocate the device to use only one GPU ('' means using cpu)")
    parser.add_argument('--max_epoch', type=int, default=30,
                        help='Number of epochs')
    parser.add_argument('--batch_size', type=int, default=64,
                        help='Batch size')
    parser.add_argument('--signal_size', type=int, default=1024,
                        help='Signal length split by sliding window')
    parser.add_argument('--random_state', type=int, default=10,
                        help='Random state for the entire training')
    # optimizer parameters
    parser.add_argument('--opt', type=str, choices=['sgd', 'adam'], default='sgd', help='Optimizer')
    parser.add_argument('--momentum', type=float, default=0.9, help='Momentum for sgd')
    # `type=tuple` would split a CLI value into characters; take two floats instead.
    parser.add_argument('--betas', type=float, nargs=2, metavar=('BETA1', 'BETA2'),
                        default=(0.9, 0.999), help='Betas for adam')
    parser.add_argument('--weight_decay', type=float, default=5e-4, help='Weight decay for both sgd and adam')
    # learning rate parameters
    parser.add_argument('--lr', type=float, default=0.01, help='Initial learning rate')
    parser.add_argument('--lr_scheduler', type=str, choices=['step', 'exp', 'stepLR', 'fix'], default='stepLR',
                        help='Type of learning rate schedule')
    parser.add_argument('--gamma', type=float, default=0.2,
                        help='Parameter for the learning rate scheduler (except "fix")')
    parser.add_argument('--steps', type=str, default='10',
                        help='Step of learning rate decay for "step" and "stepLR"')
    # optimization parameters
    parser.add_argument('--backbone', type=str, default='CNN', choices=['CNN', 'ResNet'],
                        help='The backbone used to construct the training model (defined in ./modules)')
    parser.add_argument('--num_workers', type=int, default=4,
                        help='Number of workers for dataloader')
    # NOTE(review): 'normlize_type' is a typo of 'normalize_type', but renaming
    # would break callers reading args.normlize_type — kept for compatibility.
    parser.add_argument('--normlize_type', type=str, choices=['0-1', '-1-1', 'mean-std'], default='-1-1',
                        help='Data normalization methods')
    # `type=list` would split a CLI value into characters; accept one or more
    # tokens, each an integer or the string 'exp'.
    parser.add_argument('--tradeoff', type=_tradeoff_term, nargs='+', default=['exp', 'exp', 'exp'],
                        help='Trade-off coefficients for the sum of loss terms. Use integer or "exp" ("exp" represents an increase from 0 to 1 during training)')
    parser.add_argument('--zeta', type=float, default=10.0,
                        help='Parameter to control the increasing rate of "exp" tradeoff')
    parser.add_argument('--dropout', type=float, default=0., help='Dropout layer coefficient')
    # save and load
    # `type=bool` can never yield False from the CLI; use explicit converter.
    parser.add_argument('--save', type=_str2bool, default=True,
                        help='Save logs and trained model checkpoints')
    parser.add_argument('--save_dir', type=str, default='./ckpt',
                        help='Directory to save logs and model checkpoints')
    parser.add_argument('--load_path', type=str, default='',
                        help='Load trained model checkpoints from this path (for testing, not for resuming training)')
    args = parser.parse_args()
    return args