# config.yml.example
---
### WANDB METADATA
# this will be used to associate this run with a wandb project
WANDB_PROJECT: "iNat CV"
WANDB_LOG_FREQ: 100

#### EXPERIMENT PARAMETERS
TENSORBOARD_LOG_DIR: "/data-ssd/alex/experiments/slim_export/1608157217/log_dir"
CHECKPOINT_DIR: "/data-ssd/alex/experiments/slim_export/1608157217/checkpoints/ckpt"
FINAL_SAVE_DIR: "/data-ssd/alex/experiments/slim_export/1608157217/final_model/final"
BACKUP_DIR: "/data-ssd/alex/experiments/slim_export/1608157217/backup"

#### DATASET PARAMETERS
TRAINING_DATA: "/data-ssd/alex/datasets/slim_export_20201213/train_cleaned.json"
VAL_DATA: "/data-ssd/alex/datasets/slim_export_20201213/val_cleaned.json"
TEST_DATA: "/data-ssd/alex/datasets/slim_export_20201213/test_cleaned.json"
NUM_CLASSES: 38148

#### TRAINING PARAMETERS
# training policy - only use mixed precision=true if you have a recent
# NVIDIA GPU with compute capability 7.0 or later (Volta or newer)
TRAIN_MIXED_PRECISION: true
# size of batch, per gpu
BATCH_SIZE: 256
# number of training epochs
NUM_EPOCHS: 80
# initial learning rate for the model
INITIAL_LEARNING_RATE: 0.05
LR_DECAY_FACTOR: 0.94
EPOCHS_PER_LR_DECAY: 4

#### MODEL PARAMETERS
# NOTE(review): key is lowercase unlike the rest of the file; kept as-is
# because the consuming code presumably reads it by this exact name
activation: "softmax" # or null (python None) for logits
# neural network architecture
MODEL_NAME: "xception"
# size of input
IMAGE_SIZE: [299, 299]
# dropout percentage for layer between pool & logits
DROPOUT_PCT: 0.5
# optimizer
OPTIMIZER_NAME: "rmsprop"
RMSPROP_RHO: 0.9
RMSPROP_MOMENTUM: 0.9
RMSPROP_EPSILON: 1.0