# config_pos.yaml
## Where the samples will be written
save_data: pos/run/example
## Where the vocab(s) will be written
src_vocab: pos/run/example.vocab.src
tgt_vocab: pos/run/example.vocab.tgt
# Allow overwriting existing files in the folder
overwrite: true
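# The save_data/vocab paths above are read and written by onmt_build_vocab;
# a typical invocation (flags as in the OpenNMT-py quickstart; -n_sample -1
# scans the full corpus) would be:
#   onmt_build_vocab -config config_pos.yaml -n_sample -1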
# Corpus opts:
data:
    corpus_1:
        path_src: short/train.fr
        path_tgt: short/train.en
        transforms: [inferfeats, filtertoolong]
        weight: 1
    valid:
        path_src: short/val.fr
        path_tgt: short/val.en
        transforms: [inferfeats]
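# Note: "weight" sets the relative sampling frequency across training corpora;
# a hypothetical second corpus sampled twice as often would look like:
#   corpus_2:
#     path_src: other/train.fr
#     path_tgt: other/train.en
#     transforms: [inferfeats, filtertoolong]
#     weight: 2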
# Vocabulary files from the baseline config (superseded by the pos/run paths above)
#src_vocab: baseline/run/example.vocab.src
#tgt_vocab: baseline/run/example.vocab.tgt
# Transform options
reversible_tokenization: "joiner"
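# "joiner" tells the inferfeats transform that subwords carry OpenNMT
# Tokenizer joiner marks (￭), so pieces split from one word inherit that
# word's feature; "spacer" would be the spacer-annotated (▁) alternative.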
# Features options
n_src_feats: 1
src_feats_defaults: "0"
feat_merge: "concat"
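# Expected input sketch for n_src_feats: 1 (format per OpenNMT-py source
# features; the example tokens are assumed): each token in short/train.fr
# carries one feature attached with the "￨" separator, e.g.:
#   le￨DET chat￨NOUN dort￨VERB
# src_feats_defaults: "0" fills tokens that arrive without a feature, and
# feat_merge: "concat" concatenates feature embeddings to word embeddings.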
####################################################################################################################
# General opts
save_model: pos/run/model/
save_checkpoint_steps: 5000
valid_steps: 5000
train_steps: 50000
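# A checkpoint and a validation pass every 5000 steps means 10 of each
# over the 50000-step run.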
# Batching
bucket_size: 262144
world_size: 1
gpu_ranks: [0]
num_workers: 2
batch_type: "tokens"
batch_size: 4096
valid_batch_size: 2048
accum_count: [4]
accum_steps: [0]
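# Effective batch per update: 4096 tokens x accum_count 4 x world_size 1
# ~= 16384 tokens; gradient accumulation applies from step 0 onward.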
# Optimization
model_dtype: "fp16"
optim: "adam"
learning_rate: 2
warmup_steps: 8000
decay_method: "noam"
adam_beta2: 0.998
max_grad_norm: 0
label_smoothing: 0.1
param_init: 0
param_init_glorot: true
normalization: "tokens"
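# Noam schedule (standard Transformer recipe; formula assumed from the
# original paper): lr(s) = 2 * 512^-0.5 * min(s^-0.5, s * 8000^-1.5),
# peaking near 1e-3 at step 8000 and decaying as 1/sqrt(s) afterwards.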
# Model
encoder_type: transformer
decoder_type: transformer
position_encoding: true
enc_layers: 6
dec_layers: 6
heads: 8
hidden_size: 512
word_vec_size: 512
transformer_ff: 2048
dropout_steps: [0]
dropout: [0.1]
attention_dropout: [0.1]
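# Usage sketch (command names from OpenNMT-py; checkpoint name assumed from
# the save_model prefix plus the default _step_N.pt suffix):
#   onmt_train -config config_pos.yaml
#   onmt_translate -model pos/run/model/_step_50000.pt -src short/test.fr -output pred.en
# The test source is assumed to carry the same ￨ features as the training data.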