#!/bin/bash
#SBATCH --output=./logs_formal/res_%j.txt  # File to which STDOUT will be written
#SBATCH -e ./logs_formal/res_%j.err        # File to which STDERR will be written
#SBATCH --gres=gpu:1                       # One GPU per job
#SBATCH --mem=20G                          # Total memory per node (20 GB)
##SBATCH --time=07-00:00                   # Runtime in D-HH:MM (currently disabled)
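
# Note: SLURM does not create the --output/-e directory; ./logs_formal must
# exist before submission (e.g. `mkdir -p logs_formal`).
#
# Usage (inferred from the positional reads below):
#   sbatch submit_formal_large.sh <model_name> <exp_name> <task> <seed>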
model_name=$1   # model identifier, embedded in run_name
exp_name=$2     # selects one of the experiment branches below
task=$3         # pretrain/target task name passed to olfmlm
seed=$4         # random seed for the run
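
# Small guard, not part of the original flow: abort early if any of the four
# required arguments is missing.
if [[ $# -lt 4 ]]; then
    echo "usage: sbatch $0 <model_name> <exp_name> <task> <seed>" >&2
    exit 1
fi

# Learning-rate sweep: each entry v is used below as lr = ve-5, so this array
# covers 1e-5 through 7e-5; the commented alternatives are narrower sweeps
# kept for reference.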
lr_array=( 1 2 3 4 5 7 )
#lr_array=( 1 2 3 5 )
#lr_array=( 4 7 )
#lr_array=( 5 )
#lr_array=( 7 )
#lr_array=( 4 7 )
common_para="max_grad_norm = 1.0, weight_decay=1e-6,"
common_name="clip1_l2_e-6"
#common_para="max_grad_norm = 1.0, weight_decay=1e-5,"
#common_name="clip1_l2_e-5"
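# $common_para is spliced verbatim into every active --overrides string below
# (note its trailing comma), and $common_name is embedded in run_name_suffix
# so each run records the clipping / weight-decay setting it used.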
if [[ "$exp_name" == "mf+mlm" ]]; then
for lr_v in "${lr_array[@]}"
do
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, pretrain_tasks = ${task}, target_tasks = ${task}, lr=${lr_v}e-5, batch_size=16, few_shot = -1, max_epochs = 20, pooler_dropout = 0, pool_type=first_init_avg, random_seed = ${seed}, run_name_suffix = adam_pdrop0_avg_e20:s${seed}:lr"
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=16, few_shot = -1, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=first_init_avg, run_name_suffix = _adam_warmup01_pdrop0_avg_ep20_bsz16:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json"
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=16, few_shot = -1, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=first, run_name_suffix = _adam_warmup01_first_ep20_bsz16:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json"
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=16, few_shot = -1, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=proj_avg_train, warmup_ratio=0.2, run_name_suffix = _adam_warmup02_proj_avg_train_ep20_bsz16:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json"
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=16, few_shot = -1, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=proj_avg_train, warmup_ratio=0.2, max_grad_norm = 1.0, weight_decay = 1e-6, run_name_suffix = _adam_warmup02_clip1_l2_e-6_proj_avg_train_ep20_bsz16:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json"
python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=16, few_shot = -1, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=proj_avg_train, warmup_ratio=0.2, $common_para run_name_suffix = _adam_warmup02_${common_name}_proj_avg_train_ep20_bsz16:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json"
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=16, few_shot = -1, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=first_init, warmup_ratio=0.2, run_name_suffix = _adam_warmup02_first_init_ep20_bsz16:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json"
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=32, few_shot = -1, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=proj_avg_train, warmup_ratio=0.2, run_name_suffix = _adam_warmup02_proj_avg_train_ep20_bsz32:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json"
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=16, few_shot = -1, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=first_init_avg, run_name_suffix = _adam_warmup01_pdrop0_avg_ep20_bsz16:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json"
done
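# The "too large" branches halve batch_size and compensate with
# accumulate_grad_iter=2, so the effective batch size (and the bszN tag in
# run_name_suffix) is unchanged.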
elif [[ "$exp_name" == "mf+mlm_too_large" ]]; then
for lr_v in "${lr_array[@]}"
do
python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=8, accumulate_grad_iter=2, few_shot = -1, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=proj_avg_train, warmup_ratio=0.2, $common_para run_name_suffix = _adam_warmup02_${common_name}_proj_avg_train_ep20_bsz16:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json"
done
elif [[ "$exp_name" == "mf+mlm_few" ]]; then
for lr_v in "${lr_array[@]}"
do
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, pretrain_tasks = ${task}, target_tasks = ${task}, lr=${lr_v}e-5, batch_size=8, few_shot = 1000, max_epochs = 20, pooler_dropout = 0, pool_type=first_init_avg, random_seed = ${seed}, run_name_suffix = adam_pdrop0_avg_e20:s${seed}:lr"
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=8, few_shot = 1000, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=first_init_avg, run_name_suffix = _adam_warmup01_pdrop0_avg_ep20_bsz8:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json"
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=8, few_shot = 1000, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=first, run_name_suffix = _adam_warmup01_first_ep20_bsz8:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json"
python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=8, few_shot = 1000, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=proj_avg_train, warmup_ratio=0.2, $common_para run_name_suffix = _adam_warmup02_${common_name}_proj_avg_train_ep20_bsz8:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json"
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=8, few_shot = 1000, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=proj_avg_train, warmup_ratio=0.2, run_name_suffix = _adam_warmup02_proj_avg_train_ep20_bsz8:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json"
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=8, few_shot = 1000, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=proj_avg_train, run_name_suffix = _adam_warmup01_proj_avg_train_ep20_bsz8:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json"
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=8, few_shot = 1000, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=first_init_avg, run_name_suffix = _adam_warmup01_pdrop0_avg_ep20_bsz8:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json"
done
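# "few_noise" repeats the 1000-example few-shot setting with noise_ratio = 0.7
# (presumably the fraction of few-shot training data perturbed; see the olfmlm
# override of the same name).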
elif [[ "$exp_name" == "mf+mlm_few_noise" ]]; then
for lr_v in "${lr_array[@]}"
do
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=8, noise_ratio = 0.7, few_shot = 1000, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=first, run_name_suffix = _adam_warmup01_first_ep20_bsz8:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json"
python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=8, noise_ratio = 0.7, few_shot = 1000, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=proj_avg_train, warmup_ratio=0.2, $common_para run_name_suffix = _adam_warmup02_${common_name}_proj_avg_train_ep20_bsz8:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json"
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=8, noise_ratio = 0.7, few_shot = 1000, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=first_init_avg, run_name_suffix = _adam_warmup01_pdrop0_avg_ep20_bsz8:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json"
done
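# The SuperGLUE branches additionally pass -c superglue-bert.conf, presumably
# so olfmlm loads its SuperGLUE task configuration on top of the overrides.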
elif [[ "$exp_name" == "mf+mlm_super_glue_too_large" ]]; then
for lr_v in "${lr_array[@]}"
do
python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=4, accumulate_grad_iter=2, few_shot = -1, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=proj_avg_train, warmup_ratio=0.2, $common_para run_name_suffix = _adam_warmup02_${common_name}_proj_avg_train_ep20_bsz8:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json" -c superglue-bert.conf
done
elif [[ "$exp_name" == "mf+mlm_super_glue" ]]; then
for lr_v in "${lr_array[@]}"
do
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, pretrain_tasks = ${task}, target_tasks = ${task}, lr=${lr_v}e-5, batch_size=4, few_shot = -1, max_epochs = 20, pooler_dropout = 0, pool_type=first_init_avg, random_seed = ${seed}, run_name_suffix = adam_pdrop0_avg_e20:s${seed}:lr" -c superglue-bert.conf
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=4, few_shot = -1, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=first_init_avg, run_name_suffix = _adam_warmup01_pdrop0_avg_ep20_bsz4:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json" -c superglue-bert.conf
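# superglue_record (ReCoRD) is much larger than the other SuperGLUE tasks, so
# it gets its own schedule: capped validation data, a fixed validation
# interval, higher patience, and only the lr_v = 1 (1e-5) sweep point.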
if [[ "${task}" == "superglue_record" ]]; then
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=8, warmup_ratio = 0.01, few_shot = -1, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=first_init_avg, run_name_suffix = _adam_warmup01_pdrop0_avg_ep20_bsz4:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json" -c superglue-bert.conf
if [[ "${lr_v}" == 1 ]]; then
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_1, lr=1e-5, patience = 40, batch_size=8, val_data_limit = 50000, target_train_max_vals = 400, few_shot = -1, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=first, run_name_suffix = _adam_warmup01_400_first_ep20_bsz4:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json" -c superglue-bert.conf
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_1, lr=1e-5, patience = 40, batch_size=8, val_data_limit = 50000, target_train_max_vals = 400, few_shot = -1, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=proj_avg_train, run_name_suffix = _adam_warmup01_400_proj_avg_train_ep20_bsz4:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json" -c superglue-bert.conf
python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, patience = 80, batch_size=16, accumulate_grad_iter=2, val_data_limit = 40000, target_train_max_vals = 150, target_train_val_interval = 2000, few_shot = -1, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=proj_avg_train, warmup_ratio=0.2, $common_para run_name_suffix = _adam_warmup02_${common_name}_proj_avg_train_ep20_bsz8:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json" -c superglue-bert.conf
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_1, lr=1e-5, patience = 40, batch_size=8, val_data_limit = 50000, target_train_max_vals = 400, few_shot = -1, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=first_init_avg, run_name_suffix = _adam_warmup01_400_pdrop0_avg_ep20_bsz4:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json" -c superglue-bert.conf
fi
else
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=4, target_train_max_vals = 400, few_shot = -1, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=first_init_avg, run_name_suffix = _adam_warmup01_pdrop0_avg_ep20_bsz4:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json" -c superglue-bert.conf
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=8, few_shot = -1, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=first, run_name_suffix = _adam_warmup01_first_ep20_bsz8:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json" -c superglue-bert.conf
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=8, few_shot = -1, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=first_init, warmup_ratio=0.2, run_name_suffix = _adam_warmup02_first_init_ep20_bsz8:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json" -c superglue-bert.conf
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=16, few_shot = -1, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=proj_avg_train, warmup_ratio=0.2, run_name_suffix = _adam_warmup02_proj_avg_train_ep20_bsz16:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json" -c superglue-bert.conf
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=8, few_shot = -1, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=proj_avg_train, warmup_ratio=0.2, run_name_suffix = _adam_warmup02_proj_avg_train_ep20_bsz8:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json" -c superglue-bert.conf
python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=8, few_shot = -1, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=proj_avg_train, warmup_ratio=0.2, $common_para run_name_suffix = _adam_warmup02_${common_name}_proj_avg_train_ep20_bsz8:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json" -c superglue-bert.conf
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=8, few_shot = -1, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=first_init_avg, run_name_suffix = _adam_warmup01_pdrop0_avg_ep20_bsz8:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json" -c superglue-bert.conf
fi
done
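# The remaining branches rerun the sweep above at smaller few-shot budgets
# (few_shot = 1000, 100, or 32) with correspondingly smaller batch sizes.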
elif [[ "$exp_name" == "mf+mlm_super_glue_few" ]]; then
for lr_v in "${lr_array[@]}"
do
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, pretrain_tasks = ${task}, target_tasks = ${task}, lr=${lr_v}e-5, batch_size=4, few_shot = 1000, max_epochs = 20, pooler_dropout = 0, pool_type=first_init_avg, random_seed = ${seed}, run_name_suffix = adam_pdrop0_avg_e20:s${seed}:lr" -c superglue-bert.conf
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=4, few_shot = 1000, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=first_init_avg, run_name_suffix = _adam_warmup01_pdrop0_avg_ep20_bsz4:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json" -c superglue-bert.conf
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=4, few_shot = 1000, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=first, run_name_suffix = _adam_warmup01_first_ep20_bsz4:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json" -c superglue-bert.conf
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=4, few_shot = 1000, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=proj_avg_train, run_name_suffix = _adam_warmup01_proj_avg_train_ep20_bsz4:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json" -c superglue-bert.conf
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=4, few_shot = 1000, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=proj_avg_train, warmup_ratio=0.2, run_name_suffix = _adam_warmup02_proj_avg_train_ep20_bsz4:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json" -c superglue-bert.conf
python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=8, few_shot = 1000, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=proj_avg_train, warmup_ratio=0.2, $common_para run_name_suffix = _adam_warmup02_${common_name}_proj_avg_train_ep20_bsz8:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json" -c superglue-bert.conf
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=8, few_shot = 1000, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=proj_avg_train, warmup_ratio=0.3, run_name_suffix = _adam_warmup03_proj_avg_train_ep20_bsz8:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json" -c superglue-bert.conf
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=4, few_shot = 1000, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=proj_avg_train, warmup_ratio=0.3, run_name_suffix = _adam_warmup03_proj_avg_train_ep20_bsz4:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json" -c superglue-bert.conf
done
elif [[ "$exp_name" == "mf+mlm_super_glue_few_100_ry" ]]; then
for lr_v in "${lr_array[@]}"
do
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=4, few_shot = 100, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=first_init_avg, run_name_suffix = _adam_warmup01_pdrop0_avg_ep20_bsz4:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json" -c superglue-bert.conf
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=4, few_shot = 100, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=first, run_name_suffix = _adam_warmup01_first_ep20_bsz4:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json" -c superglue-bert.conf
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=4, few_shot = 100, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=proj_avg_train, run_name_suffix = _adam_warmup01_proj_avg_train_ep20_bsz4:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json" -c superglue-bert.conf
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=4, few_shot = 100, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=proj_avg_train, warmup_ratio=0.2, run_name_suffix = _adam_warmup02_proj_avg_train_ep20_bsz4:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json" -c superglue-bert.conf
python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=4, few_shot = 100, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=proj_avg_train, warmup_ratio=0.2, $common_para run_name_suffix = _adam_warmup02_${common_name}_proj_avg_train_ep20_bsz4:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json" -c superglue-bert.conf
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=4, few_shot = 100, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=first_init_avg, run_name_suffix = _adam_warmup01_pdrop0_avg_ep20_bsz4:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json" -c superglue-bert.conf
done
elif [[ "$exp_name" == "mf+mlm_few_100_ry" ]]; then
for lr_v in "${lr_array[@]}"
do
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, pretrain_tasks = ${task}, target_tasks = ${task}, lr=${lr_v}e-5, batch_size=4, few_shot = 100, max_epochs = 20, pooler_dropout = 0, pool_type=first_init_avg, random_seed = ${seed}, run_name_suffix = adam_pdrop0_avg_e20:s${seed}:lr"
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=4, few_shot = 100, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=first_init_avg, run_name_suffix = _adam_warmup01_pdrop0_avg_ep20_bsz4:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json"
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=4, few_shot = 100, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=first, run_name_suffix = _adam_warmup01_first_ep20_bsz4:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json"
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=4, few_shot = 100, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=proj_avg_train, run_name_suffix = _adam_warmup01_proj_avg_train_ep20_bsz4:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json"
python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=4, few_shot = 100, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=proj_avg_train, warmup_ratio=0.2, $common_para run_name_suffix = _adam_warmup02_${common_name}_proj_avg_train_ep20_bsz4:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json"
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=4, few_shot = 100, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=first_init_avg, run_name_suffix = _adam_warmup01_pdrop0_avg_ep20_bsz4:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json"
done
elif [[ "$exp_name" == "mf+mlm_few_32" ]]; then
for lr_v in "${lr_array[@]}"
do
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=4, few_shot = 32, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=first_init_avg, run_name_suffix = _adam_warmup01_pdrop0_avg_ep20_bsz4:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json"
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=4, few_shot = 32, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=first, run_name_suffix = _adam_warmup01_first_ep20_bsz4:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json"
python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=4, few_shot = 32, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=proj_avg_train, warmup_ratio=0.2, $common_para run_name_suffix = _adam_warmup02_${common_name}_proj_avg_train_ep20_bsz4:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json"
#python -m olfmlm.evaluate.main --exp_name $exp_name --overrides "run_name = ${model_name}_${lr_v}, lr=${lr_v}e-5, batch_size=4, few_shot = 32, pretrain_tasks = ${task}, target_tasks = ${task}, max_epochs = 20, random_seed = ${seed}, pooler_dropout = 0, pool_type=first_init_avg, run_name_suffix = _adam_warmup01_pdrop0_avg_ep20_bsz4:s${seed}:lr, tokenizer = bert-large-uncased, input_module = bert-large-uncased, bert_config_file = olfmlm/bert_large_config.json"
done
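else
    # Unrecognized exp_name: fail loudly instead of exiting silently (a small
    # guard added here, not in the original flow).
    echo "Unknown exp_name: ${exp_name}" >&2
    exit 1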
fi