#!/usr/bin/env python3
# coding=utf-8
#
# Copyright 2024 Institute of Formal and Applied Linguistics, Faculty of
# Mathematics and Physics, Charles University, Czech Republic.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
"""NameTag 3, flat and nested NER training and prediction script.
Implements both standard flat NER by fine-tuning with a softmax classification
layer and nested NER by seq2seq decoder with hard attention proposed in
https://aclanthology.org/P19-1527.pdf.
If you use this software, please give us credit by referencing https://aclanthology.org/P19-1527.pdf:
@inproceedings{strakova-etal-2019-neural,
title = "Neural Architectures for Nested {NER} through Linearization",
author = "Strakov{\'a}, Jana and
Straka, Milan and
Hajic, Jan",
editor = "Korhonen, Anna and
Traum, David and
M{\`a}rquez, Llu{\'\i}s",
booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",
month = jul,
year = "2019",
address = "Florence, Italy",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/P19-1527",
doi = "10.18653/v1/P19-1527",
pages = "5326--5331",
}
Installation:

See README.md for installation instructions.

Usage:

venv/bin/python3 nametag3.py [--argument=value]

Example Usage:

venv/bin/python3 nametag3.py \
  --load_checkpoint=models/nametag3-multilingual-conll-240830/ \
  --test_data=examples/en_input.conll
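
A hypothetical training invocation (the file names and the HF model choice are
only illustrative; all flags used here are defined in the argument parser
below):

venv/bin/python3 nametag3.py \
  --train_data=train.conll \
  --dev_data=dev.conll \
  --hf_plm=xlm-roberta-large \
  --decoding=classification \
  --epochs=10 \
  --save_best_checkpoint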
Input:

The input data file format is a vertical file, one token and its label(s) per
line: labels separated by a '|', columns separated by a tabulator; sentences
delimited by newlines (such as the first and the fourth column in the
well-known CoNLL-2003 shared task). A line containing '-DOCSTART-' with the
label 'O', as seen in the CoNLL-2003 shared task data, can be used to mark
document boundaries. Input examples can be found in 'nametag3.py' and in
'examples'.
John	B-PER
loves	O
Mary	B-PER
.	O

Mary	B-PER
loves	O
John	B-PER
.	O
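
For nested NER, a token may carry several labels joined by '|'. A hypothetical
nested example (the label inventory and the ordering of the stacked labels
follow the training corpus, so treat this only as an illustration of the
format):

Charles	B-PER|B-ORG
University	I-ORG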
"""
import io
import json
import os
import pickle
import sys

# Force CPU fallback for PyTorch with the MPS device due to some operators not
# being implemented in MPS.
os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1'
os.environ.setdefault("KERAS_BACKEND", "torch")
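# Note: KERAS_BACKEND must be set before keras is imported below, as Keras 3
# selects its backend at import time.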
# This is only set for debugging to force all CUDA calls to be synchronous.
# os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
import keras
import numpy as np
import torch
import transformers

from nametag3_dataset_collection import NameTag3DatasetCollection
from nametag3_model import nametag3_model_factory
if __name__ == "__main__":
    import argparse
    import datetime
    import os
    import re
    # Parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("--batch_size", default=64, type=int, help="Batch size.")
    parser.add_argument("--checkpoint_filename", default="checkpoint.weights.h5", type=str, help="Checkpoint filename.")
    parser.add_argument("--context_type", default="split_document", choices=["max_context", "sentence", "document", "split_document"], help="Context type to add to sentence.")
    parser.add_argument("--corpus", default=None, type=str, help="Corpus name. If given for training, the corpus name will be saved with the model.")
    parser.add_argument("--decoding", default="classification", choices=["classification", "seq2seq"], help="Decoding head.")
    parser.add_argument("--dev_data", default=None, type=str, help="Dev data.")
    parser.add_argument("--dropout", default=0.5, type=float, help="Dropout rate.")
    parser.add_argument("--epochs", default=10, type=int, help="Number of epochs.")
    parser.add_argument("--epochs_frozen", default=0, type=int, help="Number of pretraining epochs with frozen transformer.")
    parser.add_argument("--evaluate_test_data", default=False, action="store_true", help="If enabled, a second, tab-separated column with gold data is expected. By default, only one input column is expected.")
    parser.add_argument("--keep_original_casing", default=False, action="store_true", help="If enabled, turns truecasing off, i.e., keeps the original casing in the data.")
    parser.add_argument("--latent_dim", default=256, type=int, help="RNN decoder hidden dim.")
    parser.add_argument("--learning_rate", default=1e-4, type=float, help="Learning rate.")
    parser.add_argument("--learning_rate_frozen", default=1e-3, type=float, help="Learning rate for frozen pretraining.")
    parser.add_argument("--logdir", default="logs", type=str, help="Logdir name.")
    parser.add_argument("--max_sentences_train", default=None, type=int, help="Limit the number of training sentences (for debugging).")
    parser.add_argument("--max_labels_per_token", default=5, type=int, help="Maximum labels per token.")
    parser.add_argument("--name", default=None, type=str, help="Experiment name.")
    parser.add_argument("--hf_plm", default=None, type=str, help="HF pre-trained model name.")
    parser.add_argument("--load_checkpoint", default=None, type=str, help="Load a previously saved checkpoint.")
    parser.add_argument("--postprocess", default=False, action="store_true", help="If enabled, performs a sanity check on the predicted test output to ensure entities are correctly nested and unique.")
    parser.add_argument("--prevent_all_dropouts", default=False, action="store_true", help="If enabled, sets --dropout=0., --transformer_hidden_dropout_prob=0. and --transformer_attention_probs_dropout_prob=0.")
    parser.add_argument("--remove_optimizer_from_checkpoint", default=False, action="store_true", help="If enabled, removes the optimizer from the loaded checkpoint, saves the checkpoint, and exits.")
    parser.add_argument("--sampling", default="concatenate", choices=["proportional", "uniform", "concatenate", "temperature", "temperature_logits", "temperature_probs"], help="Sampling strategy for multilingual datasets.")
    parser.add_argument("--save_best_checkpoint", default=False, action="store_true", help="Save the best checkpoint on dev if set.")
    parser.add_argument("--seed", default=42, type=int, help="Random seed.")
    parser.add_argument("--subword_masking", default=0.0, type=float, help="Mask subwords with the given probability.")
    parser.add_argument("--steps_per_epoch", default=None, type=int, help="Steps per epoch. Default None (an epoch iterates over all data).")
    parser.add_argument("--tagsets", default=None, type=str, help="Specifies the tagsets corresponding to the given corpora for multitagset training, separated by commas. During training, each tagset is applied to its respective corpus. When used in prediction mode, the output will only include valid tags from the specified tagset(s).")
    parser.add_argument("--temperature", default=2.0, type=float, help="Value of temperature for temperature sampling.")
    parser.add_argument("--test_data", default=None, type=str, help="Test data.")
    parser.add_argument("--time", default=False, action="store_true", help="Measure prediction time.")
    parser.add_argument("--train_data", default=None, type=str, help="Training data.")
    parser.add_argument("--transformer_hidden_dropout_prob", default=None, type=float, help="HF Transformer hidden dropout prob. If None, the default value from the config is used.")
    parser.add_argument("--transformer_attention_probs_dropout_prob", default=None, type=float, help="HF Transformer dropout ratio for the attention probabilities. If None, the default value from the config is used.")
    parser.add_argument("--threads", default=4, type=int, help="Maximum number of threads to use.")
    parser.add_argument("--warmup_epochs", default=1, type=int, help="Number of warmup epochs.")
    parser.add_argument("--warmup_epochs_frozen", default=1, type=int, help="Number of warmup epochs for frozen pretraining.")
    args = parser.parse_args()
    # During inference, transfer crucial train args
    if args.load_checkpoint:
        with open("{}/options.json".format(args.load_checkpoint), mode="r") as options_file:
            train_args = argparse.Namespace(**json.load(options_file))
        for key in ["checkpoint_filename", "context_type", "decoding", "hf_plm", "keep_original_casing"]:
            args.__dict__[key] = train_args.__dict__[key]
    # Set threads and random seed
    torch.set_num_threads(args.threads)
    torch.set_num_interop_threads(args.threads)
    keras.utils.set_random_seed(args.seed)
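    # Disable Keras traceback filtering so that full stack traces are shown.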
    keras.config.disable_traceback_filtering()
    # Create logdir
    logargs = dict(vars(args).items())
    if args.corpus and len(args.corpus.split(",")) > 1:
        logargs["corpus"] = "multilingual"
    for key in ["checkpoint_filename", "dev_data", "keep_original_casing",
                "load_checkpoint", "logdir", "max_labels_per_token",
                "max_sentences_train", "prevent_all_dropouts", "sampling",
                "save_best_checkpoint", "seed", "subword_masking", "tagsets",
                "temperature", "test_data", "threads", "time", "train_data",
                "transformer_hidden_dropout_prob",
                "transformer_attention_probs_dropout_prob", "warmup_epochs",
                "warmup_epochs_frozen"]:
        del logargs[key]

    # Include the unique Slurm job id if running in a Slurm-managed environment.
    slurm_job_id = os.getenv("SLURM_JOB_ID")
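    # The logdir name abbreviates each remaining option to the initials of its
    # underscore-separated parts (e.g., "batch_size" -> "bs") and strips
    # directory prefixes from path-valued options to keep the name short.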
    args.logdir = os.path.join(args.logdir,
                               "{}-{}{}-{}".format(os.path.basename(__file__),
                                                   slurm_job_id + "-" if slurm_job_id else "",
                                                   datetime.datetime.now().strftime("%Y-%m-%d_%H%M%S"),
                                                   ",".join(("{}={}".format(re.sub("(.)[^_]*_?", r"\1", key), re.sub("^.*/", "", value) if type(value) == str else value)
                                                             for key, value in sorted(logargs.items())))))
    print("Making logdir \"{}\"".format(args.logdir), file=sys.stderr, flush=True)
    os.makedirs(args.logdir, exist_ok=True)
    # Load the tokenizer
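    # RoBERTa-style byte-level BPE tokenizers need add_prefix_space=True to
    # tokenize already word-split input, hence the flag below.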
    tokenizer = transformers.AutoTokenizer.from_pretrained(args.hf_plm,
                                                           add_prefix_space=args.hf_plm in ["roberta-base", "roberta-large", "ufal/robeczech-base"])
    # We load the training data only to get the mappings, nothing else is used.
    train_loaded = None
    if args.load_checkpoint:
        train_loaded = NameTag3DatasetCollection(args)
        train_loaded.load_collection_mappings(args.load_checkpoint)

    train_collection = None
    if args.train_data:
        train_collection = NameTag3DatasetCollection(args, tokenizer=tokenizer, filenames=args.train_data, train_collection=train_loaded)
        if args.save_best_checkpoint:
            train_collection.save_mappings(args.logdir)

    dev_collection = None
    if args.dev_data:
        dev_collection = NameTag3DatasetCollection(args, tokenizer=tokenizer, filenames=args.dev_data, train_collection=train_collection if args.train_data else train_loaded)

    if args.test_data:
        test_collection = NameTag3DatasetCollection(args, tokenizer=tokenizer, filenames=args.test_data, train_collection=train_collection if args.train_data else train_loaded)

    # Construct the model
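    # nametag3_model_factory selects the model class by the decoding head
    # ("classification" for flat NER, "seq2seq" for nested NER); the output
    # label inventory comes from the training (or loaded) mappings.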
    model = nametag3_model_factory(args.decoding)(len(train_collection.label2id()) if train_collection else len(train_loaded.label2id()),
                                                  args,
                                                  train_collection.id2label() if train_collection else train_loaded.id2label(),
                                                  tokenizer)
    # Pretrain with frozen transformer
    if args.train_data and args.epochs_frozen:
        print("Pretraining with frozen transformer for {} epochs.".format(args.epochs_frozen), file=sys.stderr, flush=True)
        model.compile(training_batches=train_collection.training_batches(), frozen=True)
        model.fit(args.epochs_frozen,
                  train_collection,
                  dev_collection=dev_collection,
                  save_best_checkpoint=args.save_best_checkpoint)

    # Compile the model
    model.compile(training_batches=train_collection.training_batches() if train_collection else 0, frozen=False)
    # Load checkpoint
    if args.load_checkpoint:
        model.load_checkpoint(os.path.join(args.load_checkpoint, args.checkpoint_filename))

        if args.remove_optimizer_from_checkpoint:
            new_checkpoint_filename = os.path.join(args.logdir, "{}_wo_optimizer.weights.h5".format(args.checkpoint_filename[:-len(".weights.h5")]))
            print("Saving checkpoint without optimizer to {}".format(new_checkpoint_filename), file=sys.stderr, flush=True)
            model.optimizer = None
            model.save_weights(new_checkpoint_filename)
            sys.exit()
    # Finetune the transformer
    if args.train_data and args.epochs:
        print("Finetuning for {} epochs.".format(args.epochs), file=sys.stderr, flush=True)
        model.fit(args.epochs_frozen + args.epochs,
                  train_collection,
                  dev_collection=dev_collection,
                  save_best_checkpoint=args.save_best_checkpoint,
                  initial_epoch=args.epochs_frozen)
    # Predict test data, evaluate if --evaluate_test_data is set
    if args.test_data:
        test_scores = []
        for i, test_dataset in enumerate(test_collection.datasets):

            # Predict and save the predicted output to the output file.
            predictions_filename = "{}_{}_predictions.conll".format("test", test_dataset.corpus)
            print("Predicting dataset {} ({}), predictions will be saved to \"{}\"".format(i+1, test_dataset.corpus, os.path.join(args.logdir, predictions_filename)), file=sys.stderr, flush=True)
            predicted_output = "".join(model.predict("test", test_dataset, args))

            # Postprocess if --postprocess enabled.
            if args.postprocess:
                predicted_output = model.postprocess(predicted_output)

            with open(os.path.join(args.logdir, predictions_filename), "w", encoding="utf-8") as predictions_file:
                print(predicted_output, file=predictions_file, end="")

            # If there is only one dataset, print to stdout too. We had this in
            # previous versions, so keep this for compatibility with older
            # pipelines that users might still have.
            if len(test_collection.datasets) == 1:
                print(predicted_output, end="")

            # Evaluate.
            if args.evaluate_test_data:
                test_score = test_dataset.evaluate("test", predictions_filename, args.logdir)
                print("Test F1 ({}): {:.4f}".format(test_dataset.corpus, test_score), file=sys.stderr)
                test_scores.append(test_score)

        if test_scores:
            print("Macro avg F1: {:.4f}".format(np.average(test_scores)), file=sys.stderr)