# data_loaders.py
import random

import ir_datasets
import pytrec_eval
import torch
from torch.utils.data import Dataset
from tqdm.auto import tqdm

from indexed_reader import IndexedReader


class BaseLoader:
    """Shared helpers for the query/positive/negative training loaders below."""

    def __init__(self):
        raise NotImplementedError()

    def __len__(self):
        return len(self.q_ids)

    def cross_encoder_batcher(self, batch):
        """Collate items into (query, doc) pairs for a cross-encoder.

        Each item contributes a positive pair (label 1.0) and a negative pair (label 0.0).
        """
        texts = []
        labels = []
        for data in batch:
            texts.append([data["query_text"], data["doc_text"]])
            texts.append([data["query_text"], data["neg_text"]])
            labels.append(1.0)
            labels.append(0.0)
        tokenized = self.tokenizer(
            texts,
            padding=True,
            truncation=True,
            return_tensors="pt",
            max_length=512,
        )
        labels = torch.tensor(labels, dtype=torch.float)
        return tokenized, labels
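

# Usage sketch (not part of the original module): any of the loaders below can be
# handed to torch.utils.data.DataLoader with cross_encoder_batcher as the collate_fn.
# The tokenizer checkpoint and file paths here are hypothetical placeholders.
#
#   from torch.utils.data import DataLoader
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   dataset = InMemoryLoader(tokenizer, "docs.tsv", "queries.tsv", "train.qrels")
#   loader = DataLoader(dataset, batch_size=8, shuffle=True,
#                       collate_fn=dataset.cross_encoder_batcher)
#   tokenized, labels = next(iter(loader))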


class IRDatasetsLoader(Dataset, BaseLoader):
    def __init__(self, tokenizer, docs_path, queries_path, qrels):
        del docs_path  # Not needed here; documents come from ir_datasets.
        self.data = ir_datasets.load("msmarco-document/train")
        # Force the data to be downloaded if it is not present yet.
        _ = self.data._path()
        self.tokenizer = tokenizer
        # Size of the MS MARCO document corpus.
        self.num_docs = 3213835
        self.queries = {}
        for line in tqdm(open(queries_path), desc="reading queries", total=367013, leave=False, ncols=90):
            q_id, query = line.strip().split("\t", maxsplit=1)
            self.queries[q_id] = query
        self.train_qrels = pytrec_eval.parse_qrel(open(qrels))
        self.q_ids = dict(enumerate(self.train_qrels.keys()))

    def __getitem__(self, item):
        q_id = self.q_ids[item]
        # Positive: the first judged document for this query.
        d_id = list(self.train_qrels[q_id].keys())[0]
        # Negative: a document drawn uniformly at random from the corpus.
        neg_id = random.randrange(self.num_docs)
        pos_doc_obj = self.data.docs.lookup(d_id)
        pos_doc = f"{pos_doc_obj.url} {pos_doc_obj.title} {pos_doc_obj.body}"
        neg_doc_obj = self.data.docs[neg_id]
        neg_doc = f"{neg_doc_obj.url} {neg_doc_obj.title} {neg_doc_obj.body}"
        query_text = self.queries[q_id]
        ret_dict = {
            "query_text": query_text,
            "doc_text": pos_doc,
            "neg_text": neg_doc,
        }
        return ret_dict
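

# Item sketch (the strings below are hypothetical; only the keys come from the code
# above): each __getitem__ call pairs a query with one judged-relevant document and
# one randomly sampled negative, e.g.
#
#   {"query_text": "how do solar panels work",
#    "doc_text": "<url, title and body of a document judged relevant in the qrels>",
#    "neg_text": "<url, title and body of a randomly sampled document>"}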


class InMemoryLoader(Dataset, BaseLoader):
    def __init__(self, tokenizer, docs_path, queries_path, qrels):
        self.tokenizer = tokenizer
        self.docs = {}
        self.queries = {}
        self.all_doc_ids = []
        # Load the full document collection into memory.
        for line in tqdm(open(docs_path), desc="reading docs", total=3213835, leave=False, ncols=90):
            d_id, doc = line.strip().split("\t", maxsplit=1)
            self.all_doc_ids.append(d_id)
            self.docs[d_id] = doc
        for line in tqdm(open(queries_path), desc="reading queries", total=367013, leave=False, ncols=90):
            q_id, query = line.strip().split("\t", maxsplit=1)
            self.queries[q_id] = query
        self.train_qrels = pytrec_eval.parse_qrel(open(qrels))
        self.q_ids = dict(enumerate(self.train_qrels.keys()))

    def __getitem__(self, item):
        q_id = self.q_ids[item]
        # Positive: the first judged document for this query.
        d_id = list(self.train_qrels[q_id].keys())[0]
        # Negative: a random document id; resample until it differs from the positive.
        neg_id = random.choice(self.all_doc_ids)
        while neg_id == d_id:
            neg_id = random.choice(self.all_doc_ids)
        pos_doc = self.docs[d_id]
        neg_doc = self.docs[neg_id]
        query_text = self.queries[q_id]
        ret_dict = {
            "query_text": query_text,
            "doc_text": pos_doc,
            "neg_text": neg_doc,
        }
        return ret_dict
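

# Input format sketch (the ids and texts are hypothetical): both the docs file and
# the queries file are assumed to be tab-separated, one "id<TAB>text" record per
# line, matching the split("\t", maxsplit=1) calls above, e.g.
#
#   D12345<TAB>full text of document D12345 ...   (docs file)
#   123<TAB>text of query 123                     (queries file)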


class IndexedLoader(Dataset, BaseLoader):
    def __init__(self, tokenizer, doc_path, queries_path, qrels):
        self.tokenizer = tokenizer
        self.queries = {}
        # Documents are read lazily from an on-disk index instead of being held in memory.
        self.docs = IndexedReader("msmarco", doc_path)
        self.all_doc_ids = self.docs.all_ids
        for line in tqdm(open(queries_path), desc="reading queries", total=367013, leave=False, ncols=90):
            q_id, query = line.strip().split("\t", maxsplit=1)
            self.queries[q_id] = query
        self.train_qrels = pytrec_eval.parse_qrel(open(qrels))
        self.q_ids = dict(enumerate(self.train_qrels.keys()))

    def __getitem__(self, item):
        q_id = self.q_ids[item]
        # Positive: the first judged document for this query.
        d_id = list(self.train_qrels[q_id].keys())[0]
        # Negative: a random document id; resample until it differs from the positive.
        neg_id = random.choice(self.all_doc_ids)
        while neg_id == d_id:
            neg_id = random.choice(self.all_doc_ids)
        pos_doc = self.docs[d_id]
        neg_doc = self.docs[neg_id]
        query_text = self.queries[q_id]
        ret_dict = {
            "query_text": query_text,
            "doc_text": pos_doc,
            "neg_text": neg_doc,
        }
        return ret_dict
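

# Interface sketch (an assumption inferred from the usage above, not a definition
# taken from the indexed_reader module itself): IndexedLoader only needs
# IndexedReader to behave roughly like
#
#   class IndexedReader:
#       def __init__(self, name: str, path: str): ...  # open an on-disk doc index
#       all_ids: list                                   # every document id in the index
#       def __getitem__(self, doc_id: str) -> str: ...  # raw text for one document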