llmdata.py
"""
Data Loading Utilities for Language Model Evaluation
====================================================
This module provides utilities for loading and processing text datasets
(WikiText-2 and C4) for language model training and evaluation.
Features:
- Supports WikiText-2 and C4 datasets
- Handles tokenization and data preparation
- Generates training samples with specified sequence length
- Provides consistent data loading interface
Original code forked from:
- https://github.com/locuslab/wanda/blob/main/lib/data.py
- https://github.com/IST-DASLab/sparsegpt/blob/master/datautils.py
Author: Dong Wang (dong.wang@tugraz.at)
Date: 2024-01-30
"""
import numpy as np
import random
import torch
from datasets import load_dataset

text_datasets = ['wikitext2', 'c4']

# Set seed for reproducibility
def set_seed(seed):
    np.random.seed(seed)
    torch.random.manual_seed(seed)

# Wrapper for tokenized input IDs
class TokenizerWrapper:
    def __init__(self, input_ids):
        self.input_ids = input_ids

# Load and process wikitext2 dataset
def get_wikitext2(nsamples, seed, seqlen, tokenizer):
    # Load train and test datasets
    traindata = load_dataset('wikitext', 'wikitext-2-raw-v1', split='train')
    testdata = load_dataset('wikitext', 'wikitext-2-raw-v1', split='test')

    # Encode datasets
    trainenc = tokenizer(" ".join(traindata['text']), return_tensors='pt')
    testenc = tokenizer("\n\n".join(testdata['text']), return_tensors='pt')

    # Generate samples from training set
    random.seed(seed)
    trainloader = []
    for _ in range(nsamples):
        # Pick a random seqlen-token window from the concatenated training text
        i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1)
        j = i + seqlen
        inp = trainenc.input_ids[:, i:j]
        tar = inp.clone()
        # Mask all but the last position (-100 is ignored by the HF loss)
        tar[:, :-1] = -100
        trainloader.append((inp, tar))
    return trainloader, testenc

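# Illustration (not part of the original module): a minimal sketch of how one
# calibration pair produced above could be fed to a Hugging Face causal LM.
# Positions whose label is -100 are ignored by the model's built-in
# cross-entropy loss, so only the final token of each window contributes.
# `model` is a hypothetical transformers causal LM supplied by the caller.
def _example_forward_on_sample(model, sample):
    inp, tar = sample
    with torch.no_grad():
        out = model(input_ids=inp.to(model.device), labels=tar.to(model.device))
    return out.loss
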
# Load and process c4 dataset
def get_c4(nsamples, seed, seqlen, tokenizer):
    # Load train and validation datasets
    traindata = load_dataset('allenai/c4', 'allenai--c4', data_files={'train': 'en/c4-train.00000-of-01024.json.gz'}, split='train')
    valdata = load_dataset('allenai/c4', 'allenai--c4', data_files={'validation': 'en/c4-validation.00000-of-00008.json.gz'}, split='validation')

    # Generate samples from training set
    random.seed(seed)
    trainloader = []
    for _ in range(nsamples):
        # Resample documents until one is longer than seqlen tokens
        while True:
            i = random.randint(0, len(traindata) - 1)
            trainenc = tokenizer(traindata[i]['text'], return_tensors='pt')
            if trainenc.input_ids.shape[1] > seqlen:
                break
        # Take a random seqlen-token window from the chosen document
        i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1)
        j = i + seqlen
        inp = trainenc.input_ids[:, i:j]
        tar = inp.clone()
        # Mask all but the last position (-100 is ignored by the HF loss)
        tar[:, :-1] = -100
        trainloader.append((inp, tar))

    # Prepare validation dataset, truncated to 256 * seqlen tokens
    valenc = tokenizer(' '.join(valdata[:1100]['text']), return_tensors='pt')
    valenc = valenc.input_ids[:, :(256 * seqlen)]
    valenc = TokenizerWrapper(valenc)
    return trainloader, valenc

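# Illustration (not part of the original module): a minimal sketch of how the
# returned evaluation encodings (testenc from get_wikitext2, valenc from
# get_c4) are typically consumed for perplexity. `model` is assumed to be a
# Hugging Face causal LM already placed on `device`; the seqlen-sized chunking
# mirrors the usual evaluation pattern rather than an API of this module.
@torch.no_grad()
def _example_eval_ppl(model, enc, seqlen, device='cuda'):
    input_ids = enc.input_ids.to(device)
    nsamples = input_ids.shape[1] // seqlen
    nlls = []
    for i in range(nsamples):
        batch = input_ids[:, i * seqlen:(i + 1) * seqlen]
        logits = model(batch).logits
        # Shift so each position predicts the next token
        shift_logits = logits[:, :-1, :].contiguous()
        shift_labels = batch[:, 1:].contiguous()
        loss = torch.nn.functional.cross_entropy(
            shift_logits.view(-1, shift_logits.size(-1)),
            shift_labels.view(-1),
        )
        nlls.append(loss.float() * seqlen)
    # Perplexity over all full seqlen-sized chunks
    return torch.exp(torch.stack(nlls).sum() / (nsamples * seqlen)).item()
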
# Select the appropriate loader based on dataset name
def get_loaders(name, nsamples=128, seed=0, seqlen=2048, tokenizer=None):
    if 'wikitext2' in name:
        return get_wikitext2(nsamples, seed, seqlen, tokenizer)
    if 'c4' in name:
        return get_c4(nsamples, seed, seqlen, tokenizer)
    raise ValueError(f"Unsupported dataset: {name!r}; expected one of {text_datasets}")
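
# Usage sketch (not part of the original module). The model name "gpt2" and
# the use of transformers.AutoTokenizer are assumptions for illustration only;
# any Hugging Face causal-LM tokenizer works the same way.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    set_seed(0)

    # Each trainloader element is an (input_ids, target_ids) pair of shape
    # [1, seqlen]; testenc holds the tokenized evaluation split.
    trainloader, testenc = get_loaders(
        "wikitext2", nsamples=4, seed=0, seqlen=512, tokenizer=tokenizer
    )
    print(f"Loaded {len(trainloader)} calibration samples of shape {tuple(trainloader[0][0].shape)}")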