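"""Micro-benchmark for GPT-NeoX-20B generation with Hugging Face Transformers.

Times `model.generate` over several (input_length, output_length, batch_size)
configurations and captures a PyTorch profiler trace for the shortest run.
Note: the fp16 weights alone occupy roughly 40 GB of GPU memory.
"""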
import time

import torch
from torch.profiler import profile, ProfilerActivity
from transformers import GPTNeoXForCausalLM, GPTNeoXTokenizerFast

# Load the 20B-parameter model in fp16 on the default GPU.
model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/gpt-neox-20b").half().cuda()
tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")

# The prompt only needs to be long enough to slice input_length tokens from,
# so the same three-paragraph block is repeated four times.
_block = (
    "In a shocking finding, scientists discovered a herd of unicorns living in a remote, "
    "previously unexplored valley, in the Andes Mountains. Even more surprising to the "
    "researchers was the fact that the unicorns spoke perfect English. "
) * 2 + (
    "In a shocking finding, scientists discovered a herd of unicorns living in a remote, "
    "previously unexplored valley, in the Andes Mountains."
    "The unicorns spoke really great perfect American English."
)
prompt = _block * 4

# (input_length, output_length, batch_size) configurations to benchmark.
confs = [
    (128, 2, 1),
    (128, 32, 1),
    (128, 64, 1),
    (128, 96, 1),
    (128, 128, 1),
    # (512, 128, 1)
]

for (input_length, output_length, batch_size) in confs:
    print(f'Input: {input_length}, Output: {output_length}, Batch: {batch_size}')
    # Tokenize once, replicate across the batch, then truncate to input_length tokens.
    input_ids = (
        tokenizer(prompt, return_tensors="pt")
        .input_ids.to('cuda:0')
        .repeat(batch_size, 1)[:, :input_length]
    )
    print('Input size: ', input_ids.shape)
    print(input_ids[0])
    print(tokenizer.batch_decode(input_ids)[0])
    with torch.inference_mode():
        # Warmup: a few untimed runs so CUDA initialization and kernel
        # autotuning don't pollute the measurement.
        for _ in range(3):
            gen_tokens = model.generate(
                input_ids,
                do_sample=True,
                temperature=0.9,
                max_new_tokens=output_length,
            )
        # Benchmark: a single timed run; widen the range for more samples.
        for _ in range(1):
            torch.cuda.synchronize()
            start_time = time.time()
            gen_tokens = model.generate(
                input_ids,
                do_sample=True,
                temperature=0.9,
                max_new_tokens=output_length,
            )
            torch.cuda.synchronize()
            elapsed = time.time() - start_time
            print(f"Generation took {elapsed:.2f} seconds")
        # Profile only the cheapest configuration to keep the trace small.
        if output_length == 2:
            with profile(
                activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA],
                record_shapes=True,
            ) as prof:
                gen_tokens = model.generate(
                    input_ids,
                    do_sample=True,
                    temperature=0.9,
                    max_new_tokens=output_length,
                )
            print(prof.key_averages().table(sort_by="cuda_time_total", row_limit=10))
            prof.export_chrome_trace(f"trace_{input_length}_{output_length}_{batch_size}.json")
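            # The exported trace can be inspected in chrome://tracing or
            # https://ui.perfetto.dev for a per-kernel timeline.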
    print(gen_tokens.shape)
    gen_text = tokenizer.batch_decode(gen_tokens)[0]
    print(gen_text)