test.py
import numpy as np
import glob
import os
from lstm import Model
from multiprocessing import connection
from threading import Thread
from time import time
from config import Config
import torch
from torch.autograd import Variable as V
config = Config()
model = Model(config)

def convert_raw(x, config):
    # Wrap a tensor in an inference-only Variable (volatile=True turns off
    # autograd bookkeeping in pre-0.4 PyTorch), moving it to the GPU if enabled.
    if config.use_cuda:
        return V(x.cuda(), volatile=True)
    else:
        return V(x, volatile=True)

def convert(x, config):
    # Cast a numpy array to float32 and wrap it as an inference Variable.
    return convert_raw(torch.from_numpy(x.astype(np.float32)), config)
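
# Usage sketch (shapes here are illustrative, not taken from this repo):
#   v = convert(np.zeros((4, 128)), config)   # float32 Variable of size (4, 128)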

action, lengths, cache_tmp, sizes = [], [], [], []

# One synthetic cache entry of shape (depth // unit_depth, hidden_dim, 2).
x = np.random.randn(config.depth // config.unit_depth,
                    config.hidden_dim, 2)

# Time 20 rounds of building the 4840-entry batch and converting it into
# the nested tuple of Variables the model consumes.
t = time()
for i in range(20):
    cache = [x for _ in range(4840)]
    cache = np.stack(cache) if len(cache) > 1 else np.expand_dims(cache[0], 0)
    # Slice the stacked array into a nested tuple of Variables, each of
    # shape (batch, hidden_dim).
    cache = tuple(tuple(convert(cache[:, i, :, j], config)
                        for j in range(cache.shape[-1]))
                  for i in range(cache.shape[1]))
    action = [[a] for a in range(4840)]
    action = torch.LongTensor(action)
print(time() - t)

# Time 20 forward passes through the model on the prepared batch.
t = time()
for i in range(20):
    _, _, d_ary, _ = model(V(action, volatile=True), cache)
    value_ary = d_ary.data.cpu().numpy()
print(time() - t)
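
# Note: Variable and volatile=True are the pre-0.4 PyTorch inference idiom.
# A minimal sketch of the same timing on PyTorch >= 0.4 (assuming the model
# accepts plain tensors) would use torch.no_grad() instead:
#
#   with torch.no_grad():
#       t = time()
#       for i in range(20):
#           _, _, d_ary, _ = model(action, cache)
#           value_ary = d_ary.cpu().numpy()
#       print(time() - t)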