-- test_sharedclone.lua
require 'nn'
require 'rnn'
require 'image'
require 'optim'
require 'loader'
require 'ctc_log'
require 'utils/decoder'
local threads = require 'threads'
-- initialize
torch.setdefaulttensortype('torch.FloatTensor')
torch.manualSeed(450)
-- debug switch
DEBUG = false
-- timer initialization
base = 0
timer = torch.Timer()
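-- prints the total elapsed time, the delta since the previous call, and the message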
function show_log(log)
local now = timer:time().real
local cost = now - base
base = now
print(string.format("[%.4f][%.4f]%s", now, cost, log))
end
-- settings
DROPOUT_RATE = 0.4
GPU_ENABLED = false
local input_size = 32
local hidden_size = 200
clamp_size = 5
-- configuration
training_list_file = "1.txt"
-- GPU
if GPU_ENABLED then
require 'cutorch'
require 'cunn'
end
-- load samples
show_log("Loading samples...")
local loader = Loader()
loader:load(training_list_file)
local codec = loader:codec()
show_log(string.format("Loading finished. Got %d samples, %d classes of characters.", #loader.samples, codec.codec_size))
local class_num = codec.codec_size
-- build network
show_log("Building networks...")
local net = nn.Sequential()
net:add(nn.Dropout(DROPOUT_RATE))
net:add(nn.SplitTable(1)) -- split the 2D input into a table of time steps
net:add(nn.BiSequencer(nn.FastLSTM(input_size, hidden_size)))
local output = nn.Sequential()
output:add(nn.Linear(hidden_size * 2, class_num + 1)) -- +1 for the CTC blank label
output:add(nn.SoftMax())
net:add(nn.Sequencer(output))
net:float()
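-- optional: dump the assembled module tree (nn containers implement tostring)
if DEBUG then print(net) end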
-- prepare parameters and build a shared clone
local params, grad_params = net:getParameters()
-- share parameters but not gradients: sharedClone(shareParams, shareGradParams)
local n = net:sharedClone(true, false)
local p, gd = n:getParameters()
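-- sanity checks (a minimal sketch, relying on the sharedClone(shareParams,
-- shareGradParams) semantics from the rnn/dpnn packages): parameter values
-- should match the original's, while the gradient buffers were never shared
assert(p:eq(params):all(), "clone parameters should equal the original's")
assert(torch.pointer(gd:storage()) ~= torch.pointer(grad_params:storage()),
       "gradient buffers should be separate (shareGradParams = false)")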
-- run a single forward/backward pass through the shared clone
local sample = loader:pick()
local im = sample.img
local target = codec:encode(sample.gt)
local outputTable = n:forward(im)
local loss, grad = ctc.getCTCCostAndGrad(outputTable, target)
n:backward(im, grad)
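-- report the result (a minimal sketch): the backward pass accumulates only
-- into the clone's own gradient buffer gd, since gradients are not shared
show_log(string.format("loss = %.4f, clone grad norm = %.4f", loss, gd:norm()))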