# Copyright (c) Hiren Galiyawala, Kenil Shah, Vandit Gajjar, and Mehul S. Raval.
from keras.models import Model
from keras.layers import Input, ZeroPadding2D
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import AveragePooling2D, GlobalAveragePooling2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.layers import concatenate
import keras.backend as K

# from Object_Detection.Mask_RCNN.mrcnn.custom_layers.scale_layer import Scale
from custom_layers.scale_layer import Scale


def DenseNet(nb_dense_block=4, growth_rate=32, nb_filter=64, reduction=0.0, dropout_rate=0.0, weight_decay=1e-4, classes=1000, weights_path=None):
    '''Instantiate the DenseNet architecture.
        # Arguments
            nb_dense_block: number of dense blocks to add to the network
growth_rate: number of filters to add per dense block
nb_filter: initial number of filters
reduction: reduction factor of transition blocks.
dropout_rate: dropout rate
weight_decay: weight decay factor
classes: optional number of classes to classify images
weights_path: path to pre-trained weights
# Returns
A Keras model instance.
'''
eps = 1.1e-5
# compute compression factor
compression = 1.0 - reduction
# Handle Dimension Ordering for different backends
global concat_axis
    if K.image_data_format() == 'channels_last':
concat_axis = 3
img_input = Input(shape=(224, 224, 3), name='data')
else:
concat_axis = 1
img_input = Input(shape=(3, 224, 224), name='data')
# From architecture for ImageNet (Table 1 in the paper)
nb_filter = 64
    nb_layers = [6, 12, 32, 32]  # for DenseNet-169
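    # Depth check for DenseNet-169: every dense layer contributes two convs
    # (1x1 bottleneck + 3x3), so 2 * (6 + 12 + 32 + 32) = 164 conv layers; add
    # the initial 7x7 conv, three 1x1 transition convs, and the final fully
    # connected layer to get the 169 of the name.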
# Initial convolution
x = ZeroPadding2D((3, 3), name='conv1_zeropadding')(img_input)
    x = Conv2D(nb_filter, (7, 7), strides=(2, 2), name='conv1', use_bias=False)(x)
x = BatchNormalization(epsilon=eps, axis=concat_axis, name='conv1_bn')(x)
x = Scale(axis=concat_axis, name='conv1_scale')(x)
x = Activation('relu', name='relu1')(x)
x = ZeroPadding2D((1, 1), name='pool1_zeropadding')(x)
x = MaxPooling2D((3, 3), strides=(2, 2), name='pool1')(x)
# Add dense blocks
for block_idx in range(nb_dense_block - 1):
stage = block_idx+2
x, nb_filter = dense_block(x, stage, nb_layers[block_idx], nb_filter, growth_rate, dropout_rate=dropout_rate, weight_decay=weight_decay)
# Add transition_block
x = transition_block(x, stage, nb_filter, compression=compression, dropout_rate=dropout_rate, weight_decay=weight_decay)
nb_filter = int(nb_filter * compression)
final_stage = stage + 1
x, nb_filter = dense_block(x, final_stage, nb_layers[-1], nb_filter, growth_rate, dropout_rate=dropout_rate, weight_decay=weight_decay)
x = BatchNormalization(epsilon=eps, axis=concat_axis, name='conv'+str(final_stage)+'_blk_bn')(x)
x = Scale(axis=concat_axis, name='conv'+str(final_stage)+'_blk_scale')(x)
x = Activation('relu', name='relu'+str(final_stage)+'_blk')(x)
x = GlobalAveragePooling2D(name='pool'+str(final_stage))(x)
x = Dense(classes, name='fc6')(x)
x = Activation('softmax', name='prob')(x)
model = Model(img_input, x, name='densenet')
if weights_path is not None:
model.load_weights(weights_path)
return model


def conv_block(x, stage, branch, nb_filter, dropout_rate=None, weight_decay=1e-4):
    '''Apply BatchNorm, ReLU, bottleneck 1x1 Conv2D, 3x3 Conv2D, and optional dropout
# Arguments
x: input tensor
stage: index for dense block
branch: layer index within each dense block
nb_filter: number of filters
dropout_rate: dropout rate
weight_decay: weight decay factor
'''
eps = 1.1e-5
conv_name_base = 'conv' + str(stage) + '_' + str(branch)
relu_name_base = 'relu' + str(stage) + '_' + str(branch)
# 1x1 Convolution (Bottleneck layer)
inter_channel = nb_filter * 4
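    # DenseNet-B bottleneck from the paper: the 1x1 conv widens to
    # 4 * growth_rate feature maps (nb_filter is the growth rate when called
    # from dense_block) before the 3x3 conv narrows back to growth_rate.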
x = BatchNormalization(epsilon=eps, axis=concat_axis, name=conv_name_base+'_x1_bn')(x)
x = Scale(axis=concat_axis, name=conv_name_base+'_x1_scale')(x)
x = Activation('relu', name=relu_name_base+'_x1')(x)
x = Conv2D(inter_channel, (1, 1), name=conv_name_base+'_x1', use_bias=False)(x)
if dropout_rate:
x = Dropout(dropout_rate)(x)
# 3x3 Convolution
x = BatchNormalization(epsilon=eps, axis=concat_axis, name=conv_name_base+'_x2_bn')(x)
x = Scale(axis=concat_axis, name=conv_name_base+'_x2_scale')(x)
x = Activation('relu', name=relu_name_base+'_x2')(x)
x = ZeroPadding2D((1, 1), name=conv_name_base+'_x2_zeropadding')(x)
x = Conv2D(nb_filter, (3, 3), name=conv_name_base+'_x2', use_bias=False)(x)
if dropout_rate:
x = Dropout(dropout_rate)(x)
return x


def transition_block(x, stage, nb_filter, compression=1.0, dropout_rate=None, weight_decay=1e-4):
    ''' Apply BatchNorm, ReLU, 1x1 Conv2D (with optional compression), optional dropout, and AveragePooling2D
# Arguments
x: input tensor
stage: index for dense block
nb_filter: number of filters
compression: calculated as 1 - reduction. Reduces the number of feature maps in the transition block.
dropout_rate: dropout rate
weight_decay: weight decay factor
'''
eps = 1.1e-5
conv_name_base = 'conv' + str(stage) + '_blk'
relu_name_base = 'relu' + str(stage) + '_blk'
pool_name_base = 'pool' + str(stage)
x = BatchNormalization(epsilon=eps, axis=concat_axis, name=conv_name_base+'_bn')(x)
x = Scale(axis=concat_axis, name=conv_name_base+'_scale')(x)
x = Activation('relu', name=relu_name_base)(x)
x = Conv2D(int(nb_filter * compression), (1, 1), name=conv_name_base, use_bias=False)(x)
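    # Worked example: with reduction=0.5 (so compression=0.5), a transition
    # entering with 256 feature maps leaves this 1x1 conv with 128, before the
    # 2x2 average pooling below also halves both spatial dimensions.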
if dropout_rate:
x = Dropout(dropout_rate)(x)
x = AveragePooling2D((2, 2), strides=(2, 2), name=pool_name_base)(x)
return x


def dense_block(x, stage, nb_layers, nb_filter, growth_rate, dropout_rate=None, weight_decay=1e-4, grow_nb_filters=True):
''' Build a dense_block where the output of each conv_block is fed to subsequent ones
# Arguments
x: input tensor
stage: index for dense block
nb_layers: the number of layers of conv_block to append to the model.
nb_filter: number of filters
growth_rate: growth rate
dropout_rate: dropout rate
weight_decay: weight decay factor
grow_nb_filters: flag to decide to allow number of filters to grow
'''
eps = 1.1e-5
concat_feat = x
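    # Each iteration concatenates growth_rate new feature maps onto concat_feat,
    # so the block's output carries nb_filter + nb_layers * growth_rate channels
    # when grow_nb_filters is True.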
for i in range(nb_layers):
branch = i+1
x = conv_block(concat_feat, stage, branch, growth_rate, dropout_rate, weight_decay)
        concat_feat = concatenate([concat_feat, x], axis=concat_axis, name='concat_'+str(stage)+'_'+str(branch))
if grow_nb_filters:
nb_filter += growth_rate
return concat_feat, nb_filter
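

if __name__ == '__main__':
    # Minimal usage sketch, not part of the original training pipeline: builds
    # DenseNet-169 with the DenseNet-BC compression of 0.5 and prints the layer
    # summary. Pass weights_path='<path to .h5 file>' to load pretrained weights.
    model = DenseNet(nb_dense_block=4, growth_rate=32, nb_filter=64,
                     reduction=0.5, classes=1000, weights_path=None)
    model.summary()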