Commit

Update weight indexing methods
MarsTechHAN authored Dec 8, 2020
1 parent 2c6496a commit fac474a
Showing 1 changed file with 24 additions and 17 deletions.
keras2ncnn/keras_converter.py (41 changes: 24 additions & 17 deletions)
@@ -3,11 +3,6 @@
 
 class KerasConverter:
     MULTI_OUTPUT_OP = []
-    CONV_ACTIVATION_TYPE = {
-        'linear': 0,
-        'relu': 1,
-        'sigmoid': 4
-    }
 
     def InputLayer_helper(self, layer, keras_graph_helper,
                           ncnn_graph_helper, ncnn_helper):
@@ -32,6 +27,12 @@ def replaceNone(x): return -1 if x is None else x
     def Conv2D_helper(self, layer, keras_graph_helper,
                       ncnn_graph_helper, ncnn_helper):
 
+        CONV2D_ACTIVATION_TYPE = {
+            'linear': 0,
+            'relu': 1,
+            'sigmoid': 4
+        }
+
         num_output = layer['layer']['config']['filters']
         kernel_w, kernel_h = layer['layer']['config']['kernel_size']
         dilation_w, dilation_h = layer['layer']['config']['dilation_rate']
@@ -55,14 +56,14 @@ def Conv2D_helper(self, layer, keras_graph_helper,
             bias_weight = layer['weight']['bias:0']
         else:
             # Reorder weight, h-w-i-o to o-i-h-w
-            weight_data_size = int(layer['weight'].size)
+            weight_data_size = int(layer['weight']['kernel:0'].size)
             # Reorder weight, h-w-i-o to o-i-h-w
-            weight = np.insert(np.transpose(layer['weight'],
+            weight = np.insert(np.transpose(layer['weight']['kernel:0'],
                                             [3, 2, 0, 1]).flatten(), 0, 0)
 
         if 'activation' in layer['layer']['config']:
-            if layer['layer']['config']['activation'] in self.CONV_ACTIVATION_TYPE.keys():
-                activation_type = self.CONV_ACTIVATION_TYPE[layer['layer'][
+            if layer['layer']['config']['activation'] in CONV2D_ACTIVATION_TYPE.keys():
+                activation_type = CONV2D_ACTIVATION_TYPE[layer['layer'][
                     'config']['activation']]
             else:
                 print(layer['layer'])
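A quick illustration of what the reorder comment in this hunk means, since it is the easiest part to misread. This is a minimal sketch with made-up shapes, not code from the repository; the leading zero inserted by np.insert is, as far as I can tell, the flag value ncnn expects in front of a raw fp32 weight blob.

import numpy as np

# Illustrative only: a 3x3 Conv2D kernel with 2 input and 4 output channels,
# stored the way Keras stores it: (kh, kw, in, out), i.e. h-w-i-o.
keras_kernel = np.zeros((3, 3, 2, 4), dtype=np.float32)

# Same permutation as the hunk above: h-w-i-o -> o-i-h-w.
ncnn_kernel = np.transpose(keras_kernel, [3, 2, 0, 1])
print(ncnn_kernel.shape)   # (4, 2, 3, 3)

# Flatten and prepend a single 0, mirroring np.insert(..., 0, 0) in the hunk.
weight = np.insert(ncnn_kernel.flatten(), 0, 0)
print(weight.size)         # 3*3*2*4 + 1 = 73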
@@ -102,6 +103,12 @@ def Conv2D_helper(self, layer, keras_graph_helper,
     def Conv2DTranspose_helper(self, layer, keras_graph_helper,
                                ncnn_graph_helper, ncnn_helper):
 
+        CONV2D_T_ACTIVATION_TYPE = {
+            'linear': 0,
+            'relu': 1,
+            'sigmoid': 4
+        }
+
         num_output = layer['layer']['config']['filters']
         kernel_w, kernel_h = layer['layer']['config']['kernel_size']
         dilation_w, dilation_h = layer['layer']['config']['dilation_rate']
@@ -125,14 +132,14 @@ def Conv2DTranspose_helper(self, layer, keras_graph_helper,
             bias_weight = layer['weight']['bias:0']
         else:
             # Reorder weight, h-w-i-o to o-i-h-w
-            weight_data_size = int(layer['weight'].size)
+            weight_data_size = int(layer['weight']['kernel:0'].size)
             # Reorder weight, h-w-i-o to o-i-h-w
-            weight = np.insert(np.transpose(layer['weight'],
+            weight = np.insert(np.transpose(layer['weight']['kernel:0'],
                                             [2, 3, 0, 1]).flatten(), 0, 0)
 
         if 'activation' in layer['layer']['config']:
-            if layer['layer']['config']['activation'] in self.CONV_ACTIVATION_TYPE.keys():
-                activation_type = self.CONV_ACTIVATION_TYPE[layer['layer'][
+            if layer['layer']['config']['activation'] in CONV2D_T_ACTIVATION_TYPE.keys():
+                activation_type = CONV2D_T_ACTIVATION_TYPE[layer['layer'][
                     'config']['activation']]
             else:
                 print(layer['layer'])
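The transposed-convolution hunk is identical except for the axis order [2, 3, 0, 1]. Assuming the usual TF/Keras layout, a Conv2DTranspose kernel is stored as (kh, kw, out, in) rather than (kh, kw, in, out), so a different permutation is needed to land on the same o-i-h-w layout (the carried-over "h-w-i-o" comment is a small inaccuracy in the source). A short sketch under that assumption:

import numpy as np

# Illustrative shapes only: a 3x3 kernel mapping 2 input channels to 4 filters,
# stored as (kh, kw, out, in) by Keras Conv2DTranspose.
keras_t_kernel = np.zeros((3, 3, 4, 2), dtype=np.float32)

# [2, 3, 0, 1] also ends up at o-i-h-w, just starting from a different layout.
print(np.transpose(keras_t_kernel, [2, 3, 0, 1]).shape)   # (4, 2, 3, 3)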
@@ -178,12 +185,12 @@ def DepthwiseConv2D_helper(
         # Reorder weight, h-w-i-o to o-i-h-w
         weight = np.insert(
             np.transpose(
-                layer['weight'], [
+                layer['weight']['depthwise_kernel:0'], [
                     3, 2, 0, 1]).flatten(), 0, 0)
 
-        num_output = layer['weight'].shape[2] * \
+        num_output = layer['weight']['depthwise_kernel:0'].shape[2] * \
             layer['layer']['config']['depth_multiplier']
-        group = layer['weight'].shape[2]
+        group = layer['weight']['depthwise_kernel:0'].shape[2]
 
         kernel_w, kernel_h = layer['layer']['config']['kernel_size']
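For the depthwise case it may help to spell out the two derived values. Assuming the usual Keras depthwise kernel layout (kh, kw, in, depth_multiplier), shape[2] is the input channel count, so with made-up numbers:

# Hypothetical example: a 3x3 depthwise kernel over 32 input channels with
# depth_multiplier = 1, i.e. shape (3, 3, 32, 1) in Keras' (kh, kw, in, mult) layout.
in_channels, depth_multiplier = 32, 1

num_output = in_channels * depth_multiplier   # 32 output channels for ncnn
group = in_channels                           # 32 groups, one filter per input channel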

@@ -201,7 +208,7 @@ def DepthwiseConv2D_helper(
         if bias_term:
             raise NotImplementedError
 
-        weight_data_size = int(layer['weight'].size)
+        weight_data_size = int(layer['weight']['depthwise_kernel:0'].size)
 
         ncnn_graph_attr = ncnn_helper.dump_args(
             'ConvolutionDepthWise',
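What ties the hunks together: every helper now reaches into layer['weight'] by tensor name instead of treating it as a bare array, which suggests the converter now passes each layer a dict of named weights (as read from the Keras HDF5 file). The keys below are the ones that appear in the diff; the shapes and the surrounding structure are purely illustrative.

import numpy as np

# Illustrative structure only; the real dict is built elsewhere in keras2ncnn.
layer = {
    'weight': {
        'kernel:0': np.zeros((3, 3, 2, 4), dtype=np.float32),
        'bias:0': np.zeros((4,), dtype=np.float32),
    }
}

# Before this commit: layer['weight'].size, which only worked while 'weight'
# was the bare kernel array. After: pick the named tensor first.
weight_data_size = int(layer['weight']['kernel:0'].size)   # 72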
