Skip to content

Commit

Permalink
update
Browse files Browse the repository at this point in the history
  • Loading branch information
i-evi committed Jun 10, 2020
1 parent 8ef5264 commit 92790eb
Show file tree
Hide file tree
Showing 10 changed files with 245 additions and 53 deletions.
8 changes: 5 additions & 3 deletions src/catcoon.h
Original file line number Diff line number Diff line change
@@ -1,16 +1,18 @@
#ifndef _CARCOON_H_
#define _CARCOON_H_
#ifndef _CATCOON_H_
#define _CATCOON_H_

#ifdef __cplusplus
extern "C" {
#endif

#include "cc_actfn.h"
#include "cc_assert.h"
#include "cc_basic.h"
#include "cc_conv2d.h"
#include "cc_fmap2d.h"
#include "cc_fullycon.h"
#include "cc_image.h"
#include "cc_normfn.h"
#include "cc_pad2d.h"
#include "cc_pool2d.h"
#include "cc_tsrmgr.h"
Expand All @@ -27,4 +29,4 @@ void cc_print_info(void);
}
#endif

#endif /* _CARCOON_H_ */
#endif /* _CATCOON_H_ */
48 changes: 48 additions & 0 deletions src/cc_actfn.c
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
#include <string.h>

#include "cc_basic.h"
#include "cc_actfn.h"

/* #include "global_fn_cfg.h" */
extern void (*_activation_relu)
(void *inp, cc_int32 elems, cc_dtype dt);
extern void (*_activation_relu6)
(void *inp, cc_int32 elems, cc_dtype dt);
extern void (*_activation_softmax)
(void *inp, cc_int32 elems, cc_dtype dt);

/*
 * Apply ReLU activation to a tensor.
 * If `name` is NULL or matches the tensor's own name, the activation
 * is applied in place; otherwise a copy named `name` is created and
 * activated. Returns the activated tensor.
 */
cc_tensor_t *cc_relu(cc_tensor_t *tensor, const char *name)
{
	cc_tensor_t *oup;
	int in_place = (!name || !strcmp(name, tensor->name));
	oup = in_place ? tensor : cc_copy_tensor(tensor, name);
	_activation_relu(oup->data,
		cc_tensor_elements(oup), *oup->dtype);
	return oup;
}

/*
 * Apply ReLU6 activation to a tensor.
 * If `name` is NULL or matches the tensor's own name, the activation
 * is applied in place; otherwise a copy named `name` is created and
 * activated. Returns the activated tensor.
 */
cc_tensor_t *cc_relu6(cc_tensor_t *tensor, const char *name)
{
	cc_tensor_t *oup;
	int in_place = (!name || !strcmp(name, tensor->name));
	oup = in_place ? tensor : cc_copy_tensor(tensor, name);
	_activation_relu6(oup->data,
		cc_tensor_elements(oup), *oup->dtype);
	return oup;
}

/*
 * Apply softmax activation to a tensor.
 * If `name` is NULL or matches the tensor's own name, the activation
 * is applied in place; otherwise a copy named `name` is created and
 * activated. Returns the activated tensor.
 */
cc_tensor_t *cc_softmax(cc_tensor_t *tensor, const char *name)
{
	cc_tensor_t *oup;
	int in_place = (!name || !strcmp(name, tensor->name));
	oup = in_place ? tensor : cc_copy_tensor(tensor, name);
	_activation_softmax(oup->data,
		cc_tensor_elements(oup), *oup->dtype);
	return oup;
}
19 changes: 19 additions & 0 deletions src/cc_actfn.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
#ifndef _CC_ACTFN_H_
#define _CC_ACTFN_H_

#ifdef __cplusplus
extern "C" {
#endif

#include "cc_tensor.h"

/*
 * Activation functions. Each applies the activation to `tensor`:
 * in place when `name` is NULL or equals the tensor's own name,
 * otherwise on a copy created with the given name. The activated
 * tensor is returned.
 */
cc_tensor_t *cc_relu (cc_tensor_t *tensor, const char *name);
cc_tensor_t *cc_relu6(cc_tensor_t *tensor, const char *name);

cc_tensor_t *cc_softmax(cc_tensor_t *tensor, const char *name);

#ifdef __cplusplus
}
#endif

#endif /* _CC_ACTFN_H_ */
44 changes: 0 additions & 44 deletions src/cc_basic.c
Original file line number Diff line number Diff line change
Expand Up @@ -11,14 +11,6 @@
#include "util_log.h"
#include "cc_basic.h"

/* #include "global_fn_cfg.h" */
extern void (*_activation_relu)
(void *inp, cc_int32 elems, cc_dtype dt);
extern void (*_activation_relu6)
(void *inp, cc_int32 elems, cc_dtype dt);
extern void (*_activation_softmax)
(void *inp, cc_int32 elems, cc_dtype dt);

static cc_int32 _calc_elems(const cc_int32 *shape)
{
cc_int32 elems;
Expand Down Expand Up @@ -239,39 +231,3 @@ cc_tensor_t *cc_tensor_by_scalar(cc_tensor_t *tensor,
}
return yield;
}

/* Apply ReLU: in place when `name` is NULL or equals the tensor's
 * own name, otherwise on a copy named `name`. Returns the result. */
cc_tensor_t *cc_relu(cc_tensor_t *tensor, const char *name)
{
	cc_tensor_t *relu;
	if (!name || !strcmp(name, tensor->name))
		relu = tensor;
	else
		relu = cc_copy_tensor(tensor, name);
	_activation_relu(relu->data,
		cc_tensor_elements(relu), *relu->dtype);
	return relu;
}

/* Apply ReLU6: in place when `name` is NULL or equals the tensor's
 * own name, otherwise on a copy named `name`. Returns the result. */
cc_tensor_t *cc_relu6(cc_tensor_t *tensor, const char *name)
{
	cc_tensor_t *relu;
	if (!name || !strcmp(name, tensor->name))
		relu = tensor;
	else
		relu = cc_copy_tensor(tensor, name);
	_activation_relu6(relu->data,
		cc_tensor_elements(relu), *relu->dtype);
	return relu;
}

/* Apply softmax: in place when `name` is NULL or equals the tensor's
 * own name, otherwise on a copy named `name`. Returns the result. */
cc_tensor_t *cc_softmax(cc_tensor_t *tensor, const char *name)
{
	cc_tensor_t *softmax;
	if (!name || !strcmp(name, tensor->name))
		softmax = tensor;
	else
		softmax = cc_copy_tensor(tensor, name);
	_activation_softmax(softmax->data,
		cc_tensor_elements(softmax), *softmax->dtype);
	return softmax;
}
6 changes: 0 additions & 6 deletions src/cc_basic.h
Original file line number Diff line number Diff line change
Expand Up @@ -25,12 +25,6 @@ cc_tensor_t *cc_cast_tensor(cc_tensor_t *tensor,
cc_tensor_t *cc_tensor_by_scalar(cc_tensor_t *tensor,
char op, void *data, const char *name);

cc_tensor_t *cc_relu (cc_tensor_t *tensor, const char *name);
cc_tensor_t *cc_relu6(cc_tensor_t *tensor, const char *name);

cc_tensor_t *cc_softmax(cc_tensor_t *tensor, const char *name);


#ifdef __cplusplus
}
#endif
Expand Down
79 changes: 79 additions & 0 deletions src/cc_cpufn.c
Original file line number Diff line number Diff line change
Expand Up @@ -415,3 +415,82 @@ void cc_cpu_fully_connected(void *inp, void *oup,
UNSUPPORTED_DTYPE_LOG(dt);
}
}

/*
 * Generates cc_cpu_batch_norm_<dt>(): applies batch normalization
 *   y = gamma * (x - mean) / sqrt(var + epsilon) + beta
 * to `len` elements of `inp` in place. The five parameters are read
 * from `bnpara` at the CC_BN_OFFSET_* indices.
 * NOTE(review): for integer dtypes the arithmetic (and the result of
 * sqrt) is truncated to the integer type — confirm this is intended.
 */
#define CC_CPU_BATCH_NORM_IMPLEMENTATION(dt) \
void cc_cpu_batch_norm_ ## dt(cc_ ## dt *inp, \
		cc_int32 len, cc_ ## dt *bnpara) \
{ \
	cc_ ## dt gamma = *(bnpara + CC_BN_OFFSET_GAMMA), \
		beta = *(bnpara + CC_BN_OFFSET_BETA), \
		mean = *(bnpara + CC_BN_OFFSET_MEAN), \
		var = *(bnpara + CC_BN_OFFSET_VAR), \
		epsilon = *(bnpara + CC_BN_OFFSET_EPSILON); \
	cc_ ## dt frac = (cc_ ## dt)sqrt((double)var + epsilon); \
	cc_int32 i; \
	for (i = 0; i < len; ++i) { \
		*(inp + i) = (gamma * \
			(*(inp + i) - mean) / frac) \
			+ beta; \
	} \
	return; \
}

/* Instantiate one typed implementation per supported dtype. */
CC_CPU_BATCH_NORM_IMPLEMENTATION  (uint8)
CC_CPU_BATCH_NORM_IMPLEMENTATION  (uint16)
CC_CPU_BATCH_NORM_IMPLEMENTATION  (uint32)
CC_CPU_BATCH_NORM_IMPLEMENTATION  (uint64)
CC_CPU_BATCH_NORM_IMPLEMENTATION  (int8)
CC_CPU_BATCH_NORM_IMPLEMENTATION  (int16)
CC_CPU_BATCH_NORM_IMPLEMENTATION  (int32)
CC_CPU_BATCH_NORM_IMPLEMENTATION  (int64)
CC_CPU_BATCH_NORM_IMPLEMENTATION  (float32)
CC_CPU_BATCH_NORM_IMPLEMENTATION  (float64)

/*
 * Dispatch batch normalization to the typed implementation selected
 * by `dt`. Unsupported dtypes are reported via utlog_format and the
 * input is left untouched.
 */
void cc_cpu_batch_norm(void *inp, cc_int32 len, void *bnpara, cc_dtype dt)
{
#define BN_DISPATCH(DT, t) \
	case DT: \
		cc_cpu_batch_norm_ ## t( \
			(cc_ ## t*)inp, len, (cc_ ## t*)bnpara); \
		break;
	switch (dt) {
	BN_DISPATCH(CC_UINT8,   uint8)
	BN_DISPATCH(CC_UINT16,  uint16)
	BN_DISPATCH(CC_UINT32,  uint32)
	BN_DISPATCH(CC_UINT64,  uint64)
	BN_DISPATCH(CC_INT8,    int8)
	BN_DISPATCH(CC_INT16,   int16)
	BN_DISPATCH(CC_INT32,   int32)
	BN_DISPATCH(CC_INT64,   int64)
	BN_DISPATCH(CC_FLOAT32, float32)
	BN_DISPATCH(CC_FLOAT64, float64)
	default:
		utlog_format(UTLOG_ERR,
			"cc_cpufn: unsupported dtype %x\n", dt);
	}
#undef BN_DISPATCH
}
19 changes: 19 additions & 0 deletions src/cc_cpufn.h
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,23 @@

#include "cc_dtype.h"

/*
 * Batch Normalization parameters offset
 * bnpara:
 * GAMMA | BETA | MEAN | VAR | EPSILON
 */
#ifndef CC_BN_OFFSET_CFG
#define CC_BN_OFFSET_CFG
/* Index of each parameter within one channel's bnpara group;
 * CC_BN_PARAMETERS is the group length. Guarded so the same enum
 * can be defined by more than one header without conflict. */
enum cc_batch_norm_paraoff {
	CC_BN_OFFSET_GAMMA,
	CC_BN_OFFSET_BETA,
	CC_BN_OFFSET_MEAN,
	CC_BN_OFFSET_VAR,
	CC_BN_OFFSET_EPSILON,
	CC_BN_PARAMETERS
};
#endif

void cc_cpu_activation_relu(void *inp, cc_int32 elems, cc_dtype dt);

void cc_cpu_activation_relu6(void *inp, cc_int32 elems, cc_dtype dt);
Expand All @@ -23,6 +40,8 @@ void cc_cpu_conv2d(void *inp, void *oup,
void cc_cpu_fully_connected(void *inp, void *oup,
void *w, void *b, cc_int32 iw, cc_int32 ow, cc_dtype dt);

void cc_cpu_batch_norm(void *inp, cc_int32 len, void *bnpara, cc_dtype dt);

#ifdef __cplusplus
}
#endif
Expand Down
35 changes: 35 additions & 0 deletions src/cc_normfn.c
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
#include <string.h>

#include "cc_assert.h"
#include "cc_basic.h"
#include "cc_fmap2d.h"
#include "cc_normfn.h"

/* #include "global_fn_cfg.h" */
extern void (*_batch_norm)(
void *inp, cc_int32 len, void *bnpara, cc_dtype dt);

/*
 * 2-D batch normalization over a CNN feature map.
 * `para` holds one CC_BN_PARAMETERS-sized group per channel
 * (GAMMA | BETA | MEAN | VAR | EPSILON; see cc_normfn.h).
 * In place when `name` is NULL or equals `inp`'s name, otherwise on
 * a copy named `name`. Returns the normalized tensor.
 *
 * Fix: the per-channel kernel must run on the OUTPUT tensor's
 * buffer. The previous code passed `inp->data` even after creating
 * a copy `oup`, so it mutated the input and returned an
 * unnormalized copy.
 */
cc_tensor_t *cc_batch_norm2d(cc_tensor_t *inp,
	cc_tensor_t *para, const char *name)
{
	cc_tensor_t *oup;
	cc_int32 i, dt_size, ch_size, ch_mem_size;
#ifdef ENABLE_CC_ASSERT
	cc_assert_zero(cc_tensor_dimension(inp) - CC_CNN2D_DIM);
	cc_assert_zero(*inp->dtype - *para->dtype);
#endif
	if (!name || !strcmp(name, inp->name))
		oup = inp;
	else
		oup = cc_copy_tensor(inp, name);
	dt_size = cc_dtype_size(*inp->dtype);
	ch_size = inp->shape[CC_CNN2D_SHAPE_H] *
			inp->shape[CC_CNN2D_SHAPE_W];
	ch_mem_size = ch_size * dt_size;
	/* normalize each channel with its own parameter group;
	 * operate on oup->data so copies are actually normalized */
	for (i = 0; i < inp->shape[CC_CNN2D_SHAPE_C]; ++i) {
		_batch_norm(oup->data + ch_mem_size * i,
			ch_size, para->data + CC_BN_PARAMETERS *
				dt_size * i, *para->dtype);
	}
	return oup;
}
38 changes: 38 additions & 0 deletions src/cc_normfn.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
#ifndef _CC_NORMFN_H_
#define _CC_NORMFN_H_

#ifdef __cplusplus
extern "C" {
#endif

#include "cc_tensor.h"

/*
 * Batch Normalization parameters offset
 * bnpara:
 * GAMMA | BETA | MEAN | VAR | EPSILON
 * cc_int32 shape[] = {ch, CC_BN_PARAMETERS, 1, 0}
 *                     \    \___ number of parameters(5)
 *                      \___________ number of channels
 * cc_tensor_t *bnpara = cc_create_tensor(shape, dt, "name");
 */
#ifndef CC_BN_OFFSET_CFG
#define CC_BN_OFFSET_CFG
/* Guarded so the same enum can be defined by more than one header. */
enum cc_batch_norm_paraoff {
	CC_BN_OFFSET_GAMMA,
	CC_BN_OFFSET_BETA,
	CC_BN_OFFSET_MEAN,
	CC_BN_OFFSET_VAR,
	CC_BN_OFFSET_EPSILON,
	CC_BN_PARAMETERS
};
#endif

/* Batch-normalize a 2-D feature map channel by channel using the
 * per-channel parameter groups in `para`. In place when `name` is
 * NULL or equals inp's name, otherwise on a copy named `name`. */
cc_tensor_t *cc_batch_norm2d(cc_tensor_t *inp,
	cc_tensor_t *para, const char *name);

#ifdef __cplusplus
}
#endif

#endif /* _CC_NORMFN_H_ */
2 changes: 2 additions & 0 deletions src/global_fn_cfg.h
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,8 @@ void (*_conv2d)(void *inp, void *oup,
void (*_fully_connected)(void *inp, void *oup, void *w, void *b,
cc_int32 iw, cc_int32 ow, cc_dtype dt) = cc_cpu_fully_connected;

void (*_batch_norm)(void *inp, cc_int32 len,
void *bnpara, cc_dtype dt) = cc_cpu_batch_norm;

#ifdef __cplusplus
}
Expand Down

0 comments on commit 92790eb

Please sign in to comment.