Commit 0f464e1: dtype

i-evi committed Dec 8, 2020
1 parent 2b10d91 commit 0f464e1
Showing 27 changed files with 1,576 additions and 1,573 deletions.
18 changes: 9 additions & 9 deletions demo/lenet.c
@@ -74,15 +74,15 @@ int main(int argc, const char *argv[])
/* Parameters */
cc_tensor_t *conv1_w, *conv1_b,
*conv2_w, *conv2_b, *fc1_w, *fc1_b, *fc2_w, *fc2_b;
cc_int32 shape_conv1_w[] = {32, 1, 5, 5, 0};
cc_int32 shape_conv1_b[] = {32, 0};
cc_int32 shape_conv2_w[] = {64, 32, 5, 5, 0};
cc_int32 shape_conv2_b[] = {64, 0};
cc_int32 shape_flat[] = {-1, 1, 1, 0};
cc_int32 shape_fc1_w[] = {128, 3136, 1, 1, 0};
cc_int32 shape_fc1_b[] = {128, 0};
cc_int32 shape_fc2_w[] = {10, 128, 1, 1, 0};
cc_int32 shape_fc2_b[] = {10, 0};
cc_ssize shape_conv1_w[] = {32, 1, 5, 5, 0};
cc_ssize shape_conv1_b[] = {32, 0};
cc_ssize shape_conv2_w[] = {64, 32, 5, 5, 0};
cc_ssize shape_conv2_b[] = {64, 0};
cc_ssize shape_flat[] = {-1, 1, 1, 0};
cc_ssize shape_fc1_w[] = {128, 3136, 1, 1, 0};
cc_ssize shape_fc1_b[] = {128, 0};
cc_ssize shape_fc2_w[] = {10, 128, 1, 1, 0};
cc_ssize shape_fc2_b[] = {10, 0};

arg_parser(argc, (char**)argv);

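The shape arrays above are zero-terminated lists of cc_ssize; the following is a minimal sketch (not part of the commit, assuming only that convention and mirroring _calc_elems in src/cc_basic.c further down) of how the element count falls out of such an array:

#include <stdio.h>

typedef long cc_ssize;  /* assumption: stand-in for the library's cc_ssize typedef */

/* Multiply dimensions until the 0 sentinel, as _calc_elems does below. */
static cc_ssize elems_of(const cc_ssize *shape)
{
	cc_ssize n = *shape;
	while (*++shape)
		n *= *shape;
	return n;
}

int main(void)
{
	cc_ssize shape_conv1_w[] = {32, 1, 5, 5, 0};
	printf("%ld\n", (long)elems_of(shape_conv1_w));  /* 32*1*5*5 = 800 */
	return 0;
}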
2 changes: 1 addition & 1 deletion demo/simple.c
@@ -3,7 +3,7 @@
int main(int argc, const char *argv[])
{
cc_tensor_t *tensor;
cc_int32 shape[] = {3, 3, 3, 0};
cc_ssize shape[] = {3, 3, 3, 0};
tensor = cc_create(shape, CC_FLOAT32, "tensor0");
cc_property(tensor);
cc_free(tensor);
2 changes: 1 addition & 1 deletion demo/vgg16.c
@@ -86,7 +86,7 @@ void arg_parser(int argc, char* const argv[])

void vgg16(cc_tensor_t *in, cc_tensor_t **out)
{
static int __shape0[] = {-1, 1, 1, 0};
static cc_ssize __shape0[] = {-1, 1, 1, 0};
static const char *p_namels[] = {
"000.w", "000.b", "001.w", "001.b", "002.w", "002.b", "003.w",
"003.b", "004.w", "004.b", "005.w", "005.b", "006.w", "006.b",
92 changes: 46 additions & 46 deletions src/cc_basic.c
@@ -11,28 +11,28 @@
#include "util_log.h"
#include "cc_basic.h"

static cc_int32 _calc_elems(const cc_int32 *shape)
static cc_ssize _calc_elems(const cc_ssize *shape)
{
cc_int32 elems;
cc_ssize elems;
elems = *shape;
while (*++shape)
elems *= *shape;
return elems;
}

cc_int32 cc_elements(const cc_tensor_t *tensor)
cc_ssize cc_elements(const cc_tensor_t *tensor)
{
cc_int32 elems;
cc_ssize elems;
if (!tensor)
return 0;
elems = _calc_elems(tensor->shape);
return elems;
}

void cc_shape_fix(cc_int32 *shape, cc_int32 elems)
void cc_shape_fix(cc_ssize *shape, cc_ssize elems)
{
cc_int32 v, i = 0, f = 0, s = 1;
const cc_int32 *sptr = shape;
cc_ssize v, i = 0, f = 0, s = 1;
const cc_ssize *sptr = shape;
while ((v = *sptr)) {
if (v == -1) {
#ifdef ENABLE_CC_ASSERT
@@ -49,10 +49,10 @@ void cc_shape_fix(cc_int32 *shape, cc_int32 elems)
}
}

cc_int32 cc_dimension(const cc_tensor_t *tensor)
cc_ssize cc_dimension(const cc_tensor_t *tensor)
{
cc_int32 dim = 0;
const cc_int32 *sptr;
cc_ssize dim = 0;
const cc_ssize *sptr;
if (!tensor)
return 0;
sptr = tensor->shape;
@@ -61,10 +61,10 @@ cc_int32 cc_dimension(const cc_tensor_t *tensor)
return dim;
}

cc_tensor_t *cc_reshape(cc_tensor_t *tensor, cc_int32 *shape)
cc_tensor_t *cc_reshape(cc_tensor_t *tensor, cc_ssize *shape)
{
cc_int32 elems;
const cc_int32 *sptr;
cc_ssize elems;
const cc_ssize *sptr;
cc_shape_fix(shape, _calc_elems(tensor->shape));
#ifdef ENABLE_CC_ASSERT
cc_assert_zero(_calc_elems(tensor->shape) - _calc_elems(shape));
@@ -77,19 +77,19 @@ cc_tensor_t *cc_reshape(cc_tensor_t *tensor, cc_int32 *shape)
list_erase(tensor->container, CC_TENSOR_SHAPE));
cc_assert_ptr(
list_set_data(tensor->container, CC_TENSOR_SHAPE,
shape, (sptr - shape + 1) * sizeof(cc_int32)));
shape, (sptr - shape + 1) * sizeof(cc_ssize)));
cc_assert_ptr(
tensor->shape = (cc_int32*)
tensor->shape = (cc_ssize*)
list_index(tensor->container, CC_TENSOR_SHAPE));
return tensor;
}

int cc_compare_by_shape(const cc_tensor_t *a, const cc_tensor_t *b)
{
int ret = 0;
const cc_int32 *ptra = a->shape;
const cc_int32 *ptrb = b->shape;
while(!(ret = *(cc_int32*)ptra - *(cc_int32*)ptrb) && *ptra) {
const cc_ssize *ptra = a->shape;
const cc_ssize *ptrb = b->shape;
while(!(ret = *(cc_ssize*)ptra - *(cc_ssize*)ptrb) && *ptra) {
ptra++;
ptrb++;
}
@@ -101,22 +101,22 @@ int cc_compare_by_shape(const cc_tensor_t *a, const cc_tensor_t *b)
}

cc_tensor_t *cc_stack(cc_tensor_t **tsr,
cc_int32 ntsr, cc_int32 axis, const char *name)
cc_ssize ntsr, cc_ssize axis, const char *name)
{
cc_tensor_t *yield;
cc_int32 *shape;
cc_int32 i, dim, size, umem, ymem;
cc_int32 ncp = 0, nstp = 0;
cc_int32 off = 0, unit = 1;
cc_ssize *shape;
cc_ssize i, dim, size, umem, ymem;
cc_ssize ncp = 0, nstp = 0;
cc_ssize off = 0, unit = 1;
dim = cc_dimension(tsr[0]);
cc_assert_ptr(shape =
(cc_int32*)calloc(dim + 2, sizeof(cc_int32)));
(cc_ssize*)calloc(dim + 2, sizeof(cc_ssize)));
if (dim == axis) { /* axis <= dim */
off = 1;
shape[0] = 1;
}
memcpy(shape + off,
tsr[0]->shape, dim * sizeof(cc_int32));
tsr[0]->shape, dim * sizeof(cc_ssize));
shape[dim + off - 1 - axis] *= ntsr;
cc_assert_ptr(yield =
cc_create(shape, *tsr[0]->dtype, name));
@@ -141,16 +141,16 @@
}

cc_tensor_t *cc_concat(cc_tensor_t **tsr,
cc_int32 ntsr, cc_int32 axis, const char *name)
cc_ssize ntsr, cc_ssize axis, const char *name)
{
cc_tensor_t *yield;
cc_int32 *shape;
cc_int32 i, j, dim, raxis, umem, cmem;
cc_int32 ncp = 0, nseg = 1, unit = 1;
cc_ssize *shape;
cc_ssize i, j, dim, raxis, umem, cmem;
cc_ssize ncp = 0, nseg = 1, unit = 1;
dim = cc_dimension(tsr[0]);
cc_assert_ptr(shape =
(cc_int32*)calloc(dim + 1, sizeof(cc_int32)));
memcpy(shape, tsr[0]->shape, dim * sizeof(cc_int32));
(cc_ssize*)calloc(dim + 1, sizeof(cc_ssize)));
memcpy(shape, tsr[0]->shape, dim * sizeof(cc_ssize));
raxis = dim - 1 - axis;
shape[raxis] = 0;
for (i = 0; i < ntsr; ++i) {
@@ -247,19 +247,19 @@ static void _cc_print_indent(int n)

void cc_print(const cc_tensor_t *tensor)
{
cc_int32 *sops, *sbak;
cc_ssize *sops, *sbak;
int fidt, cidt;
cc_int32 i, j, dim, lelem, lsize, esize, ssize, npt = 0;
cc_ssize i, j, dim, lelem, lsize, esize, ssize, npt = 0;
FILE *ostream = (FILE*)utlog_get_ostream();
dim = cc_dimension(tensor);
esize = cc_dtype_size(*tensor->dtype);
lelem = tensor->shape[dim - 1];
lsize = lelem * esize;
ssize = (dim + 1) * sizeof(cc_int32);
ssize = (dim + 1) * sizeof(cc_ssize);
cc_assert_ptr(sops =
(cc_int32*)calloc(dim + 1, sizeof(cc_int32)));
(cc_ssize*)calloc(dim + 1, sizeof(cc_ssize)));
cc_assert_ptr(sbak =
(cc_int32*)calloc(dim + 1, sizeof(cc_int32)));
(cc_ssize*)calloc(dim + 1, sizeof(cc_ssize)));
memcpy(sops, tensor->shape, ssize);
memcpy(sbak, tensor->shape, ssize);
sops[dim - 1] = 0;
@@ -280,8 +280,8 @@ cc_tensor_t *cc_clip_by_value(cc_tensor_t *tensor,
const void *min, const void *max, const char *name)
{
cc_tensor_t *yield;
cc_int32 elems = *tensor->shape;
const cc_int32 *sptr = tensor->shape;
cc_ssize elems = *tensor->shape;
const cc_ssize *sptr = tensor->shape;
while (*++sptr)
elems *= *sptr;
if (!name || !strcmp(name, tensor->name))
@@ -297,8 +297,8 @@ cc_tensor_t *cc_cast(cc_tensor_t *tensor,
cc_dtype dtype, const char *name)
{
cc_tensor_t *cast;
const cc_int32 *sptr = tensor->shape;
cc_int32 memsize, elems = *tensor->shape;
const cc_ssize *sptr = tensor->shape;
cc_ssize memsize, elems = *tensor->shape;
while (*++sptr)
elems *= *sptr;
memsize = cc_dtype_size(dtype) * elems;
@@ -373,8 +373,8 @@ cc_tensor_t *cc_scalar(cc_tensor_t *tensor,
char op, const void *data, const char *name)
{
cc_tensor_t *yield;
cc_int32 elems = *tensor->shape;
const cc_int32 *sptr = tensor->shape;
cc_ssize elems = *tensor->shape;
const cc_ssize *sptr = tensor->shape;
while (*++sptr)
elems *= *sptr;
if (!name || !strcmp(name, tensor->name))
@@ -417,8 +417,8 @@ cc_tensor_t *cc_elemwise(cc_tensor_t *a,
cc_tensor_t *b, char op, const char *name)
{
cc_tensor_t *yield;
cc_int32 elems = *a->shape;
const cc_int32 *sptr = a->shape;
cc_ssize elems = *a->shape;
const cc_ssize *sptr = a->shape;
while (*++sptr)
elems *= *sptr;
#ifdef ENABLE_CC_ASSERT
@@ -460,9 +460,9 @@ cc_tensor_t *cc_elemwise(cc_tensor_t *a,
}

cc_tensor_t *cc_from_array(void *arr,
const cc_int32 *shape, cc_dtype dtype, const char *name)
const cc_ssize *shape, cc_dtype dtype, const char *name)
{
cc_int32 memsize;
cc_ssize memsize;
cc_tensor_t *tensor;
cc_assert_ptr(tensor = cc_create(shape, dtype, name));
memsize = list_getlen(tensor->container, CC_TENSOR_DATA);
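cc_from_array, whose signature changes above, can be sketched in use as follows (a hedged sketch only: it assumes cc_from_array copies `arr` into the new tensor's data buffer, and that cc_float32 is the element typedef matching CC_FLOAT32):

#include "cc_basic.h"

void from_array_demo(void)
{
	/* Build a 3x3 CC_FLOAT32 tensor from a plain C array. */
	cc_float32 data[9] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
	cc_ssize shape[] = {3, 3, 0};
	cc_tensor_t *t = cc_from_array(data, shape, CC_FLOAT32, "t_from_array");
	cc_print(t);  /* print the tensor's elements */
	cc_free(t);
}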
14 changes: 7 additions & 7 deletions src/cc_basic.h
@@ -7,27 +7,27 @@

#include "cc_tensor.h"

cc_int32 cc_elements (const cc_tensor_t *tensor);
cc_ssize cc_elements (const cc_tensor_t *tensor);

cc_int32 cc_dimension(const cc_tensor_t *tensor);
cc_ssize cc_dimension(const cc_tensor_t *tensor);

void cc_shape_fix(cc_int32 *shape, cc_int32 elems);
void cc_shape_fix(cc_ssize *shape, cc_ssize elems);

/*
* Before reshaping, `cc_reshape` checks and fixes the given `shape`.
* e.g. a tensor with shape `[3, 3, 3]` can be reshaped through the
* argument `shape = [-1, 3]`; after reshaping, the tensor's shape is
* `[9, 3]`, and `shape` is modified to `[9, 3]`.
*/
cc_tensor_t *cc_reshape(cc_tensor_t *tensor, cc_int32 *shape);
cc_tensor_t *cc_reshape(cc_tensor_t *tensor, cc_ssize *shape);

int cc_compare_by_shape(const cc_tensor_t *a, const cc_tensor_t *b);

cc_tensor_t *cc_stack(cc_tensor_t **tsr,
cc_int32 ntsr, cc_int32 axis, const char *name);
cc_ssize ntsr, cc_ssize axis, const char *name);

cc_tensor_t *cc_concat(cc_tensor_t **tsr,
cc_int32 ntsr, cc_int32 axis, const char *name);
cc_ssize ntsr, cc_ssize axis, const char *name);

void cc_print(const cc_tensor_t *tensor);

@@ -51,7 +51,7 @@ cc_tensor_t *cc_clip_by_value(cc_tensor_t *tensor,
const void *min, const void *max, const char *name);

cc_tensor_t *cc_from_array(void *arr,
const cc_int32 *shape, cc_dtype dtype, const char *name);
const cc_ssize *shape, cc_dtype dtype, const char *name);

#ifdef __cplusplus
}
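A short usage sketch of the reshape convention documented above, assuming only the cc_create, cc_reshape, cc_property, and cc_free calls that appear elsewhere in this commit:

#include "cc_basic.h"

void reshape_demo(void)
{
	/* Sketch: a [3, 3, 3] tensor reshaped via shape = [-1, 3]. */
	cc_ssize shape0[] = {3, 3, 3, 0};
	cc_ssize shape1[] = {-1, 3, 0};
	cc_tensor_t *t = cc_create(shape0, CC_FLOAT32, "tensor0");
	t = cc_reshape(t, shape1);  /* cc_shape_fix resolves the -1; shape1 becomes {9, 3, 0} */
	cc_property(t);             /* should report shape [9, 3] */
	cc_free(t);
}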
22 changes: 11 additions & 11 deletions src/cc_conv2d.c
@@ -19,22 +19,22 @@
#include "global_fn_cfg.h"
extern fn_conv2d _conv2d;

cc_int32 cc_conv2d_shape_calc(
cc_int32 i, cc_int32 k, cc_int32 s, cc_int32 p)
cc_ssize cc_conv2d_shape_calc(
cc_ssize i, cc_ssize k, cc_ssize s, cc_ssize p)
{
return (cc_int32)((i - k + 2 * p) / s) + 1;
return (cc_ssize)((i - k + 2 * p) / s) + 1;
}

cc_tensor_t *cc_conv2d(const cc_tensor_t *inp,
const cc_tensor_t *kernel, const cc_tensor_t *bias,
cc_int32 s, cc_int32 p, cc_int32 off, const char *name)
cc_ssize s, cc_ssize p, cc_ssize off, const char *name)
{
cc_uint8 *omp_out_buf = NULL;
cc_tensor_t *oup = NULL;
const cc_tensor_t *inp_pad;
cc_int32 o_ch_size, p_ch_mem_size, o_ch_mem_size,
cc_ssize o_ch_size, p_ch_mem_size, o_ch_mem_size,
k_ch_mem_size, k_mem_size, num_omp_threads, i, j;
cc_int32 shape[CC_CNN2D_SHAPE] = {0};
cc_ssize shape[CC_CNN2D_SHAPE] = {0};
char pad_name[CC_CONV2D_PAD_NAME_LEN];
#ifdef ENABLE_CC_ASSERT
cc_assert_zero(cc_dimension(inp) - CC_CNN2D_DIM);
@@ -139,11 +139,11 @@ cc_tensor_t *cc_conv2d(const cc_tensor_t *inp,

cc_tensor_t *cc_dw_conv2d(cc_tensor_t *inp,
const cc_tensor_t *kernel, const cc_tensor_t *bias,
cc_int32 s, cc_int32 p, cc_int32 off, const char *name)
cc_ssize s, cc_ssize p, cc_ssize off, const char *name)
{
cc_tensor_t *inp_pad, *oup = NULL;
cc_int32 o_ch_size, p_ch_mem_size, o_ch_mem_size, k_ch_mem_size, i;
cc_int32 shape[CC_CNN2D_SHAPE] = {0};
cc_ssize o_ch_size, p_ch_mem_size, o_ch_mem_size, k_ch_mem_size, i;
cc_ssize shape[CC_CNN2D_SHAPE] = {0};
char pad_name[CC_CONV2D_PAD_NAME_LEN];
#ifdef ENABLE_CC_ASSERT
cc_assert_zero(cc_dimension(inp) - CC_CNN2D_DIM);
@@ -220,9 +220,9 @@ cc_tensor_t *cc_pw_conv2d(cc_tensor_t *inp, const cc_tensor_t *kernel,
{
cc_uint8 *omp_out_buf = NULL;
cc_tensor_t *oup = NULL;
cc_int32 o_ch_size, o_ch_mem_size,
cc_ssize o_ch_size, o_ch_mem_size,
k_ch_mem_size, k_mem_size, num_omp_threads, i, j;
cc_int32 shape[CC_CNN2D_SHAPE] = {0};
cc_ssize shape[CC_CNN2D_SHAPE] = {0};
#ifdef ENABLE_CC_ASSERT
cc_assert_zero(cc_dimension(inp) - CC_CNN2D_DIM);
cc_assert_zero(cc_dimension(kernel) - CC_CONV2D_KERNEL_DIM);
8 changes: 4 additions & 4 deletions src/cc_conv2d.h
@@ -18,17 +18,17 @@ enum cc_conv2d_kernel {

#include "cc_tensor.h"

cc_int32 cc_conv2d_shape_calc(
cc_int32 i, cc_int32 k, cc_int32 s, cc_int32 p);
cc_ssize cc_conv2d_shape_calc(
cc_ssize i, cc_ssize k, cc_ssize s, cc_ssize p);

cc_tensor_t *cc_conv2d(const cc_tensor_t *inp,
const cc_tensor_t *kernel, const cc_tensor_t *bias,
cc_int32 s, cc_int32 p, cc_int32 off, const char *name);
cc_ssize s, cc_ssize p, cc_ssize off, const char *name);

/* Depth-wise convolution 2d */
cc_tensor_t *cc_dw_conv2d(cc_tensor_t *inp,
const cc_tensor_t *kernel, const cc_tensor_t *bias,
cc_int32 s, cc_int32 p, cc_int32 off, const char *name);
cc_ssize s, cc_ssize p, cc_ssize off, const char *name);

/* Point-wise convolution 2d */
cc_tensor_t *cc_pw_conv2d(cc_tensor_t *inp, const cc_tensor_t *kernel,
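The output-size rule implemented by cc_conv2d_shape_calc above is o = (i - k + 2p) / s + 1 with integer division; a couple of hedged example values:

#include "cc_conv2d.h"

void shape_calc_demo(void)
{
	/* Sketch only: sample values for cc_conv2d_shape_calc(i, k, s, p). */
	cc_ssize o1 = cc_conv2d_shape_calc(32, 5, 1, 0); /* (32 - 5 + 0)/1 + 1 = 28 */
	cc_ssize o2 = cc_conv2d_shape_calc(32, 3, 2, 1); /* (32 - 3 + 2)/2 + 1 = 16 */
	(void)o1; (void)o2;
}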
2 changes: 1 addition & 1 deletion src/cc_dtype.c
@@ -14,7 +14,7 @@ void __________cc_datatype_check__________()
COMPILER_ASSERT(CC_8B_LEN - sizeof(cc_float64));
}

int cc_dtype_size(cc_dtype dt)
cc_ssize cc_dtype_size(cc_dtype dt)
{
switch (dt) {
case CC_INT8: