Coverage Report

Created: 2019-07-03 22:50

/home/liu/buildslave/linux-x64-runtests/build/test/unit/nnc/reduce.tests.c
Line  Count  Source
   1         #include "case.h"
   2         #include "ccv_case.h"
   3         #include "ccv_nnc_case.h"
   4         #include <ccv.h>
   5         #include <nnc/ccv_nnc.h>
   6         #include <nnc/ccv_nnc_easy.h>
   7         #include "3rdparty/dsfmt/dSFMT.h"
   8
   9         TEST_SETUP()
  10         {
  11           ccv_nnc_init();
  12         }
  13
  14         TEST_CASE("reduce sum for [[1, 2, 3], [4, 5, 6]] on axis 1")
  15      1  {
  16      1    ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 3), 0);
  17      1    ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 1), 0);
  18      1    a->data.f32[0] = 1;
  19      1    a->data.f32[1] = 2;
  20      1    a->data.f32[2] = 3;
  21      1    a->data.f32[3] = 4;
  22      1    a->data.f32[4] = 5;
  23      1    a->data.f32[5] = 6;
  24      1    ccv_nnc_cmd_exec(CMD_REDUCE_SUM_FORWARD(1), ccv_nnc_no_hint, 0, TENSOR_LIST(a), TENSOR_LIST(b), 0);
  25      1    float btp[] = {
  26      1      6,
  27      1      15
  28      1    };
  29      1    ccv_nnc_tensor_t bt = ccv_nnc_tensor(btp, CPU_TENSOR_NHWC(32F, 2, 1), 0);
  30      1    REQUIRE_TENSOR_EQ(b, &bt, "result should be equal");
  31      1    ccv_nnc_tensor_free(a);
  32      1    ccv_nnc_tensor_free(b);
  33      1  }
  34
  35         TEST_CASE("reduce sum for [[1, 2, 3], [4, 5, 6]] on axis 0")
  36      1  {
  37      1    ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 3), 0);
  38      1    ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 3), 0);
  39      1    a->data.f32[0] = 1;
  40      1    a->data.f32[1] = 2;
  41      1    a->data.f32[2] = 3;
  42      1    a->data.f32[3] = 4;
  43      1    a->data.f32[4] = 5;
  44      1    a->data.f32[5] = 6;
  45      1    ccv_nnc_cmd_exec(CMD_REDUCE_SUM_FORWARD(0), ccv_nnc_no_hint, 0, TENSOR_LIST(a), TENSOR_LIST(b), 0);
  46      1    float btp[] = {
  47      1      5, 7, 9
  48      1    };
  49      1    ccv_nnc_tensor_t bt = ccv_nnc_tensor(btp, CPU_TENSOR_NHWC(32F, 3), 0);
  50      1    REQUIRE_TENSOR_EQ(b, &bt, "result should be equal");
  51      1    ccv_nnc_tensor_free(a);
  52      1    ccv_nnc_tensor_free(b);
  53      1  }
  54
  55         TEST_CASE("use reduce for softmax")
  56      1  {
  57      1    ccv_nnc_symbolic_graph_t* const symbolic_graph = ccv_nnc_symbolic_graph_new();
  58      1    ccv_nnc_tensor_symbol_t x = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 100), "x");
  59      1    ccv_nnc_tensor_symbol_t max = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 1), "max");
  60      1    ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_REDUCE_MAX_FORWARD(0), TENSOR_SYMBOL_LIST(x), TENSOR_SYMBOL_LIST(max), "max");
  61      1    ccv_nnc_tensor_symbol_t y = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 100), "y");
  62      1    ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_ADD_FORWARD(1, -1), TENSOR_SYMBOL_LIST(x, max), TENSOR_SYMBOL_LIST(y), "neg");
  63      1    ccv_nnc_tensor_symbol_t z = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 100), "z");
  64      1    ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_EWEXP_FORWARD(), TENSOR_SYMBOL_LIST(y), TENSOR_SYMBOL_LIST(z), "exp");
  65      1    ccv_nnc_tensor_symbol_t sum = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 1), "sum");
  66      1    ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_REDUCE_SUM_FORWARD(0), TENSOR_SYMBOL_LIST(z), TENSOR_SYMBOL_LIST(sum), "sum");
  67      1    ccv_nnc_tensor_symbol_t inv_sum = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 1), "1 / sum");
  68      1    ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_EWDIV_FORWARD(), TENSOR_SYMBOL_LIST(NO_TENSOR_SYMBOL, sum), TENSOR_SYMBOL_LIST(inv_sum), "inv sum");
  69      1    ccv_nnc_tensor_symbol_t a = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 100), "a");
  70      1    ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_MUL_FORWARD(1), TENSOR_SYMBOL_LIST(z, inv_sum), TENSOR_SYMBOL_LIST(a), "softmax");
  71      1    ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS);
  72      1    ccv_nnc_symbolic_graph_backward(symbolic_graph, TENSOR_SYMBOL_LIST(a), TENSOR_SYMBOL_LIST(x), SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph));
  73      1    ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS);
  74      1    ccv_nnc_tensor_symbol_t da = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, a);
  75      1    ccv_nnc_tensor_symbol_t dx = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, x);
  76      1    SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH);
  77      1    ccv_nnc_graph_t* graph = 0;
  78      1    ccv_nnc_tensor_arena_t* tensor_arena = 0;
  79      1    ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0;
  80      1    ccv_nnc_tensor_t* const a_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 100), 0);
  81      1    ccv_nnc_tensor_t* const da_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 100), 0);
  82      1    ccv_nnc_symbolic_graph_compile(symbolic_graph, TENSOR_BIND_MAP(KV(a, a_tensor), KV(da, da_tensor)), 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena);
  83      1    GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH);
  84      1    ccv_nnc_tensor_t* const tx_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 100), 0);
  85      1    ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, x);
  86      1    dsfmt_t dsfmt;
  87      1    int i;
  88      1    dsfmt_init_gen_rand(&dsfmt, 1);
  89    101    for (i = 0; i < 100; i++)
  90    100      x_tensor->data.f32[i] = tx_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt);
  91    101    for (i = 0; i < 100; i++)
  92    100      da_tensor->data.f32[i] = 0;
  93      1    da_tensor->data.f32[88] = 1;
  94      1    ccv_nnc_graph_run(graph, 0, 0, 0, TRAVERSE_FULL);
  95      1    ccv_nnc_tensor_t* const ta_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 100), 0);
  96      1    ccv_nnc_cmd_exec(CMD_SOFTMAX_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(tx_tensor), TENSOR_LIST(ta_tensor), 0);
  97      1    REQUIRE_TENSOR_EQ(a_tensor, ta_tensor, "softmax should match from the graph");
  98      1    ccv_nnc_tensor_t* const tdx_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 100), 0);
  99      1    ccv_nnc_cmd_exec(CMD_SOFTMAX_BACKWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(da_tensor, 0, ta_tensor), TENSOR_LIST(tdx_tensor), 0);
 100      1    ccv_nnc_tensor_t* const dx_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, dx);
 101      1    REQUIRE_TENSOR_EQ(dx_tensor, tdx_tensor, "softmax backward should match from the graph");
 102      1    ccv_nnc_tensor_free(tdx_tensor);
 103      1    ccv_nnc_tensor_free(tx_tensor);
 104      1    ccv_nnc_tensor_free(ta_tensor);
 105      1    ccv_nnc_tensor_free(a_tensor);
 106      1    ccv_nnc_tensor_free(da_tensor);
 107      1    ccv_nnc_symbolic_graph_free(symbolic_graph);
 108      1    ccv_nnc_graph_free(graph);
 109      1    ccv_nnc_tensor_arena_free(tensor_arena);
 110      1    ccv_nnc_graph_exec_arena_free(graph_exec_arena);
 111      1  }
 112
 113         #include "case_main.h"