Coverage Report

Created: 2017-11-12 13:27

/home/liu/buildslave/linux-x64-runtests/build/test/unit/nnc/autograd.vector.tests.c
Coverage note: every instrumented line in the source listing below has an execution count of 1; the per-line Line/Count columns of the original report are summarized by this note.

#include "case.h"
#include "ccv_case.h"
#include "ccv_nnc_case.h"
#include <ccv.h>
#include <nnc/ccv_nnc.h>
#include <nnc/ccv_nnc_easy.h>

TEST_SETUP()
{
  ccv_nnc_init();
}
TEST_CASE("autograd with D[y = x + [1 1.5] => x_1 + (y_1 + y_1 ^ 2) + Exp[y_2], x] when x = [0.44 -1.18]")
14
1
{
15
1
  ccv_nnc_symbolic_graph_t* symbolic_graph = ccv_nnc_symbolic_graph_new();
16
1
  ccv_nnc_tensor_symbol_t one = ccv_nnc_tensor_symbol_new(symbolic_graph, ONE_CPU_TENSOR(2), "[1 1.5]");
17
1
  ccv_nnc_tensor_symbol_t x = ccv_nnc_tensor_symbol_new(symbolic_graph, ONE_CPU_TENSOR(2), "x");
18
1
  ccv_nnc_tensor_symbol_t y = ccv_nnc_tensor_symbol_new(symbolic_graph, ONE_CPU_TENSOR(2), "y");
19
1
  int ofs[CCV_NNC_MAX_DIM_ALLOC] = {0};
20
1
  int inc[CCV_NNC_MAX_DIM_ALLOC] = {0};
21
1
  inc[0] = 2;
22
1
  ccv_nnc_tensor_symbol_t x_1 = ccv_nnc_tensor_symbol_alias_new(symbolic_graph, x, ofs, inc, ONE_CPU_TENSOR(1), "x_1");
23
1
  ccv_nnc_tensor_symbol_t y_1 = ccv_nnc_tensor_symbol_alias_new(symbolic_graph, y, ofs, inc, ONE_CPU_TENSOR(1), "y_1");
24
1
  ofs[0] = 1;
25
1
  ccv_nnc_tensor_symbol_t y_2 = ccv_nnc_tensor_symbol_alias_new(symbolic_graph, y, ofs, inc, ONE_CPU_TENSOR(1), "y_2");
26
1
  ccv_nnc_tensor_symbol_t w_1 = ccv_nnc_tensor_symbol_new(symbolic_graph, ONE_CPU_TENSOR(1), "w_1");
27
1
  ccv_nnc_tensor_symbol_t u_1 = ccv_nnc_tensor_symbol_new(symbolic_graph, ONE_CPU_TENSOR(1), "u_1");
28
1
  ccv_nnc_tensor_symbol_t u_2 = ccv_nnc_tensor_symbol_new(symbolic_graph, ONE_CPU_TENSOR(1), "u_2");
29
1
  ccv_nnc_tensor_symbol_t v = ccv_nnc_tensor_symbol_new(symbolic_graph, ONE_CPU_TENSOR(1), "v");
30
1
  ccv_nnc_graph_exec_symbol_t plus = ccv_nnc_graph_exec_symbol_new(symbolic_graph, ccv_nnc_cmd(CCV_NNC_EWSUM_FORWARD, 0, 
CMD_GENERIC1
(), 0),
TENSOR_SYMBOL_LIST1
(x, one),
TENSOR_SYMBOL_LIST1
(y), "plus");
31
1
  ccv_nnc_graph_exec_symbol_t sqr = ccv_nnc_graph_exec_symbol_new(symbolic_graph, ccv_nnc_cmd(CCV_NNC_EWPROD_FORWARD, 0, 
CMD_GENERIC1
(), 0),
TENSOR_SYMBOL_LIST1
(y_1, y_1),
TENSOR_SYMBOL_LIST1
(w_1), "sqr");
32
1
  ccv_nnc_graph_exec_symbol_t plus_y = ccv_nnc_graph_exec_symbol_new(symbolic_graph, ccv_nnc_cmd(CCV_NNC_EWSUM_FORWARD, 0, 
CMD_GENERIC1
(), 0),
TENSOR_SYMBOL_LIST1
(w_1, y_1),
TENSOR_SYMBOL_LIST1
(u_1), "plus_y");
33
1
  ccv_nnc_graph_exec_symbol_t exp_ = ccv_nnc_graph_exec_symbol_new(symbolic_graph, ccv_nnc_cmd(CCV_NNC_EWEXP_FORWARD, 0, 
CMD_GENERIC1
(), 0),
TENSOR_SYMBOL_LIST1
(y_2),
TENSOR_SYMBOL_LIST1
(u_2), "exp");
34
1
  ccv_nnc_graph_exec_symbol_t sum = ccv_nnc_graph_exec_symbol_new(symbolic_graph, ccv_nnc_cmd(CCV_NNC_EWSUM_FORWARD, 0, 
CMD_GENERIC1
(), 0),
TENSOR_SYMBOL_LIST1
(x_1, u_1, u_2),
TENSOR_SYMBOL_LIST1
(v), "sum");
35
1
  ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, GRAPH_EXEC_SYMBOL_LIST(plus, sqr, plus_y, exp_, sum), 0);
36
1
  ccv_nnc_symbolic_graph_backward(symbolic_graph, 
GRAPH_EXEC_SYMBOL_LIST1
(plus),
GRAPH_EXEC_SYMBOL_LIST1
(sum),
TENSOR_SYMBOL_LIST1
(v),
TENSOR_SYMBOL_LIST1
(x));
37
1
  ccv_nnc_graph_t* graph = 0;
38
1
  ccv_nnc_tensor_arena_t* tensor_arena = 0;
39
1
  ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0;
40
1
  ccv_nnc_tensor_symbol_t dx = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, x);
41
1
  ccv_nnc_graph_exec_symbol_t dxc = ccv_nnc_graph_exec_symbol_for_backward(symbolic_graph, dx);
42
1
  ccv_nnc_symbolic_graph_compile(symbolic_graph, 0, 0, 
GRAPH_EXEC_SYMBOL_LIST1
(plus),
GRAPH_EXEC_SYMBOL_LIST1
(dxc, sum), &graph, &tensor_arena, &graph_exec_arena);
43
1
  SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH);
44
1
  GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH);
45
1
  ccv_nnc_tensor_t* tone = ccv_nnc_tensor_from_symbol(tensor_arena, one);
46
1
  tone->data.f32[0] = 1;
47
1
  tone->data.f32[1] = 1.5;
48
1
  ccv_nnc_tensor_t* tx = ccv_nnc_tensor_from_symbol(tensor_arena, x);
49
1
  tx->data.f32[0] = 0.44;
50
1
  tx->data.f32[1] = -1.18;
51
1
  ccv_nnc_tensor_symbol_t dv = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, v);
52
1
  ccv_nnc_tensor_t* tdv = ccv_nnc_tensor_from_symbol(tensor_arena, dv);
53
1
  // Seed the initialization vector.
54
1
  tdv->data.f32[0] = 1;
55
1
  ccv_nnc_graph_run(graph, 0, 
GRAPH_EXEC_LIST1
(ccv_nnc_graph_exec_source(graph_exec_arena)),
GRAPH_EXEC_LIST1
(ccv_nnc_graph_exec_destination(graph_exec_arena)));
56
1
  ccv_nnc_tensor_t* tv = ccv_nnc_tensor_from_symbol(tensor_arena, v);
57
1
  ccv_nnc_tensor_t* tdx = ccv_nnc_tensor_from_symbol(tensor_arena, dx);
58
1
  
REQUIRE_EQ_WITH_TOLERANCE1
(tv->data.f32[0], 0.44 + (0.44 + 1 + (0.44 + 1) * (0.44 + 1)) + expf(-1.18 + 1.5), 1e-6, "computed result of y = x + [1 1.5] => x_1 + (y_1 + y_1 ^ 2) + Exp[y_2] should be the same")
;1
59
1
  
REQUIRE_EQ_WITH_TOLERANCE1
(tdx->data.f32[0], 2 + 2 * (0.44 + 1), 1e-6, "computed result of D[y = x + [1 1.5] => x_1 + (y_1 + y_1 ^ 2) + Exp[y_2], x] for x_1 should be the same")
;1
60
1
  
REQUIRE_EQ_WITH_TOLERANCE1
(tdx->data.f32[1], expf(-1.18 + 1.5), 1e-6, "computed result of D[y = x + [1 1.5] => x_1 + (y_1 + y_1 ^ 2) + Exp[y_2], x] for x_2 should be the same")
;1
61
1
  ccv_nnc_symbolic_graph_free(symbolic_graph);
62
1
  ccv_nnc_graph_free(graph);
63
1
  ccv_nnc_tensor_arena_free(tensor_arena);
64
1
  ccv_nnc_graph_exec_arena_free(graph_exec_arena);
65
1
}
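
The expected values asserted above follow from the chain rule: with y = x + [1 1.5], the objective is v = x_1 + (y_1 + y_1^2) + exp(y_2), so dv/dx_1 = 2 + 2*y_1 and dv/dx_2 = exp(y_2). The following standalone sketch (not part of autograd.vector.tests.c; it assumes only the C standard library and compiles with cc -lm) recomputes those reference numbers by hand:

#include <math.h>
#include <stdio.h>

int main(void)
{
  const float x1 = 0.44f, x2 = -1.18f;
  const float y1 = x1 + 1.f, y2 = x2 + 1.5f; /* y = x + [1 1.5] */
  const float v = x1 + (y1 + y1 * y1) + expf(y2); /* x_1 + (y_1 + y_1^2) + Exp[y_2] */
  const float dvdx1 = 1.f + (1.f + 2.f * y1); /* direct x_1 term, plus the path through y_1 */
  const float dvdx2 = expf(y2); /* only the path through y_2 contributes */
  printf("v = %f, dv/dx_1 = %f, dv/dx_2 = %f\n", v, dvdx1, dvdx2);
  return 0;
}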

TEST_CASE("autograd with D[y_1 = Log[x_1], y_2 = x_2 ^ 2 => y_1 ^ 2 + y_1 * y_2, x] when x = [0.38 -2.8]")
{
  ccv_nnc_symbolic_graph_t* symbolic_graph = ccv_nnc_symbolic_graph_new();
  ccv_nnc_tensor_symbol_t x = ccv_nnc_tensor_symbol_new(symbolic_graph, ONE_CPU_TENSOR(2), "x");
  ccv_nnc_tensor_symbol_t y = ccv_nnc_tensor_symbol_new(symbolic_graph, ONE_CPU_TENSOR(2), "y");
  int ofs[CCV_NNC_MAX_DIM_ALLOC] = {0};
  int inc[CCV_NNC_MAX_DIM_ALLOC] = {0};
  inc[0] = 2;
  ccv_nnc_tensor_symbol_t x_1 = ccv_nnc_tensor_symbol_alias_new(symbolic_graph, x, ofs, inc, ONE_CPU_TENSOR(1), "x_1");
  ccv_nnc_tensor_symbol_t y_1 = ccv_nnc_tensor_symbol_alias_new(symbolic_graph, y, ofs, inc, ONE_CPU_TENSOR(1), "y_1");
  ofs[0] = 1;
  ccv_nnc_tensor_symbol_t x_2 = ccv_nnc_tensor_symbol_alias_new(symbolic_graph, x, ofs, inc, ONE_CPU_TENSOR(1), "x_2");
  ccv_nnc_tensor_symbol_t y_2 = ccv_nnc_tensor_symbol_alias_new(symbolic_graph, y, ofs, inc, ONE_CPU_TENSOR(1), "y_2");
  ccv_nnc_tensor_symbol_t w = ccv_nnc_tensor_symbol_new(symbolic_graph, ONE_CPU_TENSOR(1), "w");
  ccv_nnc_tensor_symbol_t u = ccv_nnc_tensor_symbol_new(symbolic_graph, ONE_CPU_TENSOR(1), "u");
  ccv_nnc_tensor_symbol_t v = ccv_nnc_tensor_symbol_new(symbolic_graph, ONE_CPU_TENSOR(1), "v");
  ccv_nnc_graph_exec_symbol_t plus = ccv_nnc_graph_exec_symbol_new(symbolic_graph, ccv_nnc_cmd(CCV_NNC_EWLOG_FORWARD, 0, CMD_GENERIC(), 0), TENSOR_SYMBOL_LIST(x_1), TENSOR_SYMBOL_LIST(y_1), "log");
  ccv_nnc_graph_exec_symbol_t x_1_sqr = ccv_nnc_graph_exec_symbol_new(symbolic_graph, ccv_nnc_cmd(CCV_NNC_EWPROD_FORWARD, 0, CMD_GENERIC(), 0), TENSOR_SYMBOL_LIST(x_2, x_2), TENSOR_SYMBOL_LIST(y_2), "x_1_sqr");
  ccv_nnc_graph_exec_symbol_t y_1_sqr = ccv_nnc_graph_exec_symbol_new(symbolic_graph, ccv_nnc_cmd(CCV_NNC_EWPROD_FORWARD, 0, CMD_GENERIC(), 0), TENSOR_SYMBOL_LIST(y_1, y_1), TENSOR_SYMBOL_LIST(w), "y_1_sqr");
  ccv_nnc_graph_exec_symbol_t prod = ccv_nnc_graph_exec_symbol_new(symbolic_graph, ccv_nnc_cmd(CCV_NNC_EWPROD_FORWARD, 0, CMD_GENERIC(), 0), TENSOR_SYMBOL_LIST(y_1, y_2), TENSOR_SYMBOL_LIST(u), "prod");
  ccv_nnc_graph_exec_symbol_t sum = ccv_nnc_graph_exec_symbol_new(symbolic_graph, ccv_nnc_cmd(CCV_NNC_EWSUM_FORWARD, 0, CMD_GENERIC(), 0), TENSOR_SYMBOL_LIST(w, u), TENSOR_SYMBOL_LIST(v), "sum");
  ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, GRAPH_EXEC_SYMBOL_LIST(plus, x_1_sqr, y_1_sqr, prod, sum), 0);
  ccv_nnc_symbolic_graph_backward(symbolic_graph, GRAPH_EXEC_SYMBOL_LIST(plus, x_1_sqr), GRAPH_EXEC_SYMBOL_LIST(sum), TENSOR_SYMBOL_LIST(v), TENSOR_SYMBOL_LIST(x));
  ccv_nnc_graph_t* graph = 0;
  ccv_nnc_tensor_arena_t* tensor_arena = 0;
  ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0;
  ccv_nnc_tensor_symbol_t dx = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, x);
  ccv_nnc_graph_exec_symbol_t dxc = ccv_nnc_graph_exec_symbol_for_backward(symbolic_graph, dx);
  ccv_nnc_symbolic_graph_compile(symbolic_graph, 0, 0, GRAPH_EXEC_SYMBOL_LIST(plus, x_1_sqr), GRAPH_EXEC_SYMBOL_LIST(dxc, sum), &graph, &tensor_arena, &graph_exec_arena);
  SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH);
  GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH);
  ccv_nnc_tensor_t* tx = ccv_nnc_tensor_from_symbol(tensor_arena, x);
  tx->data.f32[0] = 0.38;
  tx->data.f32[1] = -2.8;
  ccv_nnc_tensor_symbol_t dv = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, v);
  ccv_nnc_tensor_t* tdv = ccv_nnc_tensor_from_symbol(tensor_arena, dv);
  // Seed the initialization vector.
  tdv->data.f32[0] = 1;
  ccv_nnc_graph_run(graph, 0, GRAPH_EXEC_LIST(ccv_nnc_graph_exec_source(graph_exec_arena)), GRAPH_EXEC_LIST(ccv_nnc_graph_exec_destination(graph_exec_arena)));
  ccv_nnc_tensor_t* tv = ccv_nnc_tensor_from_symbol(tensor_arena, v);
  ccv_nnc_tensor_t* tdx = ccv_nnc_tensor_from_symbol(tensor_arena, dx);
  REQUIRE_EQ_WITH_TOLERANCE(tv->data.f32[0], logf(0.38) * logf(0.38) + logf(0.38) * (-2.8 * -2.8), 1e-6, "computed result of y_1 = Log[x_1], y_2 = x_2 ^ 2 => y_1 ^ 2 + y_1 * y_2 should be the same");
  REQUIRE_EQ_WITH_TOLERANCE(tdx->data.f32[0], 2 * logf(0.38) / 0.38 + (-2.8 * -2.8) / 0.38, 1e-6, "computed result of D[y_1 = Log[x_1], y_2 = x_2 ^ 2 => y_1 ^ 2 + y_1 * y_2, x] for x_1 should be the same");
  REQUIRE_EQ_WITH_TOLERANCE(tdx->data.f32[1], 2 * -2.8 * logf(0.38), 1e-6, "computed result of D[y_1 = Log[x_1], y_2 = x_2 ^ 2 => y_1 ^ 2 + y_1 * y_2, x] for x_2 should be the same");
  ccv_nnc_symbolic_graph_free(symbolic_graph);
  ccv_nnc_graph_free(graph);
  ccv_nnc_tensor_arena_free(tensor_arena);
  ccv_nnc_graph_exec_arena_free(graph_exec_arena);
}
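
In this case the objective is v = y_1^2 + y_1*y_2 with y_1 = log(x_1) and y_2 = x_2^2, so dv/dx_1 = (2*y_1 + y_2)/x_1 and dv/dx_2 = 2*x_2*y_1, which is exactly what the REQUIRE_EQ_WITH_TOLERANCE calls above spell out. A standalone sanity check (again not part of the test file; plain C, compile with cc -lm):

#include <math.h>
#include <stdio.h>

int main(void)
{
  const float x1 = 0.38f, x2 = -2.8f;
  const float y1 = logf(x1), y2 = x2 * x2;
  const float v = y1 * y1 + y1 * y2; /* y_1^2 + y_1 * y_2 */
  const float dvdx1 = (2.f * y1 + y2) / x1; /* (2 y_1 + y_2) * d log(x_1) / d x_1 */
  const float dvdx2 = 2.f * x2 * y1; /* y_1 * d(x_2^2) / d x_2 */
  printf("v = %f, dv/dx_1 = %f, dv/dx_2 = %f\n", v, dvdx1, dvdx2);
  return 0;
}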

TEST_CASE("autograd with D[y_1 = Log[x_1] => y_1 ^ 2 + y_1, x] when x = [0.21 -13.22]")
{
  ccv_nnc_symbolic_graph_t* symbolic_graph = ccv_nnc_symbolic_graph_new();
  ccv_nnc_tensor_symbol_t x = ccv_nnc_tensor_symbol_new(symbolic_graph, ONE_CPU_TENSOR(2), "x");
  ccv_nnc_tensor_symbol_t y = ccv_nnc_tensor_symbol_new(symbolic_graph, ONE_CPU_TENSOR(2), "y");
  int ofs[CCV_NNC_MAX_DIM_ALLOC] = {0};
  int inc[CCV_NNC_MAX_DIM_ALLOC] = {0};
  inc[0] = 2;
  ccv_nnc_tensor_symbol_t x_1 = ccv_nnc_tensor_symbol_alias_new(symbolic_graph, x, ofs, inc, ONE_CPU_TENSOR(1), "x_1");
  ccv_nnc_tensor_symbol_t y_1 = ccv_nnc_tensor_symbol_alias_new(symbolic_graph, y, ofs, inc, ONE_CPU_TENSOR(1), "y_1");
  ccv_nnc_tensor_symbol_t w = ccv_nnc_tensor_symbol_new(symbolic_graph, ONE_CPU_TENSOR(1), "w");
  ccv_nnc_tensor_symbol_t v = ccv_nnc_tensor_symbol_new(symbolic_graph, ONE_CPU_TENSOR(1), "v");
  ccv_nnc_graph_exec_symbol_t plus = ccv_nnc_graph_exec_symbol_new(symbolic_graph, ccv_nnc_cmd(CCV_NNC_EWLOG_FORWARD, 0, CMD_GENERIC(), 0), TENSOR_SYMBOL_LIST(x_1), TENSOR_SYMBOL_LIST(y_1), "log");
  ccv_nnc_graph_exec_symbol_t sqr = ccv_nnc_graph_exec_symbol_new(symbolic_graph, ccv_nnc_cmd(CCV_NNC_EWPROD_FORWARD, 0, CMD_GENERIC(), 0), TENSOR_SYMBOL_LIST(y_1, y_1), TENSOR_SYMBOL_LIST(w), "sqr");
  ccv_nnc_graph_exec_symbol_t sum = ccv_nnc_graph_exec_symbol_new(symbolic_graph, ccv_nnc_cmd(CCV_NNC_EWSUM_FORWARD, 0, CMD_GENERIC(), 0), TENSOR_SYMBOL_LIST(w, y_1), TENSOR_SYMBOL_LIST(v), "sum");
  ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, GRAPH_EXEC_SYMBOL_LIST(plus, sqr, sum), 0);
  ccv_nnc_symbolic_graph_backward(symbolic_graph, GRAPH_EXEC_SYMBOL_LIST(plus), GRAPH_EXEC_SYMBOL_LIST(sum), TENSOR_SYMBOL_LIST(v), TENSOR_SYMBOL_LIST(x));
  ccv_nnc_graph_t* graph = 0;
  ccv_nnc_tensor_arena_t* tensor_arena = 0;
  ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0;
  ccv_nnc_tensor_symbol_t dx = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, x);
  ccv_nnc_graph_exec_symbol_t dxc = ccv_nnc_graph_exec_symbol_for_backward(symbolic_graph, dx);
  ccv_nnc_symbolic_graph_compile(symbolic_graph, 0, 0, GRAPH_EXEC_SYMBOL_LIST(plus), GRAPH_EXEC_SYMBOL_LIST(dxc, sum), &graph, &tensor_arena, &graph_exec_arena);
  SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH);
  GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH);
  ccv_nnc_tensor_t* tx = ccv_nnc_tensor_from_symbol(tensor_arena, x);
  tx->data.f32[0] = 0.21;
  tx->data.f32[1] = -13.22;
  ccv_nnc_tensor_symbol_t dv = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, v);
  ccv_nnc_tensor_t* tdv = ccv_nnc_tensor_from_symbol(tensor_arena, dv);
  // Seed the initialization vector.
  tdv->data.f32[0] = 1;
  ccv_nnc_graph_run(graph, 0, GRAPH_EXEC_LIST(ccv_nnc_graph_exec_source(graph_exec_arena)), GRAPH_EXEC_LIST(ccv_nnc_graph_exec_destination(graph_exec_arena)));
  ccv_nnc_tensor_t* tv = ccv_nnc_tensor_from_symbol(tensor_arena, v);
  ccv_nnc_tensor_t* tdx = ccv_nnc_tensor_from_symbol(tensor_arena, dx);
  REQUIRE_EQ_WITH_TOLERANCE(tv->data.f32[0], logf(0.21) * logf(0.21) + logf(0.21), 1e-6, "computed result of y_1 = Log[x_1] => y_1 ^ 2 + y_1 should be the same");
  REQUIRE_EQ_WITH_TOLERANCE(tdx->data.f32[0], 2 * logf(0.21) / 0.21 + 1 / 0.21, 1e-6, "computed result of D[y_1 = Log[x_1] => y_1 ^ 2 + y_1, x] for x_1 should be the same");
  // Note that the value in tdx->data.f32[1] is undefined.
  ccv_nnc_symbolic_graph_free(symbolic_graph);
  ccv_nnc_graph_free(graph);
  ccv_nnc_tensor_arena_free(tensor_arena);
  ccv_nnc_graph_exec_arena_free(graph_exec_arena);
}
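
Here v = y_1^2 + y_1 with y_1 = log(x_1), so dv/dx_1 = (2*y_1 + 1)/x_1; x_2 never reaches v, which is why the comment above notes that tdx->data.f32[1] is undefined. A minimal standalone check of the x_1 component (not part of the test file; compile with cc -lm):

#include <math.h>
#include <stdio.h>

int main(void)
{
  const float x1 = 0.21f;
  const float y1 = logf(x1);
  const float v = y1 * y1 + y1; /* y_1^2 + y_1 */
  const float dvdx1 = (2.f * y1 + 1.f) / x1; /* x_2 does not appear in v, so no gradient is written for it */
  printf("v = %f, dv/dx_1 = %f\n", v, dvdx1);
  return 0;
}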

TEST_CASE("autograd with sliced tensors for convolution doesn't require zeros (similar to Inception module)")
{
  ccv_nnc_symbolic_graph_t* symbolic_graph = ccv_nnc_symbolic_graph_new();
  ccv_nnc_tensor_symbol_t image = ccv_nnc_tensor_symbol_new(symbolic_graph, ONE_CPU_TENSOR(100, 100, 3), "image");
  ccv_nnc_tensor_symbol_t w = ccv_nnc_tensor_symbol_new(symbolic_graph, ONE_CPU_TENSOR(128, 3, 3, 3), "w");
  ccv_nnc_tensor_symbol_t bias = ccv_nnc_tensor_symbol_new(symbolic_graph, ONE_CPU_TENSOR(128), "bias");
  ccv_nnc_tensor_symbol_t b = ccv_nnc_tensor_symbol_new(symbolic_graph, ONE_CPU_TENSOR(100, 100, 128), "b");
  ccv_nnc_tensor_symbol_t c = ccv_nnc_tensor_symbol_new(symbolic_graph, ONE_CPU_TENSOR(100, 100, 128), "c");
  int ofs[CCV_NNC_MAX_DIM_ALLOC] = {0};
  int inc[CCV_NNC_MAX_DIM_ALLOC] = {0};
  inc[0] = 100;
  inc[1] = 100;
  inc[2] = 128;
  ccv_nnc_tensor_symbol_t b0 = ccv_nnc_tensor_symbol_alias_new(symbolic_graph, b, ofs, inc, ONE_CPU_TENSOR(50, 100, 64), "b0");
  ccv_nnc_tensor_symbol_t c0 = ccv_nnc_tensor_symbol_alias_new(symbolic_graph, c, ofs, inc, ONE_CPU_TENSOR(50, 100, 64), "c0");
  ofs[2] = 64;
  ccv_nnc_tensor_symbol_t b1 = ccv_nnc_tensor_symbol_alias_new(symbolic_graph, b, ofs, inc, ONE_CPU_TENSOR(50, 100, 64), "b1");
  ccv_nnc_tensor_symbol_t c1 = ccv_nnc_tensor_symbol_alias_new(symbolic_graph, c, ofs, inc, ONE_CPU_TENSOR(50, 100, 64), "c1");
  ofs[2] = 0;
  ofs[0] = 50;
  ccv_nnc_tensor_symbol_t b2 = ccv_nnc_tensor_symbol_alias_new(symbolic_graph, b, ofs, inc, ONE_CPU_TENSOR(50, 100, 64), "b2");
  ccv_nnc_tensor_symbol_t c2 = ccv_nnc_tensor_symbol_alias_new(symbolic_graph, c, ofs, inc, ONE_CPU_TENSOR(50, 100, 64), "c2");
  ofs[2] = 64;
  ofs[0] = 50;
  ccv_nnc_tensor_symbol_t b3 = ccv_nnc_tensor_symbol_alias_new(symbolic_graph, b, ofs, inc, ONE_CPU_TENSOR(50, 100, 64), "b3");
  ccv_nnc_tensor_symbol_t c3 = ccv_nnc_tensor_symbol_alias_new(symbolic_graph, c, ofs, inc, ONE_CPU_TENSOR(50, 100, 64), "c3");
  ccv_nnc_graph_exec_symbol_t conv = ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_CONVOLUTION_FORWARD(128, 3, 3, 3), TENSOR_SYMBOL_LIST(image, w, bias), TENSOR_SYMBOL_LIST(b), "conv");
  ccv_nnc_graph_exec_symbol_t relu0 = ccv_nnc_graph_exec_symbol_new(symbolic_graph, ccv_nnc_cmd(CCV_NNC_RELU_FORWARD, 0, CMD_GENERIC(), 0), TENSOR_SYMBOL_LIST(b0), TENSOR_SYMBOL_LIST(c0), "relu0");
  ccv_nnc_graph_exec_symbol_t relu1 = ccv_nnc_graph_exec_symbol_new(symbolic_graph, ccv_nnc_cmd(CCV_NNC_RELU_FORWARD, 0, CMD_GENERIC(), 0), TENSOR_SYMBOL_LIST(b1), TENSOR_SYMBOL_LIST(c1), "relu1");
  ccv_nnc_graph_exec_symbol_t relu2 = ccv_nnc_graph_exec_symbol_new(symbolic_graph, ccv_nnc_cmd(CCV_NNC_RELU_FORWARD, 0, CMD_GENERIC(), 0), TENSOR_SYMBOL_LIST(b2), TENSOR_SYMBOL_LIST(c2), "relu2");
  ccv_nnc_graph_exec_symbol_t relu3 = ccv_nnc_graph_exec_symbol_new(symbolic_graph, ccv_nnc_cmd(CCV_NNC_RELU_FORWARD, 0, CMD_GENERIC(), 0), TENSOR_SYMBOL_LIST(b3), TENSOR_SYMBOL_LIST(c3), "relu3");
  ccv_nnc_tensor_symbol_t d = ccv_nnc_tensor_symbol_new(symbolic_graph, ONE_CPU_TENSOR(1, 1, 128), "d");
  ccv_nnc_graph_exec_symbol_t pool = ccv_nnc_graph_exec_symbol_new(symbolic_graph, ccv_nnc_cmd(CCV_NNC_AVERAGE_POOL_FORWARD, 0, CMD_GENERIC(100, 100, 128), 0), TENSOR_SYMBOL_LIST(c), TENSOR_SYMBOL_LIST(d), "pool");
  ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, GRAPH_EXEC_SYMBOL_LIST(conv, relu0, relu1, relu2, relu3, pool), 0);
  ccv_nnc_symbolic_graph_backward(symbolic_graph, GRAPH_EXEC_SYMBOL_LIST(conv), GRAPH_EXEC_SYMBOL_LIST(pool), TENSOR_SYMBOL_LIST(d), TENSOR_SYMBOL_LIST(w, bias, b, c));
  SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH);
  ccv_nnc_tensor_symbol_t db = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, b);
  ccv_nnc_tensor_symbol_t dc = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, c);
  REQUIRE(!ccv_nnc_tensor_symbol_flag(symbolic_graph, db, CCV_NNC_SYM_TENSOR_INIT_ZEROS), "The gradient for b doesn't need to be zero init");
  REQUIRE(!ccv_nnc_tensor_symbol_flag(symbolic_graph, dc, CCV_NNC_SYM_TENSOR_INIT_ZEROS), "The gradient for c doesn't need to be zero init");
  ccv_nnc_symbolic_graph_free(symbolic_graph);
}

TEST_CASE("autograd with sliced tensors for convolution require zeros")
{
  ccv_nnc_symbolic_graph_t* symbolic_graph = ccv_nnc_symbolic_graph_new();
  ccv_nnc_tensor_symbol_t image = ccv_nnc_tensor_symbol_new(symbolic_graph, ONE_CPU_TENSOR(3, 100, 100, 3), "image");
  ccv_nnc_tensor_symbol_t w = ccv_nnc_tensor_symbol_new(symbolic_graph, ONE_CPU_TENSOR(128, 3, 3, 3), "w");
  ccv_nnc_tensor_symbol_t bias = ccv_nnc_tensor_symbol_new(symbolic_graph, ONE_CPU_TENSOR(128), "bias");
  ccv_nnc_tensor_symbol_t b = ccv_nnc_tensor_symbol_new(symbolic_graph, ONE_CPU_TENSOR(100, 100, 128), "b");
  ccv_nnc_tensor_symbol_t c = ccv_nnc_tensor_symbol_new(symbolic_graph, ONE_CPU_TENSOR(100, 100, 128), "c");
  int ofs[CCV_NNC_MAX_DIM_ALLOC] = {0};
  int inc[CCV_NNC_MAX_DIM_ALLOC] = {0};
  inc[0] = 100;
  inc[1] = 100;
  inc[2] = 128;
  ccv_nnc_tensor_symbol_t b0 = ccv_nnc_tensor_symbol_alias_new(symbolic_graph, b, ofs, inc, ONE_CPU_TENSOR(50, 100, 64), "b0");
  ccv_nnc_tensor_symbol_t c0 = ccv_nnc_tensor_symbol_alias_new(symbolic_graph, c, ofs, inc, ONE_CPU_TENSOR(50, 100, 64), "c0");
  ofs[2] = 64;
  ccv_nnc_tensor_symbol_t b1 = ccv_nnc_tensor_symbol_alias_new(symbolic_graph, b, ofs, inc, ONE_CPU_TENSOR(50, 100, 64), "b1");
  ccv_nnc_tensor_symbol_t c1 = ccv_nnc_tensor_symbol_alias_new(symbolic_graph, c, ofs, inc, ONE_CPU_TENSOR(50, 100, 64), "c1");
  ccv_nnc_graph_exec_symbol_t conv = ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_CONVOLUTION_FORWARD(128, 3, 3, 3), TENSOR_SYMBOL_LIST(image, w, bias), TENSOR_SYMBOL_LIST(b), "conv");
  ccv_nnc_graph_exec_symbol_t relu0 = ccv_nnc_graph_exec_symbol_new(symbolic_graph, ccv_nnc_cmd(CCV_NNC_RELU_FORWARD, 0, CMD_GENERIC(), 0), TENSOR_SYMBOL_LIST(b0), TENSOR_SYMBOL_LIST(c0), "relu0");
  ccv_nnc_graph_exec_symbol_t relu1 = ccv_nnc_graph_exec_symbol_new(symbolic_graph, ccv_nnc_cmd(CCV_NNC_RELU_FORWARD, 0, CMD_GENERIC(), 0), TENSOR_SYMBOL_LIST(b1), TENSOR_SYMBOL_LIST(c1), "relu1");
  ccv_nnc_tensor_symbol_t d = ccv_nnc_tensor_symbol_new(symbolic_graph, ONE_CPU_TENSOR(1, 1, 128), "d");
  ccv_nnc_graph_exec_symbol_t pool = ccv_nnc_graph_exec_symbol_new(symbolic_graph, ccv_nnc_cmd(CCV_NNC_AVERAGE_POOL_FORWARD, 0, CMD_GENERIC(100, 100, 128), 0), TENSOR_SYMBOL_LIST(c), TENSOR_SYMBOL_LIST(d), "pool");
  ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, GRAPH_EXEC_SYMBOL_LIST(conv, relu0, relu1, pool), 0);
  ccv_nnc_symbolic_graph_backward(symbolic_graph, GRAPH_EXEC_SYMBOL_LIST(conv), GRAPH_EXEC_SYMBOL_LIST(pool), TENSOR_SYMBOL_LIST(d), TENSOR_SYMBOL_LIST(w, bias, b, c));
  SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_SHORT_DOT_GRAPH);
  ccv_nnc_tensor_symbol_t db = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, b);
  ccv_nnc_tensor_symbol_t dc = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, c);
  REQUIRE(ccv_nnc_tensor_symbol_flag(symbolic_graph, db, CCV_NNC_SYM_TENSOR_INIT_ZEROS), "The gradient for b needs to be zero init");
  REQUIRE(!ccv_nnc_tensor_symbol_flag(symbolic_graph, dc, CCV_NNC_SYM_TENSOR_INIT_ZEROS), "The gradient for c doesn't need to be zero init");
  ccv_nnc_symbolic_graph_free(symbolic_graph);
}

TEST_CASE("autograd with sliced tensors for convolution that are over-subscribed")
{
  ccv_nnc_symbolic_graph_t* symbolic_graph = ccv_nnc_symbolic_graph_new();
  ccv_nnc_tensor_symbol_t image = ccv_nnc_tensor_symbol_new(symbolic_graph, ONE_CPU_TENSOR(100, 100, 3), "image");
  ccv_nnc_tensor_symbol_t w = ccv_nnc_tensor_symbol_new(symbolic_graph, ONE_CPU_TENSOR(128, 3, 3, 3), "w");
  ccv_nnc_tensor_symbol_t bias = ccv_nnc_tensor_symbol_new(symbolic_graph, ONE_CPU_TENSOR(128), "bias");
  ccv_nnc_tensor_symbol_t b = ccv_nnc_tensor_symbol_new(symbolic_graph, ONE_CPU_TENSOR(100, 100, 128), "b");
  ccv_nnc_tensor_symbol_t c = ccv_nnc_tensor_symbol_new(symbolic_graph, ONE_CPU_TENSOR(100, 100, 128), "c");
  int ofs[CCV_NNC_MAX_DIM_ALLOC] = {0};
  int inc[CCV_NNC_MAX_DIM_ALLOC] = {0};
  inc[0] = 100;
  inc[1] = 100;
  inc[2] = 128;
  ccv_nnc_tensor_symbol_t b0 = ccv_nnc_tensor_symbol_alias_new(symbolic_graph, b, ofs, inc, ONE_CPU_TENSOR(50, 100, 64), "b0");
  ccv_nnc_tensor_symbol_t c0 = ccv_nnc_tensor_symbol_alias_new(symbolic_graph, c, ofs, inc, ONE_CPU_TENSOR(50, 100, 64), "c0");
  ofs[2] = 32;
  ccv_nnc_tensor_symbol_t b1 = ccv_nnc_tensor_symbol_alias_new(symbolic_graph, b, ofs, inc, ONE_CPU_TENSOR(50, 100, 64), "b1");
  ccv_nnc_tensor_symbol_t c1 = ccv_nnc_tensor_symbol_alias_new(symbolic_graph, c, ofs, inc, ONE_CPU_TENSOR(50, 100, 64), "c1");
  ccv_nnc_graph_exec_symbol_t conv = ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_CONVOLUTION_FORWARD(128, 3, 3, 3), TENSOR_SYMBOL_LIST(image, w, bias), TENSOR_SYMBOL_LIST(b), "conv");
  ccv_nnc_graph_exec_symbol_t relu0 = ccv_nnc_graph_exec_symbol_new(symbolic_graph, ccv_nnc_cmd(CCV_NNC_RELU_FORWARD, 0, CMD_GENERIC(), 0), TENSOR_SYMBOL_LIST(b0), TENSOR_SYMBOL_LIST(c0), "relu0");
  ccv_nnc_graph_exec_symbol_t relu1 = ccv_nnc_graph_exec_symbol_new(symbolic_graph, ccv_nnc_cmd(CCV_NNC_RELU_FORWARD, 0, CMD_GENERIC(), 0), TENSOR_SYMBOL_LIST(b1), TENSOR_SYMBOL_LIST(c1), "relu1");
  ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, GRAPH_EXEC_SYMBOL_LIST(conv, relu0, relu1), 0);
  ccv_nnc_symbolic_graph_backward(symbolic_graph, GRAPH_EXEC_SYMBOL_LIST(conv), GRAPH_EXEC_SYMBOL_LIST(relu0, relu1), TENSOR_SYMBOL_LIST(c), TENSOR_SYMBOL_LIST(w, bias, b));
  SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_SHORT_DOT_GRAPH);
  ccv_nnc_tensor_symbol_t db = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, b);
  ccv_nnc_graph_exec_symbol_t dbx = ccv_nnc_graph_exec_symbol_for_backward(symbolic_graph, db);
  REQUIRE(ccv_nnc_graph_exec_symbol_cmd(symbolic_graph, dbx).cmd == CCV_NNC_EWSUM_FORWARD, "Since gradient of b is overlapped, it has to be summed up");
  ccv_nnc_symbolic_graph_free(symbolic_graph);
}

TEST_CASE("autograd with sliced tensors for convolution that are over-subscribed with no-op")
{
  ccv_nnc_symbolic_graph_t* symbolic_graph = ccv_nnc_symbolic_graph_new();
  ccv_nnc_tensor_symbol_t image = ccv_nnc_tensor_symbol_new(symbolic_graph, ONE_CPU_TENSOR(100, 100, 3), "image");
  ccv_nnc_tensor_symbol_t w = ccv_nnc_tensor_symbol_new(symbolic_graph, ONE_CPU_TENSOR(128, 3, 3, 3), "w");
  ccv_nnc_tensor_symbol_t bias = ccv_nnc_tensor_symbol_new(symbolic_graph, ONE_CPU_TENSOR(128), "bias");
  ccv_nnc_tensor_symbol_t b = ccv_nnc_tensor_symbol_new(symbolic_graph, ONE_CPU_TENSOR(100, 100, 128), "b");
  ccv_nnc_tensor_symbol_t c = ccv_nnc_tensor_symbol_new(symbolic_graph, ONE_CPU_TENSOR(100, 100, 128), "c");
  int ofs[CCV_NNC_MAX_DIM_ALLOC] = {0};
  int inc[CCV_NNC_MAX_DIM_ALLOC] = {0};
  inc[0] = 100;
  inc[1] = 100;
  inc[2] = 128;
  ccv_nnc_tensor_symbol_t b0 = ccv_nnc_tensor_symbol_alias_new(symbolic_graph, b, ofs, inc, ONE_CPU_TENSOR(50, 100, 64), "b0");
  ccv_nnc_tensor_symbol_t c0 = ccv_nnc_tensor_symbol_alias_new(symbolic_graph, c, ofs, inc, ONE_CPU_TENSOR(50, 100, 64), "c0");
  ofs[2] = 32;
  ccv_nnc_tensor_symbol_t b1 = ccv_nnc_tensor_symbol_alias_new(symbolic_graph, b, ofs, inc, ONE_CPU_TENSOR(50, 100, 64), "b1");
  ccv_nnc_tensor_symbol_t c1 = ccv_nnc_tensor_symbol_alias_new(symbolic_graph, c, ofs, inc, ONE_CPU_TENSOR(50, 100, 64), "c1");
  ccv_nnc_graph_exec_symbol_t conv = ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_CONVOLUTION_FORWARD(128, 3, 3, 3), TENSOR_SYMBOL_LIST(image, w, bias), TENSOR_SYMBOL_LIST(b), "conv");
  ccv_nnc_graph_exec_symbol_t relu0 = ccv_nnc_graph_exec_symbol_new(symbolic_graph, ccv_nnc_cmd(CCV_NNC_RELU_FORWARD, 0, CMD_GENERIC(), 0), TENSOR_SYMBOL_LIST(b0), TENSOR_SYMBOL_LIST(c0), "relu0");
  ccv_nnc_graph_exec_symbol_t relu1 = ccv_nnc_graph_exec_symbol_new(symbolic_graph, ccv_nnc_cmd(CCV_NNC_RELU_FORWARD, 0, CMD_GENERIC(), 0), TENSOR_SYMBOL_LIST(b1), TENSOR_SYMBOL_LIST(c1), "relu1");
  ccv_nnc_graph_exec_symbol_t noop = ccv_nnc_graph_exec_symbol_new(symbolic_graph, ccv_nnc_cmd(CCV_NNC_NOOP, 0, CMD_GENERIC(), 0), TENSOR_SYMBOL_LIST(c0, c1), TENSOR_SYMBOL_LIST(c), "noop");
  ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, GRAPH_EXEC_SYMBOL_LIST(conv, relu0, relu1, noop), 0);
  ccv_nnc_symbolic_graph_backward(symbolic_graph, GRAPH_EXEC_SYMBOL_LIST(conv), GRAPH_EXEC_SYMBOL_LIST(noop), TENSOR_SYMBOL_LIST(c), TENSOR_SYMBOL_LIST(w, bias, b));
  SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_SHORT_DOT_GRAPH);
  ccv_nnc_tensor_symbol_t db = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, b);
  ccv_nnc_graph_exec_symbol_t dbx = ccv_nnc_graph_exec_symbol_for_backward(symbolic_graph, db);
  REQUIRE(ccv_nnc_graph_exec_symbol_cmd(symbolic_graph, dbx).cmd == CCV_NNC_EWSUM_FORWARD, "Since gradient of b is overlapped, it has to be summed up");
  ccv_nnc_symbolic_graph_free(symbolic_graph);
}

#include "case_main.h"