/home/liu/actions-runner/_work/ccv/ccv/test/unit/nnc/minimize.tests.c
Line | Count | Source |
1 | | #include "case.h" |
2 | | #include "ccv_case.h" |
3 | | #include "ccv_nnc_case.h" |
4 | | #include <ccv.h> |
5 | | #include <nnc/ccv_nnc.h> |
6 | | #include <nnc/ccv_nnc_easy.h> |
7 | | #include "3rdparty/dsfmt/dSFMT.h" |
8 | | |
TEST_SETUP()
{
	// Initialize the NNC library before each test case runs.
	ccv_nnc_init();
}
13 | | |
TEST_CASE("solve least square sum with stochastic gradient descent on symbolic graph")
{
	// Forward graph: b = a * w^T + bias (GEMM with transposed w), c = b .* b,
	// s = sum of all elements of c. Minimizing s drives b toward 0, so each row
	// of a * w^T converges to -bias = (-1, 1), which the assertions check below.
	ccv_nnc_symbolic_graph_t* const symbolic_graph = ccv_nnc_symbolic_graph_new();
	ccv_nnc_tensor_symbol_t a = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 2), "a");
	ccv_nnc_tensor_symbol_t w = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 2), "w");
	ccv_nnc_tensor_symbol_t bias = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 2), "bias");
	ccv_nnc_tensor_symbol_t b = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 2), "b");
	ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_GEMM_FORWARD(NO_TRANSPOSE, TRANSPOSE(0, 1)), TENSOR_SYMBOL_LIST(a, w, bias), TENSOR_SYMBOL_LIST(b), "gemm");
	ccv_nnc_tensor_symbol_t c = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 2), "c");
	ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_EWPROD_FORWARD(), TENSOR_SYMBOL_LIST(b, b), TENSOR_SYMBOL_LIST(c), "square");
	ccv_nnc_tensor_symbol_t s = ccv_nnc_tensor_symbol_new(symbolic_graph, ccv_nnc_tensor_auto, "s");
	ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_REDUCE_SUM_FORWARD(0, 1), TENSOR_SYMBOL_LIST(c), TENSOR_SYMBOL_LIST(s), "sum");
	ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS);
	// Insert backward pass plus one SGD update node for the sole parameter w.
	// updates[0] receives the updated-w symbol, aux[0] maps the momentum tensor.
	ccv_nnc_tensor_symbol_t updates[1];
	ccv_nnc_tensor_symbol_map_t aux[1];
	ccv_nnc_graph_exec_symbol_t update_execs[1];
	ccv_nnc_symbolic_graph_minimize(symbolic_graph, CMD_SGD_FORWARD(0, 0.001, 1, 0.995, 0.9, 0.9), TENSOR_SYMBOL_LIST(s), TENSOR_SYMBOL_LIST(w), 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), 0, updates, aux, update_execs);
	SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH);
	ccv_nnc_graph_t* graph;
	ccv_nnc_tensor_arena_t* tensor_arena;
	ccv_nnc_graph_exec_arena_t* graph_exec_arena;
	// Compile with the update execs as destinations so a single graph run does
	// forward + backward + parameter update.
	ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), update_execs, 1, &graph, &tensor_arena, &graph_exec_arena);
	GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH);
	// Relies on the inplace ops for SGD set on both updated w / bias, and momentum.
	ccv_nnc_tensor_t* const w_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, w);
	ccv_nnc_cmd_exec(CMD_RANDOM_UNIFORM_FORWARD(-0.5, 0.5), ccv_nnc_no_hint, 0, 0, 0, TENSOR_LIST(w_tensor), 0);
	ccv_nnc_tensor_t* const bias_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, bias);
	int i;
	// Zero-initialize the SGD momentum auxiliary tensor before training.
	for (i = 0; i < 1; i++)
	{
		ccv_nnc_tensor_t* const aux_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, aux[i].source);
		ccv_nnc_cmd_exec(CMD_SET_FORWARD(0), ccv_nnc_no_hint, 0, 0, 0, TENSOR_LIST(aux_tensor), 0);
	}
	ccv_nnc_tensor_t* const a_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, a);
	// f_tensor is the gradient seed (d s / d s); set to 1 before each run.
	ccv_nnc_tensor_t* const f_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, ccv_nnc_tensor_symbol_for_backward(symbolic_graph, s));
	ccv_nnc_graph_exec_t sgd = ccv_nnc_graph_exec_from_symbol(graph_exec_arena, update_execs[0]);
	for (i = 0; i < 1000; i++)
	{
		a_tensor->data.f32[0] = 10;
		a_tensor->data.f32[1] = 1;
		a_tensor->data.f32[2] = 3;
		a_tensor->data.f32[3] = 5;
		f_tensor->data.f32[0] = 1;
		bias_tensor->data.f32[0] = 1;
		bias_tensor->data.f32[1] = -1;
		// Step down the learning rate at iterations 250 / 500 / 750 so the
		// weights settle to the fixed point within tolerance.
		if (i == 750)
			ccv_nnc_graph_exec_set(graph, sgd, CMD_SGD_FORWARD(0, 0.000001, 1, 0.995, 0.9, 0.9));
		else if (i == 500)
			ccv_nnc_graph_exec_set(graph, sgd, CMD_SGD_FORWARD(0, 0.00001, 1, 0.995, 0.9, 0.9));
		else if (i == 250)
			ccv_nnc_graph_exec_set(graph, sgd, CMD_SGD_FORWARD(0, 0.0001, 1, 0.995, 0.9, 0.9));
		ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0);
	}
	// Each row of a * w^T should equal -bias = (-1, 1).
	REQUIRE_EQ_WITH_TOLERANCE(a_tensor->data.f32[0] * w_tensor->data.f32[0] + a_tensor->data.f32[1] * w_tensor->data.f32[1], -1, 1e-3, "converge for vector 1");
	REQUIRE_EQ_WITH_TOLERANCE(a_tensor->data.f32[0] * w_tensor->data.f32[2] + a_tensor->data.f32[1] * w_tensor->data.f32[3], 1, 1e-3, "converge for vector 1");
	REQUIRE_EQ_WITH_TOLERANCE(a_tensor->data.f32[2] * w_tensor->data.f32[0] + a_tensor->data.f32[3] * w_tensor->data.f32[1], -1, 1e-1, "converge for vector 2");
	REQUIRE_EQ_WITH_TOLERANCE(a_tensor->data.f32[2] * w_tensor->data.f32[2] + a_tensor->data.f32[3] * w_tensor->data.f32[3], 1, 1e-1, "converge for vector 2");
	ccv_nnc_symbolic_graph_free(symbolic_graph);
	ccv_nnc_graph_free(graph);
	ccv_nnc_tensor_arena_free(tensor_arena);
	ccv_nnc_graph_exec_arena_free(graph_exec_arena);
}
76 | | |
77 | | TEST_CASE("solve least square sum with stochastic gradient descent on dynamic graph") |
78 | 1 | { |
79 | 1 | ccv_nnc_dynamic_graph_t* const graph = ccv_nnc_dynamic_graph_new(); |
80 | 1 | ccv_nnc_tensor_variable_t w = ccv_nnc_tensor_variable_new(graph, CPU_TENSOR_NHWC(32F, 2, 2)); |
81 | 1 | ccv_nnc_tensor_variable_t aux = ccv_nnc_tensor_variable_new(graph, CPU_TENSOR_NHWC(32F, 2, 2)); |
82 | 1 | ccv_nnc_dynamic_graph_exec(graph, CMD_SET_FORWARD(0), ccv_nnc_no_hint, 0, 0, 0, TENSOR_VARIABLE_LIST(aux), 0, 0); |
83 | 1 | ccv_nnc_dynamic_graph_exec(graph, CMD_RANDOM_UNIFORM_FORWARD(-0.5, 0.5), ccv_nnc_no_hint, 0, 0, 0, TENSOR_VARIABLE_LIST(w), 0, 0); |
84 | 1 | int i; |
85 | 1.00k | for (i = 0; i < 1000; i++1.00k ) |
86 | 1.00k | { |
87 | 1.00k | ccv_nnc_tensor_variable_t a = ccv_nnc_tensor_variable_new(graph, CPU_TENSOR_NHWC(32F, 2, 2)); |
88 | 1.00k | ccv_nnc_tensor_t* const a_tensor = ccv_nnc_tensor_from_variable(graph, a); |
89 | 1.00k | a_tensor->data.f32[0] = 10; |
90 | 1.00k | a_tensor->data.f32[1] = 1; |
91 | 1.00k | a_tensor->data.f32[2] = 3; |
92 | 1.00k | a_tensor->data.f32[3] = 5; |
93 | 1.00k | ccv_nnc_tensor_variable_t bias = ccv_nnc_tensor_variable_new(graph, CPU_TENSOR_NHWC(32F, 2)); |
94 | 1.00k | ccv_nnc_tensor_t* const bias_tensor = ccv_nnc_tensor_from_variable(graph, bias); |
95 | 1.00k | bias_tensor->data.f32[0] = 1; |
96 | 1.00k | bias_tensor->data.f32[1] = -1; |
97 | 1.00k | ccv_nnc_tensor_variable_t b = ccv_nnc_tensor_variable_new(graph); |
98 | 1.00k | ccv_nnc_dynamic_graph_exec(graph, CMD_GEMM_FORWARD(NO_TRANSPOSE, TRANSPOSE(0, 1)), ccv_nnc_no_hint, 0, TENSOR_VARIABLE_LIST(a, w, bias), TENSOR_VARIABLE_LIST(b), 0, 0); |
99 | 1.00k | ccv_nnc_tensor_variable_t c = ccv_nnc_tensor_variable_new(graph); |
100 | 1.00k | ccv_nnc_dynamic_graph_exec(graph, CMD_EWPROD_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_VARIABLE_LIST(b, b), TENSOR_VARIABLE_LIST(c), 0, 0); |
101 | 1.00k | ccv_nnc_tensor_variable_t s = ccv_nnc_tensor_variable_new(graph); |
102 | 1.00k | ccv_nnc_dynamic_graph_exec(graph, CMD_REDUCE_SUM_FORWARD(0, 1), ccv_nnc_no_hint, 0, TENSOR_VARIABLE_LIST(c), TENSOR_VARIABLE_LIST(s), 0, 0); |
103 | 1.00k | ccv_nnc_dynamic_graph_minimize(graph, CMD_SGD_FORWARD(0, 0.001, 1, 0.995, 0.9, 0.9), TENSOR_VARIABLE_LIST(s), 0, TENSOR_VARIABLE_LIST(w), &aux, 0, 0); |
104 | 1.00k | ccv_nnc_tensor_variable_free(graph, a); |
105 | 1.00k | ccv_nnc_tensor_variable_free(graph, b); |
106 | 1.00k | ccv_nnc_tensor_variable_free(graph, bias); |
107 | 1.00k | ccv_nnc_tensor_variable_free(graph, c); |
108 | 1.00k | ccv_nnc_tensor_variable_free(graph, s); |
109 | 1.00k | } |
110 | 1 | DYNAMIC_GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH); |
111 | 1 | ccv_nnc_tensor_t* const w_tensor = ccv_nnc_tensor_from_variable(graph, w); |
112 | 1 | REQUIRE_EQ_WITH_TOLERANCE(10 * w_tensor->data.f32[0] + 1 * w_tensor->data.f32[1], -1, 1e-3, "converge for vector 1"); |
113 | 1 | REQUIRE_EQ_WITH_TOLERANCE(10 * w_tensor->data.f32[2] + 1 * w_tensor->data.f32[3], 1, 1e-3, "converge for vector 1"); |
114 | 1 | REQUIRE_EQ_WITH_TOLERANCE(3 * w_tensor->data.f32[0] + 5 * w_tensor->data.f32[1], -1, 1e-1, "converge for vector 2"); |
115 | 1 | REQUIRE_EQ_WITH_TOLERANCE(3 * w_tensor->data.f32[2] + 5 * w_tensor->data.f32[3], 1, 1e-1, "converge for vector 2"); |
116 | 1 | ccv_nnc_dynamic_graph_free(graph); |
117 | 1 | } |
118 | | |
TEST_CASE("solve least square sum with stochastic gradient descent on dynamic graph, backward & apply gradients")
{
	// Same objective as the previous dynamic-graph test, but exercises the
	// two-step API: ccv_nnc_dynamic_graph_backward to compute the gradient g,
	// then ccv_nnc_dynamic_graph_apply_gradients to update w — instead of the
	// fused ccv_nnc_dynamic_graph_minimize.
	ccv_nnc_dynamic_graph_t* const graph = ccv_nnc_dynamic_graph_new();
	ccv_nnc_tensor_variable_t w = ccv_nnc_tensor_variable_new(graph, CPU_TENSOR_NHWC(32F, 2, 2));
	// aux holds the SGD momentum; zero it, then randomize the parameter w.
	ccv_nnc_tensor_variable_t aux = ccv_nnc_tensor_variable_new(graph, CPU_TENSOR_NHWC(32F, 2, 2));
	ccv_nnc_dynamic_graph_exec(graph, CMD_SET_FORWARD(0), ccv_nnc_no_hint, 0, 0, 0, TENSOR_VARIABLE_LIST(aux), 0, 0);
	ccv_nnc_dynamic_graph_exec(graph, CMD_RANDOM_UNIFORM_FORWARD(-0.5, 0.5), ccv_nnc_no_hint, 0, 0, 0, TENSOR_VARIABLE_LIST(w), 0, 0);
	int i;
	for (i = 0; i < 1000; i++)
	{
		ccv_nnc_tensor_variable_t a = ccv_nnc_tensor_variable_new(graph, CPU_TENSOR_NHWC(32F, 2, 2));
		ccv_nnc_tensor_t* const a_tensor = ccv_nnc_tensor_from_variable(graph, a);
		a_tensor->data.f32[0] = 10;
		a_tensor->data.f32[1] = 1;
		a_tensor->data.f32[2] = 3;
		a_tensor->data.f32[3] = 5;
		ccv_nnc_tensor_variable_t bias = ccv_nnc_tensor_variable_new(graph, CPU_TENSOR_NHWC(32F, 2));
		ccv_nnc_tensor_t* const bias_tensor = ccv_nnc_tensor_from_variable(graph, bias);
		bias_tensor->data.f32[0] = 1;
		bias_tensor->data.f32[1] = -1;
		// b = a * w^T + bias, c = b .* b, s = sum(c).
		ccv_nnc_tensor_variable_t b = ccv_nnc_tensor_variable_new(graph);
		ccv_nnc_dynamic_graph_exec(graph, CMD_GEMM_FORWARD(NO_TRANSPOSE, TRANSPOSE(0, 1)), ccv_nnc_no_hint, 0, TENSOR_VARIABLE_LIST(a, w, bias), TENSOR_VARIABLE_LIST(b), 0, 0);
		ccv_nnc_tensor_variable_t c = ccv_nnc_tensor_variable_new(graph);
		ccv_nnc_dynamic_graph_exec(graph, CMD_EWPROD_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_VARIABLE_LIST(b, b), TENSOR_VARIABLE_LIST(c), 0, 0);
		ccv_nnc_tensor_variable_t s = ccv_nnc_tensor_variable_new(graph);
		ccv_nnc_dynamic_graph_exec(graph, CMD_REDUCE_SUM_FORWARD(0, 1), ccv_nnc_no_hint, 0, TENSOR_VARIABLE_LIST(c), TENSOR_VARIABLE_LIST(s), 0, 0);
		// g = d s / d w, then one SGD step applied to w with g; aux carries the
		// momentum state across iterations.
		ccv_nnc_tensor_variable_t g = ccv_nnc_tensor_variable_new(graph);
		ccv_nnc_dynamic_graph_backward(graph, TENSOR_VARIABLE_LIST(s), 0, TENSOR_VARIABLE_LIST(w), TENSOR_VARIABLE_LIST(g), 0);
		ccv_nnc_dynamic_graph_apply_gradients(graph, CMD_SGD_FORWARD(0, 0.001, 1, 0.995, 0.9, 0.9), TENSOR_VARIABLE_LIST(g), TENSOR_VARIABLE_LIST(w), &aux, 0, 0);
		// Free per-iteration variables; only w and aux persist.
		ccv_nnc_tensor_variable_free(graph, a);
		ccv_nnc_tensor_variable_free(graph, b);
		ccv_nnc_tensor_variable_free(graph, bias);
		ccv_nnc_tensor_variable_free(graph, c);
		ccv_nnc_tensor_variable_free(graph, s);
		ccv_nnc_tensor_variable_free(graph, g);
	}
	DYNAMIC_GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH);
	ccv_nnc_tensor_t* const w_tensor = ccv_nnc_tensor_from_variable(graph, w);
	// Each row of a * w^T should converge to -bias = (-1, 1).
	REQUIRE_EQ_WITH_TOLERANCE(10 * w_tensor->data.f32[0] + 1 * w_tensor->data.f32[1], -1, 1e-3, "converge for vector 1");
	REQUIRE_EQ_WITH_TOLERANCE(10 * w_tensor->data.f32[2] + 1 * w_tensor->data.f32[3], 1, 1e-3, "converge for vector 1");
	REQUIRE_EQ_WITH_TOLERANCE(3 * w_tensor->data.f32[0] + 5 * w_tensor->data.f32[1], -1, 1e-1, "converge for vector 2");
	REQUIRE_EQ_WITH_TOLERANCE(3 * w_tensor->data.f32[2] + 5 * w_tensor->data.f32[3], 1, 1e-1, "converge for vector 2");
	ccv_nnc_dynamic_graph_free(graph);
}
163 | | |
TEST_CASE("solve least square sum with adam on symbolic graph")
{
	// Same graph as the SGD symbolic test (b = a * w^T + bias, c = b .* b,
	// s = sum(c)), optimized with Adam instead of SGD.
	ccv_nnc_symbolic_graph_t* const symbolic_graph = ccv_nnc_symbolic_graph_new();
	ccv_nnc_tensor_symbol_t a = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 2), "a");
	ccv_nnc_tensor_symbol_t w = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 2), "w");
	ccv_nnc_tensor_symbol_t bias = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 2), "bias");
	ccv_nnc_tensor_symbol_t b = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 2), "b");
	ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_GEMM_FORWARD(NO_TRANSPOSE, TRANSPOSE(0, 1)), TENSOR_SYMBOL_LIST(a, w, bias), TENSOR_SYMBOL_LIST(b), "gemm");
	ccv_nnc_tensor_symbol_t c = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 2), "c");
	ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_EWPROD_FORWARD(), TENSOR_SYMBOL_LIST(b, b), TENSOR_SYMBOL_LIST(c), "square");
	ccv_nnc_tensor_symbol_t s = ccv_nnc_tensor_symbol_new(symbolic_graph, ccv_nnc_tensor_auto, "s");
	ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_REDUCE_SUM_FORWARD(0, 1), TENSOR_SYMBOL_LIST(c), TENSOR_SYMBOL_LIST(s), "sum");
	ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS);
	ccv_nnc_tensor_symbol_t updates[1];
	// Adam maps two auxiliary tensors per parameter (cf. the single aux entry
	// the SGD test uses), both zeroed below before training.
	ccv_nnc_tensor_symbol_map_t aux[2];
	ccv_nnc_graph_exec_symbol_t update_execs[1];
	ccv_nnc_symbolic_graph_minimize(symbolic_graph, CMD_ADAM_FORWARD(1, 0.002, 0.9, 0.98, 0, 1e-9, 0), TENSOR_SYMBOL_LIST(s), TENSOR_SYMBOL_LIST(w), 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), 0, updates, aux, update_execs);
	SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH);
	ccv_nnc_graph_t* graph;
	ccv_nnc_tensor_arena_t* tensor_arena;
	ccv_nnc_graph_exec_arena_t* graph_exec_arena;
	// Compile with the update execs as destinations: one run = forward +
	// backward + Adam update.
	ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), update_execs, 1, &graph, &tensor_arena, &graph_exec_arena);
	GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH);
	// Relies on the inplace ops for ADAM set on both updated w / bias, and momentum.
	ccv_nnc_tensor_t* const w_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, w);
	ccv_nnc_cmd_exec(CMD_RANDOM_UNIFORM_FORWARD(-0.5, 0.5), ccv_nnc_no_hint, 0, 0, 0, TENSOR_LIST(w_tensor), 0);
	ccv_nnc_tensor_t* const bias_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, bias);
	int i;
	// Zero-initialize both Adam auxiliary tensors before training.
	for (i = 0; i < 2; i++)
	{
		ccv_nnc_tensor_t* const aux_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, aux[i].source);
		ccv_nnc_cmd_exec(CMD_SET_FORWARD(0), ccv_nnc_no_hint, 0, 0, 0, TENSOR_LIST(aux_tensor), 0);
	}
	ccv_nnc_tensor_t* const a_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, a);
	// Gradient seed for s; set to 1 before each run.
	ccv_nnc_tensor_t* const f_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, ccv_nnc_tensor_symbol_for_backward(symbolic_graph, s));
	ccv_nnc_graph_exec_t adam = ccv_nnc_graph_exec_from_symbol(graph_exec_arena, update_execs[0]);
	for (i = 0; i < 1000; i++)
	{
		a_tensor->data.f32[0] = 10;
		a_tensor->data.f32[1] = 1;
		a_tensor->data.f32[2] = 3;
		a_tensor->data.f32[3] = 5;
		f_tensor->data.f32[0] = 1;
		bias_tensor->data.f32[0] = 1;
		bias_tensor->data.f32[1] = -1;
		// Adam's command carries the step count (first parameter); re-set it
		// each iteration to i + 1.
		ccv_nnc_graph_exec_set(graph, adam, CMD_ADAM_FORWARD(i + 1, 0.002, 0.9, 0.98, 0, 1e-9, 0));
		ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0);
	}
	// Each row of a * w^T should converge to -bias = (-1, 1).
	REQUIRE_EQ_WITH_TOLERANCE(a_tensor->data.f32[0] * w_tensor->data.f32[0] + a_tensor->data.f32[1] * w_tensor->data.f32[1], -1, 1e-1, "converge for vector 1");
	REQUIRE_EQ_WITH_TOLERANCE(a_tensor->data.f32[0] * w_tensor->data.f32[2] + a_tensor->data.f32[1] * w_tensor->data.f32[3], 1, 1e-1, "converge for vector 1");
	REQUIRE_EQ_WITH_TOLERANCE(a_tensor->data.f32[2] * w_tensor->data.f32[0] + a_tensor->data.f32[3] * w_tensor->data.f32[1], -1, 1e-1, "converge for vector 2");
	REQUIRE_EQ_WITH_TOLERANCE(a_tensor->data.f32[2] * w_tensor->data.f32[2] + a_tensor->data.f32[3] * w_tensor->data.f32[3], 1, 1e-1, "converge for vector 2");
	ccv_nnc_symbolic_graph_free(symbolic_graph);
	ccv_nnc_graph_free(graph);
	ccv_nnc_tensor_arena_free(tensor_arena);
	ccv_nnc_graph_exec_arena_free(graph_exec_arena);
}
221 | | |
222 | | TEST_CASE("solve least square sum with rmsprop on symbolic graph") |
223 | 1 | { |
224 | 1 | ccv_nnc_symbolic_graph_t* const symbolic_graph = ccv_nnc_symbolic_graph_new(); |
225 | 1 | ccv_nnc_tensor_symbol_t a = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 2), "a"); |
226 | 1 | ccv_nnc_tensor_symbol_t w = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 2), "w"); |
227 | 1 | ccv_nnc_tensor_symbol_t bias = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 2), "bias"); |
228 | 1 | ccv_nnc_tensor_symbol_t b = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 2), "b"); |
229 | 1 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_GEMM_FORWARD(NO_TRANSPOSE, TRANSPOSE(0, 1)), TENSOR_SYMBOL_LIST(a, w, bias), TENSOR_SYMBOL_LIST(b), "gemm"); |
230 | 1 | ccv_nnc_tensor_symbol_t c = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 2), "c"); |
231 | 1 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_EWPROD_FORWARD(), TENSOR_SYMBOL_LIST(b, b), TENSOR_SYMBOL_LIST(c), "square"); |
232 | 1 | ccv_nnc_tensor_symbol_t s = ccv_nnc_tensor_symbol_new(symbolic_graph, ccv_nnc_tensor_auto, "s"); |
233 | 1 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_REDUCE_SUM_FORWARD(0, 1), TENSOR_SYMBOL_LIST(c), TENSOR_SYMBOL_LIST(s), "sum"); |
234 | 1 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
235 | 1 | ccv_nnc_tensor_symbol_t updates[1]; |
236 | 1 | ccv_nnc_tensor_symbol_map_t aux[2]; |
237 | 1 | ccv_nnc_graph_exec_symbol_t update_execs[1]; |
238 | 1 | ccv_nnc_symbolic_graph_minimize(symbolic_graph, CMD_RMSPROP_FORWARD(0.001, 0.0001, 0.9, 0.9, 1e-9), TENSOR_SYMBOL_LIST(s), TENSOR_SYMBOL_LIST(w), 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), 0, updates, aux, update_execs); |
239 | 1 | SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH); |
240 | 1 | ccv_nnc_graph_t* graph; |
241 | 1 | ccv_nnc_tensor_arena_t* tensor_arena; |
242 | 1 | ccv_nnc_graph_exec_arena_t* graph_exec_arena; |
243 | 1 | ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), update_execs, 1, &graph, &tensor_arena, &graph_exec_arena); |
244 | 1 | GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH); |
245 | | // Relies on the inplace ops for ADAM set on both updated w / bias, and momentum. |
246 | 1 | ccv_nnc_tensor_t* const w_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, w); |
247 | 1 | ccv_nnc_cmd_exec(CMD_RANDOM_UNIFORM_FORWARD(-0.5, 0.5), ccv_nnc_no_hint, 0, 0, 0, TENSOR_LIST(w_tensor), 0); |
248 | 1 | ccv_nnc_tensor_t* const bias_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, bias); |
249 | 1 | int i; |
250 | 3 | for (i = 0; i < 2; i++2 ) |
251 | 2 | { |
252 | 2 | ccv_nnc_tensor_t* const aux_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, aux[i].source); |
253 | 2 | ccv_nnc_cmd_exec(CMD_SET_FORWARD(0), ccv_nnc_no_hint, 0, 0, 0, TENSOR_LIST(aux_tensor), 0); |
254 | 2 | } |
255 | 1 | ccv_nnc_tensor_t* const a_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, a); |
256 | 1 | ccv_nnc_tensor_t* const f_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, ccv_nnc_tensor_symbol_for_backward(symbolic_graph, s)); |
257 | 1 | ccv_nnc_graph_exec_t adam = ccv_nnc_graph_exec_from_symbol(graph_exec_arena, update_execs[0]); |
258 | 1.00k | for (i = 0; i < 1000; i++1.00k ) |
259 | 1.00k | { |
260 | 1.00k | a_tensor->data.f32[0] = 10; |
261 | 1.00k | a_tensor->data.f32[1] = 1; |
262 | 1.00k | a_tensor->data.f32[2] = 3; |
263 | 1.00k | a_tensor->data.f32[3] = 5; |
264 | 1.00k | f_tensor->data.f32[0] = 1; |
265 | 1.00k | bias_tensor->data.f32[0] = 1; |
266 | 1.00k | bias_tensor->data.f32[1] = -1; |
267 | 1.00k | ccv_nnc_graph_exec_set(graph, adam, CMD_RMSPROP_FORWARD(0.001, 0.0001, 0.9, 0.9, 1e-9)); |
268 | 1.00k | ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
269 | 1.00k | } |
270 | 1 | REQUIRE_EQ_WITH_TOLERANCE(a_tensor->data.f32[0] * w_tensor->data.f32[0] + a_tensor->data.f32[1] * w_tensor->data.f32[1], -1, 1e-1, "converge for vector 1"); |
271 | 1 | REQUIRE_EQ_WITH_TOLERANCE(a_tensor->data.f32[0] * w_tensor->data.f32[2] + a_tensor->data.f32[1] * w_tensor->data.f32[3], 1, 1e-1, "converge for vector 1"); |
272 | 1 | REQUIRE_EQ_WITH_TOLERANCE(a_tensor->data.f32[2] * w_tensor->data.f32[0] + a_tensor->data.f32[3] * w_tensor->data.f32[1], -1, 1e-1, "converge for vector 2"); |
273 | 1 | REQUIRE_EQ_WITH_TOLERANCE(a_tensor->data.f32[2] * w_tensor->data.f32[2] + a_tensor->data.f32[3] * w_tensor->data.f32[3], 1, 1e-1, "converge for vector 2"); |
274 | 1 | ccv_nnc_symbolic_graph_free(symbolic_graph); |
275 | 1 | ccv_nnc_graph_free(graph); |
276 | 1 | ccv_nnc_tensor_arena_free(tensor_arena); |
277 | 1 | ccv_nnc_graph_exec_arena_free(graph_exec_arena); |
278 | 1 | } |
279 | | |
TEST_CASE("solve least square sum with adamW on symbolic graph")
{
	// Same graph as the Adam symbolic test (b = a * w^T + bias, c = b .* b,
	// s = sum(c)), optimized with AdamW (decoupled weight decay variant).
	ccv_nnc_symbolic_graph_t* const symbolic_graph = ccv_nnc_symbolic_graph_new();
	ccv_nnc_tensor_symbol_t a = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 2), "a");
	ccv_nnc_tensor_symbol_t w = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 2), "w");
	ccv_nnc_tensor_symbol_t bias = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 2), "bias");
	ccv_nnc_tensor_symbol_t b = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 2), "b");
	ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_GEMM_FORWARD(NO_TRANSPOSE, TRANSPOSE(0, 1)), TENSOR_SYMBOL_LIST(a, w, bias), TENSOR_SYMBOL_LIST(b), "gemm");
	ccv_nnc_tensor_symbol_t c = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 2), "c");
	ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_EWPROD_FORWARD(), TENSOR_SYMBOL_LIST(b, b), TENSOR_SYMBOL_LIST(c), "square");
	ccv_nnc_tensor_symbol_t s = ccv_nnc_tensor_symbol_new(symbolic_graph, ccv_nnc_tensor_auto, "s");
	ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_REDUCE_SUM_FORWARD(0, 1), TENSOR_SYMBOL_LIST(c), TENSOR_SYMBOL_LIST(s), "sum");
	ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS);
	ccv_nnc_tensor_symbol_t updates[1];
	// AdamW maps two auxiliary tensors per parameter, zeroed below.
	ccv_nnc_tensor_symbol_map_t aux[2];
	ccv_nnc_graph_exec_symbol_t update_execs[1];
	ccv_nnc_symbolic_graph_minimize(symbolic_graph, CMD_ADAMW_FORWARD(1, 0.002, 0.9, 0.98, 0, 1e-9, 0), TENSOR_SYMBOL_LIST(s), TENSOR_SYMBOL_LIST(w), 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), 0, updates, aux, update_execs);
	SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH);
	ccv_nnc_graph_t* graph;
	ccv_nnc_tensor_arena_t* tensor_arena;
	ccv_nnc_graph_exec_arena_t* graph_exec_arena;
	ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), update_execs, 1, &graph, &tensor_arena, &graph_exec_arena);
	GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH);
	// Relies on the inplace ops for ADAMW set on both updated w / bias, and momentum.
	ccv_nnc_tensor_t* const w_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, w);
	ccv_nnc_cmd_exec(CMD_RANDOM_UNIFORM_FORWARD(-0.5, 0.5), ccv_nnc_no_hint, 0, 0, 0, TENSOR_LIST(w_tensor), 0);
	ccv_nnc_tensor_t* const bias_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, bias);
	int i;
	// Zero-initialize both AdamW auxiliary tensors before training.
	for (i = 0; i < 2; i++)
	{
		ccv_nnc_tensor_t* const aux_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, aux[i].source);
		ccv_nnc_cmd_exec(CMD_SET_FORWARD(0), ccv_nnc_no_hint, 0, 0, 0, TENSOR_LIST(aux_tensor), 0);
	}
	ccv_nnc_tensor_t* const a_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, a);
	// Gradient seed for s; set to 1 before each run.
	ccv_nnc_tensor_t* const f_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, ccv_nnc_tensor_symbol_for_backward(symbolic_graph, s));
	ccv_nnc_graph_exec_t adamW = ccv_nnc_graph_exec_from_symbol(graph_exec_arena, update_execs[0]);
	for (i = 0; i < 1000; i++)
	{
		a_tensor->data.f32[0] = 10;
		a_tensor->data.f32[1] = 1;
		a_tensor->data.f32[2] = 3;
		a_tensor->data.f32[3] = 5;
		f_tensor->data.f32[0] = 1;
		bias_tensor->data.f32[0] = 1;
		bias_tensor->data.f32[1] = -1;
		// AdamW's command carries the step count (first parameter); re-set it
		// each iteration to i + 1.
		ccv_nnc_graph_exec_set(graph, adamW, CMD_ADAMW_FORWARD(i + 1, 0.002, 0.9, 0.98, 0, 1e-9, 0));
		ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0);
	}
	// Each row of a * w^T should converge to -bias = (-1, 1).
	REQUIRE_EQ_WITH_TOLERANCE(a_tensor->data.f32[0] * w_tensor->data.f32[0] + a_tensor->data.f32[1] * w_tensor->data.f32[1], -1, 1e-1, "converge for vector 1");
	REQUIRE_EQ_WITH_TOLERANCE(a_tensor->data.f32[0] * w_tensor->data.f32[2] + a_tensor->data.f32[1] * w_tensor->data.f32[3], 1, 1e-1, "converge for vector 1");
	REQUIRE_EQ_WITH_TOLERANCE(a_tensor->data.f32[2] * w_tensor->data.f32[0] + a_tensor->data.f32[3] * w_tensor->data.f32[1], -1, 1e-1, "converge for vector 2");
	REQUIRE_EQ_WITH_TOLERANCE(a_tensor->data.f32[2] * w_tensor->data.f32[2] + a_tensor->data.f32[3] * w_tensor->data.f32[3], 1, 1e-1, "converge for vector 2");
	ccv_nnc_symbolic_graph_free(symbolic_graph);
	ccv_nnc_graph_free(graph);
	ccv_nnc_tensor_arena_free(tensor_arena);
	ccv_nnc_graph_exec_arena_free(graph_exec_arena);
}
337 | | |
338 | | #include "case_main.h" |