/home/liu/actions-runner/_work/ccv/ccv/test/unit/nnc/micro.tests.c
Line | Count | Source |
1 | | #include "case.h" |
2 | | #include "ccv_case.h" |
3 | | #include "ccv_nnc_case.h" |
4 | | #include <ccv.h> |
5 | | #include <nnc/ccv_nnc.h> |
6 | | #include <nnc/ccv_nnc_easy.h> |
7 | | #include "3rdparty/dsfmt/dSFMT.h" |
8 | | |
9 | | TEST_SETUP() |
10 | | { |
11 | | ccv_nnc_init(); |
12 | | } |
13 | | |
14 | | TEST_CASE("represent convolution with micro ops, with external variables") |
15 | 1 | { |
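| | // Convolution composed from micro ops: reindex x into a 7-D view indexed by
| | // (batch, out-h, out-w, kh, kw, in-channel, out-channel), reindex w likewise,
| | // multiply element-wise, then sum-reduce over axes 3, 4, 5 (kh, kw, in-channel).
| | // $kh, $kw and $kc are external variables supplied when the combined op is interpreted.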
16 | 1 | ccv_nnc_micro_io_t x = ccv_nnc_micro_input(4); |
17 | 1 | ccv_nnc_micro_io_t xx = ccv_nnc_micro_reindex((const char*[]){ |
18 | 1 | "dA0", |
19 | 1 | "dA1 - $kh + 1", |
20 | 1 | "dA2 - $kw + 1", |
21 | 1 | "$kh", |
22 | 1 | "$kw", |
23 | 1 | "dA3", |
24 | 1 | "$kc" |
25 | 1 | }, 7, &x, 1, (const char*[]){ |
26 | 1 | "i0", |
27 | 1 | "i1 + i3", |
28 | 1 | "i2 + i4", |
29 | 1 | "i5" |
30 | 1 | }, 4, x); |
31 | 1 | ccv_nnc_micro_io_t w = ccv_nnc_micro_input(4); |
32 | 1 | ccv_nnc_micro_io_t ww = ccv_nnc_micro_reindex((const char*[]){ |
33 | 1 | "dA0", |
34 | 1 | "dA1 - $kh + 1", |
35 | 1 | "dA2 - $kw + 1", |
36 | 1 | "$kh", |
37 | 1 | "$kw", |
38 | 1 | "dA3", |
39 | 1 | "$kc" |
40 | 1 | }, 7, &x, 1, (const char*[]){ |
41 | 1 | "i6", |
42 | 1 | "i3", |
43 | 1 | "i4", |
44 | 1 | "i5" |
45 | 1 | }, 4, w); |
46 | 1 | ccv_nnc_micro_io_t yy = ccv_nnc_micro_binary(CCV_NNC_MICRO_BINARY_OP_MUL, xx, ww); |
47 | 1 | ccv_nnc_micro_io_t y = ccv_nnc_micro_reduce(CCV_NNC_MICRO_REDUCE_OP_SUM, (const int[]){ |
48 | 1 | 3, |
49 | 1 | 4, |
50 | 1 | 5 |
51 | 1 | }, 3, yy); |
52 | 1 | ccv_nnc_micro_io_t dy = ccv_nnc_micro_grad(y); |
53 | 1 | ccv_nnc_micro_io_t dx = ccv_nnc_micro_grad(x); |
54 | 1 | ccv_nnc_micro_io_t dw = ccv_nnc_micro_grad(w); |
55 | 1 | ccv_nnc_micro_combine_t* combine = ccv_nnc_micro_combine_new((ccv_nnc_micro_io_t[]){ |
56 | 1 | x, |
57 | 1 | w |
58 | 1 | }, 2, (const char*[]){ |
59 | 1 | "$kh", |
60 | 1 | "$kw", |
61 | 1 | "$kc" |
62 | 1 | }, 3, &y, 1, (ccv_nnc_micro_io_t[]){ |
63 | 1 | dy, |
64 | 1 | x, |
65 | 1 | w |
66 | 1 | }, 3, (ccv_nnc_micro_io_t[]){ |
67 | 1 | dx, |
68 | 1 | dw |
69 | 1 | }, 2); |
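| | // Forward pass: interpret the combined op on concrete tensors with
| | // $kh = 3, $kw = 3, $kc = 2 and compare against CMD_CONVOLUTION_FORWARD.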
70 | 1 | ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 1, 4, 4, 5), 0); |
71 | 1 | ccv_nnc_tensor_t* const w_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 3, 3, 5), 0); |
72 | 1 | ccv_nnc_tensor_t* const y_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 1, 2, 2, 2), 0); |
73 | 1 | dsfmt_t dsfmt; |
74 | 1 | dsfmt_init_gen_rand(&dsfmt, 1); |
75 | 1 | int i; |
76 | 81 | for (i = 0; i < 4 * 4 * 5; i++)
77 | 80 | x_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
78 | 91 | for (i = 0; i < 2 * 3 * 3 * 5; i++)
79 | 90 | w_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
80 | 1 | ccv_nnc_micro_combine_interpret(combine, CCV_NNC_CUSTOM_FORWARD, TENSOR_LIST(x_tensor, w_tensor), |
81 | 1 | (const ccv_nnc_micro_scalar_t[]){ |
82 | 1 | { |
83 | 1 | .type = CCV_32S, |
84 | 1 | .i32 = 3, |
85 | 1 | }, |
86 | 1 | { |
87 | 1 | .type = CCV_32S, |
88 | 1 | .i32 = 3, |
89 | 1 | }, |
90 | 1 | { |
91 | 1 | .type = CCV_32S, |
92 | 1 | .i32 = 2, |
93 | 1 | } |
94 | 1 | }, 3, TENSOR_LIST(y_tensor)); |
95 | 1 | ccv_nnc_tensor_t* const gty_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 1, 2, 2, 2), 0); |
96 | 1 | ccv_nnc_cmd_exec(CMD_CONVOLUTION_FORWARD(1, 2, 3, 3, 5), HINT((1, 1)), 0, TENSOR_LIST(x_tensor, w_tensor), TENSOR_LIST(gty_tensor), 0); |
97 | 1 | REQUIRE_TENSOR_EQ(y_tensor, gty_tensor, "micro op composed convolution should match the existing convolution"); |
98 | 1 | ccv_nnc_tensor_t* const dx_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 1, 4, 4, 5), 0); |
99 | 1 | ccv_nnc_tensor_t* const dw_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 3, 3, 5), 0); |
100 | 1 | ccv_nnc_tensor_t* const dy_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 1, 2, 2, 2), 0); |
101 | 9 | for (i = 0; i < 2 * 2 * 2; i++)
102 | 8 | dy_tensor->data.f32[i] = 1; |
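| | // The backward interpreter takes dy in the 7-D reduced shape (reduced axes kept as 1),
| | // so alias the 1x2x2x2 gradient as 1x2x2x1x1x1x2 and compare against CMD_CONVOLUTION_BACKWARD.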
103 | 1 | ccv_nnc_tensor_t dy_tensor_t = ccv_nnc_tensor(dy_tensor->data.f32, CPU_TENSOR_NHWC(32F, 1, 2, 2, 1, 1, 1, 2), 0); |
104 | 1 | ccv_nnc_micro_combine_interpret(combine, CCV_NNC_CUSTOM_BACKWARD, TENSOR_LIST(&dy_tensor_t, x_tensor, w_tensor), |
105 | 1 | (const ccv_nnc_micro_scalar_t[]){ |
106 | 1 | { |
107 | 1 | .type = CCV_32S, |
108 | 1 | .i32 = 3, |
109 | 1 | }, |
110 | 1 | { |
111 | 1 | .type = CCV_32S, |
112 | 1 | .i32 = 3, |
113 | 1 | }, |
114 | 1 | { |
115 | 1 | .type = CCV_32S, |
116 | 1 | .i32 = 2, |
117 | 1 | } |
118 | 1 | }, 3, TENSOR_LIST(dx_tensor, dw_tensor)); |
119 | 1 | ccv_nnc_tensor_t* const gtdx_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4, 4, 5), 0); |
120 | 1 | ccv_nnc_tensor_t* const gtdw_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 3, 3, 5), 0); |
121 | 1 | ccv_nnc_cmd_exec(CMD_CONVOLUTION_BACKWARD(1, 2, 3, 3, 5), HINT((1, 1)), 0, TENSOR_LIST(dy_tensor, x_tensor, w_tensor), TENSOR_LIST(gtdx_tensor, gtdw_tensor), 0); |
122 | 1 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, dx_tensor->data.f32, gtdx_tensor->data.f32, 4 * 4 * 5, 1e-5, "micro op composed convolution should match the existing convolution"); |
123 | 1 | REQUIRE_TENSOR_EQ(dw_tensor, gtdw_tensor, "micro op composed convolution should match the existing convolution"); |
124 | 1 | ccv_nnc_tensor_free(gtdx_tensor); |
125 | 1 | ccv_nnc_tensor_free(gtdw_tensor); |
126 | 1 | ccv_nnc_tensor_free(dx_tensor); |
127 | 1 | ccv_nnc_tensor_free(dw_tensor); |
128 | 1 | ccv_nnc_tensor_free(dy_tensor); |
129 | 1 | ccv_nnc_tensor_free(x_tensor); |
130 | 1 | ccv_nnc_tensor_free(w_tensor); |
131 | 1 | ccv_nnc_tensor_free(y_tensor); |
132 | 1 | ccv_nnc_tensor_free(gty_tensor); |
133 | 1 | ccv_nnc_micro_combine_free(combine); |
134 | 1 | } |
135 | | |
136 | | TEST_CASE("represent convolution with micro ops, no external variables") |
137 | 1 | { |
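| | // Same construction as above, but the kernel extents come from w's dimensions
| | // (dB1, dB2) and the output channels from dB0 rather than external $ variables,
| | // so the reindex shape expressions reference both inputs.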
138 | 1 | ccv_nnc_micro_io_t x = ccv_nnc_micro_input(4); |
139 | 1 | ccv_nnc_micro_io_t w = ccv_nnc_micro_input(4); |
140 | 1 | ccv_nnc_micro_io_t xx = ccv_nnc_micro_reindex((const char*[]){ |
141 | 1 | "dA0", |
142 | 1 | "dA1 - dB1 + 1", |
143 | 1 | "dA2 - dB2 + 1", |
144 | 1 | "dB1", |
145 | 1 | "dB2", |
146 | 1 | "dA3[=dB3]", |
147 | 1 | "dB0" |
148 | 1 | }, 7, (const ccv_nnc_micro_io_t[]){ |
149 | 1 | x, |
150 | 1 | w |
151 | 1 | }, 2, (const char*[]){ |
152 | 1 | "i0", |
153 | 1 | "i1 + i3", |
154 | 1 | "i2 + i4", |
155 | 1 | "i5" |
156 | 1 | }, 4, x); |
157 | 1 | ccv_nnc_micro_io_t ww = ccv_nnc_micro_reindex((const char*[]){ |
158 | 1 | "dA0", |
159 | 1 | "dA1 - dB1 + 1", |
160 | 1 | "dA2 - dB2 + 1", |
161 | 1 | "dB1", |
162 | 1 | "dB2", |
163 | 1 | "dA3[=dB3]", |
164 | 1 | "dB0" |
165 | 1 | }, 7, (const ccv_nnc_micro_io_t[]){ |
166 | 1 | x, |
167 | 1 | w |
168 | 1 | }, 2, (const char*[]){ |
169 | 1 | "i6", |
170 | 1 | "i3", |
171 | 1 | "i4", |
172 | 1 | "i5" |
173 | 1 | }, 4, w); |
174 | 1 | ccv_nnc_micro_io_t yy = ccv_nnc_micro_binary(CCV_NNC_MICRO_BINARY_OP_MUL, xx, ww); |
175 | 1 | ccv_nnc_micro_io_t y = ccv_nnc_micro_reduce(CCV_NNC_MICRO_REDUCE_OP_SUM, (const int[]){ |
176 | 1 | 3, |
177 | 1 | 4, |
178 | 1 | 5 |
179 | 1 | }, 3, yy); |
180 | 1 | ccv_nnc_micro_io_t dy = ccv_nnc_micro_grad(y); |
181 | 1 | ccv_nnc_micro_io_t dx = ccv_nnc_micro_grad(x); |
182 | 1 | ccv_nnc_micro_io_t dw = ccv_nnc_micro_grad(w); |
183 | 1 | ccv_nnc_micro_combine_t* combine = ccv_nnc_micro_combine_new((ccv_nnc_micro_io_t[]){ |
184 | 1 | x, |
185 | 1 | w |
186 | 1 | }, 2, 0, 0, &y, 1, (ccv_nnc_micro_io_t[]){ |
187 | 1 | dy, |
188 | 1 | x, |
189 | 1 | w |
190 | 1 | }, 3, (ccv_nnc_micro_io_t[]){ |
191 | 1 | dx, |
192 | 1 | dw |
193 | 1 | }, 2); |
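| | // With no external variables, both interpret calls below pass 0 for the scalar list.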
194 | 1 | ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 1, 4, 4, 5), 0); |
195 | 1 | ccv_nnc_tensor_t* const w_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 3, 3, 5), 0); |
196 | 1 | ccv_nnc_tensor_t* const y_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 1, 2, 2, 2), 0); |
197 | 1 | dsfmt_t dsfmt; |
198 | 1 | dsfmt_init_gen_rand(&dsfmt, 1); |
199 | 1 | int i; |
200 | 81 | for (i = 0; i < 4 * 4 * 5; i++)
201 | 80 | x_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
202 | 91 | for (i = 0; i < 2 * 3 * 3 * 5; i++)
203 | 90 | w_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
204 | 1 | ccv_nnc_micro_combine_interpret(combine, CCV_NNC_CUSTOM_FORWARD, TENSOR_LIST(x_tensor, w_tensor), 0, 0, TENSOR_LIST(y_tensor)); |
205 | 1 | ccv_nnc_tensor_t* const gty_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 1, 2, 2, 2), 0); |
206 | 1 | ccv_nnc_cmd_exec(CMD_CONVOLUTION_FORWARD(1, 2, 3, 3, 5), HINT((1, 1)), 0, TENSOR_LIST(x_tensor, w_tensor), TENSOR_LIST(gty_tensor), 0); |
207 | 1 | REQUIRE_TENSOR_EQ(y_tensor, gty_tensor, "micro op composed convolution should match the existing convolution"); |
208 | 1 | ccv_nnc_tensor_t* const dx_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 1, 4, 4, 5), 0); |
209 | 1 | ccv_nnc_tensor_t* const dw_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 3, 3, 5), 0); |
210 | 1 | ccv_nnc_tensor_t* const dy_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 1, 2, 2, 2), 0); |
211 | 9 | for (i = 0; i < 2 * 2 * 2; i++)
212 | 8 | dy_tensor->data.f32[i] = 1; |
213 | 1 | ccv_nnc_tensor_t dy_tensor_t = ccv_nnc_tensor(dy_tensor->data.f32, CPU_TENSOR_NHWC(32F, 1, 2, 2, 1, 1, 1, 2), 0); |
214 | 1 | ccv_nnc_micro_combine_interpret(combine, CCV_NNC_CUSTOM_BACKWARD, TENSOR_LIST(&dy_tensor_t, x_tensor, w_tensor), 0, 0, TENSOR_LIST(dx_tensor, dw_tensor)); |
215 | 1 | ccv_nnc_tensor_t* const gtdx_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4, 4, 5), 0); |
216 | 1 | ccv_nnc_tensor_t* const gtdw_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 3, 3, 5), 0); |
217 | 1 | ccv_nnc_cmd_exec(CMD_CONVOLUTION_BACKWARD(1, 2, 3, 3, 5), HINT((1, 1)), 0, TENSOR_LIST(dy_tensor, x_tensor, w_tensor), TENSOR_LIST(gtdx_tensor, gtdw_tensor), 0); |
218 | 1 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, dx_tensor->data.f32, gtdx_tensor->data.f32, 4 * 4 * 5, 1e-5, "micro op composed convolution should match the existing convolution"); |
219 | 1 | REQUIRE_TENSOR_EQ(dw_tensor, gtdw_tensor, "micro op composed convolution should match the existing convolution"); |
220 | 1 | ccv_nnc_tensor_free(gtdx_tensor); |
221 | 1 | ccv_nnc_tensor_free(gtdw_tensor); |
222 | 1 | ccv_nnc_tensor_free(dx_tensor); |
223 | 1 | ccv_nnc_tensor_free(dw_tensor); |
224 | 1 | ccv_nnc_tensor_free(dy_tensor); |
225 | 1 | ccv_nnc_tensor_free(x_tensor); |
226 | 1 | ccv_nnc_tensor_free(w_tensor); |
227 | 1 | ccv_nnc_tensor_free(y_tensor); |
228 | 1 | ccv_nnc_tensor_free(gty_tensor); |
229 | 1 | ccv_nnc_micro_combine_free(combine); |
230 | 1 | } |
231 | | |
232 | | TEST_CASE("represent matrix multiplication with micro ops") |
233 | 1 | { |
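| | // Matrix multiplication composed from micro ops: reindex a and b into 3-D
| | // (M, K, N) views, multiply element-wise, then sum-reduce over the shared
| | // K axis (axis 1).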
234 | 1 | ccv_nnc_micro_io_t a = ccv_nnc_micro_input(2); |
235 | 1 | ccv_nnc_micro_io_t b = ccv_nnc_micro_input(2); |
236 | 1 | ccv_nnc_micro_io_t aa = ccv_nnc_micro_reindex((const char*[]){ |
237 | 1 | "dA0", |
238 | 1 | "dA1[=dB0]", |
239 | 1 | "dB1" |
240 | 1 | }, 3, (const ccv_nnc_micro_io_t[]){ |
241 | 1 | a, |
242 | 1 | b |
243 | 1 | }, 2, (const char*[]){ |
244 | 1 | "i0", |
245 | 1 | "i1" |
246 | 1 | }, 2, a); |
247 | 1 | ccv_nnc_micro_io_t bb = ccv_nnc_micro_reindex((const char*[]){ |
248 | 1 | "dA0", |
249 | 1 | "dB0[=dA1]", |
250 | 1 | "dB1" |
251 | 1 | }, 3, (const ccv_nnc_micro_io_t[]){ |
252 | 1 | a, |
253 | 1 | b |
254 | 1 | }, 2, (const char*[]){ |
255 | 1 | "i1", |
256 | 1 | "i2" |
257 | 1 | }, 2, b); |
258 | 1 | ccv_nnc_micro_io_t cc = ccv_nnc_micro_binary(CCV_NNC_MICRO_BINARY_OP_MUL, aa, bb); |
259 | 1 | ccv_nnc_micro_io_t c = ccv_nnc_micro_reduce(CCV_NNC_MICRO_REDUCE_OP_SUM, (const int[]){ |
260 | 1 | 1 |
261 | 1 | }, 1, cc); |
262 | 1 | ccv_nnc_micro_io_t dc = ccv_nnc_micro_grad(c); |
263 | 1 | ccv_nnc_micro_io_t da = ccv_nnc_micro_grad(a); |
264 | 1 | ccv_nnc_micro_io_t db = ccv_nnc_micro_grad(b); |
265 | 1 | ccv_nnc_micro_combine_t* combine = ccv_nnc_micro_combine_new((ccv_nnc_micro_io_t[]){ |
266 | 1 | a, |
267 | 1 | b |
268 | 1 | }, 2, 0, 0, &c, 1, (ccv_nnc_micro_io_t[]){ |
269 | 1 | dc, |
270 | 1 | a, |
271 | 1 | b |
272 | 1 | }, 3, (ccv_nnc_micro_io_t[]){ |
273 | 1 | da, |
274 | 1 | db |
275 | 1 | }, 2); |
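| | // Forward pass: interpret the combined op and compare against CMD_GEMM_FORWARD.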
276 | 1 | ccv_nnc_tensor_t* const a_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4, 2), 0); |
277 | 1 | ccv_nnc_tensor_t* const b_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 3), 0); |
278 | 1 | ccv_nnc_tensor_t* const c_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4, 3), 0); |
279 | 1 | dsfmt_t dsfmt; |
280 | 1 | dsfmt_init_gen_rand(&dsfmt, 1); |
281 | 1 | int i; |
282 | 9 | for (i = 0; i < 4 * 2; i++)
283 | 8 | a_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
284 | 7 | for (i = 0; i < 2 * 3; i++)
285 | 6 | b_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
286 | 1 | ccv_nnc_micro_combine_interpret(combine, CCV_NNC_CUSTOM_FORWARD, TENSOR_LIST(a_tensor, b_tensor), 0, 0, TENSOR_LIST(c_tensor)); |
287 | 1 | ccv_nnc_tensor_t* const gtc_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4, 3), 0); |
288 | 1 | ccv_nnc_cmd_exec(CMD_GEMM_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(a_tensor, b_tensor), TENSOR_LIST(gtc_tensor), 0); |
289 | 1 | REQUIRE_TENSOR_EQ(c_tensor, gtc_tensor, "micro op composed matrix multiplication should match the existing matrix multiplication"); |
290 | 1 | ccv_nnc_tensor_t* const da_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4, 2), 0); |
291 | 1 | ccv_nnc_tensor_t* const db_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 3), 0); |
292 | 1 | ccv_nnc_tensor_t* const dc_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4, 3), 0); |
293 | 13 | for (i = 0; i < 4 * 3; i++)
294 | 12 | dc_tensor->data.f32[i] = 1; |
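| | // Alias dc as 4x1x3 (the reduced shape with the summed axis kept as 1) for the
| | // backward pass and compare the gradients against CMD_GEMM_BACKWARD.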
295 | 1 | ccv_nnc_tensor_t dc_tensor_t = ccv_nnc_tensor(dc_tensor->data.f32, CPU_TENSOR_NHWC(32F, 4, 1, 3), 0); |
296 | 1 | ccv_nnc_micro_combine_interpret(combine, CCV_NNC_CUSTOM_BACKWARD, TENSOR_LIST(&dc_tensor_t, a_tensor, b_tensor), 0, 0, TENSOR_LIST(da_tensor, db_tensor)); |
297 | 1 | ccv_nnc_tensor_t* const gtda_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4, 2), 0); |
298 | 1 | ccv_nnc_tensor_t* const gtdb_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 3), 0); |
299 | 1 | ccv_nnc_cmd_exec(CMD_GEMM_BACKWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(dc_tensor, a_tensor, b_tensor), TENSOR_LIST(gtda_tensor, gtdb_tensor), 0); |
300 | 1 | REQUIRE_TENSOR_EQ(da_tensor, gtda_tensor, "micro op composed matrix multiplication should match the existing matrix multiplication"); |
301 | 1 | REQUIRE_TENSOR_EQ(db_tensor, gtdb_tensor, "micro op composed matrix multiplication should match the existing matrix multiplication"); |
302 | 1 | ccv_nnc_tensor_free(gtda_tensor); |
303 | 1 | ccv_nnc_tensor_free(gtdb_tensor); |
304 | 1 | ccv_nnc_tensor_free(da_tensor); |
305 | 1 | ccv_nnc_tensor_free(db_tensor); |
306 | 1 | ccv_nnc_tensor_free(dc_tensor); |
307 | 1 | ccv_nnc_tensor_free(gtc_tensor); |
308 | 1 | ccv_nnc_tensor_free(c_tensor); |
309 | 1 | ccv_nnc_tensor_free(b_tensor); |
310 | 1 | ccv_nnc_tensor_free(a_tensor); |
311 | 1 | ccv_nnc_micro_combine_free(combine); |
312 | 1 | } |
313 | | |
314 | | #include "case_main.h" |