/home/liu/actions-runner/_work/ccv/ccv/test/int/nnc/mpsdnn.tests.c
Line | Count | Source |
1 | | #include "case.h" |
2 | | #include "ccv_case.h" |
3 | | #include "ccv_nnc_case.h" |
4 | | #include <ccv.h> |
5 | | #include <nnc/ccv_nnc.h> |
6 | | #include <nnc/ccv_nnc_easy.h> |
7 | | #include <3rdparty/dsfmt/dSFMT.h> |
8 | | #include <nnc/ccv_nnc_internal.h> |
9 | | |
10 | | TEST_SETUP() |
11 | | { |
12 | | ccv_nnc_init(); |
13 | | } |
14 | | |
15 | 0 | #define INPUT_DIM (3) |
16 | 0 | #define OUTPUT_DIM (96) |
17 | | |
18 | 0 | #define INPUT_SIZE (224) |
19 | 0 | #define OUTPUT_SIZE (112) |
20 | | |
21 | 0 | #define KERNEL_SIZE (7) |
22 | | |
23 | | #define BATCH_SIZE (64) |
24 | | |
25 | 0 | #define LN_DIM (10) |
26 | 0 | #define GN_C_DIM (16) |
27 | | #define GN_RC_DIM (4) |
28 | | |
29 | | TEST_CASE("mps forward convolution") |
30 | 1 | { |
31 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_CONVOLUTION_FORWARD, CCV_NNC_BACKEND_MPS)); |
32 | 0 | ccv_nnc_tensor_t* a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, BATCH_SIZE, INPUT_SIZE, INPUT_SIZE, INPUT_DIM), 0); |
33 | 0 | ccv_nnc_tensor_t* b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, BATCH_SIZE, OUTPUT_SIZE, OUTPUT_SIZE, OUTPUT_DIM), 0); |
34 | 0 | ccv_nnc_cmd_t cmd = CMD_CONVOLUTION_FORWARD(1, OUTPUT_DIM, KERNEL_SIZE, KERNEL_SIZE, INPUT_DIM); |
35 | 0 | cmd.backend = CCV_NNC_BACKEND_CPU_REF; |
36 | 0 | assert(cmd.backend >= 0); |
37 | 0 | ccv_nnc_hint_t hint = ccv_nnc_hint_auto(cmd.info, a->info, b->info); |
38 | 0 | assert(ccv_nnc_hint_verify(hint, cmd.info, a->info, b->info) == 0); |
39 | 0 | ccv_nnc_tensor_t* w = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, OUTPUT_DIM, KERNEL_SIZE, KERNEL_SIZE, INPUT_DIM), 0); |
40 | 0 | ccv_nnc_tensor_t* bias = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, OUTPUT_DIM), 0); |
41 | | // configure the inlets. |
42 | 0 | dsfmt_t dsfmt; |
43 | 0 | dsfmt_init_gen_rand(&dsfmt, 0); |
44 | 0 | int i; |
45 | 0 | for (i = 0; i < INPUT_DIM * KERNEL_SIZE * KERNEL_SIZE * OUTPUT_DIM; i++) |
46 | 0 | w->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) / (INPUT_DIM * KERNEL_SIZE * KERNEL_SIZE); |
47 | 0 | for (i = 0; i < INPUT_SIZE * INPUT_SIZE * INPUT_DIM * ccv_max(1, BATCH_SIZE); i++) |
48 | 0 | a->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
49 | 0 | for (i = 0; i < OUTPUT_DIM; i++) |
50 | 0 | bias->data.f32[i] = (float)i / OUTPUT_DIM; |
51 | | // Copy generated matrix values over to GPU. |
52 | 0 | ccv_nnc_tensor_t* ga = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, BATCH_SIZE, INPUT_SIZE, INPUT_SIZE, INPUT_DIM), 0); |
53 | 0 | ccv_nnc_tensor_t* gw = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, OUTPUT_DIM, KERNEL_SIZE, KERNEL_SIZE, INPUT_DIM), 0); |
54 | 0 | ccv_nnc_tensor_t* gwo = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, OUTPUT_DIM, INPUT_DIM, KERNEL_SIZE, KERNEL_SIZE), 0); |
55 | 0 | ccv_nnc_tensor_t* gbias = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, OUTPUT_DIM), 0); |
56 | 0 | ccv_nnc_cmd_t move = CMD_DATA_TRANSFER_FORWARD(); |
57 | 0 | move.backend = CCV_NNC_BACKEND_MPS; |
58 | 0 | assert(move.backend >= 0); |
59 | 0 | ccv_nnc_cmd_exec(move, ccv_nnc_no_hint, 0, TENSOR_LIST(a, w, bias), TENSOR_LIST(ga, gw, gbias), 0); |
60 | 0 | ccv_nnc_cmd_exec(cmd, hint, 0, TENSOR_LIST(a, w, bias), TENSOR_LIST(b), 0); |
61 | 0 | ccv_nnc_tensor_t* gc = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, BATCH_SIZE, OUTPUT_SIZE, OUTPUT_SIZE, OUTPUT_DIM), 0); |
62 | |
63 | 0 | ccv_nnc_cmd_t transform = CMD_FORMAT_TRANSFORM_FORWARD(); |
64 | 0 | transform.backend = CCV_NNC_BACKEND_MPS; |
65 | 0 | assert(transform.backend >= 0); |
66 | 0 | ccv_nnc_stream_context_t* stream_context = ccv_nnc_stream_context_new(CCV_STREAM_CONTEXT_GPU); |
67 | 0 | ccv_nnc_cmd_exec(transform, ccv_nnc_no_hint, 0, TENSOR_LIST(gw), TENSOR_LIST(gwo), stream_context); |
68 | 0 | ccv_nnc_stream_context_wait(stream_context); |
69 | 0 | ccv_nnc_tensor_free(gw); |
70 | |
71 | 0 | cmd.backend = CCV_NNC_BACKEND_MPS; |
72 | 0 | assert(cmd.backend >= 0); |
73 | 0 | cmd.algorithm = -1; |
74 | 0 | cmd = ccv_nnc_cmd_autotune(cmd, 1 * 1024 * 1024 * 1024, hint, 0, TENSOR_LIST(ga, gwo, gbias), TENSOR_LIST(gc), stream_context); |
75 | 0 | assert(CCV_NNC_EXEC_SUCCESS == ccv_nnc_cmd_exec(cmd, hint, 0, TENSOR_LIST(ga, gwo, gbias), TENSOR_LIST(gc), stream_context)); |
76 | 0 | ccv_nnc_stream_context_wait(stream_context); |
77 | 0 | ccv_nnc_stream_context_free(stream_context); |
78 | 0 | ccv_nnc_tensor_t* c = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, BATCH_SIZE, OUTPUT_SIZE, OUTPUT_SIZE, OUTPUT_DIM), 0); |
79 | 0 | ccv_nnc_cmd_exec(move, ccv_nnc_no_hint, 0, TENSOR_LIST(gc), TENSOR_LIST(c), 0); |
80 | 0 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, b->data.f32, c->data.f32, OUTPUT_DIM * OUTPUT_SIZE * OUTPUT_SIZE, 1e-5, "output from mps should match from CPU"); |
81 | 0 | ccv_nnc_tensor_free(c); |
82 | 0 | ccv_nnc_tensor_free(gc); |
83 | 0 | ccv_nnc_tensor_free(bias); |
84 | 0 | ccv_nnc_tensor_free(w); |
85 | 0 | ccv_nnc_tensor_free(b); |
86 | 0 | ccv_nnc_tensor_free(a); |
87 | 0 | ccv_nnc_tensor_free(gbias); |
88 | 0 | ccv_nnc_tensor_free(gwo); |
89 | 0 | ccv_nnc_tensor_free(ga); |
90 | 0 | } |
91 | | |
92 | | TEST_CASE("mps forward convolution in nchw format") |
93 | 1 | { |
94 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_CONVOLUTION_FORWARD, CCV_NNC_BACKEND_MPS)); |
95 | 0 | ccv_nnc_tensor_t* a = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, BATCH_SIZE, INPUT_DIM, INPUT_SIZE, INPUT_SIZE), 0); |
96 | 0 | ccv_nnc_tensor_t* b = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, BATCH_SIZE, OUTPUT_DIM, OUTPUT_SIZE, OUTPUT_SIZE), 0); |
97 | 0 | ccv_nnc_cmd_t cmd = CMD_CONVOLUTION_FORWARD(1, OUTPUT_DIM, KERNEL_SIZE, KERNEL_SIZE, INPUT_DIM); |
98 | 0 | cmd.backend = CCV_NNC_BACKEND_CPU_REF; |
99 | 0 | assert(cmd.backend >= 0); |
100 | 0 | ccv_nnc_hint_t hint = ccv_nnc_hint_auto(cmd.info, a->info, b->info); |
101 | 0 | assert(ccv_nnc_hint_verify(hint, cmd.info, a->info, b->info) == 0); |
102 | 0 | ccv_nnc_tensor_t* w = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, OUTPUT_DIM, INPUT_DIM, KERNEL_SIZE, KERNEL_SIZE), 0); |
103 | 0 | ccv_nnc_tensor_t* bias = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, OUTPUT_DIM), 0); |
104 | | // configure the inlets. |
105 | 0 | dsfmt_t dsfmt; |
106 | 0 | dsfmt_init_gen_rand(&dsfmt, 0); |
107 | 0 | int i; |
108 | 0 | for (i = 0; i < INPUT_DIM * KERNEL_SIZE * KERNEL_SIZE * OUTPUT_DIM; i++) |
109 | 0 | w->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) / (INPUT_DIM * KERNEL_SIZE * KERNEL_SIZE); |
110 | 0 | for (i = 0; i < INPUT_SIZE * INPUT_SIZE * INPUT_DIM * ccv_max(1, BATCH_SIZE); i++) |
111 | 0 | a->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
112 | 0 | for (i = 0; i < OUTPUT_DIM; i++) |
113 | 0 | bias->data.f32[i] = (float)i / OUTPUT_DIM; |
114 | | // Copy generated matrix values over to GPU. |
115 | 0 | ccv_nnc_tensor_t* ga = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, BATCH_SIZE, INPUT_DIM, INPUT_SIZE, INPUT_SIZE), 0); |
116 | 0 | ccv_nnc_tensor_t* gw = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, OUTPUT_DIM, INPUT_DIM, KERNEL_SIZE, KERNEL_SIZE), 0); |
117 | 0 | ccv_nnc_tensor_t* gbias = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, OUTPUT_DIM), 0); |
118 | 0 | ccv_nnc_cmd_t move = CMD_DATA_TRANSFER_FORWARD(); |
119 | 0 | move.backend = CCV_NNC_BACKEND_MPS; |
120 | 0 | assert(move.backend >= 0); |
121 | 0 | ccv_nnc_cmd_exec(move, ccv_nnc_no_hint, 0, TENSOR_LIST(a, w, bias), TENSOR_LIST(ga, gw, gbias), 0); |
122 | 0 | ccv_nnc_cmd_exec(cmd, hint, 0, TENSOR_LIST(a, w, bias), TENSOR_LIST(b), 0); |
123 | 0 | ccv_nnc_tensor_t* gc = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, BATCH_SIZE, OUTPUT_DIM, OUTPUT_SIZE, OUTPUT_SIZE), 0); |
124 | |
125 | 0 | ccv_nnc_cmd_t transform = CMD_FORMAT_TRANSFORM_FORWARD(); |
126 | 0 | transform.backend = CCV_NNC_BACKEND_MPS; |
127 | 0 | assert(transform.backend >= 0); |
128 | 0 | cmd.backend = CCV_NNC_BACKEND_MPS; |
129 | 0 | assert(cmd.backend >= 0); |
130 | 0 | cmd.algorithm = -1; |
131 | 0 | cmd = ccv_nnc_cmd_autotune(cmd, 1 * 1024 * 1024 * 1024, hint, 0, TENSOR_LIST(ga, gw, gbias), TENSOR_LIST(gc), 0); |
132 | 0 | assert(CCV_NNC_EXEC_SUCCESS == ccv_nnc_cmd_exec(cmd, hint, 0, TENSOR_LIST(ga, gw, gbias), TENSOR_LIST(gc), 0)); |
133 | 0 | ccv_nnc_tensor_t* c = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, BATCH_SIZE, OUTPUT_DIM, OUTPUT_SIZE, OUTPUT_SIZE), 0); |
134 | 0 | ccv_nnc_cmd_exec(move, ccv_nnc_no_hint, 0, TENSOR_LIST(gc), TENSOR_LIST(c), 0); |
135 | 0 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, b->data.f32, c->data.f32, OUTPUT_DIM * OUTPUT_SIZE * OUTPUT_SIZE, 1e-5, "output from mps should match from CPU"); |
136 | 0 | ccv_nnc_tensor_free(c); |
137 | 0 | ccv_nnc_tensor_free(gc); |
138 | 0 | ccv_nnc_tensor_free(bias); |
139 | 0 | ccv_nnc_tensor_free(w); |
140 | 0 | ccv_nnc_tensor_free(b); |
141 | 0 | ccv_nnc_tensor_free(a); |
142 | 0 | ccv_nnc_tensor_free(gbias); |
143 | 0 | ccv_nnc_tensor_free(gw); |
144 | 0 | ccv_nnc_tensor_free(ga); |
145 | 0 | } |
146 | | |
147 | | TEST_CASE("mps forward convolution with 1x1 kernel") |
148 | 1 | { |
149 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_CONVOLUTION_FORWARD, CCV_NNC_BACKEND_MPS)); |
150 | 0 | ccv_nnc_tensor_t* a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, BATCH_SIZE, OUTPUT_SIZE, OUTPUT_SIZE, INPUT_DIM), 0); |
151 | 0 | ccv_nnc_tensor_t* b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, BATCH_SIZE, OUTPUT_SIZE, OUTPUT_SIZE, OUTPUT_DIM), 0); |
152 | 0 | ccv_nnc_cmd_t cmd = CMD_CONVOLUTION_FORWARD(1, OUTPUT_DIM, 1, 1, INPUT_DIM); |
153 | 0 | cmd.backend = CCV_NNC_BACKEND_CPU_REF; |
154 | 0 | assert(cmd.backend >= 0); |
155 | 0 | ccv_nnc_hint_t hint = ccv_nnc_hint_auto(cmd.info, a->info, b->info); |
156 | 0 | assert(ccv_nnc_hint_verify(hint, cmd.info, a->info, b->info) == 0); |
157 | 0 | ccv_nnc_tensor_t* w = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, OUTPUT_DIM, 1, 1, INPUT_DIM), 0); |
158 | 0 | ccv_nnc_tensor_t* bias = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, OUTPUT_DIM), 0); |
159 | | // configure the inlets. |
160 | 0 | dsfmt_t dsfmt; |
161 | 0 | dsfmt_init_gen_rand(&dsfmt, 0); |
162 | 0 | int i; |
163 | 0 | for (i = 0; i < INPUT_DIM * 1 * 1 * OUTPUT_DIM; i++) |
164 | 0 | w->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) / (INPUT_DIM * 1 * 1); |
165 | 0 | for (i = 0; i < OUTPUT_SIZE * OUTPUT_SIZE * INPUT_DIM * ccv_max(1, BATCH_SIZE); i++) |
166 | 0 | a->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
167 | 0 | for (i = 0; i < OUTPUT_DIM; i++) |
168 | 0 | bias->data.f32[i] = (float)i / OUTPUT_DIM; |
169 | | // Copy generated matrix values over to GPU. |
170 | 0 | ccv_nnc_tensor_t* ga = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, BATCH_SIZE, OUTPUT_SIZE, OUTPUT_SIZE, INPUT_DIM), 0); |
171 | 0 | ccv_nnc_tensor_t* gw = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, OUTPUT_DIM, 1, 1, INPUT_DIM), 0); |
172 | 0 | ccv_nnc_tensor_t* gwo = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, OUTPUT_DIM, INPUT_DIM, 1, 1), 0); |
173 | 0 | ccv_nnc_tensor_t* gbias = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, OUTPUT_DIM), 0); |
174 | 0 | ccv_nnc_cmd_t move = CMD_DATA_TRANSFER_FORWARD(); |
175 | 0 | move.backend = CCV_NNC_BACKEND_MPS; |
176 | 0 | assert(move.backend >= 0); |
177 | 0 | ccv_nnc_cmd_exec(move, ccv_nnc_no_hint, 0, TENSOR_LIST(a, w, bias), TENSOR_LIST(ga, gw, gbias), 0); |
178 | 0 | ccv_nnc_cmd_exec(cmd, hint, 0, TENSOR_LIST(a, w, bias), TENSOR_LIST(b), 0); |
179 | 0 | ccv_nnc_tensor_t* gc = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, BATCH_SIZE, OUTPUT_SIZE, OUTPUT_SIZE, OUTPUT_DIM), 0); |
180 | |
181 | 0 | ccv_nnc_cmd_t transform = CMD_FORMAT_TRANSFORM_FORWARD(); |
182 | 0 | transform.backend = CCV_NNC_BACKEND_MPS; |
183 | 0 | assert(transform.backend >= 0); |
184 | 0 | ccv_nnc_stream_context_t* stream_context = ccv_nnc_stream_context_new(CCV_STREAM_CONTEXT_GPU); |
185 | 0 | ccv_nnc_cmd_exec(transform, ccv_nnc_no_hint, 0, TENSOR_LIST(gw), TENSOR_LIST(gwo), stream_context); |
186 | 0 | ccv_nnc_stream_context_wait(stream_context); |
187 | 0 | ccv_nnc_tensor_free(gw); |
188 | |
189 | 0 | cmd.backend = CCV_NNC_BACKEND_MPS; |
190 | 0 | assert(cmd.backend >= 0); |
191 | 0 | cmd.algorithm = -1; |
192 | 0 | cmd = ccv_nnc_cmd_autotune(cmd, 1 * 1024 * 1024 * 1024, hint, 0, TENSOR_LIST(ga, gwo, gbias), TENSOR_LIST(gc), stream_context); |
193 | 0 | assert(CCV_NNC_EXEC_SUCCESS == ccv_nnc_cmd_exec(cmd, hint, 0, TENSOR_LIST(ga, gwo, gbias), TENSOR_LIST(gc), stream_context)); |
194 | 0 | ccv_nnc_stream_context_wait(stream_context); |
195 | 0 | ccv_nnc_stream_context_free(stream_context); |
196 | 0 | ccv_nnc_tensor_t* c = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, BATCH_SIZE, OUTPUT_SIZE, OUTPUT_SIZE, OUTPUT_DIM), 0); |
197 | 0 | ccv_nnc_cmd_exec(move, ccv_nnc_no_hint, 0, TENSOR_LIST(gc), TENSOR_LIST(c), 0); |
198 | 0 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, b->data.f32, c->data.f32, OUTPUT_DIM * OUTPUT_SIZE * OUTPUT_SIZE, 1e-5, "output from mps should match from CPU"); |
199 | 0 | ccv_nnc_tensor_free(c); |
200 | 0 | ccv_nnc_tensor_free(gc); |
201 | 0 | ccv_nnc_tensor_free(bias); |
202 | 0 | ccv_nnc_tensor_free(w); |
203 | 0 | ccv_nnc_tensor_free(b); |
204 | 0 | ccv_nnc_tensor_free(a); |
205 | 0 | ccv_nnc_tensor_free(gbias); |
206 | 0 | ccv_nnc_tensor_free(gwo); |
207 | 0 | ccv_nnc_tensor_free(ga); |
208 | 0 | } |
209 | | |
210 | | TEST_CASE("mps forward convolution in nchw format with 1x1 kernel") |
211 | 1 | { |
212 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_CONVOLUTION_FORWARD, CCV_NNC_BACKEND_MPS)); |
213 | 0 | ccv_nnc_tensor_t* a = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, BATCH_SIZE, INPUT_DIM, OUTPUT_SIZE, OUTPUT_SIZE), 0); |
214 | 0 | ccv_nnc_tensor_t* b = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, BATCH_SIZE, OUTPUT_DIM, OUTPUT_SIZE, OUTPUT_SIZE), 0); |
215 | 0 | ccv_nnc_cmd_t cmd = CMD_CONVOLUTION_FORWARD(1, OUTPUT_DIM, 1, 1, INPUT_DIM); |
216 | 0 | cmd.backend = CCV_NNC_BACKEND_CPU_REF; |
217 | 0 | assert(cmd.backend >= 0); |
218 | 0 | ccv_nnc_hint_t hint = ccv_nnc_hint_auto(cmd.info, a->info, b->info); |
219 | 0 | assert(ccv_nnc_hint_verify(hint, cmd.info, a->info, b->info) == 0); |
220 | 0 | ccv_nnc_tensor_t* w = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, OUTPUT_DIM, INPUT_DIM, 1, 1), 0); |
221 | 0 | ccv_nnc_tensor_t* bias = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, OUTPUT_DIM), 0); |
222 | | // configure the inlets. |
223 | 0 | dsfmt_t dsfmt; |
224 | 0 | dsfmt_init_gen_rand(&dsfmt, 0); |
225 | 0 | int i; |
226 | 0 | for (i = 0; i < INPUT_DIM * 1 * 1 * OUTPUT_DIM; i++) |
227 | 0 | w->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) / (INPUT_DIM * 1 * 1); |
228 | 0 | for (i = 0; i < OUTPUT_SIZE * OUTPUT_SIZE * INPUT_DIM * ccv_max(1, BATCH_SIZE); i++) |
229 | 0 | a->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
230 | 0 | for (i = 0; i < OUTPUT_DIM; i++) |
231 | 0 | bias->data.f32[i] = (float)i / OUTPUT_DIM; |
232 | | // Copy generated matrix values over to GPU. |
233 | 0 | ccv_nnc_tensor_t* ga = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, BATCH_SIZE, INPUT_DIM, OUTPUT_SIZE, OUTPUT_SIZE), 0); |
234 | 0 | ccv_nnc_tensor_t* gw = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, OUTPUT_DIM, INPUT_DIM, 1, 1), 0); |
235 | 0 | ccv_nnc_tensor_t* gbias = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, OUTPUT_DIM), 0); |
236 | 0 | ccv_nnc_cmd_t move = CMD_DATA_TRANSFER_FORWARD(); |
237 | 0 | move.backend = CCV_NNC_BACKEND_MPS; |
238 | 0 | assert(move.backend >= 0); |
239 | 0 | ccv_nnc_cmd_exec(move, ccv_nnc_no_hint, 0, TENSOR_LIST(a, w, bias), TENSOR_LIST(ga, gw, gbias), 0); |
240 | 0 | ccv_nnc_cmd_exec(cmd, hint, 0, TENSOR_LIST(a, w, bias), TENSOR_LIST(b), 0); |
241 | 0 | ccv_nnc_tensor_t* gc = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, BATCH_SIZE, OUTPUT_DIM, OUTPUT_SIZE, OUTPUT_SIZE), 0); |
242 | |
243 | 0 | ccv_nnc_cmd_t transform = CMD_FORMAT_TRANSFORM_FORWARD(); |
244 | 0 | transform.backend = CCV_NNC_BACKEND_MPS; |
245 | 0 | assert(transform.backend >= 0); |
246 | 0 | cmd.backend = CCV_NNC_BACKEND_MPS; |
247 | 0 | assert(cmd.backend >= 0); |
248 | 0 | cmd.algorithm = -1; |
249 | 0 | cmd = ccv_nnc_cmd_autotune(cmd, 1 * 1024 * 1024 * 1024, hint, 0, TENSOR_LIST(ga, gw, gbias), TENSOR_LIST(gc), 0); |
250 | 0 | assert(CCV_NNC_EXEC_SUCCESS == ccv_nnc_cmd_exec(cmd, hint, 0, TENSOR_LIST(ga, gw, gbias), TENSOR_LIST(gc), 0)); |
251 | 0 | ccv_nnc_tensor_t* c = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, BATCH_SIZE, OUTPUT_DIM, OUTPUT_SIZE, OUTPUT_SIZE), 0); |
252 | 0 | ccv_nnc_cmd_exec(move, ccv_nnc_no_hint, 0, TENSOR_LIST(gc), TENSOR_LIST(c), 0); |
253 | 0 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, b->data.f32, c->data.f32, OUTPUT_DIM * OUTPUT_SIZE * OUTPUT_SIZE, 1e-5, "output from mps should match from CPU"); |
254 | 0 | ccv_nnc_tensor_free(c); |
255 | 0 | ccv_nnc_tensor_free(gc); |
256 | 0 | ccv_nnc_tensor_free(bias); |
257 | 0 | ccv_nnc_tensor_free(w); |
258 | 0 | ccv_nnc_tensor_free(b); |
259 | 0 | ccv_nnc_tensor_free(a); |
260 | 0 | ccv_nnc_tensor_free(gbias); |
261 | 0 | ccv_nnc_tensor_free(gw); |
262 | 0 | ccv_nnc_tensor_free(ga); |
263 | 0 | } |
264 | | |
265 | | TEST_CASE("mps forward convolution in half precision") |
266 | 1 | { |
267 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_CONVOLUTION_FORWARD, CCV_NNC_BACKEND_MPS)); |
268 | 0 | ccv_nnc_tensor_t* a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, BATCH_SIZE, INPUT_SIZE, INPUT_SIZE, INPUT_DIM), 0); |
269 | 0 | ccv_nnc_tensor_t* b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, BATCH_SIZE, OUTPUT_SIZE, OUTPUT_SIZE, OUTPUT_DIM), 0); |
270 | 0 | ccv_nnc_cmd_t cmd = CMD_CONVOLUTION_FORWARD(1, OUTPUT_DIM, KERNEL_SIZE, KERNEL_SIZE, INPUT_DIM); |
271 | 0 | cmd.backend = CCV_NNC_BACKEND_CPU_REF; |
272 | 0 | assert(cmd.backend >= 0); |
273 | 0 | ccv_nnc_hint_t hint = ccv_nnc_hint_auto(cmd.info, a->info, b->info); |
274 | 0 | assert(ccv_nnc_hint_verify(hint, cmd.info, a->info, b->info) == 0); |
275 | 0 | ccv_nnc_tensor_t* w = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, OUTPUT_DIM, KERNEL_SIZE, KERNEL_SIZE, INPUT_DIM), 0); |
276 | 0 | ccv_nnc_tensor_t* bias = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, OUTPUT_DIM), 0); |
277 | | // configure the inlets. |
278 | 0 | dsfmt_t dsfmt; |
279 | 0 | dsfmt_init_gen_rand(&dsfmt, 0); |
280 | 0 | int i; |
281 | 0 | for (i = 0; i < INPUT_DIM * KERNEL_SIZE * KERNEL_SIZE * OUTPUT_DIM; i++) |
282 | 0 | w->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) / (INPUT_DIM * KERNEL_SIZE * KERNEL_SIZE); |
283 | 0 | for (i = 0; i < INPUT_SIZE * INPUT_SIZE * INPUT_DIM * ccv_max(1, BATCH_SIZE); i++) |
284 | 0 | a->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
285 | 0 | for (i = 0; i < OUTPUT_DIM; i++) |
286 | 0 | bias->data.f32[i] = (float)i / OUTPUT_DIM; |
287 | 0 | ccv_nnc_tensor_t* a1 = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(16F, BATCH_SIZE, INPUT_SIZE, INPUT_SIZE, INPUT_DIM), 0); |
288 | 0 | ccv_nnc_tensor_t* w1 = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(16F, OUTPUT_DIM, KERNEL_SIZE, KERNEL_SIZE, INPUT_DIM), 0); |
289 | 0 | ccv_nnc_tensor_t* bias1 = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(16F, OUTPUT_DIM), 0); |
290 | 0 | ccv_nnc_cmd_exec(CMD_DATATYPE_CONVERSION_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(a, w, bias), TENSOR_LIST(a1, w1, bias1), 0); |
291 | | // Copy generated matrix values over to GPU. |
292 | 0 | ccv_nnc_tensor_t* ga = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 16F, BATCH_SIZE, INPUT_SIZE, INPUT_SIZE, INPUT_DIM), 0); |
293 | 0 | ccv_nnc_tensor_t* gw = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 16F, OUTPUT_DIM, KERNEL_SIZE, KERNEL_SIZE, INPUT_DIM), 0); |
294 | 0 | ccv_nnc_tensor_t* gwo = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 16F, OUTPUT_DIM, INPUT_DIM, KERNEL_SIZE, KERNEL_SIZE), 0); |
295 | 0 | ccv_nnc_tensor_t* gbias = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 16F, OUTPUT_DIM), 0); |
296 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(a1, w1, bias1), TENSOR_LIST(ga, gw, gbias), 0); |
297 | 0 | ccv_nnc_cmd_exec(cmd, hint, 0, TENSOR_LIST(a, w, bias), TENSOR_LIST(b), 0); |
298 | 0 | ccv_nnc_tensor_t* gc = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 16F, BATCH_SIZE, OUTPUT_SIZE, OUTPUT_SIZE, OUTPUT_DIM), 0); |
299 | |
300 | 0 | ccv_nnc_cmd_t transform = CMD_FORMAT_TRANSFORM_FORWARD(); |
301 | 0 | transform.backend = CCV_NNC_BACKEND_MPS; |
302 | 0 | assert(transform.backend >= 0); |
303 | 0 | ccv_nnc_stream_context_t* stream_context = ccv_nnc_stream_context_new(CCV_STREAM_CONTEXT_GPU); |
304 | 0 | ccv_nnc_cmd_exec(transform, ccv_nnc_no_hint, 0, TENSOR_LIST(gw), TENSOR_LIST(gwo), stream_context); |
305 | 0 | ccv_nnc_stream_context_wait(stream_context); |
306 | 0 | ccv_nnc_tensor_free(gw); |
307 | |
308 | 0 | cmd.backend = CCV_NNC_BACKEND_MPS; |
309 | 0 | assert(cmd.backend >= 0); |
310 | 0 | cmd.algorithm = -1; |
311 | 0 | cmd = ccv_nnc_cmd_autotune(cmd, 1 * 1024 * 1024 * 1024, hint, 0, TENSOR_LIST(ga, gwo, gbias), TENSOR_LIST(gc), stream_context); |
312 | 0 | assert(CCV_NNC_EXEC_SUCCESS == ccv_nnc_cmd_exec(cmd, hint, 0, TENSOR_LIST(ga, gwo, gbias), TENSOR_LIST(gc), stream_context)); |
313 | 0 | ccv_nnc_stream_context_wait(stream_context); |
314 | 0 | ccv_nnc_stream_context_free(stream_context); |
315 | 0 | ccv_nnc_tensor_t* c1 = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(16F, BATCH_SIZE, OUTPUT_SIZE, OUTPUT_SIZE, OUTPUT_DIM), 0); |
316 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(gc), TENSOR_LIST(c1), 0); |
317 | 0 | ccv_nnc_tensor_t* c = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, BATCH_SIZE, OUTPUT_SIZE, OUTPUT_SIZE, OUTPUT_DIM), 0); |
318 | 0 | ccv_nnc_cmd_exec(CMD_DATATYPE_CONVERSION_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(c1), TENSOR_LIST(c), 0); |
319 | 0 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, b->data.f32, c->data.f32, OUTPUT_DIM * OUTPUT_SIZE * OUTPUT_SIZE, 5e-3, "output from mps should match from CPU"); |
320 | 0 | ccv_nnc_tensor_free(c); |
321 | 0 | ccv_nnc_tensor_free(gc); |
322 | 0 | ccv_nnc_tensor_free(bias); |
323 | 0 | ccv_nnc_tensor_free(w); |
324 | 0 | ccv_nnc_tensor_free(b); |
325 | 0 | ccv_nnc_tensor_free(a); |
326 | 0 | ccv_nnc_tensor_free(c1); |
327 | 0 | ccv_nnc_tensor_free(bias1); |
328 | 0 | ccv_nnc_tensor_free(w1); |
329 | 0 | ccv_nnc_tensor_free(a1); |
330 | 0 | ccv_nnc_tensor_free(gbias); |
331 | 0 | ccv_nnc_tensor_free(gwo); |
332 | 0 | ccv_nnc_tensor_free(ga); |
333 | 0 | } |
334 | | |
335 | | TEST_CASE("mps forward convolution with dilation 2, 3") |
336 | 1 | { |
337 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_CONVOLUTION_FORWARD, CCV_NNC_BACKEND_MPS)); |
338 | 0 | ccv_nnc_tensor_t* a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, BATCH_SIZE, INPUT_SIZE, INPUT_SIZE, INPUT_DIM), 0); |
339 | 0 | ccv_nnc_tensor_t* b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, BATCH_SIZE, OUTPUT_SIZE, OUTPUT_SIZE, OUTPUT_DIM), 0); |
340 | 0 | ccv_nnc_cmd_t cmd = CMD_CONVOLUTION_FORWARD(1, OUTPUT_DIM, KERNEL_SIZE, KERNEL_SIZE, INPUT_DIM); |
341 | 0 | cmd.info.convolution.dilation[0] = 2; |
342 | 0 | cmd.info.convolution.dilation[1] = 3; |
343 | 0 | cmd.backend = CCV_NNC_BACKEND_CPU_REF; |
344 | 0 | assert(cmd.backend >= 0); |
345 | 0 | ccv_nnc_cmd_param_t modified_cmd = cmd.info; |
346 | 0 | modified_cmd.size.dim[0] = (cmd.info.size.dim[0] - 1) * ccv_max(cmd.info.convolution.dilation[0], 1) + 1; |
347 | 0 | modified_cmd.size.dim[1] = (cmd.info.size.dim[1] - 1) * ccv_max(cmd.info.convolution.dilation[1], 1) + 1; |
348 | 0 | ccv_nnc_hint_t hint = ccv_nnc_hint_auto(modified_cmd, a->info, b->info); |
349 | 0 | assert(ccv_nnc_hint_verify(hint, modified_cmd, a->info, b->info) == 0); |
350 | 0 | ccv_nnc_tensor_t* w = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, OUTPUT_DIM, KERNEL_SIZE, KERNEL_SIZE, INPUT_DIM), 0); |
351 | 0 | ccv_nnc_tensor_t* bias = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, OUTPUT_DIM), 0); |
352 | | // configure the inlets. |
353 | 0 | dsfmt_t dsfmt; |
354 | 0 | dsfmt_init_gen_rand(&dsfmt, 0); |
355 | 0 | int i; |
356 | 0 | for (i = 0; i < INPUT_DIM * KERNEL_SIZE * KERNEL_SIZE * OUTPUT_DIM; i++) |
357 | 0 | w->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) / (INPUT_DIM * KERNEL_SIZE * KERNEL_SIZE); |
358 | 0 | for (i = 0; i < INPUT_SIZE * INPUT_SIZE * INPUT_DIM * ccv_max(1, BATCH_SIZE); i++) |
359 | 0 | a->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
360 | 0 | for (i = 0; i < OUTPUT_DIM; i++) |
361 | 0 | bias->data.f32[i] = (float)i / OUTPUT_DIM; |
362 | | // Copy generated matrix values over to GPU. |
363 | 0 | ccv_nnc_tensor_t* ga = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, BATCH_SIZE, INPUT_SIZE, INPUT_SIZE, INPUT_DIM), 0); |
364 | 0 | ccv_nnc_tensor_t* gw = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, OUTPUT_DIM, KERNEL_SIZE, KERNEL_SIZE, INPUT_DIM), 0); |
365 | 0 | ccv_nnc_tensor_t* gwo = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, OUTPUT_DIM, INPUT_DIM, KERNEL_SIZE, KERNEL_SIZE), 0); |
366 | 0 | ccv_nnc_tensor_t* gbias = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, OUTPUT_DIM), 0); |
367 | 0 | ccv_nnc_cmd_t move = CMD_DATA_TRANSFER_FORWARD(); |
368 | 0 | move.backend = CCV_NNC_BACKEND_MPS; |
369 | 0 | assert(move.backend >= 0); |
370 | 0 | ccv_nnc_cmd_exec(move, ccv_nnc_no_hint, 0, TENSOR_LIST(a, w, bias), TENSOR_LIST(ga, gw, gbias), 0); |
371 | 0 | ccv_nnc_cmd_exec(cmd, hint, 0, TENSOR_LIST(a, w, bias), TENSOR_LIST(b), 0); |
372 | 0 | ccv_nnc_tensor_t* gc = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, BATCH_SIZE, OUTPUT_SIZE, OUTPUT_SIZE, OUTPUT_DIM), 0); |
373 | |
374 | 0 | ccv_nnc_cmd_t transform = CMD_FORMAT_TRANSFORM_FORWARD(); |
375 | 0 | transform.backend = CCV_NNC_BACKEND_MPS; |
376 | 0 | assert(transform.backend >= 0); |
377 | 0 | ccv_nnc_stream_context_t* stream_context = ccv_nnc_stream_context_new(CCV_STREAM_CONTEXT_GPU); |
378 | 0 | ccv_nnc_cmd_exec(transform, ccv_nnc_no_hint, 0, TENSOR_LIST(gw), TENSOR_LIST(gwo), stream_context); |
379 | 0 | ccv_nnc_stream_context_wait(stream_context); |
380 | 0 | ccv_nnc_tensor_free(gw); |
381 | |
382 | 0 | cmd.backend = CCV_NNC_BACKEND_MPS; |
383 | 0 | assert(cmd.backend >= 0); |
384 | 0 | cmd.algorithm = -1; |
385 | 0 | cmd = ccv_nnc_cmd_autotune(cmd, 1 * 1024 * 1024 * 1024, hint, 0, TENSOR_LIST(ga, gwo, gbias), TENSOR_LIST(gc), stream_context); |
386 | 0 | assert(CCV_NNC_EXEC_SUCCESS == ccv_nnc_cmd_exec(cmd, hint, 0, TENSOR_LIST(ga, gwo, gbias), TENSOR_LIST(gc), stream_context)); |
387 | 0 | ccv_nnc_stream_context_wait(stream_context); |
388 | 0 | ccv_nnc_stream_context_free(stream_context); |
389 | 0 | ccv_nnc_tensor_t* c = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, BATCH_SIZE, OUTPUT_SIZE, OUTPUT_SIZE, OUTPUT_DIM), 0); |
390 | 0 | ccv_nnc_cmd_exec(move, ccv_nnc_no_hint, 0, TENSOR_LIST(gc), TENSOR_LIST(c), 0); |
391 | 0 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, b->data.f32, c->data.f32, OUTPUT_DIM * OUTPUT_SIZE * OUTPUT_SIZE, 1e-5, "output from mps should match from CPU"); |
392 | 0 | ccv_nnc_tensor_free(c); |
393 | 0 | ccv_nnc_tensor_free(gc); |
394 | 0 | ccv_nnc_tensor_free(bias); |
395 | 0 | ccv_nnc_tensor_free(w); |
396 | 0 | ccv_nnc_tensor_free(b); |
397 | 0 | ccv_nnc_tensor_free(a); |
398 | 0 | ccv_nnc_tensor_free(gbias); |
399 | 0 | ccv_nnc_tensor_free(gwo); |
400 | 0 | ccv_nnc_tensor_free(ga); |
401 | 0 | } |
402 | | |
403 | | TEST_CASE("compare softmax with mps") |
404 | 1 | { |
405 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_SOFTMAX_FORWARD, CCV_NNC_BACKEND_MPS)); |
406 | 0 | ccv_nnc_symbolic_graph_t* const symbolic_graph = ccv_nnc_symbolic_graph_new(); |
407 | 0 | ccv_nnc_tensor_symbol_t a = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NCHW(000, 32F, 20, 10), "a"); |
408 | 0 | ccv_nnc_tensor_symbol_t b = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NCHW(000, 32F, 20, 10), "b"); |
409 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_SOFTMAX_FORWARD(), TENSOR_SYMBOL_LIST(a), TENSOR_SYMBOL_LIST(b), "softmax"); |
410 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
411 | 0 | SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH); |
412 | 0 | ccv_nnc_graph_t* graph = 0; |
413 | 0 | ccv_nnc_tensor_arena_t* tensor_arena = 0; |
414 | 0 | ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0; |
415 | 0 | ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena); |
416 | 0 | GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH); |
417 | 0 | ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 20, 10), 0); |
418 | 0 | dsfmt_t dsfmt; |
419 | 0 | dsfmt_init_gen_rand(&dsfmt, 0); |
420 | 0 | int i; |
421 | 0 | for (i = 0; i < 20 * 10; i++) |
422 | 0 | x_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
423 | 0 | ccv_nnc_tensor_t* const a_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, a); |
424 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor), TENSOR_LIST(a_tensor), 0); |
425 | 0 | ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
426 | 0 | ccv_nnc_tensor_t* const y_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 20, 10), 0); |
427 | 0 | ccv_nnc_tensor_t* const b_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, b); |
428 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(b_tensor), TENSOR_LIST(y_tensor), 0); |
429 | 0 | ccv_nnc_tensor_t* const ty = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 20, 10), 0); |
430 | 0 | ccv_nnc_cmd_exec(CMD_SOFTMAX_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor), TENSOR_LIST(ty), 0); |
431 | 0 | REQUIRE_TENSOR_EQ(ty, y_tensor, "softmax from mps should match from CPU"); |
432 | 0 | ccv_nnc_tensor_free(x_tensor); |
433 | 0 | ccv_nnc_tensor_free(y_tensor); |
434 | 0 | ccv_nnc_tensor_free(ty); |
435 | 0 | ccv_nnc_graph_free(graph); |
436 | 0 | ccv_nnc_tensor_arena_free(tensor_arena); |
437 | 0 | ccv_nnc_graph_exec_arena_free(graph_exec_arena); |
438 | 0 | ccv_nnc_symbolic_graph_free(symbolic_graph); |
439 | 0 | } |
440 | | |
441 | | TEST_CASE("compare softmax with mps in half precision") |
442 | 1 | { |
443 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_SOFTMAX_FORWARD, CCV_NNC_BACKEND_MPS)); |
444 | 0 | ccv_nnc_symbolic_graph_t* const symbolic_graph = ccv_nnc_symbolic_graph_new(); |
445 | 0 | ccv_nnc_tensor_symbol_t a = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NCHW(000, 16F, 20, 10), "a"); |
446 | 0 | ccv_nnc_tensor_symbol_t b = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NCHW(000, 16F, 20, 10), "b"); |
447 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_SOFTMAX_FORWARD(), TENSOR_SYMBOL_LIST(a), TENSOR_SYMBOL_LIST(b), "softmax"); |
448 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
449 | 0 | SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH); |
450 | 0 | ccv_nnc_graph_t* graph = 0; |
451 | 0 | ccv_nnc_tensor_arena_t* tensor_arena = 0; |
452 | 0 | ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0; |
453 | 0 | ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena); |
454 | 0 | GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH); |
455 | 0 | ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 20, 10), 0); |
456 | 0 | dsfmt_t dsfmt; |
457 | 0 | dsfmt_init_gen_rand(&dsfmt, 0); |
458 | 0 | int i; |
459 | 0 | for (i = 0; i < 20 * 10; i++) |
460 | 0 | x_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
461 | 0 | ccv_nnc_tensor_t* const a_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, a); |
462 | 0 | ccv_nnc_tensor_t* const x16_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(16F, 20, 10), 0); |
463 | 0 | ccv_nnc_cmd_exec(CMD_DATATYPE_CONVERSION_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor), TENSOR_LIST(x16_tensor), 0); |
464 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x16_tensor), TENSOR_LIST(a_tensor), 0); |
465 | 0 | ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
466 | 0 | ccv_nnc_tensor_t* const y16_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(16F, 20, 10), 0); |
467 | 0 | ccv_nnc_tensor_t* const y_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 20, 10), 0); |
468 | 0 | ccv_nnc_tensor_t* const b_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, b); |
469 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(b_tensor), TENSOR_LIST(y16_tensor), 0); |
470 | 0 | ccv_nnc_cmd_exec(CMD_DATATYPE_CONVERSION_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(y16_tensor), TENSOR_LIST(y_tensor), 0); |
471 | 0 | ccv_nnc_tensor_t* const ty = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 20, 10), 0); |
472 | 0 | ccv_nnc_cmd_exec(CMD_SOFTMAX_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor), TENSOR_LIST(ty), 0); |
473 | 0 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, ty->data.f32, y_tensor->data.f32, 20 * 10, 1e-3, "softmax from mps should match from CPU"); |
474 | 0 | ccv_nnc_tensor_free(x_tensor); |
475 | 0 | ccv_nnc_tensor_free(x16_tensor); |
476 | 0 | ccv_nnc_tensor_free(y16_tensor); |
477 | 0 | ccv_nnc_tensor_free(y_tensor); |
478 | 0 | ccv_nnc_tensor_free(ty); |
479 | 0 | ccv_nnc_graph_free(graph); |
480 | 0 | ccv_nnc_tensor_arena_free(tensor_arena); |
481 | 0 | ccv_nnc_graph_exec_arena_free(graph_exec_arena); |
482 | 0 | ccv_nnc_symbolic_graph_free(symbolic_graph); |
483 | 0 | } |
484 | | |
485 | | TEST_CASE("compare softmax gradient with mps") |
486 | 1 | { |
487 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_SOFTMAX_FORWARD, CCV_NNC_BACKEND_MPS) && |
488 | 1 | ccv_nnc_cmd_ok(CCV_NNC_SOFTMAX_BACKWARD, CCV_NNC_BACKEND_MPS)); |
489 | 0 | ccv_nnc_symbolic_graph_t* const symbolic_graph = ccv_nnc_symbolic_graph_new(); |
490 | 0 | ccv_nnc_tensor_symbol_t x = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 10, 100), "x"); |
491 | 0 | ccv_nnc_tensor_symbol_t y = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 10, 100), "y"); |
492 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_SOFTMAX_FORWARD(), TENSOR_SYMBOL_LIST(x), TENSOR_SYMBOL_LIST(y), "softmax"); |
493 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
494 | 0 | ccv_nnc_symbolic_graph_backward(symbolic_graph, TENSOR_SYMBOL_LIST(y), TENSOR_SYMBOL_LIST(x), SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph)); |
495 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
496 | 0 | SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH); |
497 | 0 | ccv_nnc_tensor_symbol_t dy = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, y); |
498 | 0 | ccv_nnc_tensor_symbol_t dx = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, x); |
499 | 0 | ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 100), 0); |
500 | 0 | dsfmt_t dsfmt; |
501 | 0 | dsfmt_init_gen_rand(&dsfmt, 0); |
502 | 0 | int i; |
503 | 0 | for (i = 0; i < 10 * 100; i++) |
504 | 0 | x_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
505 | 0 | ccv_nnc_tensor_t* const dy_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 100), 0); |
506 | 0 | for (i = 0; i < 10 * 100; i++) |
507 | 0 | dy_tensor->data.f32[i] = 0; |
508 | 0 | for (i = 0; i < 10; i++) |
509 | 0 | dy_tensor->data.f32[i * 100 + i] = 1; |
510 | 0 | ccv_nnc_tensor_t* const dyt = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 10, 100), 0); |
511 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(dy_tensor), TENSOR_LIST(dyt), 0); |
512 | 0 | ccv_nnc_graph_t* graph = 0; |
513 | 0 | ccv_nnc_tensor_arena_t* tensor_arena = 0; |
514 | 0 | ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0; |
515 | 0 | ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, TENSOR_BIND_MAP(KV(dy, dyt)), 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena); |
516 | 0 | GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH); |
517 | 0 | ccv_nnc_tensor_t* const xt = ccv_nnc_tensor_from_symbol(tensor_arena, x); |
518 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor), TENSOR_LIST(xt), 0); |
519 | 0 | ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
520 | 0 | ccv_nnc_tensor_t* const dx_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 100), 0); |
521 | 0 | ccv_nnc_tensor_t* const dxt = ccv_nnc_tensor_from_symbol(tensor_arena, dx); |
522 | 0 | ccv_nnc_tensor_t* const y_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 100), 0); |
523 | 0 | ccv_nnc_tensor_t* const yt = ccv_nnc_tensor_from_symbol(tensor_arena, y); |
524 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(dxt), TENSOR_LIST(dx_tensor), 0); |
525 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(yt), TENSOR_LIST(y_tensor), 0); |
526 | 0 | ccv_nnc_tensor_t* const ty_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 100), 0); |
527 | 0 | ccv_nnc_cmd_exec(CMD_SOFTMAX_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor), TENSOR_LIST(ty_tensor), 0); |
528 | 0 | REQUIRE_TENSOR_EQ(ty_tensor, y_tensor, "forward pass should match"); |
529 | 0 | ccv_nnc_tensor_t* const tdx_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 100), 0); |
530 | 0 | ccv_nnc_cmd_exec(CMD_SOFTMAX_BACKWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(dy_tensor, 0, ty_tensor), TENSOR_LIST(tdx_tensor), 0); |
531 | 0 | REQUIRE_TENSOR_EQ(tdx_tensor, dx_tensor, "backward pass should match"); |
532 | 0 | ccv_nnc_tensor_free(x_tensor); |
533 | 0 | ccv_nnc_tensor_free(y_tensor); |
534 | 0 | ccv_nnc_tensor_free(dx_tensor); |
535 | 0 | ccv_nnc_tensor_free(dy_tensor); |
536 | 0 | ccv_nnc_tensor_free(ty_tensor); |
537 | 0 | ccv_nnc_tensor_free(tdx_tensor); |
538 | 0 | ccv_nnc_tensor_free(dyt); |
539 | 0 | ccv_nnc_graph_free(graph); |
540 | 0 | ccv_nnc_tensor_arena_free(tensor_arena); |
541 | 0 | ccv_nnc_graph_exec_arena_free(graph_exec_arena); |
542 | 0 | ccv_nnc_symbolic_graph_free(symbolic_graph); |
543 | 0 | } |
544 | | |
545 | | TEST_CASE("compare sigmoid with mps") |
546 | 1 | { |
547 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_SIGMOID_FORWARD, CCV_NNC_BACKEND_MPS)); |
548 | 0 | ccv_nnc_symbolic_graph_t* const symbolic_graph = ccv_nnc_symbolic_graph_new(); |
549 | 0 | ccv_nnc_tensor_symbol_t a = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NCHW(000, 32F, 20, 10), "a"); |
550 | 0 | ccv_nnc_tensor_symbol_t b = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NCHW(000, 32F, 20, 10), "b"); |
551 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_SIGMOID_FORWARD(), TENSOR_SYMBOL_LIST(a), TENSOR_SYMBOL_LIST(b), "sigmoid"); |
552 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
553 | 0 | SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH); |
554 | 0 | ccv_nnc_graph_t* graph = 0; |
555 | 0 | ccv_nnc_tensor_arena_t* tensor_arena = 0; |
556 | 0 | ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0; |
557 | 0 | ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena); |
558 | 0 | GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH); |
559 | 0 | ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 20, 10), 0); |
560 | 0 | dsfmt_t dsfmt; |
561 | 0 | dsfmt_init_gen_rand(&dsfmt, 0); |
562 | 0 | int i; |
563 | 0 | for (i = 0; i < 20 * 10; i++) |
564 | 0 | x_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
565 | 0 | ccv_nnc_tensor_t* const a_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, a); |
566 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor), TENSOR_LIST(a_tensor), 0); |
567 | 0 | ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
568 | 0 | ccv_nnc_tensor_t* const y_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 20, 10), 0); |
569 | 0 | ccv_nnc_tensor_t* const b_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, b); |
570 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(b_tensor), TENSOR_LIST(y_tensor), 0); |
571 | 0 | ccv_nnc_tensor_t* const ty = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 20, 10), 0); |
572 | 0 | ccv_nnc_cmd_exec(CMD_SIGMOID_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor), TENSOR_LIST(ty), 0); |
573 | 0 | REQUIRE_TENSOR_EQ(ty, y_tensor, "sigmoid from mps should match from CPU"); |
574 | 0 | ccv_nnc_tensor_free(x_tensor); |
575 | 0 | ccv_nnc_tensor_free(y_tensor); |
576 | 0 | ccv_nnc_tensor_free(ty); |
577 | 0 | ccv_nnc_graph_free(graph); |
578 | 0 | ccv_nnc_tensor_arena_free(tensor_arena); |
579 | 0 | ccv_nnc_graph_exec_arena_free(graph_exec_arena); |
580 | 0 | ccv_nnc_symbolic_graph_free(symbolic_graph); |
581 | 0 | } |
582 | | |
583 | | TEST_CASE("compare sigmoid with mps in half precision") |
584 | 1 | { |
585 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_SIGMOID_FORWARD, CCV_NNC_BACKEND_MPS)); |
586 | 0 | ccv_nnc_symbolic_graph_t* const symbolic_graph = ccv_nnc_symbolic_graph_new(); |
587 | 0 | ccv_nnc_tensor_symbol_t a = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NCHW(000, 16F, 20, 10), "a"); |
588 | 0 | ccv_nnc_tensor_symbol_t b = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NCHW(000, 16F, 20, 10), "b"); |
589 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_SIGMOID_FORWARD(), TENSOR_SYMBOL_LIST(a), TENSOR_SYMBOL_LIST(b), "sigmoid"); |
590 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
591 | 0 | SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH); |
592 | 0 | ccv_nnc_graph_t* graph = 0; |
593 | 0 | ccv_nnc_tensor_arena_t* tensor_arena = 0; |
594 | 0 | ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0; |
595 | 0 | ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena); |
596 | 0 | GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH); |
597 | 0 | ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 20, 10), 0); |
598 | 0 | dsfmt_t dsfmt; |
599 | 0 | dsfmt_init_gen_rand(&dsfmt, 0); |
600 | 0 | int i; |
601 | 0 | for (i = 0; i < 20 * 10; i++) |
602 | 0 | x_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
603 | 0 | ccv_nnc_tensor_t* const a_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, a); |
604 | 0 | ccv_nnc_tensor_t* const x16_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(16F, 20, 10), 0); |
605 | 0 | ccv_nnc_cmd_exec(CMD_DATATYPE_CONVERSION_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor), TENSOR_LIST(x16_tensor), 0); |
606 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x16_tensor), TENSOR_LIST(a_tensor), 0); |
607 | 0 | ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
608 | 0 | ccv_nnc_tensor_t* const y16_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(16F, 20, 10), 0); |
609 | 0 | ccv_nnc_tensor_t* const y_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 20, 10), 0); |
610 | 0 | ccv_nnc_tensor_t* const b_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, b); |
611 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(b_tensor), TENSOR_LIST(y16_tensor), 0); |
612 | 0 | ccv_nnc_cmd_exec(CMD_DATATYPE_CONVERSION_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(y16_tensor), TENSOR_LIST(y_tensor), 0); |
613 | 0 | ccv_nnc_tensor_t* const ty = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 20, 10), 0); |
614 | 0 | ccv_nnc_cmd_exec(CMD_SIGMOID_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor), TENSOR_LIST(ty), 0); |
615 | 0 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, ty->data.f32, y_tensor->data.f32, 20 * 10, 1e-3, "sigmoid from mps should match from CPU"); |
616 | 0 | ccv_nnc_tensor_free(x_tensor); |
617 | 0 | ccv_nnc_tensor_free(x16_tensor); |
618 | 0 | ccv_nnc_tensor_free(y16_tensor); |
619 | 0 | ccv_nnc_tensor_free(y_tensor); |
620 | 0 | ccv_nnc_tensor_free(ty); |
621 | 0 | ccv_nnc_graph_free(graph); |
622 | 0 | ccv_nnc_tensor_arena_free(tensor_arena); |
623 | 0 | ccv_nnc_graph_exec_arena_free(graph_exec_arena); |
624 | 0 | ccv_nnc_symbolic_graph_free(symbolic_graph); |
625 | 0 | } |
626 | | |
627 | | |
628 | | TEST_CASE("compare sigmoid gradient with mps") |
629 | 1 | { |
630 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_SIGMOID_FORWARD, CCV_NNC_BACKEND_MPS) && |
631 | 1 | ccv_nnc_cmd_ok(CCV_NNC_SIGMOID_BACKWARD, CCV_NNC_BACKEND_MPS)); |
632 | 0 | ccv_nnc_symbolic_graph_t* const symbolic_graph = ccv_nnc_symbolic_graph_new(); |
633 | 0 | ccv_nnc_tensor_symbol_t x = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 10, 100), "x"); |
634 | 0 | ccv_nnc_tensor_symbol_t y = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 10, 100), "y"); |
635 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_SIGMOID_FORWARD(), TENSOR_SYMBOL_LIST(x), TENSOR_SYMBOL_LIST(y), "sigmoid"); |
636 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
637 | 0 | ccv_nnc_symbolic_graph_backward(symbolic_graph, TENSOR_SYMBOL_LIST(y), TENSOR_SYMBOL_LIST(x), SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph)); |
638 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
639 | 0 | SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH); |
640 | 0 | ccv_nnc_tensor_symbol_t dy = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, y); |
641 | 0 | ccv_nnc_tensor_symbol_t dx = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, x); |
642 | 0 | ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 100), 0); |
643 | 0 | dsfmt_t dsfmt; |
644 | 0 | dsfmt_init_gen_rand(&dsfmt, 0); |
645 | 0 | int i; |
646 | 0 | for (i = 0; i < 10 * 100; i++) |
647 | 0 | x_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
648 | 0 | ccv_nnc_tensor_t* const dy_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 100), 0); |
649 | 0 | for (i = 0; i < 10 * 100; i++) |
650 | 0 | dy_tensor->data.f32[i] = 0; |
651 | 0 | for (i = 0; i < 10; i++) |
652 | 0 | dy_tensor->data.f32[i * 100 + i] = 1; |
653 | 0 | ccv_nnc_tensor_t* const dyt = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 10, 100), 0); |
654 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(dy_tensor), TENSOR_LIST(dyt), 0); |
655 | 0 | ccv_nnc_graph_t* graph = 0; |
656 | 0 | ccv_nnc_tensor_arena_t* tensor_arena = 0; |
657 | 0 | ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0; |
658 | 0 | ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, TENSOR_BIND_MAP(KV(dy, dyt)), TENSOR_SYMBOL_LIST(y), SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena); |
659 | 0 | GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH); |
660 | 0 | ccv_nnc_tensor_t* const xt = ccv_nnc_tensor_from_symbol(tensor_arena, x); |
661 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor), TENSOR_LIST(xt), 0); |
662 | 0 | ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
663 | 0 | ccv_nnc_tensor_t* const dx_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 100), 0); |
664 | 0 | ccv_nnc_tensor_t* const dxt = ccv_nnc_tensor_from_symbol(tensor_arena, dx); |
665 | 0 | ccv_nnc_tensor_t* const y_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 100), 0); |
666 | 0 | ccv_nnc_tensor_t* const yt = ccv_nnc_tensor_from_symbol(tensor_arena, y); |
667 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(dxt), TENSOR_LIST(dx_tensor), 0); |
668 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(yt), TENSOR_LIST(y_tensor), 0); |
669 | 0 | ccv_nnc_tensor_t* const ty_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 100), 0); |
670 | 0 | ccv_nnc_cmd_exec(CMD_SIGMOID_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor), TENSOR_LIST(ty_tensor), 0); |
671 | 0 | REQUIRE_TENSOR_EQ(ty_tensor, y_tensor, "forward pass should match"); |
672 | 0 | ccv_nnc_tensor_t* const tdx_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 100), 0); |
673 | 0 | ccv_nnc_cmd_exec(CMD_SIGMOID_BACKWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(dy_tensor, 0, ty_tensor), TENSOR_LIST(tdx_tensor), 0); |
674 | 0 | REQUIRE_TENSOR_EQ(tdx_tensor, dx_tensor, "backward pass should match"); |
675 | 0 | ccv_nnc_tensor_free(x_tensor); |
676 | 0 | ccv_nnc_tensor_free(y_tensor); |
677 | 0 | ccv_nnc_tensor_free(dx_tensor); |
678 | 0 | ccv_nnc_tensor_free(dy_tensor); |
679 | 0 | ccv_nnc_tensor_free(ty_tensor); |
680 | 0 | ccv_nnc_tensor_free(tdx_tensor); |
681 | 0 | ccv_nnc_tensor_free(dyt); |
682 | 0 | ccv_nnc_graph_free(graph); |
683 | 0 | ccv_nnc_tensor_arena_free(tensor_arena); |
684 | 0 | ccv_nnc_graph_exec_arena_free(graph_exec_arena); |
685 | 0 | ccv_nnc_symbolic_graph_free(symbolic_graph); |
686 | 0 | } |
687 | | |
688 | | TEST_CASE("compare relu with mps") |
689 | 1 | { |
690 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_RELU_FORWARD, CCV_NNC_BACKEND_MPS)); |
691 | 0 | ccv_nnc_symbolic_graph_t* symbolic_graph = ccv_nnc_symbolic_graph_new(); |
692 | 0 | ccv_nnc_tensor_symbol_t x = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 7, 7, 10), "x"); |
693 | 0 | ccv_nnc_tensor_symbol_t y = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 7, 7, 10), "y"); |
694 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_RELU_FORWARD(), TENSOR_SYMBOL_LIST(x), TENSOR_SYMBOL_LIST(y), "relu"); |
695 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
696 | 0 | ccv_nnc_graph_t* graph = 0; |
697 | 0 | ccv_nnc_tensor_arena_t* tensor_arena = 0; |
698 | 0 | ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0; |
699 | 0 | ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena); |
700 | 0 | SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH); |
701 | 0 | GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH); |
702 | 0 | dsfmt_t dsfmt; |
703 | 0 | dsfmt_init_gen_rand(&dsfmt, 0); |
704 | 0 | ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 7, 7, 10), 0); |
705 | 0 | int i; |
706 | 0 | for (i = 0; i < 7 * 7 * 10; i++) |
707 | 0 | x_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) * 2 - 1; |
708 | 0 | ccv_nnc_tensor_t* const xt = ccv_nnc_tensor_from_symbol(tensor_arena, x); |
709 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor), TENSOR_LIST(xt), 0); |
710 | 0 | ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
711 | 0 | ccv_nnc_tensor_t* const y_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 7, 7, 10), 0); |
712 | 0 | ccv_nnc_cmd_exec(CMD_RELU_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor), TENSOR_LIST(y_tensor), 0); |
713 | 0 | ccv_nnc_tensor_t* const yt = ccv_nnc_tensor_from_symbol(tensor_arena, y); |
714 | 0 | ccv_nnc_tensor_t* const cpu_y = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 7, 7, 10), 0); |
715 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(yt), TENSOR_LIST(cpu_y), 0); |
716 | 0 | REQUIRE_TENSOR_EQ(y_tensor, cpu_y, "mps result should equal cpu result"); |
717 | 0 | ccv_nnc_symbolic_graph_free(symbolic_graph); |
718 | 0 | ccv_nnc_tensor_arena_free(tensor_arena); |
719 | 0 | ccv_nnc_graph_exec_arena_free(graph_exec_arena); |
720 | 0 | ccv_nnc_graph_free(graph); |
721 | 0 | ccv_nnc_tensor_free(x_tensor); |
722 | 0 | ccv_nnc_tensor_free(y_tensor); |
723 | 0 | ccv_nnc_tensor_free(cpu_y); |
724 | 0 | } |
725 | | |
726 | | TEST_CASE("compare relu with mps in half precision") |
727 | 1 | { |
728 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_RELU_FORWARD, CCV_NNC_BACKEND_MPS)); |
729 | 0 | ccv_nnc_symbolic_graph_t* symbolic_graph = ccv_nnc_symbolic_graph_new(); |
730 | 0 | ccv_nnc_tensor_symbol_t x = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 16F, 7, 7, 10), "x"); |
731 | 0 | ccv_nnc_tensor_symbol_t y = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 16F, 7, 7, 10), "y"); |
732 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_RELU_FORWARD(), TENSOR_SYMBOL_LIST(x), TENSOR_SYMBOL_LIST(y), "relu"); |
733 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
734 | 0 | ccv_nnc_graph_t* graph = 0; |
735 | 0 | ccv_nnc_tensor_arena_t* tensor_arena = 0; |
736 | 0 | ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0; |
737 | 0 | ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena); |
738 | 0 | SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH); |
739 | 0 | GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH); |
740 | 0 | dsfmt_t dsfmt; |
741 | 0 | dsfmt_init_gen_rand(&dsfmt, 0); |
742 | 0 | ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 7, 7, 10), 0); |
743 | 0 | int i; |
744 | 0 | for (i = 0; i < 7 * 7 * 10; i++) |
745 | 0 | x_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) * 2 - 1; |
746 | 0 | ccv_nnc_tensor_t* const x16_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(16F, 7, 7, 10), 0); |
747 | 0 | ccv_nnc_tensor_t* const xt = ccv_nnc_tensor_from_symbol(tensor_arena, x); |
748 | 0 | ccv_nnc_cmd_exec(CMD_DATATYPE_CONVERSION_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor), TENSOR_LIST(x16_tensor), 0); |
749 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x16_tensor), TENSOR_LIST(xt), 0); |
750 | 0 | ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
751 | 0 | ccv_nnc_tensor_t* const y_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 7, 7, 10), 0); |
752 | 0 | ccv_nnc_cmd_exec(CMD_RELU_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor), TENSOR_LIST(y_tensor), 0); |
753 | 0 | ccv_nnc_tensor_t* const yt = ccv_nnc_tensor_from_symbol(tensor_arena, y); |
754 | 0 | ccv_nnc_tensor_t* const cpu_y16 = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(16F, 7, 7, 10), 0); |
755 | 0 | ccv_nnc_tensor_t* const cpu_y = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 7, 7, 10), 0); |
756 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(yt), TENSOR_LIST(cpu_y16), 0); |
757 | 0 | ccv_nnc_cmd_exec(CMD_DATATYPE_CONVERSION_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(cpu_y16), TENSOR_LIST(cpu_y), 0); |
758 | 0 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, y_tensor->data.f32, cpu_y->data.f32, 7 * 7 * 10, 1e-3, "mps result should equal cpu result"); |
759 | 0 | ccv_nnc_symbolic_graph_free(symbolic_graph); |
760 | 0 | ccv_nnc_tensor_arena_free(tensor_arena); |
761 | 0 | ccv_nnc_graph_exec_arena_free(graph_exec_arena); |
762 | 0 | ccv_nnc_graph_free(graph); |
763 | 0 | ccv_nnc_tensor_free(x_tensor); |
764 | 0 | ccv_nnc_tensor_free(x16_tensor); |
765 | 0 | ccv_nnc_tensor_free(y_tensor); |
766 | 0 | ccv_nnc_tensor_free(cpu_y); |
767 | 0 | ccv_nnc_tensor_free(cpu_y16); |
768 | 0 | } |
769 | | |
770 | | TEST_CASE("compare layer norm with mps") |
771 | 1 | { |
772 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_LAYER_NORM_FORWARD, CCV_NNC_BACKEND_MPS) && |
773 | 1 | ccv_nnc_cmd_ok(CCV_NNC_SET_FORWARD, CCV_NNC_BACKEND_MPS)); |
774 | 0 | ccv_nnc_symbolic_graph_t* const symbolic_graph = ccv_nnc_symbolic_graph_new(); |
775 | 0 | ccv_nnc_tensor_symbol_t x = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 2, 2, 10), "host x"); |
776 | 0 | ccv_nnc_tensor_symbol_t bx = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, 2, 2, 10), "x"); |
777 | 0 | ccv_nnc_tensor_symbol_t by = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, 2, 2, 10), "y"); |
778 | 0 | ccv_nnc_tensor_symbol_t y = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 2, 2, 10), "host y"); |
779 | 0 | ccv_nnc_tensor_symbol_t scale = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 1, 2, 2, 10), "scale"); |
780 | 0 | ccv_nnc_tensor_symbol_t bias = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 1, 2, 2, 10), "bias"); |
781 | 0 | ccv_nnc_tensor_symbol_t saved_mean = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, 1, 1, 1), "saved_mean"); |
782 | 0 | ccv_nnc_tensor_symbol_t saved_inv_std = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, 1, 1, 1), "saved_inv_std"); |
783 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_DATA_TRANSFER_FORWARD(), TENSOR_SYMBOL_LIST(x), TENSOR_SYMBOL_LIST(bx), "transfer x"); |
784 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_LAYER_NORM_FORWARD(1e-6, 1, 1, 2, 3), TENSOR_SYMBOL_LIST(bx, scale, bias), TENSOR_SYMBOL_LIST(by, saved_mean, saved_inv_std), "layer_norm"); |
785 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_DATA_TRANSFER_FORWARD(), TENSOR_SYMBOL_LIST(by), TENSOR_SYMBOL_LIST(y), "transfer y"); |
786 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
787 | 0 | ccv_nnc_graph_t* graph = 0; |
788 | 0 | ccv_nnc_tensor_arena_t* tensor_arena = 0; |
789 | 0 | ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0; |
790 | 0 | ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena); |
791 | 0 | SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH); |
792 | 0 | GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH); |
793 | 0 | dsfmt_t dsfmt; |
794 | 0 | float xdata[2 * 2 * 2 * 10]; |
795 | 0 | ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, x); |
796 | 0 | int i; |
797 | 0 | dsfmt_init_gen_rand(&dsfmt, 1); |
798 | 0 | for (i = 0; i < 2 * 2 * 2 * 10; i++) |
799 | 0 | x_tensor->data.f32[i] = xdata[i] = dsfmt_genrand_open_close(&dsfmt); |
800 | 0 | float scaledata[1 * 2 * 2 * 10]; |
801 | 0 | float biasdata[1 * 2 * 2 * 10]; |
802 | 0 | for (i = 0; i < 1 * 2 * 2 * 10; i++) |
803 | 0 | { |
804 | 0 | scaledata[i] = dsfmt_genrand_open_close(&dsfmt); |
805 | 0 | biasdata[i] = dsfmt_genrand_open_close(&dsfmt); |
806 | 0 | } |
807 | 0 | ccv_nnc_tensor_t scale_tensor = ccv_nnc_tensor(scaledata, CPU_TENSOR_NHWC(32F, 1, 2, 2, 10), 0); |
808 | 0 | ccv_nnc_tensor_t bias_tensor = ccv_nnc_tensor(biasdata, CPU_TENSOR_NHWC(32F, 1, 2, 2, 10), 0); |
809 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(&scale_tensor, &bias_tensor), TENSOR_LIST(ccv_nnc_tensor_from_symbol(tensor_arena, scale), ccv_nnc_tensor_from_symbol(tensor_arena, bias)), 0); |
810 | 0 | ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
811 | 0 | ccv_nnc_tensor_t* const y_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, y); |
812 | 0 | ccv_nnc_symbolic_graph_free(symbolic_graph); |
813 | 0 | ccv_nnc_graph_exec_arena_free(graph_exec_arena); |
814 | 0 | ccv_nnc_graph_free(graph); |
815 | 0 | ccv_nnc_symbolic_graph_t* const cpu_symbolic_graph = ccv_nnc_symbolic_graph_new(); |
816 | 0 | ccv_nnc_tensor_symbol_t cx = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 2, 2, 10), "x"); |
817 | 0 | ccv_nnc_tensor_symbol_t cy = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 2, 2, 10), "y"); |
818 | 0 | ccv_nnc_tensor_symbol_t cscale = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 1, 2, 2, 10), "scale"); |
819 | 0 | ccv_nnc_tensor_symbol_t cbias = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 1, 2, 2, 10), "bias"); |
820 | 0 | ccv_nnc_tensor_symbol_t csaved_mean = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 1, 1, 1), "saved_mean"); |
821 | 0 | ccv_nnc_tensor_symbol_t csaved_inv_std = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 1, 1, 1), "saved_inv_std"); |
822 | 0 | ccv_nnc_graph_exec_symbol_new(cpu_symbolic_graph, CMD_LAYER_NORM_FORWARD(1e-6, 1, 1, 2, 3), TENSOR_SYMBOL_LIST(cx, cscale, cbias), TENSOR_SYMBOL_LIST(cy, csaved_mean, csaved_inv_std), "layer_norm"); |
823 | 0 | ccv_nnc_graph_exec_symbol_autogen(cpu_symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
824 | 0 | ccv_nnc_graph_t* cpu_graph = 0; |
825 | 0 | ccv_nnc_tensor_arena_t* cpu_tensor_arena = 0; |
826 | 0 | ccv_nnc_graph_exec_arena_t* cpu_graph_exec_arena = 0; |
827 | 0 | ccv_nnc_symbolic_graph_compile(cpu_symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(cpu_symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(cpu_symbolic_graph), &cpu_graph, &cpu_tensor_arena, &cpu_graph_exec_arena); |
828 | 0 | ccv_nnc_tensor_t* const cx_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, cx); |
829 | 0 | memcpy(cx_tensor->data.f32, xdata, sizeof(float) * 2 * 2 * 2 * 10); |
830 | 0 | ccv_nnc_tensor_t* const cscale_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, cscale); |
831 | 0 | memcpy(cscale_tensor->data.f32, scaledata, sizeof(float) * 1 * 2 * 2 * 10); |
832 | 0 | ccv_nnc_tensor_t* const cbias_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, cbias); |
833 | 0 | memcpy(cbias_tensor->data.f32, biasdata, sizeof(float) * 1 * 2 * 2 * 10); |
834 | 0 | ccv_nnc_graph_run(cpu_graph, 0, TRAVERSE_FULL, 0, 0); |
835 | 0 | ccv_nnc_tensor_t* const cy_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, cy); |
836 | | // Note that MPS and the reference implementations treat epsilon differently, hence the comparison tolerance. |
837 | 0 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, y_tensor->data.f32, cy_tensor->data.f32, 2 * 2 * 2 * 10, 1e-4, "layer norm result from mps should match the one from the reference implementation"); |
838 | 0 | ccv_nnc_symbolic_graph_free(cpu_symbolic_graph); |
839 | 0 | ccv_nnc_tensor_arena_free(tensor_arena); |
840 | 0 | ccv_nnc_tensor_arena_free(cpu_tensor_arena); |
841 | 0 | ccv_nnc_graph_exec_arena_free(cpu_graph_exec_arena); |
842 | 0 | ccv_nnc_graph_free(cpu_graph); |
843 | 0 | } |
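
In the test above, CMD_LAYER_NORM_FORWARD(1e-6, 1, 1, 2, 3) normalizes each of the 2 samples over axes 1, 2 and 3 (2 * 2 * 10 = 40 elements), which is consistent with the (2, 1, 1, 1) shape of saved_mean and saved_inv_std. A minimal CPU sketch of that computation, assuming the usual layer norm formula y = (x - mean) * inv_std * scale + bias (an illustration, not the ccv reference kernel):

#include <math.h>

/* Layer norm over the last dim elements of each of n samples. Passing
   scale = NULL and bias = NULL gives the plain normalization exercised by
   the "without scale / bias" test below. */
static void layer_norm_ref(const float* x, const float* scale, const float* bias,
	float* y, int n, int dim, float epsilon)
{
	int i, j;
	for (i = 0; i < n; i++)
	{
		const float* xi = x + i * dim;
		float* yi = y + i * dim;
		float mean = 0, var = 0;
		for (j = 0; j < dim; j++)
			mean += xi[j];
		mean /= dim;
		for (j = 0; j < dim; j++)
			var += (xi[j] - mean) * (xi[j] - mean);
		var /= dim;
		const float inv_std = 1.f / sqrtf(var + epsilon);
		for (j = 0; j < dim; j++)
		{
			const float norm = (xi[j] - mean) * inv_std;
			yi[j] = scale ? norm * scale[j] + (bias ? bias[j] : 0.f) : norm;
		}
	}
}

For this test, n = 2 and dim = 40, with scale and bias indexed by the trailing 40 positions, matching their (1, 2, 2, 10) shapes.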
844 | | |
845 | | TEST_CASE("compare layer norm with mps without scale / bias") |
846 | 1 | { |
847 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_LAYER_NORM_FORWARD, CCV_NNC_BACKEND_MPS) && |
848 | 1 | ccv_nnc_cmd_ok(CCV_NNC_SET_FORWARD, CCV_NNC_BACKEND_MPS)); |
849 | 0 | ccv_nnc_symbolic_graph_t* const symbolic_graph = ccv_nnc_symbolic_graph_new(); |
850 | 0 | ccv_nnc_tensor_symbol_t x = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 2, 2, 10), "host x"); |
851 | 0 | ccv_nnc_tensor_symbol_t bx = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, 2, 2, 10), "x"); |
852 | 0 | ccv_nnc_tensor_symbol_t by = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, 2, 2, 10), "y"); |
853 | 0 | ccv_nnc_tensor_symbol_t y = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 2, 2, 10), "host y"); |
854 | 0 | ccv_nnc_tensor_symbol_t saved_mean = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, 1, 1, 1), "saved_mean"); |
855 | 0 | ccv_nnc_tensor_symbol_t saved_inv_std = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, 1, 1, 1), "saved_inv_std"); |
856 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_DATA_TRANSFER_FORWARD(), TENSOR_SYMBOL_LIST(x), TENSOR_SYMBOL_LIST(bx), "transfer x"); |
857 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_LAYER_NORM_FORWARD(1e-6, 0, 1, 2, 3), TENSOR_SYMBOL_LIST(bx), TENSOR_SYMBOL_LIST(by, saved_mean, saved_inv_std), "layer_norm"); |
858 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_DATA_TRANSFER_FORWARD(), TENSOR_SYMBOL_LIST(by), TENSOR_SYMBOL_LIST(y), "transfer y"); |
859 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
860 | 0 | ccv_nnc_graph_t* graph = 0; |
861 | 0 | ccv_nnc_tensor_arena_t* tensor_arena = 0; |
862 | 0 | ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0; |
863 | 0 | ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena); |
864 | 0 | SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH); |
865 | 0 | GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH); |
866 | 0 | dsfmt_t dsfmt; |
867 | 0 | float xdata[2 * 2 * 2 * 10]; |
868 | 0 | ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, x); |
869 | 0 | int i; |
870 | 0 | dsfmt_init_gen_rand(&dsfmt, 1); |
871 | 0 | for (i = 0; i < 2 * 2 * 2 * 10; i++) |
872 | 0 | x_tensor->data.f32[i] = xdata[i] = dsfmt_genrand_open_close(&dsfmt); |
873 | 0 | ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
874 | 0 | ccv_nnc_tensor_t* const y_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, y); |
875 | 0 | ccv_nnc_symbolic_graph_free(symbolic_graph); |
876 | 0 | ccv_nnc_graph_exec_arena_free(graph_exec_arena); |
877 | 0 | ccv_nnc_graph_free(graph); |
878 | 0 | ccv_nnc_symbolic_graph_t* const cpu_symbolic_graph = ccv_nnc_symbolic_graph_new(); |
879 | 0 | ccv_nnc_tensor_symbol_t cx = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 2, 2, 10), "x"); |
880 | 0 | ccv_nnc_tensor_symbol_t cy = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 2, 2, 10), "y"); |
881 | 0 | ccv_nnc_tensor_symbol_t csaved_mean = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 1, 1, 1), "saved_mean"); |
882 | 0 | ccv_nnc_tensor_symbol_t csaved_inv_std = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 1, 1, 1), "saved_inv_std"); |
883 | 0 | ccv_nnc_graph_exec_symbol_new(cpu_symbolic_graph, CMD_LAYER_NORM_FORWARD(1e-6, 0, 1, 2, 3), TENSOR_SYMBOL_LIST(cx), TENSOR_SYMBOL_LIST(cy, csaved_mean, csaved_inv_std), "layer_norm"); |
884 | 0 | ccv_nnc_graph_exec_symbol_autogen(cpu_symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
885 | 0 | ccv_nnc_graph_t* cpu_graph = 0; |
886 | 0 | ccv_nnc_tensor_arena_t* cpu_tensor_arena = 0; |
887 | 0 | ccv_nnc_graph_exec_arena_t* cpu_graph_exec_arena = 0; |
888 | 0 | ccv_nnc_symbolic_graph_compile(cpu_symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(cpu_symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(cpu_symbolic_graph), &cpu_graph, &cpu_tensor_arena, &cpu_graph_exec_arena); |
889 | 0 | ccv_nnc_tensor_t* const cx_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, cx); |
890 | 0 | memcpy(cx_tensor->data.f32, xdata, sizeof(float) * 2 * 2 * 2 * 10); |
891 | 0 | ccv_nnc_graph_run(cpu_graph, 0, TRAVERSE_FULL, 0, 0); |
892 | 0 | ccv_nnc_tensor_t* const cy_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, cy); |
893 | | // Note that MPS and the reference implementations treat epsilon differently, hence the comparison tolerance. |
894 | 0 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, y_tensor->data.f32, cy_tensor->data.f32, 2 * 2 * 2 * 10, 1e-4, "layer norm result from mps should match the one from the reference implementation"); |
895 | 0 | ccv_nnc_symbolic_graph_free(cpu_symbolic_graph); |
896 | 0 | ccv_nnc_tensor_arena_free(tensor_arena); |
897 | 0 | ccv_nnc_tensor_arena_free(cpu_tensor_arena); |
898 | 0 | ccv_nnc_graph_exec_arena_free(cpu_graph_exec_arena); |
899 | 0 | ccv_nnc_graph_free(cpu_graph); |
900 | 0 | } |
901 | | |
902 | | TEST_CASE("compare group norm with mps") |
903 | 1 | { |
904 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_GROUP_NORM_FORWARD, CCV_NNC_BACKEND_MPS) && |
905 | 1 | ccv_nnc_cmd_ok(CCV_NNC_SET_FORWARD, CCV_NNC_BACKEND_MPS)); |
906 | 0 | ccv_nnc_symbolic_graph_t* const symbolic_graph = ccv_nnc_symbolic_graph_new(); |
907 | 0 | ccv_nnc_tensor_symbol_t x = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 16, 2, 10), "host x"); |
908 | 0 | ccv_nnc_tensor_symbol_t bx = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, 16, 2, 10), "x"); |
909 | 0 | ccv_nnc_tensor_symbol_t by = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, 16, 2, 10), "y"); |
910 | 0 | ccv_nnc_tensor_symbol_t y = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 16, 2, 10), "host y"); |
911 | 0 | ccv_nnc_tensor_symbol_t scale = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 1, 16, 2, 10), "scale"); |
912 | 0 | ccv_nnc_tensor_symbol_t bias = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 1, 16, 2, 10), "bias"); |
913 | 0 | ccv_nnc_tensor_symbol_t saved_mean = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, 4, 2, 10), "saved_mean"); |
914 | 0 | ccv_nnc_tensor_symbol_t saved_inv_std = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, 4, 2, 10), "saved_inv_std"); |
915 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_DATA_TRANSFER_FORWARD(), TENSOR_SYMBOL_LIST(x), TENSOR_SYMBOL_LIST(bx), "transfer x"); |
916 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_GROUP_NORM_FORWARD(1, 4, 1e-7, 1), TENSOR_SYMBOL_LIST(bx, scale, bias), TENSOR_SYMBOL_LIST(by, saved_mean, saved_inv_std), "group_norm"); |
917 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_DATA_TRANSFER_FORWARD(), TENSOR_SYMBOL_LIST(by), TENSOR_SYMBOL_LIST(y), "transfer y"); |
918 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
919 | 0 | ccv_nnc_graph_t* graph = 0; |
920 | 0 | ccv_nnc_tensor_arena_t* tensor_arena = 0; |
921 | 0 | ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0; |
922 | 0 | ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena); |
923 | 0 | SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH); |
924 | 0 | GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH); |
925 | 0 | dsfmt_t dsfmt; |
926 | 0 | float xdata[2 * 16 * 2 * 10]; |
927 | 0 | ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, x); |
928 | 0 | int i; |
929 | 0 | dsfmt_init_gen_rand(&dsfmt, 1); |
930 | 0 | for (i = 0; i < 2 * 16 * 2 * 10; i++) |
931 | 0 | x_tensor->data.f32[i] = xdata[i] = dsfmt_genrand_open_close(&dsfmt); |
932 | 0 | float scaledata[1 * 16 * 2 * 10]; |
933 | 0 | float biasdata[1 * 16 * 2 * 10]; |
934 | 0 | for (i = 0; i < 1 * 16 * 2 * 10; i++) |
935 | 0 | { |
936 | 0 | scaledata[i] = dsfmt_genrand_open_close(&dsfmt); |
937 | 0 | biasdata[i] = dsfmt_genrand_open_close(&dsfmt); |
938 | 0 | } |
939 | 0 | ccv_nnc_tensor_t scale_tensor = ccv_nnc_tensor(scaledata, CPU_TENSOR_NHWC(32F, 1, 16, 2, 10), 0); |
940 | 0 | ccv_nnc_tensor_t bias_tensor = ccv_nnc_tensor(biasdata, CPU_TENSOR_NHWC(32F, 1, 16, 2, 10), 0); |
941 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(&scale_tensor, &bias_tensor), TENSOR_LIST(ccv_nnc_tensor_from_symbol(tensor_arena, scale), ccv_nnc_tensor_from_symbol(tensor_arena, bias)), 0); |
942 | 0 | ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
943 | 0 | ccv_nnc_tensor_t* const y_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, y); |
944 | 0 | ccv_nnc_symbolic_graph_free(symbolic_graph); |
945 | 0 | ccv_nnc_graph_exec_arena_free(graph_exec_arena); |
946 | 0 | ccv_nnc_graph_free(graph); |
947 | 0 | ccv_nnc_symbolic_graph_t* const cpu_symbolic_graph = ccv_nnc_symbolic_graph_new(); |
948 | 0 | ccv_nnc_tensor_symbol_t cx = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 16, 2, 10), "x"); |
949 | 0 | ccv_nnc_tensor_symbol_t cy = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 16, 2, 10), "y"); |
950 | 0 | ccv_nnc_tensor_symbol_t cscale = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 1, 16, 2, 10), "scale"); |
951 | 0 | ccv_nnc_tensor_symbol_t cbias = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 1, 16, 2, 10), "bias"); |
952 | 0 | ccv_nnc_tensor_symbol_t csaved_mean = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 4, 2, 10), "saved_mean"); |
953 | 0 | ccv_nnc_tensor_symbol_t csaved_inv_std = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 4, 2, 10), "saved_inv_std"); |
954 | 0 | ccv_nnc_graph_exec_symbol_new(cpu_symbolic_graph, CMD_GROUP_NORM_FORWARD(1, 4, 1e-7, 1), TENSOR_SYMBOL_LIST(cx, cscale, cbias), TENSOR_SYMBOL_LIST(cy, csaved_mean, csaved_inv_std), "group_norm"); |
955 | 0 | ccv_nnc_graph_exec_symbol_autogen(cpu_symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
956 | 0 | ccv_nnc_graph_t* cpu_graph = 0; |
957 | 0 | ccv_nnc_tensor_arena_t* cpu_tensor_arena = 0; |
958 | 0 | ccv_nnc_graph_exec_arena_t* cpu_graph_exec_arena = 0; |
959 | 0 | ccv_nnc_symbolic_graph_compile(cpu_symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(cpu_symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(cpu_symbolic_graph), &cpu_graph, &cpu_tensor_arena, &cpu_graph_exec_arena); |
960 | 0 | ccv_nnc_tensor_t* const cx_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, cx); |
961 | 0 | memcpy(cx_tensor->data.f32, xdata, sizeof(float) * 2 * 16 * 2 * 10); |
962 | 0 | ccv_nnc_tensor_t* const cscale_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, cscale); |
963 | 0 | memcpy(cscale_tensor->data.f32, scaledata, sizeof(float) * 1 * 16 * 2 * 10); |
964 | 0 | ccv_nnc_tensor_t* const cbias_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, cbias); |
965 | 0 | memcpy(cbias_tensor->data.f32, biasdata, sizeof(float) * 1 * 16 * 2 * 10); |
966 | 0 | ccv_nnc_graph_run(cpu_graph, 0, TRAVERSE_FULL, 0, 0); |
967 | 0 | ccv_nnc_tensor_t* const cy_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, cy); |
968 | | // Note that MPS and the reference implementations treat epsilon differently, hence the comparison tolerance. |
969 | 0 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, y_tensor->data.f32, cy_tensor->data.f32, 2 * 16 * 2 * 10, 1e-3, "group norm result from mps should match the one from the reference implementation"); |
970 | 0 | ccv_nnc_symbolic_graph_free(cpu_symbolic_graph); |
971 | 0 | ccv_nnc_tensor_arena_free(tensor_arena); |
972 | 0 | ccv_nnc_tensor_arena_free(cpu_tensor_arena); |
973 | 0 | ccv_nnc_graph_exec_arena_free(cpu_graph_exec_arena); |
974 | 0 | ccv_nnc_graph_free(cpu_graph); |
975 | 0 | } |
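
The shapes above pin down what this group norm is doing: with 16 channels split into 4 groups (CMD_GROUP_NORM_FORWARD(1, 4, 1e-7, 1), group axis 1) and saved_mean / saved_inv_std of shape (2, 4, 2, 10), the statistics appear to be computed per (sample, group, h, w) position over the 4 channels that fall in each group. A rough sketch under that reading (an assumption drawn from the declared shapes, not the ccv reference kernel):

#include <math.h>

/* x and y are laid out row-major over (n, c, h, w); scale/bias over (1, c, h, w).
   Statistics are taken over the c / groups channels within each group at every
   (sample, group, spatial) position, matching the saved_mean shape in the test. */
static void group_norm_ref(const float* x, const float* scale, const float* bias,
	float* y, int n, int c, int h, int w, int groups, float epsilon)
{
	const int cg = c / groups; /* channels per group: 16 / 4 = 4 in the test */
	int i, g, j, s;
	for (i = 0; i < n; i++)
		for (g = 0; g < groups; g++)
			for (s = 0; s < h * w; s++)
			{
				float mean = 0, var = 0;
				for (j = 0; j < cg; j++)
					mean += x[(i * c + g * cg + j) * h * w + s];
				mean /= cg;
				for (j = 0; j < cg; j++)
				{
					const float d = x[(i * c + g * cg + j) * h * w + s] - mean;
					var += d * d;
				}
				var /= cg;
				const float inv_std = 1.f / sqrtf(var + epsilon);
				for (j = 0; j < cg; j++)
				{
					const int ch = g * cg + j;
					const int idx = (i * c + ch) * h * w + s;
					const float norm = (x[idx] - mean) * inv_std;
					y[idx] = scale ? norm * scale[ch * h * w + s] + (bias ? bias[ch * h * w + s] : 0.f) : norm;
				}
			}
}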
976 | | |
977 | | TEST_CASE("compare group norm with mps without scale / bias") |
978 | 1 | { |
979 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_GROUP_NORM_FORWARD, CCV_NNC_BACKEND_MPS) && |
980 | 1 | ccv_nnc_cmd_ok(CCV_NNC_SET_FORWARD, CCV_NNC_BACKEND_MPS)); |
981 | 0 | ccv_nnc_symbolic_graph_t* const symbolic_graph = ccv_nnc_symbolic_graph_new(); |
982 | 0 | ccv_nnc_tensor_symbol_t x = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 16, 2, 10), "host x"); |
983 | 0 | ccv_nnc_tensor_symbol_t bx = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, 16, 2, 10), "x"); |
984 | 0 | ccv_nnc_tensor_symbol_t by = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, 16, 2, 10), "y"); |
985 | 0 | ccv_nnc_tensor_symbol_t y = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 16, 2, 10), "host y"); |
986 | 0 | ccv_nnc_tensor_symbol_t saved_mean = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, 4, 2, 10), "saved_mean"); |
987 | 0 | ccv_nnc_tensor_symbol_t saved_inv_std = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, 4, 2, 10), "saved_inv_std"); |
988 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_DATA_TRANSFER_FORWARD(), TENSOR_SYMBOL_LIST(x), TENSOR_SYMBOL_LIST(bx), "transfer x"); |
989 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_GROUP_NORM_FORWARD(1, 4, 1e-7, 0), TENSOR_SYMBOL_LIST(bx), TENSOR_SYMBOL_LIST(by, saved_mean, saved_inv_std), "group_norm"); |
990 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_DATA_TRANSFER_FORWARD(), TENSOR_SYMBOL_LIST(by), TENSOR_SYMBOL_LIST(y), "transfer y"); |
991 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
992 | 0 | ccv_nnc_graph_t* graph = 0; |
993 | 0 | ccv_nnc_tensor_arena_t* tensor_arena = 0; |
994 | 0 | ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0; |
995 | 0 | ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena); |
996 | 0 | SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH); |
997 | 0 | GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH); |
998 | 0 | dsfmt_t dsfmt; |
999 | 0 | float xdata[2 * 16 * 2 * 10]; |
1000 | 0 | ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, x); |
1001 | 0 | int i; |
1002 | 0 | dsfmt_init_gen_rand(&dsfmt, 1); |
1003 | 0 | for (i = 0; i < 2 * 16 * 2 * 10; i++) |
1004 | 0 | x_tensor->data.f32[i] = xdata[i] = dsfmt_genrand_open_close(&dsfmt); |
1005 | 0 | ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
1006 | 0 | ccv_nnc_tensor_t* const y_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, y); |
1007 | 0 | ccv_nnc_symbolic_graph_free(symbolic_graph); |
1008 | 0 | ccv_nnc_graph_exec_arena_free(graph_exec_arena); |
1009 | 0 | ccv_nnc_graph_free(graph); |
1010 | 0 | ccv_nnc_symbolic_graph_t* const cpu_symbolic_graph = ccv_nnc_symbolic_graph_new(); |
1011 | 0 | ccv_nnc_tensor_symbol_t cx = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 16, 2, 10), "x"); |
1012 | 0 | ccv_nnc_tensor_symbol_t cy = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 16, 2, 10), "y"); |
1013 | 0 | ccv_nnc_tensor_symbol_t csaved_mean = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 4, 2, 10), "saved_mean"); |
1014 | 0 | ccv_nnc_tensor_symbol_t csaved_inv_std = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 4, 2, 10), "saved_inv_std"); |
1015 | 0 | ccv_nnc_graph_exec_symbol_new(cpu_symbolic_graph, CMD_GROUP_NORM_FORWARD(1, 4, 1e-7, 0), TENSOR_SYMBOL_LIST(cx), TENSOR_SYMBOL_LIST(cy, csaved_mean, csaved_inv_std), "group_norm"); |
1016 | 0 | ccv_nnc_graph_exec_symbol_autogen(cpu_symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
1017 | 0 | ccv_nnc_graph_t* cpu_graph = 0; |
1018 | 0 | ccv_nnc_tensor_arena_t* cpu_tensor_arena = 0; |
1019 | 0 | ccv_nnc_graph_exec_arena_t* cpu_graph_exec_arena = 0; |
1020 | 0 | ccv_nnc_symbolic_graph_compile(cpu_symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(cpu_symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(cpu_symbolic_graph), &cpu_graph, &cpu_tensor_arena, &cpu_graph_exec_arena); |
1021 | 0 | ccv_nnc_tensor_t* const cx_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, cx); |
1022 | 0 | memcpy(cx_tensor->data.f32, xdata, sizeof(float) * 2 * 16 * 2 * 10); |
1023 | 0 | ccv_nnc_graph_run(cpu_graph, 0, TRAVERSE_FULL, 0, 0); |
1024 | 0 | ccv_nnc_tensor_t* const cy_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, cy); |
1025 | | // Note that MPS and the reference implementations treat epsilon differently, hence the comparison tolerance. |
1026 | 0 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, y_tensor->data.f32, cy_tensor->data.f32, 2 * 16 * 2 * 10, 1e-3, "group norm result from mps should match the one from the reference implementation"); |
1027 | 0 | ccv_nnc_symbolic_graph_free(cpu_symbolic_graph); |
1028 | 0 | ccv_nnc_tensor_arena_free(tensor_arena); |
1029 | 0 | ccv_nnc_tensor_arena_free(cpu_tensor_arena); |
1030 | 0 | ccv_nnc_graph_exec_arena_free(cpu_graph_exec_arena); |
1031 | 0 | ccv_nnc_graph_free(cpu_graph); |
1032 | 0 | } |
1033 | | |
1034 | | TEST_CASE("compare rmsnorm with mps") |
1035 | 1 | { |
1036 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_RMSNORM_FORWARD, CCV_NNC_BACKEND_MPS) && |
1037 | 1 | ccv_nnc_cmd_ok(CCV_NNC_SET_FORWARD, CCV_NNC_BACKEND_MPS)); |
1038 | 0 | ccv_nnc_symbolic_graph_t* const symbolic_graph = ccv_nnc_symbolic_graph_new(); |
1039 | 0 | ccv_nnc_tensor_symbol_t x = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 2, 2, 10), "host x"); |
1040 | 0 | ccv_nnc_tensor_symbol_t bx = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, 2, 2, 10), "x"); |
1041 | 0 | ccv_nnc_tensor_symbol_t by = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, 2, 2, 10), "y"); |
1042 | 0 | ccv_nnc_tensor_symbol_t y = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 2, 2, 10), "host y"); |
1043 | 0 | ccv_nnc_tensor_symbol_t scale = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 1, 2, 2, 10), "scale"); |
1044 | 0 | ccv_nnc_tensor_symbol_t saved_inv_std = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, 1, 1, 1), "saved_inv_std"); |
1045 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_DATA_TRANSFER_FORWARD(), TENSOR_SYMBOL_LIST(x), TENSOR_SYMBOL_LIST(bx), "transfer x"); |
1046 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_RMSNORM_FORWARD(1e-6, 1, 2, 3), TENSOR_SYMBOL_LIST(bx, scale), TENSOR_SYMBOL_LIST(by, saved_inv_std), "rmsnorm"); |
1047 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_DATA_TRANSFER_FORWARD(), TENSOR_SYMBOL_LIST(by), TENSOR_SYMBOL_LIST(y), "transfer y"); |
1048 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
1049 | 0 | ccv_nnc_graph_t* graph = 0; |
1050 | 0 | ccv_nnc_tensor_arena_t* tensor_arena = 0; |
1051 | 0 | ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0; |
1052 | 0 | ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena); |
1053 | 0 | SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH); |
1054 | 0 | GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH); |
1055 | 0 | dsfmt_t dsfmt; |
1056 | 0 | float xdata[2 * 2 * 2 * 10]; |
1057 | 0 | ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, x); |
1058 | 0 | int i; |
1059 | 0 | dsfmt_init_gen_rand(&dsfmt, 1); |
1060 | 0 | for (i = 0; i < 2 * 2 * 2 * 10; i++) |
1061 | 0 | x_tensor->data.f32[i] = xdata[i] = dsfmt_genrand_open_close(&dsfmt); |
1062 | 0 | float scaledata[1 * 2 * 2 * 10]; |
1063 | 0 | for (i = 0; i < 1 * 2 * 2 * 10; i++) |
1064 | 0 | scaledata[i] = dsfmt_genrand_open_close(&dsfmt); |
1065 | 0 | ccv_nnc_tensor_t scale_tensor = ccv_nnc_tensor(scaledata, CPU_TENSOR_NHWC(32F, 1, 2, 2, 10), 0); |
1066 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(&scale_tensor), TENSOR_LIST(ccv_nnc_tensor_from_symbol(tensor_arena, scale)), 0); |
1067 | 0 | ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
1068 | 0 | ccv_nnc_tensor_t* const y_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, y); |
1069 | 0 | ccv_nnc_symbolic_graph_free(symbolic_graph); |
1070 | 0 | ccv_nnc_graph_exec_arena_free(graph_exec_arena); |
1071 | 0 | ccv_nnc_graph_free(graph); |
1072 | 0 | ccv_nnc_symbolic_graph_t* const cpu_symbolic_graph = ccv_nnc_symbolic_graph_new(); |
1073 | 0 | ccv_nnc_tensor_symbol_t cx = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 2, 2, 10), "x"); |
1074 | 0 | ccv_nnc_tensor_symbol_t cy = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 2, 2, 10), "y"); |
1075 | 0 | ccv_nnc_tensor_symbol_t cscale = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 1, 2, 2, 10), "scale"); |
1076 | 0 | ccv_nnc_tensor_symbol_t csaved_inv_std = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 1, 1, 1), "saved_inv_std"); |
1077 | 0 | ccv_nnc_graph_exec_symbol_new(cpu_symbolic_graph, CMD_RMSNORM_FORWARD(1e-6, 1, 2, 3), TENSOR_SYMBOL_LIST(cx, cscale), TENSOR_SYMBOL_LIST(cy, csaved_inv_std), "rmsnorm"); |
1078 | 0 | ccv_nnc_graph_exec_symbol_autogen(cpu_symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
1079 | 0 | ccv_nnc_graph_t* cpu_graph = 0; |
1080 | 0 | ccv_nnc_tensor_arena_t* cpu_tensor_arena = 0; |
1081 | 0 | ccv_nnc_graph_exec_arena_t* cpu_graph_exec_arena = 0; |
1082 | 0 | ccv_nnc_symbolic_graph_compile(cpu_symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(cpu_symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(cpu_symbolic_graph), &cpu_graph, &cpu_tensor_arena, &cpu_graph_exec_arena); |
1083 | 0 | ccv_nnc_tensor_t* const cx_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, cx); |
1084 | 0 | memcpy(cx_tensor->data.f32, xdata, sizeof(float) * 2 * 2 * 2 * 10); |
1085 | 0 | ccv_nnc_tensor_t* const cscale_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, cscale); |
1086 | 0 | memcpy(cscale_tensor->data.f32, scaledata, sizeof(float) * 1 * 2 * 2 * 10); |
1087 | 0 | ccv_nnc_graph_run(cpu_graph, 0, TRAVERSE_FULL, 0, 0); |
1088 | 0 | ccv_nnc_tensor_t* const cy_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, cy); |
1089 | | // Note that MPS and the reference implementations treat epsilon differently, hence the comparison tolerance. |
1090 | 0 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, y_tensor->data.f32, cy_tensor->data.f32, 2 * 2 * 2 * 10, 1e-4, "rmsnorm result from mps should match the one from the reference implementation"); |
1091 | 0 | ccv_nnc_symbolic_graph_free(cpu_symbolic_graph); |
1092 | 0 | ccv_nnc_tensor_arena_free(tensor_arena); |
1093 | 0 | ccv_nnc_tensor_arena_free(cpu_tensor_arena); |
1094 | 0 | ccv_nnc_graph_exec_arena_free(cpu_graph_exec_arena); |
1095 | 0 | ccv_nnc_graph_free(cpu_graph); |
1096 | 0 | } |
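
CMD_RMSNORM_FORWARD(1e-6, 1, 2, 3) in the test above normalizes each sample by its root mean square over axes 1, 2 and 3, matching the per-sample (2, 1, 1, 1) shape of saved_inv_std. A small sketch of the usual RMSNorm formula, y = x * scale / sqrt(mean(x^2) + epsilon), written for illustration rather than taken from the ccv reference kernel:

#include <math.h>

/* RMSNorm over the last dim elements of each of n samples; scale has dim elements. */
static void rmsnorm_ref(const float* x, const float* scale, float* y,
	int n, int dim, float epsilon)
{
	int i, j;
	for (i = 0; i < n; i++)
	{
		const float* xi = x + i * dim;
		float* yi = y + i * dim;
		float mean_sq = 0;
		for (j = 0; j < dim; j++)
			mean_sq += xi[j] * xi[j];
		mean_sq /= dim;
		const float inv_rms = 1.f / sqrtf(mean_sq + epsilon);
		for (j = 0; j < dim; j++)
			yi[j] = xi[j] * inv_rms * scale[j];
	}
}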
1097 | | |
1098 | | TEST_CASE("compare add with mps") |
1099 | 1 | { |
1100 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_ADD_FORWARD, CCV_NNC_BACKEND_MPS)); |
1101 | 0 | ccv_nnc_symbolic_graph_t* const symbolic_graph = ccv_nnc_symbolic_graph_new(); |
1102 | 0 | ccv_nnc_tensor_symbol_t x = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 10, 5, 5, 3), "x"); |
1103 | 0 | ccv_nnc_tensor_symbol_t y = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 10, 5, 1, 3), "y"); |
1104 | 0 | ccv_nnc_tensor_symbol_t a = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 10, 5, 5, 3), "a"); |
1105 | 0 | ccv_nnc_tensor_symbol_t b = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 10, 5, 1, 3), "b"); |
1106 | 0 | ccv_nnc_tensor_symbol_t c = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 10, 5, 5, 3), "c"); |
1107 | 0 | ccv_nnc_tensor_symbol_t z = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 10, 5, 5, 3), "z"); |
1108 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_DATA_TRANSFER_FORWARD(), TENSOR_SYMBOL_LIST(x, y), TENSOR_SYMBOL_LIST(a, b), "transfer"); |
1109 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_ADD_FORWARD(0.5, 0.2), TENSOR_SYMBOL_LIST(a, b), TENSOR_SYMBOL_LIST(c), "add"); |
1110 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_DATA_TRANSFER_FORWARD(), TENSOR_SYMBOL_LIST(c), TENSOR_SYMBOL_LIST(z), "transfer"); |
1111 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
1112 | 0 | SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH); |
1113 | 0 | ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 5, 5, 3), 0); |
1114 | 0 | ccv_nnc_tensor_t* const y_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 5, 1, 3), 0); |
1115 | 0 | ccv_nnc_graph_t* graph = 0; |
1116 | 0 | ccv_nnc_tensor_arena_t* tensor_arena = 0; |
1117 | 0 | ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0; |
1118 | 0 | ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, TENSOR_BIND_MAP(KV(x, x_tensor), KV(y, y_tensor)), TENSOR_SYMBOL_LIST(z), SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena); |
1119 | 0 | GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH); |
1120 | 0 | dsfmt_t dsfmt; |
1121 | 0 | dsfmt_init_gen_rand(&dsfmt, 0); |
1122 | 0 | int i; |
1123 | 0 | for (i = 0; i < 10 * 5 * 5 * 3; i++) |
1124 | 0 | x_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
1125 | 0 | for (i = 0; i < 10 * 5 * 1 * 3; i++) |
1126 | 0 | y_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
1127 | 0 | ccv_nnc_tensor_t* zt = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 5, 5, 3), 0); |
1128 | 0 | ccv_nnc_cmd_exec(CMD_ADD_FORWARD(0.5, 0.2), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor, y_tensor), TENSOR_LIST(zt), 0); |
1129 | 0 | ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
1130 | 0 | ccv_nnc_tensor_t* const z_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, z); |
1131 | 0 | REQUIRE_TENSOR_EQ(zt, z_tensor, "add should match"); |
1132 | 0 | ccv_nnc_tensor_free(x_tensor); |
1133 | 0 | ccv_nnc_tensor_free(y_tensor); |
1134 | 0 | ccv_nnc_tensor_free(zt); |
1135 | 0 | ccv_nnc_graph_free(graph); |
1136 | 0 | ccv_nnc_tensor_arena_free(tensor_arena); |
1137 | 0 | ccv_nnc_graph_exec_arena_free(graph_exec_arena); |
1138 | 0 | ccv_nnc_symbolic_graph_free(symbolic_graph); |
1139 | 0 | } |
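
CMD_ADD_FORWARD(0.5, 0.2) computes c = 0.5 * a + 0.2 * b, and because y / b has shape (10, 5, 1, 3) its third axis is broadcast across the 5 columns of x / a. A minimal reference for exactly these shapes (assumed semantics of the scaled add, for illustration only):

/* c[i][j][u][v] = p * a[i][j][u][v] + q * b[i][j][0][v], with b broadcast over the third axis. */
static void scaled_add_broadcast_ref(const float* a, const float* b, float* c,
	int n, int h, int w, int k, float p, float q)
{
	int i, j, u, v;
	for (i = 0; i < n; i++)
		for (j = 0; j < h; j++)
			for (u = 0; u < w; u++)
				for (v = 0; v < k; v++)
					c[((i * h + j) * w + u) * k + v] =
						p * a[((i * h + j) * w + u) * k + v] + q * b[(i * h + j) * k + v];
}

In the test above this corresponds to n = 10, h = 5, w = 5, k = 3, p = 0.5, q = 0.2, which is the same computation zt is filled with through the CPU backend.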
1140 | | |
1141 | | TEST_CASE("compare add with mps in half precision") |
1142 | 1 | { |
1143 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_ADD_FORWARD, CCV_NNC_BACKEND_MPS)); |
1144 | 0 | ccv_nnc_symbolic_graph_t* const symbolic_graph = ccv_nnc_symbolic_graph_new(); |
1145 | 0 | ccv_nnc_tensor_symbol_t x = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 10, 5, 5, 3), "x"); |
1146 | 0 | ccv_nnc_tensor_symbol_t y = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 10, 5, 1, 3), "y"); |
1147 | 0 | ccv_nnc_tensor_symbol_t x16 = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(16F, 10, 5, 5, 3), "x 16"); |
1148 | 0 | ccv_nnc_tensor_symbol_t y16 = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(16F, 10, 5, 1, 3), "y 16"); |
1149 | 0 | ccv_nnc_tensor_symbol_t a = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 16F, 10, 5, 5, 3), "a"); |
1150 | 0 | ccv_nnc_tensor_symbol_t b = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 16F, 10, 5, 1, 3), "b"); |
1151 | 0 | ccv_nnc_tensor_symbol_t c = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 16F, 10, 5, 5, 3), "c"); |
1152 | 0 | ccv_nnc_tensor_symbol_t z = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 10, 5, 5, 3), "z"); |
1153 | 0 | ccv_nnc_tensor_symbol_t z16 = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(16F, 10, 5, 5, 3), "z 16"); |
1154 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_DATATYPE_CONVERSION_FORWARD(), TENSOR_SYMBOL_LIST(x, y), TENSOR_SYMBOL_LIST(x16, y16), "convert"); |
1155 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_DATA_TRANSFER_FORWARD(), TENSOR_SYMBOL_LIST(x16, y16), TENSOR_SYMBOL_LIST(a, b), "transfer"); |
1156 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_ADD_FORWARD(0.5, 0.2), TENSOR_SYMBOL_LIST(a, b), TENSOR_SYMBOL_LIST(c), "add"); |
1157 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_DATA_TRANSFER_FORWARD(), TENSOR_SYMBOL_LIST(c), TENSOR_SYMBOL_LIST(z16), "transfer"); |
1158 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_DATATYPE_CONVERSION_FORWARD(), TENSOR_SYMBOL_LIST(z16), TENSOR_SYMBOL_LIST(z), "convert"); |
1159 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
1160 | 0 | SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH); |
1161 | 0 | ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 5, 5, 3), 0); |
1162 | 0 | ccv_nnc_tensor_t* const y_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 5, 1, 3), 0); |
1163 | 0 | ccv_nnc_graph_t* graph = 0; |
1164 | 0 | ccv_nnc_tensor_arena_t* tensor_arena = 0; |
1165 | 0 | ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0; |
1166 | 0 | ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, TENSOR_BIND_MAP(KV(x, x_tensor), KV(y, y_tensor)), TENSOR_SYMBOL_LIST(z), SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena); |
1167 | 0 | GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH); |
1168 | 0 | dsfmt_t dsfmt; |
1169 | 0 | dsfmt_init_gen_rand(&dsfmt, 0); |
1170 | 0 | int i; |
1171 | 0 | for (i = 0; i < 10 * 5 * 5 * 3; i++) |
1172 | 0 | x_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
1173 | 0 | for (i = 0; i < 10 * 5 * 1 * 3; i++) |
1174 | 0 | y_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
1175 | 0 | ccv_nnc_tensor_t* zt = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 5, 5, 3), 0); |
1176 | 0 | ccv_nnc_cmd_exec(CMD_ADD_FORWARD(0.5, 0.2), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor, y_tensor), TENSOR_LIST(zt), 0); |
1177 | 0 | ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
1178 | 0 | ccv_nnc_tensor_t* const z_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, z); |
1179 | 0 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, zt->data.f32, z_tensor->data.f32, 10 * 5 * 5 * 3, 1e-3, "add should match"); |
1180 | 0 | ccv_nnc_tensor_free(x_tensor); |
1181 | 0 | ccv_nnc_tensor_free(y_tensor); |
1182 | 0 | ccv_nnc_tensor_free(zt); |
1183 | 0 | ccv_nnc_graph_free(graph); |
1184 | 0 | ccv_nnc_tensor_arena_free(tensor_arena); |
1185 | 0 | ccv_nnc_graph_exec_arena_free(graph_exec_arena); |
1186 | 0 | ccv_nnc_symbolic_graph_free(symbolic_graph); |
1187 | 0 | } |
1188 | | |
1189 | | TEST_CASE("compare add gradient with mps") |
1190 | 1 | { |
1191 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_ADD_FORWARD, CCV_NNC_BACKEND_MPS) && |
1192 | 1 | ccv_nnc_cmd_ok(CCV_NNC_ADD_BACKWARD, CCV_NNC_BACKEND_MPS)); |
1193 | 0 | ccv_nnc_symbolic_graph_t* const symbolic_graph = ccv_nnc_symbolic_graph_new(); |
1194 | 0 | ccv_nnc_tensor_symbol_t x = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 10, 5, 5, 3), "x"); |
1195 | 0 | ccv_nnc_tensor_symbol_t y = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 10, 5, 1, 3), "y"); |
1196 | 0 | ccv_nnc_tensor_symbol_t a = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 10, 5, 5, 3), "a"); |
1197 | 0 | ccv_nnc_tensor_symbol_t b = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 10, 5, 1, 3), "b"); |
1198 | 0 | ccv_nnc_tensor_symbol_t c = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 10, 5, 5, 3), "c"); |
1199 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_DATA_TRANSFER_FORWARD(), TENSOR_SYMBOL_LIST(x, y), TENSOR_SYMBOL_LIST(a, b), "transfer"); |
1200 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_ADD_FORWARD(0.5, 0.2), TENSOR_SYMBOL_LIST(a, b), TENSOR_SYMBOL_LIST(c), "add"); |
1201 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
1202 | 0 | ccv_nnc_symbolic_graph_backward(symbolic_graph, TENSOR_SYMBOL_LIST(c), TENSOR_SYMBOL_LIST(x, y), SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph)); |
1203 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
1204 | 0 | SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH); |
1205 | 0 | ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 5, 5, 3), 0); |
1206 | 0 | ccv_nnc_tensor_t* const y_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 5, 1, 3), 0); |
1207 | 0 | ccv_nnc_graph_t* graph = 0; |
1208 | 0 | ccv_nnc_tensor_arena_t* tensor_arena = 0; |
1209 | 0 | ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0; |
1210 | 0 | ccv_nnc_tensor_symbol_t dc = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, c); |
1211 | 0 | ccv_nnc_tensor_symbol_t dx = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, x); |
1212 | 0 | ccv_nnc_tensor_symbol_t dy = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, y); |
1213 | 0 | ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, TENSOR_BIND_MAP(KV(x, x_tensor), KV(y, y_tensor)), TENSOR_SYMBOL_LIST(dx, dy), SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena); |
1214 | 0 | GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH); |
1215 | 0 | dsfmt_t dsfmt; |
1216 | 0 | dsfmt_init_gen_rand(&dsfmt, 0); |
1217 | 0 | int i; |
1218 | 0 | for (i = 0; i < 10 * 5 * 5 * 3; i++) |
1219 | 0 | x_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
1220 | 0 | for (i = 0; i < 10 * 5 * 1 * 3; i++) |
1221 | 0 | y_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
1222 | 0 | ccv_nnc_tensor_t* dct = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 5, 5, 3), 0); |
1223 | 0 | for (i = 0; i < 10 * 5 * 5 * 3; i++) |
1224 | 0 | dct->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
1225 | 0 | ccv_nnc_tensor_t* const dc_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, dc); |
1226 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(dct), TENSOR_LIST(dc_tensor), 0); |
1227 | 0 | ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
1228 | 0 | ccv_nnc_tensor_t* zt = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 5, 5, 3), 0); |
1229 | 0 | ccv_nnc_cmd_exec(CMD_ADD_FORWARD(0.5, 0.2), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor, y_tensor), TENSOR_LIST(zt), 0); |
1230 | 0 | ccv_nnc_tensor_t* dxt = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 5, 5, 3), 0); |
1231 | 0 | ccv_nnc_tensor_t* dyt = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 5, 1, 3), 0); |
1232 | 0 | ccv_nnc_cmd_exec(CMD_ADD_BACKWARD(0.5, 0.2), ccv_nnc_no_hint, 0, TENSOR_LIST(dct, x_tensor, y_tensor, zt), TENSOR_LIST(dxt, dyt), 0); |
1233 | 0 | ccv_nnc_tensor_t* dx_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, dx); |
1234 | 0 | ccv_nnc_tensor_t* dy_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, dy); |
1235 | 0 | REQUIRE_TENSOR_EQ(dxt, dx_tensor, "backward pass should match"); |
1236 | 0 | REQUIRE_TENSOR_EQ(dyt, dy_tensor, "backward pass should match"); |
1237 | 0 | ccv_nnc_tensor_free(x_tensor); |
1238 | 0 | ccv_nnc_tensor_free(y_tensor); |
1239 | 0 | ccv_nnc_tensor_free(dct); |
1240 | 0 | ccv_nnc_tensor_free(zt); |
1241 | 0 | ccv_nnc_tensor_free(dxt); |
1242 | 0 | ccv_nnc_tensor_free(dyt); |
1243 | 0 | ccv_nnc_graph_free(graph); |
1244 | 0 | ccv_nnc_tensor_arena_free(tensor_arena); |
1245 | 0 | ccv_nnc_graph_exec_arena_free(graph_exec_arena); |
1246 | 0 | ccv_nnc_symbolic_graph_free(symbolic_graph); |
1247 | 0 | } |
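
Since the forward pass is c = 0.5 * a + 0.2 * b with b broadcast over the third axis, the gradients this test expects are dx = 0.5 * dc and dy = 0.2 * dc summed over the broadcast axis, so dy collapses back to the (10, 5, 1, 3) shape. A rough sketch of those two reductions, assuming the command scales gradients by the forward coefficients:

#include <string.h>

/* da[i][j][u][v] = p * dc[i][j][u][v];  db[i][j][v] = q * sum over u of dc[i][j][u][v]. */
static void scaled_add_backward_ref(const float* dc, float* da, float* db,
	int n, int h, int w, int k, float p, float q)
{
	int i, j, u, v;
	for (i = 0; i < n * h * w * k; i++)
		da[i] = p * dc[i];
	memset(db, 0, sizeof(float) * n * h * k);
	for (i = 0; i < n; i++)
		for (j = 0; j < h; j++)
			for (u = 0; u < w; u++)
				for (v = 0; v < k; v++)
					db[(i * h + j) * k + v] += q * dc[((i * h + j) * w + u) * k + v];
}

When a gradient output is not needed, as in the "no dyt" variant below where the second output is passed as 0, the corresponding reduction is simply skipped.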
1248 | | |
1249 | | TEST_CASE("compare add gradient with mps no dyt") |
1250 | 1 | { |
1251 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_ADD_FORWARD, CCV_NNC_BACKEND_MPS) && |
1252 | 1 | ccv_nnc_cmd_ok(CCV_NNC_ADD_BACKWARD, CCV_NNC_BACKEND_MPS)); |
1253 | 0 | ccv_nnc_symbolic_graph_t* const symbolic_graph = ccv_nnc_symbolic_graph_new(); |
1254 | 0 | ccv_nnc_tensor_symbol_t x = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 10, 5, 5, 3), "x"); |
1255 | 0 | ccv_nnc_tensor_symbol_t y = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 10, 5, 1, 3), "y"); |
1256 | 0 | ccv_nnc_tensor_symbol_t a = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 10, 5, 5, 3), "a"); |
1257 | 0 | ccv_nnc_tensor_symbol_t b = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 10, 5, 1, 3), "b"); |
1258 | 0 | ccv_nnc_tensor_symbol_t c = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 10, 5, 5, 3), "c"); |
1259 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_DATA_TRANSFER_FORWARD(), TENSOR_SYMBOL_LIST(x, y), TENSOR_SYMBOL_LIST(a, b), "transfer"); |
1260 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_ADD_FORWARD(0.5, 0.2), TENSOR_SYMBOL_LIST(a, b), TENSOR_SYMBOL_LIST(c), "add"); |
1261 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
1262 | 0 | ccv_nnc_symbolic_graph_backward(symbolic_graph, TENSOR_SYMBOL_LIST(c), TENSOR_SYMBOL_LIST(x, y), SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph)); |
1263 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
1264 | 0 | SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH); |
1265 | 0 | ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 5, 5, 3), 0); |
1266 | 0 | ccv_nnc_tensor_t* const y_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 5, 1, 3), 0); |
1267 | 0 | ccv_nnc_graph_t* graph = 0; |
1268 | 0 | ccv_nnc_tensor_arena_t* tensor_arena = 0; |
1269 | 0 | ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0; |
1270 | 0 | ccv_nnc_tensor_symbol_t dc = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, c); |
1271 | 0 | ccv_nnc_tensor_symbol_t dx = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, x); |
1272 | 0 | ccv_nnc_tensor_symbol_t dy = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, y); |
1273 | 0 | ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, TENSOR_BIND_MAP(KV(x, x_tensor), KV(y, y_tensor)), TENSOR_SYMBOL_LIST(dx, dy), SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena); |
1274 | 0 | GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH); |
1275 | 0 | dsfmt_t dsfmt; |
1276 | 0 | dsfmt_init_gen_rand(&dsfmt, 0); |
1277 | 0 | int i; |
1278 | 0 | for (i = 0; i < 10 * 5 * 5 * 3; i++) |
1279 | 0 | x_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
1280 | 0 | for (i = 0; i < 10 * 5 * 1 * 3; i++) |
1281 | 0 | y_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
1282 | 0 | ccv_nnc_tensor_t* dct = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 5, 5, 3), 0); |
1283 | 0 | for (i = 0; i < 10 * 5 * 5 * 3; i++) |
1284 | 0 | dct->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
1285 | 0 | ccv_nnc_tensor_t* const dc_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, dc); |
1286 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(dct), TENSOR_LIST(dc_tensor), 0); |
1287 | 0 | ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
1288 | 0 | ccv_nnc_tensor_t* zt = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 5, 5, 3), 0); |
1289 | 0 | ccv_nnc_cmd_exec(CMD_ADD_FORWARD(0.5, 0.2), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor, y_tensor), TENSOR_LIST(zt), 0); |
1290 | 0 | ccv_nnc_tensor_t* dxt = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 5, 5, 3), 0); |
1291 | 0 | ccv_nnc_cmd_exec(CMD_ADD_BACKWARD(0.5, 0.2), ccv_nnc_no_hint, 0, TENSOR_LIST(dct, x_tensor, y_tensor, zt), TENSOR_LIST(dxt, 0), 0); |
1292 | 0 | ccv_nnc_tensor_t* dx_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, dx); |
1293 | 0 | REQUIRE_TENSOR_EQ(dxt, dx_tensor, "backward pass should match"); |
1294 | 0 | ccv_nnc_tensor_free(x_tensor); |
1295 | 0 | ccv_nnc_tensor_free(y_tensor); |
1296 | 0 | ccv_nnc_tensor_free(dct); |
1297 | 0 | ccv_nnc_tensor_free(zt); |
1298 | 0 | ccv_nnc_tensor_free(dxt); |
1299 | 0 | ccv_nnc_graph_free(graph); |
1300 | 0 | ccv_nnc_tensor_arena_free(tensor_arena); |
1301 | 0 | ccv_nnc_graph_exec_arena_free(graph_exec_arena); |
1302 | 0 | ccv_nnc_symbolic_graph_free(symbolic_graph); |
1303 | 0 | } |
1304 | | |
1305 | | TEST_CASE("broadcasting semantics for add backward mps (a,b)") |
1306 | 1 | { |
1307 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_ADD_FORWARD, CCV_NNC_BACKEND_MPS) && |
1308 | 1 | ccv_nnc_cmd_ok(CCV_NNC_ADD_BACKWARD, CCV_NNC_BACKEND_MPS)); |
1309 | 0 | ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4, 1), 0); |
1310 | 0 | ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2), 0); |
1311 | 0 | ccv_nnc_tensor_t* const c = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4, 2), 0); |
1312 | 0 | ccv_nnc_tensor_t* const da = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4, 1), 0); |
1313 | 0 | ccv_nnc_tensor_t* const db = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2), 0); |
1314 | 0 | ccv_nnc_tensor_t* const dat = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4, 1), 0); |
1315 | 0 | ccv_nnc_tensor_t* const dbt = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2), 0); |
1316 | 0 | a->data.f32[0] = 1; |
1317 | 0 | a->data.f32[1] = 2; |
1318 | 0 | a->data.f32[2] = 3; |
1319 | 0 | a->data.f32[3] = 4; |
1320 | 0 | b->data.f32[0] = 5; |
1321 | 0 | b->data.f32[1] = 6; |
1322 | 0 | float ctp[] = { |
1323 | 0 | 6, 7, |
1324 | 0 | 7, 8, |
1325 | 0 | 8, 9, |
1326 | 0 | 9, 10 |
1327 | 0 | }; |
1328 | 0 | memcpy(c->data.f32, ctp, sizeof(ctp)); |
1329 | 0 | ccv_nnc_tensor_t* const ga = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 4, 1), 0); |
1330 | 0 | ccv_nnc_tensor_t* const gb = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 2), 0); |
1331 | 0 | ccv_nnc_tensor_t* const gda = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 4, 1), 0); |
1332 | 0 | ccv_nnc_tensor_t* const gdb = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 2), 0); |
1333 | 0 | ccv_nnc_tensor_t* const gc = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 4, 2), 0); |
1334 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(a, b, c), TENSOR_LIST(ga, gb, gc), 0); |
1335 | 0 | ccv_nnc_cmd_exec(CMD_ADD_BACKWARD(0.5, 0.2), ccv_nnc_no_hint, 0, TENSOR_LIST(gc, ga, gb), TENSOR_LIST(gda, gdb), 0); |
1336 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(gda, gdb), TENSOR_LIST(da, db), 0); |
1337 | 0 | ccv_nnc_cmd_exec(CMD_ADD_BACKWARD(0.5, 0.2), ccv_nnc_no_hint, 0, TENSOR_LIST(c, a, b), TENSOR_LIST(dat, dbt), 0); |
1338 | 0 | REQUIRE_TENSOR_EQ(dat, da, "gradient of a should be equal"); |
1339 | 0 | REQUIRE_TENSOR_EQ(dbt, db, "gradient of b should be equal"); |
1340 | 0 | ccv_nnc_tensor_free(a); |
1341 | 0 | ccv_nnc_tensor_free(b); |
1342 | 0 | ccv_nnc_tensor_free(c); |
1343 | 0 | ccv_nnc_tensor_free(da); |
1344 | 0 | ccv_nnc_tensor_free(db); |
1345 | 0 | ccv_nnc_tensor_free(dat); |
1346 | 0 | ccv_nnc_tensor_free(dbt); |
1347 | 0 | ccv_nnc_tensor_free(ga); |
1348 | 0 | ccv_nnc_tensor_free(gb); |
1349 | 0 | ccv_nnc_tensor_free(gc); |
1350 | 0 | ccv_nnc_tensor_free(gda); |
1351 | 0 | ccv_nnc_tensor_free(gdb); |
1352 | 0 | } |
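
With c acting as the incoming gradient here and assuming, as above, that the backward pass applies the forward coefficients and sums over broadcast axes, the expected values follow directly from ctp:

/* da (shape 4x1) sums each row of c and scales by 0.5; db (shape 2) sums each column and scales by 0.2:
   da = 0.5 * {6+7, 7+8, 8+9, 9+10} = {6.5, 7.5, 8.5, 9.5}
   db = 0.2 * {6+7+8+9, 7+8+9+10}   = {6.0, 6.8} */

The "(a, nil)" and "(nil,b)" variants below exercise the same computation with one of the two gradient outputs omitted.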
1353 | | |
1354 | | TEST_CASE("broadcasting semantics for add backward mps (a, nil)") |
1355 | 1 | { |
1356 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_ADD_FORWARD, CCV_NNC_BACKEND_MPS) && |
1357 | 1 | ccv_nnc_cmd_ok(CCV_NNC_ADD_BACKWARD, CCV_NNC_BACKEND_MPS)); |
1358 | 0 | ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4, 1), 0); |
1359 | 0 | ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2), 0); |
1360 | 0 | ccv_nnc_tensor_t* const c = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4, 2), 0); |
1361 | 0 | ccv_nnc_tensor_t* const da = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4, 1), 0); |
1362 | 0 | ccv_nnc_tensor_t* const dat = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4, 1), 0); |
1363 | 0 | a->data.f32[0] = 1; |
1364 | 0 | a->data.f32[1] = 2; |
1365 | 0 | a->data.f32[2] = 3; |
1366 | 0 | a->data.f32[3] = 4; |
1367 | 0 | b->data.f32[0] = 5; |
1368 | 0 | b->data.f32[1] = 6; |
1369 | 0 | float ctp[] = { |
1370 | 0 | 6, 7, |
1371 | 0 | 7, 8, |
1372 | 0 | 8, 9, |
1373 | 0 | 9, 10 |
1374 | 0 | }; |
1375 | 0 | memcpy(c->data.f32, ctp, sizeof(ctp)); |
1376 | 0 | ccv_nnc_tensor_t* const ga = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 4, 1), 0); |
1377 | 0 | ccv_nnc_tensor_t* const gb = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 2), 0); |
1378 | 0 | ccv_nnc_tensor_t* const gda = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 4, 1), 0); |
1379 | 0 | ccv_nnc_tensor_t* const gc = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 4, 2), 0); |
1380 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(a, b, c), TENSOR_LIST(ga, gb, gc), 0); |
1381 | 0 | ccv_nnc_cmd_exec(CMD_ADD_BACKWARD(0.5, 0.2), ccv_nnc_no_hint, 0, TENSOR_LIST(gc, ga, ), TENSOR_LIST(gda, ), 0); |
1382 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(gda, ), TENSOR_LIST(da, ), 0); |
1383 | 0 | ccv_nnc_cmd_exec(CMD_ADD_BACKWARD(0.5, 0.2), ccv_nnc_no_hint, 0, TENSOR_LIST(c, a, ), TENSOR_LIST(dat, ), 0); |
1384 | 0 | REQUIRE_TENSOR_EQ(dat, da, "gradient of a should be equal"); |
1385 | 0 | ccv_nnc_tensor_free(a); |
1386 | 0 | ccv_nnc_tensor_free(b); |
1387 | 0 | ccv_nnc_tensor_free(c); |
1388 | 0 | ccv_nnc_tensor_free(da); |
1389 | 0 | ccv_nnc_tensor_free(dat); |
1390 | 0 | ccv_nnc_tensor_free(ga); |
1391 | 0 | ccv_nnc_tensor_free(gb); |
1392 | 0 | ccv_nnc_tensor_free(gc); |
1393 | 0 | ccv_nnc_tensor_free(gda); |
1394 | 0 | } |
1395 | | |
1396 | | TEST_CASE("broadcasting semantics for add backward mps (nil,b)") |
1397 | 1 | { |
1398 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_ADD_FORWARD, CCV_NNC_BACKEND_MPS) && |
1399 | 1 | ccv_nnc_cmd_ok(CCV_NNC_ADD_BACKWARD, CCV_NNC_BACKEND_MPS)); |
1400 | 0 | ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4, 1), 0); |
1401 | 0 | ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2), 0); |
1402 | 0 | ccv_nnc_tensor_t* const c = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4, 2), 0); |
1403 | 0 | ccv_nnc_tensor_t* const db = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2), 0); |
1404 | 0 | ccv_nnc_tensor_t* const dbt = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2), 0); |
1405 | 0 | a->data.f32[0] = 1; |
1406 | 0 | a->data.f32[1] = 2; |
1407 | 0 | a->data.f32[2] = 3; |
1408 | 0 | a->data.f32[3] = 4; |
1409 | 0 | b->data.f32[0] = 5; |
1410 | 0 | b->data.f32[1] = 6; |
1411 | 0 | float ctp[] = { |
1412 | 0 | 6, 7, |
1413 | 0 | 7, 8, |
1414 | 0 | 8, 9, |
1415 | 0 | 9, 10 |
1416 | 0 | }; |
1417 | 0 | memcpy(c->data.f32, ctp, sizeof(ctp)); |
1418 | 0 | ccv_nnc_tensor_t* const ga = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 4, 1), 0); |
1419 | 0 | ccv_nnc_tensor_t* const gb = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 2), 0); |
1420 | 0 | ccv_nnc_tensor_t* const gdb = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 2), 0); |
1421 | 0 | ccv_nnc_tensor_t* const gc = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 4, 2), 0); |
1422 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(a, b, c), TENSOR_LIST(ga, gb, gc), 0); |
1423 | 0 | ccv_nnc_cmd_exec(CMD_ADD_BACKWARD(0.5, 0.2), ccv_nnc_no_hint, 0, TENSOR_LIST(gc, ga, gb), TENSOR_LIST(0, gdb), 0); |
1424 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(gdb), TENSOR_LIST(db), 0); |
1425 | 0 | ccv_nnc_cmd_exec(CMD_ADD_BACKWARD(0.5, 0.2), ccv_nnc_no_hint, 0, TENSOR_LIST(c, 0, b), TENSOR_LIST(0, dbt), 0); |
1426 | 0 | REQUIRE_TENSOR_EQ(dbt, db, "gradient of b should be equal"); |
1427 | 0 | ccv_nnc_tensor_free(a); |
1428 | 0 | ccv_nnc_tensor_free(b); |
1429 | 0 | ccv_nnc_tensor_free(c); |
1430 | 0 | ccv_nnc_tensor_free(db); |
1431 | 0 | ccv_nnc_tensor_free(dbt); |
1432 | 0 | ccv_nnc_tensor_free(ga); |
1433 | 0 | ccv_nnc_tensor_free(gb); |
1434 | 0 | ccv_nnc_tensor_free(gc); |
1435 | 0 | ccv_nnc_tensor_free(gdb); |
1436 | 0 | } |
1437 | | |
1438 | | TEST_CASE("compare ewsum with mps") |
1439 | 1 | { |
1440 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_EWSUM_FORWARD, CCV_NNC_BACKEND_MPS)); |
1441 | 0 | ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 100), 0); |
1442 | 0 | ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 100), 0); |
1443 | 0 | ccv_nnc_tensor_t* const c = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 100), 0); |
1444 | 0 | ccv_nnc_tensor_t* const d = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 100), 0); |
1445 | 0 | ccv_nnc_tensor_t* const ha = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 100), 0); |
1446 | 0 | ccv_nnc_tensor_t* const hb = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 100), 0); |
1447 | 0 | ccv_nnc_tensor_t* const hc = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 100), 0); |
1448 | 0 | ccv_nnc_tensor_t* const hd = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 100), 0); |
1449 | 0 | ccv_nnc_tensor_t* const gd = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 100), 0); |
1450 | 0 | int i; |
1451 | 0 | for (i = 0; i < 100; i++) |
1452 | 0 | { |
1453 | 0 | ha->data.f32[i] = 1; |
1454 | 0 | hb->data.f32[i] = 0.5; |
1455 | 0 | hc->data.f32[i] = 0.25; |
1456 | 0 | gd->data.f32[i] = 1.75; |
1457 | 0 | } |
1458 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(ha, hb, hc), TENSOR_LIST(a, b, c), 0); |
1459 | 0 | ccv_nnc_cmd_exec(CMD_EWSUM_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(a, b, c), TENSOR_LIST(d), 0); |
1460 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(d), TENSOR_LIST(hd), 0); |
1461 | 0 | REQUIRE_TENSOR_EQ(hd, gd, "ewsum result should be the same"); |
1462 | 0 | ccv_nnc_tensor_free(a); |
1463 | 0 | ccv_nnc_tensor_free(b); |
1464 | 0 | ccv_nnc_tensor_free(c); |
1465 | 0 | ccv_nnc_tensor_free(d); |
1466 | 0 | ccv_nnc_tensor_free(ha); |
1467 | 0 | ccv_nnc_tensor_free(hb); |
1468 | 0 | ccv_nnc_tensor_free(hc); |
1469 | 0 | ccv_nnc_tensor_free(hd); |
1470 | 0 | ccv_nnc_tensor_free(gd); |
1471 | 0 | } |
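
CMD_EWSUM_FORWARD simply adds its inputs element-wise, which is why gd is filled with 1 + 0.5 + 0.25 = 1.75 as the expected output. A one-loop sketch of that reference, written for illustration:

/* d[i] = a[i] + b[i] + c[i] for every element. */
static void ewsum3_ref(const float* a, const float* b, const float* c, float* d, int n)
{
	int i;
	for (i = 0; i < n; i++)
		d[i] = a[i] + b[i] + c[i];
}

The half-precision variant below runs the same sum; 1, 0.5, 0.25 and 1.75 are all exactly representable in 16-bit floats, so the 1e-3 tolerance there is generous.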
1472 | | |
1473 | | TEST_CASE("compare ewsum with mps in half precision") |
1474 | 1 | { |
1475 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_EWSUM_FORWARD, CCV_NNC_BACKEND_MPS)); |
1476 | 0 | ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 16F, 100), 0); |
1477 | 0 | ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 16F, 100), 0); |
1478 | 0 | ccv_nnc_tensor_t* const c = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 16F, 100), 0); |
1479 | 0 | ccv_nnc_tensor_t* const d = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 16F, 100), 0); |
1480 | 0 | ccv_nnc_tensor_t* const ha = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 100), 0); |
1481 | 0 | ccv_nnc_tensor_t* const hb = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 100), 0); |
1482 | 0 | ccv_nnc_tensor_t* const hc = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 100), 0); |
1483 | 0 | ccv_nnc_tensor_t* const hd = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 100), 0); |
1484 | 0 | ccv_nnc_tensor_t* const ha16 = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(16F, 100), 0); |
1485 | 0 | ccv_nnc_tensor_t* const hb16 = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(16F, 100), 0); |
1486 | 0 | ccv_nnc_tensor_t* const hc16 = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(16F, 100), 0); |
1487 | 0 | ccv_nnc_tensor_t* const hd16 = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(16F, 100), 0); |
1488 | 0 | ccv_nnc_tensor_t* const gd = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 100), 0); |
1489 | 0 | int i; |
1490 | 0 | for (i = 0; i < 100; i++) |
1491 | 0 | { |
1492 | 0 | ha->data.f32[i] = 1; |
1493 | 0 | hb->data.f32[i] = 0.5; |
1494 | 0 | hc->data.f32[i] = 0.25; |
1495 | 0 | gd->data.f32[i] = 1.75; |
1496 | 0 | } |
1497 | 0 | ccv_nnc_cmd_exec(CMD_DATATYPE_CONVERSION_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(ha, hb, hc), TENSOR_LIST(ha16, hb16, hc16), 0); |
1498 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(ha16, hb16, hc16), TENSOR_LIST(a, b, c), 0); |
1499 | 0 | ccv_nnc_cmd_exec(CMD_EWSUM_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(a, b, c), TENSOR_LIST(d), 0); |
1500 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(d), TENSOR_LIST(hd16), 0); |
1501 | 0 | ccv_nnc_cmd_exec(CMD_DATATYPE_CONVERSION_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(hd16), TENSOR_LIST(hd), 0); |
1502 | 0 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, hd->data.f32, gd->data.f32, 100, 1e-3, "ewsum result should be the same"); |
1503 | 0 | ccv_nnc_tensor_free(a); |
1504 | 0 | ccv_nnc_tensor_free(b); |
1505 | 0 | ccv_nnc_tensor_free(c); |
1506 | 0 | ccv_nnc_tensor_free(d); |
1507 | 0 | ccv_nnc_tensor_free(ha); |
1508 | 0 | ccv_nnc_tensor_free(hb); |
1509 | 0 | ccv_nnc_tensor_free(hc); |
1510 | 0 | ccv_nnc_tensor_free(hd); |
1511 | 0 | ccv_nnc_tensor_free(ha16); |
1512 | 0 | ccv_nnc_tensor_free(hb16); |
1513 | 0 | ccv_nnc_tensor_free(hc16); |
1514 | 0 | ccv_nnc_tensor_free(hd16); |
1515 | 0 | ccv_nnc_tensor_free(gd); |
1516 | 0 | } |
1517 | | |
1518 | | TEST_CASE("compare transpose two tensor views") |
1519 | 1 | { |
1520 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_TRANSPOSE_FORWARD, CCV_NNC_BACKEND_MPS)); |
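| | // Carves a 4x3x2x2 view (starting at offset 3, 2, 1, 0) out of a 7x6x5x4 buffer, transposes axes 1 and 3 |
| | // into a view of a second buffer, transposes back, then repeats the round trip on MPS and compares against the CPU result. |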
1521 | 0 | ccv_nnc_tensor_t* const ha = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 7, 6, 5, 4), 0); |
1522 | 0 | memset(ha->data.f32, 0, sizeof(float) * 7 * 6 * 5 * 4); |
1523 | 0 | ccv_nnc_tensor_view_t ha_view = ccv_nnc_tensor_view(ha, CPU_TENSOR_NHWC(32F, 4, 3, 2, 2), DIM_ALLOC(3, 2, 1, 0), DIM_ALLOC(6 * 5 * 4, 5 * 4, 4, 1)); |
1524 | 0 | ccv_nnc_tensor_t* const hb = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 8, 7, 6, 5), 0); |
1525 | 0 | memset(hb->data.f32, 0, sizeof(float) * 8 * 7 * 6 * 5); |
1526 | 0 | ccv_nnc_tensor_view_t hb_view = ccv_nnc_tensor_view(hb, CPU_TENSOR_NHWC(32F, 4, 2, 2, 3), DIM_ALLOC(3, 2, 1, 0), DIM_ALLOC(7 * 6 * 5, 6 * 5, 5, 1)); |
1527 | 0 | int i, j, k, l; |
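| | // Fill the 4x3x2x2 sub-block of ha that starts at index (3, 2, 1, 0) with the sequential values 0..47. |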
1528 | 0 | for (i = 0; i < 4; i++) |
1529 | 0 | for (j = 0; j < 3; j++) |
1530 | 0 | for (k = 0; k < 2; k++) |
1531 | 0 | for (l = 0; l < 2; l++) |
1532 | 0 | ha->data.f32[(i + 3) * 6 * 5 * 4 + (j + 2) * 5 * 4 + (k + 1) * 4 + l] = i * 3 * 2 * 2 + j * 2 * 2 + k * 2 + l; |
1533 | 0 | ccv_nnc_cmd_exec(CMD_TRANSPOSE_FORWARD(1, 3), ccv_nnc_no_hint, 0, TENSOR_LIST((ccv_nnc_tensor_t*)&ha_view), TENSOR_LIST((ccv_nnc_tensor_t*)&hb_view), 0); |
1534 | 0 | ccv_nnc_tensor_t* hd = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 7, 6, 5, 4), 0); |
1535 | 0 | memset(hd->data.f32, 0, sizeof(float) * 7 * 6 * 5 * 4); |
1536 | 0 | ccv_nnc_tensor_view_t hd_view = ccv_nnc_tensor_view(hd, CPU_TENSOR_NHWC(32F, 4, 3, 2, 2), DIM_ALLOC(3, 2, 1, 0), DIM_ALLOC(6 * 5 * 4, 5 * 4, 4, 1)); |
1537 | 0 | ccv_nnc_cmd_exec(CMD_TRANSPOSE_FORWARD(1, 3), ccv_nnc_no_hint, 0, TENSOR_LIST((ccv_nnc_tensor_t*)&hb_view), TENSOR_LIST((ccv_nnc_tensor_t*)&hd_view), 0); |
1538 | 0 | REQUIRE_TENSOR_EQ(hd, ha, "4x3x2x2 tensor should be exactly the same."); |
1539 | 0 | ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 7, 6, 5, 4), 0); |
1540 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(ha), TENSOR_LIST(a), 0); |
1541 | 0 | ccv_nnc_tensor_view_t a_view = ccv_nnc_tensor_view(a, GPU_TENSOR_NHWC(000, 32F, 4, 3, 2, 2), DIM_ALLOC(3, 2, 1, 0), DIM_ALLOC(6 * 5 * 4, 5 * 4, 4, 1)); |
1542 | 0 | ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 8, 7, 6, 5), 0); |
1543 | 0 | ccv_nnc_cmd_exec(CMD_SET_FORWARD(0), ccv_nnc_no_hint, 0, TENSOR_LIST(), TENSOR_LIST(b), 0); |
1544 | 0 | ccv_nnc_tensor_view_t b_view = ccv_nnc_tensor_view(b, GPU_TENSOR_NHWC(000, 32F, 4, 2, 2, 3), DIM_ALLOC(3, 2, 1, 0), DIM_ALLOC(7 * 6 * 5, 6 * 5, 5, 1)); |
1545 | 0 | ccv_nnc_cmd_exec(CMD_TRANSPOSE_FORWARD(1, 3), ccv_nnc_no_hint, 0, TENSOR_LIST((ccv_nnc_tensor_t*)&a_view), TENSOR_LIST((ccv_nnc_tensor_t*)&b_view), 0); |
1546 | 0 | ccv_nnc_tensor_t* d = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 7, 6, 5, 4), 0); |
1547 | 0 | ccv_nnc_cmd_exec(CMD_SET_FORWARD(0), ccv_nnc_no_hint, 0, TENSOR_LIST(), TENSOR_LIST(d), 0); |
1548 | 0 | ccv_nnc_tensor_view_t d_view = ccv_nnc_tensor_view(d, GPU_TENSOR_NHWC(000, 32F, 4, 3, 2, 2), DIM_ALLOC(3, 2, 1, 0), DIM_ALLOC(6 * 5 * 4, 5 * 4, 4, 1)); |
1549 | 0 | ccv_nnc_cmd_exec(CMD_TRANSPOSE_FORWARD(1, 3), ccv_nnc_no_hint, 0, TENSOR_LIST((ccv_nnc_tensor_t*)&b_view), TENSOR_LIST((ccv_nnc_tensor_t*)&d_view), 0); |
1550 | 0 | ccv_nnc_tensor_t* const hbt = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 8, 7, 6, 5), 0); |
1551 | 0 | ccv_nnc_tensor_t* const hdt = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 7, 6, 5, 4), 0); |
1552 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(b, d), TENSOR_LIST(hbt, hdt), 0); |
1553 | 0 | REQUIRE_TENSOR_EQ(hbt, hb, "4x2x2x3 tensor should be exactly the same."); |
1554 | 0 | REQUIRE_TENSOR_EQ(hdt, hd, "4x3x2x2 tensor should be exactly the same."); |
1555 | 0 | ccv_nnc_tensor_free(ha); |
1556 | 0 | ccv_nnc_tensor_free(hb); |
1557 | 0 | ccv_nnc_tensor_free(hd); |
1558 | 0 | ccv_nnc_tensor_free(hbt); |
1559 | 0 | ccv_nnc_tensor_free(hdt); |
1560 | 0 | ccv_nnc_tensor_free(a); |
1561 | 0 | ccv_nnc_tensor_free(b); |
1562 | 0 | ccv_nnc_tensor_free(d); |
1563 | 0 | } |
1564 | | |
1565 | | TEST_CASE("broadcasting semantics for add [[1, 2, 3], [4, 5, 6]] + [7, 8, 9]") |
1566 | 1 | { |
1567 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_ADD_FORWARD, CCV_NNC_BACKEND_MPS)); |
1568 | 0 | ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 3), 0); |
1569 | 0 | ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 3), 0); |
1570 | 0 | ccv_nnc_tensor_t* const c = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 3), 0); |
1571 | 0 | a->data.f32[0] = 1; |
1572 | 0 | a->data.f32[1] = 2; |
1573 | 0 | a->data.f32[2] = 3; |
1574 | 0 | a->data.f32[3] = 4; |
1575 | 0 | a->data.f32[4] = 5; |
1576 | 0 | a->data.f32[5] = 6; |
1577 | 0 | b->data.f32[0] = 7; |
1578 | 0 | b->data.f32[1] = 8; |
1579 | 0 | b->data.f32[2] = 9; |
1580 | 0 | ccv_nnc_tensor_t* const ga = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 2, 3), 0); |
1581 | 0 | ccv_nnc_tensor_t* const gb = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 3), 0); |
1582 | 0 | ccv_nnc_tensor_t* const gc = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 2, 3), 0); |
1583 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(a, b), TENSOR_LIST(ga, gb), 0); |
1584 | 0 | ccv_nnc_cmd_exec(CMD_ADD_FORWARD(1, 1), ccv_nnc_no_hint, 0, TENSOR_LIST(ga, gb), TENSOR_LIST(gc), 0); |
1585 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(gc), TENSOR_LIST(c), 0); |
1586 | 0 | float ctp[] = { |
1587 | 0 | 8, 10, 12, |
1588 | 0 | 11, 13, 15 |
1589 | 0 | }; |
1590 | 0 | ccv_nnc_tensor_t ct = ccv_nnc_tensor(ctp, CPU_TENSOR_NHWC(32F, 2, 3), 0); |
1591 | 0 | REQUIRE_TENSOR_EQ(c, &ct, "result should be equal"); |
1592 | 0 | ccv_nnc_tensor_free(a); |
1593 | 0 | ccv_nnc_tensor_free(b); |
1594 | 0 | ccv_nnc_tensor_free(c); |
1595 | 0 | ccv_nnc_tensor_free(ga); |
1596 | 0 | ccv_nnc_tensor_free(gb); |
1597 | 0 | ccv_nnc_tensor_free(gc); |
1598 | 0 | } |
1599 | | |
1600 | | TEST_CASE("broadcasting semantics for add [[1], [2], [3], [4]] + [5, 6]") |
1601 | 1 | { |
1602 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_ADD_FORWARD, CCV_NNC_BACKEND_MPS)); |
1603 | 0 | ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4, 1), 0); |
1604 | 0 | ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2), 0); |
1605 | 0 | ccv_nnc_tensor_t* const c = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4, 2), 0); |
1606 | 0 | a->data.f32[0] = 1; |
1607 | 0 | a->data.f32[1] = 2; |
1608 | 0 | a->data.f32[2] = 3; |
1609 | 0 | a->data.f32[3] = 4; |
1610 | 0 | b->data.f32[0] = 5; |
1611 | 0 | b->data.f32[1] = 6; |
1612 | 0 | ccv_nnc_tensor_t* const ga = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 4, 1), 0); |
1613 | 0 | ccv_nnc_tensor_t* const gb = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 2), 0); |
1614 | 0 | ccv_nnc_tensor_t* const gc = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 4, 2), 0); |
1615 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(a, b), TENSOR_LIST(ga, gb), 0); |
1616 | 0 | ccv_nnc_cmd_exec(CMD_ADD_FORWARD(1, 1), ccv_nnc_no_hint, 0, TENSOR_LIST(ga, gb), TENSOR_LIST(gc), 0); |
1617 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(gc), TENSOR_LIST(c), 0); |
1618 | 0 | float ctp[] = { |
1619 | 0 | 6, 7, |
1620 | 0 | 7, 8, |
1621 | 0 | 8, 9, |
1622 | 0 | 9, 10 |
1623 | 0 | }; |
1624 | 0 | ccv_nnc_tensor_t ct = ccv_nnc_tensor(ctp, CPU_TENSOR_NHWC(32F, 4, 2), 0); |
1625 | 0 | REQUIRE_TENSOR_EQ(c, &ct, "result should be equal"); |
1626 | 0 | ccv_nnc_tensor_free(a); |
1627 | 0 | ccv_nnc_tensor_free(b); |
1628 | 0 | ccv_nnc_tensor_free(c); |
1629 | 0 | ccv_nnc_tensor_free(ga); |
1630 | 0 | ccv_nnc_tensor_free(gb); |
1631 | 0 | ccv_nnc_tensor_free(gc); |
1632 | 0 | } |
1633 | | |
1634 | | TEST_CASE("broadcasting semantics for mul [[1, 2, 3], [4, 5, 6]] * [7, 8, 9]") |
1635 | 1 | { |
1636 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_MUL_FORWARD, CCV_NNC_BACKEND_MPS)); |
1637 | 0 | ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 3), 0); |
1638 | 0 | ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 3), 0); |
1639 | 0 | ccv_nnc_tensor_t* const c = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 3), 0); |
1640 | 0 | a->data.f32[0] = 1; |
1641 | 0 | a->data.f32[1] = 2; |
1642 | 0 | a->data.f32[2] = 3; |
1643 | 0 | a->data.f32[3] = 4; |
1644 | 0 | a->data.f32[4] = 5; |
1645 | 0 | a->data.f32[5] = 6; |
1646 | 0 | b->data.f32[0] = 7; |
1647 | 0 | b->data.f32[1] = 8; |
1648 | 0 | b->data.f32[2] = 9; |
1649 | 0 | ccv_nnc_tensor_t* const ga = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 2, 3), 0); |
1650 | 0 | ccv_nnc_tensor_t* const gb = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 3), 0); |
1651 | 0 | ccv_nnc_tensor_t* const gc = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 2, 3), 0); |
1652 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(a, b), TENSOR_LIST(ga, gb), 0); |
1653 | 0 | ccv_nnc_cmd_exec(CMD_MUL_FORWARD(1), ccv_nnc_no_hint, 0, TENSOR_LIST(ga, gb), TENSOR_LIST(gc), 0); |
1654 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(gc), TENSOR_LIST(c), 0); |
1655 | 0 | float ctp[] = { |
1656 | 0 | 7, 16, 27, |
1657 | 0 | 28, 40, 54 |
1658 | 0 | }; |
1659 | 0 | ccv_nnc_tensor_t ct = ccv_nnc_tensor(ctp, CPU_TENSOR_NHWC(32F, 2, 3), 0); |
1660 | 0 | REQUIRE_TENSOR_EQ(c, &ct, "result should be equal"); |
1661 | 0 | ccv_nnc_tensor_free(a); |
1662 | 0 | ccv_nnc_tensor_free(b); |
1663 | 0 | ccv_nnc_tensor_free(c); |
1664 | 0 | ccv_nnc_tensor_free(ga); |
1665 | 0 | ccv_nnc_tensor_free(gb); |
1666 | 0 | ccv_nnc_tensor_free(gc); |
1667 | 0 | } |
1668 | | |
1669 | | TEST_CASE("broadcasting semantics for mul [[1], [2], [3], [4]] * [5, 6]") |
1670 | 1 | { |
1671 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_MUL_FORWARD, CCV_NNC_BACKEND_MPS)); |
1672 | 0 | ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4, 1), 0); |
1673 | 0 | ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2), 0); |
1674 | 0 | ccv_nnc_tensor_t* const c = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4, 2), 0); |
1675 | 0 | a->data.f32[0] = 1; |
1676 | 0 | a->data.f32[1] = 2; |
1677 | 0 | a->data.f32[2] = 3; |
1678 | 0 | a->data.f32[3] = 4; |
1679 | 0 | b->data.f32[0] = 5; |
1680 | 0 | b->data.f32[1] = 6; |
1681 | 0 | ccv_nnc_tensor_t* const ga = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 4, 1), 0); |
1682 | 0 | ccv_nnc_tensor_t* const gb = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 2), 0); |
1683 | 0 | ccv_nnc_tensor_t* const gc = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 4, 2), 0); |
1684 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(a, b), TENSOR_LIST(ga, gb), 0); |
1685 | 0 | ccv_nnc_cmd_exec(CMD_MUL_FORWARD(1), ccv_nnc_no_hint, 0, TENSOR_LIST(ga, gb), TENSOR_LIST(gc), 0); |
1686 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(gc), TENSOR_LIST(c), 0); |
1687 | 0 | float ctp[] = { |
1688 | 0 | 5, 6, |
1689 | 0 | 10, 12, |
1690 | 0 | 15, 18, |
1691 | 0 | 20, 24 |
1692 | 0 | }; |
1693 | 0 | ccv_nnc_tensor_t ct = ccv_nnc_tensor(ctp, CPU_TENSOR_NHWC(32F, 4, 2), 0); |
1694 | 0 | REQUIRE_TENSOR_EQ(c, &ct, "result should be equal"); |
1695 | 0 | ccv_nnc_tensor_free(a); |
1696 | 0 | ccv_nnc_tensor_free(b); |
1697 | 0 | ccv_nnc_tensor_free(c); |
1698 | 0 | ccv_nnc_tensor_free(ga); |
1699 | 0 | ccv_nnc_tensor_free(gb); |
1700 | 0 | ccv_nnc_tensor_free(gc); |
1701 | 0 | } |
1702 | | |
1703 | | TEST_CASE("scalar mul [[1, 2, 3], [4, 5, 6]] * 0.3") |
1704 | 1 | { |
1705 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_MUL_FORWARD, CCV_NNC_BACKEND_MPS)); |
1706 | 0 | ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 3), 0); |
1707 | 0 | ccv_nnc_tensor_t* const c = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 3), 0); |
1708 | 0 | a->data.f32[0] = 1; |
1709 | 0 | a->data.f32[1] = 2; |
1710 | 0 | a->data.f32[2] = 3; |
1711 | 0 | a->data.f32[3] = 4; |
1712 | 0 | a->data.f32[4] = 5; |
1713 | 0 | a->data.f32[5] = 6; |
1714 | 0 | ccv_nnc_tensor_t* const ga = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 2, 3), 0); |
1715 | 0 | ccv_nnc_tensor_t* const gc = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 2, 3), 0); |
1716 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(a), TENSOR_LIST(ga), 0); |
1717 | 0 | ccv_nnc_cmd_exec(CMD_SCALAR_MUL_FORWARD(0.3), ccv_nnc_no_hint, 0, TENSOR_LIST(ga), TENSOR_LIST(gc), 0); |
1718 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(gc), TENSOR_LIST(c), 0); |
1719 | 0 | float ctp[] = { |
1720 | 0 | 0.3, 0.6, 0.9, |
1721 | 0 | 1.2, 1.5, 1.8, |
1722 | 0 | }; |
1723 | 0 | ccv_nnc_tensor_t ct = ccv_nnc_tensor(ctp, CPU_TENSOR_NHWC(32F, 2, 3), 0); |
1724 | 0 | REQUIRE_TENSOR_EQ(c, &ct, "result should be equal"); |
1725 | 0 | ccv_nnc_tensor_free(a); |
1726 | 0 | ccv_nnc_tensor_free(c); |
1727 | 0 | ccv_nnc_tensor_free(ga); |
1728 | 0 | ccv_nnc_tensor_free(gc); |
1729 | 0 | } |
1730 | | |
1731 | | TEST_CASE("compare average pooling with mps") |
1732 | 1 | { |
1733 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_AVERAGE_POOL_FORWARD, CCV_NNC_BACKEND_MPS)); |
1734 | 0 | ccv_nnc_symbolic_graph_t* symbolic_graph = ccv_nnc_symbolic_graph_new(); |
1735 | 0 | ccv_nnc_tensor_symbol_t x = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 7, 7, 10), "x"); |
1736 | 0 | ccv_nnc_tensor_symbol_t y = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 3, 3, 10), "y"); |
1737 | 0 | ccv_nnc_graph_exec_symbol_t avg_pool = ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_AVERAGE_POOL_FORWARD(5, 5), TENSOR_SYMBOL_LIST(x), TENSOR_SYMBOL_LIST(y), "avg_pool"); |
1738 | 0 | ccv_nnc_graph_exec_symbol_set_hint(symbolic_graph, avg_pool, HINT((2, 2), (1, 1))); |
1739 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
1740 | 0 | ccv_nnc_graph_t* graph = 0; |
1741 | 0 | ccv_nnc_tensor_arena_t* tensor_arena = 0; |
1742 | 0 | ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0; |
1743 | 0 | ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena); |
1744 | 0 | SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH); |
1745 | 0 | GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH); |
1746 | 0 | dsfmt_t dsfmt; |
1747 | 0 | dsfmt_init_gen_rand(&dsfmt, 0); |
1748 | 0 | ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 7, 7, 10), 0); |
1749 | 0 | int i; |
1750 | 0 | for (i = 0; i < 7 * 7 * 10; i++) |
1751 | 0 | x_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
1752 | 0 | ccv_nnc_tensor_t* const xt = ccv_nnc_tensor_from_symbol(tensor_arena, x); |
1753 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor), TENSOR_LIST(xt), 0); |
1754 | 0 | ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
1755 | 0 | ccv_nnc_tensor_t* const y_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 3, 3, 10), 0); |
1756 | 0 | ccv_nnc_cmd_exec(CMD_AVERAGE_POOL_FORWARD(5, 5), HINT((2, 2), (1, 1)), 0, TENSOR_LIST(x_tensor), TENSOR_LIST(y_tensor), 0); |
1757 | 0 | ccv_nnc_tensor_t* const yt = ccv_nnc_tensor_from_symbol(tensor_arena, y); |
1758 | 0 | ccv_nnc_tensor_t* const cpu_y = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 3, 3, 10), 0); |
1759 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(yt), TENSOR_LIST(cpu_y), 0); |
1760 | 0 | REQUIRE_TENSOR_EQ(y_tensor, cpu_y, "mps result should be equal to cpu result"); |
1761 | 0 | ccv_nnc_symbolic_graph_free(symbolic_graph); |
1762 | 0 | ccv_nnc_tensor_arena_free(tensor_arena); |
1763 | 0 | ccv_nnc_graph_exec_arena_free(graph_exec_arena); |
1764 | 0 | ccv_nnc_graph_free(graph); |
1765 | 0 | ccv_nnc_tensor_free(x_tensor); |
1766 | 0 | ccv_nnc_tensor_free(y_tensor); |
1767 | 0 | ccv_nnc_tensor_free(cpu_y); |
1768 | 0 | } |
1769 | | |
1770 | | TEST_CASE("compare average pooling with mps in half precision") |
1771 | 1 | { |
1772 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_AVERAGE_POOL_FORWARD, CCV_NNC_BACKEND_MPS)); |
1773 | 0 | ccv_nnc_symbolic_graph_t* symbolic_graph = ccv_nnc_symbolic_graph_new(); |
1774 | 0 | ccv_nnc_tensor_symbol_t x = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 16F, 7, 7, 10), "x"); |
1775 | 0 | ccv_nnc_tensor_symbol_t y = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 16F, 3, 3, 10), "y"); |
1776 | 0 | ccv_nnc_graph_exec_symbol_t avg_pool = ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_AVERAGE_POOL_FORWARD(5, 5), TENSOR_SYMBOL_LIST(x), TENSOR_SYMBOL_LIST(y), "avg_pool"); |
1777 | 0 | ccv_nnc_graph_exec_symbol_set_hint(symbolic_graph, avg_pool, HINT((2, 2), (1, 1))); |
1778 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
1779 | 0 | ccv_nnc_graph_t* graph = 0; |
1780 | 0 | ccv_nnc_tensor_arena_t* tensor_arena = 0; |
1781 | 0 | ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0; |
1782 | 0 | ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena); |
1783 | 0 | SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH); |
1784 | 0 | GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH); |
1785 | 0 | dsfmt_t dsfmt; |
1786 | 0 | dsfmt_init_gen_rand(&dsfmt, 0); |
1787 | 0 | ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 7, 7, 10), 0); |
1788 | 0 | int i; |
1789 | 0 | for (i = 0; i < 7 * 7 * 10; i++) |
1790 | 0 | x_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
1791 | 0 | ccv_nnc_tensor_t* const x16_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(16F, 7, 7, 10), 0); |
1792 | 0 | ccv_nnc_tensor_t* const xt = ccv_nnc_tensor_from_symbol(tensor_arena, x); |
1793 | 0 | ccv_nnc_cmd_exec(CMD_DATATYPE_CONVERSION_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor), TENSOR_LIST(x16_tensor), 0); |
1794 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x16_tensor), TENSOR_LIST(xt), 0); |
1795 | 0 | ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
1796 | 0 | ccv_nnc_tensor_t* const y_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 3, 3, 10), 0); |
1797 | 0 | ccv_nnc_cmd_exec(CMD_AVERAGE_POOL_FORWARD(5, 5), HINT((2, 2), (1, 1)), 0, TENSOR_LIST(x_tensor), TENSOR_LIST(y_tensor), 0); |
1798 | 0 | ccv_nnc_tensor_t* const yt = ccv_nnc_tensor_from_symbol(tensor_arena, y); |
1799 | 0 | ccv_nnc_tensor_t* const cpu_y = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 3, 3, 10), 0); |
1800 | 0 | ccv_nnc_tensor_t* const cpu_y16 = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(16F, 3, 3, 10), 0); |
1801 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(yt), TENSOR_LIST(cpu_y16), 0); |
1802 | 0 | ccv_nnc_cmd_exec(CMD_DATATYPE_CONVERSION_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(cpu_y16), TENSOR_LIST(cpu_y), 0); |
1803 | 0 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, y_tensor->data.f32, cpu_y->data.f32, 3 * 3 * 10, 1e-3, "mps result should be equal to cpu result"); |
1804 | 0 | ccv_nnc_symbolic_graph_free(symbolic_graph); |
1805 | 0 | ccv_nnc_tensor_arena_free(tensor_arena); |
1806 | 0 | ccv_nnc_graph_exec_arena_free(graph_exec_arena); |
1807 | 0 | ccv_nnc_graph_free(graph); |
1808 | 0 | ccv_nnc_tensor_free(x_tensor); |
1809 | 0 | ccv_nnc_tensor_free(x16_tensor); |
1810 | 0 | ccv_nnc_tensor_free(y_tensor); |
1811 | 0 | ccv_nnc_tensor_free(cpu_y); |
1812 | 0 | ccv_nnc_tensor_free(cpu_y16); |
1813 | 0 | } |
1814 | | |
1815 | | TEST_CASE("compare max pooling with mps") |
1816 | 1 | { |
1817 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_MAX_POOL_FORWARD, CCV_NNC_BACKEND_MPS)); |
1818 | 0 | ccv_nnc_symbolic_graph_t* symbolic_graph = ccv_nnc_symbolic_graph_new(); |
1819 | 0 | ccv_nnc_tensor_symbol_t x = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 7, 7, 10), "x"); |
1820 | 0 | ccv_nnc_tensor_symbol_t y = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 3, 3, 10), "y"); |
1821 | 0 | ccv_nnc_graph_exec_symbol_t max_pool = ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_MAX_POOL_FORWARD(5, 5), TENSOR_SYMBOL_LIST(x), TENSOR_SYMBOL_LIST(y), "max_pool"); |
1822 | 0 | ccv_nnc_graph_exec_symbol_set_hint(symbolic_graph, max_pool, HINT((2, 2), (1, 1))); |
1823 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
1824 | 0 | ccv_nnc_graph_t* graph = 0; |
1825 | 0 | ccv_nnc_tensor_arena_t* tensor_arena = 0; |
1826 | 0 | ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0; |
1827 | 0 | ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena); |
1828 | 0 | SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH); |
1829 | 0 | GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH); |
1830 | 0 | dsfmt_t dsfmt; |
1831 | 0 | dsfmt_init_gen_rand(&dsfmt, 0); |
1832 | 0 | ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 7, 7, 10), 0); |
1833 | 0 | int i; |
1834 | 0 | for (i = 0; i < 7 * 7 * 10; i++) |
1835 | 0 | x_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
1836 | 0 | ccv_nnc_tensor_t* const xt = ccv_nnc_tensor_from_symbol(tensor_arena, x); |
1837 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor), TENSOR_LIST(xt), 0); |
1838 | 0 | ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
1839 | 0 | ccv_nnc_tensor_t* const y_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 3, 3, 10), 0); |
1840 | 0 | ccv_nnc_cmd_exec(CMD_MAX_POOL_FORWARD(5, 5), HINT((2, 2), (1, 1)), 0, TENSOR_LIST(x_tensor), TENSOR_LIST(y_tensor), 0); |
1841 | 0 | ccv_nnc_tensor_t* const yt = ccv_nnc_tensor_from_symbol(tensor_arena, y); |
1842 | 0 | ccv_nnc_tensor_t* const cpu_y = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 3, 3, 10), 0); |
1843 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(yt), TENSOR_LIST(cpu_y), 0); |
1844 | 0 | REQUIRE_TENSOR_EQ(y_tensor, cpu_y, "mps result should be equal to cpu result"); |
1845 | 0 | ccv_nnc_symbolic_graph_free(symbolic_graph); |
1846 | 0 | ccv_nnc_tensor_arena_free(tensor_arena); |
1847 | 0 | ccv_nnc_graph_exec_arena_free(graph_exec_arena); |
1848 | 0 | ccv_nnc_graph_free(graph); |
1849 | 0 | ccv_nnc_tensor_free(x_tensor); |
1850 | 0 | ccv_nnc_tensor_free(y_tensor); |
1851 | 0 | ccv_nnc_tensor_free(cpu_y); |
1852 | 0 | } |
1853 | | |
1854 | | TEST_CASE("compare max pooling with mps in half precision") |
1855 | 1 | { |
1856 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_MAX_POOL_FORWARD, CCV_NNC_BACKEND_MPS)); |
1857 | 0 | ccv_nnc_symbolic_graph_t* symbolic_graph = ccv_nnc_symbolic_graph_new(); |
1858 | 0 | ccv_nnc_tensor_symbol_t x = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 16F, 7, 7, 10), "x"); |
1859 | 0 | ccv_nnc_tensor_symbol_t y = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 16F, 3, 3, 10), "y"); |
1860 | 0 | ccv_nnc_graph_exec_symbol_t max_pool = ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_MAX_POOL_FORWARD(5, 5), TENSOR_SYMBOL_LIST(x), TENSOR_SYMBOL_LIST(y), "max_pool"); |
1861 | 0 | ccv_nnc_graph_exec_symbol_set_hint(symbolic_graph, max_pool, HINT((2, 2), (1, 1))); |
1862 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
1863 | 0 | ccv_nnc_graph_t* graph = 0; |
1864 | 0 | ccv_nnc_tensor_arena_t* tensor_arena = 0; |
1865 | 0 | ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0; |
1866 | 0 | ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena); |
1867 | 0 | SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH); |
1868 | 0 | GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH); |
1869 | 0 | dsfmt_t dsfmt; |
1870 | 0 | dsfmt_init_gen_rand(&dsfmt, 0); |
1871 | 0 | ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 7, 7, 10), 0); |
1872 | 0 | int i; |
1873 | 0 | for (i = 0; i < 7 * 7 * 10; i++) |
1874 | 0 | x_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
1875 | 0 | ccv_nnc_tensor_t* const xt = ccv_nnc_tensor_from_symbol(tensor_arena, x); |
1876 | 0 | ccv_nnc_tensor_t* const x16_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(16F, 7, 7, 10), 0); |
1877 | 0 | ccv_nnc_cmd_exec(CMD_DATATYPE_CONVERSION_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor), TENSOR_LIST(x16_tensor), 0); |
1878 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x16_tensor), TENSOR_LIST(xt), 0); |
1879 | 0 | ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
1880 | 0 | ccv_nnc_tensor_t* const y_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 3, 3, 10), 0); |
1881 | 0 | ccv_nnc_cmd_exec(CMD_MAX_POOL_FORWARD(5, 5), HINT((2, 2), (1, 1)), 0, TENSOR_LIST(x_tensor), TENSOR_LIST(y_tensor), 0); |
1882 | 0 | ccv_nnc_tensor_t* const yt = ccv_nnc_tensor_from_symbol(tensor_arena, y); |
1883 | 0 | ccv_nnc_tensor_t* const cpu_y16 = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(16F, 3, 3, 10), 0); |
1884 | 0 | ccv_nnc_tensor_t* const cpu_y = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 3, 3, 10), 0); |
1885 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(yt), TENSOR_LIST(cpu_y16), 0); |
1886 | 0 | ccv_nnc_cmd_exec(CMD_DATATYPE_CONVERSION_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(cpu_y16), TENSOR_LIST(cpu_y), 0); |
1887 | 0 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, y_tensor->data.f32, cpu_y->data.f32, 3 * 3 * 10, 1e-3, "mps result should be equal to cpu result"); |
1888 | 0 | ccv_nnc_symbolic_graph_free(symbolic_graph); |
1889 | 0 | ccv_nnc_tensor_arena_free(tensor_arena); |
1890 | 0 | ccv_nnc_graph_exec_arena_free(graph_exec_arena); |
1891 | 0 | ccv_nnc_graph_free(graph); |
1892 | 0 | ccv_nnc_tensor_free(x_tensor); |
1893 | 0 | ccv_nnc_tensor_free(x16_tensor); |
1894 | 0 | ccv_nnc_tensor_free(y_tensor); |
1895 | 0 | ccv_nnc_tensor_free(cpu_y); |
1896 | 0 | ccv_nnc_tensor_free(cpu_y16); |
1897 | 0 | } |
1898 | | |
1899 | | TEST_CASE("compare max pooling 2x2 with mps") |
1900 | 1 | { |
1901 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_MAX_POOL_FORWARD, CCV_NNC_BACKEND_MPS)); |
1902 | 0 | ccv_nnc_symbolic_graph_t* symbolic_graph = ccv_nnc_symbolic_graph_new(); |
1903 | 0 | ccv_nnc_tensor_symbol_t x = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NCHW(000, 32F, 10, 6, 6), "x"); |
1904 | 0 | ccv_nnc_tensor_symbol_t y = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NCHW(000, 32F, 10, 3, 3), "y"); |
1905 | 0 | ccv_nnc_graph_exec_symbol_t max_pool = ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_MAX_POOL_FORWARD(2, 2), TENSOR_SYMBOL_LIST(x), TENSOR_SYMBOL_LIST(y), "max_pool"); |
1906 | 0 | ccv_nnc_graph_exec_symbol_set_hint(symbolic_graph, max_pool, HINT((2, 2), (0, 0))); |
1907 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
1908 | 0 | ccv_nnc_graph_t* graph = 0; |
1909 | 0 | ccv_nnc_tensor_arena_t* tensor_arena = 0; |
1910 | 0 | ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0; |
1911 | 0 | ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena); |
1912 | 0 | SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH); |
1913 | 0 | GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH); |
1914 | 0 | dsfmt_t dsfmt; |
1915 | 0 | dsfmt_init_gen_rand(&dsfmt, 0); |
1916 | 0 | ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 10, 6, 6), 0); |
1917 | 0 | int i, j; |
1918 | 0 | for (i = 0; i < 6 * 6 * 10; i++) |
1919 | 0 | x_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
1920 | 0 | ccv_nnc_tensor_t* const gt_x = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 6, 6, 10), 0); |
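| | // Repack the NCHW input into NHWC so the CPU reference pooling below operates on the same data. |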
1921 | 0 | for (i = 0; i < 10; i++) |
1922 | 0 | for (j = 0; j < 6 * 6; j++) |
1923 | 0 | gt_x->data.f32[j * 10 + i] = x_tensor->data.f32[i * 6 * 6 + j]; |
1924 | 0 | ccv_nnc_tensor_t* const xt = ccv_nnc_tensor_from_symbol(tensor_arena, x); |
1925 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor), TENSOR_LIST(xt), 0); |
1926 | 0 | ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
1927 | 0 | ccv_nnc_tensor_t* const gt_y = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 3, 3, 10), 0); |
1928 | 0 | ccv_nnc_cmd_exec(CMD_MAX_POOL_FORWARD(2, 2), HINT((2, 2), (0, 0)), 0, TENSOR_LIST(gt_x), TENSOR_LIST(gt_y), 0); |
1929 | 0 | ccv_nnc_tensor_t* const yt = ccv_nnc_tensor_from_symbol(tensor_arena, y); |
1930 | 0 | ccv_nnc_tensor_t* const cpu_y = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 10, 3, 3), 0); |
1931 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(yt), TENSOR_LIST(cpu_y), 0); |
1932 | 0 | ccv_nnc_tensor_t* const y_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 10, 3, 3), 0); |
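| | // Repack the NHWC reference output back into NCHW for comparison with the MPS result. |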
1933 | 0 | for (i = 0; i < 10; i++) |
1934 | 0 | for (j = 0; j < 3 * 3; j++) |
1935 | 0 | y_tensor->data.f32[i * 3 * 3 + j] = gt_y->data.f32[j * 10 + i]; |
1936 | 0 | REQUIRE_TENSOR_EQ(y_tensor, cpu_y, "mps result should be equal to cpu result"); |
1937 | 0 | ccv_nnc_symbolic_graph_free(symbolic_graph); |
1938 | 0 | ccv_nnc_tensor_arena_free(tensor_arena); |
1939 | 0 | ccv_nnc_graph_exec_arena_free(graph_exec_arena); |
1940 | 0 | ccv_nnc_graph_free(graph); |
1941 | 0 | ccv_nnc_tensor_free(x_tensor); |
1942 | 0 | ccv_nnc_tensor_free(y_tensor); |
1943 | 0 | ccv_nnc_tensor_free(cpu_y); |
1944 | 0 | } |
1945 | | |
1946 | | TEST_CASE("compare max pooling 2x2 with mps in half precision") |
1947 | 1 | { |
1948 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_MAX_POOL_FORWARD, CCV_NNC_BACKEND_MPS)); |
1949 | 0 | ccv_nnc_symbolic_graph_t* symbolic_graph = ccv_nnc_symbolic_graph_new(); |
1950 | 0 | ccv_nnc_tensor_symbol_t x = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NCHW(000, 16F, 10, 6, 6), "x"); |
1951 | 0 | ccv_nnc_tensor_symbol_t y = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NCHW(000, 16F, 10, 3, 3), "y"); |
1952 | 0 | ccv_nnc_graph_exec_symbol_t max_pool = ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_MAX_POOL_FORWARD(2, 2), TENSOR_SYMBOL_LIST(x), TENSOR_SYMBOL_LIST(y), "max_pool"); |
1953 | 0 | ccv_nnc_graph_exec_symbol_set_hint(symbolic_graph, max_pool, HINT((2, 2), (0, 0))); |
1954 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
1955 | 0 | ccv_nnc_graph_t* graph = 0; |
1956 | 0 | ccv_nnc_tensor_arena_t* tensor_arena = 0; |
1957 | 0 | ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0; |
1958 | 0 | ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena); |
1959 | 0 | SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH); |
1960 | 0 | GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH); |
1961 | 0 | dsfmt_t dsfmt; |
1962 | 0 | dsfmt_init_gen_rand(&dsfmt, 0); |
1963 | 0 | ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 10, 6, 6), 0); |
1964 | 0 | int i, j; |
1965 | 0 | for (i = 0; i < 6 * 6 * 10; i++) |
1966 | 0 | x_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
1967 | 0 | ccv_nnc_tensor_t* const gt_x = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 6, 6, 10), 0); |
1968 | 0 | for (i = 0; i < 10; i++) |
1969 | 0 | for (j = 0; j < 6 * 6; j++) |
1970 | 0 | gt_x->data.f32[j * 10 + i] = x_tensor->data.f32[i * 6 * 6 + j]; |
1971 | 0 | ccv_nnc_tensor_t* const xt = ccv_nnc_tensor_from_symbol(tensor_arena, x); |
1972 | 0 | ccv_nnc_tensor_t* const x16_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(16F, 10, 6, 6), 0); |
1973 | 0 | ccv_nnc_cmd_exec(CMD_DATATYPE_CONVERSION_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor), TENSOR_LIST(x16_tensor), 0); |
1974 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x16_tensor), TENSOR_LIST(xt), 0); |
1975 | 0 | ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
1976 | 0 | ccv_nnc_tensor_t* const gt_y = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 3, 3, 10), 0); |
1977 | 0 | ccv_nnc_cmd_exec(CMD_MAX_POOL_FORWARD(2, 2), HINT((2, 2), (0, 0)), 0, TENSOR_LIST(gt_x), TENSOR_LIST(gt_y), 0); |
1978 | 0 | ccv_nnc_tensor_t* const yt = ccv_nnc_tensor_from_symbol(tensor_arena, y); |
1979 | 0 | ccv_nnc_tensor_t* const cpu_y16 = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(16F, 10, 3, 3), 0); |
1980 | 0 | ccv_nnc_tensor_t* const cpu_y = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 10, 3, 3), 0); |
1981 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(yt), TENSOR_LIST(cpu_y16), 0); |
1982 | 0 | ccv_nnc_cmd_exec(CMD_DATATYPE_CONVERSION_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(cpu_y16), TENSOR_LIST(cpu_y), 0); |
1983 | 0 | ccv_nnc_tensor_t* const y_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 10, 3, 3), 0); |
1984 | 0 | for (i = 0; i < 10; i++) |
1985 | 0 | for (j = 0; j < 3 * 3; j++) |
1986 | 0 | y_tensor->data.f32[i * 3 * 3 + j] = gt_y->data.f32[j * 10 + i]; |
1987 | 0 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, y_tensor->data.f32, cpu_y->data.f32, 10 * 3 * 3, 1e-3, "mps result should be equal to cpu result"); |
1988 | 0 | ccv_nnc_symbolic_graph_free(symbolic_graph); |
1989 | 0 | ccv_nnc_tensor_arena_free(tensor_arena); |
1990 | 0 | ccv_nnc_graph_exec_arena_free(graph_exec_arena); |
1991 | 0 | ccv_nnc_graph_free(graph); |
1992 | 0 | ccv_nnc_tensor_free(x_tensor); |
1993 | 0 | ccv_nnc_tensor_free(x16_tensor); |
1994 | 0 | ccv_nnc_tensor_free(y_tensor); |
1995 | 0 | ccv_nnc_tensor_free(cpu_y); |
1996 | 0 | ccv_nnc_tensor_free(cpu_y16); |
1997 | 0 | } |
1998 | | |
1999 | | |
2000 | | TEST_CASE("mps mse mean loss forward") |
2001 | 1 | { |
2002 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_MSE_FORWARD, CCV_NNC_BACKEND_MPS)); |
2003 | 0 | ccv_nnc_tensor_t* a = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 10, 100), 0); |
2004 | 0 | ccv_nnc_tensor_t* b = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 10, 100), 0); |
2005 | 0 | ccv_nnc_tensor_t* c = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 10, 1), 0); |
2006 | 0 | ccv_nnc_tensor_t* ha = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 100), 0); |
2007 | 0 | ccv_nnc_tensor_t* hb = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 100), 0); |
2008 | 0 | ccv_nnc_tensor_t* hc = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 1), 0); |
2009 | 0 | dsfmt_t dsfmt; |
2010 | 0 | dsfmt_init_gen_rand(&dsfmt, 0); |
2011 | 0 | int i; |
2012 | 0 | for (i = 0; i < 1000; i++) |
2013 | 0 | ha->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
2014 | 0 | for (i = 0; i < 1000; i++) |
2015 | 0 | hb->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
2016 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(ha, hb), TENSOR_LIST(a, b), 0); |
2017 | 0 | ccv_nnc_cmd_exec(CMD_MSE_FORWARD(CCV_NNC_MSE_REDUCE_MEAN), ccv_nnc_no_hint, 0, TENSOR_LIST(ha, hb), TENSOR_LIST(hc), 0); |
2018 | 0 | ccv_nnc_cmd_exec(CMD_MSE_FORWARD(CCV_NNC_MSE_REDUCE_MEAN), ccv_nnc_no_hint, 0, TENSOR_LIST(a, b), TENSOR_LIST(c), 0); |
2019 | 0 | ccv_nnc_tensor_t* tc = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 1), 0); |
2020 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(c), TENSOR_LIST(tc), 0); |
2021 | 0 | REQUIRE_TENSOR_EQ(tc, hc, "MPS computed output should be the same as CPU computed ones"); |
2022 | 0 | ccv_nnc_tensor_free(a); |
2023 | 0 | ccv_nnc_tensor_free(b); |
2024 | 0 | ccv_nnc_tensor_free(c); |
2025 | 0 | ccv_nnc_tensor_free(ha); |
2026 | 0 | ccv_nnc_tensor_free(hb); |
2027 | 0 | ccv_nnc_tensor_free(hc); |
2028 | 0 | ccv_nnc_tensor_free(tc); |
2029 | 0 | } |
2030 | | |
2031 | | TEST_CASE("mps mse sum loss forward") |
2032 | 1 | { |
2033 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_MSE_FORWARD, CCV_NNC_BACKEND_MPS)); |
2034 | 0 | ccv_nnc_tensor_t* a = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 10, 100), 0); |
2035 | 0 | ccv_nnc_tensor_t* b = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 10, 100), 0); |
2036 | 0 | ccv_nnc_tensor_t* c = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 10, 1), 0); |
2037 | 0 | ccv_nnc_tensor_t* ha = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 100), 0); |
2038 | 0 | ccv_nnc_tensor_t* hb = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 100), 0); |
2039 | 0 | ccv_nnc_tensor_t* hc = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 1), 0); |
2040 | 0 | dsfmt_t dsfmt; |
2041 | 0 | dsfmt_init_gen_rand(&dsfmt, 0); |
2042 | 0 | int i; |
2043 | 0 | for (i = 0; i < 1000; i++) |
2044 | 0 | ha->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
2045 | 0 | for (i = 0; i < 1000; i++) |
2046 | 0 | hb->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
2047 | | |
2048 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(ha, hb), TENSOR_LIST(a, b), 0); |
2049 | 0 | ccv_nnc_cmd_exec(CMD_MSE_FORWARD(CCV_NNC_MSE_REDUCE_SUM), ccv_nnc_no_hint, 0, TENSOR_LIST(ha, hb), TENSOR_LIST(hc), 0); |
2050 | 0 | ccv_nnc_cmd_exec(CMD_MSE_FORWARD(CCV_NNC_MSE_REDUCE_SUM), ccv_nnc_no_hint, 0, TENSOR_LIST(a, b), TENSOR_LIST(c), 0); |
2051 | 0 | ccv_nnc_tensor_t* tc = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 1), 0); |
2052 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(c), TENSOR_LIST(tc), 0); |
2053 | |
2054 | 0 | REQUIRE_TENSOR_EQ(tc, hc, "MPS computed output should be the same as CPU computed ones"); |
2055 | 0 | ccv_nnc_tensor_free(a); |
2056 | 0 | ccv_nnc_tensor_free(b); |
2057 | 0 | ccv_nnc_tensor_free(c); |
2058 | 0 | ccv_nnc_tensor_free(ha); |
2059 | 0 | ccv_nnc_tensor_free(hb); |
2060 | 0 | ccv_nnc_tensor_free(hc); |
2061 | 0 | ccv_nnc_tensor_free(tc); |
2062 | 0 | } |
2063 | | |
2064 | | TEST_CASE("mps mse mean loss backward") |
2065 | 1 | { |
2066 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_MSE_FORWARD, CCV_NNC_BACKEND_MPS) && |
2067 | 1 | ccv_nnc_cmd_ok(CCV_NNC_MSE_BACKWARD, CCV_NNC_BACKEND_MPS)); |
2068 | |
2069 | 0 | ccv_nnc_tensor_t* a = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 10, 100), 0); |
2070 | 0 | ccv_nnc_tensor_t* b = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 10, 100), 0); |
2071 | 0 | ccv_nnc_tensor_t* c = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 10), 0); |
2072 | 0 | ccv_nnc_tensor_t* da = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 10, 100), 0); |
2073 | 0 | ccv_nnc_tensor_t* db = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 10, 100), 0); |
2074 | 0 | ccv_nnc_tensor_t* g = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 10), 0); |
2075 | 0 | ccv_nnc_tensor_t* ha = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 100), 0); |
2076 | 0 | ccv_nnc_tensor_t* hb = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 100), 0); |
2077 | 0 | ccv_nnc_tensor_t* hc = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10), 0); |
2078 | 0 | ccv_nnc_tensor_t* hda = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 100), 0); |
2079 | 0 | ccv_nnc_tensor_t* hdb = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 100), 0); |
2080 | 0 | ccv_nnc_tensor_t* hg = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10), 0); |
2081 | 0 | dsfmt_t dsfmt; |
2082 | 0 | dsfmt_init_gen_rand(&dsfmt, 0); |
2083 | 0 | int i; |
2084 | 0 | for (i = 0; i < 1000; i++) |
2085 | 0 | ha->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
2086 | 0 | for (i = 0; i < 1000; i++) |
2087 | 0 | hb->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
2088 | 0 | for (i = 0; i < 10; i++) |
2089 | 0 | hg->data.f32[i] = 1; |
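| | // The upstream gradient g is all ones, so da and db are the unscaled gradients of the mean-reduced MSE. |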
2090 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(ha, hb, hg), TENSOR_LIST(a, b, g), 0); |
2091 | 0 | ccv_nnc_cmd_exec(CMD_MSE_FORWARD(CCV_NNC_MSE_REDUCE_MEAN), ccv_nnc_no_hint, 0, TENSOR_LIST(ha, hb), TENSOR_LIST(hc), 0); |
2092 | 0 | ccv_nnc_cmd_exec(CMD_MSE_BACKWARD(CCV_NNC_MSE_REDUCE_MEAN), ccv_nnc_no_hint, 0, TENSOR_LIST(hg, ha, hb), TENSOR_LIST(hda, hdb), 0); |
2093 | 0 | ccv_nnc_cmd_exec(CMD_MSE_FORWARD(CCV_NNC_MSE_REDUCE_MEAN), ccv_nnc_no_hint, 0, TENSOR_LIST(a, b), TENSOR_LIST(c), 0); |
2094 | 0 | ccv_nnc_cmd_exec(CMD_MSE_BACKWARD(CCV_NNC_MSE_REDUCE_MEAN), ccv_nnc_no_hint, 0, TENSOR_LIST(g, a, b), TENSOR_LIST(da, db), 0); |
2095 | 0 | ccv_nnc_tensor_t* tda = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 100), 0); |
2096 | 0 | ccv_nnc_tensor_t* tdb = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 100), 0); |
2097 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(da, db), TENSOR_LIST(tda, tdb), 0); |
2098 | |
2099 | 0 | REQUIRE_TENSOR_EQ(tda, hda, "MPS computed output should be the same as CPU computed ones"); |
2100 | 0 | REQUIRE_TENSOR_EQ(tdb, hdb, "MPS computed output should be the same as CPU computed ones"); |
2101 | |
2102 | 0 | ccv_nnc_tensor_free(a); |
2103 | 0 | ccv_nnc_tensor_free(b); |
2104 | 0 | ccv_nnc_tensor_free(c); |
2105 | 0 | ccv_nnc_tensor_free(da); |
2106 | 0 | ccv_nnc_tensor_free(db); |
2107 | 0 | ccv_nnc_tensor_free(g); |
2108 | 0 | ccv_nnc_tensor_free(ha); |
2109 | 0 | ccv_nnc_tensor_free(hb); |
2110 | 0 | ccv_nnc_tensor_free(hc); |
2111 | 0 | ccv_nnc_tensor_free(hda); |
2112 | 0 | ccv_nnc_tensor_free(hdb); |
2113 | 0 | ccv_nnc_tensor_free(hg); |
2114 | 0 | ccv_nnc_tensor_free(tda); |
2115 | 0 | ccv_nnc_tensor_free(tdb); |
2116 | 0 | } |
2117 | | |
2118 | | TEST_CASE("mps mse sum loss backward") |
2119 | 1 | { |
2120 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_MSE_FORWARD, CCV_NNC_BACKEND_MPS) && |
2121 | 1 | ccv_nnc_cmd_ok(CCV_NNC_MSE_BACKWARD, CCV_NNC_BACKEND_MPS)); |
2122 | 0 | ccv_nnc_tensor_t* a = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 10, 100), 0); |
2123 | 0 | ccv_nnc_tensor_t* b = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 10, 100), 0); |
2124 | 0 | ccv_nnc_tensor_t* c = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 10), 0); |
2125 | 0 | ccv_nnc_tensor_t* da = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 10, 100), 0); |
2126 | 0 | ccv_nnc_tensor_t* db = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 10, 100), 0); |
2127 | 0 | ccv_nnc_tensor_t* g = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 10), 0); |
2128 | 0 | ccv_nnc_tensor_t* ha = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 100), 0); |
2129 | 0 | ccv_nnc_tensor_t* hb = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 100), 0); |
2130 | 0 | ccv_nnc_tensor_t* hc = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10), 0); |
2131 | 0 | ccv_nnc_tensor_t* hda = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 100), 0); |
2132 | 0 | ccv_nnc_tensor_t* hdb = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 100), 0); |
2133 | 0 | ccv_nnc_tensor_t* hg = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10), 0); |
2134 | 0 | dsfmt_t dsfmt; |
2135 | 0 | dsfmt_init_gen_rand(&dsfmt, 0); |
2136 | 0 | int i; |
2137 | 0 | for (i = 0; i < 1000; i++) |
2138 | 0 | ha->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
2139 | 0 | for (i = 0; i < 1000; i++) |
2140 | 0 | hb->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
2141 | 0 | for (i = 0; i < 10; i++) |
2142 | 0 | hg->data.f32[i] = 1; |
2143 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(ha, hb, hg), TENSOR_LIST(a, b, g), 0); |
2144 | 0 | ccv_nnc_cmd_exec(CMD_MSE_FORWARD(CCV_NNC_MSE_REDUCE_SUM), ccv_nnc_no_hint, 0, TENSOR_LIST(ha, hb), TENSOR_LIST(hc), 0); |
2145 | 0 | ccv_nnc_cmd_exec(CMD_MSE_BACKWARD(CCV_NNC_MSE_REDUCE_SUM), ccv_nnc_no_hint, 0, TENSOR_LIST(hg, ha, hb), TENSOR_LIST(hda, hdb), 0); |
2146 | 0 | ccv_nnc_cmd_exec(CMD_MSE_FORWARD(CCV_NNC_MSE_REDUCE_SUM), ccv_nnc_no_hint, 0, TENSOR_LIST(a, b), TENSOR_LIST(c), 0); |
2147 | 0 | ccv_nnc_cmd_exec(CMD_MSE_BACKWARD(CCV_NNC_MSE_REDUCE_SUM), ccv_nnc_no_hint, 0, TENSOR_LIST(g, a, b), TENSOR_LIST(da, db), 0); |
2148 | 0 | ccv_nnc_tensor_t* tda = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 100), 0); |
2149 | 0 | ccv_nnc_tensor_t* tdb = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 100), 0); |
2150 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(da, db), TENSOR_LIST(tda, tdb), 0); |
2151 | 0 | REQUIRE_TENSOR_EQ(tda, hda, "MPS computed output should be the same as CPU computed ones"); |
2152 | 0 | REQUIRE_TENSOR_EQ(tdb, hdb, "MPS computed output should be the same as CPU computed ones"); |
2153 | 0 | ccv_nnc_tensor_free(a); |
2154 | 0 | ccv_nnc_tensor_free(b); |
2155 | 0 | ccv_nnc_tensor_free(c); |
2156 | 0 | ccv_nnc_tensor_free(da); |
2157 | 0 | ccv_nnc_tensor_free(db); |
2158 | 0 | ccv_nnc_tensor_free(g); |
2159 | 0 | ccv_nnc_tensor_free(ha); |
2160 | 0 | ccv_nnc_tensor_free(hb); |
2161 | 0 | ccv_nnc_tensor_free(hc); |
2162 | 0 | ccv_nnc_tensor_free(hda); |
2163 | 0 | ccv_nnc_tensor_free(hdb); |
2164 | 0 | ccv_nnc_tensor_free(hg); |
2165 | 0 | ccv_nnc_tensor_free(tda); |
2166 | 0 | ccv_nnc_tensor_free(tdb); |
2167 | 0 | } |
2168 | | |
2169 | | |
2170 | | TEST_CASE("mps mse sum loss backward (no output db)") |
2171 | 1 | { |
2172 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_MSE_FORWARD, CCV_NNC_BACKEND_MPS) && |
2173 | 1 | ccv_nnc_cmd_ok(CCV_NNC_MSE_BACKWARD, CCV_NNC_BACKEND_MPS)); |
2174 | 0 | ccv_nnc_tensor_t* a = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 10, 100), 0); |
2175 | 0 | ccv_nnc_tensor_t* b = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 10, 100), 0); |
2176 | 0 | ccv_nnc_tensor_t* c = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 10), 0); |
2177 | 0 | ccv_nnc_tensor_t* da = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 10, 100), 0); |
2178 | 0 | ccv_nnc_tensor_t* g = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 10), 0); |
2179 | 0 | ccv_nnc_tensor_t* ha = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 100), 0); |
2180 | 0 | ccv_nnc_tensor_t* hb = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 100), 0); |
2181 | 0 | ccv_nnc_tensor_t* hc = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10), 0); |
2182 | 0 | ccv_nnc_tensor_t* hda = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 100), 0); |
2183 | 0 | ccv_nnc_tensor_t* hg = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10), 0); |
2184 | 0 | dsfmt_t dsfmt; |
2185 | 0 | dsfmt_init_gen_rand(&dsfmt, 0); |
2186 | 0 | int i; |
2187 | 0 | for (i = 0; i < 1000; i++) |
2188 | 0 | ha->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
2189 | 0 | for (i = 0; i < 1000; i++) |
2190 | 0 | hb->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
2191 | 0 | for (i = 0; i < 10; i++) |
2192 | 0 | hg->data.f32[i] = 1; |
2193 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(ha, hb, hg), TENSOR_LIST(a, b, g), 0); |
2194 | 0 | ccv_nnc_cmd_exec(CMD_MSE_FORWARD(CCV_NNC_MSE_REDUCE_SUM), ccv_nnc_no_hint, 0, TENSOR_LIST(ha, hb), TENSOR_LIST(hc), 0); |
2195 | 0 | ccv_nnc_cmd_exec(CMD_MSE_BACKWARD(CCV_NNC_MSE_REDUCE_SUM), ccv_nnc_no_hint, 0, TENSOR_LIST(hg, ha, hb), TENSOR_LIST(hda, 0), 0); |
2196 | 0 | ccv_nnc_cmd_exec(CMD_MSE_FORWARD(CCV_NNC_MSE_REDUCE_SUM), ccv_nnc_no_hint, 0, TENSOR_LIST(a, b), TENSOR_LIST(c), 0); |
2197 | 0 | ccv_nnc_cmd_exec(CMD_MSE_BACKWARD(CCV_NNC_MSE_REDUCE_SUM), ccv_nnc_no_hint, 0, TENSOR_LIST(g, a, b), TENSOR_LIST(da, 0), 0); |
2198 | 0 | ccv_nnc_tensor_t* tda = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 100), 0); |
2199 | 0 | ccv_nnc_tensor_t* tdb = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 100), 0); |
2200 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(da, 0), TENSOR_LIST(tda, 0), 0); |
2201 | 0 | REQUIRE_TENSOR_EQ(tda, hda, "MPS computed output should be the same as CPU computed ones"); |
2202 | 0 | ccv_nnc_tensor_free(a); |
2203 | 0 | ccv_nnc_tensor_free(b); |
2204 | 0 | ccv_nnc_tensor_free(c); |
2205 | 0 | ccv_nnc_tensor_free(da); |
2206 | 0 | ccv_nnc_tensor_free(g); |
2207 | 0 | ccv_nnc_tensor_free(ha); |
2208 | 0 | ccv_nnc_tensor_free(hb); |
2209 | 0 | ccv_nnc_tensor_free(hc); |
2210 | 0 | ccv_nnc_tensor_free(hda); |
2211 | 0 | ccv_nnc_tensor_free(hg); |
2212 | 0 | ccv_nnc_tensor_free(tda); |
2213 | 0 | ccv_nnc_tensor_free(tdb); |
2214 | 0 | } |
2215 | | |
2216 | | TEST_CASE("mps leaky relu gradient in float") |
2217 | 1 | { |
2218 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_LEAKY_RELU_FORWARD, CCV_NNC_BACKEND_MPS) && |
2219 | 1 | ccv_nnc_cmd_ok(CCV_NNC_LEAKY_RELU_BACKWARD, CCV_NNC_BACKEND_MPS)); |
2220 | 0 | ccv_nnc_symbolic_graph_t* const symbolic_graph = ccv_nnc_symbolic_graph_new(); |
2221 | 0 | ccv_nnc_tensor_symbol_t x = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 10, 100), "x"); |
2222 | 0 | ccv_nnc_tensor_symbol_t y = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 10, 100), "y"); |
2223 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_LEAKY_RELU_FORWARD(0.2), TENSOR_SYMBOL_LIST(x), TENSOR_SYMBOL_LIST(y), "leaky relu"); |
2224 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
2225 | 0 | ccv_nnc_symbolic_graph_backward(symbolic_graph, TENSOR_SYMBOL_LIST(y), TENSOR_SYMBOL_LIST(x), SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph)); |
2226 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
2227 | 0 | SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH); |
2228 | 0 | ccv_nnc_tensor_symbol_t dy = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, y); |
2229 | 0 | ccv_nnc_tensor_symbol_t dx = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, x); |
2230 | 0 | ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 100), 0); |
2231 | 0 | dsfmt_t dsfmt; |
2232 | 0 | dsfmt_init_gen_rand(&dsfmt, 0); |
2233 | 0 | int i; |
2234 | 0 | for (i = 0; i < 10 * 100; i++) |
2235 | 0 | x_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
2236 | 0 | ccv_nnc_tensor_t* const dy_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 100), 0); |
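| | // Upstream gradient dy: all zeros except a single 1 per row, at column i of row i. |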
2237 | 0 | for (i = 0; i < 10 * 100; i++) |
2238 | 0 | dy_tensor->data.f32[i] = 0; |
2239 | 0 | for (i = 0; i < 10; i++) |
2240 | 0 | dy_tensor->data.f32[i * 100 + i] = 1; |
2241 | 0 | ccv_nnc_tensor_t* const dyt = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 10, 100), 0); |
2242 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(dy_tensor), TENSOR_LIST(dyt), 0); |
2243 | 0 | ccv_nnc_graph_t* graph = 0; |
2244 | 0 | ccv_nnc_tensor_arena_t* tensor_arena = 0; |
2245 | 0 | ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0; |
2246 | 0 | ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, TENSOR_BIND_MAP(KV(dy, dyt)), TENSOR_SYMBOL_LIST(y), SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena); |
2247 | 0 | GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH); |
2248 | 0 | ccv_nnc_tensor_t* const xt = ccv_nnc_tensor_from_symbol(tensor_arena, x); |
2249 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor), TENSOR_LIST(xt), 0); |
2250 | 0 | ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
2251 | 0 | ccv_nnc_tensor_t* const dx_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 100), 0); |
2252 | 0 | ccv_nnc_tensor_t* const dxt = ccv_nnc_tensor_from_symbol(tensor_arena, dx); |
2253 | 0 | ccv_nnc_tensor_t* const y_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 100), 0); |
2254 | 0 | ccv_nnc_tensor_t* const yt = ccv_nnc_tensor_from_symbol(tensor_arena, y); |
2255 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(dxt), TENSOR_LIST(dx_tensor), 0); |
2256 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(yt), TENSOR_LIST(y_tensor), 0); |
2257 | 0 | ccv_nnc_tensor_t* const ty_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 100), 0); |
2258 | 0 | ccv_nnc_cmd_exec(CMD_LEAKY_RELU_FORWARD(0.2), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor), TENSOR_LIST(ty_tensor), 0); |
2259 | 0 | REQUIRE_TENSOR_EQ(ty_tensor, y_tensor, "forward pass should match"); |
2260 | 0 | ccv_nnc_tensor_t* const tdx_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 100), 0); |
2261 | 0 | ccv_nnc_cmd_exec(CMD_LEAKY_RELU_BACKWARD(0.2), ccv_nnc_no_hint, 0, TENSOR_LIST(dy_tensor, 0, y_tensor), TENSOR_LIST(tdx_tensor), 0); |
2262 | 0 | REQUIRE_TENSOR_EQ(tdx_tensor, dx_tensor, "backward pass should match"); |
2263 | 0 | ccv_nnc_tensor_free(x_tensor); |
2264 | 0 | ccv_nnc_tensor_free(y_tensor); |
2265 | 0 | ccv_nnc_tensor_free(dx_tensor); |
2266 | 0 | ccv_nnc_tensor_free(dy_tensor); |
2267 | 0 | ccv_nnc_tensor_free(ty_tensor); |
2268 | 0 | ccv_nnc_tensor_free(tdx_tensor); |
2269 | 0 | ccv_nnc_tensor_free(dyt); |
2270 | 0 | ccv_nnc_graph_free(graph); |
2271 | 0 | ccv_nnc_tensor_arena_free(tensor_arena); |
2272 | 0 | ccv_nnc_graph_exec_arena_free(graph_exec_arena); |
2273 | 0 | ccv_nnc_symbolic_graph_free(symbolic_graph); |
2274 | 0 | } |
2275 | | |
2276 | | TEST_CASE("compare layer norm gradient with mps") |
2277 | 1 | { |
2278 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_LAYER_NORM_FORWARD, CCV_NNC_BACKEND_MPS) && |
2279 | 1 | ccv_nnc_cmd_ok(CCV_NNC_LAYER_NORM_BACKWARD, CCV_NNC_BACKEND_MPS) && |
2280 | 1 | ccv_nnc_cmd_ok(CCV_NNC_SET_FORWARD, CCV_NNC_BACKEND_MPS)); |
2281 | 0 | ccv_nnc_symbolic_graph_t* const symbolic_graph = ccv_nnc_symbolic_graph_new(); |
2282 | 0 | ccv_nnc_tensor_symbol_t bx = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, 2, 2, LN_DIM), "x"); |
2283 | 0 | ccv_nnc_tensor_symbol_t by = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, 2, 2, LN_DIM), "y"); |
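 | | // The command normalizes over axes 1, 2 and 3, so scale and bias carry the (1, 2, 2, LN_DIM) shape of those axes. |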
2284 | 0 | ccv_nnc_tensor_symbol_t scale = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 1, 2, 2, LN_DIM), "scale"); |
2285 | 0 | ccv_nnc_tensor_symbol_t bias = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 1, 2, 2, LN_DIM), "bias"); |
2286 | 0 | ccv_nnc_tensor_symbol_t saved_mean = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, 1, 1, 1), "saved_mean"); |
2287 | 0 | ccv_nnc_tensor_symbol_t saved_inv_std = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, 1, 1, 1), "saved_inv_std"); |
2288 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_LAYER_NORM_FORWARD(1e-4, 1, 1, 2, 3), TENSOR_SYMBOL_LIST(bx, scale, bias), TENSOR_SYMBOL_LIST(by, saved_mean, saved_inv_std), "layer_norm"); |
2289 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
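 | | // Build the gradient graph: differentiate y with respect to x, scale and bias. |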
2290 | 0 | ccv_nnc_symbolic_graph_backward(symbolic_graph, TENSOR_SYMBOL_LIST(by), TENSOR_SYMBOL_LIST(bx, scale, bias), SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph)); |
2291 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
2292 | 0 | ccv_nnc_tensor_symbol_t dby = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, by); |
2293 | 0 | ccv_nnc_tensor_symbol_t dbx = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, bx); |
2294 | 0 | ccv_nnc_graph_t* graph = 0; |
2295 | 0 | ccv_nnc_tensor_arena_t* tensor_arena = 0; |
2296 | 0 | ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0; |
2297 | 0 | ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena); |
2298 | 0 | SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH); |
2299 | 0 | GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH); |
2300 | 0 | ccv_nnc_tensor_t* const bx_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, bx); |
2301 | 0 | dsfmt_t dsfmt; |
2302 | 0 | ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 2, 2, LN_DIM), 0); |
2303 | 0 | int i; |
2304 | 0 | dsfmt_init_gen_rand(&dsfmt, 1); |
2305 | 0 | for (i = 0; i < 2 * 2 * 2 * LN_DIM; i++) |
2306 | 0 | x_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) * 100; |
2307 | |
2308 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor), TENSOR_LIST(bx_tensor), 0); |
2309 | 0 | float scaledata[1 * 2 * 2 * LN_DIM]; |
2310 | 0 | float biasdata[1 * 2 * 2 * LN_DIM]; |
2311 | 0 | for (i = 0; i < 1 * 2 * 2 * LN_DIM; i++) |
2312 | 0 | scaledata[i] = dsfmt_genrand_open_close(&dsfmt); |
2313 | 0 | for (i = 0; i < 1 * 2 * 2 * LN_DIM; i++) |
2314 | 0 | biasdata[i] = dsfmt_genrand_open_close(&dsfmt); |
2315 | |
2316 | 0 | ccv_nnc_tensor_t scale_tensor = ccv_nnc_tensor(scaledata, CPU_TENSOR_NHWC(32F, 1, 2, 2, LN_DIM), 0); |
2317 | 0 | ccv_nnc_tensor_t bias_tensor = ccv_nnc_tensor(biasdata, CPU_TENSOR_NHWC(32F, 1, 2, 2, LN_DIM), 0); |
2318 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(&scale_tensor, &bias_tensor), TENSOR_LIST(ccv_nnc_tensor_from_symbol(tensor_arena, scale), ccv_nnc_tensor_from_symbol(tensor_arena, bias)), 0); |
2319 | 0 | ccv_nnc_tensor_t* const dy_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 2, 2, LN_DIM), 0); |
2320 | 0 | ccv_nnc_tensor_t* const dby_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, dby); |
2321 | 0 | for (i = 0; i < 2 * 2 * 2 * LN_DIM; i++) |
2322 | 0 | dy_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) * 2 - 1; |
2323 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(dy_tensor), TENSOR_LIST(dby_tensor), 0); |
2324 | 0 | ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
2325 | 0 | ccv_nnc_tensor_t* const dbx_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, dbx); |
2326 | 0 | ccv_nnc_tensor_t* const dx_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 2, 2, LN_DIM), 0); |
2327 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(dbx_tensor), TENSOR_LIST(dx_tensor), 0); |
2328 | 0 | ccv_nnc_tensor_t* const dbscale_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, ccv_nnc_tensor_symbol_for_backward(symbolic_graph, scale)); |
2329 | 0 | ccv_nnc_tensor_t* const dbbias_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, ccv_nnc_tensor_symbol_for_backward(symbolic_graph, bias)); |
2330 | 0 | ccv_nnc_tensor_t* const dscale_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 1, 2, 2, LN_DIM), 0); |
2331 | 0 | ccv_nnc_tensor_t* const dbias_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 1, 2, 2, LN_DIM), 0); |
2332 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(dbscale_tensor, dbbias_tensor), TENSOR_LIST(dscale_tensor, dbias_tensor), 0); |
2333 | 0 | ccv_nnc_symbolic_graph_free(symbolic_graph); |
2334 | 0 | ccv_nnc_tensor_arena_free(tensor_arena); |
2335 | 0 | ccv_nnc_graph_exec_arena_free(graph_exec_arena); |
2336 | 0 | ccv_nnc_graph_free(graph); |
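 | | // Rebuild the same layer norm network on the CPU with identical inputs to produce the reference gradients. |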
2337 | 0 | ccv_nnc_symbolic_graph_t* const cpu_symbolic_graph = ccv_nnc_symbolic_graph_new(); |
2338 | 0 | ccv_nnc_tensor_symbol_t cx = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 2, 2, LN_DIM), "x"); |
2339 | 0 | ccv_nnc_tensor_symbol_t cy = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 2, 2, LN_DIM), "y"); |
2340 | 0 | ccv_nnc_tensor_symbol_t cscale = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 1, 2, 2, LN_DIM), "scale"); |
2341 | 0 | ccv_nnc_tensor_symbol_t cbias = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 1, 2, 2, LN_DIM), "bias"); |
2342 | 0 | ccv_nnc_tensor_symbol_t csaved_mean = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 1, 1, 1), "saved_mean"); |
2343 | 0 | ccv_nnc_tensor_symbol_t csaved_inv_std = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 1, 1, 1), "saved_inv_std"); |
2344 | 0 | ccv_nnc_graph_exec_symbol_new(cpu_symbolic_graph, CMD_LAYER_NORM_FORWARD(1e-4, 1, 1, 2, 3), TENSOR_SYMBOL_LIST(cx, cscale, cbias), TENSOR_SYMBOL_LIST(cy, csaved_mean, csaved_inv_std), "layer_norm"); |
2345 | 0 | ccv_nnc_graph_exec_symbol_autogen(cpu_symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
2346 | 0 | ccv_nnc_symbolic_graph_backward(cpu_symbolic_graph, TENSOR_SYMBOL_LIST(cy), TENSOR_SYMBOL_LIST(cx, cscale, cbias), SYMBOLIC_GRAPH_SOURCES(cpu_symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(cpu_symbolic_graph)); |
2347 | 0 | ccv_nnc_graph_exec_symbol_autogen(cpu_symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
2348 | 0 | ccv_nnc_tensor_symbol_t dcy = ccv_nnc_tensor_symbol_for_backward(cpu_symbolic_graph, cy); |
2349 | 0 | ccv_nnc_tensor_symbol_t dcx = ccv_nnc_tensor_symbol_for_backward(cpu_symbolic_graph, cx); |
2350 | 0 | ccv_nnc_tensor_symbol_t dcscale = ccv_nnc_tensor_symbol_for_backward(cpu_symbolic_graph, cscale); |
2351 | 0 | ccv_nnc_tensor_symbol_t dcbias = ccv_nnc_tensor_symbol_for_backward(cpu_symbolic_graph, cbias); |
2352 | 0 | ccv_nnc_graph_t* cpu_graph = 0; |
2353 | 0 | ccv_nnc_tensor_arena_t* cpu_tensor_arena = 0; |
2354 | 0 | ccv_nnc_graph_exec_arena_t* cpu_graph_exec_arena = 0; |
2355 | 0 | ccv_nnc_symbolic_graph_compile(cpu_symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(cpu_symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(cpu_symbolic_graph), &cpu_graph, &cpu_tensor_arena, &cpu_graph_exec_arena); |
2356 | 0 | ccv_nnc_tensor_t* const cx_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, cx); |
2357 | 0 | memcpy(cx_tensor->data.f32, x_tensor->data.f32, sizeof(float) * 2 * 2 * 2 * LN_DIM); |
2358 | 0 | ccv_nnc_tensor_t* const dcy_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, dcy); |
2359 | 0 | memcpy(dcy_tensor->data.f32, dy_tensor->data.f32, sizeof(float) * 2 * 2 * 2 * LN_DIM); |
2360 | 0 | ccv_nnc_tensor_t* const cscale_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, cscale); |
2361 | 0 | memcpy(cscale_tensor->data.f32, scaledata, sizeof(float) * 1 * 2 * 2 * LN_DIM); |
2362 | 0 | ccv_nnc_tensor_t* const cbias_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, cbias); |
2363 | 0 | memcpy(cbias_tensor->data.f32, biasdata, sizeof(float) * 1 * 2 * 2 * LN_DIM); |
2364 | 0 | ccv_nnc_graph_run(cpu_graph, 0, TRAVERSE_FULL, 0, 0); |
2365 | 0 | ccv_nnc_tensor_t* const dcscale_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, dcscale); |
2366 | 0 | ccv_nnc_tensor_t* const dcbias_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, dcbias); |
2367 | 0 | ccv_nnc_tensor_t* const dcx_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, dcx); |
2368 | |
2369 | 0 | REQUIRE_TENSOR_EQ(dx_tensor, dcx_tensor, "layer norm gradient result from mps should match the one from reference implementation"); |
2370 | 0 | REQUIRE_TENSOR_EQ(dscale_tensor, dcscale_tensor, "layer norm scale gradient result from mps should match the one from reference implementation"); |
2371 | 0 | REQUIRE_TENSOR_EQ(dbias_tensor, dcbias_tensor, "layer norm bias gradient result from mps should match the one from reference implementation"); |
2372 | 0 | ccv_nnc_symbolic_graph_free(cpu_symbolic_graph); |
2373 | 0 | ccv_nnc_tensor_arena_free(cpu_tensor_arena); |
2374 | 0 | ccv_nnc_graph_exec_arena_free(cpu_graph_exec_arena); |
2375 | 0 | ccv_nnc_graph_free(cpu_graph); |
2376 | 0 | ccv_nnc_tensor_free(x_tensor); |
2377 | 0 | ccv_nnc_tensor_free(dy_tensor); |
2378 | 0 | ccv_nnc_tensor_free(dx_tensor); |
2379 | 0 | ccv_nnc_tensor_free(dscale_tensor); |
2380 | 0 | ccv_nnc_tensor_free(dbias_tensor); |
2381 | 0 | } |
2382 | | |
2383 | | TEST_CASE("compare layer norm gradient with mps (no bias)") |
2384 | 1 | { |
2385 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_LAYER_NORM_FORWARD, CCV_NNC_BACKEND_MPS) && |
2386 | 1 | ccv_nnc_cmd_ok(CCV_NNC_LAYER_NORM_BACKWARD, CCV_NNC_BACKEND_MPS) && |
2387 | 1 | ccv_nnc_cmd_ok(CCV_NNC_SET_FORWARD, CCV_NNC_BACKEND_MPS)); |
2388 | 0 | ccv_nnc_symbolic_graph_t* const symbolic_graph = ccv_nnc_symbolic_graph_new(); |
2389 | 0 | ccv_nnc_tensor_symbol_t bx = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, 2, 2, LN_DIM), "x"); |
2390 | 0 | ccv_nnc_tensor_symbol_t by = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, 2, 2, LN_DIM), "y"); |
2391 | 0 | ccv_nnc_tensor_symbol_t scale = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 1, 2, 2, LN_DIM), "scale"); |
2392 | 0 | ccv_nnc_tensor_symbol_t bias = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 1, 2, 2, LN_DIM), "bias"); |
2393 | 0 | ccv_nnc_tensor_symbol_t saved_mean = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, 1, 1, 1), "saved_mean"); |
2394 | 0 | ccv_nnc_tensor_symbol_t saved_inv_std = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, 1, 1, 1), "saved_inv_std"); |
2395 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_LAYER_NORM_FORWARD(1e-4, 1, 1, 2, 3), TENSOR_SYMBOL_LIST(bx, scale, bias), TENSOR_SYMBOL_LIST(by, saved_mean, saved_inv_std), "layer_norm"); |
2396 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
2397 | 0 | ccv_nnc_symbolic_graph_backward(symbolic_graph, TENSOR_SYMBOL_LIST(by), TENSOR_SYMBOL_LIST(bx, scale), SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph)); |
2398 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
2399 | 0 | ccv_nnc_tensor_symbol_t dby = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, by); |
2400 | 0 | ccv_nnc_tensor_symbol_t dbx = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, bx); |
2401 | 0 | ccv_nnc_graph_t* graph = 0; |
2402 | 0 | ccv_nnc_tensor_arena_t* tensor_arena = 0; |
2403 | 0 | ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0; |
2404 | 0 | ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena); |
2405 | 0 | SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH); |
2406 | 0 | GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH); |
2407 | 0 | ccv_nnc_tensor_t* const bx_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, bx); |
2408 | 0 | dsfmt_t dsfmt; |
2409 | 0 | ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 2, 2, LN_DIM), 0); |
2410 | 0 | int i; |
2411 | 0 | dsfmt_init_gen_rand(&dsfmt, 1); |
2412 | 0 | for (i = 0; i < 2 * 2 * 2 * LN_DIM; i++) |
2413 | 0 | x_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) * 100; |
2414 | |
2415 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor), TENSOR_LIST(bx_tensor), 0); |
2416 | 0 | float scaledata[1 * 2 * 2 * LN_DIM]; |
2417 | 0 | float biasdata[1 * 2 * 2 * LN_DIM]; |
2418 | 0 | for (i = 0; i < 1 * 2 * 2 * LN_DIM; i++) |
2419 | 0 | scaledata[i] = dsfmt_genrand_open_close(&dsfmt); |
2420 | 0 | for (i = 0; i < 1 * 2 * 2 * LN_DIM; i++) |
2421 | 0 | biasdata[i] = dsfmt_genrand_open_close(&dsfmt); |
2422 | |
2423 | 0 | ccv_nnc_tensor_t scale_tensor = ccv_nnc_tensor(scaledata, CPU_TENSOR_NHWC(32F, 1, 2, 2, LN_DIM), 0); |
2424 | 0 | ccv_nnc_tensor_t bias_tensor = ccv_nnc_tensor(biasdata, CPU_TENSOR_NHWC(32F, 1, 2, 2, LN_DIM), 0); |
2425 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(&scale_tensor, &bias_tensor), TENSOR_LIST(ccv_nnc_tensor_from_symbol(tensor_arena, scale), ccv_nnc_tensor_from_symbol(tensor_arena, bias)), 0); |
2426 | 0 | ccv_nnc_tensor_t* const dy_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 2, 2, LN_DIM), 0); |
2427 | 0 | ccv_nnc_tensor_t* const dby_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, dby); |
2428 | 0 | for (i = 0; i < 2 * 2 * 2 * LN_DIM; i++) |
2429 | 0 | dy_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) * 2 - 1; |
2430 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(dy_tensor), TENSOR_LIST(dby_tensor), 0); |
2431 | 0 | ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
2432 | 0 | ccv_nnc_tensor_t* const dbx_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, dbx); |
2433 | 0 | ccv_nnc_tensor_t* const dx_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 2, 2, LN_DIM), 0); |
2434 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(dbx_tensor), TENSOR_LIST(dx_tensor), 0); |
2435 | 0 | ccv_nnc_tensor_t* const dbscale_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, ccv_nnc_tensor_symbol_for_backward(symbolic_graph, scale)); |
2436 | 0 | ccv_nnc_tensor_t* const dscale_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 1, 2, 2, LN_DIM), 0); |
2437 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(dbscale_tensor), TENSOR_LIST(dscale_tensor), 0); |
2438 | 0 | ccv_nnc_symbolic_graph_free(symbolic_graph); |
2439 | 0 | ccv_nnc_tensor_arena_free(tensor_arena); |
2440 | 0 | ccv_nnc_graph_exec_arena_free(graph_exec_arena); |
2441 | 0 | ccv_nnc_graph_free(graph); |
2442 | 0 | ccv_nnc_symbolic_graph_t* const cpu_symbolic_graph = ccv_nnc_symbolic_graph_new(); |
2443 | 0 | ccv_nnc_tensor_symbol_t cx = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 2, 2, LN_DIM), "x"); |
2444 | 0 | ccv_nnc_tensor_symbol_t cy = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 2, 2, LN_DIM), "y"); |
2445 | 0 | ccv_nnc_tensor_symbol_t cscale = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 1, 2, 2, LN_DIM), "scale"); |
2446 | 0 | ccv_nnc_tensor_symbol_t cbias = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 1, 2, 2, LN_DIM), "bias"); |
2447 | 0 | ccv_nnc_tensor_symbol_t csaved_mean = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 1, 1, 1), "saved_mean"); |
2448 | 0 | ccv_nnc_tensor_symbol_t csaved_inv_std = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 1, 1, 1), "saved_inv_std"); |
2449 | 0 | ccv_nnc_graph_exec_symbol_new(cpu_symbolic_graph, CMD_LAYER_NORM_FORWARD(1e-4, 1, 1, 2, 3), TENSOR_SYMBOL_LIST(cx, cscale, cbias), TENSOR_SYMBOL_LIST(cy, csaved_mean, csaved_inv_std), "layer_norm"); |
2450 | 0 | ccv_nnc_graph_exec_symbol_autogen(cpu_symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
2451 | 0 | ccv_nnc_symbolic_graph_backward(cpu_symbolic_graph, TENSOR_SYMBOL_LIST(cy), TENSOR_SYMBOL_LIST(cx, cscale), SYMBOLIC_GRAPH_SOURCES(cpu_symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(cpu_symbolic_graph)); |
2452 | 0 | ccv_nnc_graph_exec_symbol_autogen(cpu_symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
2453 | 0 | ccv_nnc_tensor_symbol_t dcy = ccv_nnc_tensor_symbol_for_backward(cpu_symbolic_graph, cy); |
2454 | 0 | ccv_nnc_tensor_symbol_t dcx = ccv_nnc_tensor_symbol_for_backward(cpu_symbolic_graph, cx); |
2455 | 0 | ccv_nnc_tensor_symbol_t dcscale = ccv_nnc_tensor_symbol_for_backward(cpu_symbolic_graph, cscale); |
2456 | 0 | ccv_nnc_graph_t* cpu_graph = 0; |
2457 | 0 | ccv_nnc_tensor_arena_t* cpu_tensor_arena = 0; |
2458 | 0 | ccv_nnc_graph_exec_arena_t* cpu_graph_exec_arena = 0; |
2459 | 0 | ccv_nnc_symbolic_graph_compile(cpu_symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(cpu_symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(cpu_symbolic_graph), &cpu_graph, &cpu_tensor_arena, &cpu_graph_exec_arena); |
2460 | 0 | ccv_nnc_tensor_t* const cx_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, cx); |
2461 | 0 | memcpy(cx_tensor->data.f32, x_tensor->data.f32, sizeof(float) * 2 * 2 * 2 * LN_DIM); |
2462 | 0 | ccv_nnc_tensor_t* const dcy_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, dcy); |
2463 | 0 | memcpy(dcy_tensor->data.f32, dy_tensor->data.f32, sizeof(float) * 2 * 2 * 2 * LN_DIM); |
2464 | 0 | ccv_nnc_tensor_t* const cscale_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, cscale); |
2465 | 0 | memcpy(cscale_tensor->data.f32, scaledata, sizeof(float) * 1 * 2 * 2 * LN_DIM); |
2466 | 0 | ccv_nnc_graph_run(cpu_graph, 0, TRAVERSE_FULL, 0, 0); |
2467 | 0 | ccv_nnc_tensor_t* const dcscale_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, dcscale); |
2468 | 0 | ccv_nnc_tensor_t* const dcx_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, dcx); |
2469 | |
2470 | 0 | REQUIRE_TENSOR_EQ(dx_tensor, dcx_tensor, "layer norm gradient result from mps should match the one from reference implementation"); |
2471 | 0 | REQUIRE_TENSOR_EQ(dscale_tensor, dcscale_tensor, "layer norm scale gradient result from mps should match the one from reference implementation"); |
2472 | 0 | ccv_nnc_symbolic_graph_free(cpu_symbolic_graph); |
2473 | 0 | ccv_nnc_tensor_arena_free(cpu_tensor_arena); |
2474 | 0 | ccv_nnc_graph_exec_arena_free(cpu_graph_exec_arena); |
2475 | 0 | ccv_nnc_graph_free(cpu_graph); |
2476 | 0 | ccv_nnc_tensor_free(x_tensor); |
2477 | 0 | ccv_nnc_tensor_free(dy_tensor); |
2478 | 0 | ccv_nnc_tensor_free(dx_tensor); |
2479 | 0 | ccv_nnc_tensor_free(dscale_tensor); |
2480 | 0 | } |
2481 | | |
2482 | | TEST_CASE("compare layer norm gradient with mps without scale / bias") |
2483 | 1 | { |
2484 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_LAYER_NORM_FORWARD, CCV_NNC_BACKEND_MPS) && |
2485 | 1 | ccv_nnc_cmd_ok(CCV_NNC_LAYER_NORM_BACKWARD, CCV_NNC_BACKEND_MPS) && |
2486 | 1 | ccv_nnc_cmd_ok(CCV_NNC_SET_FORWARD, CCV_NNC_BACKEND_MPS)); |
2487 | 0 | ccv_nnc_symbolic_graph_t* const symbolic_graph = ccv_nnc_symbolic_graph_new(); |
2488 | 0 | ccv_nnc_tensor_symbol_t bx = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, 2, 2, LN_DIM), "x"); |
2489 | 0 | ccv_nnc_tensor_symbol_t by = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, 2, 2, LN_DIM), "y"); |
2490 | 0 | ccv_nnc_tensor_symbol_t saved_mean = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, 1, 1, 1), "saved_mean"); |
2491 | 0 | ccv_nnc_tensor_symbol_t saved_inv_std = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, 1, 1, 1), "saved_inv_std"); |
2492 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_LAYER_NORM_FORWARD(1e-4, 0, 1, 2, 3), TENSOR_SYMBOL_LIST(bx), TENSOR_SYMBOL_LIST(by, saved_mean, saved_inv_std), "layer_norm"); |
2493 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
2494 | 0 | ccv_nnc_symbolic_graph_backward(symbolic_graph, TENSOR_SYMBOL_LIST(by), TENSOR_SYMBOL_LIST(bx), SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph)); |
2495 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
2496 | 0 | ccv_nnc_tensor_symbol_t dby = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, by); |
2497 | 0 | ccv_nnc_tensor_symbol_t dbx = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, bx); |
2498 | 0 | ccv_nnc_graph_t* graph = 0; |
2499 | 0 | ccv_nnc_tensor_arena_t* tensor_arena = 0; |
2500 | 0 | ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0; |
2501 | 0 | ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena); |
2502 | 0 | SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH); |
2503 | 0 | GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH); |
2504 | 0 | ccv_nnc_tensor_t* const bx_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, bx); |
2505 | 0 | dsfmt_t dsfmt; |
2506 | 0 | ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 2, 2, LN_DIM), 0); |
2507 | 0 | int i; |
2508 | 0 | dsfmt_init_gen_rand(&dsfmt, 1); |
2509 | 0 | for (i = 0; i < 2 * 2 * 2 * LN_DIM; i++) |
2510 | 0 | x_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) * 100; |
2511 | |
2512 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor), TENSOR_LIST(bx_tensor), 0); |
2513 | 0 | ccv_nnc_tensor_t* const dy_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 2, 2, LN_DIM), 0); |
2514 | 0 | ccv_nnc_tensor_t* const dby_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, dby); |
2515 | 0 | for (i = 0; i < 2 * 2 * 2 * LN_DIM; i++) |
2516 | 0 | dy_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) * 2 - 1; |
2517 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(dy_tensor), TENSOR_LIST(dby_tensor), 0); |
2518 | 0 | ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
2519 | 0 | ccv_nnc_tensor_t* const dbx_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, dbx); |
2520 | 0 | ccv_nnc_tensor_t* const dx_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 2, 2, LN_DIM), 0); |
2521 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(dbx_tensor), TENSOR_LIST(dx_tensor), 0); |
2522 | 0 | ccv_nnc_symbolic_graph_free(symbolic_graph); |
2523 | 0 | ccv_nnc_tensor_arena_free(tensor_arena); |
2524 | 0 | ccv_nnc_graph_exec_arena_free(graph_exec_arena); |
2525 | 0 | ccv_nnc_graph_free(graph); |
2526 | 0 | ccv_nnc_symbolic_graph_t* const cpu_symbolic_graph = ccv_nnc_symbolic_graph_new(); |
2527 | 0 | ccv_nnc_tensor_symbol_t cx = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 2, 2, LN_DIM), "x"); |
2528 | 0 | ccv_nnc_tensor_symbol_t cy = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 2, 2, LN_DIM), "y"); |
2529 | 0 | ccv_nnc_tensor_symbol_t csaved_mean = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 1, 1, 1), "saved_mean"); |
2530 | 0 | ccv_nnc_tensor_symbol_t csaved_inv_std = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 1, 1, 1), "saved_inv_std"); |
2531 | 0 | ccv_nnc_graph_exec_symbol_new(cpu_symbolic_graph, CMD_LAYER_NORM_FORWARD(1e-4, 0, 1, 2, 3), TENSOR_SYMBOL_LIST(cx), TENSOR_SYMBOL_LIST(cy, csaved_mean, csaved_inv_std), "layer_norm"); |
2532 | 0 | ccv_nnc_graph_exec_symbol_autogen(cpu_symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
2533 | 0 | ccv_nnc_symbolic_graph_backward(cpu_symbolic_graph, TENSOR_SYMBOL_LIST(cy), TENSOR_SYMBOL_LIST(cx), SYMBOLIC_GRAPH_SOURCES(cpu_symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(cpu_symbolic_graph)); |
2534 | 0 | ccv_nnc_graph_exec_symbol_autogen(cpu_symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
2535 | 0 | ccv_nnc_tensor_symbol_t dcy = ccv_nnc_tensor_symbol_for_backward(cpu_symbolic_graph, cy); |
2536 | 0 | ccv_nnc_tensor_symbol_t dcx = ccv_nnc_tensor_symbol_for_backward(cpu_symbolic_graph, cx); |
2537 | 0 | ccv_nnc_graph_t* cpu_graph = 0; |
2538 | 0 | ccv_nnc_tensor_arena_t* cpu_tensor_arena = 0; |
2539 | 0 | ccv_nnc_graph_exec_arena_t* cpu_graph_exec_arena = 0; |
2540 | 0 | ccv_nnc_symbolic_graph_compile(cpu_symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(cpu_symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(cpu_symbolic_graph), &cpu_graph, &cpu_tensor_arena, &cpu_graph_exec_arena); |
2541 | 0 | ccv_nnc_tensor_t* const cx_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, cx); |
2542 | 0 | memcpy(cx_tensor->data.f32, x_tensor->data.f32, sizeof(float) * 2 * 2 * 2 * LN_DIM); |
2543 | 0 | ccv_nnc_tensor_t* const dcy_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, dcy); |
2544 | 0 | memcpy(dcy_tensor->data.f32, dy_tensor->data.f32, sizeof(float) * 2 * 2 * 2 * LN_DIM); |
2545 | 0 | ccv_nnc_graph_run(cpu_graph, 0, TRAVERSE_FULL, 0, 0); |
2546 | 0 | ccv_nnc_tensor_t* const dcx_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, dcx); |
2547 | |
2548 | 0 | REQUIRE_TENSOR_EQ(dx_tensor, dcx_tensor, "layer norm gradient result from mps should match the one from reference implementation"); |
2549 | 0 | ccv_nnc_symbolic_graph_free(cpu_symbolic_graph); |
2550 | 0 | ccv_nnc_tensor_arena_free(cpu_tensor_arena); |
2551 | 0 | ccv_nnc_graph_exec_arena_free(cpu_graph_exec_arena); |
2552 | 0 | ccv_nnc_graph_free(cpu_graph); |
2553 | 0 | ccv_nnc_tensor_free(x_tensor); |
2554 | 0 | ccv_nnc_tensor_free(dy_tensor); |
2555 | 0 | ccv_nnc_tensor_free(dx_tensor); |
2556 | 0 | } |
2557 | | |
2558 | | TEST_CASE("compare layer norm gradient with mps (no bias) without scale / bias") |
2559 | 1 | { |
2560 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_LAYER_NORM_FORWARD, CCV_NNC_BACKEND_MPS) && |
2561 | 1 | ccv_nnc_cmd_ok(CCV_NNC_LAYER_NORM_BACKWARD, CCV_NNC_BACKEND_MPS) && |
2562 | 1 | ccv_nnc_cmd_ok(CCV_NNC_SET_FORWARD, CCV_NNC_BACKEND_MPS)); |
2563 | 0 | ccv_nnc_symbolic_graph_t* const symbolic_graph = ccv_nnc_symbolic_graph_new(); |
2564 | 0 | ccv_nnc_tensor_symbol_t bx = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, 2, 2, LN_DIM), "x"); |
2565 | 0 | ccv_nnc_tensor_symbol_t by = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, 2, 2, LN_DIM), "y"); |
2566 | 0 | ccv_nnc_tensor_symbol_t saved_mean = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, 1, 1, 1), "saved_mean"); |
2567 | 0 | ccv_nnc_tensor_symbol_t saved_inv_std = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, 1, 1, 1), "saved_inv_std"); |
2568 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_LAYER_NORM_FORWARD(1e-4, 0, 1, 2, 3), TENSOR_SYMBOL_LIST(bx), TENSOR_SYMBOL_LIST(by, saved_mean, saved_inv_std), "layer_norm"); |
2569 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
2570 | 0 | ccv_nnc_symbolic_graph_backward(symbolic_graph, TENSOR_SYMBOL_LIST(by), TENSOR_SYMBOL_LIST(bx), SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph)); |
2571 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
2572 | 0 | ccv_nnc_tensor_symbol_t dby = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, by); |
2573 | 0 | ccv_nnc_tensor_symbol_t dbx = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, bx); |
2574 | 0 | ccv_nnc_graph_t* graph = 0; |
2575 | 0 | ccv_nnc_tensor_arena_t* tensor_arena = 0; |
2576 | 0 | ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0; |
2577 | 0 | ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena); |
2578 | 0 | SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH); |
2579 | 0 | GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH); |
2580 | 0 | ccv_nnc_tensor_t* const bx_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, bx); |
2581 | 0 | dsfmt_t dsfmt; |
2582 | 0 | ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 2, 2, LN_DIM), 0); |
2583 | 0 | int i; |
2584 | 0 | dsfmt_init_gen_rand(&dsfmt, 1); |
2585 | 0 | for (i = 0; i < 2 * 2 * 2 * LN_DIM; i++) |
2586 | 0 | x_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) * 100; |
2587 | |
2588 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor), TENSOR_LIST(bx_tensor), 0); |
2589 | 0 | ccv_nnc_tensor_t* const dy_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 2, 2, LN_DIM), 0); |
2590 | 0 | ccv_nnc_tensor_t* const dby_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, dby); |
2591 | 0 | for (i = 0; i < 2 * 2 * 2 * LN_DIM; i++) |
2592 | 0 | dy_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) * 2 - 1; |
2593 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(dy_tensor), TENSOR_LIST(dby_tensor), 0); |
2594 | 0 | ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
2595 | 0 | ccv_nnc_tensor_t* const dbx_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, dbx); |
2596 | 0 | ccv_nnc_tensor_t* const dx_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 2, 2, LN_DIM), 0); |
2597 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(dbx_tensor), TENSOR_LIST(dx_tensor), 0); |
2598 | 0 | ccv_nnc_symbolic_graph_free(symbolic_graph); |
2599 | 0 | ccv_nnc_tensor_arena_free(tensor_arena); |
2600 | 0 | ccv_nnc_graph_exec_arena_free(graph_exec_arena); |
2601 | 0 | ccv_nnc_graph_free(graph); |
2602 | 0 | ccv_nnc_symbolic_graph_t* const cpu_symbolic_graph = ccv_nnc_symbolic_graph_new(); |
2603 | 0 | ccv_nnc_tensor_symbol_t cx = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 2, 2, LN_DIM), "x"); |
2604 | 0 | ccv_nnc_tensor_symbol_t cy = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 2, 2, LN_DIM), "y"); |
2605 | 0 | ccv_nnc_tensor_symbol_t csaved_mean = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 1, 1, 1), "saved_mean"); |
2606 | 0 | ccv_nnc_tensor_symbol_t csaved_inv_std = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 1, 1, 1), "saved_inv_std"); |
2607 | 0 | ccv_nnc_graph_exec_symbol_new(cpu_symbolic_graph, CMD_LAYER_NORM_FORWARD(1e-4, 0, 1, 2, 3), TENSOR_SYMBOL_LIST(cx), TENSOR_SYMBOL_LIST(cy, csaved_mean, csaved_inv_std), "layer_norm"); |
2608 | 0 | ccv_nnc_graph_exec_symbol_autogen(cpu_symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
2609 | 0 | ccv_nnc_symbolic_graph_backward(cpu_symbolic_graph, TENSOR_SYMBOL_LIST(cy), TENSOR_SYMBOL_LIST(cx), SYMBOLIC_GRAPH_SOURCES(cpu_symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(cpu_symbolic_graph)); |
2610 | 0 | ccv_nnc_graph_exec_symbol_autogen(cpu_symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
2611 | 0 | ccv_nnc_tensor_symbol_t dcy = ccv_nnc_tensor_symbol_for_backward(cpu_symbolic_graph, cy); |
2612 | 0 | ccv_nnc_tensor_symbol_t dcx = ccv_nnc_tensor_symbol_for_backward(cpu_symbolic_graph, cx); |
2613 | 0 | ccv_nnc_graph_t* cpu_graph = 0; |
2614 | 0 | ccv_nnc_tensor_arena_t* cpu_tensor_arena = 0; |
2615 | 0 | ccv_nnc_graph_exec_arena_t* cpu_graph_exec_arena = 0; |
2616 | 0 | ccv_nnc_symbolic_graph_compile(cpu_symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(cpu_symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(cpu_symbolic_graph), &cpu_graph, &cpu_tensor_arena, &cpu_graph_exec_arena); |
2617 | 0 | ccv_nnc_tensor_t* const cx_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, cx); |
2618 | 0 | memcpy(cx_tensor->data.f32, x_tensor->data.f32, sizeof(float) * 2 * 2 * 2 * LN_DIM); |
2619 | 0 | ccv_nnc_tensor_t* const dcy_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, dcy); |
2620 | 0 | memcpy(dcy_tensor->data.f32, dy_tensor->data.f32, sizeof(float) * 2 * 2 * 2 * LN_DIM); |
2621 | 0 | ccv_nnc_graph_run(cpu_graph, 0, TRAVERSE_FULL, 0, 0); |
2622 | 0 | ccv_nnc_tensor_t* const dcx_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, dcx); |
2623 | |
2624 | 0 | REQUIRE_TENSOR_EQ(dx_tensor, dcx_tensor, "layer norm gradient result from mps should match the one from reference implementation"); |
2625 | 0 | ccv_nnc_symbolic_graph_free(cpu_symbolic_graph); |
2626 | 0 | ccv_nnc_tensor_arena_free(cpu_tensor_arena); |
2627 | 0 | ccv_nnc_graph_exec_arena_free(cpu_graph_exec_arena); |
2628 | 0 | ccv_nnc_graph_free(cpu_graph); |
2629 | 0 | ccv_nnc_tensor_free(x_tensor); |
2630 | 0 | ccv_nnc_tensor_free(dy_tensor); |
2631 | 0 | ccv_nnc_tensor_free(dx_tensor); |
2632 | 0 | } |
2633 | | |
2634 | | TEST_CASE("compare rmsnorm gradient with mps") |
2635 | 1 | { |
2636 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_RMSNORM_FORWARD, CCV_NNC_BACKEND_MPS) && |
2637 | 1 | ccv_nnc_cmd_ok(CCV_NNC_RMSNORM_BACKWARD, CCV_NNC_BACKEND_MPS) && |
2638 | 1 | ccv_nnc_cmd_ok(CCV_NNC_SET_FORWARD, CCV_NNC_BACKEND_MPS)); |
2639 | 0 | ccv_nnc_symbolic_graph_t* const symbolic_graph = ccv_nnc_symbolic_graph_new(); |
2640 | 0 | ccv_nnc_tensor_symbol_t bx = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, 2, 2, LN_DIM), "x"); |
2641 | 0 | ccv_nnc_tensor_symbol_t by = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, 2, 2, LN_DIM), "y"); |
2642 | 0 | ccv_nnc_tensor_symbol_t scale = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 1, 2, 2, LN_DIM), "scale"); |
2643 | 0 | ccv_nnc_tensor_symbol_t saved_inv_std = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, 1, 1, 1), "saved_inv_std"); |
2644 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_RMSNORM_FORWARD(1e-4, 1, 2, 3), TENSOR_SYMBOL_LIST(bx, scale), TENSOR_SYMBOL_LIST(by, saved_inv_std), "rmsnorm"); |
2645 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
2646 | 0 | ccv_nnc_symbolic_graph_backward(symbolic_graph, TENSOR_SYMBOL_LIST(by), TENSOR_SYMBOL_LIST(bx, scale), SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph)); |
2647 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
2648 | 0 | ccv_nnc_tensor_symbol_t dby = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, by); |
2649 | 0 | ccv_nnc_tensor_symbol_t dbx = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, bx); |
2650 | 0 | ccv_nnc_graph_t* graph = 0; |
2651 | 0 | ccv_nnc_tensor_arena_t* tensor_arena = 0; |
2652 | 0 | ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0; |
2653 | 0 | ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena); |
2654 | 0 | SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH); |
2655 | 0 | GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH); |
2656 | 0 | ccv_nnc_tensor_t* const bx_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, bx); |
2657 | 0 | dsfmt_t dsfmt; |
2658 | 0 | ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 2, 2, LN_DIM), 0); |
2659 | 0 | int i; |
2660 | 0 | dsfmt_init_gen_rand(&dsfmt, 1); |
2661 | 0 | for (i = 0; i < 2 * 2 * 2 * LN_DIM; i++) |
2662 | 0 | x_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) * 100; |
2663 | |
2664 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor), TENSOR_LIST(bx_tensor), 0); |
2665 | 0 | float scaledata[1 * 2 * 2 * LN_DIM]; |
2666 | 0 | for (i = 0; i < 1 * 2 * 2 * LN_DIM; i++) |
2667 | 0 | scaledata[i] = dsfmt_genrand_open_close(&dsfmt); |
2668 | |
2669 | 0 | ccv_nnc_tensor_t scale_tensor = ccv_nnc_tensor(scaledata, CPU_TENSOR_NHWC(32F, 1, 2, 2, LN_DIM), 0); |
2670 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(&scale_tensor), TENSOR_LIST(ccv_nnc_tensor_from_symbol(tensor_arena, scale)), 0); |
2671 | 0 | ccv_nnc_tensor_t* const dy_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 2, 2, LN_DIM), 0); |
2672 | 0 | ccv_nnc_tensor_t* const dby_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, dby); |
2673 | 0 | for (i = 0; i < 2 * 2 * 2 * LN_DIM; i++) |
2674 | 0 | dy_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) * 2 - 1; |
2675 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(dy_tensor), TENSOR_LIST(dby_tensor), 0); |
2676 | 0 | ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
2677 | 0 | ccv_nnc_tensor_t* const dbx_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, dbx); |
2678 | 0 | ccv_nnc_tensor_t* const dx_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 2, 2, LN_DIM), 0); |
2679 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(dbx_tensor), TENSOR_LIST(dx_tensor), 0); |
2680 | 0 | ccv_nnc_tensor_t* const dbscale_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, ccv_nnc_tensor_symbol_for_backward(symbolic_graph, scale)); |
2681 | 0 | ccv_nnc_tensor_t* const dscale_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 1, 2, 2, LN_DIM), 0); |
2682 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(dbscale_tensor), TENSOR_LIST(dscale_tensor), 0); |
2683 | 0 | ccv_nnc_symbolic_graph_free(symbolic_graph); |
2684 | 0 | ccv_nnc_tensor_arena_free(tensor_arena); |
2685 | 0 | ccv_nnc_graph_exec_arena_free(graph_exec_arena); |
2686 | 0 | ccv_nnc_graph_free(graph); |
2687 | 0 | ccv_nnc_symbolic_graph_t* const cpu_symbolic_graph = ccv_nnc_symbolic_graph_new(); |
2688 | 0 | ccv_nnc_tensor_symbol_t cx = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 2, 2, LN_DIM), "x"); |
2689 | 0 | ccv_nnc_tensor_symbol_t cy = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 2, 2, LN_DIM), "y"); |
2690 | 0 | ccv_nnc_tensor_symbol_t cscale = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 1, 2, 2, LN_DIM), "scale"); |
2691 | 0 | ccv_nnc_tensor_symbol_t csaved_inv_std = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, 1, 1, 1), "saved_inv_std"); |
2692 | 0 | ccv_nnc_graph_exec_symbol_new(cpu_symbolic_graph, CMD_RMSNORM_FORWARD(1e-4, 1, 2, 3), TENSOR_SYMBOL_LIST(cx, cscale), TENSOR_SYMBOL_LIST(cy, csaved_inv_std), "rmsnorm"); |
2693 | 0 | ccv_nnc_graph_exec_symbol_autogen(cpu_symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
2694 | 0 | ccv_nnc_symbolic_graph_backward(cpu_symbolic_graph, TENSOR_SYMBOL_LIST(cy), TENSOR_SYMBOL_LIST(cx, cscale), SYMBOLIC_GRAPH_SOURCES(cpu_symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(cpu_symbolic_graph)); |
2695 | 0 | ccv_nnc_graph_exec_symbol_autogen(cpu_symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
2696 | 0 | ccv_nnc_tensor_symbol_t dcy = ccv_nnc_tensor_symbol_for_backward(cpu_symbolic_graph, cy); |
2697 | 0 | ccv_nnc_tensor_symbol_t dcx = ccv_nnc_tensor_symbol_for_backward(cpu_symbolic_graph, cx); |
2698 | 0 | ccv_nnc_tensor_symbol_t dcscale = ccv_nnc_tensor_symbol_for_backward(cpu_symbolic_graph, cscale); |
2699 | 0 | ccv_nnc_graph_t* cpu_graph = 0; |
2700 | 0 | ccv_nnc_tensor_arena_t* cpu_tensor_arena = 0; |
2701 | 0 | ccv_nnc_graph_exec_arena_t* cpu_graph_exec_arena = 0; |
2702 | 0 | ccv_nnc_symbolic_graph_compile(cpu_symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(cpu_symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(cpu_symbolic_graph), &cpu_graph, &cpu_tensor_arena, &cpu_graph_exec_arena); |
2703 | 0 | ccv_nnc_tensor_t* const cx_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, cx); |
2704 | 0 | memcpy(cx_tensor->data.f32, x_tensor->data.f32, sizeof(float) * 2 * 2 * 2 * LN_DIM); |
2705 | 0 | ccv_nnc_tensor_t* const dcy_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, dcy); |
2706 | 0 | memcpy(dcy_tensor->data.f32, dy_tensor->data.f32, sizeof(float) * 2 * 2 * 2 * LN_DIM); |
2707 | 0 | ccv_nnc_tensor_t* const cscale_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, cscale); |
2708 | 0 | memcpy(cscale_tensor->data.f32, scaledata, sizeof(float) * 1 * 2 * 2 * LN_DIM); |
2709 | 0 | ccv_nnc_graph_run(cpu_graph, 0, TRAVERSE_FULL, 0, 0); |
2710 | 0 | ccv_nnc_tensor_t* const dcscale_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, dcscale); |
2711 | 0 | ccv_nnc_tensor_t* const dcx_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, dcx); |
2712 | |
2713 | 0 | REQUIRE_TENSOR_EQ(dx_tensor, dcx_tensor, "rmsnorm gradient result from mps should match the one from reference implementation"); |
2714 | 0 | REQUIRE_TENSOR_EQ(dscale_tensor, dcscale_tensor, "rmsnorm scale gradient result from mps should match the one from reference implementation"); |
2715 | 0 | ccv_nnc_symbolic_graph_free(cpu_symbolic_graph); |
2716 | 0 | ccv_nnc_tensor_arena_free(cpu_tensor_arena); |
2717 | 0 | ccv_nnc_graph_exec_arena_free(cpu_graph_exec_arena); |
2718 | 0 | ccv_nnc_graph_free(cpu_graph); |
2719 | 0 | ccv_nnc_tensor_free(x_tensor); |
2720 | 0 | ccv_nnc_tensor_free(dy_tensor); |
2721 | 0 | ccv_nnc_tensor_free(dx_tensor); |
2722 | 0 | ccv_nnc_tensor_free(dscale_tensor); |
2723 | 0 | } |
2724 | | |
2725 | | TEST_CASE("mps backward convolution in nchw format") |
2726 | 1 | { |
2727 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_CONVOLUTION_BACKWARD, CCV_NNC_BACKEND_MPS)); |
2728 | 0 | ccv_nnc_tensor_t* a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, BATCH_SIZE, INPUT_SIZE, INPUT_SIZE, INPUT_DIM), 0); |
2729 | 0 | ccv_nnc_tensor_t* h = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, BATCH_SIZE, INPUT_SIZE, INPUT_SIZE, INPUT_DIM), 0); |
2730 | 0 | ccv_nnc_tensor_t* g = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, BATCH_SIZE, OUTPUT_SIZE, OUTPUT_SIZE, OUTPUT_DIM), 0); |
2731 | 0 | ccv_nnc_cmd_t cmd = CMD_CONVOLUTION_BACKWARD(1, OUTPUT_DIM, KERNEL_SIZE, KERNEL_SIZE, INPUT_DIM); |
2732 | 0 | cmd.backend = CCV_NNC_BACKEND_CPU_REF; |
2733 | 0 | assert(cmd.backend >= 0); |
2734 | 0 | ccv_nnc_hint_t hint = ccv_nnc_hint_auto(cmd.info, a->info, g->info); |
2735 | 0 | assert(ccv_nnc_hint_verify(hint, cmd.info, a->info, g->info) == 0); |
2736 | 0 | ccv_nnc_tensor_t* w = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, OUTPUT_DIM, KERNEL_SIZE, KERNEL_SIZE, INPUT_DIM), 0); |
2737 | 0 | ccv_nnc_tensor_t* dw = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, OUTPUT_DIM, KERNEL_SIZE, KERNEL_SIZE, INPUT_DIM), 0); |
2738 | 0 | ccv_nnc_tensor_t* dbias = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 1, 1, 1, OUTPUT_DIM), 0); |
2739 | | // configure the inlets. |
2740 | 0 | dsfmt_t dsfmt; |
2741 | 0 | dsfmt_init_gen_rand(&dsfmt, 0); |
2742 | 0 | int i; |
2743 | 0 | for (i = 0; i < INPUT_DIM * KERNEL_SIZE * KERNEL_SIZE * OUTPUT_DIM; i++) |
2744 | 0 | w->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) / (INPUT_DIM * KERNEL_SIZE * KERNEL_SIZE); |
2745 | 0 | for (i = 0; i < INPUT_SIZE * INPUT_SIZE * INPUT_DIM * ccv_max(1, BATCH_SIZE); i++) |
2746 | 0 | a->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
2747 | 0 | for (i = 0; i < OUTPUT_SIZE * OUTPUT_SIZE * OUTPUT_DIM * ccv_max(1, BATCH_SIZE); i++) |
2748 | 0 | g->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) / OUTPUT_DIM; // (OUTPUT_SIZE * OUTPUT_SIZE * OUTPUT_DIM); |
2749 | | // Copy generated matrix values over to GPU. |
2750 | 0 | ccv_nnc_tensor_t* ga = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, BATCH_SIZE, INPUT_SIZE, INPUT_SIZE, INPUT_DIM), 0); |
2751 | 0 | ccv_nnc_tensor_t* gg = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, BATCH_SIZE, OUTPUT_SIZE, OUTPUT_SIZE, OUTPUT_DIM), 0); |
2752 | 0 | ccv_nnc_tensor_t* gh = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, BATCH_SIZE, INPUT_SIZE, INPUT_SIZE, INPUT_DIM), 0); |
2753 | 0 | ccv_nnc_tensor_t* gw = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, OUTPUT_DIM, KERNEL_SIZE, KERNEL_SIZE, INPUT_DIM), 0); |
2754 | 0 | ccv_nnc_tensor_t* gbias = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 1, 1, 1, OUTPUT_DIM), 0); |
2755 | 0 | ccv_nnc_tensor_t* gdw = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, OUTPUT_DIM, KERNEL_SIZE, KERNEL_SIZE, INPUT_DIM), 0); |
2756 | 0 | ccv_nnc_tensor_t* gdbias = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 1, 1, 1, OUTPUT_DIM), 0); |
2757 | 0 | ccv_nnc_tensor_t* gao = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, BATCH_SIZE, INPUT_DIM, INPUT_SIZE, INPUT_SIZE), 0); |
2758 | 0 | ccv_nnc_tensor_t* ggo = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, BATCH_SIZE, OUTPUT_DIM, OUTPUT_SIZE, OUTPUT_SIZE), 0); |
2759 | 0 | ccv_nnc_tensor_t* gho = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, BATCH_SIZE, INPUT_DIM, INPUT_SIZE, INPUT_SIZE), 0); |
2760 | 0 | ccv_nnc_tensor_t* gwo = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, OUTPUT_DIM, INPUT_DIM, KERNEL_SIZE, KERNEL_SIZE), 0); |
2761 | 0 | ccv_nnc_tensor_t* gbiaso = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, 1, OUTPUT_DIM, 1, 1), 0); |
2762 | 0 | ccv_nnc_tensor_t* gdwo = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, OUTPUT_DIM, INPUT_DIM, KERNEL_SIZE, KERNEL_SIZE), 0); |
2763 | 0 | ccv_nnc_tensor_t* gdbiaso = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, 1, OUTPUT_DIM, 1, 1), 0); |
2764 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(a, w, g), TENSOR_LIST(ga, gw, gg), 0); |
2765 | 0 | ccv_nnc_cmd_exec(cmd, hint, 0, TENSOR_LIST(g, a, w), TENSOR_LIST(h, dw, dbias), 0); |
2766 | 0 | ccv_nnc_cmd_exec(CMD_FORMAT_TRANSFORM_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(ga, gw, gg), TENSOR_LIST(gao, gwo, ggo), 0); |
2767 | 0 | cmd.backend = CCV_NNC_BACKEND_MPS; |
2768 | |
2769 | 0 | assert(cmd.backend >= 0); |
2770 | 0 | cmd.algorithm = -1; |
2771 | 0 | ccv_nnc_stream_context_t* stream_context = ccv_nnc_stream_context_new(CCV_STREAM_CONTEXT_GPU); |
2772 | |
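 | | // Autotune selects the fastest backward algorithm within a 1GB workspace budget; the chosen command is then executed on the stream. |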
2773 | 0 | cmd = ccv_nnc_cmd_autotune(cmd, 1 * 1024 * 1024 * 1024, hint, 0, TENSOR_LIST(ggo, gao, gwo), TENSOR_LIST(gho, gdwo, gdbiaso), stream_context); |
2774 | 0 | assert(CCV_NNC_EXEC_SUCCESS == ccv_nnc_cmd_exec(cmd, hint, 0, TENSOR_LIST(ggo, gao, gwo), TENSOR_LIST(gho, gdwo, gdbiaso), stream_context)); |
2775 | 0 | ccv_nnc_stream_context_wait(stream_context); |
2776 | 0 | ccv_nnc_stream_context_free(stream_context); |
2777 | 0 | ccv_nnc_tensor_t* ch = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, BATCH_SIZE, INPUT_SIZE, INPUT_SIZE, INPUT_DIM), 0); |
2778 | 0 | ccv_nnc_tensor_t* cdw = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, OUTPUT_DIM, KERNEL_SIZE, KERNEL_SIZE, INPUT_DIM), 0); |
2779 | 0 | ccv_nnc_tensor_t* cdbias = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 1, OUTPUT_DIM, 1, 1), 0); |
2780 | |
2781 | 0 | ccv_nnc_cmd_exec(CMD_FORMAT_TRANSFORM_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(gho, gdwo, gdbiaso), TENSOR_LIST(gh, gdw, gdbias), 0); |
2782 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(gh, gdw, gdbias), TENSOR_LIST(ch, cdw, cdbias), 0); |
2783 | |
2784 | 0 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, dw->data.f32, cdw->data.f32, INPUT_DIM * OUTPUT_DIM * KERNEL_SIZE * KERNEL_SIZE, 5e-1, "output from mps should match from CPU"); |
2785 | 0 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, dbias->data.f32, cdbias->data.f32, OUTPUT_DIM, 5e-1, "output from mps should match from CPU"); |
2786 | 0 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, h->data.f32, ch->data.f32, BATCH_SIZE * INPUT_DIM * INPUT_SIZE * INPUT_SIZE, 1e-4, "output from mps should match from CPU"); |
2787 | 0 | ccv_nnc_tensor_free(gao); |
2788 | 0 | ccv_nnc_tensor_free(ggo); |
2789 | 0 | ccv_nnc_tensor_free(gho); |
2790 | 0 | ccv_nnc_tensor_free(gwo); |
2791 | 0 | ccv_nnc_tensor_free(gbiaso); |
2792 | 0 | ccv_nnc_tensor_free(gdwo); |
2793 | 0 | ccv_nnc_tensor_free(gdbiaso); |
2794 | 0 | ccv_nnc_tensor_free(h); |
2795 | 0 | ccv_nnc_tensor_free(gh); |
2796 | 0 | ccv_nnc_tensor_free(w); |
2797 | 0 | ccv_nnc_tensor_free(g); |
2798 | 0 | ccv_nnc_tensor_free(a); |
2799 | 0 | ccv_nnc_tensor_free(gbias); |
2800 | 0 | ccv_nnc_tensor_free(gdbias); |
2801 | 0 | ccv_nnc_tensor_free(gdw); |
2802 | 0 | ccv_nnc_tensor_free(gw); |
2803 | 0 | ccv_nnc_tensor_free(gg); |
2804 | 0 | ccv_nnc_tensor_free(ga); |
2805 | 0 | ccv_nnc_tensor_free(ch); |
2806 | 0 | ccv_nnc_tensor_free(cdw); |
2807 | 0 | ccv_nnc_tensor_free(cdbias); |
2808 | 0 | } |
2809 | | |
2810 | | TEST_CASE("mps backward convolution in nhwc format") |
2811 | 1 | { |
2812 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_CONVOLUTION_BACKWARD, CCV_NNC_BACKEND_MPS)); |
2813 | 0 | ccv_nnc_tensor_t* a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, BATCH_SIZE, INPUT_SIZE, INPUT_SIZE, INPUT_DIM), 0); |
2814 | 0 | ccv_nnc_tensor_t* h = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, BATCH_SIZE, INPUT_SIZE, INPUT_SIZE, INPUT_DIM), 0); |
2815 | 0 | ccv_nnc_tensor_t* g = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, BATCH_SIZE, OUTPUT_SIZE, OUTPUT_SIZE, OUTPUT_DIM), 0); |
2816 | 0 | ccv_nnc_cmd_t cmd = CMD_CONVOLUTION_BACKWARD(1, OUTPUT_DIM, KERNEL_SIZE, KERNEL_SIZE, INPUT_DIM); |
2817 | 0 | cmd.backend = CCV_NNC_BACKEND_CPU_REF; |
2818 | 0 | assert(cmd.backend >= 0); |
2819 | 0 | ccv_nnc_hint_t hint = ccv_nnc_hint_auto(cmd.info, a->info, g->info); |
2820 | 0 | assert(ccv_nnc_hint_verify(hint, cmd.info, a->info, g->info) == 0); |
2821 | 0 | ccv_nnc_tensor_t* w = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, OUTPUT_DIM, KERNEL_SIZE, KERNEL_SIZE, INPUT_DIM), 0); |
2822 | 0 | ccv_nnc_tensor_t* dw = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, OUTPUT_DIM, KERNEL_SIZE, KERNEL_SIZE, INPUT_DIM), 0); |
2823 | 0 | ccv_nnc_tensor_t* dbias = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, OUTPUT_DIM), 0); |
2824 | | // configure the inlets. |
2825 | 0 | dsfmt_t dsfmt; |
2826 | 0 | dsfmt_init_gen_rand(&dsfmt, 0); |
2827 | 0 | int i; |
2828 | 0 | for (i = 0; i < INPUT_DIM * KERNEL_SIZE * KERNEL_SIZE * OUTPUT_DIM; i++) |
2829 | 0 | w->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) / (INPUT_DIM * KERNEL_SIZE * KERNEL_SIZE); |
2830 | 0 | for (i = 0; i < INPUT_SIZE * INPUT_SIZE * INPUT_DIM * ccv_max(1, BATCH_SIZE); i++) |
2831 | 0 | a->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
2832 | 0 | for (i = 0; i < OUTPUT_SIZE * OUTPUT_SIZE * OUTPUT_DIM * ccv_max(1, BATCH_SIZE); i++) |
2833 | 0 | g->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) / OUTPUT_DIM; // (OUTPUT_SIZE * OUTPUT_SIZE * OUTPUT_DIM); |
2834 | | // Copy generated matrix values over to GPU. |
2835 | 0 | ccv_nnc_tensor_t* ga = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, BATCH_SIZE, INPUT_SIZE, INPUT_SIZE, INPUT_DIM), 0); |
2836 | 0 | ccv_nnc_tensor_t* gg = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, BATCH_SIZE, OUTPUT_SIZE, OUTPUT_SIZE, OUTPUT_DIM), 0); |
2837 | 0 | ccv_nnc_tensor_t* gh = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, BATCH_SIZE, INPUT_SIZE, INPUT_SIZE, INPUT_DIM), 0); |
2838 | 0 | ccv_nnc_tensor_t* gw = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, OUTPUT_DIM, KERNEL_SIZE, KERNEL_SIZE, INPUT_DIM), 0); |
2839 | 0 | ccv_nnc_tensor_t* gbias = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 1, 1, 1, OUTPUT_DIM), 0); |
2840 | 0 | ccv_nnc_tensor_t* gdw = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, OUTPUT_DIM, KERNEL_SIZE, KERNEL_SIZE, INPUT_DIM), 0); |
2841 | 0 | ccv_nnc_tensor_t* gdbias = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 1, 1, 1, OUTPUT_DIM), 0); |
2842 | 0 | ccv_nnc_tensor_t* gwo = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, OUTPUT_DIM, INPUT_DIM, KERNEL_SIZE, KERNEL_SIZE), 0); |
2843 | 0 | ccv_nnc_tensor_t* gdwo = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, OUTPUT_DIM, INPUT_DIM, KERNEL_SIZE, KERNEL_SIZE), 0); |
2844 | |
2845 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(a, w, g), TENSOR_LIST(ga, gw, gg), 0); |
2846 | 0 | ccv_nnc_cmd_exec(cmd, hint, 0, TENSOR_LIST(g, a, w), TENSOR_LIST(h, dw, dbias), 0); |
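 | | // Only the weights get reformatted to NCHW; activations and gradients stay NHWC for this MPS backward run. |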
2847 | 0 | ccv_nnc_cmd_exec(CMD_FORMAT_TRANSFORM_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(gw), TENSOR_LIST(gwo), 0); |
2848 | 0 | cmd.backend = CCV_NNC_BACKEND_MPS; |
2849 | |
2850 | 0 | assert(cmd.backend >= 0); |
2851 | 0 | cmd.algorithm = -1; |
2852 | 0 | ccv_nnc_stream_context_t* stream_context = ccv_nnc_stream_context_new(CCV_STREAM_CONTEXT_GPU); |
2853 | |
2854 | 0 | cmd = ccv_nnc_cmd_autotune(cmd, 1 * 1024 * 1024 * 1024, hint, 0, TENSOR_LIST(gg, ga, gwo), TENSOR_LIST(gh, gdwo, gdbias), stream_context); |
2855 | 0 | assert(CCV_NNC_EXEC_SUCCESS == ccv_nnc_cmd_exec(cmd, hint, 0, TENSOR_LIST(gg, ga, gwo), TENSOR_LIST(gh, gdwo, gdbias), stream_context)); |
2856 | 0 | ccv_nnc_stream_context_wait(stream_context); |
2857 | 0 | ccv_nnc_stream_context_free(stream_context); |
2858 | 0 | ccv_nnc_tensor_t* ch = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, BATCH_SIZE, INPUT_SIZE, INPUT_SIZE, INPUT_DIM), 0); |
2859 | 0 | ccv_nnc_tensor_t* cdw = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, OUTPUT_DIM, KERNEL_SIZE, KERNEL_SIZE, INPUT_DIM), 0); |
2860 | 0 | ccv_nnc_tensor_t* cdbias = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 1, 1, 1, OUTPUT_DIM), 0); |
2861 | | |
2862 | 0 | ccv_nnc_cmd_exec(CMD_FORMAT_TRANSFORM_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(gdwo), TENSOR_LIST(gdw), 0); |
2863 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(gh, gdw, gdbias), TENSOR_LIST(ch, cdw, cdbias), 0); |
2864 | |
2865 | 0 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, dw->data.f32, cdw->data.f32, INPUT_DIM * OUTPUT_DIM * KERNEL_SIZE * KERNEL_SIZE, 5e-1, "output from mps should match from CPU"); |
2866 | 0 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, dbias->data.f32, cdbias->data.f32, OUTPUT_DIM, 5e-1, "output from mps should match from CPU"); |
2867 | 0 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, h->data.f32, ch->data.f32, BATCH_SIZE * INPUT_DIM * INPUT_SIZE * INPUT_SIZE, 1e-4, "output from mps should match from CPU"); |
2868 | |
2869 | 0 | ccv_nnc_tensor_free(gwo); |
2870 | 0 | ccv_nnc_tensor_free(gdwo); |
2871 | 0 | ccv_nnc_tensor_free(h); |
2872 | 0 | ccv_nnc_tensor_free(gh); |
2873 | 0 | ccv_nnc_tensor_free(w); |
2874 | 0 | ccv_nnc_tensor_free(g); |
2875 | 0 | ccv_nnc_tensor_free(a); |
2876 | 0 | ccv_nnc_tensor_free(gbias); |
2877 | 0 | ccv_nnc_tensor_free(gdbias); |
2878 | 0 | ccv_nnc_tensor_free(gdw); |
2879 | 0 | ccv_nnc_tensor_free(gw); |
2880 | 0 | ccv_nnc_tensor_free(gg); |
2881 | 0 | ccv_nnc_tensor_free(ga); |
2882 | 0 | ccv_nnc_tensor_free(ch); |
2883 | 0 | ccv_nnc_tensor_free(cdw); |
2884 | 0 | ccv_nnc_tensor_free(cdbias); |
2885 | 0 | } |
2886 | | |
2887 | | TEST_CASE("mps backward convolution in nchw format with dilation 2, 3") |
2888 | 1 | { |
2889 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_CONVOLUTION_BACKWARD, CCV_NNC_BACKEND_MPS)); |
2890 | 0 | ccv_nnc_tensor_t* a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, BATCH_SIZE, INPUT_SIZE, INPUT_SIZE, INPUT_DIM), 0); |
2891 | 0 | ccv_nnc_tensor_t* h = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, BATCH_SIZE, INPUT_SIZE, INPUT_SIZE, INPUT_DIM), 0); |
2892 | 0 | ccv_nnc_tensor_t* g = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, BATCH_SIZE, OUTPUT_SIZE, OUTPUT_SIZE, OUTPUT_DIM), 0); |
2893 | 0 | ccv_nnc_cmd_t cmd = CMD_CONVOLUTION_BACKWARD(1, OUTPUT_DIM, KERNEL_SIZE, KERNEL_SIZE, INPUT_DIM); |
2894 | 0 | cmd.info.convolution.dilation[0] = 2; |
2895 | 0 | cmd.info.convolution.dilation[1] = 3; |
2896 | 0 | cmd.backend = CCV_NNC_BACKEND_CPU_REF; |
2897 | 0 | assert(cmd.backend >= 0); |
2898 | 0 | ccv_nnc_cmd_param_t modified_cmd = cmd.info; |
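 | | // With dilation d the effective kernel extent is (k - 1) * d + 1, so the auto hint must be computed against the enlarged size. |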
2899 | 0 | modified_cmd.size.dim[0] = (cmd.info.size.dim[0] - 1) * ccv_max(cmd.info.convolution.dilation[0], 1) + 1; |
2900 | 0 | modified_cmd.size.dim[1] = (cmd.info.size.dim[1] - 1) * ccv_max(cmd.info.convolution.dilation[1], 1) + 1; |
2901 | 0 | ccv_nnc_hint_t hint = ccv_nnc_hint_auto(modified_cmd, a->info, g->info); |
2902 | 0 | assert(ccv_nnc_hint_verify(hint, modified_cmd, a->info, g->info) == 0); |
2903 | 0 | ccv_nnc_tensor_t* w = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, OUTPUT_DIM, KERNEL_SIZE, KERNEL_SIZE, INPUT_DIM), 0); |
2904 | 0 | ccv_nnc_tensor_t* dw = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, OUTPUT_DIM, KERNEL_SIZE, KERNEL_SIZE, INPUT_DIM), 0); |
2905 | 0 | ccv_nnc_tensor_t* dbias = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 1, 1, 1, OUTPUT_DIM), 0); |
2906 | | // configure the inlets. |
2907 | 0 | dsfmt_t dsfmt; |
2908 | 0 | dsfmt_init_gen_rand(&dsfmt, 0); |
2909 | 0 | int i; |
2910 | 0 | for (i = 0; i < INPUT_DIM * KERNEL_SIZE * KERNEL_SIZE * OUTPUT_DIM; i++) |
2911 | 0 | w->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) / (INPUT_DIM * KERNEL_SIZE * KERNEL_SIZE); |
2912 | 0 | for (i = 0; i < INPUT_SIZE * INPUT_SIZE * INPUT_DIM * ccv_max(1, BATCH_SIZE); i++) |
2913 | 0 | a->data.f32[i] = dsfmt_genrand_open_close(&dsfmt); |
2914 | 0 | for (i = 0; i < OUTPUT_SIZE * OUTPUT_SIZE * OUTPUT_DIM * ccv_max(1, BATCH_SIZE); i++) |
2915 | 0 | g->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) / OUTPUT_DIM; // (OUTPUT_SIZE * OUTPUT_SIZE * OUTPUT_DIM); |
2916 | | // Copy generated matrix values over to GPU. |
2917 | 0 | ccv_nnc_tensor_t* ga = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, BATCH_SIZE, INPUT_SIZE, INPUT_SIZE, INPUT_DIM), 0); |
2918 | 0 | ccv_nnc_tensor_t* gg = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, BATCH_SIZE, OUTPUT_SIZE, OUTPUT_SIZE, OUTPUT_DIM), 0); |
2919 | 0 | ccv_nnc_tensor_t* gh = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, BATCH_SIZE, INPUT_SIZE, INPUT_SIZE, INPUT_DIM), 0); |
2920 | 0 | ccv_nnc_tensor_t* gw = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, OUTPUT_DIM, KERNEL_SIZE, KERNEL_SIZE, INPUT_DIM), 0); |
2921 | 0 | ccv_nnc_tensor_t* gbias = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 1, 1, 1, OUTPUT_DIM), 0); |
2922 | 0 | ccv_nnc_tensor_t* gdw = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, OUTPUT_DIM, KERNEL_SIZE, KERNEL_SIZE, INPUT_DIM), 0); |
2923 | 0 | ccv_nnc_tensor_t* gdbias = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 1, 1, 1, OUTPUT_DIM), 0); |
2924 | 0 | ccv_nnc_tensor_t* gao = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, BATCH_SIZE, INPUT_DIM, INPUT_SIZE, INPUT_SIZE), 0); |
2925 | 0 | ccv_nnc_tensor_t* ggo = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, BATCH_SIZE, OUTPUT_DIM, OUTPUT_SIZE, OUTPUT_SIZE), 0); |
2926 | 0 | ccv_nnc_tensor_t* gho = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, BATCH_SIZE, INPUT_DIM, INPUT_SIZE, INPUT_SIZE), 0); |
2927 | 0 | ccv_nnc_tensor_t* gwo = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, OUTPUT_DIM, INPUT_DIM, KERNEL_SIZE, KERNEL_SIZE), 0); |
2928 | 0 | ccv_nnc_tensor_t* gbiaso = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, 1, OUTPUT_DIM, 1, 1), 0); |
2929 | 0 | ccv_nnc_tensor_t* gdwo = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, OUTPUT_DIM, INPUT_DIM, KERNEL_SIZE, KERNEL_SIZE), 0); |
2930 | 0 | ccv_nnc_tensor_t* gdbiaso = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, 1, OUTPUT_DIM, 1, 1), 0); |
2931 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(a, w, g), TENSOR_LIST(ga, gw, gg), 0); |
2932 | 0 | ccv_nnc_cmd_exec(cmd, hint, 0, TENSOR_LIST(g, a, w), TENSOR_LIST(h, dw, dbias), 0); |
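| | // Convert the NHWC inputs to NCHW so the MPS backward pass exercises that layout. |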
2933 | 0 | ccv_nnc_cmd_exec(CMD_FORMAT_TRANSFORM_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(ga, gw, gg), TENSOR_LIST(gao, gwo, ggo), 0); |
2934 | 0 | cmd.backend = CCV_NNC_BACKEND_MPS; |
2935 | | |
2936 | 0 | assert(cmd.backend >= 0); |
2937 | 0 | cmd.algorithm = -1; |
2938 | 0 | ccv_nnc_stream_context_t* stream_context = ccv_nnc_stream_context_new(CCV_STREAM_CONTEXT_GPU); |
2939 | | |
2940 | 0 | cmd = ccv_nnc_cmd_autotune(cmd, 1 * 1024 * 1024 * 1024, hint, 0, TENSOR_LIST(ggo, gao, gwo), TENSOR_LIST(gho, gdwo, gdbiaso), stream_context); |
2941 | 0 | assert(CCV_NNC_EXEC_SUCCESS == ccv_nnc_cmd_exec(cmd, hint, 0, TENSOR_LIST(ggo, gao, gwo), TENSOR_LIST(gho, gdwo, gdbiaso), stream_context)); |
2942 | 0 | ccv_nnc_stream_context_wait(stream_context); |
2943 | 0 | ccv_nnc_stream_context_free(stream_context); |
2944 | 0 | ccv_nnc_tensor_t* ch = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, BATCH_SIZE, INPUT_SIZE, INPUT_SIZE, INPUT_DIM), 0); |
2945 | 0 | ccv_nnc_tensor_t* cdw = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, OUTPUT_DIM, KERNEL_SIZE, KERNEL_SIZE, INPUT_DIM), 0); |
2946 | 0 | ccv_nnc_tensor_t* cdbias = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 1, OUTPUT_DIM, 1, 1), 0); |
2947 | | |
2948 | 0 | ccv_nnc_cmd_exec(CMD_FORMAT_TRANSFORM_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(gho, gdwo, gdbiaso), TENSOR_LIST(gh, gdw, gdbias), 0); |
2949 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(gh, gdw, gdbias), TENSOR_LIST(ch, cdw, cdbias), 0); |
2950 | | |
2951 | 0 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, dw->data.f32, cdw->data.f32, INPUT_DIM * OUTPUT_DIM * KERNEL_SIZE * KERNEL_SIZE, 5e-1, "output from mps should match from CPU"); |
2952 | 0 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, dbias->data.f32, cdbias->data.f32, OUTPUT_DIM, 5e-1, "output from mps should match from CPU"); |
2953 | 0 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, h->data.f32, ch->data.f32, BATCH_SIZE * INPUT_DIM * INPUT_SIZE * INPUT_SIZE, 1e-4, "output from mps should match from CPU"); |
2954 | 0 | ccv_nnc_tensor_free(gao); |
2955 | 0 | ccv_nnc_tensor_free(ggo); |
2956 | 0 | ccv_nnc_tensor_free(gho); |
2957 | 0 | ccv_nnc_tensor_free(gwo); |
2958 | 0 | ccv_nnc_tensor_free(gbiaso); |
2959 | 0 | ccv_nnc_tensor_free(gdwo); |
2960 | 0 | ccv_nnc_tensor_free(gdbiaso); |
2961 | 0 | ccv_nnc_tensor_free(h); |
2962 | 0 | ccv_nnc_tensor_free(gh); |
2963 | 0 | ccv_nnc_tensor_free(w); |
2964 | 0 | ccv_nnc_tensor_free(g); |
2965 | 0 | ccv_nnc_tensor_free(a); |
2966 | 0 | ccv_nnc_tensor_free(gbias); |
2967 | 0 | ccv_nnc_tensor_free(gdbias); |
2968 | 0 | ccv_nnc_tensor_free(gdw); |
2969 | 0 | ccv_nnc_tensor_free(gw); |
2970 | 0 | ccv_nnc_tensor_free(gg); |
2971 | 0 | ccv_nnc_tensor_free(ga); |
2972 | 0 | ccv_nnc_tensor_free(ch); |
2973 | 0 | ccv_nnc_tensor_free(cdw); |
2974 | 0 | ccv_nnc_tensor_free(cdbias); |
2975 | 0 | } |
2976 | | |
2977 | | TEST_CASE("compare group norm gradient with mps") |
2978 | 1 | { |
2979 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_GROUP_NORM_FORWARD, CCV_NNC_BACKEND_MPS) && |
2980 | 1 | ccv_nnc_cmd_ok(CCV_NNC_GROUP_NORM_BACKWARD, CCV_NNC_BACKEND_MPS) && |
2981 | 1 | (ccv_nnc_cmd_ok(CCV_NNC_SET_FORWARD, CCV_NNC_BACKEND_MPS) || ccv_nnc_cmd_ok(CCV_NNC_SET_FORWARD, CCV_NNC_BACKEND_MPS))); |
2982 | 0 | ccv_nnc_symbolic_graph_t* const symbolic_graph = ccv_nnc_symbolic_graph_new(); |
2983 | 0 | ccv_nnc_tensor_symbol_t bx = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, GN_C_DIM, 2, LN_DIM), "x"); |
2984 | 0 | ccv_nnc_tensor_symbol_t by = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, GN_C_DIM, 2, LN_DIM), "y"); |
2985 | 0 | ccv_nnc_tensor_symbol_t scale = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 1, GN_C_DIM, 2, LN_DIM), "scale"); |
2986 | 0 | ccv_nnc_tensor_symbol_t bias = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 1, GN_C_DIM, 2, LN_DIM), "bias"); |
2987 | 0 | ccv_nnc_tensor_symbol_t saved_mean = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, GN_RC_DIM, 2, LN_DIM), "saved_mean"); |
2988 | 0 | ccv_nnc_tensor_symbol_t saved_inv_std = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, GN_RC_DIM, 2, LN_DIM), "saved_inv_std"); |
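| | // Group norm over the GN_C_DIM (16) channels in 4 groups (GN_RC_DIM), eps 1e-5, with learned scale and bias. |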
2989 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_GROUP_NORM_FORWARD(1, 4, 1e-5, 1), TENSOR_SYMBOL_LIST(bx, scale, bias), TENSOR_SYMBOL_LIST(by, saved_mean, saved_inv_std), "group_norm"); |
2990 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
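| | // Build the backward graph so gradients w.r.t. x, scale and bias are available for comparison. |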
2991 | 0 | ccv_nnc_symbolic_graph_backward(symbolic_graph, TENSOR_SYMBOL_LIST(by), TENSOR_SYMBOL_LIST(bx, scale, bias), SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph)); |
2992 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
2993 | 0 | ccv_nnc_tensor_symbol_t dby = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, by); |
2994 | 0 | ccv_nnc_tensor_symbol_t dbx = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, bx); |
2995 | 0 | ccv_nnc_graph_t* graph = 0; |
2996 | 0 | ccv_nnc_tensor_arena_t* tensor_arena = 0; |
2997 | 0 | ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0; |
2998 | 0 | ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena); |
2999 | 0 | SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH); |
3000 | 0 | GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH); |
3001 | 0 | ccv_nnc_tensor_t* const bx_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, bx); |
3002 | 0 | dsfmt_t dsfmt; |
3003 | 0 | ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, GN_C_DIM, 2, LN_DIM), 0); |
3004 | 0 | int i; |
3005 | 0 | dsfmt_init_gen_rand(&dsfmt, 1); |
3006 | 0 | for (i = 0; i < 2 * GN_C_DIM * 2 * LN_DIM; i++) |
3007 | 0 | x_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) * 100; |
3008 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor), TENSOR_LIST(bx_tensor), 0); |
3009 | 0 | float scaledata[1 * GN_C_DIM * 2 * LN_DIM]; |
3010 | 0 | float biasdata[1 * GN_C_DIM * 2 * LN_DIM]; |
3011 | 0 | for (i = 0; i < 1 * GN_C_DIM * 2 * LN_DIM; i++) |
3012 | 0 | { |
3013 | 0 | scaledata[i] = dsfmt_genrand_open_close(&dsfmt); |
3014 | 0 | biasdata[i] = dsfmt_genrand_open_close(&dsfmt); |
3015 | 0 | } |
3016 | 0 | ccv_nnc_tensor_t scale_tensor = ccv_nnc_tensor(scaledata, CPU_TENSOR_NHWC(32F, 1, GN_C_DIM, 2, LN_DIM), 0); |
3017 | 0 | ccv_nnc_tensor_t bias_tensor = ccv_nnc_tensor(biasdata, CPU_TENSOR_NHWC(32F, 1, GN_C_DIM, 2, LN_DIM), 0); |
3018 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(&scale_tensor, &bias_tensor), TENSOR_LIST(ccv_nnc_tensor_from_symbol(tensor_arena, scale), ccv_nnc_tensor_from_symbol(tensor_arena, bias)), 0); |
3019 | | // ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
3020 | 0 | ccv_nnc_tensor_t* const dy_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, GN_C_DIM, 2, LN_DIM), 0); |
3021 | 0 | ccv_nnc_tensor_t* const dby_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, dby); |
3022 | 0 | for (i = 0; i < 2 * GN_C_DIM * 2 * LN_DIM; i++) |
3023 | 0 | dy_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) * 2 - 1; |
3024 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(dy_tensor), TENSOR_LIST(dby_tensor), 0); |
3025 | 0 | ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
3026 | 0 | ccv_nnc_tensor_t* const dbx_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, dbx); |
3027 | 0 | ccv_nnc_tensor_t* const dx_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, GN_C_DIM, 2, LN_DIM), 0); |
3028 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(dbx_tensor), TENSOR_LIST(dx_tensor), 0); |
3029 | 0 | ccv_nnc_tensor_t* const dbscale_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, ccv_nnc_tensor_symbol_for_backward(symbolic_graph, scale)); |
3030 | 0 | ccv_nnc_tensor_t* const dbbias_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, ccv_nnc_tensor_symbol_for_backward(symbolic_graph, bias)); |
3031 | 0 | ccv_nnc_tensor_t* const dscale_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 1, GN_C_DIM, 2, LN_DIM), 0); |
3032 | 0 | ccv_nnc_tensor_t* const dbias_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 1, GN_C_DIM, 2, LN_DIM), 0); |
3033 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(dbscale_tensor, dbbias_tensor), TENSOR_LIST(dscale_tensor, dbias_tensor), 0); |
3034 | 0 | ccv_nnc_symbolic_graph_free(symbolic_graph); |
3035 | 0 | ccv_nnc_tensor_arena_free(tensor_arena); |
3036 | 0 | ccv_nnc_graph_exec_arena_free(graph_exec_arena); |
3037 | 0 | ccv_nnc_graph_free(graph); |
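| | // Mirror the same graph on the CPU backend and feed it identical inputs as the reference. |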
3038 | 0 | ccv_nnc_symbolic_graph_t* const cpu_symbolic_graph = ccv_nnc_symbolic_graph_new(); |
3039 | 0 | ccv_nnc_tensor_symbol_t cx = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, GN_C_DIM, 2, LN_DIM), "x"); |
3040 | 0 | ccv_nnc_tensor_symbol_t cy = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, GN_C_DIM, 2, LN_DIM), "y"); |
3041 | 0 | ccv_nnc_tensor_symbol_t cscale = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 1, GN_C_DIM, 2, LN_DIM), "scale"); |
3042 | 0 | ccv_nnc_tensor_symbol_t cbias = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 1, GN_C_DIM, 2, LN_DIM), "bias"); |
3043 | 0 | ccv_nnc_tensor_symbol_t csaved_mean = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, GN_RC_DIM, 2, LN_DIM), "saved_mean"); |
3044 | 0 | ccv_nnc_tensor_symbol_t csaved_inv_std = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, GN_RC_DIM, 2, LN_DIM), "saved_inv_std"); |
3045 | 0 | ccv_nnc_graph_exec_symbol_new(cpu_symbolic_graph, CMD_GROUP_NORM_FORWARD(1, GN_RC_DIM, 1e-5, 1), TENSOR_SYMBOL_LIST(cx, cscale, cbias), TENSOR_SYMBOL_LIST(cy, csaved_mean, csaved_inv_std), "group_norm"); |
3046 | 0 | ccv_nnc_graph_exec_symbol_autogen(cpu_symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
3047 | 0 | ccv_nnc_symbolic_graph_backward(cpu_symbolic_graph, TENSOR_SYMBOL_LIST(cy), TENSOR_SYMBOL_LIST(cx, cscale, cbias), SYMBOLIC_GRAPH_SOURCES(cpu_symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(cpu_symbolic_graph)); |
3048 | 0 | ccv_nnc_graph_exec_symbol_autogen(cpu_symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
3049 | 0 | ccv_nnc_tensor_symbol_t dcy = ccv_nnc_tensor_symbol_for_backward(cpu_symbolic_graph, cy); |
3050 | 0 | ccv_nnc_tensor_symbol_t dcx = ccv_nnc_tensor_symbol_for_backward(cpu_symbolic_graph, cx); |
3051 | 0 | ccv_nnc_tensor_symbol_t dcscale = ccv_nnc_tensor_symbol_for_backward(cpu_symbolic_graph, cscale); |
3052 | 0 | ccv_nnc_tensor_symbol_t dcbias = ccv_nnc_tensor_symbol_for_backward(cpu_symbolic_graph, cbias); |
3053 | 0 | ccv_nnc_graph_t* cpu_graph = 0; |
3054 | 0 | ccv_nnc_tensor_arena_t* cpu_tensor_arena = 0; |
3055 | 0 | ccv_nnc_graph_exec_arena_t* cpu_graph_exec_arena = 0; |
3056 | 0 | ccv_nnc_symbolic_graph_compile(cpu_symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(cpu_symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(cpu_symbolic_graph), &cpu_graph, &cpu_tensor_arena, &cpu_graph_exec_arena); |
3057 | 0 | ccv_nnc_tensor_t* const cx_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, cx); |
3058 | 0 | memcpy(cx_tensor->data.f32, x_tensor->data.f32, sizeof(float) * 2 * GN_C_DIM * 2 * LN_DIM); |
3059 | 0 | ccv_nnc_tensor_t* const dcy_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, dcy); |
3060 | 0 | memcpy(dcy_tensor->data.f32, dy_tensor->data.f32, sizeof(float) * 2 * GN_C_DIM * 2 * LN_DIM); |
3061 | 0 | ccv_nnc_tensor_t* const cscale_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, cscale); |
3062 | 0 | memcpy(cscale_tensor->data.f32, scaledata, sizeof(float) * 1 * GN_C_DIM * 2 * LN_DIM); |
3063 | 0 | ccv_nnc_tensor_t* const cbias_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, cbias); |
3064 | 0 | memcpy(cbias_tensor->data.f32, biasdata, sizeof(float) * 1 * GN_C_DIM * 2 * LN_DIM); |
3065 | | |
3066 | 0 | ccv_nnc_graph_run(cpu_graph, 0, TRAVERSE_FULL, 0, 0); |
3067 | | |
3068 | 0 | ccv_nnc_tensor_t* const dcx_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, dcx); |
3069 | 0 | ccv_nnc_tensor_t* const dcscale_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, dcscale); |
3070 | 0 | ccv_nnc_tensor_t* const dcbias_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, dcbias); |
3071 | | |
3072 | 0 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, dx_tensor->data.f32, dcx_tensor->data.f32, 2 * GN_C_DIM * 2 * LN_DIM, 1e-5, "group norm output from mps should match from CPU"); |
3073 | 0 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, dbias_tensor->data.f32, dcbias_tensor->data.f32, 1 * GN_C_DIM * 2 * LN_DIM, 1e-5, "group norm output from mps should match from CPU"); |
3074 | 0 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, dscale_tensor->data.f32, dcscale_tensor->data.f32, 1 * GN_C_DIM * 2 * LN_DIM, 1e-5, "group norm output from mps should match from CPU"); |
3075 | | |
3076 | 0 | ccv_nnc_symbolic_graph_free(cpu_symbolic_graph); |
3077 | 0 | ccv_nnc_tensor_arena_free(cpu_tensor_arena); |
3078 | 0 | ccv_nnc_graph_exec_arena_free(cpu_graph_exec_arena); |
3079 | 0 | ccv_nnc_graph_free(cpu_graph); |
3080 | 0 | ccv_nnc_tensor_free(x_tensor); |
3081 | 0 | ccv_nnc_tensor_free(dy_tensor); |
3082 | 0 | ccv_nnc_tensor_free(dx_tensor); |
3083 | 0 | ccv_nnc_tensor_free(dscale_tensor); |
3084 | 0 | ccv_nnc_tensor_free(dbias_tensor); |
3085 | 0 | } |
3086 | | |
3087 | | TEST_CASE("compare group norm gradient with mps, variant 1") |
3088 | 1 | { |
3089 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_GROUP_NORM_FORWARD, CCV_NNC_BACKEND_MPS) && |
3090 | 1 | ccv_nnc_cmd_ok(CCV_NNC_GROUP_NORM_BACKWARD, CCV_NNC_BACKEND_MPS) && |
3091 | 1 | (ccv_nnc_cmd_ok(CCV_NNC_SET_FORWARD, CCV_NNC_BACKEND_MPS) || ccv_nnc_cmd_ok(CCV_NNC_SET_FORWARD, CCV_NNC_BACKEND_MPS))); |
3092 | 0 | ccv_nnc_symbolic_graph_t* const symbolic_graph = ccv_nnc_symbolic_graph_new(); |
3093 | 0 | ccv_nnc_tensor_symbol_t bx = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, GN_C_DIM, 2, LN_DIM), "x"); |
3094 | 0 | ccv_nnc_tensor_symbol_t by = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, GN_C_DIM, 2, LN_DIM), "y"); |
3095 | 0 | ccv_nnc_tensor_symbol_t scale = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 1, GN_C_DIM, 1, 1), "scale"); |
3096 | 0 | ccv_nnc_tensor_symbol_t bias = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 1, GN_C_DIM, 1, 1), "bias"); |
3097 | 0 | ccv_nnc_tensor_symbol_t saved_mean = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, GN_RC_DIM, 1, 1), "saved_mean"); |
3098 | 0 | ccv_nnc_tensor_symbol_t saved_inv_std = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, GN_RC_DIM, 1, 1), "saved_inv_std"); |
3099 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_GROUP_NORM_FORWARD(1, 4, 1e-5, 1), TENSOR_SYMBOL_LIST(bx, scale, bias), TENSOR_SYMBOL_LIST(by, saved_mean, saved_inv_std), "group_norm"); |
3100 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
3101 | 0 | ccv_nnc_symbolic_graph_backward(symbolic_graph, TENSOR_SYMBOL_LIST(by), TENSOR_SYMBOL_LIST(bx, scale, bias), SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph)); |
3102 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
3103 | 0 | ccv_nnc_tensor_symbol_t dby = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, by); |
3104 | 0 | ccv_nnc_tensor_symbol_t dbx = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, bx); |
3105 | 0 | ccv_nnc_graph_t* graph = 0; |
3106 | 0 | ccv_nnc_tensor_arena_t* tensor_arena = 0; |
3107 | 0 | ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0; |
3108 | 0 | ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena); |
3109 | 0 | SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH); |
3110 | 0 | GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH); |
3111 | 0 | ccv_nnc_tensor_t* const bx_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, bx); |
3112 | 0 | dsfmt_t dsfmt; |
3113 | 0 | ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, GN_C_DIM, 2, LN_DIM), 0); |
3114 | 0 | int i; |
3115 | 0 | dsfmt_init_gen_rand(&dsfmt, 1); |
3116 | 0 | for (i = 0; i < 2 * GN_C_DIM * 2 * LN_DIM; i++) |
3117 | 0 | x_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) * 100; |
3118 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor), TENSOR_LIST(bx_tensor), 0); |
3119 | 0 | float scaledata[1 * GN_C_DIM * 1 * 1]; |
3120 | 0 | float biasdata[1 * GN_C_DIM * 1 * 1]; |
3121 | 0 | for (i = 0; i < 1 * GN_C_DIM * 1 * 1; i++) |
3122 | 0 | { |
3123 | 0 | scaledata[i] = dsfmt_genrand_open_close(&dsfmt); |
3124 | 0 | biasdata[i] = dsfmt_genrand_open_close(&dsfmt); |
3125 | 0 | } |
3126 | 0 | ccv_nnc_tensor_t scale_tensor = ccv_nnc_tensor(scaledata, CPU_TENSOR_NHWC(32F, 1, GN_C_DIM, 1, 1), 0); |
3127 | 0 | ccv_nnc_tensor_t bias_tensor = ccv_nnc_tensor(biasdata, CPU_TENSOR_NHWC(32F, 1, GN_C_DIM, 1, 1), 0); |
3128 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(&scale_tensor, &bias_tensor), TENSOR_LIST(ccv_nnc_tensor_from_symbol(tensor_arena, scale), ccv_nnc_tensor_from_symbol(tensor_arena, bias)), 0); |
3129 | | // ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
3130 | 0 | ccv_nnc_tensor_t* const dy_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, GN_C_DIM, 2, LN_DIM), 0); |
3131 | 0 | ccv_nnc_tensor_t* const dby_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, dby); |
3132 | 0 | for (i = 0; i < 2 * GN_C_DIM * 2 * LN_DIM; i++) |
3133 | 0 | dy_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) * 2 - 1; |
3134 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(dy_tensor), TENSOR_LIST(dby_tensor), 0); |
3135 | 0 | ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
3136 | 0 | ccv_nnc_tensor_t* const dbx_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, dbx); |
3137 | 0 | ccv_nnc_tensor_t* const dx_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, GN_C_DIM, 2, LN_DIM), 0); |
3138 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(dbx_tensor), TENSOR_LIST(dx_tensor), 0); |
3139 | 0 | ccv_nnc_tensor_t* const dbscale_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, ccv_nnc_tensor_symbol_for_backward(symbolic_graph, scale)); |
3140 | 0 | ccv_nnc_tensor_t* const dbbias_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, ccv_nnc_tensor_symbol_for_backward(symbolic_graph, bias)); |
3141 | 0 | ccv_nnc_tensor_t* const dscale_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 1, GN_C_DIM, 1, 1), 0); |
3142 | 0 | ccv_nnc_tensor_t* const dbias_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 1, GN_C_DIM, 1, 1), 0); |
3143 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(dbscale_tensor, dbbias_tensor), TENSOR_LIST(dscale_tensor, dbias_tensor), 0); |
3144 | 0 | ccv_nnc_symbolic_graph_free(symbolic_graph); |
3145 | 0 | ccv_nnc_tensor_arena_free(tensor_arena); |
3146 | 0 | ccv_nnc_graph_exec_arena_free(graph_exec_arena); |
3147 | 0 | ccv_nnc_graph_free(graph); |
3148 | 0 | ccv_nnc_symbolic_graph_t* const cpu_symbolic_graph = ccv_nnc_symbolic_graph_new(); |
3149 | 0 | ccv_nnc_tensor_symbol_t cx = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, GN_C_DIM, 2, LN_DIM), "x"); |
3150 | 0 | ccv_nnc_tensor_symbol_t cy = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, GN_C_DIM, 2, LN_DIM), "y"); |
3151 | 0 | ccv_nnc_tensor_symbol_t cscale = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 1, GN_C_DIM, 1, 1), "scale"); |
3152 | 0 | ccv_nnc_tensor_symbol_t cbias = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 1, GN_C_DIM, 1, 1), "bias"); |
3153 | 0 | ccv_nnc_tensor_symbol_t csaved_mean = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, GN_RC_DIM, 1, 1), "saved_mean"); |
3154 | 0 | ccv_nnc_tensor_symbol_t csaved_inv_std = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, GN_RC_DIM, 1, 1), "saved_inv_std"); |
3155 | 0 | ccv_nnc_graph_exec_symbol_new(cpu_symbolic_graph, CMD_GROUP_NORM_FORWARD(1, GN_RC_DIM, 1e-5, 1), TENSOR_SYMBOL_LIST(cx, cscale, cbias), TENSOR_SYMBOL_LIST(cy, csaved_mean, csaved_inv_std), "group_norm"); |
3156 | 0 | ccv_nnc_graph_exec_symbol_autogen(cpu_symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
3157 | 0 | ccv_nnc_symbolic_graph_backward(cpu_symbolic_graph, TENSOR_SYMBOL_LIST(cy), TENSOR_SYMBOL_LIST(cx, cscale, cbias), SYMBOLIC_GRAPH_SOURCES(cpu_symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(cpu_symbolic_graph)); |
3158 | 0 | ccv_nnc_graph_exec_symbol_autogen(cpu_symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
3159 | 0 | ccv_nnc_tensor_symbol_t dcy = ccv_nnc_tensor_symbol_for_backward(cpu_symbolic_graph, cy); |
3160 | 0 | ccv_nnc_tensor_symbol_t dcx = ccv_nnc_tensor_symbol_for_backward(cpu_symbolic_graph, cx); |
3161 | 0 | ccv_nnc_tensor_symbol_t dcscale = ccv_nnc_tensor_symbol_for_backward(cpu_symbolic_graph, cscale); |
3162 | 0 | ccv_nnc_tensor_symbol_t dcbias = ccv_nnc_tensor_symbol_for_backward(cpu_symbolic_graph, cbias); |
3163 | 0 | ccv_nnc_graph_t* cpu_graph = 0; |
3164 | 0 | ccv_nnc_tensor_arena_t* cpu_tensor_arena = 0; |
3165 | 0 | ccv_nnc_graph_exec_arena_t* cpu_graph_exec_arena = 0; |
3166 | 0 | ccv_nnc_symbolic_graph_compile(cpu_symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(cpu_symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(cpu_symbolic_graph), &cpu_graph, &cpu_tensor_arena, &cpu_graph_exec_arena); |
3167 | 0 | ccv_nnc_tensor_t* const cx_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, cx); |
3168 | 0 | memcpy(cx_tensor->data.f32, x_tensor->data.f32, sizeof(float) * 2 * GN_C_DIM * 2 * LN_DIM); |
3169 | 0 | ccv_nnc_tensor_t* const dcy_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, dcy); |
3170 | 0 | memcpy(dcy_tensor->data.f32, dy_tensor->data.f32, sizeof(float) * 2 * GN_C_DIM * 2 * LN_DIM); |
3171 | 0 | ccv_nnc_tensor_t* const cscale_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, cscale); |
3172 | 0 | memcpy(cscale_tensor->data.f32, scaledata, sizeof(float) * 1 * GN_C_DIM * 1 * 1); |
3173 | 0 | ccv_nnc_tensor_t* const cbias_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, cbias); |
3174 | 0 | memcpy(cbias_tensor->data.f32, biasdata, sizeof(float) * 1 * GN_C_DIM * 1 * 1); |
3175 | | |
3176 | 0 | ccv_nnc_graph_run(cpu_graph, 0, TRAVERSE_FULL, 0, 0); |
3177 | | |
3178 | 0 | ccv_nnc_tensor_t* const dcx_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, dcx); |
3179 | 0 | ccv_nnc_tensor_t* const dcscale_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, dcscale); |
3180 | 0 | ccv_nnc_tensor_t* const dcbias_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, dcbias); |
3181 | | |
3182 | 0 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, dx_tensor->data.f32, dcx_tensor->data.f32, 2 * GN_C_DIM * 2 * LN_DIM, 1e-5, "group norm output from mps should match from CPU"); |
3183 | 0 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, dbias_tensor->data.f32, dcbias_tensor->data.f32, 1 * GN_C_DIM * 1 * 1, 1e-5, "group norm output from mps should match from CPU"); |
3184 | 0 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, dscale_tensor->data.f32, dcscale_tensor->data.f32, 1 * GN_C_DIM * 1 * 1, 1e-5, "group norm output from mps should match from CPU"); |
3185 | | |
3186 | 0 | ccv_nnc_symbolic_graph_free(cpu_symbolic_graph); |
3187 | 0 | ccv_nnc_tensor_arena_free(cpu_tensor_arena); |
3188 | 0 | ccv_nnc_graph_exec_arena_free(cpu_graph_exec_arena); |
3189 | 0 | ccv_nnc_graph_free(cpu_graph); |
3190 | 0 | ccv_nnc_tensor_free(x_tensor); |
3191 | 0 | ccv_nnc_tensor_free(dy_tensor); |
3192 | 0 | ccv_nnc_tensor_free(dx_tensor); |
3193 | 0 | ccv_nnc_tensor_free(dscale_tensor); |
3194 | 0 | ccv_nnc_tensor_free(dbias_tensor); |
3195 | 0 | } |
3196 | | |
3197 | | TEST_CASE("compare group norm gradient with mps (no dbias)") |
3198 | 1 | { |
3199 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_GROUP_NORM_FORWARD, CCV_NNC_BACKEND_MPS) && |
3200 | 1 | ccv_nnc_cmd_ok(CCV_NNC_GROUP_NORM_BACKWARD, CCV_NNC_BACKEND_MPS) && |
3201 | 1 | (ccv_nnc_cmd_ok(CCV_NNC_SET_FORWARD, CCV_NNC_BACKEND_MPS) || ccv_nnc_cmd_ok(CCV_NNC_SET_FORWARD, CCV_NNC_BACKEND_MPS))); |
3202 | 0 | ccv_nnc_symbolic_graph_t* const symbolic_graph = ccv_nnc_symbolic_graph_new(); |
3203 | 0 | ccv_nnc_tensor_symbol_t bx = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, GN_C_DIM, 2, LN_DIM), "x"); |
3204 | 0 | ccv_nnc_tensor_symbol_t by = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, GN_C_DIM, 2, LN_DIM), "y"); |
3205 | 0 | ccv_nnc_tensor_symbol_t scale = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 1, GN_C_DIM, 2, LN_DIM), "scale"); |
3206 | 0 | ccv_nnc_tensor_symbol_t bias = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 1, GN_C_DIM, 2, LN_DIM), "bias"); |
3207 | 0 | ccv_nnc_tensor_symbol_t saved_mean = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, GN_RC_DIM, 2, LN_DIM), "saved_mean"); |
3208 | 0 | ccv_nnc_tensor_symbol_t saved_inv_std = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, GN_RC_DIM, 2, LN_DIM), "saved_inv_std"); |
3209 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_GROUP_NORM_FORWARD(1, 4, 1e-5, 1), TENSOR_SYMBOL_LIST(bx, scale, bias), TENSOR_SYMBOL_LIST(by, saved_mean, saved_inv_std), "group_norm"); |
3210 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
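| | // Only x and scale gradients are requested here; this variant leaves the bias gradient out. |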
3211 | 0 | ccv_nnc_symbolic_graph_backward(symbolic_graph, TENSOR_SYMBOL_LIST(by), TENSOR_SYMBOL_LIST(bx, scale), SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph)); |
3212 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
3213 | 0 | ccv_nnc_tensor_symbol_t dby = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, by); |
3214 | 0 | ccv_nnc_tensor_symbol_t dbx = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, bx); |
3215 | 0 | ccv_nnc_graph_t* graph = 0; |
3216 | 0 | ccv_nnc_tensor_arena_t* tensor_arena = 0; |
3217 | 0 | ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0; |
3218 | 0 | ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena); |
3219 | 0 | SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH); |
3220 | 0 | GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH); |
3221 | 0 | ccv_nnc_tensor_t* const bx_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, bx); |
3222 | 0 | dsfmt_t dsfmt; |
3223 | 0 | ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, GN_C_DIM, 2, LN_DIM), 0); |
3224 | 0 | int i; |
3225 | 0 | dsfmt_init_gen_rand(&dsfmt, 1); |
3226 | 0 | for (i = 0; i < 2 * GN_C_DIM * 2 * LN_DIM; i++) |
3227 | 0 | x_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) * 100; |
3228 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor), TENSOR_LIST(bx_tensor), 0); |
3229 | 0 | float scaledata[1 * GN_C_DIM * 2 * LN_DIM]; |
3230 | 0 | float biasdata[1 * GN_C_DIM * 2 * LN_DIM]; |
3231 | 0 | for (i = 0; i < 1 * GN_C_DIM * 2 * LN_DIM; i++) |
3232 | 0 | { |
3233 | 0 | scaledata[i] = dsfmt_genrand_open_close(&dsfmt); |
3234 | 0 | biasdata[i] = dsfmt_genrand_open_close(&dsfmt); |
3235 | 0 | } |
3236 | 0 | ccv_nnc_tensor_t scale_tensor = ccv_nnc_tensor(scaledata, CPU_TENSOR_NHWC(32F, 1, GN_C_DIM, 2, LN_DIM), 0); |
3237 | 0 | ccv_nnc_tensor_t bias_tensor = ccv_nnc_tensor(biasdata, CPU_TENSOR_NHWC(32F, 1, GN_C_DIM, 2, LN_DIM), 0); |
3238 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(&scale_tensor, &bias_tensor), TENSOR_LIST(ccv_nnc_tensor_from_symbol(tensor_arena, scale), ccv_nnc_tensor_from_symbol(tensor_arena, bias)), 0); |
3239 | | // ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
3240 | 0 | ccv_nnc_tensor_t* const dy_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, GN_C_DIM, 2, LN_DIM), 0); |
3241 | 0 | ccv_nnc_tensor_t* const dby_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, dby); |
3242 | 0 | for (i = 0; i < 2 * GN_C_DIM * 2 * LN_DIM; i++) |
3243 | 0 | dy_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) * 2 - 1; |
3244 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(dy_tensor), TENSOR_LIST(dby_tensor), 0); |
3245 | 0 | ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
3246 | 0 | ccv_nnc_tensor_t* const dbx_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, dbx); |
3247 | 0 | ccv_nnc_tensor_t* const dx_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, GN_C_DIM, 2, LN_DIM), 0); |
3248 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(dbx_tensor), TENSOR_LIST(dx_tensor), 0); |
3249 | 0 | ccv_nnc_tensor_t* const dbscale_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, ccv_nnc_tensor_symbol_for_backward(symbolic_graph, scale)); |
3250 | 0 | ccv_nnc_tensor_t* const dscale_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 1, GN_C_DIM, 2, LN_DIM), 0); |
3251 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(dbscale_tensor), TENSOR_LIST(dscale_tensor), 0); |
3252 | 0 | ccv_nnc_symbolic_graph_free(symbolic_graph); |
3253 | 0 | ccv_nnc_tensor_arena_free(tensor_arena); |
3254 | 0 | ccv_nnc_graph_exec_arena_free(graph_exec_arena); |
3255 | 0 | ccv_nnc_graph_free(graph); |
3256 | 0 | ccv_nnc_symbolic_graph_t* const cpu_symbolic_graph = ccv_nnc_symbolic_graph_new(); |
3257 | 0 | ccv_nnc_tensor_symbol_t cx = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, GN_C_DIM, 2, LN_DIM), "x"); |
3258 | 0 | ccv_nnc_tensor_symbol_t cy = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, GN_C_DIM, 2, LN_DIM), "y"); |
3259 | 0 | ccv_nnc_tensor_symbol_t cscale = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 1, GN_C_DIM, 2, LN_DIM), "scale"); |
3260 | 0 | ccv_nnc_tensor_symbol_t cbias = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 1, GN_C_DIM, 2, LN_DIM), "bias"); |
3261 | 0 | ccv_nnc_tensor_symbol_t csaved_mean = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, GN_RC_DIM, 2, LN_DIM), "saved_mean"); |
3262 | 0 | ccv_nnc_tensor_symbol_t csaved_inv_std = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, GN_RC_DIM, 2, LN_DIM), "saved_inv_std"); |
3263 | 0 | ccv_nnc_graph_exec_symbol_new(cpu_symbolic_graph, CMD_GROUP_NORM_FORWARD(1, GN_RC_DIM, 1e-5, 1), TENSOR_SYMBOL_LIST(cx, cscale, cbias), TENSOR_SYMBOL_LIST(cy, csaved_mean, csaved_inv_std), "group_norm"); |
3264 | 0 | ccv_nnc_graph_exec_symbol_autogen(cpu_symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
3265 | 0 | ccv_nnc_symbolic_graph_backward(cpu_symbolic_graph, TENSOR_SYMBOL_LIST(cy), TENSOR_SYMBOL_LIST(cx, cscale), SYMBOLIC_GRAPH_SOURCES(cpu_symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(cpu_symbolic_graph)); |
3266 | 0 | ccv_nnc_graph_exec_symbol_autogen(cpu_symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
3267 | 0 | ccv_nnc_tensor_symbol_t dcy = ccv_nnc_tensor_symbol_for_backward(cpu_symbolic_graph, cy); |
3268 | 0 | ccv_nnc_tensor_symbol_t dcx = ccv_nnc_tensor_symbol_for_backward(cpu_symbolic_graph, cx); |
3269 | 0 | ccv_nnc_tensor_symbol_t dcscale = ccv_nnc_tensor_symbol_for_backward(cpu_symbolic_graph, cscale); |
3270 | 0 | ccv_nnc_graph_t* cpu_graph = 0; |
3271 | 0 | ccv_nnc_tensor_arena_t* cpu_tensor_arena = 0; |
3272 | 0 | ccv_nnc_graph_exec_arena_t* cpu_graph_exec_arena = 0; |
3273 | 0 | ccv_nnc_symbolic_graph_compile(cpu_symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(cpu_symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(cpu_symbolic_graph), &cpu_graph, &cpu_tensor_arena, &cpu_graph_exec_arena); |
3274 | 0 | ccv_nnc_tensor_t* const cx_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, cx); |
3275 | 0 | memcpy(cx_tensor->data.f32, x_tensor->data.f32, sizeof(float) * 2 * GN_C_DIM * 2 * LN_DIM); |
3276 | 0 | ccv_nnc_tensor_t* const dcy_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, dcy); |
3277 | 0 | memcpy(dcy_tensor->data.f32, dy_tensor->data.f32, sizeof(float) * 2 * GN_C_DIM * 2 * LN_DIM); |
3278 | 0 | ccv_nnc_tensor_t* const cscale_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, cscale); |
3279 | 0 | memcpy(cscale_tensor->data.f32, scaledata, sizeof(float) * 1 * GN_C_DIM * 2 * LN_DIM); |
3280 | | |
3281 | 0 | ccv_nnc_graph_run(cpu_graph, 0, TRAVERSE_FULL, 0, 0); |
3282 | | |
3283 | 0 | ccv_nnc_tensor_t* const dcx_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, dcx); |
3284 | 0 | ccv_nnc_tensor_t* const dcscale_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, dcscale); |
3285 | | |
3286 | 0 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, dx_tensor->data.f32, dcx_tensor->data.f32, 2 * GN_C_DIM * 2 * LN_DIM, 1e-5, "group norm output from mps should match from CPU"); |
3287 | 0 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, dscale_tensor->data.f32, dcscale_tensor->data.f32, 1 * GN_C_DIM * 2 * LN_DIM, 1e-5, "group norm output from mps should match from CPU"); |
3288 | | |
3289 | 0 | ccv_nnc_symbolic_graph_free(cpu_symbolic_graph); |
3290 | 0 | ccv_nnc_tensor_arena_free(cpu_tensor_arena); |
3291 | 0 | ccv_nnc_graph_exec_arena_free(cpu_graph_exec_arena); |
3292 | 0 | ccv_nnc_graph_free(cpu_graph); |
3293 | 0 | ccv_nnc_tensor_free(x_tensor); |
3294 | 0 | ccv_nnc_tensor_free(dy_tensor); |
3295 | 0 | ccv_nnc_tensor_free(dx_tensor); |
3296 | 0 | ccv_nnc_tensor_free(dscale_tensor); |
3297 | 0 | } |
3298 | | |
3299 | | TEST_CASE("compare group norm gradient with mps without scale / bias") |
3300 | 1 | { |
3301 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_GROUP_NORM_FORWARD, CCV_NNC_BACKEND_MPS) && |
3302 | 1 | ccv_nnc_cmd_ok(CCV_NNC_GROUP_NORM_BACKWARD, CCV_NNC_BACKEND_MPS) && |
3303 | 1 | (ccv_nnc_cmd_ok(CCV_NNC_SET_FORWARD, CCV_NNC_BACKEND_MPS) || ccv_nnc_cmd_ok(CCV_NNC_SET_FORWARD, CCV_NNC_BACKEND_MPS))); |
3304 | 0 | ccv_nnc_symbolic_graph_t* const symbolic_graph = ccv_nnc_symbolic_graph_new(); |
3305 | 0 | ccv_nnc_tensor_symbol_t bx = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, GN_C_DIM, 2, LN_DIM), "x"); |
3306 | 0 | ccv_nnc_tensor_symbol_t by = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, GN_C_DIM, 2, LN_DIM), "y"); |
3307 | 0 | ccv_nnc_tensor_symbol_t saved_mean = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, GN_RC_DIM, 2, LN_DIM), "saved_mean"); |
3308 | 0 | ccv_nnc_tensor_symbol_t saved_inv_std = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, GN_RC_DIM, 2, LN_DIM), "saved_inv_std"); |
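| | // The affine flag is 0, so group norm takes only x and the backward pass produces only dx. |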
3309 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_GROUP_NORM_FORWARD(1, 4, 1e-5, 0), TENSOR_SYMBOL_LIST(bx), TENSOR_SYMBOL_LIST(by, saved_mean, saved_inv_std), "group_norm"); |
3310 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
3311 | 0 | ccv_nnc_symbolic_graph_backward(symbolic_graph, TENSOR_SYMBOL_LIST(by), TENSOR_SYMBOL_LIST(bx), SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph)); |
3312 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
3313 | 0 | ccv_nnc_tensor_symbol_t dby = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, by); |
3314 | 0 | ccv_nnc_tensor_symbol_t dbx = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, bx); |
3315 | 0 | ccv_nnc_graph_t* graph = 0; |
3316 | 0 | ccv_nnc_tensor_arena_t* tensor_arena = 0; |
3317 | 0 | ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0; |
3318 | 0 | ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena); |
3319 | 0 | SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH); |
3320 | 0 | GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH); |
3321 | 0 | ccv_nnc_tensor_t* const bx_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, bx); |
3322 | 0 | dsfmt_t dsfmt; |
3323 | 0 | ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, GN_C_DIM, 2, LN_DIM), 0); |
3324 | 0 | int i; |
3325 | 0 | dsfmt_init_gen_rand(&dsfmt, 1); |
3326 | 0 | for (i = 0; i < 2 * GN_C_DIM * 2 * LN_DIM; i++) |
3327 | 0 | x_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) * 100; |
3328 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor), TENSOR_LIST(bx_tensor), 0); |
3329 | | // ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
3330 | 0 | ccv_nnc_tensor_t* const dy_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, GN_C_DIM, 2, LN_DIM), 0); |
3331 | 0 | ccv_nnc_tensor_t* const dby_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, dby); |
3332 | 0 | for (i = 0; i < 2 * GN_C_DIM * 2 * LN_DIM; i++) |
3333 | 0 | dy_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) * 2 - 1; |
3334 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(dy_tensor), TENSOR_LIST(dby_tensor), 0); |
3335 | 0 | ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
3336 | 0 | ccv_nnc_tensor_t* const dbx_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, dbx); |
3337 | 0 | ccv_nnc_tensor_t* const dx_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, GN_C_DIM, 2, LN_DIM), 0); |
3338 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(dbx_tensor), TENSOR_LIST(dx_tensor), 0); |
3339 | 0 | ccv_nnc_symbolic_graph_free(symbolic_graph); |
3340 | 0 | ccv_nnc_tensor_arena_free(tensor_arena); |
3341 | 0 | ccv_nnc_graph_exec_arena_free(graph_exec_arena); |
3342 | 0 | ccv_nnc_graph_free(graph); |
3343 | 0 | ccv_nnc_symbolic_graph_t* const cpu_symbolic_graph = ccv_nnc_symbolic_graph_new(); |
3344 | 0 | ccv_nnc_tensor_symbol_t cx = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, GN_C_DIM, 2, LN_DIM), "x"); |
3345 | 0 | ccv_nnc_tensor_symbol_t cy = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, GN_C_DIM, 2, LN_DIM), "y"); |
3346 | 0 | ccv_nnc_tensor_symbol_t csaved_mean = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, GN_RC_DIM, 2, LN_DIM), "saved_mean"); |
3347 | 0 | ccv_nnc_tensor_symbol_t csaved_inv_std = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, GN_RC_DIM, 2, LN_DIM), "saved_inv_std"); |
3348 | 0 | ccv_nnc_graph_exec_symbol_new(cpu_symbolic_graph, CMD_GROUP_NORM_FORWARD(1, GN_RC_DIM, 1e-5, 0), TENSOR_SYMBOL_LIST(cx), TENSOR_SYMBOL_LIST(cy, csaved_mean, csaved_inv_std), "group_norm"); |
3349 | 0 | ccv_nnc_graph_exec_symbol_autogen(cpu_symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
3350 | 0 | ccv_nnc_symbolic_graph_backward(cpu_symbolic_graph, TENSOR_SYMBOL_LIST(cy), TENSOR_SYMBOL_LIST(cx), SYMBOLIC_GRAPH_SOURCES(cpu_symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(cpu_symbolic_graph)); |
3351 | 0 | ccv_nnc_graph_exec_symbol_autogen(cpu_symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
3352 | 0 | ccv_nnc_tensor_symbol_t dcy = ccv_nnc_tensor_symbol_for_backward(cpu_symbolic_graph, cy); |
3353 | 0 | ccv_nnc_tensor_symbol_t dcx = ccv_nnc_tensor_symbol_for_backward(cpu_symbolic_graph, cx); |
3354 | 0 | ccv_nnc_graph_t* cpu_graph = 0; |
3355 | 0 | ccv_nnc_tensor_arena_t* cpu_tensor_arena = 0; |
3356 | 0 | ccv_nnc_graph_exec_arena_t* cpu_graph_exec_arena = 0; |
3357 | 0 | ccv_nnc_symbolic_graph_compile(cpu_symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(cpu_symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(cpu_symbolic_graph), &cpu_graph, &cpu_tensor_arena, &cpu_graph_exec_arena); |
3358 | 0 | ccv_nnc_tensor_t* const cx_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, cx); |
3359 | 0 | memcpy(cx_tensor->data.f32, x_tensor->data.f32, sizeof(float) * 2 * GN_C_DIM * 2 * LN_DIM); |
3360 | 0 | ccv_nnc_tensor_t* const dcy_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, dcy); |
3361 | 0 | memcpy(dcy_tensor->data.f32, dy_tensor->data.f32, sizeof(float) * 2 * GN_C_DIM * 2 * LN_DIM); |
3362 | | |
3363 | 0 | ccv_nnc_graph_run(cpu_graph, 0, TRAVERSE_FULL, 0, 0); |
3364 | | |
3365 | 0 | ccv_nnc_tensor_t* const dcx_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, dcx); |
3366 | | |
3367 | 0 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, dx_tensor->data.f32, dcx_tensor->data.f32, 2 * GN_C_DIM * 2 * LN_DIM, 1e-5, "group norm output from mps should match from CPU"); |
3368 | | |
3369 | 0 | ccv_nnc_symbolic_graph_free(cpu_symbolic_graph); |
3370 | 0 | ccv_nnc_tensor_arena_free(cpu_tensor_arena); |
3371 | 0 | ccv_nnc_graph_exec_arena_free(cpu_graph_exec_arena); |
3372 | 0 | ccv_nnc_graph_free(cpu_graph); |
3373 | 0 | ccv_nnc_tensor_free(x_tensor); |
3374 | 0 | ccv_nnc_tensor_free(dy_tensor); |
3375 | 0 | ccv_nnc_tensor_free(dx_tensor); |
3376 | 0 | } |
3377 | | |
3378 | | TEST_CASE("compare group norm gradient with mps, variant 1 without scale / bias") |
3379 | 1 | { |
3380 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_GROUP_NORM_FORWARD, CCV_NNC_BACKEND_MPS) && |
3381 | 1 | ccv_nnc_cmd_ok(CCV_NNC_GROUP_NORM_BACKWARD, CCV_NNC_BACKEND_MPS) && |
3382 | 1 | (ccv_nnc_cmd_ok(CCV_NNC_SET_FORWARD, CCV_NNC_BACKEND_MPS) || ccv_nnc_cmd_ok(CCV_NNC_SET_FORWARD, CCV_NNC_BACKEND_MPS))); |
3383 | 0 | ccv_nnc_symbolic_graph_t* const symbolic_graph = ccv_nnc_symbolic_graph_new(); |
3384 | 0 | ccv_nnc_tensor_symbol_t bx = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, GN_C_DIM, 2, LN_DIM), "x"); |
3385 | 0 | ccv_nnc_tensor_symbol_t by = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, GN_C_DIM, 2, LN_DIM), "y"); |
3386 | 0 | ccv_nnc_tensor_symbol_t saved_mean = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, GN_RC_DIM, 1, 1), "saved_mean"); |
3387 | 0 | ccv_nnc_tensor_symbol_t saved_inv_std = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, GN_RC_DIM, 1, 1), "saved_inv_std"); |
3388 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_GROUP_NORM_FORWARD(1, 4, 1e-5, 0), TENSOR_SYMBOL_LIST(bx), TENSOR_SYMBOL_LIST(by, saved_mean, saved_inv_std), "group_norm"); |
3389 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
3390 | 0 | ccv_nnc_symbolic_graph_backward(symbolic_graph, TENSOR_SYMBOL_LIST(by), TENSOR_SYMBOL_LIST(bx), SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph)); |
3391 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
3392 | 0 | ccv_nnc_tensor_symbol_t dby = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, by); |
3393 | 0 | ccv_nnc_tensor_symbol_t dbx = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, bx); |
3394 | 0 | ccv_nnc_graph_t* graph = 0; |
3395 | 0 | ccv_nnc_tensor_arena_t* tensor_arena = 0; |
3396 | 0 | ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0; |
3397 | 0 | ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena); |
3398 | 0 | SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH); |
3399 | 0 | GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH); |
3400 | 0 | ccv_nnc_tensor_t* const bx_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, bx); |
3401 | 0 | dsfmt_t dsfmt; |
3402 | 0 | ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, GN_C_DIM, 2, LN_DIM), 0); |
3403 | 0 | int i; |
3404 | 0 | dsfmt_init_gen_rand(&dsfmt, 1); |
3405 | 0 | for (i = 0; i < 2 * GN_C_DIM * 2 * LN_DIM; i++) |
3406 | 0 | x_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) * 100; |
3407 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor), TENSOR_LIST(bx_tensor), 0); |
3408 | | // ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
3409 | 0 | ccv_nnc_tensor_t* const dy_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, GN_C_DIM, 2, LN_DIM), 0); |
3410 | 0 | ccv_nnc_tensor_t* const dby_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, dby); |
3411 | 0 | for (i = 0; i < 2 * GN_C_DIM * 2 * LN_DIM; i++) |
3412 | 0 | dy_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) * 2 - 1; |
3413 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(dy_tensor), TENSOR_LIST(dby_tensor), 0); |
3414 | 0 | ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
3415 | 0 | ccv_nnc_tensor_t* const dbx_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, dbx); |
3416 | 0 | ccv_nnc_tensor_t* const dx_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, GN_C_DIM, 2, LN_DIM), 0); |
3417 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(dbx_tensor), TENSOR_LIST(dx_tensor), 0); |
3418 | 0 | ccv_nnc_symbolic_graph_free(symbolic_graph); |
3419 | 0 | ccv_nnc_tensor_arena_free(tensor_arena); |
3420 | 0 | ccv_nnc_graph_exec_arena_free(graph_exec_arena); |
3421 | 0 | ccv_nnc_graph_free(graph); |
3422 | 0 | ccv_nnc_symbolic_graph_t* const cpu_symbolic_graph = ccv_nnc_symbolic_graph_new(); |
3423 | 0 | ccv_nnc_tensor_symbol_t cx = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, GN_C_DIM, 2, LN_DIM), "x"); |
3424 | 0 | ccv_nnc_tensor_symbol_t cy = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, GN_C_DIM, 2, LN_DIM), "y"); |
3425 | 0 | ccv_nnc_tensor_symbol_t csaved_mean = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, GN_RC_DIM, 1, 1), "saved_mean"); |
3426 | 0 | ccv_nnc_tensor_symbol_t csaved_inv_std = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, GN_RC_DIM, 1, 1), "saved_inv_std"); |
3427 | 0 | ccv_nnc_graph_exec_symbol_new(cpu_symbolic_graph, CMD_GROUP_NORM_FORWARD(1, GN_RC_DIM, 1e-5, 0), TENSOR_SYMBOL_LIST(cx), TENSOR_SYMBOL_LIST(cy, csaved_mean, csaved_inv_std), "group_norm"); |
3428 | 0 | ccv_nnc_graph_exec_symbol_autogen(cpu_symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
3429 | 0 | ccv_nnc_symbolic_graph_backward(cpu_symbolic_graph, TENSOR_SYMBOL_LIST(cy), TENSOR_SYMBOL_LIST(cx), SYMBOLIC_GRAPH_SOURCES(cpu_symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(cpu_symbolic_graph)); |
3430 | 0 | ccv_nnc_graph_exec_symbol_autogen(cpu_symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
3431 | 0 | ccv_nnc_tensor_symbol_t dcy = ccv_nnc_tensor_symbol_for_backward(cpu_symbolic_graph, cy); |
3432 | 0 | ccv_nnc_tensor_symbol_t dcx = ccv_nnc_tensor_symbol_for_backward(cpu_symbolic_graph, cx); |
3433 | 0 | ccv_nnc_graph_t* cpu_graph = 0; |
3434 | 0 | ccv_nnc_tensor_arena_t* cpu_tensor_arena = 0; |
3435 | 0 | ccv_nnc_graph_exec_arena_t* cpu_graph_exec_arena = 0; |
3436 | 0 | ccv_nnc_symbolic_graph_compile(cpu_symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(cpu_symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(cpu_symbolic_graph), &cpu_graph, &cpu_tensor_arena, &cpu_graph_exec_arena); |
3437 | 0 | ccv_nnc_tensor_t* const cx_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, cx); |
3438 | 0 | memcpy(cx_tensor->data.f32, x_tensor->data.f32, sizeof(float) * 2 * GN_C_DIM * 2 * LN_DIM); |
3439 | 0 | ccv_nnc_tensor_t* const dcy_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, dcy); |
3440 | 0 | memcpy(dcy_tensor->data.f32, dy_tensor->data.f32, sizeof(float) * 2 * GN_C_DIM * 2 * LN_DIM); |
3441 | | |
3442 | 0 | ccv_nnc_graph_run(cpu_graph, 0, TRAVERSE_FULL, 0, 0); |
3443 | | |
3444 | 0 | ccv_nnc_tensor_t* const dcx_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, dcx); |
3445 | | |
3446 | 0 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, dx_tensor->data.f32, dcx_tensor->data.f32, 2 * GN_C_DIM * 2 * LN_DIM, 1e-5, "group norm output from mps should match from CPU"); |
3447 | | |
3448 | 0 | ccv_nnc_symbolic_graph_free(cpu_symbolic_graph); |
3449 | 0 | ccv_nnc_tensor_arena_free(cpu_tensor_arena); |
3450 | 0 | ccv_nnc_graph_exec_arena_free(cpu_graph_exec_arena); |
3451 | 0 | ccv_nnc_graph_free(cpu_graph); |
3452 | 0 | ccv_nnc_tensor_free(x_tensor); |
3453 | 0 | ccv_nnc_tensor_free(dy_tensor); |
3454 | 0 | ccv_nnc_tensor_free(dx_tensor); |
3455 | 0 | } |
3456 | | |
3457 | | TEST_CASE("compare group norm gradient with mps (no dbias) without scale / bias") |
3458 | 1 | { |
3459 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_GROUP_NORM_FORWARD, CCV_NNC_BACKEND_MPS) && |
3460 | 1 | ccv_nnc_cmd_ok(CCV_NNC_GROUP_NORM_BACKWARD, CCV_NNC_BACKEND_MPS) && |
3461 | 1 | (ccv_nnc_cmd_ok(CCV_NNC_SET_FORWARD, CCV_NNC_BACKEND_MPS) || ccv_nnc_cmd_ok(CCV_NNC_SET_FORWARD, CCV_NNC_BACKEND_MPS))); |
3462 | 0 | ccv_nnc_symbolic_graph_t* const symbolic_graph = ccv_nnc_symbolic_graph_new(); |
3463 | 0 | ccv_nnc_tensor_symbol_t bx = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, GN_C_DIM, 2, LN_DIM), "x"); |
3464 | 0 | ccv_nnc_tensor_symbol_t by = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, GN_C_DIM, 2, LN_DIM), "y"); |
3465 | 0 | ccv_nnc_tensor_symbol_t saved_mean = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, GN_RC_DIM, 2, LN_DIM), "saved_mean"); |
3466 | 0 | ccv_nnc_tensor_symbol_t saved_inv_std = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 2, GN_RC_DIM, 2, LN_DIM), "saved_inv_std"); |
3467 | 0 | ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_GROUP_NORM_FORWARD(1, 4, 1e-5, 0), TENSOR_SYMBOL_LIST(bx), TENSOR_SYMBOL_LIST(by, saved_mean, saved_inv_std), "group_norm"); |
3468 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
3469 | 0 | ccv_nnc_symbolic_graph_backward(symbolic_graph, TENSOR_SYMBOL_LIST(by), TENSOR_SYMBOL_LIST(bx), SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph)); |
3470 | 0 | ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
3471 | 0 | ccv_nnc_tensor_symbol_t dby = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, by); |
3472 | 0 | ccv_nnc_tensor_symbol_t dbx = ccv_nnc_tensor_symbol_for_backward(symbolic_graph, bx); |
3473 | 0 | ccv_nnc_graph_t* graph = 0; |
3474 | 0 | ccv_nnc_tensor_arena_t* tensor_arena = 0; |
3475 | 0 | ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0; |
3476 | 0 | ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena); |
3477 | 0 | SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH); |
3478 | 0 | GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH); |
3479 | 0 | ccv_nnc_tensor_t* const bx_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, bx); |
3480 | 0 | dsfmt_t dsfmt; |
3481 | 0 | ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, GN_C_DIM, 2, LN_DIM), 0); |
3482 | 0 | int i; |
3483 | 0 | dsfmt_init_gen_rand(&dsfmt, 1); |
3484 | 0 | for (i = 0; i < 2 * GN_C_DIM * 2 * LN_DIM; i++) |
3485 | 0 | x_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) * 100; |
3486 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor), TENSOR_LIST(bx_tensor), 0); |
3487 | | // ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
3488 | 0 | ccv_nnc_tensor_t* const dy_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, GN_C_DIM, 2, LN_DIM), 0); |
3489 | 0 | ccv_nnc_tensor_t* const dby_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, dby); |
3490 | 0 | for (i = 0; i < 2 * GN_C_DIM * 2 * LN_DIM; i++) |
3491 | 0 | dy_tensor->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) * 2 - 1; |
3492 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(dy_tensor), TENSOR_LIST(dby_tensor), 0); |
3493 | 0 | ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0); |
3494 | 0 | ccv_nnc_tensor_t* const dbx_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, dbx); |
3495 | 0 | ccv_nnc_tensor_t* const dx_tensor = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, GN_C_DIM, 2, LN_DIM), 0); |
3496 | 0 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(dbx_tensor), TENSOR_LIST(dx_tensor), 0); |
3497 | 0 | ccv_nnc_symbolic_graph_free(symbolic_graph); |
3498 | 0 | ccv_nnc_tensor_arena_free(tensor_arena); |
3499 | 0 | ccv_nnc_graph_exec_arena_free(graph_exec_arena); |
3500 | 0 | ccv_nnc_graph_free(graph); |
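 | | // Rebuild the same forward/backward graph with CPU tensors to serve as the reference. |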
3501 | 0 | ccv_nnc_symbolic_graph_t* const cpu_symbolic_graph = ccv_nnc_symbolic_graph_new(); |
3502 | 0 | ccv_nnc_tensor_symbol_t cx = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, GN_C_DIM, 2, LN_DIM), "x"); |
3503 | 0 | ccv_nnc_tensor_symbol_t cy = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, GN_C_DIM, 2, LN_DIM), "y"); |
3504 | 0 | ccv_nnc_tensor_symbol_t csaved_mean = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, GN_RC_DIM, 2, LN_DIM), "saved_mean"); |
3505 | 0 | ccv_nnc_tensor_symbol_t csaved_inv_std = ccv_nnc_tensor_symbol_new(cpu_symbolic_graph, CPU_TENSOR_NHWC(32F, 2, GN_RC_DIM, 2, LN_DIM), "saved_inv_std"); |
3506 | 0 | ccv_nnc_graph_exec_symbol_new(cpu_symbolic_graph, CMD_GROUP_NORM_FORWARD(1, GN_RC_DIM, 1e-5, 0), TENSOR_SYMBOL_LIST(cx), TENSOR_SYMBOL_LIST(cy, csaved_mean, csaved_inv_std), "group_norm"); |
3507 | 0 | ccv_nnc_graph_exec_symbol_autogen(cpu_symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
3508 | 0 | ccv_nnc_symbolic_graph_backward(cpu_symbolic_graph, TENSOR_SYMBOL_LIST(cy), TENSOR_SYMBOL_LIST(cx), SYMBOLIC_GRAPH_SOURCES(cpu_symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(cpu_symbolic_graph)); |
3509 | 0 | ccv_nnc_graph_exec_symbol_autogen(cpu_symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS); |
3510 | 0 | ccv_nnc_tensor_symbol_t dcy = ccv_nnc_tensor_symbol_for_backward(cpu_symbolic_graph, cy); |
3511 | 0 | ccv_nnc_tensor_symbol_t dcx = ccv_nnc_tensor_symbol_for_backward(cpu_symbolic_graph, cx); |
3512 | 0 | ccv_nnc_graph_t* cpu_graph = 0; |
3513 | 0 | ccv_nnc_tensor_arena_t* cpu_tensor_arena = 0; |
3514 | 0 | ccv_nnc_graph_exec_arena_t* cpu_graph_exec_arena = 0; |
3515 | 0 | ccv_nnc_symbolic_graph_compile(cpu_symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(cpu_symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(cpu_symbolic_graph), &cpu_graph, &cpu_tensor_arena, &cpu_graph_exec_arena); |
3516 | 0 | ccv_nnc_tensor_t* const cx_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, cx); |
3517 | 0 | memcpy(cx_tensor->data.f32, x_tensor->data.f32, sizeof(float) * 2 * GN_C_DIM * 2 * LN_DIM); |
3518 | 0 | ccv_nnc_tensor_t* const dcy_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, dcy); |
3519 | 0 | memcpy(dcy_tensor->data.f32, dy_tensor->data.f32, sizeof(float) * 2 * GN_C_DIM * 2 * LN_DIM); |
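 | | // Run the CPU reference graph and compare its gradient w.r.t. x against the MPS result. |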
3520 | | |
3521 | 0 | ccv_nnc_graph_run(cpu_graph, 0, TRAVERSE_FULL, 0, 0); |
3522 | | |
3523 | 0 | ccv_nnc_tensor_t* const dcx_tensor = ccv_nnc_tensor_from_symbol(cpu_tensor_arena, dcx); |
3524 | | |
3525 | 0 | REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, dx_tensor->data.f32, dcx_tensor->data.f32, 2 * GN_C_DIM * 2 * LN_DIM, 1e-5, "group norm gradient from mps should match from CPU"); |
3526 | | |
3527 | 0 | ccv_nnc_symbolic_graph_free(cpu_symbolic_graph); |
3528 | 0 | ccv_nnc_tensor_arena_free(cpu_tensor_arena); |
3529 | 0 | ccv_nnc_graph_exec_arena_free(cpu_graph_exec_arena); |
3530 | 0 | ccv_nnc_graph_free(cpu_graph); |
3531 | 0 | ccv_nnc_tensor_free(x_tensor); |
3532 | 0 | ccv_nnc_tensor_free(dy_tensor); |
3533 | 0 | ccv_nnc_tensor_free(dx_tensor); |
3534 | 0 | } |
3535 | | |
3536 | | TEST_CASE("broadcasting semantics for mul backward (a,b)") |
3537 | 1 | { |
3538 | 1 | GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_MUL_FORWARD, CCV_NNC_BACKEND_MPS) && |
3539 | 1 | ccv_nnc_cmd_ok(CCV_NNC_MUL_BACKWARD, CCV_NNC_BACKEND_MPS)); |
3540 | 0 | ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4, 1), 0); |
3541 | 0 | ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2), 0); |
3542 | 0 | ccv_nnc_tensor_t* const c = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4, 2), 0); |
3543 | 0 | ccv_nnc_tensor_t* const da = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4, 1), 0); |
3544 | 0 | ccv_nnc_tensor_t* const db = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2), 0); |
3545 | 0 | ccv_nnc_tensor_t* const dat = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4, 1), 0); |
3546 | 0 | ccv_nnc_tensor_t* const dbt = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2), 0); |
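 | | // Broadcasting setup: a is 4x1 and b has 2 elements, so they broadcast to the 4x2 shape used for c and its gradient. |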
3547 | 0 | a->data.f32[0] = 1; |
3548 | 0 | a->data.f32[1] = 2; |
3549 | 0 | a->data.f32[2] = 3; |
3550 | 0 | a->data.f32[3] = 4; |
3551 | 0 | b->data.f32[0] = 5; |
3552 | 0 | b->data.f32[1] = 6; |
3553 | 0 | float ctp[] = { |
3554 | 0 | 6, 7, |
3555 | 0 | 7, 8, |
3556 | 0 | 8, 9, |
3557 | 0 | 9, 10 |
3558 | 0 | }; |
3559 | 0 | memcpy(c->data.f32, ctp, sizeof(ctp)); |
3560 | 0 | ccv_nnc_tensor_t* const ga = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 4, 1), 0); |
3561 | 0 | ccv_nnc_tensor_t* const gb = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 2), 0); |
3562 | 0 | ccv_nnc_tensor_t* const gda = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 4, 1), 0); |