// /home/liu/actions-runner/_work/ccv/ccv/test/int/nnc/pad.tests.c
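// Integration tests for CCV_NNC_PAD_FORWARD / CCV_NNC_PAD_BACKWARD. Each case
// computes a reference result with the CPU backend, runs the same command on a
// GPU backend (GPU_REF or MPS) when available, and requires the outputs to match.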
#include "case.h"
#include "ccv_case.h"
#include "ccv_nnc_case.h"
#include <ccv.h>
#include <nnc/ccv_nnc.h>
#include <nnc/ccv_nnc_easy.h>
#include "3rdparty/dsfmt/dSFMT.h"

TEST_SETUP()
{
	ccv_nnc_init();
}

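// Zero-pad a 6-element vector with 2 leading and 1 trailing zero (6 -> 9) on CPU, then on GPU, and check the two results agree.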
TEST_CASE("implement pad zero 1d")
{
	GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_PAD_FORWARD, CCV_NNC_BACKEND_GPU_REF) || ccv_nnc_cmd_ok(CCV_NNC_PAD_FORWARD, CCV_NNC_BACKEND_MPS));
	ccv_nnc_tensor_t* const ha = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 6), 0);
	ccv_nnc_tensor_t* const hb = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 9), 0);
	ha->data.f32[0] = 1;
	ha->data.f32[1] = 2;
	ha->data.f32[2] = 3;
	ha->data.f32[3] = 4;
	ha->data.f32[4] = 5;
	ha->data.f32[5] = 6;
	ccv_nnc_cmd_exec(CMD_PAD_FORWARD(CCV_NNC_PAD_ZERO, (2), (1)), ccv_nnc_no_hint, 0, TENSOR_LIST(ha), TENSOR_LIST(hb), 0);
	ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 6), 0);
	ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 9), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(ha), TENSOR_LIST(a), 0);
	ccv_nnc_cmd_exec(CMD_PAD_FORWARD(CCV_NNC_PAD_ZERO, (2), (1)), ccv_nnc_no_hint, 0, TENSOR_LIST(a), TENSOR_LIST(b), 0);
	ccv_nnc_tensor_t* const bt = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 9), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(b), TENSOR_LIST(bt), 0);
	REQUIRE_TENSOR_EQ(hb, bt, "result should be equal");
	ccv_nnc_tensor_free(ha);
	ccv_nnc_tensor_free(hb);
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(b);
	ccv_nnc_tensor_free(bt);
}

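// Zero-pad a 2x3 matrix by (2, 1) at the front and (1, 0) at the back (2x3 -> 5x4).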
TEST_CASE("implement pad zero 2d")
{
	GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_PAD_FORWARD, CCV_NNC_BACKEND_GPU_REF) || ccv_nnc_cmd_ok(CCV_NNC_PAD_FORWARD, CCV_NNC_BACKEND_MPS));
	ccv_nnc_tensor_t* const ha = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 3), 0);
	ccv_nnc_tensor_t* const hb = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 5, 4), 0);
	ha->data.f32[0] = 1;
	ha->data.f32[1] = 2;
	ha->data.f32[2] = 3;
	ha->data.f32[3] = 4;
	ha->data.f32[4] = 5;
	ha->data.f32[5] = 6;
	ccv_nnc_cmd_exec(CMD_PAD_FORWARD(CCV_NNC_PAD_ZERO, (2, 1), (1, 0)), ccv_nnc_no_hint, 0, TENSOR_LIST(ha), TENSOR_LIST(hb), 0);
	ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 2, 3), 0);
	ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 5, 4), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(ha), TENSOR_LIST(a), 0);
	ccv_nnc_cmd_exec(CMD_PAD_FORWARD(CCV_NNC_PAD_ZERO, (2, 1), (1, 0)), ccv_nnc_no_hint, 0, TENSOR_LIST(a), TENSOR_LIST(b), 0);
	ccv_nnc_tensor_t* const bt = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 5, 4), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(b), TENSOR_LIST(bt), 0);
	REQUIRE_TENSOR_EQ(hb, bt, "result should be equal");
	ccv_nnc_tensor_free(ha);
	ccv_nnc_tensor_free(hb);
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(b);
	ccv_nnc_tensor_free(bt);
}

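// Zero-pad a 1x2x3 tensor by (1, 1, 0) at the front and (1, 0, 1) at the back (1x2x3 -> 3x3x4).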
TEST_CASE("implement pad zero 3d")
{
	GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_PAD_FORWARD, CCV_NNC_BACKEND_GPU_REF) || ccv_nnc_cmd_ok(CCV_NNC_PAD_FORWARD, CCV_NNC_BACKEND_MPS));
	ccv_nnc_tensor_t* const ha = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 1, 2, 3), 0);
	ccv_nnc_tensor_t* const hb = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 3, 3, 4), 0);
	ha->data.f32[0] = 1;
	ha->data.f32[1] = 2;
	ha->data.f32[2] = 3;
	ha->data.f32[3] = 4;
	ha->data.f32[4] = 5;
	ha->data.f32[5] = 6;
	ccv_nnc_cmd_exec(CMD_PAD_FORWARD(CCV_NNC_PAD_ZERO, (1, 1, 0), (1, 0, 1)), ccv_nnc_no_hint, 0, TENSOR_LIST(ha), TENSOR_LIST(hb), 0);
	ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 1, 2, 3), 0);
	ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 3, 3, 4), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(ha), TENSOR_LIST(a), 0);
	ccv_nnc_cmd_exec(CMD_PAD_FORWARD(CCV_NNC_PAD_ZERO, (1, 1, 0), (1, 0, 1)), ccv_nnc_no_hint, 0, TENSOR_LIST(a), TENSOR_LIST(b), 0);
	ccv_nnc_tensor_t* const bt = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 3, 3, 4), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(b), TENSOR_LIST(bt), 0);
	REQUIRE_TENSOR_EQ(hb, bt, "result should be equal");
	ccv_nnc_tensor_free(ha);
	ccv_nnc_tensor_free(hb);
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(b);
	ccv_nnc_tensor_free(bt);
}

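// Zero-pad a 2x2x2x1 tensor by (2, 1, 1, 0) at the front and (1, 1, 0, 1) at the back (2x2x2x1 -> 5x4x3x2).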
TEST_CASE("implement pad zero 4d")
{
	GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_PAD_FORWARD, CCV_NNC_BACKEND_GPU_REF) || ccv_nnc_cmd_ok(CCV_NNC_PAD_FORWARD, CCV_NNC_BACKEND_MPS));
	ccv_nnc_tensor_t* const ha = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 2, 2, 1), 0);
	ccv_nnc_tensor_t* const hb = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 5, 4, 3, 2), 0);
	ha->data.f32[0] = 1;
	ha->data.f32[1] = 2;
	ha->data.f32[2] = 3;
	ha->data.f32[3] = 4;
	ha->data.f32[4] = 5;
	ha->data.f32[5] = 6;
	ha->data.f32[6] = 7;
	ha->data.f32[7] = 8;
	ccv_nnc_cmd_exec(CMD_PAD_FORWARD(CCV_NNC_PAD_ZERO, (2, 1, 1, 0), (1, 1, 0, 1)), ccv_nnc_no_hint, 0, TENSOR_LIST(ha), TENSOR_LIST(hb), 0);
	ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 2, 2, 2, 1), 0);
	ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 5, 4, 3, 2), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(ha), TENSOR_LIST(a), 0);
	ccv_nnc_cmd_exec(CMD_PAD_FORWARD(CCV_NNC_PAD_ZERO, (2, 1, 1, 0), (1, 1, 0, 1)), ccv_nnc_no_hint, 0, TENSOR_LIST(a), TENSOR_LIST(b), 0);
	ccv_nnc_tensor_t* const bt = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 5, 4, 3, 2), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(b), TENSOR_LIST(bt), 0);
	REQUIRE_TENSOR_EQ(hb, bt, "result should be equal");
	ccv_nnc_tensor_free(ha);
	ccv_nnc_tensor_free(hb);
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(b);
	ccv_nnc_tensor_free(bt);
}

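// Same shapes as the zero-pad 1d case, but CCV_NNC_PAD_REPLICATE repeats the edge values instead of filling zeros.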
TEST_CASE("implement pad replicate 1d")
{
	GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_PAD_FORWARD, CCV_NNC_BACKEND_GPU_REF) || ccv_nnc_cmd_ok(CCV_NNC_PAD_FORWARD, CCV_NNC_BACKEND_MPS));
	ccv_nnc_tensor_t* const ha = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 6), 0);
	ccv_nnc_tensor_t* const hb = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 9), 0);
	ha->data.f32[0] = 1;
	ha->data.f32[1] = 2;
	ha->data.f32[2] = 3;
	ha->data.f32[3] = 4;
	ha->data.f32[4] = 5;
	ha->data.f32[5] = 6;
	ccv_nnc_cmd_exec(CMD_PAD_FORWARD(CCV_NNC_PAD_REPLICATE, (2), (1)), ccv_nnc_no_hint, 0, TENSOR_LIST(ha), TENSOR_LIST(hb), 0);
	ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 6), 0);
	ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 9), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(ha), TENSOR_LIST(a), 0);
	ccv_nnc_cmd_exec(CMD_PAD_FORWARD(CCV_NNC_PAD_REPLICATE, (2), (1)), ccv_nnc_no_hint, 0, TENSOR_LIST(a), TENSOR_LIST(b), 0);
	ccv_nnc_tensor_t* const bt = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 9), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(b), TENSOR_LIST(bt), 0);
	REQUIRE_TENSOR_EQ(hb, bt, "result should be equal");
	ccv_nnc_tensor_free(ha);
	ccv_nnc_tensor_free(hb);
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(b);
	ccv_nnc_tensor_free(bt);
}

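// Replicate-pad a 2x3 matrix by (2, 1) / (1, 0) to 5x4.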
TEST_CASE("implement pad replicate 2d")
{
	GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_PAD_FORWARD, CCV_NNC_BACKEND_GPU_REF) || ccv_nnc_cmd_ok(CCV_NNC_PAD_FORWARD, CCV_NNC_BACKEND_MPS));
	ccv_nnc_tensor_t* const ha = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 3), 0);
	ccv_nnc_tensor_t* const hb = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 5, 4), 0);
	ha->data.f32[0] = 1;
	ha->data.f32[1] = 2;
	ha->data.f32[2] = 3;
	ha->data.f32[3] = 4;
	ha->data.f32[4] = 5;
	ha->data.f32[5] = 6;
	ccv_nnc_cmd_exec(CMD_PAD_FORWARD(CCV_NNC_PAD_REPLICATE, (2, 1), (1, 0)), ccv_nnc_no_hint, 0, TENSOR_LIST(ha), TENSOR_LIST(hb), 0);
	ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 2, 3), 0);
	ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 5, 4), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(ha), TENSOR_LIST(a), 0);
	ccv_nnc_cmd_exec(CMD_PAD_FORWARD(CCV_NNC_PAD_REPLICATE, (2, 1), (1, 0)), ccv_nnc_no_hint, 0, TENSOR_LIST(a), TENSOR_LIST(b), 0);
	ccv_nnc_tensor_t* const bt = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 5, 4), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(b), TENSOR_LIST(bt), 0);
	REQUIRE_TENSOR_EQ(hb, bt, "result should be equal");
	ccv_nnc_tensor_free(ha);
	ccv_nnc_tensor_free(hb);
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(b);
	ccv_nnc_tensor_free(bt);
}

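// Replicate-pad a 1x2x3 tensor by (1, 1, 0) / (1, 0, 1) to 3x3x4.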
TEST_CASE("implement pad replicate 3d")
{
	GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_PAD_FORWARD, CCV_NNC_BACKEND_GPU_REF) || ccv_nnc_cmd_ok(CCV_NNC_PAD_FORWARD, CCV_NNC_BACKEND_MPS));
	ccv_nnc_tensor_t* const ha = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 1, 2, 3), 0);
	ccv_nnc_tensor_t* const hb = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 3, 3, 4), 0);
	ha->data.f32[0] = 1;
	ha->data.f32[1] = 2;
	ha->data.f32[2] = 3;
	ha->data.f32[3] = 4;
	ha->data.f32[4] = 5;
	ha->data.f32[5] = 6;
	ccv_nnc_cmd_exec(CMD_PAD_FORWARD(CCV_NNC_PAD_REPLICATE, (1, 1, 0), (1, 0, 1)), ccv_nnc_no_hint, 0, TENSOR_LIST(ha), TENSOR_LIST(hb), 0);
	ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 1, 2, 3), 0);
	ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 3, 3, 4), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(ha), TENSOR_LIST(a), 0);
	ccv_nnc_cmd_exec(CMD_PAD_FORWARD(CCV_NNC_PAD_REPLICATE, (1, 1, 0), (1, 0, 1)), ccv_nnc_no_hint, 0, TENSOR_LIST(a), TENSOR_LIST(b), 0);
	ccv_nnc_tensor_t* const bt = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 3, 3, 4), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(b), TENSOR_LIST(bt), 0);
	REQUIRE_TENSOR_EQ(hb, bt, "result should be equal");
	ccv_nnc_tensor_free(ha);
	ccv_nnc_tensor_free(hb);
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(b);
	ccv_nnc_tensor_free(bt);
}

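// Replicate-pad a 2x2x2x1 tensor by (2, 1, 1, 0) / (1, 1, 0, 1) to 5x4x3x2.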
TEST_CASE("implement pad replicate 4d")
{
	GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_PAD_FORWARD, CCV_NNC_BACKEND_GPU_REF) || ccv_nnc_cmd_ok(CCV_NNC_PAD_FORWARD, CCV_NNC_BACKEND_MPS));
	ccv_nnc_tensor_t* const ha = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 2, 2, 1), 0);
	ccv_nnc_tensor_t* const hb = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 5, 4, 3, 2), 0);
	ha->data.f32[0] = 1;
	ha->data.f32[1] = 2;
	ha->data.f32[2] = 3;
	ha->data.f32[3] = 4;
	ha->data.f32[4] = 5;
	ha->data.f32[5] = 6;
	ha->data.f32[6] = 7;
	ha->data.f32[7] = 8;
	ccv_nnc_cmd_exec(CMD_PAD_FORWARD(CCV_NNC_PAD_REPLICATE, (2, 1, 1, 0), (1, 1, 0, 1)), ccv_nnc_no_hint, 0, TENSOR_LIST(ha), TENSOR_LIST(hb), 0);
	ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 2, 2, 2, 1), 0);
	ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 5, 4, 3, 2), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(ha), TENSOR_LIST(a), 0);
	ccv_nnc_cmd_exec(CMD_PAD_FORWARD(CCV_NNC_PAD_REPLICATE, (2, 1, 1, 0), (1, 1, 0, 1)), ccv_nnc_no_hint, 0, TENSOR_LIST(a), TENSOR_LIST(b), 0);
	ccv_nnc_tensor_t* const bt = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 5, 4, 3, 2), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(b), TENSOR_LIST(bt), 0);
	REQUIRE_TENSOR_EQ(hb, bt, "result should be equal");
	ccv_nnc_tensor_free(ha);
	ccv_nnc_tensor_free(hb);
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(b);
	ccv_nnc_tensor_free(bt);
}

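// Backward pass of 1d zero padding: a random 9-element output gradient is mapped back to 6 elements, i.e. the padded positions are dropped.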
TEST_CASE("implement pad zero 1d gradient")
{
	GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_PAD_BACKWARD, CCV_NNC_BACKEND_GPU_REF) || ccv_nnc_cmd_ok(CCV_NNC_PAD_BACKWARD, CCV_NNC_BACKEND_MPS));
	ccv_nnc_tensor_t* const ha = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 9), 0);
	ccv_nnc_tensor_t* const hb = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 6), 0);
	dsfmt_t dsfmt;
	dsfmt_init_gen_rand(&dsfmt, 0);
	int i;
	for (i = 0; i < 9; i++)
		ha->data.f32[i] = dsfmt_genrand_open_close(&dsfmt);
	ccv_nnc_cmd_exec(CMD_PAD_BACKWARD(CCV_NNC_PAD_ZERO, (2), (1)), ccv_nnc_no_hint, 0, TENSOR_LIST(ha), TENSOR_LIST(hb), 0);
	ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 9), 0);
	ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 6), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(ha), TENSOR_LIST(a), 0);
	ccv_nnc_cmd_exec(CMD_PAD_BACKWARD(CCV_NNC_PAD_ZERO, (2), (1)), ccv_nnc_no_hint, 0, TENSOR_LIST(a), TENSOR_LIST(b), 0);
	ccv_nnc_tensor_t* const bt = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 6), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(b), TENSOR_LIST(bt), 0);
	REQUIRE_TENSOR_EQ(hb, bt, "result should be equal");
	ccv_nnc_tensor_free(ha);
	ccv_nnc_tensor_free(hb);
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(b);
	ccv_nnc_tensor_free(bt);
}

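// Backward pass of 2d zero padding: 5x4 output gradient -> 2x3 input gradient.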
TEST_CASE("implement pad zero 2d gradient")
{
	GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_PAD_BACKWARD, CCV_NNC_BACKEND_GPU_REF) || ccv_nnc_cmd_ok(CCV_NNC_PAD_BACKWARD, CCV_NNC_BACKEND_MPS));
	ccv_nnc_tensor_t* const ha = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 5, 4), 0);
	ccv_nnc_tensor_t* const hb = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 3), 0);
	dsfmt_t dsfmt;
	dsfmt_init_gen_rand(&dsfmt, 0);
	int i;
	for (i = 0; i < 20; i++)
		ha->data.f32[i] = dsfmt_genrand_open_close(&dsfmt);
	ccv_nnc_cmd_exec(CMD_PAD_BACKWARD(CCV_NNC_PAD_ZERO, (2, 1), (1, 0)), ccv_nnc_no_hint, 0, TENSOR_LIST(ha), TENSOR_LIST(hb), 0);
	ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 5, 4), 0);
	ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 2, 3), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(ha), TENSOR_LIST(a), 0);
	ccv_nnc_cmd_exec(CMD_PAD_BACKWARD(CCV_NNC_PAD_ZERO, (2, 1), (1, 0)), ccv_nnc_no_hint, 0, TENSOR_LIST(a), TENSOR_LIST(b), 0);
	ccv_nnc_tensor_t* const bt = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 3), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(b), TENSOR_LIST(bt), 0);
	REQUIRE_TENSOR_EQ(hb, bt, "result should be equal");
	ccv_nnc_tensor_free(ha);
	ccv_nnc_tensor_free(hb);
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(b);
	ccv_nnc_tensor_free(bt);
}

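// Backward pass of 3d zero padding: 3x3x4 output gradient -> 1x2x3 input gradient.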
TEST_CASE("implement pad zero 3d gradient")
{
	GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_PAD_BACKWARD, CCV_NNC_BACKEND_GPU_REF) || ccv_nnc_cmd_ok(CCV_NNC_PAD_BACKWARD, CCV_NNC_BACKEND_MPS));
	ccv_nnc_tensor_t* const ha = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 3, 3, 4), 0);
	ccv_nnc_tensor_t* const hb = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 1, 2, 3), 0);
	dsfmt_t dsfmt;
	dsfmt_init_gen_rand(&dsfmt, 0);
	int i;
	for (i = 0; i < 3 * 3 * 4; i++)
		ha->data.f32[i] = dsfmt_genrand_open_close(&dsfmt);
	ccv_nnc_cmd_exec(CMD_PAD_BACKWARD(CCV_NNC_PAD_ZERO, (1, 1, 0), (1, 0, 1)), ccv_nnc_no_hint, 0, TENSOR_LIST(ha), TENSOR_LIST(hb), 0);
	ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 3, 3, 4), 0);
	ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 1, 2, 3), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(ha), TENSOR_LIST(a), 0);
	ccv_nnc_cmd_exec(CMD_PAD_BACKWARD(CCV_NNC_PAD_ZERO, (1, 1, 0), (1, 0, 1)), ccv_nnc_no_hint, 0, TENSOR_LIST(a), TENSOR_LIST(b), 0);
	ccv_nnc_tensor_t* const bt = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 1, 2, 3), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(b), TENSOR_LIST(bt), 0);
	REQUIRE_TENSOR_EQ(hb, bt, "result should be equal");
	ccv_nnc_tensor_free(ha);
	ccv_nnc_tensor_free(hb);
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(b);
	ccv_nnc_tensor_free(bt);
}

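// Backward pass of 4d zero padding: 5x4x3x2 output gradient -> 2x2x2x1 input gradient.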
TEST_CASE("implement pad zero 4d gradient")
{
	GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_PAD_BACKWARD, CCV_NNC_BACKEND_GPU_REF) || ccv_nnc_cmd_ok(CCV_NNC_PAD_BACKWARD, CCV_NNC_BACKEND_MPS));
	ccv_nnc_tensor_t* const ha = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 5, 4, 3, 2), 0);
	ccv_nnc_tensor_t* const hb = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 2, 2, 1), 0);
	dsfmt_t dsfmt;
	dsfmt_init_gen_rand(&dsfmt, 0);
	int i;
	for (i = 0; i < 5 * 4 * 3 * 2; i++)
		ha->data.f32[i] = dsfmt_genrand_open_close(&dsfmt);
	ccv_nnc_cmd_exec(CMD_PAD_BACKWARD(CCV_NNC_PAD_ZERO, (2, 1, 1, 0), (1, 1, 0, 1)), ccv_nnc_no_hint, 0, TENSOR_LIST(ha), TENSOR_LIST(hb), 0);
	ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 5, 4, 3, 2), 0);
	ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 2, 2, 2, 1), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(ha), TENSOR_LIST(a), 0);
	ccv_nnc_cmd_exec(CMD_PAD_BACKWARD(CCV_NNC_PAD_ZERO, (2, 1, 1, 0), (1, 1, 0, 1)), ccv_nnc_no_hint, 0, TENSOR_LIST(a), TENSOR_LIST(b), 0);
	ccv_nnc_tensor_t* const bt = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 2, 2, 1), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(b), TENSOR_LIST(bt), 0);
	REQUIRE_TENSOR_EQ(hb, bt, "result should be equal");
	ccv_nnc_tensor_free(ha);
	ccv_nnc_tensor_free(hb);
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(b);
	ccv_nnc_tensor_free(bt);
}

#include "case_main.h"