/home/liu/actions-runner/_work/ccv/ccv/test/unit/nnc/index.tests.c
#include "case.h"
#include "ccv_case.h"
#include "ccv_nnc_case.h"
#include <ccv.h>
#include <nnc/ccv_nnc.h>
#include <nnc/ccv_nnc_easy.h>

TEST_SETUP()
{
	ccv_nnc_init();
}

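// Forward index select on a 3x2 float tensor with int32 indices {1, 1}: row 1
// ({2, 3}) should be gathered twice into the 2x2 output.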
TEST_CASE("index select a tensor")
{
	float ap[] = {
		1, 2,
		2, 3,
		3, 4,
	};
	ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(ap, CPU_TENSOR_NHWC(32F, 3, 2), 0);
	int ip[] = {1, 1};
	ccv_nnc_tensor_t* const indices = ccv_nnc_tensor_new(ip, CPU_TENSOR_NHWC(32S, 2), 0);
	ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 2), 0);
	ccv_nnc_cmd_exec(CMD_INDEX_SELECT_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(a, indices), TENSOR_LIST(b), 0);
	float btp[] = {
		2, 3,
		2, 3,
	};
	ccv_nnc_tensor_t const bt = ccv_nnc_tensor(btp, CPU_TENSOR_NHWC(32F, 2, 2), 0);
	REQUIRE_TENSOR_EQ(b, &bt, "should be equal");
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(indices);
	ccv_nnc_tensor_free(b);
}

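// Forward index select with float (32F) indices {1.5, 0.4}: the expected output
// matches linear interpolation between adjacent rows (e.g. index 1.5 yields the
// midpoint of rows 1 and 2), compared with a small tolerance.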
TEST_CASE("index select a tensor with float")
{
	float ap[] = {
		1, 2,
		2, 3,
		3, 4,
	};
	ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(ap, CPU_TENSOR_NHWC(32F, 3, 2), 0);
	float ip[] = {1.5, 0.4};
	ccv_nnc_tensor_t* const indices = ccv_nnc_tensor_new(ip, CPU_TENSOR_NHWC(32F, 2), 0);
	ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 2), 0);
	ccv_nnc_cmd_exec(CMD_INDEX_SELECT_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(a, indices), TENSOR_LIST(b), 0);
	float btp[] = {
		2.5, 3.5,
		1.4, 2.4,
	};
	REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, b->data.f32, btp, 4, 1e-5, "should be equal");
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(indices);
	ccv_nnc_tensor_free(b);
}

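// Forward index select on a 1-D tensor: indices {3, 2, 4} gather {4, 3, 5}
// out of {1, 2, 3, 4, 5}.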
TEST_CASE("index select a 1d tensor")
{
	float ap[] = {
		1, 2, 3, 4, 5
	};
	ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(ap, CPU_TENSOR_NHWC(32F, 5), 0);
	int ip[] = {3, 2, 4};
	ccv_nnc_tensor_t* const indices = ccv_nnc_tensor_new(ip, CPU_TENSOR_NHWC(32S, 3), 0);
	ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 3), 0);
	ccv_nnc_cmd_exec(CMD_INDEX_SELECT_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(a, indices), TENSOR_LIST(b), 0);
	float btp[] = {
		4, 3, 5
	};
	ccv_nnc_tensor_t const bt = ccv_nnc_tensor(btp, CPU_TENSOR_NHWC(32F, 3), 0);
	REQUIRE_TENSOR_EQ(b, &bt, "should be equal");
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(indices);
	ccv_nnc_tensor_free(b);
}

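// Forward index select through tensor views: av and bv are 2-column views
// (offset 1, stride 4 per row) into the 3x4 input and the zero-filled 2x4
// output, so only the middle two columns of b should be written.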
TEST_CASE("index select a tensor view")
{
	float ap[] = {
		1, 2, 3, 4,
		2, 3, 4, 5,
		3, 4, 5, 6,
	};
	ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(ap, CPU_TENSOR_NHWC(32F, 3, 4), 0);
	ccv_nnc_tensor_view_t* const av = ccv_nnc_tensor_view_new(a, CPU_TENSOR_NHWC(32F, 3, 2), DIM_ALLOC(0, 1), DIM_ALLOC(4, 1));
	int ip[] = {1, 1};
	ccv_nnc_tensor_t* const indices = ccv_nnc_tensor_new(ip, CPU_TENSOR_NHWC(32S, 2), 0);
	ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 4), 0);
	memset(b->data.f32, 0, 2 * 4 * sizeof(float));
	ccv_nnc_tensor_view_t* const bv = ccv_nnc_tensor_view_new(b, CPU_TENSOR_NHWC(32F, 2, 2), DIM_ALLOC(0, 1), DIM_ALLOC(4, 1));
	ccv_nnc_cmd_exec(CMD_INDEX_SELECT_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST((ccv_nnc_tensor_t*)av, indices), TENSOR_LIST((ccv_nnc_tensor_t*)bv), 0);
	float btp[] = {
		0, 3, 4, 0,
		0, 3, 4, 0,
	};
	ccv_nnc_tensor_t const bt = ccv_nnc_tensor(btp, CPU_TENSOR_NHWC(32F, 2, 4), 0);
	REQUIRE_TENSOR_EQ(b, &bt, "should be equal");
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_view_free(av);
	ccv_nnc_tensor_free(indices);
	ccv_nnc_tensor_free(b);
	ccv_nnc_tensor_view_free(bv);
}

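// Backward index select: the incoming gradient rows selected by indices {1, 1}
// are expected to accumulate into row 1 of the input gradient
// ({1, 2} + {2, 3} = {3, 5}), while unselected rows come out zero.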
TEST_CASE("backward index select a tensor")
{
	float bp[] = {
		1, 2,
		2, 3,
	};
	ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 3, 2), 0);
	int ip[] = {1, 1};
	ccv_nnc_tensor_t* const indices = ccv_nnc_tensor_new(ip, CPU_TENSOR_NHWC(32S, 2), 0);
	ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(bp, CPU_TENSOR_NHWC(32F, 2, 2), 0);
	ccv_nnc_cmd_exec(CMD_INDEX_SELECT_BACKWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(b, 0, indices), TENSOR_LIST(a), 0);
	float atp[] = {
		0, 0,
		3, 5,
		0, 0,
	};
	ccv_nnc_tensor_t const at = ccv_nnc_tensor(atp, CPU_TENSOR_NHWC(32F, 3, 2), 0);
	REQUIRE_TENSOR_EQ(a, &at, "should be equal");
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(indices);
	ccv_nnc_tensor_free(b);
}

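// Backward index select on a 1-D tensor: gradient {4, 3, 5} is scattered to
// positions {3, 2, 4}; every other position is expected to be zero.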
TEST_CASE("backward index select a 1d tensor")
{
	float bp[] = {
		4, 3, 5,
	};
	ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 5), 0);
	int ip[] = {3, 2, 4};
	ccv_nnc_tensor_t* const indices = ccv_nnc_tensor_new(ip, CPU_TENSOR_NHWC(32S, 3), 0);
	ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(bp, CPU_TENSOR_NHWC(32F, 3), 0);
	ccv_nnc_cmd_exec(CMD_INDEX_SELECT_BACKWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(b, 0, indices), TENSOR_LIST(a), 0);
	float atp[] = {
		0, 0, 3, 4, 5
	};
	ccv_nnc_tensor_t const at = ccv_nnc_tensor(atp, CPU_TENSOR_NHWC(32F, 5), 0);
	REQUIRE_TENSOR_EQ(a, &at, "should be equal");
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(indices);
	ccv_nnc_tensor_free(b);
}

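// Backward index select through tensor views: the gradient is scattered only
// into the 2-column view of a (row 1 accumulates {3, 4} + {1, 5} = {4, 9},
// rows 0 and 2 of the view are zeroed), while the columns of a outside the
// view keep their pre-filled values 0..11.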
TEST_CASE("backward index select a tensor view")
{
	float bp[] = {
		0, 3, 4, 0,
		0, 1, 5, 0,
	};
	ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 3, 4), 0);
	int i;
	for (i = 0; i < 3 * 4; i++)
		a->data.f32[i] = i;
	ccv_nnc_tensor_view_t* const av = ccv_nnc_tensor_view_new(a, CPU_TENSOR_NHWC(32F, 3, 2), DIM_ALLOC(0, 1), DIM_ALLOC(4, 1));
	int ip[] = {1, 1};
	ccv_nnc_tensor_t* const indices = ccv_nnc_tensor_new(ip, CPU_TENSOR_NHWC(32S, 2), 0);
	ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(bp, CPU_TENSOR_NHWC(32F, 2, 4), 0);
	ccv_nnc_tensor_view_t* const bv = ccv_nnc_tensor_view_new(b, CPU_TENSOR_NHWC(32F, 2, 2), DIM_ALLOC(0, 1), DIM_ALLOC(4, 1));
	ccv_nnc_cmd_exec(CMD_INDEX_SELECT_BACKWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST((ccv_nnc_tensor_t*)bv, 0, indices), TENSOR_LIST((ccv_nnc_tensor_t*)av), 0);
	float atp[] = {
		0, 0, 0, 3,
		4, 4, 9, 7,
		8, 0, 0, 11,
	};
	ccv_nnc_tensor_t const at = ccv_nnc_tensor(atp, CPU_TENSOR_NHWC(32F, 3, 4), 0);
	REQUIRE_TENSOR_EQ(a, &at, "should be equal");
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_view_free(av);
	ccv_nnc_tensor_free(indices);
	ccv_nnc_tensor_free(b);
	ccv_nnc_tensor_view_free(bv);
}

#include "case_main.h"