/* /home/liu/actions-runner/_work/ccv/ccv/test/unit/nnc/nms.tests.c */
#include "case.h"
#include "ccv_case.h"
#include "ccv_nnc_case.h"
#include <ccv.h>
#include <nnc/ccv_nnc.h>
#include <nnc/ccv_nnc_easy.h>

TEST_SETUP()
{
	ccv_nnc_init();
}
TEST_CASE("compare non-maximal suppression forward")
{
	ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 5), 0);
	ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 5), 0);
	ccv_nnc_tensor_t* const c = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32S, 10), 0);
	int i;
	for (i = 0; i < 10; i++)
	{
		a->data.f32[i * 5] = i;
		a->data.f32[i * 5 + 1] = i;
		a->data.f32[i * 5 + 2] = 0;
		a->data.f32[i * 5 + 3] = 1;
		a->data.f32[i * 5 + 4] = 1;
	}
	ccv_nnc_cmd_exec(CMD_NMS_FORWARD(0.5), ccv_nnc_no_hint, 0, TENSOR_LIST(a), TENSOR_LIST(b, c), 0);
	ccv_nnc_tensor_t* const bt = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 5), 0);
	for (i = 0; i < 10; i++)
	{
		bt->data.f32[i * 5] = 9 - i;
		bt->data.f32[i * 5 + 1] = 9 - i;
		bt->data.f32[i * 5 + 2] = 0;
		bt->data.f32[i * 5 + 3] = 1;
		bt->data.f32[i * 5 + 4] = 1;
	}
	REQUIRE_TENSOR_EQ(b, bt, "should be equal");
	int ct[10] = {};
	for (i = 0; i < 10; i++)
		ct[i] = 9 - i;
	REQUIRE_ARRAY_EQ(int, c->data.i32, ct, 10, "should be equal");
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(b);
	ccv_nnc_tensor_free(c);
	ccv_nnc_tensor_free(bt);
}
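
/* The test above relies on NMS_FORWARD sorting rows by their first element
 * (the score) in descending order before suppressing overlaps; the unit boxes
 * at x = 0..9 are pairwise disjoint, so nothing crosses the 0.5 threshold and
 * the output is simply score-sorted. As a point of reference, here is a
 * minimal sketch of the pairwise IoU such a kernel would compute between two
 * 5-float rows. The (score, x, y, w, h) row layout is inferred from the
 * tensors above, and this helper is an illustration of the expected overlap
 * test, not the library's actual implementation. */
static inline float nms_iou_sketch(const float* const p, const float* const q)
{
	// p[0] / q[0] hold the scores; p[1..4] / q[1..4] hold x, y, w, h.
	// Assumes boxes with positive area so the union below is non-zero.
	const float x0 = p[1] > q[1] ? p[1] : q[1];
	const float y0 = p[2] > q[2] ? p[2] : q[2];
	const float x1 = p[1] + p[3] < q[1] + q[3] ? p[1] + p[3] : q[1] + q[3];
	const float y1 = p[2] + p[4] < q[2] + q[4] ? p[2] + p[4] : q[2] + q[4];
	const float iw = x1 - x0 > 0 ? x1 - x0 : 0; // clamp to 0 when disjoint
	const float ih = y1 - y0 > 0 ? y1 - y0 : 0;
	const float inter = iw * ih;
	return inter / (p[3] * p[4] + q[3] * q[4] - inter);
}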

TEST_CASE("compare non-maximal suppression forward with tensor views")
{
	ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 8), 0);
	ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 6), 0);
	ccv_nnc_tensor_t* const c = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32S, 10), 0);
	int i;
	for (i = 0; i < 10; i++)
	{
		a->data.f32[i * 8] = i;
		a->data.f32[i * 8 + 1] = i;
		a->data.f32[i * 8 + 2] = 0;
		a->data.f32[i * 8 + 3] = 1;
		a->data.f32[i * 8 + 4] = 1;
	}
	memset(b->data.f32, 0, sizeof(float) * 10 * 6);
	ccv_nnc_tensor_view_t* const av = ccv_nnc_tensor_view_new(a, CPU_TENSOR_NHWC(32F, 10, 5), ccv_nnc_no_ofs, DIM_ALLOC(8, 1));
	ccv_nnc_tensor_view_t* const bv = ccv_nnc_tensor_view_new(b, CPU_TENSOR_NHWC(32F, 10, 5), ccv_nnc_no_ofs, DIM_ALLOC(6, 1));
	ccv_nnc_cmd_exec(CMD_NMS_FORWARD(0.5), ccv_nnc_no_hint, 0, TENSOR_LIST((ccv_nnc_tensor_t*)av), TENSOR_LIST((ccv_nnc_tensor_t*)bv, c), 0);
	ccv_nnc_tensor_t* const bt = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 6), 0);
	for (i = 0; i < 10; i++)
	{
		bt->data.f32[i * 6] = 9 - i;
		bt->data.f32[i * 6 + 1] = 9 - i;
		bt->data.f32[i * 6 + 2] = 0;
		bt->data.f32[i * 6 + 3] = 1;
		bt->data.f32[i * 6 + 4] = 1;
		bt->data.f32[i * 6 + 5] = 0;
	}
	REQUIRE_TENSOR_EQ(b, bt, "should be equal");
	int ct[10] = {};
	for (i = 0; i < 10; i++)
		ct[i] = 9 - i;
	REQUIRE_ARRAY_EQ(int, c->data.i32, ct, 10, "should be equal");
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(b);
	ccv_nnc_tensor_free(c);
	ccv_nnc_tensor_free(bt);
	ccv_nnc_tensor_view_free(av);
	ccv_nnc_tensor_view_free(bv);
}
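
/* In the view test above, av exposes the first 5 of a's 8 columns by keeping
 * a row stride of 8 floats (DIM_ALLOC(8, 1) passes per-dimension strides), so
 * element (i, j) of the view aliases a->data.f32[i * 8 + j]; bv does the same
 * over b with a stride of 6, which is why the untouched 6th column of bt is
 * expected to stay 0. A hypothetical helper making that addressing explicit: */
static inline float strided_view_at(const float* const base, const int row_stride, const int i, const int j)
{
	return base[i * row_stride + j]; // row-major walk with an over-wide row stride
}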

TEST_CASE("non-maximal suppression forward with some value suppressed")
{
	ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 5), 0);
	ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 5), 0);
	ccv_nnc_tensor_t* const c = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32S, 10), 0);
	int i;
	for (i = 0; i < 10; i++)
	{
		a->data.f32[i * 5] = i;
		a->data.f32[i * 5 + 1] = i;
		a->data.f32[i * 5 + 2] = 0;
		a->data.f32[i * 5 + 3] = 2;
		a->data.f32[i * 5 + 4] = 1;
	}
	ccv_nnc_cmd_exec(CMD_NMS_FORWARD(0.3), ccv_nnc_no_hint, 0, TENSOR_LIST(a), TENSOR_LIST(b, c), 0);
	ccv_nnc_tensor_t* const bt = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 5), 0);
	for (i = 0; i < 5; i++)
	{
		bt->data.f32[i * 5] = 9 - i * 2;
		bt->data.f32[i * 5 + 1] = 9 - i * 2;
		bt->data.f32[i * 5 + 2] = 0;
		bt->data.f32[i * 5 + 3] = 2;
		bt->data.f32[i * 5 + 4] = 1;
	}
	REQUIRE_ARRAY_EQ(float, b->data.f32, bt->data.f32, 5 * 5, "should be equal");
	int ct[10] = {};
	for (i = 0; i < 5; i++)
		ct[i] = 9 - i * 2;
	for (i = 5; i < 10; i++)
		ct[i] = -1;
	REQUIRE_ARRAY_EQ(int, c->data.i32, ct, 10, "should be equal");
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(b);
	ccv_nnc_tensor_free(c);
	ccv_nnc_tensor_free(bt);
}
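
/* Why every other box survives: adjacent rows here describe 2x1 rectangles
 * offset by 1, so their intersection is 1 x 1 = 1 and their union is
 * 2 + 2 - 1 = 3, giving IoU = 1/3, which exceeds the 0.3 threshold. Starting
 * from the highest score (row 9), each kept box therefore suppresses its
 * immediate neighbor, leaving rows 9, 7, 5, 3, 1 in b and -1 markers in c
 * for the five suppressed slots. */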

TEST_CASE("non-maximal suppression backward with some value suppressed")
{
	ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 5), 0);
	ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 5), 0);
	ccv_nnc_tensor_t* const c = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32S, 10), 0);
	int i;
	for (i = 0; i < 10; i++)
	{
		a->data.f32[i * 5] = i;
		a->data.f32[i * 5 + 1] = i;
		a->data.f32[i * 5 + 2] = 0;
		a->data.f32[i * 5 + 3] = 2;
		a->data.f32[i * 5 + 4] = 1;
	}
	ccv_nnc_cmd_exec(CMD_NMS_FORWARD(0.3), ccv_nnc_no_hint, 0, TENSOR_LIST(a), TENSOR_LIST(b, c), 0);
	ccv_nnc_tensor_t* const db = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 5), 0);
	for (i = 0; i < 10; i++)
	{
		db->data.f32[i * 5] = i;
		db->data.f32[i * 5 + 1] = i;
		db->data.f32[i * 5 + 2] = 0;
		db->data.f32[i * 5 + 3] = 2;
		db->data.f32[i * 5 + 4] = 1;
	}
	ccv_nnc_tensor_t* const da = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 5), 0);
	ccv_nnc_cmd_exec(CMD_NMS_BACKWARD(0.3), ccv_nnc_no_hint, 0, TENSOR_LIST(db, 0, 0, 0, c), TENSOR_LIST(da), 0);
	ccv_nnc_tensor_t* const dat = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 5), 0);
	memset(dat->data.f32, 0, sizeof(float) * 10 * 5);
	for (i = 0; i < 5; i++)
	{
		const int k = 9 - i * 2;
		dat->data.f32[k * 5] = i;
		dat->data.f32[k * 5 + 1] = i;
		dat->data.f32[k * 5 + 2] = 0;
		dat->data.f32[k * 5 + 3] = 2;
		dat->data.f32[k * 5 + 4] = 1;
	}
	REQUIRE_TENSOR_EQ(da, dat, "should be equal");
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(b);
	ccv_nnc_tensor_free(c);
	ccv_nnc_tensor_free(db);
	ccv_nnc_tensor_free(da);
	ccv_nnc_tensor_free(dat);
}
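
/* The backward pass above routes gradients through the index tensor c: row i
 * of db flows to row c[i] of da, and rows that were suppressed in the forward
 * pass (c[i] == -1) receive zero gradient. A minimal sketch of that scatter
 * under the same 10x5 shapes; this illustrates the semantics the expected
 * tensor dat encodes, not the kernel's actual code. */
static inline void nms_backward_scatter_sketch(const float* const db, const int* const c, float* const da)
{
	int i, j;
	for (i = 0; i < 10 * 5; i++)
		da[i] = 0; // suppressed rows keep a zero gradient
	for (i = 0; i < 10; i++)
		if (c[i] >= 0) // c[i] is the original row index kept at output slot i
			for (j = 0; j < 5; j++)
				da[c[i] * 5 + j] = db[i * 5 + j];
}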

#include "case_main.h"