// /home/liu/actions-runner/_work/ccv/ccv/test/int/nnc/nms.tests.c
#include "case.h"
#include "ccv_case.h"
#include "ccv_nnc_case.h"
#include <ccv.h>
#include <nnc/ccv_nnc.h>
#include <nnc/ccv_nnc_easy.h>
#include <3rdparty/dsfmt/dSFMT.h>

TEST_SETUP()
{
	ccv_nnc_init();
}

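// Fill 1000 overlapping boxes, run NMS forward on the GPU reference backend,
// then verify the boxes and kept indices match the CPU reference backend.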
TEST_CASE("compare nms forward")
{
	GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_NMS_FORWARD, CCV_NNC_BACKEND_GPU_REF) &&
		ccv_nnc_cmd_ok(CCV_NNC_NMS_BACKWARD, CCV_NNC_BACKEND_GPU_REF));
	ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, 1000, 5), 0);
	ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, 1000, 5), 0);
	ccv_nnc_tensor_t* const c = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32S, 1000), 0);
	ccv_nnc_tensor_t* const ha = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 1000, 5), 0);
	ccv_nnc_tensor_t* const hb = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 1000, 5), 0);
	ccv_nnc_tensor_t* const hc = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32S, 1000), 0);
	int i;
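	// Each row is 5 floats; with these synthetic boxes, row i is (i, i, 0, 2, 1).
	// Presumably the first value is the score NMS sorts by (highest first) and the
	// remaining four describe the box, so adjacent boxes overlap enough to be
	// suppressed at the 0.3 IoU threshold below.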
	for (i = 0; i < 1000; i++)
	{
		ha->data.f32[i * 5] = i;
		ha->data.f32[i * 5 + 1] = i;
		ha->data.f32[i * 5 + 2] = 0;
		ha->data.f32[i * 5 + 3] = 2;
		ha->data.f32[i * 5 + 4] = 1;
	}
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(ha), TENSOR_LIST(a), 0);
	ccv_nnc_cmd_exec(CMD_NMS_FORWARD(0.3), ccv_nnc_no_hint, 0, TENSOR_LIST(a), TENSOR_LIST(b, c), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(b, c), TENSOR_LIST(hb, hc), 0);
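	// Run the same NMS on the CPU reference backend; both backends should agree
	// on the surviving boxes and on the kept-index order.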
	ccv_nnc_tensor_t* const hbt = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 1000, 5), 0);
	ccv_nnc_tensor_t* const hct = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32S, 1000), 0);
	ccv_nnc_cmd_exec(CMD_NMS_FORWARD(0.3), ccv_nnc_no_hint, 0, TENSOR_LIST(ha), TENSOR_LIST(hbt, hct), 0);
	REQUIRE_TENSOR_EQ(hbt, hb, "should be equal");
	REQUIRE_ARRAY_EQ(int, hc->data.i32, hct->data.i32, 1000, "should be equal");
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(b);
	ccv_nnc_tensor_free(c);
	ccv_nnc_tensor_free(ha);
	ccv_nnc_tensor_free(hb);
	ccv_nnc_tensor_free(hc);
	ccv_nnc_tensor_free(hbt);
	ccv_nnc_tensor_free(hct);
}

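// The backward pass is checked against a hand-built expectation rather than a
// CPU run: given a gradient and the kept-box indices, it should scatter each
// gradient row back to its pre-NMS position and leave every other row zero.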
TEST_CASE("compare nms backward")
{
	GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_NMS_FORWARD, CCV_NNC_BACKEND_GPU_REF) &&
		ccv_nnc_cmd_ok(CCV_NNC_NMS_BACKWARD, CCV_NNC_BACKEND_GPU_REF));
	ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, 100, 5), 0);
	ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, 100, 5), 0);
	ccv_nnc_tensor_t* const c = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32S, 100), 0);
	ccv_nnc_tensor_t* const ha = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 100, 5), 0);
	ccv_nnc_tensor_t* const hb = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 100, 5), 0);
	ccv_nnc_tensor_t* const hc = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32S, 100), 0);
	int i;
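	// Same synthetic layout as the forward test: row i is (i, i, 0, 2, 1).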
	for (i = 0; i < 100; i++)
	{
		ha->data.f32[i * 5] = i;
		ha->data.f32[i * 5 + 1] = i;
		ha->data.f32[i * 5 + 2] = 0;
		ha->data.f32[i * 5 + 3] = 2;
		ha->data.f32[i * 5 + 4] = 1;
	}
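	// Simulate the index tensor a forward NMS pass would produce: the first 10
	// slots map output row i back to input row 10 - i, and -1 marks slots that
	// kept no box.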
	for (i = 0; i < 10; i++)
		hc->data.i32[i] = 10 - i;
	for (i = 10; i < 100; i++)
		hc->data.i32[i] = -1;
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(ha, hc), TENSOR_LIST(a, c), 0);
	ccv_nnc_cmd_exec(CMD_NMS_BACKWARD(0.3), ccv_nnc_no_hint, 0, TENSOR_LIST(a, 0, 0, 0, c), TENSOR_LIST(b), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(b), TENSOR_LIST(hb), 0);
	ccv_nnc_tensor_t* const bt = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 100, 5), 0);
	for (i = 0; i < 100 * 5; i++)
		bt->data.f32[i] = 0;
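	// Expected gradient: input row k scatters to output row hc[k] = 10 - k, so
	// output row i (1..10) holds the values of input row j = 10 - i; every other
	// row stays zero.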
	for (i = 1; i < 11; i++)
	{
		const int j = 10 - i;
		bt->data.f32[i * 5] = j;
		bt->data.f32[i * 5 + 1] = j;
		bt->data.f32[i * 5 + 2] = 0;
		bt->data.f32[i * 5 + 3] = 2;
		bt->data.f32[i * 5 + 4] = 1;
	}
	REQUIRE_TENSOR_EQ(hb, bt, "should be equal");
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(b);
	ccv_nnc_tensor_free(c);
	ccv_nnc_tensor_free(ha);
	ccv_nnc_tensor_free(hb);
	ccv_nnc_tensor_free(hc);
	ccv_nnc_tensor_free(bt);
}

#include "case_main.h"