/home/liu/actions-runner/_work/ccv/ccv/test/int/nnc/sort.tests.c
#include "case.h"
#include "ccv_case.h"
#include "ccv_nnc_case.h"
#include <ccv.h>
#include <nnc/ccv_nnc.h>
#include <nnc/ccv_nnc_easy.h>
#include <3rdparty/dsfmt/dSFMT.h>

TEST_SETUP()
{
	ccv_nnc_init();
}

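/* Each test below follows the same pattern: fill a 10,000-element tensor with
 * dSFMT pseudo-random values on the host, sort a device copy with the GPU
 * backend (GPU_REF or MPS), sort the host copy with the CPU reference
 * implementation, and require both the sorted values and the index tensors to
 * match. Judging from the ascending/descending pairs of tests, the two
 * arguments to CMD_SORT_FORWARD appear to select the sort axis and the order
 * (0 = ascending, 1 = descending); treat that as an inference from usage here
 * rather than a definitive description of the command's parameters. */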
TEST_CASE("sort a 1d tensor")
{
	GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_SORT_FORWARD, CCV_NNC_BACKEND_GPU_REF) || ccv_nnc_cmd_ok(CCV_NNC_SORT_FORWARD, CCV_NNC_BACKEND_MPS));
	ccv_nnc_tensor_t* const ha = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10000), 0);
	dsfmt_t dsfmt;
	dsfmt_init_gen_rand(&dsfmt, 0);
	int i;
	for (i = 0; i < 10000; i++)
		ha->data.f32[i] = dsfmt_genrand_open_close(&dsfmt);
	ccv_nnc_tensor_t* const hb = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10000), 0);
	ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 10000), 0);
	ccv_nnc_tensor_t* const hindices = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32S, 10000), 0);
	ccv_nnc_tensor_t* const indices = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32S, 10000), 0);
	ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 10000), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(ha), TENSOR_LIST(a), 0);
	ccv_nnc_cmd_exec(CMD_SORT_FORWARD(0, 0), ccv_nnc_no_hint, 0, TENSOR_LIST(a), TENSOR_LIST(b, indices), 0);
	ccv_nnc_tensor_t* const bt = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10000), 0);
	ccv_nnc_tensor_t* const indicest = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32S, 10000), 0);
	ccv_nnc_cmd_exec(CMD_SORT_FORWARD(0, 0), ccv_nnc_no_hint, 0, TENSOR_LIST(ha), TENSOR_LIST(bt, indicest), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(b, indices), TENSOR_LIST(hb, hindices), 0);
	REQUIRE_TENSOR_EQ(hb, bt, "should be equal");
	REQUIRE_TENSOR_EQ(hindices, indicest, "should be equal");
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(b);
	ccv_nnc_tensor_free(indices);
	ccv_nnc_tensor_free(ha);
	ccv_nnc_tensor_free(hb);
	ccv_nnc_tensor_free(hindices);
	ccv_nnc_tensor_free(bt);
	ccv_nnc_tensor_free(indicest);
}

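/* The descending variant adds the integer block offset (i / 1000), so the ten
 * blocks of 1,000 values occupy increasing ranges (k, k + 1] while remaining
 * random within each block; the descending sort has to reverse that block-wise
 * ascending structure rather than just pass the data through. */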
TEST_CASE("sort a 1d tensor, descending")
{
	GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_SORT_FORWARD, CCV_NNC_BACKEND_GPU_REF) || ccv_nnc_cmd_ok(CCV_NNC_SORT_FORWARD, CCV_NNC_BACKEND_MPS));
	ccv_nnc_tensor_t* const ha = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10000), 0);
	dsfmt_t dsfmt;
	dsfmt_init_gen_rand(&dsfmt, 0);
	int i;
	for (i = 0; i < 10000; i++)
		ha->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) + (i / 1000);
	ccv_nnc_tensor_t* const hb = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10000), 0);
	ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 10000), 0);
	ccv_nnc_tensor_t* const hindices = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32S, 10000), 0);
	ccv_nnc_tensor_t* const indices = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32S, 10000), 0);
	ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32F, 10000), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(ha), TENSOR_LIST(a), 0);
	ccv_nnc_cmd_exec(CMD_SORT_FORWARD(0, 1), ccv_nnc_no_hint, 0, TENSOR_LIST(a), TENSOR_LIST(b, indices), 0);
	ccv_nnc_tensor_t* const bt = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10000), 0);
	ccv_nnc_tensor_t* const indicest = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32S, 10000), 0);
	ccv_nnc_cmd_exec(CMD_SORT_FORWARD(0, 1), ccv_nnc_no_hint, 0, TENSOR_LIST(ha), TENSOR_LIST(bt, indicest), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(b, indices), TENSOR_LIST(hb, hindices), 0);
	REQUIRE_TENSOR_EQ(hb, bt, "should be equal");
	REQUIRE_TENSOR_EQ(hindices, indicest, "should be equal");
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(b);
	ccv_nnc_tensor_free(indices);
	ccv_nnc_tensor_free(ha);
	ccv_nnc_tensor_free(hb);
	ccv_nnc_tensor_free(hindices);
	ccv_nnc_tensor_free(bt);
	ccv_nnc_tensor_free(indicest);
}

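/* The integer variant casts dsfmt_genrand_uint32() to a signed int, so roughly
 * half of the inputs come out negative; this exercises signed 32-bit
 * comparison in the sort rather than unsigned or magnitude ordering. */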
TEST_CASE("sort a 1d tensor, int")
{
	GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_SORT_FORWARD, CCV_NNC_BACKEND_GPU_REF) || ccv_nnc_cmd_ok(CCV_NNC_SORT_FORWARD, CCV_NNC_BACKEND_MPS));
	ccv_nnc_tensor_t* const ha = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32S, 10000), 0);
	dsfmt_t dsfmt;
	dsfmt_init_gen_rand(&dsfmt, 0);
	int i;
	for (i = 0; i < 10000; i++)
		ha->data.i32[i] = (int)dsfmt_genrand_uint32(&dsfmt);
	ccv_nnc_tensor_t* const hb = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32S, 10000), 0);
	ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32S, 10000), 0);
	ccv_nnc_tensor_t* const hindices = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32S, 10000), 0);
	ccv_nnc_tensor_t* const indices = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32S, 10000), 0);
	ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32S, 10000), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(ha), TENSOR_LIST(a), 0);
	ccv_nnc_cmd_exec(CMD_SORT_FORWARD(0, 0), ccv_nnc_no_hint, 0, TENSOR_LIST(a), TENSOR_LIST(b, indices), 0);
	ccv_nnc_tensor_t* const bt = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32S, 10000), 0);
	ccv_nnc_tensor_t* const indicest = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32S, 10000), 0);
	ccv_nnc_cmd_exec(CMD_SORT_FORWARD(0, 0), ccv_nnc_no_hint, 0, TENSOR_LIST(ha), TENSOR_LIST(bt, indicest), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(b, indices), TENSOR_LIST(hb, hindices), 0);
	REQUIRE_TENSOR_EQ(hb, bt, "should be equal");
	REQUIRE_TENSOR_EQ(hindices, indicest, "should be equal");
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(b);
	ccv_nnc_tensor_free(indices);
	ccv_nnc_tensor_free(ha);
	ccv_nnc_tensor_free(hb);
	ccv_nnc_tensor_free(hindices);
	ccv_nnc_tensor_free(bt);
	ccv_nnc_tensor_free(indicest);
}

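/* Combines the two cases above: signed 32-bit inputs (including negatives from
 * the uint32 cast) sorted in descending order. */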
TEST_CASE("sort a 1d tensor, descending, int")
{
	GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_SORT_FORWARD, CCV_NNC_BACKEND_GPU_REF) || ccv_nnc_cmd_ok(CCV_NNC_SORT_FORWARD, CCV_NNC_BACKEND_MPS));
	ccv_nnc_tensor_t* const ha = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32S, 10000), 0);
	dsfmt_t dsfmt;
	dsfmt_init_gen_rand(&dsfmt, 0);
	int i;
	for (i = 0; i < 10000; i++)
		ha->data.i32[i] = (int)dsfmt_genrand_uint32(&dsfmt);
	ccv_nnc_tensor_t* const hb = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32S, 10000), 0);
	ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32S, 10000), 0);
	ccv_nnc_tensor_t* const hindices = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32S, 10000), 0);
	ccv_nnc_tensor_t* const indices = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32S, 10000), 0);
	ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 32S, 10000), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(ha), TENSOR_LIST(a), 0);
	ccv_nnc_cmd_exec(CMD_SORT_FORWARD(0, 1), ccv_nnc_no_hint, 0, TENSOR_LIST(a), TENSOR_LIST(b, indices), 0);
	ccv_nnc_tensor_t* const bt = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32S, 10000), 0);
	ccv_nnc_tensor_t* const indicest = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32S, 10000), 0);
	ccv_nnc_cmd_exec(CMD_SORT_FORWARD(0, 1), ccv_nnc_no_hint, 0, TENSOR_LIST(ha), TENSOR_LIST(bt, indicest), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(b, indices), TENSOR_LIST(hb, hindices), 0);
	REQUIRE_TENSOR_EQ(hb, bt, "should be equal");
	REQUIRE_TENSOR_EQ(hindices, indicest, "should be equal");
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(b);
	ccv_nnc_tensor_free(indices);
	ccv_nnc_tensor_free(ha);
	ccv_nnc_tensor_free(hb);
	ccv_nnc_tensor_free(hindices);
	ccv_nnc_tensor_free(bt);
	ccv_nnc_tensor_free(indicest);
}

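/* case_main.h presumably supplies the test runner's main() entry point, as in
 * the other test binaries in the ccv suite. */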
#include "case_main.h"