// test/int/nnc/compare.tests.c
#include "case.h"
#include "ccv_case.h"
#include "ccv_nnc_case.h"
#include <ccv.h>
#include <nnc/ccv_nnc.h>
#include <nnc/ccv_nnc_easy.h>
#include <3rdparty/dsfmt/dSFMT.h>

TEST_SETUP()
{
	ccv_nnc_init();
}

TEST_CASE("min forward")
{
	GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_MIN_FORWARD, CCV_NNC_BACKEND_GPU_REF));
	ccv_nnc_tensor_t* a = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, 10, 100), 0);
	ccv_nnc_tensor_t* b = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, 10, 100), 0);
	ccv_nnc_tensor_t* c = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, 10, 100), 0);
	ccv_nnc_tensor_t* ha = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 10, 100), 0);
	ccv_nnc_tensor_t* hb = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 10, 100), 0);
	ccv_nnc_tensor_t* hc = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 10, 100), 0);
	ccv_nnc_tensor_t* ct = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 10, 100), 0);
	dsfmt_t dsfmt;
	dsfmt_init_gen_rand(&dsfmt, 0);
	int i;
	// Fill the host inputs with random values in (-1, 9].
	for (i = 0; i < 1000; i++)
		ha->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) * 10 - 1;
	for (i = 0; i < 1000; i++)
		hb->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) * 10 - 1;
	// Run the element-wise min on the GPU and on the CPU, then compare.
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(ha, hb), TENSOR_LIST(a, b), 0);
	ccv_nnc_cmd_exec(CMD_MIN_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(a, b), TENSOR_LIST(c), 0);
	ccv_nnc_cmd_exec(CMD_MIN_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(ha, hb), TENSOR_LIST(ct), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(c), TENSOR_LIST(hc), 0);
	REQUIRE_TENSOR_EQ(ct, hc, "GPU computed output should be the same as the CPU computed one");
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(b);
	ccv_nnc_tensor_free(c);
	ccv_nnc_tensor_free(ha);
	ccv_nnc_tensor_free(hb);
	ccv_nnc_tensor_free(hc);
	ccv_nnc_tensor_free(ct);
}

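/* A minimal CPU-only sketch of the semantics the test above relies on:
 * CMD_MIN_FORWARD is expected to compute the element-wise minimum,
 * c[i] = min(a[i], b[i]), over all elements. The helper below is
 * hypothetical (not part of the ccv API) and only spells out that
 * reference computation. */
static inline void reference_min_forward(const float* const a, const float* const b, float* const c, const int count)
{
	int i;
	for (i = 0; i < count; i++)
		c[i] = a[i] < b[i] ? a[i] : b[i]; // element-wise minimum
}
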
TEST_CASE("max forward")
{
	GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_MAX_FORWARD, CCV_NNC_BACKEND_GPU_REF));
	ccv_nnc_tensor_t* a = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, 10, 100), 0);
	ccv_nnc_tensor_t* b = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, 10, 100), 0);
	ccv_nnc_tensor_t* c = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, 10, 100), 0);
	ccv_nnc_tensor_t* ha = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 10, 100), 0);
	ccv_nnc_tensor_t* hb = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 10, 100), 0);
	ccv_nnc_tensor_t* hc = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 10, 100), 0);
	ccv_nnc_tensor_t* ct = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 10, 100), 0);
	dsfmt_t dsfmt;
	dsfmt_init_gen_rand(&dsfmt, 0);
	int i;
	// Fill the host inputs with random values in (-1, 9].
	for (i = 0; i < 1000; i++)
		ha->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) * 10 - 1;
	for (i = 0; i < 1000; i++)
		hb->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) * 10 - 1;
	// Run the element-wise max on the GPU and on the CPU, then compare.
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(ha, hb), TENSOR_LIST(a, b), 0);
	ccv_nnc_cmd_exec(CMD_MAX_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(a, b), TENSOR_LIST(c), 0);
	ccv_nnc_cmd_exec(CMD_MAX_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(ha, hb), TENSOR_LIST(ct), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(c), TENSOR_LIST(hc), 0);
	REQUIRE_TENSOR_EQ(ct, hc, "GPU computed output should be the same as the CPU computed one");
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(b);
	ccv_nnc_tensor_free(c);
	ccv_nnc_tensor_free(ha);
	ccv_nnc_tensor_free(hb);
	ccv_nnc_tensor_free(hc);
	ccv_nnc_tensor_free(ct);
}

TEST_CASE("min backward")
{
	GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_MIN_FORWARD, CCV_NNC_BACKEND_GPU_REF) &&
		ccv_nnc_cmd_ok(CCV_NNC_MIN_BACKWARD, CCV_NNC_BACKEND_GPU_REF));
	ccv_nnc_tensor_t* a = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, 10, 100), 0);
	ccv_nnc_tensor_t* b = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, 10, 100), 0);
	ccv_nnc_tensor_t* g = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, 10, 100), 0);
	ccv_nnc_tensor_t* da = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, 10, 100), 0);
	ccv_nnc_tensor_t* db = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, 10, 100), 0);
	ccv_nnc_tensor_t* ha = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 10, 100), 0);
	ccv_nnc_tensor_t* hb = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 10, 100), 0);
	ccv_nnc_tensor_t* hda = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 10, 100), 0);
	ccv_nnc_tensor_t* hdb = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 10, 100), 0);
	ccv_nnc_tensor_t* hg = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 10, 100), 0);
	ccv_nnc_tensor_t* dat = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 10, 100), 0);
	ccv_nnc_tensor_t* dbt = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 10, 100), 0);
	dsfmt_t dsfmt;
	dsfmt_init_gen_rand(&dsfmt, 0);
	int i;
	// Fill the host inputs with random values in (0, 10] and the incoming gradient with values in (0, 1].
	for (i = 0; i < 1000; i++)
		ha->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) * 10;
	for (i = 0; i < 1000; i++)
		hb->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) * 10;
	for (i = 0; i < 1000; i++)
		hg->data.f32[i] = dsfmt_genrand_open_close(&dsfmt);
	// Run the backward pass on the GPU and on the CPU, then compare both gradients.
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(ha, hb, hg), TENSOR_LIST(a, b, g), 0);
	ccv_nnc_cmd_exec(CMD_MIN_BACKWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(g, a, b), TENSOR_LIST(da, db), 0);
	ccv_nnc_cmd_exec(CMD_MIN_BACKWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(hg, ha, hb), TENSOR_LIST(dat, dbt), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(da, db), TENSOR_LIST(hda, hdb), 0);
	REQUIRE_TENSOR_EQ(dat, hda, "GPU computed output should be the same as the CPU computed one");
	REQUIRE_TENSOR_EQ(dbt, hdb, "GPU computed output should be the same as the CPU computed one");
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(b);
	ccv_nnc_tensor_free(g);
	ccv_nnc_tensor_free(da);
	ccv_nnc_tensor_free(db);
	ccv_nnc_tensor_free(ha);
	ccv_nnc_tensor_free(hb);
	ccv_nnc_tensor_free(hg);
	ccv_nnc_tensor_free(hda);
	ccv_nnc_tensor_free(hdb);
	ccv_nnc_tensor_free(dat);
	ccv_nnc_tensor_free(dbt);
}

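/* A hedged sketch of the gradient routing the "min backward" test above
 * exercises: the incoming gradient g is assumed to flow to whichever input
 * holds the element-wise minimum. The tie rule here (a[i] == b[i] routes
 * the gradient to both inputs) is an assumption, not confirmed behavior,
 * and reference_min_backward is hypothetical, not part of the ccv API. */
static inline void reference_min_backward(const float* const g, const float* const a, const float* const b, float* const da, float* const db, const int count)
{
	int i;
	for (i = 0; i < count; i++)
	{
		da[i] = a[i] <= b[i] ? g[i] : 0; // a holds (or ties for) the minimum
		db[i] = b[i] <= a[i] ? g[i] : 0; // b holds (or ties for) the minimum
	}
}
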
TEST_CASE("max backward")
{
	GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_MAX_FORWARD, CCV_NNC_BACKEND_GPU_REF) &&
		ccv_nnc_cmd_ok(CCV_NNC_MAX_BACKWARD, CCV_NNC_BACKEND_GPU_REF));
	ccv_nnc_tensor_t* a = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, 10, 100), 0);
	ccv_nnc_tensor_t* b = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, 10, 100), 0);
	ccv_nnc_tensor_t* g = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, 10, 100), 0);
	ccv_nnc_tensor_t* da = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, 10, 100), 0);
	ccv_nnc_tensor_t* db = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, 10, 100), 0);
	ccv_nnc_tensor_t* ha = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 10, 100), 0);
	ccv_nnc_tensor_t* hb = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 10, 100), 0);
	ccv_nnc_tensor_t* hda = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 10, 100), 0);
	ccv_nnc_tensor_t* hdb = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 10, 100), 0);
	ccv_nnc_tensor_t* hg = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 10, 100), 0);
	ccv_nnc_tensor_t* dat = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 10, 100), 0);
	ccv_nnc_tensor_t* dbt = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 10, 100), 0);
	dsfmt_t dsfmt;
	dsfmt_init_gen_rand(&dsfmt, 0);
	int i;
	// Fill the host inputs with random values in (0, 10] and the incoming gradient with values in (0, 1].
	for (i = 0; i < 1000; i++)
		ha->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) * 10;
	for (i = 0; i < 1000; i++)
		hb->data.f32[i] = dsfmt_genrand_open_close(&dsfmt) * 10;
	for (i = 0; i < 1000; i++)
		hg->data.f32[i] = dsfmt_genrand_open_close(&dsfmt);
	// Run the backward pass on the GPU and on the CPU, then compare both gradients.
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(ha, hb, hg), TENSOR_LIST(a, b, g), 0);
	ccv_nnc_cmd_exec(CMD_MAX_BACKWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(g, a, b), TENSOR_LIST(da, db), 0);
	ccv_nnc_cmd_exec(CMD_MAX_BACKWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(hg, ha, hb), TENSOR_LIST(dat, dbt), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(da, db), TENSOR_LIST(hda, hdb), 0);
	REQUIRE_TENSOR_EQ(dat, hda, "GPU computed output should be the same as the CPU computed one");
	REQUIRE_TENSOR_EQ(dbt, hdb, "GPU computed output should be the same as the CPU computed one");
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(b);
	ccv_nnc_tensor_free(g);
	ccv_nnc_tensor_free(da);
	ccv_nnc_tensor_free(db);
	ccv_nnc_tensor_free(ha);
	ccv_nnc_tensor_free(hb);
	ccv_nnc_tensor_free(hg);
	ccv_nnc_tensor_free(hda);
	ccv_nnc_tensor_free(hdb);
	ccv_nnc_tensor_free(dat);
	ccv_nnc_tensor_free(dbt);
}

#include "case_main.h"