/home/liu/actions-runner/_work/ccv/ccv/test/unit/nnc/loss.tests.c
#include "case.h"
#include "ccv_case.h"
#include "ccv_nnc_case.h"
#include <ccv.h>
#include <nnc/ccv_nnc.h>
#include <nnc/ccv_nnc_easy.h>
#include <3rdparty/dsfmt/dSFMT.h>

TEST_SETUP()
{
	ccv_nnc_init();
}

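/* Reference math (a sketch of the expected values, not part of the kernel under test):
 * with mean reduction, MSE computes c[i] = (1 / D) * sum_j (a[i][j] - b[i][j])^2 over the
 * last dimension D = 10. Since b is all zeros here, the expected output reduces to the
 * per-row mean of a^2, which is exactly what the tc loop below computes. */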
TEST_CASE("mse mean loss forward")
{
	ccv_nnc_tensor_t* a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 10), 0);
	ccv_nnc_tensor_t* b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 10), 0);
	ccv_nnc_tensor_t* c = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10), 0);
	dsfmt_t dsfmt;
	dsfmt_init_gen_rand(&dsfmt, 0);
	int i, j;
	for (i = 0; i < 100; i++)
		a->data.f32[i] = dsfmt_genrand_open_close(&dsfmt);
	for (i = 0; i < 100; i++)
		b->data.f32[i] = 0;
	ccv_nnc_cmd_exec(CMD_MSE_FORWARD(CCV_NNC_MSE_REDUCE_MEAN), ccv_nnc_no_hint, 0, TENSOR_LIST(a, b), TENSOR_LIST(c), 0);
	ccv_nnc_tensor_t* tc = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10), 0);
	for (i = 0; i < 10; i++)
	{
		tc->data.f32[i] = 0;
		for (j = 0; j < 10; j++)
			tc->data.f32[i] += a->data.f32[j + i * 10] * a->data.f32[j + i * 10];
		tc->data.f32[i] *= 1.0 / 10.0;
	}
	REQUIRE_TENSOR_EQ(tc, c, "CPU computed output should be the same as simply computed ones");
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(b);
	ccv_nnc_tensor_free(c);
	ccv_nnc_tensor_free(tc);
}

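/* Reference math (sketch): the mean-reduced MSE gradient w.r.t. the inputs is
 * da = g * 2 * (a - b) / D and db = -g * 2 * (a - b) / D with D = 10. With b = 0 and an
 * incoming gradient g of all ones, this simplifies to da = 2a / 10 and db = -2a / 10,
 * which is what tda and tdb hold below. */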
TEST_CASE("mse mean loss backward")
{
	ccv_nnc_tensor_t* a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 10), 0);
	ccv_nnc_tensor_t* b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 10), 0);
	ccv_nnc_tensor_t* c = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10), 0);
	ccv_nnc_tensor_t* da = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 10), 0);
	ccv_nnc_tensor_t* db = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 10), 0);
	ccv_nnc_tensor_t* g = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10), 0);
	dsfmt_t dsfmt;
	dsfmt_init_gen_rand(&dsfmt, 0);
	int i;
	for (i = 0; i < 100; i++)
		a->data.f32[i] = dsfmt_genrand_open_close(&dsfmt);
	for (i = 0; i < 100; i++)
		b->data.f32[i] = 0;
	for (i = 0; i < 10; i++)
		g->data.f32[i] = 1;
	ccv_nnc_cmd_exec(CMD_MSE_FORWARD(CCV_NNC_MSE_REDUCE_MEAN), ccv_nnc_no_hint, 0, TENSOR_LIST(a, b), TENSOR_LIST(c), 0);
	ccv_nnc_cmd_exec(CMD_MSE_BACKWARD(CCV_NNC_MSE_REDUCE_MEAN), ccv_nnc_no_hint, 0, TENSOR_LIST(g, a, b), TENSOR_LIST(da, db), 0);
	ccv_nnc_tensor_t* tda = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 10), 0);
	ccv_nnc_tensor_t* tdb = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 10), 0);
	for (i = 0; i < 100; i++)
		tda->data.f32[i] = 2 * a->data.f32[i] / 10;
	for (i = 0; i < 100; i++)
		tdb->data.f32[i] = -2 * a->data.f32[i] / 10;
	REQUIRE_TENSOR_EQ(tda, da, "CPU computed output should be the same as simply computed ones");
	REQUIRE_TENSOR_EQ(tdb, db, "CPU computed output should be the same as simply computed ones");
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(b);
	ccv_nnc_tensor_free(c);
	ccv_nnc_tensor_free(da);
	ccv_nnc_tensor_free(db);
	ccv_nnc_tensor_free(g);
	ccv_nnc_tensor_free(tda);
	ccv_nnc_tensor_free(tdb);
}

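/* Reference math (sketch): with sum reduction, MSE computes c[i] = sum_j (a[i][j] - b[i][j])^2
 * with no 1/D scaling. Since b is all zeros, the expected output is the per-row sum of a^2,
 * matching the tc loop below. */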
TEST_CASE("mse sum loss forward")
{
	ccv_nnc_tensor_t* a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 10), 0);
	ccv_nnc_tensor_t* b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 10), 0);
	ccv_nnc_tensor_t* c = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10), 0);
	dsfmt_t dsfmt;
	dsfmt_init_gen_rand(&dsfmt, 0);
	int i, j;
	for (i = 0; i < 100; i++)
		a->data.f32[i] = dsfmt_genrand_open_close(&dsfmt);
	for (i = 0; i < 100; i++)
		b->data.f32[i] = 0;
	ccv_nnc_cmd_exec(CMD_MSE_FORWARD(CCV_NNC_MSE_REDUCE_SUM), ccv_nnc_no_hint, 0, TENSOR_LIST(a, b), TENSOR_LIST(c), 0);
	ccv_nnc_tensor_t* tc = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10), 0);
	for (i = 0; i < 10; i++)
	{
		tc->data.f32[i] = 0;
		for (j = 0; j < 10; j++)
			tc->data.f32[i] += a->data.f32[j + i * 10] * a->data.f32[j + i * 10];
	}
	REQUIRE_TENSOR_EQ(tc, c, "CPU computed output should be the same as simply computed ones");
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(b);
	ccv_nnc_tensor_free(c);
	ccv_nnc_tensor_free(tc);
}

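/* Reference math (sketch): the sum-reduced MSE gradient is da = g * 2 * (a - b) and
 * db = -g * 2 * (a - b), i.e. the mean-reduction gradient without the 1/D factor. With b = 0
 * and g of all ones, that is da = 2a and db = -2a, as held by tda and tdb below. */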
TEST_CASE("mse sum loss backward")
{
	ccv_nnc_tensor_t* a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 10), 0);
	ccv_nnc_tensor_t* b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 10), 0);
	ccv_nnc_tensor_t* c = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10), 0);
	ccv_nnc_tensor_t* da = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 10), 0);
	ccv_nnc_tensor_t* db = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 10), 0);
	ccv_nnc_tensor_t* g = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10), 0);
	dsfmt_t dsfmt;
	dsfmt_init_gen_rand(&dsfmt, 0);
	int i;
	for (i = 0; i < 100; i++)
		a->data.f32[i] = dsfmt_genrand_open_close(&dsfmt);
	for (i = 0; i < 100; i++)
		b->data.f32[i] = 0;
	for (i = 0; i < 10; i++)
		g->data.f32[i] = 1;
	ccv_nnc_cmd_exec(CMD_MSE_FORWARD(CCV_NNC_MSE_REDUCE_SUM), ccv_nnc_no_hint, 0, TENSOR_LIST(a, b), TENSOR_LIST(c), 0);
	ccv_nnc_cmd_exec(CMD_MSE_BACKWARD(CCV_NNC_MSE_REDUCE_SUM), ccv_nnc_no_hint, 0, TENSOR_LIST(g, a, b), TENSOR_LIST(da, db), 0);
	ccv_nnc_tensor_t* tda = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 10), 0);
	ccv_nnc_tensor_t* tdb = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10, 10), 0);
	for (i = 0; i < 100; i++)
		tda->data.f32[i] = 2 * a->data.f32[i];
	for (i = 0; i < 100; i++)
		tdb->data.f32[i] = -2 * a->data.f32[i];
	REQUIRE_TENSOR_EQ(tda, da, "CPU computed output should be the same as simply computed ones");
	REQUIRE_TENSOR_EQ(tdb, db, "CPU computed output should be the same as simply computed ones");
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(b);
	ccv_nnc_tensor_free(c);
	ccv_nnc_tensor_free(da);
	ccv_nnc_tensor_free(db);
	ccv_nnc_tensor_free(g);
	ccv_nnc_tensor_free(tda);
	ccv_nnc_tensor_free(tdb);
}

#include "case_main.h"