/home/liu/actions-runner/_work/ccv/ccv/lib/nnc/cmd/reduce/ccv_nnc_reduce.c
Line | Count | Source |
1 | | #include "ccv.h" |
2 | | #include "nnc/ccv_nnc.h" |
3 | | #include "nnc/ccv_nnc_internal.h" |
4 | | |
5 | | static void _ccv_nnc_reduce_tensor_auto_forw(const ccv_nnc_cmd_param_t cmd, const ccv_nnc_tensor_param_t* const inputs, const int input_size, const ccv_nnc_hint_t hint, ccv_nnc_tensor_param_t* const outputs, const int output_size) |
6 | 4.07k | { |
7 | 4.07k | assert(input_size == 1); |
8 | 4.07k | assert(output_size == 1); |
9 | 4.07k | outputs[0] = inputs[0]; |
10 | 4.07k | int i; |
11 | 12.1k | for (i = 0; i < cmd.reduce.count; i++) |
12 | 8.12k | outputs[0].dim[cmd.reduce.axis[i]] = 1; // Reduce the dimension to 1. |
13 | 4.07k | } |
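The auto-shape function above copies the input parameters and collapses every reduced axis to 1. A minimal sketch of what that yields (not part of this file; it assumes the CMD_REDUCE_SUM_FORWARD macro defined further down and the CPU_TENSOR_NHWC helper from ccv_nnc_easy.h):

    // Reduce over axes 1 and 2 of an 8x4x4 tensor; the inferred output shape is 8x1x1.
    ccv_nnc_tensor_param_t a_params = CPU_TENSOR_NHWC(32F, 8, 4, 4);
    ccv_nnc_tensor_param_t b_params;
    ccv_nnc_hint_tensor_auto(CMD_REDUCE_SUM_FORWARD(1, 2), &a_params, 1, ccv_nnc_no_hint, &b_params, 1);
    // b_params.dim is now {8, 1, 1}; datatype and format are carried over from a_params.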
14 | | |
15 | | static int _ccv_nnc_reduce_sum_or_mean_forw_bitmask(const ccv_nnc_cmd_param_t cmd, const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size) |
16 | 4 | { |
17 | 4 | if (input_bitmasks[0] == 1u && output_bitmasks[0] == 1u) |
18 | 4 | return 1; |
19 | 0 | return 0; |
20 | 4 | } |
21 | | |
22 | | static int _ccv_nnc_reduce_sum_or_mean_back_bitmask(const ccv_nnc_cmd_param_t cmd, const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size) |
23 | 8.08k | { |
24 | | // Output the propagated error. |
25 | 8.08k | if ((input_bitmasks[0] & 1u) == 1u && output_bitmasks[0] == 1u) |
26 | 6.05k | return 1; |
27 | 2.02k | return 0; |
28 | 8.08k | } |
29 | | |
30 | | REGISTER_COMMAND(CCV_NNC_REDUCE_SUM_FORWARD)(ccv_nnc_cmd_registry_t* const registry) |
31 | | FIND_BACKEND(ccv_nnc_reduce_sum_cpu_ref.c, gpu/ccv_nnc_reduce_sum_gpu_cudnn.cu, mps/ccv_nnc_reduce_sum_mps.m) |
32 | 1 | { |
33 | 1 | registry->bitmask = _ccv_nnc_reduce_sum_or_mean_forw_bitmask; |
34 | 1 | registry->tensor_auto = _ccv_nnc_reduce_tensor_auto_forw; |
35 | 1 | } |
36 | | |
37 | | REGISTER_COMMAND(CCV_NNC_REDUCE_SUM_BACKWARD)(ccv_nnc_cmd_registry_t* const registry) |
38 | | FIND_BACKEND(ccv_nnc_reduce_sum_cpu_ref.c, gpu/ccv_nnc_reduce_sum_gpu_cudnn.cu, mps/ccv_nnc_reduce_sum_mps.m) |
39 | 1 | { |
40 | 1 | registry->bitmask = _ccv_nnc_reduce_sum_or_mean_back_bitmask; |
41 | 1 | registry->tensor_auto = ccv_nnc_hint_tensor_auto_backward_from_gradient; |
42 | 1 | } |
43 | | |
44 | | //@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_REDUCE_SUM_FORWARD) |
45 | | #define CMD_REDUCE_SUM_FORWARD(...) ccv_nnc_cmd(CCV_NNC_REDUCE_SUM_FORWARD, 0, CMD_REDUCE(__VA_ARGS__), 0) |
46 | | //@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_REDUCE_SUM_BACKWARD) |
47 | | #define CMD_REDUCE_SUM_BACKWARD(...) ccv_nnc_cmd(CCV_NNC_REDUCE_SUM_BACKWARD, 0, CMD_REDUCE(__VA_ARGS__), 0) |
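As a rough usage sketch (assuming the TENSOR_LIST and CPU_TENSOR_NHWC helpers from ccv_nnc_easy.h and the public ccv_nnc_cmd_exec API), a sum reduction over axis 0 of a 2x3 tensor would look like:

    ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 3), 0);
    ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 1, 3), 0);
    // Column-wise sum: each of the 3 outputs accumulates the 2 rows of a.
    ccv_nnc_cmd_exec(CMD_REDUCE_SUM_FORWARD(0), ccv_nnc_no_hint, 0, TENSOR_LIST(a), TENSOR_LIST(b), 0);
    ccv_nnc_tensor_free(a);
    ccv_nnc_tensor_free(b);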
48 | | |
49 | | REGISTER_COMMAND(CCV_NNC_REDUCE_MEAN_FORWARD)(ccv_nnc_cmd_registry_t* const registry) |
50 | | FIND_BACKEND(ccv_nnc_reduce_mean_cpu_ref.c, gpu/ccv_nnc_reduce_mean_gpu_cudnn.cu, mps/ccv_nnc_reduce_mean_mps.m) |
51 | 1 | { |
52 | 1 | registry->bitmask = _ccv_nnc_reduce_sum_or_mean_forw_bitmask; |
53 | 1 | registry->tensor_auto = _ccv_nnc_reduce_tensor_auto_forw; |
54 | 1 | } |
55 | | |
56 | | REGISTER_COMMAND(CCV_NNC_REDUCE_MEAN_BACKWARD)(ccv_nnc_cmd_registry_t* const registry) |
57 | | FIND_BACKEND(ccv_nnc_reduce_mean_cpu_ref.c, gpu/ccv_nnc_reduce_mean_gpu_cudnn.cu, mps/ccv_nnc_reduce_mean_mps.m) |
58 | 1 | { |
59 | 1 | registry->bitmask = _ccv_nnc_reduce_sum_or_mean_back_bitmask; |
60 | 1 | registry->tensor_auto = ccv_nnc_hint_tensor_auto_backward_from_gradient; |
61 | 1 | } |
62 | | |
63 | | //@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_REDUCE_MEAN_FORWARD) |
64 | | #define CMD_REDUCE_MEAN_FORWARD(...) ccv_nnc_cmd(CCV_NNC_REDUCE_MEAN_FORWARD, 0, CMD_REDUCE(__VA_ARGS__), 0) |
65 | | //@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_REDUCE_MEAN_BACKWARD) |
66 | | #define CMD_REDUCE_MEAN_BACKWARD(...) ccv_nnc_cmd(CCV_NNC_REDUCE_MEAN_BACKWARD, 0, CMD_REDUCE(__VA_ARGS__), 0) |
67 | | |
68 | | static int _ccv_nnc_reduce_max_forw_bitmask(const ccv_nnc_cmd_param_t cmd, const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size) |
69 | 2 | { |
70 | 2 | if (input_bitmasks[0] == 1u && output_bitmasks[0] == 1u) |
71 | 2 | return 1; |
72 | 0 | return 0; |
73 | 2 | } |
74 | | |
75 | | static int _ccv_nnc_reduce_max_back_bitmask(const ccv_nnc_cmd_param_t cmd, const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size) |
76 | 4 | { |
77 | | // Output the propagated error. |
78 | 4 | if ((input_bitmasks[0] & 7u) == 7u && output_bitmasks[0] == 1u) |
79 | 1 | return 1; |
80 | 3 | return 0; |
81 | 4 | } |
82 | | |
83 | | REGISTER_COMMAND(CCV_NNC_REDUCE_MAX_FORWARD)(ccv_nnc_cmd_registry_t* const registry) |
84 | | FIND_BACKEND(ccv_nnc_reduce_max_cpu_ref.c, mps/ccv_nnc_reduce_max_mps.m) |
85 | 1 | { |
86 | 1 | registry->bitmask = _ccv_nnc_reduce_max_forw_bitmask; |
87 | 1 | registry->tensor_auto = _ccv_nnc_reduce_tensor_auto_forw; |
88 | 1 | } |
89 | | |
90 | | REGISTER_COMMAND(CCV_NNC_REDUCE_MAX_BACKWARD)(ccv_nnc_cmd_registry_t* const registry) |
91 | | FIND_BACKEND(ccv_nnc_reduce_max_cpu_ref.c) |
92 | 1 | { |
93 | 1 | registry->bitmask = _ccv_nnc_reduce_max_back_bitmask; |
94 | 1 | registry->tensor_auto = ccv_nnc_hint_tensor_auto_backward_from_gradient; |
95 | 1 | } |
96 | | |
97 | | //@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_REDUCE_MAX_FORWARD) |
98 | | #define CMD_REDUCE_MAX_FORWARD(...) ccv_nnc_cmd(CCV_NNC_REDUCE_MAX_FORWARD, 0, CMD_REDUCE(__VA_ARGS__), 0) |
99 | | //@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_REDUCE_MAX_BACKWARD) |
100 | | #define CMD_REDUCE_MAX_BACKWARD(...) ccv_nnc_cmd(CCV_NNC_REDUCE_MAX_BACKWARD, 0, CMD_REDUCE(__VA_ARGS__), 0) |
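Unlike sum/mean, the max (and min) backward bitmask requires bits 0 through 2 (7u): the incoming gradient, the original forward input, and the forward output, because the gradient is routed only to the positions that produced the maximum. A hedged call sketch (helper macros as above; the names g, a, b, h are illustrative):

    // g: gradient w.r.t. the reduced output, a: forward input, b: forward output (the maxima).
    // h receives the gradient w.r.t. a; it is nonzero only where a equals b along the reduced axis.
    ccv_nnc_cmd_exec(CMD_REDUCE_MAX_BACKWARD(0), ccv_nnc_no_hint, 0, TENSOR_LIST(g, a, b), TENSOR_LIST(h), 0);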
101 | | |
102 | | static int _ccv_nnc_reduce_min_forw_bitmask(const ccv_nnc_cmd_param_t cmd, const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size) |
103 | 2 | { |
104 | 2 | if (input_bitmasks[0] == 1u && output_bitmasks[0] == 1u) |
105 | 2 | return 1; |
106 | 0 | return 0; |
107 | 2 | } |
108 | | |
109 | | static int _ccv_nnc_reduce_min_back_bitmask(const ccv_nnc_cmd_param_t cmd, const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size) |
110 | 0 | { |
111 | | // Output the propagated error. |
112 | 0 | if ((input_bitmasks[0] & 7u) == 7u && output_bitmasks[0] == 1u) |
113 | 0 | return 1; |
114 | 0 | return 0; |
115 | 0 | } |
116 | | |
117 | | REGISTER_COMMAND(CCV_NNC_REDUCE_MIN_FORWARD)(ccv_nnc_cmd_registry_t* const registry) |
118 | | FIND_BACKEND(ccv_nnc_reduce_min_cpu_ref.c, mps/ccv_nnc_reduce_min_mps.m) |
119 | 1 | { |
120 | 1 | registry->bitmask = _ccv_nnc_reduce_min_forw_bitmask; |
121 | 1 | registry->tensor_auto = _ccv_nnc_reduce_tensor_auto_forw; |
122 | 1 | } |
123 | | |
124 | | REGISTER_COMMAND(CCV_NNC_REDUCE_MIN_BACKWARD)(ccv_nnc_cmd_registry_t* const registry) |
125 | | FIND_BACKEND(ccv_nnc_reduce_min_cpu_ref.c) |
126 | 1 | { |
127 | 1 | registry->bitmask = _ccv_nnc_reduce_min_back_bitmask; |
128 | 1 | registry->tensor_auto = ccv_nnc_hint_tensor_auto_backward_from_gradient; |
129 | 1 | } |
130 | | |
131 | | //@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_REDUCE_MIN_FORWARD) |
132 | | #define CMD_REDUCE_MIN_FORWARD(...) ccv_nnc_cmd(CCV_NNC_REDUCE_MIN_FORWARD, 0, CMD_REDUCE(__VA_ARGS__), 0) |
133 | | //@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_REDUCE_MIN_BACKWARD) |
134 | | #define CMD_REDUCE_MIN_BACKWARD(...) ccv_nnc_cmd(CCV_NNC_REDUCE_MIN_BACKWARD, 0, CMD_REDUCE(__VA_ARGS__), 0) |
135 | | |
136 | | static int _ccv_nnc_reduce_norm2_forw_bitmask(const ccv_nnc_cmd_param_t cmd, const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size) |
137 | 2 | { |
138 | 2 | if (input_bitmasks[0] == 1u && output_bitmasks[0] == 1u) |
139 | 2 | return 1; |
140 | 0 | return 0; |
141 | 2 | } |
142 | | |
143 | | static int _ccv_nnc_reduce_norm2_back_bitmask(const ccv_nnc_cmd_param_t cmd, const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size) |
144 | 0 | { |
145 | | // Output the propagated error. |
146 | 0 | if ((input_bitmasks[0] & 7u) == 7u && output_bitmasks[0] == 1u) |
147 | 0 | return 1; |
148 | 0 | return 0; |
149 | 0 | } |
150 | | |
151 | | REGISTER_COMMAND(CCV_NNC_REDUCE_NORM2_FORWARD)(ccv_nnc_cmd_registry_t* const registry) |
152 | | FIND_BACKEND(ccv_nnc_reduce_norm2_cpu_ref.c, gpu/ccv_nnc_reduce_norm2_gpu_cudnn.cu, mps/ccv_nnc_reduce_norm2_mps.m) |
153 | 1 | { |
154 | 1 | registry->bitmask = _ccv_nnc_reduce_norm2_forw_bitmask; |
155 | 1 | registry->tensor_auto = _ccv_nnc_reduce_tensor_auto_forw; |
156 | 1 | } |
157 | | |
158 | | REGISTER_COMMAND(CCV_NNC_REDUCE_NORM2_BACKWARD)(ccv_nnc_cmd_registry_t* const registry) |
159 | | FIND_BACKEND(ccv_nnc_reduce_norm2_cpu_ref.c, gpu/ccv_nnc_reduce_norm2_gpu_cudnn.cu, mps/ccv_nnc_reduce_norm2_mps.m) |
160 | 1 | { |
161 | 1 | registry->bitmask = _ccv_nnc_reduce_norm2_back_bitmask; |
162 | 1 | registry->tensor_auto = ccv_nnc_hint_tensor_auto_backward_from_gradient; |
163 | 1 | } |
164 | | |
165 | | //@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_REDUCE_NORM2_FORWARD) |
166 | | #define CMD_REDUCE_NORM2_FORWARD(...) ccv_nnc_cmd(CCV_NNC_REDUCE_NORM2_FORWARD, 0, CMD_REDUCE(__VA_ARGS__), 0) |
167 | | //@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_REDUCE_NORM2_BACKWARD) |
168 | | #define CMD_REDUCE_NORM2_BACKWARD(...) ccv_nnc_cmd(CCV_NNC_REDUCE_NORM2_BACKWARD, 0, CMD_REDUCE(__VA_ARGS__), 0) |
169 | | |
170 | | static int _ccv_nnc_argmax_forw_bitmask(const ccv_nnc_cmd_param_t cmd, const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size) |
171 | 2 | { |
172 | 2 | if (input_bitmasks[0] == 1u && output_bitmasks[0] == 1u) |
173 | 2 | return 1; |
174 | 0 | return 0; |
175 | 2 | } |
176 | | |
177 | | static int _ccv_nnc_argmax_back_bitmask(const ccv_nnc_cmd_param_t cmd, const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size) |
178 | 0 | { |
179 | | // Backward is not supported. |
180 | 0 | return 0; |
181 | 0 | } |
182 | | |
183 | | static void _ccv_nnc_argmax_tensor_auto_forw(const ccv_nnc_cmd_param_t cmd, const ccv_nnc_tensor_param_t* const inputs, const int input_size, const ccv_nnc_hint_t hint, ccv_nnc_tensor_param_t* const outputs, const int output_size) |
184 | 4 | { |
185 | 4 | assert(input_size == 1); |
186 | 4 | assert(output_size == 1); |
187 | 4 | outputs[0] = inputs[0]; |
188 | 4 | int i; |
189 | 8 | for (i = 0; i < cmd.reduce.count; i++) |
190 | 4 | outputs[0].dim[cmd.reduce.axis[i]] = 1; // Reduce the dimension to 1. |
191 | 4 | outputs[0].datatype = CCV_32S; |
192 | 4 | } |
193 | | |
194 | | static void _ccv_nnc_argmax_tensor_auto_back(const ccv_nnc_cmd_param_t cmd, const ccv_nnc_tensor_param_t* const inputs, const int input_size, const ccv_nnc_hint_t hint, ccv_nnc_tensor_param_t* const outputs, const int output_size) |
195 | 0 | { |
196 | | // Backward is not supported. |
197 | 0 | } |
198 | | |
199 | | REGISTER_COMMAND(CCV_NNC_ARGMAX_FORWARD)(ccv_nnc_cmd_registry_t* const registry) |
200 | | FIND_BACKEND(ccv_nnc_argmax_cpu_ref.c, gpu/ccv_nnc_argmax_gpu_ref.cu, mps/ccv_nnc_argmax_mps.m) |
201 | 1 | { |
202 | 1 | registry->bitmask = _ccv_nnc_argmax_forw_bitmask; |
203 | 1 | registry->tensor_auto = _ccv_nnc_argmax_tensor_auto_forw; |
204 | 1 | } |
205 | | |
206 | | REGISTER_COMMAND(CCV_NNC_ARGMAX_BACKWARD)(ccv_nnc_cmd_registry_t* const registry) |
207 | | FIND_BACKEND(ccv_nnc_argmax_cpu_ref.c, gpu/ccv_nnc_argmax_gpu_ref.cu, mps/ccv_nnc_argmax_mps.m) |
208 | 1 | { |
209 | 1 | registry->bitmask = _ccv_nnc_argmax_back_bitmask; |
210 | 1 | registry->tensor_auto = _ccv_nnc_argmax_tensor_auto_back; |
211 | 1 | } |
212 | | |
213 | | //@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_ARGMAX_FORWARD) |
214 | | #define CMD_ARGMAX_FORWARD(...) ccv_nnc_cmd(CCV_NNC_ARGMAX_FORWARD, 0, CMD_REDUCE(__VA_ARGS__), 0) |
215 | | //@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_ARGMAX_BACKWARD) |
216 | | #define CMD_ARGMAX_BACKWARD(...) ccv_nnc_cmd(CCV_NNC_ARGMAX_BACKWARD, 0, CMD_REDUCE(__VA_ARGS__), 0) |
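Note that _ccv_nnc_argmax_tensor_auto_forw forces the output datatype to CCV_32S, so the output tensor holds integer indices rather than values, while the backward command is registered but unsupported (its bitmask always returns 0). A rough sketch (helper macros as above):

    ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4, 3), 0);
    ccv_nnc_tensor_t* const idx = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32S, 1, 3), 0);
    // For each of the 3 columns, idx stores the row index (0..3) of the largest entry in a.
    ccv_nnc_cmd_exec(CMD_ARGMAX_FORWARD(0), ccv_nnc_no_hint, 0, TENSOR_LIST(a), TENSOR_LIST(idx), 0);
    ccv_nnc_tensor_free(a);
    ccv_nnc_tensor_free(idx);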
217 | | |
218 | | static int _ccv_nnc_argmin_forw_bitmask(const ccv_nnc_cmd_param_t cmd, const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size) |
219 | 2 | { |
220 | 2 | if (input_bitmasks[0] == 1u && output_bitmasks[0] == 1u) |
221 | 2 | return 1; |
222 | 0 | return 0; |
223 | 2 | } |
224 | | |
225 | | static int _ccv_nnc_argmin_back_bitmask(const ccv_nnc_cmd_param_t cmd, const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size) |
226 | 0 | { |
227 | | // Backward is not supported. |
228 | 0 | return 0; |
229 | 0 | } |
230 | | |
231 | | static void _ccv_nnc_argmin_tensor_auto_forw(const ccv_nnc_cmd_param_t cmd, const ccv_nnc_tensor_param_t* const inputs, const int input_size, const ccv_nnc_hint_t hint, ccv_nnc_tensor_param_t* const outputs, const int output_size) |
232 | 4 | { |
233 | 4 | assert(input_size == 1); |
234 | 4 | assert(output_size == 1); |
235 | 4 | outputs[0] = inputs[0]; |
236 | 4 | int i; |
237 | 8 | for (i = 0; i < cmd.reduce.count; i++) |
238 | 4 | outputs[0].dim[cmd.reduce.axis[i]] = 1; // Reduce the dimension to 1. |
239 | 4 | outputs[0].datatype = CCV_32S; |
240 | 4 | } |
241 | | |
242 | | static void _ccv_nnc_argmin_tensor_auto_back(const ccv_nnc_cmd_param_t cmd, const ccv_nnc_tensor_param_t* const inputs, const int input_size, const ccv_nnc_hint_t hint, ccv_nnc_tensor_param_t* const outputs, const int output_size) |
243 | 0 | { |
244 | | // Backward is not supported. |
245 | 0 | } |
246 | | |
247 | | REGISTER_COMMAND(CCV_NNC_ARGMIN_FORWARD)(ccv_nnc_cmd_registry_t* const registry) |
248 | | FIND_BACKEND(ccv_nnc_argmin_cpu_ref.c, gpu/ccv_nnc_argmin_gpu_ref.cu, mps/ccv_nnc_argmin_mps.m) |
249 | 1 | { |
250 | 1 | registry->bitmask = _ccv_nnc_argmin_forw_bitmask; |
251 | 1 | registry->tensor_auto = _ccv_nnc_argmin_tensor_auto_forw; |
252 | 1 | } |
253 | | |
254 | | REGISTER_COMMAND(CCV_NNC_ARGMIN_BACKWARD)(ccv_nnc_cmd_registry_t* const registry) |
255 | | FIND_BACKEND(ccv_nnc_argmin_cpu_ref.c, gpu/ccv_nnc_argmin_gpu_ref.cu, mps/ccv_nnc_argmin_mps.m) |
256 | 1 | { |
257 | 1 | registry->bitmask = _ccv_nnc_argmin_back_bitmask; |
258 | 1 | registry->tensor_auto = _ccv_nnc_argmin_tensor_auto_back; |
259 | 1 | } |
260 | | |
261 | | //@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_ARGMIN_FORWARD) |
262 | | #define CMD_ARGMIN_FORWARD(...) ccv_nnc_cmd(CCV_NNC_ARGMIN_FORWARD, 0, CMD_REDUCE(__VA_ARGS__), 0) |
263 | | //@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_ARGMIN_BACKWARD) |
264 | | #define CMD_ARGMIN_BACKWARD(...) ccv_nnc_cmd(CCV_NNC_ARGMIN_BACKWARD, 0, CMD_REDUCE(__VA_ARGS__), 0) |