/home/liu/actions-runner/_work/ccv/ccv/lib/nnc/cmd/loss/ccv_nnc_mse.c
Line | Count | Source (jump to first uncovered line) |
1 | | #include "ccv.h" |
2 | | #include "nnc/ccv_nnc.h" |
3 | | #include "nnc/ccv_nnc_easy.h" |
4 | | #include "nnc/ccv_nnc_internal.h" |
5 | | |
6 | | static int _ccv_nnc_mse_forw_bitmask(const ccv_nnc_cmd_param_t cmd, const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size) |
7 | 0 | { |
8 | | // input: activation, label |
9 | | // output: loss |
10 | 0 | if ((input_bitmasks[0] & 3u) == 3u && output_bitmasks[0] == 1u) |
11 | 0 | return 1; |
12 | 0 | return 0; |
13 | 0 | } |
14 | | |
15 | | static int _ccv_nnc_mse_back_bitmask(const ccv_nnc_cmd_param_t cmd, const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size) |
16 | 21 | { |
17 | | // input: [gradient of loss], activation, label, [loss] |
18 | | // output: w.r.t [activation], [label] |
19 | 21 | if ((input_bitmasks[0] & 7u) == 7u && ((output_bitmasks[0] & 1u) == 1u || (output_bitmasks[0] & 2u) == 2u))
20 | 9 | return 1; |
21 | 12 | return 0; |
22 | 21 | } |
23 | | |
24 | | static void _ccv_nnc_mse_tensor_auto_forw(const ccv_nnc_cmd_param_t cmd, const ccv_nnc_tensor_param_t* const inputs, const int input_size, const ccv_nnc_hint_t hint, ccv_nnc_tensor_param_t* const outputs, const int output_size) |
25 | 9 | { |
26 | 9 | assert(input_size == 2); |
27 | 9 | assert(output_size >= 1); |
28 | 9 | outputs[0] = inputs[0]; |
29 | | // The output should have the same dimensionality as the label data. |
30 | 9 | memcpy(outputs[0].dim, inputs[1].dim, sizeof(outputs[0].dim)); |
31 | 9 | const int nd = ccv_nnc_tensor_nd(outputs[0].dim); |
32 | | // Set channel to 1 if it is not.. |
33 | 9 | if (ccv_nnc_tensor_get_c(outputs[0]) > 1) |
34 | 9 | ccv_nnc_tensor_set_c(&outputs[0], nd, 1); |
35 | 9 | } |
36 | | |
37 | | static void _ccv_nnc_mse_tensor_auto_back(const ccv_nnc_cmd_param_t cmd, const ccv_nnc_tensor_param_t* const inputs, const int input_size, const ccv_nnc_hint_t hint, ccv_nnc_tensor_param_t* const outputs, const int output_size) |
38 | 3 | { |
39 | 3 | assert(input_size >= 3); |
40 | 3 | assert(output_size >= 1); |
41 | 3 | outputs[0] = inputs[1]; |
42 | 3 | if (output_size > 1) |
43 | 3 | outputs[1] = inputs[2]; |
44 | 3 | } |
45 | | |
46 | | REGISTER_COMMAND(CCV_NNC_MSE_FORWARD)(ccv_nnc_cmd_registry_t* const registry) |
47 | | FIND_BACKEND(ccv_nnc_mse_cpu_ref.c, gpu/ccv_nnc_mse_gpu_ref.cu, mps/ccv_nnc_mse_mps.m) |
48 | 1 | { |
49 | 1 | registry->bitmask = _ccv_nnc_mse_forw_bitmask; |
50 | 1 | registry->tensor_auto = _ccv_nnc_mse_tensor_auto_forw; |
51 | 1 | } |
52 | | |
53 | | REGISTER_COMMAND(CCV_NNC_MSE_BACKWARD)(ccv_nnc_cmd_registry_t* const registry) |
54 | | FIND_BACKEND(ccv_nnc_mse_cpu_ref.c, gpu/ccv_nnc_mse_gpu_ref.cu, mps/ccv_nnc_mse_mps.m) |
55 | 1 | { |
56 | 1 | registry->flags = CCV_NNC_CMD_ATTR_NULL_IS_ONES; |
57 | 1 | registry->bitmask = _ccv_nnc_mse_back_bitmask; |
58 | 1 | registry->tensor_auto = _ccv_nnc_mse_tensor_auto_back; |
59 | 1 | } |
60 | | |
61 | | //@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_MSE_FORWARD) |
62 | | #define CMD_MSE_FORWARD(_reduce_op) ccv_nnc_cmd(CCV_NNC_MSE_FORWARD, 0, ((ccv_nnc_cmd_param_t){.size={.dim={1,1,1}},.mse={.reduce_op=_reduce_op}}), 0) |
63 | | //@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_MSE_BACKWARD) |
64 | | #define CMD_MSE_BACKWARD(_reduce_op) ccv_nnc_cmd(CCV_NNC_MSE_BACKWARD, 0, ((ccv_nnc_cmd_param_t){.size={.dim={1,1,1}},.mse={.reduce_op=_reduce_op}}), 0) |