Coverage Report

Created: 2022-08-03 23:52

/home/liu/buildslave/linux-x64-runtests/build/lib/nnc/cmd/reduce/ccv_nnc_reduce.c
Line | Count | Source
   1 |       | #include "ccv.h"
   2 |       | #include "nnc/ccv_nnc.h"
   3 |       | #include "nnc/ccv_nnc_internal.h"
   4 |       |
   5 |       | static void _ccv_nnc_reduce_tensor_auto_forw(const ccv_nnc_cmd_param_t cmd, const ccv_nnc_tensor_param_t* const inputs, const int input_size, const ccv_nnc_hint_t hint, ccv_nnc_tensor_param_t* const outputs, const int output_size)
   6 | 4.03k | {
   7 | 4.03k |   assert(input_size == 1);
   8 | 4.03k |   assert(output_size == 1);
   9 | 4.03k |   outputs[0] = inputs[0];
  10 | 4.03k |   int i;
  11 | 12.0k |   for (i = 0; i < cmd.reduce.count; i++) // i++ count: 8.06k
  12 | 8.06k |     outputs[0].dim[cmd.reduce.axis[i]] = 1; // Reduce the dimension to 1.
  13 | 4.03k | }
  14 |       |
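The shape rule above: the output starts as a copy of the input's parameters, then every axis listed in cmd.reduce.axis collapses to 1, so reducing a [8, 4, 4] tensor over axis 1 auto-infers a [8, 1, 4] output. A standalone sketch of the same rule (illustrative dims and axes, not the real ccv_nnc types):

    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical input shape and reduce axes; the loop mirrors
           _ccv_nnc_reduce_tensor_auto_forw above. */
        int dim[3] = { 8, 4, 4 };
        const int axis[1] = { 1 };
        const int count = 1;
        int i;
        for (i = 0; i < count; i++)
            dim[axis[i]] = 1; /* reduce the dimension to 1 */
        printf("%d %d %d\n", dim[0], dim[1], dim[2]); /* prints: 8 1 4 */
        return 0;
    }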
  15 |       | static int _ccv_nnc_reduce_sum_forw_bitmask(const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size)
  16 |     4 | {
  17 |     4 |   if (input_bitmasks[0] == 1u && output_bitmasks[0] == 1u) // RHS count: 2
  18 |     2 |     return 1;
  19 |     2 |   return 0;
  20 |     4 | }
  21 |       |
  22 |       | static int _ccv_nnc_reduce_sum_back_bitmask(const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size)
  23 | 8.03k | {
  24 |       |   // Output the propagated error.
  25 | 8.03k |   if ((input_bitmasks[0] & 1u) == 1u && output_bitmasks[0] == 1u) // RHS count: 6.02k
  26 | 6.02k |     return 1;
  27 | 2.01k |   return 0;
  28 | 8.03k | }
  29 |       |
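The bitmask callbacks are validity checks for the graph compiler: bit i of input_bitmasks[0] (and likewise output_bitmasks[0]) records whether tensor slot i is supplied. Sum forward therefore accepts exactly one input and one output, while sum backward only insists that slot 0, the incoming gradient, is present, tolerating extra unused slots. A standalone sketch of the convention (helper names here are illustrative, not part of the library):

    #include <assert.h>
    #include <stdint.h>

    /* Mirrors _ccv_nnc_reduce_sum_forw_bitmask: valid iff exactly input
       slot 0 and output slot 0 are occupied (bit i = slot i is given). */
    static int sum_forw_ok(uint64_t in, uint64_t out)
    {
        return in == 1u && out == 1u;
    }

    int main(void)
    {
        assert(sum_forw_ok(1u, 1u));  /* x -> y: accepted */
        assert(!sum_forw_ok(3u, 1u)); /* unexpected second input: rejected */
        /* The backward variant checks (in & 1u) == 1u instead, so only the
           incoming-gradient bit is mandatory. */
        return 0;
    }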
  30 |       | REGISTER_COMMAND(CCV_NNC_REDUCE_SUM_FORWARD)(ccv_nnc_cmd_registry_t* const registry)
  31 |       |   FIND_BACKEND(ccv_nnc_reduce_sum_cpu_ref.c, gpu/ccv_nnc_reduce_sum_gpu_cudnn.cu)
  32 |     1 | {
  33 |     1 |   registry->bitmask = _ccv_nnc_reduce_sum_forw_bitmask;
  34 |     1 |   registry->tensor_auto = _ccv_nnc_reduce_tensor_auto_forw;
  35 |     1 | }
  36 |       |
  37 |       | REGISTER_COMMAND(CCV_NNC_REDUCE_SUM_BACKWARD)(ccv_nnc_cmd_registry_t* const registry)
  38 |       |   FIND_BACKEND(ccv_nnc_reduce_sum_cpu_ref.c, gpu/ccv_nnc_reduce_sum_gpu_cudnn.cu)
  39 |     1 | {
  40 |     1 |   registry->bitmask = _ccv_nnc_reduce_sum_back_bitmask;
  41 |     1 |   registry->tensor_auto = ccv_nnc_hint_tensor_auto_backward_from_gradient;
  42 |     1 | }
  43 |       |
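Each REGISTER_COMMAND block fills in the command's registry entry with the bitmask validator and the tensor_auto shape-inference callback defined above, while the FIND_BACKEND line names the source files that carry the concrete kernels; for the sum command that is, presumably, the CPU reference implementation and a cuDNN-backed GPU one. Note that the backward registration delegates shape inference to the library-wide ccv_nnc_hint_tensor_auto_backward_from_gradient helper instead of a command-specific rule.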
  44 |       | //@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_REDUCE_SUM_FORWARD)
  45 |       | #define CMD_REDUCE_SUM_FORWARD(...) ccv_nnc_cmd(CCV_NNC_REDUCE_SUM_FORWARD, 0, CMD_REDUCE(__VA_ARGS__), 0)
  46 |       | //@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_REDUCE_SUM_BACKWARD)
  47 |       | #define CMD_REDUCE_SUM_BACKWARD(...) ccv_nnc_cmd(CCV_NNC_REDUCE_SUM_BACKWARD, 0, CMD_REDUCE(__VA_ARGS__), 0)
  48 |       |
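The //@REGISTER_EASY_COMMAND_MACRO annotations expose one-line constructors for these commands; the variadic arguments pass through CMD_REDUCE, i.e. they list the axes to reduce over. A hedged usage sketch against the public ccv_nnc API (shapes are illustrative and helper signatures may differ across versions):

    ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 8, 4, 4), 0);
    ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 8, 1, 4), 0);
    /* Sum a over axis 1 into b; CMD_REDUCE_SUM_FORWARD(1) expands to
       ccv_nnc_cmd(CCV_NNC_REDUCE_SUM_FORWARD, 0, CMD_REDUCE(1), 0). */
    ccv_nnc_cmd_exec(CMD_REDUCE_SUM_FORWARD(1), ccv_nnc_no_hint, 0, TENSOR_LIST(a), TENSOR_LIST(b), 0);
    ccv_nnc_tensor_free(a);
    ccv_nnc_tensor_free(b);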
  49 |       | static int _ccv_nnc_reduce_max_forw_bitmask(const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size)
  50 |     4 | {
  51 |     4 |   if (input_bitmasks[0] == 1u && output_bitmasks[0] == 1u) // RHS count: 2
  52 |     2 |     return 1;
  53 |     2 |   return 0;
  54 |     4 | }
  55 |       |
  56 |       | static int _ccv_nnc_reduce_max_back_bitmask(const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size)
  57 |     4 | {
  58 |       |   // Output the propagated error.
  59 |     4 |   if ((input_bitmasks[0] & 7u) == 7u && output_bitmasks[0] == 1u) // RHS count: 1
  60 |     1 |     return 1;
  61 |     3 |   return 0;
  62 |     4 | }
  63 |       |
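The 7u (binary 111) mask means max backward requires input slots 0 through 2: the incoming gradient g, the forward input x, and the forward output y. That matches the math: the gradient flows only to the positions that attained the maximum, which is found by comparing x against y. A sketch of that routing rule for a single reduced group (illustrative only; the real kernels live in ccv_nnc_reduce_max_cpu_ref.c):

    /* dx[i] receives g where x[i] hit the max y, and 0 elsewhere. */
    static void reduce_max_back_1d(const float g, const float* const x, const float y, float* const dx, const int n)
    {
        int i;
        for (i = 0; i < n; i++)
            dx[i] = (x[i] == y) ? g : 0;
    }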
  64 |       | REGISTER_COMMAND(CCV_NNC_REDUCE_MAX_FORWARD)(ccv_nnc_cmd_registry_t* const registry)
  65 |       |   FIND_BACKEND(ccv_nnc_reduce_max_cpu_ref.c)
  66 |     1 | {
  67 |     1 |   registry->bitmask = _ccv_nnc_reduce_max_forw_bitmask;
  68 |     1 |   registry->tensor_auto = _ccv_nnc_reduce_tensor_auto_forw;
  69 |     1 | }
  70 |       |
  71 |       | REGISTER_COMMAND(CCV_NNC_REDUCE_MAX_BACKWARD)(ccv_nnc_cmd_registry_t* const registry)
  72 |       |   FIND_BACKEND(ccv_nnc_reduce_max_cpu_ref.c)
  73 |     1 | {
  74 |     1 |   registry->bitmask = _ccv_nnc_reduce_max_back_bitmask;
  75 |     1 |   registry->tensor_auto = ccv_nnc_hint_tensor_auto_backward_from_gradient;
  76 |     1 | }
  77 |       |
  78 |       | //@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_REDUCE_MAX_FORWARD)
  79 |       | #define CMD_REDUCE_MAX_FORWARD(...) ccv_nnc_cmd(CCV_NNC_REDUCE_MAX_FORWARD, 0, CMD_REDUCE(__VA_ARGS__), 0)
  80 |       | //@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_REDUCE_MAX_BACKWARD)
  81 |       | #define CMD_REDUCE_MAX_BACKWARD(...) ccv_nnc_cmd(CCV_NNC_REDUCE_MAX_BACKWARD, 0, CMD_REDUCE(__VA_ARGS__), 0)
  82 |       |
  83 |       | static int _ccv_nnc_reduce_norm2_forw_bitmask(const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size)
  84 |     4 | {
  85 |     4 |   if (input_bitmasks[0] == 1u && output_bitmasks[0] == 1u) // RHS count: 2
  86 |     2 |     return 1;
  87 |     2 |   return 0;
  88 |     4 | }
  89 |       |
  90 |       | static int _ccv_nnc_reduce_norm2_back_bitmask(const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size)
  91 |     0 | {
  92 |       |   // Output the propagated error.
  93 |     0 |   if ((input_bitmasks[0] & 7u) == 7u && output_bitmasks[0] == 1u)
  94 |     0 |     return 1;
  95 |     0 |   return 0;
  96 |     0 | }
  97 |       |
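This backward bitmask is entirely uncovered in this run (all counts 0). Like max, its 7u mask demands g, x, and y, which again matches the calculus: for y = sqrt(sum_i x_i^2), the partial derivative dy/dx_i is x_i / y, so the propagated gradient is g * x_i / y. A sketch independent of the actual kernels:

    /* Gradient of y = sqrt(sum_i x_i * x_i) over one reduced group. */
    static void reduce_norm2_back_1d(const float g, const float* const x, const float y, float* const dx, const int n)
    {
        int i;
        for (i = 0; i < n; i++)
            dx[i] = g * x[i] / y; /* singular at y == 0, like the norm's derivative */
    }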
  98 |       | REGISTER_COMMAND(CCV_NNC_REDUCE_NORM2_FORWARD)(ccv_nnc_cmd_registry_t* const registry)
  99 |       |   FIND_BACKEND(ccv_nnc_reduce_norm2_cpu_ref.c, gpu/ccv_nnc_reduce_norm2_gpu_cudnn.cu)
 100 |     1 | {
 101 |     1 |   registry->bitmask = _ccv_nnc_reduce_norm2_forw_bitmask;
 102 |     1 |   registry->tensor_auto = _ccv_nnc_reduce_tensor_auto_forw;
 103 |     1 | }
 104 |       |
 105 |       | REGISTER_COMMAND(CCV_NNC_REDUCE_NORM2_BACKWARD)(ccv_nnc_cmd_registry_t* const registry)
 106 |       |   FIND_BACKEND(ccv_nnc_reduce_norm2_cpu_ref.c, gpu/ccv_nnc_reduce_norm2_gpu_cudnn.cu)
 107 |     1 | {
 108 |     1 |   registry->bitmask = _ccv_nnc_reduce_norm2_back_bitmask;
 109 |     1 |   registry->tensor_auto = ccv_nnc_hint_tensor_auto_backward_from_gradient;
 110 |     1 | }
 111 |       |
 112 |       | //@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_REDUCE_NORM2_FORWARD)
 113 |       | #define CMD_REDUCE_NORM2_FORWARD(...) ccv_nnc_cmd(CCV_NNC_REDUCE_NORM2_FORWARD, 0, CMD_REDUCE(__VA_ARGS__), 0)
 114 |       | //@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_REDUCE_NORM2_BACKWARD)
 115 |       | #define CMD_REDUCE_NORM2_BACKWARD(...) ccv_nnc_cmd(CCV_NNC_REDUCE_NORM2_BACKWARD, 0, CMD_REDUCE(__VA_ARGS__), 0)
 116 |       |
 117 |       | static int _ccv_nnc_argmax_forw_bitmask(const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size)
 118 |     0 | {
 119 |     0 |   if (input_bitmasks[0] == 1u && output_bitmasks[0] == 1u)
 120 |     0 |     return 1;
 121 |     0 |   return 0;
 122 |     0 | }
 123 |       |
 124 |       | static int _ccv_nnc_argmax_back_bitmask(const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size)
 125 |     0 | {
 126 |       |   // Not supported.
 127 |     0 |   return 0;
 128 |     0 | }
 129 |       |
 130 |       | static void _ccv_nnc_argmax_tensor_auto_forw(const ccv_nnc_cmd_param_t cmd, const ccv_nnc_tensor_param_t* const inputs, const int input_size, const ccv_nnc_hint_t hint, ccv_nnc_tensor_param_t* const outputs, const int output_size)
 131 |     0 | {
 132 |     0 |   assert(input_size == 1);
 133 |     0 |   assert(output_size == 1);
 134 |     0 |   outputs[0] = inputs[0];
 135 |     0 |   int i;
 136 |     0 |   for (i = 0; i < cmd.reduce.count; i++)
 137 |     0 |     outputs[0].dim[cmd.reduce.axis[i]] = 1; // Reduce the dimension to 1.
 138 |     0 |   outputs[0].datatype = CCV_32S;
 139 |     0 | }
 140 |       |
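Argmax follows the same shape rule as the reductions above but then overrides the datatype: the output carries indices, so it is forced to CCV_32S (32-bit integers) regardless of the input's element type. A CCV_32F input of shape [8, 4, 4] reduced over axis 1 thus auto-infers a CCV_32S output of shape [8, 1, 4]. None of these argmax callbacks is exercised by this run (all counts 0), though the commands themselves are registered below.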
 141 |       | static void _ccv_nnc_argmax_tensor_auto_back(const ccv_nnc_cmd_param_t cmd, const ccv_nnc_tensor_param_t* const inputs, const int input_size, const ccv_nnc_hint_t hint, ccv_nnc_tensor_param_t* const outputs, const int output_size)
 142 |     0 | {
 143 |       |   // Not supported.
 144 |     0 | }
 145 |       |
 146 |       | REGISTER_COMMAND(CCV_NNC_ARGMAX_FORWARD)(ccv_nnc_cmd_registry_t* const registry)
 147 |       |   FIND_BACKEND(ccv_nnc_argmax_cpu_ref.c, gpu/ccv_nnc_argmax_gpu_ref.cu)
 148 |     1 | {
 149 |     1 |   registry->bitmask = _ccv_nnc_argmax_forw_bitmask;
 150 |     1 |   registry->tensor_auto = _ccv_nnc_argmax_tensor_auto_forw;
 151 |     1 | }
 152 |       |
 153 |       | REGISTER_COMMAND(CCV_NNC_ARGMAX_BACKWARD)(ccv_nnc_cmd_registry_t* const registry)
 154 |       |   FIND_BACKEND(ccv_nnc_argmax_cpu_ref.c, gpu/ccv_nnc_argmax_gpu_ref.cu)
 155 |     1 | {
 156 |     1 |   registry->bitmask = _ccv_nnc_argmax_back_bitmask;
 157 |     1 |   registry->tensor_auto = _ccv_nnc_argmax_tensor_auto_back;
 158 |     1 | }
 159 |       |
 160 |       | //@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_ARGMAX_FORWARD)
 161 |       | #define CMD_ARGMAX_FORWARD(...) ccv_nnc_cmd(CCV_NNC_ARGMAX_FORWARD, 0, CMD_REDUCE(__VA_ARGS__), 0)
 162 |       | //@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_ARGMAX_BACKWARD)
 163 |       | #define CMD_ARGMAX_BACKWARD(...) ccv_nnc_cmd(CCV_NNC_ARGMAX_BACKWARD, 0, CMD_REDUCE(__VA_ARGS__), 0)