Coverage Report

Created: 2019-07-03 22:50

/home/liu/buildslave/linux-x64-runtests/build/lib/nnc/cmd/pool/ccv_nnc_pool.c
Line  Count  Source
   1         #include <ccv.h>
   2         #include <nnc/ccv_nnc.h>
   3         #include <nnc/ccv_nnc_easy.h>
   4         #include <nnc/ccv_nnc_internal.h>
   5
   6         static int _ccv_nnc_max_pool_forw_bitmask(const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size)
   7     30  {
   8     30    if ((input_bitmasks[0] & 1u) == 1u && output_bitmasks[0] == 1u)
   9     15      return 1;
  10     15    return 0;
  11     15  }
  12
  13         static int _ccv_nnc_max_pool_back_bitmask(const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size)
  14     50  {
  15     50    if ((input_bitmasks[0] & 7u) == ((1u << 0) | (1u << 1) | (1u << 2)) && output_bitmasks[0] == 1u)
  16     13      return 1;
  17     37    return 0;
  18     37  }
  19
  20         static void _ccv_nnc_pool_tensor_auto_forw(const ccv_nnc_cmd_param_t cmd, const ccv_nnc_tensor_param_t* inputs, const int input_size, const ccv_nnc_hint_t hint, ccv_nnc_tensor_param_t* outputs, const int output_size)
  21    184  {
  22    184    assert(output_size == 1);
  23    184    outputs[0] = inputs[0];
  24    184    ccv_nnc_hint_tensor_forward(cmd, inputs[0], hint, outputs);
  25    184  }
  26
  27         static void _ccv_nnc_pool_tensor_auto_back(const ccv_nnc_cmd_param_t cmd, const ccv_nnc_tensor_param_t* inputs, const int input_size, const ccv_nnc_hint_t hint, ccv_nnc_tensor_param_t* outputs, const int output_size)
  28     71  {
  29     71    assert(output_size == 1);
  30     71    outputs[0] = inputs[0];
  31     71    ccv_nnc_hint_tensor_backward(cmd, inputs[0], hint, outputs);
  32     71  }
  33
  34         REGISTER_COMMAND(CCV_NNC_MAX_POOL_FORWARD)(ccv_nnc_cmd_registry_t* const registry)
  35           FIND_BACKEND(ccv_nnc_max_pool_cpu_ref.c, gpu/ccv_nnc_max_pool_gpu_cudnn.cu)
  36      1  {
  37      1    registry->bitmask = _ccv_nnc_max_pool_forw_bitmask;
  38      1    registry->tensor_auto = _ccv_nnc_pool_tensor_auto_forw;
  39      1  }
  40
  41         REGISTER_COMMAND(CCV_NNC_MAX_POOL_BACKWARD)(ccv_nnc_cmd_registry_t* const registry)
  42           FIND_BACKEND(ccv_nnc_max_pool_cpu_ref.c, gpu/ccv_nnc_max_pool_gpu_cudnn.cu)
  43      1  {
  44      1    registry->bitmask = _ccv_nnc_max_pool_back_bitmask;
  45      1    registry->tensor_auto = _ccv_nnc_pool_tensor_auto_back;
  46      1  }
  47
  48         //@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_MAX_POOL_FORWARD)
  49         #define CMD_MAX_POOL_FORWARD(rows, cols) ccv_nnc_cmd(CCV_NNC_MAX_POOL_FORWARD, 0, ((ccv_nnc_cmd_param_t){.size={.dim={rows, cols,1}}}), 0)
  50         //@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_MAX_POOL_BACKWARD)
  51         #define CMD_MAX_POOL_BACKWARD(rows, cols) ccv_nnc_cmd(CCV_NNC_MAX_POOL_BACKWARD, 0, ((ccv_nnc_cmd_param_t){.size={.dim={rows, cols,1}}}), 0)
  52
  53         static int _ccv_nnc_avg_pool_forw_bitmask(const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size)
  54     24  {
  55     24    if ((input_bitmasks[0] & 1u) == 1u && output_bitmasks[0] == 1u)
  56     12      return 1;
  57     12    return 0;
  58     12  }
  59
  60         static int _ccv_nnc_avg_pool_back_bitmask(const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size)
  61     86  {
  62     86    if ((input_bitmasks[0] & 1u) == 1u && output_bitmasks[0] == 1u)
  63     54      return 1;
  64     32    return 0;
  65     32  }
  66
  67         REGISTER_COMMAND(CCV_NNC_AVERAGE_POOL_FORWARD)(ccv_nnc_cmd_registry_t* const registry)
  68           FIND_BACKEND(ccv_nnc_avg_pool_cpu_ref.c, gpu/ccv_nnc_avg_pool_gpu_cudnn.cu)
  69      1  {
  70      1    registry->bitmask = _ccv_nnc_avg_pool_forw_bitmask;
  71      1    registry->tensor_auto = _ccv_nnc_pool_tensor_auto_forw;
  72      1  }
  73
  74         REGISTER_COMMAND(CCV_NNC_AVERAGE_POOL_BACKWARD)(ccv_nnc_cmd_registry_t* const registry)
  75           FIND_BACKEND(ccv_nnc_avg_pool_cpu_ref.c, gpu/ccv_nnc_avg_pool_gpu_cudnn.cu)
  76      1  {
  77      1    registry->bitmask = _ccv_nnc_avg_pool_back_bitmask;
  78      1    registry->tensor_auto = _ccv_nnc_pool_tensor_auto_back;
  79      1  }
  80
  81         //@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_AVERAGE_POOL_FORWARD)
  82         #define CMD_AVERAGE_POOL_FORWARD(rows, cols) ccv_nnc_cmd(CCV_NNC_AVERAGE_POOL_FORWARD, 0, ((ccv_nnc_cmd_param_t){.size={.dim={rows, cols,1}}}), 0)
  83         //@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_AVERAGE_POOL_BACKWARD)
  84         #define CMD_AVERAGE_POOL_BACKWARD(rows, cols) ccv_nnc_cmd(CCV_NNC_AVERAGE_POOL_BACKWARD, 0, ((ccv_nnc_cmd_param_t){.size={.dim={rows, cols,1}}}), 0)
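
The execution counts above presumably come from the test run on this buildslave; a call along the following lines is the kind of usage that drives _ccv_nnc_max_pool_forw_bitmask and _ccv_nnc_pool_tensor_auto_forw through the CMD_MAX_POOL_FORWARD macro defined at line 49 of the file. This is a minimal sketch for orientation only, not part of the report: the tensor helpers (CPU_TENSOR_NHWC, TENSOR_LIST) and ccv_nnc_hint_auto are assumed to be available from ccv_nnc_easy.h / ccv_nnc.h.

/* Illustrative sketch, not part of the coverage report: a 2x2 max pool over an
 * 8x8x1 CPU tensor, written the way the nnc unit tests typically call a command.
 * CPU_TENSOR_NHWC and TENSOR_LIST are assumed ccv_nnc_easy.h helpers. */
#include <ccv.h>
#include <nnc/ccv_nnc.h>
#include <nnc/ccv_nnc_easy.h>

int main(void)
{
	/* 8x8x1 input and 4x4x1 output; the stride is inferred from the shapes by hint auto. */
	ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 8, 8, 1), 0);
	ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4, 4, 1), 0);
	int i;
	for (i = 0; i < 8 * 8; i++)
		a->data.f32[i] = i; /* deterministic fill so the pooled maxima are predictable */
	const ccv_nnc_cmd_t cmd = CMD_MAX_POOL_FORWARD(2, 2); /* easy macro from line 49 above */
	const ccv_nnc_hint_t hint = ccv_nnc_hint_auto(cmd.info, a->info, b->info);
	ccv_nnc_cmd_exec(cmd, hint, 0, TENSOR_LIST(a), TENSOR_LIST(b), 0);
	ccv_nnc_tensor_free(b);
	ccv_nnc_tensor_free(a);
	return 0;
}

Running a pooling command this way dispatches to one of the backends registered above (ccv_nnc_max_pool_cpu_ref.c on CPU, the cuDNN backend on GPU), which is why both the bitmask and tensor_auto callbacks show nonzero counts in this report.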