Coverage Report

Created: 2019-07-03 22:50

/home/liu/buildslave/linux-x64-runtests/build/lib/nnc/cmd/convolution/ccv_nnc_convolution.c
Line  Count  Source
   1         #include <ccv.h>
   2         #include <nnc/ccv_nnc.h>
   3         #include <nnc/ccv_nnc_easy.h>
   4         #include <nnc/ccv_nnc_internal.h>
   5
   6         static int _ccv_nnc_conv_forw_bitmask(const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size)
   7    128  {
   8    128    if (input_size == 3 && (input_bitmasks[0] & 7u) == ((1u << 0) | (1u << 1) | (1u << 2)) && output_bitmasks[0] == 1u)
                 (region counts on this line: 80, 20)
   9     20      return 1;
  10    108    // Ignore bias.
  11    108    if (input_size == 2 && (input_bitmasks[0] & 3u) == ((1u << 0) | (1u << 1)) && output_bitmasks[0] == 1u)
                 (region counts on this line: 48, 16)
  12     16      return 1;
  13     92    return 0;
  14     92  }
  15
  16         static int _ccv_nnc_conv_back_bitmask(const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size)
  17    335  {
  18    335    // Output the propagated error, gradient w.r.t. w and bias.
  19    335    if ((input_bitmasks[0] & 7u) == ((1u << 0) | (1u << 1) | (1u << 2) | (0 << 3)) && output_bitmasks[0] == ((1u << 0) | (1u << 1) | (1u << 2)))
                 (region count on this line: 89)
  20     45      return 1;
  21    290    // Ignore bias.
  22    290    if ((input_bitmasks[0] & 7u) == ((1u << 0) | (1u << 1) | (1u << 2) | (0 << 3)) && output_bitmasks[0] == ((1u << 0) | (1u << 1) | (0u << 2)))
                 (region count on this line: 44)
  23     30      return 1;
  24    260    // Don't propagate error, only gradient w.r.t. w and bias.
  25    260    if ((input_bitmasks[0] & 3u) == ((1u << 0) | (1u << 1) | (0 << 2) | (0 << 3)) && output_bitmasks[0] == ((0 << 0) | (1u << 1) | (1u << 2)))
                 (region count on this line: 104)
  26     48      return 1;
  27    212    // Ignore bias.
  28    212    if ((input_bitmasks[0] & 3u) == ((1u << 0) | (1u << 1) | (0 << 2) | (0 << 3)) && output_bitmasks[0] == ((0 << 0) | (1u << 1) | (0u << 2)))
                 (region count on this line: 56)
  29      6      return 1;
  30    206    return 0;
  31    206  }
  32
  33         static void _ccv_nnc_conv_tensor_auto_forw(const ccv_nnc_cmd_param_t cmd, const ccv_nnc_tensor_param_t* inputs, const int input_size, const ccv_nnc_hint_t hint, ccv_nnc_tensor_param_t* outputs, const int output_size)
  34    290  {
  35    290    assert(output_size == 1);
  36    290    outputs[0].type = inputs[0].type;
  37    290    outputs[0].format = inputs[0].format;
  38    290    outputs[0].datatype = inputs[0].datatype;
  39    290    // Get the channel output from the weight matrix.
  40    290    const int count = ccv_nnc_tensor_get_n(inputs[1]);
  41    290    assert(count == cmd.convolution.count);
  42    290    ccv_nnc_tensor_set_c(outputs, ccv_nnc_tensor_nd(inputs[0].dim), count);
  43    290    ccv_nnc_tensor_set_n(outputs, ccv_nnc_tensor_get_n(inputs[0]));
  44    290    ccv_nnc_hint_tensor_forward(cmd, inputs[0], hint, outputs);
  45    290  }
  46
  47         REGISTER_COMMAND(CCV_NNC_CONVOLUTION_FORWARD)(ccv_nnc_cmd_registry_t* const registry)
  48           FIND_BACKEND(ccv_nnc_conv_cpu_ref.c, ccv_nnc_conv_cpu_opt.c, gpu/ccv_nnc_conv_gpu_cudnn.cu)
  49      1  {
  50      1    registry->bitmask = _ccv_nnc_conv_forw_bitmask;
  51      1    registry->tensor_auto = _ccv_nnc_conv_tensor_auto_forw;
  52      1  }
  53
  54         REGISTER_COMMAND(CCV_NNC_CONVOLUTION_BACKWARD)(ccv_nnc_cmd_registry_t* const registry)
  55           FIND_BACKEND(ccv_nnc_conv_cpu_ref.c, ccv_nnc_conv_cpu_opt.c, gpu/ccv_nnc_conv_gpu_cudnn.cu)
  56      1  {
  57      1    registry->bitmask = _ccv_nnc_conv_back_bitmask;
  58      1    registry->tensor_auto = ccv_nnc_hint_tensor_auto_backward_from_inputs;
  59      1  }
  60
  61         //@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_CONVOLUTION_FORWARD)
  62         #define CMD_CONVOLUTION_FORWARD(_groups, _count, ...) ccv_nnc_cmd(CCV_NNC_CONVOLUTION_FORWARD, 0, ((ccv_nnc_cmd_param_t){.size={.dim={__VA_ARGS__}},.convolution={.count=_count,.groups=_groups}}), 0)
  63         //@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_CONVOLUTION_BACKWARD)
  64         #define CMD_CONVOLUTION_BACKWARD(_groups, _count, ...) ccv_nnc_cmd(CCV_NNC_CONVOLUTION_BACKWARD, 0, ((ccv_nnc_cmd_param_t){.size={.dim={__VA_ARGS__}},.convolution={.count=_count,.groups=_groups}}), 0)
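
The CMD_CONVOLUTION_FORWARD macro at the bottom of the listing builds a ccv_nnc_cmd_t from a group count, an output channel count, and the filter dimensions, and the registered _ccv_nnc_conv_tensor_auto_forw routine fills in the output tensor shape. A minimal sketch of how that forward command could be driven on the CPU follows; the 32x32x3 input, the 64x3x3x3 weight layout, the use of ccv_nnc_no_hint, and the main() harness are illustrative assumptions, not part of this source file.

#include <ccv.h>
#include <nnc/ccv_nnc.h>
#include <nnc/ccv_nnc_easy.h>

int main(void)
{
	ccv_nnc_init();
	// 1 group, 64 output channels, 3x3 filters over 3 input channels (the trailing varargs become cmd.info.size.dim).
	const ccv_nnc_cmd_t cmd = CMD_CONVOLUTION_FORWARD(1, 64, 3, 3, 3);
	// Hypothetical shapes: a 32x32 RGB image in NHWC layout, weights as count x kh x kw x channels, one bias per filter.
	ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, (ccv_nnc_tensor_param_t){
		.type = CCV_TENSOR_CPU_MEMORY, .format = CCV_TENSOR_FORMAT_NHWC, .datatype = CCV_32F, .dim = {32, 32, 3}}, 0);
	ccv_nnc_tensor_t* const w = ccv_nnc_tensor_new(0, (ccv_nnc_tensor_param_t){
		.type = CCV_TENSOR_CPU_MEMORY, .format = CCV_TENSOR_FORMAT_NHWC, .datatype = CCV_32F, .dim = {64, 3, 3, 3}}, 0);
	ccv_nnc_tensor_t* const bias = ccv_nnc_tensor_new(0, (ccv_nnc_tensor_param_t){
		.type = CCV_TENSOR_CPU_MEMORY, .format = CCV_TENSOR_FORMAT_NHWC, .datatype = CCV_32F, .dim = {64}}, 0);
	// Let the registered tensor_auto routine (the covered _ccv_nnc_conv_tensor_auto_forw above) infer the output shape.
	ccv_nnc_tensor_param_t b_params;
	ccv_nnc_hint_tensor_auto(cmd, (ccv_nnc_tensor_param_t []){a->info, w->info, bias->info}, 3, ccv_nnc_no_hint, &b_params, 1);
	ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, b_params, 0);
	// Tensor contents are left uninitialized for brevity; a real caller would fill a->data.f32, w->data.f32 and bias->data.f32 first.
	ccv_nnc_cmd_exec(cmd, ccv_nnc_no_hint, 0, TENSOR_LIST(a, w, bias), TENSOR_LIST(b), 0);
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(w);
	ccv_nnc_tensor_free(bias);
	ccv_nnc_tensor_free(b);
	return 0;
}

Passing all three inputs (image, weight, bias) satisfies the first branch of _ccv_nnc_conv_forw_bitmask; dropping the bias tensor would exercise the second, bias-free branch instead.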