Coverage Report

Created: 2019-07-03 22:50

/home/liu/buildslave/linux-x64-runtests/build/lib/nnc/cmd/convolution/ccv_nnc_conv_cpu_opt.c
Line  Count  Source
   1         #include <ccv.h>
   2         #include <ccv_internal.h>
   3         #include <nnc/ccv_nnc.h>
   4         #include <nnc/ccv_nnc_easy.h>
   5         #include <nnc/ccv_nnc_internal.h>
   6
   7         #include "_ccv_nnc_conv_cpu_opt.h"
   8
   9         FIND_FILE(cpu_opt/_ccv_nnc_conv_cpu_4x4_3x3_winograd.c, cpu_opt/_ccv_nnc_conv_cpu_fft.c, cpu_opt/_ccv_nnc_conv_cpu_gemm.c, cpu_opt/_ccv_nnc_conv_cpu_opt.c)
  10
  11         enum {
  12           CCV_NNC_CMD_OPT_CONV_ALGO_DC, // Direct convolution
  13           CCV_NNC_CMD_OPT_CONV_ALGO_GEMM, // GEMM (for 1x1)
  14           CCV_NNC_CMD_OPT_CONV_ALGO_WINOGRAD, // Winograd algorithm
  15           CCV_NNC_CMD_OPT_CONV_ALGO_FFT, // Fast Fourier transform
  16           CCV_NNC_CMD_OPT_CONV_ALGO_COUNT
  17         };
  18
  19         static int _ccv_nnc_conv_forw(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
  20  1.62k  {
  21  1.62k    assert(input_size >= 2);
  22  1.62k    const ccv_nnc_tensor_view_t* a = (ccv_nnc_tensor_view_t*)inputs[0];
  23  1.62k    const ccv_nnc_tensor_t* w = inputs[1];
  24  1.62k    assert(!CCV_IS_TENSOR_VIEW(w));
  25  1.62k    const ccv_nnc_tensor_t* bias = input_size > 2 ? inputs[2] : 0;
  26  1.62k    assert(!bias || !CCV_IS_TENSOR_VIEW(bias));
  27  1.62k    assert(output_size == 1);
  28  1.62k    ccv_nnc_tensor_view_t* b = (ccv_nnc_tensor_view_t*)outputs[0];
  29  1.62k    const int a_nd = ccv_nnc_tensor_nd(a->info.dim);
  30  1.62k    assert(a_nd == CCV_NNC_MAX_DIM + 1 || a_nd == CCV_NNC_MAX_DIM + 2);
  31  1.62k    const int* adim = (a_nd == CCV_NNC_MAX_DIM + 1) ? a->info.dim : a->info.dim + 1;
  32  1.62k    const int b_nd = ccv_nnc_tensor_nd(b->info.dim);
  33  1.62k    assert(b_nd == CCV_NNC_MAX_DIM + 1 || b_nd == CCV_NNC_MAX_DIM + 2);
  34  1.62k    const int* bdim = (b_nd == CCV_NNC_MAX_DIM + 1) ? b->info.dim : b->info.dim + 1;
  35  1.62k    assert(w->info.dim[CCV_NNC_MAX_DIM + 1] == adim[CCV_NNC_MAX_DIM]);
  36  1.62k    assert(bdim[CCV_NNC_MAX_DIM] == cmd.info.convolution.count);
  37  1.62k    if (cmd.info.convolution.groups != 1)
  38      0      return CCV_NNC_EXEC_INVALID;
  39  1.62k    int i;
  40  1.62k    // Make sure the weights dimension matches the network dimension
  41  5.32k    for (i = 1; i < CCV_NNC_MAX_DIM_ALLOC; i++)
  42  5.32k    {
  43  5.32k      if (w->info.dim[i] == 0 || cmd.info.size.dim[i - 1] == 0)
  44  1.62k        break;
  45  3.69k      assert(w->info.dim[i] == cmd.info.size.dim[i - 1]);
  46  3.69k    }
  47  1.62k    switch (cmd.algorithm)
  48  1.62k    {
  49  1.62k      case CCV_NNC_CMD_OPT_CONV_ALGO_DC:
  50  1.13k        return _ccv_nnc_conv_forw_cpu_opt(a, w, bias, hint, b);
  51  1.62k      case CCV_NNC_CMD_OPT_CONV_ALGO_GEMM:
  52    159        if (w->info.dim[1] == 1 && w->info.dim[2] == 1 && hint.stride.dim[0] <= 1 && hint.stride.dim[1] <= 1 &&
  53    159          hint.border.begin[0] == 0 && hint.border.begin[1] == 0 && hint.border.end[0] == 0 && hint.border.end[1] == 0 &&
  54    159          !CCV_IS_TENSOR_VIEW(a) && !CCV_IS_TENSOR_VIEW(b) && !CCV_IS_TENSOR_VIEW(w) && (!bias || !CCV_IS_TENSOR_VIEW(bias)))
  55      0          return _ccv_nnc_conv_forw_gemm_cpu_opt(a, w, bias, hint, b);
  56    159        return CCV_NNC_EXEC_INVALID;
  57    170      case CCV_NNC_CMD_OPT_CONV_ALGO_WINOGRAD:
  58    170        if (w->info.dim[1] == 3 && w->info.dim[2] == 3 && hint.stride.dim[0] <= 1 && hint.stride.dim[1] <= 1)
  59    107          return _ccv_nnc_conv_forw_4x4_3x3_winograd_cpu_opt(a, w, bias, hint, b, stream_context);
  60     63        return CCV_NNC_EXEC_INVALID;
  61    159      case CCV_NNC_CMD_OPT_CONV_ALGO_FFT:
  62    159        return CCV_NNC_EXEC_INVALID; // Placeholder, for fft.
  63     63      case -1:
  64      0        // Pass-through
  65      0        break;
  66      0    }
  67      0    // If the size is 3x3, and no stride, choose Winograd kernel
  68      0    if (w->info.dim[1] == 3 && w->info.dim[2] == 3 && hint.stride.dim[0] <= 1 && hint.stride.dim[1] <= 1)
  69      0      return _ccv_nnc_conv_forw_4x4_3x3_winograd_cpu_opt(a, w, bias, hint, b, stream_context);
  70      0    // If the size is 1x1, and no stride, and not a tensor view object, no padding, choose GEMM kernel
  71      0    if (w->info.dim[1] == 1 && w->info.dim[2] == 1 && hint.stride.dim[0] <= 1 && hint.stride.dim[1] <= 1 &&
  72      0      hint.border.begin[0] == 0 && hint.border.begin[1] == 0 && hint.border.end[0] == 0 && hint.border.end[1] == 0 &&
  73      0      !CCV_IS_TENSOR_VIEW(a) && !CCV_IS_TENSOR_VIEW(b) && !CCV_IS_TENSOR_VIEW(w) && (!bias || !CCV_IS_TENSOR_VIEW(bias)))
  74      0      return _ccv_nnc_conv_forw_gemm_cpu_opt(a, w, bias, hint, b);
  75      0    // Otherwise, use direct convolution kernel
  76      0    return _ccv_nnc_conv_forw_cpu_opt(a, w, bias, hint, b);
  77      0  }
  78
  79         REGISTER_COMMAND_BACKEND(CCV_NNC_CONVOLUTION_FORWARD, CCV_NNC_BACKEND_CPU_OPT)(ccv_nnc_cmd_backend_registry_t* const registry)
  80      1  {
  81      1    registry->tensor_formats = CCV_TENSOR_FORMAT_NHWC;
  82      1    registry->tensor_datatypes = CCV_32F;
  83      1    registry->tensor_memory = CCV_TENSOR_CPU_MEMORY;
  84      1    registry->algorithms = CCV_NNC_CMD_OPT_CONV_ALGO_COUNT;
  85      1    registry->exec = _ccv_nnc_conv_forw;
  86      1  }
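
For reference, the sketch below shows one way a caller might exercise this backend directly, pinning the Winograd path that accounts for most of the counts above. It is a minimal sketch, not part of the covered file: it assumes the convenience macros from nnc/ccv_nnc_easy.h (CPU_TENSOR_NHWC, CMD_CONVOLUTION_FORWARD, TENSOR_LIST) behave as they do in ccv's own tests, and the tensor shapes, backend, and algorithm assignments are illustrative only.

#include <assert.h>
#include <nnc/ccv_nnc.h>
#include <nnc/ccv_nnc_easy.h>

int main(void)
{
	ccv_nnc_init();
	// NHWC tensors (illustrative shapes): 1x32x32x3 input, 8 filters of 3x3x3, bias of 8, 1x32x32x8 output.
	ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 1, 32, 32, 3), 0);
	ccv_nnc_tensor_t* const w = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 8, 3, 3, 3), 0);
	ccv_nnc_tensor_t* const bias = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 8), 0);
	ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 1, 32, 32, 8), 0);
	// Convolution forward: 1 group, 8 output channels, 3x3 kernel over 3 input channels (assumed macro usage).
	ccv_nnc_cmd_t cmd = CMD_CONVOLUTION_FORWARD(1, 8, 3, 3, 3);
	cmd.backend = CCV_NNC_BACKEND_CPU_OPT; // the backend registered at line 79
	cmd.algorithm = CCV_NNC_CMD_OPT_CONV_ALGO_WINOGRAD; // 3x3 kernel, stride 1 satisfies the check at line 58
	const ccv_nnc_hint_t hint = ccv_nnc_hint_auto(cmd.info, a->info, b->info);
	const int status = ccv_nnc_cmd_exec(cmd, hint, 0, TENSOR_LIST(a, w, bias), TENSOR_LIST(b), 0);
	// A kernel that cannot handle the given parameters reports CCV_NNC_EXEC_INVALID (lines 56, 60, 62).
	assert(status == CCV_NNC_EXEC_SUCCESS);
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(w);
	ccv_nnc_tensor_free(bias);
	ccv_nnc_tensor_free(b);
	return 0;
}

With cmd.algorithm left at -1, the switch at line 47 passes through to the heuristic at lines 67-76, which prefers the Winograd kernel for 3x3/stride-1 weights, the GEMM kernel for padding-free 1x1 weights on non-view tensors, and the direct convolution kernel otherwise.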