Coverage Report

Created: 2019-07-03 22:50

/home/liu/buildslave/linux-x64-runtests/build/lib/nnc/cmd/sgd/ccv_nnc_sgd.c

Line  Count  Source
   1         #include <ccv.h>
   2         #include <nnc/ccv_nnc.h>
   3         #include <nnc/ccv_nnc_internal.h>
   4
   5         static int _ccv_nnc_sgd_forw_bitmask(const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size)
   6  4.06k  {
   7  4.06k    // 3 inputs (gradient, x, momentum)
   8  4.06k    // 2 outputs (y, new momentum)
   9  4.06k    if (input_bitmasks[0] == 7u && output_bitmasks[0] == 3u)
  10  2.03k      return 1;
  11  2.03k    return 0;
  12  2.03k  }
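Note: the bitmask check is plain bit arithmetic. Bit i of a mask marks tensor slot i as present, so input_bitmasks[0] == 7u (binary 111) demands all three inputs (gradient, x, momentum) and output_bitmasks[0] == 3u (binary 11) demands both outputs (y, new momentum). The report attributes a region count of 2.03k to the right-hand operand of the &&, i.e. the output test was short-circuited away on about half of the 4.06k calls. A minimal standalone sketch of the encoding (the slot names come from the comments above, not from any API):

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        // Set bit i when tensor slot i is supplied, as the registry check expects.
        const uint64_t input_bitmask = (1u << 0) | (1u << 1) | (1u << 2); // gradient, x, momentum
        const uint64_t output_bitmask = (1u << 0) | (1u << 1);            // y, new momentum
        assert(input_bitmask == 7u && output_bitmask == 3u); // the exact test in _ccv_nnc_sgd_forw_bitmask
        return 0;
    }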
  13
  14         static int _ccv_nnc_sgd_allow_inplace(const int input_idx, const int input_size, const int output_idx, const int output_size)
  15  3.03k  {
  16  3.03k    if (input_idx == output_idx + 1)
  17    276      return 1;
  18  2.76k    return 0;
  19  2.76k  }
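Note: the in-place rule offsets the indices by one because input slot 0 is the gradient. A reduced sketch of the same predicate, with the two aliasing pairs it admits spelled out (slot names again taken from the comments in the listing):

    #include <assert.h>

    // Same test as _ccv_nnc_sgd_allow_inplace, minus the unused size arguments.
    static int allow_inplace(const int input_idx, const int output_idx)
    {
        return input_idx == output_idx + 1;
    }

    int main(void)
    {
        assert(allow_inplace(1, 0));  // x may share a buffer with y
        assert(allow_inplace(2, 1));  // momentum may share a buffer with new momentum
        assert(!allow_inplace(0, 0)); // the gradient may not alias y
        return 0;
    }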
  20
  21         static int _ccv_nnc_sgd_back_bitmask(const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size)
  22      0  {
  23      0    // Doesn't support.
  24      0    return 0;
  25      0  }
  26
  27         static void _ccv_nnc_sgd_tensor_auto_forw(const ccv_nnc_cmd_param_t cmd, const ccv_nnc_tensor_param_t* const inputs, const int input_size, const ccv_nnc_hint_t hint, ccv_nnc_tensor_param_t* const outputs, const int output_size)
  28  1.32k  {
  29  1.32k    int i;
  30  3.98k    for (i = 0; i < output_size; i++)
  31  2.65k      outputs[i] = inputs[0];
  32  1.32k  }
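Note: shape inference for the forward command simply copies the tensor parameters of inputs[0] (the gradient) into every output, so y and the new momentum inherit the gradient's shape and datatype. The counts are consistent with output_size == 2: 1.32k calls, 2.65k body executions, and 3.98k loop-condition evaluations (two passes plus the exit test per call); the report attributes the 2.65k region to the i++ increment. A standalone sketch of the same propagation rule, using a stand-in struct rather than the real ccv_nnc_tensor_param_t:

    #include <assert.h>

    // Stand-in for ccv_nnc_tensor_param_t; only the copy rule matters here.
    typedef struct {
        int dim[12];
        int datatype;
    } tensor_param_t;

    // Mirrors _ccv_nnc_sgd_tensor_auto_forw: every output takes the shape and
    // type of the first input (the gradient).
    static void tensor_auto_forw(const tensor_param_t* const inputs, tensor_param_t* const outputs, const int output_size)
    {
        int i;
        for (i = 0; i < output_size; i++)
            outputs[i] = inputs[0];
    }

    int main(void)
    {
        tensor_param_t gradient = { .dim = {128, 256}, .datatype = 1 };
        tensor_param_t outputs[2]; // y and the new momentum
        tensor_auto_forw(&gradient, outputs, 2);
        assert(outputs[0].dim[0] == 128 && outputs[1].dim[1] == 256);
        return 0;
    }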
  33
  34         static void _ccv_nnc_sgd_tensor_auto_back(const ccv_nnc_cmd_param_t cmd, const ccv_nnc_tensor_param_t* const inputs, const int input_size, const ccv_nnc_hint_t hint, ccv_nnc_tensor_param_t* const outputs, const int output_size)
  35      0  {
  36      0    // Doesn't support.
  37      0  }
  38
  39         REGISTER_COMMAND(CCV_NNC_SGD_FORWARD)(ccv_nnc_cmd_registry_t* const registry)
  40           FIND_BACKEND(ccv_nnc_sgd_cpu_ref.c, gpu/ccv_nnc_sgd_gpu_cudnn.cu)
  41      1  {
  42      1    registry->bitmask = _ccv_nnc_sgd_forw_bitmask;
  43      1    registry->tensor_auto = _ccv_nnc_sgd_tensor_auto_forw;
  44      1    registry->allow_inplace = _ccv_nnc_sgd_allow_inplace;
  45      1  }
  46
  47         REGISTER_COMMAND(CCV_NNC_SGD_BACKWARD)(ccv_nnc_cmd_registry_t* const registry)
  48           FIND_BACKEND(ccv_nnc_sgd_cpu_ref.c, gpu/ccv_nnc_sgd_gpu_cudnn.cu)
  49      1  {
  50      1    registry->bitmask = _ccv_nnc_sgd_back_bitmask;
  51      1    registry->tensor_auto = _ccv_nnc_sgd_tensor_auto_back;
  52      1  }
  53
  54         //@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_SGD_FORWARD)
  55         #define CMD_SGD_FORWARD(_nesterov, _rate, _scale, _decay, _momentum, _dampening) ccv_nnc_cmd(CCV_NNC_SGD_FORWARD, 0, ((ccv_nnc_cmd_param_t){.size={.dim={1,1,1}},.minimize={.nesterov=_nesterov,.rate=_rate,.scale=_scale,.decay=_decay,.momentum=_momentum,.dampening=_dampening}}), 0)
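Note: the CMD_SGD_FORWARD macro packs the six SGD hyperparameters into the .minimize field of a ccv_nnc_cmd_param_t and returns the command handle via ccv_nnc_cmd(). A hypothetical invocation, with made-up hyperparameter values chosen purely for illustration:

    #include <nnc/ccv_nnc.h>

    ccv_nnc_cmd_t make_sgd_cmd(void)
    {
        // Hypothetical hyperparameters: Nesterov off, learning rate 0.001,
        // scale 1, weight decay 0.0001, momentum 0.9, dampening 0.9.
        return CMD_SGD_FORWARD(0, 0.001, 1, 0.0001, 0.9, 0.9);
    }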