Coverage Report

Created: 2024-08-18 16:21

/home/liu/actions-runner/_work/ccv/ccv/lib/nnc/cmd/sgd/ccv_nnc_sgd.c
Line  Count   Source
   1          #include "ccv.h"
   2          #include "nnc/ccv_nnc.h"
   3          #include "nnc/ccv_nnc_internal.h"
   4
   5          static int _ccv_nnc_sgd_forw_bitmask(const ccv_nnc_cmd_param_t cmd, const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size)
   6  24.4k   {
   7            // 3 inputs (gradient, x, momentum)
   8            // 2 outputs (y, new momentum)
   9  24.4k     if (input_bitmasks[0] == 7u &&
      12.2k         output_bitmasks[0] == 3u)
  10  12.2k       return 1;
  11  12.2k     return 0;
  12  24.4k   }
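A note on the forward bitmask check above (listing lines 9-11), not part of the report itself: bit i of input_bitmasks[0] marks input i as provided, and likewise for outputs, so 7u (binary 111) means all three inputs (gradient, x, momentum) are present and 3u (binary 11) means both outputs (y, new momentum) are present. A minimal stand-alone sketch of that reading, under those assumptions:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        // Bits set for inputs 0 (gradient), 1 (x) and 2 (momentum).
        const uint64_t input_bitmask = (1u << 0) | (1u << 1) | (1u << 2);
        // Bits set for outputs 0 (y) and 1 (new momentum).
        const uint64_t output_bitmask = (1u << 0) | (1u << 1);
        assert(input_bitmask == 7u);  // the value _ccv_nnc_sgd_forw_bitmask accepts
        assert(output_bitmask == 3u);
        return 0;
    }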
Line  Count   Source
  13
  14          static int _ccv_nnc_sgd_allow_inplace(const ccv_nnc_cmd_param_t cmd, const int input_idx, const int input_size, const int output_idx, const int output_size)
  15  3.49k   {
  16  3.49k     if (input_idx == output_idx + 1)
  17    414       return 1;
  18  3.08k     return 0;
  19  3.49k   }
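The in-place rule above (listing lines 16-18) pairs each output with the input one index after it. Going by the input/output comments in the forward bitmask, that lets x (input 1) share storage with y (output 0) and momentum (input 2) with the new momentum (output 1), while the gradient (input 0) is never aliased. A stand-alone illustration of the rule (the helper name here is ours, not the library's):

    #include <stdio.h>

    // Copy of the predicate in _ccv_nnc_sgd_allow_inplace, for illustration only.
    static int allow_inplace(const int input_idx, const int output_idx)
    {
        return input_idx == output_idx + 1;
    }

    int main(void)
    {
        printf("x -> y: %d\n", allow_inplace(1, 0));                    // 1: x may be updated in place
        printf("momentum -> new momentum: %d\n", allow_inplace(2, 1));  // 1
        printf("gradient -> y: %d\n", allow_inplace(0, 0));             // 0
        return 0;
    }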
Line  Count   Source
  20
  21          static int _ccv_nnc_sgd_back_bitmask(const ccv_nnc_cmd_param_t cmd, const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size)
  22      0   {
  23            // Doesn't support.
  24      0     return 0;
  25      0   }
  26
  27          static void _ccv_nnc_sgd_tensor_auto_forw(const ccv_nnc_cmd_param_t cmd, const ccv_nnc_tensor_param_t* const inputs, const int input_size, const ccv_nnc_hint_t hint, ccv_nnc_tensor_param_t* const outputs, const int output_size)
  28  18.7k   {
  29  18.7k     int i;
  30  56.1k     for (i = 0; i < output_size;
      37.4k          i++)
  31  37.4k       outputs[i] = inputs[0];
  32  18.7k   }
  33
  34          static void _ccv_nnc_sgd_tensor_auto_back(const ccv_nnc_cmd_param_t cmd, const ccv_nnc_tensor_param_t* const inputs, const int input_size, const ccv_nnc_hint_t hint, ccv_nnc_tensor_param_t* const outputs, const int output_size)
  35      0   {
  36            // Doesn't support.
  37      0   }
  38
  39          REGISTER_COMMAND(CCV_NNC_SGD_FORWARD)(ccv_nnc_cmd_registry_t* const registry)
  40            FIND_BACKEND(ccv_nnc_sgd_cpu_ref.c, gpu/ccv_nnc_sgd_gpu_ref.cu)
  41      1   {
  42      1     registry->bitmask = _ccv_nnc_sgd_forw_bitmask;
  43      1     registry->tensor_auto = _ccv_nnc_sgd_tensor_auto_forw;
  44      1     registry->allow_inplace = _ccv_nnc_sgd_allow_inplace;
  45      1   }
  46
  47          REGISTER_COMMAND(CCV_NNC_SGD_BACKWARD)(ccv_nnc_cmd_registry_t* const registry)
  48            FIND_BACKEND(ccv_nnc_sgd_cpu_ref.c, gpu/ccv_nnc_sgd_gpu_ref.cu)
  49      1   {
  50      1     registry->bitmask = _ccv_nnc_sgd_back_bitmask;
  51      1     registry->tensor_auto = _ccv_nnc_sgd_tensor_auto_back;
  52      1   }
  53
  54          //@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_SGD_FORWARD)
  55          #define CMD_SGD_FORWARD(_nesterov, _rate, _scale, _decay, _momentum, _dampening) ccv_nnc_cmd(CCV_NNC_SGD_FORWARD, 0, ((ccv_nnc_cmd_param_t){.size={.dim={1,1,1}},.sgd={.nesterov=_nesterov,.rate=_rate,.scale=_scale,.decay=_decay,.momentum=_momentum,.dampening=_dampening}}), 0)
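
A usage sketch for the CMD_SGD_FORWARD convenience macro at listing line 55; the hyperparameter values below are illustrative, not taken from the report:

    // Build an SGD forward command: nesterov off, learning rate 0.001, scale 1,
    // weight decay 0.0001, momentum 0.9, dampening 0.9 (values are hypothetical).
    ccv_nnc_cmd_t cmd = CMD_SGD_FORWARD(0, 0.001, 1, 0.0001, 0.9, 0.9);
    // Per the forward bitmask, the command then runs with 3 inputs
    // (gradient, x, momentum) and 2 outputs (y, new momentum); the in-place rule
    // allows y to overwrite x and the new momentum to overwrite the old one.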