Coverage Report

Created: 2022-08-03 23:52

/home/liu/buildslave/linux-x64-runtests/build/lib/nnc/cmd/tanh/ccv_nnc_tanh_cpu_ref.c
Line  Count  Source
   1         #include "ccv.h"
   2         #include "ccv_internal.h"
   3         #include "nnc/ccv_nnc.h"
   4         #include "nnc/ccv_nnc_easy.h"
   5         #include "nnc/ccv_nnc_internal.h"
   6         #ifdef USE_OPENMP
   7         #include <omp.h>
   8         #endif
   9         #ifdef USE_DISPATCH
  10         #include <dispatch/dispatch.h>
  11         #endif
  12
  13         static int _ccv_nnc_tanh_forw(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
  14      4  {
  15      4    assert(input_size == 1);
  16      4    const ccv_nnc_tensor_t* a = inputs[0];
  17      4    assert(CCV_IS_TENSOR_CONTIGUOUS(a));
  18      4    assert(output_size == 1);
  19      4    ccv_nnc_tensor_t* b = outputs[0];
  20      4    assert(CCV_IS_TENSOR_CONTIGUOUS(b));
  21      4    const int count = ccv_nnc_tensor_count(a->info);
  22      4    int i;
  23     12    for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC && a->info.dim[i] > 0; i++)
  24      8      { assert(a->info.dim[i] == b->info.dim[i]); }
  25      4    float* const ap = a->data.f32;
  26      4    float* const bp = b->data.f32;
  27  2.40k    for (i = 0; i < count; i++)
  28  2.40k      bp[i] = tanh(ap[i]);
  29      4    return CCV_NNC_EXEC_SUCCESS;
  30      4  }
  31
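The forward kernel above applies tanh() elementwise to a contiguous float32 tensor, writing into an equally-shaped output. A minimal sketch of driving it through the public NNC API follows; the CMD_TANH_FORWARD(), CPU_TENSOR_NHWC() and TENSOR_LIST() convenience macros and the overall call pattern are assumptions borrowed from the library's test-suite style, not something this report shows.

    /* Hedged sketch: exercises the forward kernel via the public API.
     * The convenience macros come from nnc/ccv_nnc_easy.h; treat the
     * exact call pattern as an assumption based on the test suite. */
    #include <nnc/ccv_nnc.h>
    #include <nnc/ccv_nnc_easy.h>

    int main(void)
    {
      ccv_nnc_init();
      /* One contiguous float32 tensor of 10 elements in, one out. */
      ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10), 0);
      ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10), 0);
      int i;
      for (i = 0; i < 10; i++)
        a->data.f32[i] = (i - 5) * 0.2;
      /* Dispatches to _ccv_nnc_tanh_forw via the registered CPU_REF backend. */
      ccv_nnc_cmd_exec(CMD_TANH_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(a), TENSOR_LIST(b), 0);
      ccv_nnc_tensor_free(a);
      ccv_nnc_tensor_free(b);
      return 0;
    }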
  32         static int _ccv_nnc_tanh_back(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
  33      2  {
  34      2    assert(input_size == 3);
  35      2    assert(output_size == 1);
  36      2    const ccv_nnc_tensor_t* g = inputs[0];
  37      2    const ccv_nnc_tensor_t* b = inputs[2];
  38      2    assert(CCV_IS_TENSOR_CONTIGUOUS(b));
  39      2    ccv_nnc_tensor_t* h = outputs[0];
  40      2    assert(CCV_IS_TENSOR_CONTIGUOUS(h));
  41      2    const int count = ccv_nnc_tensor_count(b->info);
  42      2    int i;
  43      6    for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC && g->info.dim[i] > 0; i++)
  44      4      { assert(h->info.dim[i] == b->info.dim[i]); }
  45      2    if (g)
  46      2    {
  47      2      assert(CCV_IS_TENSOR_CONTIGUOUS(g));
  48      6      for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC && g->info.dim[i] > 0; i++)
  49      4        { assert(g->info.dim[i] == h->info.dim[i]); }
  50      2      float* const gp = g->data.f32;
  51      2      float* const bp = b->data.f32;
  52      2      float* const hp = h->data.f32;
  53  2.00k      for (i = 0; i < count; i++)
  54  2.00k        hp[i] = gp[i] * (1 - bp[i] * bp[i]);
  55      2    } else {
  56      0      float* const bp = b->data.f32;
  57      0      float* const hp = h->data.f32;
  58      0      for (i = 0; i < count; i++)
  59      0        hp[i] = 1 - bp[i] * bp[i];
  60      0    }
  61      2    return CCV_NNC_EXEC_SUCCESS;
  62      2  }
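The backward kernel reads the forward output b (inputs[2]) rather than the original input because of the standard tanh derivative identity, a worked statement of which is:

    \[
      b = \tanh(a), \qquad
      \frac{\partial b}{\partial a} = 1 - \tanh^2(a) = 1 - b^2
      \quad\Longrightarrow\quad
      h_i = g_i \, (1 - b_i^2).
    \]

The else branch (lines 56-60, every count 0) handles a call with no incoming gradient g, in which case h is just the raw derivative 1 - b^2; it is the only region this run left uncovered.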
  63
  64         REGISTER_COMMAND_BACKEND(CCV_NNC_TANH_FORWARD, CCV_NNC_BACKEND_CPU_REF)(ccv_nnc_cmd_backend_registry_t* const registry)
  65      1  {
  66      1    registry->tensor_formats = CCV_TENSOR_FORMAT_NHWC | CCV_TENSOR_FORMAT_NCHW;
  67      1    registry->tensor_datatypes = CCV_32F;
  68      1    registry->tensor_memory = CCV_TENSOR_CPU_MEMORY;
  69      1    registry->algorithms = 1;
  70      1    registry->exec = _ccv_nnc_tanh_forw;
  71      1  }
  72
  73         REGISTER_COMMAND_BACKEND(CCV_NNC_TANH_BACKWARD, CCV_NNC_BACKEND_CPU_REF)(ccv_nnc_cmd_backend_registry_t* const registry)
  74      1  {
  75      1    registry->tensor_formats = CCV_TENSOR_FORMAT_NHWC | CCV_TENSOR_FORMAT_NCHW;
  76      1    registry->tensor_datatypes = CCV_32F;
  77      1    registry->tensor_memory = CCV_TENSOR_CPU_MEMORY;
  78      1    registry->algorithms = 1;
  79      1    registry->exec = _ccv_nnc_tanh_back;
  80      1  }