Coverage Report

Created: 2024-08-19 11:27

/home/liu/actions-runner/_work/ccv/ccv/lib/nnc/cmd/tanh/ccv_nnc_tanh_cpu_ref.c
Line
Count
Source (jump to first uncovered line)
1
#include "ccv.h"
2
#include "ccv_internal.h"
3
#include "nnc/ccv_nnc.h"
4
#include "nnc/ccv_nnc_easy.h"
5
#include "nnc/ccv_nnc_internal.h"
6
#ifdef USE_OPENMP
7
#include <omp.h>
8
#endif
9
#ifdef USE_DISPATCH
10
#include <dispatch/dispatch.h>
11
#endif
12
13
static int _ccv_nnc_tanh_forw(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
{
	// Reference CPU forward pass of tanh: b[i] = tanh(a[i]), element-wise over
	// a contiguous 32-bit float tensor. inputs[0] = a, outputs[0] = b.
	// Returns CCV_NNC_EXEC_SUCCESS unconditionally (shape errors trip asserts).
	assert(input_size == 1);
	const ccv_nnc_tensor_t* a = inputs[0];
	assert(CCV_IS_TENSOR_CONTIGUOUS(a));
	assert(output_size == 1);
	ccv_nnc_tensor_t* b = outputs[0];
	assert(CCV_IS_TENSOR_CONTIGUOUS(b));
	const int count = ccv_nnc_tensor_count(a->info);
	int i;
	// Input and output must agree dimension-by-dimension (dim[] is
	// zero-terminated within CCV_NNC_MAX_DIM_ALLOC).
	for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC && a->info.dim[i] > 0; i++)
		{ assert(a->info.dim[i] == b->info.dim[i]); }
	float* const ap = a->data.f32;
	float* const bp = b->data.f32;
	// Use tanhf, not tanh: the data is 32-bit float, so the double-precision
	// tanh() only adds a float->double->float round-trip per element with no
	// accuracy benefit in the stored result.
	for (i = 0; i < count; i++)
		bp[i] = tanhf(ap[i]);
	return CCV_NNC_EXEC_SUCCESS;
}
31
32
static int _ccv_nnc_tanh_back(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
{
	// Reference CPU backward pass of tanh, using d/dx tanh(x) = 1 - tanh(x)^2.
	// inputs[0] = g (gradient w.r.t. the output; may be 0, meaning an implicit
	// all-ones gradient), inputs[1] is unused, inputs[2] = b (the forward
	// output tanh(a)). outputs[0] = h (gradient w.r.t. the input).
	assert(input_size == 3);
	assert(output_size == 1);
	const ccv_nnc_tensor_t* g = inputs[0];
	const ccv_nnc_tensor_t* b = inputs[2];
	assert(CCV_IS_TENSOR_CONTIGUOUS(b));
	ccv_nnc_tensor_t* h = outputs[0];
	assert(CCV_IS_TENSOR_CONTIGUOUS(h));
	const int count = ccv_nnc_tensor_count(b->info);
	int i;
	// Walk b's dimensions here, not g's: g may legitimately be 0 (see the
	// if (g) below), and the previous version dereferenced g->info.dim in
	// this loop before that null check, making the else branch unreachable.
	for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC && b->info.dim[i] > 0; i++)
		{ assert(h->info.dim[i] == b->info.dim[i]); }
	if (g)
	{
		assert(CCV_IS_TENSOR_CONTIGUOUS(g));
		for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC && g->info.dim[i] > 0; i++)
			{ assert(g->info.dim[i] == h->info.dim[i]); }
		float* const gp = g->data.f32;
		float* const bp = b->data.f32;
		float* const hp = h->data.f32;
		// Chain rule: h = g * (1 - b^2).
		for (i = 0; i < count; i++)
			hp[i] = gp[i] * (1 - bp[i] * bp[i]);
	} else {
		// No incoming gradient tensor: treat g as all ones, so h = 1 - b^2.
		float* const bp = b->data.f32;
		float* const hp = h->data.f32;
		for (i = 0; i < count; i++)
			hp[i] = 1 - bp[i] * bp[i];
	}
	return CCV_NNC_EXEC_SUCCESS;
}
63
64
REGISTER_COMMAND_BACKEND(CCV_NNC_TANH_FORWARD, CCV_NNC_BACKEND_CPU_REF)(ccv_nnc_cmd_backend_registry_t* const registry)
{
	/* Register the reference CPU backend for the tanh forward command:
	 * 32-bit float tensors in either NHWC or NCHW layout, CPU memory only,
	 * a single algorithm, dispatched to _ccv_nnc_tanh_forw. */
	registry->tensor_memory = CCV_TENSOR_CPU_MEMORY;
	registry->tensor_datatypes = CCV_32F;
	registry->tensor_formats = CCV_TENSOR_FORMAT_NHWC | CCV_TENSOR_FORMAT_NCHW;
	registry->algorithms = 1;
	registry->exec = _ccv_nnc_tanh_forw;
}
72
73
REGISTER_COMMAND_BACKEND(CCV_NNC_TANH_BACKWARD, CCV_NNC_BACKEND_CPU_REF)(ccv_nnc_cmd_backend_registry_t* const registry)
{
	/* Register the reference CPU backend for the tanh backward command:
	 * 32-bit float tensors in either NHWC or NCHW layout, CPU memory only,
	 * a single algorithm, dispatched to _ccv_nnc_tanh_back. */
	registry->tensor_memory = CCV_TENSOR_CPU_MEMORY;
	registry->tensor_datatypes = CCV_32F;
	registry->tensor_formats = CCV_TENSOR_FORMAT_NHWC | CCV_TENSOR_FORMAT_NCHW;
	registry->algorithms = 1;
	registry->exec = _ccv_nnc_tanh_back;
}