/home/liu/actions-runner/_work/ccv/ccv/lib/nnc/cmd/sigmoid/ccv_nnc_sigmoid_cpu_ref.c
Line | Count | Source |
1 | | #include "ccv.h" |
2 | | #include "ccv_internal.h" |
3 | | #include "nnc/ccv_nnc.h" |
4 | | #include "nnc/ccv_nnc_easy.h" |
5 | | #include "nnc/ccv_nnc_internal.h" |
6 | | #ifdef USE_OPENMP |
7 | | #include <omp.h> |
8 | | #endif |
9 | | #ifdef USE_DISPATCH |
10 | | #include <dispatch/dispatch.h> |
11 | | #endif |
12 | | |
13 | | static int _ccv_nnc_sigmoid_forw(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context) |
14 | 12 | { |
15 | 12 | assert(input_size == 1); |
16 | 12 | const ccv_nnc_tensor_t* a = inputs[0]; |
17 | 12 | assert(CCV_IS_TENSOR_CONTIGUOUS(a)); |
18 | 12 | assert(output_size == 1); |
19 | 12 | ccv_nnc_tensor_t* b = outputs[0]; |
20 | 12 | assert(CCV_IS_TENSOR_CONTIGUOUS(b)); |
21 | 12 | const int count = ccv_nnc_tensor_count(a->info); |
22 | 12 | int i; |
23 | 31 | for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC && a->info.dim[i] > 0; i++) |
24 | 19 | { assert(a->info.dim[i] == b->info.dim[i]); } |
25 | 12 | float* const ap = a->data.f32; |
26 | 12 | float* const bp = b->data.f32; |
27 | 2.44k | for (i = 0; i < count; i++) |
28 | 2.43k | bp[i] = 1. / (1. + exp(-ap[i])); |
29 | 12 | return CCV_NNC_EXEC_SUCCESS; |
30 | 12 | } |
31 | | |
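Note on the forward kernel above: it asserts contiguous FP32 tensors of matching shape and evaluates the logistic function 1 / (1 + e^-x) elementwise over all count elements; the arithmetic goes through double-precision exp() and is narrowed back to float on the store to bp[i].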
32 | | static int _ccv_nnc_sigmoid_back(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context) |
33 | 5 | { |
34 | 5 | assert(input_size == 3); |
35 | 5 | assert(output_size == 1); |
36 | 5 | const ccv_nnc_tensor_t* g = inputs[0]; |
37 | 5 | const ccv_nnc_tensor_t* b = inputs[2]; |
38 | 5 | assert(CCV_IS_TENSOR_CONTIGUOUS(b)); |
39 | 5 | ccv_nnc_tensor_t* h = outputs[0]; |
40 | 5 | assert(CCV_IS_TENSOR_CONTIGUOUS(h)); |
41 | 5 | const int count = ccv_nnc_tensor_count(b->info); |
42 | 5 | int i; |
43 | 14 | for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC && g->info.dim[i] > 0; i++) |
44 | 9 | { assert(h->info.dim[i] == b->info.dim[i]); } |
45 | 5 | if (g) |
46 | 5 | { |
47 | 5 | assert(CCV_IS_TENSOR_CONTIGUOUS(g)); |
48 | 14 | for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC && g->info.dim[i] > 0; i++) |
49 | 9 | { assert(g->info.dim[i] == h->info.dim[i]); } |
50 | 5 | float* const gp = g->data.f32; |
51 | 5 | float* const bp = b->data.f32; |
52 | 5 | float* const hp = h->data.f32; |
53 | 2.02k | for (i = 0; i < count; i++) |
54 | 2.02k | hp[i] = gp[i] * bp[i] * (1 - bp[i]); |
55 | 5 | } else { |
56 | 0 | float* const bp = b->data.f32; |
57 | 0 | float* const hp = h->data.f32; |
58 | 0 | for (i = 0; i < count; i++) |
59 | 0 | hp[i] = bp[i] * (1 - bp[i]); |
60 | 0 | } |
61 | 5 | return CCV_NNC_EXEC_SUCCESS; |
62 | 5 | } |
63 | | |
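Note on the backward kernel: it relies on the sigmoid derivative identity σ'(x) = σ(x)(1 - σ(x)). Because b (inputs[2]) already holds the forward output σ(a), the input gradient reduces to hp[i] = gp[i] * bp[i] * (1 - bp[i]) and the original input a is never needed. The else branch (uncovered in this run) handles a missing incoming gradient g by treating it as all ones, computing hp[i] = bp[i] * (1 - bp[i]) directly.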
64 | | REGISTER_COMMAND_BACKEND(CCV_NNC_SIGMOID_FORWARD, CCV_NNC_BACKEND_CPU_REF)(ccv_nnc_cmd_backend_registry_t* const registry) |
65 | 1 | { |
66 | 1 | registry->tensor_formats = CCV_TENSOR_FORMAT_NHWC | CCV_TENSOR_FORMAT_NCHW; |
67 | 1 | registry->tensor_datatypes = CCV_32F; |
68 | 1 | registry->tensor_memory = CCV_TENSOR_CPU_MEMORY; |
69 | 1 | registry->algorithms = 1; |
70 | 1 | registry->exec = _ccv_nnc_sigmoid_forw; |
71 | 1 | } |
72 | | |
73 | | REGISTER_COMMAND_BACKEND(CCV_NNC_SIGMOID_BACKWARD, CCV_NNC_BACKEND_CPU_REF)(ccv_nnc_cmd_backend_registry_t* const registry) |
74 | 1 | { |
75 | 1 | registry->tensor_formats = CCV_TENSOR_FORMAT_NHWC | CCV_TENSOR_FORMAT_NCHW; |
76 | 1 | registry->tensor_datatypes = CCV_32F; |
77 | 1 | registry->tensor_memory = CCV_TENSOR_CPU_MEMORY; |
78 | 1 | registry->algorithms = 1; |
79 | 1 | registry->exec = _ccv_nnc_sigmoid_back; |
80 | 1 | } |
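
For context, below is a minimal sketch of driving this CPU reference backend through the public command API. It is not part of the file above: the tensor shape and input values are illustrative, and it assumes the usual convenience macros (CPU_TENSOR_NHWC, CMD_SIGMOID_FORWARD, TENSOR_LIST) from nnc/ccv_nnc_easy.h plus ccv_nnc_init() for backend registration.

#include <stdio.h>
#include "ccv.h"
#include "nnc/ccv_nnc.h"
#include "nnc/ccv_nnc_easy.h"

int main(void)
{
	ccv_nnc_init(); /* registers command backends, including the CPU_REF ones above */
	ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10), 0);
	ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10), 0);
	int i;
	for (i = 0; i < 10; i++)
		a->data.f32[i] = (i - 5) * 0.5f; /* illustrative inputs */
	/* Dispatches to _ccv_nnc_sigmoid_forw via the CPU_REF registration above. */
	ccv_nnc_cmd_exec(CMD_SIGMOID_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(a), TENSOR_LIST(b), 0);
	for (i = 0; i < 10; i++)
		printf("sigmoid(%g) = %g\n", a->data.f32[i], b->data.f32[i]);
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(b);
	return 0;
}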