/home/liu/actions-runner/_work/ccv/ccv/lib/nnc/cmd/relu/ccv_nnc_relu_cpu_ref.c
#include "ccv.h"
#include "ccv_internal.h"
#include "nnc/ccv_nnc.h"
#include "nnc/ccv_nnc_easy.h"
#include "nnc/ccv_nnc_internal.h"
#ifdef USE_OPENMP
#include <omp.h>
#endif
#ifdef USE_DISPATCH
#include <dispatch/dispatch.h>
#endif

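// Forward pass of ReLU on the CPU reference backend: treats the input and
// output as flat float arrays of ccv_nnc_tensor_count(a->info) elements and
// writes b[i] = max(a[i], 0) element-wise. Both tensors must be contiguous
// and have identical dimensions.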
static int _ccv_nnc_relu_forw(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
{
	assert(input_size == 1);
	const ccv_nnc_tensor_t* a = inputs[0];
	assert(CCV_IS_TENSOR_CONTIGUOUS(a));
	assert(output_size == 1);
	ccv_nnc_tensor_t* b = outputs[0];
	assert(CCV_IS_TENSOR_CONTIGUOUS(b));
	int i, count = ccv_nnc_tensor_count(a->info);
	for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC && a->info.dim[i] > 0; i++)
	{
		assert(a->info.dim[i] == b->info.dim[i]);
	}
	float* ap = a->data.f32;
	float* bp = b->data.f32;
	for (i = 0; i < count; i++)
		bp[i] = ccv_max(ap[i], 0);
	return CCV_NNC_EXEC_SUCCESS;
}

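// Backward pass of ReLU: given the incoming gradient g (inputs[0]) and the
// forward output b (inputs[2]), writes h[i] = g[i] wherever b[i] > 0 and 0
// elsewhere. The forward input (inputs[1]) is not needed and goes unused.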
static int _ccv_nnc_relu_back(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
{
	assert(input_size == 3);
	const ccv_nnc_tensor_t* g = inputs[0]; // gradient
	assert(CCV_IS_TENSOR_CONTIGUOUS(g));
	const ccv_nnc_tensor_t* b = inputs[2];
	assert(CCV_IS_TENSOR_CONTIGUOUS(b));
	assert(output_size == 1);
	ccv_nnc_tensor_t* h = outputs[0];
	assert(CCV_IS_TENSOR_CONTIGUOUS(h));
	int i, count = ccv_nnc_tensor_count(g->info);
	for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC && g->info.dim[i] > 0; i++)
	{
		assert(b->info.dim[i] == g->info.dim[i]);
		assert(g->info.dim[i] == h->info.dim[i]);
	}
	float* bp = b->data.f32;
	float* gp = g->data.f32;
	float* hp = h->data.f32;
	for (i = 0; i < count; i++)
		hp[i] = (bp[i] > 0) ? gp[i] : 0;
	return CCV_NNC_EXEC_SUCCESS;
}

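// Registers the forward kernel as the CPU reference backend for
// CCV_NNC_RELU_FORWARD: any of the three tensor formats, 32-bit float data,
// CPU memory, single algorithm.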
REGISTER_COMMAND_BACKEND(CCV_NNC_RELU_FORWARD, CCV_NNC_BACKEND_CPU_REF)(ccv_nnc_cmd_backend_registry_t* const registry)
{
	registry->tensor_formats = CCV_TENSOR_FORMAT_NHWC | CCV_TENSOR_FORMAT_NCHW | CCV_TENSOR_FORMAT_CHWN;
	registry->tensor_datatypes = CCV_32F;
	registry->tensor_memory = CCV_TENSOR_CPU_MEMORY;
	registry->algorithms = 1;
	registry->exec = _ccv_nnc_relu_forw;
}

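// Registers the backward kernel for CCV_NNC_RELU_BACKWARD under the same
// format, datatype and memory constraints.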
REGISTER_COMMAND_BACKEND(CCV_NNC_RELU_BACKWARD, CCV_NNC_BACKEND_CPU_REF)(ccv_nnc_cmd_backend_registry_t* const registry)
{
	registry->tensor_formats = CCV_TENSOR_FORMAT_NHWC | CCV_TENSOR_FORMAT_NCHW | CCV_TENSOR_FORMAT_CHWN;
	registry->tensor_datatypes = CCV_32F;
	registry->tensor_memory = CCV_TENSOR_CPU_MEMORY;
	registry->algorithms = 1;
	registry->exec = _ccv_nnc_relu_back;
}
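
/* A minimal usage sketch, assuming the convenience macros from
 * ccv_nnc_easy.h (CPU_TENSOR_NHWC, TENSOR_LIST, CMD_RELU_FORWARD); this is
 * illustrative only and not part of this file:
 *
 *   ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10), 0);
 *   ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 10), 0);
 *   // ... fill a->data.f32 with inputs ...
 *   ccv_nnc_cmd_exec(CMD_RELU_FORWARD(), ccv_nnc_no_hint, 0,
 *       TENSOR_LIST(a), TENSOR_LIST(b), 0);
 *   // b->data.f32 now holds max(a, 0) element-wise.
 *   ccv_nnc_tensor_free(a);
 *   ccv_nnc_tensor_free(b);
 */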