Bug Summary

File: nnc/ccv_cnnp_model_addons.c
Warning: line 1149, column 2
The left operand of '%' is a garbage value

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-unknown-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name ccv_cnnp_model_addons.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 2 -pic-is-pie -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -target-feature +sse2 -tune-cpu generic -debugger-tuning=gdb -fdebug-compilation-dir=/home/liu/actions-runner/_work/ccv/ccv/lib/nnc -fcoverage-compilation-dir=/home/liu/actions-runner/_work/ccv/ccv/lib/nnc -resource-dir /usr/local/lib/clang/19 -I ../ -I /usr/local/cuda/include -D HAVE_CBLAS -D HAVE_LIBPNG -D HAVE_LIBJPEG -D HAVE_FFTW3 -D HAVE_PTHREAD -D HAVE_LIBLINEAR -D HAVE_TESSERACT -D HAVE_AVCODEC -D HAVE_AVFORMAT -D HAVE_AVUTIL -D HAVE_SWSCALE -D HAVE_SSE2 -D HAVE_GSL -D HAVE_CUDA -D HAVE_CUDNN -D HAVE_NCCL -D USE_SYSTEM_CUB -I /usr/local/include -internal-isystem /usr/local/lib/clang/19/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/12/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -ferror-limit 19 -fgnuc-version=4.2.1 -fskip-odr-check-in-gmf -vectorize-loops -vectorize-slp -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/liu/actions-runner/_work/ccv/ccv/_analyze/2026-05-09-001339-2066917-1 
-x c ccv_cnnp_model_addons.c
1#include "ccv_nnc.h"
2#include "ccv_nnc_easy.h"
3#include "ccv_nnc_internal.h"
4#include "ccv_internal.h"
5#include "_ccv_cnnp_model.h"
6
7// MARK - Add-on Functions
8
9static int _ccv_cnnp_model_clip_grad_norm_reduce_norm2(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
10{
11 const int device_id = CCV_TENSOR_GET_DEVICE_ID(inputs[0]->info.type)(((inputs[0]->info.type) & 0xfff00) >> 8);
12 ccv_nnc_tensor_t* const old_norm2 = outputs[1 + device_id * 2];
13 ccv_nnc_tensor_t* const norm2 = outputs[1 + device_id * 2 + 1];
14 const int tensor_count = ccv_nnc_tensor_count(inputs[0]->info);
15 if (tensor_count == 1)
16 ccv_nnc_cmd_exec(CMD_MUL_FORWARD(1)ccv_nnc_cmd(CCV_NNC_MUL_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={1,}}}, 0)
, hint, flags, TENSOR_LIST(inputs[0], inputs[0])(ccv_nnc_tensor_t* []){inputs[0], inputs[0]}, (1 +1 +1 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_LIST(norm2)(ccv_nnc_tensor_t* []){norm2}, (1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, stream_context);
17 else {
18 ccv_nnc_cmd_exec(CMD_REDUCE_NORM2_FORWARD()ccv_nnc_cmd(CCV_NNC_REDUCE_NORM2_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.reduce={.count=(1 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1),.axis={}}}
), 0)
, hint, flags, TENSOR_LIST(inputs[0])(ccv_nnc_tensor_t* []){inputs[0]}, (1 +1 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_LIST(norm2)(ccv_nnc_tensor_t* []){norm2}, (1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, stream_context);
19 ccv_nnc_cmd_exec(CMD_MUL_FORWARD(1)ccv_nnc_cmd(CCV_NNC_MUL_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={1,}}}, 0)
, hint, flags, TENSOR_LIST(norm2, norm2)(ccv_nnc_tensor_t* []){norm2, norm2}, (1 +1 +1 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_LIST(norm2)(ccv_nnc_tensor_t* []){norm2}, (1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, stream_context);
20 }
21 ccv_nnc_cmd_exec(CMD_ADD_FORWARD(1, 1)ccv_nnc_cmd(CCV_NNC_ADD_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={1, 1}}}, 0)
, hint, flags, TENSOR_LIST(old_norm2, norm2)(ccv_nnc_tensor_t* []){old_norm2, norm2}, (1 +1 +1 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_LIST(old_norm2)(ccv_nnc_tensor_t* []){old_norm2}, (1 +1 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, stream_context);
22 return CCV_NNC_EXEC_SUCCESS;
23}
24
25static ccv_nnc_cmd_vtab_t clip_grad_norm_reduce_norm2_vtab = {
26 .exec = _ccv_cnnp_model_clip_grad_norm_reduce_norm2
27};
28
29static int _ccv_cnnp_model_clip_grad_norm_scatter_norm2(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
30{
31 const int device_id = CCV_TENSOR_GET_DEVICE_ID(inputs[0]->info.type)(((inputs[0]->info.type) & 0xfff00) >> 8);
32 ccv_nnc_tensor_t* const norm2 = inputs[1 + device_id * 2];
33 ccv_nnc_cmd_exec(CMD_MUL_FORWARD(1)ccv_nnc_cmd(CCV_NNC_MUL_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={1,}}}, 0)
, hint, flags, TENSOR_LIST(inputs[0], norm2)(ccv_nnc_tensor_t* []){inputs[0], norm2}, (1 +1 +1 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_LIST(outputs[0])(ccv_nnc_tensor_t* []){outputs[0]}, (1 +1 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, stream_context);
34 return CCV_NNC_EXEC_SUCCESS;
35}
36
37static ccv_nnc_cmd_vtab_t clip_grad_norm_scatter_norm2_vtab = {
38 .exec = _ccv_cnnp_model_clip_grad_norm_scatter_norm2
39};
40
41void ccv_cnnp_model_parameters_clip_grad_norm(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameters, int norm_type, float max_norm, ccv_nnc_stream_context_t* const stream_context)
42{
43 assert(norm_type == 2)((void) sizeof ((norm_type == 2) ? 1 : 0), __extension__ ({ if
(norm_type == 2) ; else __assert_fail ("norm_type == 2", "ccv_cnnp_model_addons.c"
, 43, __extension__ __PRETTY_FUNCTION__); }))
;
44 ccv_cnnp_compiled_data_t* const compiled_data = model->compiled_data;
45 assert(compiled_data)((void) sizeof ((compiled_data) ? 1 : 0), __extension__ ({ if
(compiled_data) ; else __assert_fail ("compiled_data", "ccv_cnnp_model_addons.c"
, 45, __extension__ __PRETTY_FUNCTION__); }))
;
46 const int parallel_count = ccv_max(model->parallel_count, 1)({ typeof (model->parallel_count) _a = (model->parallel_count
); typeof (1) _b = (1); (_a > _b) ? _a : _b; })
;
47 ccv_nnc_tensor_t* norm2[parallel_count * 2];
48 ccv_nnc_tensor_t* max_normt[parallel_count];
49 const int stream_type = model->compiled_data->stream_type;
50 int i;
51 if (stream_type == CCV_STREAM_CONTEXT_GPU)
52 {
53 for (i = 0; i < parallel_count; i++)
54 {
55 ccv_nnc_tensor_param_t info = {
56 .type = CCV_TENSOR_GPU_MEMORY,
57 .format = CCV_TENSOR_FORMAT_NHWC,
58 .datatype = CCV_32F,
59 .dim = {1},
60 };
61 CCV_TENSOR_SET_DEVICE_ID(info.type, i)(info.type) = (((info.type) & ~0xfff00) | (((i) & 0xfff
) << 8))
;
62 norm2[i * 2] = ccv_nnc_tensor_new(ccv_nnc_xpu_alloc(&compiled_data->xpu_alloc, i, stream_context, ccv_nnc_tensor_data_size(info)), info, 0);
63 norm2[i * 2 + 1] = ccv_nnc_tensor_new(ccv_nnc_xpu_alloc(&compiled_data->xpu_alloc, i, stream_context, ccv_nnc_tensor_data_size(info)), info, 0);
64 max_normt[i] = ccv_nnc_tensor_new(ccv_nnc_xpu_alloc(&compiled_data->xpu_alloc, i, stream_context, ccv_nnc_tensor_data_size(info)), info, 0);
65 }
66 } else {
67 for (i = 0; i < parallel_count; i++)
68 {
69 ccv_nnc_tensor_param_t info = {
70 .type = CCV_TENSOR_CPU_MEMORY,
71 .format = CCV_TENSOR_FORMAT_NHWC,
72 .datatype = CCV_32F,
73 .dim = {1},
74 };
75 norm2[i * 2] = ccv_nnc_tensor_new(0, info, 0);
76 norm2[i * 2 + 1] = ccv_nnc_tensor_new(0, info, 0);
77 max_normt[i] = ccv_nnc_tensor_new(0, info, 0);
78 }
79 }
80 // zero out old norm2.
81 if (parallel_count > 1)
82 {
83 ccv_nnc_stream_context_t* streams[parallel_count];
84 ccv_nnc_stream_signal_t* signal;
85 if (stream_context)
86 signal = ccv_nnc_stream_context_emit_signal_new(stream_context);
87 for (i = 0; i < parallel_count; i++)
88 {
89 const int stream_type = CCV_TENSOR_GET_MEMORY(norm2[i * 2]->info.type)((norm2[i * 2]->info.type) & 0x3) == CCV_TENSOR_GPU_MEMORY ? CCV_STREAM_CONTEXT_GPU : CCV_STREAM_CONTEXT_CPU;
90 const int device_id = CCV_TENSOR_GET_DEVICE_ID(norm2[i * 2]->info.type)(((norm2[i * 2]->info.type) & 0xfff00) >> 8);
91 int type = stream_type;
92 CCV_STREAM_SET_DEVICE_ID(type, device_id)(type) = (((type) & ~0xfff00) | (((device_id) & 0xfff
) << 8))
;
93 ccv_nnc_stream_context_t* const stream_0 = ccv_cnnp_compiled_data_get_stream(compiled_data, type);
94 // Wait signal to finish.
95 if (stream_context)
96 ccv_nnc_stream_context_wait_signal(stream_0, signal);
97 ccv_nnc_cmd_exec(CMD_SET_FORWARD(0)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={0,}}}, 0)
, ccv_nnc_no_hint, 0, 0, 0, TENSOR_LIST(norm2[i * 2])(ccv_nnc_tensor_t* []){norm2[i * 2]}, (1 +1 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, stream_0);
98 if (stream_context)
99 {
100 ccv_nnc_stream_signal_t* const signal = ccv_nnc_stream_context_emit_signal_new(stream_0);
101 ccv_nnc_stream_context_wait_signal(stream_context, signal);
102 }
103 streams[i] = stream_0;
104 }
105 // If this should be blocking, blocking it.
106 if (!stream_context)
107 for (i = 0; i < parallel_count; i++)
108 if (streams[i])
109 ccv_nnc_stream_context_wait(streams[i]);
110 } else {
111 ccv_nnc_cmd_exec(CMD_SET_FORWARD(0)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={0,}}}, 0)
, ccv_nnc_no_hint, 0, 0, 0, TENSOR_LIST(norm2[0])(ccv_nnc_tensor_t* []){norm2[0]}, (1 +1 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, stream_context);
112 }
113 // Gather norm2.
114 ccv_nnc_cmd_t reduce_cmd = {
115 .cmd = CCV_NNC_CUSTOM_FORWARD,
116 .isa = &clip_grad_norm_reduce_norm2_vtab,
117 };
118 ccv_cnnp_model_parameter_gradients_map(model, parameters, reduce_cmd, ccv_nnc_no_hint, 0, 0, 0, norm2, parallel_count * 2, stream_context);
119 // Now compute max(max_norm / norm2, 1.0).
120 if (parallel_count > 1)
121 {
122 ccv_nnc_stream_context_t* streams[parallel_count];
123 ccv_nnc_stream_signal_t* signal;
124 if (stream_context)
125 signal = ccv_nnc_stream_context_emit_signal_new(stream_context);
126 for (i = 0; i < parallel_count; i++)
127 {
128 const int stream_type = CCV_TENSOR_GET_MEMORY(norm2[i * 2]->info.type)((norm2[i * 2]->info.type) & 0x3) == CCV_TENSOR_GPU_MEMORY ? CCV_STREAM_CONTEXT_GPU : CCV_STREAM_CONTEXT_CPU;
129 const int device_id = CCV_TENSOR_GET_DEVICE_ID(norm2[i * 2]->info.type)(((norm2[i * 2]->info.type) & 0xfff00) >> 8);
130 int type = stream_type;
131 CCV_STREAM_SET_DEVICE_ID(type, device_id)(type) = (((type) & ~0xfff00) | (((device_id) & 0xfff
) << 8))
;
132 ccv_nnc_stream_context_t* const stream_0 = ccv_cnnp_compiled_data_get_stream(compiled_data, type);
133 // Wait signal to finish.
134 if (stream_context)
135 ccv_nnc_stream_context_wait_signal(stream_0, signal);
136 ccv_nnc_cmd_exec(CMD_EWSQRT_FORWARD()ccv_nnc_cmd(CCV_NNC_EWSQRT_FORWARD, 0, ccv_nnc_cmd_auto, 0), ccv_nnc_no_hint, 0, TENSOR_LIST(norm2[i * 2])(ccv_nnc_tensor_t* []){norm2[i * 2]}, (1 +1 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_LIST(norm2[i * 2])(ccv_nnc_tensor_t* []){norm2[i * 2]}, (1 +1 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, stream_0);
137 ccv_nnc_cmd_exec(CMD_SET_FORWARD(max_norm)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={max_norm,}}}, 0)
, ccv_nnc_no_hint, 0, 0, 0, TENSOR_LIST(max_normt[i])(ccv_nnc_tensor_t* []){max_normt[i]}, (1 +1 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, stream_0);
138 ccv_nnc_cmd_exec(CMD_EWDIV_FORWARD()ccv_nnc_cmd(CCV_NNC_EWDIV_FORWARD, 0, ccv_nnc_cmd_auto, 0), ccv_nnc_no_hint, 0, TENSOR_LIST(max_normt[i], norm2[i * 2])(ccv_nnc_tensor_t* []){max_normt[i], norm2[i * 2]}, (1 +1 +1 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1
)
, TENSOR_LIST(norm2[i * 2])(ccv_nnc_tensor_t* []){norm2[i * 2]}, (1 +1 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, stream_0);
139 ccv_nnc_cmd_exec(CMD_CLAMP_FORWARD(NAN, 1)ccv_nnc_cmd(CCV_NNC_CLAMP_FORWARD, 0, (ccv_nnc_cmd_param_t){.
size={.dim={1,1,1}},.clamp={.min=(__builtin_nanf ("")),.max=1
}}, 0)
, ccv_nnc_no_hint, 0, TENSOR_LIST(norm2[i * 2])(ccv_nnc_tensor_t* []){norm2[i * 2]}, (1 +1 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_LIST(norm2[i * 2])(ccv_nnc_tensor_t* []){norm2[i * 2]}, (1 +1 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, stream_0);
140 if (stream_context)
141 {
142 ccv_nnc_stream_signal_t* const signal = ccv_nnc_stream_context_emit_signal_new(stream_0);
143 ccv_nnc_stream_context_wait_signal(stream_context, signal);
144 }
145 streams[i] = stream_0;
146 }
147 // If this should be blocking, blocking it.
148 if (!stream_context)
149 for (i = 0; i < parallel_count; i++)
150 if (streams[i])
151 ccv_nnc_stream_context_wait(streams[i]);
152 } else {
153 ccv_nnc_cmd_exec(CMD_EWSQRT_FORWARD()ccv_nnc_cmd(CCV_NNC_EWSQRT_FORWARD, 0, ccv_nnc_cmd_auto, 0), ccv_nnc_no_hint, 0, TENSOR_LIST(norm2[0])(ccv_nnc_tensor_t* []){norm2[0]}, (1 +1 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_LIST(norm2[0])(ccv_nnc_tensor_t* []){norm2[0]}, (1 +1 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, stream_context);
154 ccv_nnc_cmd_exec(CMD_SET_FORWARD(max_norm)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={max_norm,}}}, 0)
, ccv_nnc_no_hint, 0, 0, 0, TENSOR_LIST(max_normt[0])(ccv_nnc_tensor_t* []){max_normt[0]}, (1 +1 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, stream_context);
155 ccv_nnc_cmd_exec(CMD_EWDIV_FORWARD()ccv_nnc_cmd(CCV_NNC_EWDIV_FORWARD, 0, ccv_nnc_cmd_auto, 0), ccv_nnc_no_hint, 0, TENSOR_LIST(max_normt[0], norm2[0])(ccv_nnc_tensor_t* []){max_normt[0], norm2[0]}, (1 +1 +1 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_LIST(norm2[0])(ccv_nnc_tensor_t* []){norm2[0]}, (1 +1 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, stream_context);
156 ccv_nnc_cmd_exec(CMD_CLAMP_FORWARD(NAN, 1)ccv_nnc_cmd(CCV_NNC_CLAMP_FORWARD, 0, (ccv_nnc_cmd_param_t){.
size={.dim={1,1,1}},.clamp={.min=(__builtin_nanf ("")),.max=1
}}, 0)
, ccv_nnc_no_hint, 0, TENSOR_LIST(norm2[0])(ccv_nnc_tensor_t* []){norm2[0]}, (1 +1 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_LIST(norm2[0])(ccv_nnc_tensor_t* []){norm2[0]}, (1 +1 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, stream_context);
157 }
158 ccv_nnc_cmd_t scatter_cmd = {
159 .cmd = CCV_NNC_CUSTOM_FORWARD,
160 .isa = &clip_grad_norm_scatter_norm2_vtab,
161 };
162 ccv_cnnp_model_parameter_gradients_map(model, parameters, scatter_cmd, ccv_nnc_no_hint, 0, norm2, parallel_count * 2, 0, 0, stream_context);
163 if (stream_type == CCV_STREAM_CONTEXT_GPU)
164 for (i = 0; i < parallel_count; i++)
165 {
166 ccv_nnc_xpu_free(&compiled_data->xpu_alloc, norm2[i * 2]->data.u8);
167 ccv_nnc_xpu_free(&compiled_data->xpu_alloc, norm2[i * 2 + 1]->data.u8);
168 ccv_nnc_xpu_free(&compiled_data->xpu_alloc, max_normt[i]->data.u8);
169 }
170 for (i = 0; i < parallel_count; i++)
171 {
172 ccv_nnc_tensor_free(norm2[i * 2]);
173 ccv_nnc_tensor_free(norm2[i * 2 + 1]);
174 ccv_nnc_tensor_free(max_normt[i]);
175 }
176}
177
178// MARK - Add-on Functions
179
180static int _ccv_cnnp_model_isnan(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
181{
182 const int device_id = CCV_TENSOR_GET_DEVICE_ID(inputs[0]->info.type)(((inputs[0]->info.type) & 0xfff00) >> 8);
183 ccv_nnc_tensor_t* const old_isnanr = outputs[1 + device_id * 2];
184 ccv_nnc_tensor_t* const isnanr = outputs[1 + device_id * 2 + 1];
185 ccv_nnc_cmd_t reduce_cmd = CMD_REDUCE_ISNAN_FORWARD()ccv_nnc_cmd(CCV_NNC_REDUCE_ISNAN_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.reduce={.count=(1 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1),.axis={}}}
), 0)
;
186 reduce_cmd.info.reduce.count = ccv_nnc_tensor_nd(inputs[0]->info.dim);
187 int i;
188 for (i = 0; i < cmd.info.reduce.count; i++)
189 reduce_cmd.info.reduce.axis[i] = i;
190 ccv_nnc_cmd_exec(reduce_cmd, hint, flags, TENSOR_LIST(inputs[0])(ccv_nnc_tensor_t* []){inputs[0]}, (1 +1 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_LIST(isnanr)(ccv_nnc_tensor_t* []){isnanr}, (1 +1 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, stream_context);
191 ccv_nnc_cmd_exec(CMD_EWSUM_FORWARD()ccv_nnc_cmd(CCV_NNC_EWSUM_FORWARD, 0, ccv_nnc_cmd_auto, 0), hint, flags, TENSOR_LIST(old_isnanr, isnanr)(ccv_nnc_tensor_t* []){old_isnanr, isnanr}, (1 +1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_LIST(old_isnanr)(ccv_nnc_tensor_t* []){old_isnanr}, (1 +1 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, stream_context);
192 return CCV_NNC_EXEC_SUCCESS;
193}
194
195static ccv_nnc_cmd_vtab_t reduce_isnan_vtab = {
196 .exec = _ccv_cnnp_model_isnan
197};
198
199int ccv_cnnp_model_parameter_gradients_isnan(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameters, ccv_nnc_stream_context_t* const stream_context)
200{
201 ccv_cnnp_compiled_data_t* const compiled_data = model->compiled_data;
202 assert(compiled_data)((void) sizeof ((compiled_data) ? 1 : 0), __extension__ ({ if
(compiled_data) ; else __assert_fail ("compiled_data", "ccv_cnnp_model_addons.c"
, 202, __extension__ __PRETTY_FUNCTION__); }))
;
203 const int parallel_count = ccv_max(model->parallel_count, 1)({ typeof (model->parallel_count) _a = (model->parallel_count
); typeof (1) _b = (1); (_a > _b) ? _a : _b; })
;
204 ccv_nnc_tensor_t* isnanr[parallel_count * 2];
205 const int stream_type = model->compiled_data->stream_type;
206 int i;
207 if (stream_type == CCV_STREAM_CONTEXT_GPU)
208 {
209 for (i = 0; i < parallel_count; i++)
210 {
211 ccv_nnc_tensor_param_t info = {
212 .type = CCV_TENSOR_GPU_MEMORY,
213 .format = CCV_TENSOR_FORMAT_NHWC,
214 .datatype = CCV_32S,
215 .dim = {1},
216 };
217 CCV_TENSOR_SET_DEVICE_ID(info.type, i)(info.type) = (((info.type) & ~0xfff00) | (((i) & 0xfff
) << 8))
;
218 isnanr[i * 2] = ccv_nnc_tensor_new(ccv_nnc_xpu_alloc(&compiled_data->xpu_alloc, i, stream_context, ccv_nnc_tensor_data_size(info)), info, 0);
219 isnanr[i * 2 + 1] = ccv_nnc_tensor_new(ccv_nnc_xpu_alloc(&compiled_data->xpu_alloc, i, stream_context, ccv_nnc_tensor_data_size(info)), info, 0);
220 }
221 } else {
222 for (i = 0; i < parallel_count; i++)
223 {
224 ccv_nnc_tensor_param_t info = {
225 .type = CCV_TENSOR_CPU_MEMORY,
226 .format = CCV_TENSOR_FORMAT_NHWC,
227 .datatype = CCV_32S,
228 .dim = {1},
229 };
230 isnanr[i * 2] = ccv_nnc_tensor_new(0, info, 0);
231 isnanr[i * 2 + 1] = ccv_nnc_tensor_new(0, info, 0);
232 }
233 }
234 // zero out old isnanr.
235 if (parallel_count > 1)
236 {
237 ccv_nnc_stream_context_t* streams[parallel_count];
238 ccv_nnc_stream_signal_t* signal;
239 if (stream_context)
240 signal = ccv_nnc_stream_context_emit_signal_new(stream_context);
241 for (i = 0; i < parallel_count; i++)
242 {
243 const int stream_type = CCV_TENSOR_GET_MEMORY(isnanr[i * 2]->info.type)((isnanr[i * 2]->info.type) & 0x3) == CCV_TENSOR_GPU_MEMORY ? CCV_STREAM_CONTEXT_GPU : CCV_STREAM_CONTEXT_CPU;
244 const int device_id = CCV_TENSOR_GET_DEVICE_ID(isnanr[i * 2]->info.type)(((isnanr[i * 2]->info.type) & 0xfff00) >> 8);
245 int type = stream_type;
246 CCV_STREAM_SET_DEVICE_ID(type, device_id)(type) = (((type) & ~0xfff00) | (((device_id) & 0xfff
) << 8))
;
247 ccv_nnc_stream_context_t* const stream_0 = ccv_cnnp_compiled_data_get_stream(compiled_data, type);
248 // Wait signal to finish.
249 if (stream_context)
250 ccv_nnc_stream_context_wait_signal(stream_0, signal);
251 ccv_nnc_cmd_exec(CMD_SET_FORWARD(0)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={0,}}}, 0)
, ccv_nnc_no_hint, 0, 0, 0, TENSOR_LIST(isnanr[i * 2])(ccv_nnc_tensor_t* []){isnanr[i * 2]}, (1 +1 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, stream_0);
252 if (stream_context)
253 {
254 ccv_nnc_stream_signal_t* const signal = ccv_nnc_stream_context_emit_signal_new(stream_0);
255 ccv_nnc_stream_context_wait_signal(stream_context, signal);
256 }
257 streams[i] = stream_0;
258 }
259 // If this should be blocking, blocking it.
260 if (!stream_context)
261 for (i = 0; i < parallel_count; i++)
262 if (streams[i])
263 ccv_nnc_stream_context_wait(streams[i]);
264 } else
265 ccv_nnc_cmd_exec(CMD_SET_FORWARD(0)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={0,}}}, 0)
, ccv_nnc_no_hint, 0, 0, 0, TENSOR_LIST(isnanr[0])(ccv_nnc_tensor_t* []){isnanr[0]}, (1 +1 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, stream_context);
266 // Gather isnanr.
267 ccv_nnc_cmd_t reduce_cmd = {
268 .cmd = CCV_NNC_CUSTOM_FORWARD,
269 .isa = &reduce_isnan_vtab,
270 };
271 ccv_cnnp_model_parameter_gradients_map(model, parameters, reduce_cmd, ccv_nnc_no_hint, 0, 0, 0, isnanr, parallel_count * 2, stream_context);
272 for (i = 0; i < parallel_count; i++)
273 ccv_nnc_tensor_free(isnanr[i * 2 + 1]);
274 int retval = 0;
275 if (stream_type == CCV_TENSOR_GPU_MEMORY)
276 {
277 ccv_nnc_tensor_param_t info = {
278 .type = CCV_TENSOR_CPU_MEMORY,
279 .format = CCV_TENSOR_FORMAT_NHWC,
280 .datatype = CCV_32S,
281 .dim = {1},
282 };
283 ccv_nnc_tensor_t* checknan = ccv_nnc_tensor_new(0, info, 0);
284 for (i = 0; i < parallel_count; i++)
285 {
286 ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD()ccv_nnc_cmd(CCV_NNC_DATA_TRANSFER_FORWARD, 0, ccv_nnc_cmd_auto
, 0)
, ccv_nnc_no_hint, 0, TENSOR_LIST(isnanr[i * 2])(ccv_nnc_tensor_t* []){isnanr[i * 2]}, (1 +1 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_LIST(checknan)(ccv_nnc_tensor_t* []){checknan}, (1 +1 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, 0);
287 if (checknan->data.i32[0] > 0)
288 {
289 retval = 1;
290 break;
291 }
292 }
293 ccv_nnc_tensor_free(checknan);
294 } else {
295 for (i = 0; i < parallel_count; i++)
296 if (isnanr[i * 2]->data.i32[0] > 0)
297 {
298 retval = 1;
299 break;
300 }
301 }
302 for (i = 0; i < parallel_count; i++)
303 ccv_nnc_tensor_free(isnanr[i * 2]);
304 return retval;
305}
306
307// MARK - Core Layers
308
309static void _ccv_cnnp_sum_build(ccv_cnnp_model_t* const self, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
310{
311 PRINT(CCV_CLI_VERBOSE, "[cnnp_sum_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_sum_build] -\n"); fflush(stdout); } } while (
0)
;
312 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 312, __extension__ __PRETTY_FUNCTION__
); }))
;
313 outputs[0] = ccv_nnc_tensor_symbol_new(graph, ccv_nnc_tensor_symbol_params(graph, inputs[0]), 0);
314 ccv_nnc_graph_exec_symbol_new(graph, CMD_EWSUM_FORWARD()ccv_nnc_cmd(CCV_NNC_EWSUM_FORWARD, 0, ccv_nnc_cmd_auto, 0), inputs, input_size, outputs, output_size, 0);
315}
316
317static ccv_cnnp_model_t* _ccv_cnnp_sum_copy(const ccv_cnnp_model_t* const self, void* const context);
318
319static const ccv_cnnp_model_vtab_t ccv_cnnp_sum_isa = {
320 .build = _ccv_cnnp_sum_build,
321 .copy = _ccv_cnnp_sum_copy,
322};
323
324typedef struct {
325 ccv_cnnp_model_t super;
326 ccv_nnc_tensor_symbol_t output;
327} ccv_cnnp_model_sum_t;
328
329ccv_cnnp_model_t* ccv_cnnp_sum(const char* const name)
330{
331 ccv_cnnp_model_sum_t* const model_sum = (ccv_cnnp_model_sum_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_sum_t));
332 model_sum->super.isa = &ccv_cnnp_sum_isa;
333 model_sum->super.input_size = 0;
334 model_sum->super.outputs = &model_sum->output;
335 model_sum->super.output_size = 1;
336 ccv_cnnp_model_copy_name(&model_sum->super, name);
337 return (ccv_cnnp_model_t*)model_sum;
338}
339
340static ccv_cnnp_model_t* _ccv_cnnp_sum_copy(const ccv_cnnp_model_t* const self, void* const context)
341{
342 return ccv_cnnp_sum(self->name);
343}
344
345typedef struct {
346 ccv_cnnp_model_t super;
347 int axis;
348 ccv_nnc_tensor_symbol_t output;
349} ccv_cnnp_model_concat_t;
350
351static void _ccv_cnnp_concat_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
352{
353 const ccv_cnnp_model_concat_t* const self = (const ccv_cnnp_model_concat_t*)super;
354 PRINT(CCV_CLI_VERBOSE, "[cnnp_concat_build] 1. -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_concat_build] 1. -\n"); fflush(stdout); } } while
(0)
;
355 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 355, __extension__ __PRETTY_FUNCTION__
); }))
;
356 ccv_nnc_tensor_param_t output_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
357 int i, j;
358 if (output_params.dim[0] == 0)
359 for (i = 1; i < input_size; i++)
360 {
361 output_params = ccv_nnc_tensor_symbol_params(graph, inputs[i]);
362 if (output_params.dim[0] != 0)
363 break;
364 }
365 const int nd = ccv_nnc_tensor_nd(output_params.dim);
366 const int axis = self->axis;
367 assert(axis < nd)((void) sizeof ((axis < nd) ? 1 : 0), __extension__ ({ if (
axis < nd) ; else __assert_fail ("axis < nd", "ccv_cnnp_model_addons.c"
, 367, __extension__ __PRETTY_FUNCTION__); }))
;
368 output_params.dim[axis] = 0;
369 int input_is_contiguous = 1;
370 for (i = 0; i < input_size; i++)
371 {
372 const ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[i]);
373 const int input_nd = ccv_nnc_tensor_nd(input_params.dim);
374 if (input_nd == 0)
375 {
376 PRINT(CCV_CLI_VERBOSE, "[cnnp_concat_build] %d. input[%d]: -\n", i + 2, i)do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_concat_build] %d. input[%d]: -\n", i + 2, i)
; fflush(stdout); } } while (0)
;
377 input_is_contiguous = 0;
378 continue;
379 }
380 if (CCV_CLI_OUTPUT_LEVEL_IS(CCV_CLI_VERBOSE)(CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
381 {
382 PRINT(CCV_CLI_VERBOSE, "[cnnp_concat_build] %d. input[%d]: (%d", i + 2, i, input_params.dim[0])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_concat_build] %d. input[%d]: (%d", i + 2, i,
input_params.dim[0]); fflush(stdout); } } while (0)
;
383 int i;
384 for (i = 1; i < CCV_NNC_MAX_DIM_ALLOC(12) && input_params.dim[i] > 0; i++)
385 PRINT(CCV_CLI_VERBOSE, ", %d", input_params.dim[i])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(", %d", input_params.dim[i]); fflush(stdout); } } while
(0)
;
386 PRINT(CCV_CLI_VERBOSE, ")\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(")\n"); fflush(stdout); } } while (0)
;
387 }
388 assert(input_nd == nd)((void) sizeof ((input_nd == nd) ? 1 : 0), __extension__ ({ if
(input_nd == nd) ; else __assert_fail ("input_nd == nd", "ccv_cnnp_model_addons.c"
, 388, __extension__ __PRETTY_FUNCTION__); }))
;
389 for (j = 0; j < nd; j++)
390 if (j != axis)
391 { assert(input_params.dim[j] == output_params.dim[j])((void) sizeof ((input_params.dim[j] == output_params.dim[j])
? 1 : 0), __extension__ ({ if (input_params.dim[j] == output_params
.dim[j]) ; else __assert_fail ("input_params.dim[j] == output_params.dim[j]"
, "ccv_cnnp_model_addons.c", 391, __extension__ __PRETTY_FUNCTION__
); }))
; }
392 output_params.dim[axis] += input_params.dim[axis];
393 }
394 outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
395 int ofs[CCV_NNC_MAX_DIM_ALLOC(12)] = {};
396 int stride[CCV_NNC_MAX_DIM_ALLOC(12)] = {};
397 ccv_nnc_tensor_get_stride(output_params.dim, stride);
398 if (input_is_contiguous)
399 {
400 ccv_nnc_tensor_symbol_t aliases[input_size];
401 for (i = 0; i < input_size; i++)
402 {
403 const ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[i]);
404 aliases[i] = ccv_nnc_tensor_symbol_alias_new(graph, outputs[0], ofs, stride, input_params, 0);
405 ofs[axis] += input_params.dim[axis];
406 }
407 // Format transform is more flexible.
408 ccv_nnc_graph_exec_symbol_new(graph, CMD_FORMAT_TRANSFORM_FORWARD()ccv_nnc_cmd(CCV_NNC_FORMAT_TRANSFORM_FORWARD, 0, ccv_nnc_cmd_auto
, 0)
, inputs, input_size, aliases, input_size, "concat");
409 } else {
410 ccv_nnc_tensor_symbol_t aliases[input_size];
411 for (i = 0; i < input_size; i++)
412 {
413 const ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[i]);
414 if (input_params.dim[0] == 0)
415 {
416 // Create a new alias anyway, but not going to use it, in this way, the alias count will match during absorb.
417 aliases[i] = ccv_nnc_tensor_symbol_alias_new(graph, outputs[0], ofs, stride, input_params, 0);
418 continue;
419 }
420 aliases[i] = ccv_nnc_tensor_symbol_alias_new(graph, outputs[0], ofs, stride, input_params, 0);
421 ofs[axis] += input_params.dim[axis];
422 }
423 // Format transform is more flexible.
424 ccv_nnc_graph_exec_symbol_new(graph, CMD_FORMAT_TRANSFORM_FORWARD()ccv_nnc_cmd(CCV_NNC_FORMAT_TRANSFORM_FORWARD, 0, ccv_nnc_cmd_auto
, 0)
, inputs, input_size, aliases, input_size, "concat");
425 }
426}
427
static ccv_cnnp_model_t* _ccv_cnnp_concat_copy(const ccv_cnnp_model_t* const self, void* const context);

// Virtual table for the concat model: build emits aliases into one concatenated
// tensor; copy duplicates the model with the same axis and name.
static const ccv_cnnp_model_vtab_t ccv_cnnp_concat_isa = {
	.build = _ccv_cnnp_concat_build,
	.copy = _ccv_cnnp_concat_copy,
};
434
435ccv_cnnp_model_t* ccv_cnnp_concat(const int axis, const char* const name)
436{
437 ccv_cnnp_model_concat_t* const model_concat = (ccv_cnnp_model_concat_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_concat_t));
438 model_concat->super.isa = &ccv_cnnp_concat_isa;
439 model_concat->super.input_size = 0;
440 model_concat->super.outputs = &model_concat->output;
441 model_concat->super.output_size = 1;
442 model_concat->axis = axis;
443 ccv_cnnp_model_copy_name(&model_concat->super, name);
444 return (ccv_cnnp_model_t*)model_concat;
445}
446
447static ccv_cnnp_model_t* _ccv_cnnp_concat_copy(const ccv_cnnp_model_t* const super, void* const context)
448{
449 const ccv_cnnp_model_concat_t* const self = (const ccv_cnnp_model_concat_t*)super;
450 return ccv_cnnp_concat(self->axis, self->super.name);
451}
452
typedef struct {
	ccv_cnnp_model_t super; // Base model; output_size carries the chunk count n.
	int axis; // Axis along which the input is split into equal pieces.
	ccv_nnc_tensor_symbol_t outputs[1]; // Over-allocated to n entries at construction time.
} ccv_cnnp_model_chunk_t;
458
459static void _ccv_cnnp_chunk_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
460{
461 const ccv_cnnp_model_concat_t* const self = (const ccv_cnnp_model_concat_t*)super;
462 PRINT(CCV_CLI_VERBOSE, "[cnnp_chunk_build] 1. axis: %d\n", self->axis)do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_chunk_build] 1. axis: %d\n", self->axis);
fflush(stdout); } } while (0)
;
463 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 463, __extension__ __PRETTY_FUNCTION__); }))
;
464 const ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
465 if (CCV_CLI_OUTPUT_LEVEL_IS(CCV_CLI_VERBOSE)(CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
466 {
467 PRINT(CCV_CLI_VERBOSE, "[cnnp_chunk_build] 2. input: (%d", input_params.dim[0])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_chunk_build] 2. input: (%d", input_params.dim
[0]); fflush(stdout); } } while (0)
;
468 int i;
469 for (i = 1; i < CCV_NNC_MAX_DIM_ALLOC(12) && input_params.dim[i] > 0; i++)
470 PRINT(CCV_CLI_VERBOSE, ", %d", input_params.dim[i])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(", %d", input_params.dim[i]); fflush(stdout); } } while
(0)
;
471 PRINT(CCV_CLI_VERBOSE, ")\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(")\n"); fflush(stdout); } } while (0)
;
472 }
473 ccv_nnc_tensor_param_t output_params = input_params;
474 int i;
475 const int nd = ccv_nnc_tensor_nd(output_params.dim);
476 const int axis = self->axis;
477 assert(axis < nd)((void) sizeof ((axis < nd) ? 1 : 0), __extension__ ({ if (
axis < nd) ; else __assert_fail ("axis < nd", "ccv_cnnp_model_addons.c"
, 477, __extension__ __PRETTY_FUNCTION__); }))
;
478 const int n = self->super.output_size;
479 assert(n == output_size)((void) sizeof ((n == output_size) ? 1 : 0), __extension__ ({
if (n == output_size) ; else __assert_fail ("n == output_size"
, "ccv_cnnp_model_addons.c", 479, __extension__ __PRETTY_FUNCTION__
); }))
;
480 assert(output_params.dim[axis] % n == 0)((void) sizeof ((output_params.dim[axis] % n == 0) ? 1 : 0), __extension__
({ if (output_params.dim[axis] % n == 0) ; else __assert_fail
("output_params.dim[axis] % n == 0", "ccv_cnnp_model_addons.c"
, 480, __extension__ __PRETTY_FUNCTION__); }))
;
481 output_params.dim[axis] = output_params.dim[axis] / n;
482 int ofs[CCV_NNC_MAX_DIM_ALLOC(12)] = {};
483 int stride[CCV_NNC_MAX_DIM_ALLOC(12)] = {};
484 ccv_nnc_tensor_get_stride(input_params.dim, stride);
485 ccv_nnc_tensor_symbol_t to = ccv_nnc_tensor_symbol_alias_to(graph, inputs[0]);
486 if (to.d == CCV_NNC_NO_TENSOR_SYMBOL) // If we are not reshape an alias, it is straightforward.
487 {
488 for (i = 0; i < output_size; i++)
489 {
490 outputs[i] = ccv_nnc_tensor_symbol_alias_new(graph, inputs[0], ofs, stride, output_params, 0);
491 ofs[axis] += output_params.dim[axis];
492 }
493 } else {
494 // Otherwise, we need to check if it is permute. For permute, we cannot do alias directly.
495 // We need to first materialize the permute and then run reshape on top of it, otherwise it will be wrong.
496 int old_stride[CCV_NNC_MAX_DIM_ALLOC(12)];
497 ccv_nnc_tensor_symbol_alias_params(graph, inputs[0], 0, old_stride);
498 // We identify permute by checking if the stride is not in descending order.
499 // This also covered "permute" through reshape, rather than using ccv_cnnp_permute directly.
500 int i, no_permute = 1;
501 for (i = 1; no_permute && i < nd; i++)
502 if (old_stride[i - 1] < old_stride[i])
503 no_permute = 0;
504 if (no_permute)
505 { // Just straightforward reshape if there is no no permute.
506 for (i = 0; i < output_size; i++)
507 {
508 outputs[i] = ccv_nnc_tensor_symbol_alias_new(graph, inputs[0], ofs, old_stride, output_params, 0);
509 ofs[axis] += output_params.dim[axis];
510 }
511 } else {
512 // Otherwise, we first do format transform to plain tensor and then do reshape.
513 ccv_nnc_tensor_symbol_t permuted = ccv_nnc_tensor_symbol_new(graph, input_params, 0);
514 ccv_nnc_graph_exec_symbol_new(graph, CMD_FORMAT_TRANSFORM_FORWARD()ccv_nnc_cmd(CCV_NNC_FORMAT_TRANSFORM_FORWARD, 0, ccv_nnc_cmd_auto
, 0)
, TENSOR_SYMBOL_LIST(inputs[0])(const ccv_nnc_tensor_symbol_t []){inputs[0]}, (1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(permuted)(const ccv_nnc_tensor_symbol_t []){permuted}, (1 +1 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "reshape");
515 for (i = 0; i < output_size; i++)
516 {
517 outputs[i] = ccv_nnc_tensor_symbol_alias_new(graph, permuted, ofs, stride, output_params, 0);
518 ofs[axis] += output_params.dim[axis];
519 }
520 }
521 }
522}
523
static ccv_cnnp_model_t* _ccv_cnnp_chunk_copy(const ccv_cnnp_model_t* const self, void* const context);

// Virtual table for the chunk model: build splits one input into n aliases;
// copy duplicates the model with the same n, axis and name.
static const ccv_cnnp_model_vtab_t ccv_cnnp_chunk_isa = {
	.build = _ccv_cnnp_chunk_build,
	.copy = _ccv_cnnp_chunk_copy,
};
530
531ccv_cnnp_model_t* ccv_cnnp_chunk(const int n, const int axis, const char* const name)
532{
533 assert(n >= 1)((void) sizeof ((n >= 1) ? 1 : 0), __extension__ ({ if (n >=
1) ; else __assert_fail ("n >= 1", "ccv_cnnp_model_addons.c"
, 533, __extension__ __PRETTY_FUNCTION__); }))
;
534 ccv_cnnp_model_chunk_t* const model_chunk = (ccv_cnnp_model_chunk_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_chunk_t) + sizeof(ccv_nnc_tensor_symbol_t) * (n - 1));
535 model_chunk->super.isa = &ccv_cnnp_chunk_isa;
536 model_chunk->super.input_size = 1;
537 model_chunk->super.outputs = model_chunk->outputs;
538 model_chunk->super.output_size = n;
539 model_chunk->axis = axis;
540 ccv_cnnp_model_copy_name(&model_chunk->super, name);
541 return (ccv_cnnp_model_t*)model_chunk;
542}
543
544static ccv_cnnp_model_t* _ccv_cnnp_chunk_copy(const ccv_cnnp_model_t* const super, void* const context)
545{
546 const ccv_cnnp_model_chunk_t* const self = (const ccv_cnnp_model_chunk_t*)super;
547 return ccv_cnnp_chunk(self->super.output_size, self->axis, self->super.name);
548}
549
typedef struct {
	ccv_cnnp_model_t super; // Base model.
	ccv_nnc_tensor_symbol_t output; // Storage for the single output symbol.
	int format; // Target tensor format; 0 means "keep the input's format".
	int dim[CCV_NNC_MAX_DIM_ALLOC]; // Target shape; a -1 entry is inferred from the element count.
	int ofs[CCV_NNC_MAX_DIM_ALLOC]; // Offset of the alias into the input tensor.
	int stride[CCV_NNC_MAX_DIM_ALLOC]; // Explicit stride; all-zero means "derive from dim".
} ccv_cnnp_model_reshape_t;
558
559static void _ccv_cnnp_reshape_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
560{
561 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 561, __extension__ __PRETTY_FUNCTION__); }))
;
562 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 562, __extension__ __PRETTY_FUNCTION__
); }))
;
563 ccv_cnnp_model_reshape_t* const self = (ccv_cnnp_model_reshape_t*)super;
564 if (CCV_CLI_OUTPUT_LEVEL_IS(CCV_CLI_VERBOSE)(CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
565 {
566 PRINT(CCV_CLI_VERBOSE, "[cnnp_reshape_build] 1. dim: (%d", self->dim[0])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_reshape_build] 1. dim: (%d", self->dim[0]
); fflush(stdout); } } while (0)
;
567 int i;
568 for (i = 1; i < CCV_NNC_MAX_DIM_ALLOC(12) && self->dim[i] > 0; i++)
569 PRINT(CCV_CLI_VERBOSE, ", %d", self->dim[i])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(", %d", self->dim[i]); fflush(stdout); } } while
(0)
;
570 const int count = i;
571 PRINT(CCV_CLI_VERBOSE, "), ofs: (%d", self->ofs[0])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("), ofs: (%d", self->ofs[0]); fflush(stdout); } }
while (0)
;
572 for (i = 1; i < count; i++)
573 PRINT(CCV_CLI_VERBOSE, ", %d", self->ofs[i])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(", %d", self->ofs[i]); fflush(stdout); } } while
(0)
;
574 PRINT(CCV_CLI_VERBOSE, "), stride: (%d", self->stride[0])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("), stride: (%d", self->stride[0]); fflush(stdout
); } } while (0)
;
575 for (i = 1; i < count; i++)
576 PRINT(CCV_CLI_VERBOSE, ", %d", self->stride[i])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(", %d", self->stride[i]); fflush(stdout); } } while
(0)
;
577 PRINT(CCV_CLI_VERBOSE, ")\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(")\n"); fflush(stdout); } } while (0)
;
578 }
579 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
580 int dim[CCV_NNC_MAX_DIM_ALLOC(12)];
581 memcpy(dim, self->dim, sizeof(dim));
582 int i, auto_idx = -1;
583 size_t known = 1;
584 const size_t tensor_count = ccv_nnc_tensor_count(params);
585 for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC(12) && dim[i]; i++)
586 if (dim[i] == -1)
587 auto_idx = i;
588 else
589 known *= dim[i];
590 if (auto_idx >= 0)
591 {
592 assert(known > 0 && tensor_count % known == 0)((void) sizeof ((known > 0 && tensor_count % known
== 0) ? 1 : 0), __extension__ ({ if (known > 0 &&
tensor_count % known == 0) ; else __assert_fail ("known > 0 && tensor_count % known == 0"
, "ccv_cnnp_model_addons.c", 592, __extension__ __PRETTY_FUNCTION__
); }))
;
593 dim[auto_idx] = tensor_count / known;
594 }
595 if (CCV_CLI_OUTPUT_LEVEL_IS(CCV_CLI_VERBOSE)(CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
596 {
597 PRINT(CCV_CLI_VERBOSE, "[cnnp_reshape_build] 2. input: (%d", params.dim[0])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_reshape_build] 2. input: (%d", params.dim[0]
); fflush(stdout); } } while (0)
;
598 int i;
599 for (i = 1; i < CCV_NNC_MAX_DIM_ALLOC(12) && params.dim[i] > 0; i++)
600 PRINT(CCV_CLI_VERBOSE, ", %d", params.dim[i])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(", %d", params.dim[i]); fflush(stdout); } } while (
0)
;
601 PRINT(CCV_CLI_VERBOSE, ")\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(")\n"); fflush(stdout); } } while (0)
;
602 }
603 assert(ccv_nnc_dimension_count(dim) <= ccv_nnc_tensor_count(params))((void) sizeof ((ccv_nnc_dimension_count(dim) <= ccv_nnc_tensor_count
(params)) ? 1 : 0), __extension__ ({ if (ccv_nnc_dimension_count
(dim) <= ccv_nnc_tensor_count(params)) ; else __assert_fail
("ccv_nnc_dimension_count(dim) <= ccv_nnc_tensor_count(params)"
, "ccv_cnnp_model_addons.c", 603, __extension__ __PRETTY_FUNCTION__
); }))
;
604 ccv_nnc_tensor_symbol_t to = ccv_nnc_tensor_symbol_alias_to(graph, inputs[0]);
605 int stride_from_dim[CCV_NNC_MAX_DIM_ALLOC(12)];
606 if (to.d == CCV_NNC_NO_TENSOR_SYMBOL) // If we are not reshape an alias, it is straightforward.
607 {
608 memcpy(params.dim, dim, sizeof(params.dim));
609 int* stride;
610 if (self->stride[0] == 0)
611 {
612 ccv_nnc_tensor_get_stride(dim, stride_from_dim);
613 stride = stride_from_dim;
614 } else
615 stride = self->stride;
616 if (self->format > 0)
617 params.format = self->format;
618 outputs[0] = ccv_nnc_tensor_symbol_alias_new(graph, inputs[0], self->ofs, stride, params, 0);
619 } else {
620 // Otherwise, we need to check if it is permute. For permute, we cannot do alias directly.
621 // We need to first materialize the permute and then run reshape on top of it, otherwise it will be wrong.
622 int old_stride[CCV_NNC_MAX_DIM_ALLOC(12)];
623 ccv_nnc_tensor_symbol_alias_params(graph, inputs[0], 0, old_stride);
624 // We identify permute by checking if the stride is not in descending order.
625 // This also covered "permute" through reshape, rather than using ccv_cnnp_permute directly.
626 const int nd = ccv_nnc_tensor_nd(params.dim);
627 const int new_nd = ccv_nnc_tensor_nd(dim);
628 int i, no_permute = 1;
629 // If the new dim has different nd, or we actually have a stride, we need to check if it is no permute or not.
630 if (new_nd != nd || (self->stride[0] != 0 && memcmp(self->stride, old_stride, sizeof(self->stride))))
631 for (i = 1; no_permute && i < nd; i++)
632 if (old_stride[i - 1] < old_stride[i])
633 no_permute = 0;
634 if (no_permute)
635 { // Just straightforward reshape if there is no no permute.
636 memcpy(params.dim, dim, sizeof(params.dim));
637 int* stride;
638 if (self->stride[0] == 0)
639 {
640 if (new_nd != nd) // Cannot use old stride.
641 {
642 ccv_nnc_tensor_get_stride(dim, stride_from_dim);
643 stride = stride_from_dim;
644 } else
645 stride = old_stride;
646 } else
647 stride = self->stride;
648 if (self->format > 0)
649 params.format = self->format;
650 outputs[0] = ccv_nnc_tensor_symbol_alias_new(graph, inputs[0], self->ofs, stride, params, 0);
651 } else {
652 // Otherwise, we first do format transform to plain tensor and then do reshape.
653 ccv_nnc_tensor_symbol_t permuted = ccv_nnc_tensor_symbol_new(graph, params, 0);
654 ccv_nnc_graph_exec_symbol_new(graph, CMD_FORMAT_TRANSFORM_FORWARD()ccv_nnc_cmd(CCV_NNC_FORMAT_TRANSFORM_FORWARD, 0, ccv_nnc_cmd_auto
, 0)
, TENSOR_SYMBOL_LIST(inputs[0])(const ccv_nnc_tensor_symbol_t []){inputs[0]}, (1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(permuted)(const ccv_nnc_tensor_symbol_t []){permuted}, (1 +1 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "reshape");
655 memcpy(params.dim, dim, sizeof(params.dim));
656 int* stride;
657 if (self->stride[0] == 0)
658 {
659 ccv_nnc_tensor_get_stride(dim, stride_from_dim);
660 stride = stride_from_dim;
661 } else
662 stride = self->stride;
663 if (self->format > 0)
664 params.format = self->format;
665 // And then we create alias against the permuted one.
666 outputs[0] = ccv_nnc_tensor_symbol_alias_new(graph, permuted, self->ofs, stride, params, 0);
667 }
668 }
669}
670
static ccv_cnnp_model_t* _ccv_cnnp_reshape_copy(const ccv_cnnp_model_t* const super, void* const context);

// Virtual table for the reshape model.
static const ccv_cnnp_model_vtab_t ccv_cnnp_reshape_isa = {
	.build = _ccv_cnnp_reshape_build,
	.copy = _ccv_cnnp_reshape_copy,
};
677
678ccv_cnnp_model_t* ccv_cnnp_reshape(const int format, const int dim[CCV_NNC_MAX_DIM_ALLOC(12)], const int ofs[CCV_NNC_MAX_DIM_ALLOC(12)], const int stride[CCV_NNC_MAX_DIM_ALLOC(12)], const char* const name)
679{
680 ccv_cnnp_model_reshape_t* const model_reshape = (ccv_cnnp_model_reshape_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_reshape_t));
681 model_reshape->super.isa = &ccv_cnnp_reshape_isa;
682 model_reshape->super.input_size = 1;
683 model_reshape->super.outputs = &model_reshape->output;
684 model_reshape->super.output_size = 1;
685 ccv_cnnp_model_copy_name(&model_reshape->super, name);
686 model_reshape->format = format;
687 memcpy(model_reshape->dim, dim, sizeof(model_reshape->dim));
688 memcpy(model_reshape->ofs, ofs, sizeof(model_reshape->ofs));
689 if (stride[0] != 0)
690 memcpy(model_reshape->stride, stride, sizeof(model_reshape->stride));
691 return (ccv_cnnp_model_t*)model_reshape;
692}
693
694static ccv_cnnp_model_t* _ccv_cnnp_reshape_copy(const ccv_cnnp_model_t* const super, void* const context)
695{
696 const ccv_cnnp_model_reshape_t* const self = (const ccv_cnnp_model_reshape_t*)super;
697 return ccv_cnnp_reshape(self->format, self->dim, self->ofs, self->stride, self->super.name);
698}
699
typedef struct {
	ccv_cnnp_model_t super; // Base model.
	ccv_nnc_tensor_symbol_t output; // Storage for the single output symbol.
	int type; // Padding mode, forwarded to CMD_PAD_FORWARD — semantics defined by the pad command.
	int begin[CCV_NNC_MAX_DIM_ALLOC]; // Padding added before each dimension.
	int end[CCV_NNC_MAX_DIM_ALLOC]; // Padding added after each dimension.
} ccv_cnnp_model_pad_t;
707
708static void _ccv_cnnp_pad_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
709{
710 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 710, __extension__ __PRETTY_FUNCTION__); }))
;
711 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 711, __extension__ __PRETTY_FUNCTION__
); }))
;
712 ccv_cnnp_model_pad_t* const self = (ccv_cnnp_model_pad_t*)super;
713 PRINT(CCV_CLI_VERBOSE, "[cnnp_pad_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_pad_build] -\n"); fflush(stdout); } } while (
0)
;
714 const ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
715 const int nd = ccv_nnc_tensor_nd(input_params.dim);
716 ccv_nnc_tensor_param_t params = input_params;
717 int i;
718 for (i = 0 ; i < nd; i++)
719 params.dim[i] += self->begin[i] + self->end[i];
720 const ccv_nnc_tensor_symbol_t padded = ccv_nnc_tensor_symbol_new(graph, params, 0);
721 ccv_nnc_cmd_t pad = CMD_PAD_FORWARD(self->type, (), ())ccv_nnc_cmd(CCV_NNC_PAD_FORWARD, 0, ((ccv_nnc_cmd_param_t){.size
={.dim={}},.pad={.type=self->type,.end={}}}), 0)
;
722 memcpy(pad.info.size.dim, self->begin, sizeof(pad.info.size.dim));
723 memcpy(pad.info.pad.end, self->end, sizeof(pad.info.pad.end));
724 ccv_nnc_graph_exec_symbol_new(graph, pad, TENSOR_SYMBOL_LIST(inputs[0])(const ccv_nnc_tensor_symbol_t []){inputs[0]}, (1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(padded)(const ccv_nnc_tensor_symbol_t []){padded}, (1 +1 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "pad");
725 outputs[0] = padded;
726}
727
static ccv_cnnp_model_t* _ccv_cnnp_pad_copy(const ccv_cnnp_model_t* const super, void* const context);

// Virtual table for the pad model.
static const ccv_cnnp_model_vtab_t ccv_cnnp_pad_isa = {
	.build = _ccv_cnnp_pad_build,
	.copy = _ccv_cnnp_pad_copy,
};
734
735ccv_cnnp_model_t* ccv_cnnp_pad(const int type, const int begin[CCV_NNC_MAX_DIM_ALLOC(12)], const int end[CCV_NNC_MAX_DIM_ALLOC(12)], const char* const name)
736{
737 ccv_cnnp_model_pad_t* const model_pad = (ccv_cnnp_model_pad_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_pad_t));
738 model_pad->super.isa = &ccv_cnnp_pad_isa;
739 model_pad->super.input_size = 1;
740 model_pad->super.outputs = &model_pad->output;
741 model_pad->super.output_size = 1;
742 ccv_cnnp_model_copy_name(&model_pad->super, name);
743 model_pad->type = type;
744 memcpy(model_pad->begin, begin, sizeof(model_pad->begin));
745 memcpy(model_pad->end, end, sizeof(model_pad->end));
746 return (ccv_cnnp_model_t*)model_pad;
747}
748
749static ccv_cnnp_model_t* _ccv_cnnp_pad_copy(const ccv_cnnp_model_t* const super, void* const context)
750{
751 const ccv_cnnp_model_pad_t* const self = (const ccv_cnnp_model_pad_t*)super;
752 return ccv_cnnp_pad(self->type, self->begin, self->end, self->super.name);
753}
754
typedef struct {
	ccv_cnnp_model_t super; // Base model.
	ccv_nnc_tensor_symbol_t output; // Storage for the single output symbol.
} ccv_cnnp_model_identity_t;
759
760static void _ccv_cnnp_identity_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
761{
762 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 762, __extension__ __PRETTY_FUNCTION__); }))
;
763 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 763, __extension__ __PRETTY_FUNCTION__
); }))
;
764 PRINT(CCV_CLI_VERBOSE, "[cnnp_identity_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_identity_build] -\n"); fflush(stdout); } } while
(0)
;
765 outputs[0] = inputs[0];
766}
767
static ccv_cnnp_model_t* _ccv_cnnp_identity_copy(const ccv_cnnp_model_t* const super, void* const context);

// Virtual table for the identity model.
static const ccv_cnnp_model_vtab_t ccv_cnnp_identity_isa = {
	.build = _ccv_cnnp_identity_build,
	.copy = _ccv_cnnp_identity_copy,
};
774
775ccv_cnnp_model_t* ccv_cnnp_identity(const char* const name)
776{
777 ccv_cnnp_model_identity_t* const model_identity = (ccv_cnnp_model_identity_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_identity_t));
778 model_identity->super.isa = &ccv_cnnp_identity_isa;
779 model_identity->super.input_size = 1;
780 model_identity->super.outputs = &model_identity->output;
781 model_identity->super.output_size = 1;
782 ccv_cnnp_model_copy_name(&model_identity->super, name);
783 return (ccv_cnnp_model_t*)model_identity;
784}
785
786static ccv_cnnp_model_t* _ccv_cnnp_identity_copy(const ccv_cnnp_model_t* const super, void* const context)
787{
788 const ccv_cnnp_model_identity_t* const self = (const ccv_cnnp_model_identity_t*)super;
789 return ccv_cnnp_identity(self->super.name);
790}
791
typedef struct {
	ccv_cnnp_model_t super; // Base model.
	ccv_nnc_tensor_symbol_t output; // Storage for the single output symbol.
	int index[CCV_NNC_MAX_DIM_ALLOC]; // index[i] = which input dimension becomes output dimension i.
} ccv_cnnp_model_permute_t;
797
798static void _ccv_cnnp_permute_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
799{
800 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 800, __extension__ __PRETTY_FUNCTION__); }))
;
801 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 801, __extension__ __PRETTY_FUNCTION__
); }))
;
802 ccv_cnnp_model_permute_t* const self = (ccv_cnnp_model_permute_t*)super;
803 PRINT(CCV_CLI_VERBOSE, "[cnnp_permute_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_permute_build] -\n"); fflush(stdout); } } while
(0)
;
804 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
805 ccv_nnc_tensor_symbol_t to = ccv_nnc_tensor_symbol_alias_to(graph, inputs[0]);
806 const int nd = ccv_nnc_tensor_nd(params.dim);
807 int input_dim[CCV_NNC_MAX_DIM_ALLOC(12)];
808 memcpy(input_dim, params.dim, sizeof(params.dim));
809 int input_stride[CCV_NNC_MAX_DIM_ALLOC(12)] = {};
810 int output_stride[CCV_NNC_MAX_DIM_ALLOC(12)] = {};
811 if (to.d == CCV_NNC_NO_TENSOR_SYMBOL) // If it is not an alias. Find stride and permute.
812 {
813 ccv_nnc_tensor_get_stride(input_dim, input_stride);
814 int i;
815 for (i = 0; i < nd; i++)
816 {
817 const int idx = self->index[i];
818 assert(idx >= 0 && idx < nd)((void) sizeof ((idx >= 0 && idx < nd) ? 1 : 0)
, __extension__ ({ if (idx >= 0 && idx < nd) ; else
__assert_fail ("idx >= 0 && idx < nd", "ccv_cnnp_model_addons.c"
, 818, __extension__ __PRETTY_FUNCTION__); }))
;
819 params.dim[i] = input_dim[idx];
820 output_stride[i] = input_stride[idx];
821 }
822 outputs[0] = ccv_nnc_tensor_symbol_alias_new(graph, inputs[0], ccv_nnc_no_ofs, output_stride, params, 0);
823 } else {
824 // if it is an alias, we can get the stride from it and use that.
825 int input_ofs[CCV_NNC_MAX_DIM_ALLOC(12)];
826 ccv_nnc_tensor_symbol_alias_params(graph, inputs[0], input_ofs, input_stride);
827 assert(input_stride[0] != 0)((void) sizeof ((input_stride[0] != 0) ? 1 : 0), __extension__
({ if (input_stride[0] != 0) ; else __assert_fail ("input_stride[0] != 0"
, "ccv_cnnp_model_addons.c", 827, __extension__ __PRETTY_FUNCTION__
); }))
;
828 int output_ofs[CCV_NNC_MAX_DIM_ALLOC(12)] = {};
829 int i;
830 for (i = 0; i < nd; i++)
831 {
832 const int idx = self->index[i];
833 assert(idx >= 0 && idx < nd)((void) sizeof ((idx >= 0 && idx < nd) ? 1 : 0)
, __extension__ ({ if (idx >= 0 && idx < nd) ; else
__assert_fail ("idx >= 0 && idx < nd", "ccv_cnnp_model_addons.c"
, 833, __extension__ __PRETTY_FUNCTION__); }))
;
834 params.dim[i] = input_dim[idx];
835 output_stride[i] = input_stride[idx];
836 output_ofs[i] = input_ofs[idx];
837 }
838 outputs[0] = ccv_nnc_tensor_symbol_alias_new(graph, inputs[0], output_ofs, output_stride, params, 0);
839 }
840}
841
static ccv_cnnp_model_t* _ccv_cnnp_permute_copy(const ccv_cnnp_model_t* const super, void* const context);

// Virtual table for the permute model.
static const ccv_cnnp_model_vtab_t ccv_cnnp_permute_isa = {
	.build = _ccv_cnnp_permute_build,
	.copy = _ccv_cnnp_permute_copy,
};
848
849ccv_cnnp_model_t* ccv_cnnp_permute(const int index[CCV_NNC_MAX_DIM_ALLOC(12)], const char* const name)
850{
851 ccv_cnnp_model_permute_t* const model_permute = (ccv_cnnp_model_permute_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_permute_t));
852 model_permute->super.isa = &ccv_cnnp_permute_isa;
853 model_permute->super.input_size = 1;
854 model_permute->super.outputs = &model_permute->output;
855 model_permute->super.output_size = 1;
856 ccv_cnnp_model_copy_name(&model_permute->super, name);
857 memcpy(model_permute->index, index, sizeof(model_permute->index));
858 return (ccv_cnnp_model_t*)model_permute;
859}
860
861static ccv_cnnp_model_t* _ccv_cnnp_permute_copy(const ccv_cnnp_model_t* const super, void* const context)
862{
863 const ccv_cnnp_model_permute_t* const self = (const ccv_cnnp_model_permute_t*)super;
864 return ccv_cnnp_permute(self->index, self->super.name);
865}
866
typedef struct {
	ccv_cnnp_model_t super; // Base model.
	int index; // Which input to forward as the single output.
	ccv_nnc_tensor_symbol_t output; // Storage for the single output symbol.
} ccv_cnnp_model_extract_t;
872
873static void _ccv_cnnp_extract_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
874{
875 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 875, __extension__ __PRETTY_FUNCTION__
); }))
;
876 ccv_cnnp_model_extract_t* const self = (ccv_cnnp_model_extract_t*)super;
877 PRINT(CCV_CLI_VERBOSE, "[cnnp_extract_build] index: %d\n", self->index)do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_extract_build] index: %d\n", self->index)
; fflush(stdout); } } while (0)
;
878 outputs[0] = inputs[self->index];
879}
880
static ccv_cnnp_model_t* _ccv_cnnp_extract_copy(const ccv_cnnp_model_t* const self, void* const context);

// Virtual table for the extract model.
static const ccv_cnnp_model_vtab_t ccv_cnnp_extract_isa = {
	.build = _ccv_cnnp_extract_build,
	.copy = _ccv_cnnp_extract_copy,
};
887
888ccv_cnnp_model_t* ccv_cnnp_extract(const int index, const char* const name)
889{
890 ccv_cnnp_model_extract_t* const model_extract = (ccv_cnnp_model_extract_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_extract_t));
891 model_extract->index = index;
892 model_extract->super.isa = &ccv_cnnp_extract_isa;
893 model_extract->super.input_size = 0;
894 model_extract->super.outputs = &model_extract->output;
895 model_extract->super.output_size = 1;
896 ccv_cnnp_model_copy_name(&model_extract->super, name);
897 return (ccv_cnnp_model_t*)model_extract;
898}
899
900static ccv_cnnp_model_t* _ccv_cnnp_extract_copy(const ccv_cnnp_model_t* const super, void* const context)
901{
902 ccv_cnnp_model_extract_t* const self = (ccv_cnnp_model_extract_t*)super;
903 return ccv_cnnp_extract(self->index, self->super.name);
904}
905
906typedef struct {
907 ccv_cnnp_model_t super;
908 ccv_nnc_tensor_symbol_t output;
909} ccv_cnnp_model_flatten_t;
910
911static void _ccv_cnnp_flatten_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
912{
913 PRINT(CCV_CLI_VERBOSE, "[cnnp_flatten_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_flatten_build] -\n"); fflush(stdout); } } while
(0)
;
914 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 914, __extension__ __PRETTY_FUNCTION__); }))
;
915 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 915, __extension__ __PRETTY_FUNCTION__
); }))
;
916 const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
917 ccv_nnc_tensor_param_t output_params = params;
918 memset(output_params.dim, 0, sizeof(output_params.dim));
919 output_params.dim[0] = ccv_nnc_tensor_get_n(params);
920 assert(output_params.dim[0] > 0)((void) sizeof ((output_params.dim[0] > 0) ? 1 : 0), __extension__
({ if (output_params.dim[0] > 0) ; else __assert_fail ("output_params.dim[0] > 0"
, "ccv_cnnp_model_addons.c", 920, __extension__ __PRETTY_FUNCTION__
); }))
;
921 output_params.dim[1] = ccv_nnc_tensor_count(params) / output_params.dim[0];
922 int stride[CCV_NNC_MAX_DIM_ALLOC(12)] = {};
923 ccv_nnc_tensor_get_stride(output_params.dim, stride);
924 outputs[0] = ccv_nnc_tensor_symbol_alias_new(graph, inputs[0], DIM_ALLOC()(int [(12)]){}, stride, output_params, 0);
925}
926
927static ccv_cnnp_model_t* _ccv_cnnp_flatten_copy(const ccv_cnnp_model_t* const self, void* const context);
928
929static const ccv_cnnp_model_vtab_t ccv_cnnp_flatten_isa = {
930 .build = _ccv_cnnp_flatten_build,
931 .copy = _ccv_cnnp_flatten_copy,
932};
933
934ccv_cnnp_model_t* ccv_cnnp_flatten(const char* const name)
935{
936 ccv_cnnp_model_flatten_t* const model_flatten = (ccv_cnnp_model_flatten_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_flatten_t));
937 model_flatten->super.isa = &ccv_cnnp_flatten_isa;
938 model_flatten->super.input_size = 1;
939 model_flatten->super.outputs = &model_flatten->output;
940 model_flatten->super.output_size = 1;
941 ccv_cnnp_model_copy_name(&model_flatten->super, name);
942 return (ccv_cnnp_model_t*)model_flatten;
943}
944
945static ccv_cnnp_model_t* _ccv_cnnp_flatten_copy(const ccv_cnnp_model_t* const self, void* const context)
946{
947 return ccv_cnnp_flatten(self->name);
948}
949
950// MARK - Batch Norm Layer
951
952typedef struct {
953 ccv_cnnp_model_t super;
954 ccv_nnc_tensor_symbol_t output;
955 ccv_nnc_tensor_symbol_t bias;
956 ccv_nnc_tensor_symbol_t scale;
957 ccv_nnc_graph_exec_symbol_t batch_norm;
958 ccv_nnc_cmd_param_t params;
959 ccv_array_t* zero_inits;
960 ccv_array_t* retainables;
961} ccv_cnnp_model_batch_norm_t;
962
963static void _ccv_cnnp_batch_norm_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
964{
965 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 965, __extension__ __PRETTY_FUNCTION__); }))
;
966 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 966, __extension__ __PRETTY_FUNCTION__
); }))
;
967 ccv_cnnp_model_batch_norm_t* const self = (ccv_cnnp_model_batch_norm_t*)super;
968 PRINT(CCV_CLI_VERBOSE, "[cnnp_batch_norm_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_batch_norm_build] -\n"); fflush(stdout); } }
while (0)
;
969 const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
970 const int nd = ccv_nnc_tensor_nd(params.dim);
971 ccv_nnc_tensor_param_t bias_params = params;
972 memset(bias_params.dim, 0, sizeof(bias_params.dim));
973 // If the accuracy is not enough, bump it to 32-bit floating point.
974 if (bias_params.datatype != CCV_32F && bias_params.datatype != CCV_64F)
975 bias_params.datatype = CCV_32F;
976 bias_params.dim[0] = nd > 1 ? ccv_nnc_tensor_get_c(params) : params.dim[0];
977 const ccv_nnc_tensor_symbol_t output = ccv_nnc_tensor_symbol_new(graph, params, 0);
978 // Both scale and bias are shared between if this model is reused.
979 if (!self->scale.graph)
980 self->scale = ccv_nnc_tensor_symbol_new(graph, bias_params, "scale");
981 if (!self->bias.graph)
982 self->bias = ccv_nnc_tensor_symbol_new(graph, bias_params, "bias");
983 const ccv_nnc_tensor_symbol_t scale = ccv_cnnp_model_get_symbol(super, self->scale);
984 const ccv_nnc_tensor_symbol_t bias = ccv_cnnp_model_get_symbol(super, self->bias);
985 const ccv_nnc_tensor_symbol_t mean = ccv_nnc_tensor_symbol_new(graph, bias_params, "mean");
986 const ccv_nnc_tensor_symbol_t var = ccv_nnc_tensor_symbol_new(graph, bias_params, "var");
987 // Otherwise, notice mean, var, saved_mean, saved_inv_std are not reused.
988 if (!self->zero_inits)
989 self->zero_inits = ccv_array_new(sizeof(ccv_nnc_tensor_symbol_t), 0, 0);
990 ccv_array_push(self->zero_inits, &mean);
991 ccv_array_push(self->zero_inits, &var);
992 const ccv_nnc_tensor_symbol_t out_mean = ccv_nnc_tensor_symbol_new(graph, bias_params, "out_mean");
993 const ccv_nnc_tensor_symbol_t out_var = ccv_nnc_tensor_symbol_new(graph, bias_params, "out_var");
994 if (!self->retainables)
995 self->retainables = ccv_array_new(sizeof(ccv_nnc_tensor_symbol_t), 0, 0);
996 ccv_array_push(self->retainables, &out_mean);
997 ccv_array_push(self->retainables, &out_var);
998 const ccv_nnc_tensor_symbol_t saved_mean = ccv_nnc_tensor_symbol_new(graph, bias_params, "saved_mean");
999 const ccv_nnc_tensor_symbol_t saved_inv_std = ccv_nnc_tensor_symbol_new(graph, bias_params, "saved_inv_std");
1000 const int hw = ccv_nnc_tensor_hw(params, ccv_nnc_tensor_nd(params.dim), CCV_NNC_MAX_DIM(2));
1001 ccv_nnc_cmd_param_t batch_norm = self->params;
1002 batch_norm.bnorm.count = hw >= 0 ? CCV_NNC_MAX_DIM(2) + 1 : 1;
1003 int i;
1004 batch_norm.bnorm.axis[0] = (params.format == CCV_TENSOR_FORMAT_CHWN) ? 3 : 0;
1005 if (hw >= 0)
1006 for (i = 0; i < CCV_NNC_MAX_DIM(2); i++)
1007 batch_norm.bnorm.axis[i + 1] = i + hw;
1008 self->params = batch_norm;
1009 self->batch_norm = ccv_nnc_graph_exec_symbol_new(graph, ccv_nnc_cmd(CCV_NNC_BATCH_NORM_FORWARD, 0, batch_norm, 0), TENSOR_SYMBOL_LIST(inputs[0], scale, bias, mean, var)(const ccv_nnc_tensor_symbol_t []){inputs[0], scale, bias, mean
, var}, (1 +1 +1 +1 +1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(output, out_mean, out_var, saved_mean, saved_inv_std)(const ccv_nnc_tensor_symbol_t []){output, out_mean, out_var,
saved_mean, saved_inv_std}, (1 +1 +1 +1 +1 +1 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "batch_norm");
1010 outputs[0] = output;
1011}
1012
1013static void _ccv_cnnp_batch_norm_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
1014{
1015 ccv_cnnp_model_batch_norm_t* const self = (ccv_cnnp_model_batch_norm_t*)super;
1016 if (self->scale.graph)
1017 initializer(context, CMD_RANDOM_UNIFORM_FORWARD(0, 1)ccv_nnc_cmd(CCV_NNC_RANDOM_UNIFORM_FORWARD, 0, (ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.blas={.a={0, 1}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->scale);
1018 if (self->bias.graph)
1019 initializer(context, CMD_SET_FORWARD(0)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={0,}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->bias);
1020 int i;
1021 if (self->zero_inits)
1022 for (i = 0; i < self->zero_inits->rnum; i++)
1023 initializer(context, CMD_SET_FORWARD(0)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={0,}}}, 0)
, ccv_nnc_no_hint, 0, 0, *(ccv_nnc_tensor_symbol_t*)ccv_array_get(self->zero_inits, i)((void*)(((char*)((self->zero_inits)->data)) + (size_t)
(self->zero_inits)->rsize * (size_t)(i)))
);
1024}
1025
1026static void _ccv_cnnp_batch_norm_add_to_parameter(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const parameters, const int is_trainable)
1027{
1028 ccv_cnnp_model_batch_norm_t* const self = (ccv_cnnp_model_batch_norm_t*)super;
1029 if (self->scale.graph)
1030 add_to_array(parameters, self->scale, is_trainable);
1031 if (self->bias.graph)
1032 add_to_array(parameters, self->bias, is_trainable);
1033}
1034
1035static void _ccv_cnnp_batch_norm_add_to_output(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const outputs)
1036{
1037 ccv_cnnp_model_batch_norm_t* const self = (ccv_cnnp_model_batch_norm_t*)super;
1038 int i;
1039 if (self->retainables)
1040 for (i = 0; i < self->retainables->rnum; i++)
1041 {
1042 const ccv_nnc_tensor_symbol_t symbol = *(ccv_nnc_tensor_symbol_t*)ccv_array_get(self->retainables, i)((void*)(((char*)((self->retainables)->data)) + (size_t
)(self->retainables)->rsize * (size_t)(i)))
;
1043 add_to_array(outputs, symbol, 0);
1044 }
1045}
1046
1047static void _ccv_cnnp_batch_norm_set_is_test(ccv_cnnp_model_t* const super, const int is_test, const ccv_cnnp_cmd_updater_f updater, void* const context)
1048{
1049 ccv_cnnp_model_batch_norm_t* const self = (ccv_cnnp_model_batch_norm_t*)super;
1050 if (self->batch_norm.graph)
1051 {
1052 self->params.bnorm.is_test = is_test;
1053 updater(context, self->batch_norm, ccv_nnc_cmd(CCV_NNC_BATCH_NORM_FORWARD, 0, self->params, 0), ccv_nnc_no_hint);
1054 }
1055}
1056
1057static void _ccv_cnnp_batch_norm_deinit(ccv_cnnp_model_t* const super)
1058{
1059 ccv_cnnp_model_batch_norm_t* const self = (ccv_cnnp_model_batch_norm_t*)super;
1060 if (self->zero_inits)
1061 ccv_array_free(self->zero_inits);
1062 if (self->retainables)
1063 ccv_array_free(self->retainables);
1064}
1065
1066static ccv_cnnp_model_t* _ccv_cnnp_batch_norm_copy(const ccv_cnnp_model_t* const super, void* const context);
1067
1068static const ccv_cnnp_model_vtab_t ccv_cnnp_batch_norm_isa = {
1069 .build = _ccv_cnnp_batch_norm_build,
1070 .init_states = _ccv_cnnp_batch_norm_init_states,
1071 .add_to_parameter = _ccv_cnnp_batch_norm_add_to_parameter,
1072 .add_to_output = _ccv_cnnp_batch_norm_add_to_output,
1073 .copy = _ccv_cnnp_batch_norm_copy,
1074 .set_is_test = _ccv_cnnp_batch_norm_set_is_test,
1075 .deinit = _ccv_cnnp_batch_norm_deinit,
1076};
1077
1078ccv_cnnp_model_t* ccv_cnnp_batch_norm(const float momentum, const float epsilon, const int is_trainable, const char* const name)
1079{
1080 ccv_cnnp_model_batch_norm_t* const model_batch_norm = (ccv_cnnp_model_batch_norm_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_batch_norm_t));
1081 model_batch_norm->super.isa = &ccv_cnnp_batch_norm_isa;
1082 model_batch_norm->super.input_size = 1;
1083 model_batch_norm->super.outputs = &model_batch_norm->output;
1084 model_batch_norm->super.output_size = 1;
1085 model_batch_norm->super.is_trainable = is_trainable;
1086 ccv_cnnp_model_copy_name(&model_batch_norm->super, name);
1087 model_batch_norm->scale.d = CCV_NNC_NO_TENSOR_SYMBOL;
1088 model_batch_norm->scale.graph = 0;
1089 model_batch_norm->bias.d = CCV_NNC_NO_TENSOR_SYMBOL;
1090 model_batch_norm->bias.graph = 0;
1091 model_batch_norm->params.bnorm.momentum = momentum;
1092 model_batch_norm->params.bnorm.epsilon = epsilon;
1093 return (ccv_cnnp_model_t*)model_batch_norm;
1094}
1095
1096static ccv_cnnp_model_t* _ccv_cnnp_batch_norm_copy(const ccv_cnnp_model_t* const super, void* const context)
1097{
1098 const ccv_cnnp_model_batch_norm_t* const self = (const ccv_cnnp_model_batch_norm_t*)super;
1099 return ccv_cnnp_batch_norm(self->params.bnorm.momentum, self->params.bnorm.epsilon, self->super.is_trainable, self->super.name);
1100}
1101
1102// MARK - Convolution Layer
1103
1104typedef struct {
1105 ccv_cnnp_model_t super;
1106 ccv_nnc_tensor_symbol_t output;
1107 ccv_nnc_tensor_symbol_t weights;
1108 ccv_nnc_tensor_symbol_t bias;
1109 int groups;
1110 int filters;
1111 int kdim[CCV_NNC_MAX_DIM_ALLOC(12)];
1112 int dilation[CCV_NNC_MAX_DIM_ALLOC(12)];
1113 int no_bias;
1114 int format;
1115 ccv_nnc_hint_t hint;
1116} ccv_cnnp_model_convolution_t;
1117
1118static void _ccv_cnnp_convolution_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1119{
1120 ccv_cnnp_model_convolution_t* const self = (ccv_cnnp_model_convolution_t*)super;
1121 PRINT(CCV_CLI_VERBOSE, "[cnnp_convolution_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_convolution_build] -\n"); fflush(stdout); } }
while (0)
;
1
Assuming the condition is false
2
Taking false branch
3
Loop condition is false. Exiting loop
1122 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 1122, __extension__ __PRETTY_FUNCTION__); }))
;
4
Assuming 'input_size' is equal to 1
5
Taking true branch
1123 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 1123, __extension__ __PRETTY_FUNCTION__
); }))
;
6
Assuming 'output_size' is equal to 1
7
Taking true branch
1124 const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
1125 int i;
1126 const int k_nd = ccv_nnc_tensor_nd(self->kdim);
1127 const int nd = k_nd + 2;
1128 ccv_nnc_tensor_param_t weights_params = params;
1129 if (self->format)
8
Assuming field 'format' is 0
9
Taking false branch
1130 weights_params.format = self->format;
1131 ccv_nnc_tensor_set_n(&weights_params, self->filters);
1132 const int a_nd = ccv_nnc_tensor_nd(params.dim);
1133 int c;
10
'c' declared without an initial value
1134 switch (params.format)
11
'Default' branch taken. Execution continues on line 1149
1135 {
1136 case CCV_TENSOR_FORMAT_NHWC:
1137 c = params.dim[a_nd - 1];
1138 break;
1139 case CCV_TENSOR_FORMAT_NCHW:
1140 if (a_nd == k_nd + 1)
1141 c = params.dim[0];
1142 else
1143 c = params.dim[a_nd <= 1 ? 0 : 1];
1144 break;
1145 case CCV_TENSOR_FORMAT_CHWN:
1146 c = params.dim[0];
1147 break;
1148 }
1149 assert(c % self->groups == 0)((void) sizeof ((c % self->groups == 0) ? 1 : 0), __extension__
({ if (c % self->groups == 0) ; else __assert_fail ("c % self->groups == 0"
, "ccv_cnnp_model_addons.c", 1149, __extension__ __PRETTY_FUNCTION__
); }))
;
12
The left operand of '%' is a garbage value
1150 ccv_nnc_tensor_set_c(&weights_params, nd, c / self->groups);
1151 int hw = -1;
1152 if (weights_params.format == CCV_TENSOR_FORMAT_NHWC || weights_params.format == CCV_TENSOR_FORMAT_CHWN)
1153 hw = 1;
1154 else if (weights_params.format == CCV_TENSOR_FORMAT_NCHW)
1155 hw = 2;
1156 assert(hw >= 0)((void) sizeof ((hw >= 0) ? 1 : 0), __extension__ ({ if (hw
>= 0) ; else __assert_fail ("hw >= 0", "ccv_cnnp_model_addons.c"
, 1156, __extension__ __PRETTY_FUNCTION__); }))
;
1157 for (i = 0; i < k_nd; i++)
1158 weights_params.dim[i + hw] = self->kdim[i];
1159 if (!self->weights.graph)
1160 self->weights = ccv_nnc_tensor_symbol_new(graph, weights_params, "weights");
1161 assert(self->weights.graph == graph)((void) sizeof ((self->weights.graph == graph) ? 1 : 0), __extension__
({ if (self->weights.graph == graph) ; else __assert_fail
("self->weights.graph == graph", "ccv_cnnp_model_addons.c"
, 1161, __extension__ __PRETTY_FUNCTION__); }))
;
1162 const ccv_nnc_tensor_symbol_t weights = ccv_cnnp_model_get_symbol(super, self->weights);
1163 ccv_nnc_tensor_param_t bias_params = params;
1164 if (self->format)
1165 bias_params.format = self->format;
1166 memset(bias_params.dim, 0, sizeof(bias_params.dim));
1167 bias_params.dim[0] = self->filters;
1168 ccv_nnc_cmd_t cmd = CMD_CONVOLUTION_FORWARD(self->groups, self->filters)ccv_nnc_cmd(CCV_NNC_CONVOLUTION_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={}},.convolution={.count=self->filters,.groups
=self->groups}}), 0)
;
1169 for (i = 0; i < k_nd; i++)
1170 cmd.info.size.dim[i] = self->kdim[i];
1171 cmd.info.size.dim[k_nd] = c;
1172 memcpy(cmd.info.convolution.dilation, self->dilation, sizeof(self->dilation));
1173 ccv_nnc_tensor_param_t output_params;
1174 // Dilate weight size based on the dilation factor.
1175 for (i = 0; i < k_nd; i++)
1176 weights_params.dim[i + hw] = (self->kdim[i] - 1) * ccv_max(self->dilation[i], 1)({ typeof (self->dilation[i]) _a = (self->dilation[i]);
typeof (1) _b = (1); (_a > _b) ? _a : _b; })
+ 1;
1177 ccv_nnc_hint_tensor_auto(cmd, (ccv_nnc_tensor_param_t []){
1178 params,
1179 weights_params,
1180 bias_params,
1181 }, 3, self->hint, &output_params, 1);
1182 const ccv_nnc_tensor_symbol_t output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1183 ccv_nnc_graph_exec_symbol_t convolution;
1184 if (self->no_bias)
1185 convolution = ccv_nnc_graph_exec_symbol_new(graph, cmd, TENSOR_SYMBOL_LIST(inputs[0], weights)(const ccv_nnc_tensor_symbol_t []){inputs[0], weights}, (1 +1
+1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 -1)
, TENSOR_SYMBOL_LIST(output)(const ccv_nnc_tensor_symbol_t []){output}, (1 +1 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "convolution");
1186 else {
1187 if (!self->bias.graph)
1188 self->bias = ccv_nnc_tensor_symbol_new(graph, bias_params, "bias");
1189 const ccv_nnc_tensor_symbol_t bias = ccv_cnnp_model_get_symbol(super, self->bias);
1190 convolution = ccv_nnc_graph_exec_symbol_new(graph, cmd, TENSOR_SYMBOL_LIST(inputs[0], weights, bias)(const ccv_nnc_tensor_symbol_t []){inputs[0], weights, bias},
(1 +1 +1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(output)(const ccv_nnc_tensor_symbol_t []){output}, (1 +1 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "convolution");
1191 }
1192 ccv_nnc_graph_exec_symbol_set_hint(graph, convolution, self->hint);
1193 outputs[0] = output;
1194}
1195
1196static void _ccv_cnnp_convolution_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
1197{
1198 ccv_cnnp_model_convolution_t* const self = (ccv_cnnp_model_convolution_t*)super;
1199 const ccv_nnc_tensor_param_t weight_params = ccv_nnc_tensor_symbol_params(graph, self->weights);
1200 const int n = ccv_max(ccv_nnc_tensor_get_n(weight_params), 1)({ typeof (ccv_nnc_tensor_get_n(weight_params)) _a = (ccv_nnc_tensor_get_n
(weight_params)); typeof (1) _b = (1); (_a > _b) ? _a : _b
; })
;
1201 const int count = ccv_nnc_tensor_count(weight_params);
1202 const float std = sqrtf(2) / sqrtf(count / n);
1203 const float bound = sqrtf(3) * std;
1204 initializer(context, CMD_RANDOM_UNIFORM_FORWARD(-bound, bound)ccv_nnc_cmd(CCV_NNC_RANDOM_UNIFORM_FORWARD, 0, (ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.blas={.a={-bound, bound}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->weights);
1205 if (self->bias.graph)
1206 initializer(context, CMD_SET_FORWARD(0)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={0,}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->bias);
1207}
1208
1209static void _ccv_cnnp_convolution_add_to_parameter(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const parameters, const int is_trainable)
1210{
1211 ccv_cnnp_model_convolution_t* const self = (ccv_cnnp_model_convolution_t*)super;
1212 add_to_array(parameters, self->weights, is_trainable);
1213 if (self->bias.graph)
1214 add_to_array(parameters, self->bias, is_trainable);
1215}
1216
1217static ccv_cnnp_model_t* _ccv_cnnp_convolution_copy(const ccv_cnnp_model_t* const super, void* const context);
1218
1219static const ccv_cnnp_model_vtab_t ccv_cnnp_convolution_isa = {
1220 .build = _ccv_cnnp_convolution_build,
1221 .init_states = _ccv_cnnp_convolution_init_states,
1222 .add_to_parameter = _ccv_cnnp_convolution_add_to_parameter,
1223 .copy = _ccv_cnnp_convolution_copy,
1224};
1225
1226ccv_cnnp_model_t* ccv_cnnp_convolution(const int groups, const int filters, const int kdim[CCV_NNC_MAX_DIM_ALLOC(12)], const int dilation[CCV_NNC_MAX_DIM_ALLOC(12)], const int no_bias, ccv_nnc_hint_t hint, const int format, const int is_trainable, const char* const name)
1227{
1228 ccv_cnnp_model_convolution_t* const model_convolution = (ccv_cnnp_model_convolution_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_convolution_t));
1229 model_convolution->super.isa = &ccv_cnnp_convolution_isa;
1230 model_convolution->super.input_size = 1;
1231 model_convolution->super.outputs = &model_convolution->output;
1232 model_convolution->super.output_size = 1;
1233 model_convolution->super.is_trainable = is_trainable;
1234 ccv_cnnp_model_copy_name(&model_convolution->super, name);
1235 model_convolution->weights.d = CCV_NNC_NO_TENSOR_SYMBOL;
1236 model_convolution->weights.graph = 0;
1237 model_convolution->bias.d = CCV_NNC_NO_TENSOR_SYMBOL;
1238 model_convolution->bias.graph = 0;
1239 model_convolution->groups = groups;
1240 model_convolution->filters = filters;
1241 memcpy(model_convolution->kdim, kdim, sizeof(model_convolution->kdim));
1242 memcpy(model_convolution->dilation, dilation, sizeof(model_convolution->dilation));
1243 model_convolution->no_bias = no_bias;
1244 model_convolution->hint = hint;
1245 model_convolution->format = format;
1246 return (ccv_cnnp_model_t*)model_convolution;
1247}
1248
1249static ccv_cnnp_model_t* _ccv_cnnp_convolution_copy(const ccv_cnnp_model_t* const super, void* const context)
1250{
1251 ccv_cnnp_model_convolution_t* const self = (ccv_cnnp_model_convolution_t*)super;
1252 return ccv_cnnp_convolution(self->groups, self->filters, self->kdim, self->dilation, self->no_bias, self->hint, self->format, self->super.is_trainable, self->super.name);
1253}
1254
1255// MARK - Convolution Transpose Layer
1256
1257typedef struct {
1258 ccv_cnnp_model_t super;
1259 ccv_nnc_tensor_symbol_t output;
1260 ccv_nnc_tensor_symbol_t weights;
1261 ccv_nnc_tensor_symbol_t bias;
1262 int groups;
1263 int filters;
1264 int kdim[CCV_NNC_MAX_DIM_ALLOC(12)];
1265 int dilation[CCV_NNC_MAX_DIM_ALLOC(12)];
1266 int output_padding;
1267 int no_bias;
1268 int format;
1269 ccv_nnc_hint_t hint;
1270} ccv_cnnp_model_convolution_transpose_t;
1271
1272static void _ccv_cnnp_convolution_transpose_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1273{
1274 ccv_cnnp_model_convolution_transpose_t* const self = (ccv_cnnp_model_convolution_transpose_t*)super;
1275 PRINT(CCV_CLI_VERBOSE, "[cnnp_convolution_transpose_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_convolution_transpose_build] -\n"); fflush(stdout
); } } while (0)
;
1276 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 1276, __extension__ __PRETTY_FUNCTION__); }))
;
1277 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 1277, __extension__ __PRETTY_FUNCTION__
); }))
;
1278 const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
1279 int i;
1280 const int nd = CCV_NNC_MAX_DIM(2) + 2;
1281 ccv_nnc_tensor_param_t weights_params = params;
1282 if (self->format)
1283 weights_params.format = self->format;
1284 const int c = ccv_nnc_tensor_get_c(params);
1285 ccv_nnc_tensor_set_n(&weights_params, c);
1286 assert(c % self->groups == 0)((void) sizeof ((c % self->groups == 0) ? 1 : 0), __extension__
({ if (c % self->groups == 0) ; else __assert_fail ("c % self->groups == 0"
, "ccv_cnnp_model_addons.c", 1286, __extension__ __PRETTY_FUNCTION__
); }))
;
1287 ccv_nnc_tensor_set_c(&weights_params, nd, self->filters / self->groups);
1288 const int hw = ccv_nnc_tensor_hw(weights_params, nd, CCV_NNC_MAX_DIM(2));
1289 assert(hw >= 0)((void) sizeof ((hw >= 0) ? 1 : 0), __extension__ ({ if (hw
>= 0) ; else __assert_fail ("hw >= 0", "ccv_cnnp_model_addons.c"
, 1289, __extension__ __PRETTY_FUNCTION__); }))
;
1290 for (i = 0; i < CCV_NNC_MAX_DIM(2); i++)
1291 weights_params.dim[i + hw] = self->kdim[i];
1292 if (!self->weights.graph)
1293 self->weights = ccv_nnc_tensor_symbol_new(graph, weights_params, "weights");
1294 assert(self->weights.graph == graph)((void) sizeof ((self->weights.graph == graph) ? 1 : 0), __extension__
({ if (self->weights.graph == graph) ; else __assert_fail
("self->weights.graph == graph", "ccv_cnnp_model_addons.c"
, 1294, __extension__ __PRETTY_FUNCTION__); }))
;
1295 const ccv_nnc_tensor_symbol_t weights = ccv_cnnp_model_get_symbol(super, self->weights);
1296 ccv_nnc_tensor_param_t bias_params = params;
1297 if (self->format)
1298 bias_params.format = self->format;
1299 memset(bias_params.dim, 0, sizeof(bias_params.dim));
1300 bias_params.dim[0] = self->filters;
1301 ccv_nnc_cmd_t cmd = CMD_CONVOLUTION_TRANSPOSE_FORWARD(self->groups, self->filters, self->output_padding)ccv_nnc_cmd(CCV_NNC_CONVOLUTION_TRANSPOSE_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={}},.convolution_transpose={.count=self->filters
,.groups=self->groups,.output_padding=self->output_padding
}}), 0)
;
1302 for (i = 0; i < CCV_NNC_MAX_DIM(2); i++)
1303 cmd.info.size.dim[i] = self->kdim[i];
1304 cmd.info.size.dim[CCV_NNC_MAX_DIM(2)] = c;
1305 memcpy(cmd.info.convolution_transpose.dilation, self->dilation, sizeof(self->dilation));
1306 ccv_nnc_tensor_param_t output_params;
1307 // Dilate weight size based on the dilation factor.
1308 for (i = 0; i < CCV_NNC_MAX_DIM(2); i++)
1309 weights_params.dim[i + hw] = (self->kdim[i] - 1) * ccv_max(self->dilation[i], 1)({ typeof (self->dilation[i]) _a = (self->dilation[i]);
typeof (1) _b = (1); (_a > _b) ? _a : _b; })
+ 1;
1310 ccv_nnc_hint_tensor_auto(cmd, (ccv_nnc_tensor_param_t []){
1311 params,
1312 weights_params,
1313 bias_params,
1314 }, 3, self->hint, &output_params, 1);
1315 const ccv_nnc_tensor_symbol_t output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1316 ccv_nnc_graph_exec_symbol_t convolution_transpose;
1317 if (self->no_bias)
1318 convolution_transpose = ccv_nnc_graph_exec_symbol_new(graph, cmd, TENSOR_SYMBOL_LIST(inputs[0], weights)(const ccv_nnc_tensor_symbol_t []){inputs[0], weights}, (1 +1
+1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 -1)
, TENSOR_SYMBOL_LIST(output)(const ccv_nnc_tensor_symbol_t []){output}, (1 +1 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "convolution_transpose");
1319 else {
1320 if (!self->bias.graph)
1321 self->bias = ccv_nnc_tensor_symbol_new(graph, bias_params, "bias");
1322 const ccv_nnc_tensor_symbol_t bias = ccv_cnnp_model_get_symbol(super, self->bias);
1323 convolution_transpose = ccv_nnc_graph_exec_symbol_new(graph, cmd, TENSOR_SYMBOL_LIST(inputs[0], weights, bias)(const ccv_nnc_tensor_symbol_t []){inputs[0], weights, bias},
(1 +1 +1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(output)(const ccv_nnc_tensor_symbol_t []){output}, (1 +1 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "convolution_transpose");
1324 }
1325 ccv_nnc_graph_exec_symbol_set_hint(graph, convolution_transpose, self->hint);
1326 outputs[0] = output;
1327}
1328
1329static void _ccv_cnnp_convolution_transpose_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
1330{
1331 ccv_cnnp_model_convolution_transpose_t* const self = (ccv_cnnp_model_convolution_transpose_t*)super;
1332 const ccv_nnc_tensor_param_t weight_params = ccv_nnc_tensor_symbol_params(graph, self->weights);
1333 const int n = ccv_max(ccv_nnc_tensor_get_n(weight_params), 1)({ typeof (ccv_nnc_tensor_get_n(weight_params)) _a = (ccv_nnc_tensor_get_n
(weight_params)); typeof (1) _b = (1); (_a > _b) ? _a : _b
; })
;
1334 const int count = ccv_nnc_tensor_count(weight_params);
1335 const float std = sqrtf(2) / sqrtf(count / n);
1336 const float bound = sqrtf(3) * std;
1337 initializer(context, CMD_RANDOM_UNIFORM_FORWARD(-bound, bound)ccv_nnc_cmd(CCV_NNC_RANDOM_UNIFORM_FORWARD, 0, (ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.blas={.a={-bound, bound}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->weights);
1338 if (self->bias.graph)
1339 initializer(context, CMD_SET_FORWARD(0)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={0,}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->bias);
1340}
1341
1342static void _ccv_cnnp_convolution_transpose_add_to_parameter(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const parameters, const int is_trainable)
1343{
1344 ccv_cnnp_model_convolution_transpose_t* const self = (ccv_cnnp_model_convolution_transpose_t*)super;
1345 add_to_array(parameters, self->weights, is_trainable);
1346 if (self->bias.graph)
1347 add_to_array(parameters, self->bias, is_trainable);
1348}
1349
1350static ccv_cnnp_model_t* _ccv_cnnp_convolution_transpose_copy(const ccv_cnnp_model_t* const super, void* const context);
1351
1352static const ccv_cnnp_model_vtab_t ccv_cnnp_convolution_transpose_isa = {
1353 .build = _ccv_cnnp_convolution_transpose_build,
1354 .init_states = _ccv_cnnp_convolution_transpose_init_states,
1355 .add_to_parameter = _ccv_cnnp_convolution_transpose_add_to_parameter,
1356 .copy = _ccv_cnnp_convolution_transpose_copy,
1357};
1358
1359ccv_cnnp_model_t* ccv_cnnp_convolution_transpose(const int groups, const int filters, const int kdim[CCV_NNC_MAX_DIM_ALLOC(12)], const int dilation[CCV_NNC_MAX_DIM_ALLOC(12)], const int output_padding, const int no_bias, ccv_nnc_hint_t hint, const int format, const int is_trainable, const char* const name)
1360{
1361 ccv_cnnp_model_convolution_transpose_t* const model_convolution_transpose = (ccv_cnnp_model_convolution_transpose_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_convolution_transpose_t));
1362 model_convolution_transpose->super.isa = &ccv_cnnp_convolution_transpose_isa;
1363 model_convolution_transpose->super.input_size = 1;
1364 model_convolution_transpose->super.outputs = &model_convolution_transpose->output;
1365 model_convolution_transpose->super.output_size = 1;
1366 model_convolution_transpose->super.is_trainable = is_trainable;
1367 ccv_cnnp_model_copy_name(&model_convolution_transpose->super, name);
1368 model_convolution_transpose->weights.d = CCV_NNC_NO_TENSOR_SYMBOL;
1369 model_convolution_transpose->weights.graph = 0;
1370 model_convolution_transpose->bias.d = CCV_NNC_NO_TENSOR_SYMBOL;
1371 model_convolution_transpose->bias.graph = 0;
1372 model_convolution_transpose->groups = groups;
1373 model_convolution_transpose->filters = filters;
1374 memcpy(model_convolution_transpose->kdim, kdim, sizeof(model_convolution_transpose->kdim));
1375 memcpy(model_convolution_transpose->dilation, dilation, sizeof(model_convolution_transpose->dilation));
1376 model_convolution_transpose->output_padding = output_padding;
1377 model_convolution_transpose->no_bias = no_bias;
1378 model_convolution_transpose->hint = hint;
1379 model_convolution_transpose->format = format;
1380 return (ccv_cnnp_model_t*)model_convolution_transpose;
1381}
1382
// Deep copy: re-run the public constructor with this instance's configuration.
static ccv_cnnp_model_t* _ccv_cnnp_convolution_transpose_copy(const ccv_cnnp_model_t* const super, void* const context)
{
	ccv_cnnp_model_convolution_transpose_t* const self = (ccv_cnnp_model_convolution_transpose_t*)super;
	return ccv_cnnp_convolution_transpose(self->groups, self->filters, self->kdim, self->dilation, self->output_padding, self->no_bias, self->hint, self->format, self->super.is_trainable, self->super.name);
}
1388
1389// MARK - Dense Layer
1390
typedef struct {
	ccv_cnnp_model_t super; // Base model; must be first so the struct can be cast from ccv_cnnp_model_t*.
	ccv_nnc_tensor_symbol_t output; // Backing storage pointed at by super.outputs.
	ccv_nnc_tensor_symbol_t weights; // Weight symbol, created lazily at build time.
	ccv_nnc_tensor_symbol_t bias; // Bias symbol, only created when no_bias is 0.
	int count; // Number of output units.
	int no_bias; // Non-zero to skip the bias term.
	int flags; // Extra BLAS flags forwarded to the GEMM command.
} ccv_cnnp_model_dense_t;
1400
1401static void _ccv_cnnp_dense_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1402{
1403 ccv_cnnp_model_dense_t* const self = (ccv_cnnp_model_dense_t*)super;
1404 PRINT(CCV_CLI_VERBOSE, "[cnnp_dense_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_dense_build] -\n"); fflush(stdout); } } while
(0)
;
1405 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 1405, __extension__ __PRETTY_FUNCTION__); }))
;
1406 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 1406, __extension__ __PRETTY_FUNCTION__
); }))
;
1407 const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
1408 ccv_nnc_tensor_param_t weights_params = params;
1409 memset(weights_params.dim, 0, sizeof(weights_params.dim));
1410 weights_params.dim[0] = self->count;
1411 weights_params.dim[1] = params.dim[ccv_nnc_tensor_nd(params.dim) - 1];
1412 if (!self->weights.graph)
1413 self->weights = ccv_nnc_tensor_symbol_new(graph, weights_params, "weights");
1414 assert(self->weights.graph == graph)((void) sizeof ((self->weights.graph == graph) ? 1 : 0), __extension__
({ if (self->weights.graph == graph) ; else __assert_fail
("self->weights.graph == graph", "ccv_cnnp_model_addons.c"
, 1414, __extension__ __PRETTY_FUNCTION__); }))
;
1415 const ccv_nnc_tensor_symbol_t weights = ccv_cnnp_model_get_symbol(super, self->weights);
1416 ccv_nnc_tensor_param_t bias_params = params;
1417 memset(bias_params.dim, 0, sizeof(bias_params.dim));
1418 bias_params.dim[0] = self->count;
1419 ccv_nnc_cmd_t cmd = {0};
1420 cmd.cmd = CCV_NNC_GEMM_FORWARD;
1421 cmd.info.blas.a[0] = 1;
1422 cmd.info.blas.a[1] = 1;
1423 cmd.info.blas.transpose_b[0] = 0;
1424 cmd.info.blas.transpose_b[1] = 1;
1425 cmd.info.blas.flags = self->flags;
1426 ccv_nnc_tensor_param_t output_params;
1427 ccv_nnc_hint_tensor_auto(cmd, (ccv_nnc_tensor_param_t []){
1428 params,
1429 weights_params,
1430 bias_params,
1431 }, 3, ccv_nnc_no_hint, &output_params, 1);
1432 const ccv_nnc_tensor_symbol_t output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1433 if (self->no_bias)
1434 ccv_nnc_graph_exec_symbol_new(graph, cmd, TENSOR_SYMBOL_LIST(inputs[0], weights)(const ccv_nnc_tensor_symbol_t []){inputs[0], weights}, (1 +1
+1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 -1)
, TENSOR_SYMBOL_LIST(output)(const ccv_nnc_tensor_symbol_t []){output}, (1 +1 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "dense");
1435 else {
1436 if (!self->bias.graph)
1437 self->bias = ccv_nnc_tensor_symbol_new(graph, bias_params, "bias");
1438 const ccv_nnc_tensor_symbol_t bias = ccv_cnnp_model_get_symbol(super, self->bias);
1439 ccv_nnc_graph_exec_symbol_new(graph, cmd, TENSOR_SYMBOL_LIST(inputs[0], weights, bias)(const ccv_nnc_tensor_symbol_t []){inputs[0], weights, bias},
(1 +1 +1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(output)(const ccv_nnc_tensor_symbol_t []){output}, (1 +1 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "dense");
1440 }
1441 outputs[0] = output;
1442}
1443
1444static void _ccv_cnnp_dense_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
1445{
1446 ccv_cnnp_model_dense_t* const self = (ccv_cnnp_model_dense_t*)super;
1447 const ccv_nnc_tensor_param_t weight_params = ccv_nnc_tensor_symbol_params(graph, self->weights);
1448 const int c = weight_params.dim[1];
1449 const float std = sqrtf(2) / sqrtf(c);
1450 const float bound = sqrtf(3) * std;
1451 initializer(context, CMD_RANDOM_UNIFORM_FORWARD(-bound, bound)ccv_nnc_cmd(CCV_NNC_RANDOM_UNIFORM_FORWARD, 0, (ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.blas={.a={-bound, bound}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->weights);
1452 if (self->bias.graph)
1453 initializer(context, CMD_SET_FORWARD(0)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={0,}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->bias);
1454}
1455
1456static void _ccv_cnnp_dense_add_to_parameter(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const parameters, const int is_trainable)
1457{
1458 ccv_cnnp_model_dense_t* const self = (ccv_cnnp_model_dense_t*)super;
1459 add_to_array(parameters, self->weights, is_trainable);
1460 if (self->bias.graph)
1461 add_to_array(parameters, self->bias, is_trainable);
1462}
1463
static ccv_cnnp_model_t* _ccv_cnnp_dense_copy(const ccv_cnnp_model_t* const super, void* const context);

// Virtual table wiring the dense (fully-connected) layer into the cnnp model framework.
static const ccv_cnnp_model_vtab_t ccv_cnnp_dense_isa = {
	.build = _ccv_cnnp_dense_build,
	.init_states = _ccv_cnnp_dense_init_states,
	.add_to_parameter = _ccv_cnnp_dense_add_to_parameter,
	.copy = _ccv_cnnp_dense_copy,
};
1472
1473ccv_cnnp_model_t* ccv_cnnp_dense(const int count, const int no_bias, const int flags, const int is_trainable, const char* const name)
1474{
1475 ccv_cnnp_model_dense_t* const model_dense = (ccv_cnnp_model_dense_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_dense_t));
1476 model_dense->super.isa = &ccv_cnnp_dense_isa;
1477 model_dense->super.input_size = 1;
1478 model_dense->super.outputs = &model_dense->output;
1479 model_dense->super.output_size = 1;
1480 model_dense->super.is_trainable = is_trainable;
1481 ccv_cnnp_model_copy_name(&model_dense->super, name);
1482 model_dense->weights.d = CCV_NNC_NO_TENSOR_SYMBOL;
1483 model_dense->weights.graph = 0;
1484 model_dense->bias.d = CCV_NNC_NO_TENSOR_SYMBOL;
1485 model_dense->bias.graph = 0;
1486 model_dense->count = count;
1487 model_dense->no_bias = no_bias;
1488 model_dense->flags = flags;
1489 return (ccv_cnnp_model_t*)model_dense;
1490}
1491
// Deep copy: re-run the public constructor with this instance's configuration.
static ccv_cnnp_model_t* _ccv_cnnp_dense_copy(const ccv_cnnp_model_t* const super, void* const context)
{
	const ccv_cnnp_model_dense_t* const self = (const ccv_cnnp_model_dense_t*)super;
	return ccv_cnnp_dense(self->count, self->no_bias, self->flags, self->super.is_trainable, self->super.name);
}
1497
1498// MARK - Pool Layers
1499
typedef struct {
	ccv_cnnp_model_t super; // Base model; must be first so the struct can be cast from ccv_cnnp_model_t*.
	ccv_nnc_tensor_symbol_t output; // Backing storage pointed at by super.outputs.
	int kdim[CCV_NNC_MAX_DIM_ALLOC]; // Pooling window; all zeros means pool over the whole spatial extent.
	ccv_nnc_hint_t hint; // Stride / padding hint applied to the pooling exec node.
} ccv_cnnp_model_pool_t;
1506
1507static void _ccv_cnnp_max_pool_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1508{
1509 ccv_cnnp_model_pool_t* const self = (ccv_cnnp_model_pool_t*)super;
1510 PRINT(CCV_CLI_VERBOSE, "[cnnp_max_pool_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_max_pool_build] -\n"); fflush(stdout); } } while
(0)
;
1511 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 1511, __extension__ __PRETTY_FUNCTION__); }))
;
1512 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 1512, __extension__ __PRETTY_FUNCTION__
); }))
;
1513 const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
1514 const int hw = ccv_nnc_tensor_hw(params, ccv_nnc_tensor_nd(params.dim), CCV_NNC_MAX_DIM(2));
1515 ccv_nnc_cmd_t cmd;
1516 if (hw >= 0 && self->kdim[0] == 0 && self->kdim[1] == 0)
1517 cmd = CMD_MAX_POOL_FORWARD(params.dim[hw], params.dim[hw + 1])ccv_nnc_cmd(CCV_NNC_MAX_POOL_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={params.dim[hw], params.dim[hw + 1],1}}}), 0)
;
1518 else
1519 cmd = CMD_MAX_POOL_FORWARD(self->kdim[0], self->kdim[1])ccv_nnc_cmd(CCV_NNC_MAX_POOL_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={self->kdim[0], self->kdim[1],1}}}), 0)
;
1520 ccv_nnc_tensor_param_t output_params;
1521 ccv_nnc_hint_tensor_auto(cmd, &params, 1, self->hint, &output_params, 1);
1522 const ccv_nnc_tensor_symbol_t pool_output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1523 const ccv_nnc_graph_exec_symbol_t exec = ccv_nnc_graph_exec_symbol_new(graph, cmd, TENSOR_SYMBOL_LIST(inputs[0])(const ccv_nnc_tensor_symbol_t []){inputs[0]}, (1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(pool_output)(const ccv_nnc_tensor_symbol_t []){pool_output}, (1 +1 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "max_pool");
1524 ccv_nnc_graph_exec_symbol_set_hint(graph, exec, self->hint);
1525 outputs[0] = pool_output;
1526}
1527
static ccv_cnnp_model_t* _ccv_cnnp_max_pool_copy(const ccv_cnnp_model_t* const super, void* const context);

// Virtual table for the max-pooling layer; no trainable state, so only build/copy.
static const ccv_cnnp_model_vtab_t ccv_cnnp_max_pool_isa = {
	.build = _ccv_cnnp_max_pool_build,
	.copy = _ccv_cnnp_max_pool_copy,
};
1534
1535ccv_cnnp_model_t* ccv_cnnp_max_pool(const int kdim[CCV_NNC_MAX_DIM_ALLOC(12)], const ccv_nnc_hint_t hint, const char* const name)
1536{
1537 ccv_cnnp_model_pool_t* const model_pool = (ccv_cnnp_model_pool_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_pool_t));
1538 model_pool->super.isa = &ccv_cnnp_max_pool_isa;
1539 model_pool->super.input_size = 1;
1540 model_pool->super.outputs = &model_pool->output;
1541 model_pool->super.output_size = 1;
1542 ccv_cnnp_model_copy_name(&model_pool->super, name);
1543 memcpy(model_pool->kdim, kdim, sizeof(model_pool->kdim));
1544 model_pool->hint = hint;
1545 return (ccv_cnnp_model_t*)model_pool;
1546}
1547
// Deep copy: re-run the public constructor with this instance's configuration.
static ccv_cnnp_model_t* _ccv_cnnp_max_pool_copy(const ccv_cnnp_model_t* const super, void* const context)
{
	const ccv_cnnp_model_pool_t* const self = (const ccv_cnnp_model_pool_t*)super;
	return ccv_cnnp_max_pool(self->kdim, self->hint, self->super.name);
}
1553
1554static void _ccv_cnnp_average_pool_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1555{
1556 ccv_cnnp_model_pool_t* const self = (ccv_cnnp_model_pool_t*)super;
1557 PRINT(CCV_CLI_VERBOSE, "[cnnp_average_pool_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_average_pool_build] -\n"); fflush(stdout); }
} while (0)
;
1558 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 1558, __extension__ __PRETTY_FUNCTION__); }))
;
1559 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 1559, __extension__ __PRETTY_FUNCTION__
); }))
;
1560 const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
1561 const int hw = ccv_nnc_tensor_hw(params, ccv_nnc_tensor_nd(params.dim), CCV_NNC_MAX_DIM(2));
1562 ccv_nnc_cmd_t cmd;
1563 if (hw >= 0 && self->kdim[0] == 0 && self->kdim[1] == 0)
1564 cmd = CMD_AVERAGE_POOL_FORWARD(params.dim[hw], params.dim[hw + 1])ccv_nnc_cmd(CCV_NNC_AVERAGE_POOL_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={params.dim[hw], params.dim[hw + 1],1}}}), 0)
;
1565 else
1566 cmd = CMD_AVERAGE_POOL_FORWARD(self->kdim[0], self->kdim[1])ccv_nnc_cmd(CCV_NNC_AVERAGE_POOL_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={self->kdim[0], self->kdim[1],1}}}), 0)
;
1567 ccv_nnc_tensor_param_t output_params;
1568 ccv_nnc_hint_tensor_auto(cmd, &params, 1, self->hint, &output_params, 1);
1569 const ccv_nnc_tensor_symbol_t pool_output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1570 const ccv_nnc_graph_exec_symbol_t exec = ccv_nnc_graph_exec_symbol_new(graph, cmd, TENSOR_SYMBOL_LIST(inputs[0])(const ccv_nnc_tensor_symbol_t []){inputs[0]}, (1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(pool_output)(const ccv_nnc_tensor_symbol_t []){pool_output}, (1 +1 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "average_pool");
1571 ccv_nnc_graph_exec_symbol_set_hint(graph, exec, self->hint);
1572 outputs[0] = pool_output;
1573}
1574
static ccv_cnnp_model_t* _ccv_cnnp_average_pool_copy(const ccv_cnnp_model_t* const super, void* const context);

// Virtual table for the average-pooling layer; no trainable state, so only build/copy.
static const ccv_cnnp_model_vtab_t ccv_cnnp_average_pool_isa = {
	.build = _ccv_cnnp_average_pool_build,
	.copy = _ccv_cnnp_average_pool_copy,
};
1581
1582ccv_cnnp_model_t* ccv_cnnp_average_pool(const int kdim[CCV_NNC_MAX_DIM_ALLOC(12)], const ccv_nnc_hint_t hint, const char* const name)
1583{
1584 ccv_cnnp_model_pool_t* const model_pool = (ccv_cnnp_model_pool_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_pool_t));
1585 model_pool->super.isa = &ccv_cnnp_average_pool_isa;
1586 model_pool->super.input_size = 1;
1587 model_pool->super.outputs = &model_pool->output;
1588 model_pool->super.output_size = 1;
1589 ccv_cnnp_model_copy_name(&model_pool->super, name);
1590 memcpy(model_pool->kdim, kdim, sizeof(model_pool->kdim));
1591 model_pool->hint = hint;
1592 return (ccv_cnnp_model_t*)model_pool;
1593}
1594
// Deep copy: re-run the public constructor with this instance's configuration.
static ccv_cnnp_model_t* _ccv_cnnp_average_pool_copy(const ccv_cnnp_model_t* const super, void* const context)
{
	const ccv_cnnp_model_pool_t* const self = (const ccv_cnnp_model_pool_t*)super;
	return ccv_cnnp_average_pool(self->kdim, self->hint, self->super.name);
}
1600
1601// MARK - RELU Layer
1602
typedef struct {
	ccv_cnnp_model_t super; // Base model; must be first so the struct can be cast from ccv_cnnp_model_t*.
	ccv_nnc_tensor_symbol_t output; // Backing storage pointed at by super.outputs.
} ccv_cnnp_model_relu_t;
1607
1608static void _ccv_cnnp_relu_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1609{
1610 PRINT(CCV_CLI_VERBOSE, "[cnnp_relu_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_relu_build] -\n"); fflush(stdout); } } while
(0)
;
1611 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 1611, __extension__ __PRETTY_FUNCTION__); }))
;
1612 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 1612, __extension__ __PRETTY_FUNCTION__
); }))
;
1613 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
1614 ccv_nnc_tensor_param_t output_params;
1615 const ccv_nnc_cmd_t relu = CMD_RELU_FORWARD()ccv_nnc_cmd(CCV_NNC_RELU_FORWARD, 0, ccv_nnc_cmd_auto, 0);
1616 ccv_nnc_hint_tensor_auto(relu, (ccv_nnc_tensor_param_t []){
1617 params,
1618 }, 1, ccv_nnc_no_hint, &output_params, 1);
1619 const ccv_nnc_tensor_symbol_t relu_output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1620 ccv_nnc_graph_exec_symbol_new(graph, relu, TENSOR_SYMBOL_LIST(inputs[0])(const ccv_nnc_tensor_symbol_t []){inputs[0]}, (1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(relu_output)(const ccv_nnc_tensor_symbol_t []){relu_output}, (1 +1 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "relu");
1621 outputs[0] = relu_output;
1622}
1623
static ccv_cnnp_model_t* _ccv_cnnp_relu_copy(const ccv_cnnp_model_t* const self, void* const context);

// Virtual table for the ReLU activation; stateless, so only build/copy.
static const ccv_cnnp_model_vtab_t ccv_cnnp_relu_isa = {
	.build = _ccv_cnnp_relu_build,
	.copy = _ccv_cnnp_relu_copy,
};
1630
1631ccv_cnnp_model_t* ccv_cnnp_relu(const char* const name)
1632{
1633 ccv_cnnp_model_relu_t* const model_relu = (ccv_cnnp_model_relu_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_relu_t));
1634 model_relu->super.isa = &ccv_cnnp_relu_isa;
1635 model_relu->super.input_size = 1;
1636 model_relu->super.outputs = &model_relu->output;
1637 model_relu->super.output_size = 1;
1638 ccv_cnnp_model_copy_name(&model_relu->super, name);
1639 return (ccv_cnnp_model_t*)model_relu;
1640}
1641
// Stateless layer: a fresh instance with the same name is a full copy.
static ccv_cnnp_model_t* _ccv_cnnp_relu_copy(const ccv_cnnp_model_t* const self, void* const context)
{
	return ccv_cnnp_relu(self->name);
}
1646
1647// MARK - Sigmoid Layer
1648
typedef struct {
	ccv_cnnp_model_t super; // Base model; must be first so the struct can be cast from ccv_cnnp_model_t*.
	ccv_nnc_tensor_symbol_t output; // Backing storage pointed at by super.outputs.
} ccv_cnnp_model_sigmoid_t;
1653
1654static void _ccv_cnnp_sigmoid_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1655{
1656 PRINT(CCV_CLI_VERBOSE, "[cnnp_sigmoid_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_sigmoid_build] -\n"); fflush(stdout); } } while
(0)
;
1657 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 1657, __extension__ __PRETTY_FUNCTION__); }))
;
1658 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 1658, __extension__ __PRETTY_FUNCTION__
); }))
;
1659 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
1660 ccv_nnc_tensor_param_t output_params;
1661 const ccv_nnc_cmd_t sigmoid = CMD_SIGMOID_FORWARD()ccv_nnc_cmd(CCV_NNC_SIGMOID_FORWARD, 0, ccv_nnc_cmd_auto, 0);
1662 ccv_nnc_hint_tensor_auto(sigmoid, (ccv_nnc_tensor_param_t []){
1663 params,
1664 }, 1, ccv_nnc_no_hint, &output_params, 1);
1665 const ccv_nnc_tensor_symbol_t sigmoid_output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1666 ccv_nnc_graph_exec_symbol_new(graph, sigmoid, TENSOR_SYMBOL_LIST(inputs[0])(const ccv_nnc_tensor_symbol_t []){inputs[0]}, (1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(sigmoid_output)(const ccv_nnc_tensor_symbol_t []){sigmoid_output}, (1 +1 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1
)
, "sigmoid");
1667 outputs[0] = sigmoid_output;
1668}
1669
static ccv_cnnp_model_t* _ccv_cnnp_sigmoid_copy(const ccv_cnnp_model_t* const self, void* const context);

// Virtual table for the sigmoid activation; stateless, so only build/copy.
static const ccv_cnnp_model_vtab_t ccv_cnnp_sigmoid_isa = {
	.build = _ccv_cnnp_sigmoid_build,
	.copy = _ccv_cnnp_sigmoid_copy,
};
1676
1677ccv_cnnp_model_t* ccv_cnnp_sigmoid(const char* const name)
1678{
1679 ccv_cnnp_model_sigmoid_t* const model_sigmoid = (ccv_cnnp_model_sigmoid_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_sigmoid_t));
1680 model_sigmoid->super.isa = &ccv_cnnp_sigmoid_isa;
1681 model_sigmoid->super.input_size = 1;
1682 model_sigmoid->super.outputs = &model_sigmoid->output;
1683 model_sigmoid->super.output_size = 1;
1684 ccv_cnnp_model_copy_name(&model_sigmoid->super, name);
1685 return (ccv_cnnp_model_t*)model_sigmoid;
1686}
1687
// Stateless layer: a fresh instance with the same name is a full copy.
static ccv_cnnp_model_t* _ccv_cnnp_sigmoid_copy(const ccv_cnnp_model_t* const self, void* const context)
{
	return ccv_cnnp_sigmoid(self->name);
}
1692
1693// MARK - Tanh Layer
1694
typedef struct {
	ccv_cnnp_model_t super; // Base model; must be first so the struct can be cast from ccv_cnnp_model_t*.
	ccv_nnc_tensor_symbol_t output; // Backing storage pointed at by super.outputs.
} ccv_cnnp_model_tanh_t;
1699
1700static void _ccv_cnnp_tanh_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1701{
1702 PRINT(CCV_CLI_VERBOSE, "[cnnp_tanh_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_tanh_build] -\n"); fflush(stdout); } } while
(0)
;
1703 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 1703, __extension__ __PRETTY_FUNCTION__); }))
;
1704 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 1704, __extension__ __PRETTY_FUNCTION__
); }))
;
1705 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
1706 ccv_nnc_tensor_param_t output_params;
1707 const ccv_nnc_cmd_t tanh = CMD_TANH_FORWARD()ccv_nnc_cmd(CCV_NNC_TANH_FORWARD, 0, ccv_nnc_cmd_auto, 0);
1708 ccv_nnc_hint_tensor_auto(tanh, (ccv_nnc_tensor_param_t []){
1709 params,
1710 }, 1, ccv_nnc_no_hint, &output_params, 1);
1711 const ccv_nnc_tensor_symbol_t tanh_output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1712 ccv_nnc_graph_exec_symbol_new(graph, tanh, TENSOR_SYMBOL_LIST(inputs[0])(const ccv_nnc_tensor_symbol_t []){inputs[0]}, (1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(tanh_output)(const ccv_nnc_tensor_symbol_t []){tanh_output}, (1 +1 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "tanh");
1713 outputs[0] = tanh_output;
1714}
1715
static ccv_cnnp_model_t* _ccv_cnnp_tanh_copy(const ccv_cnnp_model_t* const self, void* const context);

// Virtual table for the tanh activation; stateless, so only build/copy.
static const ccv_cnnp_model_vtab_t ccv_cnnp_tanh_isa = {
	.build = _ccv_cnnp_tanh_build,
	.copy = _ccv_cnnp_tanh_copy,
};
1722
1723ccv_cnnp_model_t* ccv_cnnp_tanh(const char* const name)
1724{
1725 ccv_cnnp_model_tanh_t* const model_tanh = (ccv_cnnp_model_tanh_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_tanh_t));
1726 model_tanh->super.isa = &ccv_cnnp_tanh_isa;
1727 model_tanh->super.input_size = 1;
1728 model_tanh->super.outputs = &model_tanh->output;
1729 model_tanh->super.output_size = 1;
1730 ccv_cnnp_model_copy_name(&model_tanh->super, name);
1731 return (ccv_cnnp_model_t*)model_tanh;
1732}
1733
// Stateless layer: a fresh instance with the same name is a full copy.
static ccv_cnnp_model_t* _ccv_cnnp_tanh_copy(const ccv_cnnp_model_t* const self, void* const context)
{
	return ccv_cnnp_tanh(self->name);
}
1738
1739// MARK - Exp Layer
1740
typedef struct {
	ccv_cnnp_model_t super; // Base model; must be first so the struct can be cast from ccv_cnnp_model_t*.
	ccv_nnc_tensor_symbol_t output; // Backing storage pointed at by super.outputs.
} ccv_cnnp_model_exp_t;
1745
1746static void _ccv_cnnp_exp_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1747{
1748 PRINT(CCV_CLI_VERBOSE, "[cnnp_exp_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_exp_build] -\n"); fflush(stdout); } } while (
0)
;
1749 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 1749, __extension__ __PRETTY_FUNCTION__); }))
;
1750 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 1750, __extension__ __PRETTY_FUNCTION__
); }))
;
1751 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
1752 ccv_nnc_tensor_param_t output_params;
1753 const ccv_nnc_cmd_t exp = CMD_EWEXP_FORWARD()ccv_nnc_cmd(CCV_NNC_EWEXP_FORWARD, 0, ccv_nnc_cmd_auto, 0);
1754 ccv_nnc_hint_tensor_auto(exp, (ccv_nnc_tensor_param_t []){
1755 params,
1756 }, 1, ccv_nnc_no_hint, &output_params, 1);
1757 const ccv_nnc_tensor_symbol_t exp_output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1758 ccv_nnc_graph_exec_symbol_new(graph, exp, TENSOR_SYMBOL_LIST(inputs[0])(const ccv_nnc_tensor_symbol_t []){inputs[0]}, (1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(exp_output)(const ccv_nnc_tensor_symbol_t []){exp_output}, (1 +1 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "exp");
1759 outputs[0] = exp_output;
1760}
1761
static ccv_cnnp_model_t* _ccv_cnnp_exp_copy(const ccv_cnnp_model_t* const self, void* const context);

// Virtual table for the element-wise exp layer; stateless, so only build/copy.
static const ccv_cnnp_model_vtab_t ccv_cnnp_exp_isa = {
	.build = _ccv_cnnp_exp_build,
	.copy = _ccv_cnnp_exp_copy,
};
1768
1769ccv_cnnp_model_t* ccv_cnnp_exp(const char* const name)
1770{
1771 ccv_cnnp_model_exp_t* const model_exp = (ccv_cnnp_model_exp_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_exp_t));
1772 model_exp->super.isa = &ccv_cnnp_exp_isa;
1773 model_exp->super.input_size = 1;
1774 model_exp->super.outputs = &model_exp->output;
1775 model_exp->super.output_size = 1;
1776 ccv_cnnp_model_copy_name(&model_exp->super, name);
1777 return (ccv_cnnp_model_t*)model_exp;
1778}
1779
// Stateless layer: a fresh instance with the same name is a full copy.
static ccv_cnnp_model_t* _ccv_cnnp_exp_copy(const ccv_cnnp_model_t* const self, void* const context)
{
	return ccv_cnnp_exp(self->name);
}
1784
1785// MARK - Softplus Layer
1786
typedef struct {
	ccv_cnnp_model_t super; // Base model; must be first so the struct can be cast from ccv_cnnp_model_t*.
	ccv_nnc_tensor_symbol_t output; // Backing storage pointed at by super.outputs.
} ccv_cnnp_model_softplus_t;
1791
1792static void _ccv_cnnp_softplus_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1793{
1794 PRINT(CCV_CLI_VERBOSE, "[cnnp_softplus_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_softplus_build] -\n"); fflush(stdout); } } while
(0)
;
1795 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 1795, __extension__ __PRETTY_FUNCTION__); }))
;
1796 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 1796, __extension__ __PRETTY_FUNCTION__
); }))
;
1797 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
1798 ccv_nnc_tensor_param_t output_params;
1799 const ccv_nnc_cmd_t softplus = CMD_EWSOFTPLUS_FORWARD()ccv_nnc_cmd(CCV_NNC_EWSOFTPLUS_FORWARD, 0, ccv_nnc_cmd_auto, 0
)
;
1800 ccv_nnc_hint_tensor_auto(softplus, (ccv_nnc_tensor_param_t []){
1801 params,
1802 }, 1, ccv_nnc_no_hint, &output_params, 1);
1803 const ccv_nnc_tensor_symbol_t softplus_output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1804 ccv_nnc_graph_exec_symbol_new(graph, softplus, TENSOR_SYMBOL_LIST(inputs[0])(const ccv_nnc_tensor_symbol_t []){inputs[0]}, (1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(softplus_output)(const ccv_nnc_tensor_symbol_t []){softplus_output}, (1 +1 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -
1)
, "softplus");
1805 outputs[0] = softplus_output;
1806}
1807
static ccv_cnnp_model_t* _ccv_cnnp_softplus_copy(const ccv_cnnp_model_t* const self, void* const context);

// Virtual table for the softplus activation; stateless, so only build/copy.
static const ccv_cnnp_model_vtab_t ccv_cnnp_softplus_isa = {
	.build = _ccv_cnnp_softplus_build,
	.copy = _ccv_cnnp_softplus_copy,
};
1814
1815ccv_cnnp_model_t* ccv_cnnp_softplus(const char* const name)
1816{
1817 ccv_cnnp_model_softplus_t* const model_softplus = (ccv_cnnp_model_softplus_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_softplus_t));
1818 model_softplus->super.isa = &ccv_cnnp_softplus_isa;
1819 model_softplus->super.input_size = 1;
1820 model_softplus->super.outputs = &model_softplus->output;
1821 model_softplus->super.output_size = 1;
1822 ccv_cnnp_model_copy_name(&model_softplus->super, name);
1823 return (ccv_cnnp_model_t*)model_softplus;
1824}
1825
1826static ccv_cnnp_model_t* _ccv_cnnp_softplus_copy(const ccv_cnnp_model_t* const self, void* const context)
1827{
1828 return ccv_cnnp_softplus(self->name);
1829}
1830
1831// MARK - Swish Layer
1832
1833typedef struct {
1834 ccv_cnnp_model_t super;
1835 ccv_nnc_tensor_symbol_t output;
1836 float beta;
1837} ccv_cnnp_model_swish_t;
1838
1839static void _ccv_cnnp_swish_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1840{
1841 PRINT(CCV_CLI_VERBOSE, "[cnnp_swish_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_swish_build] -\n"); fflush(stdout); } } while
(0)
;
1842 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 1842, __extension__ __PRETTY_FUNCTION__); }))
;
1843 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 1843, __extension__ __PRETTY_FUNCTION__
); }))
;
1844 ccv_cnnp_model_swish_t* const self = (ccv_cnnp_model_swish_t*)super;
1845 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
1846 ccv_nnc_tensor_param_t output_params;
1847 const ccv_nnc_cmd_t swish = CMD_SWISH_FORWARD(self->beta)ccv_nnc_cmd(CCV_NNC_SWISH_FORWARD, 0, ((ccv_nnc_cmd_param_t){
.size={.dim={1,1,1}},.swish={.beta=self->beta}}), 0)
;
1848 ccv_nnc_hint_tensor_auto(swish, (ccv_nnc_tensor_param_t []){
1849 params,
1850 }, 1, ccv_nnc_no_hint, &output_params, 1);
1851 const ccv_nnc_tensor_symbol_t swish_output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1852 ccv_nnc_graph_exec_symbol_new(graph, swish, TENSOR_SYMBOL_LIST(inputs[0])(const ccv_nnc_tensor_symbol_t []){inputs[0]}, (1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(swish_output)(const ccv_nnc_tensor_symbol_t []){swish_output}, (1 +1 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "swish");
1853 outputs[0] = swish_output;
1854}
1855
1856static ccv_cnnp_model_t* _ccv_cnnp_swish_copy(const ccv_cnnp_model_t* const self, void* const context);
1857
1858static const ccv_cnnp_model_vtab_t ccv_cnnp_swish_isa = {
1859 .build = _ccv_cnnp_swish_build,
1860 .copy = _ccv_cnnp_swish_copy,
1861};
1862
1863ccv_cnnp_model_t* ccv_cnnp_swish(const float beta, const char* const name)
1864{
1865 ccv_cnnp_model_swish_t* const model_swish = (ccv_cnnp_model_swish_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_swish_t));
1866 model_swish->super.isa = &ccv_cnnp_swish_isa;
1867 model_swish->super.input_size = 1;
1868 model_swish->super.outputs = &model_swish->output;
1869 model_swish->super.output_size = 1;
1870 model_swish->beta = beta;
1871 ccv_cnnp_model_copy_name(&model_swish->super, name);
1872 return (ccv_cnnp_model_t*)model_swish;
1873}
1874
1875static ccv_cnnp_model_t* _ccv_cnnp_swish_copy(const ccv_cnnp_model_t* const self, void* const context)
1876{
1877 const ccv_cnnp_model_swish_t* const swish = (const ccv_cnnp_model_swish_t*)self;
1878 return ccv_cnnp_swish(swish->beta, self->name);
1879}
1880
1881// MARK - Swish Mul Layer
1882
1883typedef struct {
1884 ccv_cnnp_model_t super;
1885 ccv_nnc_tensor_symbol_t output;
1886 float beta;
1887 float scale;
1888} ccv_cnnp_model_swish_mul_t;
1889
1890static void _ccv_cnnp_swish_mul_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1891{
1892 PRINT(CCV_CLI_VERBOSE, "[cnnp_swish_mul_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_swish_mul_build] -\n"); fflush(stdout); } } while
(0)
;
1893 assert(input_size == 2)((void) sizeof ((input_size == 2) ? 1 : 0), __extension__ ({ if
(input_size == 2) ; else __assert_fail ("input_size == 2", "ccv_cnnp_model_addons.c"
, 1893, __extension__ __PRETTY_FUNCTION__); }))
;
1894 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 1894, __extension__ __PRETTY_FUNCTION__
); }))
;
1895 const ccv_cnnp_model_swish_mul_t* const self = (const ccv_cnnp_model_swish_mul_t*)super;
1896 ccv_nnc_tensor_param_t input_params[2];
1897 int i;
1898 for (i = 0; i < 2; i++)
1899 input_params[i] = ccv_nnc_tensor_symbol_params(graph, inputs[i]);
1900 ccv_nnc_tensor_param_t output_params;
1901 const ccv_nnc_cmd_t swish_mul = CMD_SWISH_MUL_FORWARD(self->beta, self->scale)ccv_nnc_cmd(CCV_NNC_SWISH_MUL_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.swish_mul={.beta=self->beta,.scale
=self->scale}}), 0)
;
1902 ccv_nnc_hint_tensor_auto(swish_mul, input_params, 2, ccv_nnc_no_hint, &output_params, 1);
1903 outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1904 ccv_nnc_graph_exec_symbol_new(graph, swish_mul, inputs, input_size, outputs, output_size, "swish_mul");
1905}
1906
1907static ccv_cnnp_model_t* _ccv_cnnp_swish_mul_copy(const ccv_cnnp_model_t* const self, void* const context);
1908
1909static const ccv_cnnp_model_vtab_t ccv_cnnp_swish_mul_isa = {
1910 .build = _ccv_cnnp_swish_mul_build,
1911 .copy = _ccv_cnnp_swish_mul_copy,
1912};
1913
1914ccv_cnnp_model_t* ccv_cnnp_swish_mul(const float beta, const float scale, const char* const name)
1915{
1916 ccv_cnnp_model_swish_mul_t* const model_swish_mul = (ccv_cnnp_model_swish_mul_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_swish_mul_t));
1917 model_swish_mul->super.isa = &ccv_cnnp_swish_mul_isa;
1918 model_swish_mul->super.input_size = 2;
1919 model_swish_mul->super.outputs = &model_swish_mul->output;
1920 model_swish_mul->super.output_size = 1;
1921 model_swish_mul->beta = beta;
1922 model_swish_mul->scale = scale;
1923 ccv_cnnp_model_copy_name(&model_swish_mul->super, name);
1924 return (ccv_cnnp_model_t*)model_swish_mul;
1925}
1926
1927static ccv_cnnp_model_t* _ccv_cnnp_swish_mul_copy(const ccv_cnnp_model_t* const super, void* const context)
1928{
1929 const ccv_cnnp_model_swish_mul_t* const self = (const ccv_cnnp_model_swish_mul_t*)super;
1930 return ccv_cnnp_swish_mul(self->beta, self->scale, self->super.name);
1931}
1932
1933// MARK - GELU Layer
1934
1935typedef struct {
1936 ccv_cnnp_model_t super;
1937 ccv_nnc_tensor_symbol_t output;
1938 int tanh;
1939} ccv_cnnp_model_gelu_t;
1940
1941static void _ccv_cnnp_gelu_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1942{
1943 PRINT(CCV_CLI_VERBOSE, "[cnnp_gelu_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_gelu_build] -\n"); fflush(stdout); } } while
(0)
;
1944 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 1944, __extension__ __PRETTY_FUNCTION__); }))
;
1945 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 1945, __extension__ __PRETTY_FUNCTION__
); }))
;
1946 ccv_cnnp_model_gelu_t* const self = (ccv_cnnp_model_gelu_t*)super;
1947 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
1948 ccv_nnc_tensor_param_t output_params;
1949 const ccv_nnc_cmd_t gelu = CMD_GELU_FORWARD(self->tanh)ccv_nnc_cmd(CCV_NNC_GELU_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.gelu={.tanh=self->tanh}}, 0)
;
1950 ccv_nnc_hint_tensor_auto(gelu, (ccv_nnc_tensor_param_t []){
1951 params,
1952 }, 1, ccv_nnc_no_hint, &output_params, 1);
1953 const ccv_nnc_tensor_symbol_t gelu_output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1954 ccv_nnc_graph_exec_symbol_new(graph, gelu, TENSOR_SYMBOL_LIST(inputs[0])(const ccv_nnc_tensor_symbol_t []){inputs[0]}, (1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(gelu_output)(const ccv_nnc_tensor_symbol_t []){gelu_output}, (1 +1 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "gelu");
1955 outputs[0] = gelu_output;
1956}
1957
1958static ccv_cnnp_model_t* _ccv_cnnp_gelu_copy(const ccv_cnnp_model_t* const self, void* const context);
1959
1960static const ccv_cnnp_model_vtab_t ccv_cnnp_gelu_isa = {
1961 .build = _ccv_cnnp_gelu_build,
1962 .copy = _ccv_cnnp_gelu_copy,
1963};
1964
1965ccv_cnnp_model_t* ccv_cnnp_gelu(const int tanh, const char* const name)
1966{
1967 ccv_cnnp_model_gelu_t* const model_gelu = (ccv_cnnp_model_gelu_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_gelu_t));
1968 model_gelu->super.isa = &ccv_cnnp_gelu_isa;
1969 model_gelu->super.input_size = 1;
1970 model_gelu->super.outputs = &model_gelu->output;
1971 model_gelu->super.output_size = 1;
1972 model_gelu->tanh = tanh;
1973 ccv_cnnp_model_copy_name(&model_gelu->super, name);
1974 return (ccv_cnnp_model_t*)model_gelu;
1975}
1976
1977static ccv_cnnp_model_t* _ccv_cnnp_gelu_copy(const ccv_cnnp_model_t* const super, void* const context)
1978{
1979 ccv_cnnp_model_gelu_t* const self = (ccv_cnnp_model_gelu_t*)super;
1980 return ccv_cnnp_gelu(self->tanh, self->super.name);
1981}
1982
1983// MARK - Leaky ReLU Layer
1984
1985typedef struct {
1986 ccv_cnnp_model_t super;
1987 ccv_nnc_tensor_symbol_t output;
1988 float negative_slope;
1989} ccv_cnnp_model_leaky_relu_t;
1990
1991static void _ccv_cnnp_leaky_relu_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1992{
1993 PRINT(CCV_CLI_VERBOSE, "[cnnp_leaky_relu_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_leaky_relu_build] -\n"); fflush(stdout); } }
while (0)
;
1994 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 1994, __extension__ __PRETTY_FUNCTION__); }))
;
1995 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 1995, __extension__ __PRETTY_FUNCTION__
); }))
;
1996 ccv_cnnp_model_leaky_relu_t* const self = (ccv_cnnp_model_leaky_relu_t*)super;
1997 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
1998 ccv_nnc_tensor_param_t output_params;
1999 const ccv_nnc_cmd_t leaky_relu = CMD_LEAKY_RELU_FORWARD(self->negative_slope)ccv_nnc_cmd(CCV_NNC_LEAKY_RELU_FORWARD, 0, (ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.leaky_relu={.negative_slope=self->
negative_slope}}, 0)
;
2000 ccv_nnc_hint_tensor_auto(leaky_relu, (ccv_nnc_tensor_param_t []){
2001 params,
2002 }, 1, ccv_nnc_no_hint, &output_params, 1);
2003 const ccv_nnc_tensor_symbol_t leaky_relu_output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
2004 ccv_nnc_graph_exec_symbol_new(graph, leaky_relu, TENSOR_SYMBOL_LIST(inputs[0])(const ccv_nnc_tensor_symbol_t []){inputs[0]}, (1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(leaky_relu_output)(const ccv_nnc_tensor_symbol_t []){leaky_relu_output}, (1 +1 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
-1)
, "leaky_relu");
2005 outputs[0] = leaky_relu_output;
2006}
2007
2008static ccv_cnnp_model_t* _ccv_cnnp_leaky_relu_copy(const ccv_cnnp_model_t* const self, void* const context);
2009
2010static const ccv_cnnp_model_vtab_t ccv_cnnp_leaky_relu_isa = {
2011 .build = _ccv_cnnp_leaky_relu_build,
2012 .copy = _ccv_cnnp_leaky_relu_copy,
2013};
2014
2015ccv_cnnp_model_t* ccv_cnnp_leaky_relu(const float negative_slope, const char* const name)
2016{
2017 ccv_cnnp_model_leaky_relu_t* const model_leaky_relu = (ccv_cnnp_model_leaky_relu_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_leaky_relu_t));
2018 model_leaky_relu->super.isa = &ccv_cnnp_leaky_relu_isa;
2019 model_leaky_relu->super.input_size = 1;
2020 model_leaky_relu->super.outputs = &model_leaky_relu->output;
2021 model_leaky_relu->super.output_size = 1;
2022 model_leaky_relu->negative_slope = negative_slope;
2023 ccv_cnnp_model_copy_name(&model_leaky_relu->super, name);
2024 return (ccv_cnnp_model_t*)model_leaky_relu;
2025}
2026
2027static ccv_cnnp_model_t* _ccv_cnnp_leaky_relu_copy(const ccv_cnnp_model_t* const super, void* const context)
2028{
2029 ccv_cnnp_model_leaky_relu_t* const self = (ccv_cnnp_model_leaky_relu_t*)super;
2030 return ccv_cnnp_leaky_relu(self->negative_slope, self->super.name);
2031}
2032
2033// MARK - Softmax Layer
2034
2035typedef struct {
2036 ccv_cnnp_model_t super;
2037 ccv_nnc_tensor_symbol_t output;
2038} ccv_cnnp_model_softmax_t;
2039
2040static void _ccv_cnnp_softmax_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2041{
2042 PRINT(CCV_CLI_VERBOSE, "[cnnp_softmax_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_softmax_build] -\n"); fflush(stdout); } } while
(0)
;
2043 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 2043, __extension__ __PRETTY_FUNCTION__); }))
;
2044 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 2044, __extension__ __PRETTY_FUNCTION__
); }))
;
2045 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2046 ccv_nnc_tensor_param_t output_params;
2047 const ccv_nnc_cmd_t softmax = CMD_SOFTMAX_FORWARD()ccv_nnc_cmd(CCV_NNC_SOFTMAX_FORWARD, 0, ccv_nnc_cmd_auto, 0);
2048 ccv_nnc_hint_tensor_auto(softmax, (ccv_nnc_tensor_param_t []){
2049 params,
2050 }, 1, ccv_nnc_no_hint, &output_params, 1);
2051 const ccv_nnc_tensor_symbol_t softmax_output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
2052 ccv_nnc_graph_exec_symbol_new(graph, softmax, TENSOR_SYMBOL_LIST(inputs[0])(const ccv_nnc_tensor_symbol_t []){inputs[0]}, (1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(softmax_output)(const ccv_nnc_tensor_symbol_t []){softmax_output}, (1 +1 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1
)
, "softmax");
2053 outputs[0] = softmax_output;
2054}
2055
2056static ccv_cnnp_model_t* _ccv_cnnp_softmax_copy(const ccv_cnnp_model_t* const self, void* const context);
2057
2058static const ccv_cnnp_model_vtab_t ccv_cnnp_softmax_isa = {
2059 .build = _ccv_cnnp_softmax_build,
2060 .copy = _ccv_cnnp_softmax_copy,
2061};
2062
2063ccv_cnnp_model_t* ccv_cnnp_softmax(const char* const name)
2064{
2065 ccv_cnnp_model_softmax_t* const model_softmax = (ccv_cnnp_model_softmax_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_softmax_t));
2066 model_softmax->super.isa = &ccv_cnnp_softmax_isa;
2067 model_softmax->super.input_size = 1;
2068 model_softmax->super.outputs = &model_softmax->output;
2069 model_softmax->super.output_size = 1;
2070 ccv_cnnp_model_copy_name(&model_softmax->super, name);
2071 return (ccv_cnnp_model_t*)model_softmax;
2072}
2073
2074static ccv_cnnp_model_t* _ccv_cnnp_softmax_copy(const ccv_cnnp_model_t* const self, void* const context)
2075{
2076 return ccv_cnnp_softmax(self->name);
2077}
2078
2079// MARK - Add Layer
2080
2081typedef struct {
2082 ccv_cnnp_model_t super;
2083 float p;
2084 float q;
2085 ccv_nnc_tensor_symbol_t output;
2086} ccv_cnnp_model_add_t;
2087
2088static void _ccv_cnnp_add_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2089{
2090 PRINT(CCV_CLI_VERBOSE, "[cnnp_add_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_add_build] -\n"); fflush(stdout); } } while (
0)
;
2091 const ccv_cnnp_model_add_t* const self = (const ccv_cnnp_model_add_t*)super;
2092 assert(input_size == 2)((void) sizeof ((input_size == 2) ? 1 : 0), __extension__ ({ if
(input_size == 2) ; else __assert_fail ("input_size == 2", "ccv_cnnp_model_addons.c"
, 2092, __extension__ __PRETTY_FUNCTION__); }))
;
2093 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 2093, __extension__ __PRETTY_FUNCTION__
); }))
;
2094 ccv_nnc_tensor_param_t input_params[2];
2095 int i;
2096 for (i = 0; i < 2; i++)
2097 input_params[i] = ccv_nnc_tensor_symbol_params(graph, inputs[i]);
2098 ccv_nnc_tensor_param_t output_params;
2099 const ccv_nnc_cmd_t add = CMD_ADD_FORWARD(self->p, self->q)ccv_nnc_cmd(CCV_NNC_ADD_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={self->p, self->q}}}, 0)
;
2100 ccv_nnc_hint_tensor_auto(add, input_params, 2, ccv_nnc_no_hint, &output_params, 1);
2101 outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
2102 ccv_nnc_graph_exec_symbol_new(graph, add, inputs, input_size, outputs, output_size, "add");
2103}
2104
2105static ccv_cnnp_model_t* _ccv_cnnp_add_copy(const ccv_cnnp_model_t* const self, void* const context);
2106
2107static const ccv_cnnp_model_vtab_t ccv_cnnp_add_isa = {
2108 .build = _ccv_cnnp_add_build,
2109 .copy = _ccv_cnnp_add_copy,
2110};
2111
2112ccv_cnnp_model_t* ccv_cnnp_add(const float p, const float q, const char* const name)
2113{
2114 ccv_cnnp_model_add_t* const model_add = (ccv_cnnp_model_add_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_add_t));
2115 model_add->super.isa = &ccv_cnnp_add_isa;
2116 model_add->super.input_size = 2;
2117 model_add->super.outputs = &model_add->output;
2118 model_add->super.output_size = 1;
2119 model_add->p = p;
2120 model_add->q = q;
2121 ccv_cnnp_model_copy_name(&model_add->super, name);
2122 return (ccv_cnnp_model_t*)model_add;
2123}
2124
2125static ccv_cnnp_model_t* _ccv_cnnp_add_copy(const ccv_cnnp_model_t* const super, void* const context)
2126{
2127 const ccv_cnnp_model_add_t* const self = (const ccv_cnnp_model_add_t*)super;
2128 return ccv_cnnp_add(self->p, self->q, self->super.name);
2129}
2130
2131// MARK - Mul Layer
2132
2133typedef struct {
2134 ccv_cnnp_model_t super;
2135 ccv_nnc_tensor_symbol_t output;
2136 float p;
2137} ccv_cnnp_model_mul_t;
2138
2139static void _ccv_cnnp_mul_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2140{
2141 PRINT(CCV_CLI_VERBOSE, "[cnnp_mul_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_mul_build] -\n"); fflush(stdout); } } while (
0)
;
2142 const ccv_cnnp_model_mul_t* const self = (const ccv_cnnp_model_mul_t*)super;
2143 assert(input_size == 2)((void) sizeof ((input_size == 2) ? 1 : 0), __extension__ ({ if
(input_size == 2) ; else __assert_fail ("input_size == 2", "ccv_cnnp_model_addons.c"
, 2143, __extension__ __PRETTY_FUNCTION__); }))
;
2144 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 2144, __extension__ __PRETTY_FUNCTION__
); }))
;
2145 ccv_nnc_tensor_param_t input_params[2];
2146 int i;
2147 for (i = 0; i < 2; i++)
2148 input_params[i] = ccv_nnc_tensor_symbol_params(graph, inputs[i]);
2149 ccv_nnc_tensor_param_t output_params;
2150 const ccv_nnc_cmd_t mul = CMD_MUL_FORWARD(self->p)ccv_nnc_cmd(CCV_NNC_MUL_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={self->p,}}}, 0)
;
2151 ccv_nnc_hint_tensor_auto(mul, input_params, 2, ccv_nnc_no_hint, &output_params, 1);
2152 outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
2153 ccv_nnc_graph_exec_symbol_new(graph, mul, inputs, input_size, outputs, output_size, "mul");
2154}
2155
2156static ccv_cnnp_model_t* _ccv_cnnp_mul_copy(const ccv_cnnp_model_t* const self, void* const context);
2157
2158static const ccv_cnnp_model_vtab_t ccv_cnnp_mul_isa = {
2159 .build = _ccv_cnnp_mul_build,
2160 .copy = _ccv_cnnp_mul_copy,
2161};
2162
2163ccv_cnnp_model_t* ccv_cnnp_mul(const float p, const char* const name)
2164{
2165 ccv_cnnp_model_mul_t* const model_mul = (ccv_cnnp_model_mul_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_mul_t));
2166 model_mul->super.isa = &ccv_cnnp_mul_isa;
2167 model_mul->super.input_size = 2;
2168 model_mul->super.outputs = &model_mul->output;
2169 model_mul->super.output_size = 1;
2170 model_mul->p = p;
2171 ccv_cnnp_model_copy_name(&model_mul->super, name);
2172 return (ccv_cnnp_model_t*)model_mul;
2173}
2174
2175static ccv_cnnp_model_t* _ccv_cnnp_mul_copy(const ccv_cnnp_model_t* const super, void* const context)
2176{
2177 const ccv_cnnp_model_mul_t* const self = (const ccv_cnnp_model_mul_t*)super;
2178 return ccv_cnnp_mul(self->p, self->super.name);
2179}
2180
2181// MARK - Scalar Mul Layer
2182
2183typedef struct {
2184 ccv_cnnp_model_t super;
2185 ccv_nnc_tensor_symbol_t output;
2186 float a;
2187} ccv_cnnp_model_scalar_mul_t;
2188
2189static void _ccv_cnnp_scalar_mul_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2190{
2191 PRINT(CCV_CLI_VERBOSE, "[cnnp_scalar_mul_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_scalar_mul_build] -\n"); fflush(stdout); } }
while (0)
;
2192 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 2192, __extension__ __PRETTY_FUNCTION__); }))
;
2193 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 2193, __extension__ __PRETTY_FUNCTION__
); }))
;
2194 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2195 ccv_nnc_tensor_param_t output_params;
2196 ccv_cnnp_model_scalar_mul_t* const self = (ccv_cnnp_model_scalar_mul_t*)super;
2197 const ccv_nnc_cmd_t scalar_mul = CMD_SCALAR_MUL_FORWARD(self->a)ccv_nnc_cmd(CCV_NNC_SCALAR_MUL_FORWARD, 0, (ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.blas={.a={self->a,}}}, 0)
;
2198 ccv_nnc_hint_tensor_auto(scalar_mul, (ccv_nnc_tensor_param_t []){
2199 params,
2200 }, 1, ccv_nnc_no_hint, &output_params, 1);
2201 const ccv_nnc_tensor_symbol_t scalar_mul_output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
2202 ccv_nnc_graph_exec_symbol_new(graph, scalar_mul, TENSOR_SYMBOL_LIST(inputs[0])(const ccv_nnc_tensor_symbol_t []){inputs[0]}, (1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(scalar_mul_output)(const ccv_nnc_tensor_symbol_t []){scalar_mul_output}, (1 +1 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
-1)
, "scalar_mul");
2203 outputs[0] = scalar_mul_output;
2204}
2205
2206static ccv_cnnp_model_t* _ccv_cnnp_scalar_mul_copy(const ccv_cnnp_model_t* const super, void* const context);
2207
2208static const ccv_cnnp_model_vtab_t ccv_cnnp_scalar_mul_isa = {
2209 .build = _ccv_cnnp_scalar_mul_build,
2210 .copy = _ccv_cnnp_scalar_mul_copy,
2211};
2212
2213ccv_cnnp_model_t* ccv_cnnp_scalar_mul(const float a, const char* const name)
2214{
2215 ccv_cnnp_model_scalar_mul_t* const model_scalar_mul = (ccv_cnnp_model_scalar_mul_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_scalar_mul_t));
2216 model_scalar_mul->super.isa = &ccv_cnnp_scalar_mul_isa;
2217 model_scalar_mul->super.input_size = 1;
2218 model_scalar_mul->super.outputs = &model_scalar_mul->output;
2219 model_scalar_mul->super.output_size = 1;
2220 model_scalar_mul->a = a;
2221 ccv_cnnp_model_copy_name(&model_scalar_mul->super, name);
2222 return (ccv_cnnp_model_t*)model_scalar_mul;
2223}
2224
2225static ccv_cnnp_model_t* _ccv_cnnp_scalar_mul_copy(const ccv_cnnp_model_t* const super, void* const context)
2226{
2227 const ccv_cnnp_model_scalar_mul_t* const self = (const ccv_cnnp_model_scalar_mul_t*)super;
2228 return ccv_cnnp_scalar_mul(self->a, self->super.name);
2229}
2230
2231// MARK - Div Layer
2232
2233typedef struct {
2234 ccv_cnnp_model_t super;
2235 ccv_nnc_tensor_symbol_t output;
2236 int reciprocal;
2237} ccv_cnnp_model_div_t;
2238
2239static void _ccv_cnnp_div_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2240{
2241 const ccv_cnnp_model_div_t* const self = (const ccv_cnnp_model_div_t*)super;
2242 PRINT(CCV_CLI_VERBOSE, "[cnnp_div_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_div_build] -\n"); fflush(stdout); } } while (
0)
;
2243 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 2243, __extension__ __PRETTY_FUNCTION__
); }))
;
2244 ccv_nnc_tensor_param_t input_params[2];
2245 int i;
2246 ccv_nnc_tensor_param_t output_params;
2247 const ccv_nnc_cmd_t div = CMD_EWDIV_FORWARD()ccv_nnc_cmd(CCV_NNC_EWDIV_FORWARD, 0, ccv_nnc_cmd_auto, 0);
2248 if (self->reciprocal)
2249 {
2250 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 2250, __extension__ __PRETTY_FUNCTION__); }))
;
2251 input_params[0] = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2252 input_params[1] = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2253 ccv_nnc_hint_tensor_auto(div, input_params, 2, ccv_nnc_no_hint, &output_params, 1);
2254 outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
2255 ccv_nnc_graph_exec_symbol_new(graph, div, TENSOR_SYMBOL_LIST(NO_TENSOR_SYMBOL, inputs[0])(const ccv_nnc_tensor_symbol_t []){(const ccv_nnc_tensor_symbol_t
){.d = CCV_NNC_NO_TENSOR_SYMBOL}, inputs[0]}, (1 +1 +1 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, outputs, output_size, "div");
2256 } else {
2257 assert(input_size == 2)((void) sizeof ((input_size == 2) ? 1 : 0), __extension__ ({ if
(input_size == 2) ; else __assert_fail ("input_size == 2", "ccv_cnnp_model_addons.c"
, 2257, __extension__ __PRETTY_FUNCTION__); }))
;
2258 for (i = 0; i < 2; i++)
2259 input_params[i] = ccv_nnc_tensor_symbol_params(graph, inputs[i]);
2260 ccv_nnc_hint_tensor_auto(div, input_params, input_size, ccv_nnc_no_hint, &output_params, 1);
2261 outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
2262 ccv_nnc_graph_exec_symbol_new(graph, div, inputs, input_size, outputs, output_size, "div");
2263 }
2264}
2265
2266static ccv_cnnp_model_t* _ccv_cnnp_div_copy(const ccv_cnnp_model_t* const self, void* const context);
2267
2268static const ccv_cnnp_model_vtab_t ccv_cnnp_div_isa = {
2269 .build = _ccv_cnnp_div_build,
2270 .copy = _ccv_cnnp_div_copy,
2271};
2272
2273ccv_cnnp_model_t* ccv_cnnp_div(const int reciprocal, const char* const name)
2274{
2275 ccv_cnnp_model_div_t* const model_div = (ccv_cnnp_model_div_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_div_t));
2276 model_div->super.isa = &ccv_cnnp_div_isa;
2277 model_div->super.input_size = reciprocal ? 1 : 2;
2278 model_div->super.outputs = &model_div->output;
2279 model_div->super.output_size = 1;
2280 model_div->reciprocal = reciprocal;
2281 ccv_cnnp_model_copy_name(&model_div->super, name);
2282 return (ccv_cnnp_model_t*)model_div;
2283}
2284
2285static ccv_cnnp_model_t* _ccv_cnnp_div_copy(const ccv_cnnp_model_t* const super, void* const context)
2286{
2287 const ccv_cnnp_model_div_t* const self = (const ccv_cnnp_model_div_t*)super;
2288 return ccv_cnnp_div(self->reciprocal, self->super.name);
2289}
2290
2291// MARK - Sqrt Layer
2292
2293typedef struct {
2294 ccv_cnnp_model_t super;
2295 ccv_nnc_tensor_symbol_t output;
2296} ccv_cnnp_model_sqrt_t;
2297
2298static void _ccv_cnnp_sqrt_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2299{
2300 PRINT(CCV_CLI_VERBOSE, "[cnnp_sqrt_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_sqrt_build] -\n"); fflush(stdout); } } while
(0)
;
2301 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 2301, __extension__ __PRETTY_FUNCTION__
); }))
;
2302 ccv_nnc_tensor_param_t input_params[1];
2303 ccv_nnc_tensor_param_t output_params;
2304 const ccv_nnc_cmd_t sqrt = CMD_EWSQRT_FORWARD()ccv_nnc_cmd(CCV_NNC_EWSQRT_FORWARD, 0, ccv_nnc_cmd_auto, 0);
2305 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 2305, __extension__ __PRETTY_FUNCTION__); }))
;
2306 input_params[0] = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2307 ccv_nnc_hint_tensor_auto(sqrt, input_params, 1, ccv_nnc_no_hint, &output_params, 1);
2308 outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
2309 ccv_nnc_graph_exec_symbol_new(graph, sqrt, inputs, 1, outputs, output_size, "sqrt");
2310}
2311
2312static ccv_cnnp_model_t* _ccv_cnnp_sqrt_copy(const ccv_cnnp_model_t* const self, void* const context);
2313
2314static const ccv_cnnp_model_vtab_t ccv_cnnp_sqrt_isa = {
2315 .build = _ccv_cnnp_sqrt_build,
2316 .copy = _ccv_cnnp_sqrt_copy,
2317};
2318
2319ccv_cnnp_model_t* ccv_cnnp_sqrt(const char* const name)
2320{
2321 ccv_cnnp_model_sqrt_t* const model_sqrt = (ccv_cnnp_model_sqrt_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_sqrt_t));
2322 model_sqrt->super.isa = &ccv_cnnp_sqrt_isa;
2323 model_sqrt->super.input_size = 1;
2324 model_sqrt->super.outputs = &model_sqrt->output;
2325 model_sqrt->super.output_size = 1;
2326 ccv_cnnp_model_copy_name(&model_sqrt->super, name);
2327 return (ccv_cnnp_model_t*)model_sqrt;
2328}
2329
2330static ccv_cnnp_model_t* _ccv_cnnp_sqrt_copy(const ccv_cnnp_model_t* const super, void* const context)
2331{
2332 const ccv_cnnp_model_sqrt_t* const self = (const ccv_cnnp_model_sqrt_t*)super;
2333 return ccv_cnnp_sqrt(self->super.name);
2334}
2335
2336// MARK - Log Layer
2337
2338typedef struct {
2339 ccv_cnnp_model_t super;
2340 ccv_nnc_tensor_symbol_t output;
2341} ccv_cnnp_model_log_t;
2342
2343static void _ccv_cnnp_log_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2344{
2345 PRINT(CCV_CLI_VERBOSE, "[cnnp_log_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_log_build] -\n"); fflush(stdout); } } while (
0)
;
2346 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 2346, __extension__ __PRETTY_FUNCTION__
); }))
;
2347 ccv_nnc_tensor_param_t input_params[1];
2348 ccv_nnc_tensor_param_t output_params;
2349 const ccv_nnc_cmd_t log = CMD_EWLOG_FORWARD()ccv_nnc_cmd(CCV_NNC_EWLOG_FORWARD, 0, ccv_nnc_cmd_auto, 0);
2350 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 2350, __extension__ __PRETTY_FUNCTION__); }))
;
2351 input_params[0] = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2352 ccv_nnc_hint_tensor_auto(log, input_params, 1, ccv_nnc_no_hint, &output_params, 1);
2353 outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
2354 ccv_nnc_graph_exec_symbol_new(graph, log, inputs, 1, outputs, output_size, "log");
2355}
2356
2357static ccv_cnnp_model_t* _ccv_cnnp_log_copy(const ccv_cnnp_model_t* const self, void* const context);
2358
2359static const ccv_cnnp_model_vtab_t ccv_cnnp_log_isa = {
2360 .build = _ccv_cnnp_log_build,
2361 .copy = _ccv_cnnp_log_copy,
2362};
2363
2364ccv_cnnp_model_t* ccv_cnnp_log(const char* const name)
2365{
2366 ccv_cnnp_model_log_t* const model_log = (ccv_cnnp_model_log_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_log_t));
2367 model_log->super.isa = &ccv_cnnp_log_isa;
2368 model_log->super.input_size = 1;
2369 model_log->super.outputs = &model_log->output;
2370 model_log->super.output_size = 1;
2371 ccv_cnnp_model_copy_name(&model_log->super, name);
2372 return (ccv_cnnp_model_t*)model_log;
2373}
2374
2375static ccv_cnnp_model_t* _ccv_cnnp_log_copy(const ccv_cnnp_model_t* const super, void* const context)
2376{
2377 return ccv_cnnp_log(super->name);
2378}
2379
2380// MARK - Pow Layer
2381
2382typedef struct {
2383 ccv_cnnp_model_t super;
2384 ccv_nnc_tensor_symbol_t output;
2385 ccv_nnc_cmd_param_t params;
2386} ccv_cnnp_model_pow_t;
2387
2388static void _ccv_cnnp_pow_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2389{
2390 ccv_cnnp_model_pow_t* const self = (ccv_cnnp_model_pow_t*)super;
2391 PRINT(CCV_CLI_VERBOSE, "[cnnp_pow_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_pow_build] -\n"); fflush(stdout); } } while (
0)
;
2392 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 2392, __extension__ __PRETTY_FUNCTION__); }))
;
2393 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 2393, __extension__ __PRETTY_FUNCTION__
); }))
;
2394 ccv_nnc_tensor_param_t input_params[1];
2395 input_params[0] = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2396 ccv_nnc_tensor_param_t output_params;
2397 const ccv_nnc_cmd_t pow = ccv_nnc_cmd(CCV_NNC_EWPOW_FORWARD, 0, self->params, 0);
2398 ccv_nnc_hint_tensor_auto(pow, input_params, 1, ccv_nnc_no_hint, &output_params, 1);
2399 outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
2400 ccv_nnc_graph_exec_symbol_new(graph, pow, inputs, input_size, outputs, output_size, "pow");
2401}
2402
2403static ccv_cnnp_model_t* _ccv_cnnp_pow_copy(const ccv_cnnp_model_t* const self, void* const context);
2404
2405static const ccv_cnnp_model_vtab_t ccv_cnnp_pow_isa = {
2406 .build = _ccv_cnnp_pow_build,
2407 .copy = _ccv_cnnp_pow_copy,
2408};
2409
2410ccv_cnnp_model_t* ccv_cnnp_pow(const float exponent, const char* const name)
2411{
2412 ccv_cnnp_model_pow_t* const model_pow = (ccv_cnnp_model_pow_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_pow_t));
2413 model_pow->super.isa = &ccv_cnnp_pow_isa;
2414 model_pow->super.input_size = 1;
2415 model_pow->super.outputs = &model_pow->output;
2416 model_pow->super.output_size = 1;
2417 model_pow->params = (ccv_nnc_cmd_param_t){
2418 .size = {
2419 .dim = { 1, 1, 1 }
2420 },
2421 .pow = {
2422 .exponent = exponent,
2423 },
2424 };
2425 ccv_cnnp_model_copy_name(&model_pow->super, name);
2426 return (ccv_cnnp_model_t*)model_pow;
2427}
2428
2429static ccv_cnnp_model_t* _ccv_cnnp_pow_copy(const ccv_cnnp_model_t* const super, void* const context)
2430{
2431 const ccv_cnnp_model_pow_t* const self = (const ccv_cnnp_model_pow_t*)super;
2432 return ccv_cnnp_pow(self->params.pow.exponent, super->name);
2433}
2434
2435// MARK - Sin Layer
2436
2437typedef struct {
2438 ccv_cnnp_model_t super;
2439 ccv_nnc_tensor_symbol_t output;
2440} ccv_cnnp_model_sin_t;
2441
2442static void _ccv_cnnp_sin_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2443{
2444 PRINT(CCV_CLI_VERBOSE, "[cnnp_sin_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_sin_build] -\n"); fflush(stdout); } } while (
0)
;
2445 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 2445, __extension__ __PRETTY_FUNCTION__
); }))
;
2446 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 2446, __extension__ __PRETTY_FUNCTION__); }))
;
2447 ccv_nnc_tensor_param_t input_params[1];
2448 ccv_nnc_tensor_param_t output_params;
2449 const ccv_nnc_cmd_t sin = CMD_EWSIN_FORWARD()ccv_nnc_cmd(CCV_NNC_EWSIN_FORWARD, 0, ccv_nnc_cmd_auto, 0);
2450 input_params[0] = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2451 ccv_nnc_hint_tensor_auto(sin, input_params, 1, ccv_nnc_no_hint, &output_params, 1);
2452 outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
2453 ccv_nnc_graph_exec_symbol_new(graph, sin, inputs, 1, outputs, output_size, "sin");
2454}
2455
2456static ccv_cnnp_model_t* _ccv_cnnp_sin_copy(const ccv_cnnp_model_t* const self, void* const context);
2457
2458static const ccv_cnnp_model_vtab_t ccv_cnnp_sin_isa = {
2459 .build = _ccv_cnnp_sin_build,
2460 .copy = _ccv_cnnp_sin_copy,
2461};
2462
2463ccv_cnnp_model_t* ccv_cnnp_sin(const char* const name)
2464{
2465 ccv_cnnp_model_sin_t* const model_sin = (ccv_cnnp_model_sin_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_sin_t));
2466 model_sin->super.isa = &ccv_cnnp_sin_isa;
2467 model_sin->super.input_size = 1;
2468 model_sin->super.outputs = &model_sin->output;
2469 model_sin->super.output_size = 1;
2470 ccv_cnnp_model_copy_name(&model_sin->super, name);
2471 return (ccv_cnnp_model_t*)model_sin;
2472}
2473
2474static ccv_cnnp_model_t* _ccv_cnnp_sin_copy(const ccv_cnnp_model_t* const super, void* const context)
2475{
2476 return ccv_cnnp_sin(super->name);
2477}
2478
2479// MARK - Cos Layer
2480
2481typedef struct {
2482 ccv_cnnp_model_t super;
2483 ccv_nnc_tensor_symbol_t output;
2484} ccv_cnnp_model_cos_t;
2485
2486static void _ccv_cnnp_cos_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2487{
2488 PRINT(CCV_CLI_VERBOSE, "[cnnp_cos_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_cos_build] -\n"); fflush(stdout); } } while (
0)
;
2489 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 2489, __extension__ __PRETTY_FUNCTION__
); }))
;
2490 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 2490, __extension__ __PRETTY_FUNCTION__); }))
;
2491 ccv_nnc_tensor_param_t input_params[1];
2492 ccv_nnc_tensor_param_t output_params;
2493 const ccv_nnc_cmd_t cos = CMD_EWCOS_FORWARD()ccv_nnc_cmd(CCV_NNC_EWCOS_FORWARD, 0, ccv_nnc_cmd_auto, 0);
2494 input_params[0] = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2495 ccv_nnc_hint_tensor_auto(cos, input_params, 1, ccv_nnc_no_hint, &output_params, 1);
2496 outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
2497 ccv_nnc_graph_exec_symbol_new(graph, cos, inputs, 1, outputs, output_size, "cos");
2498}
2499
2500static ccv_cnnp_model_t* _ccv_cnnp_cos_copy(const ccv_cnnp_model_t* const self, void* const context);
2501
2502static const ccv_cnnp_model_vtab_t ccv_cnnp_cos_isa = {
2503 .build = _ccv_cnnp_cos_build,
2504 .copy = _ccv_cnnp_cos_copy,
2505};
2506
2507ccv_cnnp_model_t* ccv_cnnp_cos(const char* const name)
2508{
2509 ccv_cnnp_model_cos_t* const model_cos = (ccv_cnnp_model_cos_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_cos_t));
2510 model_cos->super.isa = &ccv_cnnp_cos_isa;
2511 model_cos->super.input_size = 1;
2512 model_cos->super.outputs = &model_cos->output;
2513 model_cos->super.output_size = 1;
2514 ccv_cnnp_model_copy_name(&model_cos->super, name);
2515 return (ccv_cnnp_model_t*)model_cos;
2516}
2517
2518static ccv_cnnp_model_t* _ccv_cnnp_cos_copy(const ccv_cnnp_model_t* const super, void* const context)
2519{
2520 return ccv_cnnp_cos(super->name);
2521}
2522
2523// MARK - Rotate Half Layer
2524
2525typedef struct {
2526 ccv_cnnp_model_t super;
2527 ccv_nnc_tensor_symbol_t output;
2528} ccv_cnnp_model_rotate_half_t;
2529
2530static void _ccv_cnnp_rotate_half_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2531{
2532 PRINT(CCV_CLI_VERBOSE, "[cnnp_rotate_half_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_rotate_half_build] -\n"); fflush(stdout); } }
while (0)
;
2533 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 2533, __extension__ __PRETTY_FUNCTION__); }))
;
2534 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 2534, __extension__ __PRETTY_FUNCTION__
); }))
;
2535 ccv_nnc_tensor_param_t input_params[1];
2536 ccv_nnc_tensor_param_t output_params;
2537 const ccv_nnc_cmd_t rotate_half = CMD_ROTATE_HALF_FORWARD()ccv_nnc_cmd(CCV_NNC_ROTATE_HALF_FORWARD, 0, (ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}}}, 0)
;
2538 input_params[0] = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2539 ccv_nnc_hint_tensor_auto(rotate_half, input_params, 1, ccv_nnc_no_hint, &output_params, 1);
2540 outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
2541 ccv_nnc_graph_exec_symbol_new(graph, rotate_half, inputs, 1, outputs, output_size, "rotate_half");
2542}
2543
2544static ccv_cnnp_model_t* _ccv_cnnp_rotate_half_copy(const ccv_cnnp_model_t* const self, void* const context);
2545
2546static const ccv_cnnp_model_vtab_t ccv_cnnp_rotate_half_isa = {
2547 .build = _ccv_cnnp_rotate_half_build,
2548 .copy = _ccv_cnnp_rotate_half_copy,
2549};
2550
2551ccv_cnnp_model_t* ccv_cnnp_rotate_half(const char* const name)
2552{
2553 ccv_cnnp_model_rotate_half_t* const model_rotate_half = (ccv_cnnp_model_rotate_half_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_rotate_half_t));
2554 model_rotate_half->super.isa = &ccv_cnnp_rotate_half_isa;
2555 model_rotate_half->super.input_size = 1;
2556 model_rotate_half->super.outputs = &model_rotate_half->output;
2557 model_rotate_half->super.output_size = 1;
2558 ccv_cnnp_model_copy_name(&model_rotate_half->super, name);
2559 return (ccv_cnnp_model_t*)model_rotate_half;
2560}
2561
2562static ccv_cnnp_model_t* _ccv_cnnp_rotate_half_copy(const ccv_cnnp_model_t* const super, void* const context)
2563{
2564 return ccv_cnnp_rotate_half(super->name);
2565}
2566
2567// MARK - Gated Delta Layer
2568
2569typedef struct {
2570 ccv_cnnp_model_t super;
2571 ccv_nnc_tensor_symbol_t outputs[2];
2572 int log_decay;
2573} ccv_cnnp_model_gated_delta_t;
2574
2575static void _ccv_cnnp_gated_delta_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2576{
2577 PRINT(CCV_CLI_VERBOSE, "[cnnp_gated_delta_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_gated_delta_build] -\n"); fflush(stdout); } }
while (0)
;
2578 ccv_cnnp_model_gated_delta_t* const self = (ccv_cnnp_model_gated_delta_t*)super;
2579 assert(input_size == 6)((void) sizeof ((input_size == 6) ? 1 : 0), __extension__ ({ if
(input_size == 6) ; else __assert_fail ("input_size == 6", "ccv_cnnp_model_addons.c"
, 2579, __extension__ __PRETTY_FUNCTION__); }))
;
2580 assert(output_size == 2)((void) sizeof ((output_size == 2) ? 1 : 0), __extension__ ({
if (output_size == 2) ; else __assert_fail ("output_size == 2"
, "ccv_cnnp_model_addons.c", 2580, __extension__ __PRETTY_FUNCTION__
); }))
;
2581 ccv_nnc_tensor_param_t input_params[6];
2582 int i;
2583 for (i = 0; i < 6; i++)
2584 input_params[i] = ccv_nnc_tensor_symbol_params(graph, inputs[i]);
2585 ccv_nnc_tensor_param_t output_params[2];
2586 const ccv_nnc_cmd_t gated_delta = CMD_GATED_DELTA_FORWARD(self->log_decay)ccv_nnc_cmd(CCV_NNC_GATED_DELTA_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.gated_delta={.log_decay=(self->log_decay
)}}), 0)
;
2587 ccv_nnc_hint_tensor_auto(gated_delta, input_params, 6, ccv_nnc_no_hint, output_params, 2);
2588 for (i = 0; i < 2; i++)
2589 outputs[i] = ccv_nnc_tensor_symbol_new(graph, output_params[i], 0);
2590 ccv_nnc_graph_exec_symbol_new(graph, gated_delta, inputs, input_size, outputs, output_size, "gated_delta");
2591}
2592
2593static ccv_cnnp_model_t* _ccv_cnnp_gated_delta_copy(const ccv_cnnp_model_t* const self, void* const context);
2594
2595static const ccv_cnnp_model_vtab_t ccv_cnnp_gated_delta_isa = {
2596 .build = _ccv_cnnp_gated_delta_build,
2597 .copy = _ccv_cnnp_gated_delta_copy,
2598};
2599
2600ccv_cnnp_model_t* ccv_cnnp_gated_delta(const int log_decay, const char* const name)
2601{
2602 ccv_cnnp_model_gated_delta_t* const model_gated_delta = (ccv_cnnp_model_gated_delta_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_gated_delta_t));
2603 model_gated_delta->super.isa = &ccv_cnnp_gated_delta_isa;
2604 model_gated_delta->super.input_size = 6;
2605 model_gated_delta->super.outputs = model_gated_delta->outputs;
2606 model_gated_delta->super.output_size = 2;
2607 model_gated_delta->log_decay = log_decay;
2608 ccv_cnnp_model_copy_name(&model_gated_delta->super, name);
2609 return (ccv_cnnp_model_t*)model_gated_delta;
2610}
2611
2612static ccv_cnnp_model_t* _ccv_cnnp_gated_delta_copy(const ccv_cnnp_model_t* const super, void* const context)
2613{
2614 const ccv_cnnp_model_gated_delta_t* const self = (const ccv_cnnp_model_gated_delta_t*)super;
2615 return ccv_cnnp_gated_delta(self->log_decay, super->name);
2616}
2617
2618// MARK - Cmul Layer
2619
2620typedef struct {
2621 ccv_cnnp_model_t super;
2622 ccv_nnc_tensor_symbol_t output;
2623} ccv_cnnp_model_cmul_t;
2624
2625static void _ccv_cnnp_cmul_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2626{
2627 PRINT(CCV_CLI_VERBOSE, "[cnnp_cmul_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_cmul_build] -\n"); fflush(stdout); } } while
(0)
;
2628 assert(input_size == 2)((void) sizeof ((input_size == 2) ? 1 : 0), __extension__ ({ if
(input_size == 2) ; else __assert_fail ("input_size == 2", "ccv_cnnp_model_addons.c"
, 2628, __extension__ __PRETTY_FUNCTION__); }))
;
2629 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 2629, __extension__ __PRETTY_FUNCTION__
); }))
;
2630 ccv_nnc_tensor_param_t input_params[2];
2631 int i;
2632 for (i = 0; i < 2; i++)
2633 input_params[i] = ccv_nnc_tensor_symbol_params(graph, inputs[i]);
2634 ccv_nnc_tensor_param_t output_params;
2635 const ccv_nnc_cmd_t mul = CMD_CMUL_FORWARD()ccv_nnc_cmd(CCV_NNC_CMUL_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}}}, 0)
;
2636 ccv_nnc_hint_tensor_auto(mul, input_params, 2, ccv_nnc_no_hint, &output_params, 1);
2637 outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
2638 ccv_nnc_graph_exec_symbol_new(graph, mul, inputs, input_size, outputs, output_size, "cmul");
2639}
2640
2641static ccv_cnnp_model_t* _ccv_cnnp_cmul_copy(const ccv_cnnp_model_t* const self, void* const context);
2642
2643static const ccv_cnnp_model_vtab_t ccv_cnnp_cmul_isa = {
2644 .build = _ccv_cnnp_cmul_build,
2645 .copy = _ccv_cnnp_cmul_copy,
2646};
2647
2648ccv_cnnp_model_t* ccv_cnnp_cmul(const char* const name)
2649{
2650 ccv_cnnp_model_cmul_t* const model_cmul = (ccv_cnnp_model_cmul_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_cmul_t));
2651 model_cmul->super.isa = &ccv_cnnp_cmul_isa;
2652 model_cmul->super.input_size = 2;
2653 model_cmul->super.outputs = &model_cmul->output;
2654 model_cmul->super.output_size = 1;
2655 ccv_cnnp_model_copy_name(&model_cmul->super, name);
2656 return (ccv_cnnp_model_t*)model_cmul;
2657}
2658
2659static ccv_cnnp_model_t* _ccv_cnnp_cmul_copy(const ccv_cnnp_model_t* const super, void* const context)
2660{
2661 return ccv_cnnp_cmul(super->name);
2662}
2663
2664// MARK - Transpose Layer
2665
2666typedef struct {
2667 ccv_cnnp_model_t super;
2668 ccv_nnc_tensor_symbol_t output;
2669 int transpose[2];
2670} ccv_cnnp_model_transpose_t;
2671
2672static void _ccv_cnnp_transpose_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2673{
2674 ccv_cnnp_model_transpose_t* const self = (ccv_cnnp_model_transpose_t*)super;
2675 PRINT(CCV_CLI_VERBOSE, "[cnnp_transpose_build] (%d, %d)\n", self->transpose[0], self->transpose[1])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_transpose_build] (%d, %d)\n", self->transpose
[0], self->transpose[1]); fflush(stdout); } } while (0)
;
2676 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 2676, __extension__ __PRETTY_FUNCTION__); }))
;
2677 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 2677, __extension__ __PRETTY_FUNCTION__
); }))
;
2678 if (self->transpose[0] == self->transpose[1])
2679 {
2680 outputs[0] = inputs[0];
2681 return;
2682 }
2683 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2684 ccv_nnc_tensor_param_t output_params;
2685 const ccv_nnc_cmd_t transpose = CMD_TRANSPOSE_FORWARD(self->transpose[0], self->transpose[1])ccv_nnc_cmd(CCV_NNC_TRANSPOSE_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.transpose={.axis={self->transpose[
0], self->transpose[1]}}}), 0)
;
2686 ccv_nnc_hint_tensor_auto(transpose, (ccv_nnc_tensor_param_t []){
2687 params,
2688 }, 1, ccv_nnc_no_hint, &output_params, 1);
2689 const ccv_nnc_tensor_symbol_t transpose_output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
2690 ccv_nnc_graph_exec_symbol_new(graph, transpose, TENSOR_SYMBOL_LIST(inputs[0])(const ccv_nnc_tensor_symbol_t []){inputs[0]}, (1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(transpose_output)(const ccv_nnc_tensor_symbol_t []){transpose_output}, (1 +1 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
-1)
, "transpose");
2691 outputs[0] = transpose_output;
2692}
2693
2694static ccv_cnnp_model_t* _ccv_cnnp_transpose_copy(const ccv_cnnp_model_t* const super, void* const context);
2695
2696static const ccv_cnnp_model_vtab_t ccv_cnnp_transpose_isa = {
2697 .build = _ccv_cnnp_transpose_build,
2698 .copy = _ccv_cnnp_transpose_copy,
2699};
2700
2701ccv_cnnp_model_t* ccv_cnnp_transpose(const int axis_a, const int axis_b, const char* const name)
2702{
2703 ccv_cnnp_model_transpose_t* const model_transpose = (ccv_cnnp_model_transpose_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_transpose_t));
2704 model_transpose->super.isa = &ccv_cnnp_transpose_isa;
2705 model_transpose->super.input_size = 1;
2706 model_transpose->super.outputs = &model_transpose->output;
2707 model_transpose->super.output_size = 1;
2708 model_transpose->transpose[0] = axis_a;
2709 model_transpose->transpose[1] = axis_b;
2710 ccv_cnnp_model_copy_name(&model_transpose->super, name);
2711 return (ccv_cnnp_model_t*)model_transpose;
2712}
2713
2714static ccv_cnnp_model_t* _ccv_cnnp_transpose_copy(const ccv_cnnp_model_t* const super, void* const context)
2715{
2716 const ccv_cnnp_model_transpose_t* const self = (const ccv_cnnp_model_transpose_t*)super;
2717 return ccv_cnnp_transpose(self->transpose[0], self->transpose[1], self->super.name);
2718}
2719
2720// MARK - Layer Norm Layer
2721
2722typedef struct {
2723 ccv_cnnp_model_t super;
2724 ccv_nnc_tensor_symbol_t output;
2725 ccv_nnc_tensor_symbol_t bias;
2726 ccv_nnc_tensor_symbol_t scale;
2727 ccv_nnc_cmd_param_t params;
2728} ccv_cnnp_model_layer_norm_t;
2729
2730static void _ccv_cnnp_layer_norm_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2731{
2732 PRINT(CCV_CLI_VERBOSE, "[cnnp_layer_norm_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_layer_norm_build] -\n"); fflush(stdout); } }
while (0)
;
2733 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 2733, __extension__ __PRETTY_FUNCTION__); }))
;
2734 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 2734, __extension__ __PRETTY_FUNCTION__
); }))
;
2735 ccv_cnnp_model_layer_norm_t* const self = (ccv_cnnp_model_layer_norm_t*)super;
2736 const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2737 ccv_nnc_tensor_param_t bias_params = params;
2738 const int nd = ccv_nnc_tensor_nd(params.dim);
2739 int i;
2740 for (i = 0; i < nd; i++)
2741 bias_params.dim[i] = 1;
2742 for (i = 0; i < self->params.lnorm.count; i++)
2743 bias_params.dim[self->params.lnorm.axis[i]] = params.dim[self->params.lnorm.axis[i]];
2744 if (self->params.lnorm.elementwise_affine)
2745 {
2746 // Both scale and bias are shared between if this model is reused.
2747 if (!self->scale.graph)
2748 self->scale = ccv_nnc_tensor_symbol_new(graph, bias_params, "scale");
2749 if (!self->bias.graph)
2750 self->bias = ccv_nnc_tensor_symbol_new(graph, bias_params, "bias");
2751 }
2752 const ccv_nnc_tensor_symbol_t scale = self->params.lnorm.elementwise_affine ? ccv_cnnp_model_get_symbol(super, self->scale) : NO_TENSOR_SYMBOL(const ccv_nnc_tensor_symbol_t){.d = CCV_NNC_NO_TENSOR_SYMBOL
}
;
2753 const ccv_nnc_tensor_symbol_t bias = self->params.lnorm.elementwise_affine ? ccv_cnnp_model_get_symbol(super, self->bias) : NO_TENSOR_SYMBOL(const ccv_nnc_tensor_symbol_t){.d = CCV_NNC_NO_TENSOR_SYMBOL
}
;
2754 const ccv_nnc_cmd_t layer_norm = ccv_nnc_cmd(CCV_NNC_LAYER_NORM_FORWARD, 0, self->params, 0);
2755 ccv_nnc_tensor_param_t output_params[3];
2756 if (self->params.lnorm.elementwise_affine)
2757 ccv_nnc_hint_tensor_auto(layer_norm, (ccv_nnc_tensor_param_t []){
2758 params,
2759 bias_params,
2760 bias_params,
2761 }, 3, ccv_nnc_no_hint, output_params, 3);
2762 else
2763 ccv_nnc_hint_tensor_auto(layer_norm, (ccv_nnc_tensor_param_t []){
2764 params,
2765 }, 1, ccv_nnc_no_hint, output_params, 3);
2766 const ccv_nnc_tensor_symbol_t output = ccv_nnc_tensor_symbol_new(graph, output_params[0], 0);
2767 const ccv_nnc_tensor_symbol_t saved_mean = ccv_nnc_tensor_symbol_new(graph, output_params[1], "saved_mean");
2768 const ccv_nnc_tensor_symbol_t saved_inv_std = ccv_nnc_tensor_symbol_new(graph, output_params[2], "saved_inv_std");
2769 if (self->params.lnorm.elementwise_affine)
2770 ccv_nnc_graph_exec_symbol_new(graph, layer_norm, TENSOR_SYMBOL_LIST(inputs[0], scale, bias)(const ccv_nnc_tensor_symbol_t []){inputs[0], scale, bias}, (
1 +1 +1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 -1)
, TENSOR_SYMBOL_LIST(output, saved_mean, saved_inv_std)(const ccv_nnc_tensor_symbol_t []){output, saved_mean, saved_inv_std
}, (1 +1 +1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 -1)
, "layer_norm");
2771 else
2772 ccv_nnc_graph_exec_symbol_new(graph, layer_norm, TENSOR_SYMBOL_LIST(inputs[0])(const ccv_nnc_tensor_symbol_t []){inputs[0]}, (1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(output, saved_mean, saved_inv_std)(const ccv_nnc_tensor_symbol_t []){output, saved_mean, saved_inv_std
}, (1 +1 +1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 -1)
, "layer_norm");
2773 outputs[0] = output;
2774}
2775
2776static void _ccv_cnnp_layer_norm_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
2777{
2778 ccv_cnnp_model_layer_norm_t* const self = (ccv_cnnp_model_layer_norm_t*)super;
2779 if (self->scale.graph)
2780 initializer(context, CMD_SET_FORWARD(1)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={1,}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->scale);
2781 if (self->bias.graph)
2782 initializer(context, CMD_SET_FORWARD(0)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={0,}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->bias);
2783}
2784
2785static void _ccv_cnnp_layer_norm_add_to_parameter(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const parameters, const int is_trainable)
2786{
2787 ccv_cnnp_model_layer_norm_t* const self = (ccv_cnnp_model_layer_norm_t*)super;
2788 if (self->scale.graph)
2789 add_to_array(parameters, self->scale, is_trainable);
2790 if (self->bias.graph)
2791 add_to_array(parameters, self->bias, is_trainable);
2792}
2793
2794static ccv_cnnp_model_t* _ccv_cnnp_layer_norm_copy(const ccv_cnnp_model_t* const super, void* const context);
2795
2796static const ccv_cnnp_model_vtab_t ccv_cnnp_layer_norm_isa = {
2797 .build = _ccv_cnnp_layer_norm_build,
2798 .init_states = _ccv_cnnp_layer_norm_init_states,
2799 .add_to_parameter = _ccv_cnnp_layer_norm_add_to_parameter,
2800 .copy = _ccv_cnnp_layer_norm_copy,
2801};
2802
2803ccv_cnnp_model_t* ccv_cnnp_layer_norm(const float epsilon, const int axis[CCV_NNC_MAX_DIM_ALLOC(12)], const int axis_count, const int elementwise_affine, const float scale, const int is_trainable, const char* const name)
2804{
2805 ccv_cnnp_model_layer_norm_t* const model_layer_norm = (ccv_cnnp_model_layer_norm_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_layer_norm_t));
2806 model_layer_norm->super.isa = &ccv_cnnp_layer_norm_isa;
2807 model_layer_norm->super.input_size = 1;
2808 model_layer_norm->super.outputs = &model_layer_norm->output;
2809 model_layer_norm->super.output_size = 1;
2810 model_layer_norm->super.is_trainable = is_trainable;
2811 ccv_cnnp_model_copy_name(&model_layer_norm->super, name);
2812 model_layer_norm->scale.d = CCV_NNC_NO_TENSOR_SYMBOL;
2813 model_layer_norm->scale.graph = 0;
2814 model_layer_norm->bias.d = CCV_NNC_NO_TENSOR_SYMBOL;
2815 model_layer_norm->bias.graph = 0;
2816 model_layer_norm->params.lnorm.epsilon = epsilon;
2817 model_layer_norm->params.lnorm.scale = scale;
2818 model_layer_norm->params.lnorm.count = axis_count;
2819 model_layer_norm->params.lnorm.elementwise_affine = elementwise_affine;
2820 memcpy(model_layer_norm->params.lnorm.axis, axis, sizeof(int) * axis_count);
2821 return (ccv_cnnp_model_t*)model_layer_norm;
2822}
2823
2824static ccv_cnnp_model_t* _ccv_cnnp_layer_norm_copy(const ccv_cnnp_model_t* const super, void* const context)
2825{
2826 const ccv_cnnp_model_layer_norm_t* const self = (const ccv_cnnp_model_layer_norm_t*)super;
2827 return ccv_cnnp_layer_norm(self->params.lnorm.epsilon, self->params.lnorm.axis, self->params.lnorm.count, self->params.lnorm.elementwise_affine, self->params.lnorm.scale, self->super.is_trainable, self->super.name);
2828}
2829
2830// MARK - Group Norm Layer
2831
2832typedef struct {
2833 ccv_cnnp_model_t super;
2834 ccv_nnc_tensor_symbol_t output;
2835 ccv_nnc_tensor_symbol_t bias;
2836 ccv_nnc_tensor_symbol_t scale;
2837 ccv_nnc_cmd_param_t params;
2838} ccv_cnnp_model_group_norm_t;
2839
// Builds the symbolic graph for group norm on a single input. Scale/bias
// tensors are shaped as all-1 dims except along gnorm.group_axis, created once
// (shared on model reuse) when elementwise_affine is set, and the exec symbol
// emits output plus saved_mean/saved_inv_std auxiliaries for the backward pass.
2840static void _ccv_cnnp_group_norm_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2841{
2842 PRINT(CCV_CLI_VERBOSE, "[cnnp_group_norm_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_group_norm_build] -\n"); fflush(stdout); } }
while (0)
;
2843 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 2843, __extension__ __PRETTY_FUNCTION__); }))
;
2844 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
 if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 2844, __extension__ __PRETTY_FUNCTION__
); }))
;
2845 ccv_cnnp_model_group_norm_t* const self = (ccv_cnnp_model_group_norm_t*)super;
2846 const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2847 ccv_nnc_tensor_param_t bias_params = params;
2848 const int nd = ccv_nnc_tensor_nd(params.dim);
2849 int i;
// Collapse every dim to 1, then restore the group axis: scale/bias broadcast
// along all axes except the grouped channel axis.
2850 for (i = 0; i < nd; i++)
2851 bias_params.dim[i] = 1;
2852 bias_params.dim[self->params.gnorm.group_axis] = params.dim[self->params.gnorm.group_axis];
2853 if (self->params.gnorm.elementwise_affine)
2854 {
2855 // Both scale and bias are shared between if this model is reused.
2856 if (!self->scale.graph)
2857 self->scale = ccv_nnc_tensor_symbol_new(graph, bias_params, "scale");
2858 if (!self->bias.graph)
2859 self->bias = ccv_nnc_tensor_symbol_new(graph, bias_params, "bias");
2860 }
2861 const ccv_nnc_tensor_symbol_t scale = self->params.gnorm.elementwise_affine ? ccv_cnnp_model_get_symbol(super, self->scale) : NO_TENSOR_SYMBOL(const ccv_nnc_tensor_symbol_t){.d = CCV_NNC_NO_TENSOR_SYMBOL
}
;
2862 const ccv_nnc_tensor_symbol_t bias = self->params.gnorm.elementwise_affine ? ccv_cnnp_model_get_symbol(super, self->bias) : NO_TENSOR_SYMBOL(const ccv_nnc_tensor_symbol_t){.d = CCV_NNC_NO_TENSOR_SYMBOL
}
;
2863 const ccv_nnc_cmd_t group_norm = ccv_nnc_cmd(CCV_NNC_GROUP_NORM_FORWARD, 0, self->params, 0);
2864 ccv_nnc_tensor_param_t output_params[3];
2865 if (self->params.gnorm.elementwise_affine)
2866 ccv_nnc_hint_tensor_auto(group_norm, (ccv_nnc_tensor_param_t []){
2867 params,
2868 bias_params,
2869 bias_params,
2870 }, 3, ccv_nnc_no_hint, output_params, 3);
2871 else
2872 ccv_nnc_hint_tensor_auto(group_norm, (ccv_nnc_tensor_param_t []){
2873 params,
2874 }, 1, ccv_nnc_no_hint, output_params, 3);
2875 const ccv_nnc_tensor_symbol_t output = ccv_nnc_tensor_symbol_new(graph, output_params[0], 0);
2876 const ccv_nnc_tensor_symbol_t saved_mean = ccv_nnc_tensor_symbol_new(graph, output_params[1], "saved_mean");
2877 const ccv_nnc_tensor_symbol_t saved_inv_std = ccv_nnc_tensor_symbol_new(graph, output_params[2], "saved_inv_std");
2878 if (self->params.gnorm.elementwise_affine)
2879 ccv_nnc_graph_exec_symbol_new(graph, group_norm, TENSOR_SYMBOL_LIST(inputs[0], scale, bias)(const ccv_nnc_tensor_symbol_t []){inputs[0], scale, bias}, (
1 +1 +1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 -1)
, TENSOR_SYMBOL_LIST(output, saved_mean, saved_inv_std)(const ccv_nnc_tensor_symbol_t []){output, saved_mean, saved_inv_std
}, (1 +1 +1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 -1)
, "group_norm");
2880 else
2881 ccv_nnc_graph_exec_symbol_new(graph, group_norm, TENSOR_SYMBOL_LIST(inputs[0])(const ccv_nnc_tensor_symbol_t []){inputs[0]}, (1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(output, saved_mean, saved_inv_std)(const ccv_nnc_tensor_symbol_t []){output, saved_mean, saved_inv_std
}, (1 +1 +1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 -1)
, "group_norm");
2882 outputs[0] = output;
2883}
2884
// Initializes group norm parameters to the identity transform: scale=1,
// bias=0. Skipped when the symbols were never created (no elementwise affine).
2885static void _ccv_cnnp_group_norm_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
2886{
2887 ccv_cnnp_model_group_norm_t* const self = (ccv_cnnp_model_group_norm_t*)super;
2888 if (self->scale.graph)
2889 initializer(context, CMD_SET_FORWARD(1)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={1,}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->scale);
2890 if (self->bias.graph)
2891 initializer(context, CMD_SET_FORWARD(0)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={0,}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->bias);
2892}
2893
// Registers scale/bias as trainable parameters (only if they were created,
// i.e. elementwise_affine was requested at build time).
2894static void _ccv_cnnp_group_norm_add_to_parameter(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const parameters, const int is_trainable)
2895{
2896 ccv_cnnp_model_group_norm_t* const self = (ccv_cnnp_model_group_norm_t*)super;
2897 if (self->scale.graph)
2898 add_to_array(parameters, self->scale, is_trainable);
2899 if (self->bias.graph)
2900 add_to_array(parameters, self->bias, is_trainable);
2901}
2902
2903static ccv_cnnp_model_t* _ccv_cnnp_group_norm_copy(const ccv_cnnp_model_t* const super, void* const context);
2904
// vtab wiring group norm into the cnnp model framework.
2905static const ccv_cnnp_model_vtab_t ccv_cnnp_group_norm_isa = {
2906 .build = _ccv_cnnp_group_norm_build,
2907 .init_states = _ccv_cnnp_group_norm_init_states,
2908 .add_to_parameter = _ccv_cnnp_group_norm_add_to_parameter,
2909 .copy = _ccv_cnnp_group_norm_copy,
2910};
2911
// Public constructor for a group norm model. Stores the gnorm command
// parameters; the scale/bias symbols stay unset until build() runs, so a
// freshly constructed model carries no graph state.
2912ccv_cnnp_model_t* ccv_cnnp_group_norm(const int group_axis, const int groups, const float epsilon, const int reduce_axis[CCV_NNC_MAX_DIM_ALLOC(12)], const int axis_count, const int elementwise_affine, const int is_trainable, const char* const name)
2913{
2914 ccv_cnnp_model_group_norm_t* const model_group_norm = (ccv_cnnp_model_group_norm_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_group_norm_t));
2915 model_group_norm->super.isa = &ccv_cnnp_group_norm_isa;
2916 model_group_norm->super.input_size = 1;
2917 model_group_norm->super.outputs = &model_group_norm->output;
2918 model_group_norm->super.output_size = 1;
2919 model_group_norm->super.is_trainable = is_trainable;
2920 ccv_cnnp_model_copy_name(&model_group_norm->super, name);
2921 model_group_norm->scale.d = CCV_NNC_NO_TENSOR_SYMBOL;
2922 model_group_norm->scale.graph = 0;
2923 model_group_norm->bias.d = CCV_NNC_NO_TENSOR_SYMBOL;
2924 model_group_norm->bias.graph = 0;
2925 model_group_norm->params.gnorm.group_axis = group_axis;
2926 model_group_norm->params.gnorm.groups = groups;
2927 model_group_norm->params.gnorm.epsilon = epsilon;
2928 model_group_norm->params.gnorm.reduce_count = axis_count;
2929 model_group_norm->params.gnorm.elementwise_affine = elementwise_affine;
2930 memcpy(model_group_norm->params.gnorm.reduce_axis, reduce_axis, sizeof(int) * axis_count);
2931 return (ccv_cnnp_model_t*)model_group_norm;
2932}
2933
// vtab copy hook: reconstructs the group norm model from its stored gnorm
// parameters, trainable flag and name.
2934static ccv_cnnp_model_t* _ccv_cnnp_group_norm_copy(const ccv_cnnp_model_t* const super, void* const context)
2935{
2936 const ccv_cnnp_model_group_norm_t* const self = (const ccv_cnnp_model_group_norm_t*)super;
2937 return ccv_cnnp_group_norm(self->params.gnorm.group_axis, self->params.gnorm.groups, self->params.gnorm.epsilon, self->params.gnorm.reduce_axis, self->params.gnorm.reduce_count, self->params.gnorm.elementwise_affine, self->super.is_trainable, self->super.name);
2938}
2939
2940// MARK - RMSNorm Layer
2941
// Per-instance state for the RMSNorm layer: output symbol, lazily created
// scale parameter (only when elementwise_affine), and the
// CCV_NNC_RMSNORM_FORWARD command parameters. RMSNorm has no bias.
2942typedef struct {
2943 ccv_cnnp_model_t super;
2944 ccv_nnc_tensor_symbol_t output;
2945 ccv_nnc_tensor_symbol_t scale;
2946 ccv_nnc_cmd_param_t params;
2947} ccv_cnnp_model_rmsnorm_t;
2948
// Builds the symbolic graph for RMSNorm on a single input. The scale tensor
// keeps the input's size along rmsnorm.axis[] and 1 elsewhere, is created once
// (shared on model reuse), and the exec symbol also produces saved_inv_std
// for the backward pass.
2949static void _ccv_cnnp_rmsnorm_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2950{
2951 PRINT(CCV_CLI_VERBOSE, "[cnnp_rmsnorm_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_rmsnorm_build] -\n"); fflush(stdout); } } while
(0)
;
2952 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 2952, __extension__ __PRETTY_FUNCTION__); }))
;
2953 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
 if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 2953, __extension__ __PRETTY_FUNCTION__
); }))
;
2954 ccv_cnnp_model_rmsnorm_t* const self = (ccv_cnnp_model_rmsnorm_t*)super;
2955 const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2956 ccv_nnc_tensor_param_t scale_params = params;
2957 const int nd = ccv_nnc_tensor_nd(params.dim);
2958 int i;
// Collapse all dims to 1, then restore the normalized axes so the scale
// broadcasts everywhere else.
2959 for (i = 0; i < nd; i++)
2960 scale_params.dim[i] = 1;
2961 for (i = 0; i < self->params.rmsnorm.count; i++)
2962 scale_params.dim[self->params.rmsnorm.axis[i]] = params.dim[self->params.rmsnorm.axis[i]];
2963 // Both scale and bias are shared between if this model is reused.
2964 if (self->params.rmsnorm.elementwise_affine)
2965 {
2966 if (!self->scale.graph)
2967 self->scale = ccv_nnc_tensor_symbol_new(graph, scale_params, "scale");
2968 }
2969 const ccv_nnc_tensor_symbol_t scale = self->params.rmsnorm.elementwise_affine ? ccv_cnnp_model_get_symbol(super, self->scale) : NO_TENSOR_SYMBOL(const ccv_nnc_tensor_symbol_t){.d = CCV_NNC_NO_TENSOR_SYMBOL
}
;
2970 const ccv_nnc_cmd_t rmsnorm = ccv_nnc_cmd(CCV_NNC_RMSNORM_FORWARD, 0, self->params, 0);
2971 ccv_nnc_tensor_param_t output_params[2];
2972 if (self->params.rmsnorm.elementwise_affine)
2973 ccv_nnc_hint_tensor_auto(rmsnorm, (ccv_nnc_tensor_param_t []){
2974 params,
2975 scale_params,
2976 }, 2, ccv_nnc_no_hint, output_params, 2);
2977 else
2978 ccv_nnc_hint_tensor_auto(rmsnorm, (ccv_nnc_tensor_param_t []){
2979 params,
2980 }, 1, ccv_nnc_no_hint, output_params, 2);
2981 const ccv_nnc_tensor_symbol_t output = ccv_nnc_tensor_symbol_new(graph, output_params[0], 0);
2982 const ccv_nnc_tensor_symbol_t saved_inv_std = ccv_nnc_tensor_symbol_new(graph, output_params[1], "saved_inv_std");
2983 if (self->params.rmsnorm.elementwise_affine)
2984 ccv_nnc_graph_exec_symbol_new(graph, rmsnorm, TENSOR_SYMBOL_LIST(inputs[0], scale)(const ccv_nnc_tensor_symbol_t []){inputs[0], scale}, (1 +1 +
1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
-1)
, TENSOR_SYMBOL_LIST(output, saved_inv_std)(const ccv_nnc_tensor_symbol_t []){output, saved_inv_std}, (1
+1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 -1)
, "rmsnorm");
2985 else
2986 ccv_nnc_graph_exec_symbol_new(graph, rmsnorm, TENSOR_SYMBOL_LIST(inputs[0])(const ccv_nnc_tensor_symbol_t []){inputs[0]}, (1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(output, saved_inv_std)(const ccv_nnc_tensor_symbol_t []){output, saved_inv_std}, (1
+1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 -1)
, "rmsnorm");
2987 outputs[0] = output;
2988}
2989
// Initializes the RMSNorm scale parameter to 1 (identity), only if the symbol
// was created (elementwise_affine set at build time).
2990static void _ccv_cnnp_rmsnorm_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
2991{
2992 ccv_cnnp_model_rmsnorm_t* const self = (ccv_cnnp_model_rmsnorm_t*)super;
2993 if (self->scale.graph)
2994 initializer(context, CMD_SET_FORWARD(1)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={1,}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->scale);
2995}
2996
// Registers the scale tensor as a (possibly trainable) parameter when present.
2997static void _ccv_cnnp_rmsnorm_add_to_parameter(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const parameters, const int is_trainable)
2998{
2999 ccv_cnnp_model_rmsnorm_t* const self = (ccv_cnnp_model_rmsnorm_t*)super;
3000 if (self->scale.graph)
3001 add_to_array(parameters, self->scale, is_trainable);
3002}
3003
3004static ccv_cnnp_model_t* _ccv_cnnp_rmsnorm_copy(const ccv_cnnp_model_t* const super, void* const context);
3005
// vtab wiring RMSNorm into the cnnp model framework.
3006static const ccv_cnnp_model_vtab_t ccv_cnnp_rmsnorm_isa = {
3007 .build = _ccv_cnnp_rmsnorm_build,
3008 .init_states = _ccv_cnnp_rmsnorm_init_states,
3009 .add_to_parameter = _ccv_cnnp_rmsnorm_add_to_parameter,
3010 .copy = _ccv_cnnp_rmsnorm_copy,
3011};
3012
// Public constructor for an RMSNorm model. All command parameters are stored
// under the rmsnorm member of the params union; the scale symbol stays unset
// until build() runs.
//
// BUGFIX: the axes were previously copied into params.lnorm.axis (the layer
// norm member of the ccv_nnc_cmd_param_t union) while every reader of this
// model — _ccv_cnnp_rmsnorm_build and _ccv_cnnp_rmsnorm_copy — accesses
// params.rmsnorm.axis. Writing one union member and reading another is only
// correct if the two arrays happen to share an offset; use the intended
// params.rmsnorm.axis field (matching ccv_cnnp_rmsnorm_gated below).
3013ccv_cnnp_model_t* ccv_cnnp_rmsnorm(const float epsilon, const int axis[CCV_NNC_MAX_DIM_ALLOC(12)], const int axis_count, const int elementwise_affine, const float scale, const int is_trainable, const char* const name)
3014{
3015 ccv_cnnp_model_rmsnorm_t* const model_rmsnorm = (ccv_cnnp_model_rmsnorm_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_rmsnorm_t));
3016 model_rmsnorm->super.isa = &ccv_cnnp_rmsnorm_isa;
3017 model_rmsnorm->super.input_size = 1;
3018 model_rmsnorm->super.outputs = &model_rmsnorm->output;
3019 model_rmsnorm->super.output_size = 1;
3020 model_rmsnorm->super.is_trainable = is_trainable;
3021 ccv_cnnp_model_copy_name(&model_rmsnorm->super, name);
3022 model_rmsnorm->scale.d = CCV_NNC_NO_TENSOR_SYMBOL;
3023 model_rmsnorm->scale.graph = 0;
3024 model_rmsnorm->params.rmsnorm.epsilon = epsilon;
3025 model_rmsnorm->params.rmsnorm.scale = scale;
3026 model_rmsnorm->params.rmsnorm.count = axis_count;
3027 model_rmsnorm->params.rmsnorm.elementwise_affine = elementwise_affine;
3028 memcpy(model_rmsnorm->params.rmsnorm.axis, axis, sizeof(int) * axis_count);
3029 return (ccv_cnnp_model_t*)model_rmsnorm;
3030}
3031
// vtab copy hook: reconstructs the RMSNorm model from its stored rmsnorm
// parameters, trainable flag and name.
3032static ccv_cnnp_model_t* _ccv_cnnp_rmsnorm_copy(const ccv_cnnp_model_t* const super, void* const context)
3033{
3034 const ccv_cnnp_model_rmsnorm_t* const self = (const ccv_cnnp_model_rmsnorm_t*)super;
3035 return ccv_cnnp_rmsnorm(self->params.rmsnorm.epsilon, self->params.rmsnorm.axis, self->params.rmsnorm.count, self->params.rmsnorm.elementwise_affine, self->params.rmsnorm.scale, self->super.is_trainable, self->super.name);
3036}
3037
3038// MARK - RMSNorm Gated Layer
3039
// Per-instance state for the gated RMSNorm layer: output symbol, lazily
// created scale parameter, and CCV_NNC_RMSNORM_GATED_FORWARD parameters.
3040typedef struct {
3041 ccv_cnnp_model_t super;
3042 ccv_nnc_tensor_symbol_t output;
3043 ccv_nnc_tensor_symbol_t scale;
3044 ccv_nnc_cmd_param_t params;
3045} ccv_cnnp_model_rmsnorm_gated_t;
3046
// Builds the symbolic graph for gated RMSNorm. Takes two inputs (data and
// gate) and emits a single output; the scale parameter spans the normalized
// axes and is created once (shared on model reuse). Unlike plain RMSNorm,
// no saved statistics tensor is produced here.
3047static void _ccv_cnnp_rmsnorm_gated_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3048{
3049 PRINT(CCV_CLI_VERBOSE, "[cnnp_rmsnorm_gated_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_rmsnorm_gated_build] -\n"); fflush(stdout); }
} while (0)
;
3050 assert(input_size == 2)((void) sizeof ((input_size == 2) ? 1 : 0), __extension__ ({ if
(input_size == 2) ; else __assert_fail ("input_size == 2", "ccv_cnnp_model_addons.c"
, 3050, __extension__ __PRETTY_FUNCTION__); }))
;
3051 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
 if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 3051, __extension__ __PRETTY_FUNCTION__
); }))
;
3052 ccv_cnnp_model_rmsnorm_gated_t* const self = (ccv_cnnp_model_rmsnorm_gated_t*)super;
3053 const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
3054 const ccv_nnc_tensor_param_t gate_params = ccv_nnc_tensor_symbol_params(graph, inputs[1]);
3055 ccv_nnc_tensor_param_t scale_params = params;
3056 const int nd = ccv_nnc_tensor_nd(params.dim);
3057 int i;
// Scale broadcasts along every axis except the normalized ones.
3058 for (i = 0; i < nd; i++)
3059 scale_params.dim[i] = 1;
3060 for (i = 0; i < self->params.rmsnorm_gated.count; i++)
3061 scale_params.dim[self->params.rmsnorm_gated.axis[i]] = params.dim[self->params.rmsnorm_gated.axis[i]];
3062 if (self->params.rmsnorm_gated.elementwise_affine)
3063 {
3064 if (!self->scale.graph)
3065 self->scale = ccv_nnc_tensor_symbol_new(graph, scale_params, "scale");
3066 }
3067 const ccv_nnc_tensor_symbol_t scale = self->params.rmsnorm_gated.elementwise_affine ? ccv_cnnp_model_get_symbol(super, self->scale) : NO_TENSOR_SYMBOL(const ccv_nnc_tensor_symbol_t){.d = CCV_NNC_NO_TENSOR_SYMBOL
}
;
3068 const ccv_nnc_cmd_t rmsnorm_gated = ccv_nnc_cmd(CCV_NNC_RMSNORM_GATED_FORWARD, 0, self->params, 0);
3069 ccv_nnc_tensor_param_t output_params;
3070 if (self->params.rmsnorm_gated.elementwise_affine)
3071 ccv_nnc_hint_tensor_auto(rmsnorm_gated, (ccv_nnc_tensor_param_t []){
3072 params,
3073 gate_params,
3074 scale_params,
3075 }, 3, ccv_nnc_no_hint, &output_params, 1);
3076 else
3077 ccv_nnc_hint_tensor_auto(rmsnorm_gated, (ccv_nnc_tensor_param_t []){
3078 params,
3079 gate_params,
3080 }, 2, ccv_nnc_no_hint, &output_params, 1);
3081 outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
3082 if (self->params.rmsnorm_gated.elementwise_affine)
3083 ccv_nnc_graph_exec_symbol_new(graph, rmsnorm_gated, TENSOR_SYMBOL_LIST(inputs[0], inputs[1], scale)(const ccv_nnc_tensor_symbol_t []){inputs[0], inputs[1], scale
}, (1 +1 +1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 -1)
, outputs, output_size, "rmsnorm_gated");
3084 else
3085 ccv_nnc_graph_exec_symbol_new(graph, rmsnorm_gated, inputs, input_size, outputs, output_size, "rmsnorm_gated");
3086}
3087
// Initializes the gated RMSNorm scale parameter to 1 (identity) when present.
3088static void _ccv_cnnp_rmsnorm_gated_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
3089{
3090 ccv_cnnp_model_rmsnorm_gated_t* const self = (ccv_cnnp_model_rmsnorm_gated_t*)super;
3091 if (self->scale.graph)
3092 initializer(context, CMD_SET_FORWARD(1)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={1,}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->scale);
3093}
3094
// Registers the scale tensor as a (possibly trainable) parameter when present.
3095static void _ccv_cnnp_rmsnorm_gated_add_to_parameter(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const parameters, const int is_trainable)
3096{
3097 ccv_cnnp_model_rmsnorm_gated_t* const self = (ccv_cnnp_model_rmsnorm_gated_t*)super;
3098 if (self->scale.graph)
3099 add_to_array(parameters, self->scale, is_trainable);
3100}
3101
3102static ccv_cnnp_model_t* _ccv_cnnp_rmsnorm_gated_copy(const ccv_cnnp_model_t* const super, void* const context);
3103
// vtab wiring gated RMSNorm into the cnnp model framework.
3104static const ccv_cnnp_model_vtab_t ccv_cnnp_rmsnorm_gated_isa = {
3105 .build = _ccv_cnnp_rmsnorm_gated_build,
3106 .init_states = _ccv_cnnp_rmsnorm_gated_init_states,
3107 .add_to_parameter = _ccv_cnnp_rmsnorm_gated_add_to_parameter,
3108 .copy = _ccv_cnnp_rmsnorm_gated_copy,
3109};
3110
// Public constructor for a gated RMSNorm model (two inputs: data and gate).
// Note all parameters are consistently written to the rmsnorm_gated union
// member, which is the same member the build/copy hooks read.
3111ccv_cnnp_model_t* ccv_cnnp_rmsnorm_gated(const float epsilon, const int axis[CCV_NNC_MAX_DIM_ALLOC(12)], const int axis_count, const int elementwise_affine, const int is_trainable, const char* const name)
3112{
3113 ccv_cnnp_model_rmsnorm_gated_t* const model_rmsnorm_gated = (ccv_cnnp_model_rmsnorm_gated_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_rmsnorm_gated_t));
3114 model_rmsnorm_gated->super.isa = &ccv_cnnp_rmsnorm_gated_isa;
3115 model_rmsnorm_gated->super.input_size = 2;
3116 model_rmsnorm_gated->super.outputs = &model_rmsnorm_gated->output;
3117 model_rmsnorm_gated->super.output_size = 1;
3118 model_rmsnorm_gated->super.is_trainable = is_trainable;
3119 ccv_cnnp_model_copy_name(&model_rmsnorm_gated->super, name);
3120 model_rmsnorm_gated->scale.d = CCV_NNC_NO_TENSOR_SYMBOL;
3121 model_rmsnorm_gated->scale.graph = 0;
3122 model_rmsnorm_gated->params.rmsnorm_gated.epsilon = epsilon;
3123 model_rmsnorm_gated->params.rmsnorm_gated.count = axis_count;
3124 model_rmsnorm_gated->params.rmsnorm_gated.elementwise_affine = elementwise_affine;
3125 memcpy(model_rmsnorm_gated->params.rmsnorm_gated.axis, axis, sizeof(int) * axis_count);
3126 return (ccv_cnnp_model_t*)model_rmsnorm_gated;
3127}
3128
// vtab copy hook: reconstructs the gated RMSNorm model from its stored
// rmsnorm_gated parameters, trainable flag and name.
3129static ccv_cnnp_model_t* _ccv_cnnp_rmsnorm_gated_copy(const ccv_cnnp_model_t* const super, void* const context)
3130{
3131 const ccv_cnnp_model_rmsnorm_gated_t* const self = (const ccv_cnnp_model_rmsnorm_gated_t*)super;
3132 return ccv_cnnp_rmsnorm_gated(self->params.rmsnorm_gated.epsilon, self->params.rmsnorm_gated.axis, self->params.rmsnorm_gated.count, self->params.rmsnorm_gated.elementwise_affine, self->super.is_trainable, self->super.name);
3133}
3134
3135// MARK - Batched Matrix Mul Layer
3136
// State for the (batched) matrix-multiply model: transpose specs for each
// operand and the GEMM blas flags. Stateless otherwise (no parameters).
3137typedef struct {
3138 ccv_cnnp_model_t super;
3139 ccv_nnc_tensor_symbol_t output;
3140 int transpose_a[2];
3141 int transpose_b[2];
3142 int flags;
3143} ccv_cnnp_model_matmul_t;
3144
// Builds a GEMM forward exec over the two inputs; output shape is derived via
// ccv_nnc_hint_tensor_auto from the operand shapes and transpose settings.
3145static void _ccv_cnnp_matmul_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3146{
3147 PRINT(CCV_CLI_VERBOSE, "[cnnp_matmul_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_matmul_build] -\n"); fflush(stdout); } } while
(0)
;
3148 assert(input_size == 2)((void) sizeof ((input_size == 2) ? 1 : 0), __extension__ ({ if
(input_size == 2) ; else __assert_fail ("input_size == 2", "ccv_cnnp_model_addons.c"
, 3148, __extension__ __PRETTY_FUNCTION__); }))
;
3149 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
 if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 3149, __extension__ __PRETTY_FUNCTION__
); }))
;
3150 ccv_cnnp_model_matmul_t* const self = (ccv_cnnp_model_matmul_t*)super;
3151 ccv_nnc_tensor_param_t a_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
3152 ccv_nnc_tensor_param_t b_params = ccv_nnc_tensor_symbol_params(graph, inputs[1]);
3153 ccv_nnc_tensor_param_t output_params;
3154 ccv_nnc_cmd_t matmul = CMD_GEMM_FORWARD(self->transpose_a, self->transpose_b)ccv_nnc_cmd(CCV_NNC_GEMM_FORWARD, 0, ((ccv_nnc_cmd_param_t){.
size={.dim={1,1,1}},.blas={.a={1,1},.transpose_a={self->transpose_a
[0],self->transpose_a[1]},.transpose_b={self->transpose_b
[0],self->transpose_b[1]},}}), 0)
;
// Propagate user-supplied blas flags (the CMD_GEMM_FORWARD macro does not
// accept them directly).
3155 matmul.info.blas.flags = self->flags;
3156 ccv_nnc_hint_tensor_auto(matmul, (ccv_nnc_tensor_param_t []){
3157 a_params,
3158 b_params,
3159 }, 2, ccv_nnc_no_hint, &output_params, 1);
3160 const ccv_nnc_tensor_symbol_t matmul_output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
3161 ccv_nnc_graph_exec_symbol_new(graph, matmul, inputs, input_size, TENSOR_SYMBOL_LIST(matmul_output)(const ccv_nnc_tensor_symbol_t []){matmul_output}, (1 +1 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1
)
, "matmul");
3162 outputs[0] = matmul_output;
3163}
3164
3165static ccv_cnnp_model_t* _ccv_cnnp_matmul_copy(const ccv_cnnp_model_t* const super, void* const context);
3166
// vtab for the matmul model; no init_states/add_to_parameter (parameterless).
3167static const ccv_cnnp_model_vtab_t ccv_cnnp_matmul_isa = {
3168 .build = _ccv_cnnp_matmul_build,
3169 .copy = _ccv_cnnp_matmul_copy,
3170};
3171
// Public constructor: captures transpose axes for each operand plus GEMM blas
// flags. Two inputs (a, b), one output.
3172ccv_cnnp_model_t* ccv_cnnp_matmul(const int transpose_a[2], const int transpose_b[2], const int flags, const char* const name)
3173{
3174 ccv_cnnp_model_matmul_t* const model_matmul = (ccv_cnnp_model_matmul_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_matmul_t));
3175 model_matmul->super.isa = &ccv_cnnp_matmul_isa;
3176 model_matmul->super.input_size = 2;
3177 model_matmul->super.outputs = &model_matmul->output;
3178 model_matmul->super.output_size = 1;
3179 model_matmul->transpose_a[0] = transpose_a[0];
3180 model_matmul->transpose_a[1] = transpose_a[1];
3181 model_matmul->transpose_b[0] = transpose_b[0];
3182 model_matmul->transpose_b[1] = transpose_b[1];
3183 model_matmul->flags = flags;
3184 ccv_cnnp_model_copy_name(&model_matmul->super, name);
3185 return (ccv_cnnp_model_t*)model_matmul;
3186}
3187
// vtab copy hook: reconstructs the matmul model from its stored settings.
3188static ccv_cnnp_model_t* _ccv_cnnp_matmul_copy(const ccv_cnnp_model_t* const super, void* const context)
3189{
3190 const ccv_cnnp_model_matmul_t* const self = (const ccv_cnnp_model_matmul_t*)super;
3191 return ccv_cnnp_matmul(self->transpose_a, self->transpose_b, self->flags, self->super.name);
3192}
3193
3194// MARK - Dropout Layer
3195
// State for the dropout model: p is the drop probability, entirety drops the
// whole tensor at once; the exec symbol is kept so set_is_test can swap the
// command between dropout and plain data transfer.
3196typedef struct {
3197 ccv_cnnp_model_t super;
3198 ccv_nnc_tensor_symbol_t output;
3199 ccv_nnc_graph_exec_symbol_t dropout;
3200 float p;
3201 int entirety;
3202} ccv_cnnp_model_dropout_t;
3203
// Builds the dropout exec over a single input; the second auto-derived output
// is the dropout mask tensor. The exec symbol is stashed on the model so the
// test/train mode switch can retarget its command later.
3204static void _ccv_cnnp_dropout_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3205{
3206 PRINT(CCV_CLI_VERBOSE, "[cnnp_dropout_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_dropout_build] -\n"); fflush(stdout); } } while
(0)
;
3207 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 3207, __extension__ __PRETTY_FUNCTION__); }))
;
3208 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
 if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 3208, __extension__ __PRETTY_FUNCTION__
); }))
;
3209 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
3210 ccv_nnc_tensor_param_t output_params[2];
3211 ccv_cnnp_model_dropout_t* const self = (ccv_cnnp_model_dropout_t*)super;
3212 const ccv_nnc_cmd_t dropout = CMD_DROPOUT_FORWARD(self->p, self->entirety)ccv_nnc_cmd(CCV_NNC_DROPOUT_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.dropout={.p=self->p,.entirety=self
->entirety}}), 0)
;
3213 ccv_nnc_hint_tensor_auto(dropout, (ccv_nnc_tensor_param_t []){
3214 params,
3215 }, 1, ccv_nnc_no_hint, output_params, 2);
3216 const ccv_nnc_tensor_symbol_t dropout_output = ccv_nnc_tensor_symbol_new(graph, output_params[0], 0);
3217 const ccv_nnc_tensor_symbol_t mask = ccv_nnc_tensor_symbol_new(graph, output_params[1], "mask");
3218 self->dropout = ccv_nnc_graph_exec_symbol_new(graph, dropout, TENSOR_SYMBOL_LIST(inputs[0])(const ccv_nnc_tensor_symbol_t []){inputs[0]}, (1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(dropout_output, mask)(const ccv_nnc_tensor_symbol_t []){dropout_output, mask}, (1 +
1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 -1)
, "dropout");
3219 outputs[0] = dropout_output;
3220}
3221
// Test/train mode switch: at test time the exec is rewritten to a plain data
// transfer (identity, and skipped entirely when source == destination); at
// train time it is restored to the dropout command with the stored p/entirety.
3222static void _ccv_cnnp_dropout_set_is_test(ccv_cnnp_model_t* const super, const int is_test, const ccv_cnnp_cmd_updater_f updater, void* const context)
3223{
3224 ccv_cnnp_model_dropout_t* const self = (ccv_cnnp_model_dropout_t*)super;
3225 if (self->dropout.graph)
3226 {
3227 if (is_test)
3228 // During test, the dropout is not applied. Data transfer is perfect because if these are the same tensor, it will skip.
3229 updater(context, self->dropout, CMD_DATA_TRANSFER_FORWARD()ccv_nnc_cmd(CCV_NNC_DATA_TRANSFER_FORWARD, 0, ccv_nnc_cmd_auto
, 0)
, ccv_nnc_no_hint);
3230 else
3231 updater(context, self->dropout, CMD_DROPOUT_FORWARD(self->p, self->entirety)ccv_nnc_cmd(CCV_NNC_DROPOUT_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.dropout={.p=self->p,.entirety=self
->entirety}}), 0)
, ccv_nnc_no_hint);
3232 }
3233}
3234
3235static ccv_cnnp_model_t* _ccv_cnnp_dropout_copy(const ccv_cnnp_model_t* const super, void* const context);
3236
// vtab for dropout; provides set_is_test so inference bypasses the dropout.
3237static const ccv_cnnp_model_vtab_t ccv_cnnp_dropout_isa = {
3238 .build = _ccv_cnnp_dropout_build,
3239 .set_is_test = _ccv_cnnp_dropout_set_is_test,
3240 .copy = _ccv_cnnp_dropout_copy,
3241};
3242
// Public constructor for dropout with probability p; entirety != 0 drops the
// whole tensor rather than individual elements.
3243ccv_cnnp_model_t* ccv_cnnp_dropout(const float p, const int entirety, const char* const name)
3244{
3245 ccv_cnnp_model_dropout_t* const model_dropout = (ccv_cnnp_model_dropout_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_dropout_t));
3246 model_dropout->super.isa = &ccv_cnnp_dropout_isa;
3247 model_dropout->super.input_size = 1;
3248 model_dropout->super.outputs = &model_dropout->output;
3249 model_dropout->super.output_size = 1;
3250 model_dropout->p = p;
3251 model_dropout->entirety = entirety;
3252 ccv_cnnp_model_copy_name(&model_dropout->super, name);
3253 return (ccv_cnnp_model_t*)model_dropout;
3254}
3255
// vtab copy hook: reconstructs the dropout model from p/entirety/name.
3256static ccv_cnnp_model_t* _ccv_cnnp_dropout_copy(const ccv_cnnp_model_t* const super, void* const context)
3257{
3258 const ccv_cnnp_model_dropout_t* const self = (const ccv_cnnp_model_dropout_t*)super;
3259 return ccv_cnnp_dropout(self->p, self->entirety, self->super.name);
3260}
3261
3262// MARK - Masked Fill Layer
3263
// State for masked fill: where the mask input equals eq, the output takes the
// value fill (semantics carried by CMD_MASKED_FILL_FORWARD's blas.a pair).
3264typedef struct {
3265 ccv_cnnp_model_t super;
3266 ccv_nnc_tensor_symbol_t output;
3267 float eq;
3268 float fill;
3269} ccv_cnnp_model_masked_fill_t;
3270
// Builds a masked-fill exec over (data, mask). The output shares the first
// input's tensor parameters; eq/fill are encoded into the command's blas.a.
3271static void _ccv_cnnp_masked_fill_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3272{
3273 PRINT(CCV_CLI_VERBOSE, "[cnnp_masked_fill_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_masked_fill_build] -\n"); fflush(stdout); } }
while (0)
;
3274 assert(input_size == 2)((void) sizeof ((input_size == 2) ? 1 : 0), __extension__ ({ if
(input_size == 2) ; else __assert_fail ("input_size == 2", "ccv_cnnp_model_addons.c"
, 3274, __extension__ __PRETTY_FUNCTION__); }))
;
3275 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
 if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 3275, __extension__ __PRETTY_FUNCTION__
); }))
;
3276 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
3277 ccv_cnnp_model_masked_fill_t* const self = (ccv_cnnp_model_masked_fill_t*)super;
3278 const ccv_nnc_tensor_symbol_t masked_fill_output = ccv_nnc_tensor_symbol_new(graph, params, 0);
3279 ccv_nnc_graph_exec_symbol_new(graph, CMD_MASKED_FILL_FORWARD(self->eq, self->fill)ccv_nnc_cmd(CCV_NNC_MASKED_FILL_FORWARD, 0, (ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.blas={.a={self->eq, self->fill}
}}, 0)
, TENSOR_SYMBOL_LIST(inputs[0], inputs[1])(const ccv_nnc_tensor_symbol_t []){inputs[0], inputs[1]}, (1 +
1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 -1)
, TENSOR_SYMBOL_LIST(masked_fill_output)(const ccv_nnc_tensor_symbol_t []){masked_fill_output}, (1 +1
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 -1)
, "masked_fill");
3280 outputs[0] = masked_fill_output;
3281}
3282
3283static ccv_cnnp_model_t* _ccv_cnnp_masked_fill_copy(const ccv_cnnp_model_t* const super, void* const context);
3284
// vtab for masked fill; parameterless, so only build and copy hooks.
3285static const ccv_cnnp_model_vtab_t ccv_cnnp_masked_fill_isa = {
3286 .build = _ccv_cnnp_masked_fill_build,
3287 .copy = _ccv_cnnp_masked_fill_copy,
3288};
3289
// Public constructor for masked fill: fills positions where the mask input
// equals eq with the value fill. Two inputs (data, mask), one output.
3290ccv_cnnp_model_t* ccv_cnnp_masked_fill(const float eq, const float fill, const char* const name)
3291{
3292 ccv_cnnp_model_masked_fill_t* const model_masked_fill = (ccv_cnnp_model_masked_fill_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_masked_fill_t));
3293 model_masked_fill->super.isa = &ccv_cnnp_masked_fill_isa;
3294 model_masked_fill->super.input_size = 2;
3295 model_masked_fill->super.outputs = &model_masked_fill->output;
3296 model_masked_fill->super.output_size = 1;
3297 model_masked_fill->eq = eq;
3298 model_masked_fill->fill = fill;
3299 ccv_cnnp_model_copy_name(&model_masked_fill->super, name);
3300 return (ccv_cnnp_model_t*)model_masked_fill;
3301}
3302
// vtab copy hook: reconstructs the masked fill model from eq/fill/name.
3303static ccv_cnnp_model_t* _ccv_cnnp_masked_fill_copy(const ccv_cnnp_model_t* const super, void* const context)
3304{
3305 const ccv_cnnp_model_masked_fill_t* const self = (const ccv_cnnp_model_masked_fill_t*)super;
3306 return ccv_cnnp_masked_fill(self->eq, self->fill, self->super.name);
3307}
3308
3309// MARK - Index Select Layer
3310
3311typedef struct {
3312 ccv_cnnp_model_t super;
3313 ccv_nnc_tensor_symbol_t output;
3314} ccv_cnnp_model_index_select_t;
3315
3316static void _ccv_cnnp_index_select_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3317{
3318 PRINT(CCV_CLI_VERBOSE, "[cnnp_index_select_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_index_select_build] -\n"); fflush(stdout); }
} while (0)
;
3319 assert(input_size == 2)((void) sizeof ((input_size == 2) ? 1 : 0), __extension__ ({ if
(input_size == 2) ; else __assert_fail ("input_size == 2", "ccv_cnnp_model_addons.c"
, 3319, __extension__ __PRETTY_FUNCTION__); }))
;
3320 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 3320, __extension__ __PRETTY_FUNCTION__
); }))
;
3321 const ccv_nnc_tensor_param_t vocab_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
3322 const ccv_nnc_tensor_param_t index_params = ccv_nnc_tensor_symbol_params(graph, inputs[1]);
3323 ccv_nnc_tensor_param_t output_params;
3324 const ccv_nnc_cmd_t index_select = CMD_INDEX_SELECT_FORWARD()ccv_nnc_cmd(CCV_NNC_INDEX_SELECT_FORWARD, 0, ccv_nnc_cmd_auto
, 0)
;
3325 ccv_nnc_hint_tensor_auto(index_select, (ccv_nnc_tensor_param_t []){
3326 vocab_params,
3327 index_params,
3328 }, 2, ccv_nnc_no_hint, &output_params, 1);
3329 const ccv_nnc_tensor_symbol_t output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
3330 ccv_nnc_graph_exec_symbol_new(graph, index_select, TENSOR_SYMBOL_LIST(inputs[0], inputs[1])(const ccv_nnc_tensor_symbol_t []){inputs[0], inputs[1]}, (1 +
1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 -1)
, TENSOR_SYMBOL_LIST(output)(const ccv_nnc_tensor_symbol_t []){output}, (1 +1 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "index_select");
3331 outputs[0] = output;
3332}
3333
3334static ccv_cnnp_model_t* _ccv_cnnp_index_select_copy(const ccv_cnnp_model_t* const super, void* const context);
3335
3336static const ccv_cnnp_model_vtab_t ccv_cnnp_index_select_isa = {
3337 .build = _ccv_cnnp_index_select_build,
3338 .copy = _ccv_cnnp_index_select_copy,
3339};
3340
3341ccv_cnnp_model_t* ccv_cnnp_index_select(const char* const name)
3342{
3343 ccv_cnnp_model_index_select_t* const model_index_select = (ccv_cnnp_model_index_select_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_index_select_t));
3344 model_index_select->super.isa = &ccv_cnnp_index_select_isa;
3345 model_index_select->super.input_size = 2;
3346 model_index_select->super.outputs = &model_index_select->output;
3347 model_index_select->super.output_size = 1;
3348 ccv_cnnp_model_copy_name(&model_index_select->super, name);
3349 return (ccv_cnnp_model_t*)model_index_select;
3350}
3351
3352static ccv_cnnp_model_t* _ccv_cnnp_index_select_copy(const ccv_cnnp_model_t* const super, void* const context)
3353{
3354 ccv_cnnp_model_index_select_t* const self = (ccv_cnnp_model_index_select_t*)super;
3355 return ccv_cnnp_index_select(self->super.name);
3356}
3357
3358// MARK - Embedding Layer
3359
3360typedef struct {
3361 ccv_cnnp_model_t super;
3362 ccv_nnc_tensor_symbol_t output;
3363 ccv_nnc_tensor_symbol_t vocab;
3364 int datatype;
3365 int vocab_size;
3366 int embed_size;
3367} ccv_cnnp_model_embedding_t;
3368
3369static void _ccv_cnnp_embedding_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3370{
3371 ccv_cnnp_model_embedding_t* const self = (ccv_cnnp_model_embedding_t*)super;
3372 PRINT(CCV_CLI_VERBOSE, "[cnnp_embedding_build] vocab_size: %d, embed_size: %d\n", self->vocab_size, self->embed_size)do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_embedding_build] vocab_size: %d, embed_size: %d\n"
, self->vocab_size, self->embed_size); fflush(stdout); }
} while (0)
;
3373 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 3373, __extension__ __PRETTY_FUNCTION__); }))
;
3374 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 3374, __extension__ __PRETTY_FUNCTION__
); }))
;
3375 const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
3376 ccv_nnc_tensor_param_t vocab_params = params;
3377 memset(vocab_params.dim, 0, sizeof(vocab_params.dim));
3378 vocab_params.datatype = self->datatype;
3379 vocab_params.dim[0] = self->vocab_size;
3380 vocab_params.dim[1] = self->embed_size;
3381 if (!self->vocab.graph)
3382 self->vocab = ccv_nnc_tensor_symbol_new(graph, vocab_params, "vocab");
3383 assert(self->vocab.graph == graph)((void) sizeof ((self->vocab.graph == graph) ? 1 : 0), __extension__
({ if (self->vocab.graph == graph) ; else __assert_fail (
"self->vocab.graph == graph", "ccv_cnnp_model_addons.c", 3383
, __extension__ __PRETTY_FUNCTION__); }))
;
3384 const ccv_nnc_tensor_symbol_t vocab = ccv_cnnp_model_get_symbol(super, self->vocab);
3385 ccv_nnc_tensor_param_t output_params;
3386 const ccv_nnc_cmd_t embedding = CMD_INDEX_SELECT_FORWARD()ccv_nnc_cmd(CCV_NNC_INDEX_SELECT_FORWARD, 0, ccv_nnc_cmd_auto
, 0)
;
3387 ccv_nnc_hint_tensor_auto(embedding, (ccv_nnc_tensor_param_t []){
3388 vocab_params,
3389 params,
3390 }, 2, ccv_nnc_no_hint, &output_params, 1);
3391 const ccv_nnc_tensor_symbol_t output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
3392 ccv_nnc_graph_exec_symbol_new(graph, embedding, TENSOR_SYMBOL_LIST(vocab, inputs[0])(const ccv_nnc_tensor_symbol_t []){vocab, inputs[0]}, (1 +1 +
1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
-1)
, TENSOR_SYMBOL_LIST(output)(const ccv_nnc_tensor_symbol_t []){output}, (1 +1 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "embedding");
3393 outputs[0] = output;
3394}
3395
3396static void _ccv_cnnp_embedding_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
3397{
3398 ccv_cnnp_model_embedding_t* const self = (ccv_cnnp_model_embedding_t*)super;
3399 const float std = sqrtf(2) / sqrtf(self->vocab_size + self->embed_size);
3400 const float bound = sqrtf(3) * std;
3401 initializer(context, CMD_RANDOM_UNIFORM_FORWARD(-bound, bound)ccv_nnc_cmd(CCV_NNC_RANDOM_UNIFORM_FORWARD, 0, (ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.blas={.a={-bound, bound}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->vocab);
3402}
3403
3404static void _ccv_cnnp_embedding_add_to_parameter(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const parameters, const int is_trainable)
3405{
3406 ccv_cnnp_model_embedding_t* const self = (ccv_cnnp_model_embedding_t*)super;
3407 add_to_array(parameters, self->vocab, is_trainable);
3408}
3409
3410static ccv_cnnp_model_t* _ccv_cnnp_embedding_copy(const ccv_cnnp_model_t* const super, void* const context);
3411
3412static const ccv_cnnp_model_vtab_t ccv_cnnp_embedding_isa = {
3413 .build = _ccv_cnnp_embedding_build,
3414 .init_states = _ccv_cnnp_embedding_init_states,
3415 .add_to_parameter = _ccv_cnnp_embedding_add_to_parameter,
3416 .copy = _ccv_cnnp_embedding_copy,
3417};
3418
3419ccv_cnnp_model_t* ccv_cnnp_embedding(const int datatype, const int vocab_size, const int embed_size, const int is_trainable, const char* const name)
3420{
3421 ccv_cnnp_model_embedding_t* const model_embedding = (ccv_cnnp_model_embedding_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_embedding_t));
3422 model_embedding->super.isa = &ccv_cnnp_embedding_isa;
3423 model_embedding->super.input_size = 1;
3424 model_embedding->super.outputs = &model_embedding->output;
3425 model_embedding->super.output_size = 1;
3426 model_embedding->super.is_trainable = is_trainable;
3427 ccv_cnnp_model_copy_name(&model_embedding->super, name);
3428 model_embedding->vocab.d = CCV_NNC_NO_TENSOR_SYMBOL;
3429 model_embedding->vocab.graph = 0;
3430 assert(datatype == CCV_32F || datatype == CCV_16F || datatype == CCV_16BF)((void) sizeof ((datatype == CCV_32F || datatype == CCV_16F ||
datatype == CCV_16BF) ? 1 : 0), __extension__ ({ if (datatype
== CCV_32F || datatype == CCV_16F || datatype == CCV_16BF) ;
else __assert_fail ("datatype == CCV_32F || datatype == CCV_16F || datatype == CCV_16BF"
, "ccv_cnnp_model_addons.c", 3430, __extension__ __PRETTY_FUNCTION__
); }))
;
3431 model_embedding->datatype = datatype;
3432 assert(vocab_size > 0)((void) sizeof ((vocab_size > 0) ? 1 : 0), __extension__ (
{ if (vocab_size > 0) ; else __assert_fail ("vocab_size > 0"
, "ccv_cnnp_model_addons.c", 3432, __extension__ __PRETTY_FUNCTION__
); }))
;
3433 model_embedding->vocab_size = vocab_size;
3434 assert(embed_size > 0)((void) sizeof ((embed_size > 0) ? 1 : 0), __extension__ (
{ if (embed_size > 0) ; else __assert_fail ("embed_size > 0"
, "ccv_cnnp_model_addons.c", 3434, __extension__ __PRETTY_FUNCTION__
); }))
;
3435 model_embedding->embed_size = embed_size;
3436 return (ccv_cnnp_model_t*)model_embedding;
3437}
3438
3439static ccv_cnnp_model_t* _ccv_cnnp_embedding_copy(const ccv_cnnp_model_t* const super, void* const context)
3440{
3441 ccv_cnnp_model_embedding_t* const self = (ccv_cnnp_model_embedding_t*)super;
3442 return ccv_cnnp_embedding(self->datatype, self->vocab_size, self->embed_size, self->super.is_trainable, self->super.name);
3443}
3444
3445// MARK - Pool Layers
3446
3447typedef struct {
3448 ccv_cnnp_model_t super;
3449 ccv_nnc_tensor_symbol_t output;
3450 int type;
3451 float width_scale;
3452 float height_scale;
3453 int align_corners;
3454} ccv_cnnp_model_upsample_t;
3455
3456static void _ccv_cnnp_upsample_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3457{
3458 PRINT(CCV_CLI_VERBOSE, "[cnnp_upsample_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_upsample_build] -\n"); fflush(stdout); } } while
(0)
;
3459 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 3459, __extension__ __PRETTY_FUNCTION__); }))
;
3460 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 3460, __extension__ __PRETTY_FUNCTION__
); }))
;
3461 ccv_cnnp_model_upsample_t* const self = (ccv_cnnp_model_upsample_t*)super;
3462 const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
3463 ccv_nnc_cmd_t cmd = CMD_UPSAMPLE_FORWARD(self->type, self->width_scale, self->height_scale, self->align_corners)ccv_nnc_cmd(CCV_NNC_UPSAMPLE_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.upsample={.type=self->type,.width_scale
=self->width_scale,.height_scale=self->height_scale,.align_corners
=self->align_corners}}), 0)
;
3464 ccv_nnc_tensor_param_t output_params;
3465 ccv_nnc_hint_tensor_auto(cmd, &params, 1, ccv_nnc_no_hint, &output_params, 1);
3466 const ccv_nnc_tensor_symbol_t output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
3467 ccv_nnc_graph_exec_symbol_new(graph, cmd, TENSOR_SYMBOL_LIST(inputs[0])(const ccv_nnc_tensor_symbol_t []){inputs[0]}, (1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(output)(const ccv_nnc_tensor_symbol_t []){output}, (1 +1 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "upsample");
3468 outputs[0] = output;
3469}
3470
3471static ccv_cnnp_model_t* _ccv_cnnp_upsample_copy(const ccv_cnnp_model_t* const super, void* const context);
3472
3473static const ccv_cnnp_model_vtab_t ccv_cnnp_upsample_isa = {
3474 .build = _ccv_cnnp_upsample_build,
3475 .copy = _ccv_cnnp_upsample_copy,
3476};
3477
3478ccv_cnnp_model_t* ccv_cnnp_upsample(const int type, const float width_scale, const float height_scale, const int align_corners, const char* const name)
3479{
3480 ccv_cnnp_model_upsample_t* const model_upsample = (ccv_cnnp_model_upsample_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_upsample_t));
3481 model_upsample->super.isa = &ccv_cnnp_upsample_isa;
3482 model_upsample->super.input_size = 1;
3483 model_upsample->super.outputs = &model_upsample->output;
3484 model_upsample->super.output_size = 1;
3485 ccv_cnnp_model_copy_name(&model_upsample->super, name);
3486 assert(type == CCV_NNC_UPSAMPLE_NEAREST || type == CCV_NNC_UPSAMPLE_BILINEAR)((void) sizeof ((type == CCV_NNC_UPSAMPLE_NEAREST || type == CCV_NNC_UPSAMPLE_BILINEAR
) ? 1 : 0), __extension__ ({ if (type == CCV_NNC_UPSAMPLE_NEAREST
|| type == CCV_NNC_UPSAMPLE_BILINEAR) ; else __assert_fail (
"type == CCV_NNC_UPSAMPLE_NEAREST || type == CCV_NNC_UPSAMPLE_BILINEAR"
, "ccv_cnnp_model_addons.c", 3486, __extension__ __PRETTY_FUNCTION__
); }))
;
3487 model_upsample->type = type;
3488 model_upsample->width_scale = width_scale;
3489 model_upsample->height_scale = height_scale;
3490 model_upsample->align_corners = align_corners;
3491 return (ccv_cnnp_model_t*)model_upsample;
3492}
3493
3494static ccv_cnnp_model_t* _ccv_cnnp_upsample_copy(const ccv_cnnp_model_t* const super, void* const context)
3495{
3496 const ccv_cnnp_model_upsample_t* const self = (const ccv_cnnp_model_upsample_t*)super;
3497 return ccv_cnnp_upsample(self->type, self->width_scale, self->height_scale, self->align_corners, self->super.name);
3498}
3499
3500// MARK - Reduce Sum Layer
3501
3502typedef struct {
3503 ccv_cnnp_model_t super;
3504 int axis[CCV_NNC_MAX_DIM_ALLOC(12)];
3505 int count;
3506 ccv_nnc_tensor_symbol_t output;
3507} ccv_cnnp_model_reduce_sum_t;
3508
3509static void _ccv_cnnp_reduce_sum_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3510{
3511 PRINT(CCV_CLI_VERBOSE, "[cnnp_reduce_sum_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_reduce_sum_build] -\n"); fflush(stdout); } }
while (0)
;
3512 const ccv_cnnp_model_reduce_sum_t* const self = (const ccv_cnnp_model_reduce_sum_t*)super;
3513 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 3513, __extension__ __PRETTY_FUNCTION__); }))
;
3514 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 3514, __extension__ __PRETTY_FUNCTION__
); }))
;
3515 ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
3516 ccv_nnc_tensor_param_t output_params;
3517 ccv_nnc_cmd_t reduce_sum = CMD_REDUCE_SUM_FORWARD()ccv_nnc_cmd(CCV_NNC_REDUCE_SUM_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.reduce={.count=(1 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1),.axis={}}}
), 0)
;
3518 int i;
3519 for (i = 0; i < self->count; i++)
3520 reduce_sum.info.reduce.axis[i] = self->axis[i];
3521 reduce_sum.info.reduce.count = self->count;
3522 ccv_nnc_hint_tensor_auto(reduce_sum, &input_params, 1, ccv_nnc_no_hint, &output_params, 1);
3523 outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
3524 ccv_nnc_graph_exec_symbol_new(graph, reduce_sum, inputs, input_size, outputs, output_size, "reduce_sum");
3525}
3526
3527static ccv_cnnp_model_t* _ccv_cnnp_reduce_sum_copy(const ccv_cnnp_model_t* const self, void* const context);
3528
3529static const ccv_cnnp_model_vtab_t ccv_cnnp_reduce_sum_isa = {
3530 .build = _ccv_cnnp_reduce_sum_build,
3531 .copy = _ccv_cnnp_reduce_sum_copy,
3532};
3533
3534ccv_cnnp_model_t* ccv_cnnp_reduce_sum(const int* const axis, const int axis_count, const char* const name)
3535{
3536 ccv_cnnp_model_reduce_sum_t* const model_reduce_sum = (ccv_cnnp_model_reduce_sum_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_reduce_sum_t));
3537 model_reduce_sum->super.isa = &ccv_cnnp_reduce_sum_isa;
3538 model_reduce_sum->super.input_size = 1;
3539 model_reduce_sum->super.outputs = &model_reduce_sum->output;
3540 model_reduce_sum->super.output_size = 1;
3541 ccv_cnnp_model_copy_name(&model_reduce_sum->super, name);
3542 assert(axis_count <= CCV_NNC_MAX_DIM_ALLOC)((void) sizeof ((axis_count <= (12)) ? 1 : 0), __extension__
({ if (axis_count <= (12)) ; else __assert_fail ("axis_count <= CCV_NNC_MAX_DIM_ALLOC"
, "ccv_cnnp_model_addons.c", 3542, __extension__ __PRETTY_FUNCTION__
); }))
;
3543 int i;
3544 for (i = 0; i < axis_count; i++)
3545 model_reduce_sum->axis[i] = axis[i];
3546 model_reduce_sum->count = axis_count;
3547 return (ccv_cnnp_model_t*)model_reduce_sum;
3548}
3549
3550static ccv_cnnp_model_t* _ccv_cnnp_reduce_sum_copy(const ccv_cnnp_model_t* const super, void* const context)
3551{
3552 const ccv_cnnp_model_reduce_sum_t* const self = (const ccv_cnnp_model_reduce_sum_t*)super;
3553 return ccv_cnnp_reduce_sum(self->axis, self->count, self->super.name);
3554}
3555
3556// MARK - Reduce Mean Layer
3557
3558typedef struct {
3559 ccv_cnnp_model_t super;
3560 int axis[CCV_NNC_MAX_DIM_ALLOC(12)];
3561 int count;
3562 ccv_nnc_tensor_symbol_t output;
3563} ccv_cnnp_model_reduce_mean_t;
3564
3565static void _ccv_cnnp_reduce_mean_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3566{
3567 PRINT(CCV_CLI_VERBOSE, "[cnnp_reduce_mean_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_reduce_mean_build] -\n"); fflush(stdout); } }
while (0)
;
3568 const ccv_cnnp_model_reduce_mean_t* const self = (const ccv_cnnp_model_reduce_mean_t*)super;
3569 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 3569, __extension__ __PRETTY_FUNCTION__); }))
;
3570 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 3570, __extension__ __PRETTY_FUNCTION__
); }))
;
3571 ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
3572 ccv_nnc_tensor_param_t output_params;
3573 ccv_nnc_cmd_t reduce_mean = CMD_REDUCE_MEAN_FORWARD()ccv_nnc_cmd(CCV_NNC_REDUCE_MEAN_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.reduce={.count=(1 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1),.axis={}}}
), 0)
;
3574 int i;
3575 for (i = 0; i < self->count; i++)
3576 reduce_mean.info.reduce.axis[i] = self->axis[i];
3577 reduce_mean.info.reduce.count = self->count;
3578 ccv_nnc_hint_tensor_auto(reduce_mean, &input_params, 1, ccv_nnc_no_hint, &output_params, 1);
3579 outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
3580 ccv_nnc_graph_exec_symbol_new(graph, reduce_mean, inputs, input_size, outputs, output_size, "reduce_mean");
3581}
3582
3583static ccv_cnnp_model_t* _ccv_cnnp_reduce_mean_copy(const ccv_cnnp_model_t* const self, void* const context);
3584
3585static const ccv_cnnp_model_vtab_t ccv_cnnp_reduce_mean_isa = {
3586 .build = _ccv_cnnp_reduce_mean_build,
3587 .copy = _ccv_cnnp_reduce_mean_copy,
3588};
3589
3590ccv_cnnp_model_t* ccv_cnnp_reduce_mean(const int* const axis, const int axis_count, const char* const name)
3591{
3592 ccv_cnnp_model_reduce_mean_t* const model_reduce_mean = (ccv_cnnp_model_reduce_mean_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_reduce_mean_t));
3593 model_reduce_mean->super.isa = &ccv_cnnp_reduce_mean_isa;
3594 model_reduce_mean->super.input_size = 1;
3595 model_reduce_mean->super.outputs = &model_reduce_mean->output;
3596 model_reduce_mean->super.output_size = 1;
3597 ccv_cnnp_model_copy_name(&model_reduce_mean->super, name);
3598 assert(axis_count <= CCV_NNC_MAX_DIM_ALLOC)((void) sizeof ((axis_count <= (12)) ? 1 : 0), __extension__
({ if (axis_count <= (12)) ; else __assert_fail ("axis_count <= CCV_NNC_MAX_DIM_ALLOC"
, "ccv_cnnp_model_addons.c", 3598, __extension__ __PRETTY_FUNCTION__
); }))
;
3599 int i;
3600 for (i = 0; i < axis_count; i++)
3601 model_reduce_mean->axis[i] = axis[i];
3602 model_reduce_mean->count = axis_count;
3603 return (ccv_cnnp_model_t*)model_reduce_mean;
3604}
3605
3606static ccv_cnnp_model_t* _ccv_cnnp_reduce_mean_copy(const ccv_cnnp_model_t* const super, void* const context)
3607{
3608 const ccv_cnnp_model_reduce_mean_t* const self = (const ccv_cnnp_model_reduce_mean_t*)super;
3609 return ccv_cnnp_reduce_mean(self->axis, self->count, self->super.name);
3610}
3611
3612// MARK - Reduce Max Layer
3613
3614typedef struct {
3615 ccv_cnnp_model_t super;
3616 int axis[CCV_NNC_MAX_DIM_ALLOC(12)];
3617 int count;
3618 ccv_nnc_tensor_symbol_t output;
3619} ccv_cnnp_model_reduce_max_t;
3620
3621static void _ccv_cnnp_reduce_max_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3622{
3623 PRINT(CCV_CLI_VERBOSE, "[cnnp_reduce_max_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_reduce_max_build] -\n"); fflush(stdout); } }
while (0)
;
3624 const ccv_cnnp_model_reduce_max_t* const self = (const ccv_cnnp_model_reduce_max_t*)super;
3625 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 3625, __extension__ __PRETTY_FUNCTION__); }))
;
3626 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 3626, __extension__ __PRETTY_FUNCTION__
); }))
;
3627 ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
3628 ccv_nnc_tensor_param_t output_params;
3629 ccv_nnc_cmd_t reduce_max = CMD_REDUCE_MAX_FORWARD()ccv_nnc_cmd(CCV_NNC_REDUCE_MAX_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.reduce={.count=(1 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1),.axis={}}}
), 0)
;
3630 int i;
3631 for (i = 0; i < self->count; i++)
3632 reduce_max.info.reduce.axis[i] = self->axis[i];
3633 reduce_max.info.reduce.count = self->count;
3634 ccv_nnc_hint_tensor_auto(reduce_max, &input_params, 1, ccv_nnc_no_hint, &output_params, 1);
3635 outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
3636 ccv_nnc_graph_exec_symbol_new(graph, reduce_max, inputs, input_size, outputs, output_size, "reduce_max");
3637}
3638
3639static ccv_cnnp_model_t* _ccv_cnnp_reduce_max_copy(const ccv_cnnp_model_t* const self, void* const context);
3640
3641static const ccv_cnnp_model_vtab_t ccv_cnnp_reduce_max_isa = {
3642 .build = _ccv_cnnp_reduce_max_build,
3643 .copy = _ccv_cnnp_reduce_max_copy,
3644};
3645
3646ccv_cnnp_model_t* ccv_cnnp_reduce_max(const int* const axis, const int axis_count, const char* const name)
3647{
3648 ccv_cnnp_model_reduce_max_t* const model_reduce_max = (ccv_cnnp_model_reduce_max_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_reduce_max_t));
3649 model_reduce_max->super.isa = &ccv_cnnp_reduce_max_isa;
3650 model_reduce_max->super.input_size = 1;
3651 model_reduce_max->super.outputs = &model_reduce_max->output;
3652 model_reduce_max->super.output_size = 1;
3653 ccv_cnnp_model_copy_name(&model_reduce_max->super, name);
3654 assert(axis_count <= CCV_NNC_MAX_DIM_ALLOC)((void) sizeof ((axis_count <= (12)) ? 1 : 0), __extension__
({ if (axis_count <= (12)) ; else __assert_fail ("axis_count <= CCV_NNC_MAX_DIM_ALLOC"
, "ccv_cnnp_model_addons.c", 3654, __extension__ __PRETTY_FUNCTION__
); }))
;
3655 int i;
3656 for (i = 0; i < axis_count; i++)
3657 model_reduce_max->axis[i] = axis[i];
3658 model_reduce_max->count = axis_count;
3659 return (ccv_cnnp_model_t*)model_reduce_max;
3660}
3661
3662static ccv_cnnp_model_t* _ccv_cnnp_reduce_max_copy(const ccv_cnnp_model_t* const super, void* const context)
3663{
3664 const ccv_cnnp_model_reduce_max_t* const self = (const ccv_cnnp_model_reduce_max_t*)super;
3665 return ccv_cnnp_reduce_max(self->axis, self->count, self->super.name);
3666}
3667
3668// MARK - Reduce Min Layer
3669
3670typedef struct {
3671 ccv_cnnp_model_t super;
3672 int axis[CCV_NNC_MAX_DIM_ALLOC(12)];
3673 int count;
3674 ccv_nnc_tensor_symbol_t output;
3675} ccv_cnnp_model_reduce_min_t;
3676
3677static void _ccv_cnnp_reduce_min_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3678{
3679 PRINT(CCV_CLI_VERBOSE, "[cnnp_reduce_min_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_reduce_min_build] -\n"); fflush(stdout); } }
while (0)
;
3680 const ccv_cnnp_model_reduce_min_t* const self = (const ccv_cnnp_model_reduce_min_t*)super;
3681 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 3681, __extension__ __PRETTY_FUNCTION__); }))
;
3682 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 3682, __extension__ __PRETTY_FUNCTION__
); }))
;
3683 ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
3684 ccv_nnc_tensor_param_t output_params;
3685 ccv_nnc_cmd_t reduce_min = CMD_REDUCE_MIN_FORWARD()ccv_nnc_cmd(CCV_NNC_REDUCE_MIN_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.reduce={.count=(1 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1),.axis={}}}
), 0)
;
3686 int i;
3687 for (i = 0; i < self->count; i++)
3688 reduce_min.info.reduce.axis[i] = self->axis[i];
3689 reduce_min.info.reduce.count = self->count;
3690 ccv_nnc_hint_tensor_auto(reduce_min, &input_params, 1, ccv_nnc_no_hint, &output_params, 1);
3691 outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
3692 ccv_nnc_graph_exec_symbol_new(graph, reduce_min, inputs, input_size, outputs, output_size, "reduce_min");
3693}
3694
3695static ccv_cnnp_model_t* _ccv_cnnp_reduce_min_copy(const ccv_cnnp_model_t* const self, void* const context);
3696
3697static const ccv_cnnp_model_vtab_t ccv_cnnp_reduce_min_isa = {
3698 .build = _ccv_cnnp_reduce_min_build,
3699 .copy = _ccv_cnnp_reduce_min_copy,
3700};
3701
3702ccv_cnnp_model_t* ccv_cnnp_reduce_min(const int* const axis, const int axis_count, const char* const name)
3703{
3704 ccv_cnnp_model_reduce_min_t* const model_reduce_min = (ccv_cnnp_model_reduce_min_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_reduce_min_t));
3705 model_reduce_min->super.isa = &ccv_cnnp_reduce_min_isa;
3706 model_reduce_min->super.input_size = 1;
3707 model_reduce_min->super.outputs = &model_reduce_min->output;
3708 model_reduce_min->super.output_size = 1;
3709 ccv_cnnp_model_copy_name(&model_reduce_min->super, name);
3710 assert(axis_count <= CCV_NNC_MAX_DIM_ALLOC)((void) sizeof ((axis_count <= (12)) ? 1 : 0), __extension__
({ if (axis_count <= (12)) ; else __assert_fail ("axis_count <= CCV_NNC_MAX_DIM_ALLOC"
, "ccv_cnnp_model_addons.c", 3710, __extension__ __PRETTY_FUNCTION__
); }))
;
3711 int i;
3712 for (i = 0; i < axis_count; i++)
3713 model_reduce_min->axis[i] = axis[i];
3714 model_reduce_min->count = axis_count;
3715 return (ccv_cnnp_model_t*)model_reduce_min;
3716}
3717
3718static ccv_cnnp_model_t* _ccv_cnnp_reduce_min_copy(const ccv_cnnp_model_t* const super, void* const context)
3719{
3720 const ccv_cnnp_model_reduce_min_t* const self = (const ccv_cnnp_model_reduce_min_t*)super;
3721 return ccv_cnnp_reduce_min(self->axis, self->count, self->super.name);
3722}
3723
3724// MARK - Reduce Norm2 Layer
3725
3726typedef struct {
3727 ccv_cnnp_model_t super;
3728 int axis[CCV_NNC_MAX_DIM_ALLOC(12)];
3729 int count;
3730 ccv_nnc_tensor_symbol_t output;
3731} ccv_cnnp_model_reduce_norm2_t;
3732
// Build the symbolic graph node for an L2-norm reduction over the axes stored
// on the model (self->axis[0..count-1]); one input tensor, one output tensor.
3733static void _ccv_cnnp_reduce_norm2_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3734{
3735 const ccv_cnnp_model_reduce_norm2_t* const self = (const ccv_cnnp_model_reduce_norm2_t*)super;
3736 PRINT(CCV_CLI_VERBOSE, "[cnnp_reduce_norm2_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_reduce_norm2_build] -\n"); fflush(stdout); }
} while (0)
;
3737 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 3737, __extension__ __PRETTY_FUNCTION__); }))
;
3738 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 3738, __extension__ __PRETTY_FUNCTION__
); }))
;
3739 ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
3740 ccv_nnc_tensor_param_t output_params;
// Start from a default REDUCE_NORM2 command, then overwrite its axis list below.
3741 ccv_nnc_cmd_t reduce_norm2 = CMD_REDUCE_NORM2_FORWARD()ccv_nnc_cmd(CCV_NNC_REDUCE_NORM2_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.reduce={.count=(1 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1),.axis={}}}
), 0)
;
3742 int i;
3743 for (i = 0; i < self->count; i++)
3744 reduce_norm2.info.reduce.axis[i] = self->axis[i];
3745 reduce_norm2.info.reduce.count = self->count;
// Let the command infer the reduced output shape from the input shape.
3746 ccv_nnc_hint_tensor_auto(reduce_norm2, &input_params, 1, ccv_nnc_no_hint, &output_params, 1);
3747 outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
3748 ccv_nnc_graph_exec_symbol_new(graph, reduce_norm2, inputs, input_size, outputs, output_size, "reduce_norm2");
3749}
3750
3751static ccv_cnnp_model_t* _ccv_cnnp_reduce_norm2_copy(const ccv_cnnp_model_t* const self, void* const context);
3752
// vtab: reduce_norm2 is stateless (no parameters), so only build/copy are needed.
3753static const ccv_cnnp_model_vtab_t ccv_cnnp_reduce_norm2_isa = {
3754 .build = _ccv_cnnp_reduce_norm2_build,
3755 .copy = _ccv_cnnp_reduce_norm2_copy,
3756};
3757
// Constructor: copies the caller's axis list into the model (up to
// CCV_NNC_MAX_DIM_ALLOC axes); single input, single output.
3758ccv_cnnp_model_t* ccv_cnnp_reduce_norm2(const int* const axis, const int axis_count, const char* const name)
3759{
3760 ccv_cnnp_model_reduce_norm2_t* const model_reduce_norm2 = (ccv_cnnp_model_reduce_norm2_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_reduce_norm2_t));
3761 model_reduce_norm2->super.isa = &ccv_cnnp_reduce_norm2_isa;
3762 model_reduce_norm2->super.input_size = 1;
3763 model_reduce_norm2->super.outputs = &model_reduce_norm2->output;
3764 model_reduce_norm2->super.output_size = 1;
3765 ccv_cnnp_model_copy_name(&model_reduce_norm2->super, name);
3766 assert(axis_count <= CCV_NNC_MAX_DIM_ALLOC)((void) sizeof ((axis_count <= (12)) ? 1 : 0), __extension__
({ if (axis_count <= (12)) ; else __assert_fail ("axis_count <= CCV_NNC_MAX_DIM_ALLOC"
, "ccv_cnnp_model_addons.c", 3766, __extension__ __PRETTY_FUNCTION__
); }))
;
3767 int i;
3768 for (i = 0; i < axis_count; i++)
3769 model_reduce_norm2->axis[i] = axis[i];
3770 model_reduce_norm2->count = axis_count;
3771 return (ccv_cnnp_model_t*)model_reduce_norm2;
3772}
3773
3774static ccv_cnnp_model_t* _ccv_cnnp_reduce_norm2_copy(const ccv_cnnp_model_t* const super, void* const context)
3775{
3776 const ccv_cnnp_model_reduce_norm2_t* const self = (const ccv_cnnp_model_reduce_norm2_t*)super;
3777 return ccv_cnnp_reduce_norm2(self->axis, self->count, self->super.name);
3778}
3779
3780// MARK - Argmax Layer
3781
// Argmax model state: the single axis to reduce over plus the output symbol.
3782typedef struct {
3783 ccv_cnnp_model_t super;
3784 int axis;
3785 ccv_nnc_tensor_symbol_t output;
3786} ccv_cnnp_model_argmax_t;
3787
// Build the symbolic graph node for argmax along self->axis; one input, one output.
3788static void _ccv_cnnp_argmax_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3789{
3790 const ccv_cnnp_model_argmax_t* const self = (const ccv_cnnp_model_argmax_t*)super;
3791 PRINT(CCV_CLI_VERBOSE, "[cnnp_argmax_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_argmax_build] -\n"); fflush(stdout); } } while
(0)
;
3792 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 3792, __extension__ __PRETTY_FUNCTION__); }))
;
3793 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 3793, __extension__ __PRETTY_FUNCTION__
); }))
;
3794 ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
3795 ccv_nnc_tensor_param_t output_params;
3796 ccv_nnc_cmd_t argmax = CMD_ARGMAX_FORWARD()ccv_nnc_cmd(CCV_NNC_ARGMAX_FORWARD, 0, ((ccv_nnc_cmd_param_t)
{.size={.dim={1,1,1}},.reduce={.count=(1 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1),.axis={}}})
, 0)
;
// Argmax reduces over exactly one axis.
3797 argmax.info.reduce.axis[0] = self->axis;
3798 argmax.info.reduce.count = 1;
3799 ccv_nnc_hint_tensor_auto(argmax, &input_params, 1, ccv_nnc_no_hint, &output_params, 1);
3800 outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
3801 ccv_nnc_graph_exec_symbol_new(graph, argmax, inputs, input_size, outputs, output_size, "argmax");
3802}
3803
3804static ccv_cnnp_model_t* _ccv_cnnp_argmax_copy(const ccv_cnnp_model_t* const self, void* const context);
3805
// vtab: stateless model, only build/copy.
3806static const ccv_cnnp_model_vtab_t ccv_cnnp_argmax_isa = {
3807 .build = _ccv_cnnp_argmax_build,
3808 .copy = _ccv_cnnp_argmax_copy,
3809};
3810
3811ccv_cnnp_model_t* ccv_cnnp_argmax(const int axis, const char* const name)
3812{
3813 ccv_cnnp_model_argmax_t* const model_argmax = (ccv_cnnp_model_argmax_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_argmax_t));
3814 model_argmax->super.isa = &ccv_cnnp_argmax_isa;
3815 model_argmax->super.input_size = 1;
3816 model_argmax->super.outputs = &model_argmax->output;
3817 model_argmax->super.output_size = 1;
3818 ccv_cnnp_model_copy_name(&model_argmax->super, name);
3819 model_argmax->axis = axis;
3820 return (ccv_cnnp_model_t*)model_argmax;
3821}
3822
3823static ccv_cnnp_model_t* _ccv_cnnp_argmax_copy(const ccv_cnnp_model_t* const super, void* const context)
3824{
3825 const ccv_cnnp_model_argmax_t* const self = (const ccv_cnnp_model_argmax_t*)super;
3826 return ccv_cnnp_argmax(self->axis, self->super.name);
3827}
3828
3829// MARK - Argmin Layer
3830
// Argmin model state: the single axis to reduce over plus the output symbol.
3831typedef struct {
3832 ccv_cnnp_model_t super;
3833 int axis;
3834 ccv_nnc_tensor_symbol_t output;
3835} ccv_cnnp_model_argmin_t;
3836
// Build the symbolic graph node for argmin along self->axis; one input, one output.
3837static void _ccv_cnnp_argmin_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3838{
3839 const ccv_cnnp_model_argmin_t* const self = (const ccv_cnnp_model_argmin_t*)super;
3840 PRINT(CCV_CLI_VERBOSE, "[cnnp_argmin_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_argmin_build] -\n"); fflush(stdout); } } while
(0)
;
3841 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 3841, __extension__ __PRETTY_FUNCTION__); }))
;
3842 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 3842, __extension__ __PRETTY_FUNCTION__
); }))
;
3843 ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
3844 ccv_nnc_tensor_param_t output_params;
3845 ccv_nnc_cmd_t argmin = CMD_ARGMIN_FORWARD()ccv_nnc_cmd(CCV_NNC_ARGMIN_FORWARD, 0, ((ccv_nnc_cmd_param_t)
{.size={.dim={1,1,1}},.reduce={.count=(1 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1),.axis={}}})
, 0)
;
// Argmin reduces over exactly one axis.
3846 argmin.info.reduce.axis[0] = self->axis;
3847 argmin.info.reduce.count = 1;
3848 ccv_nnc_hint_tensor_auto(argmin, &input_params, 1, ccv_nnc_no_hint, &output_params, 1);
3849 outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
3850 ccv_nnc_graph_exec_symbol_new(graph, argmin, inputs, input_size, outputs, output_size, "argmin");
3851}
3852
3853static ccv_cnnp_model_t* _ccv_cnnp_argmin_copy(const ccv_cnnp_model_t* const self, void* const context);
3854
// vtab: stateless model, only build/copy.
3855static const ccv_cnnp_model_vtab_t ccv_cnnp_argmin_isa = {
3856 .build = _ccv_cnnp_argmin_build,
3857 .copy = _ccv_cnnp_argmin_copy,
3858};
3859
3860ccv_cnnp_model_t* ccv_cnnp_argmin(const int axis, const char* const name)
3861{
3862 ccv_cnnp_model_argmin_t* const model_argmin = (ccv_cnnp_model_argmin_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_argmin_t));
3863 model_argmin->super.isa = &ccv_cnnp_argmin_isa;
3864 model_argmin->super.input_size = 1;
3865 model_argmin->super.outputs = &model_argmin->output;
3866 model_argmin->super.output_size = 1;
3867 ccv_cnnp_model_copy_name(&model_argmin->super, name);
3868 model_argmin->axis = axis;
3869 return (ccv_cnnp_model_t*)model_argmin;
3870}
3871
3872static ccv_cnnp_model_t* _ccv_cnnp_argmin_copy(const ccv_cnnp_model_t* const super, void* const context)
3873{
3874 const ccv_cnnp_model_argmin_t* const self = (const ccv_cnnp_model_argmin_t*)super;
3875 return ccv_cnnp_argmin(self->axis, self->super.name);
3876}
3877
3878// MARK - Min Layer
3879
// Element-wise min model: no configuration beyond the output symbol.
3880typedef struct {
3881 ccv_cnnp_model_t super;
3882 ccv_nnc_tensor_symbol_t output;
3883} ccv_cnnp_model_min_t;
3884
// Build the symbolic graph node for element-wise min of two inputs.
3885static void _ccv_cnnp_min_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3886{
3887 PRINT(CCV_CLI_VERBOSE, "[cnnp_min_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_min_build] -\n"); fflush(stdout); } } while (
0)
;
3888 assert(input_size == 2)((void) sizeof ((input_size == 2) ? 1 : 0), __extension__ ({ if
(input_size == 2) ; else __assert_fail ("input_size == 2", "ccv_cnnp_model_addons.c"
, 3888, __extension__ __PRETTY_FUNCTION__); }))
;
3889 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 3889, __extension__ __PRETTY_FUNCTION__
); }))
;
3890 ccv_nnc_tensor_param_t input_params[2];
3891 int i;
3892 for (i = 0; i < 2; i++)
3893 input_params[i] = ccv_nnc_tensor_symbol_params(graph, inputs[i]);
3894 ccv_nnc_tensor_param_t output_params;
3895 const ccv_nnc_cmd_t min = CMD_MIN_FORWARD()ccv_nnc_cmd(CCV_NNC_MIN_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}}}, 0)
;
// Output shape is inferred from the two input shapes.
3896 ccv_nnc_hint_tensor_auto(min, input_params, 2, ccv_nnc_no_hint, &output_params, 1);
3897 outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
3898 ccv_nnc_graph_exec_symbol_new(graph, min, inputs, input_size, outputs, output_size, "min");
3899}
3900
3901static ccv_cnnp_model_t* _ccv_cnnp_min_copy(const ccv_cnnp_model_t* const self, void* const context);
3902
// vtab: stateless model, only build/copy.
3903static const ccv_cnnp_model_vtab_t ccv_cnnp_min_isa = {
3904 .build = _ccv_cnnp_min_build,
3905 .copy = _ccv_cnnp_min_copy,
3906};
3907
3908ccv_cnnp_model_t* ccv_cnnp_min(const char* const name)
3909{
3910 ccv_cnnp_model_min_t* const model_min = (ccv_cnnp_model_min_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_min_t));
3911 model_min->super.isa = &ccv_cnnp_min_isa;
3912 model_min->super.input_size = 2;
3913 model_min->super.outputs = &model_min->output;
3914 model_min->super.output_size = 1;
3915 ccv_cnnp_model_copy_name(&model_min->super, name);
3916 return (ccv_cnnp_model_t*)model_min;
3917}
3918
3919static ccv_cnnp_model_t* _ccv_cnnp_min_copy(const ccv_cnnp_model_t* const super, void* const context)
3920{
3921 const ccv_cnnp_model_min_t* const self = (const ccv_cnnp_model_min_t*)super;
3922 return ccv_cnnp_min(self->super.name);
3923}
3924
3925// MARK - Max Layer
3926
// Element-wise max model: no configuration beyond the output symbol.
3927typedef struct {
3928 ccv_cnnp_model_t super;
3929 ccv_nnc_tensor_symbol_t output;
3930} ccv_cnnp_model_max_t;
3931
// Build the symbolic graph node for element-wise max of two inputs.
3932static void _ccv_cnnp_max_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3933{
3934 PRINT(CCV_CLI_VERBOSE, "[cnnp_max_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_max_build] -\n"); fflush(stdout); } } while (
0)
;
3935 assert(input_size == 2)((void) sizeof ((input_size == 2) ? 1 : 0), __extension__ ({ if
(input_size == 2) ; else __assert_fail ("input_size == 2", "ccv_cnnp_model_addons.c"
, 3935, __extension__ __PRETTY_FUNCTION__); }))
;
3936 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 3936, __extension__ __PRETTY_FUNCTION__
); }))
;
3937 ccv_nnc_tensor_param_t input_params[2];
3938 int i;
3939 for (i = 0; i < 2; i++)
3940 input_params[i] = ccv_nnc_tensor_symbol_params(graph, inputs[i]);
3941 ccv_nnc_tensor_param_t output_params;
3942 const ccv_nnc_cmd_t max = CMD_MAX_FORWARD()ccv_nnc_cmd(CCV_NNC_MAX_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}}}, 0)
;
// Output shape is inferred from the two input shapes.
3943 ccv_nnc_hint_tensor_auto(max, input_params, 2, ccv_nnc_no_hint, &output_params, 1);
3944 outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
3945 ccv_nnc_graph_exec_symbol_new(graph, max, inputs, input_size, outputs, output_size, "max");
3946}
3947
3948static ccv_cnnp_model_t* _ccv_cnnp_max_copy(const ccv_cnnp_model_t* const self, void* const context);
3949
// vtab: stateless model, only build/copy.
3950static const ccv_cnnp_model_vtab_t ccv_cnnp_max_isa = {
3951 .build = _ccv_cnnp_max_build,
3952 .copy = _ccv_cnnp_max_copy,
3953};
3954
3955ccv_cnnp_model_t* ccv_cnnp_max(const char* const name)
3956{
3957 ccv_cnnp_model_max_t* const model_max = (ccv_cnnp_model_max_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_max_t));
3958 model_max->super.isa = &ccv_cnnp_max_isa;
3959 model_max->super.input_size = 2;
3960 model_max->super.outputs = &model_max->output;
3961 model_max->super.output_size = 1;
3962 ccv_cnnp_model_copy_name(&model_max->super, name);
3963 return (ccv_cnnp_model_t*)model_max;
3964}
3965
3966static ccv_cnnp_model_t* _ccv_cnnp_max_copy(const ccv_cnnp_model_t* const super, void* const context)
3967{
3968 const ccv_cnnp_model_max_t* const self = (const ccv_cnnp_model_max_t*)super;
3969 return ccv_cnnp_max(self->super.name);
3970}
3971
3972// MARK - LSTM Layer
3973
// LSTM model state. `weights` is the single packed weight tensor symbol,
// `reserves` holds cuDNN-style scratch/reserve space, and `lstm` is the exec
// symbol so set_is_test can update the command in place later.
3974typedef struct {
3975 ccv_cnnp_model_t super;
3976 int masked;
3977 ccv_nnc_tensor_symbol_t output;
3978 ccv_nnc_tensor_symbol_t weights;
3979 ccv_nnc_tensor_symbol_t reserves;
3980 ccv_nnc_cmd_param_t params;
3981 ccv_nnc_graph_exec_symbol_t lstm;
3982} ccv_cnnp_model_lstm_t;
3983
// Compute the leading dimension (row count) of the packed LSTM weight matrix.
// The formula distinguishes the projected case (proj_size != hidden_size),
// where recurrent weights operate on proj_size and per-layer projection
// matrices are appended; bias contributes 8 rows per layer (4 gates x 2).
// NOTE(review): formula mirrors the packed layout expected by
// CCV_NNC_LSTM_FORWARD — confirm against the command's weight layout.
static int _ccv_cnnp_lstm_weight_dim(int bidirectional, int num_layers, int input_size, int hidden_size, int proj_size, int bias)
{
	const int directions = bidirectional ? 2 : 1;
	const int bias_rows = bias ? num_layers * 8 : 0;
	int rows;
	if (hidden_size == proj_size)
		rows = bias_rows + (num_layers - 1) * (hidden_size * 4 * directions + hidden_size * 4) + input_size * 4 + hidden_size * 4;
	else
		rows = bias_rows + (num_layers - 1) * (proj_size * 4 * directions + proj_size * 4) + (proj_size * 4 + input_size * 4) + num_layers * proj_size;
	return rows * directions;
}
3992
// Build the LSTM exec node. Inputs: x (and an optional mask when masked);
// lazily creates the packed `weights` and `reserves` symbols on first build,
// and remembers the exec symbol so set_is_test can swap the command later.
3993static void _ccv_cnnp_lstm_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3994{
3995 ccv_cnnp_model_lstm_t* const self = (ccv_cnnp_model_lstm_t*)super;
3996 PRINT(CCV_CLI_VERBOSE, "[cnnp_lstm_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_lstm_build] -\n"); fflush(stdout); } } while
(0)
;
3997 assert(input_size == self->super.input_size)((void) sizeof ((input_size == self->super.input_size) ? 1
: 0), __extension__ ({ if (input_size == self->super.input_size
) ; else __assert_fail ("input_size == self->super.input_size"
, "ccv_cnnp_model_addons.c", 3997, __extension__ __PRETTY_FUNCTION__
); }))
;
3998 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 3998, __extension__ __PRETTY_FUNCTION__
); }))
;
// proj_size == 0 means "no projection": fall back to hidden_size.
3999 const int proj_size = self->params.rnn.proj_size == 0 ? self->params.rnn.hidden_size : self->params.rnn.proj_size;
4000 ccv_nnc_tensor_param_t input_params[5];
4001 input_params[0]= ccv_nnc_tensor_symbol_params(graph, inputs[0]);
4002 if (input_size == 2)
4003 input_params[1] = ccv_nnc_tensor_symbol_params(graph, inputs[1]);
// input_params[4] describes the packed weight tensor: same type/format as x,
// with dims derived from the feature count (last dim of x) and hyper-params.
4004 input_params[4] = input_params[0];
4005 memset(input_params[4].dim, 0, sizeof(input_params[4].dim));
4006 const int x_nd = ccv_nnc_tensor_nd(input_params[0].dim);
4007 const int feature_count = input_params[0].dim[x_nd - 1];
4008 input_params[4].dim[0] = _ccv_cnnp_lstm_weight_dim(self->params.rnn.bidirectional, self->params.rnn.num_layers, feature_count, self->params.rnn.hidden_size, proj_size, self->params.rnn.bias);
4009 input_params[4].dim[1] = self->params.rnn.hidden_size;
4010 const ccv_nnc_cmd_t lstm = ccv_nnc_cmd(CCV_NNC_LSTM_FORWARD, 0, self->params, 0);
4011 ccv_nnc_tensor_param_t output_params[4];
4012 ccv_nnc_hint_tensor_auto(lstm, input_params, 5, ccv_nnc_no_hint, output_params, 4);
4013 outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params[0], 0);
// Create the weight symbol once; subsequent builds must be on the same graph.
4014 if (!self->weights.graph)
4015 self->weights = ccv_nnc_tensor_symbol_new(graph, input_params[4], "weights");
4016 assert(self->weights.graph == graph)((void) sizeof ((self->weights.graph == graph) ? 1 : 0), __extension__
({ if (self->weights.graph == graph) ; else __assert_fail
("self->weights.graph == graph", "ccv_cnnp_model_addons.c"
, 4016, __extension__ __PRETTY_FUNCTION__); }))
;
4017 const ccv_nnc_tensor_symbol_t weights = ccv_cnnp_model_get_symbol(super, self->weights);
// Reserve buffer (output_params[3]) is also created lazily.
4018 if (!self->reserves.graph)
4019 self->reserves = ccv_nnc_tensor_symbol_new(graph, output_params[3], "reserves");
4020 assert(self->reserves.graph == graph)((void) sizeof ((self->reserves.graph == graph) ? 1 : 0), __extension__
({ if (self->reserves.graph == graph) ; else __assert_fail
("self->reserves.graph == graph", "ccv_cnnp_model_addons.c"
, 4020, __extension__ __PRETTY_FUNCTION__); }))
;
4021 const ccv_nnc_tensor_symbol_t reserves = ccv_cnnp_model_get_symbol(super, self->reserves);
// Optional mask occupies input slot 1; absent slots use NO_TENSOR_SYMBOL.
4022 const ccv_nnc_tensor_symbol_t mask = input_size == 2 ? inputs[1] : NO_TENSOR_SYMBOL(const ccv_nnc_tensor_symbol_t){.d = CCV_NNC_NO_TENSOR_SYMBOL
}
;
4023 self->lstm = ccv_nnc_graph_exec_symbol_new(graph, lstm, TENSOR_SYMBOL_LIST(inputs[0], mask, NO_TENSOR_SYMBOL, NO_TENSOR_SYMBOL, weights)(const ccv_nnc_tensor_symbol_t []){inputs[0], mask, (const ccv_nnc_tensor_symbol_t
){.d = CCV_NNC_NO_TENSOR_SYMBOL}, (const ccv_nnc_tensor_symbol_t
){.d = CCV_NNC_NO_TENSOR_SYMBOL}, weights}, (1 +1 +1 +1 +1 +1
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(outputs[0], NO_TENSOR_SYMBOL, NO_TENSOR_SYMBOL, reserves)(const ccv_nnc_tensor_symbol_t []){outputs[0], (const ccv_nnc_tensor_symbol_t
){.d = CCV_NNC_NO_TENSOR_SYMBOL}, (const ccv_nnc_tensor_symbol_t
){.d = CCV_NNC_NO_TENSOR_SYMBOL}, reserves}, (1 +1 +1 +1 +1 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "lstm");
4024}
4025
// Initialize the packed weights uniformly in [-1/sqrt(hidden), 1/sqrt(hidden)],
// matching the conventional LSTM initialization bound.
4026static void _ccv_cnnp_lstm_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
4027{
4028 ccv_cnnp_model_lstm_t* const self = (ccv_cnnp_model_lstm_t*)super;
4029 if (self->weights.graph)
4030 {
4031 const float stdv = 1.0 / sqrt(self->params.rnn.hidden_size);
4032 initializer(context, CMD_RANDOM_UNIFORM_FORWARD(-stdv, stdv)ccv_nnc_cmd(CCV_NNC_RANDOM_UNIFORM_FORWARD, 0, (ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.blas={.a={-stdv, stdv}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->weights);
4033 }
4034}
4035
4036static void _ccv_cnnp_lstm_add_to_parameter(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const parameters, const int is_trainable)
4037{
4038 ccv_cnnp_model_lstm_t* const self = (ccv_cnnp_model_lstm_t*)super;
4039 if (self->weights.graph)
4040 add_to_array(parameters, self->weights, is_trainable);
4041}
4042
4043static void _ccv_cnnp_lstm_set_is_test(ccv_cnnp_model_t* const super, const int is_test, const ccv_cnnp_cmd_updater_f updater, void* const context)
4044{
4045 ccv_cnnp_model_lstm_t* const self = (ccv_cnnp_model_lstm_t*)super;
4046 if (self->lstm.graph)
4047 {
4048 self->params.rnn.is_test = is_test;
4049 updater(context, self->lstm, ccv_nnc_cmd(CCV_NNC_LSTM_FORWARD, 0, self->params, 0), ccv_nnc_no_hint);
4050 }
4051}
4052
4053static ccv_cnnp_model_t* _ccv_cnnp_lstm_copy(const ccv_cnnp_model_t* const self, void* const context);
4054
// vtab: LSTM is stateful — it owns weights (init_states/add_to_parameter) and
// needs set_is_test to flip train/inference behavior.
4055static const ccv_cnnp_model_vtab_t ccv_cnnp_lstm_isa = {
4056 .build = _ccv_cnnp_lstm_build,
4057 .init_states = _ccv_cnnp_lstm_init_states,
4058 .add_to_parameter = _ccv_cnnp_lstm_add_to_parameter,
4059 .copy = _ccv_cnnp_lstm_copy,
4060 .set_is_test = _ccv_cnnp_lstm_set_is_test,
4061};
4062
4063ccv_cnnp_model_t* ccv_cnnp_lstm(const int masked, const int hidden_size, const int proj_size, const int num_layers, const int bias, const int batch_first, const int bidirectional, const float dropout, const int is_trainable, const char* const name)
4064{
4065 ccv_cnnp_model_lstm_t* const model_lstm = (ccv_cnnp_model_lstm_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_lstm_t));
4066 model_lstm->super.isa = &ccv_cnnp_lstm_isa;
4067 model_lstm->super.input_size = masked ? 2 : 1;
4068 model_lstm->super.outputs = &model_lstm->output;
4069 model_lstm->super.output_size = 1;
4070 model_lstm->super.is_trainable = is_trainable;
4071 ccv_cnnp_model_copy_name(&model_lstm->super, name);
4072 model_lstm->masked = masked;
4073 model_lstm->weights.d = CCV_NNC_NO_TENSOR_SYMBOL;
4074 model_lstm->weights.graph = 0;
4075 model_lstm->params.rnn.hidden_size = hidden_size;
4076 model_lstm->params.rnn.proj_size = proj_size;
4077 model_lstm->params.rnn.num_layers = num_layers;
4078 model_lstm->params.rnn.bias = bias;
4079 model_lstm->params.rnn.batch_first = batch_first;
4080 model_lstm->params.rnn.bidirectional = bidirectional;
4081 model_lstm->params.rnn.dropout = dropout;
4082 return (ccv_cnnp_model_t*)model_lstm;
4083}
4084
4085static ccv_cnnp_model_t* _ccv_cnnp_lstm_copy(const ccv_cnnp_model_t* const super, void* const context)
4086{
4087 const ccv_cnnp_model_lstm_t* const self = (const ccv_cnnp_model_lstm_t*)super;
4088 return ccv_cnnp_lstm(self->masked, self->params.rnn.hidden_size, self->params.rnn.proj_size, self->params.rnn.num_layers, self->params.rnn.bias, self->params.rnn.batch_first, self->params.rnn.bidirectional, self->params.rnn.dropout, self->super.is_trainable, self->super.name);
4089}
4090
4091/// MARK - Datatype conversion layer.
4092
// Datatype-conversion model state: either a fixed target datatype, or
// ref_to_last to copy the datatype of the last input tensor.
4093typedef struct {
4094 ccv_cnnp_model_t super;
4095 ccv_nnc_tensor_symbol_t output;
4096 int datatype;
4097 int ref_to_last;
4098} ccv_cnnp_model_datatype_conversion_t;
4099
// Build the conversion node: output keeps the input's shape but takes either
// the configured datatype or the last input's datatype (ref_to_last).
4100static void _ccv_cnnp_datatype_conversion_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
4101{
4102 ccv_cnnp_model_datatype_conversion_t* const self = (ccv_cnnp_model_datatype_conversion_t*)super;
4103 PRINT(CCV_CLI_VERBOSE, "[cnnp_datatype_conversion_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_datatype_conversion_build] -\n"); fflush(stdout
); } } while (0)
;
4104 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
4105 if (self->ref_to_last)
4106 {
4107 assert(input_size > 1)((void) sizeof ((input_size > 1) ? 1 : 0), __extension__ (
{ if (input_size > 1) ; else __assert_fail ("input_size > 1"
, "ccv_cnnp_model_addons.c", 4107, __extension__ __PRETTY_FUNCTION__
); }))
;
4108 const ccv_nnc_tensor_param_t last_params = ccv_nnc_tensor_symbol_params(graph, inputs[input_size - 1]);
4109 params.datatype = last_params.datatype;
4110 } else
4111 params.datatype = self->datatype;
4112 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 4112, __extension__ __PRETTY_FUNCTION__
); }))
;
4113 outputs[0] = ccv_nnc_tensor_symbol_new(graph, params, 0);
// Note: passes output_size (1) as the input count on purpose — only inputs[0]
// is converted; a ref_to_last reference input is not fed to the command.
4114 ccv_nnc_graph_exec_symbol_new(graph, CMD_DATATYPE_CONVERSION_FORWARD()ccv_nnc_cmd(CCV_NNC_DATATYPE_CONVERSION_FORWARD, 0, ccv_nnc_cmd_auto
, 0)
, inputs, output_size /* intentional */, outputs, output_size, 0);
4115}
4116
4117static ccv_cnnp_model_t* _ccv_cnnp_datatype_conversion_copy(const ccv_cnnp_model_t* const self, void* const context);
4118
// vtab: stateless model, only build/copy.
4119static const ccv_cnnp_model_vtab_t ccv_cnnp_datatype_conversion_isa = {
4120 .build = _ccv_cnnp_datatype_conversion_build,
4121 .copy = _ccv_cnnp_datatype_conversion_copy,
4122};
4123
4124ccv_cnnp_model_t* ccv_cnnp_datatype_conversion(const int datatype, const int ref_to_last, const char* const name)
4125{
4126 ccv_cnnp_model_datatype_conversion_t* const model_datatype_conversion = (ccv_cnnp_model_datatype_conversion_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_datatype_conversion_t));
4127 model_datatype_conversion->super.isa = &ccv_cnnp_datatype_conversion_isa;
4128 model_datatype_conversion->super.input_size = 0;
4129 model_datatype_conversion->super.outputs = &model_datatype_conversion->output;
4130 model_datatype_conversion->super.output_size = 1;
4131 model_datatype_conversion->datatype = datatype;
4132 model_datatype_conversion->ref_to_last = ref_to_last;
4133 ccv_cnnp_model_copy_name(&model_datatype_conversion->super, name);
4134 return (ccv_cnnp_model_t*)model_datatype_conversion;
4135}
4136
4137static ccv_cnnp_model_t* _ccv_cnnp_datatype_conversion_copy(const ccv_cnnp_model_t* const super, void* const context)
4138{
4139 ccv_cnnp_model_datatype_conversion_t* const self = (ccv_cnnp_model_datatype_conversion_t*)super;
4140 return ccv_cnnp_datatype_conversion(self->datatype, self->ref_to_last, self->super.name);
4141}
4142
4143/// MARK - Clamp layer.
4144
// Clamp model state: inclusive [min, max] bounds applied element-wise.
4145typedef struct {
4146 ccv_cnnp_model_t super;
4147 ccv_nnc_tensor_symbol_t output;
4148 float min;
4149 float max;
4150} ccv_cnnp_model_clamp_t;
4151
// Build the clamp node: output has the same shape/type as the input.
4152static void _ccv_cnnp_clamp_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
4153{
4154 ccv_cnnp_model_clamp_t* const self = (ccv_cnnp_model_clamp_t*)super;
4155 PRINT(CCV_CLI_VERBOSE, "[cnnp_clamp_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_clamp_build] -\n"); fflush(stdout); } } while
(0)
;
4156 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
4157 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 4157, __extension__ __PRETTY_FUNCTION__
); }))
;
4158 outputs[0] = ccv_nnc_tensor_symbol_new(graph, params, 0);
// Only inputs[0] is clamped (output_size passed as input count on purpose).
4159 ccv_nnc_graph_exec_symbol_new(graph, CMD_CLAMP_FORWARD(self->min, self->max)ccv_nnc_cmd(CCV_NNC_CLAMP_FORWARD, 0, (ccv_nnc_cmd_param_t){.
size={.dim={1,1,1}},.clamp={.min=self->min,.max=self->max
}}, 0)
, inputs, output_size /* intentional */, outputs, output_size, 0);
4160}
4161
4162static ccv_cnnp_model_t* _ccv_cnnp_clamp_copy(const ccv_cnnp_model_t* const self, void* const context);
4163
// vtab: stateless model, only build/copy.
4164static const ccv_cnnp_model_vtab_t ccv_cnnp_clamp_isa = {
4165 .build = _ccv_cnnp_clamp_build,
4166 .copy = _ccv_cnnp_clamp_copy,
4167};
4168
4169ccv_cnnp_model_t* ccv_cnnp_clamp(const float min, const float max, const char* const name)
4170{
4171 ccv_cnnp_model_clamp_t* const model_clamp = (ccv_cnnp_model_clamp_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_clamp_t));
4172 model_clamp->super.isa = &ccv_cnnp_clamp_isa;
4173 model_clamp->super.input_size = 0;
4174 model_clamp->super.outputs = &model_clamp->output;
4175 model_clamp->super.output_size = 1;
4176 model_clamp->min = min;
4177 model_clamp->max = max;
4178 ccv_cnnp_model_copy_name(&model_clamp->super, name);
4179 return (ccv_cnnp_model_t*)model_clamp;
4180}
4181
4182static ccv_cnnp_model_t* _ccv_cnnp_clamp_copy(const ccv_cnnp_model_t* const super, void* const context)
4183{
4184 ccv_cnnp_model_clamp_t* const self = (ccv_cnnp_model_clamp_t*)super;
4185 return ccv_cnnp_clamp(self->min, self->max, self->super.name);
4186}
4187
4188// MARK - Parameter Layer
4189
// Parameter model: a free-standing trainable tensor. init_bound > 0 selects
// uniform init in [-init_bound, init_bound]; otherwise zero init.
4190typedef struct {
4191 ccv_cnnp_model_t super;
4192 float init_bound;
4193 ccv_nnc_tensor_symbol_t weights;
4194 ccv_nnc_tensor_param_t weights_params;
4195 ccv_nnc_tensor_symbol_t output;
4196} ccv_cnnp_model_parameter_t;
4197
// Build: lazily creates the weight symbol and exposes it directly as the
// output (no exec node is added).
4198static void _ccv_cnnp_parameter_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
4199{
4200 PRINT(CCV_CLI_VERBOSE, "[cnnp_parameter_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_parameter_build] -\n"); fflush(stdout); } } while
(0)
;
4201 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 4201, __extension__ __PRETTY_FUNCTION__
); }))
;
4202 ccv_cnnp_model_parameter_t* const self = (ccv_cnnp_model_parameter_t*)super;
4203 if (!self->weights.graph)
4204 self->weights = ccv_nnc_tensor_symbol_new(graph, self->weights_params, "weights");
4205 assert(self->weights.graph == graph)((void) sizeof ((self->weights.graph == graph) ? 1 : 0), __extension__
({ if (self->weights.graph == graph) ; else __assert_fail
("self->weights.graph == graph", "ccv_cnnp_model_addons.c"
, 4205, __extension__ __PRETTY_FUNCTION__); }))
;
4206 outputs[0] = ccv_cnnp_model_get_symbol(super, self->weights);
4207}
4208
// State init: uniform in [-init_bound, init_bound] when positive, else zeros.
4209static void _ccv_cnnp_parameter_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
4210{
4211 ccv_cnnp_model_parameter_t* const self = (ccv_cnnp_model_parameter_t*)super;
4212 if (self->init_bound > 0)
4213 initializer(context, CMD_RANDOM_UNIFORM_FORWARD(-self->init_bound, self->init_bound)ccv_nnc_cmd(CCV_NNC_RANDOM_UNIFORM_FORWARD, 0, (ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.blas={.a={-self->init_bound, self->
init_bound}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->weights);
4214 else
4215 initializer(context, CMD_SET_FORWARD(0)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={0,}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->weights);
4216}
4217
4218static void _ccv_cnnp_parameter_add_to_parameter(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const parameters, const int is_trainable)
4219{
4220 ccv_cnnp_model_parameter_t* const self = (ccv_cnnp_model_parameter_t*)super;
4221 add_to_array(parameters, self->weights, is_trainable);
4222}
4223
4224static ccv_cnnp_model_t* _ccv_cnnp_parameter_copy(const ccv_cnnp_model_t* const super, void* const context);
4225
// vtab: parameter owns a weight tensor, so it implements init_states and
// add_to_parameter in addition to build/copy.
4226static const ccv_cnnp_model_vtab_t ccv_cnnp_parameter_isa = {
4227 .build = _ccv_cnnp_parameter_build,
4228 .init_states = _ccv_cnnp_parameter_init_states,
4229 .add_to_parameter = _ccv_cnnp_parameter_add_to_parameter,
4230 .copy = _ccv_cnnp_parameter_copy,
4231};
4232
4233ccv_cnnp_model_t* ccv_cnnp_parameter(const ccv_nnc_tensor_param_t params, const float init_bound, const int is_trainable, const char* const name)
4234{
4235 ccv_cnnp_model_parameter_t* const model_parameter = (ccv_cnnp_model_parameter_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_parameter_t));
4236 model_parameter->super.isa = &ccv_cnnp_parameter_isa;
4237 model_parameter->super.input_size = 0;
4238 model_parameter->super.outputs = &model_parameter->output;
4239 model_parameter->super.output_size = 1;
4240 model_parameter->super.is_trainable = is_trainable;
4241 ccv_cnnp_model_copy_name(&model_parameter->super, name);
4242 model_parameter->weights.d = CCV_NNC_NO_TENSOR_SYMBOL;
4243 model_parameter->weights.graph = 0;
4244 model_parameter->weights_params = params;
4245 return (ccv_cnnp_model_t*)model_parameter;
4246}
4247
4248static ccv_cnnp_model_t* _ccv_cnnp_parameter_copy(const ccv_cnnp_model_t* const super, void* const context)
4249{
4250 const ccv_cnnp_model_parameter_t* const self = (const ccv_cnnp_model_parameter_t*)super;
4251 return ccv_cnnp_parameter(self->weights_params, self->init_bound, self->super.is_trainable, self->super.name);
4252}
4253
4254// MARK - Scalar Layer
4255
4256typedef struct {
4257 ccv_cnnp_model_t super;
4258 int type;
4259 int format;
4260 int datatype;
4261 float value;
4262 ccv_nnc_tensor_symbol_t output;
4263} ccv_cnnp_model_scalar_t;
4264
4265static void _ccv_cnnp_scalar_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
4266{
4267 PRINT(CCV_CLI_VERBOSE, "[cnnp_scalar_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_scalar_build] -\n"); fflush(stdout); } } while
(0)
;
4268 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 4268, __extension__ __PRETTY_FUNCTION__
); }))
;
4269 ccv_cnnp_model_scalar_t* const self = (ccv_cnnp_model_scalar_t*)super;
4270 ccv_nnc_tensor_param_t params = {
4271 .type = self->type,
4272 .format = self->format,
4273 .datatype = self->datatype,
4274 .dim = {
4275 1
4276 }
4277 };
4278 if (input_size > 0)
4279 {
4280 ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
4281 params.type = input_params.type;
4282 params.format = input_params.format;
4283 params.datatype = input_params.datatype;
4284 }
4285 outputs[0] = ccv_nnc_tensor_symbol_new(graph, params, 0);
4286 ccv_nnc_graph_exec_symbol_new(graph, CMD_SET_FORWARD(self->value)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={self->value,}}}, 0)
, 0, 0, outputs, 1, 0);
4287}
4288
static ccv_cnnp_model_t* _ccv_cnnp_scalar_copy(const ccv_cnnp_model_t* const super, void* const context);

// Virtual table for the scalar constant layer (stateless: no init/parameters).
static const ccv_cnnp_model_vtab_t ccv_cnnp_scalar_isa = {
	.build = _ccv_cnnp_scalar_build,
	.copy = _ccv_cnnp_scalar_copy,
};
4295
4296ccv_cnnp_model_t* ccv_cnnp_scalar(const int type, const int format, const int datatype, const float value, const char* const name)
4297{
4298 ccv_cnnp_model_scalar_t* const model_scalar = (ccv_cnnp_model_scalar_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_scalar_t));
4299 model_scalar->super.isa = &ccv_cnnp_scalar_isa;
4300 model_scalar->super.input_size = 0;
4301 model_scalar->super.outputs = &model_scalar->output;
4302 model_scalar->super.output_size = 1;
4303 ccv_cnnp_model_copy_name(&model_scalar->super, name);
4304 model_scalar->type = type;
4305 model_scalar->format = format;
4306 model_scalar->datatype = datatype;
4307 model_scalar->value = value;
4308 return (ccv_cnnp_model_t*)model_scalar;
4309}
4310
4311static ccv_cnnp_model_t* _ccv_cnnp_scalar_copy(const ccv_cnnp_model_t* const super, void* const context)
4312{
4313 const ccv_cnnp_model_scalar_t* const self = (const ccv_cnnp_model_scalar_t*)super;
4314 return ccv_cnnp_scalar(self->type, self->format, self->datatype, self->value, self->super.name);
4315}
4316
// MARK - Variable Layer

// A layer that introduces a fresh, uninitialized tensor symbol of fixed shape.
typedef struct {
	ccv_cnnp_model_t super; // Common model header; must be the first member.
	ccv_nnc_tensor_param_t params; // Full tensor parameters for the new symbol.
	ccv_nnc_tensor_symbol_t output; // Backing storage pointed to by super.outputs.
} ccv_cnnp_model_variable_t;
4324
4325static void _ccv_cnnp_variable_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
4326{
4327 PRINT(CCV_CLI_VERBOSE, "[cnnp_variable_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_variable_build] -\n"); fflush(stdout); } } while
(0)
;
4328 assert(input_size == 0)((void) sizeof ((input_size == 0) ? 1 : 0), __extension__ ({ if
(input_size == 0) ; else __assert_fail ("input_size == 0", "ccv_cnnp_model_addons.c"
, 4328, __extension__ __PRETTY_FUNCTION__); }))
;
4329 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 4329, __extension__ __PRETTY_FUNCTION__
); }))
;
4330 ccv_cnnp_model_variable_t* const self = (ccv_cnnp_model_variable_t*)super;
4331 outputs[0] = ccv_nnc_tensor_symbol_new(graph, self->params, 0);
4332}
4333
static ccv_cnnp_model_t* _ccv_cnnp_variable_copy(const ccv_cnnp_model_t* const super, void* const context);

// Virtual table for the variable layer (no init_states: contents are external).
static const ccv_cnnp_model_vtab_t ccv_cnnp_variable_isa = {
	.build = _ccv_cnnp_variable_build,
	.copy = _ccv_cnnp_variable_copy,
};
4340
4341ccv_cnnp_model_t* ccv_cnnp_variable(const ccv_nnc_tensor_param_t params, const char* const name)
4342{
4343 ccv_cnnp_model_variable_t* const model_variable = (ccv_cnnp_model_variable_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_variable_t));
4344 model_variable->super.isa = &ccv_cnnp_variable_isa;
4345 model_variable->super.input_size = 0;
4346 model_variable->super.outputs = &model_variable->output;
4347 model_variable->super.output_size = 1;
4348 ccv_cnnp_model_copy_name(&model_variable->super, name);
4349 model_variable->params = params;
4350 return (ccv_cnnp_model_t*)model_variable;
4351}
4352
4353static ccv_cnnp_model_t* _ccv_cnnp_variable_copy(const ccv_cnnp_model_t* const super, void* const context)
4354{
4355 const ccv_cnnp_model_variable_t* const self = (const ccv_cnnp_model_variable_t*)super;
4356 return ccv_cnnp_variable(self->params, self->super.name);
4357}
4358
// MARK - Send Layer

// A layer that transfers its input tensor onto a specific GPU device.
typedef struct {
	ccv_cnnp_model_t super; // Common model header; must be the first member.
	int device_id; // Destination GPU device index (asserted >= 0 at creation).
	ccv_nnc_tensor_symbol_t output; // Backing storage pointed to by super.outputs.
} ccv_cnnp_model_send_t;
4366
4367static void _ccv_cnnp_send_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
4368{
4369 ccv_cnnp_model_send_t* const self = (ccv_cnnp_model_send_t*)super;
4370 PRINT(CCV_CLI_VERBOSE, "[cnnp_send_build] - device_id: %d\n", self->device_id)do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_send_build] - device_id: %d\n", self->device_id
); fflush(stdout); } } while (0)
;
4371 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 4371, __extension__ __PRETTY_FUNCTION__); }))
;
4372 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 4372, __extension__ __PRETTY_FUNCTION__
); }))
;
4373 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
4374 params.type = (params.type & ~0x3) | CCV_TENSOR_GPU_MEMORY;
4375 CCV_TENSOR_SET_DEVICE_ID(params.type, self->device_id)(params.type) = (((params.type) & ~0xfff00) | (((self->
device_id) & 0xfff) << 8))
;
4376 outputs[0] = ccv_nnc_tensor_symbol_new(graph, params, 0);
4377 ccv_nnc_graph_exec_symbol_new(graph, CMD_DATA_TRANSFER_FORWARD()ccv_nnc_cmd(CCV_NNC_DATA_TRANSFER_FORWARD, 0, ccv_nnc_cmd_auto
, 0)
, inputs, 1, outputs, 1, "send");
4378}
4379
static ccv_cnnp_model_t* _ccv_cnnp_send_copy(const ccv_cnnp_model_t* const super, void* const context);

// Virtual table for the send (device transfer) layer.
static const ccv_cnnp_model_vtab_t ccv_cnnp_send_isa = {
	.build = _ccv_cnnp_send_build,
	.copy = _ccv_cnnp_send_copy,
};
4386
4387ccv_cnnp_model_t* ccv_cnnp_send(const int device_id, const char* const name)
4388{
4389 assert(device_id >= 0)((void) sizeof ((device_id >= 0) ? 1 : 0), __extension__ (
{ if (device_id >= 0) ; else __assert_fail ("device_id >= 0"
, "ccv_cnnp_model_addons.c", 4389, __extension__ __PRETTY_FUNCTION__
); }))
;
4390 ccv_cnnp_model_send_t* const model_send = (ccv_cnnp_model_send_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_send_t));
4391 model_send->super.isa = &ccv_cnnp_send_isa;
4392 model_send->super.input_size = 1;
4393 model_send->super.outputs = &model_send->output;
4394 model_send->super.output_size = 1;
4395 ccv_cnnp_model_copy_name(&model_send->super, name);
4396 model_send->device_id = device_id;
4397 return (ccv_cnnp_model_t*)model_send;
4398}
4399
4400static ccv_cnnp_model_t* _ccv_cnnp_send_copy(const ccv_cnnp_model_t* const super, void* const context)
4401{
4402 const ccv_cnnp_model_send_t* const self = (const ccv_cnnp_model_send_t*)super;
4403 return ccv_cnnp_send(self->device_id, self->super.name);
4404}
4405
// MARK - Move Layer

// A layer that writes the contents of its first input into its second input's
// tensor (the second input doubles as the output symbol).
typedef struct {
	ccv_cnnp_model_t super; // Common model header; must be the first member.
	ccv_nnc_tensor_symbol_t output; // Backing storage pointed to by super.outputs.
} ccv_cnnp_model_move_t;
4412
4413static void _ccv_cnnp_move_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
4414{
4415 PRINT(CCV_CLI_VERBOSE, "[cnnp_move_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_move_build] -\n"); fflush(stdout); } } while
(0)
;
4416 assert(input_size == 2)((void) sizeof ((input_size == 2) ? 1 : 0), __extension__ ({ if
(input_size == 2) ; else __assert_fail ("input_size == 2", "ccv_cnnp_model_addons.c"
, 4416, __extension__ __PRETTY_FUNCTION__); }))
;
4417 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 4417, __extension__ __PRETTY_FUNCTION__
); }))
;
4418 outputs[0] = inputs[1];
4419 ccv_nnc_graph_exec_symbol_new(graph, CMD_FORMAT_TRANSFORM_FORWARD()ccv_nnc_cmd(CCV_NNC_FORMAT_TRANSFORM_FORWARD, 0, ccv_nnc_cmd_auto
, 0)
, inputs, 1, outputs, 1, "move");
4420}
4421
static ccv_cnnp_model_t* _ccv_cnnp_move_copy(const ccv_cnnp_model_t* const super, void* const context);

// Virtual table for the move layer.
static const ccv_cnnp_model_vtab_t ccv_cnnp_move_isa = {
	.build = _ccv_cnnp_move_build,
	.copy = _ccv_cnnp_move_copy,
};
4428
4429ccv_cnnp_model_t* ccv_cnnp_move(const char* const name)
4430{
4431 ccv_cnnp_model_move_t* const model_move = (ccv_cnnp_model_move_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_move_t));
4432 model_move->super.isa = &ccv_cnnp_move_isa;
4433 model_move->super.input_size = 2;
4434 model_move->super.outputs = &model_move->output;
4435 model_move->super.output_size = 1;
4436 ccv_cnnp_model_copy_name(&model_move->super, name);
4437 return (ccv_cnnp_model_t*)model_move;
4438}
4439
4440static ccv_cnnp_model_t* _ccv_cnnp_move_copy(const ccv_cnnp_model_t* const super, void* const context)
4441{
4442 const ccv_cnnp_model_move_t* const self = (const ccv_cnnp_model_move_t*)super;
4443 return ccv_cnnp_move(self->super.name);
4444}
4445
// MARK - "Making" Contiguous Layer

// A layer that materializes a contiguous copy of its input only when the input
// is a non-packed (e.g. permuted) alias; otherwise it passes the input through.
typedef struct {
	ccv_cnnp_model_t super; // Common model header; must be the first member.
	ccv_nnc_tensor_symbol_t output; // Backing storage pointed to by super.outputs.
} ccv_cnnp_model_contiguous_t;
4452
4453static void _ccv_cnnp_contiguous_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
4454{
4455 PRINT(CCV_CLI_VERBOSE, "[cnnp_contiguous_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_contiguous_build] -\n"); fflush(stdout); } }
while (0)
;
4456 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 4456, __extension__ __PRETTY_FUNCTION__); }))
;
4457 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 4457, __extension__ __PRETTY_FUNCTION__
); }))
;
4458 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
4459 ccv_nnc_tensor_symbol_t to = ccv_nnc_tensor_symbol_alias_to(graph, inputs[0]);
4460 if (to.d == CCV_NNC_NO_TENSOR_SYMBOL) // If we are not reshape an alias, it is straightforward.
4461 {
4462 outputs[0] = inputs[0];
4463 return;
4464 }
4465 // Otherwise, we need to check its stride to know if it is contiguous.
4466 int old_stride[CCV_NNC_MAX_DIM_ALLOC(12)];
4467 ccv_nnc_tensor_symbol_alias_params(graph, inputs[0], 0, old_stride);
4468 // We identify permute by checking if the stride is not in descending order.
4469 // This also covered "permute" through reshape, rather than using ccv_cnnp_permute directly.
4470 if (ccv_nnc_is_tensor_stride_packed(old_stride, params.dim))
4471 {
4472 outputs[0] = inputs[0];
4473 return;
4474 }
4475 outputs[0] = ccv_nnc_tensor_symbol_new(graph, params, 0);
4476 ccv_nnc_graph_exec_symbol_t make_contiguous = ccv_nnc_graph_exec_symbol_new(graph, CMD_FORMAT_TRANSFORM_FORWARD()ccv_nnc_cmd(CCV_NNC_FORMAT_TRANSFORM_FORWARD, 0, ccv_nnc_cmd_auto
, 0)
, inputs, 1, outputs, 1, "contiguous");
4477 ccv_nnc_graph_exec_symbol_set_flags(graph, make_contiguous, CCV_NNC_GRAPH_EXEC_DISABLE_OPT);
4478}
4479
static ccv_cnnp_model_t* _ccv_cnnp_contiguous_copy(const ccv_cnnp_model_t* const super, void* const context);

// Virtual table for the contiguous layer.
static const ccv_cnnp_model_vtab_t ccv_cnnp_contiguous_isa = {
	.build = _ccv_cnnp_contiguous_build,
	.copy = _ccv_cnnp_contiguous_copy,
};
4486
4487ccv_cnnp_model_t* ccv_cnnp_contiguous(const char* const name)
4488{
4489 ccv_cnnp_model_contiguous_t* const model_contiguous = (ccv_cnnp_model_contiguous_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_contiguous_t));
4490 model_contiguous->super.isa = &ccv_cnnp_contiguous_isa;
4491 model_contiguous->super.input_size = 1;
4492 model_contiguous->super.outputs = &model_contiguous->output;
4493 model_contiguous->super.output_size = 1;
4494 ccv_cnnp_model_copy_name(&model_contiguous->super, name);
4495 return (ccv_cnnp_model_t*)model_contiguous;
4496}
4497
4498static ccv_cnnp_model_t* _ccv_cnnp_contiguous_copy(const ccv_cnnp_model_t* const super, void* const context)
4499{
4500 const ccv_cnnp_model_contiguous_t* const self = (const ccv_cnnp_model_contiguous_t*)super;
4501 return ccv_cnnp_contiguous(self->super.name);
4502}
4503
// MARK - "Making" Copy Layer

// A layer that materializes a copy of its input when the input is an alias;
// non-alias inputs are passed through unchanged.
typedef struct {
	ccv_cnnp_model_t super; // Common model header; must be the first member.
	ccv_nnc_tensor_symbol_t output; // Backing storage pointed to by super.outputs.
} ccv_cnnp_model_copy_t;
4510
4511static void _ccv_cnnp_copy_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
4512{
4513 PRINT(CCV_CLI_VERBOSE, "[cnnp_copy_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_copy_build] -\n"); fflush(stdout); } } while
(0)
;
4514 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 4514, __extension__ __PRETTY_FUNCTION__); }))
;
4515 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 4515, __extension__ __PRETTY_FUNCTION__
); }))
;
4516 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
4517 ccv_nnc_tensor_symbol_t to = ccv_nnc_tensor_symbol_alias_to(graph, inputs[0]);
4518 if (to.d == CCV_NNC_NO_TENSOR_SYMBOL) // If we are not reshape an alias, it is straightforward.
4519 {
4520 outputs[0] = inputs[0];
4521 return;
4522 }
4523 outputs[0] = ccv_nnc_tensor_symbol_new(graph, params, 0);
4524 ccv_nnc_graph_exec_symbol_t make_contiguous = ccv_nnc_graph_exec_symbol_new(graph, CMD_FORMAT_TRANSFORM_FORWARD()ccv_nnc_cmd(CCV_NNC_FORMAT_TRANSFORM_FORWARD, 0, ccv_nnc_cmd_auto
, 0)
, inputs, 1, outputs, 1, "contiguous");
4525 ccv_nnc_graph_exec_symbol_set_flags(graph, make_contiguous, CCV_NNC_GRAPH_EXEC_DISABLE_OPT);
4526}
4527
static ccv_cnnp_model_t* _ccv_cnnp_copy_copy(const ccv_cnnp_model_t* const super, void* const context);

// Virtual table for the copy layer.
static const ccv_cnnp_model_vtab_t ccv_cnnp_copy_isa = {
	.build = _ccv_cnnp_copy_build,
	.copy = _ccv_cnnp_copy_copy,
};
4534
4535ccv_cnnp_model_t* ccv_cnnp_copy(const char* const name)
4536{
4537 ccv_cnnp_model_copy_t* const model_copy = (ccv_cnnp_model_copy_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_copy_t));
4538 model_copy->super.isa = &ccv_cnnp_copy_isa;
4539 model_copy->super.input_size = 1;
4540 model_copy->super.outputs = &model_copy->output;
4541 model_copy->super.output_size = 1;
4542 ccv_cnnp_model_copy_name(&model_copy->super, name);
4543 return (ccv_cnnp_model_t*)model_copy;
4544}
4545
4546static ccv_cnnp_model_t* _ccv_cnnp_copy_copy(const ccv_cnnp_model_t* const super, void* const context)
4547{
4548 const ccv_cnnp_model_copy_t* const self = (const ccv_cnnp_model_copy_t*)super;
4549 return ccv_cnnp_copy(self->super.name);
4550}
4551
// MARK - All-To-All Layer

// A collective layer exchanging equal slices of each input along one axis.
typedef struct {
	ccv_cnnp_model_t super; // Common model header; must be the first member.
	int axis; // Axis along which the exchange is performed (must divide evenly by count).
	ccv_nnc_tensor_symbol_t outputs[1]; // Flexible tail: allocated with `count` entries.
} ccv_cnnp_model_all_to_all_t;
4559
4560static void _ccv_cnnp_all_to_all_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
4561{
4562 ccv_cnnp_model_all_to_all_t* const self = (ccv_cnnp_model_all_to_all_t*)super;
4563 PRINT(CCV_CLI_VERBOSE, "[cnnp_all_to_all_build] - axis: %d\n", self->axis)do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_all_to_all_build] - axis: %d\n", self->axis
); fflush(stdout); } } while (0)
;
4564 assert(input_size == self->super.input_size)((void) sizeof ((input_size == self->super.input_size) ? 1
: 0), __extension__ ({ if (input_size == self->super.input_size
) ; else __assert_fail ("input_size == self->super.input_size"
, "ccv_cnnp_model_addons.c", 4564, __extension__ __PRETTY_FUNCTION__
); }))
;
4565 assert(output_size == self->super.output_size)((void) sizeof ((output_size == self->super.output_size) ?
1 : 0), __extension__ ({ if (output_size == self->super.output_size
) ; else __assert_fail ("output_size == self->super.output_size"
, "ccv_cnnp_model_addons.c", 4565, __extension__ __PRETTY_FUNCTION__
); }))
;
4566 assert(input_size == output_size)((void) sizeof ((input_size == output_size) ? 1 : 0), __extension__
({ if (input_size == output_size) ; else __assert_fail ("input_size == output_size"
, "ccv_cnnp_model_addons.c", 4566, __extension__ __PRETTY_FUNCTION__
); }))
;
4567 assert(input_size > 0)((void) sizeof ((input_size > 0) ? 1 : 0), __extension__ (
{ if (input_size > 0) ; else __assert_fail ("input_size > 0"
, "ccv_cnnp_model_addons.c", 4567, __extension__ __PRETTY_FUNCTION__
); }))
;
4568 const ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
4569 const int nd = ccv_nnc_tensor_nd(input_params.dim);
4570 assert(self->axis >= 0 && self->axis < nd)((void) sizeof ((self->axis >= 0 && self->axis
< nd) ? 1 : 0), __extension__ ({ if (self->axis >= 0
&& self->axis < nd) ; else __assert_fail ("self->axis >= 0 && self->axis < nd"
, "ccv_cnnp_model_addons.c", 4570, __extension__ __PRETTY_FUNCTION__
); }))
;
4571 assert(input_params.dim[self->axis] % input_size == 0)((void) sizeof ((input_params.dim[self->axis] % input_size
== 0) ? 1 : 0), __extension__ ({ if (input_params.dim[self->
axis] % input_size == 0) ; else __assert_fail ("input_params.dim[self->axis] % input_size == 0"
, "ccv_cnnp_model_addons.c", 4571, __extension__ __PRETTY_FUNCTION__
); }))
;
4572 int i;
4573 for (i = 0; i < input_size; i++)
4574 {
4575 const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[i]);
4576 assert(params.format == input_params.format)((void) sizeof ((params.format == input_params.format) ? 1 : 0
), __extension__ ({ if (params.format == input_params.format)
; else __assert_fail ("params.format == input_params.format"
, "ccv_cnnp_model_addons.c", 4576, __extension__ __PRETTY_FUNCTION__
); }))
;
4577 assert(params.datatype == input_params.datatype)((void) sizeof ((params.datatype == input_params.datatype) ? 1
: 0), __extension__ ({ if (params.datatype == input_params.datatype
) ; else __assert_fail ("params.datatype == input_params.datatype"
, "ccv_cnnp_model_addons.c", 4577, __extension__ __PRETTY_FUNCTION__
); }))
;
4578 assert(CCV_TENSOR_GET_MEMORY(params.type) == CCV_TENSOR_GET_MEMORY(input_params.type))((void) sizeof ((((params.type) & 0x3) == ((input_params.
type) & 0x3)) ? 1 : 0), __extension__ ({ if (((params.type
) & 0x3) == ((input_params.type) & 0x3)) ; else __assert_fail
("CCV_TENSOR_GET_MEMORY(params.type) == CCV_TENSOR_GET_MEMORY(input_params.type)"
, "ccv_cnnp_model_addons.c", 4578, __extension__ __PRETTY_FUNCTION__
); }))
;
4579 assert(memcmp(params.dim, input_params.dim, sizeof(input_params.dim)) == 0)((void) sizeof ((memcmp(params.dim, input_params.dim, sizeof(
input_params.dim)) == 0) ? 1 : 0), __extension__ ({ if (memcmp
(params.dim, input_params.dim, sizeof(input_params.dim)) == 0
) ; else __assert_fail ("memcmp(params.dim, input_params.dim, sizeof(input_params.dim)) == 0"
, "ccv_cnnp_model_addons.c", 4579, __extension__ __PRETTY_FUNCTION__
); }))
;
4580 outputs[i] = ccv_nnc_tensor_symbol_new(graph, params, 0);
4581 }
4582 ccv_nnc_graph_exec_symbol_new(graph, CMD_COMM_ALL_TO_ALL_FORWARD(self->axis)ccv_nnc_cmd(CCV_NNC_COMM_ALL_TO_ALL_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.all_to_all={.axis=self->axis}}), 0
)
, inputs, input_size, outputs, output_size, "all_to_all");
4583}
4584
static ccv_cnnp_model_t* _ccv_cnnp_all_to_all_copy(const ccv_cnnp_model_t* const super, void* const context);

// Virtual table for the all-to-all collective layer.
static const ccv_cnnp_model_vtab_t ccv_cnnp_all_to_all_isa = {
	.build = _ccv_cnnp_all_to_all_build,
	.copy = _ccv_cnnp_all_to_all_copy,
};
4591
4592ccv_cnnp_model_t* ccv_cnnp_all_to_all(const int count, const int axis, const char* const name)
4593{
4594 assert(count > 0)((void) sizeof ((count > 0) ? 1 : 0), __extension__ ({ if (
count > 0) ; else __assert_fail ("count > 0", "ccv_cnnp_model_addons.c"
, 4594, __extension__ __PRETTY_FUNCTION__); }))
;
4595 assert(axis >= 0)((void) sizeof ((axis >= 0) ? 1 : 0), __extension__ ({ if (
axis >= 0) ; else __assert_fail ("axis >= 0", "ccv_cnnp_model_addons.c"
, 4595, __extension__ __PRETTY_FUNCTION__); }))
;
4596 ccv_cnnp_model_all_to_all_t* const model_all_to_all = (ccv_cnnp_model_all_to_all_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_all_to_all_t) + sizeof(ccv_nnc_tensor_symbol_t) * (count - 1));
4597 model_all_to_all->super.isa = &ccv_cnnp_all_to_all_isa;
4598 model_all_to_all->super.input_size = count;
4599 model_all_to_all->super.outputs = model_all_to_all->outputs;
4600 model_all_to_all->super.output_size = count;
4601 model_all_to_all->axis = axis;
4602 ccv_cnnp_model_copy_name(&model_all_to_all->super, name);
4603 return (ccv_cnnp_model_t*)model_all_to_all;
4604}
4605
4606static ccv_cnnp_model_t* _ccv_cnnp_all_to_all_copy(const ccv_cnnp_model_t* const super, void* const context)
4607{
4608 const ccv_cnnp_model_all_to_all_t* const self = (const ccv_cnnp_model_all_to_all_t*)super;
4609 return ccv_cnnp_all_to_all(self->super.output_size, self->axis, self->super.name);
4610}
4611
// MARK - Scaled-Dot Product Attention Layer

typedef struct {
	ccv_cnnp_model_t super; // Common model header; must be the first member.
	ccv_nnc_tensor_symbol_t output; // Backing storage pointed to by super.outputs.
	ccv_nnc_tensor_symbol_t weights; // Fused output-projection weights; only created when fused_unify_head_weights.
	ccv_nnc_tensor_symbol_t bias; // Fused output-projection bias; only created when !no_bias.
	float scale; // Softmax scale forwarded to the SDPA command.
	int is_causal; // Non-zero: apply causal masking in the command.
	int has_attn_mask; // Non-zero: inputs[3] carries an attention mask (mutually exclusive with is_varlen).
	int is_varlen; // Non-zero: variable-length mode; inputs[3]/inputs[4] are sequence offsets.
	int max_seqlen_q; // Maximum query sequence length (varlen mode).
	int max_seqlen_kv; // Maximum key/value sequence length (varlen mode).
	int flags; // Extra flags forwarded to the SDPA command.
	int fused_unify_head_weights; // Non-zero: fuse the multi-head output projection into this op.
	int no_bias; // Non-zero: skip the projection bias even when fused.
} ccv_cnnp_model_scaled_dot_product_attention_t;
4629
// Build the SDPA subgraph. Three mutually exclusive configurations:
//   - varlen: q/k/v plus two sequence-offset inputs; 8-slot input list with
//     empty placeholders where attn_mask/weights/bias would sit.
//   - fused_unify_head_weights: lazily creates hEv x hEv projection weights
//     (and an hEv bias unless no_bias) and passes them into the command.
//   - plain: q/k/v only (attn_mask optionally bound from inputs[3]).
// All three emit (output, saved_softmax_lse, saved_v_proj) — the latter two are
// retained for the backward pass; saved_v_proj stays NO_TENSOR_SYMBOL unless fused.
static void _ccv_cnnp_scaled_dot_product_attention_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
{
	PRINT(CCV_CLI_VERBOSE, "[cnnp_scaled_dot_product_attention_build] -\n");
	assert(output_size == 1);
	ccv_cnnp_model_scaled_dot_product_attention_t* const self = (ccv_cnnp_model_scaled_dot_product_attention_t*)super;
	// Input count depends on configuration: 5 (varlen), 4 (attn mask), or 3.
	assert(input_size == (self->is_varlen ? 5 : (self->has_attn_mask ? 4 : 3)));
	assert(!self->is_varlen || !self->has_attn_mask);
	assert(!self->is_varlen || !self->fused_unify_head_weights);
	const ccv_nnc_tensor_param_t q_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
	const ccv_nnc_tensor_param_t k_params = ccv_nnc_tensor_symbol_params(graph, inputs[1]);
	const ccv_nnc_tensor_param_t v_params = ccv_nnc_tensor_symbol_params(graph, inputs[2]);
	const ccv_nnc_tensor_param_t q_seq_offsets_params = self->is_varlen ? ccv_nnc_tensor_symbol_params(graph, inputs[3]) : (ccv_nnc_tensor_param_t){};
	const ccv_nnc_tensor_param_t kv_seq_offsets_params = self->is_varlen ? ccv_nnc_tensor_symbol_params(graph, inputs[4]) : (ccv_nnc_tensor_param_t){};
	const int v_nd = ccv_nnc_tensor_nd(v_params.dim);
	assert(v_nd == 3 || v_nd == 4); // v is (batch, seq, Ev) or (batch, seq, heads, Ev).
	// hEv = heads * Ev: flattened per-token value width, used for the fused projection.
	const int hEv = (v_nd == 3 ? 1 : v_params.dim[2]) * v_params.dim[v_nd - 1];
	ccv_nnc_tensor_param_t weights_params = q_params;
	memset(weights_params.dim, 0, sizeof(weights_params.dim));
	weights_params.dim[0] = hEv;
	weights_params.dim[1] = hEv;
	ccv_nnc_tensor_param_t bias_params = q_params;
	memset(bias_params.dim, 0, sizeof(bias_params.dim));
	bias_params.dim[0] = hEv;
	// Assemble the SDPA command from the stored hyper-parameters.
	ccv_nnc_cmd_t cmd = {0};
	cmd.cmd = CCV_NNC_SCALED_DOT_PRODUCT_ATTENTION_FORWARD;
	cmd.info.scaled_dot_product_attention.scale = self->scale;
	cmd.info.scaled_dot_product_attention.is_causal = self->is_causal;
	cmd.info.scaled_dot_product_attention.is_varlen = self->is_varlen;
	cmd.info.scaled_dot_product_attention.max_seqlen_q = self->max_seqlen_q;
	cmd.info.scaled_dot_product_attention.max_seqlen_kv = self->max_seqlen_kv;
	cmd.info.scaled_dot_product_attention.flags = self->flags;
	ccv_nnc_tensor_param_t output_params[3];
	ccv_nnc_tensor_symbol_t output;
	ccv_nnc_tensor_symbol_t saved_softmax_lse;
	ccv_nnc_tensor_symbol_t saved_v_proj = NO_TENSOR_SYMBOL;
	ccv_nnc_tensor_symbol_t attn_mask = NO_TENSOR_SYMBOL;
	ccv_nnc_tensor_symbol_t weights = NO_TENSOR_SYMBOL;
	ccv_nnc_tensor_symbol_t bias = NO_TENSOR_SYMBOL;
	if (self->has_attn_mask)
		attn_mask = inputs[3];
	if (self->is_varlen)
	{
		// Varlen: 8-slot parameter list; slots 3-5 are empty placeholders, the
		// sequence-offset tensors ride in slots 6-7. Only 2 outputs are shaped.
		ccv_nnc_hint_tensor_auto(cmd, (ccv_nnc_tensor_param_t []){
			q_params,
			k_params,
			v_params,
			(ccv_nnc_tensor_param_t){},
			(ccv_nnc_tensor_param_t){},
			(ccv_nnc_tensor_param_t){},
			q_seq_offsets_params,
			kv_seq_offsets_params,
		}, 8, ccv_nnc_no_hint, output_params, 2);
		output = ccv_nnc_tensor_symbol_new(graph, output_params[0], 0);
		saved_softmax_lse = ccv_nnc_tensor_symbol_new(graph, output_params[1], 0);
		ccv_nnc_graph_exec_symbol_new(graph, cmd, TENSOR_SYMBOL_LIST(inputs[0], inputs[1], inputs[2], NO_TENSOR_SYMBOL, NO_TENSOR_SYMBOL, NO_TENSOR_SYMBOL, inputs[3], inputs[4]), TENSOR_SYMBOL_LIST(output, saved_softmax_lse, saved_v_proj), "scaled_dot_product_attention");
	} else if (self->fused_unify_head_weights)
	{
		// Lazily create the fused projection parameters on first build.
		if (!self->weights.graph)
			self->weights = ccv_nnc_tensor_symbol_new(graph, weights_params, "weights");
		assert(self->weights.graph == graph);
		weights = ccv_cnnp_model_get_symbol(super, self->weights);
		if (!self->no_bias)
		{
			if (!self->bias.graph)
				self->bias = ccv_nnc_tensor_symbol_new(graph, bias_params, "bias");
			assert(self->bias.graph == graph);
			bias = ccv_cnnp_model_get_symbol(super, self->bias);
		}
		// Fused path shapes all 3 outputs, including the saved v projection.
		ccv_nnc_hint_tensor_auto(cmd, (ccv_nnc_tensor_param_t []){
			q_params,
			k_params,
			v_params,
			(ccv_nnc_tensor_param_t){},
			weights_params,
			bias_params,
		}, 6, ccv_nnc_no_hint, output_params, 3);
		output = ccv_nnc_tensor_symbol_new(graph, output_params[0], 0);
		saved_softmax_lse = ccv_nnc_tensor_symbol_new(graph, output_params[1], 0);
		saved_v_proj = ccv_nnc_tensor_symbol_new(graph, output_params[2], 0);
		ccv_nnc_graph_exec_symbol_new(graph, cmd, TENSOR_SYMBOL_LIST(inputs[0], inputs[1], inputs[2], attn_mask, weights, bias), TENSOR_SYMBOL_LIST(output, saved_softmax_lse, saved_v_proj), "scaled_dot_product_attention");
	} else {
		// Plain path: shapes from q/k/v only; attn_mask (if any) is still bound below.
		ccv_nnc_hint_tensor_auto(cmd, (ccv_nnc_tensor_param_t []){
			q_params,
			k_params,
			v_params,
		}, 3, ccv_nnc_no_hint, output_params, 2);
		output = ccv_nnc_tensor_symbol_new(graph, output_params[0], 0);
		saved_softmax_lse = ccv_nnc_tensor_symbol_new(graph, output_params[1], 0);
		ccv_nnc_graph_exec_symbol_new(graph, cmd, TENSOR_SYMBOL_LIST(inputs[0], inputs[1], inputs[2], attn_mask, weights, bias), TENSOR_SYMBOL_LIST(output, saved_softmax_lse, saved_v_proj), "scaled_dot_product_attention");
	}
	outputs[0] = output;
}
4722
4723static void _ccv_cnnp_scaled_dot_product_attention_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
4724{
4725 ccv_cnnp_model_scaled_dot_product_attention_t* const self = (ccv_cnnp_model_scaled_dot_product_attention_t*)super;
4726 if (self->weights.graph)
4727 {
4728 assert(self->fused_unify_head_weights)((void) sizeof ((self->fused_unify_head_weights) ? 1 : 0),
__extension__ ({ if (self->fused_unify_head_weights) ; else
__assert_fail ("self->fused_unify_head_weights", "ccv_cnnp_model_addons.c"
, 4728, __extension__ __PRETTY_FUNCTION__); }))
;
4729 const ccv_nnc_tensor_param_t weight_params = ccv_nnc_tensor_symbol_params(graph, self->weights);
4730 const int c = weight_params.dim[1];
4731 const float std = sqrtf(2) / sqrtf(c);
4732 const float bound = sqrtf(3) * std;
4733 initializer(context, CMD_RANDOM_UNIFORM_FORWARD(-bound, bound)ccv_nnc_cmd(CCV_NNC_RANDOM_UNIFORM_FORWARD, 0, (ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.blas={.a={-bound, bound}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->weights);
4734 if (self->bias.graph)
4735 initializer(context, CMD_SET_FORWARD(0)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={0,}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->bias);
4736 }
4737}
4738
4739static void _ccv_cnnp_scaled_dot_product_attention_add_to_parameter(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const parameters, const int is_trainable)
4740{
4741 ccv_cnnp_model_scaled_dot_product_attention_t* const self = (ccv_cnnp_model_scaled_dot_product_attention_t*)super;
4742 if (self->weights.graph)
4743 {
4744 assert(self->fused_unify_head_weights)((void) sizeof ((self->fused_unify_head_weights) ? 1 : 0),
__extension__ ({ if (self->fused_unify_head_weights) ; else
__assert_fail ("self->fused_unify_head_weights", "ccv_cnnp_model_addons.c"
, 4744, __extension__ __PRETTY_FUNCTION__); }))
;
4745 add_to_array(parameters, self->weights, is_trainable);
4746 if (self->bias.graph)
4747 add_to_array(parameters, self->bias, is_trainable);
4748 }
4749}
4750
4751static ccv_cnnp_model_t* _ccv_cnnp_scaled_dot_product_attention_copy(const ccv_cnnp_model_t* const super, void* const context);
4752
4753static const ccv_cnnp_model_vtab_t ccv_cnnp_scaled_dot_product_attention_isa = {
4754 .build = _ccv_cnnp_scaled_dot_product_attention_build,
4755 .copy = _ccv_cnnp_scaled_dot_product_attention_copy,
4756};
4757
4758static const ccv_cnnp_model_vtab_t ccv_cnnp_scaled_dot_product_attention_fused_isa = {
4759 .build = _ccv_cnnp_scaled_dot_product_attention_build,
4760 .init_states = _ccv_cnnp_scaled_dot_product_attention_init_states,
4761 .add_to_parameter = _ccv_cnnp_scaled_dot_product_attention_add_to_parameter,
4762 .copy = _ccv_cnnp_scaled_dot_product_attention_copy,
4763};
4764
4765ccv_cnnp_model_t* ccv_cnnp_scaled_dot_product_attention(const float scale, const int is_causal, const int has_attn_mask, const int is_varlen, const int max_seqlen_q, const int max_seqlen_kv, const int flags, const int fused_unify_head_weights, const int no_bias, const int is_trainable, const char* const name)
4766{
4767 assert(!is_varlen || !has_attn_mask)((void) sizeof ((!is_varlen || !has_attn_mask) ? 1 : 0), __extension__
({ if (!is_varlen || !has_attn_mask) ; else __assert_fail ("!is_varlen || !has_attn_mask"
, "ccv_cnnp_model_addons.c", 4767, __extension__ __PRETTY_FUNCTION__
); }))
;
4768 assert(!is_varlen || !fused_unify_head_weights)((void) sizeof ((!is_varlen || !fused_unify_head_weights) ? 1
: 0), __extension__ ({ if (!is_varlen || !fused_unify_head_weights
) ; else __assert_fail ("!is_varlen || !fused_unify_head_weights"
, "ccv_cnnp_model_addons.c", 4768, __extension__ __PRETTY_FUNCTION__
); }))
;
4769 ccv_cnnp_model_scaled_dot_product_attention_t* const model_scaled_dot_product_attention = (ccv_cnnp_model_scaled_dot_product_attention_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_scaled_dot_product_attention_t));
4770 model_scaled_dot_product_attention->super.isa = fused_unify_head_weights ? &ccv_cnnp_scaled_dot_product_attention_fused_isa : &ccv_cnnp_scaled_dot_product_attention_isa;
4771 model_scaled_dot_product_attention->super.input_size = is_varlen ? 5 : (has_attn_mask ? 4 : 3);
4772 model_scaled_dot_product_attention->super.outputs = &model_scaled_dot_product_attention->output;
4773 model_scaled_dot_product_attention->super.output_size = 1;
4774 model_scaled_dot_product_attention->super.is_trainable = is_trainable;
4775 ccv_cnnp_model_copy_name(&model_scaled_dot_product_attention->super, name);
4776 model_scaled_dot_product_attention->weights.d = CCV_NNC_NO_TENSOR_SYMBOL;
4777 model_scaled_dot_product_attention->weights.graph = 0;
4778 model_scaled_dot_product_attention->bias.d = CCV_NNC_NO_TENSOR_SYMBOL;
4779 model_scaled_dot_product_attention->bias.graph = 0;
4780 model_scaled_dot_product_attention->scale = scale;
4781 model_scaled_dot_product_attention->is_causal = is_causal;
4782 model_scaled_dot_product_attention->has_attn_mask = has_attn_mask;
4783 model_scaled_dot_product_attention->is_varlen = is_varlen;
4784 model_scaled_dot_product_attention->max_seqlen_q = max_seqlen_q;
4785 model_scaled_dot_product_attention->max_seqlen_kv = max_seqlen_kv;
4786 model_scaled_dot_product_attention->flags = flags;
4787 model_scaled_dot_product_attention->fused_unify_head_weights = fused_unify_head_weights;
4788 model_scaled_dot_product_attention->no_bias = no_bias;
4789 return (ccv_cnnp_model_t*)model_scaled_dot_product_attention;
4790}
4791
4792static ccv_cnnp_model_t* _ccv_cnnp_scaled_dot_product_attention_copy(const ccv_cnnp_model_t* const super, void* const context)
4793{
4794 const ccv_cnnp_model_scaled_dot_product_attention_t* const self = (const ccv_cnnp_model_scaled_dot_product_attention_t*)super;
4795 return ccv_cnnp_scaled_dot_product_attention(self->scale, self->is_causal, self->has_attn_mask, self->is_varlen, self->max_seqlen_q, self->max_seqlen_kv, self->flags, self->fused_unify_head_weights, self->no_bias, self->super.is_trainable, self->super.name);
4796}
4797
4798// MARK - Debug Layer
4799
4800typedef struct {
4801 ccv_cnnp_model_t super;
4802 ccv_nnc_tensor_symbol_t output;
4803 ccv_cnnp_model_debug_f debugger;
4804 ccv_cnnp_model_debug_context_deinit_f debug_deinit;
4805 ccv_cnnp_model_debug_context_copy_f debug_copy;
4806 void* debug_context;
4807} ccv_cnnp_model_debug_t;
4808
4809static int _ccv_cnnp_debug_exec(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
4810{
4811 if (cmd.cmd == CCV_NNC_CUSTOM_BACKWARD)
4812 {
4813 assert(0 && "don't support debug backward pass yet")((void) sizeof ((0 && "don't support debug backward pass yet"
) ? 1 : 0), __extension__ ({ if (0 && "don't support debug backward pass yet"
) ; else __assert_fail ("0 && \"don't support debug backward pass yet\""
, "ccv_cnnp_model_addons.c", 4813, __extension__ __PRETTY_FUNCTION__
); }))
;
4814 }
4815 ccv_cnnp_model_debug_t* const self = (ccv_cnnp_model_debug_t*)cmd.data;
4816 self->debugger(inputs, input_size, stream_context, self->debug_context);
4817 return CCV_NNC_EXEC_SUCCESS;
4818}
4819
4820static ccv_nnc_cmd_vtab_t ccv_cnnp_debug_exec_isa = {
4821 .exec = _ccv_cnnp_debug_exec
4822};
4823
4824static void _ccv_cnnp_debug_build(ccv_cnnp_model_t* const self, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
4825{
4826 PRINT(CCV_CLI_VERBOSE, "[cnnp_debug_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_debug_build] -\n"); fflush(stdout); } } while
(0)
;
4827 assert(input_size >= 1)((void) sizeof ((input_size >= 1) ? 1 : 0), __extension__ (
{ if (input_size >= 1) ; else __assert_fail ("input_size >= 1"
, "ccv_cnnp_model_addons.c", 4827, __extension__ __PRETTY_FUNCTION__
); }))
;
4828 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 4828, __extension__ __PRETTY_FUNCTION__
); }))
;
4829 ccv_nnc_tensor_symbol_t to = ccv_nnc_tensor_symbol_alias_to(graph, inputs[0]);
4830 ccv_nnc_tensor_param_t output_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
4831 if (to.d == CCV_NNC_NO_TENSOR_SYMBOL) // If we are not reshape an alias, it is straightforward.
4832 {
4833 int ofs[CCV_NNC_MAX_DIM_ALLOC(12)] = {0};
4834 int stride[CCV_NNC_MAX_DIM_ALLOC(12)];
4835 ccv_nnc_tensor_get_stride(output_params.dim, stride);
4836 outputs[0] = ccv_nnc_tensor_symbol_alias_new(graph, inputs[0], ofs, stride, output_params, 0);
4837 } else {
4838 int old_ofs[CCV_NNC_MAX_DIM_ALLOC(12)];
4839 int old_stride[CCV_NNC_MAX_DIM_ALLOC(12)];
4840 ccv_nnc_tensor_symbol_alias_params(graph, inputs[0], old_ofs, old_stride);
4841 outputs[0] = ccv_nnc_tensor_symbol_alias_new(graph, to, old_ofs, old_stride, output_params, 0);
4842 }
4843 ccv_nnc_cmd_t cmd = ccv_nnc_cmd(CCV_NNC_CUSTOM_FORWARD, (ccv_nnc_cmd_vtab_t*)&ccv_cnnp_debug_exec_isa, (ccv_nnc_cmd_param_t){}, 0);
4844 cmd.data = self;
4845 ccv_nnc_graph_exec_symbol_t make_debug = ccv_nnc_graph_exec_symbol_new(graph, cmd, inputs, input_size, outputs, 1, "debug");
4846 // Disable any optimizations.
4847 ccv_nnc_graph_exec_symbol_set_flags(graph, make_debug, CCV_NNC_GRAPH_EXEC_DISABLE_OPT);
4848}
4849
4850static void _ccv_cnnp_debug_deinit(ccv_cnnp_model_t* const super)
4851{
4852 const ccv_cnnp_model_debug_t* const self = (const ccv_cnnp_model_debug_t*)super;
4853 if (self->debug_deinit && self->debug_context)
4854 self->debug_deinit(self->debug_context);
4855}
4856
4857static ccv_cnnp_model_t* _ccv_cnnp_debug_copy(const ccv_cnnp_model_t* const super, void* const context);
4858
4859static const ccv_cnnp_model_vtab_t ccv_cnnp_debug_isa = {
4860 .build = _ccv_cnnp_debug_build,
4861 .deinit = _ccv_cnnp_debug_deinit,
4862 .copy = _ccv_cnnp_debug_copy,
4863};
4864
4865ccv_cnnp_model_t* ccv_cnnp_debug(ccv_cnnp_model_debug_f func, void* const context, ccv_cnnp_model_debug_context_deinit_f deinit, ccv_cnnp_model_debug_context_copy_f copy, const char* const name)
4866{
4867 ccv_cnnp_model_debug_t* const model_debug = (ccv_cnnp_model_debug_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_debug_t));
4868 model_debug->super.isa = &ccv_cnnp_debug_isa;
4869 model_debug->super.input_size = 0;
4870 model_debug->super.outputs = &model_debug->output;
4871 model_debug->super.output_size = 1;
4872 model_debug->debugger = func;
4873 model_debug->debug_context = context;
4874 model_debug->debug_deinit = deinit;
4875 model_debug->debug_copy = copy;
4876 ccv_cnnp_model_copy_name(&model_debug->super, name);
4877 return (ccv_cnnp_model_t*)model_debug;
4878}
4879
4880static ccv_cnnp_model_t* _ccv_cnnp_debug_copy(const ccv_cnnp_model_t* const super, void* const context)
4881{
4882 const ccv_cnnp_model_debug_t* const self = (const ccv_cnnp_model_debug_t*)super;
4883 void* debug_context = self->debug_context;
4884 if (self->debug_copy && self->debug_context)
4885 debug_context = self->debug_copy(self->debug_context);
4886 return ccv_cnnp_debug(self->debugger, debug_context, self->debug_deinit, self->debug_copy, self->super.name);
4887}
4888
4889/// MARK - Sort layer.
4890
4891typedef struct {
4892 ccv_cnnp_model_t super;
4893 ccv_nnc_tensor_symbol_t outputs[2];
4894 int along_axis;
4895 int descending;
4896} ccv_cnnp_model_sort_t;
4897
4898static void _ccv_cnnp_sort_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
4899{
4900 ccv_cnnp_model_sort_t* const self = (ccv_cnnp_model_sort_t*)super;
4901 PRINT(CCV_CLI_VERBOSE, "[cnnp_sort_build] - along_axis: %d, descending: %d\n", self->along_axis, self->descending)do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_sort_build] - along_axis: %d, descending: %d\n"
, self->along_axis, self->descending); fflush(stdout); }
} while (0)
;
4902 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
4903 assert(output_size == 2)((void) sizeof ((output_size == 2) ? 1 : 0), __extension__ ({
if (output_size == 2) ; else __assert_fail ("output_size == 2"
, "ccv_cnnp_model_addons.c", 4903, __extension__ __PRETTY_FUNCTION__
); }))
;
4904 outputs[0] = ccv_nnc_tensor_symbol_new(graph, params, 0);
4905 params.datatype = CCV_32S;
4906 outputs[1] = ccv_nnc_tensor_symbol_new(graph, params, 0);
4907 ccv_nnc_graph_exec_symbol_new(graph, CMD_SORT_FORWARD(self->along_axis, self->descending)ccv_nnc_cmd(CCV_NNC_SORT_FORWARD, 0, ((ccv_nnc_cmd_param_t){.
size={.dim={1,1,1}},.sort={.along_axis=self->along_axis,.descending
=self->descending}}), 0)
, inputs, input_size, outputs, output_size, "sort");
4908}
4909
4910static ccv_cnnp_model_t* _ccv_cnnp_sort_copy(const ccv_cnnp_model_t* const self, void* const context);
4911
4912static const ccv_cnnp_model_vtab_t ccv_cnnp_sort_isa = {
4913 .build = _ccv_cnnp_sort_build,
4914 .copy = _ccv_cnnp_sort_copy,
4915};
4916
4917ccv_cnnp_model_t* ccv_cnnp_sort(const int along_axis, const int descending, const char* const name)
4918{
4919 ccv_cnnp_model_sort_t* const model_sort = (ccv_cnnp_model_sort_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_sort_t));
4920 model_sort->super.isa = &ccv_cnnp_sort_isa;
4921 model_sort->super.input_size = 0;
4922 model_sort->super.outputs = model_sort->outputs;
4923 model_sort->super.output_size = 2;
4924 model_sort->along_axis = along_axis;
4925 model_sort->descending = descending;
4926 ccv_cnnp_model_copy_name(&model_sort->super, name);
4927 return (ccv_cnnp_model_t*)model_sort;
4928}
4929
4930static ccv_cnnp_model_t* _ccv_cnnp_sort_copy(const ccv_cnnp_model_t* const super, void* const context)
4931{
4932 ccv_cnnp_model_sort_t* const self = (ccv_cnnp_model_sort_t*)super;
4933 return ccv_cnnp_sort(self->along_axis, self->descending, self->super.name);
4934}
4935
4936/// MARK - Partition layer.
4937
4938typedef struct {
4939 ccv_cnnp_model_t super;
4940 ccv_nnc_tensor_symbol_t outputs[2];
4941 int kth;
4942 int along_axis;
4943 int descending;
4944} ccv_cnnp_model_partition_t;
4945
4946static void _ccv_cnnp_partition_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
4947{
4948 ccv_cnnp_model_partition_t* const self = (ccv_cnnp_model_partition_t*)super;
4949 PRINT(CCV_CLI_VERBOSE, "[cnnp_partition_build] - kth: %d, along_axis: %d, descending: %d\n", self->kth, self->along_axis, self->descending)do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_partition_build] - kth: %d, along_axis: %d, descending: %d\n"
, self->kth, self->along_axis, self->descending); fflush
(stdout); } } while (0)
;
4950 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
4951 assert(output_size == 2)((void) sizeof ((output_size == 2) ? 1 : 0), __extension__ ({
if (output_size == 2) ; else __assert_fail ("output_size == 2"
, "ccv_cnnp_model_addons.c", 4951, __extension__ __PRETTY_FUNCTION__
); }))
;
4952 if (self->kth > 0)
4953 params.dim[self->along_axis] = self->kth;
4954 outputs[0] = ccv_nnc_tensor_symbol_new(graph, params, 0);
4955 params.datatype = CCV_32S;
4956 outputs[1] = ccv_nnc_tensor_symbol_new(graph, params, 0);
4957 ccv_nnc_graph_exec_symbol_new(graph, CMD_PARTITION_FORWARD(self->kth, self->along_axis, self->descending)ccv_nnc_cmd(CCV_NNC_PARTITION_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.partition={.kth=self->kth,.along_axis
=self->along_axis,.descending=self->descending}}), 0)
, inputs, input_size, outputs, output_size, "partition");
4958}
4959
4960static ccv_cnnp_model_t* _ccv_cnnp_partition_copy(const ccv_cnnp_model_t* const self, void* const context);
4961
4962static const ccv_cnnp_model_vtab_t ccv_cnnp_partition_isa = {
4963 .build = _ccv_cnnp_partition_build,
4964 .copy = _ccv_cnnp_partition_copy,
4965};
4966
4967ccv_cnnp_model_t* ccv_cnnp_partition(const int kth, const int along_axis, const int descending, const char* const name)
4968{
4969 ccv_cnnp_model_partition_t* const model_partition = (ccv_cnnp_model_partition_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_partition_t));
4970 model_partition->super.isa = &ccv_cnnp_partition_isa;
4971 model_partition->super.input_size = 0;
4972 model_partition->super.outputs = model_partition->outputs;
4973 model_partition->super.output_size = 2;
4974 model_partition->kth = kth;
4975 model_partition->along_axis = along_axis;
4976 model_partition->descending = descending;
4977 ccv_cnnp_model_copy_name(&model_partition->super, name);
4978 return (ccv_cnnp_model_t*)model_partition;
4979}
4980
4981static ccv_cnnp_model_t* _ccv_cnnp_partition_copy(const ccv_cnnp_model_t* const super, void* const context)
4982{
4983 ccv_cnnp_model_partition_t* const self = (ccv_cnnp_model_partition_t*)super;
4984 return ccv_cnnp_partition(self->kth, self->along_axis, self->descending, self->super.name);
4985}
4986
4987/// MARK - Unique consecutive layer.
4988
4989typedef struct {
4990 ccv_cnnp_model_t super;
4991 ccv_nnc_tensor_symbol_t outputs[2];
4992 int bincount;
4993} ccv_cnnp_model_unique_consecutive_t;
4994
4995static void _ccv_cnnp_unique_consecutive_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
4996{
4997 ccv_cnnp_model_unique_consecutive_t* const self = (ccv_cnnp_model_unique_consecutive_t*)super;
4998 PRINT(CCV_CLI_VERBOSE, "[cnnp_unique_consecutive_build] - bincount: %d\n", self->bincount)do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_unique_consecutive_build] - bincount: %d\n",
self->bincount); fflush(stdout); } } while (0)
;
4999 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
5000 assert(output_size == 2)((void) sizeof ((output_size == 2) ? 1 : 0), __extension__ ({
if (output_size == 2) ; else __assert_fail ("output_size == 2"
, "ccv_cnnp_model_addons.c", 5000, __extension__ __PRETTY_FUNCTION__
); }))
;
5001 if (self->bincount > 0)
5002 params.dim[0] = ccv_min(params.dim[0], self->bincount)({ typeof (params.dim[0]) _a = (params.dim[0]); typeof (self->
bincount) _b = (self->bincount); (_a < _b) ? _a : _b; }
)
;
5003 outputs[0] = ccv_nnc_tensor_symbol_new(graph, params, 0);
5004 params.datatype = CCV_32S;
5005 outputs[1] = ccv_nnc_tensor_symbol_new(graph, params, 0);
5006 ccv_nnc_graph_exec_symbol_new(graph, CMD_UNIQUE_CONSECUTIVE_FORWARD(self->bincount)ccv_nnc_cmd(CCV_NNC_UNIQUE_CONSECUTIVE_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.unique_consecutive={.bincount=self->
bincount}}), 0)
, inputs, input_size, outputs, output_size, "unique_consecutive");
5007}
5008
5009static ccv_cnnp_model_t* _ccv_cnnp_unique_consecutive_copy(const ccv_cnnp_model_t* const self, void* const context);
5010
5011static const ccv_cnnp_model_vtab_t ccv_cnnp_unique_consecutive_isa = {
5012 .build = _ccv_cnnp_unique_consecutive_build,
5013 .copy = _ccv_cnnp_unique_consecutive_copy,
5014};
5015
5016ccv_cnnp_model_t* ccv_cnnp_unique_consecutive(const int bincount, const char* const name)
5017{
5018 ccv_cnnp_model_unique_consecutive_t* const model_unique_consecutive = (ccv_cnnp_model_unique_consecutive_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_unique_consecutive_t));
5019 model_unique_consecutive->super.isa = &ccv_cnnp_unique_consecutive_isa;
5020 model_unique_consecutive->super.input_size = 0;
5021 model_unique_consecutive->super.outputs = model_unique_consecutive->outputs;
5022 model_unique_consecutive->super.output_size = 2;
5023 model_unique_consecutive->bincount = bincount;
5024 ccv_cnnp_model_copy_name(&model_unique_consecutive->super, name);
5025 return (ccv_cnnp_model_t*)model_unique_consecutive;
5026}
5027
5028static ccv_cnnp_model_t* _ccv_cnnp_unique_consecutive_copy(const ccv_cnnp_model_t* const super, void* const context)
5029{
5030 ccv_cnnp_model_unique_consecutive_t* const self = (ccv_cnnp_model_unique_consecutive_t*)super;
5031 return ccv_cnnp_unique_consecutive(self->bincount, self->super.name);
5032}
5033
5034/// MARK - Scatter add layer.
5035
5036typedef struct {
5037 ccv_cnnp_model_t super;
5038 ccv_nnc_tensor_symbol_t output;
5039 int bincount;
5040} ccv_cnnp_model_scatter_add_t;
5041
5042static void _ccv_cnnp_scatter_add_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
5043{
5044 ccv_cnnp_model_scatter_add_t* const self = (ccv_cnnp_model_scatter_add_t*)super;
5045 PRINT(CCV_CLI_VERBOSE, "[cnnp_scatter_add_build] - bincount: %d\n", self->bincount)do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_scatter_add_build] - bincount: %d\n", self->
bincount); fflush(stdout); } } while (0)
;
5046 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
5047 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 5047, __extension__ __PRETTY_FUNCTION__
); }))
;
5048 assert(self->bincount > 0)((void) sizeof ((self->bincount > 0) ? 1 : 0), __extension__
({ if (self->bincount > 0) ; else __assert_fail ("self->bincount > 0"
, "ccv_cnnp_model_addons.c", 5048, __extension__ __PRETTY_FUNCTION__
); }))
;
5049 params.dim[0] = self->bincount;
5050 outputs[0] = ccv_nnc_tensor_symbol_new(graph, params, 0);
5051 ccv_nnc_graph_exec_symbol_new(graph, CMD_SCATTER_ADD_FORWARD(self->bincount)ccv_nnc_cmd(CCV_NNC_SCATTER_ADD_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.scatter_add={.bincount=self->bincount
}}), 0)
, inputs, input_size, outputs, output_size, "scatter_add");
5052}
5053
5054static ccv_cnnp_model_t* _ccv_cnnp_scatter_add_copy(const ccv_cnnp_model_t* const self, void* const context);
5055
5056static const ccv_cnnp_model_vtab_t ccv_cnnp_scatter_add_isa = {
5057 .build = _ccv_cnnp_scatter_add_build,
5058 .copy = _ccv_cnnp_scatter_add_copy,
5059};
5060
5061ccv_cnnp_model_t* ccv_cnnp_scatter_add(const int bincount, const char* const name)
5062{
5063 assert(bincount > 0)((void) sizeof ((bincount > 0) ? 1 : 0), __extension__ ({ if
(bincount > 0) ; else __assert_fail ("bincount > 0", "ccv_cnnp_model_addons.c"
, 5063, __extension__ __PRETTY_FUNCTION__); }))
;
5064 ccv_cnnp_model_scatter_add_t* const model_scatter_add = (ccv_cnnp_model_scatter_add_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_scatter_add_t));
5065 model_scatter_add->super.isa = &ccv_cnnp_scatter_add_isa;
5066 model_scatter_add->super.input_size = 0;
5067 model_scatter_add->super.outputs = &model_scatter_add->output;
5068 model_scatter_add->super.output_size = 1;
5069 model_scatter_add->bincount = bincount;
5070 ccv_cnnp_model_copy_name(&model_scatter_add->super, name);
5071 return (ccv_cnnp_model_t*)model_scatter_add;
5072}
5073
5074static ccv_cnnp_model_t* _ccv_cnnp_scatter_add_copy(const ccv_cnnp_model_t* const super, void* const context)
5075{
5076 ccv_cnnp_model_scatter_add_t* const self = (ccv_cnnp_model_scatter_add_t*)super;
5077 return ccv_cnnp_scatter_add(self->bincount, self->super.name);
5078}
5079
5080// MARK - Segmented Dense Layer
5081
5082typedef struct {
5083 ccv_cnnp_model_t super;
5084 ccv_nnc_tensor_symbol_t output;
5085 ccv_nnc_tensor_symbol_t weights;
5086 ccv_nnc_tensor_symbol_t bias;
5087 int segments;
5088 int count;
5089 int no_bias;
5090 int flags;
5091} ccv_cnnp_model_segmented_dense_t;
5092
5093static void _ccv_cnnp_segmented_dense_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
5094{
5095 ccv_cnnp_model_segmented_dense_t* const self = (ccv_cnnp_model_segmented_dense_t*)super;
5096 PRINT(CCV_CLI_VERBOSE, "[cnnp_segmented_dense_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_segmented_dense_build] -\n"); fflush(stdout)
; } } while (0)
;
5097 assert(input_size == 3)((void) sizeof ((input_size == 3) ? 1 : 0), __extension__ ({ if
(input_size == 3) ; else __assert_fail ("input_size == 3", "ccv_cnnp_model_addons.c"
, 5097, __extension__ __PRETTY_FUNCTION__); }))
;
5098 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 5098, __extension__ __PRETTY_FUNCTION__
); }))
;
5099 const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
5100 const ccv_nnc_tensor_param_t indices_params = ccv_nnc_tensor_symbol_params(graph, inputs[1]);
5101 const ccv_nnc_tensor_param_t counts_params = ccv_nnc_tensor_symbol_params(graph, inputs[2]);
5102 ccv_nnc_tensor_param_t weights_params = params;
5103 memset(weights_params.dim, 0, sizeof(weights_params.dim));
5104 weights_params.dim[0] = self->segments;
5105 weights_params.dim[1] = self->count;
5106 weights_params.dim[2] = params.dim[ccv_nnc_tensor_nd(params.dim) - 1];
5107 if (!self->weights.graph)
5108 self->weights = ccv_nnc_tensor_symbol_new(graph, weights_params, "weights");
5109 assert(self->weights.graph == graph)((void) sizeof ((self->weights.graph == graph) ? 1 : 0), __extension__
({ if (self->weights.graph == graph) ; else __assert_fail
("self->weights.graph == graph", "ccv_cnnp_model_addons.c"
, 5109, __extension__ __PRETTY_FUNCTION__); }))
;
5110 const ccv_nnc_tensor_symbol_t weights = ccv_cnnp_model_get_symbol(super, self->weights);
5111 ccv_nnc_tensor_param_t bias_params = params;
5112 memset(bias_params.dim, 0, sizeof(bias_params.dim));
5113 bias_params.dim[0] = self->segments;
5114 bias_params.dim[1] = self->count;
5115 ccv_nnc_cmd_t cmd = {0};
5116 cmd.cmd = CCV_NNC_SEGMENTED_GEMM_FORWARD;
5117 cmd.info.blas.a[0] = 1;
5118 cmd.info.blas.a[1] = 1;
5119 cmd.info.blas.transpose_b[0] = 1;
5120 cmd.info.blas.transpose_b[1] = 2;
5121 cmd.info.blas.flags = self->flags;
5122 ccv_nnc_tensor_param_t output_params;
5123 ccv_nnc_hint_tensor_auto(cmd, (ccv_nnc_tensor_param_t []){
5124 params, indices_params, counts_params,
5125 weights_params,
5126 bias_params,
5127 }, 5, ccv_nnc_no_hint, &output_params, 1);
5128 const ccv_nnc_tensor_symbol_t output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
5129 if (self->no_bias)
5130 ccv_nnc_graph_exec_symbol_new(graph, cmd, TENSOR_SYMBOL_LIST(inputs[0], inputs[1], inputs[2], weights)(const ccv_nnc_tensor_symbol_t []){inputs[0], inputs[1], inputs
[2], weights}, (1 +1 +1 +1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(output)(const ccv_nnc_tensor_symbol_t []){output}, (1 +1 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "segmented_dense");
5131 else {
5132 if (!self->bias.graph)
5133 self->bias = ccv_nnc_tensor_symbol_new(graph, bias_params, "bias");
5134 const ccv_nnc_tensor_symbol_t bias = ccv_cnnp_model_get_symbol(super, self->bias);
5135 ccv_nnc_graph_exec_symbol_new(graph, cmd, TENSOR_SYMBOL_LIST(inputs[0], inputs[1], inputs[2], weights, bias)(const ccv_nnc_tensor_symbol_t []){inputs[0], inputs[1], inputs
[2], weights, bias}, (1 +1 +1 +1 +1 +1 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(output)(const ccv_nnc_tensor_symbol_t []){output}, (1 +1 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "segmented_dense");
5136 }
5137 outputs[0] = output;
5138}
5139
5140static void _ccv_cnnp_segmented_dense_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
5141{
5142 ccv_cnnp_model_segmented_dense_t* const self = (ccv_cnnp_model_segmented_dense_t*)super;
5143 const ccv_nnc_tensor_param_t weight_params = ccv_nnc_tensor_symbol_params(graph, self->weights);
5144 const int c = weight_params.dim[1];
5145 const float std = sqrtf(2) / sqrtf(c);
5146 const float bound = sqrtf(3) * std;
5147 initializer(context, CMD_RANDOM_UNIFORM_FORWARD(-bound, bound)ccv_nnc_cmd(CCV_NNC_RANDOM_UNIFORM_FORWARD, 0, (ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.blas={.a={-bound, bound}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->weights);
5148 if (self->bias.graph)
5149 initializer(context, CMD_SET_FORWARD(0)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={0,}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->bias);
5150}
5151
5152static void _ccv_cnnp_segmented_dense_add_to_parameter(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const parameters, const int is_trainable)
5153{
5154 ccv_cnnp_model_segmented_dense_t* const self = (ccv_cnnp_model_segmented_dense_t*)super;
5155 add_to_array(parameters, self->weights, is_trainable);
5156 if (self->bias.graph)
5157 add_to_array(parameters, self->bias, is_trainable);
5158}
5159
5160static ccv_cnnp_model_t* _ccv_cnnp_segmented_dense_copy(const ccv_cnnp_model_t* const super, void* const context);
5161
5162static const ccv_cnnp_model_vtab_t ccv_cnnp_segmented_dense_isa = {
5163 .build = _ccv_cnnp_segmented_dense_build,
5164 .init_states = _ccv_cnnp_segmented_dense_init_states,
5165 .add_to_parameter = _ccv_cnnp_segmented_dense_add_to_parameter,
5166 .copy = _ccv_cnnp_segmented_dense_copy,
5167};
5168
5169ccv_cnnp_model_t* ccv_cnnp_segmented_dense(const int segments, const int count, const int no_bias, const int flags, const int is_trainable, const char* const name)
5170{
5171 ccv_cnnp_model_segmented_dense_t* const model_segmented_dense = (ccv_cnnp_model_segmented_dense_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_segmented_dense_t));
5172 model_segmented_dense->super.isa = &ccv_cnnp_segmented_dense_isa;
5173 model_segmented_dense->super.input_size = 3;
5174 model_segmented_dense->super.outputs = &model_segmented_dense->output;
5175 model_segmented_dense->super.output_size = 1;
5176 model_segmented_dense->super.is_trainable = is_trainable;
5177 ccv_cnnp_model_copy_name(&model_segmented_dense->super, name);
5178 model_segmented_dense->weights.d = CCV_NNC_NO_TENSOR_SYMBOL;
5179 model_segmented_dense->weights.graph = 0;
5180 model_segmented_dense->bias.d = CCV_NNC_NO_TENSOR_SYMBOL;
5181 model_segmented_dense->bias.graph = 0;
5182 model_segmented_dense->segments = segments;
5183 model_segmented_dense->count = count;
5184 model_segmented_dense->no_bias = no_bias;
5185 model_segmented_dense->flags = flags;
5186 return (ccv_cnnp_model_t*)model_segmented_dense;
5187}
5188
5189static ccv_cnnp_model_t* _ccv_cnnp_segmented_dense_copy(const ccv_cnnp_model_t* const super, void* const context)
5190{
5191 const ccv_cnnp_model_segmented_dense_t* const self = (const ccv_cnnp_model_segmented_dense_t*)super;
5192 return ccv_cnnp_segmented_dense(self->segments, self->count, self->no_bias, self->flags, self->super.is_trainable, self->super.name);
5193}