Bug Summary

File: nnc/ccv_cnnp_model_addons.c
Warning: line 1128, column 2
The left operand of '%' is a garbage value

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-unknown-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name ccv_cnnp_model_addons.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 2 -pic-is-pie -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -target-feature +sse2 -tune-cpu generic -debugger-tuning=gdb -fdebug-compilation-dir=/home/liu/actions-runner/_work/ccv/ccv/lib/nnc -fcoverage-compilation-dir=/home/liu/actions-runner/_work/ccv/ccv/lib/nnc -resource-dir /usr/local/lib/clang/19 -I ../ -I /usr/local/cuda/include -D HAVE_CBLAS -D HAVE_LIBPNG -D HAVE_LIBJPEG -D HAVE_FFTW3 -D HAVE_PTHREAD -D HAVE_LIBLINEAR -D HAVE_TESSERACT -D HAVE_AVCODEC -D HAVE_AVFORMAT -D HAVE_AVUTIL -D HAVE_SWSCALE -D HAVE_SSE2 -D HAVE_GSL -D HAVE_CUDA -D HAVE_CUDNN -D HAVE_NCCL -D USE_SYSTEM_CUB -I /usr/local/include -internal-isystem /usr/local/lib/clang/19/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/12/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -ferror-limit 19 -fgnuc-version=4.2.1 -fskip-odr-check-in-gmf -vectorize-loops -vectorize-slp -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/liu/actions-runner/_work/ccv/ccv/_analyze/2025-01-21-185238-267098-1 
-x c ccv_cnnp_model_addons.c
1#include "ccv_nnc.h"
2#include "ccv_nnc_easy.h"
3#include "ccv_nnc_internal.h"
4#include "ccv_internal.h"
5#include "_ccv_cnnp_model.h"
6
7// MARK - Add-on Functions
8
9static int _ccv_cnnp_model_clip_grad_norm_reduce_norm2(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
10{
11 const int device_id = CCV_TENSOR_GET_DEVICE_ID(inputs[0]->info.type)(((inputs[0]->info.type) & 0xfff00) >> 8);
12 ccv_nnc_tensor_t* const old_norm2 = outputs[1 + device_id * 2];
13 ccv_nnc_tensor_t* const norm2 = outputs[1 + device_id * 2 + 1];
14 const int tensor_count = ccv_nnc_tensor_count(inputs[0]->info);
15 if (tensor_count == 1)
16 ccv_nnc_cmd_exec(CMD_MUL_FORWARD(1)ccv_nnc_cmd(CCV_NNC_MUL_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={1,}}}, 0)
, hint, flags, TENSOR_LIST(inputs[0], inputs[0])(ccv_nnc_tensor_t* []){inputs[0], inputs[0]}, (1 +1 +1 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_LIST(norm2)(ccv_nnc_tensor_t* []){norm2}, (1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, stream_context);
17 else {
18 ccv_nnc_cmd_exec(CMD_REDUCE_NORM2_FORWARD()ccv_nnc_cmd(CCV_NNC_REDUCE_NORM2_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.reduce={.count=(1 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1),.axis={}}}
), 0)
, hint, flags, TENSOR_LIST(inputs[0])(ccv_nnc_tensor_t* []){inputs[0]}, (1 +1 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_LIST(norm2)(ccv_nnc_tensor_t* []){norm2}, (1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, stream_context);
19 ccv_nnc_cmd_exec(CMD_MUL_FORWARD(1)ccv_nnc_cmd(CCV_NNC_MUL_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={1,}}}, 0)
, hint, flags, TENSOR_LIST(norm2, norm2)(ccv_nnc_tensor_t* []){norm2, norm2}, (1 +1 +1 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_LIST(norm2)(ccv_nnc_tensor_t* []){norm2}, (1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, stream_context);
20 }
21 ccv_nnc_cmd_exec(CMD_ADD_FORWARD(1, 1)ccv_nnc_cmd(CCV_NNC_ADD_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={1, 1}}}, 0)
, hint, flags, TENSOR_LIST(old_norm2, norm2)(ccv_nnc_tensor_t* []){old_norm2, norm2}, (1 +1 +1 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_LIST(old_norm2)(ccv_nnc_tensor_t* []){old_norm2}, (1 +1 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, stream_context);
22 return CCV_NNC_EXEC_SUCCESS;
23}
24
25static ccv_nnc_cmd_vtab_t clip_grad_norm_reduce_norm2_vtab = {
26 .exec = _ccv_cnnp_model_clip_grad_norm_reduce_norm2
27};
28
29static int _ccv_cnnp_model_clip_grad_norm_scatter_norm2(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
30{
31 const int device_id = CCV_TENSOR_GET_DEVICE_ID(inputs[0]->info.type)(((inputs[0]->info.type) & 0xfff00) >> 8);
32 ccv_nnc_tensor_t* const norm2 = inputs[1 + device_id * 2];
33 ccv_nnc_cmd_exec(CMD_MUL_FORWARD(1)ccv_nnc_cmd(CCV_NNC_MUL_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={1,}}}, 0)
, hint, flags, TENSOR_LIST(inputs[0], norm2)(ccv_nnc_tensor_t* []){inputs[0], norm2}, (1 +1 +1 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_LIST(outputs[0])(ccv_nnc_tensor_t* []){outputs[0]}, (1 +1 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, stream_context);
34 return CCV_NNC_EXEC_SUCCESS;
35}
36
37static ccv_nnc_cmd_vtab_t clip_grad_norm_scatter_norm2_vtab = {
38 .exec = _ccv_cnnp_model_clip_grad_norm_scatter_norm2
39};
40
41void ccv_cnnp_model_parameters_clip_grad_norm(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameters, int norm_type, float max_norm, ccv_nnc_stream_context_t* const stream_context)
42{
43 assert(norm_type == 2)((void) sizeof ((norm_type == 2) ? 1 : 0), __extension__ ({ if
(norm_type == 2) ; else __assert_fail ("norm_type == 2", "ccv_cnnp_model_addons.c"
, 43, __extension__ __PRETTY_FUNCTION__); }))
;
44 ccv_cnnp_compiled_data_t* const compiled_data = model->compiled_data;
45 assert(compiled_data)((void) sizeof ((compiled_data) ? 1 : 0), __extension__ ({ if
(compiled_data) ; else __assert_fail ("compiled_data", "ccv_cnnp_model_addons.c"
, 45, __extension__ __PRETTY_FUNCTION__); }))
;
46 const int parallel_count = ccv_max(model->parallel_count, 1)({ typeof (model->parallel_count) _a = (model->parallel_count
); typeof (1) _b = (1); (_a > _b) ? _a : _b; })
;
47 ccv_nnc_tensor_t* norm2[parallel_count * 2];
48 ccv_nnc_tensor_t* max_normt[parallel_count];
49 const int stream_type = model->compiled_data->stream_type;
50 int i;
51 if (stream_type == CCV_STREAM_CONTEXT_GPU)
52 {
53 for (i = 0; i < parallel_count; i++)
54 {
55 ccv_nnc_tensor_param_t info = {
56 .type = CCV_TENSOR_GPU_MEMORY,
57 .format = CCV_TENSOR_FORMAT_NHWC,
58 .datatype = CCV_32F,
59 .dim = {1},
60 };
61 CCV_TENSOR_SET_DEVICE_ID(info.type, i)(info.type) = (((info.type) & ~0xfff00) | (((i) & 0xfff
) << 8))
;
62 norm2[i * 2] = ccv_nnc_tensor_new(ccv_nnc_xpu_alloc(&compiled_data->xpu_alloc, i, stream_context, ccv_nnc_tensor_data_size(info)), info, 0);
63 norm2[i * 2 + 1] = ccv_nnc_tensor_new(ccv_nnc_xpu_alloc(&compiled_data->xpu_alloc, i, stream_context, ccv_nnc_tensor_data_size(info)), info, 0);
64 max_normt[i] = ccv_nnc_tensor_new(ccv_nnc_xpu_alloc(&compiled_data->xpu_alloc, i, stream_context, ccv_nnc_tensor_data_size(info)), info, 0);
65 }
66 } else {
67 for (i = 0; i < parallel_count; i++)
68 {
69 ccv_nnc_tensor_param_t info = {
70 .type = CCV_TENSOR_CPU_MEMORY,
71 .format = CCV_TENSOR_FORMAT_NHWC,
72 .datatype = CCV_32F,
73 .dim = {1},
74 };
75 norm2[i * 2] = ccv_nnc_tensor_new(0, info, 0);
76 norm2[i * 2 + 1] = ccv_nnc_tensor_new(0, info, 0);
77 max_normt[i] = ccv_nnc_tensor_new(0, info, 0);
78 }
79 }
80 // zero out old norm2.
81 if (parallel_count > 1)
82 {
83 ccv_nnc_stream_context_t* streams[parallel_count];
84 ccv_nnc_stream_signal_t* signal;
85 if (stream_context)
86 signal = ccv_nnc_stream_context_emit_signal_new(stream_context);
87 for (i = 0; i < parallel_count; i++)
88 {
89 const int stream_type = CCV_TENSOR_GET_MEMORY(norm2[i * 2]->info.type)((norm2[i * 2]->info.type) & 0x3) == CCV_TENSOR_GPU_MEMORY ? CCV_STREAM_CONTEXT_GPU : CCV_STREAM_CONTEXT_CPU;
90 const int device_id = CCV_TENSOR_GET_DEVICE_ID(norm2[i * 2]->info.type)(((norm2[i * 2]->info.type) & 0xfff00) >> 8);
91 int type = stream_type;
92 CCV_STREAM_SET_DEVICE_ID(type, device_id)(type) = (((type) & ~0xfff00) | (((device_id) & 0xfff
) << 8))
;
93 ccv_nnc_stream_context_t* const stream_0 = ccv_cnnp_compiled_data_get_stream(compiled_data, type);
94 // Wait signal to finish.
95 if (stream_context)
96 ccv_nnc_stream_context_wait_signal(stream_0, signal);
97 ccv_nnc_cmd_exec(CMD_SET_FORWARD(0)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={0,}}}, 0)
, ccv_nnc_no_hint, 0, 0, 0, TENSOR_LIST(norm2[i * 2])(ccv_nnc_tensor_t* []){norm2[i * 2]}, (1 +1 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, stream_0);
98 if (stream_context)
99 {
100 ccv_nnc_stream_signal_t* const signal = ccv_nnc_stream_context_emit_signal_new(stream_0);
101 ccv_nnc_stream_context_wait_signal(stream_context, signal);
102 }
103 streams[i] = stream_0;
104 }
105 // If this should be blocking, blocking it.
106 if (!stream_context)
107 for (i = 0; i < parallel_count; i++)
108 if (streams[i])
109 ccv_nnc_stream_context_wait(streams[i]);
110 } else {
111 ccv_nnc_cmd_exec(CMD_SET_FORWARD(0)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={0,}}}, 0)
, ccv_nnc_no_hint, 0, 0, 0, TENSOR_LIST(norm2[0])(ccv_nnc_tensor_t* []){norm2[0]}, (1 +1 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, stream_context);
112 }
113 // Gather norm2.
114 ccv_nnc_cmd_t reduce_cmd = {
115 .cmd = CCV_NNC_CUSTOM_FORWARD,
116 .isa = &clip_grad_norm_reduce_norm2_vtab,
117 };
118 ccv_cnnp_model_parameter_gradients_map(model, parameters, reduce_cmd, ccv_nnc_no_hint, 0, 0, 0, norm2, parallel_count * 2, stream_context);
119 // Now compute max(max_norm / norm2, 1.0).
120 if (parallel_count > 1)
121 {
122 ccv_nnc_stream_context_t* streams[parallel_count];
123 ccv_nnc_stream_signal_t* signal;
124 if (stream_context)
125 signal = ccv_nnc_stream_context_emit_signal_new(stream_context);
126 for (i = 0; i < parallel_count; i++)
127 {
128 const int stream_type = CCV_TENSOR_GET_MEMORY(norm2[i * 2]->info.type)((norm2[i * 2]->info.type) & 0x3) == CCV_TENSOR_GPU_MEMORY ? CCV_STREAM_CONTEXT_GPU : CCV_STREAM_CONTEXT_CPU;
129 const int device_id = CCV_TENSOR_GET_DEVICE_ID(norm2[i * 2]->info.type)(((norm2[i * 2]->info.type) & 0xfff00) >> 8);
130 int type = stream_type;
131 CCV_STREAM_SET_DEVICE_ID(type, device_id)(type) = (((type) & ~0xfff00) | (((device_id) & 0xfff
) << 8))
;
132 ccv_nnc_stream_context_t* const stream_0 = ccv_cnnp_compiled_data_get_stream(compiled_data, type);
133 // Wait signal to finish.
134 if (stream_context)
135 ccv_nnc_stream_context_wait_signal(stream_0, signal);
136 ccv_nnc_cmd_exec(CMD_EWSQRT_FORWARD()ccv_nnc_cmd(CCV_NNC_EWSQRT_FORWARD, 0, ccv_nnc_cmd_auto, 0), ccv_nnc_no_hint, 0, TENSOR_LIST(norm2[i * 2])(ccv_nnc_tensor_t* []){norm2[i * 2]}, (1 +1 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_LIST(norm2[i * 2])(ccv_nnc_tensor_t* []){norm2[i * 2]}, (1 +1 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, stream_0);
137 ccv_nnc_cmd_exec(CMD_SET_FORWARD(max_norm)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={max_norm,}}}, 0)
, ccv_nnc_no_hint, 0, 0, 0, TENSOR_LIST(max_normt[i])(ccv_nnc_tensor_t* []){max_normt[i]}, (1 +1 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, stream_0);
138 ccv_nnc_cmd_exec(CMD_EWDIV_FORWARD()ccv_nnc_cmd(CCV_NNC_EWDIV_FORWARD, 0, ccv_nnc_cmd_auto, 0), ccv_nnc_no_hint, 0, TENSOR_LIST(max_normt[i], norm2[i * 2])(ccv_nnc_tensor_t* []){max_normt[i], norm2[i * 2]}, (1 +1 +1 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1
)
, TENSOR_LIST(norm2[i * 2])(ccv_nnc_tensor_t* []){norm2[i * 2]}, (1 +1 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, stream_0);
139 ccv_nnc_cmd_exec(CMD_CLAMP_FORWARD(NAN, 1)ccv_nnc_cmd(CCV_NNC_CLAMP_FORWARD, 0, (ccv_nnc_cmd_param_t){.
size={.dim={1,1,1}},.clamp={.min=(__builtin_nanf ("")),.max=1
}}, 0)
, ccv_nnc_no_hint, 0, TENSOR_LIST(norm2[i * 2])(ccv_nnc_tensor_t* []){norm2[i * 2]}, (1 +1 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_LIST(norm2[i * 2])(ccv_nnc_tensor_t* []){norm2[i * 2]}, (1 +1 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, stream_0);
140 if (stream_context)
141 {
142 ccv_nnc_stream_signal_t* const signal = ccv_nnc_stream_context_emit_signal_new(stream_0);
143 ccv_nnc_stream_context_wait_signal(stream_context, signal);
144 }
145 streams[i] = stream_0;
146 }
147 // If this should be blocking, blocking it.
148 if (!stream_context)
149 for (i = 0; i < parallel_count; i++)
150 if (streams[i])
151 ccv_nnc_stream_context_wait(streams[i]);
152 } else {
153 ccv_nnc_cmd_exec(CMD_EWSQRT_FORWARD()ccv_nnc_cmd(CCV_NNC_EWSQRT_FORWARD, 0, ccv_nnc_cmd_auto, 0), ccv_nnc_no_hint, 0, TENSOR_LIST(norm2[0])(ccv_nnc_tensor_t* []){norm2[0]}, (1 +1 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_LIST(norm2[0])(ccv_nnc_tensor_t* []){norm2[0]}, (1 +1 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, stream_context);
154 ccv_nnc_cmd_exec(CMD_SET_FORWARD(max_norm)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={max_norm,}}}, 0)
, ccv_nnc_no_hint, 0, 0, 0, TENSOR_LIST(max_normt[0])(ccv_nnc_tensor_t* []){max_normt[0]}, (1 +1 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, stream_context);
155 ccv_nnc_cmd_exec(CMD_EWDIV_FORWARD()ccv_nnc_cmd(CCV_NNC_EWDIV_FORWARD, 0, ccv_nnc_cmd_auto, 0), ccv_nnc_no_hint, 0, TENSOR_LIST(max_normt[0], norm2[0])(ccv_nnc_tensor_t* []){max_normt[0], norm2[0]}, (1 +1 +1 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_LIST(norm2[0])(ccv_nnc_tensor_t* []){norm2[0]}, (1 +1 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, stream_context);
156 ccv_nnc_cmd_exec(CMD_CLAMP_FORWARD(NAN, 1)ccv_nnc_cmd(CCV_NNC_CLAMP_FORWARD, 0, (ccv_nnc_cmd_param_t){.
size={.dim={1,1,1}},.clamp={.min=(__builtin_nanf ("")),.max=1
}}, 0)
, ccv_nnc_no_hint, 0, TENSOR_LIST(norm2[0])(ccv_nnc_tensor_t* []){norm2[0]}, (1 +1 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_LIST(norm2[0])(ccv_nnc_tensor_t* []){norm2[0]}, (1 +1 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, stream_context);
157 }
158 ccv_nnc_cmd_t scatter_cmd = {
159 .cmd = CCV_NNC_CUSTOM_FORWARD,
160 .isa = &clip_grad_norm_scatter_norm2_vtab,
161 };
162 ccv_cnnp_model_parameter_gradients_map(model, parameters, scatter_cmd, ccv_nnc_no_hint, 0, norm2, parallel_count * 2, 0, 0, stream_context);
163 if (stream_type == CCV_STREAM_CONTEXT_GPU)
164 for (i = 0; i < parallel_count; i++)
165 {
166 ccv_nnc_xpu_free(&compiled_data->xpu_alloc, norm2[i * 2]->data.u8);
167 ccv_nnc_xpu_free(&compiled_data->xpu_alloc, norm2[i * 2 + 1]->data.u8);
168 ccv_nnc_xpu_free(&compiled_data->xpu_alloc, max_normt[i]->data.u8);
169 }
170 for (i = 0; i < parallel_count; i++)
171 {
172 ccv_nnc_tensor_free(norm2[i * 2]);
173 ccv_nnc_tensor_free(norm2[i * 2 + 1]);
174 ccv_nnc_tensor_free(max_normt[i]);
175 }
176}
177
178// MARK - Add-on Functions
179
180static int _ccv_cnnp_model_isnan(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
181{
182 const int device_id = CCV_TENSOR_GET_DEVICE_ID(inputs[0]->info.type)(((inputs[0]->info.type) & 0xfff00) >> 8);
183 ccv_nnc_tensor_t* const old_isnanr = outputs[1 + device_id * 2];
184 ccv_nnc_tensor_t* const isnanr = outputs[1 + device_id * 2 + 1];
185 ccv_nnc_cmd_t reduce_cmd = CMD_REDUCE_ISNAN_FORWARD()ccv_nnc_cmd(CCV_NNC_REDUCE_ISNAN_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.reduce={.count=(1 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1),.axis={}}}
), 0)
;
186 reduce_cmd.info.reduce.count = ccv_nnc_tensor_nd(inputs[0]->info.dim);
187 int i;
188 for (i = 0; i < cmd.info.reduce.count; i++)
189 reduce_cmd.info.reduce.axis[i] = i;
190 ccv_nnc_cmd_exec(reduce_cmd, hint, flags, TENSOR_LIST(inputs[0])(ccv_nnc_tensor_t* []){inputs[0]}, (1 +1 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_LIST(isnanr)(ccv_nnc_tensor_t* []){isnanr}, (1 +1 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, stream_context);
191 ccv_nnc_cmd_exec(CMD_EWSUM_FORWARD()ccv_nnc_cmd(CCV_NNC_EWSUM_FORWARD, 0, ccv_nnc_cmd_auto, 0), hint, flags, TENSOR_LIST(old_isnanr, isnanr)(ccv_nnc_tensor_t* []){old_isnanr, isnanr}, (1 +1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_LIST(old_isnanr)(ccv_nnc_tensor_t* []){old_isnanr}, (1 +1 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, stream_context);
192 return CCV_NNC_EXEC_SUCCESS;
193}
194
195static ccv_nnc_cmd_vtab_t reduce_isnan_vtab = {
196 .exec = _ccv_cnnp_model_isnan
197};
198
199int ccv_cnnp_model_parameter_gradients_isnan(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameters, ccv_nnc_stream_context_t* const stream_context)
200{
201 ccv_cnnp_compiled_data_t* const compiled_data = model->compiled_data;
202 assert(compiled_data)((void) sizeof ((compiled_data) ? 1 : 0), __extension__ ({ if
(compiled_data) ; else __assert_fail ("compiled_data", "ccv_cnnp_model_addons.c"
, 202, __extension__ __PRETTY_FUNCTION__); }))
;
203 const int parallel_count = ccv_max(model->parallel_count, 1)({ typeof (model->parallel_count) _a = (model->parallel_count
); typeof (1) _b = (1); (_a > _b) ? _a : _b; })
;
204 ccv_nnc_tensor_t* isnanr[parallel_count * 2];
205 const int stream_type = model->compiled_data->stream_type;
206 int i;
207 if (stream_type == CCV_STREAM_CONTEXT_GPU)
208 {
209 for (i = 0; i < parallel_count; i++)
210 {
211 ccv_nnc_tensor_param_t info = {
212 .type = CCV_TENSOR_GPU_MEMORY,
213 .format = CCV_TENSOR_FORMAT_NHWC,
214 .datatype = CCV_32S,
215 .dim = {1},
216 };
217 CCV_TENSOR_SET_DEVICE_ID(info.type, i)(info.type) = (((info.type) & ~0xfff00) | (((i) & 0xfff
) << 8))
;
218 isnanr[i * 2] = ccv_nnc_tensor_new(ccv_nnc_xpu_alloc(&compiled_data->xpu_alloc, i, stream_context, ccv_nnc_tensor_data_size(info)), info, 0);
219 isnanr[i * 2 + 1] = ccv_nnc_tensor_new(ccv_nnc_xpu_alloc(&compiled_data->xpu_alloc, i, stream_context, ccv_nnc_tensor_data_size(info)), info, 0);
220 }
221 } else {
222 for (i = 0; i < parallel_count; i++)
223 {
224 ccv_nnc_tensor_param_t info = {
225 .type = CCV_TENSOR_CPU_MEMORY,
226 .format = CCV_TENSOR_FORMAT_NHWC,
227 .datatype = CCV_32S,
228 .dim = {1},
229 };
230 isnanr[i * 2] = ccv_nnc_tensor_new(0, info, 0);
231 isnanr[i * 2 + 1] = ccv_nnc_tensor_new(0, info, 0);
232 }
233 }
234 // zero out old isnanr.
235 if (parallel_count > 1)
236 {
237 ccv_nnc_stream_context_t* streams[parallel_count];
238 ccv_nnc_stream_signal_t* signal;
239 if (stream_context)
240 signal = ccv_nnc_stream_context_emit_signal_new(stream_context);
241 for (i = 0; i < parallel_count; i++)
242 {
243 const int stream_type = CCV_TENSOR_GET_MEMORY(isnanr[i * 2]->info.type)((isnanr[i * 2]->info.type) & 0x3) == CCV_TENSOR_GPU_MEMORY ? CCV_STREAM_CONTEXT_GPU : CCV_STREAM_CONTEXT_CPU;
244 const int device_id = CCV_TENSOR_GET_DEVICE_ID(isnanr[i * 2]->info.type)(((isnanr[i * 2]->info.type) & 0xfff00) >> 8);
245 int type = stream_type;
246 CCV_STREAM_SET_DEVICE_ID(type, device_id)(type) = (((type) & ~0xfff00) | (((device_id) & 0xfff
) << 8))
;
247 ccv_nnc_stream_context_t* const stream_0 = ccv_cnnp_compiled_data_get_stream(compiled_data, type);
248 // Wait signal to finish.
249 if (stream_context)
250 ccv_nnc_stream_context_wait_signal(stream_0, signal);
251 ccv_nnc_cmd_exec(CMD_SET_FORWARD(0)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={0,}}}, 0)
, ccv_nnc_no_hint, 0, 0, 0, TENSOR_LIST(isnanr[i * 2])(ccv_nnc_tensor_t* []){isnanr[i * 2]}, (1 +1 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, stream_0);
252 if (stream_context)
253 {
254 ccv_nnc_stream_signal_t* const signal = ccv_nnc_stream_context_emit_signal_new(stream_0);
255 ccv_nnc_stream_context_wait_signal(stream_context, signal);
256 }
257 streams[i] = stream_0;
258 }
259 // If this should be blocking, blocking it.
260 if (!stream_context)
261 for (i = 0; i < parallel_count; i++)
262 if (streams[i])
263 ccv_nnc_stream_context_wait(streams[i]);
264 } else
265 ccv_nnc_cmd_exec(CMD_SET_FORWARD(0)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={0,}}}, 0)
, ccv_nnc_no_hint, 0, 0, 0, TENSOR_LIST(isnanr[0])(ccv_nnc_tensor_t* []){isnanr[0]}, (1 +1 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, stream_context);
266 // Gather isnanr.
267 ccv_nnc_cmd_t reduce_cmd = {
268 .cmd = CCV_NNC_CUSTOM_FORWARD,
269 .isa = &reduce_isnan_vtab,
270 };
271 ccv_cnnp_model_parameter_gradients_map(model, parameters, reduce_cmd, ccv_nnc_no_hint, 0, 0, 0, isnanr, parallel_count * 2, stream_context);
272 for (i = 0; i < parallel_count; i++)
273 ccv_nnc_tensor_free(isnanr[i * 2 + 1]);
274 int retval = 0;
275 if (stream_type == CCV_TENSOR_GPU_MEMORY)
276 {
277 ccv_nnc_tensor_param_t info = {
278 .type = CCV_TENSOR_CPU_MEMORY,
279 .format = CCV_TENSOR_FORMAT_NHWC,
280 .datatype = CCV_32S,
281 .dim = {1},
282 };
283 ccv_nnc_tensor_t* checknan = ccv_nnc_tensor_new(0, info, 0);
284 for (i = 0; i < parallel_count; i++)
285 {
286 ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD()ccv_nnc_cmd(CCV_NNC_DATA_TRANSFER_FORWARD, 0, ccv_nnc_cmd_auto
, 0)
, ccv_nnc_no_hint, 0, TENSOR_LIST(isnanr[i * 2])(ccv_nnc_tensor_t* []){isnanr[i * 2]}, (1 +1 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_LIST(checknan)(ccv_nnc_tensor_t* []){checknan}, (1 +1 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, 0);
287 if (checknan->data.i32[0] > 0)
288 {
289 retval = 1;
290 break;
291 }
292 }
293 ccv_nnc_tensor_free(checknan);
294 } else {
295 for (i = 0; i < parallel_count; i++)
296 if (isnanr[i * 2]->data.i32[0] > 0)
297 {
298 retval = 1;
299 break;
300 }
301 }
302 for (i = 0; i < parallel_count; i++)
303 ccv_nnc_tensor_free(isnanr[i * 2]);
304 return retval;
305}
306
307// MARK - Core Layers
308
309static void _ccv_cnnp_sum_build(ccv_cnnp_model_t* const self, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
310{
311 PRINT(CCV_CLI_VERBOSE, "[cnnp_sum_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_sum_build] -\n"); fflush(stdout); } } while (
0)
;
312 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 312, __extension__ __PRETTY_FUNCTION__
); }))
;
313 outputs[0] = ccv_nnc_tensor_symbol_new(graph, ccv_nnc_tensor_symbol_params(graph, inputs[0]), 0);
314 ccv_nnc_graph_exec_symbol_new(graph, CMD_EWSUM_FORWARD()ccv_nnc_cmd(CCV_NNC_EWSUM_FORWARD, 0, ccv_nnc_cmd_auto, 0), inputs, input_size, outputs, output_size, 0);
315}
316
317static ccv_cnnp_model_t* _ccv_cnnp_sum_copy(const ccv_cnnp_model_t* const self, void* const context);
318
319static const ccv_cnnp_model_vtab_t ccv_cnnp_sum_isa = {
320 .build = _ccv_cnnp_sum_build,
321 .copy = _ccv_cnnp_sum_copy,
322};
323
324typedef struct {
325 ccv_cnnp_model_t super;
326 ccv_nnc_tensor_symbol_t output;
327} ccv_cnnp_model_sum_t;
328
329ccv_cnnp_model_t* ccv_cnnp_sum(const char* const name)
330{
331 ccv_cnnp_model_sum_t* const model_sum = (ccv_cnnp_model_sum_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_sum_t));
332 model_sum->super.isa = &ccv_cnnp_sum_isa;
333 model_sum->super.input_size = 0;
334 model_sum->super.outputs = &model_sum->output;
335 model_sum->super.output_size = 1;
336 ccv_cnnp_model_copy_name(&model_sum->super, name);
337 return (ccv_cnnp_model_t*)model_sum;
338}
339
340static ccv_cnnp_model_t* _ccv_cnnp_sum_copy(const ccv_cnnp_model_t* const self, void* const context)
341{
342 return ccv_cnnp_sum(self->name);
343}
344
345typedef struct {
346 ccv_cnnp_model_t super;
347 int axis;
348 ccv_nnc_tensor_symbol_t output;
349} ccv_cnnp_model_concat_t;
350
351static void _ccv_cnnp_concat_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
352{
353 const ccv_cnnp_model_concat_t* const self = (const ccv_cnnp_model_concat_t*)super;
354 PRINT(CCV_CLI_VERBOSE, "[cnnp_concat_build] 1. -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_concat_build] 1. -\n"); fflush(stdout); } } while
(0)
;
355 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 355, __extension__ __PRETTY_FUNCTION__
); }))
;
356 ccv_nnc_tensor_param_t output_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
357 int i, j;
358 if (output_params.dim[0] == 0)
359 for (i = 1; i < input_size; i++)
360 {
361 output_params = ccv_nnc_tensor_symbol_params(graph, inputs[i]);
362 if (output_params.dim[0] != 0)
363 break;
364 }
365 const int nd = ccv_nnc_tensor_nd(output_params.dim);
366 const int axis = self->axis;
367 assert(axis < nd)((void) sizeof ((axis < nd) ? 1 : 0), __extension__ ({ if (
axis < nd) ; else __assert_fail ("axis < nd", "ccv_cnnp_model_addons.c"
, 367, __extension__ __PRETTY_FUNCTION__); }))
;
368 output_params.dim[axis] = 0;
369 int input_is_contiguous = 1;
370 for (i = 0; i < input_size; i++)
371 {
372 const ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[i]);
373 const int input_nd = ccv_nnc_tensor_nd(input_params.dim);
374 if (input_nd == 0)
375 {
376 PRINT(CCV_CLI_VERBOSE, "[cnnp_concat_build] %d. input[%d]: -\n", i + 2, i)do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_concat_build] %d. input[%d]: -\n", i + 2, i)
; fflush(stdout); } } while (0)
;
377 input_is_contiguous = 0;
378 continue;
379 }
380 if (CCV_CLI_OUTPUT_LEVEL_IS(CCV_CLI_VERBOSE)(CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
381 {
382 PRINT(CCV_CLI_VERBOSE, "[cnnp_concat_build] %d. input[%d]: (%d", i + 2, i, input_params.dim[0])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_concat_build] %d. input[%d]: (%d", i + 2, i,
input_params.dim[0]); fflush(stdout); } } while (0)
;
383 int i;
384 for (i = 1; i < CCV_NNC_MAX_DIM_ALLOC(12) && input_params.dim[i] > 0; i++)
385 PRINT(CCV_CLI_VERBOSE, ", %d", input_params.dim[i])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(", %d", input_params.dim[i]); fflush(stdout); } } while
(0)
;
386 PRINT(CCV_CLI_VERBOSE, ")\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(")\n"); fflush(stdout); } } while (0)
;
387 }
388 assert(input_nd == nd)((void) sizeof ((input_nd == nd) ? 1 : 0), __extension__ ({ if
(input_nd == nd) ; else __assert_fail ("input_nd == nd", "ccv_cnnp_model_addons.c"
, 388, __extension__ __PRETTY_FUNCTION__); }))
;
389 for (j = 0; j < nd; j++)
390 if (j != axis)
391 { assert(input_params.dim[j] == output_params.dim[j])((void) sizeof ((input_params.dim[j] == output_params.dim[j])
? 1 : 0), __extension__ ({ if (input_params.dim[j] == output_params
.dim[j]) ; else __assert_fail ("input_params.dim[j] == output_params.dim[j]"
, "ccv_cnnp_model_addons.c", 391, __extension__ __PRETTY_FUNCTION__
); }))
; }
392 output_params.dim[axis] += input_params.dim[axis];
393 }
394 outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
395 int ofs[CCV_NNC_MAX_DIM_ALLOC(12)] = {};
396 int stride[CCV_NNC_MAX_DIM_ALLOC(12)] = {};
397 ccv_nnc_tensor_get_stride(output_params.dim, stride);
398 if (input_is_contiguous)
399 {
400 ccv_nnc_tensor_symbol_t aliases[input_size];
401 for (i = 0; i < input_size; i++)
402 {
403 const ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[i]);
404 aliases[i] = ccv_nnc_tensor_symbol_alias_new(graph, outputs[0], ofs, stride, input_params, 0);
405 ofs[axis] += input_params.dim[axis];
406 }
407 // Format transform is more flexible.
408 ccv_nnc_graph_exec_symbol_new(graph, CMD_FORMAT_TRANSFORM_FORWARD()ccv_nnc_cmd(CCV_NNC_FORMAT_TRANSFORM_FORWARD, 0, ccv_nnc_cmd_auto
, 0)
, inputs, input_size, aliases, input_size, "concat");
409 } else {
410 ccv_nnc_tensor_symbol_t aliases[input_size];
411 for (i = 0; i < input_size; i++)
412 {
413 const ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[i]);
414 if (input_params.dim[0] == 0)
415 {
416 // Create a new alias anyway, but not going to use it, in this way, the alias count will match during absorb.
417 aliases[i] = ccv_nnc_tensor_symbol_alias_new(graph, outputs[0], ofs, stride, input_params, 0);
418 continue;
419 }
420 aliases[i] = ccv_nnc_tensor_symbol_alias_new(graph, outputs[0], ofs, stride, input_params, 0);
421 ofs[axis] += input_params.dim[axis];
422 }
423 // Format transform is more flexible.
424 ccv_nnc_graph_exec_symbol_new(graph, CMD_FORMAT_TRANSFORM_FORWARD()ccv_nnc_cmd(CCV_NNC_FORMAT_TRANSFORM_FORWARD, 0, ccv_nnc_cmd_auto
, 0)
, inputs, input_size, aliases, input_size, "concat");
425 }
426}
427
428static ccv_cnnp_model_t* _ccv_cnnp_concat_copy(const ccv_cnnp_model_t* const self, void* const context);
429
430static const ccv_cnnp_model_vtab_t ccv_cnnp_concat_isa = {
431 .build = _ccv_cnnp_concat_build,
432 .copy = _ccv_cnnp_concat_copy,
433};
434
435ccv_cnnp_model_t* ccv_cnnp_concat(const int axis, const char* const name)
436{
437 ccv_cnnp_model_concat_t* const model_concat = (ccv_cnnp_model_concat_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_concat_t));
438 model_concat->super.isa = &ccv_cnnp_concat_isa;
439 model_concat->super.input_size = 0;
440 model_concat->super.outputs = &model_concat->output;
441 model_concat->super.output_size = 1;
442 model_concat->axis = axis;
443 ccv_cnnp_model_copy_name(&model_concat->super, name);
444 return (ccv_cnnp_model_t*)model_concat;
445}
446
447static ccv_cnnp_model_t* _ccv_cnnp_concat_copy(const ccv_cnnp_model_t* const super, void* const context)
448{
449 const ccv_cnnp_model_concat_t* const self = (const ccv_cnnp_model_concat_t*)super;
450 return ccv_cnnp_concat(self->axis, self->super.name);
451}
452
453typedef struct {
454 ccv_cnnp_model_t super;
455 int axis;
456 ccv_nnc_tensor_symbol_t outputs[1];
457} ccv_cnnp_model_chunk_t;
458
459static void _ccv_cnnp_chunk_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
460{
461 const ccv_cnnp_model_concat_t* const self = (const ccv_cnnp_model_concat_t*)super;
462 PRINT(CCV_CLI_VERBOSE, "[cnnp_chunk_build] 1. axis: %d\n", self->axis)do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_chunk_build] 1. axis: %d\n", self->axis);
fflush(stdout); } } while (0)
;
463 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 463, __extension__ __PRETTY_FUNCTION__); }))
;
464 const ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
465 if (CCV_CLI_OUTPUT_LEVEL_IS(CCV_CLI_VERBOSE)(CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
466 {
467 PRINT(CCV_CLI_VERBOSE, "[cnnp_chunk_build] 2. input: (%d", input_params.dim[0])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_chunk_build] 2. input: (%d", input_params.dim
[0]); fflush(stdout); } } while (0)
;
468 int i;
469 for (i = 1; i < CCV_NNC_MAX_DIM_ALLOC(12) && input_params.dim[i] > 0; i++)
470 PRINT(CCV_CLI_VERBOSE, ", %d", input_params.dim[i])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(", %d", input_params.dim[i]); fflush(stdout); } } while
(0)
;
471 PRINT(CCV_CLI_VERBOSE, ")\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(")\n"); fflush(stdout); } } while (0)
;
472 }
473 ccv_nnc_tensor_param_t output_params = input_params;
474 int i;
475 const int nd = ccv_nnc_tensor_nd(output_params.dim);
476 const int axis = self->axis;
477 assert(axis < nd)((void) sizeof ((axis < nd) ? 1 : 0), __extension__ ({ if (
axis < nd) ; else __assert_fail ("axis < nd", "ccv_cnnp_model_addons.c"
, 477, __extension__ __PRETTY_FUNCTION__); }))
;
478 const int n = self->super.output_size;
479 assert(n == output_size)((void) sizeof ((n == output_size) ? 1 : 0), __extension__ ({
if (n == output_size) ; else __assert_fail ("n == output_size"
, "ccv_cnnp_model_addons.c", 479, __extension__ __PRETTY_FUNCTION__
); }))
;
480 assert(output_params.dim[axis] % n == 0)((void) sizeof ((output_params.dim[axis] % n == 0) ? 1 : 0), __extension__
({ if (output_params.dim[axis] % n == 0) ; else __assert_fail
("output_params.dim[axis] % n == 0", "ccv_cnnp_model_addons.c"
, 480, __extension__ __PRETTY_FUNCTION__); }))
;
481 output_params.dim[axis] = output_params.dim[axis] / n;
482 int ofs[CCV_NNC_MAX_DIM_ALLOC(12)] = {};
483 int stride[CCV_NNC_MAX_DIM_ALLOC(12)] = {};
484 ccv_nnc_tensor_get_stride(input_params.dim, stride);
485 ccv_nnc_tensor_symbol_t to = ccv_nnc_tensor_symbol_alias_to(graph, inputs[0]);
486 if (to.d == CCV_NNC_NO_TENSOR_SYMBOL) // If we are not reshape an alias, it is straightforward.
487 {
488 for (i = 0; i < output_size; i++)
489 {
490 outputs[i] = ccv_nnc_tensor_symbol_alias_new(graph, inputs[0], ofs, stride, output_params, 0);
491 ofs[axis] += output_params.dim[axis];
492 }
493 } else {
494 // Otherwise, we need to check if it is permute. For permute, we cannot do alias directly.
495 // We need to first materialize the permute and then run reshape on top of it, otherwise it will be wrong.
496 int old_stride[CCV_NNC_MAX_DIM_ALLOC(12)];
497 ccv_nnc_tensor_symbol_alias_params(graph, inputs[0], 0, old_stride);
498 // We identify permute by checking if the stride is not in descending order.
499 // This also covered "permute" through reshape, rather than using ccv_cnnp_permute directly.
500 int i, no_permute = 1;
501 for (i = 1; no_permute && i < nd; i++)
502 if (old_stride[i - 1] < old_stride[i])
503 no_permute = 0;
504 if (no_permute)
505 { // Just straightforward reshape if there is no no permute.
506 for (i = 0; i < output_size; i++)
507 {
508 outputs[i] = ccv_nnc_tensor_symbol_alias_new(graph, inputs[0], ofs, old_stride, output_params, 0);
509 ofs[axis] += output_params.dim[axis];
510 }
511 } else {
512 // Otherwise, we first do format transform to plain tensor and then do reshape.
513 ccv_nnc_tensor_symbol_t permuted = ccv_nnc_tensor_symbol_new(graph, input_params, 0);
514 ccv_nnc_graph_exec_symbol_new(graph, CMD_FORMAT_TRANSFORM_FORWARD()ccv_nnc_cmd(CCV_NNC_FORMAT_TRANSFORM_FORWARD, 0, ccv_nnc_cmd_auto
, 0)
, TENSOR_SYMBOL_LIST(inputs[0])(const ccv_nnc_tensor_symbol_t []){inputs[0]}, (1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(permuted)(const ccv_nnc_tensor_symbol_t []){permuted}, (1 +1 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "reshape");
515 for (i = 0; i < output_size; i++)
516 {
517 outputs[i] = ccv_nnc_tensor_symbol_alias_new(graph, permuted, ofs, stride, output_params, 0);
518 ofs[axis] += output_params.dim[axis];
519 }
520 }
521 }
522}
523
524static ccv_cnnp_model_t* _ccv_cnnp_chunk_copy(const ccv_cnnp_model_t* const self, void* const context);
525
526static const ccv_cnnp_model_vtab_t ccv_cnnp_chunk_isa = {
527 .build = _ccv_cnnp_chunk_build,
528 .copy = _ccv_cnnp_chunk_copy,
529};
530
531ccv_cnnp_model_t* ccv_cnnp_chunk(const int n, const int axis, const char* const name)
532{
533 assert(n >= 1)((void) sizeof ((n >= 1) ? 1 : 0), __extension__ ({ if (n >=
1) ; else __assert_fail ("n >= 1", "ccv_cnnp_model_addons.c"
, 533, __extension__ __PRETTY_FUNCTION__); }))
;
534 ccv_cnnp_model_chunk_t* const model_chunk = (ccv_cnnp_model_chunk_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_chunk_t) + sizeof(ccv_nnc_tensor_symbol_t) * (n - 1));
535 model_chunk->super.isa = &ccv_cnnp_chunk_isa;
536 model_chunk->super.input_size = 1;
537 model_chunk->super.outputs = model_chunk->outputs;
538 model_chunk->super.output_size = n;
539 model_chunk->axis = axis;
540 ccv_cnnp_model_copy_name(&model_chunk->super, name);
541 return (ccv_cnnp_model_t*)model_chunk;
542}
543
544static ccv_cnnp_model_t* _ccv_cnnp_chunk_copy(const ccv_cnnp_model_t* const super, void* const context)
545{
546 const ccv_cnnp_model_chunk_t* const self = (const ccv_cnnp_model_chunk_t*)super;
547 return ccv_cnnp_chunk(self->super.output_size, self->axis, self->super.name);
548}
549
550typedef struct {
551 ccv_cnnp_model_t super;
552 ccv_nnc_tensor_symbol_t output;
553 int format;
554 int dim[CCV_NNC_MAX_DIM_ALLOC(12)];
555 int ofs[CCV_NNC_MAX_DIM_ALLOC(12)];
556 int stride[CCV_NNC_MAX_DIM_ALLOC(12)];
557} ccv_cnnp_model_reshape_t;
558
559static void _ccv_cnnp_reshape_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
560{
561 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 561, __extension__ __PRETTY_FUNCTION__); }))
;
562 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 562, __extension__ __PRETTY_FUNCTION__
); }))
;
563 ccv_cnnp_model_reshape_t* const self = (ccv_cnnp_model_reshape_t*)super;
564 if (CCV_CLI_OUTPUT_LEVEL_IS(CCV_CLI_VERBOSE)(CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
565 {
566 PRINT(CCV_CLI_VERBOSE, "[cnnp_reshape_build] 1. dim: (%d", self->dim[0])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_reshape_build] 1. dim: (%d", self->dim[0]
); fflush(stdout); } } while (0)
;
567 int i;
568 for (i = 1; i < CCV_NNC_MAX_DIM_ALLOC(12) && self->dim[i] > 0; i++)
569 PRINT(CCV_CLI_VERBOSE, ", %d", self->dim[i])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(", %d", self->dim[i]); fflush(stdout); } } while
(0)
;
570 const int count = i;
571 PRINT(CCV_CLI_VERBOSE, "), ofs: (%d", self->ofs[0])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("), ofs: (%d", self->ofs[0]); fflush(stdout); } }
while (0)
;
572 for (i = 1; i < count; i++)
573 PRINT(CCV_CLI_VERBOSE, ", %d", self->ofs[i])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(", %d", self->ofs[i]); fflush(stdout); } } while
(0)
;
574 PRINT(CCV_CLI_VERBOSE, "), stride: (%d", self->stride[0])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("), stride: (%d", self->stride[0]); fflush(stdout
); } } while (0)
;
575 for (i = 1; i < count; i++)
576 PRINT(CCV_CLI_VERBOSE, ", %d", self->stride[i])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(", %d", self->stride[i]); fflush(stdout); } } while
(0)
;
577 PRINT(CCV_CLI_VERBOSE, ")\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(")\n"); fflush(stdout); } } while (0)
;
578 }
579 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
580 if (CCV_CLI_OUTPUT_LEVEL_IS(CCV_CLI_VERBOSE)(CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
581 {
582 PRINT(CCV_CLI_VERBOSE, "[cnnp_reshape_build] 2. input: (%d", params.dim[0])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_reshape_build] 2. input: (%d", params.dim[0]
); fflush(stdout); } } while (0)
;
583 int i;
584 for (i = 1; i < CCV_NNC_MAX_DIM_ALLOC(12) && params.dim[i] > 0; i++)
585 PRINT(CCV_CLI_VERBOSE, ", %d", params.dim[i])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(", %d", params.dim[i]); fflush(stdout); } } while (
0)
;
586 PRINT(CCV_CLI_VERBOSE, ")\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(")\n"); fflush(stdout); } } while (0)
;
587 }
588 if (self->format > 0)
589 params.format = self->format;
590 assert(ccv_nnc_dimension_count(self->dim) <= ccv_nnc_tensor_count(params))((void) sizeof ((ccv_nnc_dimension_count(self->dim) <= ccv_nnc_tensor_count
(params)) ? 1 : 0), __extension__ ({ if (ccv_nnc_dimension_count
(self->dim) <= ccv_nnc_tensor_count(params)) ; else __assert_fail
("ccv_nnc_dimension_count(self->dim) <= ccv_nnc_tensor_count(params)"
, "ccv_cnnp_model_addons.c", 590, __extension__ __PRETTY_FUNCTION__
); }))
;
591 ccv_nnc_tensor_symbol_t to = ccv_nnc_tensor_symbol_alias_to(graph, inputs[0]);
592 int stride_from_dim[CCV_NNC_MAX_DIM_ALLOC(12)];
593 if (to.d == CCV_NNC_NO_TENSOR_SYMBOL) // If we are not reshape an alias, it is straightforward.
594 {
595 memcpy(params.dim, self->dim, sizeof(params.dim));
596 int* stride;
597 if (self->stride[0] == 0)
598 {
599 ccv_nnc_tensor_get_stride(self->dim, stride_from_dim);
600 stride = stride_from_dim;
601 } else
602 stride = self->stride;
603 outputs[0] = ccv_nnc_tensor_symbol_alias_new(graph, inputs[0], self->ofs, stride, params, 0);
604 } else {
605 // Otherwise, we need to check if it is permute. For permute, we cannot do alias directly.
606 // We need to first materialize the permute and then run reshape on top of it, otherwise it will be wrong.
607 int old_stride[CCV_NNC_MAX_DIM_ALLOC(12)];
608 ccv_nnc_tensor_symbol_alias_params(graph, inputs[0], 0, old_stride);
609 // We identify permute by checking if the stride is not in descending order.
610 // This also covered "permute" through reshape, rather than using ccv_cnnp_permute directly.
611 const int nd = ccv_nnc_tensor_nd(params.dim);
612 const int new_nd = ccv_nnc_tensor_nd(self->dim);
613 int i, no_permute = 1;
614 // If the new dim has different nd, or we actually have a stride, we need to check if it is no permute or not.
615 if (new_nd != nd || (self->stride[0] != 0 && memcmp(self->stride, old_stride, sizeof(self->stride)) != 0))
616 for (i = 1; no_permute && i < nd; i++)
617 if (old_stride[i - 1] < old_stride[i])
618 no_permute = 0;
619 if (no_permute)
620 { // Just straightforward reshape if there is no no permute.
621 memcpy(params.dim, self->dim, sizeof(params.dim));
622 int* stride;
623 if (self->stride[0] == 0)
624 {
625 if (new_nd != nd) // Cannot use old stride.
626 {
627 ccv_nnc_tensor_get_stride(self->dim, stride_from_dim);
628 stride = stride_from_dim;
629 } else
630 stride = old_stride;
631 } else
632 stride = self->stride;
633 outputs[0] = ccv_nnc_tensor_symbol_alias_new(graph, inputs[0], self->ofs, stride, params, 0);
634 } else {
635 // Otherwise, we first do format transform to plain tensor and then do reshape.
636 ccv_nnc_tensor_symbol_t permuted = ccv_nnc_tensor_symbol_new(graph, params, 0);
637 ccv_nnc_graph_exec_symbol_new(graph, CMD_FORMAT_TRANSFORM_FORWARD()ccv_nnc_cmd(CCV_NNC_FORMAT_TRANSFORM_FORWARD, 0, ccv_nnc_cmd_auto
, 0)
, TENSOR_SYMBOL_LIST(inputs[0])(const ccv_nnc_tensor_symbol_t []){inputs[0]}, (1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(permuted)(const ccv_nnc_tensor_symbol_t []){permuted}, (1 +1 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "reshape");
638 memcpy(params.dim, self->dim, sizeof(params.dim));
639 int* stride;
640 if (self->stride[0] == 0)
641 {
642 ccv_nnc_tensor_get_stride(self->dim, stride_from_dim);
643 stride = stride_from_dim;
644 } else
645 stride = self->stride;
646 // And then we create alias against the permuted one.
647 outputs[0] = ccv_nnc_tensor_symbol_alias_new(graph, permuted, self->ofs, stride, params, 0);
648 }
649 }
650}
651
652static ccv_cnnp_model_t* _ccv_cnnp_reshape_copy(const ccv_cnnp_model_t* const super, void* const context);
653
654static const ccv_cnnp_model_vtab_t ccv_cnnp_reshape_isa = {
655 .build = _ccv_cnnp_reshape_build,
656 .copy = _ccv_cnnp_reshape_copy,
657};
658
659ccv_cnnp_model_t* ccv_cnnp_reshape(const int format, const int dim[CCV_NNC_MAX_DIM_ALLOC(12)], const int ofs[CCV_NNC_MAX_DIM_ALLOC(12)], const int stride[CCV_NNC_MAX_DIM_ALLOC(12)], const char* const name)
660{
661 ccv_cnnp_model_reshape_t* const model_reshape = (ccv_cnnp_model_reshape_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_reshape_t));
662 model_reshape->super.isa = &ccv_cnnp_reshape_isa;
663 model_reshape->super.input_size = 1;
664 model_reshape->super.outputs = &model_reshape->output;
665 model_reshape->super.output_size = 1;
666 ccv_cnnp_model_copy_name(&model_reshape->super, name);
667 model_reshape->format = format;
668 memcpy(model_reshape->dim, dim, sizeof(model_reshape->dim));
669 memcpy(model_reshape->ofs, ofs, sizeof(model_reshape->ofs));
670 if (stride[0] != 0)
671 memcpy(model_reshape->stride, stride, sizeof(model_reshape->stride));
672 return (ccv_cnnp_model_t*)model_reshape;
673}
674
675static ccv_cnnp_model_t* _ccv_cnnp_reshape_copy(const ccv_cnnp_model_t* const super, void* const context)
676{
677 const ccv_cnnp_model_reshape_t* const self = (const ccv_cnnp_model_reshape_t*)super;
678 return ccv_cnnp_reshape(self->format, self->dim, self->ofs, self->stride, self->super.name);
679}
680
681typedef struct {
682 ccv_cnnp_model_t super;
683 ccv_nnc_tensor_symbol_t output;
684 int type;
685 int begin[CCV_NNC_MAX_DIM_ALLOC(12)];
686 int end[CCV_NNC_MAX_DIM_ALLOC(12)];
687} ccv_cnnp_model_pad_t;
688
689static void _ccv_cnnp_pad_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
690{
691 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 691, __extension__ __PRETTY_FUNCTION__); }))
;
692 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 692, __extension__ __PRETTY_FUNCTION__
); }))
;
693 ccv_cnnp_model_pad_t* const self = (ccv_cnnp_model_pad_t*)super;
694 PRINT(CCV_CLI_VERBOSE, "[cnnp_pad_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_pad_build] -\n"); fflush(stdout); } } while (
0)
;
695 const ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
696 const int nd = ccv_nnc_tensor_nd(input_params.dim);
697 ccv_nnc_tensor_param_t params = input_params;
698 int i;
699 for (i = 0 ; i < nd; i++)
700 params.dim[i] += self->begin[i] + self->end[i];
701 const ccv_nnc_tensor_symbol_t padded = ccv_nnc_tensor_symbol_new(graph, params, 0);
702 ccv_nnc_cmd_t pad = CMD_PAD_FORWARD(self->type, (), ())ccv_nnc_cmd(CCV_NNC_PAD_FORWARD, 0, ((ccv_nnc_cmd_param_t){.size
={.dim={}},.pad={.type=self->type,.end={}}}), 0)
;
703 memcpy(pad.info.size.dim, self->begin, sizeof(pad.info.size.dim));
704 memcpy(pad.info.pad.end, self->end, sizeof(pad.info.pad.end));
705 ccv_nnc_graph_exec_symbol_new(graph, pad, TENSOR_SYMBOL_LIST(inputs[0])(const ccv_nnc_tensor_symbol_t []){inputs[0]}, (1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(padded)(const ccv_nnc_tensor_symbol_t []){padded}, (1 +1 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "pad");
706 outputs[0] = padded;
707}
708
709static ccv_cnnp_model_t* _ccv_cnnp_pad_copy(const ccv_cnnp_model_t* const super, void* const context);
710
711static const ccv_cnnp_model_vtab_t ccv_cnnp_pad_isa = {
712 .build = _ccv_cnnp_pad_build,
713 .copy = _ccv_cnnp_pad_copy,
714};
715
716ccv_cnnp_model_t* ccv_cnnp_pad(const int type, const int begin[CCV_NNC_MAX_DIM_ALLOC(12)], const int end[CCV_NNC_MAX_DIM_ALLOC(12)], const char* const name)
717{
718 ccv_cnnp_model_pad_t* const model_pad = (ccv_cnnp_model_pad_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_pad_t));
719 model_pad->super.isa = &ccv_cnnp_pad_isa;
720 model_pad->super.input_size = 1;
721 model_pad->super.outputs = &model_pad->output;
722 model_pad->super.output_size = 1;
723 ccv_cnnp_model_copy_name(&model_pad->super, name);
724 model_pad->type = type;
725 memcpy(model_pad->begin, begin, sizeof(model_pad->begin));
726 memcpy(model_pad->end, end, sizeof(model_pad->end));
727 return (ccv_cnnp_model_t*)model_pad;
728}
729
730static ccv_cnnp_model_t* _ccv_cnnp_pad_copy(const ccv_cnnp_model_t* const super, void* const context)
731{
732 const ccv_cnnp_model_pad_t* const self = (const ccv_cnnp_model_pad_t*)super;
733 return ccv_cnnp_pad(self->type, self->begin, self->end, self->super.name);
734}
735
736typedef struct {
737 ccv_cnnp_model_t super;
738 ccv_nnc_tensor_symbol_t output;
739} ccv_cnnp_model_identity_t;
740
741static void _ccv_cnnp_identity_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
742{
743 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 743, __extension__ __PRETTY_FUNCTION__); }))
;
744 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 744, __extension__ __PRETTY_FUNCTION__
); }))
;
745 PRINT(CCV_CLI_VERBOSE, "[cnnp_identity_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_identity_build] -\n"); fflush(stdout); } } while
(0)
;
746 outputs[0] = inputs[0];
747}
748
749static ccv_cnnp_model_t* _ccv_cnnp_identity_copy(const ccv_cnnp_model_t* const super, void* const context);
750
751static const ccv_cnnp_model_vtab_t ccv_cnnp_identity_isa = {
752 .build = _ccv_cnnp_identity_build,
753 .copy = _ccv_cnnp_identity_copy,
754};
755
756ccv_cnnp_model_t* ccv_cnnp_identity(const char* const name)
757{
758 ccv_cnnp_model_identity_t* const model_identity = (ccv_cnnp_model_identity_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_identity_t));
759 model_identity->super.isa = &ccv_cnnp_identity_isa;
760 model_identity->super.input_size = 1;
761 model_identity->super.outputs = &model_identity->output;
762 model_identity->super.output_size = 1;
763 ccv_cnnp_model_copy_name(&model_identity->super, name);
764 return (ccv_cnnp_model_t*)model_identity;
765}
766
767static ccv_cnnp_model_t* _ccv_cnnp_identity_copy(const ccv_cnnp_model_t* const super, void* const context)
768{
769 const ccv_cnnp_model_identity_t* const self = (const ccv_cnnp_model_identity_t*)super;
770 return ccv_cnnp_identity(self->super.name);
771}
772
773typedef struct {
774 ccv_cnnp_model_t super;
775 ccv_nnc_tensor_symbol_t output;
776 int index[CCV_NNC_MAX_DIM_ALLOC(12)];
777} ccv_cnnp_model_permute_t;
778
779static void _ccv_cnnp_permute_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
780{
781 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 781, __extension__ __PRETTY_FUNCTION__); }))
;
782 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 782, __extension__ __PRETTY_FUNCTION__
); }))
;
783 ccv_cnnp_model_permute_t* const self = (ccv_cnnp_model_permute_t*)super;
784 PRINT(CCV_CLI_VERBOSE, "[cnnp_permute_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_permute_build] -\n"); fflush(stdout); } } while
(0)
;
785 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
786 ccv_nnc_tensor_symbol_t to = ccv_nnc_tensor_symbol_alias_to(graph, inputs[0]);
787 const int nd = ccv_nnc_tensor_nd(params.dim);
788 int input_dim[CCV_NNC_MAX_DIM_ALLOC(12)];
789 memcpy(input_dim, params.dim, sizeof(params.dim));
790 int input_stride[CCV_NNC_MAX_DIM_ALLOC(12)] = {};
791 int output_stride[CCV_NNC_MAX_DIM_ALLOC(12)] = {};
792 if (to.d == CCV_NNC_NO_TENSOR_SYMBOL) // If it is not an alias. Find stride and permute.
793 {
794 ccv_nnc_tensor_get_stride(input_dim, input_stride);
795 int i;
796 for (i = 0; i < nd; i++)
797 {
798 const int idx = self->index[i];
799 assert(idx >= 0 && idx < nd)((void) sizeof ((idx >= 0 && idx < nd) ? 1 : 0)
, __extension__ ({ if (idx >= 0 && idx < nd) ; else
__assert_fail ("idx >= 0 && idx < nd", "ccv_cnnp_model_addons.c"
, 799, __extension__ __PRETTY_FUNCTION__); }))
;
800 params.dim[i] = input_dim[idx];
801 output_stride[i] = input_stride[idx];
802 }
803 outputs[0] = ccv_nnc_tensor_symbol_alias_new(graph, inputs[0], ccv_nnc_no_ofs, output_stride, params, 0);
804 } else {
805 // if it is an alias, we can get the stride from it and use that.
806 int input_ofs[CCV_NNC_MAX_DIM_ALLOC(12)];
807 ccv_nnc_tensor_symbol_alias_params(graph, inputs[0], input_ofs, input_stride);
808 assert(input_stride[0] != 0)((void) sizeof ((input_stride[0] != 0) ? 1 : 0), __extension__
({ if (input_stride[0] != 0) ; else __assert_fail ("input_stride[0] != 0"
, "ccv_cnnp_model_addons.c", 808, __extension__ __PRETTY_FUNCTION__
); }))
;
809 int output_ofs[CCV_NNC_MAX_DIM_ALLOC(12)] = {};
810 int i;
811 for (i = 0; i < nd; i++)
812 {
813 const int idx = self->index[i];
814 assert(idx >= 0 && idx < nd)((void) sizeof ((idx >= 0 && idx < nd) ? 1 : 0)
, __extension__ ({ if (idx >= 0 && idx < nd) ; else
__assert_fail ("idx >= 0 && idx < nd", "ccv_cnnp_model_addons.c"
, 814, __extension__ __PRETTY_FUNCTION__); }))
;
815 params.dim[i] = input_dim[idx];
816 output_stride[i] = input_stride[idx];
817 output_ofs[i] = input_ofs[idx];
818 }
819 outputs[0] = ccv_nnc_tensor_symbol_alias_new(graph, inputs[0], output_ofs, output_stride, params, 0);
820 }
821}
822
823static ccv_cnnp_model_t* _ccv_cnnp_permute_copy(const ccv_cnnp_model_t* const super, void* const context);
824
825static const ccv_cnnp_model_vtab_t ccv_cnnp_permute_isa = {
826 .build = _ccv_cnnp_permute_build,
827 .copy = _ccv_cnnp_permute_copy,
828};
829
830ccv_cnnp_model_t* ccv_cnnp_permute(const int index[CCV_NNC_MAX_DIM_ALLOC(12)], const char* const name)
831{
832 ccv_cnnp_model_permute_t* const model_permute = (ccv_cnnp_model_permute_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_permute_t));
833 model_permute->super.isa = &ccv_cnnp_permute_isa;
834 model_permute->super.input_size = 1;
835 model_permute->super.outputs = &model_permute->output;
836 model_permute->super.output_size = 1;
837 ccv_cnnp_model_copy_name(&model_permute->super, name);
838 memcpy(model_permute->index, index, sizeof(model_permute->index));
839 return (ccv_cnnp_model_t*)model_permute;
840}
841
842static ccv_cnnp_model_t* _ccv_cnnp_permute_copy(const ccv_cnnp_model_t* const super, void* const context)
843{
844 const ccv_cnnp_model_permute_t* const self = (const ccv_cnnp_model_permute_t*)super;
845 return ccv_cnnp_permute(self->index, self->super.name);
846}
847
848typedef struct {
849 ccv_cnnp_model_t super;
850 int index;
851 ccv_nnc_tensor_symbol_t output;
852} ccv_cnnp_model_extract_t;
853
854static void _ccv_cnnp_extract_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
855{
856 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 856, __extension__ __PRETTY_FUNCTION__
); }))
;
857 ccv_cnnp_model_extract_t* const self = (ccv_cnnp_model_extract_t*)super;
858 PRINT(CCV_CLI_VERBOSE, "[cnnp_extract_build] index: %d\n", self->index)do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_extract_build] index: %d\n", self->index)
; fflush(stdout); } } while (0)
;
859 outputs[0] = inputs[self->index];
860}
861
862static ccv_cnnp_model_t* _ccv_cnnp_extract_copy(const ccv_cnnp_model_t* const self, void* const context);
863
864static const ccv_cnnp_model_vtab_t ccv_cnnp_extract_isa = {
865 .build = _ccv_cnnp_extract_build,
866 .copy = _ccv_cnnp_extract_copy,
867};
868
869ccv_cnnp_model_t* ccv_cnnp_extract(const int index, const char* const name)
870{
871 ccv_cnnp_model_extract_t* const model_extract = (ccv_cnnp_model_extract_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_extract_t));
872 model_extract->index = index;
873 model_extract->super.isa = &ccv_cnnp_extract_isa;
874 model_extract->super.input_size = 0;
875 model_extract->super.outputs = &model_extract->output;
876 model_extract->super.output_size = 1;
877 ccv_cnnp_model_copy_name(&model_extract->super, name);
878 return (ccv_cnnp_model_t*)model_extract;
879}
880
881static ccv_cnnp_model_t* _ccv_cnnp_extract_copy(const ccv_cnnp_model_t* const super, void* const context)
882{
883 ccv_cnnp_model_extract_t* const self = (ccv_cnnp_model_extract_t*)super;
884 return ccv_cnnp_extract(self->index, self->super.name);
885}
886
887typedef struct {
888 ccv_cnnp_model_t super;
889 ccv_nnc_tensor_symbol_t output;
890} ccv_cnnp_model_flatten_t;
891
892static void _ccv_cnnp_flatten_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
893{
894 PRINT(CCV_CLI_VERBOSE, "[cnnp_flatten_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_flatten_build] -\n"); fflush(stdout); } } while
(0)
;
895 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 895, __extension__ __PRETTY_FUNCTION__); }))
;
896 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 896, __extension__ __PRETTY_FUNCTION__
); }))
;
897 const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
898 ccv_nnc_tensor_param_t output_params = params;
899 memset(output_params.dim, 0, sizeof(output_params.dim));
900 output_params.dim[0] = ccv_nnc_tensor_get_n(params);
901 assert(output_params.dim[0] > 0)((void) sizeof ((output_params.dim[0] > 0) ? 1 : 0), __extension__
({ if (output_params.dim[0] > 0) ; else __assert_fail ("output_params.dim[0] > 0"
, "ccv_cnnp_model_addons.c", 901, __extension__ __PRETTY_FUNCTION__
); }))
;
902 output_params.dim[1] = ccv_nnc_tensor_count(params) / output_params.dim[0];
903 int stride[CCV_NNC_MAX_DIM_ALLOC(12)] = {};
904 ccv_nnc_tensor_get_stride(output_params.dim, stride);
905 outputs[0] = ccv_nnc_tensor_symbol_alias_new(graph, inputs[0], DIM_ALLOC()(int [(12)]){}, stride, output_params, 0);
906}
907
908static ccv_cnnp_model_t* _ccv_cnnp_flatten_copy(const ccv_cnnp_model_t* const self, void* const context);
909
910static const ccv_cnnp_model_vtab_t ccv_cnnp_flatten_isa = {
911 .build = _ccv_cnnp_flatten_build,
912 .copy = _ccv_cnnp_flatten_copy,
913};
914
915ccv_cnnp_model_t* ccv_cnnp_flatten(const char* const name)
916{
917 ccv_cnnp_model_flatten_t* const model_flatten = (ccv_cnnp_model_flatten_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_flatten_t));
918 model_flatten->super.isa = &ccv_cnnp_flatten_isa;
919 model_flatten->super.input_size = 1;
920 model_flatten->super.outputs = &model_flatten->output;
921 model_flatten->super.output_size = 1;
922 ccv_cnnp_model_copy_name(&model_flatten->super, name);
923 return (ccv_cnnp_model_t*)model_flatten;
924}
925
926static ccv_cnnp_model_t* _ccv_cnnp_flatten_copy(const ccv_cnnp_model_t* const self, void* const context)
927{
928 return ccv_cnnp_flatten(self->name);
929}
930
931// MARK - Batch Norm Layer
932
933typedef struct {
934 ccv_cnnp_model_t super;
935 ccv_nnc_tensor_symbol_t output;
936 ccv_nnc_tensor_symbol_t bias;
937 ccv_nnc_tensor_symbol_t scale;
938 ccv_nnc_graph_exec_symbol_t batch_norm;
939 ccv_nnc_cmd_param_t params;
940 ccv_array_t* zero_inits;
941 ccv_array_t* retainables;
942} ccv_cnnp_model_batch_norm_t;
943
944static void _ccv_cnnp_batch_norm_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
945{
946 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 946, __extension__ __PRETTY_FUNCTION__); }))
;
947 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 947, __extension__ __PRETTY_FUNCTION__
); }))
;
948 ccv_cnnp_model_batch_norm_t* const self = (ccv_cnnp_model_batch_norm_t*)super;
949 PRINT(CCV_CLI_VERBOSE, "[cnnp_batch_norm_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_batch_norm_build] -\n"); fflush(stdout); } }
while (0)
;
950 const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
951 const int nd = ccv_nnc_tensor_nd(params.dim);
952 ccv_nnc_tensor_param_t bias_params = params;
953 memset(bias_params.dim, 0, sizeof(bias_params.dim));
954 // If the accuracy is not enough, bump it to 32-bit floating point.
955 if (bias_params.datatype != CCV_32F && bias_params.datatype != CCV_64F)
956 bias_params.datatype = CCV_32F;
957 bias_params.dim[0] = nd > 1 ? ccv_nnc_tensor_get_c(params) : params.dim[0];
958 const ccv_nnc_tensor_symbol_t output = ccv_nnc_tensor_symbol_new(graph, params, 0);
959 // Both scale and bias are shared between if this model is reused.
960 if (!self->scale.graph)
961 self->scale = ccv_nnc_tensor_symbol_new(graph, bias_params, "scale");
962 if (!self->bias.graph)
963 self->bias = ccv_nnc_tensor_symbol_new(graph, bias_params, "bias");
964 const ccv_nnc_tensor_symbol_t mean = ccv_nnc_tensor_symbol_new(graph, bias_params, "mean");
965 const ccv_nnc_tensor_symbol_t var = ccv_nnc_tensor_symbol_new(graph, bias_params, "var");
966 // Otherwise, notice mean, var, saved_mean, saved_inv_std are not reused.
967 if (!self->zero_inits)
968 self->zero_inits = ccv_array_new(sizeof(ccv_nnc_tensor_symbol_t), 0, 0);
969 ccv_array_push(self->zero_inits, &mean);
970 ccv_array_push(self->zero_inits, &var);
971 const ccv_nnc_tensor_symbol_t out_mean = ccv_nnc_tensor_symbol_new(graph, bias_params, "out_mean");
972 const ccv_nnc_tensor_symbol_t out_var = ccv_nnc_tensor_symbol_new(graph, bias_params, "out_var");
973 if (!self->retainables)
974 self->retainables = ccv_array_new(sizeof(ccv_nnc_tensor_symbol_t), 0, 0);
975 ccv_array_push(self->retainables, &out_mean);
976 ccv_array_push(self->retainables, &out_var);
977 const ccv_nnc_tensor_symbol_t saved_mean = ccv_nnc_tensor_symbol_new(graph, bias_params, "saved_mean");
978 const ccv_nnc_tensor_symbol_t saved_inv_std = ccv_nnc_tensor_symbol_new(graph, bias_params, "saved_inv_std");
979 const int hw = ccv_nnc_tensor_hw(params, ccv_nnc_tensor_nd(params.dim), CCV_NNC_MAX_DIM(2));
980 ccv_nnc_cmd_param_t batch_norm = self->params;
981 batch_norm.bnorm.count = hw >= 0 ? CCV_NNC_MAX_DIM(2) + 1 : 1;
982 int i;
983 batch_norm.bnorm.axis[0] = (params.format == CCV_TENSOR_FORMAT_CHWN) ? 3 : 0;
984 if (hw >= 0)
985 for (i = 0; i < CCV_NNC_MAX_DIM(2); i++)
986 batch_norm.bnorm.axis[i + 1] = i + hw;
987 self->params = batch_norm;
988 self->batch_norm = ccv_nnc_graph_exec_symbol_new(graph, ccv_nnc_cmd(CCV_NNC_BATCH_NORM_FORWARD, 0, batch_norm, 0), TENSOR_SYMBOL_LIST(inputs[0], self->scale, self->bias, mean, var)(const ccv_nnc_tensor_symbol_t []){inputs[0], self->scale,
self->bias, mean, var}, (1 +1 +1 +1 +1 +1 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(output, out_mean, out_var, saved_mean, saved_inv_std)(const ccv_nnc_tensor_symbol_t []){output, out_mean, out_var,
saved_mean, saved_inv_std}, (1 +1 +1 +1 +1 +1 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "batch_norm");
989 outputs[0] = output;
990}
991
992static void _ccv_cnnp_batch_norm_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
993{
994 ccv_cnnp_model_batch_norm_t* const self = (ccv_cnnp_model_batch_norm_t*)super;
995 if (self->scale.graph)
996 initializer(context, CMD_RANDOM_UNIFORM_FORWARD(0, 1)ccv_nnc_cmd(CCV_NNC_RANDOM_UNIFORM_FORWARD, 0, (ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.blas={.a={0, 1}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->scale);
997 if (self->bias.graph)
998 initializer(context, CMD_SET_FORWARD(0)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={0,}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->bias);
999 int i;
1000 if (self->zero_inits)
1001 for (i = 0; i < self->zero_inits->rnum; i++)
1002 initializer(context, CMD_SET_FORWARD(0)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={0,}}}, 0)
, ccv_nnc_no_hint, 0, 0, *(ccv_nnc_tensor_symbol_t*)ccv_array_get(self->zero_inits, i)((void*)(((char*)((self->zero_inits)->data)) + (size_t)
(self->zero_inits)->rsize * (size_t)(i)))
);
1003}
1004
1005static void _ccv_cnnp_batch_norm_add_to_parameter(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const parameters, const int is_trainable)
1006{
1007 ccv_cnnp_model_batch_norm_t* const self = (ccv_cnnp_model_batch_norm_t*)super;
1008 if (self->scale.graph)
1009 add_to_array(parameters, self->scale, is_trainable);
1010 if (self->bias.graph)
1011 add_to_array(parameters, self->bias, is_trainable);
1012}
1013
1014static void _ccv_cnnp_batch_norm_add_to_output(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const outputs)
1015{
1016 ccv_cnnp_model_batch_norm_t* const self = (ccv_cnnp_model_batch_norm_t*)super;
1017 int i;
1018 if (self->retainables)
1019 for (i = 0; i < self->retainables->rnum; i++)
1020 {
1021 const ccv_nnc_tensor_symbol_t symbol = *(ccv_nnc_tensor_symbol_t*)ccv_array_get(self->retainables, i)((void*)(((char*)((self->retainables)->data)) + (size_t
)(self->retainables)->rsize * (size_t)(i)))
;
1022 add_to_array(outputs, symbol, 0);
1023 }
1024}
1025
1026static void _ccv_cnnp_batch_norm_set_is_test(ccv_cnnp_model_t* const super, const int is_test, const ccv_cnnp_cmd_updater_f updater, void* const context)
1027{
1028 ccv_cnnp_model_batch_norm_t* const self = (ccv_cnnp_model_batch_norm_t*)super;
1029 if (self->batch_norm.graph)
1030 {
1031 self->params.bnorm.is_test = is_test;
1032 updater(context, self->batch_norm, ccv_nnc_cmd(CCV_NNC_BATCH_NORM_FORWARD, 0, self->params, 0), ccv_nnc_no_hint);
1033 }
1034}
1035
1036static void _ccv_cnnp_batch_norm_deinit(ccv_cnnp_model_t* const super)
1037{
1038 ccv_cnnp_model_batch_norm_t* const self = (ccv_cnnp_model_batch_norm_t*)super;
1039 if (self->zero_inits)
1040 ccv_array_free(self->zero_inits);
1041 if (self->retainables)
1042 ccv_array_free(self->retainables);
1043}
1044
1045static ccv_cnnp_model_t* _ccv_cnnp_batch_norm_copy(const ccv_cnnp_model_t* const super, void* const context);
1046
1047static const ccv_cnnp_model_vtab_t ccv_cnnp_batch_norm_isa = {
1048 .build = _ccv_cnnp_batch_norm_build,
1049 .init_states = _ccv_cnnp_batch_norm_init_states,
1050 .add_to_parameter = _ccv_cnnp_batch_norm_add_to_parameter,
1051 .add_to_output = _ccv_cnnp_batch_norm_add_to_output,
1052 .copy = _ccv_cnnp_batch_norm_copy,
1053 .set_is_test = _ccv_cnnp_batch_norm_set_is_test,
1054 .deinit = _ccv_cnnp_batch_norm_deinit,
1055};
1056
1057ccv_cnnp_model_t* ccv_cnnp_batch_norm(const float momentum, const float epsilon, const int is_trainable, const char* const name)
1058{
1059 ccv_cnnp_model_batch_norm_t* const model_batch_norm = (ccv_cnnp_model_batch_norm_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_batch_norm_t));
1060 model_batch_norm->super.isa = &ccv_cnnp_batch_norm_isa;
1061 model_batch_norm->super.input_size = 1;
1062 model_batch_norm->super.outputs = &model_batch_norm->output;
1063 model_batch_norm->super.output_size = 1;
1064 model_batch_norm->super.is_trainable = is_trainable;
1065 ccv_cnnp_model_copy_name(&model_batch_norm->super, name);
1066 model_batch_norm->scale.d = CCV_NNC_NO_TENSOR_SYMBOL;
1067 model_batch_norm->scale.graph = 0;
1068 model_batch_norm->bias.d = CCV_NNC_NO_TENSOR_SYMBOL;
1069 model_batch_norm->bias.graph = 0;
1070 model_batch_norm->params.bnorm.momentum = momentum;
1071 model_batch_norm->params.bnorm.epsilon = epsilon;
1072 return (ccv_cnnp_model_t*)model_batch_norm;
1073}
1074
1075static ccv_cnnp_model_t* _ccv_cnnp_batch_norm_copy(const ccv_cnnp_model_t* const super, void* const context)
1076{
1077 const ccv_cnnp_model_batch_norm_t* const self = (const ccv_cnnp_model_batch_norm_t*)super;
1078 return ccv_cnnp_batch_norm(self->params.bnorm.momentum, self->params.bnorm.epsilon, self->super.is_trainable, self->super.name);
1079}
1080
1081// MARK - Convolution Layer
1082
1083typedef struct {
1084 ccv_cnnp_model_t super;
1085 ccv_nnc_tensor_symbol_t output;
1086 ccv_nnc_tensor_symbol_t weights;
1087 ccv_nnc_tensor_symbol_t bias;
1088 int groups;
1089 int filters;
1090 int kdim[CCV_NNC_MAX_DIM_ALLOC(12)];
1091 int dilation[CCV_NNC_MAX_DIM_ALLOC(12)];
1092 int no_bias;
1093 int format;
1094 ccv_nnc_hint_t hint;
1095} ccv_cnnp_model_convolution_t;
1096
1097static void _ccv_cnnp_convolution_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1098{
1099 ccv_cnnp_model_convolution_t* const self = (ccv_cnnp_model_convolution_t*)super;
1100 PRINT(CCV_CLI_VERBOSE, "[cnnp_convolution_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_convolution_build] -\n"); fflush(stdout); } }
while (0)
;
1
Assuming the condition is false
2
Taking false branch
3
Loop condition is false. Exiting loop
1101 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 1101, __extension__ __PRETTY_FUNCTION__); }))
;
4
Assuming 'input_size' is equal to 1
5
Taking true branch
1102 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 1102, __extension__ __PRETTY_FUNCTION__
); }))
;
6
Assuming 'output_size' is equal to 1
7
Taking true branch
1103 const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
1104 int i;
1105 const int k_nd = ccv_nnc_tensor_nd(self->kdim);
1106 const int nd = k_nd + 2;
1107 ccv_nnc_tensor_param_t weights_params = params;
1108 if (self->format)
8
Assuming field 'format' is 0
9
Taking false branch
1109 weights_params.format = self->format;
1110 ccv_nnc_tensor_set_n(&weights_params, self->filters);
1111 const int a_nd = ccv_nnc_tensor_nd(params.dim);
1112 int c;
10
'c' declared without an initial value
1113 switch (params.format)
11
'Default' branch taken. Execution continues on line 1128
1114 {
1115 case CCV_TENSOR_FORMAT_NHWC:
1116 c = params.dim[a_nd - 1];
1117 break;
1118 case CCV_TENSOR_FORMAT_NCHW:
1119 if (a_nd == k_nd + 1)
1120 c = params.dim[0];
1121 else
1122 c = params.dim[a_nd <= 1 ? 0 : 1];
1123 break;
1124 case CCV_TENSOR_FORMAT_CHWN:
1125 c = params.dim[0];
1126 break;
1127 }
1128 assert(c % self->groups == 0)((void) sizeof ((c % self->groups == 0) ? 1 : 0), __extension__
({ if (c % self->groups == 0) ; else __assert_fail ("c % self->groups == 0"
, "ccv_cnnp_model_addons.c", 1128, __extension__ __PRETTY_FUNCTION__
); }))
;
12
The left operand of '%' is a garbage value
1129 ccv_nnc_tensor_set_c(&weights_params, nd, c / self->groups);
1130 int hw = -1;
1131 if (weights_params.format == CCV_TENSOR_FORMAT_NHWC || weights_params.format == CCV_TENSOR_FORMAT_CHWN)
1132 hw = 1;
1133 else if (weights_params.format == CCV_TENSOR_FORMAT_NCHW)
1134 hw = 2;
1135 assert(hw >= 0)((void) sizeof ((hw >= 0) ? 1 : 0), __extension__ ({ if (hw
>= 0) ; else __assert_fail ("hw >= 0", "ccv_cnnp_model_addons.c"
, 1135, __extension__ __PRETTY_FUNCTION__); }))
;
1136 for (i = 0; i < k_nd; i++)
1137 weights_params.dim[i + hw] = self->kdim[i];
1138 if (!self->weights.graph)
1139 self->weights = ccv_nnc_tensor_symbol_new(graph, weights_params, "weights");
1140 assert(self->weights.graph == graph)((void) sizeof ((self->weights.graph == graph) ? 1 : 0), __extension__
({ if (self->weights.graph == graph) ; else __assert_fail
("self->weights.graph == graph", "ccv_cnnp_model_addons.c"
, 1140, __extension__ __PRETTY_FUNCTION__); }))
;
1141 ccv_nnc_tensor_param_t bias_params = params;
1142 if (self->format)
1143 bias_params.format = self->format;
1144 memset(bias_params.dim, 0, sizeof(bias_params.dim));
1145 bias_params.dim[0] = self->filters;
1146 ccv_nnc_cmd_t cmd = CMD_CONVOLUTION_FORWARD(self->groups, self->filters)ccv_nnc_cmd(CCV_NNC_CONVOLUTION_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={}},.convolution={.count=self->filters,.groups
=self->groups}}), 0)
;
1147 for (i = 0; i < k_nd; i++)
1148 cmd.info.size.dim[i] = self->kdim[i];
1149 cmd.info.size.dim[k_nd] = c;
1150 memcpy(cmd.info.convolution.dilation, self->dilation, sizeof(self->dilation));
1151 ccv_nnc_tensor_param_t output_params;
1152 // Dilate weight size based on the dilation factor.
1153 for (i = 0; i < k_nd; i++)
1154 weights_params.dim[i + hw] = (self->kdim[i] - 1) * ccv_max(self->dilation[i], 1)({ typeof (self->dilation[i]) _a = (self->dilation[i]);
typeof (1) _b = (1); (_a > _b) ? _a : _b; })
+ 1;
1155 ccv_nnc_hint_tensor_auto(cmd, (ccv_nnc_tensor_param_t []){
1156 params,
1157 weights_params,
1158 bias_params,
1159 }, 3, self->hint, &output_params, 1);
1160 const ccv_nnc_tensor_symbol_t output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1161 ccv_nnc_graph_exec_symbol_t convolution;
1162 if (self->no_bias)
1163 convolution = ccv_nnc_graph_exec_symbol_new(graph, cmd, TENSOR_SYMBOL_LIST(inputs[0], self->weights)(const ccv_nnc_tensor_symbol_t []){inputs[0], self->weights
}, (1 +1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(output)(const ccv_nnc_tensor_symbol_t []){output}, (1 +1 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "convolution");
1164 else {
1165 if (!self->bias.graph)
1166 self->bias = ccv_nnc_tensor_symbol_new(graph, bias_params, "bias");
1167 convolution = ccv_nnc_graph_exec_symbol_new(graph, cmd, TENSOR_SYMBOL_LIST(inputs[0], self->weights, self->bias)(const ccv_nnc_tensor_symbol_t []){inputs[0], self->weights
, self->bias}, (1 +1 +1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(output)(const ccv_nnc_tensor_symbol_t []){output}, (1 +1 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "convolution");
1168 }
1169 ccv_nnc_graph_exec_symbol_set_hint(graph, convolution, self->hint);
1170 outputs[0] = output;
1171}
1172
1173static void _ccv_cnnp_convolution_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
1174{
1175 ccv_cnnp_model_convolution_t* const self = (ccv_cnnp_model_convolution_t*)super;
1176 const ccv_nnc_tensor_param_t weight_params = ccv_nnc_tensor_symbol_params(graph, self->weights);
1177 const int n = ccv_max(ccv_nnc_tensor_get_n(weight_params), 1)({ typeof (ccv_nnc_tensor_get_n(weight_params)) _a = (ccv_nnc_tensor_get_n
(weight_params)); typeof (1) _b = (1); (_a > _b) ? _a : _b
; })
;
1178 const int count = ccv_nnc_tensor_count(weight_params);
1179 const float std = sqrtf(2) / sqrtf(count / n);
1180 const float bound = sqrtf(3) * std;
1181 initializer(context, CMD_RANDOM_UNIFORM_FORWARD(-bound, bound)ccv_nnc_cmd(CCV_NNC_RANDOM_UNIFORM_FORWARD, 0, (ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.blas={.a={-bound, bound}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->weights);
1182 if (self->bias.graph)
1183 initializer(context, CMD_SET_FORWARD(0)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={0,}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->bias);
1184}
1185
1186static void _ccv_cnnp_convolution_add_to_parameter(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const parameters, const int is_trainable)
1187{
1188 ccv_cnnp_model_convolution_t* const self = (ccv_cnnp_model_convolution_t*)super;
1189 add_to_array(parameters, self->weights, is_trainable);
1190 if (self->bias.graph)
1191 add_to_array(parameters, self->bias, is_trainable);
1192}
1193
1194static ccv_cnnp_model_t* _ccv_cnnp_convolution_copy(const ccv_cnnp_model_t* const super, void* const context);
1195
1196static const ccv_cnnp_model_vtab_t ccv_cnnp_convolution_isa = {
1197 .build = _ccv_cnnp_convolution_build,
1198 .init_states = _ccv_cnnp_convolution_init_states,
1199 .add_to_parameter = _ccv_cnnp_convolution_add_to_parameter,
1200 .copy = _ccv_cnnp_convolution_copy,
1201};
1202
1203ccv_cnnp_model_t* ccv_cnnp_convolution(const int groups, const int filters, const int kdim[CCV_NNC_MAX_DIM_ALLOC(12)], const int dilation[CCV_NNC_MAX_DIM_ALLOC(12)], const int no_bias, ccv_nnc_hint_t hint, const int format, const int is_trainable, const char* const name)
1204{
1205 ccv_cnnp_model_convolution_t* const model_convolution = (ccv_cnnp_model_convolution_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_convolution_t));
1206 model_convolution->super.isa = &ccv_cnnp_convolution_isa;
1207 model_convolution->super.input_size = 1;
1208 model_convolution->super.outputs = &model_convolution->output;
1209 model_convolution->super.output_size = 1;
1210 model_convolution->super.is_trainable = is_trainable;
1211 ccv_cnnp_model_copy_name(&model_convolution->super, name);
1212 model_convolution->weights.d = CCV_NNC_NO_TENSOR_SYMBOL;
1213 model_convolution->weights.graph = 0;
1214 model_convolution->bias.d = CCV_NNC_NO_TENSOR_SYMBOL;
1215 model_convolution->bias.graph = 0;
1216 model_convolution->groups = groups;
1217 model_convolution->filters = filters;
1218 memcpy(model_convolution->kdim, kdim, sizeof(model_convolution->kdim));
1219 memcpy(model_convolution->dilation, dilation, sizeof(model_convolution->dilation));
1220 model_convolution->no_bias = no_bias;
1221 model_convolution->hint = hint;
1222 model_convolution->format = format;
1223 return (ccv_cnnp_model_t*)model_convolution;
1224}
1225
1226static ccv_cnnp_model_t* _ccv_cnnp_convolution_copy(const ccv_cnnp_model_t* const super, void* const context)
1227{
1228 ccv_cnnp_model_convolution_t* const self = (ccv_cnnp_model_convolution_t*)super;
1229 return ccv_cnnp_convolution(self->groups, self->filters, self->kdim, self->dilation, self->no_bias, self->hint, self->format, self->super.is_trainable, self->super.name);
1230}
1231
1232// MARK - Convolution Transpose Layer
1233
1234typedef struct {
1235 ccv_cnnp_model_t super;
1236 ccv_nnc_tensor_symbol_t output;
1237 ccv_nnc_tensor_symbol_t weights;
1238 ccv_nnc_tensor_symbol_t bias;
1239 int groups;
1240 int filters;
1241 int kdim[CCV_NNC_MAX_DIM_ALLOC(12)];
1242 int dilation[CCV_NNC_MAX_DIM_ALLOC(12)];
1243 int output_padding;
1244 int no_bias;
1245 int format;
1246 ccv_nnc_hint_t hint;
1247} ccv_cnnp_model_convolution_transpose_t;
1248
1249static void _ccv_cnnp_convolution_transpose_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1250{
1251 ccv_cnnp_model_convolution_transpose_t* const self = (ccv_cnnp_model_convolution_transpose_t*)super;
1252 PRINT(CCV_CLI_VERBOSE, "[cnnp_convolution_transpose_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_convolution_transpose_build] -\n"); fflush(stdout
); } } while (0)
;
1253 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 1253, __extension__ __PRETTY_FUNCTION__); }))
;
1254 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 1254, __extension__ __PRETTY_FUNCTION__
); }))
;
1255 const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
1256 int i;
1257 const int nd = CCV_NNC_MAX_DIM(2) + 2;
1258 ccv_nnc_tensor_param_t weights_params = params;
1259 if (self->format)
1260 weights_params.format = self->format;
1261 const int c = ccv_nnc_tensor_get_c(params);
1262 ccv_nnc_tensor_set_n(&weights_params, c);
1263 assert(c % self->groups == 0)((void) sizeof ((c % self->groups == 0) ? 1 : 0), __extension__
({ if (c % self->groups == 0) ; else __assert_fail ("c % self->groups == 0"
, "ccv_cnnp_model_addons.c", 1263, __extension__ __PRETTY_FUNCTION__
); }))
;
1264 ccv_nnc_tensor_set_c(&weights_params, nd, self->filters / self->groups);
1265 const int hw = ccv_nnc_tensor_hw(weights_params, nd, CCV_NNC_MAX_DIM(2));
1266 assert(hw >= 0)((void) sizeof ((hw >= 0) ? 1 : 0), __extension__ ({ if (hw
>= 0) ; else __assert_fail ("hw >= 0", "ccv_cnnp_model_addons.c"
, 1266, __extension__ __PRETTY_FUNCTION__); }))
;
1267 for (i = 0; i < CCV_NNC_MAX_DIM(2); i++)
1268 weights_params.dim[i + hw] = self->kdim[i];
1269 if (!self->weights.graph)
1270 self->weights = ccv_nnc_tensor_symbol_new(graph, weights_params, "weights");
1271 assert(self->weights.graph == graph)((void) sizeof ((self->weights.graph == graph) ? 1 : 0), __extension__
({ if (self->weights.graph == graph) ; else __assert_fail
("self->weights.graph == graph", "ccv_cnnp_model_addons.c"
, 1271, __extension__ __PRETTY_FUNCTION__); }))
;
1272 ccv_nnc_tensor_param_t bias_params = params;
1273 if (self->format)
1274 bias_params.format = self->format;
1275 memset(bias_params.dim, 0, sizeof(bias_params.dim));
1276 bias_params.dim[0] = self->filters;
1277 ccv_nnc_cmd_t cmd = CMD_CONVOLUTION_TRANSPOSE_FORWARD(self->groups, self->filters, self->output_padding)ccv_nnc_cmd(CCV_NNC_CONVOLUTION_TRANSPOSE_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={}},.convolution_transpose={.count=self->filters
,.groups=self->groups,.output_padding=self->output_padding
}}), 0)
;
1278 for (i = 0; i < CCV_NNC_MAX_DIM(2); i++)
1279 cmd.info.size.dim[i] = self->kdim[i];
1280 cmd.info.size.dim[CCV_NNC_MAX_DIM(2)] = c;
1281 memcpy(cmd.info.convolution_transpose.dilation, self->dilation, sizeof(self->dilation));
1282 ccv_nnc_tensor_param_t output_params;
1283 // Dilate weight size based on the dilation factor.
1284 for (i = 0; i < CCV_NNC_MAX_DIM(2); i++)
1285 weights_params.dim[i + hw] = (self->kdim[i] - 1) * ccv_max(self->dilation[i], 1)({ typeof (self->dilation[i]) _a = (self->dilation[i]);
typeof (1) _b = (1); (_a > _b) ? _a : _b; })
+ 1;
1286 ccv_nnc_hint_tensor_auto(cmd, (ccv_nnc_tensor_param_t []){
1287 params,
1288 weights_params,
1289 bias_params,
1290 }, 3, self->hint, &output_params, 1);
1291 const ccv_nnc_tensor_symbol_t output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1292 ccv_nnc_graph_exec_symbol_t convolution_transpose;
1293 if (self->no_bias)
1294 convolution_transpose = ccv_nnc_graph_exec_symbol_new(graph, cmd, TENSOR_SYMBOL_LIST(inputs[0], self->weights)(const ccv_nnc_tensor_symbol_t []){inputs[0], self->weights
}, (1 +1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(output)(const ccv_nnc_tensor_symbol_t []){output}, (1 +1 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "convolution_transpose");
1295 else {
1296 if (!self->bias.graph)
1297 self->bias = ccv_nnc_tensor_symbol_new(graph, bias_params, "bias");
1298 convolution_transpose = ccv_nnc_graph_exec_symbol_new(graph, cmd, TENSOR_SYMBOL_LIST(inputs[0], self->weights, self->bias)(const ccv_nnc_tensor_symbol_t []){inputs[0], self->weights
, self->bias}, (1 +1 +1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(output)(const ccv_nnc_tensor_symbol_t []){output}, (1 +1 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "convolution_transpose");
1299 }
1300 ccv_nnc_graph_exec_symbol_set_hint(graph, convolution_transpose, self->hint);
1301 outputs[0] = output;
1302}
1303
1304static void _ccv_cnnp_convolution_transpose_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
1305{
1306 ccv_cnnp_model_convolution_transpose_t* const self = (ccv_cnnp_model_convolution_transpose_t*)super;
1307 const ccv_nnc_tensor_param_t weight_params = ccv_nnc_tensor_symbol_params(graph, self->weights);
1308 const int n = ccv_max(ccv_nnc_tensor_get_n(weight_params), 1)({ typeof (ccv_nnc_tensor_get_n(weight_params)) _a = (ccv_nnc_tensor_get_n
(weight_params)); typeof (1) _b = (1); (_a > _b) ? _a : _b
; })
;
1309 const int count = ccv_nnc_tensor_count(weight_params);
1310 const float std = sqrtf(2) / sqrtf(count / n);
1311 const float bound = sqrtf(3) * std;
1312 initializer(context, CMD_RANDOM_UNIFORM_FORWARD(-bound, bound)ccv_nnc_cmd(CCV_NNC_RANDOM_UNIFORM_FORWARD, 0, (ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.blas={.a={-bound, bound}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->weights);
1313 if (self->bias.graph)
1314 initializer(context, CMD_SET_FORWARD(0)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={0,}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->bias);
1315}
1316
1317static void _ccv_cnnp_convolution_transpose_add_to_parameter(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const parameters, const int is_trainable)
1318{
1319 ccv_cnnp_model_convolution_transpose_t* const self = (ccv_cnnp_model_convolution_transpose_t*)super;
1320 add_to_array(parameters, self->weights, is_trainable);
1321 if (self->bias.graph)
1322 add_to_array(parameters, self->bias, is_trainable);
1323}
1324
1325static ccv_cnnp_model_t* _ccv_cnnp_convolution_transpose_copy(const ccv_cnnp_model_t* const super, void* const context);
1326
1327static const ccv_cnnp_model_vtab_t ccv_cnnp_convolution_transpose_isa = {
1328 .build = _ccv_cnnp_convolution_transpose_build,
1329 .init_states = _ccv_cnnp_convolution_transpose_init_states,
1330 .add_to_parameter = _ccv_cnnp_convolution_transpose_add_to_parameter,
1331 .copy = _ccv_cnnp_convolution_transpose_copy,
1332};
1333
1334ccv_cnnp_model_t* ccv_cnnp_convolution_transpose(const int groups, const int filters, const int kdim[CCV_NNC_MAX_DIM_ALLOC(12)], const int dilation[CCV_NNC_MAX_DIM_ALLOC(12)], const int output_padding, const int no_bias, ccv_nnc_hint_t hint, const int format, const int is_trainable, const char* const name)
1335{
1336 ccv_cnnp_model_convolution_transpose_t* const model_convolution_transpose = (ccv_cnnp_model_convolution_transpose_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_convolution_transpose_t));
1337 model_convolution_transpose->super.isa = &ccv_cnnp_convolution_transpose_isa;
1338 model_convolution_transpose->super.input_size = 1;
1339 model_convolution_transpose->super.outputs = &model_convolution_transpose->output;
1340 model_convolution_transpose->super.output_size = 1;
1341 model_convolution_transpose->super.is_trainable = is_trainable;
1342 ccv_cnnp_model_copy_name(&model_convolution_transpose->super, name);
1343 model_convolution_transpose->weights.d = CCV_NNC_NO_TENSOR_SYMBOL;
1344 model_convolution_transpose->weights.graph = 0;
1345 model_convolution_transpose->bias.d = CCV_NNC_NO_TENSOR_SYMBOL;
1346 model_convolution_transpose->bias.graph = 0;
1347 model_convolution_transpose->groups = groups;
1348 model_convolution_transpose->filters = filters;
1349 memcpy(model_convolution_transpose->kdim, kdim, sizeof(model_convolution_transpose->kdim));
1350 memcpy(model_convolution_transpose->dilation, dilation, sizeof(model_convolution_transpose->dilation));
1351 model_convolution_transpose->output_padding = output_padding;
1352 model_convolution_transpose->no_bias = no_bias;
1353 model_convolution_transpose->hint = hint;
1354 model_convolution_transpose->format = format;
1355 return (ccv_cnnp_model_t*)model_convolution_transpose;
1356}
1357
1358static ccv_cnnp_model_t* _ccv_cnnp_convolution_transpose_copy(const ccv_cnnp_model_t* const super, void* const context)
1359{
1360 ccv_cnnp_model_convolution_transpose_t* const self = (ccv_cnnp_model_convolution_transpose_t*)super;
1361 return ccv_cnnp_convolution_transpose(self->groups, self->filters, self->kdim, self->dilation, self->output_padding, self->no_bias, self->hint, self->format, self->super.is_trainable, self->super.name);
1362}
1363
1364// MARK - Dense Layer
1365
1366typedef struct {
1367 ccv_cnnp_model_t super;
1368 ccv_nnc_tensor_symbol_t output;
1369 ccv_nnc_tensor_symbol_t weights;
1370 ccv_nnc_tensor_symbol_t bias;
1371 int count;
1372 int no_bias;
1373 int flags;
1374} ccv_cnnp_model_dense_t;
1375
1376static void _ccv_cnnp_dense_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1377{
1378 ccv_cnnp_model_dense_t* const self = (ccv_cnnp_model_dense_t*)super;
1379 PRINT(CCV_CLI_VERBOSE, "[cnnp_dense_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_dense_build] -\n"); fflush(stdout); } } while
(0)
;
1380 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 1380, __extension__ __PRETTY_FUNCTION__); }))
;
1381 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 1381, __extension__ __PRETTY_FUNCTION__
); }))
;
1382 const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
1383 ccv_nnc_tensor_param_t weights_params = params;
1384 memset(weights_params.dim, 0, sizeof(weights_params.dim));
1385 weights_params.dim[0] = self->count;
1386 weights_params.dim[1] = params.dim[ccv_nnc_tensor_nd(params.dim) - 1];
1387 if (!self->weights.graph)
1388 self->weights = ccv_nnc_tensor_symbol_new(graph, weights_params, "weights");
1389 assert(self->weights.graph == graph)((void) sizeof ((self->weights.graph == graph) ? 1 : 0), __extension__
({ if (self->weights.graph == graph) ; else __assert_fail
("self->weights.graph == graph", "ccv_cnnp_model_addons.c"
, 1389, __extension__ __PRETTY_FUNCTION__); }))
;
1390 ccv_nnc_tensor_param_t bias_params = params;
1391 memset(bias_params.dim, 0, sizeof(bias_params.dim));
1392 bias_params.dim[0] = self->count;
1393 ccv_nnc_cmd_t cmd = {0};
1394 cmd.cmd = CCV_NNC_GEMM_FORWARD;
1395 cmd.info.blas.a[0] = 1;
1396 cmd.info.blas.a[1] = 1;
1397 cmd.info.blas.transpose_b[0] = 0;
1398 cmd.info.blas.transpose_b[1] = 1;
1399 cmd.info.blas.flags = self->flags;
1400 ccv_nnc_tensor_param_t output_params;
1401 ccv_nnc_hint_tensor_auto(cmd, (ccv_nnc_tensor_param_t []){
1402 params,
1403 weights_params,
1404 bias_params,
1405 }, 3, ccv_nnc_no_hint, &output_params, 1);
1406 const ccv_nnc_tensor_symbol_t output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1407 if (self->no_bias)
1408 ccv_nnc_graph_exec_symbol_new(graph, cmd, TENSOR_SYMBOL_LIST(inputs[0], self->weights)(const ccv_nnc_tensor_symbol_t []){inputs[0], self->weights
}, (1 +1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(output)(const ccv_nnc_tensor_symbol_t []){output}, (1 +1 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "dense");
1409 else {
1410 if (!self->bias.graph)
1411 self->bias = ccv_nnc_tensor_symbol_new(graph, bias_params, "bias");
1412 ccv_nnc_graph_exec_symbol_new(graph, cmd, TENSOR_SYMBOL_LIST(inputs[0], self->weights, self->bias)(const ccv_nnc_tensor_symbol_t []){inputs[0], self->weights
, self->bias}, (1 +1 +1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(output)(const ccv_nnc_tensor_symbol_t []){output}, (1 +1 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "dense");
1413 }
1414 outputs[0] = output;
1415}
1416
1417static void _ccv_cnnp_dense_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
1418{
1419 ccv_cnnp_model_dense_t* const self = (ccv_cnnp_model_dense_t*)super;
1420 const ccv_nnc_tensor_param_t weight_params = ccv_nnc_tensor_symbol_params(graph, self->weights);
1421 const int c = weight_params.dim[1];
1422 const float std = sqrtf(2) / sqrtf(c);
1423 const float bound = sqrtf(3) * std;
1424 initializer(context, CMD_RANDOM_UNIFORM_FORWARD(-bound, bound)ccv_nnc_cmd(CCV_NNC_RANDOM_UNIFORM_FORWARD, 0, (ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.blas={.a={-bound, bound}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->weights);
1425 if (self->bias.graph)
1426 initializer(context, CMD_SET_FORWARD(0)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={0,}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->bias);
1427}
1428
1429static void _ccv_cnnp_dense_add_to_parameter(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const parameters, const int is_trainable)
1430{
1431 ccv_cnnp_model_dense_t* const self = (ccv_cnnp_model_dense_t*)super;
1432 add_to_array(parameters, self->weights, is_trainable);
1433 if (self->bias.graph)
1434 add_to_array(parameters, self->bias, is_trainable);
1435}
1436
1437static ccv_cnnp_model_t* _ccv_cnnp_dense_copy(const ccv_cnnp_model_t* const super, void* const context);
1438
1439static const ccv_cnnp_model_vtab_t ccv_cnnp_dense_isa = {
1440 .build = _ccv_cnnp_dense_build,
1441 .init_states = _ccv_cnnp_dense_init_states,
1442 .add_to_parameter = _ccv_cnnp_dense_add_to_parameter,
1443 .copy = _ccv_cnnp_dense_copy,
1444};
1445
1446ccv_cnnp_model_t* ccv_cnnp_dense(const int count, const int no_bias, const int flags, const int is_trainable, const char* const name)
1447{
1448 ccv_cnnp_model_dense_t* const model_dense = (ccv_cnnp_model_dense_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_dense_t));
1449 model_dense->super.isa = &ccv_cnnp_dense_isa;
1450 model_dense->super.input_size = 1;
1451 model_dense->super.outputs = &model_dense->output;
1452 model_dense->super.output_size = 1;
1453 model_dense->super.is_trainable = is_trainable;
1454 ccv_cnnp_model_copy_name(&model_dense->super, name);
1455 model_dense->weights.d = CCV_NNC_NO_TENSOR_SYMBOL;
1456 model_dense->weights.graph = 0;
1457 model_dense->bias.d = CCV_NNC_NO_TENSOR_SYMBOL;
1458 model_dense->bias.graph = 0;
1459 model_dense->count = count;
1460 model_dense->no_bias = no_bias;
1461 model_dense->flags = flags;
1462 return (ccv_cnnp_model_t*)model_dense;
1463}
1464
1465static ccv_cnnp_model_t* _ccv_cnnp_dense_copy(const ccv_cnnp_model_t* const super, void* const context)
1466{
1467 const ccv_cnnp_model_dense_t* const self = (const ccv_cnnp_model_dense_t*)super;
1468 return ccv_cnnp_dense(self->count, self->no_bias, self->flags, self->super.is_trainable, self->super.name);
1469}
1470
1471// MARK - Pool Layers
1472
1473typedef struct {
1474 ccv_cnnp_model_t super;
1475 ccv_nnc_tensor_symbol_t output;
1476 int kdim[CCV_NNC_MAX_DIM_ALLOC(12)];
1477 ccv_nnc_hint_t hint;
1478} ccv_cnnp_model_pool_t;
1479
1480static void _ccv_cnnp_max_pool_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1481{
1482 ccv_cnnp_model_pool_t* const self = (ccv_cnnp_model_pool_t*)super;
1483 PRINT(CCV_CLI_VERBOSE, "[cnnp_max_pool_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_max_pool_build] -\n"); fflush(stdout); } } while
(0)
;
1484 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 1484, __extension__ __PRETTY_FUNCTION__); }))
;
1485 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 1485, __extension__ __PRETTY_FUNCTION__
); }))
;
1486 const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
1487 const int hw = ccv_nnc_tensor_hw(params, ccv_nnc_tensor_nd(params.dim), CCV_NNC_MAX_DIM(2));
1488 ccv_nnc_cmd_t cmd;
1489 if (hw >= 0 && self->kdim[0] == 0 && self->kdim[1] == 0)
1490 cmd = CMD_MAX_POOL_FORWARD(params.dim[hw], params.dim[hw + 1])ccv_nnc_cmd(CCV_NNC_MAX_POOL_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={params.dim[hw], params.dim[hw + 1],1}}}), 0)
;
1491 else
1492 cmd = CMD_MAX_POOL_FORWARD(self->kdim[0], self->kdim[1])ccv_nnc_cmd(CCV_NNC_MAX_POOL_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={self->kdim[0], self->kdim[1],1}}}), 0)
;
1493 ccv_nnc_tensor_param_t output_params;
1494 ccv_nnc_hint_tensor_auto(cmd, &params, 1, self->hint, &output_params, 1);
1495 const ccv_nnc_tensor_symbol_t pool_output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1496 const ccv_nnc_graph_exec_symbol_t exec = ccv_nnc_graph_exec_symbol_new(graph, cmd, TENSOR_SYMBOL_LIST(inputs[0])(const ccv_nnc_tensor_symbol_t []){inputs[0]}, (1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(pool_output)(const ccv_nnc_tensor_symbol_t []){pool_output}, (1 +1 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "max_pool");
1497 ccv_nnc_graph_exec_symbol_set_hint(graph, exec, self->hint);
1498 outputs[0] = pool_output;
1499}
1500
1501static ccv_cnnp_model_t* _ccv_cnnp_max_pool_copy(const ccv_cnnp_model_t* const super, void* const context);
1502
1503static const ccv_cnnp_model_vtab_t ccv_cnnp_max_pool_isa = {
1504 .build = _ccv_cnnp_max_pool_build,
1505 .copy = _ccv_cnnp_max_pool_copy,
1506};
1507
1508ccv_cnnp_model_t* ccv_cnnp_max_pool(const int kdim[CCV_NNC_MAX_DIM_ALLOC(12)], const ccv_nnc_hint_t hint, const char* const name)
1509{
1510 ccv_cnnp_model_pool_t* const model_pool = (ccv_cnnp_model_pool_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_pool_t));
1511 model_pool->super.isa = &ccv_cnnp_max_pool_isa;
1512 model_pool->super.input_size = 1;
1513 model_pool->super.outputs = &model_pool->output;
1514 model_pool->super.output_size = 1;
1515 ccv_cnnp_model_copy_name(&model_pool->super, name);
1516 memcpy(model_pool->kdim, kdim, sizeof(model_pool->kdim));
1517 model_pool->hint = hint;
1518 return (ccv_cnnp_model_t*)model_pool;
1519}
1520
1521static ccv_cnnp_model_t* _ccv_cnnp_max_pool_copy(const ccv_cnnp_model_t* const super, void* const context)
1522{
1523 const ccv_cnnp_model_pool_t* const self = (const ccv_cnnp_model_pool_t*)super;
1524 return ccv_cnnp_max_pool(self->kdim, self->hint, self->super.name);
1525}
1526
1527static void _ccv_cnnp_average_pool_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1528{
1529 ccv_cnnp_model_pool_t* const self = (ccv_cnnp_model_pool_t*)super;
1530 PRINT(CCV_CLI_VERBOSE, "[cnnp_average_pool_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_average_pool_build] -\n"); fflush(stdout); }
} while (0)
;
1531 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 1531, __extension__ __PRETTY_FUNCTION__); }))
;
1532 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 1532, __extension__ __PRETTY_FUNCTION__
); }))
;
1533 const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
1534 const int hw = ccv_nnc_tensor_hw(params, ccv_nnc_tensor_nd(params.dim), CCV_NNC_MAX_DIM(2));
1535 ccv_nnc_cmd_t cmd;
1536 if (hw >= 0 && self->kdim[0] == 0 && self->kdim[1] == 0)
1537 cmd = CMD_AVERAGE_POOL_FORWARD(params.dim[hw], params.dim[hw + 1])ccv_nnc_cmd(CCV_NNC_AVERAGE_POOL_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={params.dim[hw], params.dim[hw + 1],1}}}), 0)
;
1538 else
1539 cmd = CMD_AVERAGE_POOL_FORWARD(self->kdim[0], self->kdim[1])ccv_nnc_cmd(CCV_NNC_AVERAGE_POOL_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={self->kdim[0], self->kdim[1],1}}}), 0)
;
1540 ccv_nnc_tensor_param_t output_params;
1541 ccv_nnc_hint_tensor_auto(cmd, &params, 1, self->hint, &output_params, 1);
1542 const ccv_nnc_tensor_symbol_t pool_output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1543 const ccv_nnc_graph_exec_symbol_t exec = ccv_nnc_graph_exec_symbol_new(graph, cmd, TENSOR_SYMBOL_LIST(inputs[0])(const ccv_nnc_tensor_symbol_t []){inputs[0]}, (1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(pool_output)(const ccv_nnc_tensor_symbol_t []){pool_output}, (1 +1 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "average_pool");
1544 ccv_nnc_graph_exec_symbol_set_hint(graph, exec, self->hint);
1545 outputs[0] = pool_output;
1546}
1547
1548static ccv_cnnp_model_t* _ccv_cnnp_average_pool_copy(const ccv_cnnp_model_t* const super, void* const context);
1549
1550static const ccv_cnnp_model_vtab_t ccv_cnnp_average_pool_isa = {
1551 .build = _ccv_cnnp_average_pool_build,
1552 .copy = _ccv_cnnp_average_pool_copy,
1553};
1554
1555ccv_cnnp_model_t* ccv_cnnp_average_pool(const int kdim[CCV_NNC_MAX_DIM_ALLOC(12)], const ccv_nnc_hint_t hint, const char* const name)
1556{
1557 ccv_cnnp_model_pool_t* const model_pool = (ccv_cnnp_model_pool_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_pool_t));
1558 model_pool->super.isa = &ccv_cnnp_average_pool_isa;
1559 model_pool->super.input_size = 1;
1560 model_pool->super.outputs = &model_pool->output;
1561 model_pool->super.output_size = 1;
1562 ccv_cnnp_model_copy_name(&model_pool->super, name);
1563 memcpy(model_pool->kdim, kdim, sizeof(model_pool->kdim));
1564 model_pool->hint = hint;
1565 return (ccv_cnnp_model_t*)model_pool;
1566}
1567
1568static ccv_cnnp_model_t* _ccv_cnnp_average_pool_copy(const ccv_cnnp_model_t* const super, void* const context)
1569{
1570 const ccv_cnnp_model_pool_t* const self = (const ccv_cnnp_model_pool_t*)super;
1571 return ccv_cnnp_average_pool(self->kdim, self->hint, self->super.name);
1572}
1573
1574// MARK - RELU Layer
1575
1576typedef struct {
1577 ccv_cnnp_model_t super;
1578 ccv_nnc_tensor_symbol_t output;
1579} ccv_cnnp_model_relu_t;
1580
1581static void _ccv_cnnp_relu_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1582{
1583 PRINT(CCV_CLI_VERBOSE, "[cnnp_relu_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_relu_build] -\n"); fflush(stdout); } } while
(0)
;
1584 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 1584, __extension__ __PRETTY_FUNCTION__); }))
;
1585 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 1585, __extension__ __PRETTY_FUNCTION__
); }))
;
1586 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
1587 ccv_nnc_tensor_param_t output_params;
1588 const ccv_nnc_cmd_t relu = CMD_RELU_FORWARD()ccv_nnc_cmd(CCV_NNC_RELU_FORWARD, 0, ccv_nnc_cmd_auto, 0);
1589 ccv_nnc_hint_tensor_auto(relu, (ccv_nnc_tensor_param_t []){
1590 params,
1591 }, 1, ccv_nnc_no_hint, &output_params, 1);
1592 const ccv_nnc_tensor_symbol_t relu_output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1593 ccv_nnc_graph_exec_symbol_new(graph, relu, TENSOR_SYMBOL_LIST(inputs[0])(const ccv_nnc_tensor_symbol_t []){inputs[0]}, (1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(relu_output)(const ccv_nnc_tensor_symbol_t []){relu_output}, (1 +1 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "relu");
1594 outputs[0] = relu_output;
1595}
1596
1597static ccv_cnnp_model_t* _ccv_cnnp_relu_copy(const ccv_cnnp_model_t* const self, void* const context);
1598
1599static const ccv_cnnp_model_vtab_t ccv_cnnp_relu_isa = {
1600 .build = _ccv_cnnp_relu_build,
1601 .copy = _ccv_cnnp_relu_copy,
1602};
1603
1604ccv_cnnp_model_t* ccv_cnnp_relu(const char* const name)
1605{
1606 ccv_cnnp_model_relu_t* const model_relu = (ccv_cnnp_model_relu_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_relu_t));
1607 model_relu->super.isa = &ccv_cnnp_relu_isa;
1608 model_relu->super.input_size = 1;
1609 model_relu->super.outputs = &model_relu->output;
1610 model_relu->super.output_size = 1;
1611 ccv_cnnp_model_copy_name(&model_relu->super, name);
1612 return (ccv_cnnp_model_t*)model_relu;
1613}
1614
1615static ccv_cnnp_model_t* _ccv_cnnp_relu_copy(const ccv_cnnp_model_t* const self, void* const context)
1616{
1617 return ccv_cnnp_relu(self->name);
1618}
1619
1620// MARK - Sigmoid Layer
1621
1622typedef struct {
1623 ccv_cnnp_model_t super;
1624 ccv_nnc_tensor_symbol_t output;
1625} ccv_cnnp_model_sigmoid_t;
1626
1627static void _ccv_cnnp_sigmoid_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1628{
1629 PRINT(CCV_CLI_VERBOSE, "[cnnp_sigmoid_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_sigmoid_build] -\n"); fflush(stdout); } } while
(0)
;
1630 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 1630, __extension__ __PRETTY_FUNCTION__); }))
;
1631 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 1631, __extension__ __PRETTY_FUNCTION__
); }))
;
1632 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
1633 ccv_nnc_tensor_param_t output_params;
1634 const ccv_nnc_cmd_t sigmoid = CMD_SIGMOID_FORWARD()ccv_nnc_cmd(CCV_NNC_SIGMOID_FORWARD, 0, ccv_nnc_cmd_auto, 0);
1635 ccv_nnc_hint_tensor_auto(sigmoid, (ccv_nnc_tensor_param_t []){
1636 params,
1637 }, 1, ccv_nnc_no_hint, &output_params, 1);
1638 const ccv_nnc_tensor_symbol_t sigmoid_output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1639 ccv_nnc_graph_exec_symbol_new(graph, sigmoid, TENSOR_SYMBOL_LIST(inputs[0])(const ccv_nnc_tensor_symbol_t []){inputs[0]}, (1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(sigmoid_output)(const ccv_nnc_tensor_symbol_t []){sigmoid_output}, (1 +1 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1
)
, "sigmoid");
1640 outputs[0] = sigmoid_output;
1641}
1642
1643static ccv_cnnp_model_t* _ccv_cnnp_sigmoid_copy(const ccv_cnnp_model_t* const self, void* const context);
1644
1645static const ccv_cnnp_model_vtab_t ccv_cnnp_sigmoid_isa = {
1646 .build = _ccv_cnnp_sigmoid_build,
1647 .copy = _ccv_cnnp_sigmoid_copy,
1648};
1649
1650ccv_cnnp_model_t* ccv_cnnp_sigmoid(const char* const name)
1651{
1652 ccv_cnnp_model_sigmoid_t* const model_sigmoid = (ccv_cnnp_model_sigmoid_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_sigmoid_t));
1653 model_sigmoid->super.isa = &ccv_cnnp_sigmoid_isa;
1654 model_sigmoid->super.input_size = 1;
1655 model_sigmoid->super.outputs = &model_sigmoid->output;
1656 model_sigmoid->super.output_size = 1;
1657 ccv_cnnp_model_copy_name(&model_sigmoid->super, name);
1658 return (ccv_cnnp_model_t*)model_sigmoid;
1659}
1660
1661static ccv_cnnp_model_t* _ccv_cnnp_sigmoid_copy(const ccv_cnnp_model_t* const self, void* const context)
1662{
1663 return ccv_cnnp_sigmoid(self->name);
1664}
1665
1666// MARK - Tanh Layer
1667
1668typedef struct {
1669 ccv_cnnp_model_t super;
1670 ccv_nnc_tensor_symbol_t output;
1671} ccv_cnnp_model_tanh_t;
1672
1673static void _ccv_cnnp_tanh_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1674{
1675 PRINT(CCV_CLI_VERBOSE, "[cnnp_tanh_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_tanh_build] -\n"); fflush(stdout); } } while
(0)
;
1676 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 1676, __extension__ __PRETTY_FUNCTION__); }))
;
1677 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 1677, __extension__ __PRETTY_FUNCTION__
); }))
;
1678 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
1679 ccv_nnc_tensor_param_t output_params;
1680 const ccv_nnc_cmd_t tanh = CMD_TANH_FORWARD()ccv_nnc_cmd(CCV_NNC_TANH_FORWARD, 0, ccv_nnc_cmd_auto, 0);
1681 ccv_nnc_hint_tensor_auto(tanh, (ccv_nnc_tensor_param_t []){
1682 params,
1683 }, 1, ccv_nnc_no_hint, &output_params, 1);
1684 const ccv_nnc_tensor_symbol_t tanh_output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1685 ccv_nnc_graph_exec_symbol_new(graph, tanh, TENSOR_SYMBOL_LIST(inputs[0])(const ccv_nnc_tensor_symbol_t []){inputs[0]}, (1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(tanh_output)(const ccv_nnc_tensor_symbol_t []){tanh_output}, (1 +1 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "tanh");
1686 outputs[0] = tanh_output;
1687}
1688
1689static ccv_cnnp_model_t* _ccv_cnnp_tanh_copy(const ccv_cnnp_model_t* const self, void* const context);
1690
1691static const ccv_cnnp_model_vtab_t ccv_cnnp_tanh_isa = {
1692 .build = _ccv_cnnp_tanh_build,
1693 .copy = _ccv_cnnp_tanh_copy,
1694};
1695
1696ccv_cnnp_model_t* ccv_cnnp_tanh(const char* const name)
1697{
1698 ccv_cnnp_model_tanh_t* const model_tanh = (ccv_cnnp_model_tanh_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_tanh_t));
1699 model_tanh->super.isa = &ccv_cnnp_tanh_isa;
1700 model_tanh->super.input_size = 1;
1701 model_tanh->super.outputs = &model_tanh->output;
1702 model_tanh->super.output_size = 1;
1703 ccv_cnnp_model_copy_name(&model_tanh->super, name);
1704 return (ccv_cnnp_model_t*)model_tanh;
1705}
1706
1707static ccv_cnnp_model_t* _ccv_cnnp_tanh_copy(const ccv_cnnp_model_t* const self, void* const context)
1708{
1709 return ccv_cnnp_tanh(self->name);
1710}
1711
1712// MARK - Swish Layer
1713
1714typedef struct {
1715 ccv_cnnp_model_t super;
1716 ccv_nnc_tensor_symbol_t output;
1717} ccv_cnnp_model_swish_t;
1718
1719static void _ccv_cnnp_swish_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1720{
1721 PRINT(CCV_CLI_VERBOSE, "[cnnp_swish_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_swish_build] -\n"); fflush(stdout); } } while
(0)
;
1722 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 1722, __extension__ __PRETTY_FUNCTION__); }))
;
1723 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 1723, __extension__ __PRETTY_FUNCTION__
); }))
;
1724 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
1725 ccv_nnc_tensor_param_t output_params;
1726 const ccv_nnc_cmd_t swish = CMD_SWISH_FORWARD()ccv_nnc_cmd(CCV_NNC_SWISH_FORWARD, 0, ccv_nnc_cmd_auto, 0);
1727 ccv_nnc_hint_tensor_auto(swish, (ccv_nnc_tensor_param_t []){
1728 params,
1729 }, 1, ccv_nnc_no_hint, &output_params, 1);
1730 const ccv_nnc_tensor_symbol_t swish_output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1731 ccv_nnc_graph_exec_symbol_new(graph, swish, TENSOR_SYMBOL_LIST(inputs[0])(const ccv_nnc_tensor_symbol_t []){inputs[0]}, (1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(swish_output)(const ccv_nnc_tensor_symbol_t []){swish_output}, (1 +1 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "swish");
1732 outputs[0] = swish_output;
1733}
1734
1735static ccv_cnnp_model_t* _ccv_cnnp_swish_copy(const ccv_cnnp_model_t* const self, void* const context);
1736
1737static const ccv_cnnp_model_vtab_t ccv_cnnp_swish_isa = {
1738 .build = _ccv_cnnp_swish_build,
1739 .copy = _ccv_cnnp_swish_copy,
1740};
1741
1742ccv_cnnp_model_t* ccv_cnnp_swish(const char* const name)
1743{
1744 ccv_cnnp_model_swish_t* const model_swish = (ccv_cnnp_model_swish_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_swish_t));
1745 model_swish->super.isa = &ccv_cnnp_swish_isa;
1746 model_swish->super.input_size = 1;
1747 model_swish->super.outputs = &model_swish->output;
1748 model_swish->super.output_size = 1;
1749 ccv_cnnp_model_copy_name(&model_swish->super, name);
1750 return (ccv_cnnp_model_t*)model_swish;
1751}
1752
1753static ccv_cnnp_model_t* _ccv_cnnp_swish_copy(const ccv_cnnp_model_t* const self, void* const context)
1754{
1755 return ccv_cnnp_swish(self->name);
1756}
1757
1758// MARK - GELU Layer
1759
1760typedef struct {
1761 ccv_cnnp_model_t super;
1762 ccv_nnc_tensor_symbol_t output;
1763 int tanh;
1764} ccv_cnnp_model_gelu_t;
1765
1766static void _ccv_cnnp_gelu_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1767{
1768 PRINT(CCV_CLI_VERBOSE, "[cnnp_gelu_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_gelu_build] -\n"); fflush(stdout); } } while
(0)
;
1769 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 1769, __extension__ __PRETTY_FUNCTION__); }))
;
1770 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 1770, __extension__ __PRETTY_FUNCTION__
); }))
;
1771 ccv_cnnp_model_gelu_t* const self = (ccv_cnnp_model_gelu_t*)super;
1772 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
1773 ccv_nnc_tensor_param_t output_params;
1774 const ccv_nnc_cmd_t gelu = CMD_GELU_FORWARD(self->tanh)ccv_nnc_cmd(CCV_NNC_GELU_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.gelu={.tanh=self->tanh}}, 0)
;
1775 ccv_nnc_hint_tensor_auto(gelu, (ccv_nnc_tensor_param_t []){
1776 params,
1777 }, 1, ccv_nnc_no_hint, &output_params, 1);
1778 const ccv_nnc_tensor_symbol_t gelu_output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1779 ccv_nnc_graph_exec_symbol_new(graph, gelu, TENSOR_SYMBOL_LIST(inputs[0])(const ccv_nnc_tensor_symbol_t []){inputs[0]}, (1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(gelu_output)(const ccv_nnc_tensor_symbol_t []){gelu_output}, (1 +1 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "gelu");
1780 outputs[0] = gelu_output;
1781}
1782
1783static ccv_cnnp_model_t* _ccv_cnnp_gelu_copy(const ccv_cnnp_model_t* const self, void* const context);
1784
1785static const ccv_cnnp_model_vtab_t ccv_cnnp_gelu_isa = {
1786 .build = _ccv_cnnp_gelu_build,
1787 .copy = _ccv_cnnp_gelu_copy,
1788};
1789
1790ccv_cnnp_model_t* ccv_cnnp_gelu(const int tanh, const char* const name)
1791{
1792 ccv_cnnp_model_gelu_t* const model_gelu = (ccv_cnnp_model_gelu_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_gelu_t));
1793 model_gelu->super.isa = &ccv_cnnp_gelu_isa;
1794 model_gelu->super.input_size = 1;
1795 model_gelu->super.outputs = &model_gelu->output;
1796 model_gelu->super.output_size = 1;
1797 model_gelu->tanh = tanh;
1798 ccv_cnnp_model_copy_name(&model_gelu->super, name);
1799 return (ccv_cnnp_model_t*)model_gelu;
1800}
1801
1802static ccv_cnnp_model_t* _ccv_cnnp_gelu_copy(const ccv_cnnp_model_t* const super, void* const context)
1803{
1804 ccv_cnnp_model_gelu_t* const self = (ccv_cnnp_model_gelu_t*)super;
1805 return ccv_cnnp_gelu(self->tanh, self->super.name);
1806}
1807
1808// MARK - Leaky ReLU Layer
1809
1810typedef struct {
1811 ccv_cnnp_model_t super;
1812 ccv_nnc_tensor_symbol_t output;
1813 float negative_slope;
1814} ccv_cnnp_model_leaky_relu_t;
1815
1816static void _ccv_cnnp_leaky_relu_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1817{
1818 PRINT(CCV_CLI_VERBOSE, "[cnnp_leaky_relu_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_leaky_relu_build] -\n"); fflush(stdout); } }
while (0)
;
1819 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 1819, __extension__ __PRETTY_FUNCTION__); }))
;
1820 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 1820, __extension__ __PRETTY_FUNCTION__
); }))
;
1821 ccv_cnnp_model_leaky_relu_t* const self = (ccv_cnnp_model_leaky_relu_t*)super;
1822 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
1823 ccv_nnc_tensor_param_t output_params;
1824 const ccv_nnc_cmd_t leaky_relu = CMD_LEAKY_RELU_FORWARD(self->negative_slope)ccv_nnc_cmd(CCV_NNC_LEAKY_RELU_FORWARD, 0, (ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.leaky_relu={.negative_slope=self->
negative_slope}}, 0)
;
1825 ccv_nnc_hint_tensor_auto(leaky_relu, (ccv_nnc_tensor_param_t []){
1826 params,
1827 }, 1, ccv_nnc_no_hint, &output_params, 1);
1828 const ccv_nnc_tensor_symbol_t leaky_relu_output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1829 ccv_nnc_graph_exec_symbol_new(graph, leaky_relu, TENSOR_SYMBOL_LIST(inputs[0])(const ccv_nnc_tensor_symbol_t []){inputs[0]}, (1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(leaky_relu_output)(const ccv_nnc_tensor_symbol_t []){leaky_relu_output}, (1 +1 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
-1)
, "leaky_relu");
1830 outputs[0] = leaky_relu_output;
1831}
1832
// Forward declaration for the vtab below; defined after the constructor.
static ccv_cnnp_model_t* _ccv_cnnp_leaky_relu_copy(const ccv_cnnp_model_t* const self, void* const context);

// Virtual table for the leaky ReLU model: no trainable state, so only build/copy.
static const ccv_cnnp_model_vtab_t ccv_cnnp_leaky_relu_isa = {
	.build = _ccv_cnnp_leaky_relu_build,
	.copy = _ccv_cnnp_leaky_relu_copy,
};
1839
1840ccv_cnnp_model_t* ccv_cnnp_leaky_relu(const float negative_slope, const char* const name)
1841{
1842 ccv_cnnp_model_leaky_relu_t* const model_leaky_relu = (ccv_cnnp_model_leaky_relu_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_leaky_relu_t));
1843 model_leaky_relu->super.isa = &ccv_cnnp_leaky_relu_isa;
1844 model_leaky_relu->super.input_size = 1;
1845 model_leaky_relu->super.outputs = &model_leaky_relu->output;
1846 model_leaky_relu->super.output_size = 1;
1847 model_leaky_relu->negative_slope = negative_slope;
1848 ccv_cnnp_model_copy_name(&model_leaky_relu->super, name);
1849 return (ccv_cnnp_model_t*)model_leaky_relu;
1850}
1851
1852static ccv_cnnp_model_t* _ccv_cnnp_leaky_relu_copy(const ccv_cnnp_model_t* const super, void* const context)
1853{
1854 ccv_cnnp_model_leaky_relu_t* const self = (ccv_cnnp_model_leaky_relu_t*)super;
1855 return ccv_cnnp_leaky_relu(self->negative_slope, self->super.name);
1856}
1857
1858// MARK - Softmax Layer
1859
typedef struct {
	ccv_cnnp_model_t super; // Base model; first member so the struct casts to ccv_cnnp_model_t.
	ccv_nnc_tensor_symbol_t output; // Storage for the single output symbol.
} ccv_cnnp_model_softmax_t;
1864
1865static void _ccv_cnnp_softmax_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1866{
1867 PRINT(CCV_CLI_VERBOSE, "[cnnp_softmax_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_softmax_build] -\n"); fflush(stdout); } } while
(0)
;
1868 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 1868, __extension__ __PRETTY_FUNCTION__); }))
;
1869 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 1869, __extension__ __PRETTY_FUNCTION__
); }))
;
1870 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
1871 ccv_nnc_tensor_param_t output_params;
1872 const ccv_nnc_cmd_t softmax = CMD_SOFTMAX_FORWARD()ccv_nnc_cmd(CCV_NNC_SOFTMAX_FORWARD, 0, ccv_nnc_cmd_auto, 0);
1873 ccv_nnc_hint_tensor_auto(softmax, (ccv_nnc_tensor_param_t []){
1874 params,
1875 }, 1, ccv_nnc_no_hint, &output_params, 1);
1876 const ccv_nnc_tensor_symbol_t softmax_output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1877 ccv_nnc_graph_exec_symbol_new(graph, softmax, TENSOR_SYMBOL_LIST(inputs[0])(const ccv_nnc_tensor_symbol_t []){inputs[0]}, (1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(softmax_output)(const ccv_nnc_tensor_symbol_t []){softmax_output}, (1 +1 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1
)
, "softmax");
1878 outputs[0] = softmax_output;
1879}
1880
// Forward declaration for the vtab below; defined after the constructor.
static ccv_cnnp_model_t* _ccv_cnnp_softmax_copy(const ccv_cnnp_model_t* const self, void* const context);

// Virtual table for the softmax model: stateless, so only build and copy.
static const ccv_cnnp_model_vtab_t ccv_cnnp_softmax_isa = {
	.build = _ccv_cnnp_softmax_build,
	.copy = _ccv_cnnp_softmax_copy,
};
1887
1888ccv_cnnp_model_t* ccv_cnnp_softmax(const char* const name)
1889{
1890 ccv_cnnp_model_softmax_t* const model_softmax = (ccv_cnnp_model_softmax_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_softmax_t));
1891 model_softmax->super.isa = &ccv_cnnp_softmax_isa;
1892 model_softmax->super.input_size = 1;
1893 model_softmax->super.outputs = &model_softmax->output;
1894 model_softmax->super.output_size = 1;
1895 ccv_cnnp_model_copy_name(&model_softmax->super, name);
1896 return (ccv_cnnp_model_t*)model_softmax;
1897}
1898
// Copy method: softmax has no hyperparameters, so only the name carries over.
// `context` is unused.
static ccv_cnnp_model_t* _ccv_cnnp_softmax_copy(const ccv_cnnp_model_t* const self, void* const context)
{
	return ccv_cnnp_softmax(self->name);
}
1903
1904// MARK - Add Layer
1905
typedef struct {
	ccv_cnnp_model_t super; // Base model; first member so the struct casts to ccv_cnnp_model_t.
	float p; // Coefficient for the first input (CMD_ADD_FORWARD blas.a[0]).
	float q; // Coefficient for the second input (CMD_ADD_FORWARD blas.a[1]).
	ccv_nnc_tensor_symbol_t output; // Storage for the single output symbol.
} ccv_cnnp_model_add_t;
1912
1913static void _ccv_cnnp_add_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1914{
1915 PRINT(CCV_CLI_VERBOSE, "[cnnp_add_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_add_build] -\n"); fflush(stdout); } } while (
0)
;
1916 const ccv_cnnp_model_add_t* const self = (const ccv_cnnp_model_add_t*)super;
1917 assert(input_size == 2)((void) sizeof ((input_size == 2) ? 1 : 0), __extension__ ({ if
(input_size == 2) ; else __assert_fail ("input_size == 2", "ccv_cnnp_model_addons.c"
, 1917, __extension__ __PRETTY_FUNCTION__); }))
;
1918 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 1918, __extension__ __PRETTY_FUNCTION__
); }))
;
1919 ccv_nnc_tensor_param_t input_params[2];
1920 int i;
1921 for (i = 0; i < 2; i++)
1922 input_params[i] = ccv_nnc_tensor_symbol_params(graph, inputs[i]);
1923 ccv_nnc_tensor_param_t output_params;
1924 const ccv_nnc_cmd_t add = CMD_ADD_FORWARD(self->p, self->q)ccv_nnc_cmd(CCV_NNC_ADD_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={self->p, self->q}}}, 0)
;
1925 ccv_nnc_hint_tensor_auto(add, input_params, 2, ccv_nnc_no_hint, &output_params, 1);
1926 outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1927 ccv_nnc_graph_exec_symbol_new(graph, add, inputs, input_size, outputs, output_size, "add");
1928}
1929
// Forward declaration for the vtab below; defined after the constructor.
static ccv_cnnp_model_t* _ccv_cnnp_add_copy(const ccv_cnnp_model_t* const self, void* const context);

// Virtual table for the weighted-add model: stateless, so only build and copy.
static const ccv_cnnp_model_vtab_t ccv_cnnp_add_isa = {
	.build = _ccv_cnnp_add_build,
	.copy = _ccv_cnnp_add_copy,
};
1936
1937ccv_cnnp_model_t* ccv_cnnp_add(const float p, const float q, const char* const name)
1938{
1939 ccv_cnnp_model_add_t* const model_add = (ccv_cnnp_model_add_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_add_t));
1940 model_add->super.isa = &ccv_cnnp_add_isa;
1941 model_add->super.input_size = 2;
1942 model_add->super.outputs = &model_add->output;
1943 model_add->super.output_size = 1;
1944 model_add->p = p;
1945 model_add->q = q;
1946 ccv_cnnp_model_copy_name(&model_add->super, name);
1947 return (ccv_cnnp_model_t*)model_add;
1948}
1949
1950static ccv_cnnp_model_t* _ccv_cnnp_add_copy(const ccv_cnnp_model_t* const super, void* const context)
1951{
1952 const ccv_cnnp_model_add_t* const self = (const ccv_cnnp_model_add_t*)super;
1953 return ccv_cnnp_add(self->p, self->q, self->super.name);
1954}
1955
1956// MARK - Mul Layer
1957
typedef struct {
	ccv_cnnp_model_t super; // Base model; first member so the struct casts to ccv_cnnp_model_t.
	ccv_nnc_tensor_symbol_t output; // Storage for the single output symbol.
	float p; // Scalar applied to the element-wise product (CMD_MUL_FORWARD blas.a[0]).
} ccv_cnnp_model_mul_t;
1963
1964static void _ccv_cnnp_mul_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1965{
1966 PRINT(CCV_CLI_VERBOSE, "[cnnp_mul_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_mul_build] -\n"); fflush(stdout); } } while (
0)
;
1967 const ccv_cnnp_model_mul_t* const self = (const ccv_cnnp_model_mul_t*)super;
1968 assert(input_size == 2)((void) sizeof ((input_size == 2) ? 1 : 0), __extension__ ({ if
(input_size == 2) ; else __assert_fail ("input_size == 2", "ccv_cnnp_model_addons.c"
, 1968, __extension__ __PRETTY_FUNCTION__); }))
;
1969 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 1969, __extension__ __PRETTY_FUNCTION__
); }))
;
1970 ccv_nnc_tensor_param_t input_params[2];
1971 int i;
1972 for (i = 0; i < 2; i++)
1973 input_params[i] = ccv_nnc_tensor_symbol_params(graph, inputs[i]);
1974 ccv_nnc_tensor_param_t output_params;
1975 const ccv_nnc_cmd_t mul = CMD_MUL_FORWARD(self->p)ccv_nnc_cmd(CCV_NNC_MUL_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={self->p,}}}, 0)
;
1976 ccv_nnc_hint_tensor_auto(mul, input_params, 2, ccv_nnc_no_hint, &output_params, 1);
1977 outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1978 ccv_nnc_graph_exec_symbol_new(graph, mul, inputs, input_size, outputs, output_size, "mul");
1979}
1980
// Forward declaration for the vtab below; defined after the constructor.
static ccv_cnnp_model_t* _ccv_cnnp_mul_copy(const ccv_cnnp_model_t* const self, void* const context);

// Virtual table for the element-wise mul model: stateless, so only build/copy.
static const ccv_cnnp_model_vtab_t ccv_cnnp_mul_isa = {
	.build = _ccv_cnnp_mul_build,
	.copy = _ccv_cnnp_mul_copy,
};
1987
1988ccv_cnnp_model_t* ccv_cnnp_mul(const float p, const char* const name)
1989{
1990 ccv_cnnp_model_mul_t* const model_mul = (ccv_cnnp_model_mul_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_mul_t));
1991 model_mul->super.isa = &ccv_cnnp_mul_isa;
1992 model_mul->super.input_size = 2;
1993 model_mul->super.outputs = &model_mul->output;
1994 model_mul->super.output_size = 1;
1995 model_mul->p = p;
1996 ccv_cnnp_model_copy_name(&model_mul->super, name);
1997 return (ccv_cnnp_model_t*)model_mul;
1998}
1999
2000static ccv_cnnp_model_t* _ccv_cnnp_mul_copy(const ccv_cnnp_model_t* const super, void* const context)
2001{
2002 const ccv_cnnp_model_mul_t* const self = (const ccv_cnnp_model_mul_t*)super;
2003 return ccv_cnnp_mul(self->p, self->super.name);
2004}
2005
2006// MARK - Scalar Mul Layer
2007
typedef struct {
	ccv_cnnp_model_t super; // Base model; first member so the struct casts to ccv_cnnp_model_t.
	ccv_nnc_tensor_symbol_t output; // Storage for the single output symbol.
	float a; // Scalar multiplier (CMD_SCALAR_MUL_FORWARD blas.a[0]).
} ccv_cnnp_model_scalar_mul_t;
2013
2014static void _ccv_cnnp_scalar_mul_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2015{
2016 PRINT(CCV_CLI_VERBOSE, "[cnnp_scalar_mul_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_scalar_mul_build] -\n"); fflush(stdout); } }
while (0)
;
2017 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 2017, __extension__ __PRETTY_FUNCTION__); }))
;
2018 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 2018, __extension__ __PRETTY_FUNCTION__
); }))
;
2019 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2020 ccv_nnc_tensor_param_t output_params;
2021 ccv_cnnp_model_scalar_mul_t* const self = (ccv_cnnp_model_scalar_mul_t*)super;
2022 const ccv_nnc_cmd_t scalar_mul = CMD_SCALAR_MUL_FORWARD(self->a)ccv_nnc_cmd(CCV_NNC_SCALAR_MUL_FORWARD, 0, (ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.blas={.a={self->a,}}}, 0)
;
2023 ccv_nnc_hint_tensor_auto(scalar_mul, (ccv_nnc_tensor_param_t []){
2024 params,
2025 }, 1, ccv_nnc_no_hint, &output_params, 1);
2026 const ccv_nnc_tensor_symbol_t scalar_mul_output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
2027 ccv_nnc_graph_exec_symbol_new(graph, scalar_mul, TENSOR_SYMBOL_LIST(inputs[0])(const ccv_nnc_tensor_symbol_t []){inputs[0]}, (1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(scalar_mul_output)(const ccv_nnc_tensor_symbol_t []){scalar_mul_output}, (1 +1 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
-1)
, "scalar_mul");
2028 outputs[0] = scalar_mul_output;
2029}
2030
// Forward declaration for the vtab below; defined after the constructor.
static ccv_cnnp_model_t* _ccv_cnnp_scalar_mul_copy(const ccv_cnnp_model_t* const super, void* const context);

// Virtual table for the scalar-mul model: stateless, so only build and copy.
static const ccv_cnnp_model_vtab_t ccv_cnnp_scalar_mul_isa = {
	.build = _ccv_cnnp_scalar_mul_build,
	.copy = _ccv_cnnp_scalar_mul_copy,
};
2037
2038ccv_cnnp_model_t* ccv_cnnp_scalar_mul(const float a, const char* const name)
2039{
2040 ccv_cnnp_model_scalar_mul_t* const model_scalar_mul = (ccv_cnnp_model_scalar_mul_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_scalar_mul_t));
2041 model_scalar_mul->super.isa = &ccv_cnnp_scalar_mul_isa;
2042 model_scalar_mul->super.input_size = 1;
2043 model_scalar_mul->super.outputs = &model_scalar_mul->output;
2044 model_scalar_mul->super.output_size = 1;
2045 model_scalar_mul->a = a;
2046 ccv_cnnp_model_copy_name(&model_scalar_mul->super, name);
2047 return (ccv_cnnp_model_t*)model_scalar_mul;
2048}
2049
2050static ccv_cnnp_model_t* _ccv_cnnp_scalar_mul_copy(const ccv_cnnp_model_t* const super, void* const context)
2051{
2052 const ccv_cnnp_model_scalar_mul_t* const self = (const ccv_cnnp_model_scalar_mul_t*)super;
2053 return ccv_cnnp_scalar_mul(self->a, self->super.name);
2054}
2055
2056// MARK - Div Layer
2057
typedef struct {
	ccv_cnnp_model_t super; // Base model; first member so the struct casts to ccv_cnnp_model_t.
	ccv_nnc_tensor_symbol_t output; // Storage for the single output symbol.
	int reciprocal; // Non-zero: compute 1 / x from one input; zero: a / b from two inputs.
} ccv_cnnp_model_div_t;
2063
2064static void _ccv_cnnp_div_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2065{
2066 const ccv_cnnp_model_div_t* const self = (const ccv_cnnp_model_div_t*)super;
2067 PRINT(CCV_CLI_VERBOSE, "[cnnp_div_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_div_build] -\n"); fflush(stdout); } } while (
0)
;
2068 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 2068, __extension__ __PRETTY_FUNCTION__
); }))
;
2069 ccv_nnc_tensor_param_t input_params[2];
2070 int i;
2071 ccv_nnc_tensor_param_t output_params;
2072 const ccv_nnc_cmd_t div = CMD_EWDIV_FORWARD()ccv_nnc_cmd(CCV_NNC_EWDIV_FORWARD, 0, ccv_nnc_cmd_auto, 0);
2073 if (self->reciprocal)
2074 {
2075 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 2075, __extension__ __PRETTY_FUNCTION__); }))
;
2076 input_params[0] = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2077 input_params[1] = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2078 ccv_nnc_hint_tensor_auto(div, input_params, 2, ccv_nnc_no_hint, &output_params, 1);
2079 outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
2080 ccv_nnc_graph_exec_symbol_new(graph, div, TENSOR_SYMBOL_LIST(NO_TENSOR_SYMBOL, inputs[0])(const ccv_nnc_tensor_symbol_t []){(const ccv_nnc_tensor_symbol_t
){.d = CCV_NNC_NO_TENSOR_SYMBOL}, inputs[0]}, (1 +1 +1 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, outputs, output_size, "div");
2081 } else {
2082 assert(input_size == 2)((void) sizeof ((input_size == 2) ? 1 : 0), __extension__ ({ if
(input_size == 2) ; else __assert_fail ("input_size == 2", "ccv_cnnp_model_addons.c"
, 2082, __extension__ __PRETTY_FUNCTION__); }))
;
2083 for (i = 0; i < 2; i++)
2084 input_params[i] = ccv_nnc_tensor_symbol_params(graph, inputs[i]);
2085 ccv_nnc_hint_tensor_auto(div, input_params, input_size, ccv_nnc_no_hint, &output_params, 1);
2086 outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
2087 ccv_nnc_graph_exec_symbol_new(graph, div, inputs, input_size, outputs, output_size, "div");
2088 }
2089}
2090
// Forward declaration for the vtab below; defined after the constructor.
static ccv_cnnp_model_t* _ccv_cnnp_div_copy(const ccv_cnnp_model_t* const self, void* const context);

// Virtual table for the division model: stateless, so only build and copy.
static const ccv_cnnp_model_vtab_t ccv_cnnp_div_isa = {
	.build = _ccv_cnnp_div_build,
	.copy = _ccv_cnnp_div_copy,
};
2097
2098ccv_cnnp_model_t* ccv_cnnp_div(const int reciprocal, const char* const name)
2099{
2100 ccv_cnnp_model_div_t* const model_div = (ccv_cnnp_model_div_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_div_t));
2101 model_div->super.isa = &ccv_cnnp_div_isa;
2102 model_div->super.input_size = reciprocal ? 1 : 2;
2103 model_div->super.outputs = &model_div->output;
2104 model_div->super.output_size = 1;
2105 model_div->reciprocal = reciprocal;
2106 ccv_cnnp_model_copy_name(&model_div->super, name);
2107 return (ccv_cnnp_model_t*)model_div;
2108}
2109
2110static ccv_cnnp_model_t* _ccv_cnnp_div_copy(const ccv_cnnp_model_t* const super, void* const context)
2111{
2112 const ccv_cnnp_model_div_t* const self = (const ccv_cnnp_model_div_t*)super;
2113 return ccv_cnnp_div(self->reciprocal, self->super.name);
2114}
2115
2116// MARK - Sqrt Layer
2117
typedef struct {
	ccv_cnnp_model_t super; // Base model; first member so the struct casts to ccv_cnnp_model_t.
	ccv_nnc_tensor_symbol_t output; // Storage for the single output symbol.
} ccv_cnnp_model_sqrt_t;
2122
2123static void _ccv_cnnp_sqrt_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2124{
2125 PRINT(CCV_CLI_VERBOSE, "[cnnp_sqrt_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_sqrt_build] -\n"); fflush(stdout); } } while
(0)
;
2126 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 2126, __extension__ __PRETTY_FUNCTION__
); }))
;
2127 ccv_nnc_tensor_param_t input_params[1];
2128 ccv_nnc_tensor_param_t output_params;
2129 const ccv_nnc_cmd_t sqrt = CMD_EWSQRT_FORWARD()ccv_nnc_cmd(CCV_NNC_EWSQRT_FORWARD, 0, ccv_nnc_cmd_auto, 0);
2130 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 2130, __extension__ __PRETTY_FUNCTION__); }))
;
2131 input_params[0] = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2132 ccv_nnc_hint_tensor_auto(sqrt, input_params, 1, ccv_nnc_no_hint, &output_params, 1);
2133 outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
2134 ccv_nnc_graph_exec_symbol_new(graph, sqrt, inputs, 1, outputs, output_size, "sqrt");
2135}
2136
// Forward declaration for the vtab below; defined after the constructor.
static ccv_cnnp_model_t* _ccv_cnnp_sqrt_copy(const ccv_cnnp_model_t* const self, void* const context);

// Virtual table for the sqrt model: stateless, so only build and copy.
static const ccv_cnnp_model_vtab_t ccv_cnnp_sqrt_isa = {
	.build = _ccv_cnnp_sqrt_build,
	.copy = _ccv_cnnp_sqrt_copy,
};
2143
2144ccv_cnnp_model_t* ccv_cnnp_sqrt(const char* const name)
2145{
2146 ccv_cnnp_model_sqrt_t* const model_sqrt = (ccv_cnnp_model_sqrt_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_sqrt_t));
2147 model_sqrt->super.isa = &ccv_cnnp_sqrt_isa;
2148 model_sqrt->super.input_size = 1;
2149 model_sqrt->super.outputs = &model_sqrt->output;
2150 model_sqrt->super.output_size = 1;
2151 ccv_cnnp_model_copy_name(&model_sqrt->super, name);
2152 return (ccv_cnnp_model_t*)model_sqrt;
2153}
2154
2155static ccv_cnnp_model_t* _ccv_cnnp_sqrt_copy(const ccv_cnnp_model_t* const super, void* const context)
2156{
2157 const ccv_cnnp_model_sqrt_t* const self = (const ccv_cnnp_model_sqrt_t*)super;
2158 return ccv_cnnp_sqrt(self->super.name);
2159}
2160
2161// MARK - Cmul Layer
2162
typedef struct {
	ccv_cnnp_model_t super; // Base model; first member so the struct casts to ccv_cnnp_model_t.
	ccv_nnc_tensor_symbol_t output; // Storage for the single output symbol.
} ccv_cnnp_model_cmul_t;
2167
2168static void _ccv_cnnp_cmul_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2169{
2170 PRINT(CCV_CLI_VERBOSE, "[cnnp_cmul_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_cmul_build] -\n"); fflush(stdout); } } while
(0)
;
2171 assert(input_size == 2)((void) sizeof ((input_size == 2) ? 1 : 0), __extension__ ({ if
(input_size == 2) ; else __assert_fail ("input_size == 2", "ccv_cnnp_model_addons.c"
, 2171, __extension__ __PRETTY_FUNCTION__); }))
;
2172 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 2172, __extension__ __PRETTY_FUNCTION__
); }))
;
2173 ccv_nnc_tensor_param_t input_params[2];
2174 int i;
2175 for (i = 0; i < 2; i++)
2176 input_params[i] = ccv_nnc_tensor_symbol_params(graph, inputs[i]);
2177 ccv_nnc_tensor_param_t output_params;
2178 const ccv_nnc_cmd_t mul = CMD_CMUL_FORWARD()ccv_nnc_cmd(CCV_NNC_CMUL_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}}}, 0)
;
2179 ccv_nnc_hint_tensor_auto(mul, input_params, 2, ccv_nnc_no_hint, &output_params, 1);
2180 outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
2181 ccv_nnc_graph_exec_symbol_new(graph, mul, inputs, input_size, outputs, output_size, "cmul");
2182}
2183
// Forward declaration for the vtab below; defined after the constructor.
static ccv_cnnp_model_t* _ccv_cnnp_cmul_copy(const ccv_cnnp_model_t* const self, void* const context);

// Virtual table for the cmul model: stateless, so only build and copy.
static const ccv_cnnp_model_vtab_t ccv_cnnp_cmul_isa = {
	.build = _ccv_cnnp_cmul_build,
	.copy = _ccv_cnnp_cmul_copy,
};
2190
2191ccv_cnnp_model_t* ccv_cnnp_cmul(const char* const name)
2192{
2193 ccv_cnnp_model_cmul_t* const model_cmul = (ccv_cnnp_model_cmul_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_cmul_t));
2194 model_cmul->super.isa = &ccv_cnnp_cmul_isa;
2195 model_cmul->super.input_size = 2;
2196 model_cmul->super.outputs = &model_cmul->output;
2197 model_cmul->super.output_size = 1;
2198 ccv_cnnp_model_copy_name(&model_cmul->super, name);
2199 return (ccv_cnnp_model_t*)model_cmul;
2200}
2201
// Copy method: cmul has no hyperparameters, so only the name carries over.
// `context` is unused.
static ccv_cnnp_model_t* _ccv_cnnp_cmul_copy(const ccv_cnnp_model_t* const super, void* const context)
{
	return ccv_cnnp_cmul(super->name);
}
2206
2207// MARK - Transpose Layer
2208
typedef struct {
	ccv_cnnp_model_t super; // Base model; first member so the struct casts to ccv_cnnp_model_t.
	ccv_nnc_tensor_symbol_t output; // Storage for the single output symbol.
	int transpose[2]; // The two axes to swap (CMD_TRANSPOSE_FORWARD axis pair).
} ccv_cnnp_model_transpose_t;
2214
2215static void _ccv_cnnp_transpose_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2216{
2217 ccv_cnnp_model_transpose_t* const self = (ccv_cnnp_model_transpose_t*)super;
2218 PRINT(CCV_CLI_VERBOSE, "[cnnp_transpose_build] (%d, %d)\n", self->transpose[0], self->transpose[1])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_transpose_build] (%d, %d)\n", self->transpose
[0], self->transpose[1]); fflush(stdout); } } while (0)
;
2219 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 2219, __extension__ __PRETTY_FUNCTION__); }))
;
2220 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 2220, __extension__ __PRETTY_FUNCTION__
); }))
;
2221 if (self->transpose[0] == self->transpose[1])
2222 {
2223 outputs[0] = inputs[0];
2224 return;
2225 }
2226 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2227 ccv_nnc_tensor_param_t output_params;
2228 const ccv_nnc_cmd_t transpose = CMD_TRANSPOSE_FORWARD(self->transpose[0], self->transpose[1])ccv_nnc_cmd(CCV_NNC_TRANSPOSE_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.transpose={.axis={self->transpose[
0], self->transpose[1]}}}), 0)
;
2229 ccv_nnc_hint_tensor_auto(transpose, (ccv_nnc_tensor_param_t []){
2230 params,
2231 }, 1, ccv_nnc_no_hint, &output_params, 1);
2232 const ccv_nnc_tensor_symbol_t transpose_output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
2233 ccv_nnc_graph_exec_symbol_new(graph, transpose, TENSOR_SYMBOL_LIST(inputs[0])(const ccv_nnc_tensor_symbol_t []){inputs[0]}, (1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(transpose_output)(const ccv_nnc_tensor_symbol_t []){transpose_output}, (1 +1 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
-1)
, "transpose");
2234 outputs[0] = transpose_output;
2235}
2236
// Forward declaration for the vtab below; defined after the constructor.
static ccv_cnnp_model_t* _ccv_cnnp_transpose_copy(const ccv_cnnp_model_t* const super, void* const context);

// Virtual table for the transpose model: stateless, so only build and copy.
static const ccv_cnnp_model_vtab_t ccv_cnnp_transpose_isa = {
	.build = _ccv_cnnp_transpose_build,
	.copy = _ccv_cnnp_transpose_copy,
};
2243
2244ccv_cnnp_model_t* ccv_cnnp_transpose(const int axis_a, const int axis_b, const char* const name)
2245{
2246 ccv_cnnp_model_transpose_t* const model_transpose = (ccv_cnnp_model_transpose_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_transpose_t));
2247 model_transpose->super.isa = &ccv_cnnp_transpose_isa;
2248 model_transpose->super.input_size = 1;
2249 model_transpose->super.outputs = &model_transpose->output;
2250 model_transpose->super.output_size = 1;
2251 model_transpose->transpose[0] = axis_a;
2252 model_transpose->transpose[1] = axis_b;
2253 ccv_cnnp_model_copy_name(&model_transpose->super, name);
2254 return (ccv_cnnp_model_t*)model_transpose;
2255}
2256
2257static ccv_cnnp_model_t* _ccv_cnnp_transpose_copy(const ccv_cnnp_model_t* const super, void* const context)
2258{
2259 const ccv_cnnp_model_transpose_t* const self = (const ccv_cnnp_model_transpose_t*)super;
2260 return ccv_cnnp_transpose(self->transpose[0], self->transpose[1], self->super.name);
2261}
2262
2263// MARK - Layer Norm Layer
2264
typedef struct {
	ccv_cnnp_model_t super; // Base model; first member so the struct casts to ccv_cnnp_model_t.
	ccv_nnc_tensor_symbol_t output; // Storage for the single output symbol.
	ccv_nnc_tensor_symbol_t bias; // Trainable shift; only created when lnorm.elementwise_affine is set.
	ccv_nnc_tensor_symbol_t scale; // Trainable gain; only created when lnorm.elementwise_affine is set.
	ccv_nnc_cmd_param_t params; // Full LAYER_NORM_FORWARD command parameters (epsilon, axes, affine flag).
} ccv_cnnp_model_layer_norm_t;
2272
// Build step for layer normalization. Creates the (shared) scale/bias symbols
// when elementwise_affine is on, then inserts the LAYER_NORM_FORWARD node.
// Besides the user-visible output it creates saved_mean/saved_inv_std symbols,
// which the command emits for reuse by the backward pass.
static void _ccv_cnnp_layer_norm_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
{
	PRINT(CCV_CLI_VERBOSE, "[cnnp_layer_norm_build] -\n");
	assert(input_size == 1);
	assert(output_size == 1);
	ccv_cnnp_model_layer_norm_t* const self = (ccv_cnnp_model_layer_norm_t*)super;
	const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
	// scale/bias have the input's shape collapsed to 1 everywhere except along
	// the normalized axes, which keep the input's extent.
	ccv_nnc_tensor_param_t bias_params = params;
	const int nd = ccv_nnc_tensor_nd(params.dim);
	int i;
	for (i = 0; i < nd; i++)
		bias_params.dim[i] = 1;
	for (i = 0; i < self->params.lnorm.count; i++)
		bias_params.dim[self->params.lnorm.axis[i]] = params.dim[self->params.lnorm.axis[i]];
	if (self->params.lnorm.elementwise_affine)
	{
		// Both scale and bias are shared between if this model is reused.
		if (!self->scale.graph)
			self->scale = ccv_nnc_tensor_symbol_new(graph, bias_params, "scale");
		if (!self->bias.graph)
			self->bias = ccv_nnc_tensor_symbol_new(graph, bias_params, "bias");
	}
	const ccv_nnc_cmd_t layer_norm = ccv_nnc_cmd(CCV_NNC_LAYER_NORM_FORWARD, 0, self->params, 0);
	ccv_nnc_tensor_param_t output_params[3];
	// Shape inference takes 3 operand shapes with affine (input, scale, bias),
	// otherwise just the input; it always produces 3 output shapes.
	if (self->params.lnorm.elementwise_affine)
		ccv_nnc_hint_tensor_auto(layer_norm, (ccv_nnc_tensor_param_t []){
			params,
			bias_params,
			bias_params,
		}, 3, ccv_nnc_no_hint, output_params, 3);
	else
		ccv_nnc_hint_tensor_auto(layer_norm, (ccv_nnc_tensor_param_t []){
			params,
		}, 1, ccv_nnc_no_hint, output_params, 3);
	const ccv_nnc_tensor_symbol_t output = ccv_nnc_tensor_symbol_new(graph, output_params[0], 0);
	const ccv_nnc_tensor_symbol_t saved_mean = ccv_nnc_tensor_symbol_new(graph, output_params[1], "saved_mean");
	const ccv_nnc_tensor_symbol_t saved_inv_std = ccv_nnc_tensor_symbol_new(graph, output_params[2], "saved_inv_std");
	if (self->params.lnorm.elementwise_affine)
		ccv_nnc_graph_exec_symbol_new(graph, layer_norm, TENSOR_SYMBOL_LIST(inputs[0], self->scale, self->bias), TENSOR_SYMBOL_LIST(output, saved_mean, saved_inv_std), "layer_norm");
	else
		ccv_nnc_graph_exec_symbol_new(graph, layer_norm, TENSOR_SYMBOL_LIST(inputs[0]), TENSOR_SYMBOL_LIST(output, saved_mean, saved_inv_std), "layer_norm");
	outputs[0] = output;
}
2316
2317static void _ccv_cnnp_layer_norm_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
2318{
2319 ccv_cnnp_model_layer_norm_t* const self = (ccv_cnnp_model_layer_norm_t*)super;
2320 if (self->scale.graph)
2321 initializer(context, CMD_SET_FORWARD(1)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={1,}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->scale);
2322 if (self->bias.graph)
2323 initializer(context, CMD_SET_FORWARD(0)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={0,}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->bias);
2324}
2325
2326static void _ccv_cnnp_layer_norm_add_to_parameter(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const parameters, const int is_trainable)
2327{
2328 ccv_cnnp_model_layer_norm_t* const self = (ccv_cnnp_model_layer_norm_t*)super;
2329 if (self->scale.graph)
2330 add_to_array(parameters, self->scale, is_trainable);
2331 if (self->bias.graph)
2332 add_to_array(parameters, self->bias, is_trainable);
2333}
2334
2335static ccv_cnnp_model_t* _ccv_cnnp_layer_norm_copy(const ccv_cnnp_model_t* const super, void* const context);
2336
// Virtual table for the layer norm model: graph construction, parameter
// initialization, parameter registration, and deep copy.
static const ccv_cnnp_model_vtab_t ccv_cnnp_layer_norm_isa = {
	.build = _ccv_cnnp_layer_norm_build,
	.init_states = _ccv_cnnp_layer_norm_init_states,
	.add_to_parameter = _ccv_cnnp_layer_norm_add_to_parameter,
	.copy = _ccv_cnnp_layer_norm_copy,
};
2343
2344ccv_cnnp_model_t* ccv_cnnp_layer_norm(const float epsilon, const int axis[CCV_NNC_MAX_DIM_ALLOC(12)], const int axis_count, const int elementwise_affine, const int is_trainable, const char* const name)
2345{
2346 ccv_cnnp_model_layer_norm_t* const model_layer_norm = (ccv_cnnp_model_layer_norm_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_layer_norm_t));
2347 model_layer_norm->super.isa = &ccv_cnnp_layer_norm_isa;
2348 model_layer_norm->super.input_size = 1;
2349 model_layer_norm->super.outputs = &model_layer_norm->output;
2350 model_layer_norm->super.output_size = 1;
2351 model_layer_norm->super.is_trainable = is_trainable;
2352 ccv_cnnp_model_copy_name(&model_layer_norm->super, name);
2353 model_layer_norm->scale.d = CCV_NNC_NO_TENSOR_SYMBOL;
2354 model_layer_norm->scale.graph = 0;
2355 model_layer_norm->bias.d = CCV_NNC_NO_TENSOR_SYMBOL;
2356 model_layer_norm->bias.graph = 0;
2357 model_layer_norm->params.lnorm.epsilon = epsilon;
2358 model_layer_norm->params.lnorm.count = axis_count;
2359 model_layer_norm->params.lnorm.elementwise_affine = elementwise_affine;
2360 memcpy(model_layer_norm->params.lnorm.axis, axis, sizeof(int) * axis_count);
2361 return (ccv_cnnp_model_t*)model_layer_norm;
2362}
2363
2364static ccv_cnnp_model_t* _ccv_cnnp_layer_norm_copy(const ccv_cnnp_model_t* const super, void* const context)
2365{
2366 const ccv_cnnp_model_layer_norm_t* const self = (const ccv_cnnp_model_layer_norm_t*)super;
2367 return ccv_cnnp_layer_norm(self->params.lnorm.epsilon, self->params.lnorm.axis, self->params.lnorm.count, self->params.lnorm.elementwise_affine, self->super.is_trainable, self->super.name);
2368}
2369
2370// MARK - Group Norm Layer
2371
// Group norm model state.
typedef struct {
	ccv_cnnp_model_t super; // Base model; must come first.
	ccv_nnc_tensor_symbol_t output; // Backing store for super.outputs.
	ccv_nnc_tensor_symbol_t bias; // Learned bias (only when elementwise_affine).
	ccv_nnc_tensor_symbol_t scale; // Learned scale (only when elementwise_affine).
	ccv_nnc_cmd_param_t params; // Group norm command parameters (gnorm member).
} ccv_cnnp_model_group_norm_t;
2379
2380static void _ccv_cnnp_group_norm_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2381{
2382 PRINT(CCV_CLI_VERBOSE, "[cnnp_group_norm_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_group_norm_build] -\n"); fflush(stdout); } }
while (0)
;
2383 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 2383, __extension__ __PRETTY_FUNCTION__); }))
;
2384 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 2384, __extension__ __PRETTY_FUNCTION__
); }))
;
2385 ccv_cnnp_model_group_norm_t* const self = (ccv_cnnp_model_group_norm_t*)super;
2386 const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2387 ccv_nnc_tensor_param_t bias_params = params;
2388 const int nd = ccv_nnc_tensor_nd(params.dim);
2389 int i;
2390 for (i = 0; i < nd; i++)
2391 bias_params.dim[i] = 1;
2392 bias_params.dim[self->params.gnorm.group_axis] = params.dim[self->params.gnorm.group_axis];
2393 if (self->params.gnorm.elementwise_affine)
2394 {
2395 // Both scale and bias are shared between if this model is reused.
2396 if (!self->scale.graph)
2397 self->scale = ccv_nnc_tensor_symbol_new(graph, bias_params, "scale");
2398 if (!self->bias.graph)
2399 self->bias = ccv_nnc_tensor_symbol_new(graph, bias_params, "bias");
2400 }
2401 const ccv_nnc_cmd_t group_norm = ccv_nnc_cmd(CCV_NNC_GROUP_NORM_FORWARD, 0, self->params, 0);
2402 ccv_nnc_tensor_param_t output_params[3];
2403 if (self->params.gnorm.elementwise_affine)
2404 ccv_nnc_hint_tensor_auto(group_norm, (ccv_nnc_tensor_param_t []){
2405 params,
2406 bias_params,
2407 bias_params,
2408 }, 3, ccv_nnc_no_hint, output_params, 3);
2409 else
2410 ccv_nnc_hint_tensor_auto(group_norm, (ccv_nnc_tensor_param_t []){
2411 params,
2412 }, 1, ccv_nnc_no_hint, output_params, 3);
2413 const ccv_nnc_tensor_symbol_t output = ccv_nnc_tensor_symbol_new(graph, output_params[0], 0);
2414 const ccv_nnc_tensor_symbol_t saved_mean = ccv_nnc_tensor_symbol_new(graph, output_params[1], "saved_mean");
2415 const ccv_nnc_tensor_symbol_t saved_inv_std = ccv_nnc_tensor_symbol_new(graph, output_params[2], "saved_inv_std");
2416 if (self->params.gnorm.elementwise_affine)
2417 ccv_nnc_graph_exec_symbol_new(graph, group_norm, TENSOR_SYMBOL_LIST(inputs[0], self->scale, self->bias)(const ccv_nnc_tensor_symbol_t []){inputs[0], self->scale,
self->bias}, (1 +1 +1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(output, saved_mean, saved_inv_std)(const ccv_nnc_tensor_symbol_t []){output, saved_mean, saved_inv_std
}, (1 +1 +1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 -1)
, "group_norm");
2418 else
2419 ccv_nnc_graph_exec_symbol_new(graph, group_norm, TENSOR_SYMBOL_LIST(inputs[0])(const ccv_nnc_tensor_symbol_t []){inputs[0]}, (1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(output, saved_mean, saved_inv_std)(const ccv_nnc_tensor_symbol_t []){output, saved_mean, saved_inv_std
}, (1 +1 +1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 -1)
, "group_norm");
2420 outputs[0] = output;
2421}
2422
2423static void _ccv_cnnp_group_norm_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
2424{
2425 ccv_cnnp_model_group_norm_t* const self = (ccv_cnnp_model_group_norm_t*)super;
2426 if (self->scale.graph)
2427 initializer(context, CMD_SET_FORWARD(1)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={1,}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->scale);
2428 if (self->bias.graph)
2429 initializer(context, CMD_SET_FORWARD(0)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={0,}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->bias);
2430}
2431
2432static void _ccv_cnnp_group_norm_add_to_parameter(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const parameters, const int is_trainable)
2433{
2434 ccv_cnnp_model_group_norm_t* const self = (ccv_cnnp_model_group_norm_t*)super;
2435 if (self->scale.graph)
2436 add_to_array(parameters, self->scale, is_trainable);
2437 if (self->bias.graph)
2438 add_to_array(parameters, self->bias, is_trainable);
2439}
2440
2441static ccv_cnnp_model_t* _ccv_cnnp_group_norm_copy(const ccv_cnnp_model_t* const super, void* const context);
2442
// Virtual table for the group norm model.
static const ccv_cnnp_model_vtab_t ccv_cnnp_group_norm_isa = {
	.build = _ccv_cnnp_group_norm_build,
	.init_states = _ccv_cnnp_group_norm_init_states,
	.add_to_parameter = _ccv_cnnp_group_norm_add_to_parameter,
	.copy = _ccv_cnnp_group_norm_copy,
};
2449
2450ccv_cnnp_model_t* ccv_cnnp_group_norm(const int group_axis, const int groups, const float epsilon, const int reduce_axis[CCV_NNC_MAX_DIM_ALLOC(12)], const int axis_count, const int elementwise_affine, const int is_trainable, const char* const name)
2451{
2452 ccv_cnnp_model_group_norm_t* const model_group_norm = (ccv_cnnp_model_group_norm_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_group_norm_t));
2453 model_group_norm->super.isa = &ccv_cnnp_group_norm_isa;
2454 model_group_norm->super.input_size = 1;
2455 model_group_norm->super.outputs = &model_group_norm->output;
2456 model_group_norm->super.output_size = 1;
2457 model_group_norm->super.is_trainable = is_trainable;
2458 ccv_cnnp_model_copy_name(&model_group_norm->super, name);
2459 model_group_norm->scale.d = CCV_NNC_NO_TENSOR_SYMBOL;
2460 model_group_norm->scale.graph = 0;
2461 model_group_norm->bias.d = CCV_NNC_NO_TENSOR_SYMBOL;
2462 model_group_norm->bias.graph = 0;
2463 model_group_norm->params.gnorm.group_axis = group_axis;
2464 model_group_norm->params.gnorm.groups = groups;
2465 model_group_norm->params.gnorm.epsilon = epsilon;
2466 model_group_norm->params.gnorm.reduce_count = axis_count;
2467 model_group_norm->params.gnorm.elementwise_affine = elementwise_affine;
2468 memcpy(model_group_norm->params.gnorm.reduce_axis, reduce_axis, sizeof(int) * axis_count);
2469 return (ccv_cnnp_model_t*)model_group_norm;
2470}
2471
2472static ccv_cnnp_model_t* _ccv_cnnp_group_norm_copy(const ccv_cnnp_model_t* const super, void* const context)
2473{
2474 const ccv_cnnp_model_group_norm_t* const self = (const ccv_cnnp_model_group_norm_t*)super;
2475 return ccv_cnnp_group_norm(self->params.gnorm.group_axis, self->params.gnorm.groups, self->params.gnorm.epsilon, self->params.gnorm.reduce_axis, self->params.gnorm.reduce_count, self->params.gnorm.elementwise_affine, self->super.is_trainable, self->super.name);
2476}
2477
2478// MARK - RMSNorm Layer
2479
// RMSNorm model state. Unlike layer norm, RMSNorm has no bias — only a scale.
typedef struct {
	ccv_cnnp_model_t super; // Base model; must come first.
	ccv_nnc_tensor_symbol_t output; // Backing store for super.outputs.
	ccv_nnc_tensor_symbol_t scale; // Learned scale over the normalized axes.
	ccv_nnc_cmd_param_t params; // RMSNorm command parameters (rmsnorm member).
} ccv_cnnp_model_rmsnorm_t;
2486
2487static void _ccv_cnnp_rmsnorm_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2488{
2489 PRINT(CCV_CLI_VERBOSE, "[cnnp_rmsnorm_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_rmsnorm_build] -\n"); fflush(stdout); } } while
(0)
;
2490 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 2490, __extension__ __PRETTY_FUNCTION__); }))
;
2491 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 2491, __extension__ __PRETTY_FUNCTION__
); }))
;
2492 ccv_cnnp_model_rmsnorm_t* const self = (ccv_cnnp_model_rmsnorm_t*)super;
2493 const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2494 ccv_nnc_tensor_param_t scale_params = params;
2495 const int nd = ccv_nnc_tensor_nd(params.dim);
2496 int i;
2497 for (i = 0; i < nd; i++)
2498 scale_params.dim[i] = 1;
2499 for (i = 0; i < self->params.rmsnorm.count; i++)
2500 scale_params.dim[self->params.rmsnorm.axis[i]] = params.dim[self->params.rmsnorm.axis[i]];
2501 // Both scale and bias are shared between if this model is reused.
2502 if (!self->scale.graph)
2503 self->scale = ccv_nnc_tensor_symbol_new(graph, scale_params, "scale");
2504 const ccv_nnc_cmd_t rmsnorm = ccv_nnc_cmd(CCV_NNC_RMSNORM_FORWARD, 0, self->params, 0);
2505 ccv_nnc_tensor_param_t output_params[2];
2506 ccv_nnc_hint_tensor_auto(rmsnorm, (ccv_nnc_tensor_param_t []){
2507 params,
2508 scale_params,
2509 }, 2, ccv_nnc_no_hint, output_params, 2);
2510 const ccv_nnc_tensor_symbol_t output = ccv_nnc_tensor_symbol_new(graph, output_params[0], 0);
2511 const ccv_nnc_tensor_symbol_t saved_inv_std = ccv_nnc_tensor_symbol_new(graph, output_params[1], "saved_inv_std");
2512 ccv_nnc_graph_exec_symbol_new(graph, rmsnorm, TENSOR_SYMBOL_LIST(inputs[0], self->scale)(const ccv_nnc_tensor_symbol_t []){inputs[0], self->scale}
, (1 +1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(output, saved_inv_std)(const ccv_nnc_tensor_symbol_t []){output, saved_inv_std}, (1
+1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 -1)
, "rmsnorm");
2513 outputs[0] = output;
2514}
2515
2516static void _ccv_cnnp_rmsnorm_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
2517{
2518 ccv_cnnp_model_rmsnorm_t* const self = (ccv_cnnp_model_rmsnorm_t*)super;
2519 if (self->scale.graph)
2520 initializer(context, CMD_SET_FORWARD(1)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={1,}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->scale);
2521}
2522
2523static void _ccv_cnnp_rmsnorm_add_to_parameter(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const parameters, const int is_trainable)
2524{
2525 ccv_cnnp_model_rmsnorm_t* const self = (ccv_cnnp_model_rmsnorm_t*)super;
2526 if (self->scale.graph)
2527 add_to_array(parameters, self->scale, is_trainable);
2528}
2529
2530static ccv_cnnp_model_t* _ccv_cnnp_rmsnorm_copy(const ccv_cnnp_model_t* const super, void* const context);
2531
// Virtual table for the RMSNorm model.
static const ccv_cnnp_model_vtab_t ccv_cnnp_rmsnorm_isa = {
	.build = _ccv_cnnp_rmsnorm_build,
	.init_states = _ccv_cnnp_rmsnorm_init_states,
	.add_to_parameter = _ccv_cnnp_rmsnorm_add_to_parameter,
	.copy = _ccv_cnnp_rmsnorm_copy,
};
2538
2539ccv_cnnp_model_t* ccv_cnnp_rmsnorm(const float epsilon, const int axis[CCV_NNC_MAX_DIM_ALLOC(12)], const int axis_count, const int is_trainable, const char* const name)
2540{
2541 ccv_cnnp_model_rmsnorm_t* const model_rmsnorm = (ccv_cnnp_model_rmsnorm_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_rmsnorm_t));
2542 model_rmsnorm->super.isa = &ccv_cnnp_rmsnorm_isa;
2543 model_rmsnorm->super.input_size = 1;
2544 model_rmsnorm->super.outputs = &model_rmsnorm->output;
2545 model_rmsnorm->super.output_size = 1;
2546 model_rmsnorm->super.is_trainable = is_trainable;
2547 ccv_cnnp_model_copy_name(&model_rmsnorm->super, name);
2548 model_rmsnorm->scale.d = CCV_NNC_NO_TENSOR_SYMBOL;
2549 model_rmsnorm->scale.graph = 0;
2550 model_rmsnorm->params.rmsnorm.epsilon = epsilon;
2551 model_rmsnorm->params.rmsnorm.count = axis_count;
2552 memcpy(model_rmsnorm->params.lnorm.axis, axis, sizeof(int) * axis_count);
2553 return (ccv_cnnp_model_t*)model_rmsnorm;
2554}
2555
2556static ccv_cnnp_model_t* _ccv_cnnp_rmsnorm_copy(const ccv_cnnp_model_t* const super, void* const context)
2557{
2558 const ccv_cnnp_model_rmsnorm_t* const self = (const ccv_cnnp_model_rmsnorm_t*)super;
2559 return ccv_cnnp_rmsnorm(self->params.rmsnorm.epsilon, self->params.rmsnorm.axis, self->params.rmsnorm.count, self->super.is_trainable, self->super.name);
2560}
2561
2562// MARK - Batched Matrix Mul Layer
2563
// Batched matrix multiplication (GEMM) model state.
typedef struct {
	ccv_cnnp_model_t super; // Base model; must come first.
	ccv_nnc_tensor_symbol_t output; // Backing store for super.outputs.
	int transpose_a[2]; // Axis pair to swap on the first operand (no-op if equal).
	int transpose_b[2]; // Axis pair to swap on the second operand.
	int flags; // Extra blas flags forwarded to the GEMM command.
} ccv_cnnp_model_matmul_t;
2571
2572static void _ccv_cnnp_matmul_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2573{
2574 PRINT(CCV_CLI_VERBOSE, "[cnnp_matmul_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_matmul_build] -\n"); fflush(stdout); } } while
(0)
;
2575 assert(input_size == 2)((void) sizeof ((input_size == 2) ? 1 : 0), __extension__ ({ if
(input_size == 2) ; else __assert_fail ("input_size == 2", "ccv_cnnp_model_addons.c"
, 2575, __extension__ __PRETTY_FUNCTION__); }))
;
2576 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 2576, __extension__ __PRETTY_FUNCTION__
); }))
;
2577 ccv_cnnp_model_matmul_t* const self = (ccv_cnnp_model_matmul_t*)super;
2578 ccv_nnc_tensor_param_t a_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2579 ccv_nnc_tensor_param_t b_params = ccv_nnc_tensor_symbol_params(graph, inputs[1]);
2580 ccv_nnc_tensor_param_t output_params;
2581 ccv_nnc_cmd_t matmul = CMD_GEMM_FORWARD(self->transpose_a, self->transpose_b)ccv_nnc_cmd(CCV_NNC_GEMM_FORWARD, 0, ((ccv_nnc_cmd_param_t){.
size={.dim={1,1,1}},.blas={.a={1,1},.transpose_a={self->transpose_a
[0],self->transpose_a[1]},.transpose_b={self->transpose_b
[0],self->transpose_b[1]},}}), 0)
;
2582 matmul.info.blas.flags = self->flags;
2583 ccv_nnc_hint_tensor_auto(matmul, (ccv_nnc_tensor_param_t []){
2584 a_params,
2585 b_params,
2586 }, 2, ccv_nnc_no_hint, &output_params, 1);
2587 const ccv_nnc_tensor_symbol_t matmul_output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
2588 ccv_nnc_graph_exec_symbol_new(graph, matmul, inputs, input_size, TENSOR_SYMBOL_LIST(matmul_output)(const ccv_nnc_tensor_symbol_t []){matmul_output}, (1 +1 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1
)
, "matmul");
2589 outputs[0] = matmul_output;
2590}
2591
2592static ccv_cnnp_model_t* _ccv_cnnp_matmul_copy(const ccv_cnnp_model_t* const super, void* const context);
2593
// Virtual table for the matmul model — stateless, so no init_states / add_to_parameter.
static const ccv_cnnp_model_vtab_t ccv_cnnp_matmul_isa = {
	.build = _ccv_cnnp_matmul_build,
	.copy = _ccv_cnnp_matmul_copy,
};
2598
2599ccv_cnnp_model_t* ccv_cnnp_matmul(const int transpose_a[2], const int transpose_b[2], const int flags, const char* const name)
2600{
2601 ccv_cnnp_model_matmul_t* const model_matmul = (ccv_cnnp_model_matmul_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_matmul_t));
2602 model_matmul->super.isa = &ccv_cnnp_matmul_isa;
2603 model_matmul->super.input_size = 2;
2604 model_matmul->super.outputs = &model_matmul->output;
2605 model_matmul->super.output_size = 1;
2606 model_matmul->transpose_a[0] = transpose_a[0];
2607 model_matmul->transpose_a[1] = transpose_a[1];
2608 model_matmul->transpose_b[0] = transpose_b[0];
2609 model_matmul->transpose_b[1] = transpose_b[1];
2610 model_matmul->flags = flags;
2611 ccv_cnnp_model_copy_name(&model_matmul->super, name);
2612 return (ccv_cnnp_model_t*)model_matmul;
2613}
2614
2615static ccv_cnnp_model_t* _ccv_cnnp_matmul_copy(const ccv_cnnp_model_t* const super, void* const context)
2616{
2617 const ccv_cnnp_model_matmul_t* const self = (const ccv_cnnp_model_matmul_t*)super;
2618 return ccv_cnnp_matmul(self->transpose_a, self->transpose_b, self->flags, self->super.name);
2619}
2620
2621// MARK - Dropout Layer
2622
// Dropout model state.
typedef struct {
	ccv_cnnp_model_t super; // Base model; must come first.
	ccv_nnc_tensor_symbol_t output; // Backing store for super.outputs.
	ccv_nnc_graph_exec_symbol_t dropout; // Exec symbol, swapped at test time by set_is_test.
	float p; // Drop probability.
	int entirety; // Non-zero: drop the whole tensor at once rather than per element.
} ccv_cnnp_model_dropout_t;
2630
2631static void _ccv_cnnp_dropout_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2632{
2633 PRINT(CCV_CLI_VERBOSE, "[cnnp_dropout_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_dropout_build] -\n"); fflush(stdout); } } while
(0)
;
2634 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 2634, __extension__ __PRETTY_FUNCTION__); }))
;
2635 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 2635, __extension__ __PRETTY_FUNCTION__
); }))
;
2636 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2637 ccv_nnc_tensor_param_t output_params[2];
2638 ccv_cnnp_model_dropout_t* const self = (ccv_cnnp_model_dropout_t*)super;
2639 const ccv_nnc_cmd_t dropout = CMD_DROPOUT_FORWARD(self->p, self->entirety)ccv_nnc_cmd(CCV_NNC_DROPOUT_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.dropout={.p=self->p,.entirety=self
->entirety}}), 0)
;
2640 ccv_nnc_hint_tensor_auto(dropout, (ccv_nnc_tensor_param_t []){
2641 params,
2642 }, 1, ccv_nnc_no_hint, output_params, 2);
2643 const ccv_nnc_tensor_symbol_t dropout_output = ccv_nnc_tensor_symbol_new(graph, output_params[0], 0);
2644 const ccv_nnc_tensor_symbol_t mask = ccv_nnc_tensor_symbol_new(graph, output_params[1], "mask");
2645 self->dropout = ccv_nnc_graph_exec_symbol_new(graph, dropout, TENSOR_SYMBOL_LIST(inputs[0])(const ccv_nnc_tensor_symbol_t []){inputs[0]}, (1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(dropout_output, mask)(const ccv_nnc_tensor_symbol_t []){dropout_output, mask}, (1 +
1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 -1)
, "dropout");
2646 outputs[0] = dropout_output;
2647}
2648
2649static void _ccv_cnnp_dropout_set_is_test(ccv_cnnp_model_t* const super, const int is_test, const ccv_cnnp_cmd_updater_f updater, void* const context)
2650{
2651 ccv_cnnp_model_dropout_t* const self = (ccv_cnnp_model_dropout_t*)super;
2652 if (self->dropout.graph)
2653 {
2654 if (is_test)
2655 // During test, the dropout is not applied. Data transfer is perfect because if these are the same tensor, it will skip.
2656 updater(context, self->dropout, CMD_DATA_TRANSFER_FORWARD()ccv_nnc_cmd(CCV_NNC_DATA_TRANSFER_FORWARD, 0, ccv_nnc_cmd_auto
, 0)
, ccv_nnc_no_hint);
2657 else
2658 updater(context, self->dropout, CMD_DROPOUT_FORWARD(self->p, self->entirety)ccv_nnc_cmd(CCV_NNC_DROPOUT_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.dropout={.p=self->p,.entirety=self
->entirety}}), 0)
, ccv_nnc_no_hint);
2659 }
2660}
2661
2662static ccv_cnnp_model_t* _ccv_cnnp_dropout_copy(const ccv_cnnp_model_t* const super, void* const context);
2663
// Virtual table for the dropout model; set_is_test swaps the op at inference time.
static const ccv_cnnp_model_vtab_t ccv_cnnp_dropout_isa = {
	.build = _ccv_cnnp_dropout_build,
	.set_is_test = _ccv_cnnp_dropout_set_is_test,
	.copy = _ccv_cnnp_dropout_copy,
};
2669
2670ccv_cnnp_model_t* ccv_cnnp_dropout(const float p, const int entirety, const char* const name)
2671{
2672 ccv_cnnp_model_dropout_t* const model_dropout = (ccv_cnnp_model_dropout_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_dropout_t));
2673 model_dropout->super.isa = &ccv_cnnp_dropout_isa;
2674 model_dropout->super.input_size = 1;
2675 model_dropout->super.outputs = &model_dropout->output;
2676 model_dropout->super.output_size = 1;
2677 model_dropout->p = p;
2678 model_dropout->entirety = entirety;
2679 ccv_cnnp_model_copy_name(&model_dropout->super, name);
2680 return (ccv_cnnp_model_t*)model_dropout;
2681}
2682
2683static ccv_cnnp_model_t* _ccv_cnnp_dropout_copy(const ccv_cnnp_model_t* const super, void* const context)
2684{
2685 const ccv_cnnp_model_dropout_t* const self = (const ccv_cnnp_model_dropout_t*)super;
2686 return ccv_cnnp_dropout(self->p, self->entirety, self->super.name);
2687}
2688
2689// MARK - Masked Fill Layer
2690
// Masked fill model state.
typedef struct {
	ccv_cnnp_model_t super; // Base model; must come first.
	ccv_nnc_tensor_symbol_t output; // Backing store for super.outputs.
	float eq; // Mask value that triggers filling.
	float fill; // Value written where the mask equals eq.
} ccv_cnnp_model_masked_fill_t;
2697
2698static void _ccv_cnnp_masked_fill_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2699{
2700 PRINT(CCV_CLI_VERBOSE, "[cnnp_masked_fill_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_masked_fill_build] -\n"); fflush(stdout); } }
while (0)
;
2701 assert(input_size == 2)((void) sizeof ((input_size == 2) ? 1 : 0), __extension__ ({ if
(input_size == 2) ; else __assert_fail ("input_size == 2", "ccv_cnnp_model_addons.c"
, 2701, __extension__ __PRETTY_FUNCTION__); }))
;
2702 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 2702, __extension__ __PRETTY_FUNCTION__
); }))
;
2703 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2704 ccv_cnnp_model_masked_fill_t* const self = (ccv_cnnp_model_masked_fill_t*)super;
2705 const ccv_nnc_tensor_symbol_t masked_fill_output = ccv_nnc_tensor_symbol_new(graph, params, 0);
2706 ccv_nnc_graph_exec_symbol_new(graph, CMD_MASKED_FILL_FORWARD(self->eq, self->fill)ccv_nnc_cmd(CCV_NNC_MASKED_FILL_FORWARD, 0, (ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.blas={.a={self->eq, self->fill}
}}, 0)
, TENSOR_SYMBOL_LIST(inputs[0], inputs[1])(const ccv_nnc_tensor_symbol_t []){inputs[0], inputs[1]}, (1 +
1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 -1)
, TENSOR_SYMBOL_LIST(masked_fill_output)(const ccv_nnc_tensor_symbol_t []){masked_fill_output}, (1 +1
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 -1)
, "masked_fill");
2707 outputs[0] = masked_fill_output;
2708}
2709
2710static ccv_cnnp_model_t* _ccv_cnnp_masked_fill_copy(const ccv_cnnp_model_t* const super, void* const context);
2711
// Virtual table for the masked fill model — stateless, so build and copy only.
static const ccv_cnnp_model_vtab_t ccv_cnnp_masked_fill_isa = {
	.build = _ccv_cnnp_masked_fill_build,
	.copy = _ccv_cnnp_masked_fill_copy,
};
2716
2717ccv_cnnp_model_t* ccv_cnnp_masked_fill(const float eq, const float fill, const char* const name)
2718{
2719 ccv_cnnp_model_masked_fill_t* const model_masked_fill = (ccv_cnnp_model_masked_fill_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_masked_fill_t));
2720 model_masked_fill->super.isa = &ccv_cnnp_masked_fill_isa;
2721 model_masked_fill->super.input_size = 2;
2722 model_masked_fill->super.outputs = &model_masked_fill->output;
2723 model_masked_fill->super.output_size = 1;
2724 model_masked_fill->eq = eq;
2725 model_masked_fill->fill = fill;
2726 ccv_cnnp_model_copy_name(&model_masked_fill->super, name);
2727 return (ccv_cnnp_model_t*)model_masked_fill;
2728}
2729
2730static ccv_cnnp_model_t* _ccv_cnnp_masked_fill_copy(const ccv_cnnp_model_t* const super, void* const context)
2731{
2732 const ccv_cnnp_model_masked_fill_t* const self = (const ccv_cnnp_model_masked_fill_t*)super;
2733 return ccv_cnnp_masked_fill(self->eq, self->fill, self->super.name);
2734}
2735
2736// MARK - Index Select Layer
2737
// Index select model state — fully stateless beyond the base model.
typedef struct {
	ccv_cnnp_model_t super; // Base model; must come first.
	ccv_nnc_tensor_symbol_t output; // Backing store for super.outputs.
} ccv_cnnp_model_index_select_t;
2742
2743static void _ccv_cnnp_index_select_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2744{
2745 PRINT(CCV_CLI_VERBOSE, "[cnnp_index_select_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_index_select_build] -\n"); fflush(stdout); }
} while (0)
;
2746 assert(input_size == 2)((void) sizeof ((input_size == 2) ? 1 : 0), __extension__ ({ if
(input_size == 2) ; else __assert_fail ("input_size == 2", "ccv_cnnp_model_addons.c"
, 2746, __extension__ __PRETTY_FUNCTION__); }))
;
2747 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 2747, __extension__ __PRETTY_FUNCTION__
); }))
;
2748 const ccv_nnc_tensor_param_t vocab_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2749 const ccv_nnc_tensor_param_t index_params = ccv_nnc_tensor_symbol_params(graph, inputs[1]);
2750 ccv_nnc_tensor_param_t output_params;
2751 const ccv_nnc_cmd_t index_select = CMD_INDEX_SELECT_FORWARD()ccv_nnc_cmd(CCV_NNC_INDEX_SELECT_FORWARD, 0, ccv_nnc_cmd_auto
, 0)
;
2752 ccv_nnc_hint_tensor_auto(index_select, (ccv_nnc_tensor_param_t []){
2753 vocab_params,
2754 index_params,
2755 }, 2, ccv_nnc_no_hint, &output_params, 1);
2756 const ccv_nnc_tensor_symbol_t output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
2757 ccv_nnc_graph_exec_symbol_new(graph, index_select, TENSOR_SYMBOL_LIST(inputs[0], inputs[1])(const ccv_nnc_tensor_symbol_t []){inputs[0], inputs[1]}, (1 +
1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 -1)
, TENSOR_SYMBOL_LIST(output)(const ccv_nnc_tensor_symbol_t []){output}, (1 +1 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "index_select");
2758 outputs[0] = output;
2759}
2760
2761static ccv_cnnp_model_t* _ccv_cnnp_index_select_copy(const ccv_cnnp_model_t* const super, void* const context);
2762
// Virtual table for the index select model — stateless, so build and copy only.
static const ccv_cnnp_model_vtab_t ccv_cnnp_index_select_isa = {
	.build = _ccv_cnnp_index_select_build,
	.copy = _ccv_cnnp_index_select_copy,
};
2767
2768ccv_cnnp_model_t* ccv_cnnp_index_select(const char* const name)
2769{
2770 ccv_cnnp_model_index_select_t* const model_index_select = (ccv_cnnp_model_index_select_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_index_select_t));
2771 model_index_select->super.isa = &ccv_cnnp_index_select_isa;
2772 model_index_select->super.input_size = 2;
2773 model_index_select->super.outputs = &model_index_select->output;
2774 model_index_select->super.output_size = 1;
2775 ccv_cnnp_model_copy_name(&model_index_select->super, name);
2776 return (ccv_cnnp_model_t*)model_index_select;
2777}
2778
2779static ccv_cnnp_model_t* _ccv_cnnp_index_select_copy(const ccv_cnnp_model_t* const super, void* const context)
2780{
2781 ccv_cnnp_model_index_select_t* const self = (ccv_cnnp_model_index_select_t*)super;
2782 return ccv_cnnp_index_select(self->super.name);
2783}
2784
2785// MARK - Embedding Layer
2786
// Embedding model state: a learned vocab tensor of shape
// [vocab_size x embed_size] (see dim[0]/dim[1] assignments in build),
// plus the datatype it is created with (CCV_32F or CCV_16F per the
// factory's assert).
 2787typedef struct {
 2788	ccv_cnnp_model_t super;
 2789	ccv_nnc_tensor_symbol_t output;
 2790	ccv_nnc_tensor_symbol_t vocab;
 2791	int datatype;
 2792	int vocab_size;
 2793	int embed_size;
 2794} ccv_cnnp_model_embedding_t;
 2795
// Build hook: embedding is implemented as INDEX_SELECT_FORWARD against the
// learned vocab matrix, with the single input providing the indices. The
// vocab symbol is created lazily on first build and must stay bound to the
// same graph afterwards (asserted below).
 2796static void _ccv_cnnp_embedding_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
 2797{
 2798	ccv_cnnp_model_embedding_t* const self = (ccv_cnnp_model_embedding_t*)super;
 2799	PRINT(CCV_CLI_VERBOSE, "[cnnp_embedding_build] vocab_size: %d, embed_size: %d\n", self->vocab_size, self->embed_size)do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
 { printf("[cnnp_embedding_build] vocab_size: %d, embed_size: %d\n"
, self->vocab_size, self->embed_size); fflush(stdout); }
 } while (0)
;
 2800	assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
 (input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 2800, __extension__ __PRETTY_FUNCTION__); }))
;
 2801	assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
 if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 2801, __extension__ __PRETTY_FUNCTION__
); }))
;
 2802	const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
// Derive the vocab tensor params from the input (keeps device/format),
// then override datatype and set the 2-D [vocab_size x embed_size] shape.
 2803	ccv_nnc_tensor_param_t vocab_params = params;
 2804	memset(vocab_params.dim, 0, sizeof(vocab_params.dim));
 2805	vocab_params.datatype = self->datatype;
 2806	vocab_params.dim[0] = self->vocab_size;
 2807	vocab_params.dim[1] = self->embed_size;
 2808	if (!self->vocab.graph)
 2809		self->vocab = ccv_nnc_tensor_symbol_new(graph, vocab_params, "vocab");
 2810	assert(self->vocab.graph == graph)((void) sizeof ((self->vocab.graph == graph) ? 1 : 0), __extension__
 ({ if (self->vocab.graph == graph) ; else __assert_fail (
"self->vocab.graph == graph", "ccv_cnnp_model_addons.c", 2810
, __extension__ __PRETTY_FUNCTION__); }))
;
 2811	ccv_nnc_tensor_param_t output_params;
 2812	const ccv_nnc_cmd_t embedding = CMD_INDEX_SELECT_FORWARD()ccv_nnc_cmd(CCV_NNC_INDEX_SELECT_FORWARD, 0, ccv_nnc_cmd_auto
, 0)
;
// Let the command's shape inference compute the output params from
// (vocab, indices).
 2813	ccv_nnc_hint_tensor_auto(embedding, (ccv_nnc_tensor_param_t []){
 2814		vocab_params,
 2815		params,
 2816	}, 2, ccv_nnc_no_hint, &output_params, 1);
 2817	const ccv_nnc_tensor_symbol_t output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
 2818	ccv_nnc_graph_exec_symbol_new(graph, embedding, TENSOR_SYMBOL_LIST(self->vocab, inputs[0])(const ccv_nnc_tensor_symbol_t []){self->vocab, inputs[0]}
, (1 +1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(output)(const ccv_nnc_tensor_symbol_t []){output}, (1 +1 +0 +0 +0 +0
 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "embedding");
 2819	outputs[0] = output;
 2820}
 2821
// Init hook: fills vocab uniformly in [-bound, bound] with
// bound = sqrt(3) * sqrt(2) / sqrt(vocab_size + embed_size).
// NOTE(review): this looks like Glorot-style fan-sum scaling — confirm intent.
 2822static void _ccv_cnnp_embedding_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
 2823{
 2824	ccv_cnnp_model_embedding_t* const self = (ccv_cnnp_model_embedding_t*)super;
 2825	const float std = sqrtf(2) / sqrtf(self->vocab_size + self->embed_size);
 2826	const float bound = sqrtf(3) * std;
 2827	initializer(context, CMD_RANDOM_UNIFORM_FORWARD(-bound, bound)ccv_nnc_cmd(CCV_NNC_RANDOM_UNIFORM_FORWARD, 0, (ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.blas={.a={-bound, bound}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->vocab);
 2828}
 2829
// Parameter hook: the vocab matrix is the model's only parameter.
 2830static void _ccv_cnnp_embedding_add_to_parameter(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const parameters, const int is_trainable)
 2831{
 2832	ccv_cnnp_model_embedding_t* const self = (ccv_cnnp_model_embedding_t*)super;
 2833	add_to_array(parameters, self->vocab, is_trainable);
 2834}
 2835
 2836static ccv_cnnp_model_t* _ccv_cnnp_embedding_copy(const ccv_cnnp_model_t* const super, void* const context);
 2837
// Vtable for the embedding model: has trainable state, so init_states and
// add_to_parameter are wired in addition to build/copy.
 2838static const ccv_cnnp_model_vtab_t ccv_cnnp_embedding_isa = {
 2839	.build = _ccv_cnnp_embedding_build,
 2840	.init_states = _ccv_cnnp_embedding_init_states,
 2841	.add_to_parameter = _ccv_cnnp_embedding_add_to_parameter,
 2842	.copy = _ccv_cnnp_embedding_copy,
 2843};
 2844
// Public factory. datatype must be CCV_32F or CCV_16F, and both sizes must
// be positive (all asserted). The vocab symbol starts unbound (graph = 0)
// so the first build creates it.
 2845ccv_cnnp_model_t* ccv_cnnp_embedding(const int datatype, const int vocab_size, const int embed_size, const int is_trainable, const char* const name)
 2846{
 2847	ccv_cnnp_model_embedding_t* const model_embedding = (ccv_cnnp_model_embedding_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_embedding_t));
 2848	model_embedding->super.isa = &ccv_cnnp_embedding_isa;
 2849	model_embedding->super.input_size = 1;
 2850	model_embedding->super.outputs = &model_embedding->output;
 2851	model_embedding->super.output_size = 1;
 2852	model_embedding->super.is_trainable = is_trainable;
 2853	ccv_cnnp_model_copy_name(&model_embedding->super, name);
 2854	model_embedding->vocab.d = CCV_NNC_NO_TENSOR_SYMBOL;
 2855	model_embedding->vocab.graph = 0;
 2856	assert(datatype == CCV_32F || datatype == CCV_16F)((void) sizeof ((datatype == CCV_32F || datatype == CCV_16F) ?
 1 : 0), __extension__ ({ if (datatype == CCV_32F || datatype
 == CCV_16F) ; else __assert_fail ("datatype == CCV_32F || datatype == CCV_16F"
, "ccv_cnnp_model_addons.c", 2856, __extension__ __PRETTY_FUNCTION__
); }))
;
 2857	model_embedding->datatype = datatype;
 2858	assert(vocab_size > 0)((void) sizeof ((vocab_size > 0) ? 1 : 0), __extension__ (
{ if (vocab_size > 0) ; else __assert_fail ("vocab_size > 0"
, "ccv_cnnp_model_addons.c", 2858, __extension__ __PRETTY_FUNCTION__
); }))
;
 2859	model_embedding->vocab_size = vocab_size;
 2860	assert(embed_size > 0)((void) sizeof ((embed_size > 0) ? 1 : 0), __extension__ (
{ if (embed_size > 0) ; else __assert_fail ("embed_size > 0"
, "ccv_cnnp_model_addons.c", 2860, __extension__ __PRETTY_FUNCTION__
); }))
;
 2861	model_embedding->embed_size = embed_size;
 2862	return (ccv_cnnp_model_t*)model_embedding;
 2863}
 2864
// Copy hook: recreate from the hyper-parameters; learned vocab weights are
// not copied here (new instance starts with an unbound vocab symbol).
 2865static ccv_cnnp_model_t* _ccv_cnnp_embedding_copy(const ccv_cnnp_model_t* const super, void* const context)
 2866{
 2867	ccv_cnnp_model_embedding_t* const self = (ccv_cnnp_model_embedding_t*)super;
 2868	return ccv_cnnp_embedding(self->datatype, self->vocab_size, self->embed_size, self->super.is_trainable, self->super.name);
 2869}
2870
2871// MARK - Pool Layers
2872
// Upsample model state: interpolation type (nearest/bilinear per the
// factory's assert), per-axis scales, and the align-corners flag.
 2873typedef struct {
 2874	ccv_cnnp_model_t super;
 2875	ccv_nnc_tensor_symbol_t output;
 2876	int type;
 2877	float width_scale;
 2878	float height_scale;
 2879	int align_corners;
 2880} ccv_cnnp_model_upsample_t;
 2881
// Build hook: single input -> single output UPSAMPLE_FORWARD node; output
// shape comes from the command's shape inference.
 2882static void _ccv_cnnp_upsample_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
 2883{
 2884	PRINT(CCV_CLI_VERBOSE, "[cnnp_upsample_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
 { printf("[cnnp_upsample_build] -\n"); fflush(stdout); } } while
 (0)
;
 2885	assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
 (input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 2885, __extension__ __PRETTY_FUNCTION__); }))
;
 2886	assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
 if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 2886, __extension__ __PRETTY_FUNCTION__
); }))
;
 2887	ccv_cnnp_model_upsample_t* const self = (ccv_cnnp_model_upsample_t*)super;
 2888	const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
 2889	ccv_nnc_cmd_t cmd = CMD_UPSAMPLE_FORWARD(self->type, self->width_scale, self->height_scale, self->align_corners)ccv_nnc_cmd(CCV_NNC_UPSAMPLE_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.upsample={.type=self->type,.width_scale
=self->width_scale,.height_scale=self->height_scale,.align_corners
=self->align_corners}}), 0)
;
 2890	ccv_nnc_tensor_param_t output_params;
 2891	ccv_nnc_hint_tensor_auto(cmd, &params, 1, ccv_nnc_no_hint, &output_params, 1);
 2892	const ccv_nnc_tensor_symbol_t output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
 2893	ccv_nnc_graph_exec_symbol_new(graph, cmd, TENSOR_SYMBOL_LIST(inputs[0])(const ccv_nnc_tensor_symbol_t []){inputs[0]}, (1 +1 +0 +0 +0
 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(output)(const ccv_nnc_tensor_symbol_t []){output}, (1 +1 +0 +0 +0 +0
 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, "upsample");
 2894	outputs[0] = output;
 2895}
 2896
 2897static ccv_cnnp_model_t* _ccv_cnnp_upsample_copy(const ccv_cnnp_model_t* const super, void* const context);
 2898
// Vtable: stateless model, build + copy only.
 2899static const ccv_cnnp_model_vtab_t ccv_cnnp_upsample_isa = {
 2900	.build = _ccv_cnnp_upsample_build,
 2901	.copy = _ccv_cnnp_upsample_copy,
 2902};
 2903
// Public factory; type is validated to be nearest or bilinear.
 2904ccv_cnnp_model_t* ccv_cnnp_upsample(const int type, const float width_scale, const float height_scale, const int align_corners, const char* const name)
 2905{
 2906	ccv_cnnp_model_upsample_t* const model_upsample = (ccv_cnnp_model_upsample_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_upsample_t));
 2907	model_upsample->super.isa = &ccv_cnnp_upsample_isa;
 2908	model_upsample->super.input_size = 1;
 2909	model_upsample->super.outputs = &model_upsample->output;
 2910	model_upsample->super.output_size = 1;
 2911	ccv_cnnp_model_copy_name(&model_upsample->super, name);
 2912	assert(type == CCV_NNC_UPSAMPLE_NEAREST || type == CCV_NNC_UPSAMPLE_BILINEAR)((void) sizeof ((type == CCV_NNC_UPSAMPLE_NEAREST || type == CCV_NNC_UPSAMPLE_BILINEAR
) ? 1 : 0), __extension__ ({ if (type == CCV_NNC_UPSAMPLE_NEAREST
 || type == CCV_NNC_UPSAMPLE_BILINEAR) ; else __assert_fail (
"type == CCV_NNC_UPSAMPLE_NEAREST || type == CCV_NNC_UPSAMPLE_BILINEAR"
, "ccv_cnnp_model_addons.c", 2912, __extension__ __PRETTY_FUNCTION__
); }))
;
 2913	model_upsample->type = type;
 2914	model_upsample->width_scale = width_scale;
 2915	model_upsample->height_scale = height_scale;
 2916	model_upsample->align_corners = align_corners;
 2917	return (ccv_cnnp_model_t*)model_upsample;
 2918}
 2919
// Copy hook: rebuild from the stored hyper-parameters.
 2920static ccv_cnnp_model_t* _ccv_cnnp_upsample_copy(const ccv_cnnp_model_t* const super, void* const context)
 2921{
 2922	const ccv_cnnp_model_upsample_t* const self = (const ccv_cnnp_model_upsample_t*)super;
 2923	return ccv_cnnp_upsample(self->type, self->width_scale, self->height_scale, self->align_corners, self->super.name);
 2924}
2925
2926// MARK - Reduce Sum Layer
2927
// Reduce-sum model state: reduction axes (up to CCV_NNC_MAX_DIM_ALLOC) and
// their count.
 2928typedef struct {
 2929	ccv_cnnp_model_t super;
 2930	int axis[CCV_NNC_MAX_DIM_ALLOC(12)];
 2931	int count;
 2932	ccv_nnc_tensor_symbol_t output;
 2933} ccv_cnnp_model_reduce_sum_t;
 2934
// Build hook: one REDUCE_SUM_FORWARD node over inputs[0]; the stored axes
// are copied into the command's reduce params before shape inference.
 2935static void _ccv_cnnp_reduce_sum_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
 2936{
 2937	PRINT(CCV_CLI_VERBOSE, "[cnnp_reduce_sum_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
 { printf("[cnnp_reduce_sum_build] -\n"); fflush(stdout); } }
 while (0)
;
 2938	const ccv_cnnp_model_reduce_sum_t* const self = (const ccv_cnnp_model_reduce_sum_t*)super;
 2939	assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
 (input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 2939, __extension__ __PRETTY_FUNCTION__); }))
;
 2940	assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
 if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 2940, __extension__ __PRETTY_FUNCTION__
); }))
;
 2941	ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
 2942	ccv_nnc_tensor_param_t output_params;
 2943	ccv_nnc_cmd_t reduce_sum = CMD_REDUCE_SUM_FORWARD()ccv_nnc_cmd(CCV_NNC_REDUCE_SUM_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.reduce={.count=(1 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1),.axis={}}}
), 0)
;
 2944	int i;
 2945	for (i = 0; i < self->count; i++)
 2946		reduce_sum.info.reduce.axis[i] = self->axis[i];
 2947	reduce_sum.info.reduce.count = self->count;
 2948	ccv_nnc_hint_tensor_auto(reduce_sum, &input_params, 1, ccv_nnc_no_hint, &output_params, 1);
 2949	outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
 2950	ccv_nnc_graph_exec_symbol_new(graph, reduce_sum, inputs, input_size, outputs, output_size, "reduce_sum");
 2951}
 2952
 2953static ccv_cnnp_model_t* _ccv_cnnp_reduce_sum_copy(const ccv_cnnp_model_t* const self, void* const context);
 2954
// Vtable: stateless model, build + copy only.
 2955static const ccv_cnnp_model_vtab_t ccv_cnnp_reduce_sum_isa = {
 2956	.build = _ccv_cnnp_reduce_sum_build,
 2957	.copy = _ccv_cnnp_reduce_sum_copy,
 2958};
 2959
// Public factory; caller-supplied axes are copied into the model (bounded
// by CCV_NNC_MAX_DIM_ALLOC, asserted).
 2960ccv_cnnp_model_t* ccv_cnnp_reduce_sum(const int* const axis, const int axis_count, const char* const name)
 2961{
 2962	ccv_cnnp_model_reduce_sum_t* const model_reduce_sum = (ccv_cnnp_model_reduce_sum_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_reduce_sum_t));
 2963	model_reduce_sum->super.isa = &ccv_cnnp_reduce_sum_isa;
 2964	model_reduce_sum->super.input_size = 1;
 2965	model_reduce_sum->super.outputs = &model_reduce_sum->output;
 2966	model_reduce_sum->super.output_size = 1;
 2967	ccv_cnnp_model_copy_name(&model_reduce_sum->super, name);
 2968	assert(axis_count <= CCV_NNC_MAX_DIM_ALLOC)((void) sizeof ((axis_count <= (12)) ? 1 : 0), __extension__
 ({ if (axis_count <= (12)) ; else __assert_fail ("axis_count <= CCV_NNC_MAX_DIM_ALLOC"
, "ccv_cnnp_model_addons.c", 2968, __extension__ __PRETTY_FUNCTION__
); }))
;
 2969	int i;
 2970	for (i = 0; i < axis_count; i++)
 2971		model_reduce_sum->axis[i] = axis[i];
 2972	model_reduce_sum->count = axis_count;
 2973	return (ccv_cnnp_model_t*)model_reduce_sum;
 2974}
 2975
// Copy hook: rebuild from the stored axes.
 2976static ccv_cnnp_model_t* _ccv_cnnp_reduce_sum_copy(const ccv_cnnp_model_t* const super, void* const context)
 2977{
 2978	const ccv_cnnp_model_reduce_sum_t* const self = (const ccv_cnnp_model_reduce_sum_t*)super;
 2979	return ccv_cnnp_reduce_sum(self->axis, self->count, self->super.name);
 2980}
2981
2982// MARK - Reduce Mean Layer
2983
// Reduce-mean model state: reduction axes and count; mirrors the
// reduce-sum layer with a different command.
 2984typedef struct {
 2985	ccv_cnnp_model_t super;
 2986	int axis[CCV_NNC_MAX_DIM_ALLOC(12)];
 2987	int count;
 2988	ccv_nnc_tensor_symbol_t output;
 2989} ccv_cnnp_model_reduce_mean_t;
 2990
// Build hook: one REDUCE_MEAN_FORWARD node over inputs[0] with the stored
// axes; output shape via command shape inference.
 2991static void _ccv_cnnp_reduce_mean_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
 2992{
 2993	PRINT(CCV_CLI_VERBOSE, "[cnnp_reduce_mean_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
 { printf("[cnnp_reduce_mean_build] -\n"); fflush(stdout); } }
 while (0)
;
 2994	const ccv_cnnp_model_reduce_mean_t* const self = (const ccv_cnnp_model_reduce_mean_t*)super;
 2995	assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
 (input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 2995, __extension__ __PRETTY_FUNCTION__); }))
;
 2996	assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
 if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 2996, __extension__ __PRETTY_FUNCTION__
); }))
;
 2997	ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
 2998	ccv_nnc_tensor_param_t output_params;
 2999	ccv_nnc_cmd_t reduce_mean = CMD_REDUCE_MEAN_FORWARD()ccv_nnc_cmd(CCV_NNC_REDUCE_MEAN_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.reduce={.count=(1 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1),.axis={}}}
), 0)
;
 3000	int i;
 3001	for (i = 0; i < self->count; i++)
 3002		reduce_mean.info.reduce.axis[i] = self->axis[i];
 3003	reduce_mean.info.reduce.count = self->count;
 3004	ccv_nnc_hint_tensor_auto(reduce_mean, &input_params, 1, ccv_nnc_no_hint, &output_params, 1);
 3005	outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
 3006	ccv_nnc_graph_exec_symbol_new(graph, reduce_mean, inputs, input_size, outputs, output_size, "reduce_mean");
 3007}
 3008
 3009static ccv_cnnp_model_t* _ccv_cnnp_reduce_mean_copy(const ccv_cnnp_model_t* const self, void* const context);
 3010
// Vtable: stateless model, build + copy only.
 3011static const ccv_cnnp_model_vtab_t ccv_cnnp_reduce_mean_isa = {
 3012	.build = _ccv_cnnp_reduce_mean_build,
 3013	.copy = _ccv_cnnp_reduce_mean_copy,
 3014};
 3015
// Public factory; copies caller axes (bounded by CCV_NNC_MAX_DIM_ALLOC).
 3016ccv_cnnp_model_t* ccv_cnnp_reduce_mean(const int* const axis, const int axis_count, const char* const name)
 3017{
 3018	ccv_cnnp_model_reduce_mean_t* const model_reduce_mean = (ccv_cnnp_model_reduce_mean_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_reduce_mean_t));
 3019	model_reduce_mean->super.isa = &ccv_cnnp_reduce_mean_isa;
 3020	model_reduce_mean->super.input_size = 1;
 3021	model_reduce_mean->super.outputs = &model_reduce_mean->output;
 3022	model_reduce_mean->super.output_size = 1;
 3023	ccv_cnnp_model_copy_name(&model_reduce_mean->super, name);
 3024	assert(axis_count <= CCV_NNC_MAX_DIM_ALLOC)((void) sizeof ((axis_count <= (12)) ? 1 : 0), __extension__
 ({ if (axis_count <= (12)) ; else __assert_fail ("axis_count <= CCV_NNC_MAX_DIM_ALLOC"
, "ccv_cnnp_model_addons.c", 3024, __extension__ __PRETTY_FUNCTION__
); }))
;
 3025	int i;
 3026	for (i = 0; i < axis_count; i++)
 3027		model_reduce_mean->axis[i] = axis[i];
 3028	model_reduce_mean->count = axis_count;
 3029	return (ccv_cnnp_model_t*)model_reduce_mean;
 3030}
 3031
// Copy hook: rebuild from the stored axes.
 3032static ccv_cnnp_model_t* _ccv_cnnp_reduce_mean_copy(const ccv_cnnp_model_t* const super, void* const context)
 3033{
 3034	const ccv_cnnp_model_reduce_mean_t* const self = (const ccv_cnnp_model_reduce_mean_t*)super;
 3035	return ccv_cnnp_reduce_mean(self->axis, self->count, self->super.name);
 3036}
3037
3038// MARK - Reduce Max Layer
3039
// Reduce-max model state: reduction axes and count.
 3040typedef struct {
 3041	ccv_cnnp_model_t super;
 3042	int axis[CCV_NNC_MAX_DIM_ALLOC(12)];
 3043	int count;
 3044	ccv_nnc_tensor_symbol_t output;
 3045} ccv_cnnp_model_reduce_max_t;
 3046
// Build hook: one REDUCE_MAX_FORWARD node over inputs[0] with stored axes.
 3047static void _ccv_cnnp_reduce_max_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
 3048{
 3049	PRINT(CCV_CLI_VERBOSE, "[cnnp_reduce_max_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
 { printf("[cnnp_reduce_max_build] -\n"); fflush(stdout); } }
 while (0)
;
 3050	const ccv_cnnp_model_reduce_max_t* const self = (const ccv_cnnp_model_reduce_max_t*)super;
 3051	assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
 (input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 3051, __extension__ __PRETTY_FUNCTION__); }))
;
 3052	assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
 if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 3052, __extension__ __PRETTY_FUNCTION__
); }))
;
 3053	ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
 3054	ccv_nnc_tensor_param_t output_params;
 3055	ccv_nnc_cmd_t reduce_max = CMD_REDUCE_MAX_FORWARD()ccv_nnc_cmd(CCV_NNC_REDUCE_MAX_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.reduce={.count=(1 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1),.axis={}}}
), 0)
;
 3056	int i;
 3057	for (i = 0; i < self->count; i++)
 3058		reduce_max.info.reduce.axis[i] = self->axis[i];
 3059	reduce_max.info.reduce.count = self->count;
 3060	ccv_nnc_hint_tensor_auto(reduce_max, &input_params, 1, ccv_nnc_no_hint, &output_params, 1);
 3061	outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
 3062	ccv_nnc_graph_exec_symbol_new(graph, reduce_max, inputs, input_size, outputs, output_size, "reduce_max");
 3063}
 3064
 3065static ccv_cnnp_model_t* _ccv_cnnp_reduce_max_copy(const ccv_cnnp_model_t* const self, void* const context);
 3066
// Vtable: stateless model, build + copy only.
 3067static const ccv_cnnp_model_vtab_t ccv_cnnp_reduce_max_isa = {
 3068	.build = _ccv_cnnp_reduce_max_build,
 3069	.copy = _ccv_cnnp_reduce_max_copy,
 3070};
 3071
// Public factory; copies caller axes (bounded by CCV_NNC_MAX_DIM_ALLOC).
 3072ccv_cnnp_model_t* ccv_cnnp_reduce_max(const int* const axis, const int axis_count, const char* const name)
 3073{
 3074	ccv_cnnp_model_reduce_max_t* const model_reduce_max = (ccv_cnnp_model_reduce_max_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_reduce_max_t));
 3075	model_reduce_max->super.isa = &ccv_cnnp_reduce_max_isa;
 3076	model_reduce_max->super.input_size = 1;
 3077	model_reduce_max->super.outputs = &model_reduce_max->output;
 3078	model_reduce_max->super.output_size = 1;
 3079	ccv_cnnp_model_copy_name(&model_reduce_max->super, name);
 3080	assert(axis_count <= CCV_NNC_MAX_DIM_ALLOC)((void) sizeof ((axis_count <= (12)) ? 1 : 0), __extension__
 ({ if (axis_count <= (12)) ; else __assert_fail ("axis_count <= CCV_NNC_MAX_DIM_ALLOC"
, "ccv_cnnp_model_addons.c", 3080, __extension__ __PRETTY_FUNCTION__
); }))
;
 3081	int i;
 3082	for (i = 0; i < axis_count; i++)
 3083		model_reduce_max->axis[i] = axis[i];
 3084	model_reduce_max->count = axis_count;
 3085	return (ccv_cnnp_model_t*)model_reduce_max;
 3086}
 3087
// Copy hook: rebuild from the stored axes.
 3088static ccv_cnnp_model_t* _ccv_cnnp_reduce_max_copy(const ccv_cnnp_model_t* const super, void* const context)
 3089{
 3090	const ccv_cnnp_model_reduce_max_t* const self = (const ccv_cnnp_model_reduce_max_t*)super;
 3091	return ccv_cnnp_reduce_max(self->axis, self->count, self->super.name);
 3092}
3093
3094// MARK - Reduce Min Layer
3095
// Reduce-min model state: reduction axes and count.
 3096typedef struct {
 3097	ccv_cnnp_model_t super;
 3098	int axis[CCV_NNC_MAX_DIM_ALLOC(12)];
 3099	int count;
 3100	ccv_nnc_tensor_symbol_t output;
 3101} ccv_cnnp_model_reduce_min_t;
 3102
// Build hook: one REDUCE_MIN_FORWARD node over inputs[0] with stored axes.
 3103static void _ccv_cnnp_reduce_min_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
 3104{
 3105	PRINT(CCV_CLI_VERBOSE, "[cnnp_reduce_min_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
 { printf("[cnnp_reduce_min_build] -\n"); fflush(stdout); } }
 while (0)
;
 3106	const ccv_cnnp_model_reduce_min_t* const self = (const ccv_cnnp_model_reduce_min_t*)super;
 3107	assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
 (input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 3107, __extension__ __PRETTY_FUNCTION__); }))
;
 3108	assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
 if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 3108, __extension__ __PRETTY_FUNCTION__
); }))
;
 3109	ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
 3110	ccv_nnc_tensor_param_t output_params;
 3111	ccv_nnc_cmd_t reduce_min = CMD_REDUCE_MIN_FORWARD()ccv_nnc_cmd(CCV_NNC_REDUCE_MIN_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.reduce={.count=(1 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1),.axis={}}}
), 0)
;
 3112	int i;
 3113	for (i = 0; i < self->count; i++)
 3114		reduce_min.info.reduce.axis[i] = self->axis[i];
 3115	reduce_min.info.reduce.count = self->count;
 3116	ccv_nnc_hint_tensor_auto(reduce_min, &input_params, 1, ccv_nnc_no_hint, &output_params, 1);
 3117	outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
 3118	ccv_nnc_graph_exec_symbol_new(graph, reduce_min, inputs, input_size, outputs, output_size, "reduce_min");
 3119}
 3120
 3121static ccv_cnnp_model_t* _ccv_cnnp_reduce_min_copy(const ccv_cnnp_model_t* const self, void* const context);
 3122
// Vtable: stateless model, build + copy only.
 3123static const ccv_cnnp_model_vtab_t ccv_cnnp_reduce_min_isa = {
 3124	.build = _ccv_cnnp_reduce_min_build,
 3125	.copy = _ccv_cnnp_reduce_min_copy,
 3126};
 3127
// Public factory; copies caller axes (bounded by CCV_NNC_MAX_DIM_ALLOC).
 3128ccv_cnnp_model_t* ccv_cnnp_reduce_min(const int* const axis, const int axis_count, const char* const name)
 3129{
 3130	ccv_cnnp_model_reduce_min_t* const model_reduce_min = (ccv_cnnp_model_reduce_min_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_reduce_min_t));
 3131	model_reduce_min->super.isa = &ccv_cnnp_reduce_min_isa;
 3132	model_reduce_min->super.input_size = 1;
 3133	model_reduce_min->super.outputs = &model_reduce_min->output;
 3134	model_reduce_min->super.output_size = 1;
 3135	ccv_cnnp_model_copy_name(&model_reduce_min->super, name);
 3136	assert(axis_count <= CCV_NNC_MAX_DIM_ALLOC)((void) sizeof ((axis_count <= (12)) ? 1 : 0), __extension__
 ({ if (axis_count <= (12)) ; else __assert_fail ("axis_count <= CCV_NNC_MAX_DIM_ALLOC"
, "ccv_cnnp_model_addons.c", 3136, __extension__ __PRETTY_FUNCTION__
); }))
;
 3137	int i;
 3138	for (i = 0; i < axis_count; i++)
 3139		model_reduce_min->axis[i] = axis[i];
 3140	model_reduce_min->count = axis_count;
 3141	return (ccv_cnnp_model_t*)model_reduce_min;
 3142}
 3143
// Copy hook: rebuild from the stored axes.
 3144static ccv_cnnp_model_t* _ccv_cnnp_reduce_min_copy(const ccv_cnnp_model_t* const super, void* const context)
 3145{
 3146	const ccv_cnnp_model_reduce_min_t* const self = (const ccv_cnnp_model_reduce_min_t*)super;
 3147	return ccv_cnnp_reduce_min(self->axis, self->count, self->super.name);
 3148}
3149
3150// MARK - Reduce Norm2 Layer
3151
// Reduce-norm2 model state: reduction axes and count.
 3152typedef struct {
 3153	ccv_cnnp_model_t super;
 3154	int axis[CCV_NNC_MAX_DIM_ALLOC(12)];
 3155	int count;
 3156	ccv_nnc_tensor_symbol_t output;
 3157} ccv_cnnp_model_reduce_norm2_t;
 3158
// Build hook: one REDUCE_NORM2_FORWARD node over inputs[0] with stored axes.
 3159static void _ccv_cnnp_reduce_norm2_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
 3160{
 3161	const ccv_cnnp_model_reduce_norm2_t* const self = (const ccv_cnnp_model_reduce_norm2_t*)super;
 3162	PRINT(CCV_CLI_VERBOSE, "[cnnp_reduce_norm2_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
 { printf("[cnnp_reduce_norm2_build] -\n"); fflush(stdout); }
 } while (0)
;
 3163	assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
 (input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 3163, __extension__ __PRETTY_FUNCTION__); }))
;
 3164	assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
 if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 3164, __extension__ __PRETTY_FUNCTION__
); }))
;
 3165	ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
 3166	ccv_nnc_tensor_param_t output_params;
 3167	ccv_nnc_cmd_t reduce_norm2 = CMD_REDUCE_NORM2_FORWARD()ccv_nnc_cmd(CCV_NNC_REDUCE_NORM2_FORWARD, 0, ((ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.reduce={.count=(1 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1),.axis={}}}
), 0)
;
 3168	int i;
 3169	for (i = 0; i < self->count; i++)
 3170		reduce_norm2.info.reduce.axis[i] = self->axis[i];
 3171	reduce_norm2.info.reduce.count = self->count;
 3172	ccv_nnc_hint_tensor_auto(reduce_norm2, &input_params, 1, ccv_nnc_no_hint, &output_params, 1);
 3173	outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
 3174	ccv_nnc_graph_exec_symbol_new(graph, reduce_norm2, inputs, input_size, outputs, output_size, "reduce_norm2");
 3175}
 3176
 3177static ccv_cnnp_model_t* _ccv_cnnp_reduce_norm2_copy(const ccv_cnnp_model_t* const self, void* const context);
 3178
// Vtable: stateless model, build + copy only.
 3179static const ccv_cnnp_model_vtab_t ccv_cnnp_reduce_norm2_isa = {
 3180	.build = _ccv_cnnp_reduce_norm2_build,
 3181	.copy = _ccv_cnnp_reduce_norm2_copy,
 3182};
 3183
// Public factory; copies caller axes (bounded by CCV_NNC_MAX_DIM_ALLOC).
 3184ccv_cnnp_model_t* ccv_cnnp_reduce_norm2(const int* const axis, const int axis_count, const char* const name)
 3185{
 3186	ccv_cnnp_model_reduce_norm2_t* const model_reduce_norm2 = (ccv_cnnp_model_reduce_norm2_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_reduce_norm2_t));
 3187	model_reduce_norm2->super.isa = &ccv_cnnp_reduce_norm2_isa;
 3188	model_reduce_norm2->super.input_size = 1;
 3189	model_reduce_norm2->super.outputs = &model_reduce_norm2->output;
 3190	model_reduce_norm2->super.output_size = 1;
 3191	ccv_cnnp_model_copy_name(&model_reduce_norm2->super, name);
 3192	assert(axis_count <= CCV_NNC_MAX_DIM_ALLOC)((void) sizeof ((axis_count <= (12)) ? 1 : 0), __extension__
 ({ if (axis_count <= (12)) ; else __assert_fail ("axis_count <= CCV_NNC_MAX_DIM_ALLOC"
, "ccv_cnnp_model_addons.c", 3192, __extension__ __PRETTY_FUNCTION__
); }))
;
 3193	int i;
 3194	for (i = 0; i < axis_count; i++)
 3195		model_reduce_norm2->axis[i] = axis[i];
 3196	model_reduce_norm2->count = axis_count;
 3197	return (ccv_cnnp_model_t*)model_reduce_norm2;
 3198}
 3199
// Copy hook: rebuild from the stored axes.
 3200static ccv_cnnp_model_t* _ccv_cnnp_reduce_norm2_copy(const ccv_cnnp_model_t* const super, void* const context)
 3201{
 3202	const ccv_cnnp_model_reduce_norm2_t* const self = (const ccv_cnnp_model_reduce_norm2_t*)super;
 3203	return ccv_cnnp_reduce_norm2(self->axis, self->count, self->super.name);
 3204}
3205
3206// MARK - Argmax Layer
3207
// Argmax model state: single reduction axis.
 3208typedef struct {
 3209	ccv_cnnp_model_t super;
 3210	int axis;
 3211	ccv_nnc_tensor_symbol_t output;
 3212} ccv_cnnp_model_argmax_t;
 3213
// Build hook: one ARGMAX_FORWARD node over inputs[0]; exactly one axis is
// set in the command's reduce params (count = 1).
 3214static void _ccv_cnnp_argmax_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
 3215{
 3216	const ccv_cnnp_model_argmax_t* const self = (const ccv_cnnp_model_argmax_t*)super;
 3217	PRINT(CCV_CLI_VERBOSE, "[cnnp_argmax_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
 { printf("[cnnp_argmax_build] -\n"); fflush(stdout); } } while
 (0)
;
 3218	assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
 (input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 3218, __extension__ __PRETTY_FUNCTION__); }))
;
 3219	assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
 if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 3219, __extension__ __PRETTY_FUNCTION__
); }))
;
 3220	ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
 3221	ccv_nnc_tensor_param_t output_params;
 3222	ccv_nnc_cmd_t argmax = CMD_ARGMAX_FORWARD()ccv_nnc_cmd(CCV_NNC_ARGMAX_FORWARD, 0, ((ccv_nnc_cmd_param_t)
{.size={.dim={1,1,1}},.reduce={.count=(1 +0 +0 +0 +0 +0 +0 +0
 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1),.axis={}}})
, 0)
;
 3223	argmax.info.reduce.axis[0] = self->axis;
 3224	argmax.info.reduce.count = 1;
 3225	ccv_nnc_hint_tensor_auto(argmax, &input_params, 1, ccv_nnc_no_hint, &output_params, 1);
 3226	outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
 3227	ccv_nnc_graph_exec_symbol_new(graph, argmax, inputs, input_size, outputs, output_size, "argmax");
 3228}
 3229
 3230static ccv_cnnp_model_t* _ccv_cnnp_argmax_copy(const ccv_cnnp_model_t* const self, void* const context);
 3231
// Vtable: stateless model, build + copy only.
 3232static const ccv_cnnp_model_vtab_t ccv_cnnp_argmax_isa = {
 3233	.build = _ccv_cnnp_argmax_build,
 3234	.copy = _ccv_cnnp_argmax_copy,
 3235};
 3236
// Public factory; note no range check on axis here (unlike the reduce
// factories' axis_count assert).
 3237ccv_cnnp_model_t* ccv_cnnp_argmax(const int axis, const char* const name)
 3238{
 3239	ccv_cnnp_model_argmax_t* const model_argmax = (ccv_cnnp_model_argmax_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_argmax_t));
 3240	model_argmax->super.isa = &ccv_cnnp_argmax_isa;
 3241	model_argmax->super.input_size = 1;
 3242	model_argmax->super.outputs = &model_argmax->output;
 3243	model_argmax->super.output_size = 1;
 3244	ccv_cnnp_model_copy_name(&model_argmax->super, name);
 3245	model_argmax->axis = axis;
 3246	return (ccv_cnnp_model_t*)model_argmax;
 3247}
 3248
// Copy hook: rebuild from the stored axis.
 3249static ccv_cnnp_model_t* _ccv_cnnp_argmax_copy(const ccv_cnnp_model_t* const super, void* const context)
 3250{
 3251	const ccv_cnnp_model_argmax_t* const self = (const ccv_cnnp_model_argmax_t*)super;
 3252	return ccv_cnnp_argmax(self->axis, self->super.name);
 3253}
3254
3255// MARK - Argmin Layer
3256
3257typedef struct {
3258 ccv_cnnp_model_t super;
3259 int axis;
3260 ccv_nnc_tensor_symbol_t output;
3261} ccv_cnnp_model_argmin_t;
3262
3263static void _ccv_cnnp_argmin_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3264{
3265 const ccv_cnnp_model_argmin_t* const self = (const ccv_cnnp_model_argmin_t*)super;
3266 PRINT(CCV_CLI_VERBOSE, "[cnnp_argmin_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_argmin_build] -\n"); fflush(stdout); } } while
(0)
;
3267 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 3267, __extension__ __PRETTY_FUNCTION__); }))
;
3268 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 3268, __extension__ __PRETTY_FUNCTION__
); }))
;
3269 ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
3270 ccv_nnc_tensor_param_t output_params;
3271 ccv_nnc_cmd_t argmin = CMD_ARGMIN_FORWARD()ccv_nnc_cmd(CCV_NNC_ARGMIN_FORWARD, 0, ((ccv_nnc_cmd_param_t)
{.size={.dim={1,1,1}},.reduce={.count=(1 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1),.axis={}}})
, 0)
;
3272 argmin.info.reduce.axis[0] = self->axis;
3273 argmin.info.reduce.count = 1;
3274 ccv_nnc_hint_tensor_auto(argmin, &input_params, 1, ccv_nnc_no_hint, &output_params, 1);
3275 outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
3276 ccv_nnc_graph_exec_symbol_new(graph, argmin, inputs, input_size, outputs, output_size, "argmin");
3277}
3278
3279static ccv_cnnp_model_t* _ccv_cnnp_argmin_copy(const ccv_cnnp_model_t* const self, void* const context);
3280
3281static const ccv_cnnp_model_vtab_t ccv_cnnp_argmin_isa = {
3282 .build = _ccv_cnnp_argmin_build,
3283 .copy = _ccv_cnnp_argmin_copy,
3284};
3285
3286ccv_cnnp_model_t* ccv_cnnp_argmin(const int axis, const char* const name)
3287{
3288 ccv_cnnp_model_argmin_t* const model_argmin = (ccv_cnnp_model_argmin_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_argmin_t));
3289 model_argmin->super.isa = &ccv_cnnp_argmin_isa;
3290 model_argmin->super.input_size = 1;
3291 model_argmin->super.outputs = &model_argmin->output;
3292 model_argmin->super.output_size = 1;
3293 ccv_cnnp_model_copy_name(&model_argmin->super, name);
3294 model_argmin->axis = axis;
3295 return (ccv_cnnp_model_t*)model_argmin;
3296}
3297
3298static ccv_cnnp_model_t* _ccv_cnnp_argmin_copy(const ccv_cnnp_model_t* const super, void* const context)
3299{
3300 const ccv_cnnp_model_argmin_t* const self = (const ccv_cnnp_model_argmin_t*)super;
3301 return ccv_cnnp_argmin(self->axis, self->super.name);
3302}
3303
3304// MARK - Min Layer
3305
3306typedef struct {
3307 ccv_cnnp_model_t super;
3308 ccv_nnc_tensor_symbol_t output;
3309} ccv_cnnp_model_min_t;
3310
3311static void _ccv_cnnp_min_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3312{
3313 PRINT(CCV_CLI_VERBOSE, "[cnnp_min_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_min_build] -\n"); fflush(stdout); } } while (
0)
;
3314 assert(input_size == 2)((void) sizeof ((input_size == 2) ? 1 : 0), __extension__ ({ if
(input_size == 2) ; else __assert_fail ("input_size == 2", "ccv_cnnp_model_addons.c"
, 3314, __extension__ __PRETTY_FUNCTION__); }))
;
3315 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 3315, __extension__ __PRETTY_FUNCTION__
); }))
;
3316 ccv_nnc_tensor_param_t input_params[2];
3317 int i;
3318 for (i = 0; i < 2; i++)
3319 input_params[i] = ccv_nnc_tensor_symbol_params(graph, inputs[i]);
3320 ccv_nnc_tensor_param_t output_params;
3321 const ccv_nnc_cmd_t min = CMD_MIN_FORWARD()ccv_nnc_cmd(CCV_NNC_MIN_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}}}, 0)
;
3322 ccv_nnc_hint_tensor_auto(min, input_params, 2, ccv_nnc_no_hint, &output_params, 1);
3323 outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
3324 ccv_nnc_graph_exec_symbol_new(graph, min, inputs, input_size, outputs, output_size, "min");
3325}
3326
3327static ccv_cnnp_model_t* _ccv_cnnp_min_copy(const ccv_cnnp_model_t* const self, void* const context);
3328
3329static const ccv_cnnp_model_vtab_t ccv_cnnp_min_isa = {
3330 .build = _ccv_cnnp_min_build,
3331 .copy = _ccv_cnnp_min_copy,
3332};
3333
3334ccv_cnnp_model_t* ccv_cnnp_min(const char* const name)
3335{
3336 ccv_cnnp_model_min_t* const model_min = (ccv_cnnp_model_min_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_min_t));
3337 model_min->super.isa = &ccv_cnnp_min_isa;
3338 model_min->super.input_size = 2;
3339 model_min->super.outputs = &model_min->output;
3340 model_min->super.output_size = 1;
3341 ccv_cnnp_model_copy_name(&model_min->super, name);
3342 return (ccv_cnnp_model_t*)model_min;
3343}
3344
3345static ccv_cnnp_model_t* _ccv_cnnp_min_copy(const ccv_cnnp_model_t* const super, void* const context)
3346{
3347 const ccv_cnnp_model_min_t* const self = (const ccv_cnnp_model_min_t*)super;
3348 return ccv_cnnp_min(self->super.name);
3349}
3350
3351// MARK - Max Layer
3352
3353typedef struct {
3354 ccv_cnnp_model_t super;
3355 ccv_nnc_tensor_symbol_t output;
3356} ccv_cnnp_model_max_t;
3357
3358static void _ccv_cnnp_max_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3359{
3360 PRINT(CCV_CLI_VERBOSE, "[cnnp_max_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_max_build] -\n"); fflush(stdout); } } while (
0)
;
3361 assert(input_size == 2)((void) sizeof ((input_size == 2) ? 1 : 0), __extension__ ({ if
(input_size == 2) ; else __assert_fail ("input_size == 2", "ccv_cnnp_model_addons.c"
, 3361, __extension__ __PRETTY_FUNCTION__); }))
;
3362 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 3362, __extension__ __PRETTY_FUNCTION__
); }))
;
3363 ccv_nnc_tensor_param_t input_params[2];
3364 int i;
3365 for (i = 0; i < 2; i++)
3366 input_params[i] = ccv_nnc_tensor_symbol_params(graph, inputs[i]);
3367 ccv_nnc_tensor_param_t output_params;
3368 const ccv_nnc_cmd_t max = CMD_MAX_FORWARD()ccv_nnc_cmd(CCV_NNC_MAX_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}}}, 0)
;
3369 ccv_nnc_hint_tensor_auto(max, input_params, 2, ccv_nnc_no_hint, &output_params, 1);
3370 outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
3371 ccv_nnc_graph_exec_symbol_new(graph, max, inputs, input_size, outputs, output_size, "max");
3372}
3373
3374static ccv_cnnp_model_t* _ccv_cnnp_max_copy(const ccv_cnnp_model_t* const self, void* const context);
3375
3376static const ccv_cnnp_model_vtab_t ccv_cnnp_max_isa = {
3377 .build = _ccv_cnnp_max_build,
3378 .copy = _ccv_cnnp_max_copy,
3379};
3380
3381ccv_cnnp_model_t* ccv_cnnp_max(const char* const name)
3382{
3383 ccv_cnnp_model_max_t* const model_max = (ccv_cnnp_model_max_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_max_t));
3384 model_max->super.isa = &ccv_cnnp_max_isa;
3385 model_max->super.input_size = 2;
3386 model_max->super.outputs = &model_max->output;
3387 model_max->super.output_size = 1;
3388 ccv_cnnp_model_copy_name(&model_max->super, name);
3389 return (ccv_cnnp_model_t*)model_max;
3390}
3391
3392static ccv_cnnp_model_t* _ccv_cnnp_max_copy(const ccv_cnnp_model_t* const super, void* const context)
3393{
3394 const ccv_cnnp_model_max_t* const self = (const ccv_cnnp_model_max_t*)super;
3395 return ccv_cnnp_max(self->super.name);
3396}
3397
3398// MARK - LSTM Layer
3399
3400typedef struct {
3401 ccv_cnnp_model_t super;
3402 int masked;
3403 ccv_nnc_tensor_symbol_t output;
3404 ccv_nnc_tensor_symbol_t weights;
3405 ccv_nnc_tensor_symbol_t reserves;
3406 ccv_nnc_cmd_param_t params;
3407 ccv_nnc_graph_exec_symbol_t lstm;
3408} ccv_cnnp_model_lstm_t;
3409
// Compute the leading dimension (row count) of the packed LSTM weight tensor.
// NOTE(review): the formula appears to mirror a cuDNN-style packed layout (4
// gates per layer, doubled when bidirectional, extra projection rows when
// proj_size != hidden_size) — confirm against the LSTM command implementation.
static int _ccv_cnnp_lstm_weight_dim(int bidirectional, int num_layers, int input_size, int hidden_size, int proj_size, int bias)
{
	// Number of directions: 2 when bidirectional, otherwise 1.
	const int directions = bidirectional ? 2 : 1;
	// 8 bias rows per layer (4 gates x input/recurrent), or none when bias is off.
	const int bias_rows = bias ? num_layers * 8 : 0;
	if (hidden_size == proj_size) {
		const int stacked_rows = (num_layers - 1) * (hidden_size * 4 * directions + hidden_size * 4);
		return (bias_rows + stacked_rows + input_size * 4 + hidden_size * 4) * directions;
	}
	const int stacked_rows = (num_layers - 1) * (proj_size * 4 * directions + proj_size * 4);
	return (bias_rows + stacked_rows + (proj_size * 4 + input_size * 4) + num_layers * proj_size) * directions;
}
3418
3419static void _ccv_cnnp_lstm_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3420{
3421 ccv_cnnp_model_lstm_t* const self = (ccv_cnnp_model_lstm_t*)super;
3422 PRINT(CCV_CLI_VERBOSE, "[cnnp_lstm_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_lstm_build] -\n"); fflush(stdout); } } while
(0)
;
3423 assert(input_size == self->super.input_size)((void) sizeof ((input_size == self->super.input_size) ? 1
: 0), __extension__ ({ if (input_size == self->super.input_size
) ; else __assert_fail ("input_size == self->super.input_size"
, "ccv_cnnp_model_addons.c", 3423, __extension__ __PRETTY_FUNCTION__
); }))
;
3424 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 3424, __extension__ __PRETTY_FUNCTION__
); }))
;
3425 const int proj_size = self->params.rnn.proj_size == 0 ? self->params.rnn.hidden_size : self->params.rnn.proj_size;
3426 ccv_nnc_tensor_param_t input_params[5];
3427 input_params[0]= ccv_nnc_tensor_symbol_params(graph, inputs[0]);
3428 if (input_size == 2)
3429 input_params[1] = ccv_nnc_tensor_symbol_params(graph, inputs[1]);
3430 input_params[4] = input_params[0];
3431 memset(input_params[4].dim, 0, sizeof(input_params[4].dim));
3432 const int x_nd = ccv_nnc_tensor_nd(input_params[0].dim);
3433 const int feature_count = input_params[0].dim[x_nd - 1];
3434 input_params[4].dim[0] = _ccv_cnnp_lstm_weight_dim(self->params.rnn.bidirectional, self->params.rnn.num_layers, feature_count, self->params.rnn.hidden_size, proj_size, self->params.rnn.bias);
3435 input_params[4].dim[1] = self->params.rnn.hidden_size;
3436 const ccv_nnc_cmd_t lstm = ccv_nnc_cmd(CCV_NNC_LSTM_FORWARD, 0, self->params, 0);
3437 ccv_nnc_tensor_param_t output_params[4];
3438 ccv_nnc_hint_tensor_auto(lstm, input_params, 5, ccv_nnc_no_hint, output_params, 4);
3439 outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params[0], 0);
3440 if (!self->weights.graph)
3441 self->weights = ccv_nnc_tensor_symbol_new(graph, input_params[4], "weights");
3442 if (!self->reserves.graph)
3443 self->reserves = ccv_nnc_tensor_symbol_new(graph, output_params[3], "reserves");
3444 const ccv_nnc_tensor_symbol_t mask = input_size == 2 ? inputs[1] : NO_TENSOR_SYMBOL(const ccv_nnc_tensor_symbol_t){.d = CCV_NNC_NO_TENSOR_SYMBOL
}
;
3445 self->lstm = ccv_nnc_graph_exec_symbol_new(graph, lstm, TENSOR_SYMBOL_LIST(inputs[0], mask, NO_TENSOR_SYMBOL, NO_TENSOR_SYMBOL, self->weights)(const ccv_nnc_tensor_symbol_t []){inputs[0], mask, (const ccv_nnc_tensor_symbol_t
){.d = CCV_NNC_NO_TENSOR_SYMBOL}, (const ccv_nnc_tensor_symbol_t
){.d = CCV_NNC_NO_TENSOR_SYMBOL}, self->weights}, (1 +1 +1
+1 +1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -
1)
, TENSOR_SYMBOL_LIST(outputs[0], NO_TENSOR_SYMBOL, NO_TENSOR_SYMBOL, self->reserves)(const ccv_nnc_tensor_symbol_t []){outputs[0], (const ccv_nnc_tensor_symbol_t
){.d = CCV_NNC_NO_TENSOR_SYMBOL}, (const ccv_nnc_tensor_symbol_t
){.d = CCV_NNC_NO_TENSOR_SYMBOL}, self->reserves}, (1 +1 +
1 +1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
-1)
, "lstm");
3446}
3447
3448static void _ccv_cnnp_lstm_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
3449{
3450 ccv_cnnp_model_lstm_t* const self = (ccv_cnnp_model_lstm_t*)super;
3451 if (self->weights.graph)
3452 {
3453 const float stdv = 1.0 / sqrt(self->params.rnn.hidden_size);
3454 initializer(context, CMD_RANDOM_UNIFORM_FORWARD(-stdv, stdv)ccv_nnc_cmd(CCV_NNC_RANDOM_UNIFORM_FORWARD, 0, (ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.blas={.a={-stdv, stdv}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->weights);
3455 }
3456}
3457
3458static void _ccv_cnnp_lstm_add_to_parameter(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const parameters, const int is_trainable)
3459{
3460 ccv_cnnp_model_lstm_t* const self = (ccv_cnnp_model_lstm_t*)super;
3461 if (self->weights.graph)
3462 add_to_array(parameters, self->weights, is_trainable);
3463}
3464
3465static void _ccv_cnnp_lstm_set_is_test(ccv_cnnp_model_t* const super, const int is_test, const ccv_cnnp_cmd_updater_f updater, void* const context)
3466{
3467 ccv_cnnp_model_lstm_t* const self = (ccv_cnnp_model_lstm_t*)super;
3468 if (self->lstm.graph)
3469 {
3470 self->params.rnn.is_test = is_test;
3471 updater(context, self->lstm, ccv_nnc_cmd(CCV_NNC_LSTM_FORWARD, 0, self->params, 0), ccv_nnc_no_hint);
3472 }
3473}
3474
3475static ccv_cnnp_model_t* _ccv_cnnp_lstm_copy(const ccv_cnnp_model_t* const self, void* const context);
3476
3477static const ccv_cnnp_model_vtab_t ccv_cnnp_lstm_isa = {
3478 .build = _ccv_cnnp_lstm_build,
3479 .init_states = _ccv_cnnp_lstm_init_states,
3480 .add_to_parameter = _ccv_cnnp_lstm_add_to_parameter,
3481 .copy = _ccv_cnnp_lstm_copy,
3482 .set_is_test = _ccv_cnnp_lstm_set_is_test,
3483};
3484
3485ccv_cnnp_model_t* ccv_cnnp_lstm(const int masked, const int hidden_size, const int proj_size, const int num_layers, const int bias, const int batch_first, const int bidirectional, const float dropout, const int is_trainable, const char* const name)
3486{
3487 ccv_cnnp_model_lstm_t* const model_lstm = (ccv_cnnp_model_lstm_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_lstm_t));
3488 model_lstm->super.isa = &ccv_cnnp_lstm_isa;
3489 model_lstm->super.input_size = masked ? 2 : 1;
3490 model_lstm->super.outputs = &model_lstm->output;
3491 model_lstm->super.output_size = 1;
3492 model_lstm->super.is_trainable = is_trainable;
3493 ccv_cnnp_model_copy_name(&model_lstm->super, name);
3494 model_lstm->masked = masked;
3495 model_lstm->weights.d = CCV_NNC_NO_TENSOR_SYMBOL;
3496 model_lstm->weights.graph = 0;
3497 model_lstm->params.rnn.hidden_size = hidden_size;
3498 model_lstm->params.rnn.proj_size = proj_size;
3499 model_lstm->params.rnn.num_layers = num_layers;
3500 model_lstm->params.rnn.bias = bias;
3501 model_lstm->params.rnn.batch_first = batch_first;
3502 model_lstm->params.rnn.bidirectional = bidirectional;
3503 model_lstm->params.rnn.dropout = dropout;
3504 return (ccv_cnnp_model_t*)model_lstm;
3505}
3506
3507static ccv_cnnp_model_t* _ccv_cnnp_lstm_copy(const ccv_cnnp_model_t* const super, void* const context)
3508{
3509 const ccv_cnnp_model_lstm_t* const self = (const ccv_cnnp_model_lstm_t*)super;
3510 return ccv_cnnp_lstm(self->masked, self->params.rnn.hidden_size, self->params.rnn.proj_size, self->params.rnn.num_layers, self->params.rnn.bias, self->params.rnn.batch_first, self->params.rnn.bidirectional, self->params.rnn.dropout, self->super.is_trainable, self->super.name);
3511}
3512
3513/// MARK - Datatype conversion layer.
3514
3515typedef struct {
3516 ccv_cnnp_model_t super;
3517 ccv_nnc_tensor_symbol_t output;
3518 int datatype;
3519 int ref_to_last;
3520} ccv_cnnp_model_datatype_conversion_t;
3521
3522static void _ccv_cnnp_datatype_conversion_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3523{
3524 ccv_cnnp_model_datatype_conversion_t* const self = (ccv_cnnp_model_datatype_conversion_t*)super;
3525 PRINT(CCV_CLI_VERBOSE, "[cnnp_datatype_conversion_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_datatype_conversion_build] -\n"); fflush(stdout
); } } while (0)
;
3526 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
3527 if (self->ref_to_last)
3528 {
3529 assert(input_size > 1)((void) sizeof ((input_size > 1) ? 1 : 0), __extension__ (
{ if (input_size > 1) ; else __assert_fail ("input_size > 1"
, "ccv_cnnp_model_addons.c", 3529, __extension__ __PRETTY_FUNCTION__
); }))
;
3530 const ccv_nnc_tensor_param_t last_params = ccv_nnc_tensor_symbol_params(graph, inputs[input_size - 1]);
3531 params.datatype = last_params.datatype;
3532 } else
3533 params.datatype = self->datatype;
3534 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 3534, __extension__ __PRETTY_FUNCTION__
); }))
;
3535 outputs[0] = ccv_nnc_tensor_symbol_new(graph, params, 0);
3536 ccv_nnc_graph_exec_symbol_new(graph, CMD_DATATYPE_CONVERSION_FORWARD()ccv_nnc_cmd(CCV_NNC_DATATYPE_CONVERSION_FORWARD, 0, ccv_nnc_cmd_auto
, 0)
, inputs, output_size, outputs, output_size, 0);
3537}
3538
3539static ccv_cnnp_model_t* _ccv_cnnp_datatype_conversion_copy(const ccv_cnnp_model_t* const self, void* const context);
3540
3541static const ccv_cnnp_model_vtab_t ccv_cnnp_datatype_conversion_isa = {
3542 .build = _ccv_cnnp_datatype_conversion_build,
3543 .copy = _ccv_cnnp_datatype_conversion_copy,
3544};
3545
3546ccv_cnnp_model_t* ccv_cnnp_datatype_conversion(const int datatype, const int ref_to_last, const char* const name)
3547{
3548 ccv_cnnp_model_datatype_conversion_t* const model_datatype_conversion = (ccv_cnnp_model_datatype_conversion_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_datatype_conversion_t));
3549 model_datatype_conversion->super.isa = &ccv_cnnp_datatype_conversion_isa;
3550 model_datatype_conversion->super.input_size = 0;
3551 model_datatype_conversion->super.outputs = &model_datatype_conversion->output;
3552 model_datatype_conversion->super.output_size = 1;
3553 model_datatype_conversion->datatype = datatype;
3554 model_datatype_conversion->ref_to_last = ref_to_last;
3555 ccv_cnnp_model_copy_name(&model_datatype_conversion->super, name);
3556 return (ccv_cnnp_model_t*)model_datatype_conversion;
3557}
3558
3559static ccv_cnnp_model_t* _ccv_cnnp_datatype_conversion_copy(const ccv_cnnp_model_t* const super, void* const context)
3560{
3561 ccv_cnnp_model_datatype_conversion_t* const self = (ccv_cnnp_model_datatype_conversion_t*)super;
3562 return ccv_cnnp_datatype_conversion(self->datatype, self->ref_to_last, self->super.name);
3563}
3564
3565/// MARK - Clamp layer.
3566
3567typedef struct {
3568 ccv_cnnp_model_t super;
3569 ccv_nnc_tensor_symbol_t output;
3570 float min;
3571 float max;
3572} ccv_cnnp_model_clamp_t;
3573
3574static void _ccv_cnnp_clamp_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3575{
3576 ccv_cnnp_model_clamp_t* const self = (ccv_cnnp_model_clamp_t*)super;
3577 PRINT(CCV_CLI_VERBOSE, "[cnnp_clamp_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_clamp_build] -\n"); fflush(stdout); } } while
(0)
;
3578 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
3579 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 3579, __extension__ __PRETTY_FUNCTION__
); }))
;
3580 outputs[0] = ccv_nnc_tensor_symbol_new(graph, params, 0);
3581 ccv_nnc_graph_exec_symbol_new(graph, CMD_CLAMP_FORWARD(self->min, self->max)ccv_nnc_cmd(CCV_NNC_CLAMP_FORWARD, 0, (ccv_nnc_cmd_param_t){.
size={.dim={1,1,1}},.clamp={.min=self->min,.max=self->max
}}, 0)
, inputs, output_size, outputs, output_size, 0);
3582}
3583
3584static ccv_cnnp_model_t* _ccv_cnnp_clamp_copy(const ccv_cnnp_model_t* const self, void* const context);
3585
3586static const ccv_cnnp_model_vtab_t ccv_cnnp_clamp_isa = {
3587 .build = _ccv_cnnp_clamp_build,
3588 .copy = _ccv_cnnp_clamp_copy,
3589};
3590
3591ccv_cnnp_model_t* ccv_cnnp_clamp(const float min, const float max, const char* const name)
3592{
3593 ccv_cnnp_model_clamp_t* const model_clamp = (ccv_cnnp_model_clamp_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_clamp_t));
3594 model_clamp->super.isa = &ccv_cnnp_clamp_isa;
3595 model_clamp->super.input_size = 0;
3596 model_clamp->super.outputs = &model_clamp->output;
3597 model_clamp->super.output_size = 1;
3598 model_clamp->min = min;
3599 model_clamp->max = max;
3600 ccv_cnnp_model_copy_name(&model_clamp->super, name);
3601 return (ccv_cnnp_model_t*)model_clamp;
3602}
3603
3604static ccv_cnnp_model_t* _ccv_cnnp_clamp_copy(const ccv_cnnp_model_t* const super, void* const context)
3605{
3606 ccv_cnnp_model_clamp_t* const self = (ccv_cnnp_model_clamp_t*)super;
3607 return ccv_cnnp_clamp(self->min, self->max, self->super.name);
3608}
3609
3610// MARK - Parameter Layer
3611
3612typedef struct {
3613 ccv_cnnp_model_t super;
3614 float init_bound;
3615 ccv_nnc_tensor_symbol_t weights;
3616 ccv_nnc_tensor_param_t weights_params;
3617 ccv_nnc_tensor_symbol_t output;
3618} ccv_cnnp_model_parameter_t;
3619
3620static void _ccv_cnnp_parameter_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3621{
3622 PRINT(CCV_CLI_VERBOSE, "[cnnp_parameter_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_parameter_build] -\n"); fflush(stdout); } } while
(0)
;
3623 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 3623, __extension__ __PRETTY_FUNCTION__
); }))
;
3624 ccv_cnnp_model_parameter_t* const self = (ccv_cnnp_model_parameter_t*)super;
3625 if (!self->weights.graph)
3626 self->weights = ccv_nnc_tensor_symbol_new(graph, self->weights_params, "weights");
3627 assert(self->weights.graph == graph)((void) sizeof ((self->weights.graph == graph) ? 1 : 0), __extension__
({ if (self->weights.graph == graph) ; else __assert_fail
("self->weights.graph == graph", "ccv_cnnp_model_addons.c"
, 3627, __extension__ __PRETTY_FUNCTION__); }))
;
3628 outputs[0] = self->weights;
3629}
3630
3631static void _ccv_cnnp_parameter_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
3632{
3633 ccv_cnnp_model_parameter_t* const self = (ccv_cnnp_model_parameter_t*)super;
3634 if (self->init_bound > 0)
3635 initializer(context, CMD_RANDOM_UNIFORM_FORWARD(-self->init_bound, self->init_bound)ccv_nnc_cmd(CCV_NNC_RANDOM_UNIFORM_FORWARD, 0, (ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.blas={.a={-self->init_bound, self->
init_bound}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->weights);
3636 else
3637 initializer(context, CMD_SET_FORWARD(0)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={0,}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->weights);
3638}
3639
3640static void _ccv_cnnp_parameter_add_to_parameter(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const parameters, const int is_trainable)
3641{
3642 ccv_cnnp_model_parameter_t* const self = (ccv_cnnp_model_parameter_t*)super;
3643 add_to_array(parameters, self->weights, is_trainable);
3644}
3645
3646static ccv_cnnp_model_t* _ccv_cnnp_parameter_copy(const ccv_cnnp_model_t* const super, void* const context);
3647
3648static const ccv_cnnp_model_vtab_t ccv_cnnp_parameter_isa = {
3649 .build = _ccv_cnnp_parameter_build,
3650 .init_states = _ccv_cnnp_parameter_init_states,
3651 .add_to_parameter = _ccv_cnnp_parameter_add_to_parameter,
3652 .copy = _ccv_cnnp_parameter_copy,
3653};
3654
3655ccv_cnnp_model_t* ccv_cnnp_parameter(const ccv_nnc_tensor_param_t params, const float init_bound, const int is_trainable, const char* const name)
3656{
3657 ccv_cnnp_model_parameter_t* const model_parameter = (ccv_cnnp_model_parameter_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_parameter_t));
3658 model_parameter->super.isa = &ccv_cnnp_parameter_isa;
3659 model_parameter->super.input_size = 0;
3660 model_parameter->super.outputs = &model_parameter->output;
3661 model_parameter->super.output_size = 1;
3662 model_parameter->super.is_trainable = is_trainable;
3663 ccv_cnnp_model_copy_name(&model_parameter->super, name);
3664 model_parameter->weights.d = CCV_NNC_NO_TENSOR_SYMBOL;
3665 model_parameter->weights.graph = 0;
3666 model_parameter->weights_params = params;
3667 return (ccv_cnnp_model_t*)model_parameter;
3668}
3669
3670static ccv_cnnp_model_t* _ccv_cnnp_parameter_copy(const ccv_cnnp_model_t* const super, void* const context)
3671{
3672 const ccv_cnnp_model_parameter_t* const self = (const ccv_cnnp_model_parameter_t*)super;
3673 return ccv_cnnp_parameter(self->weights_params, self->init_bound, self->super.is_trainable, self->super.name);
3674}
3675
3676// MARK - Scalar Layer
3677
3678typedef struct {
3679 ccv_cnnp_model_t super;
3680 int type;
3681 int format;
3682 int datatype;
3683 float value;
3684 ccv_nnc_tensor_symbol_t output;
3685} ccv_cnnp_model_scalar_t;
3686
3687static void _ccv_cnnp_scalar_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3688{
3689 PRINT(CCV_CLI_VERBOSE, "[cnnp_scalar_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_scalar_build] -\n"); fflush(stdout); } } while
(0)
;
3690 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 3690, __extension__ __PRETTY_FUNCTION__
); }))
;
3691 ccv_cnnp_model_scalar_t* const self = (ccv_cnnp_model_scalar_t*)super;
3692 ccv_nnc_tensor_param_t params = {
3693 .type = self->type,
3694 .format = self->format,
3695 .datatype = self->datatype,
3696 .dim = {
3697 1
3698 }
3699 };
3700 if (input_size > 0)
3701 {
3702 ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
3703 params.type = input_params.type;
3704 params.format = input_params.format;
3705 params.datatype = input_params.datatype;
3706 }
3707 outputs[0] = ccv_nnc_tensor_symbol_new(graph, params, 0);
3708 ccv_nnc_graph_exec_symbol_new(graph, CMD_SET_FORWARD(self->value)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={self->value,}}}, 0)
, 0, 0, outputs, 1, 0);
3709}
3710
3711static ccv_cnnp_model_t* _ccv_cnnp_scalar_copy(const ccv_cnnp_model_t* const super, void* const context);
3712
3713static const ccv_cnnp_model_vtab_t ccv_cnnp_scalar_isa = {
3714 .build = _ccv_cnnp_scalar_build,
3715 .copy = _ccv_cnnp_scalar_copy,
3716};
3717
3718ccv_cnnp_model_t* ccv_cnnp_scalar(const int type, const int format, const int datatype, const float value, const char* const name)
3719{
3720 ccv_cnnp_model_scalar_t* const model_scalar = (ccv_cnnp_model_scalar_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_scalar_t));
3721 model_scalar->super.isa = &ccv_cnnp_scalar_isa;
3722 model_scalar->super.input_size = 0;
3723 model_scalar->super.outputs = &model_scalar->output;
3724 model_scalar->super.output_size = 1;
3725 ccv_cnnp_model_copy_name(&model_scalar->super, name);
3726 model_scalar->type = type;
3727 model_scalar->format = format;
3728 model_scalar->datatype = datatype;
3729 model_scalar->value = value;
3730 return (ccv_cnnp_model_t*)model_scalar;
3731}
3732
3733static ccv_cnnp_model_t* _ccv_cnnp_scalar_copy(const ccv_cnnp_model_t* const super, void* const context)
3734{
3735 const ccv_cnnp_model_scalar_t* const self = (const ccv_cnnp_model_scalar_t*)super;
3736 return ccv_cnnp_scalar(self->type, self->format, self->datatype, self->value, self->super.name);
3737}
3738
3739// MARK - Variable Layer
3740
3741typedef struct {
3742 ccv_cnnp_model_t super;
3743 ccv_nnc_tensor_param_t params;
3744 ccv_nnc_tensor_symbol_t output;
3745} ccv_cnnp_model_variable_t;
3746
3747static void _ccv_cnnp_variable_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3748{
3749 PRINT(CCV_CLI_VERBOSE, "[cnnp_variable_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_variable_build] -\n"); fflush(stdout); } } while
(0)
;
3750 assert(input_size == 0)((void) sizeof ((input_size == 0) ? 1 : 0), __extension__ ({ if
(input_size == 0) ; else __assert_fail ("input_size == 0", "ccv_cnnp_model_addons.c"
, 3750, __extension__ __PRETTY_FUNCTION__); }))
;
3751 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 3751, __extension__ __PRETTY_FUNCTION__
); }))
;
3752 ccv_cnnp_model_variable_t* const self = (ccv_cnnp_model_variable_t*)super;
3753 outputs[0] = ccv_nnc_tensor_symbol_new(graph, self->params, 0);
3754}
3755
3756static ccv_cnnp_model_t* _ccv_cnnp_variable_copy(const ccv_cnnp_model_t* const super, void* const context);
3757
3758static const ccv_cnnp_model_vtab_t ccv_cnnp_variable_isa = {
3759 .build = _ccv_cnnp_variable_build,
3760 .copy = _ccv_cnnp_variable_copy,
3761};
3762
3763ccv_cnnp_model_t* ccv_cnnp_variable(const ccv_nnc_tensor_param_t params, const char* const name)
3764{
3765 ccv_cnnp_model_variable_t* const model_variable = (ccv_cnnp_model_variable_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_variable_t));
3766 model_variable->super.isa = &ccv_cnnp_variable_isa;
3767 model_variable->super.input_size = 0;
3768 model_variable->super.outputs = &model_variable->output;
3769 model_variable->super.output_size = 1;
3770 ccv_cnnp_model_copy_name(&model_variable->super, name);
3771 model_variable->params = params;
3772 return (ccv_cnnp_model_t*)model_variable;
3773}
3774
3775static ccv_cnnp_model_t* _ccv_cnnp_variable_copy(const ccv_cnnp_model_t* const super, void* const context)
3776{
3777 const ccv_cnnp_model_variable_t* const self = (const ccv_cnnp_model_variable_t*)super;
3778 return ccv_cnnp_variable(self->params, self->super.name);
3779}
3780
3781// MARK - Move Layer
3782
// State for a "move" layer: copies input 0 into the tensor given as input 1.
typedef struct {
	ccv_cnnp_model_t super; // Base model; must be first so the model pointer can be cast.
	ccv_nnc_tensor_symbol_t output; // Backing storage for super.outputs (output_size == 1).
} ccv_cnnp_model_move_t;
3787
3788static void _ccv_cnnp_move_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3789{
3790 PRINT(CCV_CLI_VERBOSE, "[cnnp_move_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_move_build] -\n"); fflush(stdout); } } while
(0)
;
3791 assert(input_size == 2)((void) sizeof ((input_size == 2) ? 1 : 0), __extension__ ({ if
(input_size == 2) ; else __assert_fail ("input_size == 2", "ccv_cnnp_model_addons.c"
, 3791, __extension__ __PRETTY_FUNCTION__); }))
;
3792 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 3792, __extension__ __PRETTY_FUNCTION__
); }))
;
3793 outputs[0] = inputs[1];
3794 ccv_nnc_graph_exec_symbol_new(graph, CMD_FORMAT_TRANSFORM_FORWARD()ccv_nnc_cmd(CCV_NNC_FORMAT_TRANSFORM_FORWARD, 0, ccv_nnc_cmd_auto
, 0)
, inputs, 1, outputs, 1, "move");
3795}
3796
static ccv_cnnp_model_t* _ccv_cnnp_move_copy(const ccv_cnnp_model_t* const super, void* const context);

// Move layer vtable: build plus a trivial copy, no trainable state.
static const ccv_cnnp_model_vtab_t ccv_cnnp_move_isa = {
	.build = _ccv_cnnp_move_build,
	.copy = _ccv_cnnp_move_copy,
};
3803
3804ccv_cnnp_model_t* ccv_cnnp_move(const char* const name)
3805{
3806 ccv_cnnp_model_move_t* const model_move = (ccv_cnnp_model_move_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_move_t));
3807 model_move->super.isa = &ccv_cnnp_move_isa;
3808 model_move->super.input_size = 2;
3809 model_move->super.outputs = &model_move->output;
3810 model_move->super.output_size = 1;
3811 ccv_cnnp_model_copy_name(&model_move->super, name);
3812 return (ccv_cnnp_model_t*)model_move;
3813}
3814
3815static ccv_cnnp_model_t* _ccv_cnnp_move_copy(const ccv_cnnp_model_t* const super, void* const context)
3816{
3817 const ccv_cnnp_model_move_t* const self = (const ccv_cnnp_model_move_t*)super;
3818 return ccv_cnnp_move(self->super.name);
3819}
3820
3821// MARK - "Making" Contiguous Layer
3822
// State for a "contiguous" layer: passes through tensors that are already
// contiguous, otherwise emits a copy into freshly laid-out storage.
typedef struct {
	ccv_cnnp_model_t super; // Base model; must be first so the model pointer can be cast.
	ccv_nnc_tensor_symbol_t output; // Backing storage for super.outputs (output_size == 1).
} ccv_cnnp_model_contiguous_t;
3827
3828static void _ccv_cnnp_contiguous_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3829{
3830 PRINT(CCV_CLI_VERBOSE, "[cnnp_contiguous_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_contiguous_build] -\n"); fflush(stdout); } }
while (0)
;
3831 assert(input_size == 1)((void) sizeof ((input_size == 1) ? 1 : 0), __extension__ ({ if
(input_size == 1) ; else __assert_fail ("input_size == 1", "ccv_cnnp_model_addons.c"
, 3831, __extension__ __PRETTY_FUNCTION__); }))
;
3832 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 3832, __extension__ __PRETTY_FUNCTION__
); }))
;
3833 ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
3834 ccv_nnc_tensor_symbol_t to = ccv_nnc_tensor_symbol_alias_to(graph, inputs[0]);
3835 if (to.d == CCV_NNC_NO_TENSOR_SYMBOL) // If we are not reshape an alias, it is straightforward.
3836 {
3837 outputs[0] = inputs[0];
3838 return;
3839 }
3840 // Otherwise, we need to check its stride to know if it is contiguous.
3841 int old_stride[CCV_NNC_MAX_DIM_ALLOC(12)];
3842 ccv_nnc_tensor_symbol_alias_params(graph, inputs[0], 0, old_stride);
3843 // We identify permute by checking if the stride is not in descending order.
3844 // This also covered "permute" through reshape, rather than using ccv_cnnp_permute directly.
3845 if (ccv_nnc_is_tensor_stride_packed(old_stride, params.dim))
3846 {
3847 outputs[0] = inputs[0];
3848 return;
3849 }
3850 outputs[0] = ccv_nnc_tensor_symbol_new(graph, params, 0);
3851 ccv_nnc_graph_exec_symbol_t make_contiguous = ccv_nnc_graph_exec_symbol_new(graph, CMD_FORMAT_TRANSFORM_FORWARD()ccv_nnc_cmd(CCV_NNC_FORMAT_TRANSFORM_FORWARD, 0, ccv_nnc_cmd_auto
, 0)
, inputs, 1, outputs, 1, "contiguous");
3852 ccv_nnc_graph_exec_symbol_set_flags(graph, make_contiguous, CCV_NNC_GRAPH_EXEC_DISABLE_OPT);
3853}
3854
static ccv_cnnp_model_t* _ccv_cnnp_contiguous_copy(const ccv_cnnp_model_t* const super, void* const context);

// Contiguous layer vtable: build plus a trivial copy, no trainable state.
static const ccv_cnnp_model_vtab_t ccv_cnnp_contiguous_isa = {
	.build = _ccv_cnnp_contiguous_build,
	.copy = _ccv_cnnp_contiguous_copy,
};
3861
3862ccv_cnnp_model_t* ccv_cnnp_contiguous(const char* const name)
3863{
3864 ccv_cnnp_model_contiguous_t* const model_contiguous = (ccv_cnnp_model_contiguous_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_contiguous_t));
3865 model_contiguous->super.isa = &ccv_cnnp_contiguous_isa;
3866 model_contiguous->super.input_size = 1;
3867 model_contiguous->super.outputs = &model_contiguous->output;
3868 model_contiguous->super.output_size = 1;
3869 ccv_cnnp_model_copy_name(&model_contiguous->super, name);
3870 return (ccv_cnnp_model_t*)model_contiguous;
3871}
3872
3873static ccv_cnnp_model_t* _ccv_cnnp_contiguous_copy(const ccv_cnnp_model_t* const super, void* const context)
3874{
3875 const ccv_cnnp_model_contiguous_t* const self = (const ccv_cnnp_model_contiguous_t*)super;
3876 return ccv_cnnp_contiguous(self->super.name);
3877}
3878
3879// MARK - Scaled-Dot Product Attention Layer
3880
// State for the scaled-dot product attention layer. When
// fused_unify_head_weights is set, the layer also owns a square output
// projection (weights, optionally bias) created lazily at build time.
typedef struct {
	ccv_cnnp_model_t super; // Base model; must be first so the model pointer can be cast.
	ccv_nnc_tensor_symbol_t output; // Backing storage for super.outputs (output_size == 1).
	ccv_nnc_tensor_symbol_t weights; // Fused projection weights; graph == 0 until built.
	ccv_nnc_tensor_symbol_t bias; // Fused projection bias; absent when no_bias is set.
	float scale; // Forwarded to cmd.info.scaled_dot_product_attention.scale.
	int is_causal; // Forwarded to cmd.info.scaled_dot_product_attention.is_causal.
	int has_attn_mask; // Non-zero when a 4th input carries an attention mask.
	int flags; // Forwarded to cmd.info.scaled_dot_product_attention.flags.
	int fused_unify_head_weights; // Non-zero to fuse the output projection into the op.
	int no_bias; // Non-zero to skip creating the projection bias.
} ccv_cnnp_model_scaled_dot_product_attention_t;
3893
3894static void _ccv_cnnp_scaled_dot_product_attention_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3895{
3896 PRINT(CCV_CLI_VERBOSE, "[cnnp_scaled_dot_product_attention_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_scaled_dot_product_attention_build] -\n"); fflush
(stdout); } } while (0)
;
3897 assert(input_size == 3 || input_size == 4)((void) sizeof ((input_size == 3 || input_size == 4) ? 1 : 0)
, __extension__ ({ if (input_size == 3 || input_size == 4) ; else
__assert_fail ("input_size == 3 || input_size == 4", "ccv_cnnp_model_addons.c"
, 3897, __extension__ __PRETTY_FUNCTION__); }))
;
3898 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 3898, __extension__ __PRETTY_FUNCTION__
); }))
;
3899 ccv_cnnp_model_scaled_dot_product_attention_t* const self = (ccv_cnnp_model_scaled_dot_product_attention_t*)super;
3900 const ccv_nnc_tensor_param_t q_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
3901 const ccv_nnc_tensor_param_t k_params = ccv_nnc_tensor_symbol_params(graph, inputs[1]);
3902 const ccv_nnc_tensor_param_t v_params = ccv_nnc_tensor_symbol_params(graph, inputs[2]);
3903 const int v_nd = ccv_nnc_tensor_nd(v_params.dim);
3904 assert(v_nd == 3 || v_nd == 4)((void) sizeof ((v_nd == 3 || v_nd == 4) ? 1 : 0), __extension__
({ if (v_nd == 3 || v_nd == 4) ; else __assert_fail ("v_nd == 3 || v_nd == 4"
, "ccv_cnnp_model_addons.c", 3904, __extension__ __PRETTY_FUNCTION__
); }))
;
3905 const int hEv = (v_nd == 3 ? 1 : v_params.dim[2]) * v_params.dim[v_nd - 1];
3906 ccv_nnc_tensor_param_t weights_params = q_params;
3907 memset(weights_params.dim, 0, sizeof(weights_params.dim));
3908 weights_params.dim[0] = hEv;
3909 weights_params.dim[1] = hEv;
3910 ccv_nnc_tensor_param_t bias_params = q_params;
3911 memset(bias_params.dim, 0, sizeof(bias_params.dim));
3912 bias_params.dim[0] = hEv;
3913 ccv_nnc_cmd_t cmd = {0};
3914 cmd.cmd = CCV_NNC_SCALED_DOT_PRODUCT_ATTENTION_FORWARD;
3915 cmd.info.scaled_dot_product_attention.scale = self->scale;
3916 cmd.info.scaled_dot_product_attention.is_causal = self->is_causal;
3917 cmd.info.scaled_dot_product_attention.flags = self->flags;
3918 ccv_nnc_tensor_param_t output_params[3];
3919 ccv_nnc_tensor_symbol_t output;
3920 ccv_nnc_tensor_symbol_t saved_softmax_lse;
3921 ccv_nnc_tensor_symbol_t saved_v_proj = NO_TENSOR_SYMBOL(const ccv_nnc_tensor_symbol_t){.d = CCV_NNC_NO_TENSOR_SYMBOL
}
;
3922 ccv_nnc_tensor_symbol_t attn_mask = NO_TENSOR_SYMBOL(const ccv_nnc_tensor_symbol_t){.d = CCV_NNC_NO_TENSOR_SYMBOL
}
;
3923 ccv_nnc_tensor_symbol_t weights = NO_TENSOR_SYMBOL(const ccv_nnc_tensor_symbol_t){.d = CCV_NNC_NO_TENSOR_SYMBOL
}
;
3924 ccv_nnc_tensor_symbol_t bias = NO_TENSOR_SYMBOL(const ccv_nnc_tensor_symbol_t){.d = CCV_NNC_NO_TENSOR_SYMBOL
}
;
3925 if (self->has_attn_mask)
3926 attn_mask = inputs[3];
3927 if (self->fused_unify_head_weights)
3928 {
3929 if (!self->weights.graph)
3930 self->weights = ccv_nnc_tensor_symbol_new(graph, weights_params, "weights");
3931 weights = self->weights;
3932 if (!self->no_bias)
3933 {
3934 if (!self->bias.graph)
3935 self->bias = ccv_nnc_tensor_symbol_new(graph, bias_params, "bias");
3936 bias = self->bias;
3937 }
3938 ccv_nnc_hint_tensor_auto(cmd, (ccv_nnc_tensor_param_t []){
3939 q_params,
3940 k_params,
3941 v_params,
3942 (ccv_nnc_tensor_param_t){},
3943 weights_params,
3944 bias_params,
3945 }, 6, ccv_nnc_no_hint, output_params, 3);
3946 output = ccv_nnc_tensor_symbol_new(graph, output_params[0], 0);
3947 saved_softmax_lse = ccv_nnc_tensor_symbol_new(graph, output_params[1], 0);
3948 saved_v_proj = ccv_nnc_tensor_symbol_new(graph, output_params[2], 0);
3949 } else {
3950 ccv_nnc_hint_tensor_auto(cmd, (ccv_nnc_tensor_param_t []){
3951 q_params,
3952 k_params,
3953 v_params,
3954 }, 3, ccv_nnc_no_hint, output_params, 2);
3955 output = ccv_nnc_tensor_symbol_new(graph, output_params[0], 0);
3956 saved_softmax_lse = ccv_nnc_tensor_symbol_new(graph, output_params[1], 0);
3957 }
3958 ccv_nnc_graph_exec_symbol_new(graph, cmd, TENSOR_SYMBOL_LIST(inputs[0], inputs[1], inputs[2], attn_mask, weights, bias)(const ccv_nnc_tensor_symbol_t []){inputs[0], inputs[1], inputs
[2], attn_mask, weights, bias}, (1 +1 +1 +1 +1 +1 +1 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, TENSOR_SYMBOL_LIST(output, saved_softmax_lse, saved_v_proj)(const ccv_nnc_tensor_symbol_t []){output, saved_softmax_lse,
saved_v_proj}, (1 +1 +1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 -1)
, "scaled_dot_product_attention");
3959 outputs[0] = output;
3960}
3961
3962static void _ccv_cnnp_scaled_dot_product_attention_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
3963{
3964 ccv_cnnp_model_scaled_dot_product_attention_t* const self = (ccv_cnnp_model_scaled_dot_product_attention_t*)super;
3965 if (self->weights.graph)
3966 {
3967 assert(self->fused_unify_head_weights)((void) sizeof ((self->fused_unify_head_weights) ? 1 : 0),
__extension__ ({ if (self->fused_unify_head_weights) ; else
__assert_fail ("self->fused_unify_head_weights", "ccv_cnnp_model_addons.c"
, 3967, __extension__ __PRETTY_FUNCTION__); }))
;
3968 const ccv_nnc_tensor_param_t weight_params = ccv_nnc_tensor_symbol_params(graph, self->weights);
3969 const int c = weight_params.dim[1];
3970 const float std = sqrtf(2) / sqrtf(c);
3971 const float bound = sqrtf(3) * std;
3972 initializer(context, CMD_RANDOM_UNIFORM_FORWARD(-bound, bound)ccv_nnc_cmd(CCV_NNC_RANDOM_UNIFORM_FORWARD, 0, (ccv_nnc_cmd_param_t
){.size={.dim={1,1,1}},.blas={.a={-bound, bound}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->weights);
3973 if (self->bias.graph)
3974 initializer(context, CMD_SET_FORWARD(0)ccv_nnc_cmd(CCV_NNC_SET_FORWARD, 0, (ccv_nnc_cmd_param_t){.size
={.dim={1,1,1}},.blas={.a={0,}}}, 0)
, ccv_nnc_no_hint, 0, 0, self->bias);
3975 }
3976}
3977
3978static void _ccv_cnnp_scaled_dot_product_attention_add_to_parameter(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const parameters, const int is_trainable)
3979{
3980 ccv_cnnp_model_scaled_dot_product_attention_t* const self = (ccv_cnnp_model_scaled_dot_product_attention_t*)super;
3981 if (self->weights.graph)
3982 {
3983 assert(self->fused_unify_head_weights)((void) sizeof ((self->fused_unify_head_weights) ? 1 : 0),
__extension__ ({ if (self->fused_unify_head_weights) ; else
__assert_fail ("self->fused_unify_head_weights", "ccv_cnnp_model_addons.c"
, 3983, __extension__ __PRETTY_FUNCTION__); }))
;
3984 add_to_array(parameters, self->weights, is_trainable);
3985 if (self->bias.graph)
3986 add_to_array(parameters, self->bias, is_trainable);
3987 }
3988}
3989
static ccv_cnnp_model_t* _ccv_cnnp_scaled_dot_product_attention_copy(const ccv_cnnp_model_t* const super, void* const context);

// Vtable for the unfused variant: no trainable parameters, so no
// init_states / add_to_parameter hooks.
static const ccv_cnnp_model_vtab_t ccv_cnnp_scaled_dot_product_attention_isa = {
	.build = _ccv_cnnp_scaled_dot_product_attention_build,
	.copy = _ccv_cnnp_scaled_dot_product_attention_copy,
};

// Vtable for the fused variant, which owns projection weights/bias and thus
// needs parameter registration and state initialization.
static const ccv_cnnp_model_vtab_t ccv_cnnp_scaled_dot_product_attention_fused_isa = {
	.build = _ccv_cnnp_scaled_dot_product_attention_build,
	.init_states = _ccv_cnnp_scaled_dot_product_attention_init_states,
	.add_to_parameter = _ccv_cnnp_scaled_dot_product_attention_add_to_parameter,
	.copy = _ccv_cnnp_scaled_dot_product_attention_copy,
};
4003
4004ccv_cnnp_model_t* ccv_cnnp_scaled_dot_product_attention(const float scale, const int is_causal, const int has_attn_mask, const int flags, const int fused_unify_head_weights, const int no_bias, const int is_trainable, const char* const name)
4005{
4006 ccv_cnnp_model_scaled_dot_product_attention_t* const model_scaled_dot_product_attention = (ccv_cnnp_model_scaled_dot_product_attention_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_scaled_dot_product_attention_t));
4007 model_scaled_dot_product_attention->super.isa = fused_unify_head_weights ? &ccv_cnnp_scaled_dot_product_attention_fused_isa : &ccv_cnnp_scaled_dot_product_attention_isa;
4008 model_scaled_dot_product_attention->super.input_size = has_attn_mask ? 4 : 3;
4009 model_scaled_dot_product_attention->super.outputs = &model_scaled_dot_product_attention->output;
4010 model_scaled_dot_product_attention->super.output_size = 1;
4011 model_scaled_dot_product_attention->super.is_trainable = is_trainable;
4012 ccv_cnnp_model_copy_name(&model_scaled_dot_product_attention->super, name);
4013 model_scaled_dot_product_attention->weights.d = CCV_NNC_NO_TENSOR_SYMBOL;
4014 model_scaled_dot_product_attention->weights.graph = 0;
4015 model_scaled_dot_product_attention->bias.d = CCV_NNC_NO_TENSOR_SYMBOL;
4016 model_scaled_dot_product_attention->bias.graph = 0;
4017 model_scaled_dot_product_attention->scale = scale;
4018 model_scaled_dot_product_attention->is_causal = is_causal;
4019 model_scaled_dot_product_attention->has_attn_mask = has_attn_mask;
4020 model_scaled_dot_product_attention->flags = flags;
4021 model_scaled_dot_product_attention->fused_unify_head_weights = fused_unify_head_weights;
4022 model_scaled_dot_product_attention->no_bias = no_bias;
4023 return (ccv_cnnp_model_t*)model_scaled_dot_product_attention;
4024}
4025
4026static ccv_cnnp_model_t* _ccv_cnnp_scaled_dot_product_attention_copy(const ccv_cnnp_model_t* const super, void* const context)
4027{
4028 const ccv_cnnp_model_scaled_dot_product_attention_t* const self = (const ccv_cnnp_model_scaled_dot_product_attention_t*)super;
4029 return ccv_cnnp_scaled_dot_product_attention(self->scale, self->is_causal, self->has_attn_mask, self->flags, self->fused_unify_head_weights, self->no_bias, self->super.is_trainable, self->super.name);
4030}
4031
4032// MARK - Debug Layer
4033
// State for a "debug" layer: a pass-through that invokes a user callback with
// the concrete input tensors at execution time.
typedef struct {
	ccv_cnnp_model_t super; // Base model; must be first so the model pointer can be cast.
	ccv_nnc_tensor_symbol_t output; // Backing storage for super.outputs (output_size == 1).
	ccv_cnnp_model_debug_f debugger; // User callback invoked with inputs at exec time.
	ccv_cnnp_model_debug_context_deinit_f debug_deinit; // Optional destructor for debug_context.
	ccv_cnnp_model_debug_context_copy_f debug_copy; // Optional deep-copier for debug_context.
	void* debug_context; // Opaque user context handed to the callback.
} ccv_cnnp_model_debug_t;
4042
4043static int _ccv_cnnp_debug_exec(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
4044{
4045 if (cmd.cmd == CCV_NNC_CUSTOM_BACKWARD)
4046 {
4047 assert(0 && "don't support debug backward pass yet")((void) sizeof ((0 && "don't support debug backward pass yet"
) ? 1 : 0), __extension__ ({ if (0 && "don't support debug backward pass yet"
) ; else __assert_fail ("0 && \"don't support debug backward pass yet\""
, "ccv_cnnp_model_addons.c", 4047, __extension__ __PRETTY_FUNCTION__
); }))
;
4048 }
4049 ccv_cnnp_model_debug_t* const self = (ccv_cnnp_model_debug_t*)cmd.data;
4050 self->debugger(inputs, input_size, stream_context, self->debug_context);
4051 return CCV_NNC_EXEC_SUCCESS;
4052}
4053
// Command vtable for the custom debug op; only the forward exec is provided.
static ccv_nnc_cmd_vtab_t ccv_cnnp_debug_exec_isa = {
	.exec = _ccv_cnnp_debug_exec
};
4057
4058static void _ccv_cnnp_debug_build(ccv_cnnp_model_t* const self, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
4059{
4060 PRINT(CCV_CLI_VERBOSE, "[cnnp_debug_build] -\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("[cnnp_debug_build] -\n"); fflush(stdout); } } while
(0)
;
4061 assert(input_size >= 1)((void) sizeof ((input_size >= 1) ? 1 : 0), __extension__ (
{ if (input_size >= 1) ; else __assert_fail ("input_size >= 1"
, "ccv_cnnp_model_addons.c", 4061, __extension__ __PRETTY_FUNCTION__
); }))
;
4062 assert(output_size == 1)((void) sizeof ((output_size == 1) ? 1 : 0), __extension__ ({
if (output_size == 1) ; else __assert_fail ("output_size == 1"
, "ccv_cnnp_model_addons.c", 4062, __extension__ __PRETTY_FUNCTION__
); }))
;
4063 ccv_nnc_tensor_symbol_t to = ccv_nnc_tensor_symbol_alias_to(graph, inputs[0]);
4064 ccv_nnc_tensor_param_t output_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
4065 if (to.d == CCV_NNC_NO_TENSOR_SYMBOL) // If we are not reshape an alias, it is straightforward.
4066 {
4067 int ofs[CCV_NNC_MAX_DIM_ALLOC(12)] = {0};
4068 int stride[CCV_NNC_MAX_DIM_ALLOC(12)];
4069 ccv_nnc_tensor_get_stride(output_params.dim, stride);
4070 outputs[0] = ccv_nnc_tensor_symbol_alias_new(graph, inputs[0], ofs, stride, output_params, 0);
4071 } else {
4072 int old_ofs[CCV_NNC_MAX_DIM_ALLOC(12)];
4073 int old_stride[CCV_NNC_MAX_DIM_ALLOC(12)];
4074 ccv_nnc_tensor_symbol_alias_params(graph, inputs[0], old_ofs, old_stride);
4075 outputs[0] = ccv_nnc_tensor_symbol_alias_new(graph, to, old_ofs, old_stride, output_params, 0);
4076 }
4077 ccv_nnc_cmd_t cmd = ccv_nnc_cmd(CCV_NNC_CUSTOM_FORWARD, (ccv_nnc_cmd_vtab_t*)&ccv_cnnp_debug_exec_isa, (ccv_nnc_cmd_param_t){}, 0);
4078 cmd.data = self;
4079 ccv_nnc_graph_exec_symbol_t make_debug = ccv_nnc_graph_exec_symbol_new(graph, cmd, inputs, input_size, outputs, 1, "debug");
4080 // Disable any optimizations.
4081 ccv_nnc_graph_exec_symbol_set_flags(graph, make_debug, CCV_NNC_GRAPH_EXEC_DISABLE_OPT);
4082}
4083
4084static void _ccv_cnnp_debug_deinit(ccv_cnnp_model_t* const super)
4085{
4086 const ccv_cnnp_model_debug_t* const self = (const ccv_cnnp_model_debug_t*)super;
4087 if (self->debug_deinit && self->debug_context)
4088 self->debug_deinit(self->debug_context);
4089}
4090
static ccv_cnnp_model_t* _ccv_cnnp_debug_copy(const ccv_cnnp_model_t* const super, void* const context);

// Debug layer vtable: build, a deinit that can release the user context, and
// a copy that can deep-copy it.
static const ccv_cnnp_model_vtab_t ccv_cnnp_debug_isa = {
	.build = _ccv_cnnp_debug_build,
	.deinit = _ccv_cnnp_debug_deinit,
	.copy = _ccv_cnnp_debug_copy,
};
4098
4099ccv_cnnp_model_t* ccv_cnnp_debug(ccv_cnnp_model_debug_f func, void* const context, ccv_cnnp_model_debug_context_deinit_f deinit, ccv_cnnp_model_debug_context_copy_f copy, const char* const name)
4100{
4101 ccv_cnnp_model_debug_t* const model_debug = (ccv_cnnp_model_debug_t*)cccalloccalloc(1, sizeof(ccv_cnnp_model_debug_t));
4102 model_debug->super.isa = &ccv_cnnp_debug_isa;
4103 model_debug->super.input_size = 0;
4104 model_debug->super.outputs = &model_debug->output;
4105 model_debug->super.output_size = 1;
4106 model_debug->debugger = func;
4107 model_debug->debug_context = context;
4108 model_debug->debug_deinit = deinit;
4109 model_debug->debug_copy = copy;
4110 ccv_cnnp_model_copy_name(&model_debug->super, name);
4111 return (ccv_cnnp_model_t*)model_debug;
4112}
4113
4114static ccv_cnnp_model_t* _ccv_cnnp_debug_copy(const ccv_cnnp_model_t* const super, void* const context)
4115{
4116 const ccv_cnnp_model_debug_t* const self = (const ccv_cnnp_model_debug_t*)super;
4117 void* debug_context = self->debug_context;
4118 if (self->debug_copy && self->debug_context)
4119 debug_context = self->debug_copy(self->debug_context);
4120 return ccv_cnnp_debug(self->debugger, debug_context, self->debug_deinit, self->debug_copy, self->super.name);
4121}