Bug Summary

File: nnc/ccv_cnnp_model_core.c
Warning: line 292, column 1
Assigned value is garbage or undefined
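Note: the flagged statement sits inside the code generated by KHASH_MAP_INIT_INT64(model_io, ccv_cnnp_model_io_t) at line 292. Per the analyzer path below, khash grows its keys/vals storage with realloc (event 25, 'Storing uninitialized value') and later reads a slot back in the rehash loop of kh_resize_model_io (event 64). khash guards such reads with its flags bitmap, an invariant the checker does not track, so this looks like the well-known khash false positive rather than a real bug; a standalone sketch of the pattern follows the path events.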

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-unknown-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name ccv_cnnp_model_core.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -target-feature +sse2 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/home/liu/buildslave/linux-x64-runtests/build/lib/nnc -resource-dir /usr/local/lib/clang/14.0.0 -I ../ -I /usr/local/cuda/include -D HAVE_CBLAS -D HAVE_LIBPNG -D HAVE_LIBJPEG -D HAVE_FFTW3 -D HAVE_PTHREAD -D HAVE_LIBLINEAR -D HAVE_TESSERACT -D HAVE_AVCODEC -D HAVE_AVFORMAT -D HAVE_AVUTIL -D HAVE_SWSCALE -D USE_DISPATCH -D HAVE_SSE2 -D HAVE_GSL -D HAVE_CUDA -D HAVE_CUDNN -D HAVE_NCCL -D USE_SYSTEM_CUB -I /usr/local/include -internal-isystem /usr/local/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/9/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -fdebug-compilation-dir=/home/liu/buildslave/linux-x64-runtests/build/lib/nnc -ferror-limit 19 -fblocks -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/liu/buildslave/public_html/analyze/2022-06-22-151334-490440-1 -x c ccv_cnnp_model_core.c
1#include "ccv_nnc.h"
2#include "ccv_nnc_easy.h"
3#include "ccv_nnc_internal.h"
4#include "ccv_internal.h"
5#include "_ccv_cnnp_model.h"
6#include "3rdparty/khash/khash.h"
7
8// MARK - Basic Layers
9
10static const ccv_cnnp_model_vtab_t ccv_cnnp_input_isa;
11
12#define CCV_CNNP_IS_MODEL_INPUT(x) ((x)->isa == &ccv_cnnp_input_isa)
13
14#define CCV_CNNP_IS_MODEL_PARAMETER(x) ((x)->param_ref != 0 || (x)->param_sel != 0)
15
16typedef struct {
17 ccv_cnnp_model_t super;
18 int sequence_size;
19 ccv_cnnp_model_t* sequence[1];
20} ccv_cnnp_sequential_model_t;
21
22static void _ccv_cnnp_sequential_model_deinit(ccv_cnnp_model_t* const super)
23{
24 ccv_cnnp_sequential_model_t* const self = (ccv_cnnp_sequential_model_t*)super;
25 int i, j;
26 for (i = 0; i < self->sequence_size; i++)
27 {
28 ccv_cnnp_model_t* const model = self->sequence[i];
29 if (!model)
30 continue;
31 ccv_cnnp_model_free(model);
32 for (j = i + 1; j < self->sequence_size; j++)
33 if (self->sequence[j] == model)
34 self->sequence[j] = 0;
35 }
36}
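Note: the deinit frees each model once and then clears any later duplicate entries, guarding against a double-free when the same sub-model appears more than once in the sequence.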
37
38static void _ccv_cnnp_sequential_model_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
39{
40 ccv_cnnp_sequential_model_t* const self = (ccv_cnnp_sequential_model_t*)super;
41 ccv_cnnp_model_t* const sub_model = self->sequence[0];
42 // Go through each sub model to build the graph.
43 ccv_nnc_tensor_symbol_t input;
44 sub_model->data = self->super.data;
45 ccv_cnnp_model_build(sub_model, graph, inputs, input_size, &input, 1);
46 sub_model->data = 0;
47 int i;
48 for (i = 1; i < self->sequence_size; i++)
49 {
50 ccv_nnc_tensor_symbol_t output;
51 ccv_cnnp_model_t* const sub_model = self->sequence[i];
52 // Go through each sub model to build the graph.
53 sub_model->data = self->super.data;
54 ccv_cnnp_model_build(sub_model, graph, &input, 1, &output, 1);
55 sub_model->data = 0;
56 input = output;
57 }
58 outputs[0] = input;
59}
60
61static void _ccv_cnnp_sequential_model_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
62{
63 ccv_cnnp_sequential_model_t* const self = (ccv_cnnp_sequential_model_t*)super;
64 int i;
65 for (i = 0; i < self->sequence_size; i++)
66 ccv_cnnp_model_init_states(self->sequence[i], graph, initializer, context);
67}
68
69static void _ccv_cnnp_sequential_model_set_is_test(ccv_cnnp_model_t* const super, const int is_test, const ccv_cnnp_cmd_updater_f updater, void* const context)
70{
71 ccv_cnnp_sequential_model_t* const self = (ccv_cnnp_sequential_model_t*)super;
72 int i;
73 for (i = 0; i < self->sequence_size; i++)
74 ccv_cnnp_model_set_is_test(self->sequence[i], is_test, updater, context);
75}
76
77static ccv_cnnp_model_t* _ccv_cnnp_sequential_model_copy(const ccv_cnnp_model_t* const super, void* const context);
78
79static void _ccv_cnnp_sequential_model_add_to_parameter_indices(ccv_cnnp_model_t* const super, const int index, ccv_array_t* const parameter_indices)
80{
81 ccv_cnnp_sequential_model_t* const self = (ccv_cnnp_sequential_model_t*)super;
82 int i;
83 for (i = 0; i < self->sequence_size; i++)
84 ccv_cnnp_model_add_to_parameter_indices(self->sequence[i], index, parameter_indices);
85}
86
87static void _ccv_cnnp_sequential_model_notify(const ccv_cnnp_model_t* const super, const int tag, void* const payload)
88{
89 ccv_cnnp_sequential_model_t* const self = (ccv_cnnp_sequential_model_t*)super;
90 int i;
91 for (i = 0; i < self->sequence_size; i++)
92 ccv_cnnp_model_notify(self->sequence[i], tag, payload);
93}
94
95static const ccv_cnnp_model_vtab_t ccv_cnnp_sequential_model_isa = {
96 .deinit = _ccv_cnnp_sequential_model_deinit,
97 .build = _ccv_cnnp_sequential_model_build,
98 .init_states = _ccv_cnnp_sequential_model_init_states,
99 .copy = _ccv_cnnp_sequential_model_copy,
100 .set_is_test = _ccv_cnnp_sequential_model_set_is_test,
101 .add_to_parameter_indices = _ccv_cnnp_sequential_model_add_to_parameter_indices,
102 .notify = _ccv_cnnp_sequential_model_notify,
103};
104
105KHASH_MAP_INIT_INT64(model, ccv_cnnp_model_t*)
[khash macro expansion elided: the line above generates kh_init_model, kh_destroy_model, kh_clear_model, kh_get_model, kh_resize_model, kh_put_model and kh_del_model]
106
107static ccv_cnnp_model_t* _ccv_cnnp_sequential_model_copy(const ccv_cnnp_model_t* const super, void* const context)
108{
109 const ccv_cnnp_sequential_model_t* const self = (const ccv_cnnp_sequential_model_t*)super;
110 ccv_cnnp_sequential_model_t* const sequential_model = (ccv_cnnp_sequential_model_t*)cccalloc(1, sizeof(ccv_cnnp_sequential_model_t) + sizeof(ccv_cnnp_model_t*) * (self->sequence_size - 1) + sizeof(ccv_nnc_tensor_symbol_t));
111 sequential_model->super.isa = &ccv_cnnp_sequential_model_isa;
112 sequential_model->super.input_size = 1;
113 sequential_model->super.outputs = (ccv_nnc_tensor_symbol_t*)(sequential_model->sequence + self->sequence_size);
114 sequential_model->super.output_size = 1;
115 ccv_cnnp_model_copy_name(&sequential_model->super, self->super.name);
116 sequential_model->sequence_size = self->sequence_size;
117 int i;
118 khash_t(model)* model_map = context ? (khash_t(model)*)context : kh_init(model);
119 for (i = 0; i < self->sequence_size; i++)
120 {
121 ccv_cnnp_model_t* const sub_model = self->sequence[i];
122 int ret;
123 khiter_t k = kh_put(model, model_map, (uint64_t)(uintptr_t)sub_model, &ret);
124 ccv_cnnp_model_t* model_copy;
125 if (ret != 0)
126 model_copy = kh_val(model_map, k) = _ccv_cnnp_model_copy(sub_model, model_map);
127 else
128 model_copy = kh_val(model_map, k);
129 sequential_model->sequence[i] = model_copy;
130 }
131 if (!context)
132 kh_destroy(model, model_map);
133 return (ccv_cnnp_model_t*)sequential_model;
134}
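Note: model_map (threaded through the context argument) memoizes sub-model copies, so a sub-model that appears several times in the sequence, or is shared across nested models copied with the same context, is copied exactly once and the same copy is reused.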
135
136ccv_cnnp_model_t* ccv_cnnp_sequential_new(ccv_cnnp_model_t* const* const models, const int model_size, const char* const name)
137{
138 assert(model_size > 0);
139 ccv_cnnp_sequential_model_t* const sequential_model = (ccv_cnnp_sequential_model_t*)cccalloc(1, sizeof(ccv_cnnp_sequential_model_t) + sizeof(ccv_cnnp_model_t*) * (model_size - 1) + sizeof(ccv_nnc_tensor_symbol_t));
140 sequential_model->super.isa = &ccv_cnnp_sequential_model_isa;
141 sequential_model->super.input_size = models[0]->input_size;
142 sequential_model->super.outputs = (ccv_nnc_tensor_symbol_t*)(sequential_model->sequence + model_size);
143 sequential_model->super.output_size = 1;
144 ccv_cnnp_model_copy_name(&sequential_model->super, name);
145 sequential_model->sequence_size = model_size;
146 memcpy(sequential_model->sequence, models, sizeof(ccv_cnnp_model_t*) * model_size);
147 return (ccv_cnnp_model_t*)sequential_model;
148}
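A minimal usage sketch for the constructor above; m0 and m1 are hypothetical sub-models assumed to be built elsewhere against the same ccv_cnnp_model_t API. Ownership transfers to the container, so freeing the container frees each sub-model exactly once (see _ccv_cnnp_sequential_model_deinit):

	ccv_cnnp_model_t* models[2] = { m0, m1 }; /* hypothetical sub-models */
	ccv_cnnp_model_t* const seq = ccv_cnnp_sequential_new(models, 2, "seq");
	/* ... compile and run seq through the usual ccv_cnnp driver calls ... */
	ccv_cnnp_model_free(seq);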
149
150typedef struct {
151 ccv_cnnp_model_t super;
152 // Similar to the sequential model in layout, but the sequence here is just topologically sorted models.
153 int sequence_size;
154 ccv_cnnp_model_io_t sequence[1];
155} ccv_cnnp_functional_model_t;
156
157static void _ccv_cnnp_functional_model_deinit(ccv_cnnp_model_t* const super)
158{
159 ccv_cnnp_functional_model_t* const self = (ccv_cnnp_functional_model_t*)super;
160 int i, j = 0, k;
161 for (i = 0; i < self->sequence_size; i++)
162 {
163 ccv_cnnp_model_t* const model = self->sequence[i]->model;
164 if (!model)
165 continue;
166 self->sequence[j++] = (ccv_cnnp_model_io_t)model;
167 // Go through all their IO to remove itself as model.
168 assert(model->io);
169 for (k = 0; k < model->io->rnum; k++)
170 {
171 ccv_cnnp_model_io_t model_io = *(ccv_cnnp_model_io_t*)ccv_array_get(model->io, k);
172 model_io->model = 0;
173 }
174 }
175 for (i = 0; i < j; i++)
176 ccv_cnnp_model_free((ccv_cnnp_model_t*)self->sequence[i]);
177}
178
179static void _ccv_cnnp_functional_model_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
180{
181 ccv_cnnp_functional_model_t* const self = (ccv_cnnp_functional_model_t*)super;
182 assert(self->super.input_size == input_size);
183 assert(self->super.output_size == output_size);
184 int i, j, k;
185 for (i = 0; i < self->super.input_size; i++)
186 self->sequence[i]->outputs[0] = self->sequence[i]->model->outputs[0] = inputs[i]; // Assigning the output symbol of input layer to be the input symbol.
187 ccv_array_t* input_symbols = ccv_array_new(sizeof(ccv_nnc_tensor_symbol_t), 1, 0);
188 ccv_array_t* parameter_indices = 0;
189 for (i = self->super.input_size; i < self->sequence_size; i++)
190 {
191 ccv_cnnp_model_t* const sub_model = self->sequence[i]->model;
192 ccv_array_clear(input_symbols);
193 const ccv_array_t* const incomings = self->sequence[i]->incomings;
194 for (j = 0; j < incomings->rnum; j++)
195 {
196 const ccv_cnnp_model_io_t input = *(ccv_cnnp_model_io_t*)ccv_array_get(incomings, j);
197 if (CCV_CNNP_IS_MODEL_PARAMETER(input))
198 {
199 if (!parameter_indices)
200 parameter_indices = ccv_array_new(sizeof(int), 0, 0);
201 else
202 ccv_array_clear(parameter_indices);
203 const int param_sel = input->param_sel > 0 ? input->param_sel - 1 : input->param_sel;
204 assert(input->param_sel != 0);
205 ccv_cnnp_model_add_to_parameter_indices(input->model, param_sel, parameter_indices);
206 assert(parameter_indices->rnum > 0);
207 const int param_ref = input->param_ref > 0 ? input->param_ref - 1 : input->param_ref;
208 assert(input->param_ref != 0);
209 if (param_ref >= 0)
210 {
211 assert(param_ref < parameter_indices->rnum);
212 const ccv_nnc_tensor_symbol_t parameter = ccv_cnnp_parameter_from_indice(super, *(int*)ccv_array_get(parameter_indices, param_ref));
213 ccv_array_push(input_symbols, &parameter);
214 } else // Otherwise, all of them.
215 for (k = 0; k < parameter_indices->rnum; k++)
216 {
217 const ccv_nnc_tensor_symbol_t parameter = ccv_cnnp_parameter_from_indice(super, *(int*)ccv_array_get(parameter_indices, k));
218 ccv_array_push(input_symbols, &parameter);
219 }
220 } else {
221 for (k = 0; k < input->model->output_size; k++)
222 ccv_array_push(input_symbols, &input->outputs[k]);
223 }
224 }
225 // Go through each sub model to build the graph.
226 sub_model->data = self->super.data;
227 ccv_cnnp_model_build(sub_model, graph, (ccv_nnc_tensor_symbol_t*)ccv_array_get(input_symbols, 0), input_symbols->rnum, self->sequence[i]->outputs, sub_model->output_size);
228 sub_model->data = 0;
229 }
230 ccv_array_free(input_symbols);
231 if (parameter_indices)
232 ccv_array_free(parameter_indices);
233 for (i = output_size, k = self->sequence_size - 1; k >= 0; k--)
234 {
235 ccv_cnnp_model_t* const sub_model = self->sequence[k]->model;
236 i -= sub_model->output_size;
237 if (i < 0)
238 break;
239 for (j = 0; j < sub_model->output_size; j++)
240 outputs[i + j] = self->sequence[k]->outputs[j];
241 }
242 assert(i <= 0);
243}
244
245static void _ccv_cnnp_functional_model_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
246{
247 ccv_cnnp_functional_model_t* const self = (ccv_cnnp_functional_model_t*)super;
248 int i;
249 for (i = self->super.input_size; i < self->sequence_size; i++)
250 ccv_cnnp_model_init_states(self->sequence[i]->model, graph, initializer, context);
251}
252
253static void _ccv_cnnp_functional_model_set_is_test(ccv_cnnp_model_t* const super, const int is_test, const ccv_cnnp_cmd_updater_f updater, void* const context)
254{
255 ccv_cnnp_functional_model_t* const self = (ccv_cnnp_functional_model_t*)super;
256 int i;
257 for (i = self->super.input_size; i < self->sequence_size; i++)
258 ccv_cnnp_model_set_is_test(self->sequence[i]->model, is_test, updater, context);
259}
260
261static void _ccv_cnnp_functional_model_add_to_parameter_indices(ccv_cnnp_model_t* const super, const int index, ccv_array_t* const parameter_indices)
262{
263 ccv_cnnp_functional_model_t* const self = (ccv_cnnp_functional_model_t*)super;
264 int i;
265 for (i = self->super.input_size; i < self->sequence_size; i++)
266 ccv_cnnp_model_add_to_parameter_indices(self->sequence[i]->model, index, parameter_indices);
267}
268
269static void _ccv_cnnp_functional_model_notify(const ccv_cnnp_model_t* const super, const int tag, void* const payload)
270{
271 ccv_cnnp_functional_model_t* const self = (ccv_cnnp_functional_model_t*)super;
272 int i;
273 for (i = 0; i < self->sequence_size; i++)
274 {
275 const ccv_cnnp_model_t* const model = self->sequence[i]->model;
276 ccv_cnnp_model_notify(model, tag, payload);
277 }
278}
279
280static ccv_cnnp_model_t* _ccv_cnnp_functional_model_copy(const ccv_cnnp_model_t* const super, void* const context);
281
282static const ccv_cnnp_model_vtab_t ccv_cnnp_functional_model_isa = {
283 .deinit = _ccv_cnnp_functional_model_deinit,
284 .build = _ccv_cnnp_functional_model_build,
285 .init_states = _ccv_cnnp_functional_model_init_states,
286 .copy = _ccv_cnnp_functional_model_copy,
287 .set_is_test = _ccv_cnnp_functional_model_set_is_test,
288 .add_to_parameter_indices = _ccv_cnnp_functional_model_add_to_parameter_indices,
289 .notify = _ccv_cnnp_functional_model_notify,
290};
291
292KHASH_MAP_INIT_INT64(model_io, ccv_cnnp_model_io_t)
[khash macro expansion elided: the line above generates kh_init_model_io, kh_destroy_model_io, kh_clear_model_io, kh_get_model_io, kh_resize_model_io, kh_put_model_io and kh_del_model_io; the analyzer path below steps through this generated code]
Analyzer path events (stepping through the khash-generated code elided above; the numbering is the analyzer's):
14. Taking true branch
15. Taking false branch
16. Calling 'kh_resize_model_io'
17. Taking true branch
18. Assuming the condition is false
19. Taking false branch
20. '?' condition is true
21. Assuming 'new_flags' is non-null
22. Taking false branch
23. '?' condition is true
24. Taking true branch
25. Storing uninitialized value
26. Assuming 'new_keys' is non-null
27. Taking false branch
28. Taking true branch
29. Assuming 'new_vals' is non-null
30. Taking false branch
31. Taking true branch
32. Loop condition is false. Execution continues on line 292
33. Taking false branch
34. Returning from 'kh_resize_model_io'
35. Taking false branch
36. Assuming the condition is true
37. Taking true branch
38. Assuming the condition is false
39. Taking false branch
40. Assuming the condition is false
41. Taking false branch
47. Assuming field 'n_occupied' is >= field 'upper_bound'
48. Taking true branch
49. Taking true branch
50. Calling 'kh_resize_model_io'
51. Taking false branch
52. Assuming the condition is false
53. Taking false branch
54. '?' condition is true
55. Assuming 'new_flags' is non-null
56. Taking false branch
57. '?' condition is true
58. Taking false branch
59. Taking true branch
60. The value 0 is assigned to 'j'
61. Loop condition is true. Entering loop body
62. Assuming the condition is true
63. Taking true branch
64. Assigned value is garbage or undefined
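For reference, a minimal standalone sketch of the flagged pattern, with illustrative names (this is not the khash code): value storage is grown with realloc, so its tail holds indeterminate bytes, and a later loop reads a slot back only when a separate flags array marks it live. The read is safe under that invariant, but a path-sensitive checker that does not track the flags/vals coupling can report the assignment as garbage:

	#include <stdlib.h>

	typedef struct {
		unsigned n_buckets;
		unsigned char* flags; /* flags[i] != 0 means vals[i] holds a live value */
		int* vals;            /* grown with realloc, tail left uninitialized */
	} toy_hash_t;

	static int toy_grow(toy_hash_t* h, unsigned n)
	{
		unsigned char* const flags = (unsigned char*)realloc(h->flags, n);
		if (!flags)
			return -1;
		h->flags = flags;
		unsigned i;
		for (i = h->n_buckets; i < n; i++)
			h->flags[i] = 0; /* new slots start empty */
		int* const vals = (int*)realloc(h->vals, n * sizeof(int));
		if (!vals)
			return -1;
		h->vals = vals; /* slots >= old n_buckets stay uninitialized, by design */
		h->n_buckets = n;
		return 0;
	}

	int main(void)
	{
		toy_hash_t h = { 0, 0, 0 };
		if (toy_grow(&h, 4) != 0)
			return 1;
		h.vals[1] = 42; /* write a slot... */
		h.flags[1] = 1; /* ...then mark it live */
		if (toy_grow(&h, 8) != 0) /* grow again; vals[4..7] remain uninitialized */
			return 1;
		int sum = 0;
		unsigned j;
		for (j = 0; j < h.n_buckets; j++)
			if (h.flags[j])       /* only live slots are read, so this is safe, */
				sum += h.vals[j]; /* yet an analyzer may flag this assignment */
		free(h.flags);
		free(h.vals);
		return sum == 42 ? 0 : 1;
	}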
293
294static ccv_cnnp_model_t* _ccv_cnnp_functional_model_copy(const ccv_cnnp_model_t* const super, void* const context)
295{
296 const ccv_cnnp_functional_model_t* const self = (const ccv_cnnp_functional_model_t*)super;
297 ccv_cnnp_functional_model_t* const functional_model = (ccv_cnnp_functional_model_t*)cccalloc(1, sizeof(ccv_cnnp_functional_model_t) + sizeof(ccv_cnnp_model_t*) * (self->sequence_size - 1) + sizeof(ccv_nnc_tensor_symbol_t) * self->super.output_size);
298 functional_model->super.isa = &ccv_cnnp_functional_model_isa;
299 functional_model->super.outputs = (ccv_nnc_tensor_symbol_t*)(functional_model->sequence + self->sequence_size);
300 functional_model->super.output_size = self->super.output_size;
301 functional_model->super.input_size = self->super.input_size;
302 ccv_cnnp_model_copy_name(&functional_model->super, self->super.name);
303 functional_model->sequence_size = self->sequence_size;
304 // Now the difficult part, copy over the model_io.
305 khash_t(model_io)* model_io_map = kh_init(model_io);
306 khash_t(model)* model_map = context ? (khash_t(model)*)context : kh_init(model);
1. Assuming 'context' is null
2. '?' condition is false
307 int i, j;
308 for (i = 0; i < self->sequence_size; i++)
3. Assuming 'i' is >= field 'sequence_size'
4. Loop condition is false. Execution continues on line 332
309 {
310 const ccv_cnnp_model_t* const sub_model = self->sequence[i]->model;
311 int ret;
312 khiter_t k = kh_put(model, model_map, (uint64_t)(uintptr_t)sub_model, &ret);
313 ccv_cnnp_model_t* model_copy;
314 if (ret != 0)
315 model_copy = kh_val(model_map, k) = _ccv_cnnp_model_copy(sub_model, model_map);
316 else
317 model_copy = kh_val(model_map, k);
318 ccv_cnnp_model_io_t model_io = functional_model->sequence[i] = ccmalloc(sizeof(struct ccv_cnnp_model_io_s) + sizeof(ccv_nnc_tensor_symbol_t) * sub_model->output_size);
319 model_io->param_ref = 0;
320 model_io->param_sel = 0;
321 model_io->visit = 0;
322 model_io->model = model_copy;
323 model_io->incomings = 0;
324 model_io->outgoings = 0;
325 model_io->outputs = (ccv_nnc_tensor_symbol_t*)(model_io + 1);
326 if (!model_copy->io)
327 model_copy->io = ccv_array_new(sizeof(ccv_cnnp_model_io_t), 1, 0);
328 ccv_array_push(model_copy->io, &model_io);
329 k = kh_put(model_io, model_io_map, (uint64_t)(uintptr_t)self->sequence[i], &ret);
330 kh_val(model_io_map, k) = functional_model->sequence[i];
331 }
332 for (i = self->super.input_size; i < self->sequence_size; i++)
5. Assuming 'i' is < field 'sequence_size'
6. Loop condition is true. Entering loop body
333 {
334 if (self->sequence[i]->incomings)
7. Assuming field 'incomings' is non-null
8. Taking true branch
335 for (j = 0; j < self->sequence[i]->incomings->rnum; j++)
9. Loop condition is true. Entering loop body
44. Loop condition is true. Entering loop body
336 {
337 const ccv_cnnp_model_io_t input = *(ccv_cnnp_model_io_t*)ccv_array_get(self->sequence[i]->incomings, j);
338 if (CCV_CNNP_IS_MODEL_PARAMETER(input)) // I am pretty sure this is not in the model_io_map.
10. Assuming field 'param_ref' is equal to 0
11. Assuming field 'param_sel' is not equal to 0
12. Taking true branch
45. Assuming field 'param_ref' is not equal to 0
339 {
340 int ret;
341 khiter_t k = kh_put(model_io, model_io_map, (uint64_t)(uintptr_t)input, &ret);
13. Calling 'kh_put_model_io'
42. Returning from 'kh_put_model_io'
46. Calling 'kh_put_model_io'
342 if (ret != 0)
42.1. 'ret' is equal to 0
43. Taking false branch
343 {
344 // The model may not exist on the map due to wrapping (it is inside another sequential or functional model).
345 khiter_t m = kh_get(model, model_map, (uint64_t)(uintptr_t)input->model);
346 assert(m != kh_end(model_map));
347 ccv_cnnp_model_t* const model_copy = kh_val(model_map, m);
348 ccv_cnnp_model_io_t model_io = ccmalloc(sizeof(struct ccv_cnnp_model_io_s));
349 model_io->param_ref = input->param_ref;
350 model_io->param_sel = input->param_sel;
351 model_io->visit = 0;
352 model_io->model = model_copy;
353 model_io->incomings = 0;
354 model_io->outgoings = 0;
355 model_io->outputs = 0;
356 if (!model_copy->io)
357 model_copy->io = ccv_array_new(sizeof(ccv_cnnp_model_io_t), 1, 0);
358 ccv_array_push(model_copy->io, &model_io);
359 kh_val(model_io_map, k) = model_io;
360 if (input->outgoings)
361 {
362 model_io->outgoings = ccv_array_new(sizeof(ccv_cnnp_model_io_t), input->outgoings->rnum, 0);
363 int x;
364 for (x = 0; x < input->outgoings->rnum; x++)
365 {
366 khiter_t k = kh_get(model_io, model_io_map, (uint64_t)(uintptr_t)(*(ccv_cnnp_model_io_t*)ccv_array_get(input->outgoings, x)));
367 assert(k != kh_end(model_io_map));
368 ccv_cnnp_model_io_t outgoing_io = kh_val(model_io_map, k);
369 ccv_array_push(model_io->outgoings, &outgoing_io);
370 }
371 }
372 }
373 }
374 }
375 }
376 if (!context)
377 kh_destroy(model, model_map);
378 for (i = 0; i < self->sequence_size; i++)
379 {
380 const ccv_cnnp_model_io_t model_io = self->sequence[i];
381 ccv_cnnp_model_io_t model_io_copy = functional_model->sequence[i];
382 model_io_copy->param_ref = model_io->param_ref;
383 model_io_copy->param_sel = model_io->param_sel;
384 if (model_io->incomings)
385 {
386 model_io_copy->incomings = ccv_array_new(sizeof(ccv_cnnp_model_io_t), model_io->incomings->rnum, 0);
387 for (j = 0; j < model_io->incomings->rnum; j++)
388 {
389 khiter_t k = kh_get(model_io, model_io_map, (uint64_t)(uintptr_t)(*(ccv_cnnp_model_io_t*)ccv_array_get(model_io->incomings, j)));
390 assert(k != kh_end(model_io_map));
391 ccv_cnnp_model_io_t input_io = kh_val(model_io_map, k);
392 ccv_array_push(model_io_copy->incomings, &input_io);
393 }
394 }
395 if (model_io->outgoings)
396 {
397 model_io_copy->outgoings = ccv_array_new(sizeof(ccv_cnnp_model_io_t), model_io->outgoings->rnum, 0);
398 for (j = 0; j < model_io->outgoings->rnum; j++)
399 {
400 khiter_t k = kh_get(model_io, model_io_map, (uint64_t)(uintptr_t)(*(ccv_cnnp_model_io_t*)ccv_array_get(model_io->outgoings, j)));
401 assert(k != kh_end(model_io_map));
402 ccv_cnnp_model_io_t outgoing_io = kh_val(model_io_map, k);
403 ccv_array_push(model_io_copy->outgoings, &outgoing_io);
404 }
405 }
406 }
407 kh_destroy(model_io, model_io_map);
408 return (ccv_cnnp_model_t*)functional_model;
409}
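Note: the copy runs in two passes over the io graph: the first pass clones every model and io node and records the mapping in model_io_map; the second pass rewires incomings/outgoings through that map so the copied graph only references copied nodes.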
410
411ccv_cnnp_model_t* ccv_cnnp_model_new(const ccv_cnnp_model_io_t* const inputs, const int input_size, const ccv_cnnp_model_io_t* const outputs, const int output_size, const char* const name)
412{
413 assert(output_size > 0);
414 // Do topological sort.
415 ccv_array_t* const reverse_top = ccv_array_new(sizeof(ccv_cnnp_model_io_t), output_size, 0);
416 int i, j, k;
417 // Go through the outputs one by one and reverse-traverse them to detect potential overlap (overlap means, for example,
418 // outputs[1] is an incoming node of outputs[0]; thus, if we reversed them, outputs[0] could be built before outputs[1],
419 // which would cause issues).
420 for (i = 0; i < output_size; i++)
421 outputs[i]->visit = 2;
422 for (i = output_size - 1; i >= 0; i--)
423 {
424 if (outputs[i]->visit == 3) // If we need to remove it, no need to visit.
425 continue;
426 assert(outputs[i]->visit == 2);
427 ccv_array_clear(reverse_top);
428 ccv_array_push(reverse_top, &outputs[i]);
429 for (j = 0; j < reverse_top->rnum; j++)
430 {
431 const ccv_cnnp_model_io_t output = *(ccv_cnnp_model_io_t*)ccv_array_get(reverse_top, j);
432 assert(!CCV_CNNP_IS_MODEL_INPUT(output->model));
433 // If it is input, push it here.
434 if (output->incomings && !CCV_CNNP_IS_MODEL_PARAMETER(output))
435 for (k = 0; k < output->incomings->rnum; k++)
436 {
437 const ccv_cnnp_model_io_t input = *(ccv_cnnp_model_io_t*)ccv_array_get(output->incomings, k);
438 // If it is an input or parameter, skip.
439 if (CCV_CNNP_IS_MODEL_INPUT(input->model) || CCV_CNNP_IS_MODEL_PARAMETER(input))
440 continue;
441 if (input->visit == 1 || input->visit == 3) // Visited, skip.
442 continue;
443 // If this is an output, we need to remove it from the output array. Otherwise mark it as visited.
444 input->visit = input->visit == 2 ? 3 : 1;
445 ccv_array_push(reverse_top, &input);
446 }
447 }
448 for (j = 1; j < reverse_top->rnum; j++)
449 {
450 const ccv_cnnp_model_io_t output = *(ccv_cnnp_model_io_t*)ccv_array_get(reverse_top, j);
451 if (output->visit == 1) // Clean the visit back.
452 output->visit = 0;
453 }
454 }
455 ccv_array_clear(reverse_top);
456 for (i = 0; i < output_size; i++) // We will assign sequence in reverse order, thus, reverse the reverse top when copying the outputs.
457 {
458 if (outputs[output_size - 1 - i]->visit == 2)
459 ccv_array_push(reverse_top, &outputs[output_size - 1 - i]);
460 assert(outputs[output_size - 1 - i]->visit == 2 || outputs[output_size - 1 - i]->visit == 3);
461 outputs[output_size - 1 - i]->visit = 0; // Clean up all visits.
462 }
463 // Go from the output, until we meet inputs.
464 uint64_t input_bitmask[((input_size - 1) >> 6) + 1];
465 memset(input_bitmask, 0, sizeof(uint64_t) * (((input_size - 1) >> 6) + 1));
466 int tensor_output_size = 0; // An io can map to multiple tensor outputs; therefore, we need to compute the exact tensor output size.
467 for (i = 0; i < output_size; i++)
468 tensor_output_size += outputs[i]->model->output_size;
469 for (i = 0; i < reverse_top->rnum; i++)
470 {
471 const ccv_cnnp_model_io_t output = *(ccv_cnnp_model_io_t*)ccv_array_get(reverse_top, i);
472 assert(!CCV_CNNP_IS_MODEL_INPUT(output->model));
473 // If it is input, push it here.
474 if (output->incomings && !CCV_CNNP_IS_MODEL_PARAMETER(output))
475 for (j = 0; j < output->incomings->rnum; j++)
476 {
477 const ccv_cnnp_model_io_t input = *(ccv_cnnp_model_io_t*)ccv_array_get(output->incomings, j);
478 ++input->visit; // Mark it as visited.
479 if (input->visit != input->outgoings->rnum) // Not all dependencies visited.
480 continue;
481 if (!CCV_CNNP_IS_MODEL_INPUT(input->model) && !CCV_CNNP_IS_MODEL_PARAMETER(input))
482 ccv_array_push(reverse_top, &input);
483 else if (CCV_CNNP_IS_MODEL_INPUT(input->model)) {
484 for (k = 0; k < input_size; k++)
485 if (input == inputs[k])
486 break;
487 assert(k < input_size);
488 input_bitmask[k >> 6] |= ((uint64_t)1 << (k & 63));
489 }
490 }
491 }
492 for (i = 0; i < reverse_top->rnum; i++)
493 {
494 const ccv_cnnp_model_io_t output = *(ccv_cnnp_model_io_t*)ccv_array_get(reverse_top, i);
495 output->visit = 0; // Clean the visit back.
496 }
497 for (i = 0; i < input_size; i++)
498 inputs[i]->visit = 0; // Clean the visit back.
499 for (i = 0; i < input_size; i++)
500 { assert((input_bitmask[i >> 6] & ((uint64_t)1 << (i & 63)))); } // Assuming they all match.
501 const int sequence_size = reverse_top->rnum + input_size;
502 ccv_cnnp_functional_model_t* const functional_model = (ccv_cnnp_functional_model_t*)cccalloc(1, sizeof(ccv_cnnp_functional_model_t) + sizeof(ccv_cnnp_model_t*) * (sequence_size - 1) + sizeof(ccv_nnc_tensor_symbol_t) * tensor_output_size);
503 functional_model->super.isa = &ccv_cnnp_functional_model_isa;
504 functional_model->super.outputs = (ccv_nnc_tensor_symbol_t*)(functional_model->sequence + sequence_size);
505 functional_model->super.output_size = tensor_output_size;
506 functional_model->super.input_size = input_size;
507 ccv_cnnp_model_copy_name(&functional_model->super, name);
508 functional_model->sequence_size = sequence_size;
509 memcpy(functional_model->sequence, inputs, sizeof(ccv_cnnp_model_io_t) * input_size);
510 for (i = 0; i < reverse_top->rnum; i++)
511 functional_model->sequence[input_size + i] = *(ccv_cnnp_model_io_t*)ccv_array_get(reverse_top, reverse_top->rnum - 1 - i);
512 ccv_array_free(reverse_top);
513 return (ccv_cnnp_model_t*)functional_model;
514}
515
516static ccv_cnnp_model_t* _ccv_cnnp_input_copy(const ccv_cnnp_model_t* const self, void* const context)
517{
518 ccv_cnnp_model_t* const input = (ccv_cnnp_model_t*)cccalloc(1, sizeof(ccv_cnnp_model_t) + sizeof(ccv_nnc_tensor_symbol_t));
519 input->isa = &ccv_cnnp_input_isa;
520 input->outputs = (ccv_nnc_tensor_symbol_t*)(input + 1);
521 input->output_size = 1;
522 return input;
523}
524
525static const ccv_cnnp_model_vtab_t ccv_cnnp_input_isa = {
526 .copy = _ccv_cnnp_input_copy,
527};
528
529ccv_cnnp_model_io_t ccv_cnnp_input(void)
530{
531 ccv_cnnp_model_t* const input = (ccv_cnnp_model_t*)cccalloc(1, sizeof(ccv_cnnp_model_t) + sizeof(ccv_nnc_tensor_symbol_t));
532 input->isa = &ccv_cnnp_input_isa;
533 input->io = ccv_array_new(sizeof(ccv_cnnp_model_io_t), 1, 0);
534 ccv_cnnp_model_io_t input_io = ccmalloc(sizeof(struct ccv_cnnp_model_io_s) + sizeof(ccv_nnc_tensor_symbol_t));
535 input_io->param_ref = 0;
536 input_io->param_sel = 0;
537 input_io->visit = 0;
538 input_io->incomings = 0;
539 input_io->outgoings = 0;
540 input_io->model = input;
541 input_io->outputs = (ccv_nnc_tensor_symbol_t*)(input_io + 1);
542 ccv_array_push(input->io, &input_io);
543 input->outputs = (ccv_nnc_tensor_symbol_t*)(input + 1);
544 input->output_size = 1;
545 return input_io;
546}
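A minimal wiring sketch for the functional API: ccv_cnnp_input above creates an io node, and ccv_cnnp_model_new (line 411) topologically sorts everything between the chosen inputs and outputs. Here m is a hypothetical sub-model, and ccv_cnnp_model_apply (applying a model to io nodes and returning the resulting io) is assumed from the public ccv_cnnp header:

	ccv_cnnp_model_io_t x = ccv_cnnp_input();
	ccv_cnnp_model_io_t y = ccv_cnnp_model_apply(m, &x, 1); /* m: hypothetical sub-model */
	ccv_cnnp_model_t* const f = ccv_cnnp_model_new(&x, 1, &y, 1, "f");
	/* ... use f ... */
	ccv_cnnp_model_free(f);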
547
548// MARK - Dynamic Layer
549
550typedef struct {
551 ccv_cnnp_model_t super;
552 ccv_cnnp_model_dynamic_f func;
553 void* context;
554 ccv_cnnp_model_t* model;
555} ccv_cnnp_dynamic_model_t;
556
557static void _ccv_cnnp_dynamic_model_deinit(ccv_cnnp_model_t* const super)
558{
559 ccv_cnnp_dynamic_model_t* const self = (ccv_cnnp_dynamic_model_t*)super;
560 if (self->model)
561 ccv_cnnp_model_free(self->model);
562}
563
564static void _ccv_cnnp_dynamic_model_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
565{
566 ccv_cnnp_dynamic_model_t* const self = (ccv_cnnp_dynamic_model_t*)super;
567 if (!self->model)
568 {
569 ccv_nnc_tensor_param_t input_params[input_size];
570 int i;
571 for (i = 0; i < input_size; i++)
572 input_params[i] = ccv_nnc_tensor_symbol_params(graph, inputs[i]);
573 self->model = self->func(input_params, input_size, self->context);
574 // Update to use the settings of the compiled model.
575 self->super.input_size = self->model->input_size;
576 self->super.outputs = self->model->outputs;
577 self->super.output_size = self->model->output_size;
578 }
579 self->model->data = self->super.data;
580 ccv_cnnp_model_build(self->model, graph, inputs, input_size, outputs, output_size);
581 self->model->data = 0;
582}
583
584static void _ccv_cnnp_dynamic_model_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
585{
586 ccv_cnnp_dynamic_model_t* const self = (ccv_cnnp_dynamic_model_t*)super;
587 assert(self->model);
588 ccv_cnnp_model_init_states(self->model, graph, initializer, context);
589}
590
591static void _ccv_cnnp_dynamic_model_add_to_parameter(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const parameters)
592{
593 ccv_cnnp_dynamic_model_t* const self = (ccv_cnnp_dynamic_model_t*)super;
594 assert(self->model);
595 ccv_cnnp_model_add_to_parameter(self->model, add_to_array, parameters);
596}
597
598static void _ccv_cnnp_dynamic_model_add_to_output(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const outputs)
599{
600 ccv_cnnp_dynamic_model_t* const self = (ccv_cnnp_dynamic_model_t*)super;
601 assert(self->model);
602 ccv_cnnp_model_add_to_output(self->model, add_to_array, outputs);
603}
604
605static void _ccv_cnnp_dynamic_model_set_is_test(ccv_cnnp_model_t* const super, const int is_test, const ccv_cnnp_cmd_updater_f updater, void* const context)
606{
607 ccv_cnnp_dynamic_model_t* const self = (ccv_cnnp_dynamic_model_t*)super;
608 assert(self->model);
609 ccv_cnnp_model_set_is_test(self->model, is_test, updater, context);
610}
611
612static ccv_cnnp_model_t* _ccv_cnnp_dynamic_model_copy(const ccv_cnnp_model_t* const super, void* const context);
613
614static void _ccv_cnnp_dynamic_model_add_to_parameter_indices(ccv_cnnp_model_t* const super, const int index, ccv_array_t* const parameter_indices)
615{
616 ccv_cnnp_dynamic_model_t* const self = (ccv_cnnp_dynamic_model_t*)super;
617 assert(self->model);
618 ccv_cnnp_model_add_to_parameter_indices(self->model, index, parameter_indices);
619}
620
621static void _ccv_cnnp_dynamic_model_notify(const ccv_cnnp_model_t* const super, const int tag, void* const payload)
622{
623 ccv_cnnp_dynamic_model_t* const self = (ccv_cnnp_dynamic_model_t*)super;
624 if (self->model)
625 ccv_cnnp_model_notify(self->model, tag, payload);
626}
627
628static const ccv_cnnp_model_vtab_t ccv_cnnp_dynamic_model_isa = {
629 .deinit = _ccv_cnnp_dynamic_model_deinit,
630 .build = _ccv_cnnp_dynamic_model_build,
631 .init_states = _ccv_cnnp_dynamic_model_init_states,
632 .add_to_parameter = _ccv_cnnp_dynamic_model_add_to_parameter,
633 .add_to_output = _ccv_cnnp_dynamic_model_add_to_output,
634 .copy = _ccv_cnnp_dynamic_model_copy,
635 .set_is_test = _ccv_cnnp_dynamic_model_set_is_test,
636 .add_to_parameter_indices = _ccv_cnnp_dynamic_model_add_to_parameter_indices,
637 .notify = _ccv_cnnp_dynamic_model_notify,
638};
639
640ccv_cnnp_model_t* ccv_cnnp_dynamic_new(ccv_cnnp_model_dynamic_f func, void* const context, const char* const name)
641{
642 ccv_cnnp_dynamic_model_t* const dynamic_model = (ccv_cnnp_dynamic_model_t*)cccalloc(1, sizeof(ccv_cnnp_dynamic_model_t));
643 dynamic_model->super.isa = &ccv_cnnp_dynamic_model_isa;
644 dynamic_model->func = func;
645 dynamic_model->context = context;
646 ccv_cnnp_model_copy_name(&dynamic_model->super, name);
647 return (ccv_cnnp_model_t*)dynamic_model;
648}
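A usage sketch for the dynamic layer; the callback shape is inferred from the call site in _ccv_cnnp_dynamic_model_build (line 573), the exact ccv_cnnp_model_dynamic_f typedef lives in the public header, and _lazy_builder / build_model_for are hypothetical:

	static ccv_cnnp_model_t* _lazy_builder(const ccv_nnc_tensor_param_t* const input_params, const int input_size, void* const context)
	{
		/* Inspect the input shapes and construct a concrete model for them. */
		return build_model_for(input_params, input_size); /* hypothetical helper */
	}

	ccv_cnnp_model_t* const lazy = ccv_cnnp_dynamic_new(_lazy_builder, 0, "lazy");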
649
650static ccv_cnnp_model_t* _ccv_cnnp_dynamic_model_copy(const ccv_cnnp_model_t* const super, void* const context)
651{
652 const ccv_cnnp_dynamic_model_t* const self = (const ccv_cnnp_dynamic_model_t*)super;
653 return ccv_cnnp_dynamic_new(self->func, self->context, self->super.name);
654}
655
656// MARK - Command Layer
657
658typedef struct {
659 ccv_cnnp_model_t super;
660 ccv_nnc_cmd_t cmd;
661 ccv_nnc_hint_t hint;
662 ccv_nnc_tensor_symbol_t* input_symbols; // This is only valid for INIT_SHARED_TENSOR / INIT_SHARED_TENSOR_AS_TRAINABLE
663 ccv_nnc_tensor_symbol_t* output_symbols; // This is just for the output symbol (in case we need to have no tensor symbol).
664 ccv_cnnp_cmd_exec_io_t* inputs;
665 int flags;
666 int input_size;
667 int* outputs;
668 int output_size;
669} ccv_cnnp_model_cmd_exec_t;
670
671static void _ccv_cnnp_cmd_exec_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
672{
673 ccv_cnnp_model_cmd_exec_t* const self = (ccv_cnnp_model_cmd_exec_t*)super;
674 ccv_nnc_tensor_param_t input_params[ccv_max(1, self->input_size)];
675 int i, j;
676 for (i = 0, j = 0; i < self->input_size; i++)
677 if (self->inputs[i].type == CCV_CNNP_IO)
678 {
679 self->input_symbols[i] = inputs[j++];
680 input_params[i] = ccv_nnc_tensor_symbol_params(graph, self->input_symbols[i]);
681 } else if (self->inputs[i].type == CCV_CNNP_NO_TENSOR) {
682 self->input_symbols[i] = NO_TENSOR_SYMBOL;
683 } else if (!self->input_symbols[i].graph) {
684 // Otherwise, we only create this symbol if it doesn't exist.
685 const ccv_nnc_tensor_param_t params = self->inputs[i].init_state.info;
686 input_params[i] = params;
687 self->input_symbols[i] = ccv_nnc_tensor_symbol_new(graph, params, 0);
688 }
689 // We cannot simply mark the outputs as auto, because the subsequent build call may require this output to have params setup.
690 // Infer the parameters here.
691 ccv_nnc_tensor_param_t output_params[ccv_max(1, self->output_size)];
692 ccv_nnc_hint_tensor_auto(self->cmd, input_params, self->input_size, self->hint, output_params, self->output_size);
693 for (i = 0, j = 0; i < self->output_size; i++)
694 if (self->outputs[i] == CCV_CNNP_IO)
695 self->output_symbols[i] = outputs[j++] = ccv_nnc_tensor_symbol_new(graph, output_params[i], 0);
696 else if (self->outputs[i] == CCV_CNNP_TENSOR_NOT_OUTPUT)
697 self->output_symbols[i] = ccv_nnc_tensor_symbol_new(graph, output_params[i], 0);
698 else
699 self->output_symbols[i] = NO_TENSOR_SYMBOL;
700 ccv_nnc_graph_exec_symbol_new(graph, self->cmd, self->input_symbols, self->input_size, self->output_symbols, self->output_size, 0);
701}
702
703static void _ccv_cnnp_cmd_exec_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
704{
705 ccv_cnnp_model_cmd_exec_t* const self = (ccv_cnnp_model_cmd_exec_t*)super;
706 int i;
707 for (i = 0; i < self->input_size; i++)
708 if (self->inputs[i].type == CCV_CNNP_INIT_SHARED_TENSOR || self->inputs[i].type == CCV_CNNP_INIT_SHARED_TENSOR_AS_TRAINABLE)
709 self->inputs[i].init_state.init(self->input_symbols[i], initializer, context, self->inputs[i].init_state.context);
710}
711
712static void _ccv_cnnp_cmd_exec_add_to_output(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const outputs)
713{
714 ccv_cnnp_model_cmd_exec_t* const self = (ccv_cnnp_model_cmd_exec_t*)super;
715 int i;
716 for (i = 0; i < self->input_size; i++)
717 if (self->inputs[i].type == CCV_CNNP_INIT_SHARED_TENSOR)
718			add_to_array(outputs, self->input_symbols[i]); // Push this as retainable because it needs to be initialized.
719}
720
721static void _ccv_cnnp_cmd_exec_add_to_parameter(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const parameters)
722{
723 ccv_cnnp_model_cmd_exec_t* const self = (ccv_cnnp_model_cmd_exec_t*)super;
724 int i;
725 for (i = 0; i < self->input_size; i++)
726 if (self->inputs[i].type == CCV_CNNP_INIT_SHARED_TENSOR_AS_TRAINABLE)
727 add_to_array(parameters, self->input_symbols[i]); // Push this as parameter.
728}
729
730static void _ccv_cnnp_cmd_exec_deinit(ccv_cnnp_model_t* const super)
731{
732 ccv_cnnp_model_cmd_exec_t* const self = (ccv_cnnp_model_cmd_exec_t*)super;
733 int i, j;
734 for (i = 0; i < self->input_size; i++)
735 if ((self->inputs[i].type == CCV_CNNP_INIT_SHARED_TENSOR || self->inputs[i].type == CCV_CNNP_INIT_SHARED_TENSOR_AS_TRAINABLE) &&
736 self->inputs[i].init_state.context)
737 {
738 void* const context = self->inputs[i].init_state.context;
739 if (self->inputs[i].init_state.deinit)
740 self->inputs[i].init_state.deinit(context);
741 self->inputs[i].init_state.init = 0;
742 self->inputs[i].init_state.deinit = 0;
743 self->inputs[i].init_state.context = 0;
744 for (j = i + 1; j < self->input_size; j++)
745 if (self->inputs[j].init_state.context == context)
746 {
747 self->inputs[j].init_state.init = 0;
748 self->inputs[j].init_state.deinit = 0;
749 self->inputs[j].init_state.context = 0;
750 }
751 }
752}
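Note the double-free guard in the deinit loop above: after a model copy, several inputs can share one init_state.context, so once a context is freed every later slot holding the same pointer is cleared. Illustrative comment sketch (hypothetical indices):

// Suppose inputs[1] and inputs[3] share context c:
//   i = 1: deinit(c) runs, slot 1 is cleared, and the inner j-loop clears slot 3 too;
//   i = 3: context is already 0, the condition fails, and c is freed exactly once.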
753
754static ccv_cnnp_model_t* _ccv_cnnp_cmd_exec_copy(const ccv_cnnp_model_t* const super, void* const context);
755
756static const ccv_cnnp_model_vtab_t ccv_cnnp_cmd_exec_isa = {
757 .build = _ccv_cnnp_cmd_exec_build,
758 .init_states = _ccv_cnnp_cmd_exec_init_states,
759 .add_to_parameter = _ccv_cnnp_cmd_exec_add_to_parameter,
760 .add_to_output = _ccv_cnnp_cmd_exec_add_to_output,
761 .deinit = _ccv_cnnp_cmd_exec_deinit,
762 .copy = _ccv_cnnp_cmd_exec_copy,
763};
764
765static ccv_cnnp_model_t* _ccv_cnnp_cmd_exec(const ccv_nnc_cmd_t cmd, int copy_io, const ccv_nnc_hint_t hint, const int flags, const ccv_cnnp_cmd_exec_io_t* const inputs, const int input_size, const int* const outputs, const int output_size, const char* const name)
766{
767	assert(input_size >= 0);
768	assert(output_size > 0);
769 int i;
770 int io_input_size = 0;
771 for (i = 0; i < input_size; i++)
772 if (inputs[i].type == CCV_CNNP_IO)
773 ++io_input_size;
774 else {
775			assert(inputs[i].type == CCV_CNNP_INIT_SHARED_TENSOR || inputs[i].type == CCV_CNNP_INIT_SHARED_TENSOR_AS_TRAINABLE);
776			assert(inputs[i].init_state.init);
777 }
778 int io_output_size = 0;
779 for (i = 0; i < output_size; i++)
780 if (outputs[i] == CCV_CNNP_IO)
781 ++io_output_size;
782 else {
783			assert(outputs[i] == CCV_CNNP_TENSOR_NOT_OUTPUT || outputs[i] == CCV_CNNP_NO_TENSOR);
784 }
785	assert(io_output_size > 0);
786	ccv_cnnp_model_cmd_exec_t* const model_cmd_exec = (ccv_cnnp_model_cmd_exec_t*)cccalloc(1, sizeof(ccv_cnnp_model_cmd_exec_t) + sizeof(ccv_nnc_tensor_symbol_t) * (io_output_size + input_size + output_size) + sizeof(ccv_cnnp_cmd_exec_io_t) * input_size + sizeof(int) * output_size);
787 model_cmd_exec->super.isa = &ccv_cnnp_cmd_exec_isa;
788 model_cmd_exec->super.input_size = io_input_size;
789 model_cmd_exec->super.outputs = (ccv_nnc_tensor_symbol_t*)(model_cmd_exec + 1);
790 model_cmd_exec->super.output_size = io_output_size;
791 ccv_cnnp_model_copy_name(&model_cmd_exec->super, name);
792 model_cmd_exec->cmd = cmd;
793 model_cmd_exec->hint = hint;
794 model_cmd_exec->flags = flags;
795 model_cmd_exec->input_size = input_size;
796 model_cmd_exec->input_symbols = model_cmd_exec->super.outputs + io_output_size;
797 model_cmd_exec->output_symbols = model_cmd_exec->input_symbols + input_size;
798 model_cmd_exec->inputs = (ccv_cnnp_cmd_exec_io_t*)(model_cmd_exec->output_symbols + output_size);
799 if (input_size > 0)
800 {
801 memcpy(model_cmd_exec->inputs, inputs, sizeof(ccv_cnnp_cmd_exec_io_t) * input_size);
802 if (copy_io)
803 for (i = 0; i < input_size; i++)
804 if (inputs[i].type != CCV_CNNP_IO && inputs[i].init_state.copy)
805 model_cmd_exec->inputs[i].init_state.context = inputs[i].init_state.copy(inputs[i].init_state.context);
806 }
807 model_cmd_exec->output_size = output_size;
808 model_cmd_exec->outputs = (int*)(model_cmd_exec->inputs + input_size);
809 if (output_size > 0)
810 memcpy(model_cmd_exec->outputs, outputs, sizeof(int) * output_size);
811 return (ccv_cnnp_model_t*)model_cmd_exec;
812}
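The constructor above carves everything out of a single cccalloc block; a sketch of the layout it establishes:

// One allocation:
// [ccv_cnnp_model_cmd_exec_t struct]
// [super.outputs:  io_output_size  ccv_nnc_tensor_symbol_t]
// [input_symbols:  input_size      ccv_nnc_tensor_symbol_t]
// [output_symbols: output_size     ccv_nnc_tensor_symbol_t]
// [inputs:         input_size      ccv_cnnp_cmd_exec_io_t]
// [outputs:        output_size     int]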
813
814ccv_cnnp_model_t* ccv_cnnp_cmd_exec(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, const ccv_cnnp_cmd_exec_io_t* const inputs, const int input_size, const int* const outputs, const int output_size, const char* const name)
815{
816 return _ccv_cnnp_cmd_exec(cmd, 0, hint, flags, inputs, input_size, outputs, output_size, name);
817}
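As a hedged usage sketch (CMD_EWPROD_FORWARD and the scale tensor are illustrative assumptions, not taken from this file): wrap an element-wise product whose second factor is a shared tensor seeded from a host tensor via ccv_cnnp_cmd_exec_io_copy, defined later in this file:

// Sketch only: assumes a pre-filled ccv_nnc_tensor_t* scale.
ccv_cnnp_cmd_exec_io_t inputs[] = {
	{ .type = CCV_CNNP_IO }, // supplied by the caller at build time
	{ .type = CCV_CNNP_INIT_SHARED_TENSOR, .init_state = ccv_cnnp_cmd_exec_io_copy(scale) },
};
const int outputs[] = { CCV_CNNP_IO }; // surfaced as the model's output
ccv_cnnp_model_t* const mul = ccv_cnnp_cmd_exec(CMD_EWPROD_FORWARD(), ccv_nnc_no_hint, 0, inputs, 2, outputs, 1, "mul");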
818
819static ccv_cnnp_model_t* _ccv_cnnp_cmd_exec_copy(const ccv_cnnp_model_t* const super, void* const context)
820{
821 const ccv_cnnp_model_cmd_exec_t* const self = (const ccv_cnnp_model_cmd_exec_t*)super;
822 return _ccv_cnnp_cmd_exec(self->cmd, 1, self->hint, self->flags, self->inputs, self->input_size, self->outputs, self->output_size, self->super.name);
823}
824
825static void _ccv_cnnp_cmd_exec_io_copy(const ccv_nnc_tensor_symbol_t tensor_symbol, const ccv_cnnp_state_initializer_f initializer, void* const initializer_context, void* const context)
826{
827	initializer(initializer_context, CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, (ccv_nnc_tensor_t*)context, tensor_symbol);
828}
829
830ccv_cnnp_cmd_exec_io_init_state_t ccv_cnnp_cmd_exec_io_copy(const ccv_nnc_tensor_t* const tensor)
831{
832 return (ccv_cnnp_cmd_exec_io_init_state_t){
833 .info = tensor->info,
834 .context = (void *)tensor,
835 .init = _ccv_cnnp_cmd_exec_io_copy,
836 };
837}
838
839typedef struct {
840 ccv_nnc_cmd_t cmd;
841 ccv_nnc_hint_t hint;
842 int flags;
843} ccv_cnnp_cmd_exec_io_set_by_t;
844
845static void _ccv_cnnp_cmd_exec_io_set_by(const ccv_nnc_tensor_symbol_t tensor_symbol, const ccv_cnnp_state_initializer_f initializer, void* const initializer_context, void* const context)
846{
847 const ccv_cnnp_cmd_exec_io_set_by_t* const set_by = (ccv_cnnp_cmd_exec_io_set_by_t*)context;
848 initializer(initializer_context, set_by->cmd, set_by->hint, set_by->flags, 0, tensor_symbol);
849}
850
851static void* _ccv_cnnp_cmd_exec_io_set_by_copy(void* const context)
852{
853	ccv_cnnp_cmd_exec_io_set_by_t* const set_by = (ccv_cnnp_cmd_exec_io_set_by_t*)ccmalloc(sizeof(ccv_cnnp_cmd_exec_io_set_by_t));
854 memcpy(set_by, context, sizeof(ccv_cnnp_cmd_exec_io_set_by_t));
855 return set_by;
856}
857
858ccv_cnnp_cmd_exec_io_init_state_t ccv_cnnp_cmd_exec_io_set_by(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, const ccv_nnc_tensor_param_t params)
859{
860	ccv_cnnp_cmd_exec_io_set_by_t* const set_by = (ccv_cnnp_cmd_exec_io_set_by_t*)ccmalloc(sizeof(ccv_cnnp_cmd_exec_io_set_by_t));
861 set_by->cmd = cmd;
862 set_by->hint = hint;
863 set_by->flags = flags;
864 return (ccv_cnnp_cmd_exec_io_init_state_t){
865 .info = params,
866 .context = set_by,
867 .init = _ccv_cnnp_cmd_exec_io_set_by,
868 .copy = _ccv_cnnp_cmd_exec_io_set_by_copy,
869		.deinit = ccfree,
870 };
871}
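And a sketch of the set_by variant for a trainable input initialized by running a command instead of copying a tensor (CMD_RANDOM_UNIFORM_FORWARD and the CPU_TENSOR_NCHW shape macro are assumptions for illustration):

// Sketch only: a 64-element 32F weight filled uniformly in [-1, 1] at init_states time.
ccv_cnnp_cmd_exec_io_t weight = {
	.type = CCV_CNNP_INIT_SHARED_TENSOR_AS_TRAINABLE,
	.init_state = ccv_cnnp_cmd_exec_io_set_by(CMD_RANDOM_UNIFORM_FORWARD(-1, 1), ccv_nnc_no_hint, 0, CPU_TENSOR_NCHW(32F, 64)),
};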