Bug Summary

File: nnc/ccv_cnnp_model_core.c
Warning: line 259, column 36
Array access (via field 'vals') results in a null pointer dereference
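
The warning follows from khash's error contract: kh_put() signals an internal allocation failure by setting *ret to -1 and returning kh_end(h), and on a freshly kh_init()'ed map the vals array stays NULL until the first successful resize. The code at line 258 below tests ret != 0, which is also true when ret == -1, so the kh_val() write at line 259 can store through the NULL vals. A minimal sketch of the pattern (hypothetical names, for illustration only; it assumes nothing beyond the khash behavior the analyzer trail below walks through):

#include <stdint.h>
#include "3rdparty/khash/khash.h"

KHASH_MAP_INIT_INT64(demo, void*) /* hypothetical map type */

static void buggy_insert(kh_demo_t* const h, const uint64_t key, void* const val)
{
	int ret;
	const khiter_t k = kh_put(demo, h, key, &ret); /* sets ret = -1 and returns kh_end(h) on allocation failure */
	if (ret != 0) /* wrong: also true when ret == -1 */
		kh_val(h, k) = val; /* h->vals is still NULL if the map's first resize failed */
}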

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-unknown-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name ccv_cnnp_model_core.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 2 -pic-is-pie -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -target-feature +sse2 -tune-cpu generic -debugger-tuning=gdb -fdebug-compilation-dir=/home/liu/actions-runner/_work/ccv/ccv/lib/nnc -fcoverage-compilation-dir=/home/liu/actions-runner/_work/ccv/ccv/lib/nnc -resource-dir /usr/local/lib/clang/19 -I ../ -I /usr/local/cuda/include -D HAVE_CBLAS -D HAVE_LIBPNG -D HAVE_LIBJPEG -D HAVE_FFTW3 -D HAVE_PTHREAD -D HAVE_LIBLINEAR -D HAVE_TESSERACT -D HAVE_AVCODEC -D HAVE_AVFORMAT -D HAVE_AVUTIL -D HAVE_SWSCALE -D HAVE_SSE2 -D HAVE_GSL -D HAVE_CUDA -D HAVE_CUDNN -D HAVE_NCCL -D USE_SYSTEM_CUB -I /usr/local/include -internal-isystem /usr/local/lib/clang/19/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/12/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -ferror-limit 19 -fgnuc-version=4.2.1 -fskip-odr-check-in-gmf -vectorize-loops -vectorize-slp -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/liu/actions-runner/_work/ccv/ccv/_analyze/2024-12-04-153127-42938-1 -x c ccv_cnnp_model_core.c
1#include "ccv_nnc.h"
2#include "ccv_nnc_easy.h"
3#include "ccv_nnc_internal.h"
4#include "ccv_internal.h"
5#include "_ccv_cnnp_model.h"
6#include "3rdparty/khash/khash.h"
7
8// MARK - Basic Layers
9
10static const ccv_cnnp_model_vtab_t ccv_cnnp_input_isa;
11
12#define CCV_CNNP_IS_MODEL_INPUT(x) ((x)->isa == &ccv_cnnp_input_isa)
13
14#define CCV_CNNP_IS_MODEL_PARAMETER(x) ((x)->param_ref != 0 || (x)->param_sel != 0)
15
16typedef struct {
17 ccv_cnnp_model_t super;
18 int sequence_size;
19 ccv_cnnp_model_t* sequence[1];
20} ccv_cnnp_sequential_model_t;
21
22static void _ccv_cnnp_sequential_model_deinit(ccv_cnnp_model_t* const super)
23{
24 ccv_cnnp_sequential_model_t* const self = (ccv_cnnp_sequential_model_t*)super;
25 int i, j;
26 for (i = 0; i < self->sequence_size; i++)
27 {
28 ccv_cnnp_model_t* const model = self->sequence[i];
29 if (!model)
30 continue;
31 ccv_cnnp_model_free(model);
32 for (j = i + 1; j < self->sequence_size; j++)
33 if (self->sequence[j] == model)
34 self->sequence[j] = 0;
35 }
36}
37
38static void _ccv_cnnp_sequential_model_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
39{
40 ccv_cnnp_sequential_model_t* const self = (ccv_cnnp_sequential_model_t*)super;
41 PRINT(CCV_CLI_VERBOSE, "[cnnp_sequential_model_build] 1. %p, sequence_size: %d\n", self, self->sequence_size);
42 ccv_cnnp_model_t* const sub_model = self->sequence[0];
43 // Go through each sub model to build the graph.
44 ccv_nnc_tensor_symbol_t input;
45 sub_model->data = self->super.data;
46 ccv_cnnp_model_build(sub_model, graph, inputs, input_size, &input, 1);
47 sub_model->data = 0;
48 int i;
49 for (i = 1; i < self->sequence_size; i++)
50 {
51 ccv_nnc_tensor_symbol_t output;
52 ccv_cnnp_model_t* const sub_model = self->sequence[i];
53 // Go through each sub model to build the graph.
54 sub_model->data = self->super.data;
55 ccv_cnnp_model_build(sub_model, graph, &input, 1, &output, 1);
56 sub_model->data = 0;
57 input = output;
58 }
59 outputs[0] = input;
60 PRINT(CCV_CLI_VERBOSE, "[cnnp_sequential_model_build] 2. %p\n", self);
61}
62
63static void _ccv_cnnp_sequential_model_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
64{
65 ccv_cnnp_sequential_model_t* const self = (ccv_cnnp_sequential_model_t*)super;
66 int i;
67 for (i = 0; i < self->sequence_size; i++)
68 ccv_cnnp_model_init_states(self->sequence[i], graph, initializer, context);
69}
70
71static void _ccv_cnnp_sequential_model_set_is_test(ccv_cnnp_model_t* const super, const int is_test, const ccv_cnnp_cmd_updater_f updater, void* const context)
72{
73 ccv_cnnp_sequential_model_t* const self = (ccv_cnnp_sequential_model_t*)super;
74 int i;
75 for (i = 0; i < self->sequence_size; i++)
76 ccv_cnnp_model_set_is_test(self->sequence[i], is_test, updater, context);
77}
78
79static ccv_cnnp_model_t* _ccv_cnnp_sequential_model_copy(const ccv_cnnp_model_t* const super, void* const context);
80
81static void _ccv_cnnp_sequential_model_add_to_parameter_indices(ccv_cnnp_model_t* const super, const int index, ccv_array_t* const parameter_indices)
82{
83 ccv_cnnp_sequential_model_t* const self = (ccv_cnnp_sequential_model_t*)super;
84 int i;
85 for (i = 0; i < self->sequence_size; i++)
86 ccv_cnnp_model_add_to_parameter_indices(self->sequence[i], index, parameter_indices);
87}
88
89static void _ccv_cnnp_sequential_model_notify(const ccv_cnnp_model_t* const super, const int tag, void* const payload)
90{
91 ccv_cnnp_sequential_model_t* const self = (ccv_cnnp_sequential_model_t*)super;
92 int i;
93 for (i = 0; i < self->sequence_size; i++)
94 ccv_cnnp_model_notify(self->sequence[i], tag, payload);
95}
96
97static const ccv_cnnp_model_vtab_t ccv_cnnp_sequential_model_isa = {
98 .deinit = _ccv_cnnp_sequential_model_deinit,
99 .build = _ccv_cnnp_sequential_model_build,
100 .init_states = _ccv_cnnp_sequential_model_init_states,
101 .copy = _ccv_cnnp_sequential_model_copy,
102 .set_is_test = _ccv_cnnp_sequential_model_set_is_test,
103 .add_to_parameter_indices = _ccv_cnnp_sequential_model_add_to_parameter_indices,
104 .notify = _ccv_cnnp_sequential_model_notify,
105};
106
107KHASH_MAP_INIT_INT64(model, ccv_cnnp_model_t*)
108
109static ccv_cnnp_model_t* _ccv_cnnp_sequential_model_copy(const ccv_cnnp_model_t* const super, void* const context)
110{
111 const ccv_cnnp_sequential_model_t* const self = (const ccv_cnnp_sequential_model_t*)super;
112 ccv_cnnp_sequential_model_t* const sequential_model = (ccv_cnnp_sequential_model_t*)cccalloc(1, sizeof(ccv_cnnp_sequential_model_t) + sizeof(ccv_cnnp_model_t*) * (self->sequence_size - 1) + sizeof(ccv_nnc_tensor_symbol_t));
113 sequential_model->super.isa = &ccv_cnnp_sequential_model_isa;
114 sequential_model->super.input_size = 1;
115 sequential_model->super.outputs = (ccv_nnc_tensor_symbol_t*)(sequential_model->sequence + self->sequence_size);
116 sequential_model->super.output_size = 1;
117 ccv_cnnp_model_copy_name(&sequential_model->super, self->super.name);
118 sequential_model->sequence_size = self->sequence_size;
119 int i;
120 khash_t(model)* model_map = context ? (khash_t(model)*)context : kh_init(model);
121 for (i = 0; i < self->sequence_size; i++)
122 {
123 ccv_cnnp_model_t* const sub_model = self->sequence[i];
124 int ret;
125 khiter_t k = kh_put(model, model_map, (uint64_t)(uintptr_t)sub_model, &ret);
126 ccv_cnnp_model_t* model_copy;
127 if (ret != 0)
128 model_copy = kh_val(model_map, k) = _ccv_cnnp_model_copy(sub_model, model_map);
129 else
130 model_copy = kh_val(model_map, k);
131 sequential_model->sequence[i] = model_copy;
132 }
133 if (!context)
134 kh_destroy(model, model_map);
135 return (ccv_cnnp_model_t*)sequential_model;
136}
137
138ccv_cnnp_model_t* ccv_cnnp_sequential_new(ccv_cnnp_model_t* const* const models, const int model_size, const int is_trainable, const char* const name)
139{
140 assert(model_size > 0);
141 ccv_cnnp_sequential_model_t* const sequential_model = (ccv_cnnp_sequential_model_t*)cccalloc(1, sizeof(ccv_cnnp_sequential_model_t) + sizeof(ccv_cnnp_model_t*) * (model_size - 1) + sizeof(ccv_nnc_tensor_symbol_t));
142 sequential_model->super.isa = &ccv_cnnp_sequential_model_isa;
143 sequential_model->super.input_size = models[0]->input_size;
144 sequential_model->super.outputs = (ccv_nnc_tensor_symbol_t*)(sequential_model->sequence + model_size);
145 sequential_model->super.output_size = 1;
146 sequential_model->super.is_trainable = is_trainable;
147 ccv_cnnp_model_copy_name(&sequential_model->super, name);
148 sequential_model->sequence_size = model_size;
149 memcpy(sequential_model->sequence, models, sizeof(ccv_cnnp_model_t*) * model_size);
150 return (ccv_cnnp_model_t*)sequential_model;
151}
152
153typedef struct {
154 ccv_cnnp_model_t super;
155 // The number of the model's outputs; it differs from super.output_size, as the latter counts actual tensor symbols.
156 int model_output_size;
157 // Similar to the sequential model, but here the sequence is just topologically sorted models.
158 int sequence_size;
159 int* model_outputs; // Which models, by index in the sequence, produce outputs.
160 ccv_cnnp_model_io_t sequence[1];
161} ccv_cnnp_functional_model_t;
162
163static void _ccv_cnnp_functional_model_deinit(ccv_cnnp_model_t* const super)
164{
165 ccv_cnnp_functional_model_t* const self = (ccv_cnnp_functional_model_t*)super;
166 int i, j = 0, k;
167 for (i = 0; i < self->sequence_size; i++)
168 {
169 ccv_cnnp_model_t* const model = self->sequence[i]->model;
170 if (!model)
171 continue;
172 self->sequence[j++] = (ccv_cnnp_model_io_t)model;
173 // Go through all their IO to remove itself as model.
174 assert(model->io);
175 for (k = 0; k < model->io->rnum; k++)
176 {
177 ccv_cnnp_model_io_t model_io = *(ccv_cnnp_model_io_t*)ccv_array_get(model->io, k);
178 model_io->model = 0;
179 }
180 }
181 for (i = 0; i < j; i++)
182 ccv_cnnp_model_free((ccv_cnnp_model_t*)self->sequence[i]);
183}
184
185KHASH_MAP_INIT_INT64(io_node, ccv_array_t*)
11
Null pointer value stored to field 'vals'
20
Taking true branch
21
Taking false branch
22
Calling 'kh_resize_io_node'
23
Taking true branch
24
Assuming the condition is false
25
Taking false branch
26
'?' condition is true
27
Assuming 'new_flags' is null
28
Taking true branch
29
Returning without writing to 'h->vals'
30
Returning from 'kh_resize_io_node'
31
Taking true branch
32
Returning without writing to 'h->vals'
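
Condensed from the kh_resize_io_node expansion the trail above steps through (paraphrased, not verbatim khash source), the early-return path that leaves vals untouched is:

	/* inside kh_resize_io_node(), when allocating the new flags array fails: */
	new_flags = (khint32_t*)malloc(...);
	if (!new_flags)
		return -1; /* h->vals is never written; kh_put_io_node() then sets *ret = -1 and returns kh_end() */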
186
187typedef struct {
188 ccv_array_t* nodes;
189 ccv_nnc_graph_exec_symbol_new_hook_f previous_func;
190 void* previous_context;
191} ccv_functional_model_build_node_hook_t;
192
193static void _ccv_cnnp_functional_model_build_node_new(void* context, const ccv_nnc_graph_exec_symbol_t symbol, const ccv_nnc_cmd_t cmd, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, const ccv_nnc_tensor_symbol_t* const outputs, const int output_size, const char* const name)
194{
195 ccv_functional_model_build_node_hook_t* const hook = (ccv_functional_model_build_node_hook_t*)context;
196 ccv_array_push(hook->nodes, &symbol);
197 if (hook->previous_func)
198 hook->previous_func(hook->previous_context, symbol, cmd, inputs, input_size, outputs, output_size, name);
199}
200
201static void _ccv_cnnp_functional_model_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
202{
203 ccv_cnnp_functional_model_t* const self = (ccv_cnnp_functional_model_t*)super;
204 PRINT(CCV_CLI_VERBOSE, "[cnnp_functional_model_build] 1. %p, input_size: %d, output_size: %d\n", self, input_size, output_size);
1
Assuming the condition is false
2
Taking false branch
3
Loop condition is false. Exiting loop
205 assert(self->super.input_size == input_size);
4
Assuming 'input_size' is equal to field 'input_size'
5
Taking true branch
206 assert(self->super.output_size == output_size);
6
Assuming 'output_size' is equal to field 'output_size'
7
Taking true branch
207 int i, j, k;
208 for (i = 0; i < self->super.input_size; i++)
8
Assuming 'i' is >= field 'input_size'
9
Loop condition is false. Execution continues on line 210
209 self->sequence[i]->outputs[0] = self->sequence[i]->model->outputs[0] = inputs[i]; // Assigning the output symbol of input layer to be the input symbol.
210 ccv_array_t* input_symbols = ccv_array_new(sizeof(ccv_nnc_tensor_symbol_t), 1, 0);
211 ccv_array_t* parameter_indices = 0;
212 khash_t(io_node)* io_node_map = kh_init(io_node);
10
Calling 'kh_init_io_node'
12
Returning from 'kh_init_io_node'
213 for (i = self->super.input_size; i < self->sequence_size; i++)
13
Assuming 'i' is < field 'sequence_size'
14
Loop condition is true. Entering loop body
214 {
215 ccv_cnnp_model_t* const sub_model = self->sequence[i]->model;
216 ccv_array_clear(input_symbols);
217 const ccv_array_t* const incomings = self->sequence[i]->incomings;
218 if (incomings)
15
Assuming 'incomings' is null
16
Taking false branch
219 for (j = 0; j < incomings->rnum; j++)
220 {
221 const ccv_cnnp_model_io_t input = *(ccv_cnnp_model_io_t*)ccv_array_get(incomings, j);
222 if (CCV_CNNP_IS_MODEL_PARAMETER(input))
223 {
224 if (!parameter_indices)
225 parameter_indices = ccv_array_new(sizeof(int), 0, 0);
226 else
227 ccv_array_clear(parameter_indices);
228 const int param_sel = input->param_sel > 0 ? input->param_sel - 1 : input->param_sel;
229 assert(input->param_sel != 0);
230 ccv_cnnp_model_add_to_parameter_indices(input->model, param_sel, parameter_indices);
231 assert(parameter_indices->rnum > 0);
232 const int param_ref = input->param_ref > 0 ? input->param_ref - 1 : input->param_ref;
233 assert(input->param_ref != 0);
234 if (param_ref >= 0)
235 {
236 assert(param_ref < parameter_indices->rnum);
237 const ccv_nnc_tensor_symbol_t parameter = ccv_cnnp_parameter_from_indice(super, *(int*)ccv_array_get(parameter_indices, param_ref));
238 ccv_array_push(input_symbols, &parameter);
239 } else // Otherwise, all of them.
240 for (k = 0; k < parameter_indices->rnum; k++)
241 {
242 const ccv_nnc_tensor_symbol_t parameter = ccv_cnnp_parameter_from_indice(super, *(int*)ccv_array_get(parameter_indices, k));
243 ccv_array_push(input_symbols, &parameter);
244 }
245 } else {
246 for (k = 0; k < input->model->output_size; k++)
247 ccv_array_push(input_symbols, &input->outputs[k]);
248 }
249 }
250 // Go through each sub model to build the graph.
251 ccv_array_t* nodes;
252 ccv_functional_model_build_node_hook_t hook;
253 const ccv_array_t* const dependencies = self->sequence[i]->dependencies;
254 if ((dependencies && dependencies->rnum > 0) || self->sequence[i]->dependents > 0)
17
Assuming 'dependencies' is non-null
18
Assuming field 'rnum' is > 0
255 {
256 int ret;
257 khiter_t k = kh_put(io_node, io_node_map, (uint64_t)(uintptr_t)self->sequence[i], &ret);
19
Calling 'kh_put_io_node'
33
Returning from 'kh_put_io_node'
258 if (ret != 0)
33.1
'ret' is not equal to 0
34
Taking true branch
259 nodes = kh_val(io_node_map, k) = ccv_array_new(sizeof(ccv_nnc_graph_exec_symbol_t), 1, 0);
35
Array access (via field 'vals') results in a null pointer dereference (a hardened variant is sketched after line 264 below)
260 else
261 nodes = kh_val(io_node_map, k);
262 hook.nodes = nodes;
263 hook.previous_context = ccv_nnc_graph_exec_symbol_new_hook(graph, _ccv_cnnp_functional_model_build_node_new, &hook, &hook.previous_func);
264 }
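
One way to break the path flagged above (an illustrative hardening under the khash contract shown earlier, not the upstream fix) is to reject kh_put's -1 before touching vals:

	int ret;
	const khiter_t k = kh_put(io_node, io_node_map, (uint64_t)(uintptr_t)self->sequence[i], &ret);
	assert(ret >= 0); /* ret == -1 means kh_put's internal resize failed and vals may still be NULL */
	if (ret != 0)
		nodes = kh_val(io_node_map, k) = ccv_array_new(sizeof(ccv_nnc_graph_exec_symbol_t), 1, 0);
	else
		nodes = kh_val(io_node_map, k);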
265 sub_model->data = self->super.data;
266 ccv_cnnp_model_build(sub_model, graph, (ccv_nnc_tensor_symbol_t*)ccv_array_get(input_symbols, 0), input_symbols->rnum, self->sequence[i]->outputs, sub_model->output_size);
267 if ((dependencies && dependencies->rnum > 0) || self->sequence[i]->dependents > 0)
268 {
269 ccv_nnc_graph_exec_symbol_new_hook(graph, hook.previous_func, hook.previous_context, 0);
270 if (dependencies)
271 for (j = 0; j < dependencies->rnum; j++)
272 {
273 const ccv_cnnp_model_io_t dependency = *(ccv_cnnp_model_io_t*)ccv_array_get(dependencies, j);
274 khiter_t k = kh_get(io_node, io_node_map, (uint64_t)(uintptr_t)dependency);
275 if (k == kh_end(io_node_map))
276 continue;
277 const ccv_array_t* const dependency_nodes = kh_val(io_node_map, k);
278 int x, y;
279 for (y = 0; y < dependency_nodes->rnum; y++)
280 for (x = 0; x < nodes->rnum; x++)
281 ccv_nnc_graph_exec_symbol_concat(graph, *(ccv_nnc_graph_exec_symbol_t*)ccv_array_get(dependency_nodes, y), *(ccv_nnc_graph_exec_symbol_t*)ccv_array_get(nodes, x));
282 }
283 }
284 sub_model->data = 0;
285 }
286 khiter_t it;
287 for (it = kh_begin(io_node_map); it != kh_end(io_node_map); ++it)
288 {
289 if (!kh_exist(io_node_map, it))
290 continue;
291 ccv_array_t* const nodes = kh_val(io_node_map, it);
292 ccv_array_free(nodes);
293 }
294 kh_destroy(io_node, io_node_map);
295 ccv_array_free(input_symbols);
296 if (parameter_indices)
297 ccv_array_free(parameter_indices);
298 for (i = 0, k = 0; k < self->model_output_size; k++)
299 {
300 ccv_cnnp_model_t* const sub_model = self->sequence[self->model_outputs[k]]->model;
301 for (j = 0; j < sub_model->output_size; j++)
302 outputs[i + j] = self->sequence[self->model_outputs[k]]->outputs[j];
303 i += sub_model->output_size;
304 }
305 assert(i == output_size);
306 PRINT(CCV_CLI_VERBOSE, "[cnnp_functional_model_build] 2. %p\n", self);
307}
308
309static void _ccv_cnnp_functional_model_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
310{
311 ccv_cnnp_functional_model_t* const self = (ccv_cnnp_functional_model_t*)super;
312 int i;
313 for (i = self->super.input_size; i < self->sequence_size; i++)
314 ccv_cnnp_model_init_states(self->sequence[i]->model, graph, initializer, context);
315}
316
317static void _ccv_cnnp_functional_model_set_is_test(ccv_cnnp_model_t* const super, const int is_test, const ccv_cnnp_cmd_updater_f updater, void* const context)
318{
319 ccv_cnnp_functional_model_t* const self = (ccv_cnnp_functional_model_t*)super;
320 int i;
321 for (i = self->super.input_size; i < self->sequence_size; i++)
322 ccv_cnnp_model_set_is_test(self->sequence[i]->model, is_test, updater, context);
323}
324
325static void _ccv_cnnp_functional_model_add_to_parameter_indices(ccv_cnnp_model_t* const super, const int index, ccv_array_t* const parameter_indices)
326{
327 ccv_cnnp_functional_model_t* const self = (ccv_cnnp_functional_model_t*)super;
328 int i;
329 for (i = self->super.input_size; i < self->sequence_size; i++)
330 ccv_cnnp_model_add_to_parameter_indices(self->sequence[i]->model, index, parameter_indices);
331}
332
333static void _ccv_cnnp_functional_model_notify(const ccv_cnnp_model_t* const super, const int tag, void* const payload)
334{
335 ccv_cnnp_functional_model_t* const self = (ccv_cnnp_functional_model_t*)super;
336 int i;
337 for (i = 0; i < self->sequence_size; i++)
338 {
339 const ccv_cnnp_model_t* const model = self->sequence[i]->model;
340 ccv_cnnp_model_notify(model, tag, payload);
341 }
342}
343
344static ccv_cnnp_model_t* _ccv_cnnp_functional_model_copy(const ccv_cnnp_model_t* const super, void* const context);
345
346static const ccv_cnnp_model_vtab_t ccv_cnnp_functional_model_isa = {
347 .deinit = _ccv_cnnp_functional_model_deinit,
348 .build = _ccv_cnnp_functional_model_build,
349 .init_states = _ccv_cnnp_functional_model_init_states,
350 .copy = _ccv_cnnp_functional_model_copy,
351 .set_is_test = _ccv_cnnp_functional_model_set_is_test,
352 .add_to_parameter_indices = _ccv_cnnp_functional_model_add_to_parameter_indices,
353 .notify = _ccv_cnnp_functional_model_notify,
354};
355
356KHASH_MAP_INIT_INT64(model_io, ccv_cnnp_model_io_t)
357
358static ccv_cnnp_model_t* _ccv_cnnp_functional_model_copy(const ccv_cnnp_model_t* const super, void* const context)
359{
360 const ccv_cnnp_functional_model_t* const self = (const ccv_cnnp_functional_model_t*)super;
361 ccv_cnnp_functional_model_t* const functional_model = (ccv_cnnp_functional_model_t*)cccalloc(1, sizeof(ccv_cnnp_functional_model_t) + sizeof(ccv_cnnp_model_t*) * (self->sequence_size - 1) + sizeof(ccv_nnc_tensor_symbol_t) * self->super.output_size + sizeof(int) * self->model_output_size);
362 functional_model->super.isa = &ccv_cnnp_functional_model_isa;
363 functional_model->super.outputs = (ccv_nnc_tensor_symbol_t*)(functional_model->sequence + self->sequence_size);
364 functional_model->super.output_size = self->super.output_size;
365 functional_model->super.input_size = self->super.input_size;
366 ccv_cnnp_model_copy_name(&functional_model->super, self->super.name);
367 functional_model->sequence_size = self->sequence_size;
368 functional_model->model_output_size = self->model_output_size;
369 functional_model->model_outputs = (int*)(functional_model->super.outputs + functional_model->super.output_size);
370 memcpy(functional_model->model_outputs, self->model_outputs, sizeof(int) * self->model_output_size);
371 // Now the difficult part, copy over the model_io.
372 khash_t(model_io)* model_io_map = kh_init(model_io);
373 khash_t(model)* model_map = context ? (khash_t(model)*)context : kh_init(model);
374 int i, j;
375 for (i = 0; i < self->sequence_size; i++)
376 {
377 const ccv_cnnp_model_t* const sub_model = self->sequence[i]->model;
378 int ret;
379 khiter_t k = kh_put(model, model_map, (uint64_t)(uintptr_t)sub_model, &ret);
380 ccv_cnnp_model_t* model_copy;
381 if (ret != 0)
382 model_copy = kh_val(model_map, k) = _ccv_cnnp_model_copy(sub_model, model_map);
383 else
384 model_copy = kh_val(model_map, k);
385 ccv_cnnp_model_io_t model_io = functional_model->sequence[i] = ccmalloc(sizeof(struct ccv_cnnp_model_io_s) + sizeof(ccv_nnc_tensor_symbol_t) * sub_model->output_size);
386 model_io->param_ref = 0;
387 model_io->param_sel = 0;
388 model_io->visit = 0;
389 model_io->model = model_copy;
390 model_io->dependencies = 0;
391 model_io->dependents = 0;
392 model_io->incomings = 0;
393 model_io->outgoings = 0;
394 model_io->outputs = (ccv_nnc_tensor_symbol_t*)(model_io + 1);
395 if (!model_copy->io)
396 model_copy->io = ccv_array_new(sizeof(ccv_cnnp_model_io_t), 1, 0);
397 ccv_array_push(model_copy->io, &model_io);
398 k = kh_put(model_io, model_io_map, (uint64_t)(uintptr_t)self->sequence[i], &ret);
399 kh_val(model_io_map, k) = functional_model->sequence[i];
400 }
401 for (i = self->super.input_size; i < self->sequence_size; i++)
402 {
403 if (self->sequence[i]->incomings)
404 for (j = 0; j < self->sequence[i]->incomings->rnum; j++)
405 {
406 const ccv_cnnp_model_io_t input = *(ccv_cnnp_model_io_t*)ccv_array_get(self->sequence[i]->incomings, j);
407 if (CCV_CNNP_IS_MODEL_PARAMETER(input)) // I am pretty sure this is not in the model_io_map.
408 {
409 int ret;
410 khiter_t k = kh_put(model_io, model_io_map, (uint64_t)(uintptr_t)input, &ret);
411 if (ret != 0)
412 {
413 // The model may not exist on the map due to wrapping (it is inside another sequential or functional model).
414 khiter_t m = kh_get(model, model_map, (uint64_t)(uintptr_t)input->model);
415 assert(m != kh_end(model_map));
416 ccv_cnnp_model_t* const model_copy = kh_val(model_map, m);
417 ccv_cnnp_model_io_t model_io = ccmalloc(sizeof(struct ccv_cnnp_model_io_s));
418 model_io->param_ref = input->param_ref;
419 model_io->param_sel = input->param_sel;
420 model_io->visit = 0;
421 model_io->model = model_copy;
422 model_io->incomings = 0;
423 model_io->dependencies = 0;
424 model_io->dependents = 0;
425 model_io->outgoings = 0;
426 model_io->outputs = 0;
427 if (!model_copy->io)
428 model_copy->io = ccv_array_new(sizeof(ccv_cnnp_model_io_t), 1, 0);
429 ccv_array_push(model_copy->io, &model_io);
430 kh_val(model_io_map, k) = model_io;
431 if (input->outgoings)
432 {
433 model_io->outgoings = ccv_array_new(sizeof(ccv_cnnp_model_io_t), input->outgoings->rnum, 0);
434 int x;
435 for (x = 0; x < input->outgoings->rnum; x++)
436 {
437 khiter_t k = kh_get(model_io, model_io_map, (uint64_t)(uintptr_t)(*(ccv_cnnp_model_io_t*)ccv_array_get(input->outgoings, x)));
438 assert(k != kh_end(model_io_map));
439 ccv_cnnp_model_io_t outgoing_io = kh_val(model_io_map, k);
440 ccv_array_push(model_io->outgoings, &outgoing_io);
441 }
442 }
443 }
444 }
445 }
446 }
447 if (!context)
448 kh_destroy(model, model_map);
449 for (i = 0; i < self->sequence_size; i++)
450 {
451 const ccv_cnnp_model_io_t model_io = self->sequence[i];
452 ccv_cnnp_model_io_t model_io_copy = functional_model->sequence[i];
453 model_io_copy->param_ref = model_io->param_ref;
454 model_io_copy->param_sel = model_io->param_sel;
455 if (model_io->incomings)
456 {
457 model_io_copy->incomings = ccv_array_new(sizeof(ccv_cnnp_model_io_t), model_io->incomings->rnum, 0);
458 for (j = 0; j < model_io->incomings->rnum; j++)
459 {
460 khiter_t k = kh_get(model_io, model_io_map, (uint64_t)(uintptr_t)(*(ccv_cnnp_model_io_t*)ccv_array_get(model_io->incomings, j)));
461 assert(k != kh_end(model_io_map));
462 ccv_cnnp_model_io_t input_io = kh_val(model_io_map, k);
463 ccv_array_push(model_io_copy->incomings, &input_io);
464 }
465 }
466 if (model_io->dependencies)
467 {
468 model_io_copy->dependencies = ccv_array_new(sizeof(ccv_cnnp_model_io_t), model_io->dependencies->rnum, 0);
469 for (j = 0; j < model_io->dependencies->rnum; j++)
470 {
471 khiter_t k = kh_get(model_io, model_io_map, (uint64_t)(uintptr_t)(*(ccv_cnnp_model_io_t*)ccv_array_get(model_io->dependencies, j)));
472 assert(k != kh_end(model_io_map));
473 ccv_cnnp_model_io_t input_io = kh_val(model_io_map, k);
474 ccv_array_push(model_io_copy->dependencies, &input_io);
475 }
476 }
477 model_io_copy->dependents = model_io->dependents;
478 if (model_io->outgoings)
479 {
480 model_io_copy->outgoings = ccv_array_new(sizeof(ccv_cnnp_model_io_t), model_io->outgoings->rnum, 0);
481 for (j = 0; j < model_io->outgoings->rnum; j++)
482 {
483 khiter_t k = kh_get(model_io, model_io_map, (uint64_t)(uintptr_t)(*(ccv_cnnp_model_io_t*)ccv_array_get(model_io->outgoings, j)));
484 assert(k != kh_end(model_io_map));
485 ccv_cnnp_model_io_t outgoing_io = kh_val(model_io_map, k);
486 ccv_array_push(model_io_copy->outgoings, &outgoing_io);
487 }
488 }
489 }
490 kh_destroy(model_io, model_io_map);
491 return (ccv_cnnp_model_t*)functional_model;
492}
493
494ccv_cnnp_model_t* ccv_cnnp_model_new(const ccv_cnnp_model_io_t* const inputs, const int input_size, const ccv_cnnp_model_io_t* const outputs, const int output_size, const int is_trainable, const char* const name)
495{
496 assert(output_size > 0);
497 // Do topological sort.
498 ccv_array_t* const reverse_top = ccv_array_new(sizeof(ccv_cnnp_model_io_t), output_size, 0);
499 int i, j, k;
500 // Go through the outputs one by one, reverse-traversing them to detect potential overlap (overlap means, for example,
501 // outputs[1] is an incoming node of outputs[0]; thus, if we reversed them, outputs[0] could be built before outputs[1],
502 // causing issues).
503 for (i = 0; i < output_size; i++)
504 outputs[i]->visit = 2;
505 for (i = output_size - 1; i >= 0; i--)
506 {
507 if (outputs[i]->visit == 3) // If we need to remove it, no need to visit.
508 continue;
509 assert(outputs[i]->visit == 2);
510 ccv_array_clear(reverse_top);
511 ccv_array_push(reverse_top, &outputs[i]);
512 for (j = 0; j < reverse_top->rnum; j++)
513 {
514 const ccv_cnnp_model_io_t output = *(ccv_cnnp_model_io_t*)ccv_array_get(reverse_top, j);
515 assert(!CCV_CNNP_IS_MODEL_INPUT(output->model));
516 // If it is input, push it here.
517 if (output->incomings && !CCV_CNNP_IS_MODEL_PARAMETER(output))
518 for (k = 0; k < output->incomings->rnum; k++)
519 {
520 const ccv_cnnp_model_io_t input = *(ccv_cnnp_model_io_t*)ccv_array_get(output->incomings, k);
521 // If it is an input or parameter, skip.
522 if (CCV_CNNP_IS_MODEL_INPUT(input->model) || CCV_CNNP_IS_MODEL_PARAMETER(input))
523 continue;
524 if (input->visit == 1 || input->visit == 3) // Visited, skip.
525 continue;
526 // If this is an output, we need to remove it from the output array. Otherwise mark it as visited.
527 input->visit = input->visit == 2 ? 3 : 1;
528 ccv_array_push(reverse_top, &input);
529 }
530 // Similar for dependencies.
531 if (output->dependencies && !CCV_CNNP_IS_MODEL_PARAMETER(output))
532 for (k = 0; k < output->dependencies->rnum; k++)
533 {
534 const ccv_cnnp_model_io_t dependency = *(ccv_cnnp_model_io_t*)ccv_array_get(output->dependencies, k);
535 // If it is an input or parameter, skip.
536 if (CCV_CNNP_IS_MODEL_INPUT(dependency->model) || CCV_CNNP_IS_MODEL_PARAMETER(dependency))
537 continue;
538 if (dependency->visit == 1 || dependency->visit == 3) // Visited, skip.
539 continue;
540 // If this is an output, we need to remove it from the output array. Otherwise mark it as visited.
541 dependency->visit = dependency->visit == 2 ? 3 : 1;
542 ccv_array_push(reverse_top, &dependency);
543 }
544 }
545 for (j = 1; j < reverse_top->rnum; j++)
546 {
547 const ccv_cnnp_model_io_t output = *(ccv_cnnp_model_io_t*)ccv_array_get(reverse_top, j);
548 if (output->visit == 1) // Clean the visit back.
549 output->visit = 0;
550 }
551 }
552 ccv_array_clear(reverse_top);
553 for (i = 0; i < output_size; i++) // We will assign sequence in reverse order, thus, reverse the reverse top when copying the outputs.
554 {
555 if (outputs[output_size - 1 - i]->visit == 2)
556 ccv_array_push(reverse_top, &outputs[output_size - 1 - i]);
557 assert(outputs[output_size - 1 - i]->visit == 2 || outputs[output_size - 1 - i]->visit == 3);
558 outputs[output_size - 1 - i]->visit = 0; // Clean up all visits.
559 }
560 // Go from the output, until we meet inputs.
561 uint64_t input_bitmask[((input_size - 1) >> 6) + 1];
562 memset(input_bitmask, 0, sizeof(uint64_t) * (((input_size - 1) >> 6) + 1));
563 int tensor_output_size = 0; // An io can be mapped to multiple tensor outputs; therefore, we need to compute the exact tensor output size.
564 for (i = 0; i < output_size; i++)
565 tensor_output_size += outputs[i]->model->output_size;
566 for (i = 0; i < reverse_top->rnum; i++)
567 {
568 const ccv_cnnp_model_io_t output = *(ccv_cnnp_model_io_t*)ccv_array_get(reverse_top, i);
569 assert(!CCV_CNNP_IS_MODEL_INPUT(output->model));
570 // If it is input, push it here.
571 if (output->incomings && !CCV_CNNP_IS_MODEL_PARAMETER(output))
572 for (j = 0; j < output->incomings->rnum; j++)
573 {
574 const ccv_cnnp_model_io_t input = *(ccv_cnnp_model_io_t*)ccv_array_get(output->incomings, j);
575 ++input->visit; // Mark it as visited.
576 if (input->visit != input->outgoings->rnum + input->dependents) // Not all dependencies visited.
577 continue;
578 if (!CCV_CNNP_IS_MODEL_INPUT(input->model) && !CCV_CNNP_IS_MODEL_PARAMETER(input))
579 ccv_array_push(reverse_top, &input);
580 else if (CCV_CNNP_IS_MODEL_INPUT(input->model)) {
581 for (k = 0; k < input_size; k++)
582 if (input == inputs[k])
583 break;
584 assert(k < input_size);
585 input_bitmask[k >> 6] |= ((uint64_t)1 << (k & 63));
586 }
587 }
588 if (output->dependencies && !CCV_CNNP_IS_MODEL_PARAMETER(output))
589 for (j = 0; j < output->dependencies->rnum; j++)
590 {
591 const ccv_cnnp_model_io_t dependency = *(ccv_cnnp_model_io_t*)ccv_array_get(output->dependencies, j);
592 ++dependency->visit; // Mark it as visited.
593 if (dependency->visit != (dependency->outgoings ? dependency->outgoings->rnum : 0) + dependency->dependents) // Not all dependencies visited.
594 continue;
595 if (!CCV_CNNP_IS_MODEL_INPUT(dependency->model) && !CCV_CNNP_IS_MODEL_PARAMETER(dependency))
596 ccv_array_push(reverse_top, &dependency);
597 else if (CCV_CNNP_IS_MODEL_INPUT(dependency->model)) {
598 for (k = 0; k < input_size; k++)
599 if (dependency == inputs[k])
600 break;
601 assert(k < input_size);
602 input_bitmask[k >> 6] |= ((uint64_t)1 << (k & 63));
603 }
604 }
605 }
606 for (i = 0; i < reverse_top->rnum; i++)
607 {
608 const ccv_cnnp_model_io_t output = *(ccv_cnnp_model_io_t*)ccv_array_get(reverse_top, i);
609 output->visit = 0; // Reset the visit count.
610 }
611 for (i = 0; i < input_size; i++)
612 inputs[i]->visit = 0; // Reset the visit count.
613 for (i = 0; i < input_size; i++)
614 { assert((input_bitmask[i >> 6] & ((uint64_t)1 << (i & 63)))); } // Assuming they all match.
615 const int sequence_size = reverse_top->rnum + input_size;
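 // The functional model lives in one packed allocation: the struct itself,
 // then sequence_size model io pointers (the inputs first, then reverse_top
 // flipped back into topological order), then tensor_output_size output
 // symbols, then output_size indices recording which sequence entry produced
 // each model output.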
616 ccv_cnnp_functional_model_t* const functional_model = (ccv_cnnp_functional_model_t*)cccalloc(1, sizeof(ccv_cnnp_functional_model_t) + sizeof(ccv_cnnp_model_t*) * (sequence_size - 1) + sizeof(ccv_nnc_tensor_symbol_t) * tensor_output_size + sizeof(int) * output_size);
617 functional_model->super.isa = &ccv_cnnp_functional_model_isa;
618 functional_model->super.outputs = (ccv_nnc_tensor_symbol_t*)(functional_model->sequence + sequence_size);
619 functional_model->super.output_size = tensor_output_size;
620 functional_model->super.input_size = input_size;
621 functional_model->super.is_trainable = is_trainable;
622 functional_model->model_output_size = output_size;
623 functional_model->model_outputs = (int*)(functional_model->super.outputs + tensor_output_size);
624 ccv_cnnp_model_copy_name(&functional_model->super, name);
625 functional_model->sequence_size = sequence_size;
626 memcpy(functional_model->sequence, inputs, sizeof(ccv_cnnp_model_io_t) * input_size);
627 for (i = 0; i < reverse_top->rnum; i++)
628 functional_model->sequence[input_size + i] = *(ccv_cnnp_model_io_t*)ccv_array_get(reverse_top, reverse_top->rnum - 1 - i);
629 for (i = 0; i < output_size; i++)
630 {
631 for (j = sequence_size - 1; j >= input_size; j--)
632 if (functional_model->sequence[j] == outputs[i])
633 {
634 functional_model->model_outputs[i] = j;
635 break;
636 }
637 }
638 ccv_array_free(reverse_top);
639 return (ccv_cnnp_model_t*)functional_model;
640}
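
// A minimal usage sketch of the functional-model constructor above (not part of
// the original file; assumes the public ccv_cnnp_input/ccv_cnnp_model_apply/
// ccv_cnnp_sum/ccv_cnnp_model_new API as used elsewhere in cnnp):
//
//   ccv_cnnp_model_io_t const x = ccv_cnnp_input();
//   ccv_cnnp_model_io_t const y = ccv_cnnp_input();
//   ccv_cnnp_model_io_t const inputs[] = { x, y };
//   // Applying a model records x and y as incomings of z.
//   ccv_cnnp_model_io_t const z = ccv_cnnp_model_apply(ccv_cnnp_sum("sum"), inputs, 2);
//   // Walks back from z to the inputs and freezes the graph into one model.
//   ccv_cnnp_model_t* const model = ccv_cnnp_model_new(inputs, 2, &z, 1, 1, "add");
//   /* compile / evaluate the model here */
//   ccv_cnnp_model_free(model);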
641
642static ccv_cnnp_model_t* _ccv_cnnp_input_copy(const ccv_cnnp_model_t* const self, void* const context)
643{
644 ccv_cnnp_model_t* const input = (ccv_cnnp_model_t*)cccalloc(1, sizeof(ccv_cnnp_model_t) + sizeof(ccv_nnc_tensor_symbol_t));
645 input->isa = &ccv_cnnp_input_isa;
646 input->outputs = (ccv_nnc_tensor_symbol_t*)(input + 1);
647 input->output_size = 1;
648 return input;
649}
650
651static const ccv_cnnp_model_vtab_t ccv_cnnp_input_isa = {
652 .copy = _ccv_cnnp_input_copy,
653};
654
655ccv_cnnp_model_io_t ccv_cnnp_input(void)
656{
657 ccv_cnnp_model_t* const input = (ccv_cnnp_model_t*)cccalloc(1, sizeof(ccv_cnnp_model_t) + sizeof(ccv_nnc_tensor_symbol_t));
658 input->isa = &ccv_cnnp_input_isa;
659 input->io = ccv_array_new(sizeof(ccv_cnnp_model_io_t), 1, 0);
660 ccv_cnnp_model_io_t input_io = ccmalloc(sizeof(struct ccv_cnnp_model_io_s) + sizeof(ccv_nnc_tensor_symbol_t));
661 input_io->param_ref = 0;
662 input_io->param_sel = 0;
663 input_io->visit = 0;
664 input_io->incomings = 0;
665 input_io->dependencies = 0;
666 input_io->dependents = 0;
667 input_io->outgoings = 0;
668 input_io->model = input;
669 input_io->outputs = (ccv_nnc_tensor_symbol_t*)(input_io + 1);
670 ccv_array_push(input->io, &input_io);
671 input->outputs = (ccv_nnc_tensor_symbol_t*)(input + 1);
672 input->output_size = 1;
673 return input_io;
674}
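
// Note: the io handle returned above points into trailing storage of its own
// allocation (input_io + 1) for the single output symbol, and registers itself
// in input->io; the input model's vtab supplies only copy, so an input is a
// pure placeholder with no build step of its own.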
675
676// MARK - Dynamic Layer
677
678typedef struct {
679 ccv_cnnp_model_t super;
680 ccv_cnnp_model_dynamic_f func;
681 void* context;
682 ccv_cnnp_model_t* model;
683} ccv_cnnp_dynamic_model_t;
684
685static void _ccv_cnnp_dynamic_model_deinit(ccv_cnnp_model_t* const super)
686{
687 ccv_cnnp_dynamic_model_t* const self = (ccv_cnnp_dynamic_model_t*)super;
688 if (self->model)
689 ccv_cnnp_model_free(self->model);
690}
691
692static void _ccv_cnnp_dynamic_model_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
693{
694 ccv_cnnp_dynamic_model_t* const self = (ccv_cnnp_dynamic_model_t*)super;
695 PRINT(CCV_CLI_VERBOSE, "[cnnp_dynamic_model_build] 1. %p, func: %p\n", self, self->func);
696 if (!self->model)
697 {
698 ccv_nnc_tensor_param_t input_params[input_size];
699 int i;
700 for (i = 0; i < input_size; i++)
701 input_params[i] = ccv_nnc_tensor_symbol_params(graph, inputs[i]);
702 self->model = self->func(input_params, input_size, self->context);
703 // Update to use the settings of the compiled model.
704 self->super.input_size = self->model->input_size;
705 self->super.outputs = self->model->outputs;
706 self->super.output_size = self->model->output_size;
707 }
708 self->model->data = self->super.data;
709 ccv_cnnp_model_build(self->model, graph, inputs, input_size, outputs, output_size);
710 self->model->data = 0;
711 PRINT(CCV_CLI_VERBOSE, "[cnnp_dynamic_model_build] 2. %p\n", self);
712}
713
714static void _ccv_cnnp_dynamic_model_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
715{
716 ccv_cnnp_dynamic_model_t* const self = (ccv_cnnp_dynamic_model_t*)super;
717 assert(self->model);
718 ccv_cnnp_model_init_states(self->model, graph, initializer, context);
719}
720
721static void _ccv_cnnp_dynamic_model_set_is_test(ccv_cnnp_model_t* const super, const int is_test, const ccv_cnnp_cmd_updater_f updater, void* const context)
722{
723 ccv_cnnp_dynamic_model_t* const self = (ccv_cnnp_dynamic_model_t*)super;
724 assert(self->model);
725 ccv_cnnp_model_set_is_test(self->model, is_test, updater, context);
726}
727
728static ccv_cnnp_model_t* _ccv_cnnp_dynamic_model_copy(const ccv_cnnp_model_t* const super, void* const context);
729
730static void _ccv_cnnp_dynamic_model_add_to_parameter_indices(ccv_cnnp_model_t* const super, const int index, ccv_array_t* const parameter_indices)
731{
732 ccv_cnnp_dynamic_model_t* const self = (ccv_cnnp_dynamic_model_t*)super;
733 assert(self->model);
734 ccv_cnnp_model_add_to_parameter_indices(self->model, index, parameter_indices);
735}
736
737static void _ccv_cnnp_dynamic_model_notify(const ccv_cnnp_model_t* const super, const int tag, void* const payload)
738{
739 ccv_cnnp_dynamic_model_t* const self = (ccv_cnnp_dynamic_model_t*)super;
740 if (self->model)
741 ccv_cnnp_model_notify(self->model, tag, payload);
742}
743
744static const ccv_cnnp_model_vtab_t ccv_cnnp_dynamic_model_isa = {
745 .deinit = _ccv_cnnp_dynamic_model_deinit,
746 .build = _ccv_cnnp_dynamic_model_build,
747 .init_states = _ccv_cnnp_dynamic_model_init_states,
748 .copy = _ccv_cnnp_dynamic_model_copy,
749 .set_is_test = _ccv_cnnp_dynamic_model_set_is_test,
750 .add_to_parameter_indices = _ccv_cnnp_dynamic_model_add_to_parameter_indices,
751 .notify = _ccv_cnnp_dynamic_model_notify,
752};
753
754ccv_cnnp_model_t* ccv_cnnp_dynamic_new(ccv_cnnp_model_dynamic_f func, void* const context, const char* const name)
755{
756 ccv_cnnp_dynamic_model_t* const dynamic_model = (ccv_cnnp_dynamic_model_t*)cccalloc(1, sizeof(ccv_cnnp_dynamic_model_t));
757 dynamic_model->super.isa = &ccv_cnnp_dynamic_model_isa;
758 dynamic_model->super.is_trainable = -1;
759 dynamic_model->func = func;
760 dynamic_model->context = context;
761 ccv_cnnp_model_copy_name(&dynamic_model->super, name);
762 return (ccv_cnnp_model_t*)dynamic_model;
763}
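
// A minimal sketch of the dynamic callback contract (hypothetical function name;
// ccv_cnnp_sum is assumed from the public cnnp API): func runs once, lazily,
// inside the first build with the concrete input parameters, and the model it
// returns is cached in self->model thereafter.
//
//   static ccv_cnnp_model_t* _xxx_build_on_shape(const ccv_nnc_tensor_param_t* const inputs, const int input_size, void* const context)
//   {
//     // inputs[i] carries the concrete params of the i-th input symbol; a real
//     // callback would branch on inputs[0].dim or datatype here.
//     return ccv_cnnp_sum("sum");
//   }
//
//   ccv_cnnp_model_t* const lazy = ccv_cnnp_dynamic_new(_xxx_build_on_shape, 0, "lazy");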
764
765static ccv_cnnp_model_t* _ccv_cnnp_dynamic_model_copy(const ccv_cnnp_model_t* const super, void* const context)
766{
767 const ccv_cnnp_dynamic_model_t* const self = (const ccv_cnnp_dynamic_model_t*)super;
768 return ccv_cnnp_dynamic_new(self->func, self->context, self->super.name);
769}
770
771// MARK - Command Layer
772
773typedef struct {
774 ccv_cnnp_model_t super;
775 ccv_nnc_cmd_t cmd;
776 ccv_nnc_hint_t hint;
777 ccv_nnc_tensor_symbol_t* input_symbols; // This is only valid for INIT_SHARED_TENSOR / INIT_SHARED_TENSOR_AS_TRAINABLE
778 ccv_nnc_tensor_symbol_t* output_symbols; // Per-slot output symbols (a slot may carry no tensor symbol at all).
779 ccv_cnnp_cmd_exec_io_t* inputs;
780 int flags;
781 int input_size;
782 int* outputs;
783 int output_size;
784} ccv_cnnp_model_cmd_exec_t;
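
// Slot encoding used below: an input slot is CCV_CNNP_IO (fed positionally from
// the model's io inputs), CCV_CNNP_NO_TENSOR (left empty), or
// CCV_CNNP_INIT_SHARED_TENSOR(_AS_TRAINABLE) (a symbol created once and filled
// via its init_state); an output slot is CCV_CNNP_IO, CCV_CNNP_TENSOR_NOT_OUTPUT
// (materialized but not exposed), or CCV_CNNP_NO_TENSOR.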
785
786static void _ccv_cnnp_cmd_exec_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
787{
788 ccv_cnnp_model_cmd_exec_t* const self = (ccv_cnnp_model_cmd_exec_t*)super;
789 PRINT(CCV_CLI_VERBOSE, "[cnnp_cmd_exec_build] -\n");
790 ccv_nnc_tensor_param_t input_params[ccv_max(1, self->input_size)];
791 int i, j;
792 for (i = 0, j = 0; i < self->input_size; i++)
793 if (self->inputs[i].type == CCV_CNNP_IO)
794 {
795 self->input_symbols[i] = inputs[j++];
796 input_params[i] = ccv_nnc_tensor_symbol_params(graph, self->input_symbols[i]);
797 } else if (self->inputs[i].type == CCV_CNNP_NO_TENSOR) {
798 self->input_symbols[i] = NO_TENSOR_SYMBOL;
799 } else if (!self->input_symbols[i].graph) {
800 // Otherwise, we only create this symbol if it doesn't exist.
801 const ccv_nnc_tensor_param_t params = self->inputs[i].init_state.info;
802 input_params[i] = params;
803 self->input_symbols[i] = ccv_nnc_tensor_symbol_new(graph, params, 0);
804 }
805 // We cannot simply mark the outputs as auto, because a subsequent build call may require this output to have its params set up.
806 // Infer the parameters here.
807 ccv_nnc_tensor_param_t output_params[ccv_max(1, self->output_size)];
808 ccv_nnc_hint_tensor_auto(self->cmd, input_params, self->input_size, self->hint, output_params, self->output_size);
809 for (i = 0, j = 0; i < self->output_size; i++)
810 if (self->outputs[i] == CCV_CNNP_IO)
811 self->output_symbols[i] = outputs[j++] = ccv_nnc_tensor_symbol_new(graph, output_params[i], 0);
812 else if (self->outputs[i] == CCV_CNNP_TENSOR_NOT_OUTPUT)
813 self->output_symbols[i] = ccv_nnc_tensor_symbol_new(graph, output_params[i], 0);
814 else
815 self->output_symbols[i] = NO_TENSOR_SYMBOL;
816 ccv_nnc_graph_exec_symbol_new(graph, self->cmd, self->input_symbols, self->input_size, self->output_symbols, self->output_size, 0);
817}
818
819static void _ccv_cnnp_cmd_exec_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
820{
821 ccv_cnnp_model_cmd_exec_t* const self = (ccv_cnnp_model_cmd_exec_t*)super;
822 int i;
823 for (i = 0; i < self->input_size; i++)
824 if (self->inputs[i].type == CCV_CNNP_INIT_SHARED_TENSOR || self->inputs[i].type == CCV_CNNP_INIT_SHARED_TENSOR_AS_TRAINABLE)
825 self->inputs[i].init_state.init(self->input_symbols[i], initializer, context, self->inputs[i].init_state.context);
826}
827
828static void _ccv_cnnp_cmd_exec_add_to_output(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const outputs)
829{
830 ccv_cnnp_model_cmd_exec_t* const self = (ccv_cnnp_model_cmd_exec_t*)super;
831 int i;
832 for (i = 0; i < self->input_size; i++)
833 if (self->inputs[i].type == CCV_CNNP_INIT_SHARED_TENSOR)
834 add_to_array(outputs, self->input_symbols[i], 0); // Push this as retainable because it needs to be initialized.
835}
836
837static void _ccv_cnnp_cmd_exec_add_to_parameter(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const parameters, const int is_trainable)
838{
839 ccv_cnnp_model_cmd_exec_t* const self = (ccv_cnnp_model_cmd_exec_t*)super;
840 int i;
841 for (i = 0; i < self->input_size; i++)
842 if (self->inputs[i].type == CCV_CNNP_INIT_SHARED_TENSOR_AS_TRAINABLE)
843 add_to_array(parameters, self->input_symbols[i], is_trainable); // Push this as parameter.
844}
845
846static void _ccv_cnnp_cmd_exec_deinit(ccv_cnnp_model_t* const super)
847{
848 ccv_cnnp_model_cmd_exec_t* const self = (ccv_cnnp_model_cmd_exec_t*)super;
849 int i, j;
850 for (i = 0; i < self->input_size; i++)
851 if ((self->inputs[i].type == CCV_CNNP_INIT_SHARED_TENSOR || self->inputs[i].type == CCV_CNNP_INIT_SHARED_TENSOR_AS_TRAINABLE) &&
852 self->inputs[i].init_state.context)
853 {
854 void* const context = self->inputs[i].init_state.context;
855 if (self->inputs[i].init_state.deinit)
856 self->inputs[i].init_state.deinit(context);
857 self->inputs[i].init_state.init = 0;
858 self->inputs[i].init_state.deinit = 0;
859 self->inputs[i].init_state.context = 0;
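 // Several inputs may share one init_state.context; clear the remaining
 // references below so the shared context is not deinited twice.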
860 for (j = i + 1; j < self->input_size; j++)
861 if (self->inputs[j].init_state.context == context)
862 {
863 self->inputs[j].init_state.init = 0;
864 self->inputs[j].init_state.deinit = 0;
865 self->inputs[j].init_state.context = 0;
866 }
867 }
868}
869
870static ccv_cnnp_model_t* _ccv_cnnp_cmd_exec_copy(const ccv_cnnp_model_t* const super, void* const context);
871
872static const ccv_cnnp_model_vtab_t ccv_cnnp_cmd_exec_isa = {
873 .build = _ccv_cnnp_cmd_exec_build,
874 .init_states = _ccv_cnnp_cmd_exec_init_states,
875 .add_to_parameter = _ccv_cnnp_cmd_exec_add_to_parameter,
876 .add_to_output = _ccv_cnnp_cmd_exec_add_to_output,
877 .deinit = _ccv_cnnp_cmd_exec_deinit,
878 .copy = _ccv_cnnp_cmd_exec_copy,
879};
880
881static ccv_cnnp_model_t* _ccv_cnnp_cmd_exec(const ccv_nnc_cmd_t cmd, int copy_io, const ccv_nnc_hint_t hint, const int flags, const ccv_cnnp_cmd_exec_io_t* const inputs, const int input_size, const int* const outputs, const int output_size, const int is_trainable, const char* const name)
882{
883 assert(input_size >= 0);
884 assert(output_size > 0);
885 int i;
886 int io_input_size = 0;
887 for (i = 0; i < input_size; i++)
888 if (inputs[i].type == CCV_CNNP_IO)
889 ++io_input_size;
890 else {
891 assert(inputs[i].type == CCV_CNNP_INIT_SHARED_TENSOR || inputs[i].type == CCV_CNNP_INIT_SHARED_TENSOR_AS_TRAINABLE);
892 assert(inputs[i].init_state.init);
893 }
894 int io_output_size = 0;
895 for (i = 0; i < output_size; i++)
896 if (outputs[i] == CCV_CNNP_IO)
897 ++io_output_size;
898 else {
899 assert(outputs[i] == CCV_CNNP_TENSOR_NOT_OUTPUT || outputs[i] == CCV_CNNP_NO_TENSOR);
900 }
901 assert(io_output_size > 0);
902 ccv_cnnp_model_cmd_exec_t* const model_cmd_exec = (ccv_cnnp_model_cmd_exec_t*)cccalloc(1, sizeof(ccv_cnnp_model_cmd_exec_t) + sizeof(ccv_nnc_tensor_symbol_t) * (io_output_size + input_size + output_size) + sizeof(ccv_cnnp_cmd_exec_io_t) * input_size + sizeof(int) * output_size);
903 model_cmd_exec->super.isa = &ccv_cnnp_cmd_exec_isa;
904 model_cmd_exec->super.input_size = io_input_size;
905 model_cmd_exec->super.outputs = (ccv_nnc_tensor_symbol_t*)(model_cmd_exec + 1);
906 model_cmd_exec->super.output_size = io_output_size;
907 model_cmd_exec->super.is_trainable = is_trainable;
908 ccv_cnnp_model_copy_name(&model_cmd_exec->super, name);
909 model_cmd_exec->cmd = cmd;
910 model_cmd_exec->hint = hint;
911 model_cmd_exec->flags = flags;
912 model_cmd_exec->input_size = input_size;
913 model_cmd_exec->input_symbols = model_cmd_exec->super.outputs + io_output_size;
914 model_cmd_exec->output_symbols = model_cmd_exec->input_symbols + input_size;
915 model_cmd_exec->inputs = (ccv_cnnp_cmd_exec_io_t*)(model_cmd_exec->output_symbols + output_size);
916 if (input_size > 0)
917 {
918 memcpy(model_cmd_exec->inputs, inputs, sizeof(ccv_cnnp_cmd_exec_io_t) * input_size);
919 if (copy_io)
920 for (i = 0; i < input_size; i++)
921 if (inputs[i].type != CCV_CNNP_IO && inputs[i].init_state.copy)
922 model_cmd_exec->inputs[i].init_state.context = inputs[i].init_state.copy(inputs[i].init_state.context);
923 }
924 model_cmd_exec->output_size = output_size;
925 model_cmd_exec->outputs = (int*)(model_cmd_exec->inputs + input_size);
926 if (output_size > 0)
927 memcpy(model_cmd_exec->outputs, outputs, sizeof(int) * output_size);
928 return (ccv_cnnp_model_t*)model_cmd_exec;
929}
930
931ccv_cnnp_model_t* ccv_cnnp_cmd_exec(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, const ccv_cnnp_cmd_exec_io_t* const inputs, const int input_size, const int* const outputs, const int output_size, const int is_trainable, const char* const name)
932{
933 return _ccv_cnnp_cmd_exec(cmd, 0, hint, flags, inputs, input_size, outputs, output_size, is_trainable, name);
934}
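
// A minimal usage sketch (not from this file; CMD_EWSUM_FORWARD comes from nnc
// and `bias` is a hypothetical preexisting ccv_nnc_tensor_t*): wrap an
// element-wise sum as a model with one io input, one shared bias tensor, and
// one io output.
//
//   const ccv_cnnp_cmd_exec_io_t ins[] = {
//     { .type = CCV_CNNP_IO },
//     { .type = CCV_CNNP_INIT_SHARED_TENSOR, .init_state = ccv_cnnp_cmd_exec_io_copy(bias) },
//   };
//   const int outs[] = { CCV_CNNP_IO };
//   ccv_cnnp_model_t* const add_bias = ccv_cnnp_cmd_exec(CMD_EWSUM_FORWARD(), ccv_nnc_no_hint, 0, ins, 2, outs, 1, 0, "add_bias");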
935
936static ccv_cnnp_model_t* _ccv_cnnp_cmd_exec_copy(const ccv_cnnp_model_t* const super, void* const context)
937{
938 const ccv_cnnp_model_cmd_exec_t* const self = (const ccv_cnnp_model_cmd_exec_t*)super;
939 return _ccv_cnnp_cmd_exec(self->cmd, 1, self->hint, self->flags, self->inputs, self->input_size, self->outputs, self->output_size, self->super.is_trainable, self->super.name);
940}
941
942static void _ccv_cnnp_cmd_exec_io_copy(const ccv_nnc_tensor_symbol_t tensor_symbol, const ccv_cnnp_state_initializer_f initializer, void* const initializer_context, void* const context)
943{
944 initializer(initializer_context, CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, (ccv_nnc_tensor_t*)context, tensor_symbol);
945}
946
947ccv_cnnp_cmd_exec_io_init_state_t ccv_cnnp_cmd_exec_io_copy(const ccv_nnc_tensor_t* const tensor)
948{
949 return (ccv_cnnp_cmd_exec_io_init_state_t){
950 .info = tensor->info,
951 .context = (void *)tensor,
952 .init = _ccv_cnnp_cmd_exec_io_copy,
953 };
954}
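
// Note: the copy initializer above merely borrows the tensor pointer (it sets
// no .copy or .deinit), so the tensor must outlive the model and every copy of
// the model made from it.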
955
956typedef struct {
957 ccv_nnc_cmd_t cmd;
958 ccv_nnc_hint_t hint;
959 int flags;
960} ccv_cnnp_cmd_exec_io_set_by_t;
961
962static void _ccv_cnnp_cmd_exec_io_set_by(const ccv_nnc_tensor_symbol_t tensor_symbol, const ccv_cnnp_state_initializer_f initializer, void* const initializer_context, void* const context)
963{
964 const ccv_cnnp_cmd_exec_io_set_by_t* const set_by = (ccv_cnnp_cmd_exec_io_set_by_t*)context;
965 initializer(initializer_context, set_by->cmd, set_by->hint, set_by->flags, 0, tensor_symbol);
966}
967
968static void* _ccv_cnnp_cmd_exec_io_set_by_copy(void* const context)
969{
970 ccv_cnnp_cmd_exec_io_set_by_t* const set_by = (ccv_cnnp_cmd_exec_io_set_by_t*)ccmalloc(sizeof(ccv_cnnp_cmd_exec_io_set_by_t));
971 memcpy(set_by, context, sizeof(ccv_cnnp_cmd_exec_io_set_by_t));
972 return set_by;
973}
974
975ccv_cnnp_cmd_exec_io_init_state_t ccv_cnnp_cmd_exec_io_set_by(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, const ccv_nnc_tensor_param_t params)
976{
977 ccv_cnnp_cmd_exec_io_set_by_t* const set_by = (ccv_cnnp_cmd_exec_io_set_by_t*)ccmalloc(sizeof(ccv_cnnp_cmd_exec_io_set_by_t));
978 set_by->cmd = cmd;
979 set_by->hint = hint;
980 set_by->flags = flags;
981 return (ccv_cnnp_cmd_exec_io_init_state_t){
982 .info = params,
983 .context = set_by,
984 .init = _ccv_cnnp_cmd_exec_io_set_by,
985 .copy = _ccv_cnnp_cmd_exec_io_set_by_copy,
986 .deinit = ccfree,
987 };
988}
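
// By contrast with ccv_cnnp_cmd_exec_io_copy, set_by heap-allocates its context
// and supplies .copy and .deinit, so each model copy owns and frees its own
// ccv_cnnp_cmd_exec_io_set_by_t.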