Bug Summary

File: nnc/ccv_nnc_graph.c
Warning: line 1902, column 2
Dereference of null pointer

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-unknown-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name ccv_nnc_graph.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -target-feature +sse2 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/home/liu/buildslave/linux-x64-runtests/build/lib/nnc -resource-dir /usr/local/lib/clang/14.0.6 -I ../ -I /usr/local/cuda/include -D HAVE_CBLAS -D HAVE_LIBPNG -D HAVE_LIBJPEG -D HAVE_FFTW3 -D HAVE_PTHREAD -D HAVE_LIBLINEAR -D HAVE_TESSERACT -D HAVE_AVCODEC -D HAVE_AVFORMAT -D HAVE_AVUTIL -D HAVE_SWSCALE -D USE_DISPATCH -D HAVE_SSE2 -D HAVE_GSL -D HAVE_CUDA -D HAVE_CUDNN -D HAVE_NCCL -D USE_SYSTEM_CUB -I /usr/local/include -internal-isystem /usr/local/lib/clang/14.0.6/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/9/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -fdebug-compilation-dir=/home/liu/buildslave/linux-x64-runtests/build/lib/nnc -ferror-limit 19 -fblocks -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/liu/buildslave/public_html/analyze/2022-08-02-175445-1304081-1 -x c ccv_nnc_graph.c
1 #include "ccv_nnc.h"
2 #include "ccv_nnc_easy.h"
3 #include "ccv_nnc_internal.h"
4 #include "ccv_internal.h"
5 #include "_ccv_nnc_graph.h"
6
7 // MARK - Level-2 API
8
9 ccv_nnc_graph_t* ccv_nnc_graph_new(void)
10 {
11 ccv_nnc_graph_t* graph = (ccv_nnc_graph_t*)cccalloc(1, sizeof(ccv_nnc_graph_t));
12 graph->exec_info = ccv_array_new(sizeof(ccv_nnc_graph_exec_info_t), 5, 0);
13 return graph;
14 }
15
16 void ccv_nnc_graph_set_sources(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t* const sources, const int source_size)
17 {
18 if (!graph->sources)
19 graph->sources = ccv_array_new(sizeof(ccv_nnc_graph_exec_t), source_size, 0);
20 else
21 ccv_array_clear(graph->sources);
22 int i;
23 for (i = 0; i < source_size; i++)
24 ccv_array_push(graph->sources, sources + i);
25 graph->topsorted = 0;
26 }
27
28 ccv_nnc_graph_exec_t* ccv_nnc_graph_sources(const ccv_nnc_graph_t* const graph)
29 {
30 return graph->sources ? (ccv_nnc_graph_exec_t*)ccv_array_get(graph->sources, 0) : 0;
31 }
32
33 int ccv_nnc_graph_source_size(const ccv_nnc_graph_t* const graph)
34 {
35 return graph->sources ? graph->sources->rnum : 0;
36 }
37
38 void ccv_nnc_graph_set_destinations(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t* const destinations, const int destination_size)
39 {
40 if (!graph->destinations)
41 graph->destinations = ccv_array_new(sizeof(ccv_nnc_graph_exec_t), destination_size, 0);
42 else
43 ccv_array_clear(graph->destinations);
44 int i;
45 for (i = 0; i < destination_size; i++)
46 ccv_array_push(graph->destinations, destinations + i);
47 graph->topsorted = 0;
48 }
49
50 ccv_nnc_graph_exec_t* ccv_nnc_graph_destinations(const ccv_nnc_graph_t* const graph)
51 {
52 return graph->destinations ? (ccv_nnc_graph_exec_t*)ccv_array_get(graph->destinations, 0) : 0;
53 }
54
55 int ccv_nnc_graph_destination_size(const ccv_nnc_graph_t* const graph)
56 {
57 return graph->destinations ? graph->destinations->rnum : 0;
58 }
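The four accessors above null-guard because ccv_nnc_graph_new() zero-fills the struct with cccalloc, so sources and destinations stay unset until the setters run. A minimal usage sketch, assuming hypothetical node handles n0 and n1 as returned by ccv_nnc_graph_exec_new (line 391 below):

ccv_nnc_graph_t* graph = ccv_nnc_graph_new();
ccv_nnc_graph_set_sources(graph, &n0, 1);      /* entry node(s) */
ccv_nnc_graph_set_destinations(graph, &n1, 1); /* exit node(s) */
assert(ccv_nnc_graph_source_size(graph) == 1);
assert(ccv_nnc_graph_destinations(graph)[0].d == n1.d);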
59
60 void ccv_nnc_graph_exec_set(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, const ccv_nnc_cmd_t cmd)
61 {
62 assert(exec.d < graph->exec_info->rnum);
63 assert(exec.graph == graph);
64 ccv_nnc_graph_exec_info_t* const exec_info = (ccv_nnc_graph_exec_info_t*)ccv_array_get(graph->exec_info, exec.d);
65 exec_info->cmd = cmd;
66 }
67
68 ccv_nnc_cmd_t ccv_nnc_graph_exec_cmd(const ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec)
69 {
70 assert(exec.d < graph->exec_info->rnum);
71 assert(exec.graph == graph);
72 ccv_nnc_graph_exec_info_t* const exec_info = (ccv_nnc_graph_exec_info_t*)ccv_array_get(graph->exec_info, exec.d);
73 return exec_info->cmd;
74 }
75
76 void ccv_nnc_graph_exec_set_hint(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, const ccv_nnc_hint_t hint)
77 {
78 assert(exec.d < graph->exec_info->rnum);
79 assert(exec.graph == graph);
80 ccv_nnc_graph_exec_info_t* const exec_info = (ccv_nnc_graph_exec_info_t*)ccv_array_get(graph->exec_info, exec.d);
81 exec_info->hint = hint;
82 }
83
84 static int _ccv_nnc_tensor_multiview_level_count(const ccv_nnc_tensor_multiview_t* const mv)
85 {
86 if (!CCV_IS_TENSOR_MULTIVIEW(mv))
87 return 1;
88 const int count = mv->kind + mv->repeat;
89 int i, c = 0;
90 for (i = 0; i < count; i++)
91 {
92 ccv_nnc_tensor_t* tv = CCV_NNC_MULTIVIEW_DATA(mv)[i];
93 if (tv == CCV_NNC_TENSOR_PLACEHOLDER)
94 c = ccv_max(c, 1);
95 else
96 c = ccv_max(c, _ccv_nnc_tensor_multiview_level_count((ccv_nnc_tensor_multiview_t*)tv));
97 }
98 return c + 1;
99 }
100
101 static ccv_nnc_graph_tensor_wrap_t* _ccv_nnc_graph_tensor_wrap_new(const ccv_nnc_tensor_multiview_t* const mv)
102 {
103 const int level_count = _ccv_nnc_tensor_multiview_level_count(mv);
104 ccv_nnc_graph_tensor_wrap_t* tensor_wrap = (ccv_nnc_graph_tensor_wrap_t*)ccmalloc(sizeof(ccv_nnc_graph_tensor_wrap_t) + sizeof(ccv_nnc_tensor_t*) * (level_count - 1));
105 tensor_wrap->update_required = 0;
106 tensor_wrap->count = level_count;
107 tensor_wrap->index = 0;
108 tensor_wrap->tensors[0] = (ccv_nnc_tensor_t*)mv;
109 return tensor_wrap;
110 }
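The odd-looking allocation size at line 104 is the C trailing-array idiom: the size implies ccv_nnc_graph_tensor_wrap_t ends in a one-element tensors[] tail, so sizeof(struct) + sizeof(ptr) * (level_count - 1) leaves exactly level_count usable slots. A generic sketch of the same idiom, with hypothetical names:

typedef struct {
	int count;
	ccv_nnc_tensor_t* tensors[1]; /* extends past the struct at allocation time */
} wrap_sketch_t;

wrap_sketch_t* wrap_sketch_new(const int count)
{
	wrap_sketch_t* const w = (wrap_sketch_t*)ccmalloc(sizeof(wrap_sketch_t) + sizeof(ccv_nnc_tensor_t*) * (count - 1));
	w->count = count; /* w->tensors[0] .. w->tensors[count - 1] are now valid */
	return w;
}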
111
112 static void _ccv_nnc_graph_exec_rewind(ccv_nnc_graph_exec_info_t* const info, ccv_nnc_graph_t* const graph)
113 {
114 if (!info->tensor_wraps_ref)
115 return;
116 int i;
117 assert(info->tensor_wraps_ref <= graph->tensor_wraps->rnum);
118 ccv_nnc_graph_tensor_wrap_array_t* const tensor_wrap_array = *(ccv_nnc_graph_tensor_wrap_array_t**)ccv_array_get(graph->tensor_wraps, info->tensor_wraps_ref - 1);
119 // Rewind from tensor wraps.
120 for (i = 0; i < info->input_size; i++)
121 if (tensor_wrap_array->tensor_wraps[i])
122 info->inputs[i] = tensor_wrap_array->tensor_wraps[i]->tensors[0];
123 const int d = info->input_size;
124 for (i = 0; i < info->output_size; i++)
125 if (tensor_wrap_array->tensor_wraps[d + i])
126 info->outputs[i] = tensor_wrap_array->tensor_wraps[d + i]->tensors[0];
127 const int dd = info->input_size + info->output_size;
128 for (i = 0; i < info->update_size; i++)
129 if (tensor_wrap_array->tensor_wraps[dd + i])
130 info->updates[i] = tensor_wrap_array->tensor_wraps[dd + i]->tensors[0];
131 }
132
133 static void _ccv_nnc_graph_tensor_wrap_free(ccv_nnc_graph_tensor_wrap_t* const tensor_wrap)
134 {
135 ccfree(tensor_wrap);
136 }
137
138 ccv_nnc_graph_tensor_wrap_array_t* ccv_nnc_get_tensor_wrap_array(ccv_nnc_graph_t* const graph, const int tensor_wrap_size, int* const tensor_wraps_ref)
139 {
140 ccv_nnc_graph_tensor_wrap_array_t** tensor_wrap_array_ref = *tensor_wraps_ref ? (ccv_nnc_graph_tensor_wrap_array_t**)ccv_array_get(graph->tensor_wraps, *tensor_wraps_ref - 1) : 0;
141 // Otherwise, find an open slot.
142 if (!tensor_wrap_array_ref)
143 {
144 if (!graph->tensor_wraps)
145 graph->tensor_wraps = ccv_array_new(sizeof(ccv_nnc_graph_tensor_wrap_array_t*), 0, 0);
146 ccv_nnc_graph_tensor_wrap_array_t* const tensor_wrap_array = 0;
147 ccv_array_push(graph->tensor_wraps, &tensor_wrap_array);
148 tensor_wrap_array_ref = (ccv_nnc_graph_tensor_wrap_array_t**)ccv_array_get(graph->tensor_wraps, graph->tensor_wraps->rnum - 1);
149 *tensor_wraps_ref = graph->tensor_wraps->rnum;
150 }
151 int i;
152 if (*tensor_wrap_array_ref)
153 {
154 if ((*tensor_wrap_array_ref)->size != tensor_wrap_size)
155 *tensor_wrap_array_ref = (ccv_nnc_graph_tensor_wrap_array_t*)ccrealloc(*tensor_wrap_array_ref, sizeof(ccv_nnc_graph_tensor_wrap_array_t) + sizeof(ccv_nnc_graph_tensor_wrap_t*) * (tensor_wrap_size - 1));
156 for (i = (*tensor_wrap_array_ref)->size; i < tensor_wrap_size; i++)
157 (*tensor_wrap_array_ref)->tensor_wraps[i] = 0;
158 } else
159 *tensor_wrap_array_ref = (ccv_nnc_graph_tensor_wrap_array_t*)cccalloc(sizeof(ccv_nnc_graph_tensor_wrap_array_t) + sizeof(ccv_nnc_graph_tensor_wrap_t*) * (tensor_wrap_size - 1), 1);
160 ccv_nnc_graph_tensor_wrap_array_t* const tensor_wrap_array = *tensor_wrap_array_ref;
161 tensor_wrap_array->size = tensor_wrap_size;
162 return tensor_wrap_array;
163 }
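Note the 1-based convention: *tensor_wraps_ref == 0 means "no wrap array yet", and a live ref is the slot index in graph->tensor_wraps plus one, which is why the function reads slot *tensor_wraps_ref - 1 and writes back rnum rather than rnum - 1. A caller-side sketch (hypothetical wrap size of 3):

int ref = 0; /* 0 = not allocated yet */
ccv_nnc_graph_tensor_wrap_array_t* const arr = ccv_nnc_get_tensor_wrap_array(graph, 3, &ref);
/* ref now holds slot index + 1; passing it back reuses the same slot: */
assert(arr == ccv_nnc_get_tensor_wrap_array(graph, 3, &ref));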
164
165 void ccv_nnc_set_tensor_wraps(ccv_nnc_graph_tensor_wrap_t** const tensor_wraps, ccv_nnc_tensor_t* const* const tensors, const int tensor_size)
166 {
167 int i;
168 for (i = 0; i < tensor_size; i++)
169 if (tensors[i])
170 {
171 if (CCV_IS_TENSOR_MULTIVIEW(tensors[i]) &&
172 ((ccv_nnc_tensor_multiview_t*)tensors[i])->anchor != CCV_NNC_MULTIVIEW_PHI)
173 {
174 if (!tensor_wraps[i] || tensors[i] != tensor_wraps[i]->tensors[0])
175 {
176 if (tensor_wraps[i])
177 _ccv_nnc_graph_tensor_wrap_free(tensor_wraps[i]);
178 tensor_wraps[i] = _ccv_nnc_graph_tensor_wrap_new((ccv_nnc_tensor_multiview_t*)tensors[i]);
179 }
180 } else {
181 if (tensor_wraps[i])
182 _ccv_nnc_graph_tensor_wrap_free(tensor_wraps[i]);
183 tensor_wraps[i] = 0;
184 }
185 }
186 }
187
188 void ccv_nnc_graph_register_tensor_wraps(ccv_nnc_graph_t* graph, const int tensor_wraps_ref_d)
189 {
190 ccv_nnc_graph_t* p = graph;
191 const ccv_nnc_graph_tensor_wraps_ref_t tensor_wraps_ref = {
192 .d = tensor_wraps_ref_d,
193 .graph = graph,
194 };
195 do {
196 if (!p->tensor_wraps_refs)
197 {
198 p->tensor_wraps_refs = ccv_array_new(sizeof(ccv_nnc_graph_tensor_wraps_ref_t), 0, 0);
199 ccv_array_push(p->tensor_wraps_refs, &tensor_wraps_ref);
200 } else {
201 int i;
202 int has_tensor_wraps_ref = 0;
203 for (i = 0; !has_tensor_wraps_ref && i < p->tensor_wraps_refs->rnum; i++)
204 {
205 ccv_nnc_graph_tensor_wraps_ref_t* tensor_wraps_ref = (ccv_nnc_graph_tensor_wraps_ref_t*)ccv_array_get(p->tensor_wraps_refs, i);
206 has_tensor_wraps_ref = (tensor_wraps_ref->d == tensor_wraps_ref_d && tensor_wraps_ref->graph == graph);
207 }
208 if (!has_tensor_wraps_ref)
209 ccv_array_push(p->tensor_wraps_refs, &tensor_wraps_ref);
210 }
211 p = p->p;
212 } while (p);
213 }
214
215 static void _ccv_nnc_graph_redo_tensor_wraps(ccv_nnc_graph_exec_info_t* const info, ccv_nnc_graph_t* const graph)
216 {
217 int i;
218 const int has_wrap = ccv_nnc_tensors_have_wraps(info->inputs, info->input_size) ||
219 ccv_nnc_tensors_have_wraps(info->outputs, info->output_size) ||
220 ccv_nnc_tensors_have_wraps(info->updates, info->update_size);
221 if (has_wrap)
222 {
223 const int tensor_wrap_size = info->input_size + info->output_size + info->update_size;
224 ccv_nnc_graph_tensor_wrap_array_t* const tensor_wrap_array = ccv_nnc_get_tensor_wrap_array(graph, tensor_wrap_size, &info->tensor_wraps_ref);
225 ccv_nnc_set_tensor_wraps(tensor_wrap_array->tensor_wraps, info->inputs, info->input_size);
226 const int d = info->input_size;
227 ccv_nnc_set_tensor_wraps(tensor_wrap_array->tensor_wraps + d, info->outputs, info->output_size);
228 const int dd = info->input_size + info->output_size;
229 ccv_nnc_set_tensor_wraps(tensor_wrap_array->tensor_wraps + dd, info->updates, info->update_size);
230 } else if (info->tensor_wraps_ref) {
231 ccv_nnc_graph_tensor_wrap_array_t** tensor_wrap_array_ref = (ccv_nnc_graph_tensor_wrap_array_t**)ccv_array_get(graph->tensor_wraps, info->tensor_wraps_ref - 1);
232 ccv_nnc_graph_tensor_wrap_array_t* const tensor_wrap_array = *tensor_wrap_array_ref;
233 if (tensor_wrap_array)
234 {
235 for (i = 0; i < tensor_wrap_array->size; i++)
236 if (tensor_wrap_array->tensor_wraps[i])
237 _ccv_nnc_graph_tensor_wrap_free(tensor_wrap_array->tensor_wraps[i]);
238 ccfree(tensor_wrap_array);
239 *tensor_wrap_array_ref = 0;
240 info->tensor_wraps_ref = 0;
241 }
242 }
243 }
244
245 static void _ccv_nnc_graph_deregister_tensor_wraps(ccv_nnc_graph_t* graph, const int tensor_wraps_ref_d)
246 {
247 ccv_nnc_graph_t* p = graph;
248 do {
249 int i;
250 // Remove from the array.
251 if (p->tensor_wraps_refs)
252 for (i = 0; i < p->tensor_wraps_refs->rnum; i++)
253 {
254 ccv_nnc_graph_tensor_wraps_ref_t* const tensor_wraps_ref = (ccv_nnc_graph_tensor_wraps_ref_t*)ccv_array_get(p->tensor_wraps_refs, i);
255 if (tensor_wraps_ref->d == tensor_wraps_ref_d && tensor_wraps_ref->graph == graph)
256 {
257 --p->tensor_wraps_refs->rnum;
258 if (i < p->tensor_wraps_refs->rnum)
259 memmove(tensor_wraps_ref, tensor_wraps_ref + 1, sizeof(ccv_nnc_graph_tensor_wraps_ref_t) * (p->tensor_wraps_refs->rnum - i)); // Overlapping shift; element type matches the array.
260 break;
261 }
262 }
263 p = p->p;
264 } while (p);
265 }
266
267 void ccv_nnc_graph_exec_set_io_flags(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, const int* const input_flags, const int input_flag_size, const int* const output_flags, const int output_flag_size)
268 {
269 assert(exec.d < graph->exec_info->rnum);
270 assert(exec.graph == graph);
271 ccv_nnc_graph_exec_info_t* const info = (ccv_nnc_graph_exec_info_t*)ccv_array_get(graph->exec_info, exec.d);
272 assert(input_flag_size <= info->input_size);
273 assert(output_flag_size <= info->output_size);
274 if (info->input_size + info->output_size == 0)
275 return;
276 if (!info->input_flags)
277 {
278 info->input_flags = (int*)cccalloc(info->input_size + info->output_size, sizeof(int));
279 info->output_flags = info->input_flags + info->input_size;
280 }
281 if (input_flag_size > 0)
282 memcpy(info->input_flags, input_flags, sizeof(int) * input_flag_size);
283 if (output_flag_size > 0)
284 memcpy(info->output_flags, output_flags, sizeof(int) * output_flag_size);
285 }
286
287 void ccv_nnc_graph_exec_pair_with(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, const ccv_nnc_graph_exec_t pair_exec)
288 {
289 assert(exec.graph == graph);
290 assert(exec.d >= 0);
291 assert(exec.d < graph->exec_info->rnum);
292 assert(pair_exec.graph == graph || pair_exec.graph == graph->pair);
293 assert(pair_exec.d >= 0);
294 if (pair_exec.graph == graph)
295 { assert(pair_exec.d < graph->exec_info->rnum); }
296 else
297 { assert(pair_exec.d < graph->pair->exec_info->rnum); }
298 ccv_nnc_graph_exec_info_t* const exec_info = (ccv_nnc_graph_exec_info_t*)ccv_array_get(graph->exec_info, exec.d);
299 exec_info->pair_ref = pair_exec.d + 1;
300 }
301
302 static ccv_nnc_tensor_t* _ccv_nnc_any_tensor_from_tensor_multiview(ccv_nnc_tensor_multiview_t* const mv)
303 {
304 ccv_nnc_tensor_t* tensor = (ccv_nnc_tensor_t*)mv;
305 while (CCV_IS_TENSOR_MULTIVIEW(tensor))
306 {
307 ccv_nnc_tensor_multiview_t* mv = (ccv_nnc_tensor_multiview_t*)tensor;
308 const int count = 0;
309 const int off = mv->kind;
310 const int mod = mv->repeat;
311 // If reached the root.
312 tensor = CCV_NNC_MULTIVIEW_DATA(mv)[count >= off ? ((count - off) % mod) + off : count]; // Unwrap.
313 }
314 return tensor;
315 }
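In the unwrap expression at line 312, the first off (= mv->kind) views are consumed once and the next mod (= mv->repeat) views cycle forever; count is fixed at 0 here, so this helper always lands on the first view, but the general formula is worth a worked example (hypothetical off = 1, mod = 2):

/* count:  0  1  2  3  4  5 ...
 * index:  0  1  2  1  2  1 ...  (view 0 once, then views 1 and 2 alternate)
 * e.g. count = 4: 4 >= 1, so ((4 - 1) % 2) + 1 = 2. */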
316
317 void ccv_nnc_graph_exec_set_io(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size)
318 {
319 assert(exec.d < graph->exec_info->rnum);
320 assert(exec.graph == graph);
321 ccv_nnc_graph_exec_info_t* const info = (ccv_nnc_graph_exec_info_t*)ccv_array_get(graph->exec_info, exec.d);
322 // De-register from the graph if it contains multiview tensors.
323 if (info->tensor_wraps_ref)
324 _ccv_nnc_graph_deregister_tensor_wraps(graph, info->tensor_wraps_ref - 1);
325 // In case it is already executed, rewind.
326 _ccv_nnc_graph_exec_rewind(info, graph);
327 if (input_size == 0 && output_size == 0)
328 {
329 if (info->input_size > 0 || info->output_size > 0)
330 ccfree(info->inputs);
331 info->inputs = 0;
332 info->outputs = 0;
333 info->input_size = 0;
334 info->output_size = 0;
335 _ccv_nnc_graph_redo_tensor_wraps(info, graph);
336 if (info->tensor_wraps_ref)
337 ccv_nnc_graph_register_tensor_wraps(graph, info->tensor_wraps_ref - 1);
338 return;
339 }
340 if (info->inputs)
341 info->inputs = (ccv_nnc_tensor_t**)ccrealloc(info->inputs, sizeof(ccv_nnc_tensor_t*) * (input_size + output_size));
342 else
343 info->inputs = (ccv_nnc_tensor_t**)ccmalloc(sizeof(ccv_nnc_tensor_t*) * (input_size + output_size));
344 info->outputs = info->inputs + input_size;
345 if (inputs)
346 memcpy(info->inputs, inputs, sizeof(ccv_nnc_tensor_t*) * input_size);
347 if (outputs)
348 memcpy(info->outputs, outputs, sizeof(ccv_nnc_tensor_t*) * output_size);
349 int i;
350 int tensor_memory = 0, tensor_formats = 0, tensor_datatypes = 0;
351 for (i = 0; i < input_size + output_size; i++)
352 if (info->inputs[i])
353 {
354 ccv_nnc_tensor_t* const tensor = CCV_IS_TENSOR_MULTIVIEW(info->inputs[i]) ? _ccv_nnc_any_tensor_from_tensor_multiview((ccv_nnc_tensor_multiview_t*)info->inputs[i]) : info->inputs[i];
355 tensor_memory |= CCV_TENSOR_GET_MEMORY(tensor->info.type), tensor_formats |= tensor->info.format, tensor_datatypes |= tensor->info.datatype;
356 }
357 info->cmd.backend = ccv_nnc_cmd_find_backend(info->cmd, tensor_memory, tensor_formats, tensor_datatypes);
358 info->input_size = input_size;
359 info->output_size = output_size;
360 _ccv_nnc_graph_redo_tensor_wraps(info, graph);
361 // Register again if the tensor wraps exist.
362 if (info->tensor_wraps_ref)
363 ccv_nnc_graph_register_tensor_wraps(graph, info->tensor_wraps_ref - 1);
364 // Free flags.
365 if (info->input_flags)
366 {
367 ccfree(info->input_flags);
368 info->input_flags = info->output_flags = 0;
369 }
370 }
371
372 void ccv_nnc_graph_exec_add_as_affected(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, ccv_nnc_tensor_t* const update)
373 {
374 assert(CCV_IS_TENSOR_MULTIVIEW(update));
375 assert(exec.d < graph->exec_info->rnum);
376 assert(exec.graph == graph);
377 ccv_nnc_graph_exec_info_t* const info = (ccv_nnc_graph_exec_info_t*)ccv_array_get(graph->exec_info, exec.d);
378 const int register_tensor_wraps = !info->tensor_wraps_ref;
379 const int update_index = info->update_size;
380 ++info->update_size;
381 if (info->updates)
382 info->updates = (ccv_nnc_tensor_t**)ccrealloc(info->updates, sizeof(ccv_nnc_tensor_t*) * info->update_size);
383 else
384 info->updates = (ccv_nnc_tensor_t**)ccmalloc(sizeof(ccv_nnc_tensor_t*) * info->update_size);
385 info->updates[update_index] = update;
386 _ccv_nnc_graph_redo_tensor_wraps(info, graph);
387 if (register_tensor_wraps)
388 ccv_nnc_graph_register_tensor_wraps(graph, info->tensor_wraps_ref - 1);
389 }
390
391 ccv_nnc_graph_exec_t ccv_nnc_graph_exec_new(ccv_nnc_graph_t* const graph, const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size)
392 {
393 int d = graph->exec_info->rnum;
394 ccv_nnc_graph_exec_info_t info = {
395 .cmd = cmd,
396 .hint = hint,
397 .input_size = input_size,
398 .output_size = output_size,
399 };
400 assert(inputs || input_size == 0);
401 assert(outputs || output_size == 0);
402 if (input_size > 0 || output_size > 0)
403 {
404 info.inputs = (ccv_nnc_tensor_t**)ccmalloc(sizeof(ccv_nnc_tensor_t*) * (input_size + output_size));
405 info.outputs = info.inputs + input_size;
406 if (inputs)
407 memcpy(info.inputs, inputs, sizeof(ccv_nnc_tensor_t*) * input_size);
408 if (outputs)
409 memcpy(info.outputs, outputs, sizeof(ccv_nnc_tensor_t*) * output_size);
410 info.input_size = input_size;
411 info.output_size = output_size;
412 int i;
413 int tensor_memory = 0, tensor_formats = 0, tensor_datatypes = 0;
414 for (i = 0; i < input_size + output_size; i++)
415 if (info.inputs[i])
416 {
417 ccv_nnc_tensor_t* const tensor = CCV_IS_TENSOR_MULTIVIEW(info.inputs[i]) ? _ccv_nnc_any_tensor_from_tensor_multiview((ccv_nnc_tensor_multiview_t*)info.inputs[i]) : info.inputs[i];
418 tensor_memory |= CCV_TENSOR_GET_MEMORY(tensor->info.type), tensor_formats |= tensor->info.format, tensor_datatypes |= tensor->info.datatype;
419 }
420 info.cmd.backend = ccv_nnc_cmd_find_backend(info.cmd, tensor_memory, tensor_formats, tensor_datatypes);
421 }
422 _ccv_nnc_graph_redo_tensor_wraps(&info, graph);
423 // Add itself to the graph's wraps array, this will help the run time when we run the graph and do unwrapping.
424 if (info.tensor_wraps_ref)
425 ccv_nnc_graph_register_tensor_wraps(graph, info.tensor_wraps_ref - 1);
426 ccv_array_push(graph->exec_info, &info);
427 return (ccv_nnc_graph_exec_t){
428 .d = d,
429 .graph = graph,
430 };
431 }
432
433 void ccv_nnc_graph_add_carry_over(ccv_nnc_graph_t* const graph, const ccv_nnc_tensor_t* const from, const ccv_nnc_tensor_t* const to)
434 {
435 ccv_nnc_graph_tensor_carry_over_t carry_over = {
436 .from = _ccv_nnc_graph_tensor_wrap_new((ccv_nnc_tensor_multiview_t*)from),
437 .to = _ccv_nnc_graph_tensor_wrap_new((ccv_nnc_tensor_multiview_t*)to)
438 };
439 if (!graph->carry_overs)
440 graph->carry_overs = ccv_array_new(sizeof(ccv_nnc_graph_tensor_carry_over_t), 0, 0);
441 ccv_array_push(graph->carry_overs, &carry_over);
442 }
443
444 int ccv_nnc_graph_exec_concat(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t source, const ccv_nnc_graph_exec_t destination)
445 {
446 assert(graph == source.graph);
447 assert(graph == destination.graph);
448 assert(source.d < graph->exec_info->rnum);
449 assert(destination.d < graph->exec_info->rnum);
450 ccv_nnc_graph_exec_info_t* src_info = (ccv_nnc_graph_exec_info_t*)ccv_array_get(graph->exec_info, source.d);
451 if (src_info->outgoings == 0)
452 src_info->outgoings = ccv_array_new(sizeof(int32_t), 1, 0);
453 else {
454 int i;
455 // Check if this is already connected, if so, skip.
456 for (i = 0; i < src_info->outgoings->rnum; i++)
457 if (*(int*)ccv_array_get(src_info->outgoings, i) == destination.d)
458 return -1;
459 }
460 ccv_array_push(src_info->outgoings, &destination.d);
461 graph->topsorted = 0;
462 return 0;
463 }
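A minimal sketch of building a two-node chain with the functions above, assuming the CMD_NOOP() convenience macro and ccv_nnc_no_hint from the ccv_nnc headers included at the top of this file:

ccv_nnc_graph_t* graph = ccv_nnc_graph_new();
ccv_nnc_graph_exec_t n0 = ccv_nnc_graph_exec_new(graph, CMD_NOOP(), ccv_nnc_no_hint, 0, 0, 0, 0);
ccv_nnc_graph_exec_t n1 = ccv_nnc_graph_exec_new(graph, CMD_NOOP(), ccv_nnc_no_hint, 0, 0, 0, 0);
assert(ccv_nnc_graph_exec_concat(graph, n0, n1) == 0);  /* adds the edge n0 -> n1 */
assert(ccv_nnc_graph_exec_concat(graph, n0, n1) == -1); /* duplicate edges are rejected */
ccv_nnc_graph_set_sources(graph, &n0, 1);
ccv_nnc_graph_set_destinations(graph, &n1, 1);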
464
465 int ccv_nnc_graph_exec_disjoin(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t source, const ccv_nnc_graph_exec_t destination)
466 {
467 assert(graph == source.graph);
468 assert(graph == destination.graph);
469 assert(source.d < graph->exec_info->rnum);
470 assert(destination.d < graph->exec_info->rnum);
471 ccv_nnc_graph_exec_info_t* src_info = (ccv_nnc_graph_exec_info_t*)ccv_array_get(graph->exec_info, source.d);
472 if (!src_info->outgoings)
473 return -1;
474 int i;
475 // Check if this is already connected, if so, skip.
476 for (i = 0; i < src_info->outgoings->rnum; i++)
477 if (*(int*)ccv_array_get(src_info->outgoings, i) == destination.d)
478 {
479 if (i < src_info->outgoings->rnum - 1)
480 *(int*)ccv_array_get(src_info->outgoings, i) = *(int*)ccv_array_get(src_info->outgoings, src_info->outgoings->rnum - 1);
481 --src_info->outgoings->rnum;
482 graph->topsorted = 0;
483 return 0;
484 }
485 return -1;
486 }
487
488 int ccv_nnc_graph_exec_count(const ccv_nnc_graph_t* const graph)
489 {
490 return graph->exec_info ? graph->exec_info->rnum : 0;
491 }
492
493 void* ccv_nnc_graph_buffer(ccv_nnc_graph_t* const graph, int size)
494 {
495 if (graph->buffer_size >= size)
496 return graph->buffer;
497 graph->buffer_size = size;
498 graph->buffer = (graph->buffer) ? ccrealloc(graph->buffer, size) : ccmalloc(size);
499 return graph->buffer;
500 }
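ccv_nnc_graph_buffer keeps one grow-only scratch allocation per graph: a request no larger than the current capacity returns the existing pointer, while a larger one reallocs, so the pointer may move and should not be cached across calls. A sketch with hypothetical sizes:

int* scratch = (int*)ccv_nnc_graph_buffer(graph, sizeof(int) * 64);
/* ... use scratch ... */
/* A smaller request reuses the same allocation: */
assert(ccv_nnc_graph_buffer(graph, sizeof(int) * 16) == (void*)scratch);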
501
502 void ccv_nnc_graph_topsort(ccv_nnc_graph_t* const graph, int* const exec_cvt, const int exec_cvt_size)
503 {
504 assert(exec_cvt_size == graph->exec_info->rnum);
505 assert(graph->sources && graph->sources->rnum);
506 assert(graph->destinations && graph->destinations->rnum);
507 int i, j;
508 for (i = 0; i < exec_cvt_size; i++)
509 exec_cvt[i] = -1;
510 ccv_array_t* exec_info = ccv_array_new(sizeof(ccv_nnc_graph_exec_info_t), graph->exec_info->rnum, 0);
511 // If there are breakpoints, it is more complicated, we first start to the breakpoints, and then continue from the breakpoints to the destinations.
512 if (graph->breakpoint_size)
513 {
514 ccv_nnc_graph_visit_t* visit = ccv_nnc_graph_visit_new(graph, (ccv_nnc_graph_exec_info_t*)ccv_array_get(graph->exec_info, 0), graph->exec_info->rnum, (ccv_nnc_graph_exec_t*)ccv_array_get(graph->sources, 0), graph->sources->rnum, graph->breakpoints, graph->breakpoint_size, 0);
515 for (i = 0; i < graph->breakpoint_size; i++)
516 exec_cvt[graph->breakpoints[i].d] = -2; // Mark this as breakpoints, so we will skip the first round.
517 ccv_nnc_graph_visit_for(visit, (ccv_nnc_graph_exec_info_t*)ccv_array_get(graph->exec_info, 0), node, idx) {
518 assert(!node->pair_ref); // If node has a pair ref, we cannot fix it up.
519 if (exec_cvt[idx] == -2) // Skip breakpoint.
520 continue;
521 // Loop over node and push to the array.
522 ccv_array_push(exec_info, node);
523 // Go to its sub-graph to fix exec_idx
524 for (i = 0; i < node->graph_ref_size; i++)
525 {
526 const int graph_ref = CCV_NNC_GRAPH_REF(node)[i] - 1;
527 if (graph_ref >= 0)
528 {
529 ccv_nnc_graph_t* const sub_graph = *(ccv_nnc_graph_t**)ccv_array_get(graph->sub_graphs, graph_ref);
530 sub_graph->exec_idx = exec_info->rnum;
531 }
532 }
533 exec_cvt[idx] = exec_info->rnum - 1;
534 } ccv_nnc_graph_visit_endfor
535 ccv_nnc_graph_visit_free(visit);
536 graph->breakpoint_offset = exec_info->rnum;
537 visit = ccv_nnc_graph_visit_new(graph, (ccv_nnc_graph_exec_info_t*)ccv_array_get(graph->exec_info, 0), graph->exec_info->rnum, graph->breakpoints, graph->breakpoint_size, (ccv_nnc_graph_exec_t*)ccv_array_get(graph->destinations, 0), graph->destinations->rnum, 0);
)(graph->destinations)->rsize * (size_t)(0))))[_i_].d].
d)); ++_visit_->size;; } if (_heap_mem_) free(_incomings_)
; } while (0);; ((void) sizeof ((_visit_->size <= (graph
->exec_info->rnum)) ? 1 : 0), __extension__ ({ if (_visit_
->size <= (graph->exec_info->rnum)) ; else __assert_fail
("_visit_->size <= (graph->exec_info->rnum)", "ccv_nnc_graph.c"
, 537, __extension__ __PRETTY_FUNCTION__); })); _visit_; })
;
538 ccv_nnc_graph_visit_for(visit, (ccv_nnc_graph_exec_info_t*)ccv_array_get(graph->exec_info, 0), node, idx) {
539 assert(!node->pair_ref); // If node has a pair ref, we cannot fix it up.
540 // Loop over node and push to the array.
541 ccv_array_push(exec_info, node);
542 // Go to its sub-graph to fix exec_idx
543 for (i = 0; i < node->graph_ref_size; i++)
544 {
545 const int graph_ref = CCV_NNC_GRAPH_REF(node)[i] - 1;
546 if (graph_ref >= 0)
547 {
548 ccv_nnc_graph_t* const sub_graph = *(ccv_nnc_graph_t**)ccv_array_get(graph->sub_graphs, graph_ref);
549 sub_graph->exec_idx = exec_info->rnum;
550 }
551 }
552 exec_cvt[idx] = exec_info->rnum - 1;
553 } ccv_nnc_graph_visit_endfor
554 ccv_nnc_graph_visit_free(visit);
555 for (i = 0; i < graph->breakpoint_size; i++)
556 { assert(exec_cvt[graph->breakpoints[i].d] >= 0); } // All breakpoints should be assigned.
557 } else {
558 ccv_nnc_graph_visit_t* visit = ccv_nnc_graph_visit_new(graph, (ccv_nnc_graph_exec_info_t*)ccv_array_get(graph->exec_info, 0), graph->exec_info->rnum, (ccv_nnc_graph_exec_t*)ccv_array_get(graph->sources, 0), graph->sources->rnum, (ccv_nnc_graph_exec_t*)ccv_array_get(graph->destinations, 0), graph->destinations->rnum, 0);
559 ccv_nnc_graph_visit_for(visit, (ccv_nnc_graph_exec_info_t*)ccv_array_get(graph->exec_info, 0), node, idx) {
560 assert(!node->pair_ref); // If node has a pair ref, we cannot fix it up.
561 // Loop over node and push to the array.
562 ccv_array_push(exec_info, node);
563 // Go to its sub-graph to fix exec_idx
564 for (i = 0; i < node->graph_ref_size; i++)
565 {
566 const int graph_ref = CCV_NNC_GRAPH_REF(node)[i] - 1;
567 if (graph_ref >= 0)
568 {
569 ccv_nnc_graph_t* const sub_graph = *(ccv_nnc_graph_t**)ccv_array_get(graph->sub_graphs, graph_ref);
570 sub_graph->exec_idx = exec_info->rnum;
571 }
572 }
573 exec_cvt[idx] = exec_info->rnum - 1;
574 } ccv_nnc_graph_visit_endfor
575 ccv_nnc_graph_visit_free(visit);
576 }
577 assert(graph->exec_info->rnum == exec_info->rnum);
578 ccv_array_free(graph->exec_info);
579 graph->exec_info = exec_info;
580 for (i = 0; i < graph->sources->rnum; i++)
581 {
582 ccv_nnc_graph_exec_t* const source = (ccv_nnc_graph_exec_t*)ccv_array_get(graph->sources, i);
583 source->d = exec_cvt[source->d];
584 }
585 for (i = 0; i < graph->destinations->rnum; i++)
586 {
587 ccv_nnc_graph_exec_t* const destination = (ccv_nnc_graph_exec_t*)ccv_array_get(graph->destinations, i);
588 destination->d = exec_cvt[destination->d];
589 }
590 // Update all outgoings to reflect the latest.
591 for (i = 0; i < exec_info->rnum; i++)
592 {
593 ccv_nnc_graph_exec_info_t* const info = (ccv_nnc_graph_exec_info_t*)ccv_array_get(exec_info, i);
594 if (info->outgoings)
595 for (j = 0; j < info->outgoings->rnum; j++)
596 *(int*)ccv_array_get(info->outgoings, j) = exec_cvt[*(int*)ccv_array_get(info->outgoings, j)];
597 }
598 graph->topsorted = 1;
599}
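/* Orientation note (not part of the original file): the function above rebuilds
 * exec_info in topological order, then rewrites every stored index (sources,
 * destinations, outgoings) through the exec_cvt conversion table. A minimal
 * sketch of that remapping idea, with hypothetical names, independent of the
 * ccv_array_t containers: */
static void sketch_remap_indices(int* const indices, const int size, const int* const exec_cvt)
{
	int i;
	for (i = 0; i < size; i++)
		indices[i] = exec_cvt[indices[i]]; /* Every stored reference must be rewritten, or it dangles. */
}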
600
601typedef struct {
602 int device_id;
603 int exec_idx;
604 ccv_array_t* signal_set;
605 ccv_array_t* command_set; // The set of commands executed on this stream. In case there is a tie (on rank), we will check this.
606} ccv_nnc_stream_data_t;
607
608static void _ccv_nnc_graph_schedule_assign_signals(ccv_array_t* const incoming, ccv_nnc_graph_exec_schedule_t* const node, ccv_array_t* const stream_data, int* const signal_size, ccv_nnc_graph_exec_schedule_t* const exec_info, const int exec_info_size)
609{
610 assert(incoming->rnum > 0);
611 int i, j, k;
612 int wait_size = 0, max_wait_size = 0;
613 for (i = 0; i < incoming->rnum; i++)
614 {
615 const int incoming_idx = *(int*)ccv_array_get(incoming, i);
616 ccv_nnc_graph_exec_schedule_t* const incoming_exec_info = exec_info + incoming_idx;
617 assert(incoming_exec_info->stream_size > 0);
618 max_wait_size += incoming_exec_info->stream_size;
619 }
620 int waits[ccv_max(1, max_wait_size)];
621 assert(node->stream_size > 0);
622 for (i = 0; i < incoming->rnum; i++)
623 {
624 const int incoming_idx = *(int*)ccv_array_get(incoming, i);
625 assert(incoming_idx < exec_info_size);
626 assert(incoming_idx >= 0);
627 ccv_nnc_graph_exec_schedule_t* const incoming_exec_info = exec_info + incoming_idx;
628 assert(incoming_exec_info->stream_size > 0);
629 int stream_synced = 1;
630 // If the current node's streams are a subset of the incoming node's streams, there
631 // is no need to sync with a signal, because we are already synced with the incoming.
632 for (j = 0; stream_synced && j < node->stream_size; j++)
633 {
634 const int s = SCHEDULE_STREAMS(*node)[j];
635 assert(s >= 0);
636 int flag = 0;
637 for (k = 0; !flag && k < incoming_exec_info->stream_size; k++)
638 flag = (SCHEDULE_STREAMS(*incoming_exec_info)[k] == s);
639 stream_synced = flag;
640 }
641 if (stream_synced)
642 continue;
643 // Otherwise, find the streams we need to sync with, and create signals for these.
644 for (j = 0; j < incoming_exec_info->stream_size; j++)
645 {
646 const int s = SCHEDULE_STREAMS(*incoming_exec_info)[j];
647 assert(s >= 0);
648 int flag = 0;
649 for (k = 0; !flag && k < node->stream_size; k++)
650 flag = (SCHEDULE_STREAMS(*node)[k] == s);
651 if (!flag) // Need to have a signal.
652 {
653 if (SCHEDULE_SIGNALS(*incoming_exec_info)[j] < 0)
654 SCHEDULE_SIGNALS(*incoming_exec_info)[j] = (*signal_size)++;
655 else {
656 int flag = 0;
657 // If any of the streams the current node runs on has already seen this signal, we are good already.
658 for (k = 0; !flag && k < node->stream_size; k++)
659 {
660 assert(SCHEDULE_STREAMS(*node)[k] >= 0);
661 ccv_nnc_stream_data_t* const data = (ccv_nnc_stream_data_t*)ccv_array_get(stream_data, SCHEDULE_STREAMS(*node)[k]);
662 flag = (data->signal_set && ccv_array_find_int(data->signal_set, SCHEDULE_SIGNALS(*incoming_exec_info)[j]));
663 }
664 if (flag)
665 continue;
666 }
667 // Otherwise, we need to wait for this. Currently, our granularity is to wait on all streams.
668 waits[wait_size++] = SCHEDULE_SIGNALS(*incoming_exec_info)[j];
669 // All streams on this node have seen this signal.
670 for (k = 0; k < node->stream_size; k++)
671 {
672 ccv_nnc_stream_data_t* const data = (ccv_nnc_stream_data_t*)ccv_array_get(stream_data, SCHEDULE_STREAMS(*node)[k]);
673 if (!data->signal_set)
674 data->signal_set = ccv_array_new(sizeof(int), 0, 0);
675 ccv_array_push(data->signal_set, &SCHEDULE_SIGNALS(*incoming_exec_info)[j]);
676 }
677 }
678 }
679 }
680 node->wait_size = wait_size;
681 if (wait_size > 0)
682 {
683 node->waits = node->waits ? ccrealloc(node->waits, sizeof(int) * wait_size) : ccmalloc(sizeof(int) * wait_size);
684 memcpy(node->waits, waits, sizeof(int) * wait_size);
685 }
686}
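/* Orientation note (not part of the original file): the function above only
 * records a wait when a predecessor ran on a stream the current node does not
 * share; work on a shared stream is already serialized in-order. A condensed,
 * hedged sketch of that membership test with hypothetical plain-array names: */
static int sketch_needs_signal(const int pred_stream, const int* const node_streams, const int node_stream_size)
{
	int k;
	for (k = 0; k < node_stream_size; k++)
		if (node_streams[k] == pred_stream)
			return 0; /* Shared stream: implicit ordering, no signal/wait needed. */
	return 1; /* Disjoint stream: emit a signal on it and wait before launching. */
}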
687
688typedef struct {
689 int rank;
690 ccv_array_t* outgoings;
691} ccv_nnc_incoming_t;
692
693static int _ccv_nnc_device_ids_for_stream_data(ccv_nnc_graph_exec_info_t* const node, const int device_id, ccv_array_t* const stream_data, int* const device_ids, const int max_device_id_size)
694{
695 // TODO: I need to re-think whether this is GPU only or not.
696 int device_id_size = ccv_nnc_device_ids_for_io(node->inputs, node->input_size, node->outputs, node->output_size, CCV_TENSOR_GPU_MEMORY, device_ids, max_device_id_size);
697 if (device_id_size == 0)
698 {
699 // If there is a default stream data, use its device id. Otherwise, use the device id passed in (it will become the default data's device id).
700 if (stream_data->rnum > 0)
701 {
702 ccv_nnc_stream_data_t* const default_data = (ccv_nnc_stream_data_t*)ccv_array_get(stream_data, 0);
703 device_ids[0] = default_data->device_id;
704 } else
705 device_ids[0] = device_id >= 0 ? device_id : 0;
706 device_id_size = 1;
707 }
708 return device_id_size;
709}
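/* Orientation note (not part of the original file): when no GPU tensor pins a
 * device, the function above falls back first to the device of the default
 * stream data (index 0), then to the caller's device_id, then to device 0. A
 * hedged sketch of that fallback chain with hypothetical arguments: */
static int sketch_fallback_device(const int has_default_data, const int default_device, const int device_id)
{
	if (has_default_data)
		return default_device; /* Prefer the device of stream data 0. */
	return device_id >= 0 ? device_id : 0; /* Else the caller's hint, else device 0. */
}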
710
711void ccv_nnc_graph_static_schedule_free(ccv_nnc_graph_static_schedule_t* const schedule)
712{
713 int i;
714 ccv_nnc_graph_exec_schedule_t* const schd_info = schedule->exec_info;
715 for (i = 0; i < schedule->exec_info_size; i++)
716 {
717 if (schd_info[i].stream_size > 1)
718 ccfree(schd_info[i]._heap_streams);
719 if (schd_info[i].waits)
720 ccfree(schd_info[i].waits);
721 }
722 if (schedule->stream_1s)
723 ccfree(schedule->stream_1s);
724 if (schedule->waits)
725 ccfree(schedule->waits);
726 if (schedule->psort)
727 ccfree(schedule->psort);
728 if (schedule->begin)
729 ccv_nnc_stream_signal_free(schedule->begin);
730 if (schedule->end)
731 ccv_nnc_stream_signal_free(schedule->end);
732 ccfree(schedule);
733}
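/* Orientation note (not part of the original file): the stream_size > 1 test
 * above works because the schedule uses a small-size optimization, visible in
 * the SCHEDULE_STREAMS/SCHEDULE_SIGNALS expansions throughout this report: a
 * single stream id is stored inline in the node, larger sets on the heap. A
 * minimal sketch of the pattern (field names hypothetical): */
typedef struct {
	int stream_size;
	int inline_stream;  /* Used when stream_size <= 1; nothing to free. */
	int* heap_streams;  /* Used when stream_size > 1; must be freed. */
} sketch_schd_t;

static int* sketch_streams(sketch_schd_t* const s)
{
	return s->stream_size <= 1 ? &s->inline_stream : s->heap_streams;
}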
734
735static ccv_nnc_graph_static_schedule_t* _ccv_nnc_graph_static_schedule_new(ccv_nnc_graph_t* const graph, const int stream_type, const int device_id, ccv_nnc_stream_context_t* const stream_context, const ccv_nnc_graph_exec_t* const _sources, const int _source_size, const ccv_nnc_graph_exec_t* const _destinations, const int _destination_size)
736{
737 assert(graph->sources && graph->sources->rnum);
738 assert(graph->destinations && graph->destinations->rnum);
739 assert(graph->topsorted); // Only support this on a topsorted graph.
740 const int exec_info_size = graph->exec_info->rnum;
741 assert(exec_info_size > 0);
742 const ccv_nnc_graph_exec_t* const sources = _sources == 0 ? (ccv_nnc_graph_exec_t*)ccv_array_get(graph->sources, 0) : _sources;
743 const int source_size = _sources == 0 ? graph->sources->rnum : _source_size;
744 if (!_sources)
745 { assert(_source_size == 0); }
746 const ccv_nnc_graph_exec_t* const destinations = _destinations == 0 ? (ccv_nnc_graph_exec_t*)ccv_array_get(graph->destinations, 0) : _destinations;
747 const int destination_size = _destinations == 0 ? graph->destinations->rnum : _destination_size;
748 if (!_destinations)
749 { assert(_destination_size == 0); }
750 const int root_schedule = (_sources == 0 && _destinations == 0);
751 ccv_nnc_graph_static_schedule_t* const schedule = cccalloc(1, sizeof(ccv_nnc_graph_static_schedule_t) + sizeof(ccv_nnc_graph_exec_schedule_t) * (exec_info_size - 1));
752 schedule->exec_info_size = exec_info_size;
753 ccv_nnc_graph_exec_schedule_t* const schd_info = schedule->exec_info;
754 ccv_nnc_graph_exec_info_t* const exec_info = (ccv_nnc_graph_exec_info_t*)ccv_array_get(graph->exec_info, 0);
755 ccv_nnc_graph_visit_t* visit = ccv_nnc_graph_visit_new(graph, exec_info, exec_info_size, sources, source_size, destinations, destination_size, 0);
756 if (!root_schedule)
757 {
758 // If this is not a root schedule, we need to do a partial topsort.
759 int psort_size = 0;
760 ccv_nnc_graph_visit_for(visit, exec_info, node, idx) {
761 ++psort_size;
762 } ccv_nnc_graph_visit_endfor
763 schedule->psort = (int*)ccmalloc(sizeof(int) * psort_size);
764 schedule->psort_size = psort_size;
765 psort_size = 0;
766 ccv_nnc_graph_visit_for(visit, exec_info, node, idx) {
767 schedule->psort[psort_size++] = idx;
768 } ccv_nnc_graph_visit_endfor
769 }
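/* Orientation note (not part of the original file): a partial (non-root)
 * schedule remembers the visited subset in psort using a count-then-fill pass
 * over the same visit, which sizes the allocation exactly without a growable
 * array. A sketch of that idiom, assuming a hypothetical order[]/order_size
 * pair standing in for the visit macro and <stdlib.h> for malloc: */
static int* sketch_psort(const int* const order, const int order_size)
{
	int n = 0, t;
	for (t = 0; t < order_size; t++)
		++n;                           /* First pass: count visited nodes. */
	int* const psort = (int*)malloc(sizeof(int) * n);
	for (n = 0, t = 0; t < order_size; t++)
		psort[n++] = order[t];         /* Second pass: record the order. */
	return psort;
}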
770 int i, j, k;
771 // Generate exec dependencies (or, in other words, partial ordering of executions).
772 ccv_sparse_matrix_t* exec_dep = ccv_sparse_matrix_new(exec_info_size, exec_info_size, CCV_32S | CCV_C1, CCV_SPARSE_ROW_MAJOR, 0);
773 int* buf = (int*)ccmalloc(sizeof(int) * exec_info_size * 2);
774 int buf_size;
775#define for_block(x, val) \
776 do { \
777 if (((int32_t*)val)[0] > 0) \
778 { \
779 buf[buf_size * 2] = x; \
780 buf[buf_size * 2 + 1] = ((int32_t*)val)[0] + 1; \
781 ++buf_size; \
782 } \
783 } while (0)
784 for (i = 0; i < exec_info_size; i++)
785 schd_info[i].stream_size = -1;
786 ccv_nnc_graph_visit_for(visit, exec_info, node, idx, term) {
787 buf_size = 0; /* save all its parent deps to this buffer */
788 ccv_sparse_matrix_vector_t* vector = ccv_get_sparse_matrix_vector(exec_dep, idx);
789 schd_info[idx].stream_size = 0;
790 if (vector)
791 CCV_SPARSE_VECTOR_FOREACH(exec_dep, vector, for_block);
792 if (!node->outgoings)
793 continue;
794 for (i = 0; i < node->outgoings->rnum; i++)
795 {
796 int outgoing = *(int*)ccv_array_get(node->outgoings, i);
797 const int32_t one = 1;
798 ccv_numeric_data_t cell = ccv_get_sparse_matrix_cell(exec_dep, outgoing, idx);
799 /* If not found, set it. If the current node is the destination node, there is no
800 * need to set itself as a parent of subsequent nodes because of its terminal nature. */
801 if (!term && (!cell.i32 || cell.i32[0] == 0))
802 ccv_set_sparse_matrix_cell(exec_dep, outgoing, idx, &one);
803 for (j = 0; j < buf_size; j++) /* set with all idx's dependencies as well */
804 {
805 ccv_numeric_data_t cell = ccv_get_sparse_matrix_cell(exec_dep, outgoing, buf[j * 2]);
806 /* If not found, set */
807 if (!cell.i32 || cell.i32[0] == 0)
808 ccv_set_sparse_matrix_cell(exec_dep, outgoing, buf[j * 2], &buf[j * 2 + 1]);
809 else {
810 /* Otherwise, set to the longest one */
811 int32_t dep = ccv_max(cell.i32[0], buf[j * 2 + 1]);
812 ccv_set_sparse_matrix_cell(exec_dep, outgoing, buf[j * 2], &dep);
813 }
814 }
815 }
816 } ccv_nnc_graph_visit_endfor
817#undef for_block
818 ccfree(buf);
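/* Orientation note (not part of the original file): the loop above pushes each
 * node's known ancestors (with longest path lengths) onto every outgoing node,
 * so exec_dep ends up holding, at cell (child, ancestor), the longest hop
 * count between the two. A dense-matrix sketch of the same recurrence (the
 * real code uses ccv_sparse_matrix_t and skips terminal nodes); names here
 * are hypothetical: */
static void sketch_propagate_deps(int* const dep, const int n, const int parent, const int child)
{
	int b;
	if (dep[child * n + parent] == 0)
		dep[child * n + parent] = 1; /* Direct edge: distance 1. */
	for (b = 0; b < n; b++) /* Inherit the parent's ancestors, one hop farther. */
		if (dep[parent * n + b] > 0 && dep[parent * n + b] + 1 > dep[child * n + b])
			dep[child * n + b] = dep[parent * n + b] + 1; /* Keep the longest path. */
}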
819 // Algorithm to allocate signals and streams for this graph.
820 ccv_array_t* const stream_data = ccv_array_new(sizeof(ccv_nnc_stream_data_t), 0, 0);
821 ccv_array_t** const outgoings = cccalloc(exec_info_size, sizeof(ccv_array_t*));
822 ccv_nnc_incoming_t* const incomings = cccalloc(exec_info_size, sizeof(ccv_nnc_incoming_t));
823 int max_device_id_size = 1;
824 // Filter out outgoing nodes that we will be able to reach afterwards anyway.
825 ccv_nnc_graph_visit_for(visit, exec_info, node, idx) {
826 max_device_id_size = ccv_max(node->input_size + node->output_size, max_device_id_size);
827 if (node->outgoings)
828 {
829 outgoings[idx] = ccv_array_new(sizeof(int), 0, 0);
830 for (i = 0; i < node->outgoings->rnum; i++)
831 {
832 const int di = *(int*)ccv_array_get(node->outgoings, i);
833 // Skip if we haven't accessed this exec.
834 if (schd_info[di].stream_size < 0)
835 continue;
836 int flag = 0;
837 for (j = 0; !flag && j < node->outgoings->rnum; j++)
838 {
839 if (j != i)
840 {
841 const int dj = *(int*)ccv_array_get(node->outgoings, j);
842 ccv_numeric_data_t cell = ccv_get_sparse_matrix_cell(exec_dep, di, dj);
843 flag = (cell.i32 && cell.i32[0]);
844 }
845 }
846 if (!flag)
847 {
848 ccv_array_push(outgoings[idx], &di);
849 if (!incomings[di].outgoings)
850 incomings[di].outgoings = ccv_array_new(sizeof(int), 1, 0);
851 ccv_array_push(incomings[di].outgoings, &idx);
852 }
853 }
854 }
855 } ccv_nnc_graph_visit_endfor
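/* Orientation note (not part of the original file): the loop above performs a
 * transitive reduction: an edge idx -> di is dropped when a sibling dj already
 * reaches di according to exec_dep, because the ordering is then implied and
 * no extra stream synchronization is needed. Sketch against the dense dep
 * matrix from the previous note; names hypothetical: */
static int sketch_edge_is_redundant(const int* const dep, const int n, const int* const outs, const int out_size, const int i)
{
	int j, flag = 0;
	for (j = 0; !flag && j < out_size; j++)
		if (j != i) /* Is outs[j] an ancestor of outs[i]? Then idx -> outs[i] is implied. */
			flag = (dep[outs[i] * n + outs[j]] > 0);
	return flag;
}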
856#define visitor(node, idx, _) \
857 if (node->outgoings) \
858 for (i = 0; i < node->outgoings->rnum; i++) \
859 { \
860 const int d = *(int*)ccv_array_get(node->outgoings, i); \
861 node->rank = ccv_max(incomings[d].rank + 1, node->rank); \
862 }
863 CCV_NNC_GRAPH_VISIT(graph, incomings, exec_info_size, destinations, destination_size, sources, source_size, 0, visitor);
864#undef visitor
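/* The expansion above (reconstructed for readability) is a pruned reverse-topological
 * traversal in four passes, following the names used in the expansion: pass 1 (r = 1)
 * floods from the destinations over ->outgoings and counts each reached node's incoming
 * edges in c; pass 2 (r = 2) repeats the flood while laying the reverse edges out into
 * _edges_; pass 3 (r = 3) floods from the sources along _edges_, so only nodes on a
 * source-to-destination path stay eligible; the final pass emits nodes through visitor()
 * as their pending count c drops to 0, marking visited sources with r = 4. */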
865 int device_ids[max_device_id_size];
866 int outgoing_device_ids[max_device_id_size];
867 int signal_size = 0;
868 ccv_nnc_graph_visit_for(visit, exec_info, node, idx) {
869 // Go through the incomings.
870 const int device_id_size = _ccv_nnc_device_ids_for_stream_data(node, device_id, stream_data, device_ids, max_device_id_size);
871 if (schd_info[idx].stream_size == 0)
872 {
873 schd_info[idx].stream_size = device_id_size; // At least the same size as device_id_size.
874 if (device_id_size > 1)
875 {
876 schd_info[idx]._heap_streams = (int*)ccmalloc(sizeof(int) * device_id_size * 2);
877 schd_info[idx]._heap_signals = (schd_info[idx]._heap_streams + device_id_size);
878 }
879 for (i = 0; i < device_id_size; i++)
880 SCHEDULE_STREAMS(schd_info[idx])[i] = -1, SCHEDULE_SIGNALS(schd_info[idx])[i] = -1;
881 }
882 for (i = 0; i < device_id_size; i++)
883 // Go through until the end to assign streams.
884 if (SCHEDULE_STREAMS(schd_info[idx])[i] < 0)
885 {
886 int stream_idx = -1;
887 int stream_has_command = 0;
888 // First, find a good stream in stream data (the stream is good if it can be recycled, and it has the same command).
889 // Otherwise, we prefer a usable stream (it doesn't have the command, but it can be recycled).
890 for (j = 0; (stream_idx < 0 || !stream_has_command) && j < stream_data->rnum; j++)
891 {
892 ccv_nnc_stream_data_t* const data = (ccv_nnc_stream_data_t*)ccv_array_get(stream_data, j);
893 if (data->device_id == device_ids[i])
894 {
895 const ccv_numeric_data_t cell = ccv_get_sparse_matrix_cell(exec_dep, idx, data->exec_idx);
896 // If there is a path to conclude that exec_idx is before idx, then we can reuse
897 // this stream. Otherwise the work in this "empty stream" could still be ongoing,
898 // and we may delay the following work unnecessarily.
899 if (cell.i32 && cell.i32[0] > 0)
900 {
901 if (ccv_array_find_uint(data->command_set, node->cmd.cmd))
902 stream_idx = j, stream_has_command = 1;
903 else if (stream_idx < 0) // Otherwise, only assign the stream idx if it is not assigned yet.
904 stream_idx = j;
905 }
906 }
907 }
908 if (stream_idx < 0)
909 {
910 stream_idx = stream_data->rnum;
911 const ccv_nnc_stream_data_t data = {
912 .device_id = device_ids[i],
913 };
914 ccv_array_push(stream_data, &data);
915 }
916 assert(stream_idx >= 0);
917 ccv_nnc_stream_data_t* const data = (ccv_nnc_stream_data_t*)ccv_array_get(stream_data, stream_idx);
918 if (!data->command_set)
919 data->command_set = ccv_array_new(sizeof(uint32_t), 1, 0);
920 SCHEDULE_STREAMS(schd_info[idx])[i] = stream_idx;
921 ccv_array_add_unique_uint(data->command_set, node->cmd.cmd);
922 // Assign all subsequent nodes to use this stream.
923 int outgoing_idx = idx;
924 while (outgoings[outgoing_idx] && outgoings[outgoing_idx]->rnum)
925 {
926 int highest_rank = -1;
927 int highest_idx = -1;
928 int stream_n = -1;
929 int stream_has_command = 0;
930 for (j = 0; j < outgoings[outgoing_idx]->rnum; j++)
931 {
932 const int d = *(int*)ccv_array_get(outgoings[outgoing_idx], j);
933 // This is not outside of our scope at this point.
934 assert(schd_info[d].stream_size >= 0);
935 ccv_nnc_graph_exec_info_t* const outgoing_node = exec_info + d;
936 const int outgoing_device_id_size = _ccv_nnc_device_ids_for_stream_data(outgoing_node, device_id, stream_data, outgoing_device_ids, max_device_id_size);
937 if (schd_info[d].stream_size == 0)
938 {
939 schd_info[d].stream_size = outgoing_device_id_size; // At least the same size as outgoing_device_id_size.
940 if (outgoing_device_id_size > 1)
941 {
942 schd_info[d]._heap_streams = (int*)ccmalloc(sizeof(int) * outgoing_device_id_size * 2);
943 schd_info[d]._heap_signals = (schd_info[d]._heap_streams + outgoing_device_id_size);
944 }
945 for (k = 0; k < outgoing_device_id_size; k++)
946 SCHEDULE_STREAMS(schd_info[d])[k] = -1, SCHEDULE_SIGNALS(schd_info[d])[k] = -1;
947 }
948 assert(schd_info[d].stream_size == outgoing_device_id_size);
949 for (k = 0; k < outgoing_device_id_size; k++)
950 // If it should be on the same device and the stream is not yet assigned.
951 if (outgoing_device_ids[k] == device_ids[i] &&
952 SCHEDULE_STREAMS(schd_info[d])[k] < 0 &&
953 (incomings[d].rank > highest_rank ||
954 (incomings[d].rank == highest_rank &&
955 !stream_has_command && ccv_array_find_uint(data->command_set, outgoing_node->cmd.cmd))))
956 {
957 highest_rank = incomings[d].rank;
958 highest_idx = d;
959 stream_n = k;
960 // This is 1 if the rank is the same (the tie is already broken); if the rank differs, we need to compute it.
961 stream_has_command = (incomings[d].rank == highest_rank || ccv_array_find_uint(data->command_set, outgoing_node->cmd.cmd));
962 }
963 }
964 if (highest_idx >= 0)
965 {
966 outgoing_idx = highest_idx;
967 ccv_nnc_graph_exec_info_t* const outgoing_node = exec_info + outgoing_idx;
968 assert(stream_n >= 0);
969 SCHEDULE_STREAMS(schd_info[outgoing_idx])[stream_n] = stream_idx;
970 ccv_array_add_unique_uint(data->command_set, outgoing_node->cmd.cmd);
971 } else
972 break;
973 }
974 data->exec_idx = outgoing_idx;
975 }
976 } ccv_nnc_graph_visit_endfor
977 // Go through to assign signals when necessary.
978 ccv_nnc_graph_visit_for(visit, exec_info, node, idx) {
979 if (incomings[idx].outgoings && incomings[idx].outgoings->rnum)
980 _ccv_nnc_graph_schedule_assign_signals(incomings[idx].outgoings, schd_info + idx, stream_data, &signal_size, schd_info, exec_info_size);
981 } ccv_nnc_graph_visit_endfor
982 for (i = 0; i < exec_info_size; i++)
983 if (outgoings[i])
984 ccv_array_free(outgoings[i]);
985 ccfree(outgoings);
986 ccv_matrix_free(exec_dep);
987 ccv_nnc_stream_data_t* const default_data = (ccv_nnc_stream_data_t*)ccv_array_get(stream_data, 0);
988 if (device_id >= 0)
989 {
990 // If the default stream (stream 0) is not the same as the desired stream, swap it with the one that is.
991 if (default_data->device_id != device_id)
992 {
993 int exchange_stream_idx = -1;
994 // Find the stream idx to exchange.
995 ccv_nnc_graph_visit_for(visit, exec_info, node, idx) {
996 int flag = 0;
997 for(i = 0; !flag && i < schd_info[idx].stream_size; i++)
998 {
999 const int stream_idx = SCHEDULE_STREAMS(schd_info[idx])[i];
1000 ccv_nnc_stream_data_t* const data = (ccv_nnc_stream_data_t*)ccv_array_get(stream_data, stream_idx);
1001 if (data->device_id == device_id)
1002 {
1003 exchange_stream_idx = stream_idx;
1004 flag = 1;
1005 }
1006 }
1007 if (flag)
1008 break;
1009 } ccv_nnc_graph_visit_endfor
1010 assert(exchange_stream_idx >= 0);
1011 ccv_nnc_graph_visit_for(visit, exec_info, node, idx) {
1012 for (i = 0; i < schd_info[idx].stream_size; i++)
1013 if (SCHEDULE_STREAMS(schd_info[idx])[i] == 0)
1014 SCHEDULE_STREAMS(schd_info[idx])[i] = -1;
1015 } ccv_nnc_graph_visit_endfor
1016 ccv_nnc_graph_visit_for(visit, exec_info, node, idx) {
1017 for (i = 0; i < schd_info[idx].stream_size; i++)
1018 if (SCHEDULE_STREAMS(schd_info[idx])[i] == exchange_stream_idx)
1019 SCHEDULE_STREAMS(schd_info[idx])[i] = 0;
1020 } ccv_nnc_graph_visit_endfor
1021 ccv_nnc_graph_visit_for(visit, exec_info, node, idx) {
1022 for (i = 0; i < schd_info[idx].stream_size; i++)
1023 if (SCHEDULE_STREAMS(schd_info[idx])[i] == -1)
1024 SCHEDULE_STREAMS(schd_info[idx])[i] = exchange_stream_idx;
1025 } ccv_nnc_graph_visit_endfor
1026 ((ccv_nnc_stream_data_t*)ccv_array_get(stream_data, exchange_stream_idx))->device_id = default_data->device_id;
1027 default_data->device_id = device_id;
1028 }
1029 }
1030 int graph_stream_1_size = 0;
1031 for (i = 0; i < source_size; i++)
1032 {
1033 const int idx = sources[i].d;
1034 // If it has incoming nodes, check whether these are on stream 0.
1035 if (incomings[idx].outgoings && incomings[idx].outgoings->rnum)
1036 {
1037 int flag = 0;
1038 const ccv_array_t* const incoming = incomings[idx].outgoings;
1039 for (j = 0; !flag && j < incoming->rnum; j++)
1040 {
1041 const int incoming_idx = *(int*)ccv_array_get(incoming, j);
1042 for (k = 0; !flag && k < schd_info[incoming_idx].stream_size; k++)
1043 flag = (SCHEDULE_STREAMS(schd_info[incoming_idx])[k] == 0); // If this is the default stream, we already have a good start.
1044 }
1045 if (flag)
1046 continue;
1047 }
1048 for (j = 0; j < schd_info[idx].stream_size; j++)
1049 if (SCHEDULE_STREAMS(schd_info[idx])[j] != 0) // If this is not the default stream, we need an explicit begin signal to start.
1050 ++graph_stream_1_size;
1051 }
1052 if (graph_stream_1_size > 0)
1053 {
1054 schedule->stream_1s = ccmalloc(sizeof(int) * graph_stream_1_size);
1055 graph_stream_1_size = 0;
1056 for (i = 0; i < source_size; i++)
1057 {
1058 const int idx = sources[i].d;
1059 // If it has incoming nodes, check whether these are on stream 0.
1060 if (incomings[idx].outgoings && incomings[idx].outgoings->rnum)
1061 {
1062 int flag = 0;
1063 const ccv_array_t* const incoming = incomings[idx].outgoings;
1064 for (j = 0; !flag && j < incoming->rnum; j++)
1065 {
1066 const int incoming_idx = *(int*)ccv_array_get(incoming, j);
1067 for (k = 0; !flag && k < schd_info[incoming_idx].stream_size; k++)
1068 flag = (SCHEDULE_STREAMS(schd_info[incoming_idx])[k] == 0); // If this is the default stream, we already have a good start.
1069 }
1070 if (flag)
1071 continue;
1072 }
1073 for (j = 0; j < schd_info[idx].stream_size; j++)
1074 if (SCHEDULE_STREAMS(schd_info[idx])[j] != 0) // If this is not the default stream, we need an explicit begin signal to start.
1075 {
1076 const int stream_idx = SCHEDULE_STREAMS(schd_info[idx])[j];
1077 int flag = 0;
1078 for (k = 0; !flag && k < graph_stream_1_size; k++)
1079 flag = (stream_idx == schedule->stream_1s[k]);
1080 if (!flag)
1081 schedule->stream_1s[graph_stream_1_size++] = stream_idx;
1082 }
1083 }
1084 schedule->stream_1_size = graph_stream_1_size;
1085 }
1086 for (i = 0; i < exec_info_size; i++)
1087 if (incomings[i].outgoings)
1088 ccv_array_free(incomings[i].outgoings);
1089 ccfree(incomings);
1090 int graph_wait_size = 0;
1091 for (i = 0; i < destination_size; i++)
1092 {
1093 const int idx = destinations[i].d;
1094 for (j = 0; j < schd_info[idx].stream_size; j++)
1095 if (SCHEDULE_STREAMS(schd_info[idx])[j] != 0) // If this exec_info doesn't end on the default stream, we need to wait.
1096 ++graph_wait_size;
1097 }
1098 if (graph_wait_size > 0)
1099 {
1100 schedule->waits = ccmalloc(sizeof(int) * graph_wait_size);
1101 graph_wait_size = 0;
1102 for (i = 0; i < destination_size; i++)
1103 {
1104 const int idx = destinations[i].d;
1105 for (j = 0; j < schd_info[idx].stream_size; j++)
1106 if (SCHEDULE_STREAMS(schd_info[idx])[j] != 0) // If this exec_info doesn't end on the default stream, we need to wait.
1107 {
1108 ccv_nnc_stream_data_t* const default_stream_data = (ccv_nnc_stream_data_t*)ccv_array_get(stream_data, 0);
1109 if (SCHEDULE_SIGNALS(schd_info[idx])[j] < 0)
1110 SCHEDULE_SIGNALS(schd_info[idx])[j] = signal_size++;
1111 else if (default_stream_data->signal_set && ccv_array_find_int(default_stream_data->signal_set, SCHEDULE_SIGNALS(schd_info[idx])[j]))
1112 continue;
1113 schedule->waits[graph_wait_size++] = SCHEDULE_SIGNALS(schd_info[idx])[j];
1114 }
1115 }
1116 schedule->wait_size = graph_wait_size;
1117 }
1118 for (i = 0; i < stream_data->rnum; i++)
1119 {
1120 ccv_nnc_stream_data_t* const data = (ccv_nnc_stream_data_t*)ccv_array_get(stream_data, i);
1121 if (data->signal_set)
1122 ccv_array_free(data->signal_set);
1123 assert(data->command_set);
1124 ccv_array_free(data->command_set);
1125 }
1126 // Allocate streams & signals
1127 int default_stream_type = stream_type;
1128 CCV_STREAM_SET_DEVICE_ID(default_stream_type, default_data->device_id);
1129 if (root_schedule)
1130 {
1131 assert(!graph->streams);
1132 graph->stream_size = stream_data->rnum;
1133 graph->streams = (ccv_nnc_stream_context_t**)ccmalloc(sizeof(ccv_nnc_stream_context_t*) * graph->stream_size);
1134 graph->block_stream_tasks = (co_routine_t**)cccalloc(graph->stream_size, sizeof(co_routine_t*));
1135 if (stream_context)
1136 graph->streams[0] = stream_context;
1137 for (i = (stream_context ? 1 : 0); i < stream_data->rnum; i++)
1138 {
1139 ccv_nnc_stream_data_t* const data = (ccv_nnc_stream_data_t*)ccv_array_get(stream_data, i);
1140 int type = stream_type;
1141 CCV_STREAM_SET_DEVICE_ID(type, data->device_id);
1142 graph->streams[i] = ccv_nnc_stream_context_new(type);
1143 }
1144 graph->signal_size = signal_size;
1145 graph->signals = (ccv_nnc_stream_signal_t**)cccalloc(signal_size, sizeof(ccv_nnc_stream_signal_t*));
1146 ccv_nnc_graph_visit_for(visit, exec_info, node, idx) {
1147 for (i = 0; i < schd_info[idx].stream_size; i++)
1148 if (SCHEDULE_SIGNALS(schd_info[idx])[i] >= 0)
1149 {
1150 const int signal = SCHEDULE_SIGNALS(schd_info[idx])[i];
1151 if (!graph->signals[signal])
1152 {
1153 const ccv_nnc_stream_data_t* const data = (ccv_nnc_stream_data_t*)ccv_array_get(stream_data, SCHEDULE_STREAMS(schd_info[idx])[i]);
1154 int type = stream_type;
1155 CCV_STREAM_SET_DEVICE_ID(type, data->device_id);
1156 graph->signals[signal] = ccv_nnc_stream_signal_new(type);
1157 }
1158 }
1159 } ccv_nnc_graph_visit_endfor
1160 } else {
1161 assert(graph->streams);
1162 assert(graph->stream_size >= stream_data->rnum);
1163 // Map each scheduled stream onto an already-allocated stream of the matching type.
1164 int* const stream_idxs = (int*)ccmalloc(sizeof(int) * (stream_data->rnum + signal_size));
1165 uint64_t* const stream_used = (uint64_t*)cccalloc(((graph->stream_size + 63) >> 6) + ((graph->signal_size + 63) >> 6), sizeof(uint64_t));
1166 for (i = 0; i < stream_data->rnum; i++)
1167 {
1168 ccv_nnc_stream_data_t* const data = (ccv_nnc_stream_data_t*)ccv_array_get(stream_data, i);
1169 int type = stream_type;
1170 CCV_STREAM_SET_DEVICE_ID(type, data->device_id);
1171 for (j = 0; j < graph->stream_size; j++)
1172 if (!(stream_used[j >> 6] & ((uint64_t)1 << (j & 63))))
1173 {
1174 const int stream_type = ccv_nnc_stream_context_type(graph->streams[j]);
1175 if (stream_type == type)
1176 {
1177 stream_idxs[i] = j;
1178 stream_used[j >> 6] |= ((uint64_t)1 << (j & 63));
1179 break;
1180 }
1181 }
1182 }
1183 assert(graph->signal_size >= signal_size);
1184 // Map each scheduled signal onto an already-allocated signal of the matching type.
1185 int* const signal_idxs = stream_idxs + stream_data->rnum;
1186 uint64_t* const signal_used = stream_used + ((graph->stream_size + 63) >> 6);
1187 for (i = 0; i < signal_size; i++)
1188 signal_idxs[i] = -1;
1189 ccv_nnc_graph_visit_for(visit, exec_info, node, idx) {
1190 for (i = 0; i < schd_info[idx].stream_size; i++)
1191 if (SCHEDULE_SIGNALS(schd_info[idx])[i] >= 0)
1192 {
1193 const int signal = SCHEDULE_SIGNALS(schd_info[idx])[i];
1194 if (signal_idxs[signal] < 0)
1195 {
1196 const ccv_nnc_stream_data_t* const data = (ccv_nnc_stream_data_t*)ccv_array_get(stream_data, SCHEDULE_STREAMS(schd_info[idx])[i]);
1197 int type = stream_type;
1198 CCV_STREAM_SET_DEVICE_ID(type, data->device_id);
1199 for (j = 0; j < graph->signal_size; j++)
1200 if (!(signal_used[j >> 6] & ((uint64_t)1 << (j & 63))))
1201 {
1202 const int signal_type = ccv_nnc_stream_signal_type(graph->signals[j]);
1203 if (signal_type == type)
1204 {
1205 signal_idxs[signal] = j;
1206 signal_used[j >> 6] |= ((uint64_t)1 << (j & 63));
1207 break;
1208 }
1209 }
1210 }
1211 }
1212 } ccv_nnc_graph_visit_endfor
1213 // Now rebind streams and signals from the schedule.
1214 ccv_nnc_graph_visit_for(visit, exec_info, node, idx) {
1215 for (i = 0; i < schd_info[idx].stream_size; i++)
1216 {
1217 SCHEDULE_STREAMS(schd_info[idx])[i] = stream_idxs[SCHEDULE_STREAMS(schd_info[idx])[i]];
1218 if (SCHEDULE_SIGNALS(schd_info[idx])[i] >= 0)
1219 SCHEDULE_SIGNALS(schd_info[idx])[i] = signal_idxs[SCHEDULE_SIGNALS(schd_info[idx])[i]];
1220 }
1221 for (i = 0; i < schd_info[idx].wait_size; i++)
1222 schd_info[idx].waits[i] = signal_idxs[schd_info[idx].waits[i]];
1223 } ccv_nnc_graph_visit_endfor
1224 for (i = 0; i < schedule->stream_1_size; i++)
1225 schedule->stream_1s[i] = stream_idxs[schedule->stream_1s[i]];
1226 for (i = 0; i < schedule->wait_size; i++)
1227 schedule->waits[i] = signal_idxs[schedule->waits[i]];
1228 // Rebind which stream is stream 0 (the default stream).
1229 schedule->stream_0 = stream_idxs[0];
1230 ccfree(stream_used);
1231 ccfree(stream_idxs);
1232 }
1233 assert(graph->streams);
1234 ccv_nnc_graph_visit_free(visit);
1235 for (i = 0; i < signal_size; i++)
1236 { assert(graph->signals[i]); }
1237 if (schedule->stream_1_size)
1238 schedule->begin = ccv_nnc_stream_signal_new(default_stream_type);
1239 schedule->end = ccv_nnc_stream_signal_new(default_stream_type);
1240 // Do this recursively for its sub graphs.
1241 if (graph->sub_graphs)
1242 for (i = 0; i < graph->sub_graphs->rnum; i++)
1243 {
1244 ccv_nnc_graph_t* const sub_graph = *(ccv_nnc_graph_t**)ccv_array_get(graph->sub_graphs, i);
1245 if (sub_graph && !sub_graph->default_schedule)
1246 {
1247 const int exec_idx = sub_graph->exec_idx - 1;
1248 assert(schd_info[exec_idx].stream_size == 1);
1249 const int stream_idx = SCHEDULE_STREAMS(schd_info[exec_idx])[0];
1250 const int device_id = ((ccv_nnc_stream_data_t*)ccv_array_get(stream_data, stream_idx))->device_id;
1251 sub_graph->default_schedule = _ccv_nnc_graph_static_schedule_new(sub_graph, stream_type, device_id, graph->streams[stream_idx], 0, 0, 0, 0);
1252 }
1253 }
1254 ccv_array_free(stream_data);
1255 return schedule;
1256}
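/* A minimal sketch (not part of the original source) of the inline-vs-heap small-buffer
 * pattern the SCHEDULE_STREAMS/SCHEDULE_SIGNALS selectors above rely on: with
 * stream_size <= 1 the index lives in an inline slot, otherwise in one heap block of
 * 2 * stream_size ints (streams first, signals after). The struct below is illustrative,
 * not the real ccv_nnc_graph_exec_schedule_t. */
#include <stdlib.h>

typedef struct {
	int stream_size;
	int _inline_streams[1];
	int _inline_signals[1];
	int* _heap_streams; /* Heap block of 2 * stream_size ints when stream_size > 1. */
	int* _heap_signals; /* Points into the same block, offset by stream_size. */
} example_schedule_t;

/* The same selector the SCHEDULE_STREAMS expansion uses. */
static int* example_streams(example_schedule_t* const schd)
{
	return schd->stream_size <= 1 ? schd->_inline_streams : schd->_heap_streams;
}

/* Mirrors the allocations at lines 876-877 and 942-943 above. */
static void example_set_stream_size(example_schedule_t* const schd, const int size)
{
	schd->stream_size = size;
	if (size > 1)
	{
		schd->_heap_streams = (int*)malloc(sizeof(int) * size * 2);
		schd->_heap_signals = schd->_heap_streams + size;
	}
}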
1257void ccv_nnc_graph_set_default_static_schedule(ccv_nnc_graph_t* const graph, const int stream_type)
1258{
1259 assert(graph->p == 0);
1260 if (graph->default_schedule)
1261 ccv_nnc_graph_static_schedule_free(graph->default_schedule);
1262 graph->default_schedule = _ccv_nnc_graph_static_schedule_new(graph, stream_type, -1, 0, 0, 0, 0, 0);
1263}
1264
1265ccv_nnc_graph_static_schedule_t* ccv_nnc_graph_static_schedule_new(ccv_nnc_graph_t* const graph, const int stream_type, const ccv_nnc_graph_exec_t* const sources, const int source_size, const ccv_nnc_graph_exec_t* const destinations, const int destination_size)
1266{
1267 assert(graph->p == 0);
1268 return _ccv_nnc_graph_static_schedule_new(graph, stream_type, -1, 0, sources, source_size, destinations, destination_size);
1269}
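/* A hypothetical usage sketch (not from the original source): build the default schedule
 * once, then reuse it for subsequent runs. CCV_STREAM_CONTEXT_GPU is assumed to be the
 * stream-type constant from ccv_nnc.h; substitute whatever type constant your build defines. */
static void example_schedule_graph(ccv_nnc_graph_t* const graph)
{
	/* Schedules over all of the graph's sources and destinations. */
	ccv_nnc_graph_set_default_static_schedule(graph, CCV_STREAM_CONTEXT_GPU);
	/* Stream 0 is the default stream the caller can synchronize on. */
	ccv_nnc_stream_context_t* const stream = ccv_nnc_graph_default_stream(graph);
	(void)stream;
}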
1270
1271ccv_nnc_stream_context_t* ccv_nnc_graph_default_stream(const ccv_nnc_graph_t* const graph)
1272{
1273 if (graph->streams && graph->stream_size > 0)
1274 return graph->streams[0];
1275 return 0;
1276}
1277
1278static void _ccv_nnc_graph_dot_exec(const int index, const ccv_nnc_graph_exec_info_t* const exec_info, const ccv_nnc_graph_exec_schedule_t* const schd_info, ccv_nnc_stream_context_t** const streams, const int flags, FILE* out)
1279{
1280 if (flags == CCV_NNC_LONG_DOT_GRAPH)
1281 fputc('{', out);
1282 fprintf(out, "node%d", index);
1283 if (flags == CCV_NNC_LONG_DOT_GRAPH)
1284 {
1285 fputs("|Command: ", out);
1286 fputs(ccv_nnc_cmd_name(exec_info->cmd.cmd), out);
1287 if (schd_info)
1288 {
1289 if (schd_info->stream_size > 0)
1290 {
1291 int i, flag = 0;
1292 fputs("|Stream: ", out);
1293 for (i = 0; i < schd_info->stream_size; i++)
1294 {
1295 const int device_id = streams ? CCV_TENSOR_GET_DEVICE_ID(streams[SCHEDULE_STREAMS(*schd_info)[i]]->type) : 0;
1296 if (i == 0)
1297 fprintf(out, "%d (d%d)", SCHEDULE_STREAMS(*schd_info)[i], device_id);
1298 else
1299 fprintf(out, ", %d (d%d)", SCHEDULE_STREAMS(*schd_info)[i], device_id);
1300 }
1301 for (i = 0; i < schd_info->stream_size; i++)
1302 if (SCHEDULE_SIGNALS(*schd_info)[i] >= 0)
1303 {
1304 if (!flag)
1305 {
1306 flag = 1;
1307 fprintf(out, "|Signal: %d", SCHEDULE_SIGNALS(*schd_info)[i]);
1308 } else
1309 fprintf(out, ", %d", SCHEDULE_SIGNALS(*schd_info)[i]);
1310 }
1311 }
1312 if (schd_info->wait_size > 0)
1313 {
1314 fputs("|Wait: ", out);
1315 int i;
1316 for (i = 0; i < schd_info->wait_size - 1; i++)
1317 fprintf(out, "%d, ", schd_info->waits[i]);
1318 fprintf(out, "%d", schd_info->waits[schd_info->wait_size - 1]);
1319 }
1320 }
1321 fputc('}', out);
1322 }
1323}
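/* For illustration (assumed output, reconstructed from the fprintf calls above): with
 * CCV_NNC_LONG_DOT_GRAPH, a node scheduled on streams 0 and 1 with one signal and two
 * waits would be labeled roughly as
 *   {node5|Command: GEMM_FORWARD|Stream: 0 (d0), 1 (d1)|Signal: 3|Wait: 1, 2}
 * where GEMM_FORWARD stands in for whatever ccv_nnc_cmd_name() returns. */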
1324
1325static void _ccv_nnc_graph_dot_tensor(const int index, const ccv_nnc_tensor_t* const tensor, const int zone, const int flags, const int depth, FILE* out)
1326{
1327 // If it has an alias pointer, or it is the long form.
1328 if (flags == CCV_NNC_LONG_DOT_GRAPH)
1329 fputc('{', out);
1330 const int is_tensor_view = CCV_IS_TENSOR_VIEW(tensor);
1331 if (is_tensor_view)
1332 fprintf(out, "tensorview%d", index);
1333 else
1334 fprintf(out, "tensor%d", index);
1335 int i;
1336 for (i = 0; i < depth; i++) // Print subscription to denote depth.
1337 fputc('\'', out);
1338 if (CCV_GET_TAPE_ALLOC(tensor->type))
1339 fputs(" (t)", out);
1340 if (flags == CCV_NNC_LONG_DOT_GRAPH)
1341 {
1342 const int device_id = CCV_TENSOR_GET_DEVICE_ID(tensor->info.type);
1343 fprintf(out, "|d%d|zone%d", device_id, zone);
1344 for (i = 0; i < depth; i++) // Print subscription to denote depth.
1345 fputc('\'', out);
1346 uintptr_t aptr = (uintptr_t)tensor->data.u8;
1347 const int* ainc = is_tensor_view ? ((ccv_nnc_tensor_view_t*)(tensor))->inc : tensor->info.dim;
1348 // For the last one, we don't extend to full ainc.
1349 size_t ainc_size = (ccv_nnc_dimension_count(ainc) - ainc[0] + tensor->info.dim[0]) * CCV_GET_DATA_TYPE_SIZE(tensor->type);
1350 // Print out the range as well.
1351 fprintf(out, "|{%#010x|%#010x}|%d", (uint32_t)aptr, (uint32_t)(aptr + ainc_size - 1), tensor->info.dim[0]);
1352 for (i = 1; i < CCV_NNC_MAX_DIM_ALLOC && tensor->info.dim[i]; i++)
1353 fprintf(out, "x%d", tensor->info.dim[i]);
1354 fputc('}', out);
1355 }
1356}
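/* A worked example of the ainc_size computation above (hypothetical numbers): for a
 * tensor view with dim = {3, 4} laid inside a buffer with inc = {5, 4} and 4-byte
 * elements, ccv_nnc_dimension_count(ainc) = 20, so
 *   ainc_size = (20 - 5 + 3) * 4 = 72 bytes,
 * i.e. three full increments of 5 elements plus a final partial run of 3; the last
 * line is deliberately not extended to the full increment. */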
1357
1358typedef struct {
1359 int index;
1360 int name;
1361 int zone;
1362 uintptr_t tensor_ref;
1363 uintptr_t start_ptr;
1364 uintptr_t end_ptr;
1365} ccv_nnc_tensor_dot_t;
1366
1367typedef struct {
1368 ccv_nnc_tensor_dot_t* dots;
1369 int* remap;
1370 int* rename_zone;
1371 int* rename_index;
1372} ccv_nnc_tensor_dot_recovery_t;
1373
1374// First sort by start_ptr, then sort by tensor ptr (so that we will have the same tensor sorted to one cluster).
1375#define less_than(i1, i2, aux) ((i1).start_ptr < (i2).start_ptr || ((i1).start_ptr == (i2).start_ptr && (i1).tensor_ref < (i2).tensor_ref))
1376static CCV_IMPLEMENT_QSORT(_ccv_nnc_tensor_dot_sort_by_ptr, ccv_nnc_tensor_dot_t, less_than)
1377#undef less_than
1378
1379static int _ccv_nnc_graph_dot_tensor_multiview_count(const ccv_nnc_tensor_multiview_t* const mv)
1380{
1381 if (!CCV_IS_TENSOR_MULTIVIEW(mv))
1382 return 1;
1383 const int count = mv->kind + mv->repeat;
1384 int i, c = 0;
1385 for (i = 0; i < count; i++)
1386 c += _ccv_nnc_graph_dot_tensor_multiview_count((ccv_nnc_tensor_multiview_t*)CCV_NNC_MULTIVIEW_DATA(mv)[i]);
1387 return c;
1388}
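/* Example (hypothetical): a multiview with kind + repeat = 3 whose first entry is itself
 * a multiview over 2 plain tensors counts 2 + 1 + 1 = 4, since the recursion above only
 * counts non-multiview leaves. */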
1389
1390static void _ccv_nnc_graph_dot_tensor_multiview_tensor_dots(const ccv_nnc_tensor_multiview_t* const mv, ccv_nnc_tensor_dot_t* const tensor_dots, int* tensor_index)
1391{
1392 const int count = mv->kind + mv->repeat;
1393 int i;
1394 for (i = 0; i < count; i++)
1395 if (CCV_IS_TENSOR_MULTIVIEW(CCV_NNC_MULTIVIEW_DATA(mv)[i]))
1396 _ccv_nnc_graph_dot_tensor_multiview_tensor_dots((ccv_nnc_tensor_multiview_t*)CCV_NNC_MULTIVIEW_DATA(mv)[i], tensor_dots, tensor_index);
1397 else {
1398 tensor_dots[*tensor_index].name = *tensor_index;
1399 tensor_dots[*tensor_index].start_ptr = (uintptr_t)CCV_NNC_MULTIVIEW_DATA(mv)[i]->data.u8;
1400 // Because tv's pointer will get updated, it is not correct in this case to have one tensor_ref.
1401 tensor_dots[*tensor_index].tensor_ref = tensor_dots[*tensor_index].start_ptr;
1402 const size_t dim_size = ccv_nnc_dimension_count(CCV_NNC_MULTIVIEW_DATA(mv)[i]->info.dim) * CCV_GET_DATA_TYPE_SIZE(CCV_NNC_MULTIVIEW_DATA(mv)[i]->type);
1403 tensor_dots[*tensor_index].end_ptr = tensor_dots[*tensor_index].start_ptr + dim_size - 1;
1404 ++(*tensor_index);
1405 }
1406}
1407
1408static ccv_nnc_tensor_dot_recovery_t _ccv_nnc_graph_tensor_dot_recovery(const ccv_nnc_graph_t* const graph)
1409{
1410 int i, j;
1411 // Recover tensor relationships for all tensors referenced in the graph.
1412 // Most notably, we have to give these indexes, and find whether they point to
1413 // the same memory region and whether they overlap. This information was
1414 // lost when we converted from the symbolic form to the execution form,
1415 // and here we do our best to recover it because that is easier to understand
1416 // when we present the graph visually (also, we don't want to put this
1417 // information into the tensor or execution graph to avoid overhead; thus,
1418 // recovering is the best we can do).
1419 int tensor_count = 0;
1420 for (i = 0; i < graph->exec_info->rnum; i++)
1421 {
1422 ccv_nnc_graph_exec_info_t* exec_info = (ccv_nnc_graph_exec_info_t*)ccv_array_get(graph->exec_info, i);
1423 for (j = 0; j < exec_info->input_size; j++)
1424 if (exec_info->inputs[j])
1425 tensor_count += CCV_IS_TENSOR_MULTIVIEW(exec_info->inputs[j]) ? _ccv_nnc_graph_dot_tensor_multiview_count((ccv_nnc_tensor_multiview_t*)exec_info->inputs[j]) : 1;
1426 for (j = 0; j < exec_info->output_size; j++)
1427 if (exec_info->outputs[j])
1428 tensor_count += CCV_IS_TENSOR_MULTIVIEW(exec_info->outputs[j]) ? _ccv_nnc_graph_dot_tensor_multiview_count((ccv_nnc_tensor_multiview_t*)exec_info->outputs[j]) : 1;
1429 }
1430 ccv_nnc_tensor_dot_t* tensor_dots = tensor_count > 0 ? (ccv_nnc_tensor_dot_t*)ccmalloc(sizeof(ccv_nnc_tensor_dot_t) * tensor_count) : 0;
1431 int k = 0;
1432 for (i = 0; i < graph->exec_info->rnum; i++)
1433 {
1434 ccv_nnc_graph_exec_info_t* exec_info = (ccv_nnc_graph_exec_info_t*)ccv_array_get(graph->exec_info, i);
1435 for (j = 0; j < exec_info->input_size; j++)
1436 {
1437 ccv_nnc_tensor_t* tensor = exec_info->inputs[j];
1438 if (!tensor)
1439 continue;
1440 if (CCV_IS_TENSOR_MULTIVIEW(tensor))
1441 _ccv_nnc_graph_dot_tensor_multiview_tensor_dots((ccv_nnc_tensor_multiview_t*)tensor, tensor_dots, &k);
1442 else {
1443 tensor_dots[k].name = k;
1444 tensor_dots[k].tensor_ref = (uintptr_t)tensor;
1445 tensor_dots[k].start_ptr = (uintptr_t)tensor->data.u8;
1446 const int* inc = CCV_IS_TENSOR_VIEW(tensor) ? ((ccv_nnc_tensor_view_t*)tensor)->inc : tensor->info.dim;
1447 const size_t inc_size = (ccv_nnc_dimension_count(inc) - inc[0] + tensor->info.dim[0]) * CCV_GET_DATA_TYPE_SIZE(tensor->type);
1448 tensor_dots[k].end_ptr = tensor_dots[k].start_ptr + inc_size - 1;
1449 ++k;
1450 }
1451 }
1452 for (j = 0; j < exec_info->output_size; j++)
1453 {
1454 ccv_nnc_tensor_t* tensor = exec_info->outputs[j];
1455 if (!tensor)
1456 continue;
1457 if (CCV_IS_TENSOR_MULTIVIEW(tensor))
1458 _ccv_nnc_graph_dot_tensor_multiview_tensor_dots((ccv_nnc_tensor_multiview_t*)tensor, tensor_dots, &k);
1459 else {
1460 tensor_dots[k].name = k;
1461 tensor_dots[k].tensor_ref = (uintptr_t)tensor;
1462 tensor_dots[k].start_ptr = (uintptr_t)tensor->data.u8;
1463 const int* inc = CCV_IS_TENSOR_VIEW(tensor) ? ((ccv_nnc_tensor_view_t*)tensor)->inc : tensor->info.dim;
1464 const size_t inc_size = (ccv_nnc_dimension_count(inc) - inc[0] + tensor->info.dim[0]) * CCV_GET_DATA_TYPE_SIZE(tensor->type);
1465 tensor_dots[k].end_ptr = tensor_dots[k].start_ptr + inc_size - 1;
1466 ++k;
1467 }
1468 }
1469 }
1470 tensor_count = k; // We may have over-counted; shrink now.
1471 // To group overlapping memory into one zone, we sort by start ptr first (secondarily by the tensor pointer).
1472 _ccv_nnc_tensor_dot_sort_by_ptr(tensor_dots, tensor_count, 0);
1473 int index = 0, zone = 0;
1474 uintptr_t tensor_ref = tensor_count > 0 ? tensor_dots[0].tensor_ref : 0;
1475 uintptr_t end_ptr = tensor_count > 0 ? tensor_dots[0].end_ptr : 0;
1476 // Then it is trivial: we go by end ptr. If the next start ptr is still within the current end ptr (start ptr <= end ptr),
1477 // it belongs to the same zone.
1478 for (i = 0; i < tensor_count; i++)
1479 {
1480 if (tensor_dots[i].tensor_ref != tensor_ref)
1481 {
1482 tensor_ref = tensor_dots[i].tensor_ref;
1483 ++index;
1484 }
1485 if (tensor_dots[i].start_ptr > end_ptr)
1486 {
1487 end_ptr = ccv_max(end_ptr, tensor_dots[i].end_ptr);
1488 ++zone;
1489 }
1490 tensor_dots[i].index = index;
1491 tensor_dots[i].zone = zone;
1492 }
1493 // We already have index and zone assigned, but the problem is that these are not very human-interpretable (because
1494 // they follow the pointers from low to high, not the tensor creation order). The following code renames both the index
1495 // and the zone so that they are much more understandable.
1496 const int index_count = index + 1;
1497 const int zone_count = zone + 1;
1498 int* remap = (int*)ccmalloc(sizeof(int) * (tensor_count + index_count + zone_count));
1499 int* rename_index = remap + tensor_count;
1500 int* rename_zone = rename_index + index_count;
1501 for (i = 0; i < tensor_count; i++)
1502 remap[tensor_dots[i].name] = i;
1503 for (i = 0; i < index_count; i++)
1504 rename_index[i] = -1;
1505 for (i = 0; i < zone_count; i++)
1506 rename_zone[i] = -1;
1507 index = 0;
1508 zone = 0;
1509 for (i = 0; i < tensor_count; i++)
1510 {
1511 ccv_nnc_tensor_dot_t* tensor_dot = tensor_dots + remap[i];
1512 if (rename_index[tensor_dot->index] == -1)
1513 rename_index[tensor_dot->index] = index++;
1514 if (rename_zone[tensor_dot->zone] == -1)
1515 rename_zone[tensor_dot->zone] = zone++;
1516 }
1517 ccv_nnc_tensor_dot_recovery_t recovery = {
1518 .dots = tensor_dots,
1519 .remap = remap,
1520 .rename_index = rename_index,
1521 .rename_zone = rename_zone,
1522 };
1523 return recovery;
1524}
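/* A worked example of the zone/index assignment above (hypothetical addresses): after the
 * sort, suppose A spans [0x100, 0x17f], B spans [0x140, 0x1bf] (a different tensor over the
 * same buffer), and C spans [0x200, 0x27f]. A and B share zone 0 because B's start_ptr does
 * not exceed the running end_ptr (0x17f); C's start_ptr does, so ++zone places it in zone 1.
 * Each distinct tensor_ref still bumps `index`, so A and B keep separate indexes. */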
1525
1526static void _ccv_nnc_graph_tensor_dot_recovery_free(const ccv_nnc_tensor_dot_recovery_t recovery)
1527{
1528 ccfree(recovery.dots);
1529 ccfree(recovery.remap);
1530}
1531
1532static void _ccv_nnc_graph_dot_tensor_multiview_one(const ccv_nnc_tensor_multiview_t* const mv, const ccv_nnc_tensor_dot_recovery_t recovery, const int depth, int* tensor_index, FILE* out)
1533{
1534 const int count = mv->kind + mv->repeat;
1535 int i, j;
1536 fputs("|{", out);
1537 for (i = 0; i < count; i++)
1538 if (CCV_IS_TENSOR_MULTIVIEW(CCV_NNC_MULTIVIEW_DATA(mv)[i]))
1539 {
1540 fprintf(out, "{%d", i);
1541 if (mv->kind == CCV_NNC_MULTIVIEW_K0N || (mv->kind == CCV_NNC_MULTIVIEW_K1N && i > 0))
1542 fputc('*', out); // Denotes that we loop on this.
1543 _ccv_nnc_graph_dot_tensor_multiview_one((ccv_nnc_tensor_multiview_t*)CCV_NNC_MULTIVIEW_DATA(mv)[i], recovery, depth, tensor_index, out);
1544 if (i == count - 1)
1545 fputc('}', out);
1546 else
1547 fputs("}|", out);
1548 } else {
1549 fprintf(out, "{%d", i);
1550 if (mv->kind == CCV_NNC_MULTIVIEW_K0N || (mv->kind == CCV_NNC_MULTIVIEW_K1N && i > 0))
1551 fputc('*', out); // Denotes that we loop on this.
1552 const ccv_nnc_tensor_dot_t* const tensor_dot = recovery.dots + recovery.remap[*tensor_index];
1553 fprintf(out, "|zone%d", recovery.rename_zone[tensor_dot->zone]);
1554 for (j = 0; j < depth; j++)
1555 fputc('\'', out);
1556 uintptr_t aptr = (uintptr_t)CCV_NNC_MULTIVIEW_DATA(mv)[i]->data.u8;
1557 // For the last one, we don't extend to full ainc.
1558 size_t dim_size = ccv_nnc_dimension_count(CCV_NNC_MULTIVIEW_DATA(mv)[i]->info.dim) * CCV_GET_DATA_TYPE_SIZE(CCV_NNC_MULTIVIEW_DATA(mv)[i]->type);
1559 // Print out the range as well.
1560 fprintf(out, "|{%#010x|%#010x}", (uint32_t)aptr, (uint32_t)(aptr + dim_size - 1));
1561 ++(*tensor_index);
1562 if (i == count - 1)
1563 fputc('}', out);
1564 else
1565 fputs("}|", out);
1566 }
1567 fputc('}', out);
1568}
1569
1570static void _ccv_nnc_graph_dot_tensor_multiview(const ccv_nnc_tensor_multiview_t* const mv, const ccv_nnc_tensor_dot_recovery_t recovery, const int flags, const int depth, int* tensor_index, FILE* out)
1571{
1572 // If it has an alias pointer, or it is the long form.
1573 if (flags == CCV_NNC_LONG_DOT_GRAPH)
1574 fputc('{', out);
1575 const ccv_nnc_tensor_dot_t* const tensor_dot = recovery.dots + recovery.remap[*tensor_index];
1576 fprintf(out, "multiview%d", recovery.rename_index[tensor_dot->index]);
1577 int i;
1578 for (i = 0; i < depth; i++) // Print subscription to denote depth.
1579 fputc('\'', out);
1580 if (CCV_GET_TAPE_ALLOC(mv->type))
1581 fputs(" (t)", out);
1582 if (flags == CCV_NNC_LONG_DOT_GRAPH)
1583 {
1584 _ccv_nnc_graph_dot_tensor_multiview_one(mv, recovery, depth, tensor_index, out);
1585 const ccv_nnc_tensor_t* root = (ccv_nnc_tensor_t*)mv;
1586 while (CCV_IS_TENSOR_MULTIVIEW(root))
1587 root = CCV_NNC_MULTIVIEW_DATA((ccv_nnc_tensor_multiview_t*)root)[0];
1588 fprintf(out, "|%d", root->info.dim[0]);
1589 for (i = 1; i < CCV_NNC_MAX_DIM_ALLOC && root->info.dim[i]; i++)
1590 fprintf(out, "x%d", root->info.dim[i]);
1591 fputc('}', out);
1592 } else
1593 *tensor_index += _ccv_nnc_graph_dot_tensor_multiview_count(mv);
1594}
1595
1596static void _ccv_nnc_graph_dot_node(const ccv_nnc_graph_exec_info_t* const exec_info, const ccv_nnc_graph_exec_schedule_t* const schd_info, const int exec_index, ccv_nnc_stream_context_t** const streams, const ccv_nnc_tensor_dot_recovery_t recovery, const int flags, const int depth, FILE* out, int* const tensor_index)
1597{
1598 fprintf(out, "node%d [shape=record,label=\"", exec_index);
1599 _ccv_nnc_graph_dot_exec(exec_index, exec_info, schd_info, streams, flags, out);
1600 int i;
1601 int k = *tensor_index;
1602 if (exec_info->input_size > 0)
1603 {
1604 fputs("|{Input", out);
1605 for (i = 0; i < exec_info->input_size; i++)
1606 if (exec_info->inputs[i])
1607 {
1608 fputc('|', out);
1609 if (CCV_IS_TENSOR_MULTIVIEW(exec_info->inputs[i]))
1610 _ccv_nnc_graph_dot_tensor_multiview((ccv_nnc_tensor_multiview_t*)exec_info->inputs[i], recovery, flags, depth, &k, out);
1611 else {
1612 const ccv_nnc_tensor_dot_t* const tensor_dot = recovery.dots + recovery.remap[k];
1613 _ccv_nnc_graph_dot_tensor(recovery.rename_index[tensor_dot->index], exec_info->inputs[i], recovery.rename_zone[tensor_dot->zone], flags, depth, out);
1614 ++k;
1615 }
1616 } else
1617 fputs("|-", out);
1618 fputc('}', out);
1619 }
1620 if (exec_info->output_size > 0)
1621 {
1622 fputs("|{Output", out);
1623 for (i = 0; i < exec_info->output_size; i++)
1624 if (exec_info->outputs[i])
1625 {
1626 fputc('|', out);
1627 if (CCV_IS_TENSOR_MULTIVIEW(exec_info->outputs[i]))
1628 _ccv_nnc_graph_dot_tensor_multiview((ccv_nnc_tensor_multiview_t*)exec_info->outputs[i], recovery, flags, depth, &k, out);
1629 else {
1630 const ccv_nnc_tensor_dot_t* const tensor_dot = recovery.dots + recovery.remap[k];
1631 _ccv_nnc_graph_dot_tensor(recovery.rename_index[tensor_dot->index], exec_info->outputs[i], recovery.rename_zone[tensor_dot->zone], flags, depth, out);
1632 ++k;
1633 }
1634 } else
1635 fputs("|-", out);
1636 fputc('}', out);
1637 }
1638 fputs("\"];\n", out);
1639 *tensor_index = k;
1640}
1641
1642static void _ccv_nnc_graph_dot_while_label(const ccv_nnc_graph_exec_info_t* const exec_info, const int exec_index, const ccv_nnc_tensor_dot_recovery_t recovery, const ccv_nnc_graph_t* const while_graph, const int flags, const int depth, FILE* out, int* tensor_index)
1643{
1644 int i;
1645 fprintf(out, "label=<<b>while%d </b>Command: ", exec_index);
1646 fputs(ccv_nnc_cmd_name(exec_info->cmd.cmd), out);
1647 fputs(">;\n", out);
1648 fprintf(out, "label%d [shape=record,label=\"{", exec_index);
1649 int k = *tensor_index;
1650 if (exec_info->input_size > 0)
1651 {
1652 fputs("{Input|{", out);
1653 for (i = 0; i < exec_info->input_size; i++)
1654 {
1655 if (i > 0)
1656 fputc('|', out);
1657 if (exec_info->inputs[i])
1658 {
1659 if (CCV_IS_TENSOR_MULTIVIEW(exec_info->inputs[i]))
1660 _ccv_nnc_graph_dot_tensor_multiview((ccv_nnc_tensor_multiview_t*)exec_info->inputs[i], recovery, flags, depth, &k, out);
1661 else {
1662 const ccv_nnc_tensor_dot_t* const tensor_dot = recovery.dots + recovery.remap[k];
1663 _ccv_nnc_graph_dot_tensor(recovery.rename_index[tensor_dot->index], exec_info->inputs[i], recovery.rename_zone[tensor_dot->zone], flags, depth, out);
1664 ++k;
1665 }
1666 } else
1667 fputc('-', out);
1668 }
1669 fputs("}}", out);
1670 }
1671 if (exec_info->output_size > 0)
1672 {
1673 if (exec_info->input_size > 0)
1674 fputs("|", out);
1675 fputs("{Output|{", out);
1676 for (i = 0; i < exec_info->output_size; i++)
1677 {
1678 if (i > 0)
1679 fputc('|', out);
1680 if (exec_info->outputs[i])
1681 {
1682 if (CCV_IS_TENSOR_MULTIVIEW(exec_info->outputs[i]))
1683 _ccv_nnc_graph_dot_tensor_multiview((ccv_nnc_tensor_multiview_t*)exec_info->outputs[i], recovery, flags, depth, &k, out);
1684 else {
1685 const ccv_nnc_tensor_dot_t* const tensor_dot = recovery.dots + recovery.remap[k];
1686 _ccv_nnc_graph_dot_tensor(recovery.rename_index[tensor_dot->index], exec_info->outputs[i], recovery.rename_zone[tensor_dot->zone], flags, depth, out);
1687 ++k;
1688 }
1689 } else
1690 fputc('-', out);
1691 }
1692 fputs("}}", out);
1693 }
1694 fputs("}\"];\n", out);
1695 *tensor_index = k;
1696}
1697
1698static void _ccv_nnc_graph_dot_case_of_label(const ccv_nnc_graph_exec_info_t* const exec_info, const int exec_index, const ccv_nnc_tensor_dot_recovery_t recovery, const int flags, const int depth, FILE* out, int* tensor_index)
1699{
1700 int i;
1701 fprintf(out, "label=<<b>caseof%d </b>Command: ", exec_index);
1702 fputs(ccv_nnc_cmd_name(exec_info->cmd.cmd), out);
1703 fputs(">;\n", out);
1704 fprintf(out, "label%d [shape=record,label=\"{", exec_index);
1705 int k = *tensor_index;
1706 if (exec_info->input_size > 0)
1707 {
1708 fputs("{Input|{", out);
1709 for (i = 0; i < exec_info->input_size; i++)
1710 {
1711 if (i > 0)
1712 fputc('|', out);
1713 if (exec_info->inputs[i])
1714 {
1715 if (CCV_IS_TENSOR_MULTIVIEW(exec_info->inputs[i]))
1716 _ccv_nnc_graph_dot_tensor_multiview((ccv_nnc_tensor_multiview_t*)exec_info->inputs[i], recovery, flags, depth, &k, out);
1717 else {
1718 const ccv_nnc_tensor_dot_t* const tensor_dot = recovery.dots + recovery.remap[k];
1719 _ccv_nnc_graph_dot_tensor(recovery.rename_index[tensor_dot->index], exec_info->inputs[i], recovery.rename_zone[tensor_dot->zone], flags, depth, out);
1720 ++k;
1721 }
1722 } else
1723 fputc('-', out);
1724 }
1725 fputs("}}", out);
1726 }
1727 if (exec_info->output_size > 0)
1728 {
1729 if (exec_info->input_size > 0)
1730 fputs("|", out);
1731 fputs("{Output|{", out);
1732 for (i = 0; i < exec_info->output_size; i++)
1733 {
1734 if (i > 0)
1735 fputc('|', out);
1736 if (exec_info->outputs[i])
1737 {
1738 if (CCV_IS_TENSOR_MULTIVIEW(exec_info->outputs[i]))
1739 _ccv_nnc_graph_dot_tensor_multiview((ccv_nnc_tensor_multiview_t*)exec_info->outputs[i], recovery, flags, depth, &k, out);
1740 else {
1741 const ccv_nnc_tensor_dot_t* const tensor_dot = recovery.dots + recovery.remap[k];
1742 _ccv_nnc_graph_dot_tensor(recovery.rename_index[tensor_dot->index], exec_info->outputs[i], recovery.rename_zone[tensor_dot->zone], flags, depth, out);
1743 ++k;
1744 }
1745 } else
1746 fputc('-', out);
1747 }
1748 fputs("}}", out);
1749 }
1750 fputs("}\"];\n", out);
1751 *tensor_index = k;
1752}
1753
1754static void _ccv_nnc_graph_dot_sub_graphs(const ccv_nnc_graph_exec_info_t* const exec_info, const ccv_nnc_tensor_dot_recovery_t p_recovery, const ccv_array_t* const sub_graphs, const int flags, const int depth, FILE* out, int* tensor_index, int* exec_index)
1755{
1756 if (exec_info->flags & CCV_NNC_GRAPH_EXEC_P_WHILE)
1757 {
1758 fprintf(out, "subgraph cluster%d {\nstyle=\"rounded\";\nnode%d [style=invisible];\n", *exec_index, *exec_index);
1759 const ccv_nnc_graph_t* const while_graph = *(ccv_nnc_graph_t**)ccv_array_get(sub_graphs, CCV_NNC_GRAPH_REF(exec_info)[0] - 1);
1760 // Output this node info within this subgraph.
1761 _ccv_nnc_graph_dot_while_label(exec_info, *exec_index, p_recovery, while_graph, flags, depth - 1 /* Label all references to its level above. */, out, tensor_index);
1762 } else if (exec_info->flags & CCV_NNC_GRAPH_EXEC_CASE_OF) {
1763 fprintf(out, "subgraph cluster%d {\nstyle=\"rounded\";\nnode%d [style=invisible];\n", *exec_index, *exec_index);
1764 _ccv_nnc_graph_dot_case_of_label(exec_info, *exec_index, p_recovery, flags, depth - 1 /* Label all references to its level above. */, out, tensor_index);
1765 }
1766 ++(*exec_index);
1767 int p;
1768 for (p = 0; p < exec_info->graph_ref_size; p++)
1769 {
1770 if (exec_info->flags & CCV_NNC_GRAPH_EXEC_CASE_OF)
1771 {
1772 fprintf(out, "subgraph cluster%d {\nstyle=\"rounded\";\nnode%d [style=invisible];\nlabel=\"\"\n", *exec_index, *exec_index);
1773 ++(*exec_index);
1774 }
1775 const ccv_nnc_graph_t* const graph = *(ccv_nnc_graph_t**)ccv_array_get(sub_graphs, CCV_NNC_GRAPH_REF(exec_info)[p] - 1);
1776 const ccv_nnc_graph_static_schedule_t* const schedule = graph->default_schedule;
1777 ccv_nnc_tensor_dot_recovery_t recovery = _ccv_nnc_graph_tensor_dot_recovery(graph);
1778 int i, j;
1779 int k = 0;
1780 int* node_id = (int*)ccmalloc(sizeof(int) * graph->exec_info->rnum);
1781 // Output styles.
1782 for (i = 0; i < graph->exec_info->rnum; i++)
1783 {
1784 node_id[i] = *exec_index;
1785 ccv_nnc_graph_exec_info_t* exec_info = (ccv_nnc_graph_exec_info_t*)ccv_array_get(graph->exec_info, i);
1786 if (CCV_NNC_GRAPH_REF(exec_info)[0])
1788 else {
1789 _ccv_nnc_graph_dot_node(exec_info,
1790 schedule ? (i < schedule->exec_info_size ? schedule->exec_info + i : 0) : 0,
1791 *exec_index, graph->streams, recovery, flags, depth, out, &k);
1792 ++(*exec_index);
1793 }
1794 }
1795 // Output connections.
1796 for (i = 0; i < graph->exec_info->rnum; i++)
1797 {
1798 ccv_nnc_graph_exec_info_t* exec_info = (ccv_nnc_graph_exec_info_t*)ccv_array_get(graph->exec_info, i);
1799 if (exec_info->outgoings)
1800 for (j = 0; j < exec_info->outgoings->rnum; j++)
1801 {
1802 const int outgoing_idx = *(int*)ccv_array_get(exec_info->outgoings, j);
1803 const ccv_nnc_graph_exec_info_t* const outgoing_info = (ccv_nnc_graph_exec_info_t*)ccv_array_get(graph->exec_info, outgoing_idx);
1804 // If both are sub-graphs, have both tail and head specified.
1805 if (CCV_NNC_GRAPH_REF(exec_info)[0] && CCV_NNC_GRAPH_REF(outgoing_info)[0])
1806 fprintf(out, "node%d -> node%d [ltail=cluster%d,lhead=cluster%d];\n", node_id[i], node_id[outgoing_idx], node_id[i], node_id[outgoing_idx]);
1807 else if (CCV_NNC_GRAPH_REF(exec_info)[0] && !CCV_NNC_GRAPH_REF(outgoing_info)[0])
1808 fprintf(out, "node%d -> node%d [ltail=cluster%d];\n", node_id[i], node_id[outgoing_idx], node_id[i]);
1809 else if (!CCV_NNC_GRAPH_REF(exec_info)[0] && CCV_NNC_GRAPH_REF(outgoing_info)[0])
1810 fprintf(out, "node%d -> node%d [lhead=cluster%d];\n", node_id[i], node_id[outgoing_idx], node_id[outgoing_idx]);
1811 else
1812 fprintf(out, "node%d -> node%d;\n", node_id[i], node_id[outgoing_idx]);
1813 }
1814 }
1815 fputs("}\n", out);
1816 _ccv_nnc_graph_tensor_dot_recovery_free(recovery);
1817 ccfree(node_id);
1818 }
1819 // Extra subgraph cluster.
1820 if (exec_info->flags & CCV_NNC_GRAPH_EXEC_CASE_OF)
1821 fputs("}\n", out);
1822}
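
Each cluster above opens with an invisible anchor node ("node%d [style=invisible]") because dot edges must terminate at concrete nodes; the ltail/lhead attributes then clip such an edge at the cluster borders so it renders as joining the sub-graphs themselves. A hypothetical fragment of the emitted output (node and cluster ids are illustrative):

subgraph cluster0 {
style="rounded";
node0 [style=invisible];
...
}
subgraph cluster3 {
style="rounded";
node3 [style=invisible];
...
}
node0 -> node3 [ltail=cluster0,lhead=cluster3];
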
1823
1824void ccv_nnc_graph_dot(const ccv_nnc_graph_t* const graph, const int flags, FILE* out)
1825{
1826 fputs("digraph G {\ncompound=true;\n", out);
1827 ccv_nnc_tensor_dot_recovery_t recovery = _ccv_nnc_graph_tensor_dot_recovery(graph);
1828 int i, j;
1829 int k = 0, c = 0;
1830 int* node_id = (int*)ccmalloc(sizeof(int) * graph->exec_info->rnum);
1831 const ccv_nnc_graph_static_schedule_t* const schedule = graph->default_schedule;
1832 // Output styles.
1833 for (i = 0; i < graph->exec_info->rnum; i++)
1834 {
1835 node_id[i] = c;
1836 ccv_nnc_graph_exec_info_t* exec_info = (ccv_nnc_graph_exec_info_t*)ccv_array_get(graph->exec_info, i);
1837 if (CCV_NNC_GRAPH_REF(exec_info)[0])
1838 _ccv_nnc_graph_dot_sub_graphs(exec_info, recovery, graph->sub_graphs, flags, 1, out, &k, &c);
1839 else {
1840 _ccv_nnc_graph_dot_node(exec_info,
1841 schedule ? (i < schedule->exec_info_size ? schedule->exec_info + i : 0) : 0,
1842 c, graph->streams, recovery, flags, 0, out, &k);
1843 ++c;
1844 }
1845 }
1846 // Output connections.
1847 for (i = 0; i < graph->exec_info->rnum; i++)
1848 {
1849 ccv_nnc_graph_exec_info_t* exec_info = (ccv_nnc_graph_exec_info_t*)ccv_array_get(graph->exec_info, i);
1850 if (exec_info->outgoings)
1851 for (j = 0; j < exec_info->outgoings->rnum; j++)
1852 {
1853 const int outgoing_idx = *(int*)ccv_array_get(exec_info->outgoings, j);
1854 const ccv_nnc_graph_exec_info_t* const outgoing_info = (ccv_nnc_graph_exec_info_t*)ccv_array_get(graph->exec_info, outgoing_idx);
1855 // If both are sub-graphs, have both tail and head specified.
1856 if (CCV_NNC_GRAPH_REF(exec_info)[0] && CCV_NNC_GRAPH_REF(outgoing_info)[0])
1857 fprintf(out, "node%d -> node%d [ltail=cluster%d,lhead=cluster%d];\n", node_id[i], node_id[outgoing_idx], node_id[i], node_id[outgoing_idx]);
1858 else if (CCV_NNC_GRAPH_REF(exec_info)[0] && !CCV_NNC_GRAPH_REF(outgoing_info)[0])
1859 fprintf(out, "node%d -> node%d [ltail=cluster%d];\n", node_id[i], node_id[outgoing_idx], node_id[i]);
1860 else if (!CCV_NNC_GRAPH_REF(exec_info)[0] && CCV_NNC_GRAPH_REF(outgoing_info)[0])
1861 fprintf(out, "node%d -> node%d [lhead=cluster%d];\n", node_id[i], node_id[outgoing_idx], node_id[outgoing_idx]);
1862 else
1863 fprintf(out, "node%d -> node%d;\n", node_id[i], node_id[outgoing_idx]);
1864 }
1865 }
1866 fputs("}\n", out);
1867 _ccv_nnc_graph_tensor_dot_recovery_free(recovery);
1868 ccfree(node_id);
1869}
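
ccv_nnc_graph_dot above opens the digraph with compound=true, which is what makes Graphviz honor the ltail/lhead cluster attributes emitted for sub-graph edges. A minimal sketch of driving it, assuming CCV_NNC_LONG_DOT_GRAPH as the verbose flag and graph.dot as the destination (both are assumptions for illustration):

#include <stdio.h>
#include "ccv_nnc.h"

/* Dump the Graphviz rendering of a graph to graph.dot. */
static void dump_graph(const ccv_nnc_graph_t* const graph)
{
	FILE* out = fopen("graph.dot", "w"); /* assumed destination */
	if (!out)
		return;
	ccv_nnc_graph_dot(graph, CCV_NNC_LONG_DOT_GRAPH, out); /* flag is an assumption */
	fclose(out);
}
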
1870
1871void ccv_nnc_graph_autotune(ccv_nnc_graph_t* const graph, const size_t max_workspace_size, const int flags, const ccv_nnc_graph_exec_t* const sources, const int source_size, const ccv_nnc_graph_exec_t* const destinations, const int destination_size)
1872{
1873 // Execute the current node; for synchronous CPU execution, no stream unit is involved.
1874 int i;
1875#define visitor(node, idx, ...) \
1876 do { \
1877 if (node->cmd.cmd == CCV_NNC_NOOP) \
1878 continue; \
1879 if (node->cmd.cmd == CCV_NNC_GRAPH_FORWARD || node->cmd.cmd == CCV_NNC_GRAPH_BACKWARD) \
1880 for (i = 0; i < node->graph_ref_size; i++) \
1881 { \
1882 ccv_nnc_graph_t* sub_graph = *(ccv_nnc_graph_t**)ccv_array_get(graph->sub_graphs, CCV_NNC_GRAPH_REF(node)[i] - 1); \
1883 ccv_nnc_graph_autotune(sub_graph, max_workspace_size, flags, 0, 0, 0, 0); \
1884 } \
1885 else { \
1886 /* Need to unwrap these tensors */ \
1887 for (i = 0; i < node->input_size + node->output_size; i++) \
1888 if (node->inputs[i] && CCV_IS_TENSOR_MULTIVIEW(node->inputs[i])) \
1889 node->inputs[i] = _ccv_nnc_any_tensor_from_tensor_multiview((ccv_nnc_tensor_multiview_t*)node->inputs[i]); \
1890 PRINT(CCV_CLI_VERBOSE, "%s [%d]: [%d] -> [%d]\n", ccv_nnc_cmd_name(node->cmd.cmd), idx, node->input_size, node->output_size)do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("%s [%d]: [%d] -> [%d]\n", ccv_nnc_cmd_name(node
->cmd.cmd), idx, node->input_size, node->output_size
); fflush(stdout); } } while (0)
; \
1891 for (i = 0; i < node->input_size; i++) \
1892 PRINT(CCV_CLI_VERBOSE, "|-> %d. %p (%p)\n", i + 1, node->inputs[i], (node->inputs[i] ? node->inputs[i]->data.u8 : 0))do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("|-> %d. %p (%p)\n", i + 1, node->inputs[i], (
node->inputs[i] ? node->inputs[i]->data.u8 : 0)); fflush
(stdout); } } while (0)
; \
1893 for (i = 0; i < node->output_size; i++) \
1894 PRINT(CCV_CLI_VERBOSE, "|<- %d. %p (%p)\n", i + 1, node->outputs[i], (node->outputs[i] ? node->outputs[i]->data.u8 : 0))do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("|<- %d. %p (%p)\n", i + 1, node->outputs[i],
(node->outputs[i] ? node->outputs[i]->data.u8 : 0))
; fflush(stdout); } } while (0)
; \
1895 node->cmd = ccv_nnc_cmd_autotune(node->cmd, max_workspace_size, node->hint, flags, node->inputs, node->input_size, node->outputs, node->output_size, 0); \
1896 } \
1897 } while (0)
1898 const ccv_nnc_graph_exec_t* const graph_sources = sources ? sources : (graph->sources ? (ccv_nnc_graph_exec_t*)ccv_array_get(graph->sources, 0) : 0);
1. Assuming 'sources' is null
2. '?' condition is false
3. Assuming field 'sources' is null
4. '?' condition is false
5. 'graph_sources' initialized to a null pointer value
1899 const int graph_source_size = source_size ? source_size : (graph->sources ? graph->sources->rnum : 0);
6. Assuming 'source_size' is not equal to 0
7. '?' condition is true
1900 const ccv_nnc_graph_exec_t* const graph_destinations = destinations ? destinations : (graph->destinations ? (ccv_nnc_graph_exec_t*)ccv_array_get(graph->destinations, 0) : 0);
8. Assuming 'destinations' is non-null
9. '?' condition is true
1901 const int graph_destination_size = destination_size ? destination_size : (graph->destinations ? graph->destinations->rnum : 0);
10. Assuming 'destination_size' is not equal to 0
11. '?' condition is true
1902 CCV_NNC_GRAPH_VISIT(graph, (ccv_nnc_graph_exec_info_t*)ccv_array_get(graph->exec_info, 0), graph->exec_info->rnum, graph_sources, graph_source_size, graph_destinations, graph_destination_size, 0, visitor);
12. Assuming '_i_' is >= field 'rnum'
13. Loop condition is false. Execution continues on line 1902
14. Taking false branch
15. Assuming '_i_' is < 'graph_source_size'
16. Loop condition is true. Entering loop body
17. Dereference of null pointer
1903#undef visitor
1904}
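
The path the analyzer reports is reachable when a caller passes sources == 0 together with a non-zero source_size on a graph whose own sources array was never set: graph_sources then initializes to a null pointer while graph_source_size stays non-zero (steps 1 through 7), and the visit macro dereferences the null pointer when seeding its work list at line 1902. A minimal sketch of one possible hardening, assuming the intended invariant is that each size describes the pointer computed in the same branch (an illustration, not the project's actual fix):

/* Tie each size to the pointer it describes and bail out early when
 * either endpoint list is missing, so a null graph_sources can never
 * be paired with a non-zero graph_source_size. */
const ccv_nnc_graph_exec_t* const graph_sources = sources ? sources : (graph->sources ? (ccv_nnc_graph_exec_t*)ccv_array_get(graph->sources, 0) : 0);
const int graph_source_size = sources ? source_size : (graph->sources ? graph->sources->rnum : 0);
const ccv_nnc_graph_exec_t* const graph_destinations = destinations ? destinations : (graph->destinations ? (ccv_nnc_graph_exec_t*)ccv_array_get(graph->destinations, 0) : 0);
const int graph_destination_size = destinations ? destination_size : (graph->destinations ? graph->destinations->rnum : 0);
if (!graph_sources || !graph_destinations)
	return;
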
1905
1906void ccv_nnc_graph_free(ccv_nnc_graph_t* const graph)
1907{
1908 int i, j;
1909 for (i = 0; i < graph->exec_info->rnum; i++)
1910 {
1911 ccv_nnc_graph_exec_info_t *info = (ccv_nnc_graph_exec_info_t*)ccv_array_get(graph->exec_info, i);
1912 if (info->_heap_graph_ref)
1913 ccfree(info->_heap_graph_ref);
1914 ccv_array_t* outgoings = info->outgoings;
1915 if (outgoings)
1916 ccv_array_free(outgoings);
1917 // We allocate inputs & outputs contiguously, therefore we only need to free the input array.
1918 if (info->inputs)
1919 ccfree(info->inputs);
1920 if (info->input_flags)
1921 ccfree(info->input_flags);
1922 if (info->updates)
1923 ccfree(info->updates);
1924 if ((info->flags & CCV_NNC_GRAPH_EXEC_P_WHILE) && info->p_while.inputs)
1925 ccfree(info->p_while.inputs);
1926 }
1927 if (graph->tensor_wraps)
1928 {
1929 for (i = 0; i < graph->tensor_wraps->rnum; i++)
1930 {
1931 ccv_nnc_graph_tensor_wrap_array_t* const tensor_wrap_array = *(ccv_nnc_graph_tensor_wrap_array_t**)ccv_array_get(graph->tensor_wraps, i);
1932 if (tensor_wrap_array)
1933 {
1934 for (j = 0; j < tensor_wrap_array->size; j++)
1935 _ccv_nnc_graph_tensor_wrap_free(tensor_wrap_array->tensor_wraps[j]);
1936 ccfree(tensor_wrap_array);
1937 }
1938 }
1939 ccv_array_free(graph->tensor_wraps);
1940 }
1941 if (graph->tensor_wraps_refs)
1942 ccv_array_free(graph->tensor_wraps_refs);
1943 if (graph->breakpoints)
1944 ccfree(graph->breakpoints);
1945 if (graph->sources)
1946 ccv_array_free(graph->sources);
1947 if (graph->destinations)
1948 ccv_array_free(graph->destinations);
1949 if (graph->default_schedule)
1950 ccv_nnc_graph_static_schedule_free(graph->default_schedule);
1951 if (graph->streams)
1952 {
1953 // If the graph has a parent graph, the default stream is allocated by the parent graph, so skip it here.
1954 if (!graph->p)
1955 ccv_nnc_stream_context_free(graph->streams[0]);
1956 for (i = 1; i < graph->stream_size; i++)
1957 ccv_nnc_stream_context_free(graph->streams[i]);
1958 ccfree(graph->streams);
1959 }
1960 if (graph->block_stream_tasks)
1961 ccfree(graph->block_stream_tasks);
1962 if (graph->signals)
1963 {
1964 for (i = 0; i < graph->signal_size; i++)
1965 ccv_nnc_stream_signal_free(graph->signals[i]);
1966 ccfree(graph->signals);
1967 }
1968 if (graph->carry_overs)
1969 {
1970 for (i = 0; i < graph->carry_overs->rnum; i++)
1971 {
1972 ccv_nnc_graph_tensor_carry_over_t* const carry_over = (ccv_nnc_graph_tensor_carry_over_t*)ccv_array_get(graph->carry_overs, i);
1973 _ccv_nnc_graph_tensor_wrap_free(carry_over->from);
1974 _ccv_nnc_graph_tensor_wrap_free(carry_over->to);
1975 }
1976 ccv_array_free(graph->carry_overs);
1977 }
1978 if (graph->sub_graphs)
1979 {
1980 for (i = 0; i < graph->sub_graphs->rnum; i++)
1981 ccv_nnc_graph_free(*(ccv_nnc_graph_t**)ccv_array_get(graph->sub_graphs, i));
1982 ccv_array_free(graph->sub_graphs);
1983 }
1984 ccv_array_free(graph->exec_info);
1985 if (graph->buffer)
1986 ccfree(graph->buffer);
1987 ccfree(graph);
1988}
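
The comment at line 1917 depends on how inputs and outputs were laid out at allocation time. A minimal sketch of that pattern with illustrative names (the actual allocation site is not part of this listing; input_size and output_size are placeholders):

/* inputs and outputs share one ccmalloc'd block; outputs aliases the
 * tail, so a single ccfree(inputs) releases both arrays and
 * ccfree(outputs) must never be called. */
ccv_nnc_tensor_t** const inputs = (ccv_nnc_tensor_t**)ccmalloc(sizeof(ccv_nnc_tensor_t*) * (input_size + output_size));
ccv_nnc_tensor_t** const outputs = inputs + input_size;
/* ... populate inputs[0..input_size) and outputs[0..output_size) ... */
ccfree(inputs);
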