Bug Summary

File: nnc/ccv_nnc_graph_run.c
Warning: line 903, column 4
Array access (from variable 'outputs') results in a null pointer dereference
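
In essence, the analyzer traces a path on which 'outputs' is assigned a null pointer and is later indexed without a recheck. The self-contained sketch below reproduces the flagged shape with hypothetical stand-in types (node_t and tensor_t are illustrative, not the project's structs); it mirrors the derivation visible at line 455 of the listing, where outputs is computed from a possibly-null inputs array:

    #include <stddef.h>
    #include <stdio.h>

    typedef struct { int id; } tensor_t;

    typedef struct {
        tensor_t** inputs; /* may be NULL when the node carries no i/o */
        int input_size;
        int output_size;
    } node_t;

    static void run_node(node_t* const node)
    {
        tensor_t** inputs = node->inputs;
        /* outputs inherits NULL from inputs -- the same pattern as line 455. */
        tensor_t** outputs = inputs ? inputs + node->input_size : 0;
        int i;
        for (i = 0; i < node->output_size; i++)
            printf("output %d: %p\n", i, (void*)outputs[i]); /* null dereference when inputs == NULL */
    }

    int main(void)
    {
        node_t node = { .inputs = NULL, .input_size = 0, .output_size = 1 };
        run_node(&node); /* crashes by design: outputs is null but output_size > 0 */
        return 0;
    }

Guarding the output loop with a check on outputs itself, or ensuring output_size is 0 whenever inputs is null, would break the reported path.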

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-unknown-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name ccv_nnc_graph_run.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 2 -pic-is-pie -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -target-feature +sse2 -tune-cpu generic -debugger-tuning=gdb -fdebug-compilation-dir=/home/liu/actions-runner/_work/ccv/ccv/lib/nnc -fcoverage-compilation-dir=/home/liu/actions-runner/_work/ccv/ccv/lib/nnc -resource-dir /usr/local/lib/clang/19 -I ../ -I /usr/local/cuda/include -D HAVE_CBLAS -D HAVE_LIBPNG -D HAVE_LIBJPEG -D HAVE_FFTW3 -D HAVE_PTHREAD -D HAVE_LIBLINEAR -D HAVE_TESSERACT -D HAVE_AVCODEC -D HAVE_AVFORMAT -D HAVE_AVUTIL -D HAVE_SWSCALE -D HAVE_SSE2 -D HAVE_GSL -D HAVE_CUDA -D HAVE_CUDNN -D HAVE_NCCL -D USE_SYSTEM_CUB -I /usr/local/include -internal-isystem /usr/local/lib/clang/19/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/12/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -ferror-limit 19 -fgnuc-version=4.2.1 -fskip-odr-check-in-gmf -vectorize-loops -vectorize-slp -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/liu/actions-runner/_work/ccv/ccv/_analyze/2024-11-14-215813-628724-1 -x c ccv_nnc_graph_run.c
1#include "ccv_nnc.h"
2#include "ccv_nnc_easy.h"
3#include "ccv_nnc_internal.h"
4#include "ccv_internal.h"
5#include "_ccv_nnc_graph.h"
6#include "_ccv_nnc_stream.h"
7#ifdef HAVE_CUDA1
8#include "gpu/ccv_nnc_compat.h"
9#elif defined(HAVE_MPS)
10#include "mps/ccv_nnc_mps.h"
11#endif
12
13// MARK - Level-2 API
14
15static void _ccv_nnc_unwrap_tensor_wrap(const ccv_nnc_graph_t* const graph, const int64_t count, const int64_t reverse_count, ccv_nnc_graph_tensor_wrap_t* const tensor_wrap)
16{
17 ccv_nnc_tensor_t* tensor = tensor_wrap->tensors[tensor_wrap->index];
18 while (CCV_IS_TENSOR_MULTIVIEW(tensor)((*(int*)(tensor)) & CCV_TENSOR_MULTIVIEW) &&
19 (((ccv_nnc_tensor_multiview_t*)tensor)->anchor == (intptr_t)graph ||
20 ((ccv_nnc_tensor_multiview_t*)tensor)->anchor == (intptr_t)graph->pair))
21 {
22 // If the anchor is from the pair, we use the reverse_count instead (we are looking it up).
23 const int i = (int)((((ccv_nnc_tensor_multiview_t*)tensor)->anchor == (intptr_t)graph) ? count : reverse_count);
24 ccv_nnc_tensor_multiview_t* mv = (ccv_nnc_tensor_multiview_t*)tensor;
25 const int off = mv->kind;
26 const int mod = mv->repeat;
27 tensor = CCV_NNC_MULTIVIEW_DATA(mv)((mv)->_heap_data ? (mv)->_heap_data : (mv)->_inline_data
)
[i >= off ? ((i - off) % mod) + off : i]; // Unwrap.
28 // If reached the root.
29 if (!CCV_IS_TENSOR_MULTIVIEW(tensor)((*(int*)(tensor)) & CCV_TENSOR_MULTIVIEW))
30 tensor_wrap->update_required = 1; // Need to update tensor updates.
31 ++tensor_wrap->index;
32 tensor_wrap->tensors[tensor_wrap->index] = tensor;
33 assert(tensor_wrap->index < tensor_wrap->count)((void) sizeof ((tensor_wrap->index < tensor_wrap->count
) ? 1 : 0), __extension__ ({ if (tensor_wrap->index < tensor_wrap
->count) ; else __assert_fail ("tensor_wrap->index < tensor_wrap->count"
, "ccv_nnc_graph_run.c", 33, __extension__ __PRETTY_FUNCTION__
); }))
;
34 }
35}
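/* A worked example of the unwrap index above, with hypothetical values
 * off = 1 (mv->kind) and mod = 2 (mv->repeat): the expression
 *   i >= off ? ((i - off) % mod) + off : i
 * maps loop iterations i = 0, 1, 2, 3, 4, ... to multiview slots
 *   0, 1, 2, 1, 2, ...
 * i.e. the first `off` views are used once, after which the remaining
 * `mod` views repeat cyclically. */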
36
37static void _ccv_nnc_graph_unwrap_sub_graph(const ccv_nnc_graph_t* const graph, const int64_t count, const int64_t reverse_count, const ccv_nnc_graph_t* const sub_graph)
38{
39 int i;
40 if (sub_graph->carry_overs)
41 for (i = 0; i < sub_graph->carry_overs->rnum; i++)
42 {
43 ccv_nnc_graph_tensor_carry_over_t* const carry_over = (ccv_nnc_graph_tensor_carry_over_t*)ccv_array_get(sub_graph->carry_overs, i)((void*)(((char*)((sub_graph->carry_overs)->data)) + (size_t
)(sub_graph->carry_overs)->rsize * (size_t)(i)))
;
44 _ccv_nnc_unwrap_tensor_wrap(graph, count, reverse_count, carry_over->from);
45 _ccv_nnc_unwrap_tensor_wrap(graph, count, reverse_count, carry_over->to);
46 }
47 if (sub_graph->sub_graphs)
48 for (i = 0; i < sub_graph->sub_graphs->rnum; i++)
49 _ccv_nnc_graph_unwrap_sub_graph(graph, count, reverse_count, *(ccv_nnc_graph_t**)ccv_array_get(sub_graph->sub_graphs, i)((void*)(((char*)((sub_graph->sub_graphs)->data)) + (size_t
)(sub_graph->sub_graphs)->rsize * (size_t)(i)))
);
50}
51
52static void _ccv_nnc_graph_unwrap(const ccv_nnc_graph_t* const graph, const int64_t count, const int64_t reverse_count)
53{
54 if (!graph->tensor_wraps_refs)
55 return;
56 int i, j;
57 for (i = 0; i < graph->tensor_wraps_refs->rnum; i++)
58 {
59 const ccv_nnc_graph_tensor_wraps_ref_t* const tensor_wraps_ref = (const ccv_nnc_graph_tensor_wraps_ref_t*)ccv_array_get(graph->tensor_wraps_refs, i)((void*)(((char*)((graph->tensor_wraps_refs)->data)) + (
size_t)(graph->tensor_wraps_refs)->rsize * (size_t)(i))
)
;
60 const ccv_nnc_graph_t* const sub_graph = tensor_wraps_ref->graph;
61 ccv_nnc_graph_tensor_wrap_array_t* const tensor_wrap_array = *(ccv_nnc_graph_tensor_wrap_array_t**)ccv_array_get(sub_graph->tensor_wraps, tensor_wraps_ref->d)((void*)(((char*)((sub_graph->tensor_wraps)->data)) + (
size_t)(sub_graph->tensor_wraps)->rsize * (size_t)(tensor_wraps_ref
->d)))
;
62 if (tensor_wrap_array)
63 for (j = 0; j < tensor_wrap_array->size; j++)
64 {
65 ccv_nnc_graph_tensor_wrap_t* const tensor_wrap = tensor_wrap_array->tensor_wraps[j];
66 if (!tensor_wrap)
67 continue;
68 _ccv_nnc_unwrap_tensor_wrap(graph, count, reverse_count, tensor_wrap);
69 }
70 }
71 _ccv_nnc_graph_unwrap_sub_graph(graph, count, reverse_count, graph);
72}
73
74static void _ccv_nnc_graph_transit_move_to(const ccv_nnc_graph_t* const graph)
75{
76 int i;
77 if (graph->carry_overs)
78 for (i = 0; i < graph->carry_overs->rnum; i++)
79 {
80 ccv_nnc_graph_tensor_carry_over_t* const carry_over = (ccv_nnc_graph_tensor_carry_over_t*)ccv_array_get(graph->carry_overs, i)((void*)(((char*)((graph->carry_overs)->data)) + (size_t
)(graph->carry_overs)->rsize * (size_t)(i)))
;
81 ccv_nnc_tensor_t* it = (ccv_nnc_tensor_t*)(carry_over->to->tensors[carry_over->to->index]);
82 assert(!CCV_IS_TENSOR_MULTIVIEW(it))((void) sizeof ((!((*(int*)(it)) & CCV_TENSOR_MULTIVIEW))
? 1 : 0), __extension__ ({ if (!((*(int*)(it)) & CCV_TENSOR_MULTIVIEW
)) ; else __assert_fail ("!CCV_IS_TENSOR_MULTIVIEW(it)", "ccv_nnc_graph_run.c"
, 82, __extension__ __PRETTY_FUNCTION__); }))
;
83 it->data = carry_over->transit;
84 }
85}
86
87static void _ccv_nnc_graph_from_move_transit(const ccv_nnc_graph_t* const graph)
88{
89 int i;
90 if (graph->carry_overs)
91 for (i = 0; i < graph->carry_overs->rnum; i++)
92 {
93 ccv_nnc_graph_tensor_carry_over_t* const carry_over = (ccv_nnc_graph_tensor_carry_over_t*)ccv_array_get(graph->carry_overs, i)((void*)(((char*)((graph->carry_overs)->data)) + (size_t
)(graph->carry_overs)->rsize * (size_t)(i)))
;
94 ccv_nnc_tensor_t* it = (ccv_nnc_tensor_t*)(carry_over->from->tensors[carry_over->from->index]);
95 assert(!CCV_IS_TENSOR_MULTIVIEW(it))((void) sizeof ((!((*(int*)(it)) & CCV_TENSOR_MULTIVIEW))
? 1 : 0), __extension__ ({ if (!((*(int*)(it)) & CCV_TENSOR_MULTIVIEW
)) ; else __assert_fail ("!CCV_IS_TENSOR_MULTIVIEW(it)", "ccv_nnc_graph_run.c"
, 95, __extension__ __PRETTY_FUNCTION__); }))
;
96 carry_over->transit = it->data;
97 }
98}
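/* Taken together, the two helpers above implement the loop carry-over:
 * _ccv_nnc_graph_from_move_transit saves the data pointer of each
 * carry_over->from tensor into carry_over->transit, and
 * _ccv_nnc_graph_transit_move_to later installs that saved pointer into
 * the carry_over->to tensor, so a value crosses loop iterations by
 * pointer reassignment rather than by copying. */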
99
100static void _ccv_nnc_rewrap_tensor_wrap(const ccv_nnc_graph_t* const graph, ccv_nnc_graph_tensor_wrap_t* const tensor_wrap)
101{
102 while (tensor_wrap->index > 0 && CCV_IS_TENSOR_MULTIVIEW(tensor_wrap->tensors[tensor_wrap->index - 1])((*(int*)(tensor_wrap->tensors[tensor_wrap->index - 1])
) & CCV_TENSOR_MULTIVIEW)
&&
103 (((ccv_nnc_tensor_multiview_t*)tensor_wrap->tensors[tensor_wrap->index - 1])->anchor == (intptr_t)graph ||
104 ((ccv_nnc_tensor_multiview_t*)tensor_wrap->tensors[tensor_wrap->index - 1])->anchor == (intptr_t)graph->pair))
105 --tensor_wrap->index;
106}
107
108static void _ccv_nnc_graph_rewrap_sub_graph(const ccv_nnc_graph_t* const graph, const ccv_nnc_graph_t* const sub_graph)
109{
110 int i;
111 if (sub_graph->carry_overs)
112 for (i = 0; i < sub_graph->carry_overs->rnum; i++)
113 {
114 ccv_nnc_graph_tensor_carry_over_t* const carry_over = (ccv_nnc_graph_tensor_carry_over_t*)ccv_array_get(sub_graph->carry_overs, i)((void*)(((char*)((sub_graph->carry_overs)->data)) + (size_t
)(sub_graph->carry_overs)->rsize * (size_t)(i)))
;
115 _ccv_nnc_rewrap_tensor_wrap(graph, carry_over->from);
116 _ccv_nnc_rewrap_tensor_wrap(graph, carry_over->to);
117 }
118 if (sub_graph->sub_graphs)
119 for (i = 0; i < sub_graph->sub_graphs->rnum; i++)
120 _ccv_nnc_graph_rewrap_sub_graph(graph, *(ccv_nnc_graph_t**)ccv_array_get(sub_graph->sub_graphs, i)((void*)(((char*)((sub_graph->sub_graphs)->data)) + (size_t
)(sub_graph->sub_graphs)->rsize * (size_t)(i)))
);
121}
122
123static void _ccv_nnc_graph_rewrap(const ccv_nnc_graph_t* const graph) // Call this method at the end to roll the wrap_ptr back
124{
125 if (!graph->tensor_wraps_refs)
126 return;
127 int i, j;
128 for (i = 0; i < graph->tensor_wraps_refs->rnum; i++)
129 {
130 const ccv_nnc_graph_tensor_wraps_ref_t* const tensor_wraps_ref = (const ccv_nnc_graph_tensor_wraps_ref_t*)ccv_array_get(graph->tensor_wraps_refs, i)((void*)(((char*)((graph->tensor_wraps_refs)->data)) + (
size_t)(graph->tensor_wraps_refs)->rsize * (size_t)(i))
)
;
131 const ccv_nnc_graph_t* const sub_graph = tensor_wraps_ref->graph;
132 ccv_nnc_graph_tensor_wrap_array_t* const tensor_wrap_array = *(ccv_nnc_graph_tensor_wrap_array_t**)ccv_array_get(sub_graph->tensor_wraps, tensor_wraps_ref->d)((void*)(((char*)((sub_graph->tensor_wraps)->data)) + (
size_t)(sub_graph->tensor_wraps)->rsize * (size_t)(tensor_wraps_ref
->d)))
;
133 if (tensor_wrap_array)
134 for (j = 0; j < tensor_wrap_array->size; j++)
135 {
136 ccv_nnc_graph_tensor_wrap_t* const tensor_wrap = tensor_wrap_array->tensor_wraps[j];
137 if (!tensor_wrap)
138 continue;
139 _ccv_nnc_rewrap_tensor_wrap(graph, tensor_wrap);
140 }
141 }
142 _ccv_nnc_graph_rewrap_sub_graph(graph, graph);
143}
144
145static void _ccv_nnc_graph_exec_unwrap_io(const ccv_nnc_graph_t* const graph, ccv_nnc_graph_exec_info_t* const node)
146{
147 if (!node->tensor_wraps_ref)
148 return;
149 int i;
150 ccv_nnc_graph_tensor_wrap_array_t* const tensor_wrap_array = *(ccv_nnc_graph_tensor_wrap_array_t**)ccv_array_get(graph->tensor_wraps, node->tensor_wraps_ref - 1)((void*)(((char*)((graph->tensor_wraps)->data)) + (size_t
)(graph->tensor_wraps)->rsize * (size_t)(node->tensor_wraps_ref
- 1)))
;
151 ccv_nnc_graph_tensor_wrap_t** const tensor_wraps = tensor_wrap_array->tensor_wraps;
152 for (i = 0; i < tensor_wrap_array->size; i++)
153 if (tensor_wraps[i])
154 {
155 assert(tensor_wraps[i]->index > 0)((void) sizeof ((tensor_wraps[i]->index > 0) ? 1 : 0), __extension__
({ if (tensor_wraps[i]->index > 0) ; else __assert_fail
("tensor_wraps[i]->index > 0", "ccv_nnc_graph_run.c", 155
, __extension__ __PRETTY_FUNCTION__); }))
;
156 ccv_nnc_tensor_multiview_t* mv = (ccv_nnc_tensor_multiview_t*)(tensor_wraps[i]->tensors[tensor_wraps[i]->index - 1]);
157 assert(CCV_IS_TENSOR_MULTIVIEW(mv))((void) sizeof ((((*(int*)(mv)) & CCV_TENSOR_MULTIVIEW)) ?
1 : 0), __extension__ ({ if (((*(int*)(mv)) & CCV_TENSOR_MULTIVIEW
)) ; else __assert_fail ("CCV_IS_TENSOR_MULTIVIEW(mv)", "ccv_nnc_graph_run.c"
, 157, __extension__ __PRETTY_FUNCTION__); }))
;
158 // Only now set the mv->it, because now this node is about to get executed.
159 mv->it = tensor_wraps[i]->tensors[tensor_wraps[i]->index];
160 assert(!CCV_IS_TENSOR_MULTIVIEW(mv->it))((void) sizeof ((!((*(int*)(mv->it)) & CCV_TENSOR_MULTIVIEW
)) ? 1 : 0), __extension__ ({ if (!((*(int*)(mv->it)) &
CCV_TENSOR_MULTIVIEW)) ; else __assert_fail ("!CCV_IS_TENSOR_MULTIVIEW(mv->it)"
, "ccv_nnc_graph_run.c", 160, __extension__ __PRETTY_FUNCTION__
); }))
;
161 }
162 for (i = 0; i < node->input_size; i++)
163 if (tensor_wraps[i])
164 node->inputs[i] = tensor_wraps[i]->tensors[tensor_wraps[i]->index];
165 const int d = node->input_size;
166 for (i = 0; i < node->output_size; i++)
167 if (tensor_wraps[d + i])
168 node->outputs[i] = tensor_wraps[d + i]->tensors[tensor_wraps[d + i]->index];
169}
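/* Note on the loop bounds above: the tensor_wraps array stores the
 * node's wrapped inputs first and its wrapped outputs after them, so
 * indices [0, input_size) update node->inputs while indices
 * [input_size, input_size + output_size) update node->outputs --
 * hence the `d = node->input_size` offset. */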
170
171static void _ccv_nnc_graph_exec_unwrap_while_expr(const ccv_nnc_graph_t* const graph, ccv_nnc_graph_exec_info_t* const node)
172{
173 assert(node->flags & CCV_NNC_GRAPH_EXEC_P_WHILE)((void) sizeof ((node->flags & CCV_NNC_GRAPH_EXEC_P_WHILE
) ? 1 : 0), __extension__ ({ if (node->flags & CCV_NNC_GRAPH_EXEC_P_WHILE
) ; else __assert_fail ("node->flags & CCV_NNC_GRAPH_EXEC_P_WHILE"
, "ccv_nnc_graph_run.c", 173, __extension__ __PRETTY_FUNCTION__
); }))
;
174 if (!node->p_while.tensor_wraps_ref)
175 return;
176 int i;
177 ccv_nnc_graph_tensor_wrap_array_t* const tensor_wrap_array = *(ccv_nnc_graph_tensor_wrap_array_t**)ccv_array_get(graph->tensor_wraps, node->p_while.tensor_wraps_ref - 1)((void*)(((char*)((graph->tensor_wraps)->data)) + (size_t
)(graph->tensor_wraps)->rsize * (size_t)(node->p_while
.tensor_wraps_ref - 1)))
;
178 ccv_nnc_graph_tensor_wrap_t** const tensor_wraps = tensor_wrap_array->tensor_wraps;
179 for (i = 0; i < tensor_wrap_array->size; i++)
180 if (tensor_wraps[i])
181 {
182 assert(tensor_wraps[i]->index > 0)((void) sizeof ((tensor_wraps[i]->index > 0) ? 1 : 0), __extension__
({ if (tensor_wraps[i]->index > 0) ; else __assert_fail
("tensor_wraps[i]->index > 0", "ccv_nnc_graph_run.c", 182
, __extension__ __PRETTY_FUNCTION__); }))
;
183 ccv_nnc_tensor_multiview_t* mv = (ccv_nnc_tensor_multiview_t*)(tensor_wraps[i]->tensors[tensor_wraps[i]->index - 1]);
184 assert(CCV_IS_TENSOR_MULTIVIEW(mv))((void) sizeof ((((*(int*)(mv)) & CCV_TENSOR_MULTIVIEW)) ?
1 : 0), __extension__ ({ if (((*(int*)(mv)) & CCV_TENSOR_MULTIVIEW
)) ; else __assert_fail ("CCV_IS_TENSOR_MULTIVIEW(mv)", "ccv_nnc_graph_run.c"
, 184, __extension__ __PRETTY_FUNCTION__); }))
;
185 // Only now set the mv->it, because now this node is about to get executed.
186 mv->it = tensor_wraps[i]->tensors[tensor_wraps[i]->index];
187 assert(!CCV_IS_TENSOR_MULTIVIEW(mv->it))((void) sizeof ((!((*(int*)(mv->it)) & CCV_TENSOR_MULTIVIEW
)) ? 1 : 0), __extension__ ({ if (!((*(int*)(mv->it)) &
CCV_TENSOR_MULTIVIEW)) ; else __assert_fail ("!CCV_IS_TENSOR_MULTIVIEW(mv->it)"
, "ccv_nnc_graph_run.c", 187, __extension__ __PRETTY_FUNCTION__
); }))
;
188 }
189 for (i = 0; i < node->p_while.input_size; i++)
190 if (tensor_wraps[i])
191 node->p_while.inputs[i] = tensor_wraps[i]->tensors[tensor_wraps[i]->index];
192}
193
194static void _ccv_nnc_graph_exec_unwrap_phi(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_info_t* const node, const int ref)
195{
196 int i;
197 // If the output tensor is a phi multi-view tensor, we update our selection to all the subscribers.
198 for (i = 0; i < node->output_size; i++)
199 if (CCV_IS_TENSOR_MULTIVIEW(node->outputs[i])((*(int*)(node->outputs[i])) & CCV_TENSOR_MULTIVIEW) &&
200 ((ccv_nnc_tensor_multiview_t*)node->outputs[i])->anchor == CCV_NNC_MULTIVIEW_PHI(intptr_t)0x1)
201 {
202 ccv_nnc_tensor_multiview_t* const mv = (ccv_nnc_tensor_multiview_t*)node->outputs[i];
203 mv->it = CCV_NNC_MULTIVIEW_DATA(mv)((mv)->_heap_data ? (mv)->_heap_data : (mv)->_inline_data
)
[ref >= 0];
204 ccv_nnc_tensor_multiview_synchronize(mv);
205 }
206}
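/* For a phi multiview, `ref >= 0` is the C boolean 0 or 1, so
 * CCV_NNC_MULTIVIEW_DATA(mv)[ref >= 0] picks slot 0 when no branch was
 * taken (ref < 0) and slot 1 when one was; the synchronize call then
 * propagates that selection to all subscribers. */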
207
208static void _ccv_nnc_graph_exec_begin_synchronize_multiviews(ccv_nnc_graph_t* const graph, ccv_nnc_graph_exec_info_t* const node)
209{
210 if (!node->tensor_wraps_ref)
211 return;
212 int i;
213 ccv_nnc_graph_tensor_wrap_array_t* const tensor_wrap_array = *(ccv_nnc_graph_tensor_wrap_array_t**)ccv_array_get(graph->tensor_wraps, node->tensor_wraps_ref - 1)((void*)(((char*)((graph->tensor_wraps)->data)) + (size_t
)(graph->tensor_wraps)->rsize * (size_t)(node->tensor_wraps_ref
- 1)))
;
214 ccv_nnc_graph_tensor_wrap_t** const tensor_wraps = tensor_wrap_array->tensor_wraps;
215 for (i = 0; i < tensor_wrap_array->size; i++)
216 if (tensor_wraps[i] && tensor_wraps[i]->update_required)
217 {
218 assert(tensor_wraps[i]->index > 0)((void) sizeof ((tensor_wraps[i]->index > 0) ? 1 : 0), __extension__
({ if (tensor_wraps[i]->index > 0) ; else __assert_fail
("tensor_wraps[i]->index > 0", "ccv_nnc_graph_run.c", 218
, __extension__ __PRETTY_FUNCTION__); }))
;
219 ccv_nnc_tensor_multiview_t* const mv = (ccv_nnc_tensor_multiview_t*)(tensor_wraps[i]->tensors[tensor_wraps[i]->index - 1]);
220 // Now update the final pointer.
221 ccv_nnc_tensor_multiview_synchronize(mv);
222 tensor_wraps[i]->update_required = 0; // Reset, no need to update.
223 }
224}
225
226void ccv_nnc_print_tensor_shape(const ccv_nnc_tensor_t* const tensor)
227{
228 int i;
229 PRINT(CCV_CLI_INFO, " [%d", tensor->info.dim[0])do { if ((CCV_CLI_INFO & ccv_cli_get_output_levels())) { printf
(" [%d", tensor->info.dim[0]); fflush(stdout); } } while (
0)
;
230 for (i = 1; i < CCV_NNC_MAX_DIM_ALLOC(12) && tensor->info.dim[i]; i++)
231 PRINT(CCV_CLI_INFO, "x%d", tensor->info.dim[i])do { if ((CCV_CLI_INFO & ccv_cli_get_output_levels())) { printf
("x%d", tensor->info.dim[i]); fflush(stdout); } } while (0
)
;
232 PRINT(CCV_CLI_INFO, "]")do { if ((CCV_CLI_INFO & ccv_cli_get_output_levels())) { printf
("]"); fflush(stdout); } } while (0)
;
233}
234
235void ccv_nnc_print_tensor_info(const ccv_nnc_tensor_t* const tensor)
236{
237 int i;
238 PRINT(CCV_CLI_INFO, " [%d", tensor->info.dim[0])do { if ((CCV_CLI_INFO & ccv_cli_get_output_levels())) { printf
(" [%d", tensor->info.dim[0]); fflush(stdout); } } while (
0)
;
239 for (i = 1; i < CCV_NNC_MAX_DIM_ALLOC(12) && tensor->info.dim[i]; i++)
240 PRINT(CCV_CLI_INFO, "x%d", tensor->info.dim[i])do { if ((CCV_CLI_INFO & ccv_cli_get_output_levels())) { printf
("x%d", tensor->info.dim[i]); fflush(stdout); } } while (0
)
;
241 PRINT(CCV_CLI_INFO, "]")do { if ((CCV_CLI_INFO & ccv_cli_get_output_levels())) { printf
("]"); fflush(stdout); } } while (0)
;
242 if (!CCV_CLI_OUTPUT_LEVEL_IS(CCV_CLI_VERBOSE)(CCV_CLI_VERBOSE & ccv_cli_get_output_levels()) || tensor->info.dim[0] <= 0)
243 return;
244 const int nd = ccv_nnc_tensor_nd(tensor->info.dim);
245 const int len = ccv_min(tensor->info.dim[nd - 1], 3)({ typeof (tensor->info.dim[nd - 1]) _a = (tensor->info
.dim[nd - 1]); typeof (3) _b = (3); (_a < _b) ? _a : _b; }
)
;
246 if (CCV_TENSOR_GET_MEMORY(tensor->info.type)((tensor->info.type) & 0x3) == CCV_TENSOR_GPU_MEMORY)
247 {
248#ifdef HAVE_CUDA1
249 switch (tensor->info.datatype)
250 {
251 case CCV_16F: {
252 uint16_t data[len];
253 cumemcpy(data, CCV_TENSOR_CPU_MEMORY, tensor->data.f16, tensor->info.type, len * sizeof(uint16_t));
254 float fp32[len];
255 ccv_half_precision_to_float(data, fp32, len);
256 for (i = 0; i < len; i++)
257 PRINT(CCV_CLI_VERBOSE, " %f", fp32[i])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(" %f", fp32[i]); fflush(stdout); } } while (0)
;
258 break;
259 }
260 case CCV_32F: {
261 float data[len];
262 cumemcpy(data, CCV_TENSOR_CPU_MEMORY, tensor->data.f32, tensor->info.type, len * sizeof(float));
263 for (i = 0; i < len; i++)
264 PRINT(CCV_CLI_VERBOSE, " %f", data[i])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(" %f", data[i]); fflush(stdout); } } while (0)
;
265 break;
266 }
267 case CCV_64F: {
268 double data[len];
269 cumemcpy(data, CCV_TENSOR_CPU_MEMORY, tensor->data.f64, tensor->info.type, len * sizeof(double));
270 for (i = 0; i < len; i++)
271 PRINT(CCV_CLI_VERBOSE, " %f", data[i])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(" %f", data[i]); fflush(stdout); } } while (0)
;
272 break;
273 }
274 case CCV_32S: {
275 int data[len];
276 cumemcpy(data, CCV_TENSOR_CPU_MEMORY, tensor->data.i32, tensor->info.type, len * sizeof(int));
277 for (i = 0; i < len; i++)
278 PRINT(CCV_CLI_VERBOSE, " %d", data[i])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(" %d", data[i]); fflush(stdout); } } while (0)
;
279 break;
280 }
281 case CCV_64S: {
282 int64_t data[len];
283 cumemcpy(data, CCV_TENSOR_CPU_MEMORY, tensor->data.i64, tensor->info.type, len * sizeof(int64_t));
284 for (i = 0; i < len; i++)
285 PRINT(CCV_CLI_VERBOSE, " %lld", (long long)data[i])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(" %lld", (long long)data[i]); fflush(stdout); } } while
(0)
;
286 break;
287 }
288 case CCV_8U: {
289 uint8_t data[len];
290 cumemcpy(data, CCV_TENSOR_CPU_MEMORY, tensor->data.u8, tensor->info.type, len * sizeof(uint8_t));
291 for (i = 0; i < len; i++)
292 PRINT(CCV_CLI_VERBOSE, " %d", (int)data[i])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(" %d", (int)data[i]); fflush(stdout); } } while (0)
;
293 break;
294 }
295 }
296 if (ccv_nnc_tensor_count(tensor->info) > 3)
297 PRINT(CCV_CLI_VERBOSE, " ..")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(" .."); fflush(stdout); } } while (0)
;
298#elif defined(HAVE_MPS)
299 switch (tensor->info.datatype)
300 {
301 case CCV_16F: {
302 uint16_t data[len];
303 mpmemcpy(data, 0, CCV_TENSOR_CPU_MEMORY, tensor->data.f16, tensor->dataof, tensor->info.type, len * sizeof(uint16_t));
304 float fp32[len];
305 ccv_half_precision_to_float(data, fp32, len);
306 for (i = 0; i < len; i++)
307 PRINT(CCV_CLI_VERBOSE, " %f", fp32[i])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(" %f", fp32[i]); fflush(stdout); } } while (0)
;
308 break;
309 }
310 case CCV_32F: {
311 float data[len];
312 mpmemcpy(data, 0, CCV_TENSOR_CPU_MEMORY, tensor->data.f32, tensor->dataof, tensor->info.type, len * sizeof(float));
313 for (i = 0; i < len; i++)
314 PRINT(CCV_CLI_VERBOSE, " %f", data[i])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(" %f", data[i]); fflush(stdout); } } while (0)
;
315 break;
316 }
317 case CCV_64F: {
318 double data[len];
319 mpmemcpy(data, 0, CCV_TENSOR_CPU_MEMORY, tensor->data.f64, tensor->dataof, tensor->info.type, len * sizeof(double));
320 for (i = 0; i < len; i++)
321 PRINT(CCV_CLI_VERBOSE, " %f", data[i])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(" %f", data[i]); fflush(stdout); } } while (0)
;
322 break;
323 }
324 case CCV_32S: {
325 int data[len];
326 mpmemcpy(data, 0, CCV_TENSOR_CPU_MEMORY, tensor->data.i32, tensor->dataof, tensor->info.type, len * sizeof(int));
327 for (i = 0; i < len; i++)
328 PRINT(CCV_CLI_VERBOSE, " %d", data[i])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(" %d", data[i]); fflush(stdout); } } while (0)
;
329 break;
330 }
331 case CCV_64S: {
332 int64_t data[len];
333 mpmemcpy(data, 0, CCV_TENSOR_CPU_MEMORY, tensor->data.i64, tensor->dataof, tensor->info.type, len * sizeof(int64_t));
334 for (i = 0; i < len; i++)
335 PRINT(CCV_CLI_VERBOSE, " %lld", (long long)data[i])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(" %lld", (long long)data[i]); fflush(stdout); } } while
(0)
;
336 break;
337 }
338 case CCV_8U: {
339 uint8_t data[len];
340 mpmemcpy(data, 0, CCV_TENSOR_CPU_MEMORY, tensor->data.u8, tensor->dataof, tensor->info.type, len * sizeof(uint8_t));
341 for (i = 0; i < len; i++)
342 PRINT(CCV_CLI_VERBOSE, " %d", (int)data[i])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(" %d", (int)data[i]); fflush(stdout); } } while (0)
;
343 break;
344 }
345 }
346 if (ccv_nnc_tensor_count(tensor->info) > 3)
347 PRINT(CCV_CLI_VERBOSE, " ..")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(" .."); fflush(stdout); } } while (0)
;
348#endif
349 } else if (CCV_TENSOR_GET_MEMORY(tensor->info.type)((tensor->info.type) & 0x3) == CCV_TENSOR_CPU_MEMORY) {
350 switch (tensor->info.datatype)
351 {
352 case CCV_16F: {
353 float fp32[len];
354 ccv_half_precision_to_float((uint16_t*)tensor->data.f16, fp32, len);
355 for (i = 0; i < len; i++)
356 PRINT(CCV_CLI_VERBOSE, " %f", fp32[i])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(" %f", fp32[i]); fflush(stdout); } } while (0)
;
357 break;
358 }
359 case CCV_32F:
360 for (i = 0; i < len; i++)
361 PRINT(CCV_CLI_VERBOSE, " %f", tensor->data.f32[i])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(" %f", tensor->data.f32[i]); fflush(stdout); } }
while (0)
;
362 break;
363 case CCV_64F:
364 for (i = 0; i < len; i++)
365 PRINT(CCV_CLI_VERBOSE, " %f", tensor->data.f64[i])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(" %f", tensor->data.f64[i]); fflush(stdout); } }
while (0)
;
366 break;
367 case CCV_32S:
368 for (i = 0; i < len; i++)
369 PRINT(CCV_CLI_VERBOSE, " %d", tensor->data.i32[i])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(" %d", tensor->data.i32[i]); fflush(stdout); } }
while (0)
;
370 break;
371 case CCV_64S:
372 for (i = 0; i < len; i++)
373 PRINT(CCV_CLI_VERBOSE, " %lld", (long long)tensor->data.i64[i])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(" %lld", (long long)tensor->data.i64[i]); fflush
(stdout); } } while (0)
;
374 break;
375 case CCV_8U:
376 for (i = 0; i < len; i++)
377 PRINT(CCV_CLI_VERBOSE, " %d", (int)tensor->data.u8[i])do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(" %d", (int)tensor->data.u8[i]); fflush(stdout);
} } while (0)
;
378 break;
379 }
380 if (ccv_nnc_tensor_count(tensor->info) > 3)
381 PRINT(CCV_CLI_VERBOSE, " ..")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf(" .."); fflush(stdout); } } while (0)
;
382 }
383}
384
385static co_decl(_ccv_nnc_graph_topsorted_run_coro, (ccv_nnc_graph_t* const graph, const int exec_idx, const ccv_nnc_graph_static_schedule_t* const schedule, ccv_nnc_graph_exec_info_t* const exec, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const stream_context, const int flags))co_state_t _ccv_nnc_graph_topsorted_run_coro(co_routine_t* const
_self, void* const _privates_); struct _ccv_nnc_graph_topsorted_run_coro_param_s
{ struct { ccv_nnc_graph_t* const graph;const int exec_idx;const
ccv_nnc_graph_static_schedule_t* const schedule;ccv_nnc_graph_exec_info_t
* const exec;ccv_nnc_tensor_tape_t* const tensor_tape;ccv_nnc_stream_context_t
* const stream_context;const int flags;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; } _co_params; }; size_t _ccv_nnc_graph_topsorted_run_coro_stack_size
(void);
;
386
387static co_decl_task(_ccv_nnc_graph_exec_cases_of_coro, (ccv_nnc_graph_t* const graph, const int exec_idx, ccv_nnc_graph_exec_info_t* const exec, const ccv_nnc_graph_exec_schedule_t* const schd, ccv_nnc_tensor_t* const* const inputs, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const stream_context, int flags), private(co_state_t _ccv_nnc_graph_exec_cases_of_coro(co_routine_t* const
_self, void* const _privates_); struct _ccv_nnc_graph_exec_cases_of_coro_param_s
{ struct { ccv_nnc_graph_t* const graph;const int exec_idx;ccv_nnc_graph_exec_info_t
* const exec;const ccv_nnc_graph_exec_schedule_t* const schd;
ccv_nnc_tensor_t* const* const inputs;ccv_nnc_tensor_tape_t* const
tensor_tape;ccv_nnc_stream_context_t* const stream_context;int
flags;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; } _co_params; }; size_t _ccv_nnc_graph_exec_cases_of_coro_stack_size
(void); struct _ccv_nnc_graph_exec_cases_of_coro_private_s { struct
_ccv_nnc_graph_exec_cases_of_coro_param_s _co_params; int ref
; ccv_nnc_graph_t* sub_graph; }; size_t _ccv_nnc_graph_exec_cases_of_coro_stack_size
(void) { return sizeof(struct _ccv_nnc_graph_exec_cases_of_coro_private_s
); } co_state_t _ccv_nnc_graph_exec_cases_of_coro(co_routine_t
* const _self_, void* const _privates_) { struct _private_s {
struct _ccv_nnc_graph_exec_cases_of_coro_param_s _co_params;
int ref; ccv_nnc_graph_t* sub_graph; }; switch (_self_->line
) { case 0:
388 int ref;co_state_t _ccv_nnc_graph_exec_cases_of_coro(co_routine_t* const
_self, void* const _privates_); struct _ccv_nnc_graph_exec_cases_of_coro_param_s
{ struct { ccv_nnc_graph_t* const graph;const int exec_idx;ccv_nnc_graph_exec_info_t
* const exec;const ccv_nnc_graph_exec_schedule_t* const schd;
ccv_nnc_tensor_t* const* const inputs;ccv_nnc_tensor_tape_t* const
tensor_tape;ccv_nnc_stream_context_t* const stream_context;int
flags;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; } _co_params; }; size_t _ccv_nnc_graph_exec_cases_of_coro_stack_size
(void); struct _ccv_nnc_graph_exec_cases_of_coro_private_s { struct
_ccv_nnc_graph_exec_cases_of_coro_param_s _co_params; int ref
; ccv_nnc_graph_t* sub_graph; }; size_t _ccv_nnc_graph_exec_cases_of_coro_stack_size
(void) { return sizeof(struct _ccv_nnc_graph_exec_cases_of_coro_private_s
); } co_state_t _ccv_nnc_graph_exec_cases_of_coro(co_routine_t
* const _self_, void* const _privates_) { struct _private_s {
struct _ccv_nnc_graph_exec_cases_of_coro_param_s _co_params;
int ref; ccv_nnc_graph_t* sub_graph; }; switch (_self_->line
) { case 0:
389 ccv_nnc_graph_t* sub_graph;co_state_t _ccv_nnc_graph_exec_cases_of_coro(co_routine_t* const
_self, void* const _privates_); struct _ccv_nnc_graph_exec_cases_of_coro_param_s
{ struct { ccv_nnc_graph_t* const graph;const int exec_idx;ccv_nnc_graph_exec_info_t
* const exec;const ccv_nnc_graph_exec_schedule_t* const schd;
ccv_nnc_tensor_t* const* const inputs;ccv_nnc_tensor_tape_t* const
tensor_tape;ccv_nnc_stream_context_t* const stream_context;int
flags;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; } _co_params; }; size_t _ccv_nnc_graph_exec_cases_of_coro_stack_size
(void); struct _ccv_nnc_graph_exec_cases_of_coro_private_s { struct
_ccv_nnc_graph_exec_cases_of_coro_param_s _co_params; int ref
; ccv_nnc_graph_t* sub_graph; }; size_t _ccv_nnc_graph_exec_cases_of_coro_stack_size
(void) { return sizeof(struct _ccv_nnc_graph_exec_cases_of_coro_private_s
); } co_state_t _ccv_nnc_graph_exec_cases_of_coro(co_routine_t
* const _self_, void* const _privates_) { struct _private_s {
struct _ccv_nnc_graph_exec_cases_of_coro_param_s _co_params;
int ref; ccv_nnc_graph_t* sub_graph; }; switch (_self_->line
) { case 0:
390))co_state_t _ccv_nnc_graph_exec_cases_of_coro(co_routine_t* const
_self, void* const _privates_); struct _ccv_nnc_graph_exec_cases_of_coro_param_s
{ struct { ccv_nnc_graph_t* const graph;const int exec_idx;ccv_nnc_graph_exec_info_t
* const exec;const ccv_nnc_graph_exec_schedule_t* const schd;
ccv_nnc_tensor_t* const* const inputs;ccv_nnc_tensor_tape_t* const
tensor_tape;ccv_nnc_stream_context_t* const stream_context;int
flags;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; } _co_params; }; size_t _ccv_nnc_graph_exec_cases_of_coro_stack_size
(void); struct _ccv_nnc_graph_exec_cases_of_coro_private_s { struct
_ccv_nnc_graph_exec_cases_of_coro_param_s _co_params; int ref
; ccv_nnc_graph_t* sub_graph; }; size_t _ccv_nnc_graph_exec_cases_of_coro_stack_size
(void) { return sizeof(struct _ccv_nnc_graph_exec_cases_of_coro_private_s
); } co_state_t _ccv_nnc_graph_exec_cases_of_coro(co_routine_t
* const _self_, void* const _privates_) { struct _private_s {
struct _ccv_nnc_graph_exec_cases_of_coro_param_s _co_params;
int ref; ccv_nnc_graph_t* sub_graph; }; switch (_self_->line
) { case 0:
{
391 // Wait until this stream context is done.
392 co_stream_await(CO_P(stream_context))do { if (!_co_stream_await(_self_, (((struct _private_s*)(_privates_
))->_co_params._co_params.stream_context))) { return (co_state_t
){ 392, 0 }; } case 392: ; } while (0)
;
393 if (CO_P(exec)(((struct _private_s*)(_privates_))->_co_params._co_params
.exec)
->cmd.cmd == CCV_NNC_GRAPH_FORWARD)
394 {
395 CO_V(ref)(((struct _private_s*)(_privates_))->ref) = CO_P(exec)(((struct _private_s*)(_privates_))->_co_params._co_params
.exec)
->case_of.offset + CO_P(exec)(((struct _private_s*)(_privates_))->_co_params._co_params
.exec)
->case_of.expr(CO_P(inputs)(((struct _private_s*)(_privates_))->_co_params._co_params
.inputs)
, CO_P(exec)(((struct _private_s*)(_privates_))->_co_params._co_params
.exec)
->input_size, CO_P(exec)(((struct _private_s*)(_privates_))->_co_params._co_params
.exec)
->case_of.data);
396 if (CO_P(tensor_tape)(((struct _private_s*)(_privates_))->_co_params._co_params
.tensor_tape)
)
397 ccv_nnc_tensor_tape_set_numbering(CO_P(tensor_tape)(((struct _private_s*)(_privates_))->_co_params._co_params
.tensor_tape)
, CO_P(graph)(((struct _private_s*)(_privates_))->_co_params._co_params
.graph)
, (ccv_nnc_graph_exec_t){
398 .d = CO_P(exec_idx)(((struct _private_s*)(_privates_))->_co_params._co_params
.exec_idx)
,
399 .graph = CO_P(graph)(((struct _private_s*)(_privates_))->_co_params._co_params
.graph)
,
400 }, CO_V(ref)(((struct _private_s*)(_privates_))->ref));
401 } else {
402 assert(CO_P(exec)->cmd.cmd == CCV_NNC_GRAPH_BACKWARD)((void) sizeof (((((struct _private_s*)(_privates_))->_co_params
._co_params.exec)->cmd.cmd == CCV_NNC_GRAPH_BACKWARD) ? 1 :
0), __extension__ ({ if ((((struct _private_s*)(_privates_))
->_co_params._co_params.exec)->cmd.cmd == CCV_NNC_GRAPH_BACKWARD
) ; else __assert_fail ("CO_P(exec)->cmd.cmd == CCV_NNC_GRAPH_BACKWARD"
, "ccv_nnc_graph_run.c", 402, __extension__ __PRETTY_FUNCTION__
); }))
;
403 assert(CO_P(tensor_tape))((void) sizeof (((((struct _private_s*)(_privates_))->_co_params
._co_params.tensor_tape)) ? 1 : 0), __extension__ ({ if ((((struct
_private_s*)(_privates_))->_co_params._co_params.tensor_tape
)) ; else __assert_fail ("CO_P(tensor_tape)", "ccv_nnc_graph_run.c"
, 403, __extension__ __PRETTY_FUNCTION__); }))
;
404 CO_V(ref)(((struct _private_s*)(_privates_))->ref) = ccv_nnc_tensor_tape_numbering(CO_P(tensor_tape)(((struct _private_s*)(_privates_))->_co_params._co_params
.tensor_tape)
, CO_P(graph)(((struct _private_s*)(_privates_))->_co_params._co_params
.graph)
, (ccv_nnc_graph_exec_t){
405 .d = CO_P(exec_idx)(((struct _private_s*)(_privates_))->_co_params._co_params
.exec_idx)
,
406 .graph = CO_P(graph)(((struct _private_s*)(_privates_))->_co_params._co_params
.graph)
,
407 });
408 }
409 if (CO_V(ref)(((struct _private_s*)(_privates_))->ref) >= 0)
410 {
411 assert(CO_V(ref) < CO_P(exec)->graph_ref_size)((void) sizeof (((((struct _private_s*)(_privates_))->ref)
< (((struct _private_s*)(_privates_))->_co_params._co_params
.exec)->graph_ref_size) ? 1 : 0), __extension__ ({ if ((((
struct _private_s*)(_privates_))->ref) < (((struct _private_s
*)(_privates_))->_co_params._co_params.exec)->graph_ref_size
) ; else __assert_fail ("CO_V(ref) < CO_P(exec)->graph_ref_size"
, "ccv_nnc_graph_run.c", 411, __extension__ __PRETTY_FUNCTION__
); }))
;
412 CO_V(sub_graph)(((struct _private_s*)(_privates_))->sub_graph) = *(ccv_nnc_graph_t**)ccv_array_get(CO_P(graph)->sub_graphs, CCV_NNC_GRAPH_REF(CO_P(exec))[CO_V(ref)] - 1)((void*)(((char*)(((((struct _private_s*)(_privates_))->_co_params
._co_params.graph)->sub_graphs)->data)) + (size_t)((((struct
_private_s*)(_privates_))->_co_params._co_params.graph)->
sub_graphs)->rsize * (size_t)((((((struct _private_s*)(_privates_
))->_co_params._co_params.exec))->_heap_graph_ref ? (((
(struct _private_s*)(_privates_))->_co_params._co_params.exec
))->_heap_graph_ref : ((((struct _private_s*)(_privates_))
->_co_params._co_params.exec))->_inline_graph_ref)[(((struct
_private_s*)(_privates_))->ref)] - 1)))
;
413 assert(CO_P(schd)->stream_size == 1)((void) sizeof (((((struct _private_s*)(_privates_))->_co_params
._co_params.schd)->stream_size == 1) ? 1 : 0), __extension__
({ if ((((struct _private_s*)(_privates_))->_co_params._co_params
.schd)->stream_size == 1) ; else __assert_fail ("CO_P(schd)->stream_size == 1"
, "ccv_nnc_graph_run.c", 413, __extension__ __PRETTY_FUNCTION__
); }))
;
414 assert(CO_P(graph)->streams[SCHEDULE_STREAMS(*CO_P(schd))[0]] == CO_V(sub_graph)->streams[0])((void) sizeof (((((struct _private_s*)(_privates_))->_co_params
._co_params.graph)->streams[((*(((struct _private_s*)(_privates_
))->_co_params._co_params.schd)).stream_size <= 1 ? (*(
((struct _private_s*)(_privates_))->_co_params._co_params.
schd))._inline_streams : (*(((struct _private_s*)(_privates_)
)->_co_params._co_params.schd))._heap_streams)[0]] == (((struct
_private_s*)(_privates_))->sub_graph)->streams[0]) ? 1
: 0), __extension__ ({ if ((((struct _private_s*)(_privates_
))->_co_params._co_params.graph)->streams[((*(((struct _private_s
*)(_privates_))->_co_params._co_params.schd)).stream_size <=
1 ? (*(((struct _private_s*)(_privates_))->_co_params._co_params
.schd))._inline_streams : (*(((struct _private_s*)(_privates_
))->_co_params._co_params.schd))._heap_streams)[0]] == (((
struct _private_s*)(_privates_))->sub_graph)->streams[0
]) ; else __assert_fail ("CO_P(graph)->streams[SCHEDULE_STREAMS(*CO_P(schd))[0]] == CO_V(sub_graph)->streams[0]"
, "ccv_nnc_graph_run.c", 414, __extension__ __PRETTY_FUNCTION__
); }))
;
415 co_apply(_ccv_nnc_graph_topsorted_run_coro, (CO_V(sub_graph), CO_P(exec_idx), CO_V(sub_graph)->default_schedule, CO_P(exec), CO_P(tensor_tape), CO_P(graph)->streams[SCHEDULE_STREAMS(*CO_P(schd))[0]], CO_P(flags)))do { _self_->callee = ({ co_routine_t* const task = malloc
((sizeof(co_routine_t) + _ccv_nnc_graph_topsorted_run_coro_stack_size
())); do { struct _ccv_nnc_graph_topsorted_run_coro_param_s params
= { ._co_params = { (((struct _private_s*)(_privates_))->
sub_graph), (((struct _private_s*)(_privates_))->_co_params
._co_params.exec_idx), (((struct _private_s*)(_privates_))->
sub_graph)->default_schedule, (((struct _private_s*)(_privates_
))->_co_params._co_params.exec), (((struct _private_s*)(_privates_
))->_co_params._co_params.tensor_tape), (((struct _private_s
*)(_privates_))->_co_params._co_params.graph)->streams[
((*(((struct _private_s*)(_privates_))->_co_params._co_params
.schd)).stream_size <= 1 ? (*(((struct _private_s*)(_privates_
))->_co_params._co_params.schd))._inline_streams : (*(((struct
_private_s*)(_privates_))->_co_params._co_params.schd))._heap_streams
)[0]], (((struct _private_s*)(_privates_))->_co_params._co_params
.flags) } }; task->fn = _ccv_nnc_graph_topsorted_run_coro;
task->line = 0; task->done = 0; task->root = 0; task
->other_size = 0; task->notify_any = 0; task->others
= 0; task->caller = 0; task->callee = 0; if (sizeof(params
) > 0) memcpy(task + 1, &params, sizeof(params)); } while
(0); task; }); _co_apply(_self_, _self_->callee); return (
co_state_t){ 415, 0 }; case 415: co_free(_self_->callee); _self_
->callee = 0; } while (0)
;
416 }
417 _ccv_nnc_graph_exec_unwrap_phi(CO_P(graph)(((struct _private_s*)(_privates_))->_co_params._co_params
.graph)
, CO_P(exec)(((struct _private_s*)(_privates_))->_co_params._co_params
.exec)
, CO_V(ref)(((struct _private_s*)(_privates_))->ref));
418} co_end()default: return (co_state_t){ 418, 1 }; } }
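/* Summarizing the coroutine above: after awaiting the stream, a FORWARD
 * node computes the branch `ref` by evaluating exec->case_of.expr on the
 * inputs (recording the choice into the tensor tape when present), while
 * a BACKWARD node reads `ref` back from the tape. A non-negative ref
 * selects the corresponding sub-graph, which runs to completion on the
 * same stream via _ccv_nnc_graph_topsorted_run_coro before the phi
 * multiviews are resolved. */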
419
420typedef struct {
421 ccv_nnc_graph_t* graph;
422 const ccv_nnc_graph_exec_schedule_t* node;
423 ccv_nnc_stream_context_t* stream;
424} ccv_nnc_graph_neighbor_context_discovery_t;
425
426static ccv_nnc_stream_context_t* _ccv_nnc_graph_neighbor_context_discovery(const int device_id, void* const context)
427{
428 const ccv_nnc_graph_neighbor_context_discovery_t* const discovery = (ccv_nnc_graph_neighbor_context_discovery_t*)context;
429 if (CCV_STREAM_GET_DEVICE_ID(ccv_nnc_stream_context_type(discovery->stream))(((ccv_nnc_stream_context_type(discovery->stream)) & 0xfff00
) >> 8)
== device_id)
430 return discovery->stream;
431 ccv_nnc_graph_t* const graph = discovery->graph;
432 const ccv_nnc_graph_exec_schedule_t* const node = discovery->node;
433 int i;
434 // First try to find in other streams of the same node.
435 for (i = 0; i < node->stream_size; i++)
436 {
437 ccv_nnc_stream_context_t* const stream = graph->streams[SCHEDULE_STREAMS(*node)((*node).stream_size <= 1 ? (*node)._inline_streams : (*node
)._heap_streams)
[i]];
438 if (CCV_STREAM_GET_DEVICE_ID(ccv_nnc_stream_context_type(stream))(((ccv_nnc_stream_context_type(stream)) & 0xfff00) >>
8)
== device_id)
439 return stream;
440 }
 441 // If we cannot find one, try to find it in all the wait streams.
442 for (i = 0; i < node->wait_size; i++)
443 {
444 ccv_nnc_stream_context_t* stream_context = ccv_nnc_stream_signal_get_emitter(graph->signals[node->waits[i]]);
445 if (stream_context && CCV_STREAM_GET_DEVICE_ID(ccv_nnc_stream_context_type(stream_context))(((ccv_nnc_stream_context_type(stream_context)) & 0xfff00
) >> 8)
== device_id)
446 return stream_context;
447 }
448 return 0;
449}
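/* Discovery order above: return the node's own stream if its device id
 * already matches, then scan the node's other scheduled streams, then
 * the emitters of the signals this node waits on; return 0 when no
 * stream on that device is found. */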
450
451static co_routine_t* _ccv_nnc_graph_exec_run_task(ccv_nnc_graph_t* const graph, ccv_nnc_graph_exec_info_t* const node, const ccv_nnc_graph_exec_schedule_t* const schd, const int idx, ccv_nnc_tensor_tape_t* const tensor_tape, const int flags)
452{
453 _ccv_nnc_graph_exec_unwrap_io(graph, node);
454 ccv_nnc_tensor_t** inputs = node->inputs;
455 ccv_nnc_tensor_t** outputs = inputs ? inputs + node->input_size : 0;
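/* When node->inputs is null, `outputs` is assigned null here as well;
 * this is the kind of derivation that can lead to the null outputs[i]
 * dereference the report flags at line 903 (outside this excerpt). */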
456 if (tensor_tape)
457 ccv_nnc_tensor_tape_io(tensor_tape, graph, node->input_flags, inputs, node->input_size, node->output_flags, outputs, node->output_size);
 458 /* Broadcast the updates to all subscribed references for input / output, even though at the
 459 * time the output is not written yet, propagating the pointer change is still valid. */
460 _ccv_nnc_graph_exec_begin_synchronize_multiviews(graph, node);
461 if (node->cmd.cmd == CCV_NNC_GRAPH_FORWARD || node->cmd.cmd == CCV_NNC_GRAPH_BACKWARD)
462 {
463 if (node->flags & CCV_NNC_GRAPH_EXEC_CASE_OF)
464 {
465 ccv_nnc_stream_context_t* const node_stream = graph->streams[SCHEDULE_STREAMS(*schd)((*schd).stream_size <= 1 ? (*schd)._inline_streams : (*schd
)._heap_streams)
[0]];
466 return co_new(_ccv_nnc_graph_exec_cases_of_coro, (graph, idx, node, schd, inputs, tensor_tape, node_stream, flags))({ co_routine_t* const task = malloc((sizeof(co_routine_t) + _ccv_nnc_graph_exec_cases_of_coro_stack_size
())); do { struct _ccv_nnc_graph_exec_cases_of_coro_param_s params
= { ._co_params = { graph, idx, node, schd, inputs, tensor_tape
, node_stream, flags } }; task->fn = _ccv_nnc_graph_exec_cases_of_coro
; task->line = 0; task->done = 0; task->root = 0; task
->other_size = 0; task->notify_any = 0; task->others
= 0; task->caller = 0; task->callee = 0; if (sizeof(params
) > 0) memcpy(task + 1, &params, sizeof(params)); } while
(0); task; })
;
467 } else if (node->flags & CCV_NNC_GRAPH_EXEC_P_WHILE) {
468 ccv_nnc_graph_t* sub_graph = *(ccv_nnc_graph_t**)ccv_array_get(graph->sub_graphs, CCV_NNC_GRAPH_REF(node)[0] - 1)((void*)(((char*)((graph->sub_graphs)->data)) + (size_t
)(graph->sub_graphs)->rsize * (size_t)(((node)->_heap_graph_ref
? (node)->_heap_graph_ref : (node)->_inline_graph_ref)
[0] - 1)))
;
469 assert(graph->streams[SCHEDULE_STREAMS(*schd)[0]] == sub_graph->streams[0])((void) sizeof ((graph->streams[((*schd).stream_size <=
1 ? (*schd)._inline_streams : (*schd)._heap_streams)[0]] == sub_graph
->streams[0]) ? 1 : 0), __extension__ ({ if (graph->streams
[((*schd).stream_size <= 1 ? (*schd)._inline_streams : (*schd
)._heap_streams)[0]] == sub_graph->streams[0]) ; else __assert_fail
("graph->streams[SCHEDULE_STREAMS(*schd)[0]] == sub_graph->streams[0]"
, "ccv_nnc_graph_run.c", 469, __extension__ __PRETTY_FUNCTION__
); }))
;
470 return co_new(_ccv_nnc_graph_topsorted_run_coro, (sub_graph, idx, sub_graph->default_schedule, node, tensor_tape, graph->streams[SCHEDULE_STREAMS(*schd)[0]], flags))({ co_routine_t* const task = malloc((sizeof(co_routine_t) + _ccv_nnc_graph_topsorted_run_coro_stack_size
())); do { struct _ccv_nnc_graph_topsorted_run_coro_param_s params
= { ._co_params = { sub_graph, idx, sub_graph->default_schedule
, node, tensor_tape, graph->streams[((*schd).stream_size <=
1 ? (*schd)._inline_streams : (*schd)._heap_streams)[0]], flags
} }; task->fn = _ccv_nnc_graph_topsorted_run_coro; task->
line = 0; task->done = 0; task->root = 0; task->other_size
= 0; task->notify_any = 0; task->others = 0; task->
caller = 0; task->callee = 0; if (sizeof(params) > 0) memcpy
(task + 1, &params, sizeof(params)); } while (0); task; }
)
;
471 }
472 } else {
473 PRINT(CCV_CLI_INFO, "%s [%d]: [%d] -> [%d] (%d)\n", ccv_nnc_cmd_name(node->cmd.cmd), idx, node->input_size, node->output_size, SCHEDULE_STREAMS(*schd)[0])do { if ((CCV_CLI_INFO & ccv_cli_get_output_levels())) { printf
("%s [%d]: [%d] -> [%d] (%d)\n", ccv_nnc_cmd_name(node->
cmd.cmd), idx, node->input_size, node->output_size, ((*
schd).stream_size <= 1 ? (*schd)._inline_streams : (*schd)
._heap_streams)[0]); fflush(stdout); } } while (0)
;
474 int i, j;
475 int flag = 0;
476 for (i = 0; i < schd->stream_size; i++)
477 {
478 ccv_nnc_stream_context_t* const stream = graph->streams[SCHEDULE_STREAMS(*schd)((*schd).stream_size <= 1 ? (*schd)._inline_streams : (*schd
)._heap_streams)
[i]];
479 for (j = 0; j < schd->wait_size; j++)
480 {
481 ccv_nnc_stream_context_wait_signal(stream, graph->signals[schd->waits[j]]);
482 if (!flag)
483 {
484 PRINT(CCV_CLI_INFO, "Wait: (%d, %d)", SCHEDULE_STREAMS(*schd)[i], schd->waits[j])do { if ((CCV_CLI_INFO & ccv_cli_get_output_levels())) { printf
("Wait: (%d, %d)", ((*schd).stream_size <= 1 ? (*schd)._inline_streams
: (*schd)._heap_streams)[i], schd->waits[j]); fflush(stdout
); } } while (0)
;
485 flag = 1;
486 } else
487 PRINT(CCV_CLI_INFO, ", (%d, %d)", SCHEDULE_STREAMS(*schd)[i], schd->waits[j])do { if ((CCV_CLI_INFO & ccv_cli_get_output_levels())) { printf
(", (%d, %d)", ((*schd).stream_size <= 1 ? (*schd)._inline_streams
: (*schd)._heap_streams)[i], schd->waits[j]); fflush(stdout
); } } while (0)
;
488 }
489 }
490 if (flag)
491 PRINT(CCV_CLI_INFO, "\n")do { if ((CCV_CLI_INFO & ccv_cli_get_output_levels())) { printf
("\n"); fflush(stdout); } } while (0)
;
492 for (i = 0; i < node->input_size; i++)
493 {
494 PRINT(CCV_CLI_INFO, "|-> %d. %p (%p:%d)", i + 1, inputs[i], (inputs[i] ? inputs[i]->data.u8 : 0), (inputs[i] ? CCV_TENSOR_GET_DEVICE_ID(inputs[i]->info.type) : -1))do { if ((CCV_CLI_INFO & ccv_cli_get_output_levels())) { printf
("|-> %d. %p (%p:%d)", i + 1, inputs[i], (inputs[i] ? inputs
[i]->data.u8 : 0), (inputs[i] ? (((inputs[i]->info.type
) & 0xfff00) >> 8) : -1)); fflush(stdout); } } while
(0)
;
495 if (inputs[i] && CCV_CLI_OUTPUT_LEVEL_IS(CCV_CLI_INFO)(CCV_CLI_INFO & ccv_cli_get_output_levels()))
496 ccv_nnc_print_tensor_info(inputs[i]);
497 PRINT(CCV_CLI_INFO, "\n")do { if ((CCV_CLI_INFO & ccv_cli_get_output_levels())) { printf
("\n"); fflush(stdout); } } while (0)
;
498 }
499 for (i = 0; i < node->output_size; i++)
500 {
501 PRINT(CCV_CLI_INFO, "|<- %d. %p (%p:%d)", i + 1, outputs[i], (outputs[i] ? outputs[i]->data.u8 : 0), (outputs[i] ? CCV_TENSOR_GET_DEVICE_ID(outputs[i]->info.type) : -1))do { if ((CCV_CLI_INFO & ccv_cli_get_output_levels())) { printf
("|<- %d. %p (%p:%d)", i + 1, outputs[i], (outputs[i] ? outputs
[i]->data.u8 : 0), (outputs[i] ? (((outputs[i]->info.type
) & 0xfff00) >> 8) : -1)); fflush(stdout); } } while
(0)
;
502 if (outputs[i] && CCV_CLI_OUTPUT_LEVEL_IS(CCV_CLI_INFO)(CCV_CLI_INFO & ccv_cli_get_output_levels()))
503 ccv_nnc_print_tensor_shape(outputs[i]);
504 PRINT(CCV_CLI_INFO, "\n")do { if ((CCV_CLI_INFO & ccv_cli_get_output_levels())) { printf
("\n"); fflush(stdout); } } while (0)
;
505 }
506 ccv_nnc_stream_context_t* const node_stream = graph->streams[SCHEDULE_STREAMS(*schd)((*schd).stream_size <= 1 ? (*schd)._inline_streams : (*schd
)._heap_streams)
[0]];
507 ccv_nnc_graph_neighbor_context_discovery_t discovery_context = {
508 .graph = graph,
509 .node = schd,
510 .stream = node_stream
511 };
512 ccv_nnc_stream_context_set_neighbor_discovery(node_stream, _ccv_nnc_graph_neighbor_context_discovery, &discovery_context);
513 const int status = ccv_nnc_cmd_exec(node->cmd, node->hint, flags, inputs, node->input_size, outputs, node->output_size, node_stream);
514 if (status != 0)
515 PRINT(CCV_CLI_INFO, "Invalid Status: %d\n", status)do { if ((CCV_CLI_INFO & ccv_cli_get_output_levels())) { printf
("Invalid Status: %d\n", status); fflush(stdout); } } while (
0)
;
516 if (CCV_CLI_OUTPUT_LEVEL_IS(CCV_CLI_VERBOSE)(CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
517 {
518 for (i = 0; i < node->output_size; i++)
519 {
520 PRINT(CCV_CLI_VERBOSE, "POST: |<- %d. %p (%p:%d)", i + 1, outputs[i], (outputs[i] ? outputs[i]->data.u8 : 0), (outputs[i] ? CCV_TENSOR_GET_DEVICE_ID(outputs[i]->info.type) : -1))do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("POST: |<- %d. %p (%p:%d)", i + 1, outputs[i], (
outputs[i] ? outputs[i]->data.u8 : 0), (outputs[i] ? (((outputs
[i]->info.type) & 0xfff00) >> 8) : -1)); fflush(
stdout); } } while (0)
;
521 if (outputs[i])
522 ccv_nnc_print_tensor_info(outputs[i]);
523 PRINT(CCV_CLI_VERBOSE, "\n")do { if ((CCV_CLI_VERBOSE & ccv_cli_get_output_levels()))
{ printf("\n"); fflush(stdout); } } while (0)
;
524 }
525 }
526 flag = 0;
527 for (i = 0; i < schd->stream_size; i++)
528 if (SCHEDULE_SIGNALS(*schd)((*schd).stream_size <= 1 ? (*schd)._inline_signals : (*schd
)._heap_signals)
[i] >= 0)
529 {
530 ccv_nnc_stream_context_t* const stream = graph->streams[SCHEDULE_STREAMS(*schd)((*schd).stream_size <= 1 ? (*schd)._inline_streams : (*schd
)._heap_streams)
[i]];
531 ccv_nnc_stream_context_emit_signal(stream, graph->signals[SCHEDULE_SIGNALS(*schd)((*schd).stream_size <= 1 ? (*schd)._inline_signals : (*schd
)._heap_signals)
[i]]);
532 if (!flag)
533 {
534 PRINT(CCV_CLI_INFO, "Emit: (%d, %d)", SCHEDULE_STREAMS(*schd)[i], SCHEDULE_SIGNALS(*schd)[i])do { if ((CCV_CLI_INFO & ccv_cli_get_output_levels())) { printf
("Emit: (%d, %d)", ((*schd).stream_size <= 1 ? (*schd)._inline_streams
: (*schd)._heap_streams)[i], ((*schd).stream_size <= 1 ? (
*schd)._inline_signals : (*schd)._heap_signals)[i]); fflush(stdout
); } } while (0)
;
535 flag = 1;
536 } else
537 PRINT(CCV_CLI_INFO, ", (%d, %d)", SCHEDULE_STREAMS(*schd)[i], SCHEDULE_SIGNALS(*schd)[i])do { if ((CCV_CLI_INFO & ccv_cli_get_output_levels())) { printf
(", (%d, %d)", ((*schd).stream_size <= 1 ? (*schd)._inline_streams
: (*schd)._heap_streams)[i], ((*schd).stream_size <= 1 ? (
*schd)._inline_signals : (*schd)._heap_signals)[i]); fflush(stdout
); } } while (0)
;
538 }
539 if (flag)
540 PRINT(CCV_CLI_INFO, "\n")do { if ((CCV_CLI_INFO & ccv_cli_get_output_levels())) { printf
("\n"); fflush(stdout); } } while (0)
;
541 }
542 return 0;
543}
544
545static void _ccv_nnc_graph_mark_outgoing_streams_blocked_by_task(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_schedule_t* const schd_info, ccv_nnc_graph_exec_info_t* const node, co_routine_t* const task)
546{
547 int i, j;
548 if (node->outgoings)
549 for (i = 0; i < node->outgoings->rnum; i++)
550 {
551 const int outgoing_idx = *(int*)ccv_array_get(node->outgoings, i)((void*)(((char*)((node->outgoings)->data)) + (size_t)(
node->outgoings)->rsize * (size_t)(i)))
;
552 const ccv_nnc_graph_exec_schedule_t* const outgoing_schd = schd_info + outgoing_idx;
553 // An outgoing stream can be blocked by multiple other tasks from other streams. But it is OK,
 554 // because on the next round of execution, that one will be marked as blocked again.
555 for (j = 0; j < outgoing_schd->stream_size; j++)
556 graph->block_stream_tasks[SCHEDULE_STREAMS(*outgoing_schd)((*outgoing_schd).stream_size <= 1 ? (*outgoing_schd)._inline_streams
: (*outgoing_schd)._heap_streams)
[j]] = task;
557 }
558}
559
560static co_decl_task(_ccv_nnc_graph_wait_any_sub_tasks, (ccv_nnc_graph_t* const graph, co_routine_t* const* const sub_tasks, const int sub_task_size, const ccv_nnc_graph_exec_schedule_t* const schd_info, const int* const pending_nodes, const int pending_node_size), private(co_state_t _ccv_nnc_graph_wait_any_sub_tasks(co_routine_t* const
_self, void* const _privates_); struct _ccv_nnc_graph_wait_any_sub_tasks_param_s
{ struct { ccv_nnc_graph_t* const graph;co_routine_t* const*
const sub_tasks;const int sub_task_size;const ccv_nnc_graph_exec_schedule_t
* const schd_info;const int* const pending_nodes;const int pending_node_size
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; }
_co_params; }; size_t _ccv_nnc_graph_wait_any_sub_tasks_stack_size
(void); struct _ccv_nnc_graph_wait_any_sub_tasks_private_s { struct
_ccv_nnc_graph_wait_any_sub_tasks_param_s _co_params; }; size_t
_ccv_nnc_graph_wait_any_sub_tasks_stack_size(void) { return sizeof
(struct _ccv_nnc_graph_wait_any_sub_tasks_private_s); } co_state_t
_ccv_nnc_graph_wait_any_sub_tasks(co_routine_t* const _self_
, void* const _privates_) { struct _private_s { struct _ccv_nnc_graph_wait_any_sub_tasks_param_s
_co_params; }; switch (_self_->line) { case 0:
561))co_state_t _ccv_nnc_graph_wait_any_sub_tasks(co_routine_t* const
_self, void* const _privates_); struct _ccv_nnc_graph_wait_any_sub_tasks_param_s
{ struct { ccv_nnc_graph_t* const graph;co_routine_t* const*
const sub_tasks;const int sub_task_size;const ccv_nnc_graph_exec_schedule_t
* const schd_info;const int* const pending_nodes;const int pending_node_size
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; }
_co_params; }; size_t _ccv_nnc_graph_wait_any_sub_tasks_stack_size
(void); struct _ccv_nnc_graph_wait_any_sub_tasks_private_s { struct
_ccv_nnc_graph_wait_any_sub_tasks_param_s _co_params; }; size_t
_ccv_nnc_graph_wait_any_sub_tasks_stack_size(void) { return sizeof
(struct _ccv_nnc_graph_wait_any_sub_tasks_private_s); } co_state_t
_ccv_nnc_graph_wait_any_sub_tasks(co_routine_t* const _self_
, void* const _privates_) { struct _private_s { struct _ccv_nnc_graph_wait_any_sub_tasks_param_s
_co_params; }; switch (_self_->line) { case 0:
{
562 assert(CO_P(sub_task_size) > 0)((void) sizeof (((((struct _private_s*)(_privates_))->_co_params
._co_params.sub_task_size) > 0) ? 1 : 0), __extension__ ({
if ((((struct _private_s*)(_privates_))->_co_params._co_params
.sub_task_size) > 0) ; else __assert_fail ("CO_P(sub_task_size) > 0"
, "ccv_nnc_graph_run.c", 562, __extension__ __PRETTY_FUNCTION__
); }))
;
563 co_await_any(CO_P(sub_tasks), CO_P(sub_task_size))do { if (!_co_await_any(_self_, (((struct _private_s*)(_privates_
))->_co_params._co_params.sub_tasks), (((struct _private_s
*)(_privates_))->_co_params._co_params.sub_task_size))) { return
(co_state_t){ 563, 0 }; } case 563: ; } while (0)
;
 564 int i, j, k;co_state_t _ccv_nnc_graph_wait_any_sub_tasks(co_routine_t* const
 565 // This is not good; these local variables need to be in the private section.
 566 // I got away with it because there is no yield or resume or apply or anything after the await above.
566 int i, j, k;
567 for (i = 0; i < CO_P(sub_task_size)(((struct _private_s*)(_privates_))->_co_params._co_params
.sub_task_size)
; i++)
568 if (co_is_done(CO_P(sub_tasks)(((struct _private_s*)(_privates_))->_co_params._co_params
.sub_tasks)
[i]))
569 {
570 for (j = 0; j < CO_P(pending_node_size)(((struct _private_s*)(_privates_))->_co_params._co_params
.pending_node_size)
; j++)
571 {
572 const ccv_nnc_graph_exec_schedule_t* const node = CO_P(schd_info)(((struct _private_s*)(_privates_))->_co_params._co_params
.schd_info)
+ CO_P(pending_nodes)(((struct _private_s*)(_privates_))->_co_params._co_params
.pending_nodes)
[j];
573 for (k = 0; k < node->stream_size; k++)
574 if (CO_P(graph)(((struct _private_s*)(_privates_))->_co_params._co_params
.graph)
->block_stream_tasks[SCHEDULE_STREAMS(*node)((*node).stream_size <= 1 ? (*node)._inline_streams : (*node
)._heap_streams)
[k]] == CO_P(sub_tasks)(((struct _private_s*)(_privates_))->_co_params._co_params
.sub_tasks)
[i])
575 CO_P(graph)(((struct _private_s*)(_privates_))->_co_params._co_params
.graph)
->block_stream_tasks[SCHEDULE_STREAMS(*node)((*node).stream_size <= 1 ? (*node)._inline_streams : (*node
)._heap_streams)
[k]] = 0;
576 }
577 co_free(CO_P(sub_tasks)(((struct _private_s*)(_privates_))->_co_params._co_params
.sub_tasks)
[i]);
578 }
579} co_end()default: return (co_state_t){ 579, 1 }; } }
580
581	static co_decl_task(_ccv_nnc_graph_exec_run_loop, (ccv_nnc_graph_t* const graph, ccv_nnc_graph_exec_info_t* const exec_info, const ccv_nnc_graph_exec_schedule_t* const schd_info, const int* const psort, const int start_index, const int exec_info_size, ccv_nnc_tensor_tape_t* const tensor_tape, const int flags), private(
582		int i, p, q;
583		int sub_task_size;
584		co_routine_t** sub_tasks;
585		int* pending_nodes[2];
586		int pending_node_size[2];
587		int idx;
588		ccv_nnc_graph_exec_info_t* node;
589		const ccv_nnc_graph_exec_schedule_t* schd;
590		co_routine_t* task;
591	)) {
592	CO_V(sub_task_size) = 0;
593	CO_V(sub_tasks) = (co_routine_t**)ccv_nnc_graph_buffer(CO_P(graph), sizeof(co_routine_t*) * (CO_P(graph)->sub_graphs ? CO_P(graph)->sub_graphs->rnum : 0) + sizeof(int) * CO_P(exec_info_size) * 2);
594	CO_V(pending_nodes)[0] = (int*)(CO_V(sub_tasks) + (CO_P(graph)->sub_graphs ? CO_P(graph)->sub_graphs->rnum : 0));
595	CO_V(pending_nodes)[1] = CO_V(pending_nodes)[0] + CO_P(exec_info_size);
596	CO_V(pending_node_size)[0] = 0;
597	CO_V(pending_node_size)[1] = 0;
598	for (CO_V(i) = CO_P(start_index); CO_V(i) < CO_P(exec_info_size); CO_V(i)++)
599	{
600		if (__atomic_load_n(&CO_P(graph)->run_state, __ATOMIC_ACQUIRE) == CCV_NNC_GRAPH_STATE_CANCEL)
601			break;
602		CO_V(idx) = CO_P(psort) ? CO_P(psort)[CO_V(i)] : CO_V(i);
603		CO_V(node) = CO_P(exec_info) + CO_V(idx);
604		CO_V(schd) = CO_P(schd_info) + CO_V(idx);
605		// If the stream is blocked, but not by the currently executing task.
606		int blocked = 0, j;
607		for (j = 0; j < CO_V(schd)->stream_size; j++)
608			if (CO_P(graph)->block_stream_tasks[SCHEDULE_STREAMS(*CO_V(schd))[j]])
609			{
610				CO_V(pending_nodes)[0][CO_V(pending_node_size)[0]++] = CO_V(idx);
611				_ccv_nnc_graph_mark_outgoing_streams_blocked_by_task(CO_P(graph), CO_P(schd_info), CO_V(node), CO_P(graph)->block_stream_tasks[SCHEDULE_STREAMS(*CO_V(schd))[j]]);
612				blocked = 1;
613			}
614		if (blocked)
615			continue;
616		CO_V(task) = _ccv_nnc_graph_exec_run_task(CO_P(graph), CO_V(node), CO_V(schd), CO_V(idx), CO_P(tensor_tape), CO_P(flags));
617		if (CO_V(task))
618		{
619			co_resume(CO_V(task));
620			if (!co_is_done(CO_V(task)))
621			{
622				CO_V(sub_tasks)[CO_V(sub_task_size)++] = CO_V(task);
623				int j;
624				for (j = 0; j < CO_V(schd)->stream_size; j++)
625					CO_P(graph)->block_stream_tasks[SCHEDULE_STREAMS(*CO_V(schd))[j]] = CO_V(task);
626				_ccv_nnc_graph_mark_outgoing_streams_blocked_by_task(CO_P(graph), CO_P(schd_info), CO_V(node), CO_V(task));
627			} else
628				co_free(CO_V(task));
629		}
630	}
631	if (CO_V(sub_task_size))
632		co_apply(_ccv_nnc_graph_wait_any_sub_tasks, (CO_P(graph), CO_V(sub_tasks), CO_V(sub_task_size), CO_P(schd_info), CO_V(pending_nodes)[0], CO_V(pending_node_size)[0]));
633	if (__atomic_load_n(&CO_P(graph)->run_state, __ATOMIC_ACQUIRE) == CCV_NNC_GRAPH_STATE_CANCEL)
634		co_return();
635	CO_V(p) = 0;
636	CO_V(q) = 1;
637	while (CO_V(pending_node_size)[CO_V(p)] > 0)
638	{
639		CO_V(pending_node_size)[CO_V(q)] = 0;
640		CO_V(sub_task_size) = 0;
641		for (CO_V(i) = 0; CO_V(i) < CO_V(pending_node_size)[CO_V(p)]; CO_V(i)++)
642		{
643			CO_V(idx) = CO_V(pending_nodes)[CO_V(p)][CO_V(i)];
644			CO_V(node) = CO_P(exec_info) + CO_V(idx);
645			CO_V(schd) = CO_P(schd_info) + CO_V(idx);
646			int blocked = 0, j;
647			for (j = 0; j < CO_V(schd)->stream_size; j++)
648				if (CO_P(graph)->block_stream_tasks[SCHEDULE_STREAMS(*CO_V(schd))[j]])
649				{
650					_ccv_nnc_graph_mark_outgoing_streams_blocked_by_task(CO_P(graph), CO_P(schd_info), CO_V(node), CO_P(graph)->block_stream_tasks[SCHEDULE_STREAMS(*CO_V(schd))[j]]);
651					CO_V(pending_nodes)[CO_V(q)][CO_V(pending_node_size)[CO_V(q)]++] = CO_V(idx);
652					blocked = 1;
653				}
654			if (blocked)
655				continue;
656			CO_V(task) = _ccv_nnc_graph_exec_run_task(CO_P(graph), CO_V(node), CO_V(schd), CO_V(idx), CO_P(tensor_tape), CO_P(flags));
657			if (CO_V(task))
658			{
659				co_resume(CO_V(task));
660				if (!co_is_done(CO_V(task)))
661				{
662					CO_V(sub_tasks)[CO_V(sub_task_size)++] = CO_V(task);
663					for (j = 0; j < CO_V(schd)->stream_size; j++)
664						CO_P(graph)->block_stream_tasks[SCHEDULE_STREAMS(*CO_V(schd))[j]] = CO_V(task);
665					_ccv_nnc_graph_mark_outgoing_streams_blocked_by_task(CO_P(graph), CO_P(schd_info), CO_V(node), CO_V(task));
666				} else
667					co_free(CO_V(task));
668			}
669		}
670		int t;
671		CCV_SWAP(CO_V(p), CO_V(q), t);
672		if (CO_V(sub_task_size))
673			co_apply(_ccv_nnc_graph_wait_any_sub_tasks, (CO_P(graph), CO_V(sub_tasks), CO_V(sub_task_size), CO_P(schd_info), CO_V(pending_nodes)[CO_V(p)], CO_V(pending_node_size)[CO_V(p)]));
674	}
675	} co_end()
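Lines 635-674 drain the nodes whose streams were blocked by still-running sub-tasks using two pending lists that trade roles through CCV_SWAP: each pass retries every deferred node, re-defers the ones still blocked into the other list, and awaits outstanding sub-tasks before the next pass. A stripped-down sketch of that double-buffered retry loop follows, assuming a caller-supplied try_run predicate (a stand-in, not part of the graph API).

	/* Retry every pending item; defer the still-blocked ones into the other list.
	 * In the real loop, _ccv_nnc_graph_wait_any_sub_tasks runs between passes, so
	 * some blocker finishes and forward progress is guaranteed. */
	static void drain_pending(int* const pending[2], const int count, int (*const try_run)(int))
	{
		int size[2] = { count, 0 };
		int p = 0, q = 1, i, t;
		while (size[p] > 0) {
			size[q] = 0;
			for (i = 0; i < size[p]; i++) {
				const int idx = pending[p][i];
				if (!try_run(idx))
					pending[q][size[q]++] = idx; /* still blocked; retry next pass */
			}
			t = p, p = q, q = t; /* CCV_SWAP(p, q, t) */
		}
	}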
676
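The co_task declared next drives one full run of a (possibly looping) graph. For a forward while-graph (lines 738-771 below), each iteration executes the nodes up to the breakpoints, waits on their streams, evaluates p_while.expr, and either leaves the loop or finishes the iteration. A stripped-down sketch of that control flow, with the graph work abstracted into callbacks (run_prefix, eval_cond, and run_suffix are illustrative stand-ins):

	#include <stdint.h>

	/* Run the prefix nodes, test the loop condition at the breakpoints, then run
	 * the rest of the iteration; the returned iteration count is what the tensor
	 * tape numbering records for the backward pass to replay in reverse
	 * (lines 777-797 below). */
	static int64_t run_while_forward(void* const ctx,
		void (*const run_prefix)(void*, int64_t),
		int (*const eval_cond)(void*),
		void (*const run_suffix)(void*, int64_t))
	{
		int64_t count = 0;
		for (;; ++count) {
			run_prefix(ctx, count);
			if (!eval_cond(ctx))
				break; /* condition failed at the breakpoints: leave the loop */
			run_suffix(ctx, count);
		}
		return count;
	}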
677	co_task(_ccv_nnc_graph_topsorted_run_coro, (ccv_nnc_graph_t* const graph, const int exec_idx, const ccv_nnc_graph_static_schedule_t* const schedule, ccv_nnc_graph_exec_info_t* const exec, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const stream_context, const int flags), private(
678		ccv_nnc_graph_exec_info_t* exec_info;
679		const ccv_nnc_graph_exec_schedule_t* schd_info;
680		co_routine_t* previous_main;
681		int stream_0;
682		// while loop
683		int64_t count, reverse_count;
684		int graph_breakpoint_size;
685		int i, j;
686	)) {
687	assert(CO_P(graph)->stream_size > 0);
688	int i;
689	// Assign the resource container pointer.
690	for (i = 0; i < CO_P(graph)->stream_size; i++)
691		CO_P(graph)->streams[i]->resource_container = CO_P(stream_context)->_inline_container;
692	CO_V(exec_info) = (ccv_nnc_graph_exec_info_t*)ccv_array_get(CO_P(graph)->exec_info, 0);
693	CO_V(schd_info) = CO_P(schedule)->exec_info;
694	CO_V(stream_0) = CO_P(schedule)->stream_0;
695	if (CO_P(exec_idx) == -1)
696	{
697		if (CO_P(stream_context)->main)
698		{
699			CO_V(previous_main) = CO_P(stream_context)->main;
700			CO_P(stream_context)->main = co_self();
701			// Wait for the previous task to be done. This makes sure that our graph run is serial on the same stream.
702			assert(!co_is_done(CO_V(previous_main)));
703			co_await(CO_V(previous_main));
704		} else
705			CO_P(stream_context)->main = co_self();
706		PRINT(CCV_CLI_INFO, "Graph Stream %d Begin", CO_V(stream_0));
707		ccv_nnc_stream_signal_t* stream_0_signal;
708		if (CO_P(stream_context) != CO_P(graph)->streams[CO_V(stream_0)])
709		{
710			// Make sure that when we start work on streams[0], the current stream context is done.
711			stream_0_signal = ccv_nnc_stream_context_emit_signal_new(CO_P(stream_context));
712			ccv_nnc_stream_context_wait_signal(CO_P(graph)->streams[CO_V(stream_0)], stream_0_signal);
713		} else if (CO_P(schedule)->stream_1_size) {
714			ccv_nnc_stream_context_emit_signal(CO_P(graph)->streams[CO_V(stream_0)], CO_P(schedule)->begin);
715			stream_0_signal = CO_P(schedule)->begin;
716		}
717		int i, flag = 0;
718		for (i = 0; i < CO_P(schedule)->stream_1_size; i++)
719		{
720			ccv_nnc_stream_context_wait_signal(CO_P(graph)->streams[CO_P(schedule)->stream_1s[i]], stream_0_signal);
721			if (!flag)
722			{
723				PRINT(CCV_CLI_INFO, ", Wait: %d", CO_P(schedule)->stream_1s[i]);
724				flag = 1;
725			} else
726				PRINT(CCV_CLI_INFO, ", %d", CO_P(schedule)->stream_1s[i]);
727		}
728		PRINT(CCV_CLI_INFO, "\n");
729	} else {
730		assert(CO_P(stream_context) == CO_P(graph)->streams[0]);
731	}
732	if (CO_P(exec) && (CO_P(exec)->flags & CCV_NNC_GRAPH_EXEC_P_WHILE))
733	{
734		assert(CO_P(schedule) == CO_P(graph)->default_schedule);
735		assert(CO_P(exec)->p_while.expr);
736		CO_V(count) = 0;
737		// This is a forward while loop. A backward while loop will just consult its pairing part.
738		if (CO_P(exec)->cmd.cmd == CCV_NNC_GRAPH_FORWARD)
739		{
740			CO_V(graph_breakpoint_size) = CO_P(graph)->breakpoint_offset + CO_P(graph)->breakpoint_size;
741			for (;; ++CO_V(count))
742			{
743				CO_P(graph)->while_count = CO_V(count);
744				if (CO_P(tensor_tape))
745					ccv_nnc_tensor_tape_set_numbering(CO_P(tensor_tape), CO_P(graph)->p, (ccv_nnc_graph_exec_t){
746						.d = CO_P(exec_idx),
747						.graph = CO_P(graph)->p,
748					}, CO_V(count));
749				_ccv_nnc_graph_unwrap(CO_P(graph), CO_V(count), 0);
750				if (CO_V(count) > 0)
751					_ccv_nnc_graph_transit_move_to(CO_P(graph));
752				co_apply(_ccv_nnc_graph_exec_run_loop, (CO_P(graph), CO_V(exec_info), CO_V(schd_info), 0, 0, CO_V(graph_breakpoint_size), CO_P(tensor_tape), CO_P(flags)));
753				if (__atomic_load_n(&CO_P(graph)->run_state, __ATOMIC_ACQUIRE) == CCV_NNC_GRAPH_STATE_CANCEL)
754					break;
755				// We reached the breakpoints; now check the breakpoint expression and break out if it is not met.
756				// Wait until everything on the stream is executed.
757				for (CO_V(i) = CO_P(graph)->breakpoint_offset; CO_V(i) < CO_V(graph_breakpoint_size); CO_V(i)++)
758					for (CO_V(j) = 0; CO_V(j) < CO_V(schd_info)[CO_V(i)].stream_size; CO_V(j)++)
759						co_stream_await(CO_P(graph)->streams[SCHEDULE_STREAMS(CO_V(schd_info)[CO_V(i)])[CO_V(j)]]);
760				_ccv_nnc_graph_exec_unwrap_while_expr(CO_P(graph), CO_P(exec));
761				if (!CO_P(exec)->p_while.expr(CO_P(exec)->p_while.inputs, CO_P(exec)->p_while.input_size, CO_P(exec)->p_while.data))
762				{
763					_ccv_nnc_graph_rewrap(CO_P(graph));
764					// Breaking from here is okay because all the streams have been waited on.
765					break;
766				}
767				co_apply(_ccv_nnc_graph_exec_run_loop, (CO_P(graph), CO_V(exec_info), CO_V(schd_info), 0, CO_V(graph_breakpoint_size), CO_P(graph)->exec_info->rnum, CO_P(tensor_tape), CO_P(flags)));
768				// If it is cancelled here, we don't need to break out yet; we can break out at the earlier check. The most important thing is to avoid a stream wait when there is a cancel.
769				_ccv_nnc_graph_from_move_transit(CO_P(graph));
770				_ccv_nnc_graph_rewrap(CO_P(graph));
771			}
772		} else {
773			// For the backward graph, there is no need to evaluate the while expr.
774			assert(CO_P(exec)->cmd.cmd == CCV_NNC_GRAPH_BACKWARD);
775			assert(CO_P(graph)->pair);
776			assert(CO_P(tensor_tape));
777			CO_V(count) = 0;
778			CO_V(reverse_count) = CO_P(graph)->while_count = ccv_nnc_tensor_tape_numbering(CO_P(tensor_tape), CO_P(graph)->p, (ccv_nnc_graph_exec_t){
779				.d = CO_P(exec_idx),
780				.graph = CO_P(graph)->p,
781			});
782			_ccv_nnc_graph_unwrap(CO_P(graph), CO_V(count), CO_V(reverse_count));
783			co_apply(_ccv_nnc_graph_exec_run_loop, (CO_P(graph), CO_V(exec_info), CO_V(schd_info), 0, CO_P(graph)->breakpoint_offset, CO_P(graph)->exec_info->rnum, CO_P(tensor_tape), CO_P(flags)));
784			// If it is cancelled here, we don't need to break out yet; we can break out later.
785			_ccv_nnc_graph_from_move_transit(CO_P(graph));
786			_ccv_nnc_graph_rewrap(CO_P(graph));
787			for (CO_V(count) = 1; CO_V(reverse_count) > 0; ++CO_V(count))
788			{
789				CO_P(graph)->while_count = --CO_V(reverse_count);
790				_ccv_nnc_graph_unwrap(CO_P(graph), CO_V(count), CO_V(reverse_count));
791				_ccv_nnc_graph_transit_move_to(CO_P(graph));
792				co_apply(_ccv_nnc_graph_exec_run_loop, (CO_P(graph), CO_V(exec_info), CO_V(schd_info), 0, 0, CO_P(graph)->exec_info->rnum, CO_P(tensor_tape), CO_P(flags)));
793				if (__atomic_load_n(&CO_P(graph)->run_state, __ATOMIC_ACQUIRE) == CCV_NNC_GRAPH_STATE_CANCEL)
794					break;
795				_ccv_nnc_graph_from_move_transit(CO_P(graph));
796				_ccv_nnc_graph_rewrap(CO_P(graph));
797			}
798		}
799		if (__atomic_load_n(&CO_P(graph)->run_state, __ATOMIC_ACQUIRE) == CCV_NNC_GRAPH_STATE_CANCEL)
800		{
801			// The most important thing is to reset main and then return; we don't need to wait for any streaming event.
802			if (CO_P(exec_idx) == -1 && CO_P(stream_context)->main == co_self())
803				CO_P(stream_context)->main = 0;
804			co_return();
805		}
806		assert(CO_V(stream_0) == 0);
807		int i;
808		for (i = 0; i < CO_P(schedule)->wait_size; i++)
809			ccv_nnc_stream_context_wait_signal(CO_P(graph)->streams[0], CO_P(graph)->signals[CO_P(schedule)->waits[i]]);
810	} else {
811		CO_P(graph)->while_count = 0;
812		co_apply(_ccv_nnc_graph_exec_run_loop, (CO_P(graph), CO_V(exec_info), CO_V(schd_info), CO_P(schedule)->psort, 0, CO_P(schedule)->psort ? CO_P(schedule)->psort_size : CO_P(schedule)->exec_info_size, CO_P(tensor_tape), CO_P(flags)));
: (((struct _private_s*)(_privates_))->_co_params._co_params
.schedule)->exec_info_size, (((struct _private_s*)(_privates_
))->_co_params._co_params.tensor_tape), (((struct _private_s
*)(_privates_))->_co_params._co_params.flags) } }; task->
fn = _ccv_nnc_graph_exec_run_loop; task->line = 0; task->
done = 0; task->root = 0; task->other_size = 0; task->
notify_any = 0; task->others = 0; task->caller = 0; task
->callee = 0; if (sizeof(params) > 0) memcpy(task + 1, &
params, sizeof(params)); } while (0); task; }); _co_apply(_self_
, _self_->callee); return (co_state_t){ 812, 0 }; case 812
: co_free(_self_->callee); _self_->callee = 0; } while (
0)
;
813 if (__atomic_load_n(&CO_P(graph)(((struct _private_s*)(_privates_))->_co_params._co_params
.graph)
->run_state, __ATOMIC_ACQUIRE2) == CCV_NNC_GRAPH_STATE_CANCEL)
814 {
815 // The most important thing is to reset main and then return, we don't need to wait for any streaming event.
816 if (CO_P(exec_idx)(((struct _private_s*)(_privates_))->_co_params._co_params
.exec_idx)
== -1 && CO_P(stream_context)(((struct _private_s*)(_privates_))->_co_params._co_params
.stream_context)
->main == co_self()(_self_))
817 CO_P(stream_context)(((struct _private_s*)(_privates_))->_co_params._co_params
.stream_context)
->main = 0;
818 co_return()do { return (co_state_t){ 818, 1 }; } while (0);
819 }
820 PRINT(CCV_CLI_INFO, "Graph Stream %d End", CO_V(stream_0))do { if ((CCV_CLI_INFO & ccv_cli_get_output_levels())) { printf
("Graph Stream %d End", (((struct _private_s*)(_privates_))->
stream_0)); fflush(stdout); } } while (0)
;
821 int i, flag = 0;
822 for (i = 0; i < CO_P(schedule)(((struct _private_s*)(_privates_))->_co_params._co_params
.schedule)
->wait_size; i++)
823 {
824 ccv_nnc_stream_context_wait_signal(CO_P(graph)(((struct _private_s*)(_privates_))->_co_params._co_params
.graph)
->streams[CO_V(stream_0)(((struct _private_s*)(_privates_))->stream_0)], CO_P(graph)(((struct _private_s*)(_privates_))->_co_params._co_params
.graph)
->signals[CO_P(schedule)(((struct _private_s*)(_privates_))->_co_params._co_params
.schedule)
->waits[i]]);
825 if (!flag)
826 {
827 PRINT(CCV_CLI_INFO, ", Wait: %d", CO_P(schedule)->waits[i])do { if ((CCV_CLI_INFO & ccv_cli_get_output_levels())) { printf
(", Wait: %d", (((struct _private_s*)(_privates_))->_co_params
._co_params.schedule)->waits[i]); fflush(stdout); } } while
(0)
;
828 flag = 1;
829 } else
830 PRINT(CCV_CLI_INFO, ", %d", CO_P(schedule)->waits[i])do { if ((CCV_CLI_INFO & ccv_cli_get_output_levels())) { printf
(", %d", (((struct _private_s*)(_privates_))->_co_params._co_params
.schedule)->waits[i]); fflush(stdout); } } while (0)
;
831 }
832 PRINT(CCV_CLI_INFO, "\n")do { if ((CCV_CLI_INFO & ccv_cli_get_output_levels())) { printf
("\n"); fflush(stdout); } } while (0)
;
833 }
834 if (CO_P(stream_context)(((struct _private_s*)(_privates_))->_co_params._co_params
.stream_context)
!= CO_P(graph)(((struct _private_s*)(_privates_))->_co_params._co_params
.graph)
->streams[CO_V(stream_0)(((struct _private_s*)(_privates_))->stream_0)])
835 {
836 assert(CO_P(exec_idx) == -1)((void) sizeof (((((struct _private_s*)(_privates_))->_co_params
._co_params.exec_idx) == -1) ? 1 : 0), __extension__ ({ if ((
((struct _private_s*)(_privates_))->_co_params._co_params.
exec_idx) == -1) ; else __assert_fail ("CO_P(exec_idx) == -1"
, "ccv_nnc_graph_run.c", 836, __extension__ __PRETTY_FUNCTION__
); }))
;
837 ccv_nnc_stream_context_emit_signal(CO_P(graph)(((struct _private_s*)(_privates_))->_co_params._co_params
.graph)
->streams[CO_V(stream_0)(((struct _private_s*)(_privates_))->stream_0)], CO_P(schedule)(((struct _private_s*)(_privates_))->_co_params._co_params
.schedule)
->end);
838 ccv_nnc_stream_context_wait_signal(CO_P(stream_context)(((struct _private_s*)(_privates_))->_co_params._co_params
.stream_context)
, CO_P(schedule)(((struct _private_s*)(_privates_))->_co_params._co_params
.schedule)
->end);
839 }
840 // Reset main to 0 if it is current me.
841 if (CO_P(exec_idx)(((struct _private_s*)(_privates_))->_co_params._co_params
.exec_idx)
== -1 && CO_P(stream_context)(((struct _private_s*)(_privates_))->_co_params._co_params
.stream_context)
->main == co_self()(_self_))
842 CO_P(stream_context)(((struct _private_s*)(_privates_))->_co_params._co_params
.stream_context)
->main = 0;
843} co_end()default: return (co_state_t){ 843, 1 }; } }
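Note on the co_* macros: their expansions above show a switch-based stackless coroutine. CO_P(x) and CO_V(x) read parameters and persistent locals out of a heap-allocated private struct (the (((struct _private_s*)(_privates_))->...) accesses), co_apply packs the callee's parameters into a malloc'd task and yields with return (co_state_t){ line, 0 }, the matching case line: is the resume label, and co_end() supplies the closing default:. A minimal sketch of the same pattern, with hypothetical names rather than the actual co_routine_t API:

#include <stdlib.h>

typedef struct { int line; int done; } co_state_t;

typedef struct {
	int line; /* last yield point; 0 means a fresh coroutine */
	int i, n; /* persistent "locals" live here, not on the C stack */
} co_private_t;

/* Advance the coroutine one step: run until the next yield or completion. */
static co_state_t count_to(co_private_t* const self)
{
	switch (self->line)
	{
		case 0: /* first entry */
			for (self->i = 0; self->i < self->n; self->i++)
			{
				self->line = 1; /* record the resume label, then yield */
				return (co_state_t){ 1, 0 };
				case 1:; /* the next call jumps back here, inside the loop */
			}
	}
	return (co_state_t){ 0, 1 }; /* finished */
}

Because resumption jumps straight to a case label, no value may live on the C stack across a yield, which is why every local in this file is funneled through the private struct; a driver would zero a co_private_t, set n, and call count_to until .done is 1, mirroring the malloc/memcpy/co_free choreography visible in the co_apply expansions.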
844
845static int _ccv_nnc_graph_run(ccv_nnc_graph_t* const graph, const int exec_idx, ccv_nnc_graph_exec_info_t* const exec, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, const int flags, const ccv_nnc_graph_exec_t* const sources, const int source_size, const ccv_nnc_graph_exec_t* const destinations, const int destination_size, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const stream_context);
846
847static inline void _ccv_nnc_graph_exec_run(ccv_nnc_graph_t* const graph, ccv_nnc_graph_exec_info_t* const node, const int idx, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const stream_context, const int flags)
848{
849 int i;
850 _ccv_nnc_graph_exec_unwrap_io(graph, node);
851 ccv_nnc_tensor_t** inputs = node->inputs;
852 ccv_nnc_tensor_t** outputs = inputs ? inputs + node->input_size : 0;
[20] Assuming 'inputs' is null
[21] '?' condition is false
[22] 'outputs' initialized to a null pointer value
853 if (tensor_tape)
[23] Assuming 'tensor_tape' is null
[24] Taking false branch
854 ccv_nnc_tensor_tape_io(tensor_tape, graph, node->input_flags, inputs, node->input_size, node->output_flags, outputs, node->output_size);
855 /* Broadcast the updates to all subscribed references for input / output, even though at this
856 * time output is not written yet, propagate pointer change is still valid. */
857 _ccv_nnc_graph_exec_begin_synchronize_multiviews(graph, node);
858 if (node->cmd.cmd == CCV_NNC_GRAPH_FORWARD || node->cmd.cmd == CCV_NNC_GRAPH_BACKWARD)
[25] Assuming field 'cmd' is not equal to CCV_NNC_GRAPH_FORWARD
[26] Assuming field 'cmd' is not equal to CCV_NNC_GRAPH_BACKWARD
[27] Taking false branch
859 {
860 assert(!stream_context); // This doesn't work properly with stream context.
861 if (node->flags & CCV_NNC_GRAPH_EXEC_CASE_OF)
862 {
863 int ref;
864 if (node->cmd.cmd == CCV_NNC_GRAPH_FORWARD)
865 {
866 ref = node->case_of.offset + node->case_of.expr(inputs, node->input_size, node->case_of.data);
867 if (tensor_tape)
868 ccv_nnc_tensor_tape_set_numbering(tensor_tape, graph, (ccv_nnc_graph_exec_t){
869 .d = idx,
870 .graph = graph,
871 }, ref);
872 } else {
873 assert(node->cmd.cmd == CCV_NNC_GRAPH_BACKWARD);
874 assert(tensor_tape);
875 ref = ccv_nnc_tensor_tape_numbering(tensor_tape, graph, (ccv_nnc_graph_exec_t){
876 .d = idx,
877 .graph = graph,
878 });
879 }
880 if (ref >= 0)
881 {
882 assert(ref < node->graph_ref_size);
883 ccv_nnc_graph_t* sub_graph = *(ccv_nnc_graph_t**)ccv_array_get(graph->sub_graphs, CCV_NNC_GRAPH_REF(node)[ref] - 1);
884 _ccv_nnc_graph_run(sub_graph, idx, node, inputs, node->input_size, outputs, node->output_size, flags, 0, 0, 0, 0, tensor_tape, stream_context);
885 }
886 _ccv_nnc_graph_exec_unwrap_phi(graph, node, ref);
887 } else if (node->flags & CCV_NNC_GRAPH_EXEC_P_WHILE) {
888 ccv_nnc_graph_t* sub_graph = *(ccv_nnc_graph_t**)ccv_array_get(graph->sub_graphs, CCV_NNC_GRAPH_REF(node)[0] - 1);
889 _ccv_nnc_graph_run(sub_graph, idx, node, inputs, node->input_size, outputs, node->output_size, flags, 0, 0, 0, 0, tensor_tape, stream_context);
890 }
891 } else {
892 PRINT(CCV_CLI_INFO, "%s [%d]: [%d] -> [%d]\n", ccv_nnc_cmd_name(node->cmd.cmd), idx, node->input_size, node->output_size);
[28] Assuming the condition is false
[29] Taking false branch
[30] Loop condition is false. Exiting loop
893 for (i = 0; i < node->input_size; i++)
[31] Assuming 'i' is >= field 'input_size'
[32] Loop condition is false. Execution continues on line 900
894 {
895 PRINT(CCV_CLI_INFO, "|-> %d. %p (%p:%d)", i + 1, inputs[i], (inputs[i] ? inputs[i]->data.u8 : 0), (inputs[i] ? CCV_TENSOR_GET_DEVICE_ID(inputs[i]->info.type) : -1));
896 if (inputs[i] && CCV_CLI_OUTPUT_LEVEL_IS(CCV_CLI_INFO))
897 ccv_nnc_print_tensor_info(inputs[i]);
898 PRINT(CCV_CLI_INFO, "\n");
899 }
900 ccv_nnc_cmd_exec(node->cmd, node->hint, flags, inputs, node->input_size, outputs, node->output_size, stream_context);
901 for (i = 0; i < node->output_size; i++)
[33] Assuming 'i' is < field 'output_size'
[34] Loop condition is true. Entering loop body
902 {
903 PRINT(CCV_CLI_INFO, "|<- %d. %p (%p:%d)", i + 1, outputs[i], (outputs[i] ? outputs[i]->data.u8 : 0), (outputs[i] ? CCV_TENSOR_GET_DEVICE_ID(outputs[i]->info.type) : -1));
[35] Assuming the condition is true
[36] Taking true branch
[37] Array access (from variable 'outputs') results in a null pointer dereference
904 if (outputs[i] && CCV_CLI_OUTPUT_LEVEL_IS(CCV_CLI_INFO))
905 ccv_nnc_print_tensor_info(outputs[i]);
906 PRINT(CCV_CLI_INFO, "\n");
907 }
908 }
909}
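Note on the report's endpoint: steps 20-37 trace the defect. When node->inputs is null at line 852, outputs is initialized to null as well, yet the output logging loop at lines 901-907 indexes outputs[i] as soon as node->output_size > 0, which is the dereference flagged at line 903. A minimal sketch of one possible guard, assuming (without evidence from the analyzer) that a node with null tensor arrays but a positive output_size should simply skip the logging; this is not the maintainers' fix:

/* Sketch: clamp the logging bounds so a null tensor array is never indexed.
 * Mirrors lines 851-852; 'node' is the ccv_nnc_graph_exec_info_t being run. */
ccv_nnc_tensor_t** const inputs = node->inputs;
ccv_nnc_tensor_t** const outputs = inputs ? inputs + node->input_size : 0;
const int input_size = inputs ? node->input_size : 0;    /* 0 when inputs == 0 */
const int output_size = outputs ? node->output_size : 0; /* 0 when outputs == 0 */
int i;
for (i = 0; i < output_size; i++) /* line 901's loop can no longer run with outputs == 0 */
	if (outputs[i])
		ccv_nnc_print_tensor_info(outputs[i]);

If the null-inputs/positive-output_size state is in fact unreachable, an assert(inputs || (node->input_size == 0 && node->output_size == 0)) near line 852 would document that invariant to the checker instead.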
910
911static inline void _ccv_nnc_graph_topsorted_run(ccv_nnc_graph_t* const graph, const int exec_idx, ccv_nnc_graph_exec_info_t* const exec, const int flags, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const stream_context)
912{
913 int i;
914 if (exec && (exec->flags & CCV_NNC_GRAPH_EXEC_P_WHILE))
[16.1] 'exec' is null
915 {
916 assert(!stream_context); // This doesn't work properly with stream context.
917 assert(exec->p_while.expr);
918 int64_t count = 0;
919 // This is a forward while loop. Backward while loop will just consult its pairing part.
920 if (exec->cmd.cmd == CCV_NNC_GRAPH_FORWARD)
921 {
922 const int graph_breakpoint_size = graph->breakpoint_offset + graph->breakpoint_size;
923 for (;; ++count)
924 {
925 graph->while_count = count;
926 if (tensor_tape)
927 ccv_nnc_tensor_tape_set_numbering(tensor_tape, graph->p, (ccv_nnc_graph_exec_t){
928 .d = exec_idx,
929 .graph = graph->p,
930 }, count);
931 _ccv_nnc_graph_unwrap(graph, count, 0);
932 if (count > 0)
933 _ccv_nnc_graph_transit_move_to(graph);
934 for (i = 0; i < graph_breakpoint_size; i++)
935 _ccv_nnc_graph_exec_run(graph, (ccv_nnc_graph_exec_info_t*)ccv_array_get(graph->exec_info, i), i, tensor_tape, stream_context, flags);
936 _ccv_nnc_graph_exec_unwrap_while_expr(graph, exec);
937 // Reached breakpoints, now check the breakpoint, if not met, break out.
938 if (!exec->p_while.expr(exec->p_while.inputs, exec->p_while.input_size, exec->p_while.data))
939 {
940 _ccv_nnc_graph_rewrap(graph);
941 break;
942 }
943 for (i = graph_breakpoint_size; i < graph->exec_info->rnum; i++)
944 _ccv_nnc_graph_exec_run(graph, (ccv_nnc_graph_exec_info_t*)ccv_array_get(graph->exec_info, i), i, tensor_tape, stream_context, flags);
945 _ccv_nnc_graph_from_move_transit(graph);
946 _ccv_nnc_graph_rewrap(graph);
947 }
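The loop that just closed (lines 923-947) is the forward while-loop protocol: per iteration, unwrap the multiview tensors for the current count, run the nodes up to the breakpoints, evaluate the loop predicate, and only if it holds run the remainder before rewrapping. A schematic restatement, where run_prefix and run_rest are stand-ins for the two _ccv_nnc_graph_exec_run loops, not real functions:

for (count = 0;; ++count)
{
	graph->while_count = count;
	_ccv_nnc_graph_unwrap(graph, count, 0); /* select this iteration's tensor views */
	if (count > 0)
		_ccv_nnc_graph_transit_move_to(graph);
	run_prefix(graph); /* nodes up to the breakpoints */
	_ccv_nnc_graph_exec_unwrap_while_expr(graph, exec);
	if (!exec->p_while.expr(exec->p_while.inputs, exec->p_while.input_size, exec->p_while.data))
	{
		_ccv_nnc_graph_rewrap(graph); /* predicate failed: undo the views and leave */
		break;
	}
	run_rest(graph); /* the remaining nodes for this iteration */
	_ccv_nnc_graph_from_move_transit(graph);
	_ccv_nnc_graph_rewrap(graph);
}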
948 } else {
949 // For backward graph, no need to evaluate the while expr.
950 assert(exec->cmd.cmd == CCV_NNC_GRAPH_BACKWARD);
951 assert(graph->pair);
952 assert(tensor_tape);
953 count = 0;
954 int64_t reverse_count = graph->while_count = ccv_nnc_tensor_tape_numbering(tensor_tape, graph->p, (ccv_nnc_graph_exec_t){
955 .d = exec_idx,
956 .graph = graph->p,
957 });
958 _ccv_nnc_graph_unwrap(graph, count, reverse_count);
959 for (i = graph->breakpoint_offset; i < graph->exec_info->rnum; i++)
960 _ccv_nnc_graph_exec_run(graph, (ccv_nnc_graph_exec_info_t*)ccv_array_get(graph->exec_info, i), i, tensor_tape, stream_context, flags);
961 _ccv_nnc_graph_from_move_transit(graph);
962 _ccv_nnc_graph_rewrap(graph);
963 for (count = 1; reverse_count > 0; ++count)
964 {
965 graph->while_count = --reverse_count;
966 _ccv_nnc_graph_unwrap(graph, count, reverse_count);
967 _ccv_nnc_graph_transit_move_to(graph);
968 for (i = 0; i < graph->exec_info->rnum; i++)
969 _ccv_nnc_graph_exec_run(graph, (ccv_nnc_graph_exec_info_t*)ccv_array_get(graph->exec_info, i), i, tensor_tape, stream_context, flags);
970 _ccv_nnc_graph_from_move_transit(graph);
971 _ccv_nnc_graph_rewrap(graph);
972 }
973 }
974 } else {
975 graph->while_count = 0;
976 for (i = 0; i < graph->exec_info->rnum; i++)
[17] Assuming 'i' is < field 'rnum'
[18] Loop condition is true. Entering loop body
977 _ccv_nnc_graph_exec_run(graph, (ccv_nnc_graph_exec_info_t*)ccv_array_get(graph->exec_info, i), i, tensor_tape, stream_context, flags);
[19] Calling '_ccv_nnc_graph_exec_run'
978 }
979}
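Steps 16.1-19 pin down how the defect path reaches _ccv_nnc_graph_exec_run: exec is null, so the plain branch at lines 975-977 runs every node in order. For the dereference at line 903 to fire, a node would have to carry a state roughly like the following; this is an illustration of the analyzer's assumptions, not a state shown to be constructible through the public API, so the path may be infeasible in practice:

/* Hypothetical node state matching the analyzer's assumptions: */
ccv_nnc_graph_exec_info_t node_state = {
	.inputs = 0,      /* step 20: 'inputs' is null, so 'outputs' becomes null too */
	.input_size = 0,  /* steps 31-32: the input logging loop never runs */
	.output_size = 1, /* steps 33-34: the output logging loop does run */
};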
980
981static inline void _ccv_nnc_graph_run_slow_path(ccv_nnc_graph_t* const graph, const int exec_idx, ccv_nnc_graph_exec_info_t* const exec, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, const int flags, const ccv_nnc_graph_exec_t* const sources, const int source_size, const ccv_nnc_graph_exec_t* const destinations, const int destination_size, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const stream_context)
982{
983 int i, j;
984 const ccv_nnc_graph_exec_t* const graph_sources = sources ? sources : (ccv_nnc_graph_exec_t*)ccv_array_get(graph->sources, 0);
985 const int graph_source_size = source_size ? source_size : graph->sources->rnum;
986 const ccv_nnc_graph_exec_t* const graph_destinations = destinations ? destinations : (ccv_nnc_graph_exec_t*)ccv_array_get(graph->destinations, 0);
987 const int graph_destination_size = destination_size ? destination_size : graph->destinations->rnum;
988#define visitor(node, idx, ...) \
989 _ccv_nnc_graph_exec_run(graph, node, idx, tensor_tape, stream_context, flags)
990 if (exec && (exec->flags & CCV_NNC_GRAPH_EXEC_P_WHILE))
991 {
992 assert(!stream_context); // This doesn't work properly with stream context.
993 assert(exec->p_while.expr);
994 int64_t count = 0;
995 // This is a forward while loop. Backward while loop will just consult its pairing part.
996 if (exec->cmd.cmd == CCV_NNC_GRAPH_FORWARD)
997 {
998 ccv_array_t* follows = ccv_array_new(sizeof(ccv_nnc_graph_exec_t), graph->breakpoint_size, 0);
999 for (i = 0; i < graph->breakpoint_size; i++)
1000 {
1001 const ccv_nnc_graph_exec_info_t* const exec_info = (const ccv_nnc_graph_exec_info_t*)ccv_array_get(graph->exec_info, graph->breakpoints->d);
1002 if (exec_info->outgoings)
1003 for (j = 0; j < exec_info->outgoings->rnum; j++)
1004 {
1005 const ccv_nnc_graph_exec_t exec = {
1006 .d = *(int*)ccv_array_get(exec_info->outgoings, j),
1007 .graph = graph,
1008 };
1009 ccv_array_push(follows, &exec);
1010 }
1011 }
1012 for (;; ++count)
1013 {
1014 graph->while_count = count;
1015 if (tensor_tape)
1016 ccv_nnc_tensor_tape_set_numbering(tensor_tape, graph->p, (ccv_nnc_graph_exec_t){
1017 .d = exec_idx,
1018 .graph = graph->p,
1019 }, count);
1020 _ccv_nnc_graph_unwrap(graph, count, 0);
1021 if (count > 0)
1022 _ccv_nnc_graph_transit_move_to(graph);
1023 CCV_NNC_GRAPH_VISIT(graph, (ccv_nnc_graph_exec_info_t*)ccv_array_get(graph->exec_info, 0), graph->exec_info->rnum, graph_sources, graph_source_size, graph->breakpoints, graph->breakpoint_size, 0, visitor);
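The CCV_NNC_GRAPH_VISIT expansion collapsed above is a Kahn-style topological traversal: the marking passes (the r = 1..7 states) restrict attention to nodes reachable from the sources and backward-reachable from the termination set while counting incoming edges per node, and the visiting loop then starts from the sources and releases a node to the visitor only once its pending-incoming count drops to zero. A compact sketch of the core idea over a plain adjacency list (hypothetical types; the macro's actual bookkeeping also handles the reachability pruning and the alloca/malloc size switch):

#include <stdlib.h>

/* Visit the nodes of a DAG in topological order from a set of sources. */
static void topo_visit(const int node_count, const int* const* const outgoing,
	const int* const outgoing_count, const int* const sources, const int source_count,
	void (*visit)(int idx))
{
	int* const pending = (int*)calloc(node_count, sizeof(int));
	int* const frontier = (int*)malloc(sizeof(int) * node_count);
	int i, j, head = 0, tail = 0;
	for (i = 0; i < node_count; i++) /* count incoming edges */
		for (j = 0; j < outgoing_count[i]; j++)
			++pending[outgoing[i][j]];
	for (i = 0; i < source_count; i++)
		frontier[tail++] = sources[i];
	while (head < tail)
	{
		const int idx = frontier[head++];
		visit(idx);
		for (j = 0; j < outgoing_count[idx]; j++)
			if (--pending[outgoing[idx][j]] == 0) /* last incoming edge satisfied */
				frontier[tail++] = outgoing[idx][j];
	}
	free(frontier);
	free(pending);
}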
1024 _ccv_nnc_graph_exec_unwrap_while_expr(graph, exec);
1025 // Reached breakpoints, now check the breakpoint, if not met, break out.
1026 if (!exec->p_while.expr(exec->p_while.inputs, exec->p_while.input_size, exec->p_while.data))
1027 {
1028 _ccv_nnc_graph_rewrap(graph);
1029 break;
1030 }
1031 if (follows->rnum > 0)
1032 CCV_NNC_GRAPH_VISIT(graph, (ccv_nnc_graph_exec_info_t*)ccv_array_get(graph->exec_info, 0), graph->exec_info->rnum, (ccv_nnc_graph_exec_t*)ccv_array_get(follows, 0), follows->rnum, graph_destinations, graph_destination_size, 0, visitor);
1033 _ccv_nnc_graph_from_move_transit(graph);
1034 _ccv_nnc_graph_rewrap(graph);
1035 }
1036 ccv_array_free(follows);
1037 } else {
1038 // For backward graph, no need to evaluate the while expr.
1039 assert(exec->cmd.cmd == CCV_NNC_GRAPH_BACKWARD);
1040 assert(graph->pair);
1041 assert(tensor_tape);
1042 count = 0;
1043 int64_t reverse_count = graph->while_count = ccv_nnc_tensor_tape_numbering(tensor_tape, graph->p, (ccv_nnc_graph_exec_t){
1044 .d = exec_idx,
1045 .graph = graph->p,
1046 });
1047 _ccv_nnc_graph_unwrap(graph, count, reverse_count);
1048 CCV_NNC_GRAPH_VISIT(graph, (ccv_nnc_graph_exec_info_t*)ccv_array_get(graph->exec_info, 0), graph->exec_info->rnum, graph->breakpoints, graph->breakpoint_size, graph_destinations, graph_destination_size, 1, visitor);
1049 _ccv_nnc_graph_from_move_transit(graph);
1050 _ccv_nnc_graph_rewrap(graph);
1051 for (count = 1; reverse_count > 0; ++count)
1052 {
1053 graph->while_count = --reverse_count;
1054 _ccv_nnc_graph_unwrap(graph, count, reverse_count);
1055 _ccv_nnc_graph_transit_move_to(graph);
1056 CCV_NNC_GRAPH_VISIT(graph, (ccv_nnc_graph_exec_info_t*)ccv_array_get(graph->exec_info, 0), graph->exec_info->rnum, graph_sources, graph_source_size, graph_destinations, graph_destination_size, 0, visitor)do { typedef struct { int8_t d; int8_t r; uint16_t c; int32_t
edges; } ccv_nnc_incoming_t; int _i_, _j_; int _incoming_edges_
= 0; for (_i_ = 0; _i_ < (graph->exec_info->rnum); _i_
++) _incoming_edges_ += (((ccv_nnc_graph_exec_info_t*)((void*
)(((char*)((graph->exec_info)->data)) + (size_t)(graph->
exec_info)->rsize * (size_t)(0))))[_i_].outgoings) ? ((ccv_nnc_graph_exec_info_t
*)((void*)(((char*)((graph->exec_info)->data)) + (size_t
)(graph->exec_info)->rsize * (size_t)(0))))[_i_].outgoings
->rnum : 0; const int _heap_mem_ = ((graph->exec_info->
rnum) + _incoming_edges_ > 1024); ccv_nnc_incoming_t* _incomings_
; if (_heap_mem_) _incomings_ = (ccv_nnc_incoming_t*)malloc(sizeof
(ccv_nnc_incoming_t) * (graph->exec_info->rnum) + sizeof
(int32_t) * ((graph->exec_info->rnum) * 2 + _incoming_edges_
)); else _incomings_ = (ccv_nnc_incoming_t*)__builtin_alloca (
sizeof(ccv_nnc_incoming_t) * (graph->exec_info->rnum) +
sizeof(int32_t) * ((graph->exec_info->rnum) * 2 + _incoming_edges_
)); memset(_incomings_, 0, sizeof(ccv_nnc_incoming_t) * (graph
->exec_info->rnum)); int32_t* _exists_[2] = { (int32_t*
)(_incomings_ + (graph->exec_info->rnum)), (int32_t*)(_incomings_
+ (graph->exec_info->rnum)) + (graph->exec_info->
rnum), }; int32_t* const _edges_ = _exists_[1] + (graph->exec_info
->rnum); for (_i_ = 0; _i_ < (graph_source_size); _i_++
) { ((void) sizeof (((graph_sources)[_i_].graph == graph) ? 1
: 0), __extension__ ({ if ((graph_sources)[_i_].graph == graph
) ; else __assert_fail ("(graph_sources)[_i_].graph == graph"
, "ccv_nnc_graph_run.c", 1056, __extension__ __PRETTY_FUNCTION__
); })); _incomings_[(graph_sources)[_i_].d].r = 1; _exists_[0
][_i_] = (graph_sources)[_i_].d; } int _exist_size_[2] = { (graph_source_size
), 0, }; int _p_ = 0, _q_ = 1; while (_exist_size_[_p_] > 0
) { _exist_size_[_q_] = 0; for (_i_ = 0; _i_ < _exist_size_
[_p_]; _i_++) { const int32_t _idx_ = _exists_[_p_][_i_]; if (
_incomings_[_idx_].r != 1) continue; _incomings_[_idx_].r = 2
; if (((ccv_nnc_graph_exec_info_t*)((void*)(((char*)((graph->
exec_info)->data)) + (size_t)(graph->exec_info)->rsize
* (size_t)(0))))[_idx_].outgoings) for (_j_ = 0; _j_ < ((
ccv_nnc_graph_exec_info_t*)((void*)(((char*)((graph->exec_info
)->data)) + (size_t)(graph->exec_info)->rsize * (size_t
)(0))))[_idx_].outgoings->rnum; _j_++) { const int d = *(int
*)((void*)(((char*)((((ccv_nnc_graph_exec_info_t*)((void*)(((
char*)((graph->exec_info)->data)) + (size_t)(graph->
exec_info)->rsize * (size_t)(0))))[_idx_].outgoings)->data
)) + (size_t)(((ccv_nnc_graph_exec_info_t*)((void*)(((char*)(
(graph->exec_info)->data)) + (size_t)(graph->exec_info
)->rsize * (size_t)(0))))[_idx_].outgoings)->rsize * (size_t
)(_j_))); ++_incomings_[d].c; if (_incomings_[d].r != 0) continue
; _incomings_[d].r = 1; ((void) sizeof ((_exist_size_[_q_] <
(graph->exec_info->rnum)) ? 1 : 0), __extension__ ({ if
(_exist_size_[_q_] < (graph->exec_info->rnum)) ; else
__assert_fail ("_exist_size_[_q_] < (graph->exec_info->rnum)"
, "ccv_nnc_graph_run.c", 1056, __extension__ __PRETTY_FUNCTION__
); })); _exists_[_q_][_exist_size_[_q_]] = d; ++_exist_size_[
_q_]; } } ((_i_) = (_p_), (_p_) = (_q_), (_q_) = (_i_)); } for
(_i_ = 0; _i_ < (graph_source_size); _i_++) { ((void) sizeof
(((graph_sources)[_i_].graph == graph) ? 1 : 0), __extension__
({ if ((graph_sources)[_i_].graph == graph) ; else __assert_fail
("(graph_sources)[_i_].graph == graph", "ccv_nnc_graph_run.c"
, 1056, __extension__ __PRETTY_FUNCTION__); })); _incomings_[
(graph_sources)[_i_].d].r = 3; _exists_[0][_i_] = (graph_sources
)[_i_].d; } _exist_size_[0] = (graph_source_size); _exist_size_
[1] = 0; _p_ = 0, _q_ = 1; int _bump_ = 1; while (_exist_size_
[_p_] > 0) { _exist_size_[_q_] = 0; for (_i_ = 0; _i_ <
_exist_size_[_p_]; _i_++) { const int32_t _idx_ = _exists_[_p_
][_i_]; if (_incomings_[_idx_].r != 3) continue; _incomings_[
_idx_].r = 4; if (((ccv_nnc_graph_exec_info_t*)((void*)(((char
*)((graph->exec_info)->data)) + (size_t)(graph->exec_info
)->rsize * (size_t)(0))))[_idx_].outgoings) for (_j_ = 0; _j_
< ((ccv_nnc_graph_exec_info_t*)((void*)(((char*)((graph->
exec_info)->data)) + (size_t)(graph->exec_info)->rsize
* (size_t)(0))))[_idx_].outgoings->rnum; _j_++) { const int
d = *(int*)((void*)(((char*)((((ccv_nnc_graph_exec_info_t*)(
(void*)(((char*)((graph->exec_info)->data)) + (size_t)(
graph->exec_info)->rsize * (size_t)(0))))[_idx_].outgoings
)->data)) + (size_t)(((ccv_nnc_graph_exec_info_t*)((void*)
(((char*)((graph->exec_info)->data)) + (size_t)(graph->
exec_info)->rsize * (size_t)(0))))[_idx_].outgoings)->rsize
* (size_t)(_j_))); if (_incomings_[d].edges == 0) { _incomings_
[d].edges = _bump_; _bump_ += _incomings_[d].c; _incomings_[d
].c = 0; } _edges_[_incomings_[d].edges - 1 + _incomings_[d].
c] = _idx_; ++_incomings_[d].c; if (_incomings_[d].r != 2) continue
; _incomings_[d].r = 3; ((void) sizeof ((_exist_size_[_q_] <
(graph->exec_info->rnum)) ? 1 : 0), __extension__ ({ if
(_exist_size_[_q_] < (graph->exec_info->rnum)) ; else
__assert_fail ("_exist_size_[_q_] < (graph->exec_info->rnum)"
, "ccv_nnc_graph_run.c", 1056, __extension__ __PRETTY_FUNCTION__
); })); _exists_[_q_][_exist_size_[_q_]] = d; ++_exist_size_[
_q_]; } } ((_i_) = (_p_), (_p_) = (_q_), (_q_) = (_i_)); } for
(_i_ = 0; _i_ < (graph_destination_size); _i_++) { ((void
) sizeof (((graph_destinations)[_i_].graph == graph) ? 1 : 0)
, __extension__ ({ if ((graph_destinations)[_i_].graph == graph
) ; else __assert_fail ("(graph_destinations)[_i_].graph == graph"
, "ccv_nnc_graph_run.c", 1056, __extension__ __PRETTY_FUNCTION__
); })); _incomings_[(graph_destinations)[_i_].d].r = 5; _exists_
[0][_i_] = (graph_destinations)[_i_].d; } _exist_size_[0] = (
graph_destination_size); _exist_size_[1] = 0; _p_ = 0, _q_ = 1
; while (_exist_size_[_p_] > 0) { _exist_size_[_q_] = 0; for
(_i_ = 0; _i_ < _exist_size_[_p_]; _i_++) { const int32_t
_idx_ = _exists_[_p_][_i_]; if (_incomings_[_idx_].r != 5) continue
; _incomings_[_idx_].r = 6; if (_incomings_[_idx_].edges >
0) for (_j_ = 0; _j_ < _incomings_[_idx_].c; _j_++) { const
int d = _edges_[_incomings_[_idx_].edges - 1 + _j_]; if (_incomings_
[d].r != 4) continue; _incomings_[d].r = 5; ((void) sizeof ((
_exist_size_[_q_] < (graph->exec_info->rnum)) ? 1 : 0
), __extension__ ({ if (_exist_size_[_q_] < (graph->exec_info
->rnum)) ; else __assert_fail ("_exist_size_[_q_] < (graph->exec_info->rnum)"
, "ccv_nnc_graph_run.c", 1056, __extension__ __PRETTY_FUNCTION__
); })); _exists_[_q_][_exist_size_[_q_]] = d; ++_exist_size_[
_q_]; } } ((_i_) = (_p_), (_p_) = (_q_), (_q_) = (_i_)); } for
(_i_ = 0; _i_ < (graph_destination_size); _i_++) { ((void
) sizeof (((graph_destinations)[_i_].graph == graph) ? 1 : 0)
, __extension__ ({ if ((graph_destinations)[_i_].graph == graph
) ; else __assert_fail ("(graph_destinations)[_i_].graph == graph"
, "ccv_nnc_graph_run.c", 1056, __extension__ __PRETTY_FUNCTION__
); })); _incomings_[(graph_destinations)[_i_].d].d = 1; } for
(_i_ = 0; _i_ < (graph_source_size); _i_++) { ((void) sizeof
(((graph_sources)[_i_].graph == graph) ? 1 : 0), __extension__
({ if ((graph_sources)[_i_].graph == graph) ; else __assert_fail
("(graph_sources)[_i_].graph == graph", "ccv_nnc_graph_run.c"
, 1056, __extension__ __PRETTY_FUNCTION__); })); _exists_[0][
_i_] = (graph_sources)[_i_].d; } _p_ = 0; _q_ = 1; _exist_size_
[0] = (graph_source_size); _exist_size_[1] = 0; int _d_ = 0; while
(_exist_size_[_p_] > 0) { _exist_size_[_q_] = 0; for (_i_
= 0; _i_ < _exist_size_[_p_];) { const int32_t _idx_ = _exists_
[_p_][_i_]; visitor((((ccv_nnc_graph_exec_info_t*)((void*)(((
char*)((graph->exec_info)->data)) + (size_t)(graph->
exec_info)->rsize * (size_t)(0)))) + _idx_), (_idx_), (_incomings_
[_idx_].d)); if (_incomings_[_idx_].d) { ++_d_; _incomings_[_idx_
].r = 7; } if (((ccv_nnc_graph_exec_info_t*)((void*)(((char*)
((graph->exec_info)->data)) + (size_t)(graph->exec_info
)->rsize * (size_t)(0))))[_idx_].outgoings) { if (((ccv_nnc_graph_exec_info_t
*)((void*)(((char*)((graph->exec_info)->data)) + (size_t
)(graph->exec_info)->rsize * (size_t)(0))))[_idx_].outgoings
->rnum == 1) { const int d = *(int*)((void*)(((char*)((((ccv_nnc_graph_exec_info_t
*)((void*)(((char*)((graph->exec_info)->data)) + (size_t
)(graph->exec_info)->rsize * (size_t)(0))))[_idx_].outgoings
)->data)) + (size_t)(((ccv_nnc_graph_exec_info_t*)((void*)
(((char*)((graph->exec_info)->data)) + (size_t)(graph->
exec_info)->rsize * (size_t)(0))))[_idx_].outgoings)->rsize
* (size_t)(0))); --_incomings_[d].c; if (_incomings_[d].c ==
0 && _incomings_[d].r == 6 && _d_ < (graph_destination_size
)) { _exists_[_p_][_i_] = d; continue; } } else for (_j_ = 0;
_j_ < ((ccv_nnc_graph_exec_info_t*)((void*)(((char*)((graph
->exec_info)->data)) + (size_t)(graph->exec_info)->
rsize * (size_t)(0))))[_idx_].outgoings->rnum; _j_++) { const
int d = *(int*)((void*)(((char*)((((ccv_nnc_graph_exec_info_t
*)((void*)(((char*)((graph->exec_info)->data)) + (size_t
)(graph->exec_info)->rsize * (size_t)(0))))[_idx_].outgoings
)->data)) + (size_t)(((ccv_nnc_graph_exec_info_t*)((void*)
(((char*)((graph->exec_info)->data)) + (size_t)(graph->
exec_info)->rsize * (size_t)(0))))[_idx_].outgoings)->rsize
* (size_t)(_j_))); --_incomings_[d].c; if (_incomings_[d].c ==
0 && _incomings_[d].r == 6 && _d_ < (graph_destination_size
)) { ((void) sizeof ((_exist_size_[_q_] < (graph->exec_info
->rnum)) ? 1 : 0), __extension__ ({ if (_exist_size_[_q_] <
(graph->exec_info->rnum)) ; else __assert_fail ("_exist_size_[_q_] < (graph->exec_info->rnum)"
, "ccv_nnc_graph_run.c", 1056, __extension__ __PRETTY_FUNCTION__
); })); _exists_[_q_][_exist_size_[_q_]] = d; ++_exist_size_[
_q_]; } } } ++_i_; } ((_i_) = (_p_), (_p_) = (_q_), (_q_) = (
_i_)); } for (_i_ = 0; _i_ < (graph_destination_size); _i_
++) { ((void) sizeof (((graph_destinations)[_i_].graph == graph
) ? 1 : 0), __extension__ ({ if ((graph_destinations)[_i_].graph
== graph) ; else __assert_fail ("(graph_destinations)[_i_].graph == graph"
, "ccv_nnc_graph_run.c", 1056, __extension__ __PRETTY_FUNCTION__
); })); if (_incomings_[(graph_destinations)[_i_].d].r == 7) continue
; if (!(0)) { ((void) sizeof ((_incomings_[(graph_destinations
)[_i_].d].c == 0) ? 1 : 0), __extension__ ({ if (_incomings_[
(graph_destinations)[_i_].d].c == 0) ; else __assert_fail ("_incomings_[(graph_destinations)[_i_].d].c == 0"
, "ccv_nnc_graph_run.c", 1056, __extension__ __PRETTY_FUNCTION__
); })); } else if (_incomings_[(graph_destinations)[_i_].d].c
> 0) continue; visitor((((ccv_nnc_graph_exec_info_t*)((void
*)(((char*)((graph->exec_info)->data)) + (size_t)(graph
->exec_info)->rsize * (size_t)(0)))) + (graph_destinations
)[_i_].d), ((graph_destinations)[_i_].d), (_incomings_[(graph_destinations
)[_i_].d].d)); } if (_heap_mem_) free(_incomings_); } while (
0);
;
1057 _ccv_nnc_graph_from_move_transit(graph);
1058 _ccv_nnc_graph_rewrap(graph);
1059 }
1060 }
1061 } else {
1062 graph->while_count = 0;
1063 CCV_NNC_GRAPH_VISIT(graph, (ccv_nnc_graph_exec_info_t*)ccv_array_get(graph->exec_info, 0), graph->exec_info->rnum, graph_sources, graph_source_size, graph_destinations, graph_destination_size, 0, visitor)do { typedef struct { int8_t d; int8_t r; uint16_t c; int32_t
edges; } ccv_nnc_incoming_t; int _i_, _j_; int _incoming_edges_
= 0; for (_i_ = 0; _i_ < (graph->exec_info->rnum); _i_
++) _incoming_edges_ += (((ccv_nnc_graph_exec_info_t*)((void*
)(((char*)((graph->exec_info)->data)) + (size_t)(graph->
exec_info)->rsize * (size_t)(0))))[_i_].outgoings) ? ((ccv_nnc_graph_exec_info_t
*)((void*)(((char*)((graph->exec_info)->data)) + (size_t
)(graph->exec_info)->rsize * (size_t)(0))))[_i_].outgoings
->rnum : 0; const int _heap_mem_ = ((graph->exec_info->
rnum) + _incoming_edges_ > 1024); ccv_nnc_incoming_t* _incomings_
; if (_heap_mem_) _incomings_ = (ccv_nnc_incoming_t*)malloc(sizeof
(ccv_nnc_incoming_t) * (graph->exec_info->rnum) + sizeof
(int32_t) * ((graph->exec_info->rnum) * 2 + _incoming_edges_
)); else _incomings_ = (ccv_nnc_incoming_t*)__builtin_alloca (
sizeof(ccv_nnc_incoming_t) * (graph->exec_info->rnum) +
sizeof(int32_t) * ((graph->exec_info->rnum) * 2 + _incoming_edges_
)); memset(_incomings_, 0, sizeof(ccv_nnc_incoming_t) * (graph
->exec_info->rnum)); int32_t* _exists_[2] = { (int32_t*
)(_incomings_ + (graph->exec_info->rnum)), (int32_t*)(_incomings_
+ (graph->exec_info->rnum)) + (graph->exec_info->
rnum), }; int32_t* const _edges_ = _exists_[1] + (graph->exec_info
->rnum); for (_i_ = 0; _i_ < (graph_source_size); _i_++
) { ((void) sizeof (((graph_sources)[_i_].graph == graph) ? 1
: 0), __extension__ ({ if ((graph_sources)[_i_].graph == graph
) ; else __assert_fail ("(graph_sources)[_i_].graph == graph"
, "ccv_nnc_graph_run.c", 1063, __extension__ __PRETTY_FUNCTION__
); })); _incomings_[(graph_sources)[_i_].d].r = 1; _exists_[0
][_i_] = (graph_sources)[_i_].d; } int _exist_size_[2] = { (graph_source_size
), 0, }; int _p_ = 0, _q_ = 1; while (_exist_size_[_p_] > 0
) { _exist_size_[_q_] = 0; for (_i_ = 0; _i_ < _exist_size_
[_p_]; _i_++) { const int32_t _idx_ = _exists_[_p_][_i_]; if (
_incomings_[_idx_].r != 1) continue; _incomings_[_idx_].r = 2
; if (((ccv_nnc_graph_exec_info_t*)((void*)(((char*)((graph->
exec_info)->data)) + (size_t)(graph->exec_info)->rsize
* (size_t)(0))))[_idx_].outgoings) for (_j_ = 0; _j_ < ((
ccv_nnc_graph_exec_info_t*)((void*)(((char*)((graph->exec_info
)->data)) + (size_t)(graph->exec_info)->rsize * (size_t
)(0))))[_idx_].outgoings->rnum; _j_++) { const int d = *(int
*)((void*)(((char*)((((ccv_nnc_graph_exec_info_t*)((void*)(((
char*)((graph->exec_info)->data)) + (size_t)(graph->
exec_info)->rsize * (size_t)(0))))[_idx_].outgoings)->data
)) + (size_t)(((ccv_nnc_graph_exec_info_t*)((void*)(((char*)(
(graph->exec_info)->data)) + (size_t)(graph->exec_info
)->rsize * (size_t)(0))))[_idx_].outgoings)->rsize * (size_t
)(_j_))); ++_incomings_[d].c; if (_incomings_[d].r != 0) continue
; _incomings_[d].r = 1; ((void) sizeof ((_exist_size_[_q_] <
(graph->exec_info->rnum)) ? 1 : 0), __extension__ ({ if
(_exist_size_[_q_] < (graph->exec_info->rnum)) ; else
__assert_fail ("_exist_size_[_q_] < (graph->exec_info->rnum)"
, "ccv_nnc_graph_run.c", 1063, __extension__ __PRETTY_FUNCTION__
); })); _exists_[_q_][_exist_size_[_q_]] = d; ++_exist_size_[
_q_]; } } ((_i_) = (_p_), (_p_) = (_q_), (_q_) = (_i_)); } for
(_i_ = 0; _i_ < (graph_source_size); _i_++) { ((void) sizeof
(((graph_sources)[_i_].graph == graph) ? 1 : 0), __extension__
({ if ((graph_sources)[_i_].graph == graph) ; else __assert_fail
("(graph_sources)[_i_].graph == graph", "ccv_nnc_graph_run.c"
, 1063, __extension__ __PRETTY_FUNCTION__); })); _incomings_[
(graph_sources)[_i_].d].r = 3; _exists_[0][_i_] = (graph_sources
)[_i_].d; } _exist_size_[0] = (graph_source_size); _exist_size_
[1] = 0; _p_ = 0, _q_ = 1; int _bump_ = 1; while (_exist_size_
[_p_] > 0) { _exist_size_[_q_] = 0; for (_i_ = 0; _i_ <
_exist_size_[_p_]; _i_++) { const int32_t _idx_ = _exists_[_p_
][_i_]; if (_incomings_[_idx_].r != 3) continue; _incomings_[
_idx_].r = 4; if (((ccv_nnc_graph_exec_info_t*)((void*)(((char
*)((graph->exec_info)->data)) + (size_t)(graph->exec_info
)->rsize * (size_t)(0))))[_idx_].outgoings) for (_j_ = 0; _j_
< ((ccv_nnc_graph_exec_info_t*)((void*)(((char*)((graph->
exec_info)->data)) + (size_t)(graph->exec_info)->rsize
* (size_t)(0))))[_idx_].outgoings->rnum; _j_++) { const int
d = *(int*)((void*)(((char*)((((ccv_nnc_graph_exec_info_t*)(
(void*)(((char*)((graph->exec_info)->data)) + (size_t)(
graph->exec_info)->rsize * (size_t)(0))))[_idx_].outgoings
)->data)) + (size_t)(((ccv_nnc_graph_exec_info_t*)((void*)
(((char*)((graph->exec_info)->data)) + (size_t)(graph->
exec_info)->rsize * (size_t)(0))))[_idx_].outgoings)->rsize
* (size_t)(_j_))); if (_incomings_[d].edges == 0) { _incomings_
[d].edges = _bump_; _bump_ += _incomings_[d].c; _incomings_[d
].c = 0; } _edges_[_incomings_[d].edges - 1 + _incomings_[d].
c] = _idx_; ++_incomings_[d].c; if (_incomings_[d].r != 2) continue
; _incomings_[d].r = 3; ((void) sizeof ((_exist_size_[_q_] <
(graph->exec_info->rnum)) ? 1 : 0), __extension__ ({ if
(_exist_size_[_q_] < (graph->exec_info->rnum)) ; else
__assert_fail ("_exist_size_[_q_] < (graph->exec_info->rnum)"
, "ccv_nnc_graph_run.c", 1063, __extension__ __PRETTY_FUNCTION__
); })); _exists_[_q_][_exist_size_[_q_]] = d; ++_exist_size_[
_q_]; } } ((_i_) = (_p_), (_p_) = (_q_), (_q_) = (_i_)); } for
(_i_ = 0; _i_ < (graph_destination_size); _i_++) { ((void
) sizeof (((graph_destinations)[_i_].graph == graph) ? 1 : 0)
, __extension__ ({ if ((graph_destinations)[_i_].graph == graph
) ; else __assert_fail ("(graph_destinations)[_i_].graph == graph"
, "ccv_nnc_graph_run.c", 1063, __extension__ __PRETTY_FUNCTION__
); })); _incomings_[(graph_destinations)[_i_].d].r = 5; _exists_
[0][_i_] = (graph_destinations)[_i_].d; } _exist_size_[0] = (
graph_destination_size); _exist_size_[1] = 0; _p_ = 0, _q_ = 1
; while (_exist_size_[_p_] > 0) { _exist_size_[_q_] = 0; for
(_i_ = 0; _i_ < _exist_size_[_p_]; _i_++) { const int32_t
_idx_ = _exists_[_p_][_i_]; if (_incomings_[_idx_].r != 5) continue
; _incomings_[_idx_].r = 6; if (_incomings_[_idx_].edges >
0) for (_j_ = 0; _j_ < _incomings_[_idx_].c; _j_++) { const
int d = _edges_[_incomings_[_idx_].edges - 1 + _j_]; if (_incomings_
[d].r != 4) continue; _incomings_[d].r = 5; ((void) sizeof ((
_exist_size_[_q_] < (graph->exec_info->rnum)) ? 1 : 0
), __extension__ ({ if (_exist_size_[_q_] < (graph->exec_info
->rnum)) ; else __assert_fail ("_exist_size_[_q_] < (graph->exec_info->rnum)"
, "ccv_nnc_graph_run.c", 1063, __extension__ __PRETTY_FUNCTION__
); })); _exists_[_q_][_exist_size_[_q_]] = d; ++_exist_size_[
_q_]; } } ((_i_) = (_p_), (_p_) = (_q_), (_q_) = (_i_)); } for
(_i_ = 0; _i_ < (graph_destination_size); _i_++) { ((void
) sizeof (((graph_destinations)[_i_].graph == graph) ? 1 : 0)
, __extension__ ({ if ((graph_destinations)[_i_].graph == graph
) ; else __assert_fail ("(graph_destinations)[_i_].graph == graph"
, "ccv_nnc_graph_run.c", 1063, __extension__ __PRETTY_FUNCTION__
); })); _incomings_[(graph_destinations)[_i_].d].d = 1; } for
(_i_ = 0; _i_ < (graph_source_size); _i_++) { ((void) sizeof
(((graph_sources)[_i_].graph == graph) ? 1 : 0), __extension__
({ if ((graph_sources)[_i_].graph == graph) ; else __assert_fail
("(graph_sources)[_i_].graph == graph", "ccv_nnc_graph_run.c"
, 1063, __extension__ __PRETTY_FUNCTION__); })); _exists_[0][
_i_] = (graph_sources)[_i_].d; } _p_ = 0; _q_ = 1; _exist_size_
[0] = (graph_source_size); _exist_size_[1] = 0; int _d_ = 0; while
(_exist_size_[_p_] > 0) { _exist_size_[_q_] = 0; for (_i_
= 0; _i_ < _exist_size_[_p_];) { const int32_t _idx_ = _exists_
[_p_][_i_]; visitor((((ccv_nnc_graph_exec_info_t*)((void*)(((
char*)((graph->exec_info)->data)) + (size_t)(graph->
exec_info)->rsize * (size_t)(0)))) + _idx_), (_idx_), (_incomings_
[_idx_].d)); if (_incomings_[_idx_].d) { ++_d_; _incomings_[_idx_
].r = 7; } if (((ccv_nnc_graph_exec_info_t*)((void*)(((char*)
((graph->exec_info)->data)) + (size_t)(graph->exec_info
)->rsize * (size_t)(0))))[_idx_].outgoings) { if (((ccv_nnc_graph_exec_info_t
*)((void*)(((char*)((graph->exec_info)->data)) + (size_t
)(graph->exec_info)->rsize * (size_t)(0))))[_idx_].outgoings
->rnum == 1) { const int d = *(int*)((void*)(((char*)((((ccv_nnc_graph_exec_info_t
*)((void*)(((char*)((graph->exec_info)->data)) + (size_t
)(graph->exec_info)->rsize * (size_t)(0))))[_idx_].outgoings
)->data)) + (size_t)(((ccv_nnc_graph_exec_info_t*)((void*)
(((char*)((graph->exec_info)->data)) + (size_t)(graph->
exec_info)->rsize * (size_t)(0))))[_idx_].outgoings)->rsize
* (size_t)(0))); --_incomings_[d].c; if (_incomings_[d].c ==
0 && _incomings_[d].r == 6 && _d_ < (graph_destination_size
)) { _exists_[_p_][_i_] = d; continue; } } else for (_j_ = 0;
_j_ < ((ccv_nnc_graph_exec_info_t*)((void*)(((char*)((graph
->exec_info)->data)) + (size_t)(graph->exec_info)->
rsize * (size_t)(0))))[_idx_].outgoings->rnum; _j_++) { const
int d = *(int*)((void*)(((char*)((((ccv_nnc_graph_exec_info_t
*)((void*)(((char*)((graph->exec_info)->data)) + (size_t
)(graph->exec_info)->rsize * (size_t)(0))))[_idx_].outgoings
)->data)) + (size_t)(((ccv_nnc_graph_exec_info_t*)((void*)
(((char*)((graph->exec_info)->data)) + (size_t)(graph->
exec_info)->rsize * (size_t)(0))))[_idx_].outgoings)->rsize
* (size_t)(_j_))); --_incomings_[d].c; if (_incomings_[d].c ==
0 && _incomings_[d].r == 6 && _d_ < (graph_destination_size
)) { ((void) sizeof ((_exist_size_[_q_] < (graph->exec_info
->rnum)) ? 1 : 0), __extension__ ({ if (_exist_size_[_q_] <
(graph->exec_info->rnum)) ; else __assert_fail ("_exist_size_[_q_] < (graph->exec_info->rnum)"
, "ccv_nnc_graph_run.c", 1063, __extension__ __PRETTY_FUNCTION__
); })); _exists_[_q_][_exist_size_[_q_]] = d; ++_exist_size_[
_q_]; } } } ++_i_; } ((_i_) = (_p_), (_p_) = (_q_), (_q_) = (
_i_)); } for (_i_ = 0; _i_ < (graph_destination_size); _i_
++) { ((void) sizeof (((graph_destinations)[_i_].graph == graph
) ? 1 : 0), __extension__ ({ if ((graph_destinations)[_i_].graph
== graph) ; else __assert_fail ("(graph_destinations)[_i_].graph == graph"
, "ccv_nnc_graph_run.c", 1063, __extension__ __PRETTY_FUNCTION__
); })); if (_incomings_[(graph_destinations)[_i_].d].r == 7) continue
; if (!(0)) { ((void) sizeof ((_incomings_[(graph_destinations
)[_i_].d].c == 0) ? 1 : 0), __extension__ ({ if (_incomings_[
(graph_destinations)[_i_].d].c == 0) ; else __assert_fail ("_incomings_[(graph_destinations)[_i_].d].c == 0"
, "ccv_nnc_graph_run.c", 1063, __extension__ __PRETTY_FUNCTION__
); })); } else if (_incomings_[(graph_destinations)[_i_].d].c
> 0) continue; visitor((((ccv_nnc_graph_exec_info_t*)((void
*)(((char*)((graph->exec_info)->data)) + (size_t)(graph
->exec_info)->rsize * (size_t)(0)))) + (graph_destinations
)[_i_].d), ((graph_destinations)[_i_].d), (_incomings_[(graph_destinations
)[_i_].d].d)); } if (_heap_mem_) free(_incomings_); } while (
0);
;
1064 }
1065#undef visitor
1066}
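The three near-identical walls of code above are all expansions of the same CCV_NNC_GRAPH_VISIT macro: it counts each node's incoming edges, marks what is reachable forward from the sources and backward from the destinations, then repeatedly visits nodes whose incoming count has dropped to zero. Below is a minimal sketch of that core traversal (a Kahn-style topological visit). The graph shape, node count, and the printf visitor are illustrative assumptions; the real macro additionally tracks reachability states in ccv_nnc_incoming_t and reuses a shared edge buffer, which the sketch omits.

```c
/* Minimal sketch of the topological visit CCV_NNC_GRAPH_VISIT expands to.
 * The adjacency matrix and node count are illustrative, not from ccv. */
#include <stdio.h>

#define NODES 5

static const int outgoings[NODES][NODES] = {
	{0, 1, 1, 0, 0},
	{0, 0, 0, 1, 0},
	{0, 0, 0, 1, 0},
	{0, 0, 0, 0, 1},
	{0, 0, 0, 0, 0},
};

int main(void)
{
	int incoming[NODES] = {0};
	int i, j;
	for (i = 0; i < NODES; i++)
		for (j = 0; j < NODES; j++)
			if (outgoings[i][j])
				++incoming[j]; /* count incoming edges, like _incomings_[d].c */
	int queue[NODES], head = 0, tail = 0;
	for (i = 0; i < NODES; i++)
		if (incoming[i] == 0)
			queue[tail++] = i; /* sources: nodes with no incoming edges */
	while (head < tail)
	{
		const int idx = queue[head++];
		printf("visit %d\n", idx); /* stands in for the visitor(...) callback */
		for (j = 0; j < NODES; j++)
			if (outgoings[idx][j] && --incoming[j] == 0)
				queue[tail++] = j; /* ready once all predecessors are visited */
	}
	return 0;
}
```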
1067
1068static int _ccv_nnc_graph_run(ccv_nnc_graph_t* const graph, const int exec_idx, ccv_nnc_graph_exec_info_t* const exec, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, const int flags, const ccv_nnc_graph_exec_t* const sources, const int source_size, const ccv_nnc_graph_exec_t* const destinations, const int destination_size, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const stream_context)
1069{
1070 assert((sources == 0 && source_size == 0) || (sources && source_size))((void) sizeof (((sources == 0 && source_size == 0) ||
(sources && source_size)) ? 1 : 0), __extension__ ({
if ((sources == 0 && source_size == 0) || (sources &&
source_size)) ; else __assert_fail ("(sources == 0 && source_size == 0) || (sources && source_size)"
, "ccv_nnc_graph_run.c", 1070, __extension__ __PRETTY_FUNCTION__
); }))
;
Step 3: Assuming 'sources' is equal to null
Step 4: Assuming 'source_size' is equal to 0
1071 assert((destinations == 0 && destination_size == 0) || (destinations && destination_size))((void) sizeof (((destinations == 0 && destination_size
== 0) || (destinations && destination_size)) ? 1 : 0
), __extension__ ({ if ((destinations == 0 && destination_size
== 0) || (destinations && destination_size)) ; else __assert_fail
("(destinations == 0 && destination_size == 0) || (destinations && destination_size)"
, "ccv_nnc_graph_run.c", 1071, __extension__ __PRETTY_FUNCTION__
); }))
;
Step 5: Assuming 'destinations' is equal to null
Step 6: Assuming 'destination_size' is equal to 0
1072 const ccv_nnc_graph_exec_t* const graph_sources = sources
Step 6.1: 'sources' is null
? sources : (ccv_nnc_graph_exec_t*)ccv_array_get(graph->sources, 0)((void*)(((char*)((graph->sources)->data)) + (size_t)(graph
->sources)->rsize * (size_t)(0)))
;
Step 7: '?' condition is false
1073 const int graph_source_size = source_size
Step 7.1: 'source_size' is 0
? source_size : graph->sources->rnum;
Step 8: '?' condition is false
1074 const ccv_nnc_graph_exec_t* const graph_destinations = destinations
Step 8.1: 'destinations' is null
? destinations : (ccv_nnc_graph_exec_t*)ccv_array_get(graph->destinations, 0)((void*)(((char*)((graph->destinations)->data)) + (size_t
)(graph->destinations)->rsize * (size_t)(0)))
;
Step 9: '?' condition is false
1075 const int graph_destination_size = destination_size
Step 9.1: 'destination_size' is 0
? destination_size : graph->destinations->rnum;
Step 10: '?' condition is false
1076 int i;
1077 for (i = 0; i < graph_source_size; i++)
Step 11: Assuming 'i' is >= 'graph_source_size'
Step 12: Loop condition is false. Execution continues on line 1080
1078 if (graph_sources[i].graph != graph)
1079 return CCV_NNC_EXEC_INVALID;
1080 for (i = 0; i < graph_destination_size; i++)
Step 13: Assuming 'i' is >= 'graph_destination_size'
1081 if (graph_destinations[i].graph != graph)
1082 return CCV_NNC_EXEC_INVALID;
1083 // When topsorted is true, there is no memory allocation when running the graph.
1084 const int topsorted = (!sources
Step 13.1: 'sources' is null
&& !destinations
Step 13.2: 'destinations' is null
&& graph->topsorted);
1085 if (topsorted)
Step 14: Assuming 'topsorted' is not equal to 0
Step 15: Taking true branch
1086 _ccv_nnc_graph_topsorted_run(graph, exec_idx, exec, flags, tensor_tape, stream_context);
Step 16: Calling '_ccv_nnc_graph_topsorted_run'
1087 else
1088 _ccv_nnc_graph_run_slow_path(graph, exec_idx, exec, inputs, input_size, outputs, output_size, flags, sources, source_size, destinations, destination_size, tensor_tape, stream_context);
1089 return CCV_NNC_EXEC_SUCCESS;
1090}
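On the path this report traces, _ccv_nnc_graph_run is handed null inputs and outputs with sizes of 0 (the call at line 1103), and nothing on this level re-checks them before the call at line 1086 continues down the stack. That convention is sound only if every downstream consumer re-establishes the size check (or a null check) before indexing the array. The sketch below shows the general shape of the defect the analyzer diagnoses; consume() and demo() are hypothetical names invented for illustration, not functions from this file.

```c
/* Illustrative only: passing a null array with size 0 is fine until some
 * later path indexes it under a weaker condition than output_size > 0. */
#include <stddef.h>

static int consume(int* const* const outputs, const int output_size, const int wanted)
{
	/* Guarding on 'wanted' alone would not prove outputs != NULL to the
	 * analyzer; checking the size and the pointer makes the access safe. */
	if (wanted >= 0 && wanted < output_size && outputs)
		return *outputs[wanted];
	return -1;
}

int demo(void)
{
	return consume(NULL, 0, 0); /* mirrors outputs == 0, output_size == 0 */
}
```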
1091
1092int ccv_nnc_graph_run(ccv_nnc_graph_t* const graph, const int flags, const ccv_nnc_graph_exec_t* const sources, const int source_size, const ccv_nnc_graph_exec_t* const destinations, const int destination_size, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const stream_context)
1093{
1094 __atomic_store_n(&graph->run_state, CCV_NNC_GRAPH_STATE_RUNNING, __ATOMIC_RELEASE /* expands to 3 */);
1095 if (stream_context && graph->topsorted && graph->stream_size > 0 && graph->default_schedule && source_size == 0 && destination_size == 0)
Step 1: Assuming 'stream_context' is null
1096 {
1097 co_scheduler_t* const scheduler = ccv_nnc_stream_context_get_scheduler(stream_context);
1098 co_routine_t* const task = co_new(_ccv_nnc_graph_topsorted_run_coro, (graph, -1, graph->default_schedule, 0, tensor_tape, stream_context, flags))({ co_routine_t* const task = malloc((sizeof(co_routine_t) + _ccv_nnc_graph_topsorted_run_coro_stack_size
())); do { struct _ccv_nnc_graph_topsorted_run_coro_param_s params
= { ._co_params = { graph, -1, graph->default_schedule, 0
, tensor_tape, stream_context, flags } }; task->fn = _ccv_nnc_graph_topsorted_run_coro
; task->line = 0; task->done = 0; task->root = 0; task
->other_size = 0; task->notify_any = 0; task->others
= 0; task->caller = 0; task->callee = 0; if (sizeof(params
) > 0) memcpy(task + 1, &params, sizeof(params)); } while
(0); task; })
;
1099 co_schedule(scheduler, task);
1100 // I don't need to worry about freeing this task, it will free itself at the end.
1101 return CCV_NNC_EXEC_SUCCESS;
1102 } else
1103 return _ccv_nnc_graph_run(graph, -1, 0, 0, 0, 0, 0, flags, sources, source_size, destinations, destination_size, tensor_tape, 0 /* In this case, we don't support stream context yet. */);
Step 2: Calling '_ccv_nnc_graph_run'
1104}
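For reference, a minimal call sketch against the signature at line 1092, assuming a graph that has already been built and topsorted: null sources and destinations fall back to graph->sources and graph->destinations (lines 1072-1075), and a null stream context skips the coroutine branch at line 1095 and takes the synchronous _ccv_nnc_graph_run path at line 1103. The wrapper name is invented for illustration.

```c
/* Hypothetical wrapper: run the whole graph synchronously with defaults. */
#include "ccv_nnc.h"

int run_whole_graph(ccv_nnc_graph_t* const graph)
{
	return ccv_nnc_graph_run(graph, 0 /* flags */, 0, 0 /* sources */, 0, 0 /* destinations */, 0 /* tensor_tape */, 0 /* stream_context */);
}
```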
1105
1106int ccv_nnc_graph_run_with_schedule(ccv_nnc_graph_t* const graph, const int flags, const ccv_nnc_graph_static_schedule_t* const _schedule, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const _stream_context)
1107{
1108 assert(graph->topsorted)((void) sizeof ((graph->topsorted) ? 1 : 0), __extension__
({ if (graph->topsorted) ; else __assert_fail ("graph->topsorted"
, "ccv_nnc_graph_run.c", 1108, __extension__ __PRETTY_FUNCTION__
); }))
;
1109 if (graph->exec_info->rnum == 0)
1110 return CCV_NNC_EXEC_SUCCESS;
1111 __atomic_store_n(&graph->run_state, CCV_NNC_GRAPH_STATE_RUNNING, __ATOMIC_RELEASE /* expands to 3 */);
1112 assert(graph->stream_size > 0)((void) sizeof ((graph->stream_size > 0) ? 1 : 0), __extension__
({ if (graph->stream_size > 0) ; else __assert_fail ("graph->stream_size > 0"
, "ccv_nnc_graph_run.c", 1112, __extension__ __PRETTY_FUNCTION__
); }))
;
1113 const ccv_nnc_graph_static_schedule_t* const schedule = _schedule ? _schedule : graph->default_schedule;
1114 assert(schedule)((void) sizeof ((schedule) ? 1 : 0), __extension__ ({ if (schedule
) ; else __assert_fail ("schedule", "ccv_nnc_graph_run.c", 1114
, __extension__ __PRETTY_FUNCTION__); }))
;
1115 assert(schedule->stream_0 < graph->stream_size)((void) sizeof ((schedule->stream_0 < graph->stream_size
) ? 1 : 0), __extension__ ({ if (schedule->stream_0 < graph
->stream_size) ; else __assert_fail ("schedule->stream_0 < graph->stream_size"
, "ccv_nnc_graph_run.c", 1115, __extension__ __PRETTY_FUNCTION__
); }))
;
1116 ccv_nnc_stream_context_t* const stream_context = _stream_context ? _stream_context : graph->streams[schedule->stream_0];
1117 co_scheduler_t* const scheduler = ccv_nnc_stream_context_get_scheduler(stream_context);
1118 co_routine_t* const task = co_new(_ccv_nnc_graph_topsorted_run_coro, (graph, -1, schedule, 0, tensor_tape, stream_context, flags))({ co_routine_t* const task = malloc((sizeof(co_routine_t) + _ccv_nnc_graph_topsorted_run_coro_stack_size
())); do { struct _ccv_nnc_graph_topsorted_run_coro_param_s params
= { ._co_params = { graph, -1, schedule, 0, tensor_tape, stream_context
, flags } }; task->fn = _ccv_nnc_graph_topsorted_run_coro;
task->line = 0; task->done = 0; task->root = 0; task
->other_size = 0; task->notify_any = 0; task->others
= 0; task->caller = 0; task->callee = 0; if (sizeof(params
) > 0) memcpy(task + 1, &params, sizeof(params)); } while
(0); task; })
;
1119 co_schedule(scheduler, task);
1120 // I don't need to worry about freeing this task, it will free itself at the end.
1121 if (!_stream_context) // If no stream context provided, this is a sync operation.
1122 ccv_nnc_stream_context_wait(stream_context);
1123 return CCV_NNC_EXEC_SUCCESS;
1124}
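A sketch of the two ways this entry point behaves, per lines 1116-1122: with a null stream context the function falls back to the schedule's stream_0 and blocks via ccv_nnc_stream_context_wait before returning, while an explicit stream context makes the call asynchronous and leaves synchronization to the caller. The wrapper below is illustrative, not part of the library.

```c
/* Hypothetical wrapper showing the sync vs. async usage of
 * ccv_nnc_graph_run_with_schedule, assuming the default schedule. */
#include "ccv_nnc.h"

void run_scheduled(ccv_nnc_graph_t* const graph, ccv_nnc_stream_context_t* const stream)
{
	if (!stream)
		ccv_nnc_graph_run_with_schedule(graph, 0, 0 /* default schedule */, 0, 0); /* blocks until done */
	else {
		ccv_nnc_graph_run_with_schedule(graph, 0, 0, 0, stream); /* returns immediately */
		ccv_nnc_stream_context_wait(stream); /* caller synchronizes explicitly */
	}
}
```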
1125
1126void ccv_nnc_graph_cancel(ccv_nnc_graph_t* const graph)
1127{
1128 __atomic_store_n(&graph->run_state, CCV_NNC_GRAPH_STATE_CANCEL, __ATOMIC_RELEASE /* expands to 3 */);
1129}
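ccv_nnc_graph_cancel only publishes a flag: a release store of CCV_NNC_GRAPH_STATE_CANCEL into graph->run_state. The natural consumer is an acquire load wherever the running graph polls run_state, so that any writes made before the cancel request are visible once the flag is observed. The sketch below shows that store/load pairing with standalone names; it is an illustration of the memory-ordering idea, not the actual dispatch loop in ccv_nnc_graph_run.c.

```c
/* Illustrative release/acquire pairing behind a cancel flag. The enum values
 * and function names are assumptions, not the library's definitions. */
enum { STATE_RUNNING = 0, STATE_CANCEL = 1 };

static int run_state = STATE_RUNNING;

static void cancel(void)
{
	/* Release: everything written before this store is published. */
	__atomic_store_n(&run_state, STATE_CANCEL, __ATOMIC_RELEASE);
}

static int should_stop(void)
{
	/* Acquire: pairs with the release store above. */
	return __atomic_load_n(&run_state, __ATOMIC_ACQUIRE) == STATE_CANCEL;
}
```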