File: nnc/ccv_nnc_micro_interpret.c
Warning: line 186, column 65: The left expression of the compound assignment is an uninitialized value. The computed value will also be garbage
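
A plausible reading of the diagnostic: the carrieds buffer is allocated uninitialized with ccmalloc (line 368 below), and its slots are only seeded inside _ccv_nnc_micro_loop_interpret for the carried ids attached to a given loop level, so the analyzer reports a path on which the += at line 186 (the CCV_NNC_MICRO_REDUCE_OP_MEAN branch) reads an indeterminate value. Below is a minimal, self-contained sketch of that pattern; it is not ccv code, and every name in it is made up for illustration.

#include <stdlib.h>

int main(void)
{
	/* Uninitialized heap buffer, analogous to the ccmalloc'ed carrieds array. */
	float* const carrieds = (float*)malloc(sizeof(float) * 4);
	/* Assume no carried ref seeds slot 0 on this path. */
	const int carried_ref_count = 0;
	int j;
	for (j = 0; j < carried_ref_count; j++)
		carrieds[j] = 0;
	/* The left expression of the compound assignment is an uninitialized value. */
	carrieds[0] += 1.f;
	free(carrieds);
	return 0;
}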
1 | #include "ccv_nnc.h" | |||
2 | #include "ccv_nnc_easy.h" | |||
3 | #include "ccv_nnc_internal.h" | |||
4 | #include "ccv_internal.h" | |||
5 | #include "_ccv_nnc_micro.h" | |||
6 | ||||
7 | // MARK - Level-1 API | |||
8 | ||||
9 | static int _ccv_nnc_micro_index_interpret(const ccv_nnc_micro_loop_index_term_t index, const int* const loop_counter, const int* const shapes, const ccv_nnc_micro_scalar_t* const values, const int parameter_size) | |||
10 | { | |||
11 | switch (index.type) | |||
12 | { | |||
13 | case CCV_NNC_MICRO_LOOP_INDEX_TYPE_VAL: | |||
14 | return index.immediate_value; | |||
15 | case CCV_NNC_MICRO_LOOP_INDEX_TYPE_ID: | |||
16 | switch (index.id.type) | |||
17 | { | |||
18 | case CCV_NNC_MICRO_AXIS_SIZE_ID: | |||
19 | return shapes[index.id.id * CCV_NNC_MAX_DIM_ALLOC + index.id.d]; | |||
20 | case CCV_NNC_MICRO_LOOP_ID: | |||
21 | return loop_counter[index.id.id]; | |||
22 | case CCV_NNC_MICRO_SCALAR_ID: | |||
23 | switch (values[index.id.id].type) | |||
24 | { | |||
25 | case CCV_8U: | |||
26 | return values[index.id.id].u8; | |||
27 | case CCV_32S: | |||
28 | return values[index.id.id].i32; | |||
29 | case CCV_64S: | |||
30 | return (int)values[index.id.id].i64; | |||
31 | } | |||
32 | break; | |||
33 | } | |||
34 | break; | |||
35 | case CCV_NNC_MICRO_LOOP_INDEX_TYPE_BINARY: { | |||
36 | const int left = _ccv_nnc_micro_index_interpret(index.binary->left, loop_counter, shapes, values, parameter_size); | |||
37 | const int right = _ccv_nnc_micro_index_interpret(index.binary->right, loop_counter, shapes, values, parameter_size); | |||
38 | switch (index.binary->op) | |||
39 | { | |||
40 | case CCV_NNC_MICRO_BINARY_OP_PLUS: | |||
41 | return left + right; | |||
42 | case CCV_NNC_MICRO_BINARY_OP_MINUS: | |||
43 | return left - right; | |||
44 | case CCV_NNC_MICRO_BINARY_OP_MUL: | |||
45 | return left * right; | |||
46 | case CCV_NNC_MICRO_BINARY_OP_DIV: | |||
47 | return left / right; | |||
48 | case CCV_NNC_MICRO_BINARY_OP_MAX: | |||
49 | return ccv_max(left, right); | |||
50 | case CCV_NNC_MICRO_BINARY_OP_MIN: | |||
51 | return ccv_min(left, right); | |||
52 | } | |||
53 | break; | |||
54 | } | |||
55 | } | |||
56 | return 0; | |||
57 | } | |||
58 | ||||
59 | static float _ccv_nnc_micro_expression_interpret(const ccv_nnc_micro_loop_expression_t* const expression, const int* const loop_counter, const ccv_nnc_micro_scalar_t* const carrieds, const int carried_count, float* const* const vars_mem, const int* const shapes, const ccv_nnc_micro_scalar_t* const values, const int parameter_size, int* const out_of_bound_ref) | |||
60 | { | |||
61 | int i; | |||
62 | switch (expression->type) | |||
63 | { | |||
64 | case CCV_NNC_MICRO_LOOP_EXPR_TYPE_ID: { | |||
65 | assert(expression->id.type == CCV_NNC_MICRO_LOOP_CARRIED_ID); | |||
66 | return carrieds[expression->id.id].f32; | |||
67 | } | |||
68 | case CCV_NNC_MICRO_LOOP_EXPR_TYPE_VAL: { | |||
69 | return expression->immediate_value.f32; | |||
70 | } | |||
71 | case CCV_NNC_MICRO_LOOP_EXPR_TYPE_VAR: { | |||
72 | const ccv_nnc_micro_loop_variable_t variable = expression->variable; | |||
73 | assert(variable.id.type == CCV_NNC_MICRO_TENSOR_ID); | |||
74 | float* ptr = vars_mem[variable.id.id]; | |||
75 | size_t size = 1; | |||
76 | int out_of_bound = 0; | |||
77 | for (i = variable.index_count - 1; !out_of_bound && i >= 0; i--) | |||
78 | { | |||
79 | const int index = _ccv_nnc_micro_index_interpret(variable.index[i], loop_counter, shapes, values, parameter_size); | |||
80 | if (!variable.no_check_bound[i] && | |||
81 | (index < 0 || index >= shapes[variable.id.id * CCV_NNC_MAX_DIM_ALLOC + i])) | |||
82 | out_of_bound = 1; | |||
83 | ptr += index * size; | |||
84 | size *= shapes[variable.id.id * CCV_NNC_MAX_DIM_ALLOC + i]; | |||
85 | } | |||
86 | if (out_of_bound) | |||
87 | { | |||
88 | *out_of_bound_ref = 1; | |||
89 | return 0; | |||
90 | } | |||
91 | return ptr[0]; | |||
92 | } | |||
93 | case CCV_NNC_MICRO_LOOP_EXPR_TYPE_UNARY: { | |||
94 | const float left = _ccv_nnc_micro_expression_interpret(expression->unary.x, loop_counter, carrieds, carried_count, vars_mem, shapes, values, parameter_size, out_of_bound_ref); | |||
95 | if (*out_of_bound_ref) | |||
96 | return 0; | |||
97 | switch (expression->unary.unary_op) | |||
98 | { | |||
99 | case CCV_NNC_MICRO_UNARY_OP_EXP: | |||
100 | return exp(left); | |||
101 | case CCV_NNC_MICRO_UNARY_OP_LOG: | |||
102 | return log(left); | |||
103 | } | |||
104 | break; | |||
105 | } | |||
106 | case CCV_NNC_MICRO_LOOP_EXPR_TYPE_BINARY: { | |||
107 | const float left = _ccv_nnc_micro_expression_interpret(expression->binary.left, loop_counter, carrieds, carried_count, vars_mem, shapes, values, parameter_size, out_of_bound_ref); | |||
108 | if (*out_of_bound_ref) | |||
109 | return 0; | |||
110 | const float right = _ccv_nnc_micro_expression_interpret(expression->binary.right, loop_counter, carrieds, carried_count, vars_mem, shapes, values, parameter_size, out_of_bound_ref); | |||
111 | if (*out_of_bound_ref) | |||
112 | return 0; | |||
113 | switch (expression->binary.binary_op) | |||
114 | { | |||
115 | case CCV_NNC_MICRO_BINARY_OP_PLUS: | |||
116 | return left + right; | |||
117 | case CCV_NNC_MICRO_BINARY_OP_MINUS: | |||
118 | return left - right; | |||
119 | case CCV_NNC_MICRO_BINARY_OP_MUL: | |||
120 | return left * right; | |||
121 | case CCV_NNC_MICRO_BINARY_OP_DIV: | |||
122 | return left / right; | |||
123 | case CCV_NNC_MICRO_BINARY_OP_MAX: | |||
124 | return ccv_max(left, right); | |||
125 | case CCV_NNC_MICRO_BINARY_OP_MIN: | |||
126 | return ccv_min(left, right); | |||
127 | } | |||
128 | break; | |||
129 | } | |||
130 | } | |||
131 | return 0; | |||
132 | } | |||
133 | ||||
134 | static void _ccv_nnc_micro_statement_interpret(const ccv_nnc_micro_loop_statement_t statement, const int* const loop_counter, ccv_nnc_micro_scalar_t* const carrieds, const int carried_count, float* const* const vars_mem, const int* const shapes, const ccv_nnc_micro_scalar_t* const values, const int parameter_size) | |||
135 | { | |||
136 | int i; | |||
137 | switch (statement.type) | |||
138 | { | |||
139 | case CCV_NNC_MICRO_LOOP_STATEMENT_TYPE_ASSIGNMENT: { | |||
140 | assert(statement.assignment.lvalue.id.type == CCV_NNC_MICRO_TENSOR_ID); | |||
141 | const ccv_nnc_micro_loop_variable_t variable = statement.assignment.lvalue; | |||
142 | float* ptr = vars_mem[variable.id.id]; | |||
143 | size_t size = 1; | |||
144 | int out_of_bound = 0; | |||
145 | for (i = variable.index_count - 1; !out_of_bound && i >= 0; i--) | |||
146 | { | |||
147 | const int index = _ccv_nnc_micro_index_interpret(variable.index[i], loop_counter, shapes, values, parameter_size); | |||
148 | if (!variable.no_check_bound[i] && | |||
149 | (index < 0 || index >= shapes[variable.id.id * CCV_NNC_MAX_DIM_ALLOC + i])) | |||
150 | out_of_bound = 1; | |||
151 | ptr += index * size; | |||
152 | size *= shapes[variable.id.id * CCV_NNC_MAX_DIM_ALLOC + i]; | |||
153 | } | |||
154 | if (out_of_bound) | |||
155 | return; | |||
156 | const float val = _ccv_nnc_micro_expression_interpret(&statement.assignment.rvalue, loop_counter, carrieds, carried_count, vars_mem, shapes, values, parameter_size, &out_of_bound); | |||
157 | if (out_of_bound) | |||
158 | return; | |||
159 | ptr[0] = val; | |||
160 | break; | |||
161 | } | |||
162 | case CCV_NNC_MICRO_LOOP_STATEMENT_TYPE_COMPOUND_ASSIGNMENT: { | |||
163 | int out_of_bound = 0; | |||
164 | const float rvalue = _ccv_nnc_micro_expression_interpret(&statement.compound_assignment.rvalue, loop_counter, carrieds, carried_count, vars_mem, shapes, values, parameter_size, &out_of_bound); | |||
165 | if (out_of_bound) | |||
166 | return; | |||
167 | switch (statement.compound_assignment.lvalue.type) | |||
168 | { | |||
169 | case CCV_NNC_MICRO_LOOP_EXPR_TYPE_ID: { | |||
170 | assert(statement.compound_assignment.lvalue.id.type == CCV_NNC_MICRO_LOOP_CARRIED_ID); | |||
171 | switch (statement.compound_assignment.lvalue.id.d) | |||
172 | { | |||
173 | case CCV_NNC_MICRO_REDUCE_OP_MAX: | |||
174 | carrieds[statement.compound_assignment.lvalue.id.id].f32 = ccv_max(carrieds[statement.compound_assignment.lvalue.id.id].f32, rvalue); | |||
175 | break; | |||
176 | case CCV_NNC_MICRO_REDUCE_OP_MIN: | |||
177 | carrieds[statement.compound_assignment.lvalue.id.id].f32 = ccv_min(carrieds[statement.compound_assignment.lvalue.id.id].f32, rvalue); | |||
178 | break; | |||
179 | case CCV_NNC_MICRO_REDUCE_OP_ARGMAX: | |||
180 | assert(0); | |||
181 | break; | |||
182 | case CCV_NNC_MICRO_REDUCE_OP_ARGMIN: | |||
183 | assert(0); | |||
184 | break; | |||
185 | case CCV_NNC_MICRO_REDUCE_OP_MEAN: | |||
186 | carrieds[statement.compound_assignment.lvalue.id.id].f32 += rvalue; | |||
| warning: The left expression of the compound assignment is an uninitialized value. The computed value will also be garbage | |||
187 | break; | |||
188 | case CCV_NNC_MICRO_REDUCE_OP_SUM: | |||
189 | carrieds[statement.compound_assignment.lvalue.id.id].f32 += rvalue; | |||
190 | break; | |||
191 | case CCV_NNC_MICRO_REDUCE_OP_PROD: | |||
192 | carrieds[statement.compound_assignment.lvalue.id.id].f32 *= rvalue; | |||
193 | break; | |||
194 | } | |||
195 | break; | |||
196 | } | |||
197 | case CCV_NNC_MICRO_LOOP_EXPR_TYPE_VAR: { | |||
198 | assert(statement.compound_assignment.lvalue.id.type == CCV_NNC_MICRO_TENSOR_ID); | |||
199 | const ccv_nnc_micro_loop_variable_t variable = statement.compound_assignment.lvalue.variable; | |||
200 | float* ptr = vars_mem[variable.id.id]; | |||
201 | size_t size = 1; | |||
202 | for (i = variable.index_count - 1; !out_of_bound && i >= 0; i--) | |||
203 | { | |||
204 | const int index = _ccv_nnc_micro_index_interpret(variable.index[i], loop_counter, shapes, values, parameter_size); | |||
205 | if (!variable.no_check_bound[i] && | |||
206 | (index < 0 || index >= shapes[variable.id.id * CCV_NNC_MAX_DIM_ALLOC + i])) | |||
207 | out_of_bound = 1; | |||
208 | ptr += index * size; | |||
209 | size *= shapes[variable.id.id * CCV_NNC_MAX_DIM_ALLOC + i]; | |||
210 | } | |||
211 | if (out_of_bound) | |||
212 | return; | |||
213 | ptr[0] += rvalue; | |||
214 | break; | |||
215 | } | |||
216 | } | |||
217 | break; | |||
218 | } | |||
219 | } | |||
220 | } | |||
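
The flagged line 186 sits in the CCV_NNC_MICRO_REDUCE_OP_MEAN branch above; like SUM and PROD it assumes the carried slot already holds a neutral starting value (0 or 1), which is written elsewhere. A stripped-down sketch of that accumulate step, using hypothetical names and op values, shows why the seed matters:

/* Illustration only; the enum and function below are hypothetical. */
enum { OP_MAX, OP_MIN, OP_SUM, OP_PROD };

static void accumulate(float* const carried, const int reduce_op, const float rvalue)
{
	switch (reduce_op)
	{
		case OP_MAX: if (rvalue > *carried) *carried = rvalue; break;
		case OP_MIN: if (rvalue < *carried) *carried = rvalue; break;
		case OP_SUM: *carried += rvalue; break; /* correct only if *carried was seeded to 0 */
		case OP_PROD: *carried *= rvalue; break; /* correct only if *carried was seeded to 1 */
	}
}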
221 | ||||
222 | static void _ccv_nnc_micro_loop_interpret(const ccv_nnc_micro_loop_t* const loops, const int loop_count, const int index, int* const loop_counter, ccv_nnc_micro_scalar_t* const carrieds, const int carried_count, float* const* const vars_mem, const int* const shapes, const ccv_nnc_micro_scalar_t* const values, const int parameter_size) | |||
223 | { | |||
224 | if (index >= loop_count) | |||
225 | return; | |||
226 | const int start_index = _ccv_nnc_micro_index_interpret(loops[index].start_index, loop_counter, shapes, values, parameter_size); | |||
227 | const int end_index = _ccv_nnc_micro_index_interpret(loops[index].end_index, loop_counter, shapes, values, parameter_size); | |||
228 | int i, j; | |||
229 | const ccv_nnc_micro_loop_statement_t* const statements = loops[index].statements; | |||
230 | const int statement_count = loops[index].statement_count; | |||
231 | const ccv_nnc_micro_loop_carried_t* const carried_refs = loops[index].carrieds; | |||
232 | const int carried_ref_count = loops[index].carried_count; | |||
233 | for (i = start_index; i < end_index; i++) | |||
234 | { | |||
235 | loop_counter[loops[index].id.id] = i; | |||
236 | for (j = 0; j < carried_ref_count; j++) | |||
237 | { | |||
238 | assert(carried_refs[j].id.type == CCV_NNC_MICRO_LOOP_CARRIED_ID); | |||
239 | assert(carried_refs[j].id.id < carried_count); | |||
240 | switch (carried_refs[j].id.d) | |||
241 | { | |||
242 | case CCV_NNC_MICRO_REDUCE_OP_MAX: | |||
243 | carrieds[carried_refs[j].id.id].f32 = -FLT_MAX; | |||
244 | break; | |||
245 | case CCV_NNC_MICRO_REDUCE_OP_MIN: | |||
246 | carrieds[carried_refs[j].id.id].f32 = FLT_MAX; | |||
247 | break; | |||
248 | case CCV_NNC_MICRO_REDUCE_OP_ARGMAX: | |||
249 | carrieds[carried_refs[j].id.id].i32 = -1; | |||
250 | break; | |||
251 | case CCV_NNC_MICRO_REDUCE_OP_ARGMIN: | |||
252 | carrieds[carried_refs[j].id.id].i32 = -1; | |||
253 | break; | |||
254 | case CCV_NNC_MICRO_REDUCE_OP_MEAN: | |||
255 | carrieds[carried_refs[j].id.id].f32 = 0; | |||
256 | break; | |||
257 | case CCV_NNC_MICRO_REDUCE_OP_SUM: | |||
258 | carrieds[carried_refs[j].id.id].f32 = 0; | |||
259 | break; | |||
260 | case CCV_NNC_MICRO_REDUCE_OP_PROD: | |||
261 | carrieds[carried_refs[j].id.id].f32 = 1; | |||
262 | break; | |||
263 | } | |||
264 | } | |||
265 | _ccv_nnc_micro_loop_interpret(loops, loop_count, index + 1, loop_counter, carrieds, carried_count, vars_mem, shapes, values, parameter_size); | |||
266 | for (j = 0; j < statement_count; j++) | |||
267 | _ccv_nnc_micro_statement_interpret(statements[j], loop_counter, carrieds, carried_count, vars_mem, shapes, values, parameter_size); | |||
268 | } | |||
269 | } | |||
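
This function is where the carried slots get their seeds: on every iteration of a loop level, each carried id attached to that level is reset (-FLT_MAX, FLT_MAX, 0 or 1 depending on the reduce op), then the inner loops are interpreted, then this level's statements run. The analyzer's garbage-value path presumably assumes a compound assignment whose carried id was never covered by this seeding. The per-iteration order, reduced to a sketch with hypothetical stub names:

/* Illustration only; the stubs below stand in for the real interpreter calls. */
static void seed_carried_values(void) { /* -FLT_MAX / FLT_MAX / 0 / 1 per reduce op */ }
static void interpret_inner_loops(void) { /* recurse into loops[index + 1] */ }
static void run_statements(void) { /* assignments, including the += at line 186 */ }

static void loop_level(const int start_index, const int end_index)
{
	int i;
	for (i = start_index; i < end_index; i++)
	{
		seed_carried_values();
		interpret_inner_loops();
		run_statements();
	}
}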
270 | ||||
271 | void ccv_nnc_micro_combine_interpret(ccv_nnc_micro_combine_t* const combine, const uint32_t cmd, ccv_nnc_tensor_t* const* const inputs, const int input_size, const ccv_nnc_micro_scalar_t* const values, const int parameter_size, ccv_nnc_tensor_t* const* const outputs, const int output_size) | |||
272 | { | |||
273 | // We haven't optimized for emit_grad at the moment yet. | |||
274 | assert(cmd == CCV_NNC_CUSTOM_FORWARD || cmd == CCV_NNC_CUSTOM_BACKWARD); | |||
275 | int i, j; | |||
276 | const ccv_nnc_micro_program_t* const program = cmd == CCV_NNC_CUSTOM_FORWARD ? &combine->forward : &combine->backward; | |||
277 | const int var_count = program->var_count; | |||
278 | assert(input_size == program->input_size); | |||
279 | assert(output_size == program->output_size); | |||
280 | assert(parameter_size == combine->parameter_size); | |||
281 | int* const shapes = (int*)cccalloc(var_count, sizeof(int) * CCV_NNC_MAX_DIM_ALLOC); | |||
282 | ccv_nnc_micro_tensor_t* const vars = program->vars; | |||
283 | for (i = 0; i < input_size; i++) | |||
284 | memcpy(shapes + program->inputs[i] * CCV_NNC_MAX_DIM_ALLOC, &inputs[i]->info.dim, sizeof(int) * CCV_NNC_MAX_DIM_ALLOC); | |||
285 | int loop_counter[CCV_NNC_MAX_DIM_ALLOC]; | |||
286 | for (i = 0; i < var_count; i++) | |||
287 | { | |||
288 | int flag = 0; | |||
289 | for (j = 0; !flag && j < input_size; j++) | |||
290 | flag = (program->inputs[j] == i); | |||
291 | if (flag) | |||
292 | continue; | |||
293 | if (vars[i].shape) | |||
294 | { | |||
295 | for (j = 0; j < vars[i].dimensions; j++) | |||
296 | shapes[i * CCV_NNC_MAX_DIM_ALLOC + j] = _ccv_nnc_micro_index_interpret(vars[i].shape[j], loop_counter, shapes, values, parameter_size); | |||
297 | } else | |||
298 | memcpy(shapes + i * CCV_NNC_MAX_DIM_ALLOC, shapes + vars[i].input * CCV_NNC_MAX_DIM_ALLOC, sizeof(int) * CCV_NNC_MAX_DIM_ALLOC); | |||
299 | } | |||
300 | const ccv_array_t* const equal_assertions = combine->equal_assertions; | |||
301 | for (i = 0; i < equal_assertions->rnum; i++) | |||
302 | { | |||
303 | ccv_nnc_micro_id_equal_assertion_t* const equal_assertion = ccv_array_get(equal_assertions, i); | |||
304 | assert(shapes[equal_assertion->left.id * CCV_NNC_MAX_DIM_ALLOC + equal_assertion->left.d] == shapes[equal_assertion->right.id * CCV_NNC_MAX_DIM_ALLOC + equal_assertion->right.d]); | |||
305 | } | |||
306 | size_t total_size = 0; | |||
307 | for (i = 0; i < var_count; i++) | |||
308 | { | |||
309 | int flag = 0; | |||
310 | for (j = 0; !flag && j < input_size; j++) | |||
311 | flag = (program->inputs[j] == i); | |||
312 | for (j = 0; !flag && j < output_size; j++) | |||
313 | flag = (program->outputs[j] == i); | |||
314 | if (flag) | |||
315 | continue; | |||
316 | if (vars[i].no_alloc) // This is skipped. | |||
317 | continue; | |||
318 | // allocating memory for these. | |||
319 | size_t size = 1; | |||
320 | for (j = 0; j < vars[i].dimensions; j++) | |||
321 | size *= shapes[i * CCV_NNC_MAX_DIM_ALLOC + j]; | |||
322 | total_size += size; | |||
323 | } | |||
324 | float** const vars_mem = (float**)ccmalloc(sizeof(float*) * var_count + sizeof(float) * total_size); | |||
325 | float* ptr = (float*)(vars_mem + var_count); | |||
326 | // Assuming these are not tensor_view_t. | |||
327 | for (i = 0; i < output_size; i++) | |||
328 | { | |||
329 | assert(CCV_IS_TENSOR_CONTIGUOUS(outputs[i])); | |||
330 | vars_mem[program->outputs[i]] = outputs[i]->data.f32; | |||
331 | } | |||
332 | for (i = 0; i < var_count; i++) | |||
333 | { | |||
334 | int flag = 0; | |||
335 | for (j = 0; !flag && j < input_size; j++) | |||
336 | flag = (program->inputs[j] == i); | |||
337 | for (j = 0; !flag && j < output_size; j++) | |||
338 | flag = (program->outputs[j] == i); | |||
339 | if (flag) | |||
340 | continue; | |||
341 | if (vars[i].no_alloc) // This is skipped. | |||
342 | { | |||
343 | vars_mem[i] = 0; | |||
344 | continue; | |||
345 | } | |||
346 | // allocating memory for these. | |||
347 | size_t size = 1; | |||
348 | for (j = 0; j < vars[i].dimensions; j++) | |||
349 | size *= shapes[i * CCV_NNC_MAX_DIM_ALLOC + j]; | |||
350 | vars_mem[i] = ptr; | |||
351 | ptr += size; | |||
352 | } | |||
353 | for (i = 0; i < input_size; i++) | |||
354 | { | |||
355 | assert(CCV_IS_TENSOR_CONTIGUOUS(inputs[i])); | |||
356 | vars_mem[program->inputs[i]] = inputs[i]->data.f32; | |||
357 | } | |||
358 | ccv_nnc_micro_function_t* const functions = program->functions; | |||
359 | const int function_count = program->function_count; | |||
360 | int max_carried_count = 0; | |||
361 | for (i = 0; i < function_count; i++) | |||
362 | { | |||
363 | const int block_count = functions[i].block_count; | |||
364 | ccv_nnc_micro_loop_block_t* const blocks = block_count == 1 ? &functions[i].one_block : functions[i].blocks; | |||
365 | for (j = 0; j < block_count; j++) | |||
366 | max_carried_count = ccv_max(max_carried_count, blocks[j].carried_count); | |||
367 | } | |||
368 | ccv_nnc_micro_scalar_t* const carrieds = max_carried_count > 0 ? (ccv_nnc_micro_scalar_t*)ccmalloc(sizeof(ccv_nnc_micro_scalar_t) * max_carried_count) : 0; | |||
369 | for (i = 0; i < function_count; i++) | |||
370 | { | |||
371 | const int block_count = functions[i].block_count; | |||
372 | ccv_nnc_micro_loop_block_t* const blocks = block_count == 1 ? &functions[i].one_block : functions[i].blocks; | |||
373 | for (j = 0; j < block_count; j++) | |||
374 | _ccv_nnc_micro_loop_interpret(blocks[j].loops, blocks[j].loop_count, 0, loop_counter, carrieds, blocks[j].carried_count, vars_mem, shapes, values, parameter_size); | |||
375 | } | |||
376 | if (carrieds) | |||
377 | ccfree(carrieds); | |||
378 | ccfree(vars_mem); | |||
379 | ccfree(shapes); | |||
380 | } |
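
If the goal is simply to make the indeterminate-value path impossible, one option (a sketch of a possible mitigation, not necessarily the intended fix) is to zero-initialize the carried buffer at allocation, mirroring how shapes is already allocated with cccalloc at line 281; the snippet below is line 368 rewritten under that assumption:

/* Hypothetical alternative for line 368: allocate the carrieds buffer zeroed. */
ccv_nnc_micro_scalar_t* const carrieds = max_carried_count > 0 ? (ccv_nnc_micro_scalar_t*)cccalloc(max_carried_count, sizeof(ccv_nnc_micro_scalar_t)) : 0;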