Bug Summary

File: nnc/ccv_cnnp_dataframe_addons.c
Warning: line 423, column 43
Division by zero
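
Note on the defect: the path reported below (steps 13 through 18 in the annotated source) assumes that ccv_nnc_tensor_get_c(input->info) returns 0, so the divide at line 423 (hw_count = tensor_count / c) becomes a division by zero. A minimal sketch of one possible guard for that spot follows, assuming the channel count really can come back as 0 for degenerate tensor dims; it only illustrates the analyzer's assumption and is not a confirmed fix from the project:

	const int c = ccv_nnc_tensor_get_c(input->info);
	assert(c > 0); /* hypothetical guard: rules out the analyzer's c == 0 path */
	const size_t hw_count = tensor_count / c;

Alternatively, the c == 0 case could fall back to the plain memcpy branch taken at line 419; either way, the point is to make the assumption about c explicit before dividing.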

Annotated Source Code


clang -cc1 -triple x86_64-unknown-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name ccv_cnnp_dataframe_addons.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model static -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -target-feature +sse2 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -resource-dir /usr/local/lib/clang/8.0.0 -I ../ -I /usr/local/cuda/include -D HAVE_CBLAS -D HAVE_LIBPNG -D HAVE_LIBJPEG -D HAVE_FFTW3 -D HAVE_PTHREAD -D HAVE_UCONTEXT -D HAVE_LIBLINEAR -D HAVE_TESSERACT -D HAVE_AVCODEC -D HAVE_AVFORMAT -D HAVE_AVUTIL -D HAVE_SWSCALE -D USE_DISPATCH -D HAVE_SSE2 -D HAVE_GSL -D HAVE_CUDA -D HAVE_CUDNN -D HAVE_NCCL -I /usr/local/include -internal-isystem /usr/local/include -internal-isystem /usr/local/lib/clang/8.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -fdebug-compilation-dir /home/liu/buildslave/linux-x64-runtests/build/lib/nnc -ferror-limit 19 -fmessage-length 0 -fblocks -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -o /home/liu/buildslave/public_html/analyze/2019-05-04-163002-105371-1 -x c ccv_cnnp_dataframe_addons.c -faddrsig

ccv_cnnp_dataframe_addons.c

1#include "ccv_nnc.h"
2#include "ccv_nnc_easy.h"
3#include "ccv_nnc_internal.h"
4#include "ccv_internal.h"
5#include "_ccv_cnnp_dataframe.h"
6
7#include <3rdparty/sfmt/SFMT.h>
8
9#pragma mark - Create Dataframe from Array
10
11static void _ccv_cnnp_array_enum(const int column_idx, const int* const row_idxs, const int row_size, void** const data, void* const context, ccv_nnc_stream_context_t* const stream_context)
12{
13 int i;
14 ccv_array_t* const array = (ccv_array_t*)context;
15 for (i = 0; i < row_size; i++)
16 data[i] = ccv_array_get(array, row_idxs[i]);
17}
18
19ccv_cnnp_dataframe_t* ccv_cnnp_dataframe_from_array_new(ccv_array_t* const array)
20{
21 const ccv_cnnp_column_data_t array_column_data = {
22 .data_enum = _ccv_cnnp_array_enum,
23 .context = array
24 };
25 return ccv_cnnp_dataframe_new(&array_column_data, 1, array->rnum);
26}
27
28typedef struct {
29 ccv_cnnp_dataframe_tuple_t tuple;
30 int tensor_offset;
31 int device_id;
32} ccv_cnnp_copy_to_gpu_context_t;
33
34#pragma mark - Copy Tensors from CPU to GPU
35
36static void _ccv_cnnp_tensor_list_deinit(void* const data, void* const context)
37{
38 ccv_cnnp_copy_to_gpu_context_t* const copy_to_gpu = (ccv_cnnp_copy_to_gpu_context_t*)context;
39 ccv_nnc_tensor_t** const tensor_list = (ccv_nnc_tensor_t**)data;
40 int i;
41 for (i = 0; i < copy_to_gpu->tuple.size; i++)
42 if (tensor_list[i])
43 ccv_nnc_tensor_free(tensor_list[i]);
44 ccfree(tensor_list);
45}
46
47static void _ccv_cnnp_copy_to_gpu(void*** const column_data, const int column_size, const int batch_size, void** const data, void* const context, ccv_nnc_stream_context_t* const stream_context)
48{
49 const ccv_cnnp_copy_to_gpu_context_t* const copy_to_gpu_context = (ccv_cnnp_copy_to_gpu_context_t*)context;
50 int i, j;
51 for (i = 0; i < batch_size; i++)
52 {
53 ccv_nnc_tensor_t** inputs = (ccv_nnc_tensor_t**)column_data[0][i] + copy_to_gpu_context->tensor_offset;
54 ccv_nnc_tensor_t** outputs = (ccv_nnc_tensor_t**)data[i];
55 if (!outputs)
56 {
57 outputs = (ccv_nnc_tensor_t**)(data[i] = ccmalloc(sizeof(ccv_nnc_tensor_t*) * copy_to_gpu_context->tuple.size));
58 for (j = 0; j < copy_to_gpu_context->tuple.size; j++)
59 {
60 ccv_nnc_tensor_param_t params = inputs[j]->info;
61 params.type &= ~CCV_TENSOR_CPU_MEMORY;
62 params.type |= CCV_TENSOR_GPU_MEMORY; // Change to GPU memory.
63 CCV_TENSOR_SET_DEVICE_ID(params.type, copy_to_gpu_context->device_id);
64 outputs[j] = ccv_nnc_tensor_new(0, params, 0);
65 }
66 }
67 for (j = 0; j < copy_to_gpu_context->tuple.size; j++)
68 ccv_nnc_tensor_pin_memory(inputs[j]);
69 ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, inputs, copy_to_gpu_context->tuple.size, outputs, copy_to_gpu_context->tuple.size, stream_context);
70 }
71}
72
73int ccv_cnnp_dataframe_copy_to_gpu(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const int tensor_offset, const int tensor_size, int device_id)
74{
75 assert(tensor_size > 0);
76 int stream_type = CCV_STREAM_CONTEXT_GPU;
77 CCV_STREAM_SET_DEVICE_ID(stream_type, device_id);
78 ccv_cnnp_copy_to_gpu_context_t* const copy_to_gpu_context = (ccv_cnnp_copy_to_gpu_context_t*)ccmalloc(sizeof(ccv_cnnp_copy_to_gpu_context_t));
79 copy_to_gpu_context->tuple.size = tensor_size;
80 copy_to_gpu_context->tensor_offset = tensor_offset;
81 copy_to_gpu_context->device_id = device_id;
82 return ccv_cnnp_dataframe_map(dataframe, _ccv_cnnp_copy_to_gpu, stream_type, _ccv_cnnp_tensor_list_deinit, COLUMN_ID_LIST(column_idx), copy_to_gpu_context, (ccv_cnnp_column_data_context_deinit_f)ccfree);
83}
84
85#pragma mark - Make Auxiliary Tensor as a new Column
86
87static void _ccv_cnnp_tensor_deinit(void* const data, void* const context)
88{
89 ccv_nnc_tensor_free((ccv_nnc_tensor_t*)data);
90}
91
92static void _ccv_cnnp_tensor_new(const int column_idx, const int* const row_idxs, const int row_size, void** const data, void* const context, ccv_nnc_stream_context_t* const stream_context)
93{
94 ccv_nnc_tensor_param_t params = *(ccv_nnc_tensor_param_t*)context;
95 int i;
96 for (i = 0; i < row_size; i++)
97 if (!data[i])
98 data[i] = ccv_nnc_tensor_new(0, params, 0);
99}
100
101int ccv_cnnp_dataframe_add_aux(ccv_cnnp_dataframe_t* const dataframe, const ccv_nnc_tensor_param_t params)
102{
103 int stream_type = CCV_TENSOR_GET_MEMORY(params.type) == CCV_TENSOR_CPU_MEMORY ? 0 : CCV_STREAM_CONTEXT_GPU;
104 if (stream_type == CCV_STREAM_CONTEXT_GPU)
105 CCV_STREAM_SET_DEVICE_ID(stream_type, CCV_TENSOR_GET_DEVICE_ID(params.type));
106 ccv_nnc_tensor_param_t* const context = (ccv_nnc_tensor_param_t*)ccmalloc(sizeof(ccv_nnc_tensor_param_t));
107 context[0] = params;
108 return ccv_cnnp_dataframe_add(dataframe, _ccv_cnnp_tensor_new, stream_type, _ccv_cnnp_tensor_deinit, context, (ccv_cnnp_column_data_context_deinit_f)ccfree);
109}
110
111#pragma mark - Load Tensor from File Path
112
113static void _ccv_cnnp_image_deinit(void* const data, void* const context)
114{
115 ccv_matrix_free(data);
116}
117
118static void _ccv_cnnp_read_image(void*** const column_data, const int column_size, const int batch_size, void** const data, void* const context, ccv_nnc_stream_context_t* const stream_context)
119{
120 parallel_for(i, batch_size) {
121 if (data[i])
122 ccv_matrix_free(data[i]);
123 off_t structof = (off_t)context;
124 char* const filename = *(char**)((char*)column_data[0][i] + structof);
125 data[i] = 0;
126 ccv_read(filename, (ccv_dense_matrix_t**)&data[i], CCV_IO_ANY_FILE | CCV_IO_RGB_COLOR);
127 } parallel_endfor
128}
129
130int ccv_cnnp_dataframe_read_image(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const off_t structof)
131{
132 return ccv_cnnp_dataframe_map(dataframe, _ccv_cnnp_read_image, 0, _ccv_cnnp_image_deinit, COLUMN_ID_LIST(column_idx), (void*)(uintptr_t)structof, 0);
133}
134
135#pragma mark - Apply Random Jitter to Image
136
137typedef struct {
138 sfmt_t sfmt;
139 int datatype;
140 ccv_cnnp_random_jitter_t random_jitter;
141} ccv_cnnp_random_jitter_context_t;
142
143static void _ccv_cnnp_image_lighting(ccv_dense_matrix_t* image, const float alpha_r, const float alpha_g, const float alpha_b)
144{
145 assert(CCV_GET_DATA_TYPE(image->type) == CCV_32F);
146 assert(CCV_GET_CHANNEL(image->type) == CCV_C3);
147 // These eigenvector values can be computed from the ImageNet dataset (see ccv_convnet for how that is done). Here I just copied
148 // from mxnet: https://github.com/apache/incubator-mxnet/blob/master/src/operator/image/image_random-inl.h#L632
149 const float pca_r = alpha_r * (55.46 * -0.5675) + alpha_g * (4.794 * 0.7192) + alpha_b * (1.148 * 0.4009);
150 const float pca_g = alpha_r * (55.46 * -0.5808) + alpha_g * (4.794 * -0.0045) + alpha_b * (1.148 * -0.8140);
151 const float pca_b = alpha_r * (55.46 * -0.5836) + alpha_g * (4.794 * -0.6948) + alpha_b * (1.148 * 0.4203);
152 int i;
153 const int size = image->rows * image->cols;
154 float* const ptr = image->data.f32;
155 for (i = 0; i < size; i++)
156 {
157 ptr[i * 3] = ccv_clamp(ptr[i * 3] + pca_r, 0, 255);
158 ptr[i * 3 + 1] = ccv_clamp(ptr[i * 3 + 1] + pca_g, 0, 255);
159 ptr[i * 3 + 2] = ccv_clamp(ptr[i * 3 + 2] + pca_b, 0, 255);
160 }
161}
162
163static float _ccv_cnnp_random_logexp(sfmt_t* const sfmt, const float jitter)
164{
165 // We want to get something around logarithmic scale, thus, 0 is no good, and infinity is no good. 1 is the same.
166 // jitter is some turbulence we want around 1. We want the range to be around [1 / (1 + jitter), 1 + jitter]
167 // but the distribution is not uniform (50% fall under 1, and 50% fall above 1). The way to do this is to first
168 // get to logarithmic range, doing a uniform sampling, and then convert back.
169 double log_jitter_limit = log(1 + jitter);
170 double log_random_jitter = sfmt_genrand_real1(sfmt) * 2 * log_jitter_limit - log_jitter_limit;
171 return (float)exp(log_random_jitter); // Convert it back to exponential form.
172}
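Worked example (added for clarity, not part of the source): with jitter = 0.25, log_jitter_limit = log(1.25) ≈ 0.223, log_random_jitter is drawn uniformly from [-0.223, 0.223], and the returned factor lies in [exp(-0.223), exp(0.223)] = [1/1.25, 1.25] = [0.8, 1.25], symmetric around 1 on a log scale.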
173
174static void _ccv_cnnp_image_manip(ccv_dense_matrix_t* image, const ccv_cnnp_random_jitter_t random_jitter, sfmt_t* const sfmt)
175{
176 assert(sfmt && CCV_GET_CHANNEL(image->type) == CCV_C3);
177 int idx[4] = {0, 1, 2, 3};
178 sfmt_genrand_shuffle(sfmt, idx, 4, sizeof(int));
179 int i;
180 for (i = 0; i < 4; i++)
181 // change the applying order
182 switch (idx[i])
183 {
184 case 0:
185 if (random_jitter.brightness == 0)
186 break;
187 // introduce some brightness changes to the original image
188 ccv_scale(image, (ccv_matrix_t**)&image, 0, _ccv_cnnp_random_logexp(sfmt, random_jitter.brightness));
189 break;
190 case 1:
191 // introduce some saturation changes to the original image
192 if (random_jitter.saturation == 0)
193 break;
194 ccv_saturation(image, &image, 0, _ccv_cnnp_random_logexp(sfmt, random_jitter.saturation));
195 break;
196 case 2:
197 // introduce some contrast changes to the original image
198 if (random_jitter.contrast == 0)
199 break;
200 ccv_contrast(image, &image, 0, _ccv_cnnp_random_logexp(sfmt, random_jitter.contrast));
201 break;
202 case 3:
203 if (random_jitter.lighting == 0)
204 break;
205 _ccv_cnnp_image_lighting(image, sfmt_genrand_real1(sfmt) * random_jitter.lighting, sfmt_genrand_real1(sfmt) * random_jitter.lighting, sfmt_genrand_real1(sfmt) * random_jitter.lighting);
206 break;
207 }
208}
209
210static void _ccv_cnnp_normalize(ccv_dense_matrix_t* const image, const float mean[3], const float inv_std[3])
211{
212 int i;
213 const int count = image->rows * image->cols;
214 float* ap = image->data.f32;
215 for (i = 0; i < count; i++)
216 {
217 ap[i * 3] = (ap[i * 3] - mean[0]) * inv_std[0];
218 ap[i * 3 + 1] = (ap[i * 3 + 1] - mean[1]) * inv_std[1];
219 ap[i * 3 + 2] = (ap[i * 3 + 2] - mean[2]) * inv_std[2];
220 }
221}
222
223static void _ccv_cnnp_random_jitter(void*** const column_data, const int column_size, const int batch_size, void** const data, void* const context, ccv_nnc_stream_context_t* const stream_context)
224{
225 sfmt_t* const sfmt = (sfmt_t*)alloca(sizeof(sfmt_t) * batch_size);
226 ccv_cnnp_random_jitter_context_t* const ctx = (ccv_cnnp_random_jitter_context_t*)context;
227 int i;
228 for (i = 0; i < batch_size; i++)
229 sfmt_init_gen_rand(&sfmt[i], sfmt_genrand_uint32(&ctx->sfmt));
230 const ccv_cnnp_random_jitter_t random_jitter = ctx->random_jitter;
231 assert(random_jitter.resize.min > 0);
232 assert(random_jitter.resize.max >= random_jitter.resize.min);
233 parallel_for(i, batch_size) {
234 if (data[i])
235 ccv_matrix_free(data[i]);
236 ccv_dense_matrix_t* const input = (ccv_dense_matrix_t*)column_data[0][i];
237 const int resize = ccv_clamp((int)(sfmt_genrand_real1(&sfmt[i]) * (random_jitter.resize.max - random_jitter.resize.min) + 0.5) + random_jitter.resize.min, random_jitter.resize.min, random_jitter.resize.max);
238 int resize_rows = ccv_max(resize, (int)(input->rows * (float)resize / input->cols + 0.5));
239 int resize_cols = ccv_max(resize, (int)(input->cols * (float)resize / input->rows + 0.5));
240 if (random_jitter.aspect_ratio > 0)
241 {
242 const float aspect_ratio = sqrtf(_ccv_cnnp_random_logexp(&sfmt[i], random_jitter.aspect_ratio));
243 resize_rows = (int)(resize_rows * aspect_ratio + 0.5);
244 resize_cols = (int)(resize_cols / aspect_ratio + 0.5);
245 }
246 ccv_dense_matrix_t* resized = 0;
247 // First, resize.
248 if (input->rows > resize && input->cols > resize)
249 ccv_resample(input, &resized, CCV_32F, resize_rows, resize_cols, CCV_INTER_AREA);
250 else if (input->rows != resize_rows || input->cols != resize_cols)
251 ccv_resample(input, &resized, CCV_32F, resize_rows, resize_cols, CCV_INTER_CUBIC);
252 else
253 ccv_shift(input, (ccv_matrix_t**)&resized, CCV_32F, 0, 0); // converting to 32f
254 if (random_jitter.symmetric && (sfmt_genrand_uint32(&sfmt[i]) & 1) == 0)
255 ccv_flip(resized, &resized, 0, CCV_FLIP_X);
256 _ccv_cnnp_image_manip(resized, random_jitter, &sfmt[i]);
257 // Apply normalization before slice. Slice will introduce 0 padding, which won't be correct before normalization.
258 if (random_jitter.normalize.mean[0] != 0 || random_jitter.normalize.std[0] != 1 ||
259 random_jitter.normalize.mean[1] != 0 || random_jitter.normalize.std[1] != 1 ||
260 random_jitter.normalize.mean[2] != 0 || random_jitter.normalize.std[2] != 1)
261 _ccv_cnnp_normalize(resized, random_jitter.normalize.mean, random_jitter.normalize.std);
262 // Then slice down.
263 ccv_dense_matrix_t* patch = 0;
264 if (random_jitter.size.cols > 0 && random_jitter.size.rows > 0 &&
265 ((resized->cols != random_jitter.size.cols || resized->rows != random_jitter.size.rows) ||
266 (random_jitter.offset.x != 0 || random_jitter.offset.y != 0)))
267 {
268 int x = ccv_clamp((int)(sfmt_genrand_real1(&sfmt[i]) * (resized->cols - random_jitter.size.cols + 1)),
269 ccv_min(0, resized->cols - random_jitter.size.cols),
270 ccv_max(0, resized->cols - random_jitter.size.cols));
271 int y = ccv_clamp((int)(sfmt_genrand_real1(&sfmt[i]) * (resized->rows - random_jitter.size.rows + 1)),
272 ccv_min(0, resized->rows - random_jitter.size.rows),
273 ccv_max(0, resized->rows - random_jitter.size.rows));
274 if (random_jitter.offset.x != 0)
275 x += sfmt_genrand_real1(&sfmt[i]) * random_jitter.offset.x * 2 - random_jitter.offset.x;
276 if (random_jitter.offset.y != 0)
277 y += sfmt_genrand_real1(&sfmt[i]) * random_jitter.offset.y * 2 - random_jitter.offset.y;
278 ccv_slice(resized, (ccv_matrix_t**)&patch, CCV_32F, y, x, random_jitter.size.rows, random_jitter.size.cols);
279 ccv_matrix_free(resized);
280 } else
281 patch = resized;
282 data[i] = patch;
283 } parallel_endfor
284}
285
286int ccv_cnnp_dataframe_image_random_jitter(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const int datatype, const ccv_cnnp_random_jitter_t random_jitter)
287{
288 assert(datatype == CCV_32F);
289 ccv_cnnp_random_jitter_context_t* const random_jitter_context = (ccv_cnnp_random_jitter_context_t*)ccmalloc(sizeof(ccv_cnnp_random_jitter_context_t));
290 if (random_jitter.seed)
291 sfmt_init_gen_rand(&random_jitter_context->sfmt, (uint32_t)random_jitter.seed);
292 else
293 sfmt_init_gen_rand(&random_jitter_context->sfmt, (uint32_t)(uintptr_t)dataframe);
294 random_jitter_context->datatype = datatype;
295 random_jitter_context->random_jitter = random_jitter;
296 int i;
297 // The std in the random jitter should be inv_std.
298 for (i = 0; i < 3; i++)
299 random_jitter_context->random_jitter.normalize.std[i] = random_jitter_context->random_jitter.normalize.std[i] ? 1. / random_jitter_context->random_jitter.normalize.std[i] : 1;
300 return ccv_cnnp_dataframe_map(dataframe, _ccv_cnnp_random_jitter, 0, _ccv_cnnp_image_deinit, COLUMN_ID_LIST(column_idx), random_jitter_context, (ccv_cnnp_column_data_context_deinit_f)ccfree);
301}
302
303typedef struct {
304 int range;
305 int datatype;
306 int format;
307 float onval;
308 float offval;
309 off_t structof;
310} ccv_cnnp_one_hot_context_t;
311
312static void _ccv_cnnp_one_hot(void*** const column_data, const int column_size, const int batch_size, void** const data, void* const context, ccv_nnc_stream_context_t* const stream_context)
313{
314 ccv_cnnp_one_hot_context_t* const one_hot = (ccv_cnnp_one_hot_context_t*)context;
315 ccv_nnc_tensor_param_t params = {
316 .datatype = one_hot->datatype,
317 .type = CCV_TENSOR_CPU_MEMORY,
318 .format = one_hot->format,
319 .dim = {
320 one_hot->range,
321 },
322 };
323 parallel_for(i, batch_size) {
324 int j;
325 const int label = *(int*)((char*)column_data[0][i] + one_hot->structof);
326 if (!data[i])
327 data[i] = ccv_nnc_tensor_new(0, params, 0);
328 ccv_nnc_tensor_t* const tensor = (ccv_nnc_tensor_t*)data[i];
329 assert(label >= 0 && label < one_hot->range);
330 for (j = 0; j < one_hot->range; j++)
331 tensor->data.f32[j] = (j == label) ? one_hot->onval : one_hot->offval;
332 } parallel_endfor
333}
334
335int ccv_cnnp_dataframe_one_hot(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const off_t structof, const int range, const float onval, const float offval, const int datatype, const int format)
336{
337 assert(datatype == CCV_32F);
338 ccv_cnnp_one_hot_context_t* const one_hot = (ccv_cnnp_one_hot_context_t*)ccmalloc(sizeof(ccv_cnnp_one_hot_context_t));
339 one_hot->range = range;
340 one_hot->datatype = datatype;
341 one_hot->format = format;
342 one_hot->onval = onval;
343 one_hot->offval = offval;
344 one_hot->structof = structof;
345 return ccv_cnnp_dataframe_map(dataframe, _ccv_cnnp_one_hot, 0, _ccv_cnnp_tensor_deinit, COLUMN_ID_LIST(column_idx), one_hot, (ccv_cnnp_column_data_context_deinit_f)ccfree);
346}
347
348typedef struct {
349 ccv_cnnp_dataframe_tuple_t tuple;
350 int format;
351 int batch_count;
352 int group_count;
353} ccv_cnnp_batch_context_t;
354
355static void _ccv_cnnp_batching_new(void** const input_data, const int input_size, void** const output_data, void* const context, ccv_nnc_stream_context_t* const stream_context)
356{
357 ccv_cnnp_batch_context_t* const batch = (ccv_cnnp_batch_context_t*)context;
358 const int output_tuple_size = batch->tuple.size;
359 const int batch_count = batch->batch_count;
360 const int group_count = batch->group_count;
361 const int input_tuple_size = output_tuple_size / group_count;
362 int i, j, k;
363 assert(input_size > 0);
1. Assuming 'input_size' is > 0
2. Taking true branch
364 if (!output_data[0])
3. Assuming the condition is false
4. Taking false branch
365 {
366 ccv_nnc_tensor_t** const inputs = (ccv_nnc_tensor_t**)input_data[0];
367 ccv_nnc_tensor_t** const tensors = (ccv_nnc_tensor_t**)(output_data[0] = ccmalloc(sizeof(ccv_nnc_tensor_t*) * output_tuple_size));
368 for (i = 0; i < group_count; i++)
369 for (j = 0; j < input_tuple_size; j++)
370 {
371 ccv_nnc_tensor_param_t params = inputs[j]->info;
372 assert(params.datatype == CCV_32F); // Only 32-bit float is supported for now.
373 assert(params.format == CCV_TENSOR_FORMAT_NHWC || params.format == CCV_TENSOR_FORMAT_NCHW);
374 params.format = batch->format;
375 // Special case for dim counts of 3 and 1; in these two cases, the N dimension is not provided.
376 if (batch->format == inputs[j]->info.format)
377 {
378 const int nd = ccv_nnc_tensor_nd(params.dim);
379 if (nd == 3 || nd == 1)
380 {
381 memset(params.dim, 0, sizeof(int) * CCV_NNC_MAX_DIM_ALLOC);
382 memcpy(params.dim + 1, inputs[j]->info.dim, sizeof(int) * nd);
383 }
384 } else {
385 const int nd = ccv_nnc_tensor_nd(params.dim);
386 if (nd == 1)
387 {
388 memset(params.dim, 0, sizeof(int) * CCV_NNC_MAX_DIM_ALLOC);
389 memcpy(params.dim + 1, inputs[j]->info.dim, sizeof(int) * nd);
390 } else if (nd >= 3) {
391 memset(params.dim, 0, sizeof(int) * CCV_NNC_MAX_DIM_ALLOC);
392 const int hw = ccv_nnc_tensor_hw(inputs[j]->info, nd);
393 if (batch->format == CCV_TENSOR_FORMAT_NCHW)
394 {
395 params.dim[1] = ccv_nnc_tensor_get_c(inputs[j]->info);
396 for (k = 0; k < CCV_NNC_MAX_DIM; k++)
397 params.dim[k + 2] = inputs[j]->info.dim[k + hw];
398 } else {
399 params.dim[CCV_NNC_MAX_DIM + 1] = ccv_nnc_tensor_get_c(inputs[j]->info);
400 for (k = 0; k < CCV_NNC_MAX_DIM; k++)
401 params.dim[k + 1] = inputs[j]->info.dim[k + hw];
402 }
403 }
404 }
405 params.dim[0] = batch_count; // Set the batch count now.
406 tensors[i * input_tuple_size + j] = ccv_nnc_tensor_new(0, params, 0);
407 }
408 }
409 for (i = 0; i < group_count; i++)
5. Assuming 'i' is < 'group_count'
6. Loop condition is true. Entering loop body
410 for (j = 0; j < input_tuple_size; j++)
7. Assuming 'j' is < 'input_tuple_size'
8. Loop condition is true. Entering loop body
411 {
412 ccv_nnc_tensor_t* const output = ((ccv_nnc_tensor_t**)output_data[0])[i * input_tuple_size + j];
413 parallel_for(k, batch_count) {
9. Assuming 'k' is < 'batch_count'
10. Loop condition is true. Entering loop body
414 ccv_nnc_tensor_t* const input = ((ccv_nnc_tensor_t**)input_data[(k + i * batch_count) % input_size])[j];
415 const size_t tensor_count = ccv_nnc_tensor_count(input->info);
416 float* const ap = input->data.f32;
417 float* const bp = output->data.f32 + k * tensor_count;
418 if (input->info.format == output->info.format)
11. Assuming the condition is false
12. Taking false branch
419 memcpy(bp, ap, sizeof(float) * tensor_count);
420 else {
421 // Do a simple format conversion.
422 const int c = ccv_nnc_tensor_get_c(input->info);
13. Calling 'ccv_nnc_tensor_get_c'
16. Returning from 'ccv_nnc_tensor_get_c'
17. 'c' initialized to 0
423 const size_t hw_count = tensor_count / c;
18. Division by zero
424 size_t x;
425 int y;
426 if (input->info.format == CCV_TENSOR_FORMAT_NHWC && output->info.format == CCV_TENSOR_FORMAT_NCHW)
427 for (x = 0; x < hw_count; x++)
428 for (y = 0; y < c; y++)
429 bp[y * hw_count + x] = ap[x * c + y];
430 else if (input->info.format == CCV_TENSOR_FORMAT_NCHW && output->info.format == CCV_TENSOR_FORMAT_NHWC)
431 for (x = 0; x < hw_count; x++)
432 for (y = 0; y < c; y++)
433 bp[x * c + y] = ap[y * hw_count + x];
434 }
435 } parallel_endfor
436 }
437}
438
439static void _ccv_cnnp_batching_deinit(void* const self, void* const context)
440{
441 ccv_cnnp_batch_context_t* const batch = (ccv_cnnp_batch_context_t*)context;
442 ccv_nnc_tensor_t** const tensors = (ccv_nnc_tensor_t**)self;
443 const int size = batch->tuple.size;
444 int i;
445 for (i = 0; i < size; i++)
446 ccv_nnc_tensor_free(tensors[i]);
447 ccfree(tensors);
448}
449
450ccv_cnnp_dataframe_t* ccv_cnnp_dataframe_batching_new(ccv_cnnp_dataframe_t* const dataframe, const int* const column_idxs, const int column_idx_size, const int batch_count, const int group_count, const int format)
451{
452 assert(format == CCV_TENSOR_FORMAT_NCHW || format == CCV_TENSOR_FORMAT_NHWC);
453 assert(column_idx_size >= 1);
454 assert(batch_count > 0);
455 assert(group_count > 0);
456 const int derived = ccv_cnnp_dataframe_make_tuple(dataframe, column_idxs, column_idx_size);
457 ccv_cnnp_batch_context_t* const batch = (ccv_cnnp_batch_context_t*)ccmalloc(sizeof(ccv_cnnp_batch_context_t));
458 batch->tuple.size = column_idx_size * group_count;
459 batch->format = format;
460 batch->batch_count = batch_count;
461 batch->group_count = group_count;
462 return ccv_cnnp_dataframe_reduce_new(dataframe, _ccv_cnnp_batching_new, _ccv_cnnp_batching_deinit, derived, batch_count * group_count, batch, (ccv_cnnp_column_data_context_deinit_f)ccfree);
463}
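
For reference, the per-item format conversion that _ccv_cnnp_batching_new performs (lines 426-433 above) can be exercised in isolation. Below is a small self-contained sketch with illustrative sizes; the array names and the main() wrapper are invented for the example and are not part of the library:

	#include <assert.h>

	int main(void)
	{
		const int c = 3, hw_count = 4; /* e.g. a 2x2 image with 3 channels */
		float nhwc[12], nchw[12];
		int x, y;
		for (x = 0; x < hw_count * c; x++)
			nhwc[x] = (float)x;
		/* NHWC -> NCHW, same loop structure as lines 427-429 */
		for (x = 0; x < hw_count; x++)
			for (y = 0; y < c; y++)
				nchw[y * hw_count + x] = nhwc[x * c + y];
		assert(nchw[1 * hw_count + 0] == nhwc[1]); /* channel 1 of pixel 0 */
		return 0;
	}

The reverse loop (lines 431-433) converts NCHW back to NHWC with the same indexing, and both directions divide tensor_count by the channel count c, which is why the analyzer focuses on c being 0.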

./ccv_nnc_easy.h

1/**********************************************************
2 * C-based/Cached/Core Computer Vision Library
3 * Liu Liu, 2010-02-01
4 **********************************************************/
5
6/**********************************************************
7 * CCV - Neural Network Collection
8 **********************************************************/
9
10#ifndef GUARD_ccv_nnc_easy_h
11#define GUARD_ccv_nnc_easy_h
12
13#include <ccv.h>
14#include <nnc/ccv_nnc.h>
15
16/**
17 * Convenience API
18 *
19 * This header provides convenience APIs for nnc usage. As a convenience API,
20 * it is optimized for shorthand coding, and its names may collide with
21 * those of other code.
22 *
23 */
24// c99 only, make sure your compiler supports that.
25
26#define NOOP_GRAPH_WHILE_EXPR (ccv_nnc_graph_while_f)(1)
27#define NOOP_GRAPH_CASE_OF_EXPR (ccv_nnc_graph_case_of_f)(1)
28
29// This is a better LIST_COUNT macro: it generates a list of 1+1+0+0+0 where a term is 1 if the parameter is present, and 0 otherwise.
30// This works better for cases such as LIST_COUNT(1, 2, 3,) where the previous macro would get 4 and this one will correctly
31// compute the result.
32#define LIST_COUNT_01(_0,_1,_2,...) _2
33#define LIST_COUNT_E(...) LIST_COUNT_01(_0,##__VA_ARGS__,1,0)
34#define LIST_COUNT_N(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_10,_11,_12,_13,_14,_15,_16,_17,_18,_19,_20,_21,_22,_23,_24,_25,_26,_27,_28,_29,_30,_31,_32,_33,_34,_35,_36,_37,_38,_39,_40,_41,_42,_43,_44,_45,_46,_47,_48,_49,_50,_51,_52,_53,_54,_55,_56,_57,_58,_59,_60,_61,_62,_63,...) (LIST_COUNT_E(_0)+LIST_COUNT_E(_1)+LIST_COUNT_E(_2)+LIST_COUNT_E(_3)+LIST_COUNT_E(_4)+LIST_COUNT_E(_5)+LIST_COUNT_E(_6)+LIST_COUNT_E(_7)+LIST_COUNT_E(_8)+LIST_COUNT_E(_9)+LIST_COUNT_E(_10)+LIST_COUNT_E(_11)+LIST_COUNT_E(_12)+LIST_COUNT_E(_13)+LIST_COUNT_E(_14)+LIST_COUNT_E(_15)+LIST_COUNT_E(_16)+LIST_COUNT_E(_17)+LIST_COUNT_E(_18)+LIST_COUNT_E(_19)+LIST_COUNT_E(_20)+LIST_COUNT_E(_21)+LIST_COUNT_E(_22)+LIST_COUNT_E(_23)+LIST_COUNT_E(_24)+LIST_COUNT_E(_25)+LIST_COUNT_E(_26)+LIST_COUNT_E(_27)+LIST_COUNT_E(_28)+LIST_COUNT_E(_29)+LIST_COUNT_E(_30)+LIST_COUNT_E(_31)+LIST_COUNT_E(_32)+LIST_COUNT_E(_33)+LIST_COUNT_E(_34)+LIST_COUNT_E(_35)+LIST_COUNT_E(_36)+LIST_COUNT_E(_37)+LIST_COUNT_E(_38)+LIST_COUNT_E(_39)+LIST_COUNT_E(_40)+LIST_COUNT_E(_41)+LIST_COUNT_E(_42)+LIST_COUNT_E(_43)+LIST_COUNT_E(_44)+LIST_COUNT_E(_45)+LIST_COUNT_E(_46)+LIST_COUNT_E(_47)+LIST_COUNT_E(_48)+LIST_COUNT_E(_49)+LIST_COUNT_E(_50)+LIST_COUNT_E(_51)+LIST_COUNT_E(_52)+LIST_COUNT_E(_53)+LIST_COUNT_E(_54)+LIST_COUNT_E(_55)+LIST_COUNT_E(_56)+LIST_COUNT_E(_57)+LIST_COUNT_E(_58)+LIST_COUNT_E(_59)+LIST_COUNT_E(_60)+LIST_COUNT_E(_61)+LIST_COUNT_E(_62)+LIST_COUNT_E(_63)-1)
35#define LIST_COUNT(...)(1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 -1)
LIST_COUNT_N(_0,##__VA_ARGS__,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,)(1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 -1)
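As a quick illustration (not from the header itself): with a single argument, LIST_COUNT(column_idx) expands to the arithmetic expression (1 +1 +0 +0 ... +0 -1) that appears throughout the annotated .c file above and evaluates to 1; LIST_COUNT(1, 2, 3) evaluates to 3, and LIST_COUNT() to 0.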
36
37#define TENSOR_LIST_X(...) (ccv_nnc_tensor_t* []){__VA_ARGS__}
38
39#define TENSOR_PARAM_LIST_X(...) (ccv_nnc_tensor_param_t []){__VA_ARGS__}
40
41#define TENSOR_SYMBOL_LIST_X(...) (ccv_nnc_tensor_symbol_t []){__VA_ARGS__}
42
43#define TENSOR_VARIABLE_LIST_X(...) (ccv_nnc_tensor_variable_t []){__VA_ARGS__}
44
45#define KV_X(_x, _y, ...) {(_x), (_y)}
46#define KV(...) KV_X(__VA_ARGS__, 0)
47#define TENSOR_BIND_MAP_X(...) (ccv_nnc_tensor_bind_t []){__VA_ARGS__}
48
49#define TENSOR_SYMBOL_MAP_X(...) (ccv_nnc_tensor_symbol_map_t []){__VA_ARGS__}
50
51#define GRAPH_EXEC_LIST_X(...) (ccv_nnc_graph_exec_t []){__VA_ARGS__}
52
53#define GRAPH_EXEC_SYMBOL_LIST_X(...) (ccv_nnc_graph_exec_symbol_t []){__VA_ARGS__}
54
55#define SYMBOLIC_GRAPH_PASSES_X(...) (int []){__VA_ARGS__}
56
57#define MODEL_LIST_X(...) (ccv_cnnp_model_t* []){__VA_ARGS__}
58
59#define MODEL_IO_LIST_X(...) (ccv_cnnp_model_io_t []){__VA_ARGS__}
60
61#define MODEL_CMD_TENSOR_MAP_X(...) (ccv_cnnp_tensor_param_t []){__VA_ARGS__}
62
63#define MODEL_CMD_TENSOR_LIST_X(...) (int []){__VA_ARGS__}
64
65#define COLUMN_ID_LIST_X(...) (int []){__VA_ARGS__}
66
67/**
68 * @defgroup convenience_api Convenience API
69 * @{
70 */
71/**
72 * Pass a list of tensors to NNC functions that accepts (tensor array, tensor array size).
73 * This method effectively gives two parameters as one.
74 */
75#define TENSOR_LIST(...) TENSOR_LIST_X(__VA_ARGS__), LIST_COUNT(__VA_ARGS__)
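For instance (illustrative only, assuming tensors a, b and c already exist), TENSOR_LIST lets a call such as ccv_nnc_cmd_exec(cmd, ccv_nnc_no_hint, 0, TENSOR_LIST(a, b), TENSOR_LIST(c), 0) supply each tensor array together with its size in one token, matching the explicit (inputs, size, outputs, size) arguments passed at line 69 of the .c file above.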
76/**
77 * Pass a list of tensor parameters to NNC functions that accepts (parameter array, parameter array size).
78 * This method effectively gives two parameters as one.
79 */
80#define TENSOR_PARAM_LIST(...) TENSOR_PARAM_LIST_X(__VA_ARGS__), LIST_COUNT(__VA_ARGS__)
81/**
82 * This represents a tensor symbol that is empty (tensor = nil)
83 */
84#define NO_TENSOR_SYMBOL (ccv_nnc_tensor_symbol_t){.d = CCV_NNC_NO_TENSOR_SYMBOL}
85/**
86 * This represents a graph exec symbol that is empty (exec = nil)
87 */
88#define NO_GRAPH_EXEC_SYMBOL (ccv_nnc_graph_exec_symbol_t){.d = CCV_NNC_NO_GRAPH_EXEC_SYMBOL}
89/**
90 * Pass a list of tensor symbols to NNC functions that accepts (tensor symbol array, tensor symbol array size).
91 * This method effectively gives two parameters as one.
92 */
93#define TENSOR_SYMBOL_LIST(...) TENSOR_SYMBOL_LIST_X(__VA_ARGS__), LIST_COUNT(__VA_ARGS__)
94/**
95 * Pass a list of tensor variables to NNC functions that accepts (tensor variable array, tensor variable array size).
96 * This method effectively gives two parameters as one.
97 */
98#define TENSOR_VARIABLE_LIST(...) TENSOR_VARIABLE_LIST_X(__VA_ARGS__), LIST_COUNT(__VA_ARGS__)
99/**
100 * Pass a list of tensor bindings to NNC functions that accepts (tensor binding array, tensor binding array size).
101 * This method effectively gives two parameters as one. Since tensor binding requires two: symbol and a tensor,
102 * you should use this like: TENSOR_BIND_MAP(KV(symbol1, tensor1), KV(symbol2, tensor2)).
103 */
104#define TENSOR_BIND_MAP(...) TENSOR_BIND_MAP_X(__VA_ARGS__), (sizeof(TENSOR_BIND_MAP_X(__VA_ARGS__)) / sizeof(ccv_nnc_tensor_bind_t))
105/**
106 * Pass a list of tensor symbol pairs to NNC functions that accepts (tensor symbol pair array, tensor symbol pair array size).
107 * This method effectively gives two parameters as one. Since tensor symbol pair requires two: source symbol and destination symbol,
108 * you should use this like: TENSOR_SYMBOL_MAP(KV(symbol1, symbol2), KV(symbol3, symbol4)).
109 */
110#define TENSOR_SYMBOL_MAP(...) TENSOR_SYMBOL_MAP_X(__VA_ARGS__), (sizeof(TENSOR_SYMBOL_MAP_X(__VA_ARGS__)) / sizeof(ccv_nnc_tensor_symbol_map_t))
111/**
112 * Pass a list of execution nodes to NNC functions that accepts (execution node array, execution node array size).
113 * This method effectively gives two parameters as one.
114 */
115#define GRAPH_EXEC_LIST(...) GRAPH_EXEC_LIST_X(__VA_ARGS__), LIST_COUNT(__VA_ARGS__)
116/**
117 * Pass a list of execution node symbols to NNC functions that accepts (execution node symbol array, execution node symbol array size).
118 * This method effectively gives two parameters as one.
119 */
120#define GRAPH_EXEC_SYMBOL_LIST(...) GRAPH_EXEC_SYMBOL_LIST_X(__VA_ARGS__), LIST_COUNT(__VA_ARGS__)
121/**
122 * Pass both default sources and default sources size to function that accepts (sources, source size).
123 * @param x A given symbolic graph.
124 */
125#define SYMBOLIC_GRAPH_SOURCES(x) ccv_nnc_symbolic_graph_sources(x), ccv_nnc_symbolic_graph_source_size(x)
126/**
127 * Pass both default destinations and default destinations size to function that accepts (destinations, destination size).
128 * @param x A given symbolic graph.
129 */
130#define SYMBOLIC_GRAPH_DESTINATIONS(x) ccv_nnc_symbolic_graph_destinations(x), ccv_nnc_symbolic_graph_destination_size(x)
131/**
132 * Pass a list of simplification passes to NNC functions that accepts (pass array, pass array size).
133 * This method effectively gives two parameters as one.
134 */
135#define SYMBOLIC_GRAPH_PASSES(...) SYMBOLIC_GRAPH_PASSES_X(__VA_ARGS__), LIST_COUNT(__VA_ARGS__)
136/**
137 * Pass a list of CNNP models to NNC functions that accepts (model array, model array size).
138 * This method effectively gives two parameters as one.
139 */
140#define MODEL_LIST(...) MODEL_LIST_X(__VA_ARGS__), LIST_COUNT(__VA_ARGS__)
141/**
142 * Pass a list of CNNP model IOs to NNC functions that accepts (model IO array, model IO array size).
143 * This method effectively gives two parameters as one.
144 */
145#define MODEL_IO_LIST(...) MODEL_IO_LIST_X(__VA_ARGS__), LIST_COUNT(__VA_ARGS__)
146/**
147 * Pass a list of CNNP tensor params to ccv_cnnp_cmd_exec which accepts (tensor params array, tensor params array size).
148 * This method effectively gives two parameters as one.
149 */
150#define MODEL_CMD_TENSOR_MAP(...) MODEL_CMD_TENSOR_MAP_X(__VA_ARGS__), (sizeof(MODEL_CMD_TENSOR_MAP_X(__VA_ARGS__)) / sizeof(ccv_cnnp_tensor_param_t))
151/**
152 * Pass a list of CNNP tensor type to ccv_cnnp_cmd_exec which accepts (tensor type array, tensor type array size).
153 * This method effectively gives two parameters as one.
154 */
155#define MODEL_CMD_TENSOR_LIST(...) MODEL_CMD_TENSOR_LIST_X(__VA_ARGS__), LIST_COUNT(__VA_ARGS__)
156/**
157 * Pass a list of dataframe column ids to iteration function that accepts (column id array, column id array size).
158 * This method effectively gives two parameters as one.
159 */
160#define COLUMN_ID_LIST(...) COLUMN_ID_LIST_X(__VA_ARGS__), LIST_COUNT(__VA_ARGS__)
161
162#define TRAVERSE_FULL 0,0,0,0
163
164// We will support NUMA allocation on CPU in the future. Currently, this is not very meaningful (except enforce no memory reuse between tensors).
165#define CPU_NUMA_TENSOR_NHWC(device_id, dt, ...) ((ccv_nnc_tensor_param_t){.type=(CCV_COMPUTE_DEVICE_##device_id) | CCV_TENSOR_CPU_MEMORY,.format=CCV_TENSOR_FORMAT_NHWC,.datatype=CCV_##dt,.dim={__VA_ARGS__}})
166#define CPU_NUMA_TENSOR_NCHW(device_id, dt, ...) ((ccv_nnc_tensor_param_t){.type=(CCV_COMPUTE_DEVICE_##device_id) | CCV_TENSOR_CPU_MEMORY,.format=CCV_TENSOR_FORMAT_NCHW,.datatype=CCV_##dt,.dim={__VA_ARGS__}})
167#define CPU_NUMA_TENSOR_CHWN(device_id, dt, ...) ((ccv_nnc_tensor_param_t){.type=(CCV_COMPUTE_DEVICE_##device_id) | CCV_TENSOR_CPU_MEMORY,.format=CCV_TENSOR_FORMAT_CHWN,.datatype=CCV_##dt,.dim={__VA_ARGS__}})
168#define CPU_TENSOR_NHWC(dt, ...) CPU_NUMA_TENSOR_NHWC(ANY, dt, __VA_ARGS__)
169#define CPU_TENSOR_NCHW(dt, ...) CPU_NUMA_TENSOR_NCHW(ANY, dt, __VA_ARGS__)
170#define CPU_TENSOR_CHWN(dt, ...) CPU_NUMA_TENSOR_CHWN(ANY, dt, __VA_ARGS__)
171// This way, we can do error check on the device type :)
172#define GPU_TENSOR_NHWC(device_id, dt, ...) ((ccv_nnc_tensor_param_t){.type=(CCV_COMPUTE_DEVICE_##device_id) | CCV_TENSOR_GPU_MEMORY,.format=CCV_TENSOR_FORMAT_NHWC,.datatype=CCV_##dt,.dim={__VA_ARGS__}})
173#define GPU_TENSOR_NCHW(device_id, dt, ...) ((ccv_nnc_tensor_param_t){.type=(CCV_COMPUTE_DEVICE_##device_id) | CCV_TENSOR_GPU_MEMORY,.format=CCV_TENSOR_FORMAT_NCHW,.datatype=CCV_##dt,.dim={__VA_ARGS__}})
174#define GPU_TENSOR_CHWN(device_id, dt, ...) ((ccv_nnc_tensor_param_t){.type=(CCV_COMPUTE_DEVICE_##device_id) | CCV_TENSOR_GPU_MEMORY,.format=CCV_TENSOR_FORMAT_CHWN,.datatype=CCV_##dt,.dim={__VA_ARGS__}})
175/** @} */
176
177#define DIM_ALLOC(...) (int [CCV_NNC_MAX_DIM_ALLOC]){__VA_ARGS__}
178
179#define ESCAPE_X(...) __VA_ARGS__
180#define HINT_X_1(_stride_)((ccv_nnc_hint_t){.stride={.dim={ESCAPE_X _stride_}}, .border
={.begin={0},.end={0}}})
((ccv_nnc_hint_t){.stride={.dim={ESCAPE_X _stride_}}, .border={.begin={0},.end={0}}})
181#define HINT_X_2(_stride_, _border_)((ccv_nnc_hint_t){.stride={.dim={ESCAPE_X _stride_}}, .border
={.begin={ESCAPE_X _border_},.end={ESCAPE_X _border_}}})
((ccv_nnc_hint_t){.stride={.dim={ESCAPE_X _stride_}}, .border={.begin={ESCAPE_X _border_},.end={ESCAPE_X _border_}}})
182#define HINT_X_3(_stride_, _begin_, _end_) ((ccv_nnc_hint_t){.stride={.dim={ESCAPE_X _stride_}}, .border={.begin={ESCAPE_X _begin_},.end={ESCAPE_X _end_}}})
183#define HINT_X_SEL(_1, _2, _3, _FX, ...) _FX
184/**
185 * @ingroup convenience_api
186 * Simpler method to create hint.
187 * HINT(stride), HINT(stride, border), HINT(stride, border begin, border end)
188 */
189#define HINT(...) HINT_X_SEL(__VA_ARGS__, HINT_X_3, HINT_X_2, HINT_X_1)(__VA_ARGS__)
190
191static inline size_t ccv_nnc_dimension_count(const int dim[CCV_NNC_MAX_DIM_ALLOC(8)])
192{
193 if (dim[0] == 0)
194 return 0;
195 int i;
196 size_t count = 1;
197 for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC(8) && dim[i] > 0; i++)
198 count *= dim[i];
199 return count;
200}
201
202static inline size_t ccv_nnc_tensor_count(const ccv_nnc_tensor_param_t params)
203{
204 return ccv_nnc_dimension_count(params.dim);
205}
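// Worked example (shape illustrative): the element count is the product of the leading
// non-zero dimensions, and a leading 0 denotes an empty tensor, e.g.
// ccv_nnc_dimension_count((int [CCV_NNC_MAX_DIM_ALLOC]){16, 224, 224, 3}) == 16 * 224 * 224 * 3 == 2408448.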
206
207static inline size_t ccv_nnc_tensor_data_size(const ccv_nnc_tensor_param_t params)
208{
209 return (CCV_GET_DATA_TYPE_SIZE(params.datatype)_ccv_get_data_type_size[((params.datatype) & 0xFF000) >> 12] * (ssize_t)ccv_nnc_tensor_count(params) + 15) & -16;
210}
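// Worked example (assuming CCV_32F is 4 bytes): CPU_TENSOR_NHWC(32F, 10) holds 10 elements,
// i.e. 40 bytes of data, and the 16-byte rounding gives (40 + 15) & -16 = 48 bytes.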
211
212static inline void ccv_nnc_tensor_view_get_dim(const ccv_nnc_tensor_view_t* const tv, int dim[CCV_NNC_MAX_DIM_ALLOC(8)])
213{
214 int x;
215 const int nd = ccv_nnc_tensor_nd(tv->info.dim);
216 const int offset = CCV_NNC_MAX_DIM(2) + 2 - nd;
217 for (x = 0; x < offset; x++)
218 dim[x] = 1;
219 for (x = offset; x < CCV_NNC_MAX_DIM(2) + 2; x++)
220 dim[x] = tv->info.dim[x - offset];
221}
222
223static inline CCV_WARN_UNUSED(int)int __attribute__((warn_unused_result)) ccv_nnc_tensor_view_check_dim(const ccv_nnc_tensor_view_t* const tv, int dim[CCV_NNC_MAX_DIM_ALLOC(8)])
224{
225 int x;
226 const int nd = ccv_nnc_tensor_nd(tv->info.dim);
227 const int offset = CCV_NNC_MAX_DIM(2) + 2 - nd;
228 for (x = 0; x < offset; x++)
229 if (dim[x] != 1)
230 return 0;
231 for (x = offset; x < CCV_NNC_MAX_DIM(2) + 2; x++)
232 if (dim[x] != tv->info.dim[x - offset])
233 return 0;
234 return 1;
235}
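// Padding example (shape illustrative): a 3-d NHWC view with info.dim = {224, 224, 3}
// has nd = 3 and offset = 1, so ccv_nnc_tensor_view_get_dim yields dim = {1, 224, 224, 3};
// ccv_nnc_tensor_view_check_dim then accepts only that left-padded 4-d shape.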
236
237static inline void ccv_nnc_tensor_view_get_broadcast_dim(const ccv_nnc_tensor_view_t* const tv, int dim[CCV_NNC_MAX_DIM_ALLOC(8)])
238{
239 int x;
240 const int nd = ccv_nnc_tensor_nd(tv->info.dim);
241 const int offset = CCV_NNC_MAX_DIM(2) + 2 - nd;
242 for (x = 0; x < offset; x++)
243 dim[x] = ccv_max(1, dim[x])({ typeof (1) _a = (1); typeof (dim[x]) _b = (dim[x]); (_a > _b) ? _a : _b; });
244 for (x = offset; x < CCV_NNC_MAX_DIM(2) + 2; x++)
245 dim[x] = ccv_max(dim[x], tv->info.dim[x - offset])({ typeof (dim[x]) _a = (dim[x]); typeof (tv->info.dim[x - offset]) _b = (tv->info.dim[x - offset]); (_a > _b) ? _a : _b; });
246}
247
248static inline CCV_WARN_UNUSED(int)int __attribute__((warn_unused_result)) ccv_nnc_tensor_view_check_broadcast_dim(const ccv_nnc_tensor_view_t* const tv, int dim[CCV_NNC_MAX_DIM_ALLOC(8)])
249{
250 int x;
251 const int nd = ccv_nnc_tensor_nd(tv->info.dim);
252 const int offset = CCV_NNC_MAX_DIM(2) + 2 - nd;
253 for (x = offset; x < CCV_NNC_MAX_DIM(2) + 2; x++)
254 if (dim[x] != tv->info.dim[x - offset] && tv->info.dim[x - offset] != 1)
255 return 0;
256 return 1;
257}
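// Broadcast example (shapes illustrative): merging a {1, 1, 1, 3} bias view into
// dim = {1, 16, 16, 3} leaves dim unchanged (element-wise max), and check_broadcast_dim
// returns 1 because every view dimension either matches or is 1; a mismatched, non-1
// view dimension would make it return 0.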
258
259static inline void ccv_nnc_tensor_view_get_inc(const ccv_nnc_tensor_view_t* const tv, int inc[CCV_NNC_MAX_DIM_ALLOC(8)])
260{
261 int x;
262 const int nd = ccv_nnc_tensor_nd(tv->info.dim);
263 const int offset = CCV_NNC_MAX_DIM(2) + 2 - nd;
264 for (x = 0; x < offset; x++)
265 inc[x] = 1;
266 for (x = offset; x < CCV_NNC_MAX_DIM(2) + 2; x++)
267 inc[x] = CCV_IS_TENSOR_VIEW(tv)((*(int*)(tv)) & CCV_TENSOR_VIEW) ? tv->inc[x - offset] : tv->info.dim[x - offset];
268}
269
270static inline int ccv_nnc_tensor_get_n(const ccv_nnc_tensor_param_t params)
271{
272 switch (params.format)
273 {
274 case CCV_TENSOR_FORMAT_NHWC:
275 case CCV_TENSOR_FORMAT_NCHW:
276 if (ccv_nnc_tensor_nd(params.dim) == CCV_NNC_MAX_DIM(2) + 1)
277 return 1;
278 else
279 return params.dim[0];
280 case CCV_TENSOR_FORMAT_CHWN:
281 return params.dim[CCV_NNC_MAX_DIM(2) + 1];
282 }
283 return 0;
284}
285
286static inline int ccv_nnc_tensor_get_c(const ccv_nnc_tensor_param_t params)
287{
288 switch (params.format)
14
'Default' branch taken. Execution continues on line 300
289 {
290 case CCV_TENSOR_FORMAT_NHWC:
291 return params.dim[ccv_nnc_tensor_nd(params.dim) - 1];
292 case CCV_TENSOR_FORMAT_NCHW:
293 if (ccv_nnc_tensor_nd(params.dim) == CCV_NNC_MAX_DIM(2) + 1)
294 return params.dim[0];
295 else
296 return params.dim[1];
297 case CCV_TENSOR_FORMAT_CHWN:
298 return params.dim[0];
299 }
300 return 0;
15
Returning zero
301}
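// Example (shape illustrative): for CPU_TENSOR_NCHW(32F, 16, 3, 32, 32), nd = 4, so
// ccv_nnc_tensor_get_n returns dim[0] = 16 and ccv_nnc_tensor_get_c returns dim[1] = 3;
// for a 3-d NCHW shape {3, 32, 32}, get_n returns 1 and get_c returns dim[0] = 3.
// Analyzer steps 14/15 above mark the fall-through taken when the format is none of the
// three handled cases, in which case get_c returns 0.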
302
303static inline void ccv_nnc_tensor_set_n(ccv_nnc_tensor_param_t* const params, const int n)
304{
305 switch (params->format)
306 {
307 case CCV_TENSOR_FORMAT_NHWC:
308 case CCV_TENSOR_FORMAT_NCHW:
309 params->dim[0] = n;
310 break;
311 case CCV_TENSOR_FORMAT_CHWN:
312 params->dim[CCV_NNC_MAX_DIM(2) + 1] = n;
313 break;
314 }
315}
316
317static inline void ccv_nnc_tensor_set_c(ccv_nnc_tensor_param_t* const params, const int nd, const int c)
318{
319 switch (params->format)
320 {
321 case CCV_TENSOR_FORMAT_NHWC:
322 params->dim[nd - 1] = c;
323 break;
324 case CCV_TENSOR_FORMAT_NCHW:
325 if (nd == CCV_NNC_MAX_DIM(2) + 1)
326 params->dim[0] = c;
327 else
328 params->dim[1] = c;
329 break;
330 case CCV_TENSOR_FORMAT_CHWN:
331 params->dim[0] = c;
332 break;
333 }
334}
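// Function-scope sketch (values illustrative): the setters mirror the getters above.
ccv_nnc_tensor_param_t p = CPU_TENSOR_NCHW(32F, 16, 3, 32, 32);
ccv_nnc_tensor_set_n(&p, 32);                          /* dim becomes {32, 3, 32, 32} */
ccv_nnc_tensor_set_c(&p, ccv_nnc_tensor_nd(p.dim), 8); /* dim becomes {32, 8, 32, 32} */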
335
336
337#define CMD_BLAS(...) ((ccv_nnc_cmd_param_t){.size={.dim={1,1,1}},.blas={.a={__VA_ARGS__}}})
338#define CMD_GEMM(_count) ((ccv_nnc_cmd_param_t){.size={.dim={1,1,1}},.blas={.a={1,1},.count=_count}}) // We default to alpha = 1 and beta = 1
339#define CMD_GENERIC_X_0() ((ccv_nnc_cmd_param_t){.size={.dim={1,1,1}}})
340#define CMD_GENERIC_X_F(...) ("This should not be used, you should have either 0 parameter or 3 parameters for CMD_GENERIC")
341#define CMD_GENERIC_X_3(...) ((ccv_nnc_cmd_param_t){.size={.dim={__VA_ARGS__}}})
342#define CMD_GENERIC_X_SEL(_0, _1, _2, _3, _FX, ...) _FX
343// Using ## so that if it is empty, we omit one comma.
344#define CMD_GENERIC(...) CMD_GENERIC_X_SEL(CMD_GENERIC_X_F, ##__VA_ARGS__, CMD_GENERIC_X_3, CMD_GENERIC_X_F, CMD_GENERIC_X_F, CMD_GENERIC_X_0)(__VA_ARGS__)
345#define CMD_REDUCE(...) ((ccv_nnc_cmd_param_t){.size={.dim={1,1,1}},.reduce={.count=LIST_COUNT(__VA_ARGS__),.axis={__VA_ARGS__}}})
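// Usage sketch for the command-parameter helpers (all values illustrative; the comments
// restate only what the macros fill in):
ccv_nnc_cmd_param_t gemm = CMD_GEMM(128);        /* .blas = {.a = {1, 1}, .count = 128} */
ccv_nnc_cmd_param_t blas = CMD_BLAS(0.5, 2);     /* .blas.a = {0.5, 2} */
ccv_nnc_cmd_param_t win  = CMD_GENERIC(5, 5, 1); /* .size.dim = {5, 5, 1} */
ccv_nnc_cmd_param_t sum  = CMD_REDUCE(0, 1);     /* .reduce = {.count = 2, .axis = {0, 1}} */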
346/**
347 * @defgroup available_commands Available Commands
348 * @{
349 */
350#define CMD_NOOP() ccv_nnc_cmd(CCV_NNC_NOOP, 0, ccv_nnc_cmd_auto, 0)
351#define CMD_CUSTOM_FORWARD(f) ccv_nnc_cmd(CCV_NNC_CUSTOM_FORWARD, f, ccv_nnc_cmd_auto, 0)
352/** @} */
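// Unlike the CMD_* parameter helpers above, CMD_NOOP already builds a complete command
// (sketch, assuming no custom parameters are needed):
ccv_nnc_cmd_t noop = CMD_NOOP();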
353
354int ccv_nnc_is_no_hint(const ccv_nnc_hint_t hint);
355int ccv_nnc_is_cmd_auto(const ccv_nnc_cmd_param_t params);
356int ccv_nnc_is_tensor_auto(const ccv_nnc_tensor_param_t params);
357
358/**
359 * @addtogroup convenience_api
360 * @{
361 */
362/**
363 * Offsets all zero.
364 */
365extern const int ccv_nnc_no_ofs[CCV_NNC_MAX_DIM_ALLOC(8)];
366/**
367 * No hint available.
368 */
369extern const ccv_nnc_hint_t ccv_nnc_no_hint;
370/**
371 * Derive the command parameters automatically if possible.
372 */
373extern const ccv_nnc_cmd_param_t ccv_nnc_cmd_auto;
374/**
375 * Derive the tensor parameters automatically if possible.
376 */
377extern const ccv_nnc_tensor_param_t ccv_nnc_tensor_auto;
378/** @} */
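// Sketch of the auto sentinels (assumed usage): start from ccv_nnc_tensor_auto and let
// the concrete parameters be derived later; the predicate above reports that state.
ccv_nnc_tensor_param_t params = ccv_nnc_tensor_auto;
int needs_inference = ccv_nnc_is_tensor_auto(params); /* non-zero for the auto sentinel */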
379
380// Generated command flags for easy creation of ccv_nnc_cmd_t objects.
381#include "cmd/ccv_nnc_cmd_easy.h"
382
383#endif