Bug Summary

File: nnc/ccv_cnnp_dataframe_addons.c
Warning: line 412, column 4
Assigned value is garbage or undefined

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -triple x86_64-unknown-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name ccv_cnnp_dataframe_addons.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model static -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -target-feature +sse2 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -resource-dir /usr/local/lib/clang/8.0.0 -I ../ -I /usr/local/cuda/include -D HAVE_CBLAS -D HAVE_LIBPNG -D HAVE_LIBJPEG -D HAVE_FFTW3 -D HAVE_PTHREAD -D HAVE_UCONTEXT -D HAVE_LIBLINEAR -D HAVE_TESSERACT -D HAVE_AVCODEC -D HAVE_AVFORMAT -D HAVE_AVUTIL -D HAVE_SWSCALE -D USE_DISPATCH -D HAVE_SSE2 -D HAVE_GSL -D HAVE_CUDA -D HAVE_CUDNN -D HAVE_NCCL -I /usr/local/include -internal-isystem /usr/local/include -internal-isystem /usr/local/lib/clang/8.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -fdebug-compilation-dir /home/liu/buildslave/linux-x64-runtests/build/lib/nnc -ferror-limit 19 -fmessage-length 0 -fblocks -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -o /home/liu/buildslave/public_html/analyze/2019-05-04-163002-105371-1 -x c ccv_cnnp_dataframe_addons.c -faddrsig
1#include "ccv_nnc.h"
2#include "ccv_nnc_easy.h"
3#include "ccv_nnc_internal.h"
4#include "ccv_internal.h"
5#include "_ccv_cnnp_dataframe.h"
6
7#include <3rdparty/sfmt/SFMT.h>
8
9#pragma mark - Create Dataframe from Array
10
11static void _ccv_cnnp_array_enum(const int column_idx, const int* const row_idxs, const int row_size, void** const data, void* const context, ccv_nnc_stream_context_t* const stream_context)
12{
13 int i;
14 ccv_array_t* const array = (ccv_array_t*)context;
15 for (i = 0; i < row_size; i++)
16 data[i] = ccv_array_get(array, row_idxs[i])((void*)(((char*)((array)->data)) + (size_t)(array)->rsize
* (size_t)(row_idxs[i])))
;
17}
18
19ccv_cnnp_dataframe_t* ccv_cnnp_dataframe_from_array_new(ccv_array_t* const array)
20{
21 const ccv_cnnp_column_data_t array_column_data = {
22 .data_enum = _ccv_cnnp_array_enum,
23 .context = array
24 };
25 return ccv_cnnp_dataframe_new(&array_column_data, 1, array->rnum);
26}
27
28typedef struct {
29 ccv_cnnp_dataframe_tuple_t tuple;
30 int tensor_offset;
31 int device_id;
32} ccv_cnnp_copy_to_gpu_context_t;
33
34#pragma mark - Copy Tensors from CPU to GPU
35
36static void _ccv_cnnp_tensor_list_deinit(void* const data, void* const context)
37{
38 ccv_cnnp_copy_to_gpu_context_t* const copy_to_gpu = (ccv_cnnp_copy_to_gpu_context_t*)context;
39 ccv_nnc_tensor_t** const tensor_list = (ccv_nnc_tensor_t**)data;
40 int i;
41 for (i = 0; i < copy_to_gpu->tuple.size; i++)
42 if (tensor_list[i])
43 ccv_nnc_tensor_free(tensor_list[i]);
44 ccfreefree(tensor_list);
45}
46
47static void _ccv_cnnp_copy_to_gpu(void*** const column_data, const int column_size, const int batch_size, void** const data, void* const context, ccv_nnc_stream_context_t* const stream_context)
48{
49 const ccv_cnnp_copy_to_gpu_context_t* const copy_to_gpu_context = (ccv_cnnp_copy_to_gpu_context_t*)context;
50 int i, j;
51 for (i = 0; i < batch_size; i++)
52 {
53 ccv_nnc_tensor_t** inputs = (ccv_nnc_tensor_t**)column_data[0][i] + copy_to_gpu_context->tensor_offset;
54 ccv_nnc_tensor_t** outputs = (ccv_nnc_tensor_t**)data[i];
55 if (!outputs)
56 {
57 outputs = (ccv_nnc_tensor_t**)(data[i] = ccmallocmalloc(sizeof(ccv_nnc_tensor_t*) * copy_to_gpu_context->tuple.size));
58 for (j = 0; j < copy_to_gpu_context->tuple.size; j++)
59 {
60 ccv_nnc_tensor_param_t params = inputs[j]->info;
61 params.type &= ~CCV_TENSOR_CPU_MEMORY;
62 params.type |= CCV_TENSOR_GPU_MEMORY; // Change to GPU memory.
63 CCV_TENSOR_SET_DEVICE_ID(params.type, copy_to_gpu_context->device_id)(params.type) = (((params.type) & ~0xfff00) | (((copy_to_gpu_context
->device_id) & 0xfff) << 8))
;
64 outputs[j] = ccv_nnc_tensor_new(0, params, 0);
65 }
66 }
67 for (j = 0; j < copy_to_gpu_context->tuple.size; j++)
68 ccv_nnc_tensor_pin_memory(inputs[j]);
69 ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD()ccv_nnc_cmd(CCV_NNC_DATA_TRANSFER_FORWARD, 0, ccv_nnc_cmd_auto
, 0)
, ccv_nnc_no_hint, 0, inputs, copy_to_gpu_context->tuple.size, outputs, copy_to_gpu_context->tuple.size, stream_context);
70 }
71}
72
73int ccv_cnnp_dataframe_copy_to_gpu(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const int tensor_offset, const int tensor_size, int device_id)
74{
75 assert(tensor_size > 0)((void) sizeof ((tensor_size > 0) ? 1 : 0), __extension__ (
{ if (tensor_size > 0) ; else __assert_fail ("tensor_size > 0"
, "ccv_cnnp_dataframe_addons.c", 75, __extension__ __PRETTY_FUNCTION__
); }))
;
76 int stream_type = CCV_STREAM_CONTEXT_GPU;
77 CCV_STREAM_SET_DEVICE_ID(stream_type, device_id)(stream_type) = (((stream_type) & ~0xfff00) | (((device_id
) & 0xfff) << 8))
;
78 ccv_cnnp_copy_to_gpu_context_t* const copy_to_gpu_context = (ccv_cnnp_copy_to_gpu_context_t*)ccmallocmalloc(sizeof(ccv_cnnp_copy_to_gpu_context_t));
79 copy_to_gpu_context->tuple.size = tensor_size;
80 copy_to_gpu_context->tensor_offset = tensor_offset;
81 copy_to_gpu_context->device_id = device_id;
82 return ccv_cnnp_dataframe_map(dataframe, _ccv_cnnp_copy_to_gpu, stream_type, _ccv_cnnp_tensor_list_deinit, COLUMN_ID_LIST(column_idx)(int []){column_idx}, (1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, copy_to_gpu_context, (ccv_cnnp_column_data_context_deinit_f)ccfreefree);
83}
84
85#pragma mark - Make Auxiliary Tensor as a new Column
86
87static void _ccv_cnnp_tensor_deinit(void* const data, void* const context)
88{
89 ccv_nnc_tensor_free((ccv_nnc_tensor_t*)data);
90}
91
92static void _ccv_cnnp_tensor_new(const int column_idx, const int* const row_idxs, const int row_size, void** const data, void* const context, ccv_nnc_stream_context_t* const stream_context)
93{
94 ccv_nnc_tensor_param_t params = *(ccv_nnc_tensor_param_t*)context;
95 int i;
96 for (i = 0; i < row_size; i++)
97 if (!data[i])
98 data[i] = ccv_nnc_tensor_new(0, params, 0);
99}
100
101int ccv_cnnp_dataframe_add_aux(ccv_cnnp_dataframe_t* const dataframe, const ccv_nnc_tensor_param_t params)
102{
103 int stream_type = CCV_TENSOR_GET_MEMORY(params.type)((params.type) & 0x3) == CCV_TENSOR_CPU_MEMORY ? 0 : CCV_STREAM_CONTEXT_GPU;
104 if (stream_type == CCV_STREAM_CONTEXT_GPU)
105 CCV_STREAM_SET_DEVICE_ID(stream_type, CCV_TENSOR_GET_DEVICE_ID(params.type))(stream_type) = (((stream_type) & ~0xfff00) | ((((((params
.type) & 0xfff00) >> 8)) & 0xfff) << 8))
;
106 ccv_nnc_tensor_param_t* const context = (ccv_nnc_tensor_param_t*)ccmallocmalloc(sizeof(ccv_nnc_tensor_param_t));
107 context[0] = params;
108 return ccv_cnnp_dataframe_add(dataframe, _ccv_cnnp_tensor_new, stream_type, _ccv_cnnp_tensor_deinit, context, (ccv_cnnp_column_data_context_deinit_f)ccfreefree);
109}
110
111#pragma mark - Load Tensor from File Path
112
// Deinit hook for a column whose row data is a ccv matrix (image).
static void _ccv_cnnp_image_deinit(void* const data, void* const context)
{
	ccv_matrix_free(data);
}
117
118static void _ccv_cnnp_read_image(void*** const column_data, const int column_size, const int batch_size, void** const data, void* const context, ccv_nnc_stream_context_t* const stream_context)
119{
120 parallel_for(i, batch_size){ int i; for ((i) = 0; (i) < (batch_size); (i)++) { {
121 if (data[i])
122 ccv_matrix_free(data[i]);
123 off_t structof = (off_t)context;
124 char* const filename = *(char**)((char*)column_data[0][i] + structof);
125 data[i] = 0;
126 ccv_read(filename, (ccv_dense_matrix_t**)&data[i], CCV_IO_ANY_FILE | CCV_IO_RGB_COLOR)ccv_read_impl(filename, (ccv_dense_matrix_t**)&data[i], CCV_IO_ANY_FILE
| CCV_IO_RGB_COLOR, 0, 0, 0)
;
127 } parallel_endfor} }
128}
129
130int ccv_cnnp_dataframe_read_image(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const off_t structof)
131{
132 return ccv_cnnp_dataframe_map(dataframe, _ccv_cnnp_read_image, 0, _ccv_cnnp_image_deinit, COLUMN_ID_LIST(column_idx)(int []){column_idx}, (1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, (void*)(uintptr_t)structof, 0);
133}
134
135#pragma mark - Apply Random Jitter to Image
136
137typedef struct {
138 sfmt_t sfmt;
139 int datatype;
140 ccv_cnnp_random_jitter_t random_jitter;
141} ccv_cnnp_random_jitter_context_t;
142
143static void _ccv_cnnp_image_lighting(ccv_dense_matrix_t* image, const float alpha_r, const float alpha_g, const float alpha_b)
144{
145 assert(CCV_GET_DATA_TYPE(image->type) == CCV_32F)((void) sizeof ((((image->type) & 0xFF000) == CCV_32F)
? 1 : 0), __extension__ ({ if (((image->type) & 0xFF000
) == CCV_32F) ; else __assert_fail ("CCV_GET_DATA_TYPE(image->type) == CCV_32F"
, "ccv_cnnp_dataframe_addons.c", 145, __extension__ __PRETTY_FUNCTION__
); }))
;
146 assert(CCV_GET_CHANNEL(image->type) == CCV_C3)((void) sizeof ((((image->type) & 0xFFF) == CCV_C3) ? 1
: 0), __extension__ ({ if (((image->type) & 0xFFF) ==
CCV_C3) ; else __assert_fail ("CCV_GET_CHANNEL(image->type) == CCV_C3"
, "ccv_cnnp_dataframe_addons.c", 146, __extension__ __PRETTY_FUNCTION__
); }))
;
147 // These eigenvector values can be computed out of imageNet dataset (see ccv_convnet for how that is done). Here I just copied
148 // from mxnet: https://github.com/apache/incubator-mxnet/blob/master/src/operator/image/image_random-inl.h#L632
149 const float pca_r = alpha_r * (55.46 * -0.5675) + alpha_g * (4.794 * 0.7192) + alpha_b * (1.148 * 0.4009);
150 const float pca_g = alpha_r * (55.46 * -0.5808) + alpha_g * (4.794 * -0.0045) + alpha_b * (1.148 * -0.8140);
151 const float pca_b = alpha_r * (55.46 * -0.5836) + alpha_g * (4.794 * -0.6948) + alpha_b * (1.148 * 0.4203);
152 int i;
153 const int size = image->rows * image->cols;
154 float* const ptr = image->data.f32;
155 for (i = 0; i < size; i++)
156 {
157 ptr[i * 3] = ccv_clamp(ptr[i * 3] + pca_r, 0, 255)({ typeof (0) _a = (0); typeof (255) _b = (255); typeof (ptr[
i * 3] + pca_r) _x = (ptr[i * 3] + pca_r); (_x < _a) ? _a :
((_x > _b) ? _b : _x); })
;
158 ptr[i * 3 + 1] = ccv_clamp(ptr[i * 3 + 1] + pca_g, 0, 255)({ typeof (0) _a = (0); typeof (255) _b = (255); typeof (ptr[
i * 3 + 1] + pca_g) _x = (ptr[i * 3 + 1] + pca_g); (_x < _a
) ? _a : ((_x > _b) ? _b : _x); })
;
159 ptr[i * 3 + 2] = ccv_clamp(ptr[i * 3 + 2] + pca_b, 0, 255)({ typeof (0) _a = (0); typeof (255) _b = (255); typeof (ptr[
i * 3 + 2] + pca_b) _x = (ptr[i * 3 + 2] + pca_b); (_x < _a
) ? _a : ((_x > _b) ? _b : _x); })
;
160 }
161}
162
163static float _ccv_cnnp_random_logexp(sfmt_t* const sfmt, const float jitter)
164{
165 // We want to get something around logarithmic scale, thus, 0 is no good, and infinity is no good. 1 is the same.
166 // jitter is some turbulence we want around 1. We want the range range to be around [1 / (1 + jitter), 1 + jitter]
167 // but the distribution is not uniform (50% fall under 1, and 50% fall above 1). The way to do this is to first
168 // get to logarithmic range, doing a uniform sampling, and then convert back.
169 double log_jitter_limit = log(1 + jitter);
170 double log_random_jitter = sfmt_genrand_real1(sfmt) * 2 * log_jitter_limit - log_jitter_limit;
171 return (float)exp(log_random_jitter); // Convert it back to exponential form.
172}
173
174static void _ccv_cnnp_image_manip(ccv_dense_matrix_t* image, const ccv_cnnp_random_jitter_t random_jitter, sfmt_t* const sfmt)
175{
176 assert(sfmt && CCV_GET_CHANNEL(image->type) == CCV_C3)((void) sizeof ((sfmt && ((image->type) & 0xFFF
) == CCV_C3) ? 1 : 0), __extension__ ({ if (sfmt && (
(image->type) & 0xFFF) == CCV_C3) ; else __assert_fail
("sfmt && CCV_GET_CHANNEL(image->type) == CCV_C3"
, "ccv_cnnp_dataframe_addons.c", 176, __extension__ __PRETTY_FUNCTION__
); }))
;
177 int idx[4] = {0, 1, 2, 3};
178 sfmt_genrand_shuffle(sfmt, idx, 4, sizeof(int));
179 int i;
180 for (i = 0; i < 4; i++)
181 // change the applying order
182 switch (idx[i])
183 {
184 case 0:
185 if (random_jitter.brightness == 0)
186 break;
187 // introduce some brightness changes to the original image
188 ccv_scale(image, (ccv_matrix_t**)&image, 0, _ccv_cnnp_random_logexp(sfmt, random_jitter.brightness));
189 break;
190 case 1:
191 // introduce some saturation changes to the original image
192 if (random_jitter.saturation == 0)
193 break;
194 ccv_saturation(image, &image, 0, _ccv_cnnp_random_logexp(sfmt, random_jitter.saturation));
195 break;
196 case 2:
197 // introduce some contrast changes to the original image
198 if (random_jitter.contrast == 0)
199 break;
200 ccv_contrast(image, &image, 0, _ccv_cnnp_random_logexp(sfmt, random_jitter.contrast));
201 break;
202 case 3:
203 if (random_jitter.lighting == 0)
204 break;
205 _ccv_cnnp_image_lighting(image, sfmt_genrand_real1(sfmt) * random_jitter.lighting, sfmt_genrand_real1(sfmt) * random_jitter.lighting, sfmt_genrand_real1(sfmt) * random_jitter.lighting);
206 break;
207 }
208}
209
210static void _ccv_cnnp_normalize(ccv_dense_matrix_t* const image, const float mean[3], const float inv_std[3])
211{
212 int i;
213 const int count = image->rows * image->cols;
214 float* ap = image->data.f32;
215 for (i = 0; i < count; i++)
216 {
217 ap[i * 3] = (ap[i * 3] - mean[0]) * inv_std[0];
218 ap[i * 3 + 1] = (ap[i * 3 + 1] - mean[1]) * inv_std[1];
219 ap[i * 3 + 2] = (ap[i * 3 + 2] - mean[2]) * inv_std[2];
220 }
221}
222
223static void _ccv_cnnp_random_jitter(void*** const column_data, const int column_size, const int batch_size, void** const data, void* const context, ccv_nnc_stream_context_t* const stream_context)
224{
225 sfmt_t* const sfmt = (sfmt_t*)alloca(sizeof(sfmt_t) * batch_size)__builtin_alloca (sizeof(sfmt_t) * batch_size);
226 ccv_cnnp_random_jitter_context_t* const ctx = (ccv_cnnp_random_jitter_context_t*)context;
227 int i;
228 for (i = 0; i < batch_size; i++)
229 sfmt_init_gen_rand(&sfmt[i], sfmt_genrand_uint32(&ctx->sfmt));
230 const ccv_cnnp_random_jitter_t random_jitter = ctx->random_jitter;
231 assert(random_jitter.resize.min > 0)((void) sizeof ((random_jitter.resize.min > 0) ? 1 : 0), __extension__
({ if (random_jitter.resize.min > 0) ; else __assert_fail
("random_jitter.resize.min > 0", "ccv_cnnp_dataframe_addons.c"
, 231, __extension__ __PRETTY_FUNCTION__); }))
;
232 assert(random_jitter.resize.max >= random_jitter.resize.min)((void) sizeof ((random_jitter.resize.max >= random_jitter
.resize.min) ? 1 : 0), __extension__ ({ if (random_jitter.resize
.max >= random_jitter.resize.min) ; else __assert_fail ("random_jitter.resize.max >= random_jitter.resize.min"
, "ccv_cnnp_dataframe_addons.c", 232, __extension__ __PRETTY_FUNCTION__
); }))
;
233 parallel_for(i, batch_size){ int i; for ((i) = 0; (i) < (batch_size); (i)++) { {
234 if (data[i])
235 ccv_matrix_free(data[i]);
236 ccv_dense_matrix_t* const input = (ccv_dense_matrix_t*)column_data[0][i];
237 const int resize = ccv_clamp((int)(sfmt_genrand_real1(&sfmt[i]) * (random_jitter.resize.max - random_jitter.resize.min) + 0.5) + random_jitter.resize.min, random_jitter.resize.min, random_jitter.resize.max)({ typeof (random_jitter.resize.min) _a = (random_jitter.resize
.min); typeof (random_jitter.resize.max) _b = (random_jitter.
resize.max); typeof ((int)(sfmt_genrand_real1(&sfmt[i]) *
(random_jitter.resize.max - random_jitter.resize.min) + 0.5)
+ random_jitter.resize.min) _x = ((int)(sfmt_genrand_real1(&
sfmt[i]) * (random_jitter.resize.max - random_jitter.resize.min
) + 0.5) + random_jitter.resize.min); (_x < _a) ? _a : ((_x
> _b) ? _b : _x); })
;
238 int resize_rows = ccv_max(resize, (int)(input->rows * (float)resize / input->cols + 0.5))({ typeof (resize) _a = (resize); typeof ((int)(input->rows
* (float)resize / input->cols + 0.5)) _b = ((int)(input->
rows * (float)resize / input->cols + 0.5)); (_a > _b) ?
_a : _b; })
;
239 int resize_cols = ccv_max(resize, (int)(input->cols * (float)resize / input->rows + 0.5))({ typeof (resize) _a = (resize); typeof ((int)(input->cols
* (float)resize / input->rows + 0.5)) _b = ((int)(input->
cols * (float)resize / input->rows + 0.5)); (_a > _b) ?
_a : _b; })
;
240 if (random_jitter.aspect_ratio > 0)
241 {
242 const float aspect_ratio = sqrtf(_ccv_cnnp_random_logexp(&sfmt[i], random_jitter.aspect_ratio));
243 resize_rows = (int)(resize_rows * aspect_ratio + 0.5);
244 resize_cols = (int)(resize_cols / aspect_ratio + 0.5);
245 }
246 ccv_dense_matrix_t* resized = 0;
247 // First, resize.
248 if (input->rows > resize && input->cols > resize)
249 ccv_resample(input, &resized, CCV_32F, resize_rows, resize_cols, CCV_INTER_AREA);
250 else if (input->rows != resize_rows || input->cols != resize_cols)
251 ccv_resample(input, &resized, CCV_32F, resize_rows, resize_cols, CCV_INTER_CUBIC);
252 else
253 ccv_shift(input, (ccv_matrix_t**)&resized, CCV_32F, 0, 0); // converting to 32f
254 if (random_jitter.symmetric && (sfmt_genrand_uint32(&sfmt[i]) & 1) == 0)
255 ccv_flip(resized, &resized, 0, CCV_FLIP_X);
256 _ccv_cnnp_image_manip(resized, random_jitter, &sfmt[i]);
257 // Apply normalization before slice. Slice will introduce 0 padding, which won't be correct before normalization.
258 if (random_jitter.normalize.mean[0] != 0 || random_jitter.normalize.std[0] != 1 ||
259 random_jitter.normalize.mean[1] != 0 || random_jitter.normalize.std[1] != 1 ||
260 random_jitter.normalize.mean[2] != 0 || random_jitter.normalize.std[2] != 1)
261 _ccv_cnnp_normalize(resized, random_jitter.normalize.mean, random_jitter.normalize.std);
262 // Then slice down.
263 ccv_dense_matrix_t* patch = 0;
264 if (random_jitter.size.cols > 0 && random_jitter.size.rows > 0 &&
265 ((resized->cols != random_jitter.size.cols || resized->rows != random_jitter.size.rows) ||
266 (random_jitter.offset.x != 0 || random_jitter.offset.y != 0)))
267 {
268 int x = ccv_clamp((int)(sfmt_genrand_real1(&sfmt[i]) * (resized->cols - random_jitter.size.cols + 1)),({ typeof (({ typeof (0) _a = (0); typeof (resized->cols -
random_jitter.size.cols) _b = (resized->cols - random_jitter
.size.cols); (_a < _b) ? _a : _b; })) _a = (({ typeof (0) _a
= (0); typeof (resized->cols - random_jitter.size.cols) _b
= (resized->cols - random_jitter.size.cols); (_a < _b)
? _a : _b; })); typeof (({ typeof (0) _a = (0); typeof (resized
->cols - random_jitter.size.cols) _b = (resized->cols -
random_jitter.size.cols); (_a > _b) ? _a : _b; })) _b = (
({ typeof (0) _a = (0); typeof (resized->cols - random_jitter
.size.cols) _b = (resized->cols - random_jitter.size.cols)
; (_a > _b) ? _a : _b; })); typeof ((int)(sfmt_genrand_real1
(&sfmt[i]) * (resized->cols - random_jitter.size.cols +
1))) _x = ((int)(sfmt_genrand_real1(&sfmt[i]) * (resized
->cols - random_jitter.size.cols + 1))); (_x < _a) ? _a
: ((_x > _b) ? _b : _x); })
269 ccv_min(0, resized->cols - random_jitter.size.cols),({ typeof (({ typeof (0) _a = (0); typeof (resized->cols -
random_jitter.size.cols) _b = (resized->cols - random_jitter
.size.cols); (_a < _b) ? _a : _b; })) _a = (({ typeof (0) _a
= (0); typeof (resized->cols - random_jitter.size.cols) _b
= (resized->cols - random_jitter.size.cols); (_a < _b)
? _a : _b; })); typeof (({ typeof (0) _a = (0); typeof (resized
->cols - random_jitter.size.cols) _b = (resized->cols -
random_jitter.size.cols); (_a > _b) ? _a : _b; })) _b = (
({ typeof (0) _a = (0); typeof (resized->cols - random_jitter
.size.cols) _b = (resized->cols - random_jitter.size.cols)
; (_a > _b) ? _a : _b; })); typeof ((int)(sfmt_genrand_real1
(&sfmt[i]) * (resized->cols - random_jitter.size.cols +
1))) _x = ((int)(sfmt_genrand_real1(&sfmt[i]) * (resized
->cols - random_jitter.size.cols + 1))); (_x < _a) ? _a
: ((_x > _b) ? _b : _x); })
270 ccv_max(0, resized->cols - random_jitter.size.cols))({ typeof (({ typeof (0) _a = (0); typeof (resized->cols -
random_jitter.size.cols) _b = (resized->cols - random_jitter
.size.cols); (_a < _b) ? _a : _b; })) _a = (({ typeof (0) _a
= (0); typeof (resized->cols - random_jitter.size.cols) _b
= (resized->cols - random_jitter.size.cols); (_a < _b)
? _a : _b; })); typeof (({ typeof (0) _a = (0); typeof (resized
->cols - random_jitter.size.cols) _b = (resized->cols -
random_jitter.size.cols); (_a > _b) ? _a : _b; })) _b = (
({ typeof (0) _a = (0); typeof (resized->cols - random_jitter
.size.cols) _b = (resized->cols - random_jitter.size.cols)
; (_a > _b) ? _a : _b; })); typeof ((int)(sfmt_genrand_real1
(&sfmt[i]) * (resized->cols - random_jitter.size.cols +
1))) _x = ((int)(sfmt_genrand_real1(&sfmt[i]) * (resized
->cols - random_jitter.size.cols + 1))); (_x < _a) ? _a
: ((_x > _b) ? _b : _x); })
;
271 int y = ccv_clamp((int)(sfmt_genrand_real1(&sfmt[i]) * (resized->rows - random_jitter.size.rows + 1)),({ typeof (({ typeof (0) _a = (0); typeof (resized->rows -
random_jitter.size.rows) _b = (resized->rows - random_jitter
.size.rows); (_a < _b) ? _a : _b; })) _a = (({ typeof (0) _a
= (0); typeof (resized->rows - random_jitter.size.rows) _b
= (resized->rows - random_jitter.size.rows); (_a < _b)
? _a : _b; })); typeof (({ typeof (0) _a = (0); typeof (resized
->rows - random_jitter.size.rows) _b = (resized->rows -
random_jitter.size.rows); (_a > _b) ? _a : _b; })) _b = (
({ typeof (0) _a = (0); typeof (resized->rows - random_jitter
.size.rows) _b = (resized->rows - random_jitter.size.rows)
; (_a > _b) ? _a : _b; })); typeof ((int)(sfmt_genrand_real1
(&sfmt[i]) * (resized->rows - random_jitter.size.rows +
1))) _x = ((int)(sfmt_genrand_real1(&sfmt[i]) * (resized
->rows - random_jitter.size.rows + 1))); (_x < _a) ? _a
: ((_x > _b) ? _b : _x); })
272 ccv_min(0, resized->rows - random_jitter.size.rows),({ typeof (({ typeof (0) _a = (0); typeof (resized->rows -
random_jitter.size.rows) _b = (resized->rows - random_jitter
.size.rows); (_a < _b) ? _a : _b; })) _a = (({ typeof (0) _a
= (0); typeof (resized->rows - random_jitter.size.rows) _b
= (resized->rows - random_jitter.size.rows); (_a < _b)
? _a : _b; })); typeof (({ typeof (0) _a = (0); typeof (resized
->rows - random_jitter.size.rows) _b = (resized->rows -
random_jitter.size.rows); (_a > _b) ? _a : _b; })) _b = (
({ typeof (0) _a = (0); typeof (resized->rows - random_jitter
.size.rows) _b = (resized->rows - random_jitter.size.rows)
; (_a > _b) ? _a : _b; })); typeof ((int)(sfmt_genrand_real1
(&sfmt[i]) * (resized->rows - random_jitter.size.rows +
1))) _x = ((int)(sfmt_genrand_real1(&sfmt[i]) * (resized
->rows - random_jitter.size.rows + 1))); (_x < _a) ? _a
: ((_x > _b) ? _b : _x); })
273 ccv_max(0, resized->rows - random_jitter.size.rows))({ typeof (({ typeof (0) _a = (0); typeof (resized->rows -
random_jitter.size.rows) _b = (resized->rows - random_jitter
.size.rows); (_a < _b) ? _a : _b; })) _a = (({ typeof (0) _a
= (0); typeof (resized->rows - random_jitter.size.rows) _b
= (resized->rows - random_jitter.size.rows); (_a < _b)
? _a : _b; })); typeof (({ typeof (0) _a = (0); typeof (resized
->rows - random_jitter.size.rows) _b = (resized->rows -
random_jitter.size.rows); (_a > _b) ? _a : _b; })) _b = (
({ typeof (0) _a = (0); typeof (resized->rows - random_jitter
.size.rows) _b = (resized->rows - random_jitter.size.rows)
; (_a > _b) ? _a : _b; })); typeof ((int)(sfmt_genrand_real1
(&sfmt[i]) * (resized->rows - random_jitter.size.rows +
1))) _x = ((int)(sfmt_genrand_real1(&sfmt[i]) * (resized
->rows - random_jitter.size.rows + 1))); (_x < _a) ? _a
: ((_x > _b) ? _b : _x); })
;
274 if (random_jitter.offset.x != 0)
275 x += sfmt_genrand_real1(&sfmt[i]) * random_jitter.offset.x * 2 - random_jitter.offset.x;
276 if (random_jitter.offset.y != 0)
277 y += sfmt_genrand_real1(&sfmt[i]) * random_jitter.offset.y * 2 - random_jitter.offset.y;
278 ccv_slice(resized, (ccv_matrix_t**)&patch, CCV_32F, y, x, random_jitter.size.rows, random_jitter.size.cols);
279 ccv_matrix_free(resized);
280 } else
281 patch = resized;
282 data[i] = patch;
283 } parallel_endfor} }
284}
285
286int ccv_cnnp_dataframe_image_random_jitter(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const int datatype, const ccv_cnnp_random_jitter_t random_jitter)
287{
288 assert(datatype == CCV_32F)((void) sizeof ((datatype == CCV_32F) ? 1 : 0), __extension__
({ if (datatype == CCV_32F) ; else __assert_fail ("datatype == CCV_32F"
, "ccv_cnnp_dataframe_addons.c", 288, __extension__ __PRETTY_FUNCTION__
); }))
;
289 ccv_cnnp_random_jitter_context_t* const random_jitter_context = (ccv_cnnp_random_jitter_context_t*)ccmallocmalloc(sizeof(ccv_cnnp_random_jitter_context_t));
290 if (random_jitter.seed)
291 sfmt_init_gen_rand(&random_jitter_context->sfmt, (uint32_t)random_jitter.seed);
292 else
293 sfmt_init_gen_rand(&random_jitter_context->sfmt, (uint32_t)(uintptr_t)dataframe);
294 random_jitter_context->datatype = datatype;
295 random_jitter_context->random_jitter = random_jitter;
296 int i;
297 // The std in the random jitter should be inv_std.
298 for (i = 0; i < 3; i++)
299 random_jitter_context->random_jitter.normalize.std[i] = random_jitter_context->random_jitter.normalize.std[i] ? 1. / random_jitter_context->random_jitter.normalize.std[i] : 1;
300 return ccv_cnnp_dataframe_map(dataframe, _ccv_cnnp_random_jitter, 0, _ccv_cnnp_image_deinit, COLUMN_ID_LIST(column_idx)(int []){column_idx}, (1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, random_jitter_context, (ccv_cnnp_column_data_context_deinit_f)ccfreefree);
301}
302
303typedef struct {
304 int range;
305 int datatype;
306 int format;
307 float onval;
308 float offval;
309 off_t structof;
310} ccv_cnnp_one_hot_context_t;
311
312static void _ccv_cnnp_one_hot(void*** const column_data, const int column_size, const int batch_size, void** const data, void* const context, ccv_nnc_stream_context_t* const stream_context)
313{
314 ccv_cnnp_one_hot_context_t* const one_hot = (ccv_cnnp_one_hot_context_t*)context;
315 ccv_nnc_tensor_param_t params = {
316 .datatype = one_hot->datatype,
317 .type = CCV_TENSOR_CPU_MEMORY,
318 .format = one_hot->format,
319 .dim = {
320 one_hot->range,
321 },
322 };
323 parallel_for(i, batch_size){ int i; for ((i) = 0; (i) < (batch_size); (i)++) { {
324 int j;
325 const int label = *(int*)((char*)column_data[0][i] + one_hot->structof);
326 if (!data[i])
327 data[i] = ccv_nnc_tensor_new(0, params, 0);
328 ccv_nnc_tensor_t* const tensor = (ccv_nnc_tensor_t*)data[i];
329 assert(label >= 0 && label < one_hot->range)((void) sizeof ((label >= 0 && label < one_hot->
range) ? 1 : 0), __extension__ ({ if (label >= 0 &&
label < one_hot->range) ; else __assert_fail ("label >= 0 && label < one_hot->range"
, "ccv_cnnp_dataframe_addons.c", 329, __extension__ __PRETTY_FUNCTION__
); }))
;
330 for (j = 0; j < one_hot->range; j++)
331 tensor->data.f32[j] = (j == label) ? one_hot->onval : one_hot->offval;
332 } parallel_endfor} }
333}
334
335int ccv_cnnp_dataframe_one_hot(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const off_t structof, const int range, const float onval, const float offval, const int datatype, const int format)
336{
337 assert(datatype == CCV_32F)((void) sizeof ((datatype == CCV_32F) ? 1 : 0), __extension__
({ if (datatype == CCV_32F) ; else __assert_fail ("datatype == CCV_32F"
, "ccv_cnnp_dataframe_addons.c", 337, __extension__ __PRETTY_FUNCTION__
); }))
;
338 ccv_cnnp_one_hot_context_t* const one_hot = (ccv_cnnp_one_hot_context_t*)ccmallocmalloc(sizeof(ccv_cnnp_one_hot_context_t));
339 one_hot->range = range;
340 one_hot->datatype = datatype;
341 one_hot->format = format;
342 one_hot->onval = onval;
343 one_hot->offval = offval;
344 one_hot->structof = structof;
345 return ccv_cnnp_dataframe_map(dataframe, _ccv_cnnp_one_hot, 0, _ccv_cnnp_tensor_deinit, COLUMN_ID_LIST(column_idx)(int []){column_idx}, (1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 -1)
, one_hot, (ccv_cnnp_column_data_context_deinit_f)ccfreefree);
346}
347
348typedef struct {
349 ccv_cnnp_dataframe_tuple_t tuple;
350 int format;
351 int batch_count;
352 int group_count;
353} ccv_cnnp_batch_context_t;
354
355static void _ccv_cnnp_batching_new(void** const input_data, const int input_size, void** const output_data, void* const context, ccv_nnc_stream_context_t* const stream_context)
356{
357 ccv_cnnp_batch_context_t* const batch = (ccv_cnnp_batch_context_t*)context;
358 const int output_tuple_size = batch->tuple.size;
359 const int batch_count = batch->batch_count;
360 const int group_count = batch->group_count;
361 const int input_tuple_size = output_tuple_size / group_count;
362 int i, j, k;
363 assert(input_size > 0)((void) sizeof ((input_size > 0) ? 1 : 0), __extension__ (
{ if (input_size > 0) ; else __assert_fail ("input_size > 0"
, "ccv_cnnp_dataframe_addons.c", 363, __extension__ __PRETTY_FUNCTION__
); }))
;
1
Assuming 'input_size' is > 0
2
Taking true branch
364 if (!output_data[0])
3
Assuming the condition is true
4
Taking true branch
365 {
366 ccv_nnc_tensor_t** const inputs = (ccv_nnc_tensor_t**)input_data[0];
367 ccv_nnc_tensor_t** const tensors = (ccv_nnc_tensor_t**)(output_data[0] = ccmallocmalloc(sizeof(ccv_nnc_tensor_t*) * output_tuple_size));
5
Storing uninitialized value
368 for (i = 0; i < group_count; i++)
6
Assuming 'i' is < 'group_count'
7
Loop condition is true. Entering loop body
10
Assuming 'i' is >= 'group_count'
11
Loop condition is false. Execution continues on line 409
369 for (j = 0; j < input_tuple_size; j++)
8
Assuming 'j' is >= 'input_tuple_size'
9
Loop condition is false. Execution continues on line 368
370 {
371 ccv_nnc_tensor_param_t params = inputs[j]->info;
372 assert(params.datatype == CCV_32F)((void) sizeof ((params.datatype == CCV_32F) ? 1 : 0), __extension__
({ if (params.datatype == CCV_32F) ; else __assert_fail ("params.datatype == CCV_32F"
, "ccv_cnnp_dataframe_addons.c", 372, __extension__ __PRETTY_FUNCTION__
); }))
; // Only support 32 bit float yet.
373 assert(params.format == CCV_TENSOR_FORMAT_NHWC || params.format == CCV_TENSOR_FORMAT_NCHW)((void) sizeof ((params.format == CCV_TENSOR_FORMAT_NHWC || params
.format == CCV_TENSOR_FORMAT_NCHW) ? 1 : 0), __extension__ ({
if (params.format == CCV_TENSOR_FORMAT_NHWC || params.format
== CCV_TENSOR_FORMAT_NCHW) ; else __assert_fail ("params.format == CCV_TENSOR_FORMAT_NHWC || params.format == CCV_TENSOR_FORMAT_NCHW"
, "ccv_cnnp_dataframe_addons.c", 373, __extension__ __PRETTY_FUNCTION__
); }))
;
374 params.format = batch->format;
375 // Special-case for dim count is 3 and 1, in these two cases, the N is not provided.
376 if (batch->format == inputs[j]->info.format)
377 {
378 const int nd = ccv_nnc_tensor_nd(params.dim);
379 if (nd == 3 || nd == 1)
380 {
381 memset(params.dim, 0, sizeof(int) * CCV_NNC_MAX_DIM_ALLOC(8));
382 memcpy(params.dim + 1, inputs[j]->info.dim, sizeof(int) * nd);
383 }
384 } else {
385 const int nd = ccv_nnc_tensor_nd(params.dim);
386 if (nd == 1)
387 {
388 memset(params.dim, 0, sizeof(int) * CCV_NNC_MAX_DIM_ALLOC(8));
389 memcpy(params.dim + 1, inputs[j]->info.dim, sizeof(int) * nd);
390 } else if (nd >= 3) {
391 memset(params.dim, 0, sizeof(int) * CCV_NNC_MAX_DIM_ALLOC(8));
392 const int hw = ccv_nnc_tensor_hw(inputs[j]->info, nd);
393 if (batch->format == CCV_TENSOR_FORMAT_NCHW)
394 {
395 params.dim[1] = ccv_nnc_tensor_get_c(inputs[j]->info);
396 for (k = 0; k < CCV_NNC_MAX_DIM(2); k++)
397 params.dim[k + 2] = inputs[j]->info.dim[k + hw];
398 } else {
399 params.dim[CCV_NNC_MAX_DIM(2) + 1] = ccv_nnc_tensor_get_c(inputs[j]->info);
400 for (k = 0; k < CCV_NNC_MAX_DIM(2); k++)
401 params.dim[k + 1] = inputs[j]->info.dim[k + hw];
402 }
403 }
404 }
405 params.dim[0] = batch_count; // Set the batch count now.
406 tensors[i * input_tuple_size + j] = ccv_nnc_tensor_new(0, params, 0);
407 }
408 }
409 for (i = 0; i < group_count; i++)
12
Loop condition is true. Entering loop body
410 for (j = 0; j < input_tuple_size; j++)
13
Assuming 'j' is < 'input_tuple_size'
14
Loop condition is true. Entering loop body
411 {
412 ccv_nnc_tensor_t* const output = ((ccv_nnc_tensor_t**)output_data[0])[i * input_tuple_size + j];
15
Assigned value is garbage or undefined
413 parallel_for(k, batch_count){ int k; for ((k) = 0; (k) < (batch_count); (k)++) { {
414 ccv_nnc_tensor_t* const input = ((ccv_nnc_tensor_t**)input_data[(k + i * batch_count) % input_size])[j];
415 const size_t tensor_count = ccv_nnc_tensor_count(input->info);
416 float* const ap = input->data.f32;
417 float* const bp = output->data.f32 + k * tensor_count;
418 if (input->info.format == output->info.format)
419 memcpy(bp, ap, sizeof(float) * tensor_count);
420 else {
421 // Do a simple format conversion.
422 const int c = ccv_nnc_tensor_get_c(input->info);
423 const size_t hw_count = tensor_count / c;
424 size_t x;
425 int y;
426 if (input->info.format == CCV_TENSOR_FORMAT_NHWC && output->info.format == CCV_TENSOR_FORMAT_NCHW)
427 for (x = 0; x < hw_count; x++)
428 for (y = 0; y < c; y++)
429 bp[y * hw_count + x] = ap[x * c + y];
430 else if (input->info.format == CCV_TENSOR_FORMAT_NCHW && output->info.format == CCV_TENSOR_FORMAT_NHWC)
431 for (x = 0; x < hw_count; x++)
432 for (y = 0; y < c; y++)
433 bp[x * c + y] = ap[y * hw_count + x];
434 }
435 } parallel_endfor} }
436 }
437}
438
439static void _ccv_cnnp_batching_deinit(void* const self, void* const context)
440{
441 ccv_cnnp_batch_context_t* const batch = (ccv_cnnp_batch_context_t*)context;
442 ccv_nnc_tensor_t** const tensors = (ccv_nnc_tensor_t**)self;
443 const int size = batch->tuple.size;
444 int i;
445 for (i = 0; i < size; i++)
446 ccv_nnc_tensor_free(tensors[i]);
447 ccfreefree(tensors);
448}
449
450ccv_cnnp_dataframe_t* ccv_cnnp_dataframe_batching_new(ccv_cnnp_dataframe_t* const dataframe, const int* const column_idxs, const int column_idx_size, const int batch_count, const int group_count, const int format)
451{
452 assert(format == CCV_TENSOR_FORMAT_NCHW || format == CCV_TENSOR_FORMAT_NHWC)((void) sizeof ((format == CCV_TENSOR_FORMAT_NCHW || format ==
CCV_TENSOR_FORMAT_NHWC) ? 1 : 0), __extension__ ({ if (format
== CCV_TENSOR_FORMAT_NCHW || format == CCV_TENSOR_FORMAT_NHWC
) ; else __assert_fail ("format == CCV_TENSOR_FORMAT_NCHW || format == CCV_TENSOR_FORMAT_NHWC"
, "ccv_cnnp_dataframe_addons.c", 452, __extension__ __PRETTY_FUNCTION__
); }))
;
453 assert(column_idx_size >= 1)((void) sizeof ((column_idx_size >= 1) ? 1 : 0), __extension__
({ if (column_idx_size >= 1) ; else __assert_fail ("column_idx_size >= 1"
, "ccv_cnnp_dataframe_addons.c", 453, __extension__ __PRETTY_FUNCTION__
); }))
;
454 assert(batch_count > 0)((void) sizeof ((batch_count > 0) ? 1 : 0), __extension__ (
{ if (batch_count > 0) ; else __assert_fail ("batch_count > 0"
, "ccv_cnnp_dataframe_addons.c", 454, __extension__ __PRETTY_FUNCTION__
); }))
;
455 assert(group_count > 0)((void) sizeof ((group_count > 0) ? 1 : 0), __extension__ (
{ if (group_count > 0) ; else __assert_fail ("group_count > 0"
, "ccv_cnnp_dataframe_addons.c", 455, __extension__ __PRETTY_FUNCTION__
); }))
;
456 const int derived = ccv_cnnp_dataframe_make_tuple(dataframe, column_idxs, column_idx_size);
457 ccv_cnnp_batch_context_t* const batch = (ccv_cnnp_batch_context_t*)ccmallocmalloc(sizeof(ccv_cnnp_batch_context_t));
458 batch->tuple.size = column_idx_size * group_count;
459 batch->format = format;
460 batch->batch_count = batch_count;
461 batch->group_count = group_count;
462 return ccv_cnnp_dataframe_reduce_new(dataframe, _ccv_cnnp_batching_new, _ccv_cnnp_batching_deinit, derived, batch_count * group_count, batch, (ccv_cnnp_column_data_context_deinit_f)ccfreefree);
463}