Bug Summary

File: nnc/ccv_nnc_tensor_io.c
Warning: line 232, column 22
Although the value stored to 'tensor' is used in the enclosing expression, the value is never actually read from 'tensor'

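Editor's note: this diagnostic comes from the deadcode.DeadStores checker (enabled via -analyzer-checker=deadcode in the invocation below). It fires on chained assignments such as "*tensor_out = tensor = ..." where the stored value feeds the enclosing expression but the middle variable is never read again. A minimal, self-contained sketch of the pattern and its fix follows; the names here are hypothetical, not the library's:

#include <stdlib.h>

static int* sink;

/* Flagged pattern: the value stored to 'p' is used by the enclosing
 * expression '*out = ...', but 'p' itself is never read afterwards,
 * so the store to 'p' is dead. */
void flagged(int** out)
{
	int* p = *out;
	if (!p)
		*out = p = malloc(sizeof(int)); /* dead store to 'p' */
	sink = *out; /* later code reads *out, never p */
}

/* Fix: drop the unread variable from the assignment chain. */
void fixed(int** out)
{
	int* p = *out;
	if (!p)
		*out = malloc(sizeof(int));
	sink = *out;
}
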
Annotated Source Code

clang -cc1 -cc1 -triple x86_64-unknown-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name ccv_nnc_tensor_io.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 2 -pic-is-pie -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -target-feature +sse2 -tune-cpu generic -debugger-tuning=gdb -fdebug-compilation-dir=/home/liu/actions-runner/_work/ccv/ccv/lib/nnc -fcoverage-compilation-dir=/home/liu/actions-runner/_work/ccv/ccv/lib/nnc -resource-dir /usr/local/lib/clang/18 -I ../ -I /usr/local/cuda/include -D HAVE_CBLAS -D HAVE_LIBPNG -D HAVE_LIBJPEG -D HAVE_FFTW3 -D HAVE_PTHREAD -D HAVE_LIBLINEAR -D HAVE_TESSERACT -D HAVE_AVCODEC -D HAVE_AVFORMAT -D HAVE_AVUTIL -D HAVE_SWSCALE -D HAVE_SSE2 -D HAVE_GSL -D HAVE_CUDA -D HAVE_CUDNN -D HAVE_NCCL -D USE_SYSTEM_CUB -D HAVE_CUDA_SM80 -I /usr/local/include -internal-isystem /usr/local/lib/clang/18/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/12/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -ferror-limit 19 -fgnuc-version=4.2.1 -fskip-odr-check-in-gmf -vectorize-loops -vectorize-slp -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/liu/actions-runner/_work/ccv/ccv/_analyze/2024-06-10-002533-204407-1 -x c ccv_nnc_tensor_io.c
1#include "ccv_nnc.h"
2#include "ccv_nnc_easy.h"
3#include "ccv_nnc_internal.h"
4#include "ccv_internal.h"
5#include "_ccv_nnc_symbolic_graph.h"
6#include "3rdparty/sqlite3/sqlite3.h"
7#ifdef HAVE_CUDA
8#include "gpu/ccv_nnc_compat.h"
9#elif HAVE_MPS
10#include "mps/ccv_nnc_mps.h"
11#endif
12
13#ifdef NDEBUG
14#define SQLITE_ENFORCE(stmt) (void)(stmt)
15#else
16#define SQLITE_ENFORCE assert
17#endif
18
19// MARK - Level-1 API
20
21int ccv_nnc_tensor_write(const ccv_nnc_tensor_t* const tensor, void* const handle, const char* const name, const ccv_nnc_tensor_io_option_t* const options)
22{
23 assert(CCV_IS_TENSOR_CONTIGUOUS(tensor));
24 assert(name);
25 sqlite3* conn = (sqlite3*)handle;
26 if (!conn)
27 return CCV_IO_ERROR;
28 const char tensor_create_table_qs[] = "CREATE TABLE IF NOT EXISTS tensors "
29 "(name TEXT, type INTEGER, format INTEGER, datatype INTEGER, "
30 "dim BLOB, data BLOB, PRIMARY KEY (name))";
31 SQLITE_ENFORCE(SQLITE_OK == sqlite3_exec(conn, tensor_create_table_qs, 0, 0, 0));
32 const char tensor_insert_qs[] =
33 "REPLACE INTO tensors "
34 "(name, type, format, datatype, dim, data) VALUES ("
35 "$name, $type, $format, $datatype, $dim, $data)";
36 sqlite3_stmt* tensor_insert_stmt = 0;
37 SQLITE_ENFORCE(SQLITE_OK == sqlite3_prepare_v2(conn, tensor_insert_qs, sizeof(tensor_insert_qs), &tensor_insert_stmt, 0));
38 sqlite3_bind_text(tensor_insert_stmt, 1, name, -1, 0);
39 ccv_nnc_tensor_param_t params = tensor->info;
40 const size_t data_size = ccv_nnc_tensor_data_size_without_padding(tensor->info);
41 unsigned char* workspace = 0;
42 unsigned int identifier = 0;
43#ifdef HAVE_CUDA
44 if (CCV_TENSOR_GET_MEMORY(tensor->info.type) == CCV_TENSOR_GPU_MEMORY)
45 {
46 if (!options || !options->encode)
47 {
48 workspace = ccmalloc(data_size);
49 cumemcpy(workspace, CCV_TENSOR_CPU_MEMORY, tensor->data.u8, tensor->info.type, data_size);
50 sqlite3_bind_blob(tensor_insert_stmt, 6, workspace, data_size, 0);
51 } else {
52 workspace = ccmalloc(data_size * 2 + 4);
53 cumemcpy(workspace, CCV_TENSOR_CPU_MEMORY, tensor->data.u8, tensor->info.type, data_size);
54 size_t encoded_size = data_size + 4;
55 if (options->encode(workspace, data_size, tensor->info.datatype, tensor->info.dim, ccv_nnc_tensor_nd(tensor->info.dim), options->context, workspace + data_size, &encoded_size, &params, &identifier))
56 sqlite3_bind_blob(tensor_insert_stmt, 6, workspace + data_size, encoded_size, 0);
57 else
58 sqlite3_bind_blob(tensor_insert_stmt, 6, workspace, data_size, 0);
59 }
60 } else {
61 if (!options || !options->encode)
62 sqlite3_bind_blob(tensor_insert_stmt, 6, tensor->data.u8, data_size, 0);
63 else {
64 workspace = ccmalloc(data_size + 4);
65 size_t encoded_size = data_size + 4;
66 if (options->encode(tensor->data.u8, data_size, tensor->info.datatype, tensor->info.dim, ccv_nnc_tensor_nd(tensor->info.dim), options->context, workspace, &encoded_size, &params, &identifier))
67 sqlite3_bind_blob(tensor_insert_stmt, 6, workspace, encoded_size, 0);
68 else
69 sqlite3_bind_blob(tensor_insert_stmt, 6, tensor->data.u8, data_size, 0);
70 }
71 }
72#elif defined(HAVE_MPS)
73 if (CCV_TENSOR_GET_MEMORY(tensor->info.type) == CCV_TENSOR_GPU_MEMORY)
74 {
75 if (!options || !options->encode)
76 {
77 workspace = ccmalloc(data_size);
78 mpmemcpy(workspace, 0, CCV_TENSOR_CPU_MEMORY, tensor->data.u8, tensor->dataof, tensor->info.type, data_size);
79 sqlite3_bind_blob(tensor_insert_stmt, 6, workspace, data_size, 0);
80 } else {
81 workspace = ccmalloc(data_size * 2 + 4);
82 mpmemcpy(workspace, 0, CCV_TENSOR_CPU_MEMORY, tensor->data.u8, tensor->dataof, tensor->info.type, data_size);
83 size_t encoded_size = data_size + 4;
84 if (options->encode(workspace, data_size, tensor->info.datatype, tensor->info.dim, ccv_nnc_tensor_nd(tensor->info.dim), options->context, workspace + data_size, &encoded_size, &params, &identifier))
85 sqlite3_bind_blob(tensor_insert_stmt, 6, workspace + data_size, encoded_size, 0);
86 else
87 sqlite3_bind_blob(tensor_insert_stmt, 6, workspace, data_size, 0);
88 }
89 } else {
90 if (!options || !options->encode)
91 sqlite3_bind_blob(tensor_insert_stmt, 6, tensor->data.u8, data_size, 0);
92 else {
93 workspace = ccmalloc(data_size + 4); // Allocate extra 4 bytes in case we need to copy the QX tensor out.
94 size_t encoded_size = data_size + 4;
95 if (options->encode(tensor->data.u8, data_size, tensor->info.datatype, tensor->info.dim, ccv_nnc_tensor_nd(tensor->info.dim), options->context, workspace, &encoded_size, &params, &identifier))
96 sqlite3_bind_blob(tensor_insert_stmt, 6, workspace, encoded_size, 0);
97 else
98 sqlite3_bind_blob(tensor_insert_stmt, 6, tensor->data.u8, data_size, 0);
99 }
100 }
101#else
102 if (!options || !options->encode)
103 sqlite3_bind_blob(tensor_insert_stmt, 6, tensor->data.u8, data_size, 0);
104 else {
105 workspace = ccmalloc(data_size + 4);
106 size_t encoded_size = data_size + 4;
107 if (options->encode(tensor->data.u8, data_size, tensor->info.datatype, tensor->info.dim, ccv_nnc_tensor_nd(tensor->info.dim), options->context, workspace, &encoded_size, &params, &identifier))
108 sqlite3_bind_blob(tensor_insert_stmt, 6, workspace, encoded_size, 0);
109 else
110 sqlite3_bind_blob(tensor_insert_stmt, 6, tensor->data.u8, data_size, 0);
111 }
112#endif
113 sqlite3_bind_int64(tensor_insert_stmt, 2, ((sqlite_int64)identifier << 32) | params.type);
114 sqlite3_bind_int(tensor_insert_stmt, 3, params.format);
115 sqlite3_bind_int64(tensor_insert_stmt, 4, ((sqlite_int64)params.reserved << 32) | params.datatype);
116 sqlite3_bind_blob(tensor_insert_stmt, 5, params.dim, sizeof(params.dim), 0);
117 sqlite3_step(tensor_insert_stmt);
118 sqlite3_reset(tensor_insert_stmt);
119 sqlite3_clear_bindings(tensor_insert_stmt);
120 sqlite3_finalize(tensor_insert_stmt);
121 if (workspace)
122 free(workspace);
123 return CCV_IO_FINAL;
124}
125
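Editor's note: for readers decoding this table by hand, lines 113 and 115 above each pack two 32-bit fields into one 64-bit column: the type column holds (identifier << 32) | params.type, and the datatype column holds (params.reserved << 32) | params.datatype. A small sketch of the matching unpack, mirroring what ccv_nnc_tensor_read does below (a sketch under that assumption, not library code):

#include <stdint.h>

/* Unpack the two packed sqlite columns written by ccv_nnc_tensor_write. */
static void unpack_tensor_columns(int64_t type_col, int64_t datatype_col,
	uint32_t* identifier, uint32_t* type, uint32_t* reserved, uint32_t* datatype)
{
	*identifier = (uint32_t)((type_col >> 32) & 0xffffffff);
	*type = (uint32_t)(type_col & 0xffffffff);
	*reserved = (uint32_t)((datatype_col >> 32) & 0xffffffff);
	*datatype = (uint32_t)(datatype_col & 0xffffffff);
}
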
126int ccv_nnc_tensor_read(void* const handle, const char* const name, const ccv_nnc_tensor_io_option_t* const options, const int flags, const ccv_nnc_tensor_param_t* const tensor_params_optional, ccv_nnc_tensor_t** const tensor_out)
127{
128 assert(name);
129 sqlite3* conn = (sqlite3*)handle;
130 if (!conn)
131 return CCV_IO_ERROR;
132 const char tensor_select_qs[] =
133 "SELECT data, type, format, datatype, dim FROM tensors WHERE name=$name";
134 sqlite3_stmt* tensor_select_stmt = 0;
135 if (SQLITE_OK != sqlite3_prepare_v2(conn, tensor_select_qs, sizeof(tensor_select_qs), &tensor_select_stmt, 0))
136 return CCV_IO_ERROR;
137 sqlite3_bind_text(tensor_select_stmt, 1, name, -1, 0);
138 if (SQLITE_ROW != sqlite3_step(tensor_select_stmt))
139 {
140 sqlite3_finalize(tensor_select_stmt);
141 return CCV_IO_ERROR;
142 }
143 ccv_nnc_tensor_t* tensor = *tensor_out;
144 ccv_nnc_tensor_param_t tensor_params;
145 int datatype = 0;
146 unsigned int identifier = 0;
147 if (!tensor) // If the tensor is not provided, we need to create one.
148 {
149 if (tensor_params_optional)
150 {
151 identifier = (sqlite3_column_int64(tensor_select_stmt, 1) >> 32) & 0xffffffff;
152 datatype = sqlite3_column_int64(tensor_select_stmt, 3) & 0xffffffff;
153 tensor_params = *tensor_params_optional;
154 assert(!(flags & CCV_NNC_TENSOR_READ_METADATA_ONLY));
155 } else {
156 const sqlite_int64 type = sqlite3_column_int64(tensor_select_stmt, 1);
157 identifier = (type >> 32) & 0xffffffff;
158 tensor_params.type = (type & 0xffffffff);
159 tensor_params.format = sqlite3_column_int(tensor_select_stmt, 2);
160 const sqlite_int64 datatype_mix = sqlite3_column_int64(tensor_select_stmt, 3);
161 datatype = tensor_params.datatype = (datatype_mix & 0xffffffff);
162 tensor_params.reserved = (datatype_mix >> 32) & 0xffffffff;
163 const void* const dim = sqlite3_column_blob(tensor_select_stmt, 4);
164 memcpy(tensor_params.dim, dim, ccv_min(sizeof(tensor_params.dim), sqlite3_column_bytes(tensor_select_stmt, 4)));
165 }
166 if (!options || !options->decode)
167 {
168 if (flags & CCV_NNC_TENSOR_READ_METADATA_ONLY)
169 {
170 *tensor_out = tensor = ccv_nnc_tensor_new(0, tensor_params, CCV_NO_DATA_ALLOC); // Set the data point to 1 so it is allocated without data.
171 assert(tensor->data.u8 == 0); // Set it back to 0.
172 // Already done loading metadata, return.
173 sqlite3_reset(tensor_select_stmt);
174 sqlite3_clear_bindings(tensor_select_stmt);
175 sqlite3_finalize(tensor_select_stmt);
176 return CCV_IO_FINAL;
177 } else
178 *tensor_out = tensor = ccv_nnc_tensor_new(0, tensor_params, 0);
179 } else {
180 assert(!(flags & CCV_NNC_TENSOR_READ_METADATA_ONLY));
181 }
182 } else {
183 identifier = (sqlite3_column_int64(tensor_select_stmt, 1) >> 32) & 0xffffffff;
184 datatype = sqlite3_column_int(tensor_select_stmt, 3) & 0xffffffff;
185 tensor_params = tensor->info;
186 assert(!(flags & CCV_NNC_TENSOR_READ_METADATA_ONLY));
187 }
188 const void* const data = sqlite3_column_blob(tensor_select_stmt, 0);
189 int dim[CCV_NNC_MAX_DIM_ALLOC];
190 memcpy(dim, sqlite3_column_blob(tensor_select_stmt, 4), ccv_min(sizeof(dim), sqlite3_column_bytes(tensor_select_stmt, 4)));
191 const int nd = ccv_nnc_tensor_nd(dim);
192 if (datatype != tensor_params.datatype && CCV_GET_DATA_TYPE(tensor_params.datatype) != CCV_QX)
193 {
194 // Only ever works for 16F to 32F or 32F to 16F transparently.
195 assert((datatype == CCV_16F && tensor_params.datatype == CCV_32F) || (datatype == CCV_32F && tensor_params.datatype == CCV_16F));
196 const size_t tensor_count = ccv_nnc_tensor_count(tensor_params);
197 ccv_nnc_tensor_param_t params = tensor_params;
198 params.datatype = datatype;
199 const size_t source_data_size = ccv_nnc_tensor_data_size(params);
200#ifdef HAVE_CUDA
201 if (CCV_TENSOR_GET_MEMORY(tensor_params.type) == CCV_TENSOR_GPU_MEMORY)
202 {
203 const size_t data_size = ccv_nnc_tensor_data_size(tensor_params);
204 unsigned char* workspace;
205 unsigned char* copying;
206 size_t decoded_size = data_size;
207 if (!options || !options->decode)
208 {
209 copying = workspace = ccmalloc(data_size);
210 if (datatype == CCV_16F && tensor_params.datatype == CCV_32F)
211 ccv_half_precision_to_float((uint16_t*)data, (float*)workspace, ccv_min(tensor_count, sqlite3_column_bytes(tensor_select_stmt, 0) / sizeof(uint16_t)));
212 else if (datatype == CCV_32F && tensor_params.datatype == CCV_16F)
213 ccv_float_to_half_precision((float*)data, (uint16_t*)workspace, ccv_min(tensor_count, sqlite3_column_bytes(tensor_select_stmt, 0) / sizeof(float)));
214 else
215 { assert(0); }
216 } else {
217 copying = workspace = ccmalloc(data_size + source_data_size);
218 if (datatype == CCV_16F && tensor_params.datatype == CCV_32F)
219 {
220 decoded_size = source_data_size;
221 if (options->decode(data, sqlite3_column_bytes(tensor_select_stmt, 0), datatype, dim, nd, identifier, options->context, tensor_params, tensor_out, workspace + data_size, &decoded_size))
222 {
223 // If we loaded quantized tensor, don't do the conversion.
224 if (CCV_GET_DATA_TYPE(tensor_out[0]->info.datatype) == CCV_QX)
225 copying = workspace + data_size;
226 else {
227 ccv_half_precision_to_float((uint16_t*)(workspace + data_size), (float*)workspace, ccv_min(tensor_count, ccv_min(source_data_size, decoded_size) / sizeof(uint16_t)));
228 decoded_size = data_size;
229 }
230 } else {
231 if (!tensor)
232 *tensor_out = tensor = ccv_nnc_tensor_new(0, tensor_params, 0);
Although the value stored to 'tensor' is used in the enclosing expression, the value is never actually read from 'tensor'
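// Editor's note (not analyzer output): in this decode-failure branch the
// conversion below reads 'data' and writes 'workspace', and the final copy
// at line 255 goes through tensor_out[0]; 'tensor' is never read again in
// this scope. If that reading is right, the assignment could be written as
//   *tensor_out = ccv_nnc_tensor_new(0, tensor_params, 0);
// which keeps the behavior and silences the dead-store warning.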
233 ccv_half_precision_to_float((uint16_t*)data, (float*)workspace, ccv_min(tensor_count, sqlite3_column_bytes(tensor_select_stmt, 0) / sizeof(uint16_t)));
234 decoded_size = data_size;
235 }
236 } else if (datatype == CCV_32F && tensor_params.datatype == CCV_16F) {
237 decoded_size = source_data_size;
238 if (options->decode(data, sqlite3_column_bytes(tensor_select_stmt, 0), datatype, dim, nd, identifier, options->context, tensor_params, tensor_out, workspace + data_size, &decoded_size))
239 {
240 if (CCV_GET_DATA_TYPE(tensor_out[0]->info.datatype) == CCV_QX)
241 copying = workspace + data_size;
242 else {
243 ccv_float_to_half_precision((float*)(workspace + data_size), (uint16_t*)workspace, ccv_min(tensor_count, ccv_min(source_data_size, decoded_size) / sizeof(float)));
244 decoded_size = data_size;
245 }
246 } else {
247 if (!tensor)
248 *tensor_out = tensor = ccv_nnc_tensor_new(0, tensor_params, 0);
249 ccv_float_to_half_precision((float*)data, (uint16_t*)workspace, ccv_min(tensor_count, sqlite3_column_bytes(tensor_select_stmt, 0) / sizeof(float)));
250 decoded_size = data_size;
251 }
252 } else
253 { assert(0); }
254 }
255 cumemcpy(tensor_out[0]->data.u8, tensor_out[0]->info.type, copying, CCV_TENSOR_CPU_MEMORY, decoded_size);
256 ccfree(workspace);
257 } else {
258 if (!options || !options->decode)
259 {
260 if (datatype == CCV_16F && tensor_params.datatype == CCV_32F)
261 ccv_half_precision_to_float((uint16_t*)data, tensor->data.f32, ccv_min(tensor_count, sqlite3_column_bytes(tensor_select_stmt, 0) / sizeof(uint16_t)));
262 else if (datatype == CCV_32F && tensor_params.datatype == CCV_16F)
263 ccv_float_to_half_precision((float*)data, (uint16_t*)tensor->data.f16, ccv_min(tensor_count, sqlite3_column_bytes(tensor_select_stmt, 0) / sizeof(float)));
264 else
265 { assert(0); }
266 } else {
267 void* const workspace = ccmalloc(source_data_size);
268 if (datatype == CCV_16F && tensor_params.datatype == CCV_32F)
269 {
270 size_t decoded_size = source_data_size;
271 if (options->decode(data, sqlite3_column_bytes(tensor_select_stmt, 0), datatype, dim, nd, identifier, options->context, tensor_params, tensor_out, workspace, &decoded_size))
272 {
273 if (CCV_GET_DATA_TYPE(tensor_out[0]->info.datatype) == CCV_QX)
274 {
275 if (decoded_size > 0)
276 memcpy(tensor_out[0]->data.f32, workspace, ccv_min(source_data_size, decoded_size));
277 } else
278 ccv_half_precision_to_float((uint16_t*)workspace, tensor_out[0]->data.f32, ccv_min(tensor_count, ccv_min(source_data_size, decoded_size) / sizeof(uint16_t)));
279 } else {
280 if (!tensor)
281 *tensor_out = tensor = ccv_nnc_tensor_new(0, tensor_params, 0);
282 ccv_half_precision_to_float((uint16_t*)data, tensor->data.f32, ccv_min(tensor_count, sqlite3_column_bytes(tensor_select_stmt, 0) / sizeof(uint16_t)));
283 }
284 } else if (datatype == CCV_32F && tensor_params.datatype == CCV_16F) {
285 size_t decoded_size = source_data_size;
286 if (options->decode(data, sqlite3_column_bytes(tensor_select_stmt, 0), datatype, dim, nd, identifier, options->context, tensor_params, tensor_out, workspace, &decoded_size))
287 {
288 if (CCV_GET_DATA_TYPE(tensor_out[0]->info.datatype) == CCV_QX)
289 {
290 if (decoded_size > 0)
291 memcpy(tensor_out[0]->data.f16, workspace, ccv_min(source_data_size, decoded_size));
292 } else
293 ccv_float_to_half_precision((float*)workspace, (uint16_t*)tensor_out[0]->data.f16, ccv_min(tensor_count, ccv_min(source_data_size, decoded_size) / sizeof(float)));
294 } else {
295 if (!tensor)
296 *tensor_out = tensor = ccv_nnc_tensor_new(0, tensor_params, 0);
297 ccv_float_to_half_precision((float*)data, (uint16_t*)tensor->data.f16, ccv_min(tensor_count, sqlite3_column_bytes(tensor_select_stmt, 0) / sizeof(float)));
298 }
299 } else
300 { assert(0); }
301 ccfree(workspace);
302 }
303 }
304#elif defined(HAVE_MPS)
305 if (CCV_TENSOR_GET_MEMORY(tensor_params.type) == CCV_TENSOR_GPU_MEMORY)
306 {
307 const size_t data_size = ccv_nnc_tensor_data_size(tensor_params);
308 unsigned char* workspace;
309 unsigned char* copying;
310 size_t decoded_size = data_size;
311 if (!options || !options->decode)
312 {
313 copying = workspace = ccmalloc(data_size);
314 if (datatype == CCV_16F && tensor_params.datatype == CCV_32F)
315 ccv_half_precision_to_float((uint16_t*)data, (float*)workspace, ccv_min(tensor_count, sqlite3_column_bytes(tensor_select_stmt, 0) / sizeof(uint16_t)));
316 else if (datatype == CCV_32F && tensor_params.datatype == CCV_16F)
317 ccv_float_to_half_precision((float*)data, (uint16_t*)workspace, ccv_min(tensor_count, sqlite3_column_bytes(tensor_select_stmt, 0) / sizeof(float)));
318 else
319 { assert(0); }
320 } else {
321 copying = workspace = ccmalloc(data_size + source_data_size);
322 if (datatype == CCV_16F && tensor_params.datatype == CCV_32F)
323 {
324 decoded_size = source_data_size;
325 if (options->decode(data, sqlite3_column_bytes(tensor_select_stmt, 0), datatype, dim, nd, identifier, options->context, tensor_params, tensor_out, workspace + data_size, &decoded_size))
326 {
327 if (CCV_GET_DATA_TYPE(tensor_out[0]->info.datatype) == CCV_QX)
328 copying = workspace + data_size;
329 else {
330 ccv_half_precision_to_float((uint16_t*)(workspace + data_size), (float*)workspace, ccv_min(tensor_count, ccv_min(source_data_size, decoded_size) / sizeof(uint16_t)));
331 decoded_size = data_size;
332 }
333 } else {
334 if (!tensor)
335 *tensor_out = tensor = ccv_nnc_tensor_new(0, tensor_params, 0);
336 ccv_half_precision_to_float((uint16_t*)data, (float*)workspace, ccv_min(tensor_count, sqlite3_column_bytes(tensor_select_stmt, 0) / sizeof(uint16_t)));
337 decoded_size = data_size;
338 }
339 } else if (datatype == CCV_32F && tensor_params.datatype == CCV_16F) {
340 decoded_size = source_data_size;
341 if (options->decode(data, sqlite3_column_bytes(tensor_select_stmt, 0), datatype, dim, nd, identifier, options->context, tensor_params, tensor_out, workspace + data_size, &decoded_size))
342 {
343 if (CCV_GET_DATA_TYPE(tensor_out[0]->info.datatype) == CCV_QX)
344 copying = workspace + data_size;
345 else {
346 ccv_float_to_half_precision((float*)(workspace + data_size), (uint16_t*)workspace, ccv_min(tensor_count, ccv_min(source_data_size, decoded_size) / sizeof(float)));
347 decoded_size = data_size;
348 }
349 } else {
350 if (!tensor)
351 *tensor_out = tensor = ccv_nnc_tensor_new(0, tensor_params, 0);
352 ccv_float_to_half_precision((float*)data, (uint16_t*)workspace, ccv_min(tensor_count, sqlite3_column_bytes(tensor_select_stmt, 0) / sizeof(float)));
353 decoded_size = data_size;
354 }
355 } else
356 { assert(0); }
357 }
358 assert(tensor_out[0]->dataof == 0);
359 mpmemcpy(tensor_out[0]->data.u8, tensor_out[0]->dataof, tensor_out[0]->info.type, copying, 0, CCV_TENSOR_CPU_MEMORY, decoded_size);
360 ccfree(workspace);
361 } else {
362 if (!options || !options->decode)
363 {
364 if (datatype == CCV_16F && tensor_params.datatype == CCV_32F)
365 ccv_half_precision_to_float((uint16_t*)data, tensor->data.f32, ccv_min(tensor_count, sqlite3_column_bytes(tensor_select_stmt, 0) / sizeof(uint16_t)));
366 else if (datatype == CCV_32F && tensor_params.datatype == CCV_16F)
367 ccv_float_to_half_precision((float*)data, (uint16_t*)tensor->data.f16, ccv_min(tensor_count, sqlite3_column_bytes(tensor_select_stmt, 0) / sizeof(float)));
368 else
369 { assert(0); }
370 } else {
371 void* const workspace = ccmalloc(source_data_size);
372 if (datatype == CCV_16F && tensor_params.datatype == CCV_32F)
373 {
374 size_t decoded_size = source_data_size;
375 if (options->decode(data, sqlite3_column_bytes(tensor_select_stmt, 0), datatype, dim, nd, identifier, options->context, tensor_params, tensor_out, workspace, &decoded_size))
376 {
377 if (CCV_GET_DATA_TYPE(tensor_out[0]->info.datatype) == CCV_QX)
378 {
379 if (decoded_size > 0)
380 memcpy(tensor_out[0]->data.f32, workspace, ccv_min(source_data_size, decoded_size));
381 } else
382 ccv_half_precision_to_float((uint16_t*)workspace, tensor_out[0]->data.f32, ccv_min(tensor_count, ccv_min(source_data_size, decoded_size) / sizeof(uint16_t)));
383 } else {
384 if (!tensor)
385 *tensor_out = tensor = ccv_nnc_tensor_new(0, tensor_params, 0);
386 ccv_half_precision_to_float((uint16_t*)data, tensor->data.f32, ccv_min(tensor_count, sqlite3_column_bytes(tensor_select_stmt, 0) / sizeof(uint16_t)));
387 }
388 } else if (datatype == CCV_32F && tensor_params.datatype == CCV_16F) {
389 size_t decoded_size = source_data_size;
390 if (options->decode(data, sqlite3_column_bytes(tensor_select_stmt, 0), datatype, dim, nd, identifier, options->context, tensor_params, tensor_out, workspace, &decoded_size))
391 {
392 if (CCV_GET_DATA_TYPE(tensor_out[0]->info.datatype) == CCV_QX)
393 {
394 if (decoded_size > 0)
395 memcpy(tensor_out[0]->data.f16, workspace, ccv_min(source_data_size, decoded_size));
396 } else
397 ccv_float_to_half_precision((float*)workspace, (uint16_t*)tensor_out[0]->data.f16, ccv_min(tensor_count, ccv_min(source_data_size, decoded_size) / sizeof(float)));
398 } else {
399 if (!tensor)
400 *tensor_out = tensor = ccv_nnc_tensor_new(0, tensor_params, 0);
401 ccv_float_to_half_precision((float*)data, (uint16_t*)tensor->data.f16, ccv_min(tensor_count, sqlite3_column_bytes(tensor_select_stmt, 0) / sizeof(float)));
402 }
403 } else
404 { assert(0); }
405 ccfree(workspace);
406 }
407 }
408#else
409 if (!options || !options->decode)
410 {
411 if (datatype == CCV_16F && tensor_params.datatype == CCV_32F)
412 ccv_half_precision_to_float((uint16_t*)data, tensor->data.f32, ccv_min(tensor_count, sqlite3_column_bytes(tensor_select_stmt, 0) / sizeof(uint16_t)));
413 else if (datatype == CCV_32F && tensor_params.datatype == CCV_16F)
414 ccv_float_to_half_precision((float*)data, (uint16_t*)tensor->data.f16, ccv_min(tensor_count, sqlite3_column_bytes(tensor_select_stmt, 0) / sizeof(float)));
415 else
416 { assert(0); }
417 } else {
418 void* const workspace = ccmalloc(source_data_size);
419 if (datatype == CCV_16F && tensor_params.datatype == CCV_32F)
420 {
421 size_t decoded_size = source_data_size;
422 if (options->decode(data, sqlite3_column_bytes(tensor_select_stmt, 0), datatype, dim, nd, identifier, options->context, tensor_params, tensor_out, workspace, &decoded_size))
423 {
424 if (CCV_GET_DATA_TYPE(tensor_out[0]->info.datatype) == CCV_QX)
425 {
426 if (decoded_size > 0)
427 memcpy(tensor_out[0]->data.f32, workspace, ccv_min(source_data_size, decoded_size));
428 } else
429 ccv_half_precision_to_float((uint16_t*)workspace, tensor_out[0]->data.f32, ccv_min(tensor_count, ccv_min(source_data_size, decoded_size) / sizeof(uint16_t)));
430 } else {
431 if (!tensor)
432 *tensor_out = tensor = ccv_nnc_tensor_new(0, tensor_params, 0);
433 ccv_half_precision_to_float((uint16_t*)data, tensor->data.f32, ccv_min(tensor_count, sqlite3_column_bytes(tensor_select_stmt, 0) / sizeof(uint16_t)));
434 }
435 } else if (datatype == CCV_32F && tensor_params.datatype == CCV_16F) {
436 size_t decoded_size = source_data_size;
437 if (options->decode(data, sqlite3_column_bytes(tensor_select_stmt, 0), datatype, dim, nd, identifier, options->context, tensor_params, tensor_out, workspace, &decoded_size))
438 {
439 if (CCV_GET_DATA_TYPE(tensor_out[0]->info.datatype) == CCV_QX)
440 {
441 if (decoded_size > 0)
442 memcpy(tensor_out[0]->data.f16, workspace, ccv_min(source_data_size, decoded_size));
443 } else
444 ccv_float_to_half_precision((float*)workspace, (uint16_t*)tensor_out[0]->data.f16, ccv_min(tensor_count, ccv_min(source_data_size, decoded_size) / sizeof(float)));
445 } else {
446 if (!tensor)
447 *tensor_out = tensor = ccv_nnc_tensor_new(0, tensor_params, 0);
448 ccv_float_to_half_precision((float*)data, (uint16_t*)tensor->data.f16, ccv_min(tensor_count, sqlite3_column_bytes(tensor_select_stmt, 0) / sizeof(float)));
449 }
450 } else
451 { assert(0); }
452 ccfree(workspace);
453 }
454#endif
455 } else {
456 // If it is QX, we need to have a custom decoder to decode properly.
457 if (datatype != tensor_params.datatype)
458 { assert(options && options->decode); }
459 size_t data_size = ccv_nnc_tensor_data_size(tensor_params);
460#ifdef HAVE_CUDA
461 if (!options || !options->decode)
462 {
463 if (CCV_TENSOR_GET_MEMORY(tensor_params.type) == CCV_TENSOR_GPU_MEMORY)
464 cumemcpy(tensor->data.u8, tensor->info.type, data, CCV_TENSOR_CPU_MEMORY, ccv_min(data_size, sqlite3_column_bytes(tensor_select_stmt, 0)));
465 else
466 memcpy(tensor->data.u8, data, ccv_min(data_size, sqlite3_column_bytes(tensor_select_stmt, 0)));
467 } else {
468 if (CCV_TENSOR_GET_MEMORY(tensor_params.type) == CCV_TENSOR_GPU_MEMORY)
469 {
470 void* const workspace = ccmalloc(data_size);
471 size_t decoded_size = data_size;
472 if (options->decode(data, sqlite3_column_bytes(tensor_select_stmt, 0), datatype, dim, nd, identifier, options->context, tensor_params, tensor_out, workspace, &decoded_size))
473 cumemcpy(tensor_out[0]->data.u8, tensor_out[0]->info.type, workspace, CCV_TENSOR_CPU_MEMORY, ccv_min(data_size, decoded_size));
474 else {
475 if (!tensor)
476 *tensor_out = tensor = ccv_nnc_tensor_new(0, tensor_params, 0);
477 cumemcpy(tensor->data.u8, tensor->info.type, data, CCV_TENSOR_CPU_MEMORY, ccv_min(data_size, sqlite3_column_bytes(tensor_select_stmt, 0)));
478 }
479 ccfree(workspace);
480 } else {
481 size_t decoded_size = data_size;
482 if (!options->decode(data, sqlite3_column_bytes(tensor_select_stmt, 0), datatype, dim, nd, identifier, options->context, tensor_params, tensor_out, tensor ? tensor->data.u8 : 0, &decoded_size))
483 {
484 if (!tensor)
485 *tensor_out = tensor = ccv_nnc_tensor_new(0, tensor_params, 0);
486 memcpy(tensor->data.u8, data, ccv_min(data_size, sqlite3_column_bytes(tensor_select_stmt, 0)));
487 }
488 }
489 }
490#elif defined(HAVE_MPS)
491 if (!options || !options->decode)
492 {
493 if (CCV_TENSOR_GET_MEMORY(tensor_params.type) == CCV_TENSOR_GPU_MEMORY)
494 {
495 assert(tensor->dataof == 0);
496 mpmemcpy(tensor->data.u8, tensor->dataof, tensor->info.type, data, 0, CCV_TENSOR_CPU_MEMORY, ccv_min(data_size, sqlite3_column_bytes(tensor_select_stmt, 0)));
497 } else
498 memcpy(tensor->data.u8, data, ccv_min(data_size, sqlite3_column_bytes(tensor_select_stmt, 0)));
499 } else {
500 if (CCV_TENSOR_GET_MEMORY(tensor_params.type) == CCV_TENSOR_GPU_MEMORY)
501 {
502 if (tensor)
503 { assert(tensor->dataof == 0); }
504 void* const workspace = ccmalloc(data_size);
505 size_t decoded_size = data_size;
506 if (options->decode(data, sqlite3_column_bytes(tensor_select_stmt, 0), datatype, dim, nd, identifier, options->context, tensor_params, tensor_out, workspace, &decoded_size)) {
507 mpmemcpy(tensor_out[0]->data.u8, tensor_out[0]->dataof, tensor_out[0]->info.type, workspace, 0, CCV_TENSOR_CPU_MEMORY, ccv_min(data_size, decoded_size));
508 } else {
509 if (!tensor)
510 *tensor_out = tensor = ccv_nnc_tensor_new(0, tensor_params, 0);
511 mpmemcpy(tensor->data.u8, tensor->dataof, tensor->info.type, data, 0, CCV_TENSOR_CPU_MEMORY, ccv_min(data_size, sqlite3_column_bytes(tensor_select_stmt, 0)));
512 }
513 ccfree(workspace);
514 } else {
515 size_t decoded_size = data_size;
516 if (!options->decode(data, sqlite3_column_bytes(tensor_select_stmt, 0), datatype, dim, nd, identifier, options->context, tensor_params, tensor_out, tensor ? tensor->data.u8 : 0, &decoded_size))
517 {
518 if (!tensor)
519 *tensor_out = tensor = ccv_nnc_tensor_new(0, tensor_params, 0);
520 memcpy(tensor->data.u8, data, ccv_min(data_size, sqlite3_column_bytes(tensor_select_stmt, 0)));
521 }
522 }
523 }
524#else
525 if (!options || !options->decode)
526 memcpy(tensor->data.u8, data, ccv_min(data_size, sqlite3_column_bytes(tensor_select_stmt, 0)));
527 else {
528 size_t decoded_size = data_size;
529 if (!options->decode(data, sqlite3_column_bytes(tensor_select_stmt, 0), datatype, dim, nd, identifier, options->context, tensor_params, tensor_out, tensor ? tensor->data.u8 : 0, &decoded_size))
530 {
531 if (!tensor)
532 *tensor_out = tensor = ccv_nnc_tensor_new(0, tensor_params, 0);
533 memcpy(tensor->data.u8, data, ccv_min(data_size, sqlite3_column_bytes(tensor_select_stmt, 0)));
534 }
535 }
536#endif
537 }
538 tensor_out[0]->type &= ~CCV_GARBAGE; // If it is marked as garbage, remove that mark now.
539 sqlite3_reset(tensor_select_stmt);
540 sqlite3_clear_bindings(tensor_select_stmt);
541 sqlite3_finalize(tensor_select_stmt);
542 return CCV_IO_FINAL;
543}
544