Coverage Report

Created: 2025-05-31 15:19

/home/liu/actions-runner/_work/ccv/ccv/lib/nnc/ccv_cnnp_model_io.c
 Line| Count|Source
    1|      |#include "ccv_nnc.h"
    2|      |#include "ccv_nnc_easy.h"
    3|      |#include "ccv_nnc_internal.h"
    4|      |#include "ccv_internal.h"
    5|      |#include "_ccv_cnnp_model.h"
    6|      |#include "3rdparty/sqlite3/sqlite3.h"
    7|      |#ifdef HAVE_CUDA
    8|      |#include "gpu/ccv_nnc_compat.h"
    9|      |#endif
   10|      |
   11|      |#ifdef NDEBUG
   12|      |#define SQLITE_ENFORCE(stmt) (void)(stmt)
   13|      |#else
   14|     2|#define SQLITE_ENFORCE assert
   15|      |#endif
   16|      |
   17|      |static inline int _model_tensor_write(const ccv_cnnp_model_t* const self, const ccv_nnc_tensor_t* const tensor, const char* const sql, void* const handle, const char* const name, const ccv_nnc_tensor_io_option_t* const options)
   18|    12|{
   19|    12|  if (self->rw.writer)
   20|     0|    return self->rw.writer(tensor, sql, handle, name, options);
   21|    12|  if (sql)
   22|     2|  {
   23|     2|    sqlite3* conn = (sqlite3*)handle;
   24|     2|    SQLITE_ENFORCE(SQLITE_OK == sqlite3_exec(conn, sql, 0, 0, 0));
   25|     2|    return CCV_IO_FINAL;
   26|     2|  } else
   27|    10|    return ccv_nnc_tensor_write(tensor, handle, name, options);
   28|    12|}
   29|      |
   30|      |int ccv_cnnp_model_write(const ccv_cnnp_model_t* const model, void* const handle, const char* const name, const ccv_nnc_tensor_io_option_t* const options)
   31|     1|{
   32|     1|  ccv_cnnp_compiled_data_t* const compiled_data = model->compiled_data;
   33|     1|  assert(compiled_data); // The model has to be compiled.
   34|     1|  _model_tensor_write(model, 0, "BEGIN", handle, 0, options);
   35|     1|  int i, j;
   36|     1|  const int parallel_count = ccv_max(model->parallel_count, 1);
   37|     1|  const int parameter_size = compiled_data->parameters->rnum;
   38|     1|  const int internal_size = compiled_data->internals->rnum;
   39|     1|  char internal_name[2048 + 16];
   40|     1|  int result = CCV_IO_FINAL;
   41|    11|  for (i = 0; i < parameter_size; i++)
   42|    10|  {
   43|    10|    const char* const id = *(char**)ccv_array_get(compiled_data->ids.parameters, i);
   44|    10|    if (name)
   45|     0|      snprintf(internal_name, 2048 + 16, "__%s__[%s]", name, id);
   46|    10|    else
   47|    10|      snprintf(internal_name, 2048 + 16, "%s", id);
   48|    10|    if (CCV_IO_FINAL != _model_tensor_write(model, CCV_NNC_TENSOR(compiled_data->tensors.parameters[i]), 0, handle, internal_name, options))
   49|     0|      result = CCV_IO_ERROR;
   50|    10|  }
   51|     2|  for (i = 0; i < parallel_count; i++)
   52|     1|    for (j = 0; j < internal_size; j++)
   53|     0|    {
   54|     0|      const char* const id = *(char**)ccv_array_get(compiled_data->ids.internals, j);
   55|     0|      if (name)
   56|     0|        snprintf(internal_name, 2048 + 16, "__%s__[%s(%d)]", name, id, i);
   57|     0|      else
   58|     0|        snprintf(internal_name, 2048 + 16, "%s(%d)", id, i);
   59|     0|      if (CCV_IO_FINAL != _model_tensor_write(model, compiled_data->tensors.internals[i * internal_size + j], 0, handle, internal_name, options))
   60|     0|        result = CCV_IO_ERROR;
   61|     0|    }
   62|     1|  _model_tensor_write(model, 0, "COMMIT", handle, 0, options);
   63|     1|  return result;
   64|     1|}
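
Note: ccv_cnnp_model_write expects an already-opened SQLite connection and brackets every tensor write in one transaction (the "BEGIN" and "COMMIT" statements above travel through _model_tensor_write with a NULL tensor). Below is a minimal sketch of driving it directly; the checkpoint helper is hypothetical, and it assumes the model has already been compiled and that a NULL options pointer is acceptable, as the convenience wrappers further down suggest.

  #include "ccv_nnc.h"
  #include "3rdparty/sqlite3/sqlite3.h"

  /* Hypothetical helper: persist a compiled model into a SQLite file. */
  static int checkpoint(ccv_cnnp_model_t* const model, const char* const path)
  {
    sqlite3* conn = 0;
    if (SQLITE_OK != sqlite3_open(path, &conn))
      return CCV_IO_ERROR;
    /* All parameters (and per-device internals) are written inside a single
     * BEGIN / COMMIT pair; a failed tensor write flips the result to
     * CCV_IO_ERROR while the remaining tensors are still attempted. */
    const int result = ccv_cnnp_model_write(model, conn, 0, 0);
    sqlite3_close(conn);
    return result;
  }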
   65|      |
   66|      |static inline int _model_tensor_read(const ccv_cnnp_model_t* const self, void* const handle, const char* const name, const ccv_nnc_tensor_io_option_t* const options, const ccv_nnc_tensor_param_t info, ccv_nnc_tensor_t** const tensor_out)
   67|    10|{
   68|    10|  if (self->rw.reader)
   69|     0|    return self->rw.reader(handle, name, options, info, tensor_out);
   70|    10|  return ccv_nnc_tensor_read(handle, name, options, 0, &info, tensor_out);
   71|    10|}
   72|      |
   73|      |int ccv_cnnp_model_read(void* const handle, const char* const name, const ccv_nnc_tensor_io_option_t* const options, const ccv_cnnp_model_t* const model_out)
   74|     1|{
   75|     1|  sqlite3* conn = (sqlite3*)handle;
   76|     1|  assert(conn);
   77|     1|  ccv_cnnp_compiled_data_t* const compiled_data = model_out->compiled_data;
   78|     1|  assert(compiled_data); // The model has to be compiled.
   79|     1|  const int tensors_init = !!compiled_data->tensors_init.v;
   80|     1|  if (!tensors_init)
   81|     1|    ccv_cnnp_model_tensors_init_0(model_out, compiled_data);
   82|     1|  int i, j;
   83|     1|  const int parallel_count = ccv_max(model_out->parallel_count, 1);
   84|     1|  const int parameter_size = compiled_data->parameters->rnum;
   85|     1|  const int internal_size = compiled_data->internals->rnum;
   86|     1|  char internal_name[2048 + 16];
   87|     1|  uint32_t* const init_v = CCV_NNC_INIT_V(compiled_data->tensors_init.v);
   88|    11|  for (i = 0; i < parameter_size; i++)
   89|    10|  {
   90|    10|    const char* const id = *(char**)ccv_array_get(compiled_data->ids.parameters, i);
   91|    10|    if (name)
   92|     0|      snprintf(internal_name, 2048 + 16, "__%s__[%s]", name, id);
   93|    10|    else
   94|    10|      snprintf(internal_name, 2048 + 16, "%s", id);
   95|    10|    const ccv_nnc_tensor_symbol_t parameter = *(ccv_nnc_tensor_symbol_t*)ccv_array_get(compiled_data->parameters, i);
   96|    10|    const int d = parameter.d;
   97|    10|    ccv_nnc_tensor_param_t info = ccv_nnc_tensor_symbol_params(parameter.graph, parameter);
   98|    10|    if (CCV_TENSOR_GET_DEVICE(info.type) == CCV_COMPUTE_DEVICE_ANY)
   99|    10|      CCV_TENSOR_SET_DEVICE_ID(info.type, 0);
  100|    10|    const int device_id = CCV_TENSOR_GET_DEVICE_ID(info.type);
  101|    10|    if (compiled_data->tensors.parameters[i]) // Cannot be a shared parameter to read.
  102|     0|      { assert(!((uintptr_t)compiled_data->tensors.parameters[i] & (uintptr_t)1)); }
  103|    10|    if (_model_tensor_read(model_out, conn, internal_name, options, info, compiled_data->tensors.parameters + i) == CCV_IO_FINAL)
  104|    10|    {
  105|    10|      init_v[d >> 5] |= (1u << (d & 0x1f));
  106|      |      // Create this tensor for other data parallel allocations.
  107|    10|      info = compiled_data->tensors.parameters[i]->info; // In case we loaded a different info.
  108|    10|      for (j = 1; j < parallel_count; j++)
  109|     0|        if (!compiled_data->tensors.parameters[i + j * parameter_size])
  110|     0|        {
  111|     0|          if (j != device_id)
  112|     0|            CCV_TENSOR_SET_DEVICE_ID(info.type, j);
  113|     0|          else
  114|     0|            CCV_TENSOR_SET_DEVICE_ID(info.type, 0);
  115|     0|          compiled_data->tensors.parameters[i + j * parameter_size] = ccv_nnc_tensor_new(0, info, 0);
  116|     0|        }
  117|      |        // No need to copy over, this is done in ccv_cnnp_model.c's copy_tensors method.
  118|    10|    }
  119|    10|  }
  120|     2|  for (i = 0; i < parallel_count; i++)
  121|     1|    for (j = 0; j < internal_size; j++)
  122|     0|    {
  123|     0|      const char* const id = *(char**)ccv_array_get(compiled_data->ids.internals, j);
  124|     0|      if (name)
  125|     0|        snprintf(internal_name, 2048 + 16, "__%s__[%s(%d)]", name, id, i);
  126|     0|      else
  127|     0|        snprintf(internal_name, 2048 + 16, "%s(%d)", id, i);
  128|     0|      const ccv_nnc_tensor_symbol_t retained = *(ccv_nnc_tensor_symbol_t*)ccv_array_get(compiled_data->internals, j);
  129|     0|      const int d = retained.d;
  130|     0|      ccv_nnc_tensor_param_t info = ccv_nnc_tensor_symbol_params(retained.graph, retained);
  131|     0|      if (CCV_TENSOR_GET_DEVICE(info.type) == CCV_COMPUTE_DEVICE_ANY)
  132|     0|        CCV_TENSOR_SET_DEVICE_ID(info.type, 0);
  133|     0|      if (i > 0)
  134|     0|      {
  135|     0|        const int device_id = CCV_TENSOR_GET_DEVICE_ID(info.type);
  136|     0|        if (i != device_id)
  137|     0|          CCV_TENSOR_SET_DEVICE_ID(info.type, i);
  138|     0|        else
  139|     0|          CCV_TENSOR_SET_DEVICE_ID(info.type, 0);
  140|     0|      }
  141|     0|      if (_model_tensor_read(model_out, conn, internal_name, options, info, compiled_data->tensors.internals + i * internal_size + j) == CCV_IO_FINAL)
  142|     0|        init_v[d >> 5] |= (1u << (d & 0x1f));
  143|     0|    }
  144|      |  // Mark it as to have some other tensors to allocate.
  145|     1|  if (ccv_cnnp_model_tensors_any_to_alloc(model_out, compiled_data))
  146|     0|    compiled_data->tensors_init.v = (uint32_t*)((uintptr_t)compiled_data->tensors_init.v | (uintptr_t)1);
  147|     1|  else // Remove the flag.
  148|     1|    compiled_data->tensors_init.v = CCV_NNC_INIT_V(compiled_data->tensors_init.v);
  149|     1|  return CCV_IO_FINAL;
  150|     1|}
  151|      |
  152|      |void ccv_cnnp_model_write_to_file(ccv_cnnp_model_t* const model, const char* const fn, const ccv_nnc_tensor_io_option_t* const options)
  153|     1|{
  154|     1|  ccv_cnnp_compiled_data_t* const compiled_data = model->compiled_data;
  155|     1|  assert(compiled_data); // The model has to be compiled.
  156|     1|  sqlite3* conn = 0;
  157|     1|  if (SQLITE_OK != sqlite3_open(fn, &conn))
  158|     0|    return;
  159|     1|  ccv_cnnp_model_write(model, conn, 0, options);
  160|     1|  sqlite3_close(conn);
  161|     1|}
  162|      |
  163|      |void ccv_cnnp_model_read_from_file(const char* const fn, const ccv_nnc_tensor_io_option_t* const options, const ccv_cnnp_model_t* const model)
  164|     1|{
  165|     1|  ccv_cnnp_compiled_data_t* const compiled_data = model->compiled_data;
  166|     1|  assert(compiled_data); // The model has to be compiled.
  167|     1|  sqlite3* conn = 0;
  168|     1|  if (SQLITE_OK != sqlite3_open(fn, &conn))
  169|     0|    return;
  170|     1|  ccv_cnnp_model_read(conn, 0, options, model);
  171|     1|  sqlite3_close(conn);
  172|     1|}
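
Note: both convenience wrappers return void and bail out silently when sqlite3_open fails (the count-0 returns at lines 158 and 169 above). A sketch of the usual round trip, assuming model_a and model_b are two identically compiled models and that a NULL options pointer is fine for plain, unencrypted tensors:

  /* Write model_a's tensors out, then load them into model_b. */
  ccv_cnnp_model_write_to_file(model_a, "weights.sqlite3", 0);
  ccv_cnnp_model_read_from_file("weights.sqlite3", 0, model_b);

Since the container is an ordinary SQLite database, the stored tensor names can be inspected with any SQLite client.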
  173|      |
  174|      |void ccv_cnnp_model_set_io(ccv_cnnp_model_t* const model, ccv_cnnp_model_io_reader_f reader, ccv_cnnp_model_io_writer_f writer)
  175|     0|{
  176|     0|  model->rw.reader = reader;
  177|     0|  model->rw.writer = writer;
  178|     0|}
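
Note: ccv_cnnp_model_set_io is the only fully uncovered function in this file. It installs the rw.reader / rw.writer hooks consulted at the top of _model_tensor_read and _model_tensor_write, so a custom writer also receives the transaction statements ("BEGIN" / "COMMIT") with a NULL tensor and NULL name, and a custom reader receives the symbol's tensor parameters by value. Below is a sketch of passthrough hooks that log every tensor name; the signatures are inferred from the call sites above, and the hook names are hypothetical.

  #include <stdio.h>
  #include "ccv_nnc.h"
  #include "3rdparty/sqlite3/sqlite3.h"

  /* Writer hook: mirror the default path, logging each named tensor. */
  static int log_writer(const ccv_nnc_tensor_t* const tensor, const char* const sql, void* const handle, const char* const name, const ccv_nnc_tensor_io_option_t* const options)
  {
    if (sql) /* Transaction statements arrive with tensor == 0 and name == 0. */
      return SQLITE_OK == sqlite3_exec((sqlite3*)handle, sql, 0, 0, 0) ? CCV_IO_FINAL : CCV_IO_ERROR;
    printf("write %s\n", name);
    return ccv_nnc_tensor_write(tensor, handle, name, options);
  }

  /* Reader hook: log the name, then defer to the stock tensor reader. */
  static int log_reader(void* const handle, const char* const name, const ccv_nnc_tensor_io_option_t* const options, const ccv_nnc_tensor_param_t info, ccv_nnc_tensor_t** const tensor_out)
  {
    printf("read %s\n", name);
    return ccv_nnc_tensor_read(handle, name, options, 0, &info, tensor_out);
  }

  /* Install the hooks once, before calling ccv_cnnp_model_write / _read: */
  ccv_cnnp_model_set_io(model, log_reader, log_writer);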