Coverage Report

Created: 2025-05-25 22:01

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/home/liu/actions-runner/_work/ccv/ccv/lib/nnc/ccv_cnnp_model_addons.c
Line
Count
Source
1
#include "ccv_nnc.h"
2
#include "ccv_nnc_easy.h"
3
#include "ccv_nnc_internal.h"
4
#include "ccv_internal.h"
5
#include "_ccv_cnnp_model.h"
6
7
// MARK - Add-on Functions
8
9
static int _ccv_cnnp_model_clip_grad_norm_reduce_norm2(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
10
2
{
11
2
  const int device_id = CCV_TENSOR_GET_DEVICE_ID(inputs[0]->info.type);
12
2
  ccv_nnc_tensor_t* const old_norm2 = outputs[1 + device_id * 2];
13
2
  ccv_nnc_tensor_t* const norm2 = outputs[1 + device_id * 2 + 1];
14
2
  const int tensor_count = ccv_nnc_tensor_count(inputs[0]->info);
15
2
  if (tensor_count == 1)
16
2
    ccv_nnc_cmd_exec(CMD_MUL_FORWARD(1), hint, flags, TENSOR_LIST(inputs[0], inputs[0]), TENSOR_LIST(norm2), stream_context);
17
0
  else {
18
0
    ccv_nnc_cmd_exec(CMD_REDUCE_NORM2_FORWARD(), hint, flags, TENSOR_LIST(inputs[0]), TENSOR_LIST(norm2), stream_context);
19
0
    ccv_nnc_cmd_exec(CMD_MUL_FORWARD(1), hint, flags, TENSOR_LIST(norm2, norm2), TENSOR_LIST(norm2), stream_context);
20
0
  }
21
2
  ccv_nnc_cmd_exec(CMD_ADD_FORWARD(1, 1), hint, flags, TENSOR_LIST(old_norm2, norm2), TENSOR_LIST(old_norm2), stream_context);
22
2
  return CCV_NNC_EXEC_SUCCESS;
23
2
}
24
25
static ccv_nnc_cmd_vtab_t clip_grad_norm_reduce_norm2_vtab = {
26
  .exec = _ccv_cnnp_model_clip_grad_norm_reduce_norm2
27
};
28
29
static int _ccv_cnnp_model_clip_grad_norm_scatter_norm2(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
30
2
{
31
2
  const int device_id = CCV_TENSOR_GET_DEVICE_ID(inputs[0]->info.type);
32
2
  ccv_nnc_tensor_t* const norm2 = inputs[1 + device_id * 2];
33
2
  ccv_nnc_cmd_exec(CMD_MUL_FORWARD(1), hint, flags, TENSOR_LIST(inputs[0], norm2), TENSOR_LIST(outputs[0]), stream_context);
34
2
  return CCV_NNC_EXEC_SUCCESS;
35
2
}
36
37
static ccv_nnc_cmd_vtab_t clip_grad_norm_scatter_norm2_vtab = {
38
  .exec = _ccv_cnnp_model_clip_grad_norm_scatter_norm2
39
};
40
41
void ccv_cnnp_model_parameters_clip_grad_norm(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameters, int norm_type, float max_norm, ccv_nnc_stream_context_t* const stream_context)
42
2
{
43
2
  assert(norm_type == 2);
44
2
  ccv_cnnp_compiled_data_t* const compiled_data = model->compiled_data;
45
2
  assert(compiled_data);
46
2
  const int parallel_count = ccv_max(model->parallel_count, 1);
47
2
  ccv_nnc_tensor_t* norm2[parallel_count * 2];
48
2
  ccv_nnc_tensor_t* max_normt[parallel_count];
49
2
  const int stream_type = model->compiled_data->stream_type;
50
2
  int i;
51
2
  if (stream_type == CCV_STREAM_CONTEXT_GPU)
52
0
  {
53
0
    for (i = 0; i < parallel_count; i++)
54
0
    {
55
0
      ccv_nnc_tensor_param_t info = {
56
0
        .type = CCV_TENSOR_GPU_MEMORY,
57
0
        .format = CCV_TENSOR_FORMAT_NHWC,
58
0
        .datatype = CCV_32F,
59
0
        .dim = {1},
60
0
      };
61
0
      CCV_TENSOR_SET_DEVICE_ID(info.type, i);
62
0
      norm2[i * 2] = ccv_nnc_tensor_new(ccv_nnc_xpu_alloc(&compiled_data->xpu_alloc, i, stream_context, ccv_nnc_tensor_data_size(info)), info, 0);
63
0
      norm2[i * 2 + 1] = ccv_nnc_tensor_new(ccv_nnc_xpu_alloc(&compiled_data->xpu_alloc, i, stream_context, ccv_nnc_tensor_data_size(info)), info, 0);
64
0
      max_normt[i] = ccv_nnc_tensor_new(ccv_nnc_xpu_alloc(&compiled_data->xpu_alloc, i, stream_context, ccv_nnc_tensor_data_size(info)), info, 0);
65
0
    }
66
2
  } else {
67
4
    for (i = 0; i < parallel_count; 
i++2
)
68
2
    {
69
2
      ccv_nnc_tensor_param_t info = {
70
2
        .type = CCV_TENSOR_CPU_MEMORY,
71
2
        .format = CCV_TENSOR_FORMAT_NHWC,
72
2
        .datatype = CCV_32F,
73
2
        .dim = {1},
74
2
      };
75
2
      norm2[i * 2] = ccv_nnc_tensor_new(0, info, 0);
76
2
      norm2[i * 2 + 1] = ccv_nnc_tensor_new(0, info, 0);
77
2
      max_normt[i] = ccv_nnc_tensor_new(0, info, 0);
78
2
    }
79
2
  }
80
  // zero out old norm2.
81
2
  if (parallel_count > 1)
82
0
  {
83
0
    ccv_nnc_stream_context_t* streams[parallel_count];
84
0
    ccv_nnc_stream_signal_t* signal;
85
0
    if (stream_context)
86
0
      signal = ccv_nnc_stream_context_emit_signal_new(stream_context);
87
0
    for (i = 0; i < parallel_count; i++)
88
0
    {
89
0
      const int stream_type = CCV_TENSOR_GET_MEMORY(norm2[i * 2]->info.type) == CCV_TENSOR_GPU_MEMORY ? CCV_STREAM_CONTEXT_GPU : CCV_STREAM_CONTEXT_CPU;
90
0
      const int device_id = CCV_TENSOR_GET_DEVICE_ID(norm2[i * 2]->info.type);
91
0
      int type = stream_type;
92
0
      CCV_STREAM_SET_DEVICE_ID(type, device_id);
93
0
      ccv_nnc_stream_context_t* const stream_0 = ccv_cnnp_compiled_data_get_stream(compiled_data, type);
94
      // Wait signal to finish.
95
0
      if (stream_context)
96
0
        ccv_nnc_stream_context_wait_signal(stream_0, signal);
97
0
      ccv_nnc_cmd_exec(CMD_SET_FORWARD(0), ccv_nnc_no_hint, 0, 0, 0, TENSOR_LIST(norm2[i * 2]), stream_0);
98
0
      if (stream_context)
99
0
      {
100
0
        ccv_nnc_stream_signal_t* const signal = ccv_nnc_stream_context_emit_signal_new(stream_0);
101
0
        ccv_nnc_stream_context_wait_signal(stream_context, signal);
102
0
      }
103
0
      streams[i] = stream_0;
104
0
    }
105
    // If this should be blocking, blocking it.
106
0
    if (!stream_context)
107
0
      for (i = 0; i < parallel_count; i++)
108
0
        if (streams[i])
109
0
          ccv_nnc_stream_context_wait(streams[i]);
110
2
  } else {
111
2
    ccv_nnc_cmd_exec(CMD_SET_FORWARD(0), ccv_nnc_no_hint, 0, 0, 0, TENSOR_LIST(norm2[0]), stream_context);
112
2
  }
113
  // Gather norm2.
114
2
  ccv_nnc_cmd_t reduce_cmd = {
115
2
    .cmd = CCV_NNC_CUSTOM_FORWARD,
116
2
    .isa = &clip_grad_norm_reduce_norm2_vtab,
117
2
  };
118
2
  ccv_cnnp_model_parameter_gradients_map(model, parameters, reduce_cmd, ccv_nnc_no_hint, 0, 0, 0, norm2, parallel_count * 2, stream_context);
119
  // Now compute max(max_norm / norm2, 1.0).
120
2
  if (parallel_count > 1)
121
0
  {
122
0
    ccv_nnc_stream_context_t* streams[parallel_count];
123
0
    ccv_nnc_stream_signal_t* signal;
124
0
    if (stream_context)
125
0
      signal = ccv_nnc_stream_context_emit_signal_new(stream_context);
126
0
    for (i = 0; i < parallel_count; i++)
127
0
    {
128
0
      const int stream_type = CCV_TENSOR_GET_MEMORY(norm2[i * 2]->info.type) == CCV_TENSOR_GPU_MEMORY ? CCV_STREAM_CONTEXT_GPU : CCV_STREAM_CONTEXT_CPU;
129
0
      const int device_id = CCV_TENSOR_GET_DEVICE_ID(norm2[i * 2]->info.type);
130
0
      int type = stream_type;
131
0
      CCV_STREAM_SET_DEVICE_ID(type, device_id);
132
0
      ccv_nnc_stream_context_t* const stream_0 = ccv_cnnp_compiled_data_get_stream(compiled_data, type);
133
      // Wait signal to finish.
134
0
      if (stream_context)
135
0
        ccv_nnc_stream_context_wait_signal(stream_0, signal);
136
0
      ccv_nnc_cmd_exec(CMD_EWSQRT_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(norm2[i * 2]), TENSOR_LIST(norm2[i * 2]), stream_0);
137
0
      ccv_nnc_cmd_exec(CMD_SET_FORWARD(max_norm), ccv_nnc_no_hint, 0, 0, 0, TENSOR_LIST(max_normt[i]), stream_0);
138
0
      ccv_nnc_cmd_exec(CMD_EWDIV_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(max_normt[i], norm2[i * 2]), TENSOR_LIST(norm2[i * 2]), stream_0);
139
0
      ccv_nnc_cmd_exec(CMD_CLAMP_FORWARD(NAN, 1), ccv_nnc_no_hint, 0, TENSOR_LIST(norm2[i * 2]), TENSOR_LIST(norm2[i * 2]), stream_0);
140
0
      if (stream_context)
141
0
      {
142
0
        ccv_nnc_stream_signal_t* const signal = ccv_nnc_stream_context_emit_signal_new(stream_0);
143
0
        ccv_nnc_stream_context_wait_signal(stream_context, signal);
144
0
      }
145
0
      streams[i] = stream_0;
146
0
    }
147
    // If this should be blocking, blocking it.
148
0
    if (!stream_context)
149
0
      for (i = 0; i < parallel_count; i++)
150
0
        if (streams[i])
151
0
          ccv_nnc_stream_context_wait(streams[i]);
152
2
  } else {
153
2
    ccv_nnc_cmd_exec(CMD_EWSQRT_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(norm2[0]), TENSOR_LIST(norm2[0]), stream_context);
154
2
    ccv_nnc_cmd_exec(CMD_SET_FORWARD(max_norm), ccv_nnc_no_hint, 0, 0, 0, TENSOR_LIST(max_normt[0]), stream_context);
155
2
    ccv_nnc_cmd_exec(CMD_EWDIV_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(max_normt[0], norm2[0]), TENSOR_LIST(norm2[0]), stream_context);
156
2
    ccv_nnc_cmd_exec(CMD_CLAMP_FORWARD(NAN, 1), ccv_nnc_no_hint, 0, TENSOR_LIST(norm2[0]), TENSOR_LIST(norm2[0]), stream_context);
157
2
  }
158
2
  ccv_nnc_cmd_t scatter_cmd = {
159
2
    .cmd = CCV_NNC_CUSTOM_FORWARD,
160
2
    .isa = &clip_grad_norm_scatter_norm2_vtab,
161
2
  };
162
2
  ccv_cnnp_model_parameter_gradients_map(model, parameters, scatter_cmd, ccv_nnc_no_hint, 0, norm2, parallel_count * 2, 0, 0, stream_context);
163
2
  if (stream_type == CCV_STREAM_CONTEXT_GPU)
164
0
    for (i = 0; i < parallel_count; i++)
165
0
    {
166
0
      ccv_nnc_xpu_free(&compiled_data->xpu_alloc, norm2[i * 2]->data.u8);
167
0
      ccv_nnc_xpu_free(&compiled_data->xpu_alloc, norm2[i * 2 + 1]->data.u8);
168
0
      ccv_nnc_xpu_free(&compiled_data->xpu_alloc, max_normt[i]->data.u8);
169
0
    }
170
4
  for (i = 0; i < parallel_count; 
i++2
)
171
2
  {
172
2
    ccv_nnc_tensor_free(norm2[i * 2]);
173
2
    ccv_nnc_tensor_free(norm2[i * 2 + 1]);
174
2
    ccv_nnc_tensor_free(max_normt[i]);
175
2
  }
176
2
}
177
178
// MARK - Add-on Functions
179
180
static int _ccv_cnnp_model_isnan(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
181
0
{
182
0
  const int device_id = CCV_TENSOR_GET_DEVICE_ID(inputs[0]->info.type);
183
0
  ccv_nnc_tensor_t* const old_isnanr = outputs[1 + device_id * 2];
184
0
  ccv_nnc_tensor_t* const isnanr = outputs[1 + device_id * 2 + 1];
185
0
  ccv_nnc_cmd_t reduce_cmd = CMD_REDUCE_ISNAN_FORWARD();
186
0
  reduce_cmd.info.reduce.count = ccv_nnc_tensor_nd(inputs[0]->info.dim);
187
0
  int i;
188
0
  for (i = 0; i < cmd.info.reduce.count; i++)
189
0
    reduce_cmd.info.reduce.axis[i] = i;
190
0
  ccv_nnc_cmd_exec(reduce_cmd, hint, flags, TENSOR_LIST(inputs[0]), TENSOR_LIST(isnanr), stream_context);
191
0
  ccv_nnc_cmd_exec(CMD_EWSUM_FORWARD(), hint, flags, TENSOR_LIST(old_isnanr, isnanr), TENSOR_LIST(old_isnanr), stream_context);
192
0
  return CCV_NNC_EXEC_SUCCESS;
193
0
}
194
195
static ccv_nnc_cmd_vtab_t reduce_isnan_vtab = {
196
  .exec = _ccv_cnnp_model_isnan
197
};
198
199
int ccv_cnnp_model_parameter_gradients_isnan(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameters, ccv_nnc_stream_context_t* const stream_context)
200
0
{
201
0
  ccv_cnnp_compiled_data_t* const compiled_data = model->compiled_data;
202
0
  assert(compiled_data);
203
0
  const int parallel_count = ccv_max(model->parallel_count, 1);
204
0
  ccv_nnc_tensor_t* isnanr[parallel_count * 2];
205
0
  const int stream_type = model->compiled_data->stream_type;
206
0
  int i;
207
0
  if (stream_type == CCV_STREAM_CONTEXT_GPU)
208
0
  {
209
0
    for (i = 0; i < parallel_count; i++)
210
0
    {
211
0
      ccv_nnc_tensor_param_t info = {
212
0
        .type = CCV_TENSOR_GPU_MEMORY,
213
0
        .format = CCV_TENSOR_FORMAT_NHWC,
214
0
        .datatype = CCV_32S,
215
0
        .dim = {1},
216
0
      };
217
0
      CCV_TENSOR_SET_DEVICE_ID(info.type, i);
218
0
      isnanr[i * 2] = ccv_nnc_tensor_new(ccv_nnc_xpu_alloc(&compiled_data->xpu_alloc, i, stream_context, ccv_nnc_tensor_data_size(info)), info, 0);
219
0
      isnanr[i * 2 + 1] = ccv_nnc_tensor_new(ccv_nnc_xpu_alloc(&compiled_data->xpu_alloc, i, stream_context, ccv_nnc_tensor_data_size(info)), info, 0);
220
0
    }
221
0
  } else {
222
0
    for (i = 0; i < parallel_count; i++)
223
0
    {
224
0
      ccv_nnc_tensor_param_t info = {
225
0
        .type = CCV_TENSOR_CPU_MEMORY,
226
0
        .format = CCV_TENSOR_FORMAT_NHWC,
227
0
        .datatype = CCV_32S,
228
0
        .dim = {1},
229
0
      };
230
0
      isnanr[i * 2] = ccv_nnc_tensor_new(0, info, 0);
231
0
      isnanr[i * 2 + 1] = ccv_nnc_tensor_new(0, info, 0);
232
0
    }
233
0
  }
234
  // zero out old isnanr.
235
0
  if (parallel_count > 1)
236
0
  {
237
0
    ccv_nnc_stream_context_t* streams[parallel_count];
238
0
    ccv_nnc_stream_signal_t* signal;
239
0
    if (stream_context)
240
0
      signal = ccv_nnc_stream_context_emit_signal_new(stream_context);
241
0
    for (i = 0; i < parallel_count; i++)
242
0
    {
243
0
      const int stream_type = CCV_TENSOR_GET_MEMORY(isnanr[i * 2]->info.type) == CCV_TENSOR_GPU_MEMORY ? CCV_STREAM_CONTEXT_GPU : CCV_STREAM_CONTEXT_CPU;
244
0
      const int device_id = CCV_TENSOR_GET_DEVICE_ID(isnanr[i * 2]->info.type);
245
0
      int type = stream_type;
246
0
      CCV_STREAM_SET_DEVICE_ID(type, device_id);
247
0
      ccv_nnc_stream_context_t* const stream_0 = ccv_cnnp_compiled_data_get_stream(compiled_data, type);
248
      // Wait signal to finish.
249
0
      if (stream_context)
250
0
        ccv_nnc_stream_context_wait_signal(stream_0, signal);
251
0
      ccv_nnc_cmd_exec(CMD_SET_FORWARD(0), ccv_nnc_no_hint, 0, 0, 0, TENSOR_LIST(isnanr[i * 2]), stream_0);
252
0
      if (stream_context)
253
0
      {
254
0
        ccv_nnc_stream_signal_t* const signal = ccv_nnc_stream_context_emit_signal_new(stream_0);
255
0
        ccv_nnc_stream_context_wait_signal(stream_context, signal);
256
0
      }
257
0
      streams[i] = stream_0;
258
0
    }
259
    // If this should be blocking, blocking it.
260
0
    if (!stream_context)
261
0
      for (i = 0; i < parallel_count; i++)
262
0
        if (streams[i])
263
0
          ccv_nnc_stream_context_wait(streams[i]);
264
0
  } else
265
0
    ccv_nnc_cmd_exec(CMD_SET_FORWARD(0), ccv_nnc_no_hint, 0, 0, 0, TENSOR_LIST(isnanr[0]), stream_context);
266
  // Gather isnanr.
267
0
  ccv_nnc_cmd_t reduce_cmd = {
268
0
    .cmd = CCV_NNC_CUSTOM_FORWARD,
269
0
    .isa = &reduce_isnan_vtab,
270
0
  };
271
0
  ccv_cnnp_model_parameter_gradients_map(model, parameters, reduce_cmd, ccv_nnc_no_hint, 0, 0, 0, isnanr, parallel_count * 2, stream_context);
272
0
  for (i = 0; i < parallel_count; i++)
273
0
    ccv_nnc_tensor_free(isnanr[i * 2 + 1]);
274
0
  int retval = 0;
275
0
  if (stream_type == CCV_TENSOR_GPU_MEMORY)
276
0
  {
277
0
    ccv_nnc_tensor_param_t info = {
278
0
      .type = CCV_TENSOR_CPU_MEMORY,
279
0
      .format = CCV_TENSOR_FORMAT_NHWC,
280
0
      .datatype = CCV_32S,
281
0
      .dim = {1},
282
0
    };
283
0
    ccv_nnc_tensor_t* checknan = ccv_nnc_tensor_new(0, info, 0);
284
0
    for (i = 0; i < parallel_count; i++)
285
0
    {
286
0
      ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(isnanr[i * 2]), TENSOR_LIST(checknan), 0);
287
0
      if (checknan->data.i32[0] > 0)
288
0
      {
289
0
        retval = 1;
290
0
        break;
291
0
      }
292
0
    }
293
0
    ccv_nnc_tensor_free(checknan);
294
0
  } else {
295
0
    for (i = 0; i < parallel_count; i++)
296
0
      if (isnanr[i * 2]->data.i32[0] > 0)
297
0
      {
298
0
        retval = 1;
299
0
        break;
300
0
      }
301
0
  }
302
0
  for (i = 0; i < parallel_count; i++)
303
0
    ccv_nnc_tensor_free(isnanr[i * 2]);
304
0
  return retval;
305
0
}
306
307
// MARK - Core Layers
308
309
static void _ccv_cnnp_sum_build(ccv_cnnp_model_t* const self, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
310
64
{
311
64
  PRINT(CCV_CLI_VERBOSE, "[cnnp_sum_build] -\n");
312
64
  assert(output_size == 1);
313
64
  outputs[0] = ccv_nnc_tensor_symbol_new(graph, ccv_nnc_tensor_symbol_params(graph, inputs[0]), 0);
314
64
  ccv_nnc_graph_exec_symbol_new(graph, CMD_EWSUM_FORWARD(), inputs, input_size, outputs, output_size, 0);
315
64
}
316
317
static ccv_cnnp_model_t* _ccv_cnnp_sum_copy(const ccv_cnnp_model_t* const self, void* const context);
318
319
static const ccv_cnnp_model_vtab_t ccv_cnnp_sum_isa = {
320
  .build = _ccv_cnnp_sum_build,
321
  .copy = _ccv_cnnp_sum_copy,
322
};
323
324
typedef struct {
325
  ccv_cnnp_model_t super;
326
  ccv_nnc_tensor_symbol_t output;
327
} ccv_cnnp_model_sum_t;
328
329
ccv_cnnp_model_t* ccv_cnnp_sum(const char* const name)
330
63
{
331
63
  ccv_cnnp_model_sum_t* const model_sum = (ccv_cnnp_model_sum_t*)cccalloc(1, sizeof(ccv_cnnp_model_sum_t));
332
63
  model_sum->super.isa = &ccv_cnnp_sum_isa;
333
63
  model_sum->super.input_size = 0;
334
63
  model_sum->super.outputs = &model_sum->output;
335
63
  model_sum->super.output_size = 1;
336
63
  ccv_cnnp_model_copy_name(&model_sum->super, name);
337
63
  return (ccv_cnnp_model_t*)model_sum;
338
63
}
339
340
static ccv_cnnp_model_t* _ccv_cnnp_sum_copy(const ccv_cnnp_model_t* const self, void* const context)
341
3
{
342
3
  return ccv_cnnp_sum(self->name);
343
3
}
344
345
typedef struct {
346
  ccv_cnnp_model_t super;
347
  int axis;
348
  ccv_nnc_tensor_symbol_t output;
349
} ccv_cnnp_model_concat_t;
350
351
static void _ccv_cnnp_concat_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
352
4
{
353
4
  const ccv_cnnp_model_concat_t* const self = (const ccv_cnnp_model_concat_t*)super;
354
4
  PRINT(CCV_CLI_VERBOSE, "[cnnp_concat_build] 1. -\n");
355
4
  assert(output_size == 1);
356
4
  ccv_nnc_tensor_param_t output_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
357
4
  int i, j;
358
4
  if (output_params.dim[0] == 0)
359
0
    for (i = 1; i < input_size; i++)
360
0
    {
361
0
      output_params = ccv_nnc_tensor_symbol_params(graph, inputs[i]);
362
0
      if (output_params.dim[0] != 0)
363
0
        break;
364
0
    }
365
4
  const int nd = ccv_nnc_tensor_nd(output_params.dim);
366
4
  const int axis = self->axis;
367
4
  assert(axis < nd);
368
4
  output_params.dim[axis] = 0;
369
4
  int input_is_contiguous = 1;
370
12
  for (i = 0; i < input_size; 
i++8
)
371
8
  {
372
8
    const ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[i]);
373
8
    const int input_nd = ccv_nnc_tensor_nd(input_params.dim);
374
8
    if (input_nd == 0)
375
0
    {
376
0
      PRINT(CCV_CLI_VERBOSE, "[cnnp_concat_build] %d. input[%d]: -\n", i + 2, i);
377
0
      input_is_contiguous = 0;
378
0
      continue;
379
0
    }
380
8
    if (CCV_CLI_OUTPUT_LEVEL_IS(CCV_CLI_VERBOSE))
381
0
    {
382
0
      PRINT(CCV_CLI_VERBOSE, "[cnnp_concat_build] %d. input[%d]: (%d", i + 2, i, input_params.dim[0]);
383
0
      int i;
384
0
      for (i = 1; i < CCV_NNC_MAX_DIM_ALLOC && input_params.dim[i] > 0; i++)
385
0
        PRINT(CCV_CLI_VERBOSE, ", %d", input_params.dim[i]);
386
0
      PRINT(CCV_CLI_VERBOSE, ")\n");
387
0
    }
388
8
    assert(input_nd == nd);
389
16
    
for (j = 0; 8
j < nd;
j++8
)
390
8
      if (j != axis)
391
0
        { assert(input_params.dim[j] == output_params.dim[j]); }
392
8
    output_params.dim[axis] += input_params.dim[axis];
393
8
  }
394
4
  outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
395
4
  int ofs[CCV_NNC_MAX_DIM_ALLOC] = {};
396
4
  int stride[CCV_NNC_MAX_DIM_ALLOC] = {};
397
4
  ccv_nnc_tensor_get_stride(output_params.dim, stride);
398
4
  if (input_is_contiguous)
399
4
  {
400
4
    ccv_nnc_tensor_symbol_t aliases[input_size];
401
12
    for (i = 0; i < input_size; 
i++8
)
402
8
    {
403
8
      const ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[i]);
404
8
      aliases[i] = ccv_nnc_tensor_symbol_alias_new(graph, outputs[0], ofs, stride, input_params, 0);
405
8
      ofs[axis] += input_params.dim[axis];
406
8
    }
407
    // Format transform is more flexible.
408
4
    ccv_nnc_graph_exec_symbol_new(graph, CMD_FORMAT_TRANSFORM_FORWARD(), inputs, input_size, aliases, input_size, "concat");
409
4
  } else {
410
0
    ccv_nnc_tensor_symbol_t aliases[input_size];
411
0
    for (i = 0; i < input_size; i++)
412
0
    {
413
0
      const ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[i]);
414
0
      if (input_params.dim[0] == 0)
415
0
      {
416
        // Create a new alias anyway, but not going to use it, in this way, the alias count will match during absorb.
417
0
        aliases[i] = ccv_nnc_tensor_symbol_alias_new(graph, outputs[0], ofs, stride, input_params, 0);
418
0
        continue;
419
0
      }
420
0
      aliases[i] = ccv_nnc_tensor_symbol_alias_new(graph, outputs[0], ofs, stride, input_params, 0);
421
0
      ofs[axis] += input_params.dim[axis];
422
0
    }
423
    // Format transform is more flexible.
424
0
    ccv_nnc_graph_exec_symbol_new(graph, CMD_FORMAT_TRANSFORM_FORWARD(), inputs, input_size, aliases, input_size, "concat");
425
0
  }
426
4
}
427
428
static ccv_cnnp_model_t* _ccv_cnnp_concat_copy(const ccv_cnnp_model_t* const self, void* const context);
429
430
static const ccv_cnnp_model_vtab_t ccv_cnnp_concat_isa = {
431
  .build = _ccv_cnnp_concat_build,
432
  .copy = _ccv_cnnp_concat_copy,
433
};
434
435
ccv_cnnp_model_t* ccv_cnnp_concat(const int axis, const char* const name)
436
4
{
437
4
  ccv_cnnp_model_concat_t* const model_concat = (ccv_cnnp_model_concat_t*)cccalloc(1, sizeof(ccv_cnnp_model_concat_t));
438
4
  model_concat->super.isa = &ccv_cnnp_concat_isa;
439
4
  model_concat->super.input_size = 0;
440
4
  model_concat->super.outputs = &model_concat->output;
441
4
  model_concat->super.output_size = 1;
442
4
  model_concat->axis = axis;
443
4
  ccv_cnnp_model_copy_name(&model_concat->super, name);
444
4
  return (ccv_cnnp_model_t*)model_concat;
445
4
}
446
447
static ccv_cnnp_model_t* _ccv_cnnp_concat_copy(const ccv_cnnp_model_t* const super, void* const context)
448
0
{
449
0
  const ccv_cnnp_model_concat_t* const self = (const ccv_cnnp_model_concat_t*)super;
450
0
  return ccv_cnnp_concat(self->axis, self->super.name);
451
0
}
452
453
typedef struct {
454
  ccv_cnnp_model_t super;
455
  int axis;
456
  ccv_nnc_tensor_symbol_t outputs[1];
457
} ccv_cnnp_model_chunk_t;
458
459
static void _ccv_cnnp_chunk_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
460
2
{
461
2
  const ccv_cnnp_model_concat_t* const self = (const ccv_cnnp_model_concat_t*)super;
462
2
  PRINT(CCV_CLI_VERBOSE, "[cnnp_chunk_build] 1. axis: %d\n", self->axis);
463
2
  assert(input_size == 1);
464
2
  const ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
465
2
  if (CCV_CLI_OUTPUT_LEVEL_IS(CCV_CLI_VERBOSE))
466
0
  {
467
0
    PRINT(CCV_CLI_VERBOSE, "[cnnp_chunk_build] 2. input: (%d", input_params.dim[0]);
468
0
    int i;
469
0
    for (i = 1; i < CCV_NNC_MAX_DIM_ALLOC && input_params.dim[i] > 0; i++)
470
0
      PRINT(CCV_CLI_VERBOSE, ", %d", input_params.dim[i]);
471
0
    PRINT(CCV_CLI_VERBOSE, ")\n");
472
0
  }
473
2
  ccv_nnc_tensor_param_t output_params = input_params;
474
2
  int i;
475
2
  const int nd = ccv_nnc_tensor_nd(output_params.dim);
476
2
  const int axis = self->axis;
477
2
  assert(axis < nd);
478
2
  const int n = self->super.output_size;
479
2
  assert(n == output_size);
480
2
  assert(output_params.dim[axis] % n == 0);
481
2
  output_params.dim[axis] = output_params.dim[axis] / n;
482
2
  int ofs[CCV_NNC_MAX_DIM_ALLOC] = {};
483
2
  int stride[CCV_NNC_MAX_DIM_ALLOC] = {};
484
2
  ccv_nnc_tensor_get_stride(input_params.dim, stride);
485
2
  ccv_nnc_tensor_symbol_t to = ccv_nnc_tensor_symbol_alias_to(graph, inputs[0]);
486
2
  if (to.d == CCV_NNC_NO_TENSOR_SYMBOL) // If we are not reshape an alias, it is straightforward.
487
2
  {
488
6
    for (i = 0; i < output_size; 
i++4
)
489
4
    {
490
4
      outputs[i] = ccv_nnc_tensor_symbol_alias_new(graph, inputs[0], ofs, stride, output_params, 0);
491
4
      ofs[axis] += output_params.dim[axis];
492
4
    }
493
2
  } else {
494
    // Otherwise, we need to check if it is permute. For permute, we cannot do alias directly.
495
    // We need to first materialize the permute and then run reshape on top of it, otherwise it will be wrong.
496
0
    int old_stride[CCV_NNC_MAX_DIM_ALLOC];
497
0
    ccv_nnc_tensor_symbol_alias_params(graph, inputs[0], 0, old_stride);
498
    // We identify permute by checking if the stride is not in descending order.
499
    // This also covered "permute" through reshape, rather than using ccv_cnnp_permute directly.
500
0
    int i, no_permute = 1;
501
0
    for (i = 1; no_permute && i < nd; i++)
502
0
      if (old_stride[i - 1] < old_stride[i])
503
0
        no_permute = 0;
504
0
    if (no_permute)
505
0
    { // Just straightforward reshape if there is no no permute.
506
0
      for (i = 0; i < output_size; i++)
507
0
      {
508
0
        outputs[i] = ccv_nnc_tensor_symbol_alias_new(graph, inputs[0], ofs, old_stride, output_params, 0);
509
0
        ofs[axis] += output_params.dim[axis];
510
0
      }
511
0
    } else {
512
      // Otherwise, we first do format transform to plain tensor and then do reshape.
513
0
      ccv_nnc_tensor_symbol_t permuted = ccv_nnc_tensor_symbol_new(graph, input_params, 0);
514
0
      ccv_nnc_graph_exec_symbol_new(graph, CMD_FORMAT_TRANSFORM_FORWARD(), TENSOR_SYMBOL_LIST(inputs[0]), TENSOR_SYMBOL_LIST(permuted), "reshape");
515
0
      for (i = 0; i < output_size; i++)
516
0
      {
517
0
        outputs[i] = ccv_nnc_tensor_symbol_alias_new(graph, permuted, ofs, stride, output_params, 0);
518
0
        ofs[axis] += output_params.dim[axis];
519
0
      }
520
0
    }
521
0
  }
522
2
}
523
524
static ccv_cnnp_model_t* _ccv_cnnp_chunk_copy(const ccv_cnnp_model_t* const self, void* const context);
525
526
static const ccv_cnnp_model_vtab_t ccv_cnnp_chunk_isa = {
527
  .build = _ccv_cnnp_chunk_build,
528
  .copy = _ccv_cnnp_chunk_copy,
529
};
530
531
ccv_cnnp_model_t* ccv_cnnp_chunk(const int n, const int axis, const char* const name)
532
2
{
533
2
  assert(n >= 1);
534
2
  ccv_cnnp_model_chunk_t* const model_chunk = (ccv_cnnp_model_chunk_t*)cccalloc(1, sizeof(ccv_cnnp_model_chunk_t) + sizeof(ccv_nnc_tensor_symbol_t) * (n - 1));
535
2
  model_chunk->super.isa = &ccv_cnnp_chunk_isa;
536
2
  model_chunk->super.input_size = 1;
537
2
  model_chunk->super.outputs = model_chunk->outputs;
538
2
  model_chunk->super.output_size = n;
539
2
  model_chunk->axis = axis;
540
2
  ccv_cnnp_model_copy_name(&model_chunk->super, name);
541
2
  return (ccv_cnnp_model_t*)model_chunk;
542
2
}
543
544
static ccv_cnnp_model_t* _ccv_cnnp_chunk_copy(const ccv_cnnp_model_t* const super, void* const context)
545
0
{
546
0
  const ccv_cnnp_model_chunk_t* const self = (const ccv_cnnp_model_chunk_t*)super;
547
0
  return ccv_cnnp_chunk(self->super.output_size, self->axis, self->super.name);
548
0
}
549
550
typedef struct {
551
  ccv_cnnp_model_t super;
552
  ccv_nnc_tensor_symbol_t output;
553
  int format;
554
  int dim[CCV_NNC_MAX_DIM_ALLOC];
555
  int ofs[CCV_NNC_MAX_DIM_ALLOC];
556
  int stride[CCV_NNC_MAX_DIM_ALLOC];
557
} ccv_cnnp_model_reshape_t;
558
559
static void _ccv_cnnp_reshape_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
560
1.06k
{
561
1.06k
  assert(input_size == 1);
562
1.06k
  assert(output_size == 1);
563
1.06k
  ccv_cnnp_model_reshape_t* const self = (ccv_cnnp_model_reshape_t*)super;
564
1.06k
  if (CCV_CLI_OUTPUT_LEVEL_IS(CCV_CLI_VERBOSE))
565
0
  {
566
0
    PRINT(CCV_CLI_VERBOSE, "[cnnp_reshape_build] 1. dim: (%d", self->dim[0]);
567
0
    int i;
568
0
    for (i = 1; i < CCV_NNC_MAX_DIM_ALLOC && self->dim[i] > 0; i++)
569
0
      PRINT(CCV_CLI_VERBOSE, ", %d", self->dim[i]);
570
0
    const int count = i;
571
0
    PRINT(CCV_CLI_VERBOSE, "), ofs: (%d", self->ofs[0]);
572
0
    for (i = 1; i < count; i++)
573
0
      PRINT(CCV_CLI_VERBOSE, ", %d", self->ofs[i]);
574
0
    PRINT(CCV_CLI_VERBOSE, "), stride: (%d", self->stride[0]);
575
0
    for (i = 1; i < count; i++)
576
0
      PRINT(CCV_CLI_VERBOSE, ", %d", self->stride[i]);
577
0
    PRINT(CCV_CLI_VERBOSE, ")\n");
578
0
  }
579
1.06k
  ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
580
1.06k
  int dim[CCV_NNC_MAX_DIM_ALLOC];
581
1.06k
  memcpy(dim, self->dim, sizeof(dim));
582
1.06k
  int i, auto_idx = -1;
583
1.06k
  size_t known = 1;
584
1.06k
  const size_t tensor_count = ccv_nnc_tensor_count(params);
585
2.26k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC && dim[i]; 
i++1.19k
)
586
1.19k
    if (dim[i] == -1)
587
1
      auto_idx = i;
588
1.19k
    else
589
1.19k
      known *= dim[i];
590
1.06k
  if (auto_idx >= 0)
591
1
  {
592
1
    assert(known > 0 && tensor_count % known == 0);
593
1
    dim[auto_idx] = tensor_count / known;
594
1
  }
595
1.06k
  if (CCV_CLI_OUTPUT_LEVEL_IS(CCV_CLI_VERBOSE))
596
0
  {
597
0
    PRINT(CCV_CLI_VERBOSE, "[cnnp_reshape_build] 2. input: (%d", params.dim[0]);
598
0
    int i;
599
0
    for (i = 1; i < CCV_NNC_MAX_DIM_ALLOC && params.dim[i] > 0; i++)
600
0
      PRINT(CCV_CLI_VERBOSE, ", %d", params.dim[i]);
601
0
    PRINT(CCV_CLI_VERBOSE, ")\n");
602
0
  }
603
1.06k
  assert(ccv_nnc_dimension_count(dim) <= ccv_nnc_tensor_count(params));
604
1.06k
  ccv_nnc_tensor_symbol_t to = ccv_nnc_tensor_symbol_alias_to(graph, inputs[0]);
605
1.06k
  int stride_from_dim[CCV_NNC_MAX_DIM_ALLOC];
606
1.06k
  if (to.d == CCV_NNC_NO_TENSOR_SYMBOL) // If we are not reshape an alias, it is straightforward.
607
1.06k
  {
608
1.06k
    memcpy(params.dim, dim, sizeof(params.dim));
609
1.06k
    int* stride;
610
1.06k
    if (self->stride[0] == 0)
611
1.06k
    {
612
1.06k
      ccv_nnc_tensor_get_stride(dim, stride_from_dim);
613
1.06k
      stride = stride_from_dim;
614
1.06k
    } else
615
5
      stride = self->stride;
616
1.06k
    if (self->format > 0)
617
5
      params.format = self->format;
618
1.06k
    outputs[0] = ccv_nnc_tensor_symbol_alias_new(graph, inputs[0], self->ofs, stride, params, 0);
619
1.06k
  } else {
620
    // Otherwise, we need to check if it is permute. For permute, we cannot do alias directly.
621
    // We need to first materialize the permute and then run reshape on top of it, otherwise it will be wrong.
622
1
    int old_stride[CCV_NNC_MAX_DIM_ALLOC];
623
1
    ccv_nnc_tensor_symbol_alias_params(graph, inputs[0], 0, old_stride);
624
    // We identify permute by checking if the stride is not in descending order.
625
    // This also covered "permute" through reshape, rather than using ccv_cnnp_permute directly.
626
1
    const int nd = ccv_nnc_tensor_nd(params.dim);
627
1
    const int new_nd = ccv_nnc_tensor_nd(dim);
628
1
    int i, no_permute = 1;
629
    // If the new dim has different nd, or we actually have a stride, we need to check if it is no permute or not.
630
1
    if (new_nd != nd || 
(0
self->stride[0] != 00
&&
memcmp(self->stride, old_stride, sizeof(self->stride))0
))
631
2
      
for (i = 1; 1
no_permute &&
i < nd1
;
i++1
)
632
1
        if (old_stride[i - 1] < old_stride[i])
633
1
          no_permute = 0;
634
1
    if (no_permute)
635
0
    { // Just straightforward reshape if there is no no permute.
636
0
      memcpy(params.dim, dim, sizeof(params.dim));
637
0
      int* stride;
638
0
      if (self->stride[0] == 0)
639
0
      {
640
0
        if (new_nd != nd) // Cannot use old stride.
641
0
        {
642
0
          ccv_nnc_tensor_get_stride(dim, stride_from_dim);
643
0
          stride = stride_from_dim;
644
0
        } else
645
0
          stride = old_stride;
646
0
      } else
647
0
        stride = self->stride;
648
0
      if (self->format > 0)
649
0
        params.format = self->format;
650
0
      outputs[0] = ccv_nnc_tensor_symbol_alias_new(graph, inputs[0], self->ofs, stride, params, 0);
651
1
    } else {
652
      // Otherwise, we first do format transform to plain tensor and then do reshape.
653
1
      ccv_nnc_tensor_symbol_t permuted = ccv_nnc_tensor_symbol_new(graph, params, 0);
654
1
      ccv_nnc_graph_exec_symbol_new(graph, CMD_FORMAT_TRANSFORM_FORWARD(), TENSOR_SYMBOL_LIST(inputs[0]), TENSOR_SYMBOL_LIST(permuted), "reshape");
655
1
      memcpy(params.dim, dim, sizeof(params.dim));
656
1
      int* stride;
657
1
      if (self->stride[0] == 0)
658
1
      {
659
1
        ccv_nnc_tensor_get_stride(dim, stride_from_dim);
660
1
        stride = stride_from_dim;
661
1
      } else
662
0
        stride = self->stride;
663
1
      if (self->format > 0)
664
0
        params.format = self->format;
665
      // And then we create alias against the permuted one.
666
1
      outputs[0] = ccv_nnc_tensor_symbol_alias_new(graph, permuted, self->ofs, stride, params, 0);
667
1
    }
668
1
  }
669
1.06k
}
670
671
static ccv_cnnp_model_t* _ccv_cnnp_reshape_copy(const ccv_cnnp_model_t* const super, void* const context);
672
673
static const ccv_cnnp_model_vtab_t ccv_cnnp_reshape_isa = {
674
  .build = _ccv_cnnp_reshape_build,
675
  .copy = _ccv_cnnp_reshape_copy,
676
};
677
678
ccv_cnnp_model_t* ccv_cnnp_reshape(const int format, const int dim[CCV_NNC_MAX_DIM_ALLOC], const int ofs[CCV_NNC_MAX_DIM_ALLOC], const int stride[CCV_NNC_MAX_DIM_ALLOC], const char* const name)
679
1.06k
{
680
1.06k
  ccv_cnnp_model_reshape_t* const model_reshape = (ccv_cnnp_model_reshape_t*)cccalloc(1, sizeof(ccv_cnnp_model_reshape_t));
681
1.06k
  model_reshape->super.isa = &ccv_cnnp_reshape_isa;
682
1.06k
  model_reshape->super.input_size = 1;
683
1.06k
  model_reshape->super.outputs = &model_reshape->output;
684
1.06k
  model_reshape->super.output_size = 1;
685
1.06k
  ccv_cnnp_model_copy_name(&model_reshape->super, name);
686
1.06k
  model_reshape->format = format;
687
1.06k
  memcpy(model_reshape->dim, dim, sizeof(model_reshape->dim));
688
1.06k
  memcpy(model_reshape->ofs, ofs, sizeof(model_reshape->ofs));
689
1.06k
  if (stride[0] != 0)
690
5
    memcpy(model_reshape->stride, stride, sizeof(model_reshape->stride));
691
1.06k
  return (ccv_cnnp_model_t*)model_reshape;
692
1.06k
}
693
694
static ccv_cnnp_model_t* _ccv_cnnp_reshape_copy(const ccv_cnnp_model_t* const super, void* const context)
695
1.00k
{
696
1.00k
  const ccv_cnnp_model_reshape_t* const self = (const ccv_cnnp_model_reshape_t*)super;
697
1.00k
  return ccv_cnnp_reshape(self->format, self->dim, self->ofs, self->stride, self->super.name);
698
1.00k
}
699
700
typedef struct {
701
  ccv_cnnp_model_t super;
702
  ccv_nnc_tensor_symbol_t output;
703
  int type;
704
  int begin[CCV_NNC_MAX_DIM_ALLOC];
705
  int end[CCV_NNC_MAX_DIM_ALLOC];
706
} ccv_cnnp_model_pad_t;
707
708
static void _ccv_cnnp_pad_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
709
1
{
710
1
  assert(input_size == 1);
711
1
  assert(output_size == 1);
712
1
  ccv_cnnp_model_pad_t* const self = (ccv_cnnp_model_pad_t*)super;
713
1
  PRINT(CCV_CLI_VERBOSE, "[cnnp_pad_build] -\n");
714
1
  const ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
715
1
  const int nd = ccv_nnc_tensor_nd(input_params.dim);
716
1
  ccv_nnc_tensor_param_t params = input_params;
717
1
  int i;
718
5
  for (i = 0 ; i < nd; 
i++4
)
719
4
    params.dim[i] += self->begin[i] + self->end[i];
720
1
  const ccv_nnc_tensor_symbol_t padded = ccv_nnc_tensor_symbol_new(graph, params, 0);
721
1
  ccv_nnc_cmd_t pad = CMD_PAD_FORWARD(self->type, (), ());
722
1
  memcpy(pad.info.size.dim, self->begin, sizeof(pad.info.size.dim));
723
1
  memcpy(pad.info.pad.end, self->end, sizeof(pad.info.pad.end));
724
1
  ccv_nnc_graph_exec_symbol_new(graph, pad, TENSOR_SYMBOL_LIST(inputs[0]), TENSOR_SYMBOL_LIST(padded), "pad");
725
1
  outputs[0] = padded;
726
1
}
727
728
static ccv_cnnp_model_t* _ccv_cnnp_pad_copy(const ccv_cnnp_model_t* const super, void* const context);
729
730
static const ccv_cnnp_model_vtab_t ccv_cnnp_pad_isa = {
731
  .build = _ccv_cnnp_pad_build,
732
  .copy = _ccv_cnnp_pad_copy,
733
};
734
735
ccv_cnnp_model_t* ccv_cnnp_pad(const int type, const int begin[CCV_NNC_MAX_DIM_ALLOC], const int end[CCV_NNC_MAX_DIM_ALLOC], const char* const name)
736
1
{
737
1
  ccv_cnnp_model_pad_t* const model_pad = (ccv_cnnp_model_pad_t*)cccalloc(1, sizeof(ccv_cnnp_model_pad_t));
738
1
  model_pad->super.isa = &ccv_cnnp_pad_isa;
739
1
  model_pad->super.input_size = 1;
740
1
  model_pad->super.outputs = &model_pad->output;
741
1
  model_pad->super.output_size = 1;
742
1
  ccv_cnnp_model_copy_name(&model_pad->super, name);
743
1
  model_pad->type = type;
744
1
  memcpy(model_pad->begin, begin, sizeof(model_pad->begin));
745
1
  memcpy(model_pad->end, end, sizeof(model_pad->end));
746
1
  return (ccv_cnnp_model_t*)model_pad;
747
1
}
748
749
static ccv_cnnp_model_t* _ccv_cnnp_pad_copy(const ccv_cnnp_model_t* const super, void* const context)
750
0
{
751
0
  const ccv_cnnp_model_pad_t* const self = (const ccv_cnnp_model_pad_t*)super;
752
0
  return ccv_cnnp_pad(self->type, self->begin, self->end, self->super.name);
753
0
}
754
755
typedef struct {
756
  ccv_cnnp_model_t super;
757
  ccv_nnc_tensor_symbol_t output;
758
} ccv_cnnp_model_identity_t;
759
760
static void _ccv_cnnp_identity_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
761
0
{
762
0
  assert(input_size == 1);
763
0
  assert(output_size == 1);
764
0
  PRINT(CCV_CLI_VERBOSE, "[cnnp_identity_build] -\n");
765
0
  outputs[0] = inputs[0];
766
0
}
767
768
static ccv_cnnp_model_t* _ccv_cnnp_identity_copy(const ccv_cnnp_model_t* const super, void* const context);
769
770
static const ccv_cnnp_model_vtab_t ccv_cnnp_identity_isa = {
771
  .build = _ccv_cnnp_identity_build,
772
  .copy = _ccv_cnnp_identity_copy,
773
};
774
775
ccv_cnnp_model_t* ccv_cnnp_identity(const char* const name)
776
0
{
777
0
  ccv_cnnp_model_identity_t* const model_identity = (ccv_cnnp_model_identity_t*)cccalloc(1, sizeof(ccv_cnnp_model_identity_t));
778
0
  model_identity->super.isa = &ccv_cnnp_identity_isa;
779
0
  model_identity->super.input_size = 1;
780
0
  model_identity->super.outputs = &model_identity->output;
781
0
  model_identity->super.output_size = 1;
782
0
  ccv_cnnp_model_copy_name(&model_identity->super, name);
783
0
  return (ccv_cnnp_model_t*)model_identity;
784
0
}
785
786
static ccv_cnnp_model_t* _ccv_cnnp_identity_copy(const ccv_cnnp_model_t* const super, void* const context)
787
0
{
788
0
  const ccv_cnnp_model_identity_t* const self = (const ccv_cnnp_model_identity_t*)super;
789
0
  return ccv_cnnp_identity(self->super.name);
790
0
}
791
792
typedef struct {
793
  ccv_cnnp_model_t super;
794
  ccv_nnc_tensor_symbol_t output;
795
  int index[CCV_NNC_MAX_DIM_ALLOC];
796
} ccv_cnnp_model_permute_t;
797
798
static void _ccv_cnnp_permute_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
799
1
{
800
1
  assert(input_size == 1);
801
1
  assert(output_size == 1);
802
1
  ccv_cnnp_model_permute_t* const self = (ccv_cnnp_model_permute_t*)super;
803
1
  PRINT(CCV_CLI_VERBOSE, "[cnnp_permute_build] -\n");
804
1
  ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
805
1
  ccv_nnc_tensor_symbol_t to = ccv_nnc_tensor_symbol_alias_to(graph, inputs[0]);
806
1
  const int nd = ccv_nnc_tensor_nd(params.dim);
807
1
  int input_dim[CCV_NNC_MAX_DIM_ALLOC];
808
1
  memcpy(input_dim, params.dim, sizeof(params.dim));
809
1
  int input_stride[CCV_NNC_MAX_DIM_ALLOC] = {};
810
1
  int output_stride[CCV_NNC_MAX_DIM_ALLOC] = {};
811
1
  if (to.d == CCV_NNC_NO_TENSOR_SYMBOL) // If it is not an alias. Find stride and permute.
812
0
  {
813
0
    ccv_nnc_tensor_get_stride(input_dim, input_stride);
814
0
    int i;
815
0
    for (i = 0; i < nd; i++)
816
0
    {
817
0
      const int idx = self->index[i];
818
0
      assert(idx >= 0 && idx < nd);
819
0
      params.dim[i] = input_dim[idx];
820
0
      output_stride[i] = input_stride[idx];
821
0
    }
822
0
    outputs[0] = ccv_nnc_tensor_symbol_alias_new(graph, inputs[0], ccv_nnc_no_ofs, output_stride, params, 0);
823
1
  } else {
824
    // if it is an alias, we can get the stride from it and use that.
825
1
    int input_ofs[CCV_NNC_MAX_DIM_ALLOC];
826
1
    ccv_nnc_tensor_symbol_alias_params(graph, inputs[0], input_ofs, input_stride);
827
1
    assert(input_stride[0] != 0);
828
1
    int output_ofs[CCV_NNC_MAX_DIM_ALLOC] = {};
829
1
    int i;
830
4
    for (i = 0; i < nd; 
i++3
)
831
3
    {
832
3
      const int idx = self->index[i];
833
3
      assert(idx >= 0 && idx < nd);
834
3
      params.dim[i] = input_dim[idx];
835
3
      output_stride[i] = input_stride[idx];
836
3
      output_ofs[i] = input_ofs[idx];
837
3
    }
838
1
    outputs[0] = ccv_nnc_tensor_symbol_alias_new(graph, inputs[0], output_ofs, output_stride, params, 0);
839
1
  }
840
1
}
841
842
static ccv_cnnp_model_t* _ccv_cnnp_permute_copy(const ccv_cnnp_model_t* const super, void* const context);
843
844
static const ccv_cnnp_model_vtab_t ccv_cnnp_permute_isa = {
845
  .build = _ccv_cnnp_permute_build,
846
  .copy = _ccv_cnnp_permute_copy,
847
};
848
849
ccv_cnnp_model_t* ccv_cnnp_permute(const int index[CCV_NNC_MAX_DIM_ALLOC], const char* const name)
850
1
{
851
1
  ccv_cnnp_model_permute_t* const model_permute = (ccv_cnnp_model_permute_t*)cccalloc(1, sizeof(ccv_cnnp_model_permute_t));
852
1
  model_permute->super.isa = &ccv_cnnp_permute_isa;
853
1
  model_permute->super.input_size = 1;
854
1
  model_permute->super.outputs = &model_permute->output;
855
1
  model_permute->super.output_size = 1;
856
1
  ccv_cnnp_model_copy_name(&model_permute->super, name);
857
1
  memcpy(model_permute->index, index, sizeof(model_permute->index));
858
1
  return (ccv_cnnp_model_t*)model_permute;
859
1
}
860
861
static ccv_cnnp_model_t* _ccv_cnnp_permute_copy(const ccv_cnnp_model_t* const super, void* const context)
862
0
{
863
0
  const ccv_cnnp_model_permute_t* const self = (const ccv_cnnp_model_permute_t*)super;
864
0
  return ccv_cnnp_permute(self->index, self->super.name);
865
0
}
866
867
typedef struct {
868
  ccv_cnnp_model_t super;
869
  int index;
870
  ccv_nnc_tensor_symbol_t output;
871
} ccv_cnnp_model_extract_t;
872
873
static void _ccv_cnnp_extract_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
874
6
{
875
6
  assert(output_size == 1);
876
6
  ccv_cnnp_model_extract_t* const self = (ccv_cnnp_model_extract_t*)super;
877
6
  PRINT(CCV_CLI_VERBOSE, "[cnnp_extract_build] index: %d\n", self->index);
878
6
  outputs[0] = inputs[self->index];
879
6
}
880
881
static ccv_cnnp_model_t* _ccv_cnnp_extract_copy(const ccv_cnnp_model_t* const self, void* const context);
882
883
static const ccv_cnnp_model_vtab_t ccv_cnnp_extract_isa = {
884
  .build = _ccv_cnnp_extract_build,
885
  .copy = _ccv_cnnp_extract_copy,
886
};
887
888
ccv_cnnp_model_t* ccv_cnnp_extract(const int index, const char* const name)
889
6
{
890
6
  ccv_cnnp_model_extract_t* const model_extract = (ccv_cnnp_model_extract_t*)cccalloc(1, sizeof(ccv_cnnp_model_extract_t));
891
6
  model_extract->index = index;
892
6
  model_extract->super.isa = &ccv_cnnp_extract_isa;
893
6
  model_extract->super.input_size = 0;
894
6
  model_extract->super.outputs = &model_extract->output;
895
6
  model_extract->super.output_size = 1;
896
6
  ccv_cnnp_model_copy_name(&model_extract->super, name);
897
6
  return (ccv_cnnp_model_t*)model_extract;
898
6
}
899
900
static ccv_cnnp_model_t* _ccv_cnnp_extract_copy(const ccv_cnnp_model_t* const super, void* const context)
901
0
{
902
0
  ccv_cnnp_model_extract_t* const self = (ccv_cnnp_model_extract_t*)super;
903
0
  return ccv_cnnp_extract(self->index, self->super.name);
904
0
}
905
906
typedef struct {
907
  ccv_cnnp_model_t super;
908
  ccv_nnc_tensor_symbol_t output;
909
} ccv_cnnp_model_flatten_t;
910
911
static void _ccv_cnnp_flatten_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
912
10
{
913
10
  PRINT(CCV_CLI_VERBOSE, "[cnnp_flatten_build] -\n");
914
10
  assert(input_size == 1);
915
10
  assert(output_size == 1);
916
10
  const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
917
10
  ccv_nnc_tensor_param_t output_params = params;
918
10
  memset(output_params.dim, 0, sizeof(output_params.dim));
919
10
  output_params.dim[0] = ccv_nnc_tensor_get_n(params);
920
10
  assert(output_params.dim[0] > 0);
921
10
  output_params.dim[1] = ccv_nnc_tensor_count(params) / output_params.dim[0];
922
10
  int stride[CCV_NNC_MAX_DIM_ALLOC] = {};
923
10
  ccv_nnc_tensor_get_stride(output_params.dim, stride);
924
10
  outputs[0] = ccv_nnc_tensor_symbol_alias_new(graph, inputs[0], DIM_ALLOC(), stride, output_params, 0);
925
10
}
926
927
static ccv_cnnp_model_t* _ccv_cnnp_flatten_copy(const ccv_cnnp_model_t* const self, void* const context);
928
929
static const ccv_cnnp_model_vtab_t ccv_cnnp_flatten_isa = {
930
  .build = _ccv_cnnp_flatten_build,
931
  .copy = _ccv_cnnp_flatten_copy,
932
};
933
934
ccv_cnnp_model_t* ccv_cnnp_flatten(const char* const name)
935
12
{
936
12
  ccv_cnnp_model_flatten_t* const model_flatten = (ccv_cnnp_model_flatten_t*)cccalloc(1, sizeof(ccv_cnnp_model_flatten_t));
937
12
  model_flatten->super.isa = &ccv_cnnp_flatten_isa;
938
12
  model_flatten->super.input_size = 1;
939
12
  model_flatten->super.outputs = &model_flatten->output;
940
12
  model_flatten->super.output_size = 1;
941
12
  ccv_cnnp_model_copy_name(&model_flatten->super, name);
942
12
  return (ccv_cnnp_model_t*)model_flatten;
943
12
}
944
945
static ccv_cnnp_model_t* _ccv_cnnp_flatten_copy(const ccv_cnnp_model_t* const self, void* const context)
946
2
{
947
2
  return ccv_cnnp_flatten(self->name);
948
2
}
949
950
// MARK - Batch Norm Layer
951
952
typedef struct {
953
  ccv_cnnp_model_t super;
954
  ccv_nnc_tensor_symbol_t output;
955
  ccv_nnc_tensor_symbol_t bias;
956
  ccv_nnc_tensor_symbol_t scale;
957
  ccv_nnc_graph_exec_symbol_t batch_norm;
958
  ccv_nnc_cmd_param_t params;
959
  ccv_array_t* zero_inits;
960
  ccv_array_t* retainables;
961
} ccv_cnnp_model_batch_norm_t;
962
963
static void _ccv_cnnp_batch_norm_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
964
75
{
965
75
  assert(input_size == 1);
966
75
  assert(output_size == 1);
967
75
  ccv_cnnp_model_batch_norm_t* const self = (ccv_cnnp_model_batch_norm_t*)super;
968
75
  PRINT(CCV_CLI_VERBOSE, "[cnnp_batch_norm_build] -\n");
969
75
  const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
970
75
  const int nd = ccv_nnc_tensor_nd(params.dim);
971
75
  ccv_nnc_tensor_param_t bias_params = params;
972
75
  memset(bias_params.dim, 0, sizeof(bias_params.dim));
973
  // If the accuracy is not enough, bump it to 32-bit floating point.
974
75
  if (bias_params.datatype != CCV_32F && 
bias_params.datatype != CCV_64F16
)
975
16
    bias_params.datatype = CCV_32F;
976
75
  bias_params.dim[0] = nd > 1 ? ccv_nnc_tensor_get_c(params) : 
params.dim[0]0
;
977
75
  const ccv_nnc_tensor_symbol_t output = ccv_nnc_tensor_symbol_new(graph, params, 0);
978
  // Both scale and bias are shared between if this model is reused.
979
75
  if (!self->scale.graph)
980
75
    self->scale = ccv_nnc_tensor_symbol_new(graph, bias_params, "scale");
981
75
  if (!self->bias.graph)
982
75
    self->bias = ccv_nnc_tensor_symbol_new(graph, bias_params, "bias");
983
75
  const ccv_nnc_tensor_symbol_t mean = ccv_nnc_tensor_symbol_new(graph, bias_params, "mean");
984
75
  const ccv_nnc_tensor_symbol_t var = ccv_nnc_tensor_symbol_new(graph, bias_params, "var");
985
  // Otherwise, notice mean, var, saved_mean, saved_inv_std are not reused.
986
75
  if (!self->zero_inits)
987
75
    self->zero_inits = ccv_array_new(sizeof(ccv_nnc_tensor_symbol_t), 0, 0);
988
75
  ccv_array_push(self->zero_inits, &mean);
989
75
  ccv_array_push(self->zero_inits, &var);
990
75
  const ccv_nnc_tensor_symbol_t out_mean = ccv_nnc_tensor_symbol_new(graph, bias_params, "out_mean");
991
75
  const ccv_nnc_tensor_symbol_t out_var = ccv_nnc_tensor_symbol_new(graph, bias_params, "out_var");
992
75
  if (!self->retainables)
993
75
    self->retainables = ccv_array_new(sizeof(ccv_nnc_tensor_symbol_t), 0, 0);
994
75
  ccv_array_push(self->retainables, &out_mean);
995
75
  ccv_array_push(self->retainables, &out_var);
996
75
  const ccv_nnc_tensor_symbol_t saved_mean = ccv_nnc_tensor_symbol_new(graph, bias_params, "saved_mean");
997
75
  const ccv_nnc_tensor_symbol_t saved_inv_std = ccv_nnc_tensor_symbol_new(graph, bias_params, "saved_inv_std");
998
75
  const int hw = ccv_nnc_tensor_hw(params, ccv_nnc_tensor_nd(params.dim), CCV_NNC_MAX_DIM);
999
75
  ccv_nnc_cmd_param_t batch_norm = self->params;
1000
75
  batch_norm.bnorm.count = hw >= 0 ? CCV_NNC_MAX_DIM + 1 : 
10
;
1001
75
  int i;
1002
75
  batch_norm.bnorm.axis[0] = (params.format == CCV_TENSOR_FORMAT_CHWN) ? 
30
: 0;
1003
75
  if (hw >= 0)
1004
225
    
for (i = 0; 75
i < CCV_NNC_MAX_DIM;
i++150
)
1005
150
      batch_norm.bnorm.axis[i + 1] = i + hw;
1006
75
  self->params = batch_norm;
1007
75
  self->batch_norm = ccv_nnc_graph_exec_symbol_new(graph, ccv_nnc_cmd(CCV_NNC_BATCH_NORM_FORWARD, 0, batch_norm, 0), TENSOR_SYMBOL_LIST(inputs[0], self->scale, self->bias, mean, var), TENSOR_SYMBOL_LIST(output, out_mean, out_var, saved_mean, saved_inv_std), "batch_norm");
1008
75
  outputs[0] = output;
1009
75
}
1010
1011
static void _ccv_cnnp_batch_norm_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
1012
24
{
1013
24
  ccv_cnnp_model_batch_norm_t* const self = (ccv_cnnp_model_batch_norm_t*)super;
1014
24
  if (self->scale.graph)
1015
24
    initializer(context, CMD_RANDOM_UNIFORM_FORWARD(0, 1), ccv_nnc_no_hint, 0, 0, self->scale);
1016
24
  if (self->bias.graph)
1017
24
    initializer(context, CMD_SET_FORWARD(0), ccv_nnc_no_hint, 0, 0, self->bias);
1018
24
  int i;
1019
24
  if (self->zero_inits)
1020
72
    
for (i = 0; 24
i < self->zero_inits->rnum;
i++48
)
1021
48
      initializer(context, CMD_SET_FORWARD(0), ccv_nnc_no_hint, 0, 0, *(ccv_nnc_tensor_symbol_t*)ccv_array_get(self->zero_inits, i));
1022
24
}
1023
1024
static void _ccv_cnnp_batch_norm_add_to_parameter(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const parameters, const int is_trainable)
1025
75
{
1026
75
  ccv_cnnp_model_batch_norm_t* const self = (ccv_cnnp_model_batch_norm_t*)super;
1027
75
  if (self->scale.graph)
1028
75
    add_to_array(parameters, self->scale, is_trainable);
1029
75
  if (self->bias.graph)
1030
75
    add_to_array(parameters, self->bias, is_trainable);
1031
75
}
1032
1033
static void _ccv_cnnp_batch_norm_add_to_output(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const outputs)
1034
75
{
1035
75
  ccv_cnnp_model_batch_norm_t* const self = (ccv_cnnp_model_batch_norm_t*)super;
1036
75
  int i;
1037
75
  if (self->retainables)
1038
225
    
for (i = 0; 75
i < self->retainables->rnum;
i++150
)
1039
150
    {
1040
150
      const ccv_nnc_tensor_symbol_t symbol = *(ccv_nnc_tensor_symbol_t*)ccv_array_get(self->retainables, i);
1041
150
      add_to_array(outputs, symbol, 0);
1042
150
    }
1043
75
}
1044
1045
static void _ccv_cnnp_batch_norm_set_is_test(ccv_cnnp_model_t* const super, const int is_test, const ccv_cnnp_cmd_updater_f updater, void* const context)
1046
32
{
1047
32
  ccv_cnnp_model_batch_norm_t* const self = (ccv_cnnp_model_batch_norm_t*)super;
1048
32
  if (self->batch_norm.graph)
1049
32
  {
1050
32
    self->params.bnorm.is_test = is_test;
1051
32
    updater(context, self->batch_norm, ccv_nnc_cmd(CCV_NNC_BATCH_NORM_FORWARD, 0, self->params, 0), ccv_nnc_no_hint);
1052
32
  }
1053
32
}
1054
1055
static void _ccv_cnnp_batch_norm_deinit(ccv_cnnp_model_t* const super)
1056
83
{
1057
83
  ccv_cnnp_model_batch_norm_t* const self = (ccv_cnnp_model_batch_norm_t*)super;
1058
83
  if (self->zero_inits)
1059
75
    ccv_array_free(self->zero_inits);
1060
83
  if (self->retainables)
1061
75
    ccv_array_free(self->retainables);
1062
83
}
1063
1064
static ccv_cnnp_model_t* _ccv_cnnp_batch_norm_copy(const ccv_cnnp_model_t* const super, void* const context);
1065
1066
static const ccv_cnnp_model_vtab_t ccv_cnnp_batch_norm_isa = {
1067
  .build = _ccv_cnnp_batch_norm_build,
1068
  .init_states = _ccv_cnnp_batch_norm_init_states,
1069
  .add_to_parameter = _ccv_cnnp_batch_norm_add_to_parameter,
1070
  .add_to_output = _ccv_cnnp_batch_norm_add_to_output,
1071
  .copy = _ccv_cnnp_batch_norm_copy,
1072
  .set_is_test = _ccv_cnnp_batch_norm_set_is_test,
1073
  .deinit = _ccv_cnnp_batch_norm_deinit,
1074
};
1075
1076
ccv_cnnp_model_t* ccv_cnnp_batch_norm(const float momentum, const float epsilon, const int is_trainable, const char* const name)
1077
83
{
1078
83
  ccv_cnnp_model_batch_norm_t* const model_batch_norm = (ccv_cnnp_model_batch_norm_t*)cccalloc(1, sizeof(ccv_cnnp_model_batch_norm_t));
1079
83
  model_batch_norm->super.isa = &ccv_cnnp_batch_norm_isa;
1080
83
  model_batch_norm->super.input_size = 1;
1081
83
  model_batch_norm->super.outputs = &model_batch_norm->output;
1082
83
  model_batch_norm->super.output_size = 1;
1083
83
  model_batch_norm->super.is_trainable = is_trainable;
1084
83
  ccv_cnnp_model_copy_name(&model_batch_norm->super, name);
1085
83
  model_batch_norm->scale.d = CCV_NNC_NO_TENSOR_SYMBOL;
1086
83
  model_batch_norm->scale.graph = 0;
1087
83
  model_batch_norm->bias.d = CCV_NNC_NO_TENSOR_SYMBOL;
1088
83
  model_batch_norm->bias.graph = 0;
1089
83
  model_batch_norm->params.bnorm.momentum = momentum;
1090
83
  model_batch_norm->params.bnorm.epsilon = epsilon;
1091
83
  return (ccv_cnnp_model_t*)model_batch_norm;
1092
83
}
1093
1094
static ccv_cnnp_model_t* _ccv_cnnp_batch_norm_copy(const ccv_cnnp_model_t* const super, void* const context)
1095
8
{
1096
8
  const ccv_cnnp_model_batch_norm_t* const self = (const ccv_cnnp_model_batch_norm_t*)super;
1097
8
  return ccv_cnnp_batch_norm(self->params.bnorm.momentum, self->params.bnorm.epsilon, self->super.is_trainable, self->super.name);
1098
8
}
1099
1100
// MARK - Convolution Layer
1101
1102
typedef struct {
1103
  ccv_cnnp_model_t super;
1104
  ccv_nnc_tensor_symbol_t output;
1105
  ccv_nnc_tensor_symbol_t weights;
1106
  ccv_nnc_tensor_symbol_t bias;
1107
  int groups;
1108
  int filters;
1109
  int kdim[CCV_NNC_MAX_DIM_ALLOC];
1110
  int dilation[CCV_NNC_MAX_DIM_ALLOC];
1111
  int no_bias;
1112
  int format;
1113
  ccv_nnc_hint_t hint;
1114
} ccv_cnnp_model_convolution_t;
1115
1116
static void _ccv_cnnp_convolution_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1117
114
{
1118
114
  ccv_cnnp_model_convolution_t* const self = (ccv_cnnp_model_convolution_t*)super;
1119
114
  PRINT(CCV_CLI_VERBOSE, "[cnnp_convolution_build] -\n");
1120
114
  assert(input_size == 1);
1121
114
  assert(output_size == 1);
1122
114
  const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
1123
114
  int i;
1124
114
  const int k_nd = ccv_nnc_tensor_nd(self->kdim);
1125
114
  const int nd = k_nd + 2;
1126
114
  ccv_nnc_tensor_param_t weights_params = params;
1127
114
  if (self->format)
1128
0
    weights_params.format = self->format;
1129
114
  ccv_nnc_tensor_set_n(&weights_params, self->filters);
1130
114
  const int a_nd = ccv_nnc_tensor_nd(params.dim);
1131
114
  int c;
1132
114
  switch (params.format)
1133
114
  {
1134
15
    case CCV_TENSOR_FORMAT_NHWC:
1135
15
      c = params.dim[a_nd - 1];
1136
15
      break;
1137
99
    case CCV_TENSOR_FORMAT_NCHW:
1138
99
      if (a_nd == k_nd + 1)
1139
0
        c = params.dim[0];
1140
99
      else
1141
99
        c = params.dim[a_nd <= 1 ? 
00
: 1];
1142
99
      break;
1143
0
    case CCV_TENSOR_FORMAT_CHWN:
1144
0
      c = params.dim[0];
1145
0
      break;
1146
114
  }
1147
114
  assert(c % self->groups == 0);
1148
114
  ccv_nnc_tensor_set_c(&weights_params, nd, c / self->groups);
1149
114
  int hw = -1;
1150
114
  if (weights_params.format == CCV_TENSOR_FORMAT_NHWC || 
weights_params.format == CCV_TENSOR_FORMAT_CHWN99
)
1151
15
    hw = 1;
1152
99
  else if (weights_params.format == CCV_TENSOR_FORMAT_NCHW)
1153
99
    hw = 2;
1154
114
  assert(hw >= 0);
1155
342
  
for (i = 0; 114
i < k_nd;
i++228
)
1156
228
    weights_params.dim[i + hw] = self->kdim[i];
1157
114
  if (!self->weights.graph)
1158
110
    self->weights = ccv_nnc_tensor_symbol_new(graph, weights_params, "weights");
1159
114
  assert(self->weights.graph == graph);
1160
114
  ccv_nnc_tensor_param_t bias_params = params;
1161
114
  if (self->format)
1162
0
    bias_params.format = self->format;
1163
114
  memset(bias_params.dim, 0, sizeof(bias_params.dim));
1164
114
  bias_params.dim[0] = self->filters;
1165
114
  ccv_nnc_cmd_t cmd = CMD_CONVOLUTION_FORWARD(self->groups, self->filters);
1166
342
  for (i = 0; i < k_nd; 
i++228
)
1167
228
    cmd.info.size.dim[i] = self->kdim[i];
1168
114
  cmd.info.size.dim[k_nd] = c;
1169
114
  memcpy(cmd.info.convolution.dilation, self->dilation, sizeof(self->dilation));
1170
114
  ccv_nnc_tensor_param_t output_params;
1171
  // Dilate weight size based on the dilation factor.
1172
342
  for (i = 0; i < k_nd; 
i++228
)
1173
228
    weights_params.dim[i + hw] = (self->kdim[i] - 1) * ccv_max(self->dilation[i], 1) + 1;
1174
114
  ccv_nnc_hint_tensor_auto(cmd, (ccv_nnc_tensor_param_t []){
1175
114
      params,
1176
114
      weights_params,
1177
114
      bias_params,
1178
114
    }, 3, self->hint, &output_params, 1);
1179
114
  const ccv_nnc_tensor_symbol_t output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1180
114
  ccv_nnc_graph_exec_symbol_t convolution;
1181
114
  if (self->no_bias)
1182
10
    convolution = ccv_nnc_graph_exec_symbol_new(graph, cmd, TENSOR_SYMBOL_LIST(inputs[0], self->weights), TENSOR_SYMBOL_LIST(output), "convolution");
1183
104
  else {
1184
104
    if (!self->bias.graph)
1185
100
      self->bias = ccv_nnc_tensor_symbol_new(graph, bias_params, "bias");
1186
104
    convolution = ccv_nnc_graph_exec_symbol_new(graph, cmd, TENSOR_SYMBOL_LIST(inputs[0], self->weights, self->bias), TENSOR_SYMBOL_LIST(output), "convolution");
1187
104
  }
1188
114
  ccv_nnc_graph_exec_symbol_set_hint(graph, convolution, self->hint);
1189
114
  outputs[0] = output;
1190
114
}
1191
1192
static void _ccv_cnnp_convolution_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
1193
36
{
1194
36
  ccv_cnnp_model_convolution_t* const self = (ccv_cnnp_model_convolution_t*)super;
1195
36
  const ccv_nnc_tensor_param_t weight_params = ccv_nnc_tensor_symbol_params(graph, self->weights);
1196
36
  const int n = ccv_max(ccv_nnc_tensor_get_n(weight_params), 1);
1197
36
  const int count = ccv_nnc_tensor_count(weight_params);
1198
36
  const float std = sqrtf(2) / sqrtf(count / n);
1199
36
  const float bound = sqrtf(3) * std;
1200
36
  initializer(context, CMD_RANDOM_UNIFORM_FORWARD(-bound, bound), ccv_nnc_no_hint, 0, 0, self->weights);
1201
36
  if (self->bias.graph)
1202
36
    initializer(context, CMD_SET_FORWARD(0), ccv_nnc_no_hint, 0, 0, self->bias);
1203
36
}
1204
1205
static void _ccv_cnnp_convolution_add_to_parameter(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const parameters, const int is_trainable)
1206
114
{
1207
114
  ccv_cnnp_model_convolution_t* const self = (ccv_cnnp_model_convolution_t*)super;
1208
114
  add_to_array(parameters, self->weights, is_trainable);
1209
114
  if (self->bias.graph)
1210
104
    add_to_array(parameters, self->bias, is_trainable);
1211
114
}
1212
1213
static ccv_cnnp_model_t* _ccv_cnnp_convolution_copy(const ccv_cnnp_model_t* const super, void* const context);
1214
1215
static const ccv_cnnp_model_vtab_t ccv_cnnp_convolution_isa = {
1216
  .build = _ccv_cnnp_convolution_build,
1217
  .init_states = _ccv_cnnp_convolution_init_states,
1218
  .add_to_parameter = _ccv_cnnp_convolution_add_to_parameter,
1219
  .copy = _ccv_cnnp_convolution_copy,
1220
};
1221
1222
ccv_cnnp_model_t* ccv_cnnp_convolution(const int groups, const int filters, const int kdim[CCV_NNC_MAX_DIM_ALLOC], const int dilation[CCV_NNC_MAX_DIM_ALLOC], const int no_bias, ccv_nnc_hint_t hint, const int format, const int is_trainable, const char* const name)
1223
126
{
1224
126
  ccv_cnnp_model_convolution_t* const model_convolution = (ccv_cnnp_model_convolution_t*)cccalloc(1, sizeof(ccv_cnnp_model_convolution_t));
1225
126
  model_convolution->super.isa = &ccv_cnnp_convolution_isa;
1226
126
  model_convolution->super.input_size = 1;
1227
126
  model_convolution->super.outputs = &model_convolution->output;
1228
126
  model_convolution->super.output_size = 1;
1229
126
  model_convolution->super.is_trainable = is_trainable;
1230
126
  ccv_cnnp_model_copy_name(&model_convolution->super, name);
1231
126
  model_convolution->weights.d = CCV_NNC_NO_TENSOR_SYMBOL;
1232
126
  model_convolution->weights.graph = 0;
1233
126
  model_convolution->bias.d = CCV_NNC_NO_TENSOR_SYMBOL;
1234
126
  model_convolution->bias.graph = 0;
1235
126
  model_convolution->groups = groups;
1236
126
  model_convolution->filters = filters;
1237
126
  memcpy(model_convolution->kdim, kdim, sizeof(model_convolution->kdim));
1238
126
  memcpy(model_convolution->dilation, dilation, sizeof(model_convolution->dilation));
1239
126
  model_convolution->no_bias = no_bias;
1240
126
  model_convolution->hint = hint;
1241
126
  model_convolution->format = format;
1242
126
  return (ccv_cnnp_model_t*)model_convolution;
1243
126
}
1244
1245
static ccv_cnnp_model_t* _ccv_cnnp_convolution_copy(const ccv_cnnp_model_t* const super, void* const context)
1246
16
{
1247
16
  ccv_cnnp_model_convolution_t* const self = (ccv_cnnp_model_convolution_t*)super;
1248
16
  return ccv_cnnp_convolution(self->groups, self->filters, self->kdim, self->dilation, self->no_bias, self->hint, self->format, self->super.is_trainable, self->super.name);
1249
16
}
1250
1251
// MARK - Convolution Transpose Layer
1252
1253
typedef struct {
1254
  ccv_cnnp_model_t super;
1255
  ccv_nnc_tensor_symbol_t output;
1256
  ccv_nnc_tensor_symbol_t weights;
1257
  ccv_nnc_tensor_symbol_t bias;
1258
  int groups;
1259
  int filters;
1260
  int kdim[CCV_NNC_MAX_DIM_ALLOC];
1261
  int dilation[CCV_NNC_MAX_DIM_ALLOC];
1262
  int output_padding;
1263
  int no_bias;
1264
  int format;
1265
  ccv_nnc_hint_t hint;
1266
} ccv_cnnp_model_convolution_transpose_t;
1267
1268
static void _ccv_cnnp_convolution_transpose_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1269
0
{
1270
0
  ccv_cnnp_model_convolution_transpose_t* const self = (ccv_cnnp_model_convolution_transpose_t*)super;
1271
0
  PRINT(CCV_CLI_VERBOSE, "[cnnp_convolution_transpose_build] -\n");
1272
0
  assert(input_size == 1);
1273
0
  assert(output_size == 1);
1274
0
  const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
1275
0
  int i;
1276
0
  const int nd = CCV_NNC_MAX_DIM + 2;
1277
0
  ccv_nnc_tensor_param_t weights_params = params;
1278
0
  if (self->format)
1279
0
    weights_params.format = self->format;
1280
0
  const int c = ccv_nnc_tensor_get_c(params);
1281
0
  ccv_nnc_tensor_set_n(&weights_params, c);
1282
0
  assert(c % self->groups == 0);
1283
0
  ccv_nnc_tensor_set_c(&weights_params, nd, self->filters / self->groups);
1284
0
  const int hw = ccv_nnc_tensor_hw(weights_params, nd, CCV_NNC_MAX_DIM);
1285
0
  assert(hw >= 0);
1286
0
  for (i = 0; i < CCV_NNC_MAX_DIM; i++)
1287
0
    weights_params.dim[i + hw] = self->kdim[i];
1288
0
  if (!self->weights.graph)
1289
0
    self->weights = ccv_nnc_tensor_symbol_new(graph, weights_params, "weights");
1290
0
  assert(self->weights.graph == graph);
1291
0
  ccv_nnc_tensor_param_t bias_params = params;
1292
0
  if (self->format)
1293
0
    bias_params.format = self->format;
1294
0
  memset(bias_params.dim, 0, sizeof(bias_params.dim));
1295
0
  bias_params.dim[0] = self->filters;
1296
0
  ccv_nnc_cmd_t cmd = CMD_CONVOLUTION_TRANSPOSE_FORWARD(self->groups, self->filters, self->output_padding);
1297
0
  for (i = 0; i < CCV_NNC_MAX_DIM; i++)
1298
0
    cmd.info.size.dim[i] = self->kdim[i];
1299
0
  cmd.info.size.dim[CCV_NNC_MAX_DIM] = c;
1300
0
  memcpy(cmd.info.convolution_transpose.dilation, self->dilation, sizeof(self->dilation));
1301
0
  ccv_nnc_tensor_param_t output_params;
1302
  // Dilate weight size based on the dilation factor.
1303
0
  for (i = 0; i < CCV_NNC_MAX_DIM; i++)
1304
0
    weights_params.dim[i + hw] = (self->kdim[i] - 1) * ccv_max(self->dilation[i], 1) + 1;
1305
0
  ccv_nnc_hint_tensor_auto(cmd, (ccv_nnc_tensor_param_t []){
1306
0
      params,
1307
0
      weights_params,
1308
0
      bias_params,
1309
0
    }, 3, self->hint, &output_params, 1);
1310
0
  const ccv_nnc_tensor_symbol_t output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1311
0
  ccv_nnc_graph_exec_symbol_t convolution_transpose;
1312
0
  if (self->no_bias)
1313
0
    convolution_transpose = ccv_nnc_graph_exec_symbol_new(graph, cmd, TENSOR_SYMBOL_LIST(inputs[0], self->weights), TENSOR_SYMBOL_LIST(output), "convolution_transpose");
1314
0
  else {
1315
0
    if (!self->bias.graph)
1316
0
      self->bias = ccv_nnc_tensor_symbol_new(graph, bias_params, "bias");
1317
0
    convolution_transpose = ccv_nnc_graph_exec_symbol_new(graph, cmd, TENSOR_SYMBOL_LIST(inputs[0], self->weights, self->bias), TENSOR_SYMBOL_LIST(output), "convolution_transpose");
1318
0
  }
1319
0
  ccv_nnc_graph_exec_symbol_set_hint(graph, convolution_transpose, self->hint);
1320
0
  outputs[0] = output;
1321
0
}
1322
1323
static void _ccv_cnnp_convolution_transpose_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
1324
0
{
1325
0
  ccv_cnnp_model_convolution_transpose_t* const self = (ccv_cnnp_model_convolution_transpose_t*)super;
1326
0
  const ccv_nnc_tensor_param_t weight_params = ccv_nnc_tensor_symbol_params(graph, self->weights);
1327
0
  const int n = ccv_max(ccv_nnc_tensor_get_n(weight_params), 1);
1328
0
  const int count = ccv_nnc_tensor_count(weight_params);
1329
0
  const float std = sqrtf(2) / sqrtf(count / n);
1330
0
  const float bound = sqrtf(3) * std;
1331
0
  initializer(context, CMD_RANDOM_UNIFORM_FORWARD(-bound, bound), ccv_nnc_no_hint, 0, 0, self->weights);
1332
0
  if (self->bias.graph)
1333
0
    initializer(context, CMD_SET_FORWARD(0), ccv_nnc_no_hint, 0, 0, self->bias);
1334
0
}
1335
1336
static void _ccv_cnnp_convolution_transpose_add_to_parameter(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const parameters, const int is_trainable)
1337
0
{
1338
0
  ccv_cnnp_model_convolution_transpose_t* const self = (ccv_cnnp_model_convolution_transpose_t*)super;
1339
0
  add_to_array(parameters, self->weights, is_trainable);
1340
0
  if (self->bias.graph)
1341
0
    add_to_array(parameters, self->bias, is_trainable);
1342
0
}
1343
1344
static ccv_cnnp_model_t* _ccv_cnnp_convolution_transpose_copy(const ccv_cnnp_model_t* const super, void* const context);
1345
1346
static const ccv_cnnp_model_vtab_t ccv_cnnp_convolution_transpose_isa = {
1347
  .build = _ccv_cnnp_convolution_transpose_build,
1348
  .init_states = _ccv_cnnp_convolution_transpose_init_states,
1349
  .add_to_parameter = _ccv_cnnp_convolution_transpose_add_to_parameter,
1350
  .copy = _ccv_cnnp_convolution_transpose_copy,
1351
};
1352
1353
ccv_cnnp_model_t* ccv_cnnp_convolution_transpose(const int groups, const int filters, const int kdim[CCV_NNC_MAX_DIM_ALLOC], const int dilation[CCV_NNC_MAX_DIM_ALLOC], const int output_padding, const int no_bias, ccv_nnc_hint_t hint, const int format, const int is_trainable, const char* const name)
1354
0
{
1355
0
  ccv_cnnp_model_convolution_transpose_t* const model_convolution_transpose = (ccv_cnnp_model_convolution_transpose_t*)cccalloc(1, sizeof(ccv_cnnp_model_convolution_transpose_t));
1356
0
  model_convolution_transpose->super.isa = &ccv_cnnp_convolution_transpose_isa;
1357
0
  model_convolution_transpose->super.input_size = 1;
1358
0
  model_convolution_transpose->super.outputs = &model_convolution_transpose->output;
1359
0
  model_convolution_transpose->super.output_size = 1;
1360
0
  model_convolution_transpose->super.is_trainable = is_trainable;
1361
0
  ccv_cnnp_model_copy_name(&model_convolution_transpose->super, name);
1362
0
  model_convolution_transpose->weights.d = CCV_NNC_NO_TENSOR_SYMBOL;
1363
0
  model_convolution_transpose->weights.graph = 0;
1364
0
  model_convolution_transpose->bias.d = CCV_NNC_NO_TENSOR_SYMBOL;
1365
0
  model_convolution_transpose->bias.graph = 0;
1366
0
  model_convolution_transpose->groups = groups;
1367
0
  model_convolution_transpose->filters = filters;
1368
0
  memcpy(model_convolution_transpose->kdim, kdim, sizeof(model_convolution_transpose->kdim));
1369
0
  memcpy(model_convolution_transpose->dilation, dilation, sizeof(model_convolution_transpose->dilation));
1370
0
  model_convolution_transpose->output_padding = output_padding;
1371
0
  model_convolution_transpose->no_bias = no_bias;
1372
0
  model_convolution_transpose->hint = hint;
1373
0
  model_convolution_transpose->format = format;
1374
0
  return (ccv_cnnp_model_t*)model_convolution_transpose;
1375
0
}
1376
1377
static ccv_cnnp_model_t* _ccv_cnnp_convolution_transpose_copy(const ccv_cnnp_model_t* const super, void* const context)
1378
0
{
1379
0
  ccv_cnnp_model_convolution_transpose_t* const self = (ccv_cnnp_model_convolution_transpose_t*)super;
1380
0
  return ccv_cnnp_convolution_transpose(self->groups, self->filters, self->kdim, self->dilation, self->output_padding, self->no_bias, self->hint, self->format, self->super.is_trainable, self->super.name);
1381
0
}
1382
1383
// MARK - Dense Layer
1384
1385
typedef struct {
1386
  ccv_cnnp_model_t super;
1387
  ccv_nnc_tensor_symbol_t output;
1388
  ccv_nnc_tensor_symbol_t weights;
1389
  ccv_nnc_tensor_symbol_t bias;
1390
  int count;
1391
  int no_bias;
1392
  int flags;
1393
} ccv_cnnp_model_dense_t;
1394
1395
static void _ccv_cnnp_dense_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1396
2.33k
{
1397
2.33k
  ccv_cnnp_model_dense_t* const self = (ccv_cnnp_model_dense_t*)super;
1398
2.33k
  PRINT(CCV_CLI_VERBOSE, "[cnnp_dense_build] -\n");
1399
2.33k
  assert(input_size == 1);
1400
2.33k
  assert(output_size == 1);
1401
2.33k
  const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
1402
2.33k
  ccv_nnc_tensor_param_t weights_params = params;
1403
2.33k
  memset(weights_params.dim, 0, sizeof(weights_params.dim));
1404
2.33k
  weights_params.dim[0] = self->count;
1405
2.33k
  weights_params.dim[1] = params.dim[ccv_nnc_tensor_nd(params.dim) - 1];
1406
2.33k
  if (!self->weights.graph)
1407
2.31k
    self->weights = ccv_nnc_tensor_symbol_new(graph, weights_params, "weights");
1408
2.33k
  assert(self->weights.graph == graph);
1409
2.33k
  ccv_nnc_tensor_param_t bias_params = params;
1410
2.33k
  memset(bias_params.dim, 0, sizeof(bias_params.dim));
1411
2.33k
  bias_params.dim[0] = self->count;
1412
2.33k
  ccv_nnc_cmd_t cmd = {0};
1413
2.33k
  cmd.cmd = CCV_NNC_GEMM_FORWARD;
1414
2.33k
  cmd.info.blas.a[0] = 1;
1415
2.33k
  cmd.info.blas.a[1] = 1;
1416
2.33k
  cmd.info.blas.transpose_b[0] = 0;
1417
2.33k
  cmd.info.blas.transpose_b[1] = 1;
1418
2.33k
  cmd.info.blas.flags = self->flags;
1419
2.33k
  ccv_nnc_tensor_param_t output_params;
1420
2.33k
  ccv_nnc_hint_tensor_auto(cmd, (ccv_nnc_tensor_param_t []){
1421
2.33k
      params,
1422
2.33k
      weights_params,
1423
2.33k
      bias_params,
1424
2.33k
    }, 3, ccv_nnc_no_hint, &output_params, 1);
1425
2.33k
  const ccv_nnc_tensor_symbol_t output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1426
2.33k
  if (self->no_bias)
1427
2.08k
    ccv_nnc_graph_exec_symbol_new(graph, cmd, TENSOR_SYMBOL_LIST(inputs[0], self->weights), TENSOR_SYMBOL_LIST(output), "dense");
1428
246
  else {
1429
246
    if (!self->bias.graph)
1430
243
      self->bias = ccv_nnc_tensor_symbol_new(graph, bias_params, "bias");
1431
246
    ccv_nnc_graph_exec_symbol_new(graph, cmd, TENSOR_SYMBOL_LIST(inputs[0], self->weights, self->bias), TENSOR_SYMBOL_LIST(output), "dense");
1432
246
  }
1433
2.33k
  outputs[0] = output;
1434
2.33k
}
1435
1436
static void _ccv_cnnp_dense_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
1437
79
{
1438
79
  ccv_cnnp_model_dense_t* const self = (ccv_cnnp_model_dense_t*)super;
1439
79
  const ccv_nnc_tensor_param_t weight_params = ccv_nnc_tensor_symbol_params(graph, self->weights);
1440
79
  const int c = weight_params.dim[1];
1441
79
  const float std = sqrtf(2) / sqrtf(c);
1442
79
  const float bound = sqrtf(3) * std;
1443
79
  initializer(context, CMD_RANDOM_UNIFORM_FORWARD(-bound, bound), ccv_nnc_no_hint, 0, 0, self->weights);
1444
79
  if (self->bias.graph)
1445
33
    initializer(context, CMD_SET_FORWARD(0), ccv_nnc_no_hint, 0, 0, self->bias);
1446
79
}
1447
1448
static void _ccv_cnnp_dense_add_to_parameter(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const parameters, const int is_trainable)
1449
2.33k
{
1450
2.33k
  ccv_cnnp_model_dense_t* const self = (ccv_cnnp_model_dense_t*)super;
1451
2.33k
  add_to_array(parameters, self->weights, is_trainable);
1452
2.33k
  if (self->bias.graph)
1453
246
    add_to_array(parameters, self->bias, is_trainable);
1454
2.33k
}
1455
1456
static ccv_cnnp_model_t* _ccv_cnnp_dense_copy(const ccv_cnnp_model_t* const super, void* const context);
1457
1458
static const ccv_cnnp_model_vtab_t ccv_cnnp_dense_isa = {
1459
  .build = _ccv_cnnp_dense_build,
1460
  .init_states = _ccv_cnnp_dense_init_states,
1461
  .add_to_parameter = _ccv_cnnp_dense_add_to_parameter,
1462
  .copy = _ccv_cnnp_dense_copy,
1463
};
1464
1465
ccv_cnnp_model_t* ccv_cnnp_dense(const int count, const int no_bias, const int flags, const int is_trainable, const char* const name)
1466
2.31k
{
1467
2.31k
  ccv_cnnp_model_dense_t* const model_dense = (ccv_cnnp_model_dense_t*)cccalloc(1, sizeof(ccv_cnnp_model_dense_t));
1468
2.31k
  model_dense->super.isa = &ccv_cnnp_dense_isa;
1469
2.31k
  model_dense->super.input_size = 1;
1470
2.31k
  model_dense->super.outputs = &model_dense->output;
1471
2.31k
  model_dense->super.output_size = 1;
1472
2.31k
  model_dense->super.is_trainable = is_trainable;
1473
2.31k
  ccv_cnnp_model_copy_name(&model_dense->super, name);
1474
2.31k
  model_dense->weights.d = CCV_NNC_NO_TENSOR_SYMBOL;
1475
2.31k
  model_dense->weights.graph = 0;
1476
2.31k
  model_dense->bias.d = CCV_NNC_NO_TENSOR_SYMBOL;
1477
2.31k
  model_dense->bias.graph = 0;
1478
2.31k
  model_dense->count = count;
1479
2.31k
  model_dense->no_bias = no_bias;
1480
2.31k
  model_dense->flags = flags;
1481
2.31k
  return (ccv_cnnp_model_t*)model_dense;
1482
2.31k
}
1483
1484
static ccv_cnnp_model_t* _ccv_cnnp_dense_copy(const ccv_cnnp_model_t* const super, void* const context)
1485
2.20k
{
1486
2.20k
  const ccv_cnnp_model_dense_t* const self = (const ccv_cnnp_model_dense_t*)super;
1487
2.20k
  return ccv_cnnp_dense(self->count, self->no_bias, self->flags, self->super.is_trainable, self->super.name);
1488
2.20k
}
1489
1490
// MARK - Pool Layers
1491
1492
typedef struct {
1493
  ccv_cnnp_model_t super;
1494
  ccv_nnc_tensor_symbol_t output;
1495
  int kdim[CCV_NNC_MAX_DIM_ALLOC];
1496
  ccv_nnc_hint_t hint;
1497
} ccv_cnnp_model_pool_t;
1498
1499
static void _ccv_cnnp_max_pool_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1500
18
{
1501
18
  ccv_cnnp_model_pool_t* const self = (ccv_cnnp_model_pool_t*)super;
1502
18
  PRINT(CCV_CLI_VERBOSE, "[cnnp_max_pool_build] -\n");
1503
18
  assert(input_size == 1);
1504
18
  assert(output_size == 1);
1505
18
  const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
1506
18
  const int hw = ccv_nnc_tensor_hw(params, ccv_nnc_tensor_nd(params.dim), CCV_NNC_MAX_DIM);
1507
18
  ccv_nnc_cmd_t cmd;
1508
18
  if (hw >= 0 && self->kdim[0] == 0 && 
self->kdim[1] == 03
)
1509
3
    cmd = CMD_MAX_POOL_FORWARD(params.dim[hw], params.dim[hw + 1]);
1510
15
  else
1511
15
    cmd = CMD_MAX_POOL_FORWARD(self->kdim[0], self->kdim[1]);
1512
18
  ccv_nnc_tensor_param_t output_params;
1513
18
  ccv_nnc_hint_tensor_auto(cmd, &params, 1, self->hint, &output_params, 1);
1514
18
  const ccv_nnc_tensor_symbol_t pool_output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1515
18
  const ccv_nnc_graph_exec_symbol_t exec = ccv_nnc_graph_exec_symbol_new(graph, cmd, TENSOR_SYMBOL_LIST(inputs[0]), TENSOR_SYMBOL_LIST(pool_output), "max_pool");
1516
18
  ccv_nnc_graph_exec_symbol_set_hint(graph, exec, self->hint);
1517
18
  outputs[0] = pool_output;
1518
18
}
1519
1520
static ccv_cnnp_model_t* _ccv_cnnp_max_pool_copy(const ccv_cnnp_model_t* const super, void* const context);
1521
1522
static const ccv_cnnp_model_vtab_t ccv_cnnp_max_pool_isa = {
1523
  .build = _ccv_cnnp_max_pool_build,
1524
  .copy = _ccv_cnnp_max_pool_copy,
1525
};
1526
1527
ccv_cnnp_model_t* ccv_cnnp_max_pool(const int kdim[CCV_NNC_MAX_DIM_ALLOC], const ccv_nnc_hint_t hint, const char* const name)
1528
24
{
1529
24
  ccv_cnnp_model_pool_t* const model_pool = (ccv_cnnp_model_pool_t*)cccalloc(1, sizeof(ccv_cnnp_model_pool_t));
1530
24
  model_pool->super.isa = &ccv_cnnp_max_pool_isa;
1531
24
  model_pool->super.input_size = 1;
1532
24
  model_pool->super.outputs = &model_pool->output;
1533
24
  model_pool->super.output_size = 1;
1534
24
  ccv_cnnp_model_copy_name(&model_pool->super, name);
1535
24
  memcpy(model_pool->kdim, kdim, sizeof(model_pool->kdim));
1536
24
  model_pool->hint = hint;
1537
24
  return (ccv_cnnp_model_t*)model_pool;
1538
24
}
1539
1540
static ccv_cnnp_model_t* _ccv_cnnp_max_pool_copy(const ccv_cnnp_model_t* const super, void* const context)
1541
6
{
1542
6
  const ccv_cnnp_model_pool_t* const self = (const ccv_cnnp_model_pool_t*)super;
1543
6
  return ccv_cnnp_max_pool(self->kdim, self->hint, self->super.name);
1544
6
}
1545
1546
static void _ccv_cnnp_average_pool_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1547
15
{
1548
15
  ccv_cnnp_model_pool_t* const self = (ccv_cnnp_model_pool_t*)super;
1549
15
  PRINT(CCV_CLI_VERBOSE, "[cnnp_average_pool_build] -\n");
1550
15
  assert(input_size == 1);
1551
15
  assert(output_size == 1);
1552
15
  const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
1553
15
  const int hw = ccv_nnc_tensor_hw(params, ccv_nnc_tensor_nd(params.dim), CCV_NNC_MAX_DIM);
1554
15
  ccv_nnc_cmd_t cmd;
1555
15
  if (hw >= 0 && self->kdim[0] == 0 && 
self->kdim[1] == 02
)
1556
2
    cmd = CMD_AVERAGE_POOL_FORWARD(params.dim[hw], params.dim[hw + 1]);
1557
13
  else
1558
13
    cmd = CMD_AVERAGE_POOL_FORWARD(self->kdim[0], self->kdim[1]);
1559
15
  ccv_nnc_tensor_param_t output_params;
1560
15
  ccv_nnc_hint_tensor_auto(cmd, &params, 1, self->hint, &output_params, 1);
1561
15
  const ccv_nnc_tensor_symbol_t pool_output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1562
15
  const ccv_nnc_graph_exec_symbol_t exec = ccv_nnc_graph_exec_symbol_new(graph, cmd, TENSOR_SYMBOL_LIST(inputs[0]), TENSOR_SYMBOL_LIST(pool_output), "average_pool");
1563
15
  ccv_nnc_graph_exec_symbol_set_hint(graph, exec, self->hint);
1564
15
  outputs[0] = pool_output;
1565
15
}
1566
1567
static ccv_cnnp_model_t* _ccv_cnnp_average_pool_copy(const ccv_cnnp_model_t* const super, void* const context);
1568
1569
static const ccv_cnnp_model_vtab_t ccv_cnnp_average_pool_isa = {
1570
  .build = _ccv_cnnp_average_pool_build,
1571
  .copy = _ccv_cnnp_average_pool_copy,
1572
};
1573
1574
ccv_cnnp_model_t* ccv_cnnp_average_pool(const int kdim[CCV_NNC_MAX_DIM_ALLOC], const ccv_nnc_hint_t hint, const char* const name)
1575
17
{
1576
17
  ccv_cnnp_model_pool_t* const model_pool = (ccv_cnnp_model_pool_t*)cccalloc(1, sizeof(ccv_cnnp_model_pool_t));
1577
17
  model_pool->super.isa = &ccv_cnnp_average_pool_isa;
1578
17
  model_pool->super.input_size = 1;
1579
17
  model_pool->super.outputs = &model_pool->output;
1580
17
  model_pool->super.output_size = 1;
1581
17
  ccv_cnnp_model_copy_name(&model_pool->super, name);
1582
17
  memcpy(model_pool->kdim, kdim, sizeof(model_pool->kdim));
1583
17
  model_pool->hint = hint;
1584
17
  return (ccv_cnnp_model_t*)model_pool;
1585
17
}
1586
1587
static ccv_cnnp_model_t* _ccv_cnnp_average_pool_copy(const ccv_cnnp_model_t* const super, void* const context)
1588
2
{
1589
2
  const ccv_cnnp_model_pool_t* const self = (const ccv_cnnp_model_pool_t*)super;
1590
2
  return ccv_cnnp_average_pool(self->kdim, self->hint, self->super.name);
1591
2
}
1592
1593
// MARK - RELU Layer
1594
1595
typedef struct {
1596
  ccv_cnnp_model_t super;
1597
  ccv_nnc_tensor_symbol_t output;
1598
} ccv_cnnp_model_relu_t;
1599
1600
static void _ccv_cnnp_relu_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1601
103
{
1602
103
  PRINT(CCV_CLI_VERBOSE, "[cnnp_relu_build] -\n");
1603
103
  assert(input_size == 1);
1604
103
  assert(output_size == 1);
1605
103
  ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
1606
103
  ccv_nnc_tensor_param_t output_params;
1607
103
  const ccv_nnc_cmd_t relu = CMD_RELU_FORWARD();
1608
103
  ccv_nnc_hint_tensor_auto(relu, (ccv_nnc_tensor_param_t []){
1609
103
      params,
1610
103
    }, 1, ccv_nnc_no_hint, &output_params, 1);
1611
103
  const ccv_nnc_tensor_symbol_t relu_output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1612
103
  ccv_nnc_graph_exec_symbol_new(graph, relu, TENSOR_SYMBOL_LIST(inputs[0]), TENSOR_SYMBOL_LIST(relu_output), "relu");
1613
103
  outputs[0] = relu_output;
1614
103
}
1615
1616
static ccv_cnnp_model_t* _ccv_cnnp_relu_copy(const ccv_cnnp_model_t* const self, void* const context);
1617
1618
static const ccv_cnnp_model_vtab_t ccv_cnnp_relu_isa = {
1619
  .build = _ccv_cnnp_relu_build,
1620
  .copy = _ccv_cnnp_relu_copy,
1621
};
1622
1623
ccv_cnnp_model_t* ccv_cnnp_relu(const char* const name)
1624
120
{
1625
120
  ccv_cnnp_model_relu_t* const model_relu = (ccv_cnnp_model_relu_t*)cccalloc(1, sizeof(ccv_cnnp_model_relu_t));
1626
120
  model_relu->super.isa = &ccv_cnnp_relu_isa;
1627
120
  model_relu->super.input_size = 1;
1628
120
  model_relu->super.outputs = &model_relu->output;
1629
120
  model_relu->super.output_size = 1;
1630
120
  ccv_cnnp_model_copy_name(&model_relu->super, name);
1631
120
  return (ccv_cnnp_model_t*)model_relu;
1632
120
}
1633
1634
static ccv_cnnp_model_t* _ccv_cnnp_relu_copy(const ccv_cnnp_model_t* const self, void* const context)
1635
17
{
1636
17
  return ccv_cnnp_relu(self->name);
1637
17
}
1638
1639
// MARK - Sigmoid Layer
1640
1641
typedef struct {
1642
  ccv_cnnp_model_t super;
1643
  ccv_nnc_tensor_symbol_t output;
1644
} ccv_cnnp_model_sigmoid_t;
1645
1646
static void _ccv_cnnp_sigmoid_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1647
5
{
1648
5
  PRINT(CCV_CLI_VERBOSE, "[cnnp_sigmoid_build] -\n");
1649
5
  assert(input_size == 1);
1650
5
  assert(output_size == 1);
1651
5
  ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
1652
5
  ccv_nnc_tensor_param_t output_params;
1653
5
  const ccv_nnc_cmd_t sigmoid = CMD_SIGMOID_FORWARD();
1654
5
  ccv_nnc_hint_tensor_auto(sigmoid, (ccv_nnc_tensor_param_t []){
1655
5
      params,
1656
5
    }, 1, ccv_nnc_no_hint, &output_params, 1);
1657
5
  const ccv_nnc_tensor_symbol_t sigmoid_output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1658
5
  ccv_nnc_graph_exec_symbol_new(graph, sigmoid, TENSOR_SYMBOL_LIST(inputs[0]), TENSOR_SYMBOL_LIST(sigmoid_output), "sigmoid");
1659
5
  outputs[0] = sigmoid_output;
1660
5
}
1661
1662
static ccv_cnnp_model_t* _ccv_cnnp_sigmoid_copy(const ccv_cnnp_model_t* const self, void* const context);
1663
1664
static const ccv_cnnp_model_vtab_t ccv_cnnp_sigmoid_isa = {
1665
  .build = _ccv_cnnp_sigmoid_build,
1666
  .copy = _ccv_cnnp_sigmoid_copy,
1667
};
1668
1669
ccv_cnnp_model_t* ccv_cnnp_sigmoid(const char* const name)
1670
5
{
1671
5
  ccv_cnnp_model_sigmoid_t* const model_sigmoid = (ccv_cnnp_model_sigmoid_t*)cccalloc(1, sizeof(ccv_cnnp_model_sigmoid_t));
1672
5
  model_sigmoid->super.isa = &ccv_cnnp_sigmoid_isa;
1673
5
  model_sigmoid->super.input_size = 1;
1674
5
  model_sigmoid->super.outputs = &model_sigmoid->output;
1675
5
  model_sigmoid->super.output_size = 1;
1676
5
  ccv_cnnp_model_copy_name(&model_sigmoid->super, name);
1677
5
  return (ccv_cnnp_model_t*)model_sigmoid;
1678
5
}
1679
1680
static ccv_cnnp_model_t* _ccv_cnnp_sigmoid_copy(const ccv_cnnp_model_t* const self, void* const context)
1681
0
{
1682
0
  return ccv_cnnp_sigmoid(self->name);
1683
0
}
1684
1685
// MARK - Tanh Layer
1686
1687
typedef struct {
1688
  ccv_cnnp_model_t super;
1689
  ccv_nnc_tensor_symbol_t output;
1690
} ccv_cnnp_model_tanh_t;
1691
1692
static void _ccv_cnnp_tanh_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1693
0
{
1694
0
  PRINT(CCV_CLI_VERBOSE, "[cnnp_tanh_build] -\n");
1695
0
  assert(input_size == 1);
1696
0
  assert(output_size == 1);
1697
0
  ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
1698
0
  ccv_nnc_tensor_param_t output_params;
1699
0
  const ccv_nnc_cmd_t tanh = CMD_TANH_FORWARD();
1700
0
  ccv_nnc_hint_tensor_auto(tanh, (ccv_nnc_tensor_param_t []){
1701
0
      params,
1702
0
    }, 1, ccv_nnc_no_hint, &output_params, 1);
1703
0
  const ccv_nnc_tensor_symbol_t tanh_output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1704
0
  ccv_nnc_graph_exec_symbol_new(graph, tanh, TENSOR_SYMBOL_LIST(inputs[0]), TENSOR_SYMBOL_LIST(tanh_output), "tanh");
1705
0
  outputs[0] = tanh_output;
1706
0
}
1707
1708
static ccv_cnnp_model_t* _ccv_cnnp_tanh_copy(const ccv_cnnp_model_t* const self, void* const context);
1709
1710
static const ccv_cnnp_model_vtab_t ccv_cnnp_tanh_isa = {
1711
  .build = _ccv_cnnp_tanh_build,
1712
  .copy = _ccv_cnnp_tanh_copy,
1713
};
1714
1715
ccv_cnnp_model_t* ccv_cnnp_tanh(const char* const name)
1716
0
{
1717
0
  ccv_cnnp_model_tanh_t* const model_tanh = (ccv_cnnp_model_tanh_t*)cccalloc(1, sizeof(ccv_cnnp_model_tanh_t));
1718
0
  model_tanh->super.isa = &ccv_cnnp_tanh_isa;
1719
0
  model_tanh->super.input_size = 1;
1720
0
  model_tanh->super.outputs = &model_tanh->output;
1721
0
  model_tanh->super.output_size = 1;
1722
0
  ccv_cnnp_model_copy_name(&model_tanh->super, name);
1723
0
  return (ccv_cnnp_model_t*)model_tanh;
1724
0
}
1725
1726
static ccv_cnnp_model_t* _ccv_cnnp_tanh_copy(const ccv_cnnp_model_t* const self, void* const context)
1727
0
{
1728
0
  return ccv_cnnp_tanh(self->name);
1729
0
}
1730
1731
// MARK - Swish Layer
1732
1733
typedef struct {
1734
  ccv_cnnp_model_t super;
1735
  ccv_nnc_tensor_symbol_t output;
1736
} ccv_cnnp_model_swish_t;
1737
1738
static void _ccv_cnnp_swish_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1739
0
{
1740
0
  PRINT(CCV_CLI_VERBOSE, "[cnnp_swish_build] -\n");
1741
0
  assert(input_size == 1);
1742
0
  assert(output_size == 1);
1743
0
  ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
1744
0
  ccv_nnc_tensor_param_t output_params;
1745
0
  const ccv_nnc_cmd_t swish = CMD_SWISH_FORWARD();
1746
0
  ccv_nnc_hint_tensor_auto(swish, (ccv_nnc_tensor_param_t []){
1747
0
      params,
1748
0
    }, 1, ccv_nnc_no_hint, &output_params, 1);
1749
0
  const ccv_nnc_tensor_symbol_t swish_output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1750
0
  ccv_nnc_graph_exec_symbol_new(graph, swish, TENSOR_SYMBOL_LIST(inputs[0]), TENSOR_SYMBOL_LIST(swish_output), "swish");
1751
0
  outputs[0] = swish_output;
1752
0
}
1753
1754
static ccv_cnnp_model_t* _ccv_cnnp_swish_copy(const ccv_cnnp_model_t* const self, void* const context);
1755
1756
static const ccv_cnnp_model_vtab_t ccv_cnnp_swish_isa = {
1757
  .build = _ccv_cnnp_swish_build,
1758
  .copy = _ccv_cnnp_swish_copy,
1759
};
1760
1761
ccv_cnnp_model_t* ccv_cnnp_swish(const char* const name)
1762
0
{
1763
0
  ccv_cnnp_model_swish_t* const model_swish = (ccv_cnnp_model_swish_t*)cccalloc(1, sizeof(ccv_cnnp_model_swish_t));
1764
0
  model_swish->super.isa = &ccv_cnnp_swish_isa;
1765
0
  model_swish->super.input_size = 1;
1766
0
  model_swish->super.outputs = &model_swish->output;
1767
0
  model_swish->super.output_size = 1;
1768
0
  ccv_cnnp_model_copy_name(&model_swish->super, name);
1769
0
  return (ccv_cnnp_model_t*)model_swish;
1770
0
}
1771
1772
static ccv_cnnp_model_t* _ccv_cnnp_swish_copy(const ccv_cnnp_model_t* const self, void* const context)
1773
0
{
1774
0
  return ccv_cnnp_swish(self->name);
1775
0
}
1776
1777
// MARK - GELU Layer
1778
1779
typedef struct {
1780
  ccv_cnnp_model_t super;
1781
  ccv_nnc_tensor_symbol_t output;
1782
  int tanh;
1783
} ccv_cnnp_model_gelu_t;
1784
1785
static void _ccv_cnnp_gelu_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1786
2
{
1787
2
  PRINT(CCV_CLI_VERBOSE, "[cnnp_gelu_build] -\n");
1788
2
  assert(input_size == 1);
1789
2
  assert(output_size == 1);
1790
2
  ccv_cnnp_model_gelu_t* const self = (ccv_cnnp_model_gelu_t*)super;
1791
2
  ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
1792
2
  ccv_nnc_tensor_param_t output_params;
1793
2
  const ccv_nnc_cmd_t gelu = CMD_GELU_FORWARD(self->tanh);
1794
2
  ccv_nnc_hint_tensor_auto(gelu, (ccv_nnc_tensor_param_t []){
1795
2
      params,
1796
2
    }, 1, ccv_nnc_no_hint, &output_params, 1);
1797
2
  const ccv_nnc_tensor_symbol_t gelu_output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1798
2
  ccv_nnc_graph_exec_symbol_new(graph, gelu, TENSOR_SYMBOL_LIST(inputs[0]), TENSOR_SYMBOL_LIST(gelu_output), "gelu");
1799
2
  outputs[0] = gelu_output;
1800
2
}
1801
1802
static ccv_cnnp_model_t* _ccv_cnnp_gelu_copy(const ccv_cnnp_model_t* const self, void* const context);
1803
1804
static const ccv_cnnp_model_vtab_t ccv_cnnp_gelu_isa = {
1805
  .build = _ccv_cnnp_gelu_build,
1806
  .copy = _ccv_cnnp_gelu_copy,
1807
};
1808
1809
ccv_cnnp_model_t* ccv_cnnp_gelu(const int tanh, const char* const name)
1810
1
{
1811
1
  ccv_cnnp_model_gelu_t* const model_gelu = (ccv_cnnp_model_gelu_t*)cccalloc(1, sizeof(ccv_cnnp_model_gelu_t));
1812
1
  model_gelu->super.isa = &ccv_cnnp_gelu_isa;
1813
1
  model_gelu->super.input_size = 1;
1814
1
  model_gelu->super.outputs = &model_gelu->output;
1815
1
  model_gelu->super.output_size = 1;
1816
1
  model_gelu->tanh = tanh;
1817
1
  ccv_cnnp_model_copy_name(&model_gelu->super, name);
1818
1
  return (ccv_cnnp_model_t*)model_gelu;
1819
1
}
1820
1821
static ccv_cnnp_model_t* _ccv_cnnp_gelu_copy(const ccv_cnnp_model_t* const super, void* const context)
1822
0
{
1823
0
  ccv_cnnp_model_gelu_t* const self = (ccv_cnnp_model_gelu_t*)super;
1824
0
  return ccv_cnnp_gelu(self->tanh, self->super.name);
1825
0
}
1826
1827
// MARK - Leaky ReLU Layer
1828
1829
typedef struct {
1830
  ccv_cnnp_model_t super;
1831
  ccv_nnc_tensor_symbol_t output;
1832
  float negative_slope;
1833
} ccv_cnnp_model_leaky_relu_t;
1834
1835
static void _ccv_cnnp_leaky_relu_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1836
0
{
1837
0
  PRINT(CCV_CLI_VERBOSE, "[cnnp_leaky_relu_build] -\n");
1838
0
  assert(input_size == 1);
1839
0
  assert(output_size == 1);
1840
0
  ccv_cnnp_model_leaky_relu_t* const self = (ccv_cnnp_model_leaky_relu_t*)super;
1841
0
  ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
1842
0
  ccv_nnc_tensor_param_t output_params;
1843
0
  const ccv_nnc_cmd_t leaky_relu = CMD_LEAKY_RELU_FORWARD(self->negative_slope);
1844
0
  ccv_nnc_hint_tensor_auto(leaky_relu, (ccv_nnc_tensor_param_t []){
1845
0
      params,
1846
0
    }, 1, ccv_nnc_no_hint, &output_params, 1);
1847
0
  const ccv_nnc_tensor_symbol_t leaky_relu_output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1848
0
  ccv_nnc_graph_exec_symbol_new(graph, leaky_relu, TENSOR_SYMBOL_LIST(inputs[0]), TENSOR_SYMBOL_LIST(leaky_relu_output), "leaky_relu");
1849
0
  outputs[0] = leaky_relu_output;
1850
0
}
1851
1852
static ccv_cnnp_model_t* _ccv_cnnp_leaky_relu_copy(const ccv_cnnp_model_t* const self, void* const context);
1853
1854
static const ccv_cnnp_model_vtab_t ccv_cnnp_leaky_relu_isa = {
1855
  .build = _ccv_cnnp_leaky_relu_build,
1856
  .copy = _ccv_cnnp_leaky_relu_copy,
1857
};
1858
1859
ccv_cnnp_model_t* ccv_cnnp_leaky_relu(const float negative_slope, const char* const name)
1860
0
{
1861
0
  ccv_cnnp_model_leaky_relu_t* const model_leaky_relu = (ccv_cnnp_model_leaky_relu_t*)cccalloc(1, sizeof(ccv_cnnp_model_leaky_relu_t));
1862
0
  model_leaky_relu->super.isa = &ccv_cnnp_leaky_relu_isa;
1863
0
  model_leaky_relu->super.input_size = 1;
1864
0
  model_leaky_relu->super.outputs = &model_leaky_relu->output;
1865
0
  model_leaky_relu->super.output_size = 1;
1866
0
  model_leaky_relu->negative_slope = negative_slope;
1867
0
  ccv_cnnp_model_copy_name(&model_leaky_relu->super, name);
1868
0
  return (ccv_cnnp_model_t*)model_leaky_relu;
1869
0
}
1870
1871
static ccv_cnnp_model_t* _ccv_cnnp_leaky_relu_copy(const ccv_cnnp_model_t* const super, void* const context)
1872
0
{
1873
0
  ccv_cnnp_model_leaky_relu_t* const self = (ccv_cnnp_model_leaky_relu_t*)super;
1874
0
  return ccv_cnnp_leaky_relu(self->negative_slope, self->super.name);
1875
0
}
1876
1877
// MARK - Softmax Layer
1878
1879
typedef struct {
1880
  ccv_cnnp_model_t super;
1881
  ccv_nnc_tensor_symbol_t output;
1882
} ccv_cnnp_model_softmax_t;
1883
1884
static void _ccv_cnnp_softmax_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1885
8
{
1886
8
  PRINT(CCV_CLI_VERBOSE, "[cnnp_softmax_build] -\n");
1887
8
  assert(input_size == 1);
1888
8
  assert(output_size == 1);
1889
8
  ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
1890
8
  ccv_nnc_tensor_param_t output_params;
1891
8
  const ccv_nnc_cmd_t softmax = CMD_SOFTMAX_FORWARD();
1892
8
  ccv_nnc_hint_tensor_auto(softmax, (ccv_nnc_tensor_param_t []){
1893
8
      params,
1894
8
    }, 1, ccv_nnc_no_hint, &output_params, 1);
1895
8
  const ccv_nnc_tensor_symbol_t softmax_output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1896
8
  ccv_nnc_graph_exec_symbol_new(graph, softmax, TENSOR_SYMBOL_LIST(inputs[0]), TENSOR_SYMBOL_LIST(softmax_output), "softmax");
1897
8
  outputs[0] = softmax_output;
1898
8
}
1899
1900
static ccv_cnnp_model_t* _ccv_cnnp_softmax_copy(const ccv_cnnp_model_t* const self, void* const context);
1901
1902
static const ccv_cnnp_model_vtab_t ccv_cnnp_softmax_isa = {
1903
  .build = _ccv_cnnp_softmax_build,
1904
  .copy = _ccv_cnnp_softmax_copy,
1905
};
1906
1907
ccv_cnnp_model_t* ccv_cnnp_softmax(const char* const name)
1908
9
{
1909
9
  ccv_cnnp_model_softmax_t* const model_softmax = (ccv_cnnp_model_softmax_t*)cccalloc(1, sizeof(ccv_cnnp_model_softmax_t));
1910
9
  model_softmax->super.isa = &ccv_cnnp_softmax_isa;
1911
9
  model_softmax->super.input_size = 1;
1912
9
  model_softmax->super.outputs = &model_softmax->output;
1913
9
  model_softmax->super.output_size = 1;
1914
9
  ccv_cnnp_model_copy_name(&model_softmax->super, name);
1915
9
  return (ccv_cnnp_model_t*)model_softmax;
1916
9
}
1917
1918
static ccv_cnnp_model_t* _ccv_cnnp_softmax_copy(const ccv_cnnp_model_t* const self, void* const context)
1919
1
{
1920
1
  return ccv_cnnp_softmax(self->name);
1921
1
}
1922
1923
// MARK - Add Layer
1924
1925
typedef struct {
1926
  ccv_cnnp_model_t super;
1927
  float p;
1928
  float q;
1929
  ccv_nnc_tensor_symbol_t output;
1930
} ccv_cnnp_model_add_t;
1931
1932
static void _ccv_cnnp_add_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1933
0
{
1934
0
  PRINT(CCV_CLI_VERBOSE, "[cnnp_add_build] -\n");
1935
0
  const ccv_cnnp_model_add_t* const self = (const ccv_cnnp_model_add_t*)super;
1936
0
  assert(input_size == 2);
1937
0
  assert(output_size == 1);
1938
0
  ccv_nnc_tensor_param_t input_params[2];
1939
0
  int i;
1940
0
  for (i = 0; i < 2; i++)
1941
0
    input_params[i] = ccv_nnc_tensor_symbol_params(graph, inputs[i]);
1942
0
  ccv_nnc_tensor_param_t output_params;
1943
0
  const ccv_nnc_cmd_t add = CMD_ADD_FORWARD(self->p, self->q);
1944
0
  ccv_nnc_hint_tensor_auto(add, input_params, 2, ccv_nnc_no_hint, &output_params, 1);
1945
0
  outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1946
0
  ccv_nnc_graph_exec_symbol_new(graph, add, inputs, input_size, outputs, output_size, "add");
1947
0
}
1948
1949
static ccv_cnnp_model_t* _ccv_cnnp_add_copy(const ccv_cnnp_model_t* const self, void* const context);
1950
1951
static const ccv_cnnp_model_vtab_t ccv_cnnp_add_isa = {
1952
  .build = _ccv_cnnp_add_build,
1953
  .copy = _ccv_cnnp_add_copy,
1954
};
1955
1956
ccv_cnnp_model_t* ccv_cnnp_add(const float p, const float q, const char* const name)
1957
0
{
1958
0
  ccv_cnnp_model_add_t* const model_add = (ccv_cnnp_model_add_t*)cccalloc(1, sizeof(ccv_cnnp_model_add_t));
1959
0
  model_add->super.isa = &ccv_cnnp_add_isa;
1960
0
  model_add->super.input_size = 2;
1961
0
  model_add->super.outputs = &model_add->output;
1962
0
  model_add->super.output_size = 1;
1963
0
  model_add->p = p;
1964
0
  model_add->q = q;
1965
0
  ccv_cnnp_model_copy_name(&model_add->super, name);
1966
0
  return (ccv_cnnp_model_t*)model_add;
1967
0
}
1968
1969
static ccv_cnnp_model_t* _ccv_cnnp_add_copy(const ccv_cnnp_model_t* const super, void* const context)
1970
0
{
1971
0
  const ccv_cnnp_model_add_t* const self = (const ccv_cnnp_model_add_t*)super;
1972
0
  return ccv_cnnp_add(self->p, self->q, self->super.name);
1973
0
}
1974
1975
// MARK - Mul Layer
1976
1977
typedef struct {
1978
  ccv_cnnp_model_t super;
1979
  ccv_nnc_tensor_symbol_t output;
1980
  float p;
1981
} ccv_cnnp_model_mul_t;
1982
1983
static void _ccv_cnnp_mul_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
1984
6
{
1985
6
  PRINT(CCV_CLI_VERBOSE, "[cnnp_mul_build] -\n");
1986
6
  const ccv_cnnp_model_mul_t* const self = (const ccv_cnnp_model_mul_t*)super;
1987
6
  assert(input_size == 2);
1988
6
  assert(output_size == 1);
1989
6
  ccv_nnc_tensor_param_t input_params[2];
1990
6
  int i;
1991
18
  for (i = 0; i < 2; 
i++12
)
1992
12
    input_params[i] = ccv_nnc_tensor_symbol_params(graph, inputs[i]);
1993
6
  ccv_nnc_tensor_param_t output_params;
1994
6
  const ccv_nnc_cmd_t mul = CMD_MUL_FORWARD(self->p);
1995
6
  ccv_nnc_hint_tensor_auto(mul, input_params, 2, ccv_nnc_no_hint, &output_params, 1);
1996
6
  outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
1997
6
  ccv_nnc_graph_exec_symbol_new(graph, mul, inputs, input_size, outputs, output_size, "mul");
1998
6
}
1999
2000
static ccv_cnnp_model_t* _ccv_cnnp_mul_copy(const ccv_cnnp_model_t* const self, void* const context);
2001
2002
static const ccv_cnnp_model_vtab_t ccv_cnnp_mul_isa = {
2003
  .build = _ccv_cnnp_mul_build,
2004
  .copy = _ccv_cnnp_mul_copy,
2005
};
2006
2007
ccv_cnnp_model_t* ccv_cnnp_mul(const float p, const char* const name)
2008
5
{
2009
5
  ccv_cnnp_model_mul_t* const model_mul = (ccv_cnnp_model_mul_t*)cccalloc(1, sizeof(ccv_cnnp_model_mul_t));
2010
5
  model_mul->super.isa = &ccv_cnnp_mul_isa;
2011
5
  model_mul->super.input_size = 2;
2012
5
  model_mul->super.outputs = &model_mul->output;
2013
5
  model_mul->super.output_size = 1;
2014
5
  model_mul->p = p;
2015
5
  ccv_cnnp_model_copy_name(&model_mul->super, name);
2016
5
  return (ccv_cnnp_model_t*)model_mul;
2017
5
}
2018
2019
static ccv_cnnp_model_t* _ccv_cnnp_mul_copy(const ccv_cnnp_model_t* const super, void* const context)
2020
0
{
2021
0
  const ccv_cnnp_model_mul_t* const self = (const ccv_cnnp_model_mul_t*)super;
2022
0
  return ccv_cnnp_mul(self->p, self->super.name);
2023
0
}
2024
2025
// MARK - Scalar Mul Layer
2026
2027
typedef struct {
2028
  ccv_cnnp_model_t super;
2029
  ccv_nnc_tensor_symbol_t output;
2030
  float a;
2031
} ccv_cnnp_model_scalar_mul_t;
2032
2033
static void _ccv_cnnp_scalar_mul_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2034
4
{
2035
4
  PRINT(CCV_CLI_VERBOSE, "[cnnp_scalar_mul_build] -\n");
2036
4
  assert(input_size == 1);
2037
4
  assert(output_size == 1);
2038
4
  ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2039
4
  ccv_nnc_tensor_param_t output_params;
2040
4
  ccv_cnnp_model_scalar_mul_t* const self = (ccv_cnnp_model_scalar_mul_t*)super;
2041
4
  const ccv_nnc_cmd_t scalar_mul = CMD_SCALAR_MUL_FORWARD(self->a);
2042
4
  ccv_nnc_hint_tensor_auto(scalar_mul, (ccv_nnc_tensor_param_t []){
2043
4
      params,
2044
4
    }, 1, ccv_nnc_no_hint, &output_params, 1);
2045
4
  const ccv_nnc_tensor_symbol_t scalar_mul_output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
2046
4
  ccv_nnc_graph_exec_symbol_new(graph, scalar_mul, TENSOR_SYMBOL_LIST(inputs[0]), TENSOR_SYMBOL_LIST(scalar_mul_output), "scalar_mul");
2047
4
  outputs[0] = scalar_mul_output;
2048
4
}
2049
2050
static ccv_cnnp_model_t* _ccv_cnnp_scalar_mul_copy(const ccv_cnnp_model_t* const super, void* const context);
2051
2052
static const ccv_cnnp_model_vtab_t ccv_cnnp_scalar_mul_isa = {
2053
  .build = _ccv_cnnp_scalar_mul_build,
2054
  .copy = _ccv_cnnp_scalar_mul_copy,
2055
};
2056
2057
ccv_cnnp_model_t* ccv_cnnp_scalar_mul(const float a, const char* const name)
2058
4
{
2059
4
  ccv_cnnp_model_scalar_mul_t* const model_scalar_mul = (ccv_cnnp_model_scalar_mul_t*)cccalloc(1, sizeof(ccv_cnnp_model_scalar_mul_t));
2060
4
  model_scalar_mul->super.isa = &ccv_cnnp_scalar_mul_isa;
2061
4
  model_scalar_mul->super.input_size = 1;
2062
4
  model_scalar_mul->super.outputs = &model_scalar_mul->output;
2063
4
  model_scalar_mul->super.output_size = 1;
2064
4
  model_scalar_mul->a = a;
2065
4
  ccv_cnnp_model_copy_name(&model_scalar_mul->super, name);
2066
4
  return (ccv_cnnp_model_t*)model_scalar_mul;
2067
4
}
2068
2069
static ccv_cnnp_model_t* _ccv_cnnp_scalar_mul_copy(const ccv_cnnp_model_t* const super, void* const context)
2070
0
{
2071
0
  const ccv_cnnp_model_scalar_mul_t* const self = (const ccv_cnnp_model_scalar_mul_t*)super;
2072
0
  return ccv_cnnp_scalar_mul(self->a, self->super.name);
2073
0
}
2074
2075
// MARK - Div Layer
2076
2077
typedef struct {
2078
  ccv_cnnp_model_t super;
2079
  ccv_nnc_tensor_symbol_t output;
2080
  int reciprocal;
2081
} ccv_cnnp_model_div_t;
2082
2083
static void _ccv_cnnp_div_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2084
2
{
2085
2
  const ccv_cnnp_model_div_t* const self = (const ccv_cnnp_model_div_t*)super;
2086
2
  PRINT(CCV_CLI_VERBOSE, "[cnnp_div_build] -\n");
2087
2
  assert(output_size == 1);
2088
2
  ccv_nnc_tensor_param_t input_params[2];
2089
2
  int i;
2090
2
  ccv_nnc_tensor_param_t output_params;
2091
2
  const ccv_nnc_cmd_t div = CMD_EWDIV_FORWARD();
2092
2
  if (self->reciprocal)
2093
1
  {
2094
1
    assert(input_size == 1);
2095
1
    input_params[0] = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2096
1
    input_params[1] = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2097
1
    ccv_nnc_hint_tensor_auto(div, input_params, 2, ccv_nnc_no_hint, &output_params, 1);
2098
1
    outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
2099
1
    ccv_nnc_graph_exec_symbol_new(graph, div, TENSOR_SYMBOL_LIST(NO_TENSOR_SYMBOL, inputs[0]), outputs, output_size, "div");
2100
1
  } else {
2101
1
    assert(input_size == 2);
2102
3
    
for (i = 0; 1
i < 2;
i++2
)
2103
2
      input_params[i] = ccv_nnc_tensor_symbol_params(graph, inputs[i]);
2104
1
    ccv_nnc_hint_tensor_auto(div, input_params, input_size, ccv_nnc_no_hint, &output_params, 1);
2105
1
    outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
2106
1
    ccv_nnc_graph_exec_symbol_new(graph, div, inputs, input_size, outputs, output_size, "div");
2107
1
  }
2108
2
}
2109
2110
static ccv_cnnp_model_t* _ccv_cnnp_div_copy(const ccv_cnnp_model_t* const self, void* const context);
2111
2112
static const ccv_cnnp_model_vtab_t ccv_cnnp_div_isa = {
2113
  .build = _ccv_cnnp_div_build,
2114
  .copy = _ccv_cnnp_div_copy,
2115
};
2116
2117
ccv_cnnp_model_t* ccv_cnnp_div(const int reciprocal, const char* const name)
2118
2
{
2119
2
  ccv_cnnp_model_div_t* const model_div = (ccv_cnnp_model_div_t*)cccalloc(1, sizeof(ccv_cnnp_model_div_t));
2120
2
  model_div->super.isa = &ccv_cnnp_div_isa;
2121
2
  model_div->super.input_size = reciprocal ? 
11
:
21
;
2122
2
  model_div->super.outputs = &model_div->output;
2123
2
  model_div->super.output_size = 1;
2124
2
  model_div->reciprocal = reciprocal;
2125
2
  ccv_cnnp_model_copy_name(&model_div->super, name);
2126
2
  return (ccv_cnnp_model_t*)model_div;
2127
2
}
2128
2129
static ccv_cnnp_model_t* _ccv_cnnp_div_copy(const ccv_cnnp_model_t* const super, void* const context)
2130
0
{
2131
0
  const ccv_cnnp_model_div_t* const self = (const ccv_cnnp_model_div_t*)super;
2132
0
  return ccv_cnnp_div(self->reciprocal, self->super.name);
2133
0
}
2134
2135
// MARK - Sqrt Layer
2136
2137
typedef struct {
2138
  ccv_cnnp_model_t super;
2139
  ccv_nnc_tensor_symbol_t output;
2140
} ccv_cnnp_model_sqrt_t;
2141
2142
static void _ccv_cnnp_sqrt_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2143
0
{
2144
0
  PRINT(CCV_CLI_VERBOSE, "[cnnp_sqrt_build] -\n");
2145
0
  assert(output_size == 1);
2146
0
  ccv_nnc_tensor_param_t input_params[1];
2147
0
  ccv_nnc_tensor_param_t output_params;
2148
0
  const ccv_nnc_cmd_t sqrt = CMD_EWSQRT_FORWARD();
2149
0
  assert(input_size == 1);
2150
0
  input_params[0] = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2151
0
  ccv_nnc_hint_tensor_auto(sqrt, input_params, 1, ccv_nnc_no_hint, &output_params, 1);
2152
0
  outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
2153
0
  ccv_nnc_graph_exec_symbol_new(graph, sqrt, inputs, 1, outputs, output_size, "sqrt");
2154
0
}
2155
2156
static ccv_cnnp_model_t* _ccv_cnnp_sqrt_copy(const ccv_cnnp_model_t* const self, void* const context);
2157
2158
static const ccv_cnnp_model_vtab_t ccv_cnnp_sqrt_isa = {
2159
  .build = _ccv_cnnp_sqrt_build,
2160
  .copy = _ccv_cnnp_sqrt_copy,
2161
};
2162
2163
ccv_cnnp_model_t* ccv_cnnp_sqrt(const char* const name)
2164
0
{
2165
0
  ccv_cnnp_model_sqrt_t* const model_sqrt = (ccv_cnnp_model_sqrt_t*)cccalloc(1, sizeof(ccv_cnnp_model_sqrt_t));
2166
0
  model_sqrt->super.isa = &ccv_cnnp_sqrt_isa;
2167
0
  model_sqrt->super.input_size = 1;
2168
0
  model_sqrt->super.outputs = &model_sqrt->output;
2169
0
  model_sqrt->super.output_size = 1;
2170
0
  ccv_cnnp_model_copy_name(&model_sqrt->super, name);
2171
0
  return (ccv_cnnp_model_t*)model_sqrt;
2172
0
}
2173
2174
static ccv_cnnp_model_t* _ccv_cnnp_sqrt_copy(const ccv_cnnp_model_t* const super, void* const context)
2175
0
{
2176
0
  const ccv_cnnp_model_sqrt_t* const self = (const ccv_cnnp_model_sqrt_t*)super;
2177
0
  return ccv_cnnp_sqrt(self->super.name);
2178
0
}
2179
2180
// MARK - Cmul Layer
2181
2182
typedef struct {
2183
  ccv_cnnp_model_t super;
2184
  ccv_nnc_tensor_symbol_t output;
2185
} ccv_cnnp_model_cmul_t;
2186
2187
static void _ccv_cnnp_cmul_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2188
0
{
2189
0
  PRINT(CCV_CLI_VERBOSE, "[cnnp_cmul_build] -\n");
2190
0
  assert(input_size == 2);
2191
0
  assert(output_size == 1);
2192
0
  ccv_nnc_tensor_param_t input_params[2];
2193
0
  int i;
2194
0
  for (i = 0; i < 2; i++)
2195
0
    input_params[i] = ccv_nnc_tensor_symbol_params(graph, inputs[i]);
2196
0
  ccv_nnc_tensor_param_t output_params;
2197
0
  const ccv_nnc_cmd_t mul = CMD_CMUL_FORWARD();
2198
0
  ccv_nnc_hint_tensor_auto(mul, input_params, 2, ccv_nnc_no_hint, &output_params, 1);
2199
0
  outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
2200
0
  ccv_nnc_graph_exec_symbol_new(graph, mul, inputs, input_size, outputs, output_size, "cmul");
2201
0
}
2202
2203
static ccv_cnnp_model_t* _ccv_cnnp_cmul_copy(const ccv_cnnp_model_t* const self, void* const context);
2204
2205
static const ccv_cnnp_model_vtab_t ccv_cnnp_cmul_isa = {
2206
  .build = _ccv_cnnp_cmul_build,
2207
  .copy = _ccv_cnnp_cmul_copy,
2208
};
2209
2210
ccv_cnnp_model_t* ccv_cnnp_cmul(const char* const name)
2211
0
{
2212
0
  ccv_cnnp_model_cmul_t* const model_cmul = (ccv_cnnp_model_cmul_t*)cccalloc(1, sizeof(ccv_cnnp_model_cmul_t));
2213
0
  model_cmul->super.isa = &ccv_cnnp_cmul_isa;
2214
0
  model_cmul->super.input_size = 2;
2215
0
  model_cmul->super.outputs = &model_cmul->output;
2216
0
  model_cmul->super.output_size = 1;
2217
0
  ccv_cnnp_model_copy_name(&model_cmul->super, name);
2218
0
  return (ccv_cnnp_model_t*)model_cmul;
2219
0
}
2220
2221
static ccv_cnnp_model_t* _ccv_cnnp_cmul_copy(const ccv_cnnp_model_t* const super, void* const context)
2222
0
{
2223
0
  return ccv_cnnp_cmul(super->name);
2224
0
}
2225
2226
// MARK - Transpose Layer
2227
2228
typedef struct {
2229
  ccv_cnnp_model_t super;
2230
  ccv_nnc_tensor_symbol_t output;
2231
  int transpose[2];
2232
} ccv_cnnp_model_transpose_t;
2233
2234
static void _ccv_cnnp_transpose_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2235
22
{
2236
22
  ccv_cnnp_model_transpose_t* const self = (ccv_cnnp_model_transpose_t*)super;
2237
22
  PRINT(CCV_CLI_VERBOSE, "[cnnp_transpose_build] (%d, %d)\n", self->transpose[0], self->transpose[1]);
2238
22
  assert(input_size == 1);
2239
22
  assert(output_size == 1);
2240
22
  if (self->transpose[0] == self->transpose[1])
2241
0
  {
2242
0
    outputs[0] = inputs[0];
2243
0
    return;
2244
0
  }
2245
22
  ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2246
22
  ccv_nnc_tensor_param_t output_params;
2247
22
  const ccv_nnc_cmd_t transpose = CMD_TRANSPOSE_FORWARD(self->transpose[0], self->transpose[1]);
2248
22
  ccv_nnc_hint_tensor_auto(transpose, (ccv_nnc_tensor_param_t []){
2249
22
      params,
2250
22
    }, 1, ccv_nnc_no_hint, &output_params, 1);
2251
22
  const ccv_nnc_tensor_symbol_t transpose_output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
2252
22
  ccv_nnc_graph_exec_symbol_new(graph, transpose, TENSOR_SYMBOL_LIST(inputs[0]), TENSOR_SYMBOL_LIST(transpose_output), "transpose");
2253
22
  outputs[0] = transpose_output;
2254
22
}
2255
2256
static ccv_cnnp_model_t* _ccv_cnnp_transpose_copy(const ccv_cnnp_model_t* const super, void* const context);
2257
2258
static const ccv_cnnp_model_vtab_t ccv_cnnp_transpose_isa = {
2259
  .build = _ccv_cnnp_transpose_build,
2260
  .copy = _ccv_cnnp_transpose_copy,
2261
};
2262
2263
ccv_cnnp_model_t* ccv_cnnp_transpose(const int axis_a, const int axis_b, const char* const name)
2264
22
{
2265
22
  ccv_cnnp_model_transpose_t* const model_transpose = (ccv_cnnp_model_transpose_t*)cccalloc(1, sizeof(ccv_cnnp_model_transpose_t));
2266
22
  model_transpose->super.isa = &ccv_cnnp_transpose_isa;
2267
22
  model_transpose->super.input_size = 1;
2268
22
  model_transpose->super.outputs = &model_transpose->output;
2269
22
  model_transpose->super.output_size = 1;
2270
22
  model_transpose->transpose[0] = axis_a;
2271
22
  model_transpose->transpose[1] = axis_b;
2272
22
  ccv_cnnp_model_copy_name(&model_transpose->super, name);
2273
22
  return (ccv_cnnp_model_t*)model_transpose;
2274
22
}
2275
2276
static ccv_cnnp_model_t* _ccv_cnnp_transpose_copy(const ccv_cnnp_model_t* const super, void* const context)
2277
0
{
2278
0
  const ccv_cnnp_model_transpose_t* const self = (const ccv_cnnp_model_transpose_t*)super;
2279
0
  return ccv_cnnp_transpose(self->transpose[0], self->transpose[1], self->super.name);
2280
0
}
2281
2282
// MARK - Layer Norm Layer
2283
2284
typedef struct {
2285
  ccv_cnnp_model_t super;
2286
  ccv_nnc_tensor_symbol_t output;
2287
  ccv_nnc_tensor_symbol_t bias;
2288
  ccv_nnc_tensor_symbol_t scale;
2289
  ccv_nnc_cmd_param_t params;
2290
} ccv_cnnp_model_layer_norm_t;
2291
2292
static void _ccv_cnnp_layer_norm_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2293
8
{
2294
8
  PRINT(CCV_CLI_VERBOSE, "[cnnp_layer_norm_build] -\n");
2295
8
  assert(input_size == 1);
2296
8
  assert(output_size == 1);
2297
8
  ccv_cnnp_model_layer_norm_t* const self = (ccv_cnnp_model_layer_norm_t*)super;
2298
8
  const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2299
8
  ccv_nnc_tensor_param_t bias_params = params;
2300
8
  const int nd = ccv_nnc_tensor_nd(params.dim);
2301
8
  int i;
2302
32
  for (i = 0; i < nd; 
i++24
)
2303
24
    bias_params.dim[i] = 1;
2304
16
  for (i = 0; i < self->params.lnorm.count; 
i++8
)
2305
8
    bias_params.dim[self->params.lnorm.axis[i]] = params.dim[self->params.lnorm.axis[i]];
2306
8
  if (self->params.lnorm.elementwise_affine)
2307
8
  {
2308
    // Both scale and bias are shared between if this model is reused.
2309
8
    if (!self->scale.graph)
2310
8
      self->scale = ccv_nnc_tensor_symbol_new(graph, bias_params, "scale");
2311
8
    if (!self->bias.graph)
2312
8
      self->bias = ccv_nnc_tensor_symbol_new(graph, bias_params, "bias");
2313
8
  }
2314
8
  const ccv_nnc_cmd_t layer_norm = ccv_nnc_cmd(CCV_NNC_LAYER_NORM_FORWARD, 0, self->params, 0);
2315
8
  ccv_nnc_tensor_param_t output_params[3];
2316
8
  if (self->params.lnorm.elementwise_affine)
2317
8
    ccv_nnc_hint_tensor_auto(layer_norm, (ccv_nnc_tensor_param_t []){
2318
8
        params,
2319
8
        bias_params,
2320
8
        bias_params,
2321
8
      }, 3, ccv_nnc_no_hint, output_params, 3);
2322
0
  else
2323
0
    ccv_nnc_hint_tensor_auto(layer_norm, (ccv_nnc_tensor_param_t []){
2324
0
        params,
2325
0
      }, 1, ccv_nnc_no_hint, output_params, 3);
2326
8
  const ccv_nnc_tensor_symbol_t output = ccv_nnc_tensor_symbol_new(graph, output_params[0], 0);
2327
8
  const ccv_nnc_tensor_symbol_t saved_mean = ccv_nnc_tensor_symbol_new(graph, output_params[1], "saved_mean");
2328
8
  const ccv_nnc_tensor_symbol_t saved_inv_std = ccv_nnc_tensor_symbol_new(graph, output_params[2], "saved_inv_std");
2329
8
  if (self->params.lnorm.elementwise_affine)
2330
8
    ccv_nnc_graph_exec_symbol_new(graph, layer_norm, TENSOR_SYMBOL_LIST(inputs[0], self->scale, self->bias), TENSOR_SYMBOL_LIST(output, saved_mean, saved_inv_std), "layer_norm");
2331
0
  else
2332
0
    ccv_nnc_graph_exec_symbol_new(graph, layer_norm, TENSOR_SYMBOL_LIST(inputs[0]), TENSOR_SYMBOL_LIST(output, saved_mean, saved_inv_std), "layer_norm");
2333
8
  outputs[0] = output;
2334
8
}
2335
2336
static void _ccv_cnnp_layer_norm_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
2337
8
{
2338
8
  ccv_cnnp_model_layer_norm_t* const self = (ccv_cnnp_model_layer_norm_t*)super;
2339
8
  if (self->scale.graph)
2340
8
    initializer(context, CMD_SET_FORWARD(1), ccv_nnc_no_hint, 0, 0, self->scale);
2341
8
  if (self->bias.graph)
2342
8
    initializer(context, CMD_SET_FORWARD(0), ccv_nnc_no_hint, 0, 0, self->bias);
2343
8
}
2344
2345
static void _ccv_cnnp_layer_norm_add_to_parameter(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const parameters, const int is_trainable)
2346
8
{
2347
8
  ccv_cnnp_model_layer_norm_t* const self = (ccv_cnnp_model_layer_norm_t*)super;
2348
8
  if (self->scale.graph)
2349
8
    add_to_array(parameters, self->scale, is_trainable);
2350
8
  if (self->bias.graph)
2351
8
    add_to_array(parameters, self->bias, is_trainable);
2352
8
}
2353
2354
static ccv_cnnp_model_t* _ccv_cnnp_layer_norm_copy(const ccv_cnnp_model_t* const super, void* const context);
2355
2356
static const ccv_cnnp_model_vtab_t ccv_cnnp_layer_norm_isa = {
2357
  .build = _ccv_cnnp_layer_norm_build,
2358
  .init_states = _ccv_cnnp_layer_norm_init_states,
2359
  .add_to_parameter = _ccv_cnnp_layer_norm_add_to_parameter,
2360
  .copy = _ccv_cnnp_layer_norm_copy,
2361
};
2362
2363
ccv_cnnp_model_t* ccv_cnnp_layer_norm(const float epsilon, const int axis[CCV_NNC_MAX_DIM_ALLOC], const int axis_count, const int elementwise_affine, const int is_trainable, const char* const name)
2364
8
{
2365
8
  ccv_cnnp_model_layer_norm_t* const model_layer_norm = (ccv_cnnp_model_layer_norm_t*)cccalloc(1, sizeof(ccv_cnnp_model_layer_norm_t));
2366
8
  model_layer_norm->super.isa = &ccv_cnnp_layer_norm_isa;
2367
8
  model_layer_norm->super.input_size = 1;
2368
8
  model_layer_norm->super.outputs = &model_layer_norm->output;
2369
8
  model_layer_norm->super.output_size = 1;
2370
8
  model_layer_norm->super.is_trainable = is_trainable;
2371
8
  ccv_cnnp_model_copy_name(&model_layer_norm->super, name);
2372
8
  model_layer_norm->scale.d = CCV_NNC_NO_TENSOR_SYMBOL;
2373
8
  model_layer_norm->scale.graph = 0;
2374
8
  model_layer_norm->bias.d = CCV_NNC_NO_TENSOR_SYMBOL;
2375
8
  model_layer_norm->bias.graph = 0;
2376
8
  model_layer_norm->params.lnorm.epsilon = epsilon;
2377
8
  model_layer_norm->params.lnorm.count = axis_count;
2378
8
  model_layer_norm->params.lnorm.elementwise_affine = elementwise_affine;
2379
8
  memcpy(model_layer_norm->params.lnorm.axis, axis, sizeof(int) * axis_count);
2380
8
  return (ccv_cnnp_model_t*)model_layer_norm;
2381
8
}
2382
2383
static ccv_cnnp_model_t* _ccv_cnnp_layer_norm_copy(const ccv_cnnp_model_t* const super, void* const context)
2384
0
{
2385
0
  const ccv_cnnp_model_layer_norm_t* const self = (const ccv_cnnp_model_layer_norm_t*)super;
2386
0
  return ccv_cnnp_layer_norm(self->params.lnorm.epsilon, self->params.lnorm.axis, self->params.lnorm.count, self->params.lnorm.elementwise_affine, self->super.is_trainable, self->super.name);
2387
0
}
2388
2389
// MARK - Group Norm Layer
2390
2391
typedef struct {
2392
  ccv_cnnp_model_t super;
2393
  ccv_nnc_tensor_symbol_t output;
2394
  ccv_nnc_tensor_symbol_t bias;
2395
  ccv_nnc_tensor_symbol_t scale;
2396
  ccv_nnc_cmd_param_t params;
2397
} ccv_cnnp_model_group_norm_t;
2398
2399
static void _ccv_cnnp_group_norm_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2400
0
{
2401
0
  PRINT(CCV_CLI_VERBOSE, "[cnnp_group_norm_build] -\n");
2402
0
  assert(input_size == 1);
2403
0
  assert(output_size == 1);
2404
0
  ccv_cnnp_model_group_norm_t* const self = (ccv_cnnp_model_group_norm_t*)super;
2405
0
  const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2406
0
  ccv_nnc_tensor_param_t bias_params = params;
2407
0
  const int nd = ccv_nnc_tensor_nd(params.dim);
2408
0
  int i;
2409
0
  for (i = 0; i < nd; i++)
2410
0
    bias_params.dim[i] = 1;
2411
0
  bias_params.dim[self->params.gnorm.group_axis] = params.dim[self->params.gnorm.group_axis];
2412
0
  if (self->params.gnorm.elementwise_affine)
2413
0
  {
2414
    // Both scale and bias are shared between if this model is reused.
2415
0
    if (!self->scale.graph)
2416
0
      self->scale = ccv_nnc_tensor_symbol_new(graph, bias_params, "scale");
2417
0
    if (!self->bias.graph)
2418
0
      self->bias = ccv_nnc_tensor_symbol_new(graph, bias_params, "bias");
2419
0
  }
2420
0
  const ccv_nnc_cmd_t group_norm = ccv_nnc_cmd(CCV_NNC_GROUP_NORM_FORWARD, 0, self->params, 0);
2421
0
  ccv_nnc_tensor_param_t output_params[3];
2422
0
  if (self->params.gnorm.elementwise_affine)
2423
0
    ccv_nnc_hint_tensor_auto(group_norm, (ccv_nnc_tensor_param_t []){
2424
0
        params,
2425
0
        bias_params,
2426
0
        bias_params,
2427
0
      }, 3, ccv_nnc_no_hint, output_params, 3);
2428
0
  else
2429
0
    ccv_nnc_hint_tensor_auto(group_norm, (ccv_nnc_tensor_param_t []){
2430
0
        params,
2431
0
      }, 1, ccv_nnc_no_hint, output_params, 3);
2432
0
  const ccv_nnc_tensor_symbol_t output = ccv_nnc_tensor_symbol_new(graph, output_params[0], 0);
2433
0
  const ccv_nnc_tensor_symbol_t saved_mean = ccv_nnc_tensor_symbol_new(graph, output_params[1], "saved_mean");
2434
0
  const ccv_nnc_tensor_symbol_t saved_inv_std = ccv_nnc_tensor_symbol_new(graph, output_params[2], "saved_inv_std");
2435
0
  if (self->params.gnorm.elementwise_affine)
2436
0
    ccv_nnc_graph_exec_symbol_new(graph, group_norm, TENSOR_SYMBOL_LIST(inputs[0], self->scale, self->bias), TENSOR_SYMBOL_LIST(output, saved_mean, saved_inv_std), "group_norm");
2437
0
  else
2438
0
    ccv_nnc_graph_exec_symbol_new(graph, group_norm, TENSOR_SYMBOL_LIST(inputs[0]), TENSOR_SYMBOL_LIST(output, saved_mean, saved_inv_std), "group_norm");
2439
0
  outputs[0] = output;
2440
0
}
2441
2442
static void _ccv_cnnp_group_norm_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
2443
0
{
2444
0
  ccv_cnnp_model_group_norm_t* const self = (ccv_cnnp_model_group_norm_t*)super;
2445
0
  if (self->scale.graph)
2446
0
    initializer(context, CMD_SET_FORWARD(1), ccv_nnc_no_hint, 0, 0, self->scale);
2447
0
  if (self->bias.graph)
2448
0
    initializer(context, CMD_SET_FORWARD(0), ccv_nnc_no_hint, 0, 0, self->bias);
2449
0
}
2450
2451
static void _ccv_cnnp_group_norm_add_to_parameter(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const parameters, const int is_trainable)
2452
0
{
2453
0
  ccv_cnnp_model_group_norm_t* const self = (ccv_cnnp_model_group_norm_t*)super;
2454
0
  if (self->scale.graph)
2455
0
    add_to_array(parameters, self->scale, is_trainable);
2456
0
  if (self->bias.graph)
2457
0
    add_to_array(parameters, self->bias, is_trainable);
2458
0
}
2459
2460
static ccv_cnnp_model_t* _ccv_cnnp_group_norm_copy(const ccv_cnnp_model_t* const super, void* const context);
2461
2462
static const ccv_cnnp_model_vtab_t ccv_cnnp_group_norm_isa = {
2463
  .build = _ccv_cnnp_group_norm_build,
2464
  .init_states = _ccv_cnnp_group_norm_init_states,
2465
  .add_to_parameter = _ccv_cnnp_group_norm_add_to_parameter,
2466
  .copy = _ccv_cnnp_group_norm_copy,
2467
};
2468
2469
ccv_cnnp_model_t* ccv_cnnp_group_norm(const int group_axis, const int groups, const float epsilon, const int reduce_axis[CCV_NNC_MAX_DIM_ALLOC], const int axis_count, const int elementwise_affine, const int is_trainable, const char* const name)
2470
0
{
2471
0
  ccv_cnnp_model_group_norm_t* const model_group_norm = (ccv_cnnp_model_group_norm_t*)cccalloc(1, sizeof(ccv_cnnp_model_group_norm_t));
2472
0
  model_group_norm->super.isa = &ccv_cnnp_group_norm_isa;
2473
0
  model_group_norm->super.input_size = 1;
2474
0
  model_group_norm->super.outputs = &model_group_norm->output;
2475
0
  model_group_norm->super.output_size = 1;
2476
0
  model_group_norm->super.is_trainable = is_trainable;
2477
0
  ccv_cnnp_model_copy_name(&model_group_norm->super, name);
2478
0
  model_group_norm->scale.d = CCV_NNC_NO_TENSOR_SYMBOL;
2479
0
  model_group_norm->scale.graph = 0;
2480
0
  model_group_norm->bias.d = CCV_NNC_NO_TENSOR_SYMBOL;
2481
0
  model_group_norm->bias.graph = 0;
2482
0
  model_group_norm->params.gnorm.group_axis = group_axis;
2483
0
  model_group_norm->params.gnorm.groups = groups;
2484
0
  model_group_norm->params.gnorm.epsilon = epsilon;
2485
0
  model_group_norm->params.gnorm.reduce_count = axis_count;
2486
0
  model_group_norm->params.gnorm.elementwise_affine = elementwise_affine;
2487
0
  memcpy(model_group_norm->params.gnorm.reduce_axis, reduce_axis, sizeof(int) * axis_count);
2488
0
  return (ccv_cnnp_model_t*)model_group_norm;
2489
0
}
2490
2491
static ccv_cnnp_model_t* _ccv_cnnp_group_norm_copy(const ccv_cnnp_model_t* const super, void* const context)
2492
0
{
2493
0
  const ccv_cnnp_model_group_norm_t* const self = (const ccv_cnnp_model_group_norm_t*)super;
2494
0
  return ccv_cnnp_group_norm(self->params.gnorm.group_axis, self->params.gnorm.groups, self->params.gnorm.epsilon, self->params.gnorm.reduce_axis, self->params.gnorm.reduce_count, self->params.gnorm.elementwise_affine, self->super.is_trainable, self->super.name);
2495
0
}
2496
2497
// MARK - RMSNorm Layer
2498
2499
typedef struct {
2500
  ccv_cnnp_model_t super;
2501
  ccv_nnc_tensor_symbol_t output;
2502
  ccv_nnc_tensor_symbol_t scale;
2503
  ccv_nnc_cmd_param_t params;
2504
} ccv_cnnp_model_rmsnorm_t;
2505
2506
static void _ccv_cnnp_rmsnorm_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2507
0
{
2508
0
  PRINT(CCV_CLI_VERBOSE, "[cnnp_rmsnorm_build] -\n");
2509
0
  assert(input_size == 1);
2510
0
  assert(output_size == 1);
2511
0
  ccv_cnnp_model_rmsnorm_t* const self = (ccv_cnnp_model_rmsnorm_t*)super;
2512
0
  const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2513
0
  ccv_nnc_tensor_param_t scale_params = params;
2514
0
  const int nd = ccv_nnc_tensor_nd(params.dim);
2515
0
  int i;
2516
0
  for (i = 0; i < nd; i++)
2517
0
    scale_params.dim[i] = 1;
2518
0
  for (i = 0; i < self->params.rmsnorm.count; i++)
2519
0
    scale_params.dim[self->params.rmsnorm.axis[i]] = params.dim[self->params.rmsnorm.axis[i]];
2520
  // Both scale and bias are shared between if this model is reused.
2521
0
  if (!self->scale.graph)
2522
0
    self->scale = ccv_nnc_tensor_symbol_new(graph, scale_params, "scale");
2523
0
  const ccv_nnc_cmd_t rmsnorm = ccv_nnc_cmd(CCV_NNC_RMSNORM_FORWARD, 0, self->params, 0);
2524
0
  ccv_nnc_tensor_param_t output_params[2];
2525
0
  ccv_nnc_hint_tensor_auto(rmsnorm, (ccv_nnc_tensor_param_t []){
2526
0
      params,
2527
0
      scale_params,
2528
0
    }, 2, ccv_nnc_no_hint, output_params, 2);
2529
0
  const ccv_nnc_tensor_symbol_t output = ccv_nnc_tensor_symbol_new(graph, output_params[0], 0);
2530
0
  const ccv_nnc_tensor_symbol_t saved_inv_std = ccv_nnc_tensor_symbol_new(graph, output_params[1], "saved_inv_std");
2531
0
  ccv_nnc_graph_exec_symbol_new(graph, rmsnorm, TENSOR_SYMBOL_LIST(inputs[0], self->scale), TENSOR_SYMBOL_LIST(output, saved_inv_std), "rmsnorm");
2532
0
  outputs[0] = output;
2533
0
}
2534
2535
static void _ccv_cnnp_rmsnorm_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
2536
0
{
2537
0
  ccv_cnnp_model_rmsnorm_t* const self = (ccv_cnnp_model_rmsnorm_t*)super;
2538
0
  if (self->scale.graph)
2539
0
    initializer(context, CMD_SET_FORWARD(1), ccv_nnc_no_hint, 0, 0, self->scale);
2540
0
}
2541
2542
static void _ccv_cnnp_rmsnorm_add_to_parameter(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const parameters, const int is_trainable)
2543
0
{
2544
0
  ccv_cnnp_model_rmsnorm_t* const self = (ccv_cnnp_model_rmsnorm_t*)super;
2545
0
  if (self->scale.graph)
2546
0
    add_to_array(parameters, self->scale, is_trainable);
2547
0
}
2548
2549
static ccv_cnnp_model_t* _ccv_cnnp_rmsnorm_copy(const ccv_cnnp_model_t* const super, void* const context);
2550
2551
static const ccv_cnnp_model_vtab_t ccv_cnnp_rmsnorm_isa = {
2552
  .build = _ccv_cnnp_rmsnorm_build,
2553
  .init_states = _ccv_cnnp_rmsnorm_init_states,
2554
  .add_to_parameter = _ccv_cnnp_rmsnorm_add_to_parameter,
2555
  .copy = _ccv_cnnp_rmsnorm_copy,
2556
};
2557
2558
ccv_cnnp_model_t* ccv_cnnp_rmsnorm(const float epsilon, const int axis[CCV_NNC_MAX_DIM_ALLOC], const int axis_count, const int is_trainable, const char* const name)
2559
0
{
2560
0
  ccv_cnnp_model_rmsnorm_t* const model_rmsnorm = (ccv_cnnp_model_rmsnorm_t*)cccalloc(1, sizeof(ccv_cnnp_model_rmsnorm_t));
2561
0
  model_rmsnorm->super.isa = &ccv_cnnp_rmsnorm_isa;
2562
0
  model_rmsnorm->super.input_size = 1;
2563
0
  model_rmsnorm->super.outputs = &model_rmsnorm->output;
2564
0
  model_rmsnorm->super.output_size = 1;
2565
0
  model_rmsnorm->super.is_trainable = is_trainable;
2566
0
  ccv_cnnp_model_copy_name(&model_rmsnorm->super, name);
2567
0
  model_rmsnorm->scale.d = CCV_NNC_NO_TENSOR_SYMBOL;
2568
0
  model_rmsnorm->scale.graph = 0;
2569
0
  model_rmsnorm->params.rmsnorm.epsilon = epsilon;
2570
0
  model_rmsnorm->params.rmsnorm.count = axis_count;
2571
0
  memcpy(model_rmsnorm->params.lnorm.axis, axis, sizeof(int) * axis_count);
2572
0
  return (ccv_cnnp_model_t*)model_rmsnorm;
2573
0
}
2574
2575
static ccv_cnnp_model_t* _ccv_cnnp_rmsnorm_copy(const ccv_cnnp_model_t* const super, void* const context)
2576
0
{
2577
0
  const ccv_cnnp_model_rmsnorm_t* const self = (const ccv_cnnp_model_rmsnorm_t*)super;
2578
0
  return ccv_cnnp_rmsnorm(self->params.rmsnorm.epsilon, self->params.rmsnorm.axis, self->params.rmsnorm.count, self->super.is_trainable, self->super.name);
2579
0
}
2580
2581
// MARK - Batched Matrix Mul Layer
2582
2583
typedef struct {
2584
  ccv_cnnp_model_t super;
2585
  ccv_nnc_tensor_symbol_t output;
2586
  int transpose_a[2];
2587
  int transpose_b[2];
2588
  int flags;
2589
} ccv_cnnp_model_matmul_t;
2590
2591
static void _ccv_cnnp_matmul_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2592
10
{
2593
10
  PRINT(CCV_CLI_VERBOSE, "[cnnp_matmul_build] -\n");
2594
10
  assert(input_size == 2);
2595
10
  assert(output_size == 1);
2596
10
  ccv_cnnp_model_matmul_t* const self = (ccv_cnnp_model_matmul_t*)super;
2597
10
  ccv_nnc_tensor_param_t a_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2598
10
  ccv_nnc_tensor_param_t b_params = ccv_nnc_tensor_symbol_params(graph, inputs[1]);
2599
10
  ccv_nnc_tensor_param_t output_params;
2600
10
  ccv_nnc_cmd_t matmul = CMD_GEMM_FORWARD(self->transpose_a, self->transpose_b);
2601
10
  matmul.info.blas.flags = self->flags;
2602
10
  ccv_nnc_hint_tensor_auto(matmul, (ccv_nnc_tensor_param_t []){
2603
10
      a_params,
2604
10
      b_params,
2605
10
    }, 2, ccv_nnc_no_hint, &output_params, 1);
2606
10
  const ccv_nnc_tensor_symbol_t matmul_output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
2607
10
  ccv_nnc_graph_exec_symbol_new(graph, matmul, inputs, input_size, TENSOR_SYMBOL_LIST(matmul_output), "matmul");
2608
10
  outputs[0] = matmul_output;
2609
10
}
2610
2611
static ccv_cnnp_model_t* _ccv_cnnp_matmul_copy(const ccv_cnnp_model_t* const super, void* const context);
2612
2613
static const ccv_cnnp_model_vtab_t ccv_cnnp_matmul_isa = {
2614
  .build = _ccv_cnnp_matmul_build,
2615
  .copy = _ccv_cnnp_matmul_copy,
2616
};
2617
2618
ccv_cnnp_model_t* ccv_cnnp_matmul(const int transpose_a[2], const int transpose_b[2], const int flags, const char* const name)
2619
10
{
2620
10
  ccv_cnnp_model_matmul_t* const model_matmul = (ccv_cnnp_model_matmul_t*)cccalloc(1, sizeof(ccv_cnnp_model_matmul_t));
2621
10
  model_matmul->super.isa = &ccv_cnnp_matmul_isa;
2622
10
  model_matmul->super.input_size = 2;
2623
10
  model_matmul->super.outputs = &model_matmul->output;
2624
10
  model_matmul->super.output_size = 1;
2625
10
  model_matmul->transpose_a[0] = transpose_a[0];
2626
10
  model_matmul->transpose_a[1] = transpose_a[1];
2627
10
  model_matmul->transpose_b[0] = transpose_b[0];
2628
10
  model_matmul->transpose_b[1] = transpose_b[1];
2629
10
  model_matmul->flags = flags;
2630
10
  ccv_cnnp_model_copy_name(&model_matmul->super, name);
2631
10
  return (ccv_cnnp_model_t*)model_matmul;
2632
10
}
2633
2634
static ccv_cnnp_model_t* _ccv_cnnp_matmul_copy(const ccv_cnnp_model_t* const super, void* const context)
2635
1
{
2636
1
  const ccv_cnnp_model_matmul_t* const self = (const ccv_cnnp_model_matmul_t*)super;
2637
1
  return ccv_cnnp_matmul(self->transpose_a, self->transpose_b, self->flags, self->super.name);
2638
1
}
2639
2640
// MARK - Dropout Layer
2641
2642
typedef struct {
2643
  ccv_cnnp_model_t super;
2644
  ccv_nnc_tensor_symbol_t output;
2645
  ccv_nnc_graph_exec_symbol_t dropout;
2646
  float p;
2647
  int entirety;
2648
} ccv_cnnp_model_dropout_t;
2649
2650
static void _ccv_cnnp_dropout_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2651
12
{
2652
12
  PRINT(CCV_CLI_VERBOSE, "[cnnp_dropout_build] -\n");
2653
12
  assert(input_size == 1);
2654
12
  assert(output_size == 1);
2655
12
  ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2656
12
  ccv_nnc_tensor_param_t output_params[2];
2657
12
  ccv_cnnp_model_dropout_t* const self = (ccv_cnnp_model_dropout_t*)super;
2658
12
  const ccv_nnc_cmd_t dropout = CMD_DROPOUT_FORWARD(self->p, self->entirety);
2659
12
  ccv_nnc_hint_tensor_auto(dropout, (ccv_nnc_tensor_param_t []){
2660
12
      params,
2661
12
    }, 1, ccv_nnc_no_hint, output_params, 2);
2662
12
  const ccv_nnc_tensor_symbol_t dropout_output = ccv_nnc_tensor_symbol_new(graph, output_params[0], 0);
2663
12
  const ccv_nnc_tensor_symbol_t mask = ccv_nnc_tensor_symbol_new(graph, output_params[1], "mask");
2664
12
  self->dropout = ccv_nnc_graph_exec_symbol_new(graph, dropout, TENSOR_SYMBOL_LIST(inputs[0]), TENSOR_SYMBOL_LIST(dropout_output, mask), "dropout");
2665
12
  outputs[0] = dropout_output;
2666
12
}
2667
2668
static void _ccv_cnnp_dropout_set_is_test(ccv_cnnp_model_t* const super, const int is_test, const ccv_cnnp_cmd_updater_f updater, void* const context)
2669
24
{
2670
24
  ccv_cnnp_model_dropout_t* const self = (ccv_cnnp_model_dropout_t*)super;
2671
24
  if (self->dropout.graph)
2672
24
  {
2673
24
    if (is_test)
2674
      // During test, the dropout is not applied. Data transfer is perfect because if these are the same tensor, it will skip.
2675
12
      updater(context, self->dropout, CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint);
2676
12
    else
2677
12
      updater(context, self->dropout, CMD_DROPOUT_FORWARD(self->p, self->entirety), ccv_nnc_no_hint);
2678
24
  }
2679
24
}
2680
2681
static ccv_cnnp_model_t* _ccv_cnnp_dropout_copy(const ccv_cnnp_model_t* const super, void* const context);
2682
2683
static const ccv_cnnp_model_vtab_t ccv_cnnp_dropout_isa = {
2684
  .build = _ccv_cnnp_dropout_build,
2685
  .set_is_test = _ccv_cnnp_dropout_set_is_test,
2686
  .copy = _ccv_cnnp_dropout_copy,
2687
};
2688
2689
ccv_cnnp_model_t* ccv_cnnp_dropout(const float p, const int entirety, const char* const name)
2690
12
{
2691
12
  ccv_cnnp_model_dropout_t* const model_dropout = (ccv_cnnp_model_dropout_t*)cccalloc(1, sizeof(ccv_cnnp_model_dropout_t));
2692
12
  model_dropout->super.isa = &ccv_cnnp_dropout_isa;
2693
12
  model_dropout->super.input_size = 1;
2694
12
  model_dropout->super.outputs = &model_dropout->output;
2695
12
  model_dropout->super.output_size = 1;
2696
12
  model_dropout->p = p;
2697
12
  model_dropout->entirety = entirety;
2698
12
  ccv_cnnp_model_copy_name(&model_dropout->super, name);
2699
12
  return (ccv_cnnp_model_t*)model_dropout;
2700
12
}
2701
2702
static ccv_cnnp_model_t* _ccv_cnnp_dropout_copy(const ccv_cnnp_model_t* const super, void* const context)
2703
0
{
2704
0
  const ccv_cnnp_model_dropout_t* const self = (const ccv_cnnp_model_dropout_t*)super;
2705
0
  return ccv_cnnp_dropout(self->p, self->entirety, self->super.name);
2706
0
}
2707
2708
// MARK - Masked Fill Layer
2709
2710
typedef struct {
2711
  ccv_cnnp_model_t super;
2712
  ccv_nnc_tensor_symbol_t output;
2713
  float eq;
2714
  float fill;
2715
} ccv_cnnp_model_masked_fill_t;
2716
2717
static void _ccv_cnnp_masked_fill_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2718
4
{
2719
4
  PRINT(CCV_CLI_VERBOSE, "[cnnp_masked_fill_build] -\n");
2720
4
  assert(input_size == 2);
2721
4
  assert(output_size == 1);
2722
4
  ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2723
4
  ccv_cnnp_model_masked_fill_t* const self = (ccv_cnnp_model_masked_fill_t*)super;
2724
4
  const ccv_nnc_tensor_symbol_t masked_fill_output = ccv_nnc_tensor_symbol_new(graph, params, 0);
2725
4
  ccv_nnc_graph_exec_symbol_new(graph, CMD_MASKED_FILL_FORWARD(self->eq, self->fill), TENSOR_SYMBOL_LIST(inputs[0], inputs[1]), TENSOR_SYMBOL_LIST(masked_fill_output), "masked_fill");
2726
4
  outputs[0] = masked_fill_output;
2727
4
}
2728
2729
static ccv_cnnp_model_t* _ccv_cnnp_masked_fill_copy(const ccv_cnnp_model_t* const super, void* const context);
2730
2731
static const ccv_cnnp_model_vtab_t ccv_cnnp_masked_fill_isa = {
2732
  .build = _ccv_cnnp_masked_fill_build,
2733
  .copy = _ccv_cnnp_masked_fill_copy,
2734
};
2735
2736
ccv_cnnp_model_t* ccv_cnnp_masked_fill(const float eq, const float fill, const char* const name)
2737
4
{
2738
4
  ccv_cnnp_model_masked_fill_t* const model_masked_fill = (ccv_cnnp_model_masked_fill_t*)cccalloc(1, sizeof(ccv_cnnp_model_masked_fill_t));
2739
4
  model_masked_fill->super.isa = &ccv_cnnp_masked_fill_isa;
2740
4
  model_masked_fill->super.input_size = 2;
2741
4
  model_masked_fill->super.outputs = &model_masked_fill->output;
2742
4
  model_masked_fill->super.output_size = 1;
2743
4
  model_masked_fill->eq = eq;
2744
4
  model_masked_fill->fill = fill;
2745
4
  ccv_cnnp_model_copy_name(&model_masked_fill->super, name);
2746
4
  return (ccv_cnnp_model_t*)model_masked_fill;
2747
4
}
2748
2749
static ccv_cnnp_model_t* _ccv_cnnp_masked_fill_copy(const ccv_cnnp_model_t* const super, void* const context)
2750
0
{
2751
0
  const ccv_cnnp_model_masked_fill_t* const self = (const ccv_cnnp_model_masked_fill_t*)super;
2752
0
  return ccv_cnnp_masked_fill(self->eq, self->fill, self->super.name);
2753
0
}
2754
2755
// MARK - Index Select Layer
2756
2757
typedef struct {
2758
  ccv_cnnp_model_t super;
2759
  ccv_nnc_tensor_symbol_t output;
2760
} ccv_cnnp_model_index_select_t;
2761
2762
static void _ccv_cnnp_index_select_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2763
2
{
2764
2
  PRINT(CCV_CLI_VERBOSE, "[cnnp_index_select_build] -\n");
2765
2
  assert(input_size == 2);
2766
2
  assert(output_size == 1);
2767
2
  const ccv_nnc_tensor_param_t vocab_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2768
2
  const ccv_nnc_tensor_param_t index_params = ccv_nnc_tensor_symbol_params(graph, inputs[1]);
2769
2
  ccv_nnc_tensor_param_t output_params;
2770
2
  const ccv_nnc_cmd_t index_select = CMD_INDEX_SELECT_FORWARD();
2771
2
  ccv_nnc_hint_tensor_auto(index_select, (ccv_nnc_tensor_param_t []){
2772
2
      vocab_params,
2773
2
      index_params,
2774
2
    }, 2, ccv_nnc_no_hint, &output_params, 1);
2775
2
  const ccv_nnc_tensor_symbol_t output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
2776
2
  ccv_nnc_graph_exec_symbol_new(graph, index_select, TENSOR_SYMBOL_LIST(inputs[0], inputs[1]), TENSOR_SYMBOL_LIST(output), "index_select");
2777
2
  outputs[0] = output;
2778
2
}
2779
2780
static ccv_cnnp_model_t* _ccv_cnnp_index_select_copy(const ccv_cnnp_model_t* const super, void* const context);
2781
2782
static const ccv_cnnp_model_vtab_t ccv_cnnp_index_select_isa = {
2783
  .build = _ccv_cnnp_index_select_build,
2784
  .copy = _ccv_cnnp_index_select_copy,
2785
};
2786
2787
ccv_cnnp_model_t* ccv_cnnp_index_select(const char* const name)
2788
2
{
2789
2
  ccv_cnnp_model_index_select_t* const model_index_select = (ccv_cnnp_model_index_select_t*)cccalloc(1, sizeof(ccv_cnnp_model_index_select_t));
2790
2
  model_index_select->super.isa = &ccv_cnnp_index_select_isa;
2791
2
  model_index_select->super.input_size = 2;
2792
2
  model_index_select->super.outputs = &model_index_select->output;
2793
2
  model_index_select->super.output_size = 1;
2794
2
  ccv_cnnp_model_copy_name(&model_index_select->super, name);
2795
2
  return (ccv_cnnp_model_t*)model_index_select;
2796
2
}
2797
2798
static ccv_cnnp_model_t* _ccv_cnnp_index_select_copy(const ccv_cnnp_model_t* const super, void* const context)
2799
0
{
2800
0
  ccv_cnnp_model_index_select_t* const self = (ccv_cnnp_model_index_select_t*)super;
2801
0
  return ccv_cnnp_index_select(self->super.name);
2802
0
}
2803
2804
// MARK - Embedding Layer
2805
2806
typedef struct {
2807
  ccv_cnnp_model_t super;
2808
  ccv_nnc_tensor_symbol_t output;
2809
  ccv_nnc_tensor_symbol_t vocab;
2810
  int datatype;
2811
  int vocab_size;
2812
  int embed_size;
2813
} ccv_cnnp_model_embedding_t;
2814
2815
static void _ccv_cnnp_embedding_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2816
1
{
2817
1
  ccv_cnnp_model_embedding_t* const self = (ccv_cnnp_model_embedding_t*)super;
2818
1
  PRINT(CCV_CLI_VERBOSE, "[cnnp_embedding_build] vocab_size: %d, embed_size: %d\n", self->vocab_size, self->embed_size);
2819
1
  assert(input_size == 1);
2820
1
  assert(output_size == 1);
2821
1
  const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2822
1
  ccv_nnc_tensor_param_t vocab_params = params;
2823
1
  memset(vocab_params.dim, 0, sizeof(vocab_params.dim));
2824
1
  vocab_params.datatype = self->datatype;
2825
1
  vocab_params.dim[0] = self->vocab_size;
2826
1
  vocab_params.dim[1] = self->embed_size;
2827
1
  if (!self->vocab.graph)
2828
1
    self->vocab = ccv_nnc_tensor_symbol_new(graph, vocab_params, "vocab");
2829
1
  assert(self->vocab.graph == graph);
2830
1
  ccv_nnc_tensor_param_t output_params;
2831
1
  const ccv_nnc_cmd_t embedding = CMD_INDEX_SELECT_FORWARD();
2832
1
  ccv_nnc_hint_tensor_auto(embedding, (ccv_nnc_tensor_param_t []){
2833
1
      vocab_params,
2834
1
      params,
2835
1
    }, 2, ccv_nnc_no_hint, &output_params, 1);
2836
1
  const ccv_nnc_tensor_symbol_t output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
2837
1
  ccv_nnc_graph_exec_symbol_new(graph, embedding, TENSOR_SYMBOL_LIST(self->vocab, inputs[0]), TENSOR_SYMBOL_LIST(output), "embedding");
2838
1
  outputs[0] = output;
2839
1
}
2840
2841
static void _ccv_cnnp_embedding_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
2842
1
{
2843
1
  ccv_cnnp_model_embedding_t* const self = (ccv_cnnp_model_embedding_t*)super;
2844
1
  const float std = sqrtf(2) / sqrtf(self->vocab_size + self->embed_size);
2845
1
  const float bound = sqrtf(3) * std;
2846
1
  initializer(context, CMD_RANDOM_UNIFORM_FORWARD(-bound, bound), ccv_nnc_no_hint, 0, 0, self->vocab);
2847
1
}
2848
2849
static void _ccv_cnnp_embedding_add_to_parameter(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const parameters, const int is_trainable)
2850
1
{
2851
1
  ccv_cnnp_model_embedding_t* const self = (ccv_cnnp_model_embedding_t*)super;
2852
1
  add_to_array(parameters, self->vocab, is_trainable);
2853
1
}
2854
2855
static ccv_cnnp_model_t* _ccv_cnnp_embedding_copy(const ccv_cnnp_model_t* const super, void* const context);
2856
2857
static const ccv_cnnp_model_vtab_t ccv_cnnp_embedding_isa = {
2858
  .build = _ccv_cnnp_embedding_build,
2859
  .init_states = _ccv_cnnp_embedding_init_states,
2860
  .add_to_parameter = _ccv_cnnp_embedding_add_to_parameter,
2861
  .copy = _ccv_cnnp_embedding_copy,
2862
};
2863
2864
ccv_cnnp_model_t* ccv_cnnp_embedding(const int datatype, const int vocab_size, const int embed_size, const int is_trainable, const char* const name)
2865
1
{
2866
1
  ccv_cnnp_model_embedding_t* const model_embedding = (ccv_cnnp_model_embedding_t*)cccalloc(1, sizeof(ccv_cnnp_model_embedding_t));
2867
1
  model_embedding->super.isa = &ccv_cnnp_embedding_isa;
2868
1
  model_embedding->super.input_size = 1;
2869
1
  model_embedding->super.outputs = &model_embedding->output;
2870
1
  model_embedding->super.output_size = 1;
2871
1
  model_embedding->super.is_trainable = is_trainable;
2872
1
  ccv_cnnp_model_copy_name(&model_embedding->super, name);
2873
1
  model_embedding->vocab.d = CCV_NNC_NO_TENSOR_SYMBOL;
2874
1
  model_embedding->vocab.graph = 0;
2875
1
  assert(datatype == CCV_32F || datatype == CCV_16F);
2876
1
  model_embedding->datatype = datatype;
2877
1
  assert(vocab_size > 0);
2878
1
  model_embedding->vocab_size = vocab_size;
2879
1
  assert(embed_size > 0);
2880
1
  model_embedding->embed_size = embed_size;
2881
1
  return (ccv_cnnp_model_t*)model_embedding;
2882
1
}
2883
2884
static ccv_cnnp_model_t* _ccv_cnnp_embedding_copy(const ccv_cnnp_model_t* const super, void* const context)
2885
0
{
2886
0
  ccv_cnnp_model_embedding_t* const self = (ccv_cnnp_model_embedding_t*)super;
2887
0
  return ccv_cnnp_embedding(self->datatype, self->vocab_size, self->embed_size, self->super.is_trainable, self->super.name);
2888
0
}
2889
2890
// MARK - Pool Layers
2891
2892
typedef struct {
2893
  ccv_cnnp_model_t super;
2894
  ccv_nnc_tensor_symbol_t output;
2895
  int type;
2896
  float width_scale;
2897
  float height_scale;
2898
  int align_corners;
2899
} ccv_cnnp_model_upsample_t;
2900
2901
static void _ccv_cnnp_upsample_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2902
3
{
2903
3
  PRINT(CCV_CLI_VERBOSE, "[cnnp_upsample_build] -\n");
2904
3
  assert(input_size == 1);
2905
3
  assert(output_size == 1);
2906
3
  ccv_cnnp_model_upsample_t* const self = (ccv_cnnp_model_upsample_t*)super;
2907
3
  const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2908
3
  ccv_nnc_cmd_t cmd = CMD_UPSAMPLE_FORWARD(self->type, self->width_scale, self->height_scale, self->align_corners);
2909
3
  ccv_nnc_tensor_param_t output_params;
2910
3
  ccv_nnc_hint_tensor_auto(cmd, &params, 1, ccv_nnc_no_hint, &output_params, 1);
2911
3
  const ccv_nnc_tensor_symbol_t output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
2912
3
  ccv_nnc_graph_exec_symbol_new(graph, cmd, TENSOR_SYMBOL_LIST(inputs[0]), TENSOR_SYMBOL_LIST(output), "upsample");
2913
3
  outputs[0] = output;
2914
3
}
2915
2916
static ccv_cnnp_model_t* _ccv_cnnp_upsample_copy(const ccv_cnnp_model_t* const super, void* const context);
2917
2918
static const ccv_cnnp_model_vtab_t ccv_cnnp_upsample_isa = {
2919
  .build = _ccv_cnnp_upsample_build,
2920
  .copy = _ccv_cnnp_upsample_copy,
2921
};
2922
2923
ccv_cnnp_model_t* ccv_cnnp_upsample(const int type, const float width_scale, const float height_scale, const int align_corners, const char* const name)
2924
3
{
2925
3
  ccv_cnnp_model_upsample_t* const model_upsample = (ccv_cnnp_model_upsample_t*)cccalloc(1, sizeof(ccv_cnnp_model_upsample_t));
2926
3
  model_upsample->super.isa = &ccv_cnnp_upsample_isa;
2927
3
  model_upsample->super.input_size = 1;
2928
3
  model_upsample->super.outputs = &model_upsample->output;
2929
3
  model_upsample->super.output_size = 1;
2930
3
  ccv_cnnp_model_copy_name(&model_upsample->super, name);
2931
3
  assert(type == CCV_NNC_UPSAMPLE_NEAREST || type == CCV_NNC_UPSAMPLE_BILINEAR);
2932
3
  model_upsample->type = type;
2933
3
  model_upsample->width_scale = width_scale;
2934
3
  model_upsample->height_scale = height_scale;
2935
3
  model_upsample->align_corners = align_corners;
2936
3
  return (ccv_cnnp_model_t*)model_upsample;
2937
3
}
2938
2939
static ccv_cnnp_model_t* _ccv_cnnp_upsample_copy(const ccv_cnnp_model_t* const super, void* const context)
2940
0
{
2941
0
  const ccv_cnnp_model_upsample_t* const self = (const ccv_cnnp_model_upsample_t*)super;
2942
0
  return ccv_cnnp_upsample(self->type, self->width_scale, self->height_scale, self->align_corners, self->super.name);
2943
0
}
2944
2945
// MARK - Reduce Sum Layer
2946
2947
typedef struct {
2948
  ccv_cnnp_model_t super;
2949
  int axis[CCV_NNC_MAX_DIM_ALLOC];
2950
  int count;
2951
  ccv_nnc_tensor_symbol_t output;
2952
} ccv_cnnp_model_reduce_sum_t;
2953
2954
static void _ccv_cnnp_reduce_sum_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
2955
1
{
2956
1
  PRINT(CCV_CLI_VERBOSE, "[cnnp_reduce_sum_build] -\n");
2957
1
  const ccv_cnnp_model_reduce_sum_t* const self = (const ccv_cnnp_model_reduce_sum_t*)super;
2958
1
  assert(input_size == 1);
2959
1
  assert(output_size == 1);
2960
1
  ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
2961
1
  ccv_nnc_tensor_param_t output_params;
2962
1
  ccv_nnc_cmd_t reduce_sum = CMD_REDUCE_SUM_FORWARD();
2963
1
  int i;
2964
2
  for (i = 0; i < self->count; 
i++1
)
2965
1
    reduce_sum.info.reduce.axis[i] = self->axis[i];
2966
1
  reduce_sum.info.reduce.count = self->count;
2967
1
  ccv_nnc_hint_tensor_auto(reduce_sum, &input_params, 1, ccv_nnc_no_hint, &output_params, 1);
2968
1
  outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
2969
1
  ccv_nnc_graph_exec_symbol_new(graph, reduce_sum, inputs, input_size, outputs, output_size, "reduce_sum");
2970
1
}
2971
2972
static ccv_cnnp_model_t* _ccv_cnnp_reduce_sum_copy(const ccv_cnnp_model_t* const self, void* const context);
2973
2974
static const ccv_cnnp_model_vtab_t ccv_cnnp_reduce_sum_isa = {
2975
  .build = _ccv_cnnp_reduce_sum_build,
2976
  .copy = _ccv_cnnp_reduce_sum_copy,
2977
};
2978
2979
ccv_cnnp_model_t* ccv_cnnp_reduce_sum(const int* const axis, const int axis_count, const char* const name)
2980
1
{
2981
1
  ccv_cnnp_model_reduce_sum_t* const model_reduce_sum = (ccv_cnnp_model_reduce_sum_t*)cccalloc(1, sizeof(ccv_cnnp_model_reduce_sum_t));
2982
1
  model_reduce_sum->super.isa = &ccv_cnnp_reduce_sum_isa;
2983
1
  model_reduce_sum->super.input_size = 1;
2984
1
  model_reduce_sum->super.outputs = &model_reduce_sum->output;
2985
1
  model_reduce_sum->super.output_size = 1;
2986
1
  ccv_cnnp_model_copy_name(&model_reduce_sum->super, name);
2987
1
  assert(axis_count <= CCV_NNC_MAX_DIM_ALLOC);
2988
1
  int i;
2989
2
  for (i = 0; i < axis_count; 
i++1
)
2990
1
    model_reduce_sum->axis[i] = axis[i];
2991
1
  model_reduce_sum->count = axis_count;
2992
1
  return (ccv_cnnp_model_t*)model_reduce_sum;
2993
1
}
2994
2995
static ccv_cnnp_model_t* _ccv_cnnp_reduce_sum_copy(const ccv_cnnp_model_t* const super, void* const context)
2996
0
{
2997
0
  const ccv_cnnp_model_reduce_sum_t* const self = (const ccv_cnnp_model_reduce_sum_t*)super;
2998
0
  return ccv_cnnp_reduce_sum(self->axis, self->count, self->super.name);
2999
0
}
3000
3001
// MARK - Reduce Mean Layer
3002
3003
typedef struct {
3004
  ccv_cnnp_model_t super;
3005
  int axis[CCV_NNC_MAX_DIM_ALLOC];
3006
  int count;
3007
  ccv_nnc_tensor_symbol_t output;
3008
} ccv_cnnp_model_reduce_mean_t;
3009
3010
static void _ccv_cnnp_reduce_mean_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3011
1
{
3012
1
  PRINT(CCV_CLI_VERBOSE, "[cnnp_reduce_mean_build] -\n");
3013
1
  const ccv_cnnp_model_reduce_mean_t* const self = (const ccv_cnnp_model_reduce_mean_t*)super;
3014
1
  assert(input_size == 1);
3015
1
  assert(output_size == 1);
3016
1
  ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
3017
1
  ccv_nnc_tensor_param_t output_params;
3018
1
  ccv_nnc_cmd_t reduce_mean = CMD_REDUCE_MEAN_FORWARD();
3019
1
  int i;
3020
2
  for (i = 0; i < self->count; 
i++1
)
3021
1
    reduce_mean.info.reduce.axis[i] = self->axis[i];
3022
1
  reduce_mean.info.reduce.count = self->count;
3023
1
  ccv_nnc_hint_tensor_auto(reduce_mean, &input_params, 1, ccv_nnc_no_hint, &output_params, 1);
3024
1
  outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
3025
1
  ccv_nnc_graph_exec_symbol_new(graph, reduce_mean, inputs, input_size, outputs, output_size, "reduce_mean");
3026
1
}
3027
3028
static ccv_cnnp_model_t* _ccv_cnnp_reduce_mean_copy(const ccv_cnnp_model_t* const self, void* const context);
3029
3030
static const ccv_cnnp_model_vtab_t ccv_cnnp_reduce_mean_isa = {
3031
  .build = _ccv_cnnp_reduce_mean_build,
3032
  .copy = _ccv_cnnp_reduce_mean_copy,
3033
};
3034
3035
ccv_cnnp_model_t* ccv_cnnp_reduce_mean(const int* const axis, const int axis_count, const char* const name)
3036
1
{
3037
1
  ccv_cnnp_model_reduce_mean_t* const model_reduce_mean = (ccv_cnnp_model_reduce_mean_t*)cccalloc(1, sizeof(ccv_cnnp_model_reduce_mean_t));
3038
1
  model_reduce_mean->super.isa = &ccv_cnnp_reduce_mean_isa;
3039
1
  model_reduce_mean->super.input_size = 1;
3040
1
  model_reduce_mean->super.outputs = &model_reduce_mean->output;
3041
1
  model_reduce_mean->super.output_size = 1;
3042
1
  ccv_cnnp_model_copy_name(&model_reduce_mean->super, name);
3043
1
  assert(axis_count <= CCV_NNC_MAX_DIM_ALLOC);
3044
1
  int i;
3045
2
  for (i = 0; i < axis_count; 
i++1
)
3046
1
    model_reduce_mean->axis[i] = axis[i];
3047
1
  model_reduce_mean->count = axis_count;
3048
1
  return (ccv_cnnp_model_t*)model_reduce_mean;
3049
1
}
3050
3051
static ccv_cnnp_model_t* _ccv_cnnp_reduce_mean_copy(const ccv_cnnp_model_t* const super, void* const context)
3052
0
{
3053
0
  const ccv_cnnp_model_reduce_mean_t* const self = (const ccv_cnnp_model_reduce_mean_t*)super;
3054
0
  return ccv_cnnp_reduce_mean(self->axis, self->count, self->super.name);
3055
0
}
3056
3057
// MARK - Reduce Max Layer
3058
3059
typedef struct {
3060
  ccv_cnnp_model_t super;
3061
  int axis[CCV_NNC_MAX_DIM_ALLOC];
3062
  int count;
3063
  ccv_nnc_tensor_symbol_t output;
3064
} ccv_cnnp_model_reduce_max_t;
3065
3066
static void _ccv_cnnp_reduce_max_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3067
1
{
3068
1
  PRINT(CCV_CLI_VERBOSE, "[cnnp_reduce_max_build] -\n");
3069
1
  const ccv_cnnp_model_reduce_max_t* const self = (const ccv_cnnp_model_reduce_max_t*)super;
3070
1
  assert(input_size == 1);
3071
1
  assert(output_size == 1);
3072
1
  ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
3073
1
  ccv_nnc_tensor_param_t output_params;
3074
1
  ccv_nnc_cmd_t reduce_max = CMD_REDUCE_MAX_FORWARD();
3075
1
  int i;
3076
2
  for (i = 0; i < self->count; 
i++1
)
3077
1
    reduce_max.info.reduce.axis[i] = self->axis[i];
3078
1
  reduce_max.info.reduce.count = self->count;
3079
1
  ccv_nnc_hint_tensor_auto(reduce_max, &input_params, 1, ccv_nnc_no_hint, &output_params, 1);
3080
1
  outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
3081
1
  ccv_nnc_graph_exec_symbol_new(graph, reduce_max, inputs, input_size, outputs, output_size, "reduce_max");
3082
1
}
3083
3084
static ccv_cnnp_model_t* _ccv_cnnp_reduce_max_copy(const ccv_cnnp_model_t* const self, void* const context);
3085
3086
static const ccv_cnnp_model_vtab_t ccv_cnnp_reduce_max_isa = {
3087
  .build = _ccv_cnnp_reduce_max_build,
3088
  .copy = _ccv_cnnp_reduce_max_copy,
3089
};
3090
3091
ccv_cnnp_model_t* ccv_cnnp_reduce_max(const int* const axis, const int axis_count, const char* const name)
3092
1
{
3093
1
  ccv_cnnp_model_reduce_max_t* const model_reduce_max = (ccv_cnnp_model_reduce_max_t*)cccalloc(1, sizeof(ccv_cnnp_model_reduce_max_t));
3094
1
  model_reduce_max->super.isa = &ccv_cnnp_reduce_max_isa;
3095
1
  model_reduce_max->super.input_size = 1;
3096
1
  model_reduce_max->super.outputs = &model_reduce_max->output;
3097
1
  model_reduce_max->super.output_size = 1;
3098
1
  ccv_cnnp_model_copy_name(&model_reduce_max->super, name);
3099
1
  assert(axis_count <= CCV_NNC_MAX_DIM_ALLOC);
3100
1
  int i;
3101
2
  for (i = 0; i < axis_count; 
i++1
)
3102
1
    model_reduce_max->axis[i] = axis[i];
3103
1
  model_reduce_max->count = axis_count;
3104
1
  return (ccv_cnnp_model_t*)model_reduce_max;
3105
1
}
3106
3107
static ccv_cnnp_model_t* _ccv_cnnp_reduce_max_copy(const ccv_cnnp_model_t* const super, void* const context)
3108
0
{
3109
0
  const ccv_cnnp_model_reduce_max_t* const self = (const ccv_cnnp_model_reduce_max_t*)super;
3110
0
  return ccv_cnnp_reduce_max(self->axis, self->count, self->super.name);
3111
0
}
3112
3113
// MARK - Reduce Min Layer
3114
3115
typedef struct {
3116
  ccv_cnnp_model_t super;
3117
  int axis[CCV_NNC_MAX_DIM_ALLOC];
3118
  int count;
3119
  ccv_nnc_tensor_symbol_t output;
3120
} ccv_cnnp_model_reduce_min_t;
3121
3122
static void _ccv_cnnp_reduce_min_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3123
1
{
3124
1
  PRINT(CCV_CLI_VERBOSE, "[cnnp_reduce_min_build] -\n");
3125
1
  const ccv_cnnp_model_reduce_min_t* const self = (const ccv_cnnp_model_reduce_min_t*)super;
3126
1
  assert(input_size == 1);
3127
1
  assert(output_size == 1);
3128
1
  ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
3129
1
  ccv_nnc_tensor_param_t output_params;
3130
1
  ccv_nnc_cmd_t reduce_min = CMD_REDUCE_MIN_FORWARD();
3131
1
  int i;
3132
2
  for (i = 0; i < self->count; 
i++1
)
3133
1
    reduce_min.info.reduce.axis[i] = self->axis[i];
3134
1
  reduce_min.info.reduce.count = self->count;
3135
1
  ccv_nnc_hint_tensor_auto(reduce_min, &input_params, 1, ccv_nnc_no_hint, &output_params, 1);
3136
1
  outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
3137
1
  ccv_nnc_graph_exec_symbol_new(graph, reduce_min, inputs, input_size, outputs, output_size, "reduce_min");
3138
1
}
3139
3140
static ccv_cnnp_model_t* _ccv_cnnp_reduce_min_copy(const ccv_cnnp_model_t* const self, void* const context);
3141
3142
static const ccv_cnnp_model_vtab_t ccv_cnnp_reduce_min_isa = {
3143
  .build = _ccv_cnnp_reduce_min_build,
3144
  .copy = _ccv_cnnp_reduce_min_copy,
3145
};
3146
3147
ccv_cnnp_model_t* ccv_cnnp_reduce_min(const int* const axis, const int axis_count, const char* const name)
3148
1
{
3149
1
  ccv_cnnp_model_reduce_min_t* const model_reduce_min = (ccv_cnnp_model_reduce_min_t*)cccalloc(1, sizeof(ccv_cnnp_model_reduce_min_t));
3150
1
  model_reduce_min->super.isa = &ccv_cnnp_reduce_min_isa;
3151
1
  model_reduce_min->super.input_size = 1;
3152
1
  model_reduce_min->super.outputs = &model_reduce_min->output;
3153
1
  model_reduce_min->super.output_size = 1;
3154
1
  ccv_cnnp_model_copy_name(&model_reduce_min->super, name);
3155
1
  assert(axis_count <= CCV_NNC_MAX_DIM_ALLOC);
3156
1
  int i;
3157
2
  for (i = 0; i < axis_count; 
i++1
)
3158
1
    model_reduce_min->axis[i] = axis[i];
3159
1
  model_reduce_min->count = axis_count;
3160
1
  return (ccv_cnnp_model_t*)model_reduce_min;
3161
1
}
3162
3163
static ccv_cnnp_model_t* _ccv_cnnp_reduce_min_copy(const ccv_cnnp_model_t* const super, void* const context)
3164
0
{
3165
0
  const ccv_cnnp_model_reduce_min_t* const self = (const ccv_cnnp_model_reduce_min_t*)super;
3166
0
  return ccv_cnnp_reduce_min(self->axis, self->count, self->super.name);
3167
0
}
3168
3169
// MARK - Reduce Norm2 Layer
3170
3171
typedef struct {
3172
  ccv_cnnp_model_t super;
3173
  int axis[CCV_NNC_MAX_DIM_ALLOC];
3174
  int count;
3175
  ccv_nnc_tensor_symbol_t output;
3176
} ccv_cnnp_model_reduce_norm2_t;
3177
3178
static void _ccv_cnnp_reduce_norm2_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3179
1
{
3180
1
  const ccv_cnnp_model_reduce_norm2_t* const self = (const ccv_cnnp_model_reduce_norm2_t*)super;
3181
1
  PRINT(CCV_CLI_VERBOSE, "[cnnp_reduce_norm2_build] -\n");
3182
1
  assert(input_size == 1);
3183
1
  assert(output_size == 1);
3184
1
  ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
3185
1
  ccv_nnc_tensor_param_t output_params;
3186
1
  ccv_nnc_cmd_t reduce_norm2 = CMD_REDUCE_NORM2_FORWARD();
3187
1
  int i;
3188
2
  for (i = 0; i < self->count; 
i++1
)
3189
1
    reduce_norm2.info.reduce.axis[i] = self->axis[i];
3190
1
  reduce_norm2.info.reduce.count = self->count;
3191
1
  ccv_nnc_hint_tensor_auto(reduce_norm2, &input_params, 1, ccv_nnc_no_hint, &output_params, 1);
3192
1
  outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
3193
1
  ccv_nnc_graph_exec_symbol_new(graph, reduce_norm2, inputs, input_size, outputs, output_size, "reduce_norm2");
3194
1
}
3195
3196
static ccv_cnnp_model_t* _ccv_cnnp_reduce_norm2_copy(const ccv_cnnp_model_t* const self, void* const context);
3197
3198
static const ccv_cnnp_model_vtab_t ccv_cnnp_reduce_norm2_isa = {
3199
  .build = _ccv_cnnp_reduce_norm2_build,
3200
  .copy = _ccv_cnnp_reduce_norm2_copy,
3201
};
3202
3203
ccv_cnnp_model_t* ccv_cnnp_reduce_norm2(const int* const axis, const int axis_count, const char* const name)
3204
1
{
3205
1
  ccv_cnnp_model_reduce_norm2_t* const model_reduce_norm2 = (ccv_cnnp_model_reduce_norm2_t*)cccalloc(1, sizeof(ccv_cnnp_model_reduce_norm2_t));
3206
1
  model_reduce_norm2->super.isa = &ccv_cnnp_reduce_norm2_isa;
3207
1
  model_reduce_norm2->super.input_size = 1;
3208
1
  model_reduce_norm2->super.outputs = &model_reduce_norm2->output;
3209
1
  model_reduce_norm2->super.output_size = 1;
3210
1
  ccv_cnnp_model_copy_name(&model_reduce_norm2->super, name);
3211
1
  assert(axis_count <= CCV_NNC_MAX_DIM_ALLOC);
3212
1
  int i;
3213
2
  for (i = 0; i < axis_count; 
i++1
)
3214
1
    model_reduce_norm2->axis[i] = axis[i];
3215
1
  model_reduce_norm2->count = axis_count;
3216
1
  return (ccv_cnnp_model_t*)model_reduce_norm2;
3217
1
}
3218
3219
static ccv_cnnp_model_t* _ccv_cnnp_reduce_norm2_copy(const ccv_cnnp_model_t* const super, void* const context)
3220
0
{
3221
0
  const ccv_cnnp_model_reduce_norm2_t* const self = (const ccv_cnnp_model_reduce_norm2_t*)super;
3222
0
  return ccv_cnnp_reduce_norm2(self->axis, self->count, self->super.name);
3223
0
}
3224
3225
// MARK - Argmax Layer
3226
3227
typedef struct {
3228
  ccv_cnnp_model_t super;
3229
  int axis;
3230
  ccv_nnc_tensor_symbol_t output;
3231
} ccv_cnnp_model_argmax_t;
3232
3233
static void _ccv_cnnp_argmax_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3234
1
{
3235
1
  const ccv_cnnp_model_argmax_t* const self = (const ccv_cnnp_model_argmax_t*)super;
3236
1
  PRINT(CCV_CLI_VERBOSE, "[cnnp_argmax_build] -\n");
3237
1
  assert(input_size == 1);
3238
1
  assert(output_size == 1);
3239
1
  ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
3240
1
  ccv_nnc_tensor_param_t output_params;
3241
1
  ccv_nnc_cmd_t argmax = CMD_ARGMAX_FORWARD();
3242
1
  argmax.info.reduce.axis[0] = self->axis;
3243
1
  argmax.info.reduce.count = 1;
3244
1
  ccv_nnc_hint_tensor_auto(argmax, &input_params, 1, ccv_nnc_no_hint, &output_params, 1);
3245
1
  outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
3246
1
  ccv_nnc_graph_exec_symbol_new(graph, argmax, inputs, input_size, outputs, output_size, "argmax");
3247
1
}
3248
3249
static ccv_cnnp_model_t* _ccv_cnnp_argmax_copy(const ccv_cnnp_model_t* const self, void* const context);
3250
3251
static const ccv_cnnp_model_vtab_t ccv_cnnp_argmax_isa = {
3252
  .build = _ccv_cnnp_argmax_build,
3253
  .copy = _ccv_cnnp_argmax_copy,
3254
};
3255
3256
ccv_cnnp_model_t* ccv_cnnp_argmax(const int axis, const char* const name)
3257
1
{
3258
1
  ccv_cnnp_model_argmax_t* const model_argmax = (ccv_cnnp_model_argmax_t*)cccalloc(1, sizeof(ccv_cnnp_model_argmax_t));
3259
1
  model_argmax->super.isa = &ccv_cnnp_argmax_isa;
3260
1
  model_argmax->super.input_size = 1;
3261
1
  model_argmax->super.outputs = &model_argmax->output;
3262
1
  model_argmax->super.output_size = 1;
3263
1
  ccv_cnnp_model_copy_name(&model_argmax->super, name);
3264
1
  model_argmax->axis = axis;
3265
1
  return (ccv_cnnp_model_t*)model_argmax;
3266
1
}
3267
3268
static ccv_cnnp_model_t* _ccv_cnnp_argmax_copy(const ccv_cnnp_model_t* const super, void* const context)
3269
0
{
3270
0
  const ccv_cnnp_model_argmax_t* const self = (const ccv_cnnp_model_argmax_t*)super;
3271
0
  return ccv_cnnp_argmax(self->axis, self->super.name);
3272
0
}
3273
3274
// MARK - Argmin Layer
3275
3276
typedef struct {
3277
  ccv_cnnp_model_t super;
3278
  int axis;
3279
  ccv_nnc_tensor_symbol_t output;
3280
} ccv_cnnp_model_argmin_t;
3281
3282
static void _ccv_cnnp_argmin_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3283
1
{
3284
1
  const ccv_cnnp_model_argmin_t* const self = (const ccv_cnnp_model_argmin_t*)super;
3285
1
  PRINT(CCV_CLI_VERBOSE, "[cnnp_argmin_build] -\n");
3286
1
  assert(input_size == 1);
3287
1
  assert(output_size == 1);
3288
1
  ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
3289
1
  ccv_nnc_tensor_param_t output_params;
3290
1
  ccv_nnc_cmd_t argmin = CMD_ARGMIN_FORWARD();
3291
1
  argmin.info.reduce.axis[0] = self->axis;
3292
1
  argmin.info.reduce.count = 1;
3293
1
  ccv_nnc_hint_tensor_auto(argmin, &input_params, 1, ccv_nnc_no_hint, &output_params, 1);
3294
1
  outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
3295
1
  ccv_nnc_graph_exec_symbol_new(graph, argmin, inputs, input_size, outputs, output_size, "argmin");
3296
1
}
3297
3298
static ccv_cnnp_model_t* _ccv_cnnp_argmin_copy(const ccv_cnnp_model_t* const self, void* const context);
3299
3300
static const ccv_cnnp_model_vtab_t ccv_cnnp_argmin_isa = {
3301
  .build = _ccv_cnnp_argmin_build,
3302
  .copy = _ccv_cnnp_argmin_copy,
3303
};
3304
3305
ccv_cnnp_model_t* ccv_cnnp_argmin(const int axis, const char* const name)
3306
1
{
3307
1
  ccv_cnnp_model_argmin_t* const model_argmin = (ccv_cnnp_model_argmin_t*)cccalloc(1, sizeof(ccv_cnnp_model_argmin_t));
3308
1
  model_argmin->super.isa = &ccv_cnnp_argmin_isa;
3309
1
  model_argmin->super.input_size = 1;
3310
1
  model_argmin->super.outputs = &model_argmin->output;
3311
1
  model_argmin->super.output_size = 1;
3312
1
  ccv_cnnp_model_copy_name(&model_argmin->super, name);
3313
1
  model_argmin->axis = axis;
3314
1
  return (ccv_cnnp_model_t*)model_argmin;
3315
1
}
3316
3317
static ccv_cnnp_model_t* _ccv_cnnp_argmin_copy(const ccv_cnnp_model_t* const super, void* const context)
3318
0
{
3319
0
  const ccv_cnnp_model_argmin_t* const self = (const ccv_cnnp_model_argmin_t*)super;
3320
0
  return ccv_cnnp_argmin(self->axis, self->super.name);
3321
0
}
3322
3323
// MARK - Min Layer
3324
3325
typedef struct {
3326
  ccv_cnnp_model_t super;
3327
  ccv_nnc_tensor_symbol_t output;
3328
} ccv_cnnp_model_min_t;
3329
3330
static void _ccv_cnnp_min_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3331
1
{
3332
1
  PRINT(CCV_CLI_VERBOSE, "[cnnp_min_build] -\n");
3333
1
  assert(input_size == 2);
3334
1
  assert(output_size == 1);
3335
1
  ccv_nnc_tensor_param_t input_params[2];
3336
1
  int i;
3337
3
  for (i = 0; i < 2; 
i++2
)
3338
2
    input_params[i] = ccv_nnc_tensor_symbol_params(graph, inputs[i]);
3339
1
  ccv_nnc_tensor_param_t output_params;
3340
1
  const ccv_nnc_cmd_t min = CMD_MIN_FORWARD();
3341
1
  ccv_nnc_hint_tensor_auto(min, input_params, 2, ccv_nnc_no_hint, &output_params, 1);
3342
1
  outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
3343
1
  ccv_nnc_graph_exec_symbol_new(graph, min, inputs, input_size, outputs, output_size, "min");
3344
1
}
3345
3346
static ccv_cnnp_model_t* _ccv_cnnp_min_copy(const ccv_cnnp_model_t* const self, void* const context);
3347
3348
static const ccv_cnnp_model_vtab_t ccv_cnnp_min_isa = {
3349
  .build = _ccv_cnnp_min_build,
3350
  .copy = _ccv_cnnp_min_copy,
3351
};
3352
3353
ccv_cnnp_model_t* ccv_cnnp_min(const char* const name)
3354
1
{
3355
1
  ccv_cnnp_model_min_t* const model_min = (ccv_cnnp_model_min_t*)cccalloc(1, sizeof(ccv_cnnp_model_min_t));
3356
1
  model_min->super.isa = &ccv_cnnp_min_isa;
3357
1
  model_min->super.input_size = 2;
3358
1
  model_min->super.outputs = &model_min->output;
3359
1
  model_min->super.output_size = 1;
3360
1
  ccv_cnnp_model_copy_name(&model_min->super, name);
3361
1
  return (ccv_cnnp_model_t*)model_min;
3362
1
}
3363
3364
static ccv_cnnp_model_t* _ccv_cnnp_min_copy(const ccv_cnnp_model_t* const super, void* const context)
3365
0
{
3366
0
  const ccv_cnnp_model_min_t* const self = (const ccv_cnnp_model_min_t*)super;
3367
0
  return ccv_cnnp_min(self->super.name);
3368
0
}
3369
3370
// MARK - Max Layer
3371
3372
typedef struct {
3373
  ccv_cnnp_model_t super;
3374
  ccv_nnc_tensor_symbol_t output;
3375
} ccv_cnnp_model_max_t;
3376
3377
static void _ccv_cnnp_max_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3378
1
{
3379
1
  PRINT(CCV_CLI_VERBOSE, "[cnnp_max_build] -\n");
3380
1
  assert(input_size == 2);
3381
1
  assert(output_size == 1);
3382
1
  ccv_nnc_tensor_param_t input_params[2];
3383
1
  int i;
3384
3
  for (i = 0; i < 2; 
i++2
)
3385
2
    input_params[i] = ccv_nnc_tensor_symbol_params(graph, inputs[i]);
3386
1
  ccv_nnc_tensor_param_t output_params;
3387
1
  const ccv_nnc_cmd_t max = CMD_MAX_FORWARD();
3388
1
  ccv_nnc_hint_tensor_auto(max, input_params, 2, ccv_nnc_no_hint, &output_params, 1);
3389
1
  outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
3390
1
  ccv_nnc_graph_exec_symbol_new(graph, max, inputs, input_size, outputs, output_size, "max");
3391
1
}
3392
3393
static ccv_cnnp_model_t* _ccv_cnnp_max_copy(const ccv_cnnp_model_t* const self, void* const context);
3394
3395
static const ccv_cnnp_model_vtab_t ccv_cnnp_max_isa = {
3396
  .build = _ccv_cnnp_max_build,
3397
  .copy = _ccv_cnnp_max_copy,
3398
};
3399
3400
ccv_cnnp_model_t* ccv_cnnp_max(const char* const name)
3401
1
{
3402
1
  ccv_cnnp_model_max_t* const model_max = (ccv_cnnp_model_max_t*)cccalloc(1, sizeof(ccv_cnnp_model_max_t));
3403
1
  model_max->super.isa = &ccv_cnnp_max_isa;
3404
1
  model_max->super.input_size = 2;
3405
1
  model_max->super.outputs = &model_max->output;
3406
1
  model_max->super.output_size = 1;
3407
1
  ccv_cnnp_model_copy_name(&model_max->super, name);
3408
1
  return (ccv_cnnp_model_t*)model_max;
3409
1
}
3410
3411
static ccv_cnnp_model_t* _ccv_cnnp_max_copy(const ccv_cnnp_model_t* const super, void* const context)
3412
0
{
3413
0
  const ccv_cnnp_model_max_t* const self = (const ccv_cnnp_model_max_t*)super;
3414
0
  return ccv_cnnp_max(self->super.name);
3415
0
}
3416
3417
// MARK - LSTM Layer
3418
3419
typedef struct {
3420
  ccv_cnnp_model_t super;
3421
  int masked;
3422
  ccv_nnc_tensor_symbol_t output;
3423
  ccv_nnc_tensor_symbol_t weights;
3424
  ccv_nnc_tensor_symbol_t reserves;
3425
  ccv_nnc_cmd_param_t params;
3426
  ccv_nnc_graph_exec_symbol_t lstm;
3427
} ccv_cnnp_model_lstm_t;
3428
3429
static int _ccv_cnnp_lstm_weight_dim(int bidirectional, int num_layers, int input_size, int hidden_size, int proj_size, int bias)
3430
1
{
3431
1
  const int D = !!bidirectional + 1;
3432
1
  if (hidden_size == proj_size)
3433
1
    return (num_layers * (bias ? 8 : 
00
) + (num_layers - 1) * (hidden_size * 4 * D + hidden_size * 4) + input_size * 4 + hidden_size * 4) * D;
3434
0
  else
3435
0
    return (num_layers * (bias ? 8 : 0) + (num_layers - 1) * (proj_size * 4 * D + proj_size * 4) + (proj_size * 4 + input_size * 4) + num_layers * proj_size) * D;
3436
1
}
3437
3438
static void _ccv_cnnp_lstm_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3439
1
{
3440
1
  ccv_cnnp_model_lstm_t* const self = (ccv_cnnp_model_lstm_t*)super;
3441
1
  PRINT(CCV_CLI_VERBOSE, "[cnnp_lstm_build] -\n");
3442
1
  assert(input_size == self->super.input_size);
3443
1
  assert(output_size == 1);
3444
1
  const int proj_size = self->params.rnn.proj_size == 0 ? self->params.rnn.hidden_size : 
self->params.rnn.proj_size0
;
3445
1
  ccv_nnc_tensor_param_t input_params[5];
3446
1
  input_params[0]= ccv_nnc_tensor_symbol_params(graph, inputs[0]);
3447
1
  if (input_size == 2)
3448
1
    input_params[1] = ccv_nnc_tensor_symbol_params(graph, inputs[1]);
3449
1
  input_params[4] = input_params[0];
3450
1
  memset(input_params[4].dim, 0, sizeof(input_params[4].dim));
3451
1
  const int x_nd = ccv_nnc_tensor_nd(input_params[0].dim);
3452
1
  const int feature_count = input_params[0].dim[x_nd - 1];
3453
1
  input_params[4].dim[0] = _ccv_cnnp_lstm_weight_dim(self->params.rnn.bidirectional, self->params.rnn.num_layers, feature_count, self->params.rnn.hidden_size, proj_size, self->params.rnn.bias);
3454
1
  input_params[4].dim[1] = self->params.rnn.hidden_size;
3455
1
  const ccv_nnc_cmd_t lstm = ccv_nnc_cmd(CCV_NNC_LSTM_FORWARD, 0, self->params, 0);
3456
1
  ccv_nnc_tensor_param_t output_params[4];
3457
1
  ccv_nnc_hint_tensor_auto(lstm, input_params, 5, ccv_nnc_no_hint, output_params, 4);
3458
1
  outputs[0] = ccv_nnc_tensor_symbol_new(graph, output_params[0], 0);
3459
1
  if (!self->weights.graph)
3460
1
    self->weights = ccv_nnc_tensor_symbol_new(graph, input_params[4], "weights");
3461
1
  if (!self->reserves.graph)
3462
1
    self->reserves = ccv_nnc_tensor_symbol_new(graph, output_params[3], "reserves");
3463
1
  const ccv_nnc_tensor_symbol_t mask = input_size == 2 ? inputs[1] : 
NO_TENSOR_SYMBOL0
;
3464
1
  self->lstm = ccv_nnc_graph_exec_symbol_new(graph, lstm, TENSOR_SYMBOL_LIST(inputs[0], mask, NO_TENSOR_SYMBOL, NO_TENSOR_SYMBOL, self->weights), TENSOR_SYMBOL_LIST(outputs[0], NO_TENSOR_SYMBOL, NO_TENSOR_SYMBOL, self->reserves), "lstm");
3465
1
}
3466
3467
static void _ccv_cnnp_lstm_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
3468
1
{
3469
1
  ccv_cnnp_model_lstm_t* const self = (ccv_cnnp_model_lstm_t*)super;
3470
1
  if (self->weights.graph)
3471
1
  {
3472
1
    const float stdv = 1.0 / sqrt(self->params.rnn.hidden_size);
3473
1
    initializer(context, CMD_RANDOM_UNIFORM_FORWARD(-stdv, stdv), ccv_nnc_no_hint, 0, 0, self->weights);
3474
1
  }
3475
1
}
3476
3477
static void _ccv_cnnp_lstm_add_to_parameter(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const parameters, const int is_trainable)
3478
1
{
3479
1
  ccv_cnnp_model_lstm_t* const self = (ccv_cnnp_model_lstm_t*)super;
3480
1
  if (self->weights.graph)
3481
1
    add_to_array(parameters, self->weights, is_trainable);
3482
1
}
3483
3484
static void _ccv_cnnp_lstm_set_is_test(ccv_cnnp_model_t* const super, const int is_test, const ccv_cnnp_cmd_updater_f updater, void* const context)
3485
2
{
3486
2
  ccv_cnnp_model_lstm_t* const self = (ccv_cnnp_model_lstm_t*)super;
3487
2
  if (self->lstm.graph)
3488
2
  {
3489
2
    self->params.rnn.is_test = is_test;
3490
2
    updater(context, self->lstm, ccv_nnc_cmd(CCV_NNC_LSTM_FORWARD, 0, self->params, 0), ccv_nnc_no_hint);
3491
2
  }
3492
2
}
3493
3494
static ccv_cnnp_model_t* _ccv_cnnp_lstm_copy(const ccv_cnnp_model_t* const self, void* const context);
3495
3496
static const ccv_cnnp_model_vtab_t ccv_cnnp_lstm_isa = {
3497
  .build = _ccv_cnnp_lstm_build,
3498
  .init_states = _ccv_cnnp_lstm_init_states,
3499
  .add_to_parameter = _ccv_cnnp_lstm_add_to_parameter,
3500
  .copy = _ccv_cnnp_lstm_copy,
3501
  .set_is_test = _ccv_cnnp_lstm_set_is_test,
3502
};
3503
3504
ccv_cnnp_model_t* ccv_cnnp_lstm(const int masked, const int hidden_size, const int proj_size, const int num_layers, const int bias, const int batch_first, const int bidirectional, const float dropout, const int is_trainable, const char* const name)
3505
1
{
3506
1
  ccv_cnnp_model_lstm_t* const model_lstm = (ccv_cnnp_model_lstm_t*)cccalloc(1, sizeof(ccv_cnnp_model_lstm_t));
3507
1
  model_lstm->super.isa = &ccv_cnnp_lstm_isa;
3508
1
  model_lstm->super.input_size = masked ? 2 : 
10
;
3509
1
  model_lstm->super.outputs = &model_lstm->output;
3510
1
  model_lstm->super.output_size = 1;
3511
1
  model_lstm->super.is_trainable = is_trainable;
3512
1
  ccv_cnnp_model_copy_name(&model_lstm->super, name);
3513
1
  model_lstm->masked = masked;
3514
1
  model_lstm->weights.d = CCV_NNC_NO_TENSOR_SYMBOL;
3515
1
  model_lstm->weights.graph = 0;
3516
1
  model_lstm->params.rnn.hidden_size = hidden_size;
3517
1
  model_lstm->params.rnn.proj_size = proj_size;
3518
1
  model_lstm->params.rnn.num_layers = num_layers;
3519
1
  model_lstm->params.rnn.bias = bias;
3520
1
  model_lstm->params.rnn.batch_first = batch_first;
3521
1
  model_lstm->params.rnn.bidirectional = bidirectional;
3522
1
  model_lstm->params.rnn.dropout = dropout;
3523
1
  return (ccv_cnnp_model_t*)model_lstm;
3524
1
}
3525
3526
static ccv_cnnp_model_t* _ccv_cnnp_lstm_copy(const ccv_cnnp_model_t* const super, void* const context)
3527
0
{
3528
0
  const ccv_cnnp_model_lstm_t* const self = (const ccv_cnnp_model_lstm_t*)super;
3529
0
  return ccv_cnnp_lstm(self->masked, self->params.rnn.hidden_size, self->params.rnn.proj_size, self->params.rnn.num_layers, self->params.rnn.bias, self->params.rnn.batch_first, self->params.rnn.bidirectional, self->params.rnn.dropout, self->super.is_trainable, self->super.name);
3530
0
}
3531
3532
/// MARK - Datatype conversion layer.
3533
3534
typedef struct {
3535
  ccv_cnnp_model_t super;
3536
  ccv_nnc_tensor_symbol_t output;
3537
  int datatype;
3538
  int ref_to_last;
3539
} ccv_cnnp_model_datatype_conversion_t;
3540
3541
static void _ccv_cnnp_datatype_conversion_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3542
2
{
3543
2
  ccv_cnnp_model_datatype_conversion_t* const self = (ccv_cnnp_model_datatype_conversion_t*)super;
3544
2
  PRINT(CCV_CLI_VERBOSE, "[cnnp_datatype_conversion_build] -\n");
3545
2
  ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
3546
2
  if (self->ref_to_last)
3547
1
  {
3548
1
    assert(input_size > 1);
3549
1
    const ccv_nnc_tensor_param_t last_params = ccv_nnc_tensor_symbol_params(graph, inputs[input_size - 1]);
3550
1
    params.datatype = last_params.datatype;
3551
1
  } else
3552
1
    params.datatype = self->datatype;
3553
2
  assert(output_size == 1);
3554
2
  outputs[0] = ccv_nnc_tensor_symbol_new(graph, params, 0);
3555
2
  ccv_nnc_graph_exec_symbol_new(graph, CMD_DATATYPE_CONVERSION_FORWARD(), inputs, output_size /* intentional */, outputs, output_size, 0);
3556
2
}
3557
3558
static ccv_cnnp_model_t* _ccv_cnnp_datatype_conversion_copy(const ccv_cnnp_model_t* const self, void* const context);
3559
3560
static const ccv_cnnp_model_vtab_t ccv_cnnp_datatype_conversion_isa = {
3561
  .build = _ccv_cnnp_datatype_conversion_build,
3562
  .copy = _ccv_cnnp_datatype_conversion_copy,
3563
};
3564
3565
ccv_cnnp_model_t* ccv_cnnp_datatype_conversion(const int datatype, const int ref_to_last, const char* const name)
3566
2
{
3567
2
  ccv_cnnp_model_datatype_conversion_t* const model_datatype_conversion = (ccv_cnnp_model_datatype_conversion_t*)cccalloc(1, sizeof(ccv_cnnp_model_datatype_conversion_t));
3568
2
  model_datatype_conversion->super.isa = &ccv_cnnp_datatype_conversion_isa;
3569
2
  model_datatype_conversion->super.input_size = 0;
3570
2
  model_datatype_conversion->super.outputs = &model_datatype_conversion->output;
3571
2
  model_datatype_conversion->super.output_size = 1;
3572
2
  model_datatype_conversion->datatype = datatype;
3573
2
  model_datatype_conversion->ref_to_last = ref_to_last;
3574
2
  ccv_cnnp_model_copy_name(&model_datatype_conversion->super, name);
3575
2
  return (ccv_cnnp_model_t*)model_datatype_conversion;
3576
2
}
3577
3578
static ccv_cnnp_model_t* _ccv_cnnp_datatype_conversion_copy(const ccv_cnnp_model_t* const super, void* const context)
3579
0
{
3580
0
  ccv_cnnp_model_datatype_conversion_t* const self = (ccv_cnnp_model_datatype_conversion_t*)super;
3581
0
  return ccv_cnnp_datatype_conversion(self->datatype, self->ref_to_last, self->super.name);
3582
0
}
3583
3584
/// MARK - Clamp layer.
3585
3586
typedef struct {
3587
  ccv_cnnp_model_t super;
3588
  ccv_nnc_tensor_symbol_t output;
3589
  float min;
3590
  float max;
3591
} ccv_cnnp_model_clamp_t;
3592
3593
static void _ccv_cnnp_clamp_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3594
0
{
3595
0
  ccv_cnnp_model_clamp_t* const self = (ccv_cnnp_model_clamp_t*)super;
3596
0
  PRINT(CCV_CLI_VERBOSE, "[cnnp_clamp_build] -\n");
3597
0
  ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
3598
0
  assert(output_size == 1);
3599
0
  outputs[0] = ccv_nnc_tensor_symbol_new(graph, params, 0);
3600
0
  ccv_nnc_graph_exec_symbol_new(graph, CMD_CLAMP_FORWARD(self->min, self->max), inputs, output_size /* intentional */, outputs, output_size, 0);
3601
0
}
3602
3603
static ccv_cnnp_model_t* _ccv_cnnp_clamp_copy(const ccv_cnnp_model_t* const self, void* const context);
3604
3605
static const ccv_cnnp_model_vtab_t ccv_cnnp_clamp_isa = {
3606
  .build = _ccv_cnnp_clamp_build,
3607
  .copy = _ccv_cnnp_clamp_copy,
3608
};
3609
3610
ccv_cnnp_model_t* ccv_cnnp_clamp(const float min, const float max, const char* const name)
3611
0
{
3612
0
  ccv_cnnp_model_clamp_t* const model_clamp = (ccv_cnnp_model_clamp_t*)cccalloc(1, sizeof(ccv_cnnp_model_clamp_t));
3613
0
  model_clamp->super.isa = &ccv_cnnp_clamp_isa;
3614
0
  model_clamp->super.input_size = 0;
3615
0
  model_clamp->super.outputs = &model_clamp->output;
3616
0
  model_clamp->super.output_size = 1;
3617
0
  model_clamp->min = min;
3618
0
  model_clamp->max = max;
3619
0
  ccv_cnnp_model_copy_name(&model_clamp->super, name);
3620
0
  return (ccv_cnnp_model_t*)model_clamp;
3621
0
}
3622
3623
static ccv_cnnp_model_t* _ccv_cnnp_clamp_copy(const ccv_cnnp_model_t* const super, void* const context)
3624
0
{
3625
0
  ccv_cnnp_model_clamp_t* const self = (ccv_cnnp_model_clamp_t*)super;
3626
0
  return ccv_cnnp_clamp(self->min, self->max, self->super.name);
3627
0
}
3628
3629
// MARK - Parameter Layer
3630
3631
typedef struct {
3632
  ccv_cnnp_model_t super;
3633
  float init_bound;
3634
  ccv_nnc_tensor_symbol_t weights;
3635
  ccv_nnc_tensor_param_t weights_params;
3636
  ccv_nnc_tensor_symbol_t output;
3637
} ccv_cnnp_model_parameter_t;
3638
3639
static void _ccv_cnnp_parameter_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3640
1
{
3641
1
  PRINT(CCV_CLI_VERBOSE, "[cnnp_parameter_build] -\n");
3642
1
  assert(output_size == 1);
3643
1
  ccv_cnnp_model_parameter_t* const self = (ccv_cnnp_model_parameter_t*)super;
3644
1
  if (!self->weights.graph)
3645
1
    self->weights = ccv_nnc_tensor_symbol_new(graph, self->weights_params, "weights");
3646
1
  assert(self->weights.graph == graph);
3647
1
  outputs[0] = self->weights;
3648
1
}
3649
3650
static void _ccv_cnnp_parameter_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
3651
0
{
3652
0
  ccv_cnnp_model_parameter_t* const self = (ccv_cnnp_model_parameter_t*)super;
3653
0
  if (self->init_bound > 0)
3654
0
    initializer(context, CMD_RANDOM_UNIFORM_FORWARD(-self->init_bound, self->init_bound), ccv_nnc_no_hint, 0, 0, self->weights);
3655
0
  else
3656
0
    initializer(context, CMD_SET_FORWARD(0), ccv_nnc_no_hint, 0, 0, self->weights);
3657
0
}
3658
3659
static void _ccv_cnnp_parameter_add_to_parameter(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const parameters, const int is_trainable)
3660
1
{
3661
1
  ccv_cnnp_model_parameter_t* const self = (ccv_cnnp_model_parameter_t*)super;
3662
1
  add_to_array(parameters, self->weights, is_trainable);
3663
1
}
3664
3665
static ccv_cnnp_model_t* _ccv_cnnp_parameter_copy(const ccv_cnnp_model_t* const super, void* const context);
3666
3667
static const ccv_cnnp_model_vtab_t ccv_cnnp_parameter_isa = {
3668
  .build = _ccv_cnnp_parameter_build,
3669
  .init_states = _ccv_cnnp_parameter_init_states,
3670
  .add_to_parameter = _ccv_cnnp_parameter_add_to_parameter,
3671
  .copy = _ccv_cnnp_parameter_copy,
3672
};
3673
3674
ccv_cnnp_model_t* ccv_cnnp_parameter(const ccv_nnc_tensor_param_t params, const float init_bound, const int is_trainable, const char* const name)
3675
1
{
3676
1
  ccv_cnnp_model_parameter_t* const model_parameter = (ccv_cnnp_model_parameter_t*)cccalloc(1, sizeof(ccv_cnnp_model_parameter_t));
3677
1
  model_parameter->super.isa = &ccv_cnnp_parameter_isa;
3678
1
  model_parameter->super.input_size = 0;
3679
1
  model_parameter->super.outputs = &model_parameter->output;
3680
1
  model_parameter->super.output_size = 1;
3681
1
  model_parameter->super.is_trainable = is_trainable;
3682
1
  ccv_cnnp_model_copy_name(&model_parameter->super, name);
3683
1
  model_parameter->weights.d = CCV_NNC_NO_TENSOR_SYMBOL;
3684
1
  model_parameter->weights.graph = 0;
3685
1
  model_parameter->weights_params = params;
3686
1
  return (ccv_cnnp_model_t*)model_parameter;
3687
1
}
3688
3689
static ccv_cnnp_model_t* _ccv_cnnp_parameter_copy(const ccv_cnnp_model_t* const super, void* const context)
3690
0
{
3691
0
  const ccv_cnnp_model_parameter_t* const self = (const ccv_cnnp_model_parameter_t*)super;
3692
0
  return ccv_cnnp_parameter(self->weights_params, self->init_bound, self->super.is_trainable, self->super.name);
3693
0
}
3694
3695
// MARK - Scalar Layer
3696
3697
typedef struct {
3698
  ccv_cnnp_model_t super;
3699
  int type;
3700
  int format;
3701
  int datatype;
3702
  float value;
3703
  ccv_nnc_tensor_symbol_t output;
3704
} ccv_cnnp_model_scalar_t;
3705
3706
static void _ccv_cnnp_scalar_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3707
2
{
3708
2
  PRINT(CCV_CLI_VERBOSE, "[cnnp_scalar_build] -\n");
3709
2
  assert(output_size == 1);
3710
2
  ccv_cnnp_model_scalar_t* const self = (ccv_cnnp_model_scalar_t*)super;
3711
2
  ccv_nnc_tensor_param_t params = {
3712
2
    .type = self->type,
3713
2
    .format = self->format,
3714
2
    .datatype = self->datatype,
3715
2
    .dim = {
3716
2
      1
3717
2
    }
3718
2
  };
3719
2
  if (input_size > 0)
3720
1
  {
3721
1
    ccv_nnc_tensor_param_t input_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
3722
1
    params.type = input_params.type;
3723
1
    params.format = input_params.format;
3724
1
    params.datatype = input_params.datatype;
3725
1
  }
3726
2
  outputs[0] = ccv_nnc_tensor_symbol_new(graph, params, 0);
3727
2
  ccv_nnc_graph_exec_symbol_new(graph, CMD_SET_FORWARD(self->value), 0, 0, outputs, 1, 0);
3728
2
}
3729
3730
static ccv_cnnp_model_t* _ccv_cnnp_scalar_copy(const ccv_cnnp_model_t* const super, void* const context);
3731
3732
static const ccv_cnnp_model_vtab_t ccv_cnnp_scalar_isa = {
3733
  .build = _ccv_cnnp_scalar_build,
3734
  .copy = _ccv_cnnp_scalar_copy,
3735
};
3736
3737
ccv_cnnp_model_t* ccv_cnnp_scalar(const int type, const int format, const int datatype, const float value, const char* const name)
3738
2
{
3739
2
  ccv_cnnp_model_scalar_t* const model_scalar = (ccv_cnnp_model_scalar_t*)cccalloc(1, sizeof(ccv_cnnp_model_scalar_t));
3740
2
  model_scalar->super.isa = &ccv_cnnp_scalar_isa;
3741
2
  model_scalar->super.input_size = 0;
3742
2
  model_scalar->super.outputs = &model_scalar->output;
3743
2
  model_scalar->super.output_size = 1;
3744
2
  ccv_cnnp_model_copy_name(&model_scalar->super, name);
3745
2
  model_scalar->type = type;
3746
2
  model_scalar->format = format;
3747
2
  model_scalar->datatype = datatype;
3748
2
  model_scalar->value = value;
3749
2
  return (ccv_cnnp_model_t*)model_scalar;
3750
2
}
3751
3752
static ccv_cnnp_model_t* _ccv_cnnp_scalar_copy(const ccv_cnnp_model_t* const super, void* const context)
3753
0
{
3754
0
  const ccv_cnnp_model_scalar_t* const self = (const ccv_cnnp_model_scalar_t*)super;
3755
0
  return ccv_cnnp_scalar(self->type, self->format, self->datatype, self->value, self->super.name);
3756
0
}
3757
3758
// MARK - Variable Layer
3759
3760
typedef struct {
3761
  ccv_cnnp_model_t super;
3762
  ccv_nnc_tensor_param_t params;
3763
  ccv_nnc_tensor_symbol_t output;
3764
} ccv_cnnp_model_variable_t;
3765
3766
static void _ccv_cnnp_variable_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3767
1
{
3768
1
  PRINT(CCV_CLI_VERBOSE, "[cnnp_variable_build] -\n");
3769
1
  assert(input_size == 0);
3770
1
  assert(output_size == 1);
3771
1
  ccv_cnnp_model_variable_t* const self = (ccv_cnnp_model_variable_t*)super;
3772
1
  outputs[0] = ccv_nnc_tensor_symbol_new(graph, self->params, 0);
3773
1
}
3774
3775
static ccv_cnnp_model_t* _ccv_cnnp_variable_copy(const ccv_cnnp_model_t* const super, void* const context);
3776
3777
static const ccv_cnnp_model_vtab_t ccv_cnnp_variable_isa = {
3778
  .build = _ccv_cnnp_variable_build,
3779
  .copy = _ccv_cnnp_variable_copy,
3780
};
3781
3782
ccv_cnnp_model_t* ccv_cnnp_variable(const ccv_nnc_tensor_param_t params, const char* const name)
3783
1
{
3784
1
  ccv_cnnp_model_variable_t* const model_variable = (ccv_cnnp_model_variable_t*)cccalloc(1, sizeof(ccv_cnnp_model_variable_t));
3785
1
  model_variable->super.isa = &ccv_cnnp_variable_isa;
3786
1
  model_variable->super.input_size = 0;
3787
1
  model_variable->super.outputs = &model_variable->output;
3788
1
  model_variable->super.output_size = 1;
3789
1
  ccv_cnnp_model_copy_name(&model_variable->super, name);
3790
1
  model_variable->params = params;
3791
1
  return (ccv_cnnp_model_t*)model_variable;
3792
1
}
3793
3794
static ccv_cnnp_model_t* _ccv_cnnp_variable_copy(const ccv_cnnp_model_t* const super, void* const context)
3795
0
{
3796
0
  const ccv_cnnp_model_variable_t* const self = (const ccv_cnnp_model_variable_t*)super;
3797
0
  return ccv_cnnp_variable(self->params, self->super.name);
3798
0
}
3799
3800
// MARK - Move Layer
3801
3802
typedef struct {
3803
  ccv_cnnp_model_t super;
3804
  ccv_nnc_tensor_symbol_t output;
3805
} ccv_cnnp_model_move_t;
3806
3807
static void _ccv_cnnp_move_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3808
3
{
3809
3
  PRINT(CCV_CLI_VERBOSE, "[cnnp_move_build] -\n");
3810
3
  assert(input_size == 2);
3811
3
  assert(output_size == 1);
3812
3
  outputs[0] = inputs[1];
3813
3
  ccv_nnc_graph_exec_symbol_new(graph, CMD_FORMAT_TRANSFORM_FORWARD(), inputs, 1, outputs, 1, "move");
3814
3
}
3815
3816
static ccv_cnnp_model_t* _ccv_cnnp_move_copy(const ccv_cnnp_model_t* const super, void* const context);
3817
3818
static const ccv_cnnp_model_vtab_t ccv_cnnp_move_isa = {
3819
  .build = _ccv_cnnp_move_build,
3820
  .copy = _ccv_cnnp_move_copy,
3821
};
3822
3823
ccv_cnnp_model_t* ccv_cnnp_move(const char* const name)
3824
3
{
3825
3
  ccv_cnnp_model_move_t* const model_move = (ccv_cnnp_model_move_t*)cccalloc(1, sizeof(ccv_cnnp_model_move_t));
3826
3
  model_move->super.isa = &ccv_cnnp_move_isa;
3827
3
  model_move->super.input_size = 2;
3828
3
  model_move->super.outputs = &model_move->output;
3829
3
  model_move->super.output_size = 1;
3830
3
  ccv_cnnp_model_copy_name(&model_move->super, name);
3831
3
  return (ccv_cnnp_model_t*)model_move;
3832
3
}
3833
3834
static ccv_cnnp_model_t* _ccv_cnnp_move_copy(const ccv_cnnp_model_t* const super, void* const context)
3835
0
{
3836
0
  const ccv_cnnp_model_move_t* const self = (const ccv_cnnp_model_move_t*)super;
3837
0
  return ccv_cnnp_move(self->super.name);
3838
0
}
3839
3840
// MARK - "Making" Contiguous Layer
3841
3842
typedef struct {
3843
  ccv_cnnp_model_t super;
3844
  ccv_nnc_tensor_symbol_t output;
3845
} ccv_cnnp_model_contiguous_t;
3846
3847
static void _ccv_cnnp_contiguous_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3848
5
{
3849
5
  PRINT(CCV_CLI_VERBOSE, "[cnnp_contiguous_build] -\n");
3850
5
  assert(input_size == 1);
3851
5
  assert(output_size == 1);
3852
5
  ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
3853
5
  ccv_nnc_tensor_symbol_t to = ccv_nnc_tensor_symbol_alias_to(graph, inputs[0]);
3854
5
  if (to.d == CCV_NNC_NO_TENSOR_SYMBOL) // If we are not reshape an alias, it is straightforward.
3855
0
  {
3856
0
    outputs[0] = inputs[0];
3857
0
    return;
3858
0
  }
3859
  // Otherwise, we need to check its stride to know if it is contiguous.
3860
5
  int old_stride[CCV_NNC_MAX_DIM_ALLOC];
3861
5
  ccv_nnc_tensor_symbol_alias_params(graph, inputs[0], 0, old_stride);
3862
  // We identify permute by checking if the stride is not in descending order.
3863
  // This also covered "permute" through reshape, rather than using ccv_cnnp_permute directly.
3864
5
  if (ccv_nnc_is_tensor_stride_packed(old_stride, params.dim))
3865
2
  {
3866
2
    outputs[0] = inputs[0];
3867
2
    return;
3868
2
  }
3869
3
  outputs[0] = ccv_nnc_tensor_symbol_new(graph, params, 0);
3870
3
  ccv_nnc_graph_exec_symbol_t make_contiguous = ccv_nnc_graph_exec_symbol_new(graph, CMD_FORMAT_TRANSFORM_FORWARD(), inputs, 1, outputs, 1, "contiguous");
3871
3
  ccv_nnc_graph_exec_symbol_set_flags(graph, make_contiguous, CCV_NNC_GRAPH_EXEC_DISABLE_OPT);
3872
3
}
3873
3874
static ccv_cnnp_model_t* _ccv_cnnp_contiguous_copy(const ccv_cnnp_model_t* const super, void* const context);
3875
3876
static const ccv_cnnp_model_vtab_t ccv_cnnp_contiguous_isa = {
3877
  .build = _ccv_cnnp_contiguous_build,
3878
  .copy = _ccv_cnnp_contiguous_copy,
3879
};
3880
3881
ccv_cnnp_model_t* ccv_cnnp_contiguous(const char* const name)
3882
5
{
3883
5
  ccv_cnnp_model_contiguous_t* const model_contiguous = (ccv_cnnp_model_contiguous_t*)cccalloc(1, sizeof(ccv_cnnp_model_contiguous_t));
3884
5
  model_contiguous->super.isa = &ccv_cnnp_contiguous_isa;
3885
5
  model_contiguous->super.input_size = 1;
3886
5
  model_contiguous->super.outputs = &model_contiguous->output;
3887
5
  model_contiguous->super.output_size = 1;
3888
5
  ccv_cnnp_model_copy_name(&model_contiguous->super, name);
3889
5
  return (ccv_cnnp_model_t*)model_contiguous;
3890
5
}
3891
3892
static ccv_cnnp_model_t* _ccv_cnnp_contiguous_copy(const ccv_cnnp_model_t* const super, void* const context)
3893
0
{
3894
0
  const ccv_cnnp_model_contiguous_t* const self = (const ccv_cnnp_model_contiguous_t*)super;
3895
0
  return ccv_cnnp_contiguous(self->super.name);
3896
0
}
3897
3898
// MARK - "Making" Copy Layer
3899
3900
typedef struct {
3901
  ccv_cnnp_model_t super;
3902
  ccv_nnc_tensor_symbol_t output;
3903
} ccv_cnnp_model_copy_t;
3904
3905
static void _ccv_cnnp_copy_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3906
0
{
3907
0
  PRINT(CCV_CLI_VERBOSE, "[cnnp_copy_build] -\n");
3908
0
  assert(input_size == 1);
3909
0
  assert(output_size == 1);
3910
0
  ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
3911
0
  ccv_nnc_tensor_symbol_t to = ccv_nnc_tensor_symbol_alias_to(graph, inputs[0]);
3912
0
  if (to.d == CCV_NNC_NO_TENSOR_SYMBOL) // If we are not reshape an alias, it is straightforward.
3913
0
  {
3914
0
    outputs[0] = inputs[0];
3915
0
    return;
3916
0
  }
3917
0
  outputs[0] = ccv_nnc_tensor_symbol_new(graph, params, 0);
3918
0
  ccv_nnc_graph_exec_symbol_t make_contiguous = ccv_nnc_graph_exec_symbol_new(graph, CMD_FORMAT_TRANSFORM_FORWARD(), inputs, 1, outputs, 1, "contiguous");
3919
0
  ccv_nnc_graph_exec_symbol_set_flags(graph, make_contiguous, CCV_NNC_GRAPH_EXEC_DISABLE_OPT);
3920
0
}
3921
3922
static ccv_cnnp_model_t* _ccv_cnnp_copy_copy(const ccv_cnnp_model_t* const super, void* const context);
3923
3924
static const ccv_cnnp_model_vtab_t ccv_cnnp_copy_isa = {
3925
  .build = _ccv_cnnp_copy_build,
3926
  .copy = _ccv_cnnp_copy_copy,
3927
};
3928
3929
ccv_cnnp_model_t* ccv_cnnp_copy(const char* const name)
3930
0
{
3931
0
  ccv_cnnp_model_copy_t* const model_copy = (ccv_cnnp_model_copy_t*)cccalloc(1, sizeof(ccv_cnnp_model_copy_t));
3932
0
  model_copy->super.isa = &ccv_cnnp_copy_isa;
3933
0
  model_copy->super.input_size = 1;
3934
0
  model_copy->super.outputs = &model_copy->output;
3935
0
  model_copy->super.output_size = 1;
3936
0
  ccv_cnnp_model_copy_name(&model_copy->super, name);
3937
0
  return (ccv_cnnp_model_t*)model_copy;
3938
0
}
3939
3940
static ccv_cnnp_model_t* _ccv_cnnp_copy_copy(const ccv_cnnp_model_t* const super, void* const context)
3941
0
{
3942
0
  const ccv_cnnp_model_copy_t* const self = (const ccv_cnnp_model_copy_t*)super;
3943
0
  return ccv_cnnp_copy(self->super.name);
3944
0
}
3945
3946
// MARK - Scaled-Dot Product Attention Layer
3947
3948
typedef struct {
3949
  ccv_cnnp_model_t super;
3950
  ccv_nnc_tensor_symbol_t output;
3951
  ccv_nnc_tensor_symbol_t weights;
3952
  ccv_nnc_tensor_symbol_t bias;
3953
  float scale;
3954
  int is_causal;
3955
  int has_attn_mask;
3956
  int flags;
3957
  int fused_unify_head_weights;
3958
  int no_bias;
3959
} ccv_cnnp_model_scaled_dot_product_attention_t;
3960
3961
static void _ccv_cnnp_scaled_dot_product_attention_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
3962
3
{
3963
3
  PRINT(CCV_CLI_VERBOSE, "[cnnp_scaled_dot_product_attention_build] -\n");
3964
3
  assert(input_size == 3 || input_size == 4);
3965
3
  assert(output_size == 1);
3966
3
  ccv_cnnp_model_scaled_dot_product_attention_t* const self = (ccv_cnnp_model_scaled_dot_product_attention_t*)super;
3967
3
  const ccv_nnc_tensor_param_t q_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
3968
3
  const ccv_nnc_tensor_param_t k_params = ccv_nnc_tensor_symbol_params(graph, inputs[1]);
3969
3
  const ccv_nnc_tensor_param_t v_params = ccv_nnc_tensor_symbol_params(graph, inputs[2]);
3970
3
  const int v_nd = ccv_nnc_tensor_nd(v_params.dim);
3971
3
  assert(v_nd == 3 || v_nd == 4);
3972
3
  const int hEv = (v_nd == 3 ? 
10
: v_params.dim[2]) * v_params.dim[v_nd - 1];
3973
3
  ccv_nnc_tensor_param_t weights_params = q_params;
3974
3
  memset(weights_params.dim, 0, sizeof(weights_params.dim));
3975
3
  weights_params.dim[0] = hEv;
3976
3
  weights_params.dim[1] = hEv;
3977
3
  ccv_nnc_tensor_param_t bias_params = q_params;
3978
3
  memset(bias_params.dim, 0, sizeof(bias_params.dim));
3979
3
  bias_params.dim[0] = hEv;
3980
3
  ccv_nnc_cmd_t cmd = {0};
3981
3
  cmd.cmd = CCV_NNC_SCALED_DOT_PRODUCT_ATTENTION_FORWARD;
3982
3
  cmd.info.scaled_dot_product_attention.scale = self->scale;
3983
3
  cmd.info.scaled_dot_product_attention.is_causal = self->is_causal;
3984
3
  cmd.info.scaled_dot_product_attention.flags = self->flags;
3985
3
  ccv_nnc_tensor_param_t output_params[3];
3986
3
  ccv_nnc_tensor_symbol_t output;
3987
3
  ccv_nnc_tensor_symbol_t saved_softmax_lse;
3988
3
  ccv_nnc_tensor_symbol_t saved_v_proj = NO_TENSOR_SYMBOL;
3989
3
  ccv_nnc_tensor_symbol_t attn_mask = NO_TENSOR_SYMBOL;
3990
3
  ccv_nnc_tensor_symbol_t weights = NO_TENSOR_SYMBOL;
3991
3
  ccv_nnc_tensor_symbol_t bias = NO_TENSOR_SYMBOL;
3992
3
  if (self->has_attn_mask)
3993
1
    attn_mask = inputs[3];
3994
3
  if (self->fused_unify_head_weights)
3995
1
  {
3996
1
    if (!self->weights.graph)
3997
1
      self->weights = ccv_nnc_tensor_symbol_new(graph, weights_params, "weights");
3998
1
    weights = self->weights;
3999
1
    if (!self->no_bias)
4000
1
    {
4001
1
      if (!self->bias.graph)
4002
1
        self->bias = ccv_nnc_tensor_symbol_new(graph, bias_params, "bias");
4003
1
      bias = self->bias;
4004
1
    }
4005
1
    ccv_nnc_hint_tensor_auto(cmd, (ccv_nnc_tensor_param_t []){
4006
1
        q_params,
4007
1
        k_params,
4008
1
        v_params,
4009
1
        (ccv_nnc_tensor_param_t){},
4010
1
        weights_params,
4011
1
        bias_params,
4012
1
      }, 6, ccv_nnc_no_hint, output_params, 3);
4013
1
    output = ccv_nnc_tensor_symbol_new(graph, output_params[0], 0);
4014
1
    saved_softmax_lse = ccv_nnc_tensor_symbol_new(graph, output_params[1], 0);
4015
1
    saved_v_proj = ccv_nnc_tensor_symbol_new(graph, output_params[2], 0);
4016
2
  } else {
4017
2
    ccv_nnc_hint_tensor_auto(cmd, (ccv_nnc_tensor_param_t []){
4018
2
        q_params,
4019
2
        k_params,
4020
2
        v_params,
4021
2
      }, 3, ccv_nnc_no_hint, output_params, 2);
4022
2
    output = ccv_nnc_tensor_symbol_new(graph, output_params[0], 0);
4023
2
    saved_softmax_lse = ccv_nnc_tensor_symbol_new(graph, output_params[1], 0);
4024
2
  }
4025
3
  ccv_nnc_graph_exec_symbol_new(graph, cmd, TENSOR_SYMBOL_LIST(inputs[0], inputs[1], inputs[2], attn_mask, weights, bias), TENSOR_SYMBOL_LIST(output, saved_softmax_lse, saved_v_proj), "scaled_dot_product_attention");
4026
3
  outputs[0] = output;
4027
3
}
4028
4029
static void _ccv_cnnp_scaled_dot_product_attention_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
4030
0
{
4031
0
  ccv_cnnp_model_scaled_dot_product_attention_t* const self = (ccv_cnnp_model_scaled_dot_product_attention_t*)super;
4032
0
  if (self->weights.graph)
4033
0
  {
4034
0
    assert(self->fused_unify_head_weights);
4035
0
    const ccv_nnc_tensor_param_t weight_params = ccv_nnc_tensor_symbol_params(graph, self->weights);
4036
0
    const int c = weight_params.dim[1];
4037
0
    const float std = sqrtf(2) / sqrtf(c);
4038
0
    const float bound = sqrtf(3) * std;
4039
0
    initializer(context, CMD_RANDOM_UNIFORM_FORWARD(-bound, bound), ccv_nnc_no_hint, 0, 0, self->weights);
4040
0
    if (self->bias.graph)
4041
0
      initializer(context, CMD_SET_FORWARD(0), ccv_nnc_no_hint, 0, 0, self->bias);
4042
0
  }
4043
0
}
4044
4045
static void _ccv_cnnp_scaled_dot_product_attention_add_to_parameter(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const parameters, const int is_trainable)
4046
1
{
4047
1
  ccv_cnnp_model_scaled_dot_product_attention_t* const self = (ccv_cnnp_model_scaled_dot_product_attention_t*)super;
4048
1
  if (self->weights.graph)
4049
1
  {
4050
1
    assert(self->fused_unify_head_weights);
4051
1
    add_to_array(parameters, self->weights, is_trainable);
4052
1
    if (self->bias.graph)
4053
1
      add_to_array(parameters, self->bias, is_trainable);
4054
1
  }
4055
1
}
4056
4057
static ccv_cnnp_model_t* _ccv_cnnp_scaled_dot_product_attention_copy(const ccv_cnnp_model_t* const super, void* const context);
4058
4059
static const ccv_cnnp_model_vtab_t ccv_cnnp_scaled_dot_product_attention_isa = {
4060
  .build = _ccv_cnnp_scaled_dot_product_attention_build,
4061
  .copy = _ccv_cnnp_scaled_dot_product_attention_copy,
4062
};
4063
4064
static const ccv_cnnp_model_vtab_t ccv_cnnp_scaled_dot_product_attention_fused_isa = {
4065
  .build = _ccv_cnnp_scaled_dot_product_attention_build,
4066
  .init_states = _ccv_cnnp_scaled_dot_product_attention_init_states,
4067
  .add_to_parameter = _ccv_cnnp_scaled_dot_product_attention_add_to_parameter,
4068
  .copy = _ccv_cnnp_scaled_dot_product_attention_copy,
4069
};
4070
4071
ccv_cnnp_model_t* ccv_cnnp_scaled_dot_product_attention(const float scale, const int is_causal, const int has_attn_mask, const int flags, const int fused_unify_head_weights, const int no_bias, const int is_trainable, const char* const name)
4072
3
{
4073
3
  ccv_cnnp_model_scaled_dot_product_attention_t* const model_scaled_dot_product_attention = (ccv_cnnp_model_scaled_dot_product_attention_t*)cccalloc(1, sizeof(ccv_cnnp_model_scaled_dot_product_attention_t));
4074
3
  model_scaled_dot_product_attention->super.isa = fused_unify_head_weights ? 
&ccv_cnnp_scaled_dot_product_attention_fused_isa1
:
&ccv_cnnp_scaled_dot_product_attention_isa2
;
4075
3
  model_scaled_dot_product_attention->super.input_size = has_attn_mask ? 
41
:
32
;
4076
3
  model_scaled_dot_product_attention->super.outputs = &model_scaled_dot_product_attention->output;
4077
3
  model_scaled_dot_product_attention->super.output_size = 1;
4078
3
  model_scaled_dot_product_attention->super.is_trainable = is_trainable;
4079
3
  ccv_cnnp_model_copy_name(&model_scaled_dot_product_attention->super, name);
4080
3
  model_scaled_dot_product_attention->weights.d = CCV_NNC_NO_TENSOR_SYMBOL;
4081
3
  model_scaled_dot_product_attention->weights.graph = 0;
4082
3
  model_scaled_dot_product_attention->bias.d = CCV_NNC_NO_TENSOR_SYMBOL;
4083
3
  model_scaled_dot_product_attention->bias.graph = 0;
4084
3
  model_scaled_dot_product_attention->scale = scale;
4085
3
  model_scaled_dot_product_attention->is_causal = is_causal;
4086
3
  model_scaled_dot_product_attention->has_attn_mask = has_attn_mask;
4087
3
  model_scaled_dot_product_attention->flags = flags;
4088
3
  model_scaled_dot_product_attention->fused_unify_head_weights = fused_unify_head_weights;
4089
3
  model_scaled_dot_product_attention->no_bias = no_bias;
4090
3
  return (ccv_cnnp_model_t*)model_scaled_dot_product_attention;
4091
3
}
4092
4093
static ccv_cnnp_model_t* _ccv_cnnp_scaled_dot_product_attention_copy(const ccv_cnnp_model_t* const super, void* const context)
4094
0
{
4095
0
  const ccv_cnnp_model_scaled_dot_product_attention_t* const self = (const ccv_cnnp_model_scaled_dot_product_attention_t*)super;
4096
0
  return ccv_cnnp_scaled_dot_product_attention(self->scale, self->is_causal, self->has_attn_mask, self->flags, self->fused_unify_head_weights, self->no_bias, self->super.is_trainable, self->super.name);
4097
0
}
4098
4099
// MARK - Debug Layer
4100
4101
typedef struct {
4102
  ccv_cnnp_model_t super;
4103
  ccv_nnc_tensor_symbol_t output;
4104
  ccv_cnnp_model_debug_f debugger;
4105
  ccv_cnnp_model_debug_context_deinit_f debug_deinit;
4106
  ccv_cnnp_model_debug_context_copy_f debug_copy;
4107
  void* debug_context;
4108
} ccv_cnnp_model_debug_t;
4109
4110
static int _ccv_cnnp_debug_exec(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
4111
1
{
4112
1
  if (cmd.cmd == CCV_NNC_CUSTOM_BACKWARD)
4113
0
  {
4114
0
    assert(0 && "don't support debug backward pass yet");
4115
0
  }
4116
1
  ccv_cnnp_model_debug_t* const self = (ccv_cnnp_model_debug_t*)cmd.data;
4117
1
  self->debugger(inputs, input_size, stream_context, self->debug_context);
4118
1
  return CCV_NNC_EXEC_SUCCESS;
4119
1
}
4120
4121
static ccv_nnc_cmd_vtab_t ccv_cnnp_debug_exec_isa = {
4122
  .exec = _ccv_cnnp_debug_exec
4123
};
4124
4125
static void _ccv_cnnp_debug_build(ccv_cnnp_model_t* const self, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
4126
1
{
4127
1
  PRINT(CCV_CLI_VERBOSE, "[cnnp_debug_build] -\n");
4128
1
  assert(input_size >= 1);
4129
1
  assert(output_size == 1);
4130
1
  ccv_nnc_tensor_symbol_t to = ccv_nnc_tensor_symbol_alias_to(graph, inputs[0]);
4131
1
  ccv_nnc_tensor_param_t output_params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
4132
1
  if (to.d == CCV_NNC_NO_TENSOR_SYMBOL) // If we are not reshape an alias, it is straightforward.
4133
1
  {
4134
1
    int ofs[CCV_NNC_MAX_DIM_ALLOC] = {0};
4135
1
    int stride[CCV_NNC_MAX_DIM_ALLOC];
4136
1
    ccv_nnc_tensor_get_stride(output_params.dim, stride);
4137
1
    outputs[0] = ccv_nnc_tensor_symbol_alias_new(graph, inputs[0], ofs, stride, output_params, 0);
4138
1
  } else {
4139
0
    int old_ofs[CCV_NNC_MAX_DIM_ALLOC];
4140
0
    int old_stride[CCV_NNC_MAX_DIM_ALLOC];
4141
0
    ccv_nnc_tensor_symbol_alias_params(graph, inputs[0], old_ofs, old_stride);
4142
0
    outputs[0] = ccv_nnc_tensor_symbol_alias_new(graph, to, old_ofs, old_stride, output_params, 0);
4143
0
  }
4144
1
  ccv_nnc_cmd_t cmd = ccv_nnc_cmd(CCV_NNC_CUSTOM_FORWARD, (ccv_nnc_cmd_vtab_t*)&ccv_cnnp_debug_exec_isa, (ccv_nnc_cmd_param_t){}, 0);
4145
1
  cmd.data = self;
4146
1
  ccv_nnc_graph_exec_symbol_t make_debug = ccv_nnc_graph_exec_symbol_new(graph, cmd, inputs, input_size, outputs, 1, "debug");
4147
  // Disable any optimizations.
4148
1
  ccv_nnc_graph_exec_symbol_set_flags(graph, make_debug, CCV_NNC_GRAPH_EXEC_DISABLE_OPT);
4149
1
}
4150
4151
static void _ccv_cnnp_debug_deinit(ccv_cnnp_model_t* const super)
4152
1
{
4153
1
  const ccv_cnnp_model_debug_t* const self = (const ccv_cnnp_model_debug_t*)super;
4154
1
  if (self->debug_deinit && 
self->debug_context0
)
4155
0
    self->debug_deinit(self->debug_context);
4156
1
}
4157
4158
static ccv_cnnp_model_t* _ccv_cnnp_debug_copy(const ccv_cnnp_model_t* const super, void* const context);
4159
4160
static const ccv_cnnp_model_vtab_t ccv_cnnp_debug_isa = {
4161
  .build = _ccv_cnnp_debug_build,
4162
  .deinit = _ccv_cnnp_debug_deinit,
4163
  .copy = _ccv_cnnp_debug_copy,
4164
};
4165
4166
ccv_cnnp_model_t* ccv_cnnp_debug(ccv_cnnp_model_debug_f func, void* const context, ccv_cnnp_model_debug_context_deinit_f deinit, ccv_cnnp_model_debug_context_copy_f copy, const char* const name)
4167
1
{
4168
1
  ccv_cnnp_model_debug_t* const model_debug = (ccv_cnnp_model_debug_t*)cccalloc(1, sizeof(ccv_cnnp_model_debug_t));
4169
1
  model_debug->super.isa = &ccv_cnnp_debug_isa;
4170
1
  model_debug->super.input_size = 0;
4171
1
  model_debug->super.outputs = &model_debug->output;
4172
1
  model_debug->super.output_size = 1;
4173
1
  model_debug->debugger = func;
4174
1
  model_debug->debug_context = context;
4175
1
  model_debug->debug_deinit = deinit;
4176
1
  model_debug->debug_copy = copy;
4177
1
  ccv_cnnp_model_copy_name(&model_debug->super, name);
4178
1
  return (ccv_cnnp_model_t*)model_debug;
4179
1
}
4180
4181
static ccv_cnnp_model_t* _ccv_cnnp_debug_copy(const ccv_cnnp_model_t* const super, void* const context)
4182
0
{
4183
0
  const ccv_cnnp_model_debug_t* const self = (const ccv_cnnp_model_debug_t*)super;
4184
0
  void* debug_context = self->debug_context;
4185
0
  if (self->debug_copy && self->debug_context)
4186
0
    debug_context = self->debug_copy(self->debug_context);
4187
0
  return ccv_cnnp_debug(self->debugger, debug_context, self->debug_deinit, self->debug_copy, self->super.name);
4188
0
}
4189
4190
/// MARK - Sort layer.
4191
4192
typedef struct {
4193
  ccv_cnnp_model_t super;
4194
  ccv_nnc_tensor_symbol_t outputs[2];
4195
  int along_axis;
4196
  int descending;
4197
} ccv_cnnp_model_sort_t;
4198
4199
static void _ccv_cnnp_sort_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
4200
1
{
4201
1
  ccv_cnnp_model_sort_t* const self = (ccv_cnnp_model_sort_t*)super;
4202
1
  PRINT(CCV_CLI_VERBOSE, "[cnnp_sort_build] - along_axis: %d, descending: %d\n", self->along_axis, self->descending);
4203
1
  ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
4204
1
  assert(output_size == 2);
4205
1
  outputs[0] = ccv_nnc_tensor_symbol_new(graph, params, 0);
4206
1
  params.datatype = CCV_32S;
4207
1
  outputs[1] = ccv_nnc_tensor_symbol_new(graph, params, 0);
4208
1
  ccv_nnc_graph_exec_symbol_new(graph, CMD_SORT_FORWARD(self->along_axis, self->descending), inputs, input_size, outputs, output_size, "sort");
4209
1
}
4210
4211
static ccv_cnnp_model_t* _ccv_cnnp_sort_copy(const ccv_cnnp_model_t* const self, void* const context);
4212
4213
static const ccv_cnnp_model_vtab_t ccv_cnnp_sort_isa = {
4214
  .build = _ccv_cnnp_sort_build,
4215
  .copy = _ccv_cnnp_sort_copy,
4216
};
4217
4218
ccv_cnnp_model_t* ccv_cnnp_sort(const int along_axis, const int descending, const char* const name)
4219
1
{
4220
1
  ccv_cnnp_model_sort_t* const model_sort = (ccv_cnnp_model_sort_t*)cccalloc(1, sizeof(ccv_cnnp_model_sort_t));
4221
1
  model_sort->super.isa = &ccv_cnnp_sort_isa;
4222
1
  model_sort->super.input_size = 0;
4223
1
  model_sort->super.outputs = model_sort->outputs;
4224
1
  model_sort->super.output_size = 2;
4225
1
  model_sort->along_axis = along_axis;
4226
1
  model_sort->descending = descending;
4227
1
  ccv_cnnp_model_copy_name(&model_sort->super, name);
4228
1
  return (ccv_cnnp_model_t*)model_sort;
4229
1
}
4230
4231
static ccv_cnnp_model_t* _ccv_cnnp_sort_copy(const ccv_cnnp_model_t* const super, void* const context)
4232
0
{
4233
0
  ccv_cnnp_model_sort_t* const self = (ccv_cnnp_model_sort_t*)super;
4234
0
  return ccv_cnnp_sort(self->along_axis, self->descending, self->super.name);
4235
0
}
4236
4237
/// MARK - Partition layer.
4238
4239
typedef struct {
4240
  ccv_cnnp_model_t super;
4241
  ccv_nnc_tensor_symbol_t outputs[2];
4242
  int kth;
4243
  int along_axis;
4244
  int descending;
4245
} ccv_cnnp_model_partition_t;
4246
4247
static void _ccv_cnnp_partition_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
4248
1
{
4249
1
  ccv_cnnp_model_partition_t* const self = (ccv_cnnp_model_partition_t*)super;
4250
1
  PRINT(CCV_CLI_VERBOSE, "[cnnp_partition_build] - kth: %d, along_axis: %d, descending: %d\n", self->kth, self->along_axis, self->descending);
4251
1
  ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
4252
1
  assert(output_size == 2);
4253
1
  if (self->kth > 0)
4254
1
    params.dim[self->along_axis] = self->kth;
4255
1
  outputs[0] = ccv_nnc_tensor_symbol_new(graph, params, 0);
4256
1
  params.datatype = CCV_32S;
4257
1
  outputs[1] = ccv_nnc_tensor_symbol_new(graph, params, 0);
4258
1
  ccv_nnc_graph_exec_symbol_new(graph, CMD_PARTITION_FORWARD(self->kth, self->along_axis, self->descending), inputs, input_size, outputs, output_size, "partition");
4259
1
}
4260
4261
static ccv_cnnp_model_t* _ccv_cnnp_partition_copy(const ccv_cnnp_model_t* const self, void* const context);
4262
4263
static const ccv_cnnp_model_vtab_t ccv_cnnp_partition_isa = {
4264
  .build = _ccv_cnnp_partition_build,
4265
  .copy = _ccv_cnnp_partition_copy,
4266
};
4267
4268
ccv_cnnp_model_t* ccv_cnnp_partition(const int kth, const int along_axis, const int descending, const char* const name)
4269
1
{
4270
1
  ccv_cnnp_model_partition_t* const model_partition = (ccv_cnnp_model_partition_t*)cccalloc(1, sizeof(ccv_cnnp_model_partition_t));
4271
1
  model_partition->super.isa = &ccv_cnnp_partition_isa;
4272
1
  model_partition->super.input_size = 0;
4273
1
  model_partition->super.outputs = model_partition->outputs;
4274
1
  model_partition->super.output_size = 2;
4275
1
  model_partition->kth = kth;
4276
1
  model_partition->along_axis = along_axis;
4277
1
  model_partition->descending = descending;
4278
1
  ccv_cnnp_model_copy_name(&model_partition->super, name);
4279
1
  return (ccv_cnnp_model_t*)model_partition;
4280
1
}
4281
4282
static ccv_cnnp_model_t* _ccv_cnnp_partition_copy(const ccv_cnnp_model_t* const super, void* const context)
4283
0
{
4284
0
  ccv_cnnp_model_partition_t* const self = (ccv_cnnp_model_partition_t*)super;
4285
0
  return ccv_cnnp_partition(self->kth, self->along_axis, self->descending, self->super.name);
4286
0
}
4287
4288
/// MARK - Unique consecutive layer.
4289
4290
typedef struct {
4291
  ccv_cnnp_model_t super;
4292
  ccv_nnc_tensor_symbol_t outputs[2];
4293
  int bincount;
4294
} ccv_cnnp_model_unique_consecutive_t;
4295
4296
static void _ccv_cnnp_unique_consecutive_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
4297
1
{
4298
1
  ccv_cnnp_model_unique_consecutive_t* const self = (ccv_cnnp_model_unique_consecutive_t*)super;
4299
1
  PRINT(CCV_CLI_VERBOSE, "[cnnp_unique_consecutive_build] - bincount: %d\n", self->bincount);
4300
1
  ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
4301
1
  assert(output_size == 2);
4302
1
  if (self->bincount > 0)
4303
1
    params.dim[0] = ccv_min(params.dim[0], self->bincount);
4304
1
  outputs[0] = ccv_nnc_tensor_symbol_new(graph, params, 0);
4305
1
  params.datatype = CCV_32S;
4306
1
  outputs[1] = ccv_nnc_tensor_symbol_new(graph, params, 0);
4307
1
  ccv_nnc_graph_exec_symbol_new(graph, CMD_UNIQUE_CONSECUTIVE_FORWARD(self->bincount), inputs, input_size, outputs, output_size, "unique_consecutive");
4308
1
}
4309
4310
static ccv_cnnp_model_t* _ccv_cnnp_unique_consecutive_copy(const ccv_cnnp_model_t* const self, void* const context);
4311
4312
static const ccv_cnnp_model_vtab_t ccv_cnnp_unique_consecutive_isa = {
4313
  .build = _ccv_cnnp_unique_consecutive_build,
4314
  .copy = _ccv_cnnp_unique_consecutive_copy,
4315
};
4316
4317
ccv_cnnp_model_t* ccv_cnnp_unique_consecutive(const int bincount, const char* const name)
4318
1
{
4319
1
  ccv_cnnp_model_unique_consecutive_t* const model_unique_consecutive = (ccv_cnnp_model_unique_consecutive_t*)cccalloc(1, sizeof(ccv_cnnp_model_unique_consecutive_t));
4320
1
  model_unique_consecutive->super.isa = &ccv_cnnp_unique_consecutive_isa;
4321
1
  model_unique_consecutive->super.input_size = 0;
4322
1
  model_unique_consecutive->super.outputs = model_unique_consecutive->outputs;
4323
1
  model_unique_consecutive->super.output_size = 2;
4324
1
  model_unique_consecutive->bincount = bincount;
4325
1
  ccv_cnnp_model_copy_name(&model_unique_consecutive->super, name);
4326
1
  return (ccv_cnnp_model_t*)model_unique_consecutive;
4327
1
}
4328
4329
static ccv_cnnp_model_t* _ccv_cnnp_unique_consecutive_copy(const ccv_cnnp_model_t* const super, void* const context)
4330
0
{
4331
0
  ccv_cnnp_model_unique_consecutive_t* const self = (ccv_cnnp_model_unique_consecutive_t*)super;
4332
0
  return ccv_cnnp_unique_consecutive(self->bincount, self->super.name);
4333
0
}
4334
4335
/// MARK - Scatter add layer.
4336
4337
typedef struct {
4338
  ccv_cnnp_model_t super;
4339
  ccv_nnc_tensor_symbol_t output;
4340
  int bincount;
4341
} ccv_cnnp_model_scatter_add_t;
4342
4343
static void _ccv_cnnp_scatter_add_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
4344
1
{
4345
1
  ccv_cnnp_model_scatter_add_t* const self = (ccv_cnnp_model_scatter_add_t*)super;
4346
1
  PRINT(CCV_CLI_VERBOSE, "[cnnp_scatter_add_build] - bincount: %d\n", self->bincount);
4347
1
  ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
4348
1
  assert(output_size == 1);
4349
1
  assert(self->bincount > 0);
4350
1
  params.dim[0] = self->bincount;
4351
1
  outputs[0] = ccv_nnc_tensor_symbol_new(graph, params, 0);
4352
1
  ccv_nnc_graph_exec_symbol_new(graph, CMD_SCATTER_ADD_FORWARD(self->bincount), inputs, input_size, outputs, output_size, "scatter_add");
4353
1
}
4354
4355
static ccv_cnnp_model_t* _ccv_cnnp_scatter_add_copy(const ccv_cnnp_model_t* const self, void* const context);
4356
4357
static const ccv_cnnp_model_vtab_t ccv_cnnp_scatter_add_isa = {
4358
  .build = _ccv_cnnp_scatter_add_build,
4359
  .copy = _ccv_cnnp_scatter_add_copy,
4360
};
4361
4362
ccv_cnnp_model_t* ccv_cnnp_scatter_add(const int bincount, const char* const name)
4363
1
{
4364
1
  assert(bincount > 0);
4365
1
  ccv_cnnp_model_scatter_add_t* const model_scatter_add = (ccv_cnnp_model_scatter_add_t*)cccalloc(1, sizeof(ccv_cnnp_model_scatter_add_t));
4366
1
  model_scatter_add->super.isa = &ccv_cnnp_scatter_add_isa;
4367
1
  model_scatter_add->super.input_size = 0;
4368
1
  model_scatter_add->super.outputs = &model_scatter_add->output;
4369
1
  model_scatter_add->super.output_size = 1;
4370
1
  model_scatter_add->bincount = bincount;
4371
1
  ccv_cnnp_model_copy_name(&model_scatter_add->super, name);
4372
1
  return (ccv_cnnp_model_t*)model_scatter_add;
4373
1
}
4374
4375
static ccv_cnnp_model_t* _ccv_cnnp_scatter_add_copy(const ccv_cnnp_model_t* const super, void* const context)
4376
0
{
4377
0
  ccv_cnnp_model_scatter_add_t* const self = (ccv_cnnp_model_scatter_add_t*)super;
4378
0
  return ccv_cnnp_scatter_add(self->bincount, self->super.name);
4379
0
}
4380
4381
// MARK - Segmented Dense Layer
4382
4383
typedef struct {
4384
  ccv_cnnp_model_t super;
4385
  ccv_nnc_tensor_symbol_t output;
4386
  ccv_nnc_tensor_symbol_t weights;
4387
  ccv_nnc_tensor_symbol_t bias;
4388
  int segments;
4389
  int count;
4390
  int no_bias;
4391
  int flags;
4392
} ccv_cnnp_model_segmented_dense_t;
4393
4394
static void _ccv_cnnp_segmented_dense_build(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size)
4395
2
{
4396
2
  ccv_cnnp_model_segmented_dense_t* const self = (ccv_cnnp_model_segmented_dense_t*)super;
4397
2
  PRINT(CCV_CLI_VERBOSE, "[cnnp_segmented_dense_build] -\n");
4398
2
  assert(input_size == 3);
4399
2
  assert(output_size == 1);
4400
2
  const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
4401
2
  const ccv_nnc_tensor_param_t indices_params = ccv_nnc_tensor_symbol_params(graph, inputs[1]);
4402
2
  const ccv_nnc_tensor_param_t counts_params = ccv_nnc_tensor_symbol_params(graph, inputs[2]);
4403
2
  ccv_nnc_tensor_param_t weights_params = params;
4404
2
  memset(weights_params.dim, 0, sizeof(weights_params.dim));
4405
2
  weights_params.dim[0] = self->segments;
4406
2
  weights_params.dim[1] = self->count;
4407
2
  weights_params.dim[2] = params.dim[ccv_nnc_tensor_nd(params.dim) - 1];
4408
2
  if (!self->weights.graph)
4409
2
    self->weights = ccv_nnc_tensor_symbol_new(graph, weights_params, "weights");
4410
2
  assert(self->weights.graph == graph);
4411
2
  ccv_nnc_tensor_param_t bias_params = params;
4412
2
  memset(bias_params.dim, 0, sizeof(bias_params.dim));
4413
2
  bias_params.dim[0] = self->segments;
4414
2
  bias_params.dim[1] = self->count;
4415
2
  ccv_nnc_cmd_t cmd = {0};
4416
2
  cmd.cmd = CCV_NNC_SEGMENTED_GEMM_FORWARD;
4417
2
  cmd.info.blas.a[0] = 1;
4418
2
  cmd.info.blas.a[1] = 1;
4419
2
  cmd.info.blas.transpose_b[0] = 1;
4420
2
  cmd.info.blas.transpose_b[1] = 2;
4421
2
  cmd.info.blas.flags = self->flags;
4422
2
  ccv_nnc_tensor_param_t output_params;
4423
2
  ccv_nnc_hint_tensor_auto(cmd, (ccv_nnc_tensor_param_t []){
4424
2
      params, indices_params, counts_params,
4425
2
      weights_params,
4426
2
      bias_params,
4427
2
    }, 5, ccv_nnc_no_hint, &output_params, 1);
4428
2
  const ccv_nnc_tensor_symbol_t output = ccv_nnc_tensor_symbol_new(graph, output_params, 0);
4429
2
  if (self->no_bias)
4430
1
    ccv_nnc_graph_exec_symbol_new(graph, cmd, TENSOR_SYMBOL_LIST(inputs[0], inputs[1], inputs[2], self->weights), TENSOR_SYMBOL_LIST(output), "segmented_dense");
4431
1
  else {
4432
1
    if (!self->bias.graph)
4433
1
      self->bias = ccv_nnc_tensor_symbol_new(graph, bias_params, "bias");
4434
1
    ccv_nnc_graph_exec_symbol_new(graph, cmd, TENSOR_SYMBOL_LIST(inputs[0], inputs[1], inputs[2], self->weights, self->bias), TENSOR_SYMBOL_LIST(output), "segmented_dense");
4435
1
  }
4436
2
  outputs[0] = output;
4437
2
}
4438
4439
static void _ccv_cnnp_segmented_dense_init_states(ccv_cnnp_model_t* const super, ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_state_initializer_f initializer, void* const context)
4440
0
{
4441
0
  ccv_cnnp_model_segmented_dense_t* const self = (ccv_cnnp_model_segmented_dense_t*)super;
4442
0
  const ccv_nnc_tensor_param_t weight_params = ccv_nnc_tensor_symbol_params(graph, self->weights);
4443
0
  const int c = weight_params.dim[1];
4444
0
  const float std = sqrtf(2) / sqrtf(c);
4445
0
  const float bound = sqrtf(3) * std;
4446
0
  initializer(context, CMD_RANDOM_UNIFORM_FORWARD(-bound, bound), ccv_nnc_no_hint, 0, 0, self->weights);
4447
0
  if (self->bias.graph)
4448
0
    initializer(context, CMD_SET_FORWARD(0), ccv_nnc_no_hint, 0, 0, self->bias);
4449
0
}
4450
4451
static void _ccv_cnnp_segmented_dense_add_to_parameter(ccv_cnnp_model_t* const super, const ccv_cnnp_add_to_array_f add_to_array, void* const parameters, const int is_trainable)
4452
2
{
4453
2
  ccv_cnnp_model_segmented_dense_t* const self = (ccv_cnnp_model_segmented_dense_t*)super;
4454
2
  add_to_array(parameters, self->weights, is_trainable);
4455
2
  if (self->bias.graph)
4456
1
    add_to_array(parameters, self->bias, is_trainable);
4457
2
}
4458
4459
static ccv_cnnp_model_t* _ccv_cnnp_segmented_dense_copy(const ccv_cnnp_model_t* const super, void* const context);
4460
4461
static const ccv_cnnp_model_vtab_t ccv_cnnp_segmented_dense_isa = {
4462
  .build = _ccv_cnnp_segmented_dense_build,
4463
  .init_states = _ccv_cnnp_segmented_dense_init_states,
4464
  .add_to_parameter = _ccv_cnnp_segmented_dense_add_to_parameter,
4465
  .copy = _ccv_cnnp_segmented_dense_copy,
4466
};
4467
4468
ccv_cnnp_model_t* ccv_cnnp_segmented_dense(const int segments, const int count, const int no_bias, const int flags, const int is_trainable, const char* const name)
4469
2
{
4470
2
  ccv_cnnp_model_segmented_dense_t* const model_segmented_dense = (ccv_cnnp_model_segmented_dense_t*)cccalloc(1, sizeof(ccv_cnnp_model_segmented_dense_t));
4471
2
  model_segmented_dense->super.isa = &ccv_cnnp_segmented_dense_isa;
4472
2
  model_segmented_dense->super.input_size = 3;
4473
2
  model_segmented_dense->super.outputs = &model_segmented_dense->output;
4474
2
  model_segmented_dense->super.output_size = 1;
4475
2
  model_segmented_dense->super.is_trainable = is_trainable;
4476
2
  ccv_cnnp_model_copy_name(&model_segmented_dense->super, name);
4477
2
  model_segmented_dense->weights.d = CCV_NNC_NO_TENSOR_SYMBOL;
4478
2
  model_segmented_dense->weights.graph = 0;
4479
2
  model_segmented_dense->bias.d = CCV_NNC_NO_TENSOR_SYMBOL;
4480
2
  model_segmented_dense->bias.graph = 0;
4481
2
  model_segmented_dense->segments = segments;
4482
2
  model_segmented_dense->count = count;
4483
2
  model_segmented_dense->no_bias = no_bias;
4484
2
  model_segmented_dense->flags = flags;
4485
2
  return (ccv_cnnp_model_t*)model_segmented_dense;
4486
2
}
4487
4488
static ccv_cnnp_model_t* _ccv_cnnp_segmented_dense_copy(const ccv_cnnp_model_t* const super, void* const context)
4489
0
{
4490
0
  const ccv_cnnp_model_segmented_dense_t* const self = (const ccv_cnnp_model_segmented_dense_t*)super;
4491
0
  return ccv_cnnp_segmented_dense(self->segments, self->count, self->no_bias, self->flags, self->super.is_trainable, self->super.name);
4492
0
}