Coverage Report

Created: 2025-05-07 17:36

/home/liu/actions-runner/_work/ccv/ccv/lib/nnc/ccv_nnc.h
/**********************************************************
 * C-based/Cached/Core Computer Vision Library
 * Liu Liu, 2010-02-01
 **********************************************************/

/**********************************************************
 * CCV - Neural Network Collection
 **********************************************************/

#ifndef GUARD_ccv_nnc_h
#define GUARD_ccv_nnc_h

#include "ccv.h"
#include <stddef.h>

// These are generated by cmd/build-cmd.rb
#include "cmd/ccv_nnc_cmd.h"
#include "cmd/ccv_nnc_backend.h"

/**
 * @defgroup level_0 Level-0 API
 * @{
 */

/**
 * Initialize the library.
 */
void ccv_nnc_init(void);

enum {
  CCV_NNC_DISABLE_MIXED_MPS_GEMM = 0x1,
  CCV_NNC_DISABLE_MIXED_MPS_SOFTMAX = 0x2,
  CCV_NNC_DISABLE_MMAP_MTL_BUFFER = 0x4,
  CCV_NNC_DISABLE_METAL_FLASH_ATTENTION = 0x8,
  CCV_NNC_DISABLE_MFA_GEMM = 0x10,
};
/**
 * Enable a specific system-wide flag.
 */
void ccv_nnc_enable_flag(uint64_t flag);
/**
 * Disable a specific system-wide flag.
 */
void ccv_nnc_disable_flag(uint64_t flag);
/**
 * Get the system-wide flags to check.
 */
uint64_t ccv_nnc_flags(void);

/** @} */
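
// Example (not part of the header): a minimal sketch of how the Level-0 flag
// API above composes. Everything used here is declared in this file.
static void example_flags(void)
{
  ccv_nnc_init();
  ccv_nnc_enable_flag(CCV_NNC_DISABLE_MIXED_MPS_GEMM);
  if (ccv_nnc_flags() & CCV_NNC_DISABLE_MIXED_MPS_GEMM) // check it took effect
    ccv_nnc_disable_flag(CCV_NNC_DISABLE_MIXED_MPS_GEMM); // and turn it back off
}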

/**
 * @defgroup level_1 Level-1 API
 * @{
 */

/**
 * @defgroup level_1_cmd Commands
 * @{
 */
enum {
  // Attributes that enable symbolic graph simplification
  CCV_NNC_CMD_ATTR_PASSTHROUGH  = 0x01, /**< This doesn't compute anything, but passes the first n tensors to the output (useful for backprop that is an identity). */
  CCV_NNC_CMD_ATTR_OUTPUT_ONES  = 0x02, /**< All the output tensors are 1s (unit). */
  CCV_NNC_CMD_ATTR_NULL_IS_ONES = 0x04, /**< Accept nullptr inputs as if they were tensors filled with 1s (unit). */
};

// Flags passed into cmd when executing.
enum {
  CCV_NNC_ACCUMULATE_OUTPUT = 0x01, /**< Enable accumulating outputs (unsupported). */
  CCV_NNC_ZERO_MEMORY_ALLOC = 0x02, /**< Don't allocate any extra memory for this operation. */
};

enum {
  CCV_NNC_EXEC_SUCCESS   = 0, /**< Successfully executed the command. */
  CCV_NNC_EXEC_INVALID   = -1, /**< Invalid inputs. */
  CCV_NNC_EXEC_NO_KERNEL = -2, /**< No kernel available for a given command / backend. */
  CCV_NNC_EXEC_OOM       = -3, /**< Out of memory error. */
};

enum {
  CCV_NNC_MSE_REDUCE_MEAN = 0, /**< Reduce with mean when computing MSE loss. */
  CCV_NNC_MSE_REDUCE_SUM = 1, /**< Reduce with sum when computing MSE loss. */
};

enum {
  CCV_NNC_HISTOGRAM_EVEN = 0, /**< The bins are evenly distributed from min to max. */
  CCV_NNC_HISTOGRAM_LOGARITHMIC = 1, /**< The bins are distributed following an exponential curve, growing from min to max with the given ratio. */
  CCV_NNC_HISTOGRAM_BINS = 2, /**< The bin boundaries will be supplied, such as [0, 2, 3, 10]. For the result, [-inf, 0, 2, 3, 10, inf] is implied. */
};

enum {
  CCV_NNC_UPSAMPLE_NEAREST = 0, /**< Using the nearest value. */
  CCV_NNC_UPSAMPLE_BILINEAR = 1, /**< Using bilinear interpolation. */
};

enum {
  CCV_NNC_PAD_ZERO = 0, /**< Pad 0s. */
  CCV_NNC_PAD_REPLICATE = 1, /**< Pad by replicating the edge. */
};

enum {
  CCV_NNC_GEMM_32F = 0x1, /**< For GEMM (or similar op), prefer to use FP32 for the accumulator. */
  CCV_NNC_GEMM_32TF = 0x2, /**< For GEMM (or similar op), prefer to use TF32 for the accumulator. */
  CCV_NNC_GEMM_16F = 0x4, /**< For GEMM (or similar op), prefer to use FP16 for the accumulator. */
};

/**
 * Parameters for command.
 */
typedef struct {
  struct {
    int dim[CCV_NNC_MAX_DIM_ALLOC]; /**< [size.dim] The window size for the layer. For a fully connected layer, it is 1 because it is a 1x1 convolutional layer with count of filters. */
  } size;
  union {
    struct {
      int count; /**< [convolution.count] The number of filters for the convolutional layer. */
      int groups; /**< [convolution.groups] The number of groups for the convolutional layer. */
      int dilation[CCV_NNC_MAX_DIM_ALLOC]; /**< [convolution.dilation[]] The dilation factor for the convolutional layer. Defaults to 1. */
    } convolution;
    struct {
      int count; /**< [convolution_transpose.count] The number of filters for the convolutional layer. */
      int groups; /**< [convolution_transpose.groups] The number of groups for the convolutional layer. */
      int dilation[CCV_NNC_MAX_DIM_ALLOC]; /**< [convolution_transpose.dilation[]] The dilation factor for the convolutional layer. Defaults to 1. */
      int output_padding; /**< [convolution_transpose.output_padding] The output padding to resolve ambiguity when treating this as the inverse of convolution. */
    } convolution_transpose;
    struct {
      int hidden_size; /**< [rnn.hidden_size] The number of features in the hidden state h. */
      int proj_size; /**< [rnn.proj_size] The projection size for the hidden state h. */
      int num_layers; /**< [rnn.num_layers] The number of layers for the RNN. */
      int bias; /**< [rnn.bias] If 0, the layer won't use bias weights. */
      int batch_first; /**< [rnn.batch_first] If 1, batch comes before sequence. */
      int bidirectional; /**< [rnn.bidirectional] Enable bidirectional mode of the RNN. */
      float dropout; /**< [rnn.dropout] If non-zero, enable dropout at each layer of the RNN. */
      int is_test; /**< [rnn.is_test] Whether running this kernel in test mode or not. */
    } rnn;
    struct {
      int reserved; /**< [pool.reserved] A reserved field. */
    } pool;
    struct {
      float kappa; /**< [rnorm.kappa] As in b[i] = a[i] / (rnorm.kappa + rnorm.alpha * sum(a, i - rnorm.size / 2, i + rnorm.size / 2)) ^ rnorm.beta */
      float alpha; /**< [rnorm.alpha] See **rnorm.kappa**. */
      float beta; /**< [rnorm.beta] See **rnorm.kappa**. */
    } rnorm;
    struct {
      int axis[CCV_NNC_MAX_DIM_ALLOC]; /**< [bnorm.axis[]] The axes selected to compute mean / variance. */
      int count; /**< [bnorm.count] The number of axes selected. */
      float epsilon; /**< [bnorm.epsilon] The epsilon for the standard deviation. */
      int is_test; /**< [bnorm.is_test] Whether in test mode. */
      float momentum; /**< [bnorm.momentum] running_mean = running_mean * momentum + mean * (1 - momentum). */
    } bnorm;
    struct {
      int axis[CCV_NNC_MAX_DIM_ALLOC]; /**< [lnorm.axis[]] The axes selected to compute mean / variance. */
      int count; /**< [lnorm.count] The number of axes selected. */
      float epsilon; /**< [lnorm.epsilon] The epsilon for the standard deviation. */
      int elementwise_affine; /**< [lnorm.elementwise_affine] Whether it supports scale / bias. */
    } lnorm;
    struct {
      int group_axis; /**< [gnorm.group_axis] The axis selected to be grouped. */
      int reduce_axis[CCV_NNC_MAX_DIM_ALLOC]; /**< [gnorm.reduce_axis[]] The other axes selected to compute mean / variance. */
      int reduce_count; /**< [gnorm.reduce_count] The number of other axes selected. */
      int groups; /**< [gnorm.groups] The number of groups that separate the channels. */
      float epsilon; /**< [gnorm.epsilon] The epsilon for the standard deviation. */
      int elementwise_affine; /**< [gnorm.elementwise_affine] Whether it supports scale / bias. */
    } gnorm;
    struct {
      int axis[CCV_NNC_MAX_DIM_ALLOC]; /**< [rmsnorm.axis[]] The axes selected to compute mean / variance. */
      int count; /**< [rmsnorm.count] The number of axes selected. */
      float epsilon; /**< [rmsnorm.epsilon] The epsilon for the standard deviation. */
    } rmsnorm;
    struct {
      int nesterov; /**< [sgd.nesterov] Nesterov accelerated gradient. */
      float rate; /**< [sgd.rate] The learning rate. */
      float scale; /**< [sgd.scale] The scale to be applied to the gradient before doing any minimization. */
      float decay; /**< [sgd.decay] This is the weight decay parameter, which represents L2 regularization applied after momentum. */
      float momentum; /**< [sgd.momentum] For SGD, this follows http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf. */
      float dampening; /**< [sgd.dampening] This usually == momentum, however, it can be changed. */
    } sgd;
    struct {
      int step; /**< [adam.step] Step t in the adam optimizer. */
      float rate; /**< [adam.rate] The learning rate. */
      float scale; /**< [adam.scale] The scale to be applied to the gradient before doing any minimization. */
      float beta1; /**< [adam.beta1] The beta1 hyper-parameter in the adam optimizer. */
      float beta2; /**< [adam.beta2] The beta2 hyper-parameter in the adam optimizer. */
      float decay; /**< [adam.decay] This is the weight decay parameter, which represents L2 regularization. */
      float epsilon; /**< [adam.epsilon] The epsilon for the standard deviation. */
      int amsgrad; /**< [adam.amsgrad] Whether to use amsgrad. */
    } adam;
    struct {
      int step; /**< [lamb.step] Step t in the lamb optimizer. */
      float rate; /**< [lamb.rate] The learning rate. */
      float scale; /**< [lamb.scale] The scale to be applied to the gradient before doing any minimization. */
      float beta1; /**< [lamb.beta1] The beta1 hyper-parameter in the lamb optimizer. */
      float beta2; /**< [lamb.beta2] The beta2 hyper-parameter in the lamb optimizer. */
      float decay; /**< [lamb.decay] This is the weight decay parameter, which represents L2 regularization. */
      float epsilon; /**< [lamb.epsilon] The epsilon for the standard deviation. */
    } lamb;
    struct {
      float rate; /**< [rmsprop.rate] The learning rate. */
      float scale; /**< [rmsprop.scale] The scale to be applied to the gradient before doing any minimization. */
      float decay; /**< [rmsprop.decay] This is the weight decay parameter, which represents L2 regularization applied after momentum. */
      float alpha; /**< [rmsprop.alpha] The alpha hyper-parameter. */
      float momentum; /**< [rmsprop.momentum] The momentum hyper-parameter. */
      float epsilon; /**< [rmsprop.epsilon] The epsilon for the standard deviation. */
    } rmsprop;
    struct {
      int transpose_a[2]; /**< [blas.transpose_a[2]] The axes we'd like to transpose for input a. */
      int transpose_b[2]; /**< [blas.transpose_b[2]] The axes we'd like to transpose for input b. */
      float a[3]; /**< [blas.a[3]] BLAS scalars. */
      int flags; /**< [blas.flags] Auxiliary flags to enable certain features for the BLAS operation. */
    } blas;
    struct {
      float trim0; /**< [label_smoothing.trim0] The smoothed label for 0. */
      float trim1; /**< [label_smoothing.trim1] The smoothed label for 1. */
    } label_smoothing;
    struct {
      float pos_weight; /**< [binary_crossentropy.pos_weight] The pos_weight on the loss: -(pos_weight * y * log(x) + (1 - y) * log(1 - x)) */
    } binary_crossentropy;
    struct {
      float beta; /**< [smooth_l1.beta] The beta on the smooth L1 loss (or Huber loss). */
    } smooth_l1;
    struct {
      int reduce_op; /**< [mse.reduce_op] Whether to reduce with mean or with sum. */
    } mse;
    struct {
      int tanh; /**< [gelu.tanh] Use the tanh approximation. */
    } gelu;
    struct {
      int axis[CCV_NNC_MAX_DIM_ALLOC]; /**< [reduce.axis[]] The axes selected to reduce. */
      int count; /**< [reduce.count] The number of axes selected. */
    } reduce;
    struct {
      int axis[2]; /**< [transpose.axis[2]] The axes we'd like to transpose for the input. */
    } transpose;
    struct {
      float p; /**< [dropout.p] Dropout probability. */
      int entirety; /**< [dropout.entirety] Drop the whole layer with the given probability. */
    } dropout;
    struct {
      int type; /**< [upsample.type] 0 - nearest, 1 - bilinear. */
      float width_scale; /**< [upsample.width_scale] The scale for the width parameter. It is between 1 and 2 at the moment. */
      float height_scale; /**< [upsample.height_scale] The scale for the height parameter. It is between 1 and 2 at the moment. */
      int align_corners; /**< [upsample.align_corners] Whether to scale to align corners. Thus, for 0...1, if false, it will align to -0.25, 0.25, 0.75, 1.25; if true, it will align to 0, 0.3333, 0.6666, 1.0. */
    } upsample;
    struct {
      float min; /**< [clamp.min] The minimum; NaN means no min. */
      float max; /**< [clamp.max] The maximum; NaN means no max. */
    } clamp;
    struct {
      float iou_threshold; /**< [nms.iou_threshold] A threshold between 0 and 1 for the IoU. */
    } nms;
    struct {
      int type; /**< [histogram.type] The type; can be even, logarithmic, or bins. */
      int bins; /**< [histogram.bins] The number of bins, only applied to even. */
      float min; /**< [histogram.min] The minimal number, for even or logarithmic. */
      float max; /**< [histogram.max] The maximal number, for even or logarithmic. */
      float rate; /**< [histogram.rate] The rate from min to max, only applied to logarithmic. */
    } histogram;
    struct {
      float negative_slope; /**< [leaky_relu.negative_slope] The negative slope to be applied when activation < 0. */
    } leaky_relu;
    struct {
      float scale; /**< [scaled_dot_product_attention.scale] The scale we multiply to the dot product of Q & K. */
      int is_causal; /**< [scaled_dot_product_attention.is_causal] Whether we have a causal matrix associated with the attention. The attention mask will be cut to triangular if provided. */
      int flags; /**< [scaled_dot_product_attention.flags] Which precision is preferred for the accumulator, FP16 or FP32. */
      int deterministic; /**< [scaled_dot_product_attention.deterministic] Whether we want the attention computation to be deterministic (CUDA only). */
    } scaled_dot_product_attention;
    struct {
      int type; /**< [pad.type] The type of pad; can be either zeros or replicating the edge. */
      int end[CCV_NNC_MAX_DIM_ALLOC]; /**< [pad.end] Works together with size.dim. size.dim is how much to add at the beginning and pad.end is how much to add at the end. */
    } pad;
    struct {
      int along_axis; /**< [sort.along_axis] Which axis to sort along. */
      int descending; /**< [sort.descending] Whether to sort in descending order. */
    } sort;
    struct {
      int kth; /**< [partition.kth] How many items to retain after partition. */
      int along_axis; /**< [partition.along_axis] Which axis to partition along. */
      int descending; /**< [partition.descending] Whether to partition in descending order. */
    } partition;
    struct {
      int bincount; /**< [unique_consecutive.bincount] Potentially how many unique items there will be; 0 if unknown. */
    } unique_consecutive;
    struct {
      int bincount; /**< [scatter_add.bincount] Potentially how many unique items there will be; 0 if unknown. */
    } scatter_add;
    void* userdata;
  };
} ccv_nnc_cmd_param_t;
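
// Example (not part of the header): a sketch of filling in ccv_nnc_cmd_param_t
// for a 3x3 convolution with 128 filters using C99 designated initializers.
// The field names come from the struct above; the exact layout of size.dim is
// command-specific, so treat the values here as illustrative only.
static const ccv_nnc_cmd_param_t example_conv_params = {
  .size = {
    .dim = {3, 3, 128}, // the filter window; remaining entries stay 0
  },
  .convolution = {
    .count = 128, // number of filters
    .groups = 1,  // a regular (non-grouped) convolution
  },
};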

/*
 * Hints for command.
 */
typedef struct {
  struct {
    int dim[CCV_NNC_MAX_DIM_ALLOC]; /**< Stride for each dimension. */
  } stride;
  struct {
    int begin[CCV_NNC_MAX_DIM_ALLOC]; /**< Padding at the beginning of a dimension. */
    int end[CCV_NNC_MAX_DIM_ALLOC]; /**< Padding at the end of a dimension. */
  } border;
} ccv_nnc_hint_t;
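
// Example (not part of the header): a sketch of a hint describing stride-2
// execution with 1 unit of padding on each side, per the struct above.
static const ccv_nnc_hint_t example_hint = {
  .stride = {
    .dim = {2, 2}, // stride 2 in both spatial dimensions
  },
  .border = {
    .begin = {1, 1}, // 1 unit of padding before each dimension
    .end = {1, 1},   // and 1 unit after
  },
};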

/**
 * Opaque pointer to a stream object.
 */
typedef struct ccv_nnc_stream_context_s ccv_nnc_stream_context_t;

typedef struct ccv_nnc_cmd_vtab_s ccv_nnc_cmd_vtab_t;

typedef struct ccv_nnc_cmd_s {
  uint32_t cmd; /**< The identifier for the command. */
  uint32_t backend; /**< The identifier for the backend. */
  int algorithm; /**< The algorithm selector (as defined by the backend). */
  ccv_nnc_cmd_param_t info; /**< The command parameters. */
  /**
   * This is for type CCV_NNC_CUSTOM_FORWARD / CCV_NNC_CUSTOM_BACKWARD
   */
  ccv_nnc_cmd_vtab_t* isa;
  void* data;
} ccv_nnc_cmd_t;

/**
 * For forward functions, the input tensors and output tensors can be arbitrary.
 * However, for backward functions (backpropagation, or gradient functions in other libs),
 * the inputs are: 0..m-1: gradients for the forward output tensors, m..m+n-1: input tensors of the forward function, m+n..m+n+m-1: output tensors of the forward function;
 * the outputs are: 0..n-1: output gradients w.r.t. the forward input tensors.
 * Which input / output tensors can be ignored can be specified in the cmd config structs.
 */
typedef int(*ccv_nnc_cmd_exec_f)(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context);
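
// Example (not part of the header): a hypothetical backward kernel for a
// forward op with n = 2 inputs and m = 1 output, annotating the tensor layout
// described in the comment above. The body is a sketch, not a real kernel.
static int example_backward(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
{
  ccv_nnc_tensor_t* const g = inputs[0];   // gradient w.r.t. the forward output
  ccv_nnc_tensor_t* const a = inputs[1];   // first forward input
  ccv_nnc_tensor_t* const b = inputs[2];   // second forward input
  ccv_nnc_tensor_t* const c = inputs[3];   // the forward output itself
  ccv_nnc_tensor_t* const da = outputs[0]; // gradient w.r.t. a
  ccv_nnc_tensor_t* const db = outputs[1]; // gradient w.r.t. b
  // ... compute da / db from g, a, b, c here ...
  (void)cmd; (void)hint; (void)flags; (void)input_size; (void)output_size; (void)stream_context;
  (void)g; (void)a; (void)b; (void)c; (void)da; (void)db;
  return CCV_NNC_EXEC_SUCCESS;
}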

/**
 * The function prototype for autotune. The only difference is the max_workspace_size.
 * Whoever implements this function prototype takes over the autotune task from us for the
 * command itself, and is responsible for selecting the best algorithm.
 * @return The selected algorithm.
 */
typedef int(*ccv_nnc_cmd_autotune_f)(const ccv_nnc_cmd_t cmd, const size_t max_workspace_size, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context);

/**
 * This function prototype is for automatically deducing tensor shapes.
 */

typedef struct ccv_nnc_cmd_vtab_s {
  ccv_nnc_cmd_exec_f exec;
  void (*tensor_auto)(const ccv_nnc_cmd_t cmd, const ccv_nnc_tensor_param_t* const inputs, const int input_size, const ccv_nnc_hint_t hint, ccv_nnc_tensor_param_t* const outputs, const int output_size);
} ccv_nnc_cmd_vtab_t;
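
// Example (not part of the header): a hypothetical vtab wiring the
// example_backward kernel above into a custom command (the cmd.isa field of
// ccv_nnc_cmd_t). Leaving tensor_auto as 0 is an assumption of this sketch.
static ccv_nnc_cmd_vtab_t example_backward_vtab = {
  .exec = example_backward,
  .tensor_auto = 0, // no automatic shape deduction in this sketch
};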

/** @} */

/**
 * @defgroup level_1_uops Micro Commands to Define Commands
 * @{
 */

/**
 * @page micro_jittor The concept of meta-ops in Jittor is amazing
 *
 * NNC will never do JIT. Particularly, I will never do codegen and compile at runtime, especially with static shapes.
 * The reason is pretty simple. JIT would be too architecture-dependent, and with that, it would be almost impossible for NNC
 * to be this small embeddable library that you can carry everywhere. However, this shouldn't prevent NNC from generating
 * proper descriptions of each command so a JIT version can be built if there is architectural support for it. In this
 * way, the core of NNC can be small and embeddable, but a new backend (identified by the backend attribute) can implement
 * a more sophisticated JIT mechanism.
 *
 * Moreover, I need to generate some code for reference implementations, ideally from some descriptions. This is important
 * because with 90+ ops, having a correctly implemented command turns out to be more challenging than I expected,
 * especially if I want them to be compliant with the metadata that describes them (what shapes they accept, what datatypes work,
 * whether they can accept tensor views, and how in-place tensors are supported). Many of the reference commands do not support
 * all datatypes and tensor views, and this has to be rectified: these are "reference commands", so they must.
 *
 * Jittor introduced to the world the idea of meta-ops. Basically, it claims every op (or macro op) can be broken down into
 * 3 types of micro ops (they call them meta-ops): a reindex op that can map a tensor from one dimensionality to another, an
 * element-wise op that does element-wise primitive math, and finally, a reduce op that can reduce along particular axes
 * of a tensor with some elementary math. This feels rather limited initially, but when thinking it through, I am convinced
 * it should be enough to describe all commands present in NNC (this shouldn't be a surprise actually).
 *
 * Thus, the plan now is to use the meta-ops idea, implementing new micro commands that can describe other commands in
 * NNC. In this way, I can generate reference implementations from these descriptions and hopefully have better coverage
 * than my existing CPU / GPU reference implementations.
 *
 * To build on top of what Jittor did: if you need any dynamism in the ops, it is essential to index with a provided
 * tensor. With just reindex, binary operands and reduce, you cannot do that. Thus, on top of these 3, we added a 4th
 * micro op (meta-op), "select". This will be sufficient to implement ops such as masking.
 *
 */

/**
 * Abstract vtab for different ccv_nnc_micro_io_t.
 */
typedef struct ccv_nnc_micro_io_vtab_s ccv_nnc_micro_io_vtab_t;

enum {
  // There could be many more unary ops.
  CCV_NNC_MICRO_UNARY_OP_NEG,
  CCV_NNC_MICRO_UNARY_OP_LOG,
  CCV_NNC_MICRO_UNARY_OP_EXP,
};

enum {
  CCV_NNC_MICRO_BINARY_OP_PLUS,
  CCV_NNC_MICRO_BINARY_OP_MINUS,
  CCV_NNC_MICRO_BINARY_OP_MUL,
  CCV_NNC_MICRO_BINARY_OP_DIV,
  CCV_NNC_MICRO_BINARY_OP_MAX,
  CCV_NNC_MICRO_BINARY_OP_MIN,
  CCV_NNC_MICRO_BINARY_OP_EQUAL_TO,
  CCV_NNC_MICRO_BINARY_OP_LESS_THAN,
};

enum {
  CCV_NNC_MICRO_REDUCE_OP_MAX,
  CCV_NNC_MICRO_REDUCE_OP_MIN,
  CCV_NNC_MICRO_REDUCE_OP_ARGMAX,
  CCV_NNC_MICRO_REDUCE_OP_ARGMIN,
  CCV_NNC_MICRO_REDUCE_OP_MEAN, // Mean is complicated, we need a way to compute the total for loops after this. It has to be done statically, and that is "interesting".
  CCV_NNC_MICRO_REDUCE_OP_SUM,
  CCV_NNC_MICRO_REDUCE_OP_PROD,
};

/**
 * Abstract micro op representation.
 */
typedef struct ccv_nnc_micro_io_s* ccv_nnc_micro_io_t;

struct ccv_nnc_micro_io_s {
  const ccv_nnc_micro_io_vtab_t* isa;
  ccv_nnc_micro_io_t* inputs;
  int input_size;
  int dimensions;
  int id;
};

typedef struct {
  // The type of the scalar is about precision; it does nothing to restrict the tensor's type. For example, we may assign an int32_t 0
  // to a float16 tensor element, and this is perfectly fine.
  int type;
  union {
    unsigned char u8;
    int i32;
    ccv_float16_t f16;
    float f32;
    int64_t i64;
    uint64_t u64;
    double f64;
  };
} ccv_nnc_micro_scalar_t;

/**
 * Create a free-form input that represents a tensor.
 * @param dimensions The maximum dimension of the input.
 */
CCV_WARN_UNUSED(ccv_nnc_micro_io_t) ccv_nnc_micro_input(const int dimensions);
/**
 * Use shape and reindex expressions to reindex the given tensor into a different shape.
 * The expressions can bind integer parameters, which start with $.
 *
 * The expressions follow a specific pattern. Dimensions are represented as dXn, such
 * as dA0, dA1, dA2 ... Indexes into the provided tensor can be represented as i0, i1, i2. These are all 0-indexed.
 *
 * Constants are supported, such as 235, 431 etc. Operators supported currently are -, +, /, *.
 *
 * Thus, broadcasting a tensor x[w, h] to y[w, h, h] can be represented as:
 * shape: { "dA0", "dA1", "dA1" }, reindex: { "i0", "i1", "0" }.
 * For example, transpose can be represented as:
 * shape: { "dA1", "dA0" }, reindex: { "i1", "i0" }
 *
 * @param shape The shape expressions per axis.
 * @param shape_count The dimensions of the output.
 * @param ss The tensors to reference shape dimensions.
 * @param s_count The number of tensors to reference shape dimensions.
 * @param reindex The reindex expressions per axis.
 * @param reindex_count The dimensions of the input.
 * @param x The input for the reindex operation.
 * @return The reindexed tensor.
 */
CCV_WARN_UNUSED(ccv_nnc_micro_io_t) ccv_nnc_micro_reindex(const char* const* const shape, const int shape_count, const ccv_nnc_micro_io_t* const ss, const int s_count, const char* const* const reindex, const int reindex_count, const ccv_nnc_micro_io_t x);
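
// Example (not part of the header): a sketch of describing a 2-d transpose
// with ccv_nnc_micro_reindex, using the shape / reindex expressions from the
// transpose example in the comment above.
static ccv_nnc_micro_io_t example_transpose(void)
{
  ccv_nnc_micro_io_t x = ccv_nnc_micro_input(2); // a free-form 2-d input
  const char* const shape[] = { "dA1", "dA0" };  // output shape swaps the two dims
  const char* const reindex[] = { "i1", "i0" };  // read the input at swapped indexes
  return ccv_nnc_micro_reindex(shape, 2, &x, 1, reindex, 2, x);
}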
/**
 * Apply element-wise computations with one tensor.
 * @param op The unary operand.
 * @param x The input.
 * @return The result tensor.
 */
CCV_WARN_UNUSED(ccv_nnc_micro_io_t) ccv_nnc_micro_unary(const uint32_t op, const ccv_nnc_micro_io_t x);
/**
 * Apply pair-wise computations with two tensors. They have to match shapes exactly.
 * @param op The binary operand.
 * @param left The left input.
 * @param right The right input.
 * @return The result tensor.
 */
CCV_WARN_UNUSED(ccv_nnc_micro_io_t) ccv_nnc_micro_binary(const uint32_t op, const ccv_nnc_micro_io_t left, const ccv_nnc_micro_io_t right);
/**
 * Apply a reduction computation against some dimensions and generate the final reduced tensor.
 * @param op The reduction operand.
 * @param axis The axes to reduce.
 * @param axis_count Number of axes.
 * @param x The input tensor.
 * @return The result tensor after reduction.
 */
CCV_WARN_UNUSED(ccv_nnc_micro_io_t) ccv_nnc_micro_reduce(const uint8_t op, const int* const axis, const int axis_count, const ccv_nnc_micro_io_t x);
/**
 * Use the index tensor to select one value from x per axis.
 * @param axis The axis to select.
 * @param x The tensor to be indexed.
 * @param index The integer tensor of indexes.
 * @return The result tensor with values selected from x with indexes from the index tensor.
 */
CCV_WARN_UNUSED(ccv_nnc_micro_io_t) ccv_nnc_micro_select(const int axis, const ccv_nnc_micro_io_t x, const ccv_nnc_micro_io_t index);
/**
 * Return the gradient for a particular output. For example, if x is ccv_nnc_micro_unary(exp, input),
 * this represents the gradient of x, not of the input. This method is used to generate representations
 * of gradients for the ccv_nnc_micro_combine_new method.
 * @param x The tensor to take a gradient of.
 * @return The result tensor that represents the gradient of x.
 */
CCV_WARN_UNUSED(ccv_nnc_micro_io_t) ccv_nnc_micro_grad(const ccv_nnc_micro_io_t x);
/**
 * The combined op from micro ops.
 */
typedef struct ccv_nnc_micro_combine_s ccv_nnc_micro_combine_t;
/**
 * Combine micro ops into one, and do some optimization passes. The combined one can then be processed to generate
 * optimized kernels. Particularly, we can process the combined one into C code and CUDA code as reference
 * implementations.
 * @param inputs The inputs for the combined ops.
 * @param input_size The number of inputs.
 * @param parameters The names of the parameters; this determines the order of these parameters.
 * @param parameter_size The number of parameters.
 * @param outputs The outputs for the combined ops.
 * @param output_size The number of outputs.
 * @param ingrads The gradient inputs for the combined ops, including any inputs / outputs if there are any.
 * @param ingrad_size The number of ingrads.
 * @param outgrads The gradient outputs for the combined ops.
 * @param outgrad_size The number of outgrads.
 */
CCV_WARN_UNUSED(ccv_nnc_micro_combine_t*) ccv_nnc_micro_combine_new(const ccv_nnc_micro_io_t* const inputs, const int input_size, const char* const* const parameters, const int parameter_size, const ccv_nnc_micro_io_t* const outputs, const int output_size, const ccv_nnc_micro_io_t* const ingrads, const int ingrad_size, const ccv_nnc_micro_io_t* const outgrads, const int outgrad_size);
/**
 * Free the combined op.
 * @param combine The op to be freed.
 */
void ccv_nnc_micro_combine_free(ccv_nnc_micro_combine_t* const combine);
/**
 * Run the combined op in interpret mode. This is only useful for debugging internals. Because this is for
 * a generic combined op, there is no hint, or flags, or stream context, or cmd.
 * @param combine The op.
 * @param cmd Choice between CMD_CUSTOM_FORWARD and CMD_CUSTOM_BACKWARD.
 * @param inputs The input tensors.
 * @param input_size The size of the input tensors.
 * @param values The values corresponding to the parameters when calling ccv_nnc_micro_combine_new.
 * @param parameter_size How many parameters. It must match the value used when calling ccv_nnc_micro_combine_new.
 * @param outputs The output tensors.
 * @param output_size The size of the output tensors.
 */
void ccv_nnc_micro_combine_interpret(ccv_nnc_micro_combine_t* const combine, const uint32_t cmd, ccv_nnc_tensor_t* const* const inputs, const int input_size, const ccv_nnc_micro_scalar_t* const values, const int parameter_size, ccv_nnc_tensor_t* const* const outputs, const int output_size);
/**
 * Generate C code from the combined op.
 * @param combine The combined op to generate some C code from.
 * @return The generated C code string.
 */
char* ccv_nnc_micro_combine_c(ccv_nnc_micro_combine_t* const combine);
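
// Example (not part of the header): a sketch of the full micro-op pipeline
// declared above: describe exp(x) as a combined op, lower it to C code, and
// free it. Passing no parameters / gradients (the 0 arguments) is an
// assumption of this sketch.
static char* example_exp_to_c(void)
{
  ccv_nnc_micro_io_t x = ccv_nnc_micro_input(2);
  ccv_nnc_micro_io_t y = ccv_nnc_micro_unary(CCV_NNC_MICRO_UNARY_OP_EXP, x);
  ccv_nnc_micro_combine_t* combine = ccv_nnc_micro_combine_new(&x, 1, 0, 0, &y, 1, 0, 0, 0, 0);
  char* c_code = ccv_nnc_micro_combine_c(combine); // the generated reference kernel
  ccv_nnc_micro_combine_free(combine);
  return c_code; // caller owns the string
}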

/** @} */

/**
 * @defgroup level_1_tensor Tensors
 * @{
 */

/**
 * Count the dimensionality of a tensor.
 */
static inline int ccv_nnc_tensor_nd(const int dim[CCV_NNC_MAX_DIM_ALLOC])
{                                                // count: 2.51M
  int i;                                         // count: 2.51M
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; i++)    // count: 6.68M (i++: 4.16M)
    if (dim[i] == 0)                             // count: 6.68M
      return i;                                  // count: 2.51M
  return CCV_NNC_MAX_DIM_ALLOC;                  // count: 0 (never executed)
}
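
// Example (not part of the header): how the inline helper above behaves. dim
// arrays are zero-terminated, so the first 0 entry marks the dimensionality.
static void example_tensor_nd(void)
{
  int dim[CCV_NNC_MAX_DIM_ALLOC] = { 32, 224, 224, 3 }; // remaining entries are 0
  const int nd = ccv_nnc_tensor_nd(dim); // nd == 4
  (void)nd;
}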
Per-translation-unit instantiations of ccv_nnc_tensor_nd (each repeats the body above; call counts are for the function entry):

tensor.tests.c: 18; ccv_nnc_cmd.c: 347k; ccv_nnc_tensor.c: 365; ccv_nnc_tensor_io.c: 46; ccv_nnc_symbolic_graph_compile.c: 4.66k; ccv_nnc_symbolic_graph_backward.c: 2.18k; ccv_nnc_dynamic_graph.c: 1.05k; ccv_cnnp_dataframe_addons.c: 183k; ccv_cnnp_model_addons.c: 3.97k; ccv_nnc_adam_cpu_ref.c: 16.1k; ccv_nnc_adamw_cpu_ref.c: 16.1k; ccv_nnc_gemm_cpu_ref.c: 303k; ccv_nnc_gemm_cpu_opt.c: 1.09k; ccv_nnc_add_cpu_ref.c: 205k; ccv_nnc_mul_cpu_ref.c: 109k; ccv_nnc_cmul_cpu_ref.c: 173; ccv_nnc_segmented_gemm_cpu_ref.c: 44; ccv_nnc_min_cpu_ref.c: 82; ccv_nnc_max_cpu_ref.c: 82; ccv_nnc_lssc_cpu_ref.c: 96; ccv_nnc_conv_cpu_ref.c: 10.1k; ccv_nnc_conv_cpu_opt.c: 3.13k; ccv_nnc_conv_transpose_cpu_ref.c: 40; ccv_nnc_dropout_cpu_ref.c: 24; ccv_nnc_ew_cpu_ref.c: 765k; ccv_nnc_histogram_cpu_ref.c: 4; ccv_nnc_index_select_cpu_ref.c: 33; ccv_nnc_reduce_isnan_cpu_ref.c: 21; ccv_nnc_lamb_cpu_ref.c: 48; ccv_nnc_binary_crossentropy_cpu_ref.c: 194; ccv_nnc_categorical_crossentropy_cpu_ref.c: 35; ccv_nnc_mse_cpu_ref.c: 2.19k; ccv_nnc_smooth_l1_cpu_ref.c: 63; ccv_nnc_nms_cpu_ref.c: 18; ccv_nnc_batch_norm_cpu_ref.c: 438; ccv_nnc_layer_norm_cpu_ref.c: 208; ccv_nnc_group_norm_cpu_ref.c: 331; ccv_nnc_rmsnorm_cpu_ref.c: 93; ccv_nnc_pad_cpu_ref.c: 105; ccv_nnc_partition_cpu_ref.c: 117; ccv_nnc_max_pool_cpu_ref.c: 4.25k; ccv_nnc_avg_pool_cpu_ref.c: 5.47k; ccv_nnc_reduce_sum_cpu_ref.c: 78.6k; ccv_nnc_reduce_mean_cpu_ref.c: 41; ccv_nnc_reduce_max_cpu_ref.c: 41; ccv_nnc_reduce_min_cpu_ref.c: 21; ccv_nnc_reduce_norm2_cpu_ref.c: 115; ccv_nnc_argmax_cpu_ref.c: 8; ccv_nnc_argmin_cpu_ref.c: 6; ccv_nnc_rmsprop_cpu_ref.c: 16.0k; ccv_nnc_roi_align_cpu_ref.c: 104; ccv_nnc_scaled_dot_product_attention_cpu_ref.c: 145; ccv_nnc_scatter_add_cpu_ref.c: 27; ccv_nnc_sgd_cpu_ref.c: 220k; ccv_nnc_sigmoid_binary_crossentropy_cpu_ref.c: 238; ccv_nnc_softmax_cpu_ref.c: 514; ccv_nnc_softmax_crossentropy_cpu_ref.c: 1.24k; ccv_nnc_sort_cpu_ref.c: 51; ccv_nnc_unique_consecutive_cpu_ref.c: 18; ccv_nnc_upsample_cpu_ref.c: 114; ccv_nnc_util_cpu_ref.c: 310; ccv_nnc_blas.c: 120k; _ccv_nnc_gemm_cpu_opt.c: 603; _ccv_nnc_gemm_cpu_sys.c: 67.2k; ccv_nnc_compression.c: 10; _ccv_nnc_conv_cpu_4x4_3x3_winograd.c: 380; _ccv_nnc_conv_cpu_gemm.c: 8; _ccv_nnc_conv_cpu_opt.c: 4.46k; ccv_nnc_convolution.c: 13.1k; ccv_nnc_binary_crossentropy.c: 12; ccv_nnc_categorical_crossentropy.c: 31; ccv_nnc_mse.c: 18; ccv_nnc_smooth_l1.c: 4; ccv_nnc_pad.c: 3; ccv_nnc_pool.c: 4.07k; ccv_nnc_lstm.c: 11; ccv_nnc_scaled_dot_product_attention.c: 57; ccv_nnc_sigmoid_binary_crossentropy.c: 36; ccv_nnc_softmax_crossentropy.c: 860; ccv_nnc_upsample.c: 12.

Unexecuted instantiations: adam.tests.c, cifar.tests.c, cnnp.core.tests.c, compare.tests.c, compression.tests.c, concat.tests.c, cublas.tests.c, cudnn.tests.c, datatype.tests.c, dense.net.tests.c, dynamic.graph.tests.c, gelu.tests.c, graph.vgg.d.tests.c, imdb.tests.c, index.tests.c, lamb.tests.c, leaky_relu.tests.c, loss.tests.c, lstm.tests.c, mpsblas.tests.c, mpsdnn.tests.c, nccl.tests.c, nms.tests.c, pad.tests.c, palettize.tests.c, parallel.tests.c, partition.tests.c, random.tests.c, reduce.tests.c, rmsprop.tests.c, roi_align.tests.c, scatter_add.tests.c, schedule.tests.c, sgd.tests.c, smooth_l1.tests.c, sort.tests.c, swish.tests.c, symbolic.graph.vgg.d.tests.c, transform.tests.c, unique_consecutive.tests.c, upsample.tests.c, attention.tests.c, autograd.tests.c, autograd.vector.tests.c, backward.tests.c, batch.norm.tests.c, broadcast.tests.c, case_of.backward.tests.c, case_of.tests.c, cblas.tests.c, complex.tests.c, crossentropy.tests.c, custom.tests.c, dataframe.addons.tests.c, dataframe.tests.c, dropout.tests.c, forward.tests.c, gemm.tests.c, gradient.tests.c, graph.io.tests.c, graph.tests.c, group.norm.tests.c, histogram.tests.c, layer.norm.tests.c, micro.tests.c, minimize.tests.c, numa.tests.c, rand.tests.c, rmsnorm.tests.c, simplify.tests.c, symbolic.graph.compile.tests.c, symbolic.graph.tests.c, tape.tests.c, tensor.bind.tests.c, tfb.tests.c, while.backward.tests.c, while.tests.c, winograd.tests.c, ccv_nnc_stream.c, ccv_nnc_micro.c, ccv_nnc_micro_core.c, ccv_nnc_micro_interpret.c, ccv_nnc_micro_simplify.c, ccv_nnc_graph.c, ccv_nnc_symbolic_graph.c, ccv_nnc_symbolic_graph_io.c, ccv_nnc_symbolic_graph_while.c, ccv_nnc_graph_while.c, ccv_nnc_tensor_tape.c, ccv_nnc_symbolic_graph_case_of.c, ccv_nnc_graph_case_of.c, ccv_nnc_symbolic_graph_minimize.c, ccv_nnc_symbolic_graph_parallel.c, ccv_nnc_symbolic_graph_simplify.c, ccv_nnc_symbolic_graph_memory_compression.c, ccv_nnc_symbolic_graph_memory_reduction.c, ccv_nnc_graph_run.c, ccv_nnc_xpu_alloc.c, ccv_nnc_dynamic_graph_alloc.c, ccv_nnc_dynamic_graph_backward.c, ccv_nnc_dynamic_graph_apply_gradients.c, ccv_nnc_dynamic_graph_minimize.c, ccv_nnc_dynamic_graph_evaluate.c, ccv_cnnp_dataframe.c, ccv_cnnp_dataframe_core.c, ccv_cnnp_dataframe_csv.c, ccv_cnnp_model.c, ccv_cnnp_model_io.c, ccv_cnnp_model_core.c, ccv_nnc_palettize.c, ccv_cnnp_model_gradient_checkpointing.c, ccv_nnc_symbolic_graph_chain_decomposition.c, ccv_nnc_gelu_cpu_ref.c, ccv_nnc_leaky_relu_cpu_ref.c, ccv_nnc_relu_cpu_ref.c, ccv_nnc_rand_uniform_cpu_ref.c, ccv_nnc_rand_normal_cpu_ref.c, ccv_nnc_sigmoid_cpu_ref.c, ccv_nnc_swish_cpu_ref.c, ccv_nnc_tanh_cpu_ref.c, ccv_nnc_adam.c, ccv_nnc_comm.c, ccv_nnc_cmp.c, _ccv_nnc_conv_cpu_fft.c, ccv_nnc_dropout.c, ccv_nnc_ew.c, ccv_nnc_gelu.c, ccv_nnc_histogram.c, ccv_nnc_index_select.c, ccv_nnc_reduce_isnan.c, ccv_nnc_lamb.c, ccv_nnc_leaky_relu.c, ccv_nnc_nms.c, ccv_nnc_norm.c, ccv_nnc_partition.c, ccv_nnc_rand.c, ccv_nnc_reduce.c, ccv_nnc_relu.c, ccv_nnc_rmsprop.c, ccv_nnc_roi_align.c, ccv_nnc_scatter_add.c, ccv_nnc_sgd.c, ccv_nnc_sigmoid.c, ccv_nnc_softmax.c, ccv_nnc_sort.c, ccv_nnc_swish.c, ccv_nnc_tanh.c, ccv_nnc_unique_consecutive.c, ccv_nnc_util.c.
581
582
/**
583
 * Create a new tensor.
584
 * @param ptr If 0, NNC will allocate the tensor itself. Otherwise, it will use the memory region referenced by 'ptr'.
585
 * @param params Tensor parameters.
586
 * @param flags Reserved flags for the allocation.
587
 * @return The newly created tensor.
588
 */
589
CCV_WARN_UNUSED(ccv_nnc_tensor_t*) ccv_nnc_tensor_new(const void* const ptr, const ccv_nnc_tensor_param_t params, const int flags);
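/* Usage sketch (illustrative, not part of the header): create a 2x2 FP32 tensor on
 * CPU and let NNC own the allocation by passing ptr == 0. The CPU_TENSOR_NHWC
 * convenience macro is assumed to come from ccv_nnc_easy.h. */
ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 2), 0);
a->data.f32[0] = 1; // raw data is reachable through the data union
ccv_nnc_tensor_free(a);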
590
enum {
591
  CCV_NNC_TENSOR_MEMORY_MAP_EAGER = 0x1, /**< Load tensor mapped directly. */
592
  CCV_NNC_TENSOR_MEMORY_MAP_ON_DEMAND = 0x2, /**< Defer tensor map until read on supported devices. */
593
};
594
/**
595
 * Create a new tensor with data from a file. This will create a memory-mapped (mmap) tensor if that is preferred.
596
 * @param params Tensor parameters.
597
 * @param filename The file to load tensor content from.
598
 * @param offset The offset to the tensor content from the file.
599
 * @param flags Reserved flags for this loading.
600
 * @return The newly created tensor.
601
 */
602
CCV_WARN_UNUSED(ccv_nnc_tensor_t*) ccv_nnc_tensor_new_from_file(const ccv_nnc_tensor_param_t params, const char* const filename, const off_t offset, const int flags);
603
/**
604
 * Create a new tensor on stack.
605
 * @param ptr If 0, NNC will allocate the tensor itself. Otherwise, it will use the memory region referenced by 'ptr'.
606
 * @param params Tensor parameters.
607
 * @param flags Reserved flags for the allocation.
608
 * @return The tensor struct.
609
 */
610
CCV_WARN_UNUSED(ccv_nnc_tensor_t) ccv_nnc_tensor(const void* const ptr, const ccv_nnc_tensor_param_t params, const int flags);
611
/**
612
 * Resize an existing tensor to a new dimension.
613
 * @param tensor The old tensor to be resized.
614
 * @param params Tensor parameters.
615
 * @return Potentially a new tensor, but if the size is sufficient, it will be an in-place operation.
616
 */
617
CCV_WARN_UNUSED(ccv_nnc_tensor_t*) ccv_nnc_tensor_resize(ccv_nnc_tensor_t* const tensor, const ccv_nnc_tensor_param_t params);
618
/**
619
 * Pin the tensor memory for faster access on GPU.
620
 * @param tensor The tensor whose memory we want to pin.
621
 * @return 0 for success.
622
 */
623
int ccv_nnc_tensor_pin_memory(ccv_nnc_tensor_t* const tensor);
624
/**
625
 * Free a tensor object.
626
 * @param tensor The tensor to be freed.
627
 */
628
void ccv_nnc_tensor_free(ccv_nnc_tensor_t* const tensor);
629
/**
630
 * Create a tensor view. A tensor view can be non-contiguous. Essentially, it provides a view into a tensor.
631
 * @param tensor The tensor that we want to view into.
632
 * @param params The tensor parameters for the tensor view.
633
 * @param ofs The offset on each of the dimension.
634
 * @param stride The stride of each dimension.
635
 * @return The newly created tensor view.
636
 */
637
CCV_WARN_UNUSED(ccv_nnc_tensor_view_t*) ccv_nnc_tensor_view_new(const ccv_nnc_tensor_t* const tensor, const ccv_nnc_tensor_param_t params, const int ofs[CCV_NNC_MAX_DIM_ALLOC], const int stride[CCV_NNC_MAX_DIM_ALLOC]);
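/* Usage sketch (illustrative): view the second row of a 2x3 matrix as a 1x3 tensor.
 * DIM_ALLOC and CPU_TENSOR_NHWC are assumed from ccv_nnc_easy.h; the stride of
 * {3, 1} mirrors the row-major layout of the underlying 2x3 tensor. */
ccv_nnc_tensor_t* const m = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 3), 0);
ccv_nnc_tensor_view_t* const row = ccv_nnc_tensor_view_new(m,
  CPU_TENSOR_NHWC(32F, 1, 3), DIM_ALLOC(1, 0), DIM_ALLOC(3, 1));
/* ... read / write through the view ... */
ccv_nnc_tensor_view_free(row);
ccv_nnc_tensor_free(m);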
638
/**
639
 * Create a tensor view on stack.
640
 * @param tensor The tensor that we want to view into.
641
 * @param params The tensor parameters for the tensor view.
642
 * @param ofs The offset on each of the dimension.
643
 * @param stride The stride of each dimension.
644
 * @return The tensor view struct.
645
 */
646
CCV_WARN_UNUSED(ccv_nnc_tensor_view_t) ccv_nnc_tensor_view(const ccv_nnc_tensor_t* const tensor, const ccv_nnc_tensor_param_t params, const int ofs[CCV_NNC_MAX_DIM_ALLOC], const int stride[CCV_NNC_MAX_DIM_ALLOC]);
647
/**
648
 * Free a tensor view object.
649
 * @param tensor_view The tensor view to be freed.
650
 */
651
void ccv_nnc_tensor_view_free(ccv_nnc_tensor_view_t* const tensor_view);
652
/**
653
 * Zero out a given tensor.
654
 * @param tensor The tensor to be zeroed out.
655
 */
656
void ccv_nnc_tensor_zero(void* const tensor);
657
/**
658
 * Compare whether two tensors are equal. This tolerates some floating point issues, following http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm
659
 * @param a Tensor a.
660
 * @param b Tensor b.
661
 * @return 0 if equal, -1 otherwise.
662
 */
663
CCV_WARN_UNUSED(int) ccv_nnc_tensor_eq(const ccv_nnc_tensor_t* const a, const ccv_nnc_tensor_t* const b);
664
/**
665
 * Format a tensor output to string so that it can be used as debug output for other languages. This will look like:
666
 * [
667
 *   0.13, 0.44, 0.24, 0.24
668
 * ]
669
 * The format closely follows what numpy output looks like.
670
 * @param a The input tensor, it can be a tensor or a tensor view. It has to be accessible on CPU.
671
 * @return An allocated string that you can free by calling ccfree on it.
672
 */
673
CCV_WARN_UNUSED(char*) ccv_nnc_tensor_format_new(const ccv_nnc_tensor_t* const a);
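/* Usage sketch (illustrative): dump a CPU tensor for debugging. The returned
 * string is heap-allocated and paired with ccfree (ccv's allocator); printf
 * assumes <stdio.h>. a is assumed to be an existing CPU tensor. */
char* const str = ccv_nnc_tensor_format_new(a);
printf("%s\n", str);
ccfree(str);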
674
/**
675
 * Method to decode a tensor into a given buffer.
676
 * @param data The encoded data that needs to be decoded.
677
 * @param data_size The size of the encoded data.
678
 * @param datatype The expected data type of the encoded data.
679
 * @param dimensions The expected dimension for the data.
680
 * @param dimension_count The number of dimensions for the data.
681
 * @param identifier The identifier saved along by the encoder (non-zero), used to identify this decoder.
682
 * @param context The context associated with this decoder.
683
 * @param tensor_params The tensor parameters for the final container. This can be different from the expected values above.
684
 * @param tensor_out The final container for the tensor. It can be nil, in which case you need to initialize it.
685
 * @param decoded The buffer for data to be decoded.
686
 * @param decoded_size The size of the buffer to be decoded.
687
 * @return 1 if it is processed, 0 otherwise.
688
 */
689
typedef int (*ccv_nnc_tensor_io_option_decode_f)(const void* const data, const size_t data_size, const int datatype, const int* const dimensions, const int dimension_count, const unsigned int identifier, void* const context, const ccv_nnc_tensor_param_t tensor_params, ccv_nnc_tensor_t** const tensor_out, void* const decoded, size_t* const decoded_size);
690
/**
691
 * Method to encode a tensor into a given buffer.
692
 * @param data The data that needs to be encoded.
693
 * @param data_size The size of the data to be encoded.
694
 * @param datatype The expected data type of the data to be encoded.
695
 * @param dimensions The expected dimension for the data.
696
 * @param dimension_count The number of dimensions for the data.
697
 * @param context The context associated with this encoder.
698
 * @param encoded The buffer for encoded data.
699
 * @param encoded_size The size of the buffer.
700
 * @param tensor_params The tensor parameters that can be modified.
701
 * @param identifier The identifier identifies this encoder (non-zero).
702
 * @return 1 if it is processed, 0 otherwise.
703
 */
704
typedef int (*ccv_nnc_tensor_io_option_encode_f)(const void* const data, const size_t data_size, const int datatype, const int* const dimensions, const int dimension_count, void* const context, void* const encoded, size_t* const encoded_size, ccv_nnc_tensor_param_t* const tensor_params, unsigned int* const identifier);
705
/**
706
 * Additional options to regulate tensor write / read behavior. For example, you can pass
707
 * an encryptor / compressor to encrypt / compress the data prior to writing to disk. You can
708
 * also store only a reference, and use external storage for tensors.
709
 */
710
typedef struct {
711
  ccv_nnc_tensor_io_option_decode_f decode;
712
  ccv_nnc_tensor_io_option_encode_f encode;
713
  void* context;
714
} ccv_nnc_tensor_io_option_t;
715
/**
716
 * Write tensor to a SQLite database with a given name.
717
 * @param tensor The tensor.
718
 * @param handle The SQLite handle.
719
 * @param name The name to find the tensor in the database.
720
 * @param options If provided, we will use this to encode tensor data.
721
 * @return CCV_IO_FINAL for success, otherwise error.
722
 */
723
int ccv_nnc_tensor_write(const ccv_nnc_tensor_t* const tensor, void* const handle, const char* const name, const ccv_nnc_tensor_io_option_t* const options);
724
725
enum {
726
  CCV_NNC_TENSOR_READ_METADATA_ONLY = CCV_NO_DATA_ALLOC, /**< Read the tensor with nil data, i.e., metadata only. */
727
  CCV_NNC_TENSOR_READ_CPU_MEMORY = CCV_TENSOR_CPU_MEMORY, /**< Read tensor to CPU allocated buffer. */
728
};
729
/**
730
 * Read a tensor from a SQLite database with a given name.
731
 * @param handle The SQLite handle.
732
 * @param name The name to find the tensor in the database.
733
 * @param options If provided, we will use this to decode any data whose identifier != 0.
734
 * @param flags Additional flag to configure how we read tensor.
735
 * @param tensor_params If provided, we will use this to create the tensor if tensor_out is not provided.
736
 * @param tensor_out The pointer to hold the tensor. If you supply the tensor yourself, we will read the data into the existing tensor.
737
 * @return CCV_IO_FINAL for success, otherwise error.
738
 */
739
int ccv_nnc_tensor_read(void* const handle, const char* const name, const ccv_nnc_tensor_io_option_t* const options, const int flags, const ccv_nnc_tensor_param_t* const tensor_params, ccv_nnc_tensor_t** const tensor_out);
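/* Usage sketch (illustrative): round-trip a tensor through SQLite. The handle is
 * a plain sqlite3*; "tensors.db" and the "weight" key are made-up names, and a is
 * assumed to be an existing tensor. */
sqlite3* db = 0;
sqlite3_open("tensors.db", &db);
ccv_nnc_tensor_write(a, db, "weight", 0); // no encode options
ccv_nnc_tensor_t* b = 0;
ccv_nnc_tensor_read(db, "weight", 0, 0, 0, &b); // allocates b from stored metadata
sqlite3_close(db);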
740
/** @} */
741
742
/**
743
 * @addtogroup level_1_cmd
744
 * @{
745
 */
746
747
/**
748
 * Return a high precision time unit. The exact unit is platform specific.
749
 * @return A monotonic increasing 64-bit integer w.r.t. passing of time.
750
 */
751
uint64_t ccv_nnc_cmd_mono_time(void);
752
/**
753
 * Return UTF-8 encoded name of a given command.
754
 * @return A UTF-8 string (pointing to a static constant).
755
 */
756
CCV_WARN_UNUSED(const char*) ccv_nnc_cmd_name(const uint32_t cmd);
757
/**
758
 * Return UTF-8 encoded name of a given backend.
759
 * @return A UTF-8 string (pointing to a static constant).
760
 */
761
CCV_WARN_UNUSED(const char*) ccv_nnc_cmd_backend_name(const uint32_t backend);
762
/**
763
 * Check whether a given backend is available for a given command.
764
 * @return 1 if it is available.
765
 */
766
CCV_WARN_UNUSED(int) ccv_nnc_cmd_ok(const uint32_t cmd, const uint32_t backend);
767
/**
768
 * Create a wrapped command with parameters.
769
 * @param cmd The command identifier.
770
 * @param isa If this is a CCV_NNC_CUSTOM_FORWARD / CCV_NNC_CUSTOM_BACKWARD command, this supplies the custom functions.
771
 * @param params The parameters for the command.
772
 * @param flags A reserved field for flags.
773
 * @return A wrapped ccv_nnc_cmd_t structure.
774
 */
775
CCV_WARN_UNUSED(ccv_nnc_cmd_t) ccv_nnc_cmd(const uint32_t cmd, ccv_nnc_cmd_vtab_t* const isa, const ccv_nnc_cmd_param_t params, const int flags);
776
/**
777
 * Verify whether a hint is compatible with a given command and given input / output tensor parameters.
778
 * @param hint The hint for a given command. Hint defines things such as paddings, strides etc. for a given command.
779
 * @param cmd The wrapped command.
780
 * @param a The input tensor parameters.
781
 * @param b The output tensor parameters.
782
 * @return 1 if it passes.
783
 */
784
CCV_WARN_UNUSED(int) ccv_nnc_hint_verify(const ccv_nnc_hint_t hint, const ccv_nnc_cmd_param_t cmd, const ccv_nnc_tensor_param_t a, const ccv_nnc_tensor_param_t b);
785
/**
786
 * Automatically find the best hint for a given input / output (on forward pass only).
787
 * @param cmd The wrapped command.
788
 * @param a The input tensor parameters.
789
 * @param b The output tensor parameters.
790
 * @return Best hint we can guess.
791
 */
792
CCV_WARN_UNUSED(ccv_nnc_hint_t) ccv_nnc_hint_auto(const ccv_nnc_cmd_param_t cmd, const ccv_nnc_tensor_param_t a, const ccv_nnc_tensor_param_t b);
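/* Usage sketch (illustrative): derive a hint from the command parameters and the
 * input / output tensor parameters, then check it. cmd, a and b are assumed to be
 * an existing ccv_nnc_cmd_t and two tensors; .info carries their parameters. */
ccv_nnc_hint_t hint = ccv_nnc_hint_auto(cmd.info, a->info, b->info);
if (ccv_nnc_hint_verify(hint, cmd.info, a->info, b->info))
  { /* the guessed hint is compatible with the command */ }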
793
/**
794
 * Automatically find the outputs for the given inputs / hint.
795
 * @param cmd The wrapped command.
796
 * @param inputs An array of input tensor parameters.
797
 * @param input_size The size of input array.
798
 * @param hint The hint for the given command.
799
 * @param outputs An array for the output tensor parameters.
800
 * @param output_size The size of the output array.
801
 */
802
void ccv_nnc_hint_tensor_auto(const ccv_nnc_cmd_t cmd, const ccv_nnc_tensor_param_t* const inputs, const int input_size, const ccv_nnc_hint_t hint, ccv_nnc_tensor_param_t* const outputs, const int output_size);
803
/**
804
 * Find a suitable backend for a given command and tensor settings.
805
 * @param cmd The wrapped command.
806
 * @param tensor_memory The tensor memory setup (whether it is CPU or GPU).
807
 * @param tensor_formats The tensor layout format (NCHW, NHWC, CHWN etc.)
808
 * @param tensor_datatypes The datatype of a given tensor (FP32 etc.)
809
 * @return The backend identifier for the selected backend.
810
 */
811
CCV_WARN_UNUSED(uint32_t) ccv_nnc_cmd_find_backend(const ccv_nnc_cmd_t cmd, const int tensor_memory, const int tensor_formats, const int tensor_datatypes);
812
/**
813
 * Run autotune to find the best kernel and configuration for the given input.
814
 * @param cmd The original wrapped command.
815
 * @param max_workspace_size The maximum memory allowed for this command to execute.
816
 * @param hint The hint for the given command.
817
 * @param flags The reserved field for flags.
818
 * @param inputs An array of input tensors.
819
 * @param input_size The size of input array.
820
 * @param outputs An array of output tensors.
821
 * @param output_size The size of output array.
822
 * @param stream_context The stream we can do the autotune on. 0 uses default stream.
823
 * @return The modified cmd that contains the updated configuration.
824
 */
825
CCV_WARN_UNUSED(ccv_nnc_cmd_t) ccv_nnc_cmd_autotune(const ccv_nnc_cmd_t cmd, const size_t max_workspace_size, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context);
826
/**
827
 * Check whether a given tensor input / output pattern can be computed by the given command.
828
 * bitmasks encode whether a given input tensor / output tensor is available at a position.
829
 * @param cmd The wrapped command to check.
830
 * @param input_size The intended size of the input tensor array.
831
 * @param output_size The intended size of the output tensor array.
832
 * @param input_bitmasks The input tensor array encoding in bitmap, 0: no tensor, 1: has a tensor.
833
 * @param input_bitmask_size The size of the input bitmask array.
834
 * @param output_bitmasks The output tensor array encoding in bitmap.
835
 * @param output_bitmask_size The size of the output bitmask array.
836
 * @return 1 if the command can be executed with the given input / output pattern.
837
 */
838
CCV_WARN_UNUSED(int) ccv_nnc_cmd_bitmask(const ccv_nnc_cmd_t cmd, const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size);
839
/**
840
 * Return auxiliary information related to a particular command with a particular backend.
841
 * A backend is required to be useful for this method.
842
 * @param cmd The wrapped command to check auxiliary information.
843
 * @return The auxiliary information specific to a particular command with a particular backend.
844
 */
845
CCV_WARN_UNUSED(void*) ccv_nnc_cmd_aux(const ccv_nnc_cmd_t cmd);
846
/**
847
 * Execute a given command.
848
 * @param cmd The wrapped command to be executed.
849
 * @param hint The hint provided for the command.
850
 * @param flags A reserved field for flags.
851
 * @param inputs The input tensor array.
852
 * @param input_size The size of input tensor array.
853
 * @param outputs The output tensor array.
854
 * @param output_size The size of output tensor array.
855
 * @param stream_context The stream which the command will be executed upon.
856
 * @return CCV_NNC_EXEC_SUCCESS if succeed.
857
 */
858
int ccv_nnc_cmd_exec(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context);
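/* Usage sketch (illustrative): compute c = a + b element-wise on the default
 * stream (0), synchronously. CMD_EWSUM_FORWARD and TENSOR_LIST are assumed from
 * the generated command headers / ccv_nnc_easy.h. */
ccv_nnc_cmd_exec(CMD_EWSUM_FORWARD(), ccv_nnc_no_hint, 0,
  TENSOR_LIST(a, b), TENSOR_LIST(c), 0);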
859
/**
860
 * Check whether the command is a forward pass or not.
861
 * @param cmd The wrapped command.
862
 * @return 1 if it is a forward pass.
863
 */
864
CCV_WARN_UNUSED(int) ccv_nnc_cmd_is_forward(const ccv_nnc_cmd_t cmd);
865
/**
866
 * Check whether the command is a backward pass or not.
867
 * @param cmd The wrapped command.
868
 * @return 1 if it is a backward pass.
869
 */
870
CCV_WARN_UNUSED(int) ccv_nnc_cmd_is_backward(const ccv_nnc_cmd_t cmd);
871
/**
872
 * Check this command against listed attributes.
873
 * @param cmd The wrapped command.
874
 * @param flags The flags to check against the command (unsupported).
875
 * @return 1 if the flag is supported by the command.
876
 */
877
CCV_WARN_UNUSED(int) ccv_nnc_cmd_attr(const ccv_nnc_cmd_t cmd, const int flags);
878
/**
879
 * Check whether this command allows inplace operation against a particular input and output (indexed from 0).
880
 * @param cmd The wrapped command.
881
 * @param input_idx The index of the input tensor we want to check.
882
 * @param input_size The total number of inputs.
883
 * @param output_idx The index of the output tensor we want to check.
884
 * @param output_size The total number of outputs.
885
 * @return 1 if the input tensor can be used as the output tensor.
886
 */
887
CCV_WARN_UNUSED(int) ccv_nnc_cmd_allow_inplace(const ccv_nnc_cmd_t cmd, const int input_idx, const int input_size, const int output_idx, const int output_size);
888
/**
889
 * Check whether this command needs to enforce inplace operation against a particular input and output (indexed from 0).
890
 * @param cmd The wrapped command.
891
 * @param input_idx The index of the input tensor we want to check.
892
 * @param input_size The total number of inputs.
893
 * @param output_idx The index of the output tensor we want to check.
894
 * @param output_size The total number of outputs.
895
 * @return 1 if the input tensor is required to be used as the output tensor.
896
 */
897
CCV_WARN_UNUSED(int) ccv_nnc_cmd_enforce_inplace(const ccv_nnc_cmd_t cmd, const int input_idx, const int input_size, const int output_idx, const int output_size);
898
/**
899
 * Set the profiler to be on or off. Right now, this just proxies the call on to cudaProfilerStart / cudaProfilerStop.
900
 * @param state 1 is on, 0 is off.
901
 */
902
void ccv_nnc_set_profiler(int state);
903
/**
904
 * Set the queue watermark when queueing up GPU commands. This is a Metal-only option.
905
 * @param state A value > 0 sets how many in-flight GPU commands there can be.
906
 */
907
void ccv_nnc_set_queue_watermark(int state);
908
/**
909
 * Get the queue watermark when queueing up GPU commands. This is a Metal-only option.
910
 * @return How many in-flight GPU commands there can be.
911
 */
912
CCV_WARN_UNUSED(int) ccv_nnc_queue_watermark(void);
913
/**
914
 * Set the device mapping to use a custom order for devices rather than the driver-imposed order. This is helpful
915
 * for code where you have no control over which GPU to use. The previous permutation is cleared on
916
 * each call, and you can pass a 0-size device map to clear all custom mappings.
917
 * @param type Currently, only CCV_NNC_STREAM_CONTEXT_GPU on NVIDIA systems is supported.
918
 * @param device_map The array of device map, maximum 64 devices.
919
 * @param size The size of the array, only first 64 will be used.
920
 */
921
void ccv_nnc_set_device_permutation(const int type, const int* const device_map, const int size);
922
/**
923
 * Quantize a given memory region of a given datatype / memory residency into an n-bit palette.
924
 * @param input The input memory region, it can be CCV_64F, CCV_32F or CCV_16F.
925
 * @param datatype The datatype, it can be CCV_64F, CCV_32F or CCV_16F.
926
 * @param memory_type Where the memory resides. Right now only support CPU_MEMORY.
927
 * @param input_length How many elements in the input.
928
 * @param qbits How many bits for the palette. Right now only 4 / 5 / 6 / 7 / 8 bits are supported.
929
 * @param number_in_blocks How many elements share a palette.
930
 * @param output The output memory region.
931
 * @param output_length The maximum size of the output.
932
 * @return The actual length in bytes of the output.
933
 */
934
CCV_WARN_UNUSED(size_t) ccv_nnc_palettize(const void* input, const int datatype, const int memory_type, const size_t input_length, const int qbits, const int number_in_blocks, void* output, const size_t output_length);
935
/**
936
 * Dequantize a given memory region of a given datatype / memory residency, from the built-in n-bit palette.
937
 * @param input The input memory region.
938
 * @param datatype The datatype, it can be CCV_64F, CCV_32F or CCV_16F.
939
 * @param memory_type Where the memory resides. It can be either CPU_MEMORY or GPU_MEMORY.
940
 * @param input_length The size of the input in bytes.
941
 * @param qbits How many bits for the palette. Right now only 4 / 5 / 6 / 7 / 8 bits are supported.
942
 * @param number_in_blocks How many elements share a palette.
943
 * @param output The output memory region, it can be CCV_64F, CCV_32F or CCV_16F.
944
 * @param output_length How many elements in the output.
945
 */
946
void ccv_nnc_depalettize(const void* input, const int datatype, const int memory_type, const size_t input_length, const int qbits, const int number_in_blocks, void* output, const size_t output_length);
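/* Usage sketch (illustrative): palettize 1024 FP32 values into a 6-bit palette,
 * 128 elements per block, then restore them. The compressed buffer bound here is
 * just a conservative guess for the sketch, not a documented formula. */
float original[1024], restored[1024];
uint8_t compressed[1024 * sizeof(float)];
const size_t bytes = ccv_nnc_palettize(original, CCV_32F, CCV_TENSOR_CPU_MEMORY,
  1024, 6, 128, compressed, sizeof(compressed));
ccv_nnc_depalettize(compressed, CCV_32F, CCV_TENSOR_CPU_MEMORY,
  bytes, 6, 128, restored, 1024);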
947
948
/** @} */
949
950
/**
951
 * @defgroup level_1_stream Streams
952
 * @{
953
 */
954
955
// Control flow constructs
956
// Heavily based on CUDA's stream / event idea.
957
enum {
958
  CCV_STREAM_CONTEXT_CPU = 0x1, /**< A CPU based stream context (unsupported). */
959
  CCV_STREAM_CONTEXT_GPU = 0x2, /**< A GPU based stream context. */
960
};
961
211k
#define CCV_STREAM_GET_CONTEXT(type) ((type) & 0x3)
962
#define CCV_STREAM_GET_DEVICE(type) CCV_TENSOR_GET_DEVICE(type)
963
44.9k
#define CCV_STREAM_GET_DEVICE_ID(type) CCV_TENSOR_GET_DEVICE_ID(type)
964
3.25k
#define CCV_STREAM_SET_DEVICE_ID(type, device_id) CCV_TENSOR_SET_DEVICE_ID(type, device_id)
965
/**
966
 * Create a new stream context.
967
 * @param type A combination of CPU / GPU and DEVICE_ID.
968
 * @return The newly created stream context.
969
 */
970
CCV_WARN_UNUSED(ccv_nnc_stream_context_t*) ccv_nnc_stream_context_new(const int type);
971
/**
972
 * Get the type of the stream context.
973
 * @param stream_context The stream context we want to inspect.
974
 * @return The type of the stream context.
975
 */
976
CCV_WARN_UNUSED(int) ccv_nnc_stream_context_type(const ccv_nnc_stream_context_t* const stream_context);
977
/**
978
 * Get a stream context local workspace memory. This memory region will be reused
979
 * the next time you call this method on the same stream context.
980
 * @param stream_context The stream context which provides the workspace memory.
981
 * @param workspace_size The size of the workspace memory.
982
 * @param mem The memory type of the said workspace memory (GPU or CPU).
983
 * @return A pointer to the workspace memory.
984
 */
985
CCV_WARN_UNUSED(void*) ccv_nnc_stream_context_get_workspace(ccv_nnc_stream_context_t* const stream_context, const size_t workspace_size, const int mem);
986
/**
987
 * Deallocate any workspace memory on the stream context.
988
 * @param stream The stream context to drain workspace memory.
989
 */
990
void ccv_nnc_stream_context_drain(ccv_nnc_stream_context_t* const stream);
991
/**
992
 * The callback prototype on the stream context.
993
 */
994
typedef void(*ccv_nnc_callback_f)(void* const callback_context);
995
/**
996
 * Add a callback function to be called once the stream executes to that point.
997
 * @param stream The stream context to add callback.
998
 * @param callback The callback function.
999
 * @param callback_context The context to be called with the callback function.
1000
 */
1001
void ccv_nnc_stream_context_add_callback(ccv_nnc_stream_context_t* const stream, const ccv_nnc_callback_f callback, void* const callback_context);
1002
/**
1003
 * Wait until all tasks submitted (command, graph run etc.) on the stream context
1004
 * have completed.
1005
 * @param stream The stream context to wait.
1006
 */
1007
void ccv_nnc_stream_context_wait(const ccv_nnc_stream_context_t* const stream);
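/* Usage sketch (illustrative): queue a command on a fresh GPU stream, then block
 * until it finishes. cmd / a / b are assumed to exist; TENSOR_LIST is assumed
 * from ccv_nnc_easy.h. */
ccv_nnc_stream_context_t* const stream = ccv_nnc_stream_context_new(CCV_STREAM_CONTEXT_GPU);
ccv_nnc_cmd_exec(cmd, ccv_nnc_no_hint, 0, TENSOR_LIST(a), TENSOR_LIST(b), stream);
ccv_nnc_stream_context_wait(stream); // blocks the host until the stream drains
ccv_nnc_stream_context_free(stream);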
1008
/**
1009
 * The hooks to be called when a stream context is destroyed.
1010
 * At the moment, the stream context will be destroyed at the time
1011
 * ccv_nnc_stream_context_free is called, so there is no tricks.
1012
 * This method is useful because we have some resources associated
1013
 * with stream pointer, hence, would be good to free these resources
1014
 * upon free the stream.
1015
 */
1016
typedef void (*ccv_nnc_stream_context_destructor_f)(const ccv_nnc_stream_context_t* const stream, void* const context);
1017
/**
1018
 * Add a new destructor hook callback when a stream is freed.
1019
 * @param stream The stream to be observed.
1020
 * @param destructor The new destructor callback method.
1021
 * @param context additional context.
1022
 * @return An integer identifier to help remove the hook.
1023
 */
1024
int ccv_nnc_stream_context_add_destructor_hook(ccv_nnc_stream_context_t* const stream, ccv_nnc_stream_context_destructor_f destructor, void* const context);
1025
/**
1026
 * Remove a destructor hook callback.
1027
 * @param stream The stream we observe.
1028
 * @param hook_id The returned integer when calling the add method.
1029
 */
1030
void ccv_nnc_stream_context_remove_destructor_hook(ccv_nnc_stream_context_t* const stream, const int hook_id);
1031
/**
1032
 * Deallocate the stream context.
1033
 * @param stream_context The stream context to be destroyed.
1034
 */
1035
void ccv_nnc_stream_context_free(ccv_nnc_stream_context_t* const stream_context);
1036
/**
1037
 * Set random seed for stream context.
1038
 * @param stream_context The stream context to set the seed. 0 means use the default stream context.
1039
 * @param seed The seed for the stream context.
1040
 */
1041
void ccv_nnc_stream_context_set_seed(ccv_nnc_stream_context_t* const stream_context, uint32_t seed);
1042
/**
1043
 * Generate a uint32_t random number for the stream context.
1044
 * These are usually used as seed for other high-performance random number generators.
1045
 * @param stream_context The stream context associated with random number generation.
1046
 */
1047
uint32_t ccv_nnc_stream_context_genrand_uint32(ccv_nnc_stream_context_t* const stream_context);
1048
1049
/**
1050
 * Opaque pointer to the signal object.
1051
 */
1052
typedef struct ccv_nnc_stream_signal_s ccv_nnc_stream_signal_t;
1053
1054
/**
1055
 * Create a new stream signal.
1056
 * @param type A composed type that denotes whether it is associated with a GPU or CPU stream context, and on which device.
1057
 * @return The newly created stream signal.
1058
 */
1059
CCV_WARN_UNUSED(ccv_nnc_stream_signal_t*) ccv_nnc_stream_signal_new(const int type);
1060
/**
1061
 * Get the type of the stream signal.
1062
 * @param signal The stream signal we want to inspect.
1063
 * @return The type of the stream signal.
1064
 */
1065
CCV_WARN_UNUSED(int) ccv_nnc_stream_signal_type(const ccv_nnc_stream_signal_t* const signal);
1066
/**
1067
 * Emit a signal on a stream.
1068
 * @param stream The stream context where the signal will be emitted.
1069
 * @param signal The signal to be emitted. It has to be on the same device as the stream.
1070
 */
1071
void ccv_nnc_stream_context_emit_signal(ccv_nnc_stream_context_t* const stream, ccv_nnc_stream_signal_t* const signal);
1072
/**
1073
 * Emit a signal on a stream directly. It will be managed by the stream. You have to use it immediately after it is returned.
1074
 * @param stream The stream context where the signal will be emitted.
1075
 * @return The new signal emitted on the stream context.
1076
 */
1077
ccv_nnc_stream_signal_t* ccv_nnc_stream_context_emit_signal_new(ccv_nnc_stream_context_t* const stream);
1078
/**
1079
 * Wait a signal on a stream.
1080
 * @param stream The stream context that will be blocked by the signal.
1081
 * @param signal The signal to be waited on. It can be on a different device than the stream.
1082
 */
1083
void ccv_nnc_stream_context_wait_signal(const ccv_nnc_stream_context_t* const stream, const ccv_nnc_stream_signal_t* const signal);
1084
/**
1085
 * Get the stream context this signal is going to be emitted on.
1086
 * @param signal The signal we want to inspect.
1087
 * @return The most recent stream context you called ccv_nnc_stream_context_emit_signal with.
1088
 */
1089
CCV_WARN_UNUSED(ccv_nnc_stream_context_t*) ccv_nnc_stream_signal_get_emitter(const ccv_nnc_stream_signal_t* const signal);
1090
/**
1091
 * Deallocate the signal.
1092
 * @param signal The signal to be destroyed.
1093
 */
1094
void ccv_nnc_stream_signal_free(ccv_nnc_stream_signal_t* const signal);
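/* Usage sketch (illustrative): order work across two existing streams with a
 * signal, mirroring the CUDA event pattern. stream_a / stream_b are assumed to
 * be two stream contexts of the same GPU type. */
ccv_nnc_stream_signal_t* const sig = ccv_nnc_stream_signal_new(CCV_STREAM_CONTEXT_GPU);
ccv_nnc_stream_context_emit_signal(stream_a, sig);  // producer marks this point
ccv_nnc_stream_context_wait_signal(stream_b, sig);  // consumer waits for the mark
ccv_nnc_stream_signal_free(sig);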
1095
/**
1096
 * Return the number of devices.
1097
 * @param type The type of devices (CCV_NNC_STREAM_CONTEXT_GPU / CCV_NNC_STREAM_CONTEXT_CPU)
1098
 * @return The number of devices.
1099
 */
1100
CCV_WARN_UNUSED(int) ccv_nnc_device_count(const int type);
1101
/**
1102
 * The neighbor discovery function that will be called with the device id.
1103
 */
1104
typedef ccv_nnc_stream_context_t*(*ccv_nnc_stream_context_neighbor_discovery_f)(const int device_id, void* const context);
1105
/**
1106
 * Set the neighbor stream context discovery mechanism. This method exposes how
1107
 * neighbor should be defined per stream context. This method is useful for
1108
 * commands that operate across devices and need to find the correct stream
1109
 * context for these devices. A stream context itself is bound to one device
1110
 * only.
1111
 * @param stream_context The stream context that bounds to a discovery mechanism.
1112
 * @param discovery The neighbor discovery function to invoke.
1113
 * @param context The associated context with the neighbor discovery function.
1114
 */
1115
void ccv_nnc_stream_context_set_neighbor_discovery(ccv_nnc_stream_context_t* const stream_context, ccv_nnc_stream_context_neighbor_discovery_f discovery, void* const context);
1116
/**
1117
 * Find a neighbor stream context on a given device id for current stream context.
1118
 * @param stream_context The stream context for which we will look for neighbors.
1119
 * @param device_id On which device the stream context may exist.
1120
 * @return 0 if no stream context is found. Otherwise, the stream context on that device.
1121
 */
1122
CCV_WARN_UNUSED(ccv_nnc_stream_context_t*) ccv_nnc_stream_context_find_neighbor(ccv_nnc_stream_context_t* const stream_context, const int device_id);
1123
1124
/** @} */
1125
1126
/** @} */
1127
1128
/**
1129
 * @defgroup level_2 Level-2 API
1130
 * @{
1131
 */
1132
1133
/**
1134
 * @defgroup level_2_essentials Essentials
1135
 * @{
1136
 */
1137
1138
enum {
1139
  CCV_NNC_SHORT_DOT_GRAPH = 0x0, /**< Display a simplified graph. */
1140
  CCV_NNC_LONG_DOT_GRAPH  = 0x1, /**< Display a graph that contains all information. */
1141
};
1142
1143
/**
1144
 * Opaque pointer holds the concrete graph representation.
1145
 */
1146
typedef struct ccv_nnc_graph_s ccv_nnc_graph_t;
1147
1148
/**
1149
 * The opaque on-stack object that holds a reference to an execution node within a graph.
1150
 */
1151
typedef struct {
1152
  int32_t d; // This is int because sometimes I piggy-back on negatives to carry out some internal computations.
1153
  ccv_nnc_graph_t* graph;
1154
} ccv_nnc_graph_exec_t;
1155
1156
82.1k
#define CCV_NO_GRAPH_EXEC(exec) ((exec).graph == 0)
1157
1158
/**
1159
 * Create an empty graph.
1160
 * Note that all graph mutation methods are not thread-safe.
1161
 * You should only operate on the graph in a serial fashion.
1162
 * @return An opaque ccv_nnc_graph_t pointer.
1163
 */
1164
CCV_WARN_UNUSED(ccv_nnc_graph_t*) ccv_nnc_graph_new(void);
1165
/**
1166
 * Create a node with specific command execution, as well as its inputs & outputs.
1167
 * Underneath, the graph maintains the backing object for the node, and all you get is
1168
 * an on-stack object to index the backing object from the graph.
1169
 * @param graph The concrete graph.
1170
 * @param cmd The wrapped command.
1171
 * @param hint The hint for this command.
1172
 * @param inputs The input tensors array.
1173
 * @param input_size The size of input tensors array.
1174
 * @param outputs The output tensors array.
1175
 * @param output_size The size of output tensors array.
1176
 * @return An on-stack object that references an execution node.
1177
 */
1178
CCV_WARN_UNUSED(ccv_nnc_graph_exec_t) ccv_nnc_graph_exec_new(ccv_nnc_graph_t* const graph, const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size);
1179
/**
1180
 * Set the command for an existing execution node.
1181
 * @param graph The concrete graph.
1182
 * @param exec The execution node reference.
1183
 * @param cmd The new wrapped command.
1184
 */
1185
void ccv_nnc_graph_exec_set(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, const ccv_nnc_cmd_t cmd);
1186
/**
1187
 * Return the command on an existing execution node.
1188
 * @param graph The concrete graph.
1189
 * @param exec The execution node reference.
1190
 * @return The wrapped command.
1191
 */
1192
CCV_WARN_UNUSED(ccv_nnc_cmd_t) ccv_nnc_graph_exec_cmd(const ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec);
1193
/**
1194
 * Set hint for an existing execution node.
1195
 * @param graph The concrete graph.
1196
 * @param exec The execution node reference.
1197
 * @param hint The new hint.
1198
 */
1199
void ccv_nnc_graph_exec_set_hint(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, const ccv_nnc_hint_t hint);
1200
/**
1201
 * Set input / output tensors for an existing execution node.
1202
 * @param graph The concrete graph.
1203
 * @param exec The execution node reference.
1204
 * @param inputs The input tensors array.
1205
 * @param input_size The size of input tensors array.
1206
 * @param outputs The output tensors array.
1207
 * @param output_size The size of output tensors array.
1208
 */
1209
void ccv_nnc_graph_exec_set_io(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size);
1210
/**
1211
 * Concatenate input graph nodes with an output graph node to create a new graph.
1212
 * @param graph The concrete graph.
1213
 * @param source The execution node reference to connect.
1214
 * @param destination The execution node reference to connect to.
1215
 * @return Non-zero if it cannot concatenate successfully.
1216
 */
1217
int ccv_nnc_graph_exec_concat(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t source, const ccv_nnc_graph_exec_t destination);
1218
/**
1219
 * Disconnect an input graph node from an output graph node in this graph.
1220
 * @param graph The concrete graph.
1221
 * @param source The execution node reference to disconnect.
1222
 * @param destination The execution node reference to disconnect from.
1223
 * @return Non-zero if it cannot disjoin successfully.
1224
 */
1225
int ccv_nnc_graph_exec_disjoin(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t source, const ccv_nnc_graph_exec_t destination);
1226
/**
1227
 * Count the number of execution nodes in the graph.
1228
 * @param graph The concrete graph.
1229
 * @return The number of execution nodes in the graph.
1230
 */
1231
int ccv_nnc_graph_exec_count(const ccv_nnc_graph_t* const graph);
1232
/**
1233
 * Generate output that can be parsed by GraphViz (DOT language).
1234
 * @param graph The concrete graph.
1235
 * @param flags Either CCV_NNC_SHORT_DOT_GRAPH or CCV_NNC_LONG_DOT_GRAPH.
1236
 * @param out The output file stream.
1237
 */
1238
void ccv_nnc_graph_dot(const ccv_nnc_graph_t* const graph, const int flags, FILE* out);
1239
/**
1240
 * Run the autotune function on all execution nodes, and assign back the optimized commands.
1241
 * @param graph The concrete graph.
1242
 * @param max_workspace_size The maximum allowed extra memory usage.
1243
 * @param flags A reserved field for flags.
1244
 * @param sources The source execution nodes to begin. 0 uses default sources.
1245
 * @param source_size The size of source execution nodes.
1246
 * @param destinations The destination execution nodes which we end. 0 uses default destinations.
1247
 * @param destination_size The size of destination execution nodes.
1248
 */
1249
void ccv_nnc_graph_autotune(ccv_nnc_graph_t* const graph, const size_t max_workspace_size, const int flags, const ccv_nnc_graph_exec_t* const sources, const int source_size, const ccv_nnc_graph_exec_t* const destinations, const int destination_size);
1250
/**
1251
 * Make the graph topsorted, i.e., do a topological sort so that when we run the graph, no additional memory will be allocated.
1252
 * Otherwise, when we run the graph, we need to allocate some memory on the heap to facilitate execution.
1253
 * @param graph The concrete graph.
1254
 * @param exec_cvt The execution node assignments will change; you can supply an array to learn about the changes.
1255
 * @param exec_cvt_size The provided conversion array size.
1256
 */
1257
void ccv_nnc_graph_topsort(ccv_nnc_graph_t* const graph, int* const exec_cvt, const int exec_cvt_size);
1258
1259
/**
1260
 * Opaque pointer holds the graph schedule.
1261
 */
1262
typedef struct ccv_nnc_graph_static_schedule_s ccv_nnc_graph_static_schedule_t;
1263
/**
1264
 * Assuming the graph runs from the beginning to the end, allocate an internal schedule object that will
1265
 * run the graph efficiently. It will basically call ccv_nnc_graph_static_schedule_new
1266
 * and save the end result as an internal schedule object on this graph.
1267
 * @param graph The concrete graph.
1268
 * @param stream_type The type of stream context we are going to use.
1269
 * @param max_stream_count The number of stream contexts to be allocated internally.
1270
 */
1271
void ccv_nnc_graph_set_default_static_schedule(ccv_nnc_graph_t* const graph, const int stream_type, const int max_stream_count);
1272
/**
1273
 * Allocate extra streams to make this graph parallel runnable. Note this requires the graph to be topsorted.
1274
 * After this is done, you can schedule a graph either on its default stream, or a new stream with the schedule
1275
 * object.
1276
 * @param graph The concrete graph.
1277
 * @param stream_type The type of stream context we are going to use.
1278
 * @param max_stream_count The number of stream contexts to be allocated internally.
1279
 * @param sources The source execution nodes to begin. 0 uses default sources.
1280
 * @param source_size The size of source execution nodes.
1281
 * @param destinations The destination execution nodes which we end. 0 uses default destinations.
1282
 * @param destination_size The size of destination execution nodes.
1283
 * @return An opaque schedule object that lets the graph know how to run itself efficiently.
1284
 */
1285
CCV_WARN_UNUSED(ccv_nnc_graph_static_schedule_t*) ccv_nnc_graph_static_schedule_new(ccv_nnc_graph_t* const graph, const int stream_type, const int max_stream_count, const ccv_nnc_graph_exec_t* const sources, const int source_size, const ccv_nnc_graph_exec_t* const destinations, const int destination_size);
1286
/**
1287
 * Free a schedule object for a graph.
1288
 * @param schedule The schedule object returned from ccv_nnc_graph_static_schedule_new.
1289
 */
1290
void ccv_nnc_graph_static_schedule_free(ccv_nnc_graph_static_schedule_t* const schedule);
1291
/**
1292
 * Query the default stream for a given graph.
1293
 * @param graph The concrete graph.
1294
 * @return The default stream context.
1295
 */
1296
CCV_WARN_UNUSED(ccv_nnc_stream_context_t*) ccv_nnc_graph_default_stream(const ccv_nnc_graph_t* const graph);
1297
/**
1298
 * Set default sources for a given graph.
1299
 * @param graph The concrete graph.
1300
 * @param sources The source execution nodes to begin.
1301
 * @param source_size The size of source execution nodes.
1302
 */
1303
void ccv_nnc_graph_set_sources(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t* const sources, const int source_size);
1304
/**
1305
 * Get the default source execution nodes pointer.
1306
 * @param graph The concrete graph.
1307
 * @return A pointer to an array of default source execution nodes.
1308
 */
1309
ccv_nnc_graph_exec_t* ccv_nnc_graph_sources(const ccv_nnc_graph_t* const graph);
1310
/**
1311
 * Get the number of default source execution nodes.
1312
 * @param graph The concrete graph.
1313
 * @return The number of default source execution nodes.
1314
 */
1315
int ccv_nnc_graph_source_size(const ccv_nnc_graph_t* const graph);
1316
/**
1317
 * Set default destinations for a given graph.
1318
 * @param graph The concrete graph.
1319
 * @param destinations The destination execution nodes which we end.
1320
 * @param destination_size The size of destination execution nodes.
1321
 */
1322
void ccv_nnc_graph_set_destinations(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t* const destinations, const int destination_size);
1323
/**
1324
 * Get the default destination execution nodes pointer.
1325
 * @param graph The concrete graph.
1326
 * @return A pointer to an array of default destination execution nodes.
1327
 */
1328
ccv_nnc_graph_exec_t* ccv_nnc_graph_destinations(const ccv_nnc_graph_t* const graph);
1329
/**
1330
 * Get the number of default destination execution nodes.
1331
 * @param graph The concrete graph.
1332
 * @return The number of default destination execution nodes.
1333
 */
1334
int ccv_nnc_graph_destination_size(const ccv_nnc_graph_t* const graph);
1335
/**
1336
 * Deallocate this graph and its relevant auxiliary objects (opaque to the user).
1337
 * @param graph The concrete graph.
1338
 */
1339
void ccv_nnc_graph_free(ccv_nnc_graph_t* const graph);
1340
/**
1341
 * Opaque pointer to the tape of tensors. The tape is used by the while loop.
1342
 */
1343
typedef struct ccv_nnc_tensor_tape_s ccv_nnc_tensor_tape_t;
1344
/**
1345
 * Execute a computation graph with all bells and whistles. You need to supply a tensor tape if it contains a backward pass
1347
 * for while loops or branches. With a tensor tape, the tensors are versioned, so you can "backpropagate through time".
1347
 * @param graph The concrete graph.
1348
 * @param flags A reserved field for flags.
1349
 * @param sources The source execution nodes array.
1350
 * @param source_size The size of source execution nodes array. 0 uses default sources.
1351
 * @param destinations The destination execution nodes array.
1352
 * @param destination_size The size of destination execution nodes array. 0 uses default destinations.
1353
 * @param tensor_tape An opaque tensor tape object to "backpropagate through time".
1354
 * @param stream_context Which stream this graph will be executed upon.
1355
 * @return CCV_NNC_EXEC_SUCCESS if succeed.
1356
 */
1357
int ccv_nnc_graph_run(ccv_nnc_graph_t* const graph, const int flags, const ccv_nnc_graph_exec_t* const sources, const int source_size, const ccv_nnc_graph_exec_t* const destinations, const int destination_size, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const stream_context);
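/* Usage sketch (illustrative): a two-node concrete graph, e = (a * b) + d, wired,
 * topsorted and run end-to-end on the default stream. CMD_EWPROD_FORWARD,
 * CMD_EWSUM_FORWARD and TENSOR_LIST are assumed from the convenience headers,
 * and a / b / c / d / e are assumed to be pre-allocated tensors. */
ccv_nnc_graph_t* const graph = ccv_nnc_graph_new();
ccv_nnc_graph_exec_t prod = ccv_nnc_graph_exec_new(graph, CMD_EWPROD_FORWARD(),
  ccv_nnc_no_hint, TENSOR_LIST(a, b), TENSOR_LIST(c));
ccv_nnc_graph_exec_t sum = ccv_nnc_graph_exec_new(graph, CMD_EWSUM_FORWARD(),
  ccv_nnc_no_hint, TENSOR_LIST(c, d), TENSOR_LIST(e));
ccv_nnc_graph_exec_concat(graph, prod, sum); // prod must run before sum
ccv_nnc_graph_set_sources(graph, &prod, 1);
ccv_nnc_graph_set_destinations(graph, &sum, 1);
ccv_nnc_graph_topsort(graph, 0, 0);
ccv_nnc_graph_run(graph, 0, 0, 0, 0, 0, 0, 0); // default sources / destinations
ccv_nnc_graph_free(graph);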
1358
/**
1359
 * Execute a computation graph with all bells and whistles. You need to supply a tensor tape if it contains a backward pass
1360
 * for while loops or branches. With a tensor tape, the tensors are versioned, so you can "backpropagate through time".
1361
 * Compared with the ccv_nnc_graph_run method, this method doesn't take sources / destinations nodes; rather, it takes the
1362
 * schedule object.
1363
 * @param graph The concrete graph.
1364
 * @param flags A reserved field for flags.
1365
 * @param schedule The schedule object specified the sources / destinations and how to efficiently run this.
1366
 * @param tensor_tape An opaque tensor tape object to "backpropagate through time".
1367
 * @param stream_context Which stream this graph will be executed upon.
1368
 * @return CCV_NNC_EXEC_SUCCESS if succeed.
1369
 */
1370
int ccv_nnc_graph_run_with_schedule(ccv_nnc_graph_t* const graph, const int flags, const ccv_nnc_graph_static_schedule_t* const schedule, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const stream_context);
1371
/**
1372
 * Cancel execution of a graph. You need to handle synchronization yourself when calling this method to make
1373
 * sure the graph is currently executing when cancelling. This method sets a flag internally, and the
1374
 * graph execution checks that flag when pushing compute onto the computation device, aborting if it is cancelled.
1375
 * When you call ccv_nnc_graph_run again, this cancellation is no longer in effect; you need to call cancel again.
1376
 * @param graph The concrete graph.
1377
 */
1378
void ccv_nnc_graph_cancel(ccv_nnc_graph_t* const graph);
1379
1380
/** @} */
1381
1382
/**
1383
 * @defgroup level_2_others Others
1384
 * @{
1385
 */
1386
1387
/**
1388
 * Set input / output flags for an existing execution node.
1389
 * This must be called after set_io; it sets additional flags for tensors related to this exec.
1390
 * @param graph The concrete graph.
1391
 * @param exec The execution node reference.
1392
 * @param input_flags The input flags array.
1393
 * @param input_flag_size The size of the input flags array; it should be the same as the input tensors array (or 0).
1394
 * @param output_flags The output flags array.
1395
 * @param output_flag_size The size of the output flags array; it should be the same as the output tensors array (or 0).
1396
 */
1397
void ccv_nnc_graph_exec_set_io_flags(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, const int* const input_flags, const int input_flag_size, const int* const output_flags, const int output_flag_size);
1398
/**
1399
 * Set the pair reference for exec. In backward pass, an execution node's pair node is the forward pass node.
1400
 * @param graph The concrete graph.
1401
 * @param exec The execution node reference.
1402
 * @param pair_exec The pair execution node reference.
1403
 */
1404
void ccv_nnc_graph_exec_pair_with(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, const ccv_nnc_graph_exec_t pair_exec);
1405
/**
1406
 * Add tensor pair that can be used to "carry over". (carry over: passing a tensor from current loop to the next loop).
1407
 * @param graph The concrete graph.
1408
 * @param from The tensor we have output in this loop.
1409
 * @param to The tensor we will use as input in the next loop.
1410
 */
1411
void ccv_nnc_graph_add_carry_over(ccv_nnc_graph_t* const graph, const ccv_nnc_tensor_t* const from, const ccv_nnc_tensor_t* const to);
1412
/**
1413
 * Updates are the tensors that are not directly involved in the computation, but whose pointers need to get updated
1414
 * along with this exec, thus need to be "updated" onto other exec nodes.
1415
 * @param graph The concrete graph.
1416
 * @param exec The execution node reference.
1417
 * @param update The tensor need to be updated along the execution node.
1418
 */
1419
void ccv_nnc_graph_exec_add_as_affected(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, ccv_nnc_tensor_t* const update);
1420
1421
/** @} */
1422
1423
/** @} */
1424
1425
/**
1426
 * @defgroup level_3 Level-3 API
1427
 * @{
1428
 */
1429
1430
/**
1431
 * @defgroup level_3_essentials Essentials
1432
 * @{
1433
 */
1434
1435
/**
1436
 * Opaque pointer to the symbolic graph object.
1437
 */
1438
typedef struct ccv_nnc_symbolic_graph_s ccv_nnc_symbolic_graph_t;
1439
1440
/**
1441
 * Opaque pointer to an arena of allocated tensors.
1442
 */
1443
typedef struct ccv_nnc_tensor_arena_s ccv_nnc_tensor_arena_t;
1444
1445
/**
1446
 * Opaque pointer to an arena of allocated execs.
1447
 */
1448
typedef struct ccv_nnc_graph_exec_arena_s ccv_nnc_graph_exec_arena_t;
1449
1450
/**
1451
 * An on-stack object that references a tensor symbol in the symbolic graph.
1452
 */
1453
typedef struct {
1454
  int32_t d;
1455
  const ccv_nnc_symbolic_graph_t* graph;
1456
} ccv_nnc_tensor_symbol_t;
1457
1458
/**
1459
 * An on-stack object that references an execution node symbol in the symbolic graph.
1460
 */
1461
typedef struct {
1462
  int32_t d;
1463
  const ccv_nnc_symbolic_graph_t* graph;
1464
} ccv_nnc_graph_exec_symbol_t;
1465
1466
enum {
1467
  CCV_NNC_TENSOR_SYMBOL_INIT_ZEROS = 0x01, /**< Initialize underlying tensor for the symbol with zeros */
1468
  CCV_NNC_TENSOR_SYMBOL_INIT_ONES = 0x02, /**< Initialize underlying tensor for the symbol with ones */
1469
  CCV_NNC_TENSOR_SYMBOL_TAPE_VAR = 0x04, /**< Mark this as a tape variable (it cannot be folded, will contain flag CCV_TAPE_ALLOC) */
1470
  // The one below is special.
1471
  CCV_NNC_TENSOR_SYMBOL_DEAD = 0x80000000, /**< Mark this tensor symbol as dead, any future usage will cause assertion */
1472
};
1473
1474
147k
#define CCV_NNC_TENSOR_SYMBOL_IS_DEAD(x) ((x) & CCV_NNC_TENSOR_SYMBOL_DEAD)
1475
1476
enum {
1477
  CCV_NNC_GRAPH_EXEC_DEAD = 0x1, /**< Mark this node as dead. */
1478
  CCV_NNC_GRAPH_EXEC_P_WHILE = 0x10, /**< Mark this node keyword is while */
1479
  CCV_NNC_GRAPH_EXEC_CASE_OF = 0x20, /**< Mark this node keyword is case_of */
1480
  CCV_NNC_GRAPH_EXEC_DISABLE_OPT = 0x10000, /**< Mark this node to avoid optimization pass. */
1481
};
1482
1483
450k
#define CCV_NNC_GRAPH_EXEC_IS_DEAD(x) ((x) & CCV_NNC_GRAPH_EXEC_DEAD)
1484
25.1k
#define CCV_NNC_GRAPH_REF(x) ((x)->_heap_graph_ref ? 
(x)->_heap_graph_ref178
:
(x)->_inline_graph_ref24.9k
)
1485
1486
enum {
1487
  CCV_NNC_NO_TENSOR_SYMBOL = -1, /**< Special symbol reference for no tensor symbol. */
1488
  CCV_NNC_WHILE_COUNT_TENSOR_SYMBOL = -2, /**< Special symbol reference for while loop count tensor. */
1489
};
1490
1491
enum {
1492
  CCV_NNC_NO_GRAPH_EXEC_SYMBOL = -1, /**< Special symbol reference for no exec symbol. */
1493
};
1494
1495
1496
enum {
1497
  CCV_NNC_SYMBOL_TENSOR, /**< Identifier for tensor symbol */
1498
  CCV_NNC_SYMBOL_TENSOR_ALIAS, /**< Identifier for tensor alias symbol */
1499
  CCV_NNC_SYMBOL_GRAPH_EXEC, /**< Identifier for exec symbol */
1500
};
1501
1502
22
#define CCV_NNC_IS_WHILE_COUNT_TENSOR_SYMBOL(d) (((uint32_t)(d) & 0xf) == 0xe)
1503
1504
/**
1505
 * A data structure to pass in a pair of tensor symbols.
1506
 */
1507
typedef struct {
1508
  ccv_nnc_tensor_symbol_t source; /**< The 'from' tensor symbol. */
1509
  ccv_nnc_tensor_symbol_t destination; /**< The 'to' tensor symbol. */
1510
} ccv_nnc_tensor_symbol_map_t;
1511
1512
/**
1513
 * Create a new empty symbolic graph. It is an opaque data structure that maintains the whole graph of computation in its symbolic form.
1514
 * Note that all graph mutation methods are not thread-safe. You should only operate on the graph in a serial fashion.
1515
 */
1516
CCV_WARN_UNUSED(ccv_nnc_symbolic_graph_t*) ccv_nnc_symbolic_graph_new(void);
1517
/**
1518
 * Create a tensor symbol (thus, with no actual memory space allocation) in a symbolic graph.
1519
 * @param graph The symbolic graph.
1520
 * @param info The tensor parameters.
1521
 * @param name The name of the tensor symbol, it is optional.
1522
 * @return A tensor symbol reference.
1523
 */
1524
CCV_WARN_UNUSED(ccv_nnc_tensor_symbol_t) ccv_nnc_tensor_symbol_new(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_param_t info, const char* const name);
1525
/**
1526
 * Create an alias to the tensor symbol as a tensor view (thus, pointing to the same memory region, but with different header info and offset).
1527
 * @param graph The symbolic graph.
1528
 * @param tensor_symbol The tensor symbol we are going to reference to.
1529
 * @param ofs The offset on each of the dimension.
1530
 * @param stride The stride of each dimension.
1531
 * @param info The tensor parameters for the new alias.
1532
 * @param name The name of the tensor symbol alias, it is optional.
1533
 * @return A tensor symbol alias reference.
1534
 */
1535
CCV_WARN_UNUSED(ccv_nnc_tensor_symbol_t) ccv_nnc_tensor_symbol_alias_new(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor_symbol, const int ofs[CCV_NNC_MAX_DIM_ALLOC], const int stride[CCV_NNC_MAX_DIM_ALLOC], const ccv_nnc_tensor_param_t info, const char* const name);
1536
/**
1537
 * Manually delete a tensor symbol off the symbolic graph.
1538
 * @param graph The symbolic graph.
1539
 * @param tensor The tensor symbol reference.
1540
 */
1541
void ccv_nnc_tensor_symbol_free(ccv_nnc_symbolic_graph_t* const graph, ccv_nnc_tensor_symbol_t tensor);
1542
/**
1543
 * Create a graph execution node (an operation that takes a set of inputs and generates a set of outputs).
1544
 * @param graph The symbolic graph.
1545
 * @param cmd The wrapped command.
1546
 * @param inputs The input tensor symbols array.
1547
 * @param input_size The size of input tensor symbols array.
1548
 * @param outputs The output tensor symbols array.
1549
 * @param output_size The size of output tensor symbols array.
1550
 * @param name The name of this execution node, optional.
1551
 * @return The execution node symbol reference.
1552
 */
1553
ccv_nnc_graph_exec_symbol_t ccv_nnc_graph_exec_symbol_new(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_cmd_t cmd, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, const ccv_nnc_tensor_symbol_t* const outputs, const int output_size, const char* const name);
1554
/**
1555
 * ccv_nnc_graph_exec_symbol_new defaults to using `ccv_nnc_hint_auto` to find the best hints for a set of inputs / outputs.
1556
 * However, you can also set your own hints.
1557
 * @param graph The symbolic graph.
1558
 * @param exec The execution node symbol reference.
1559
 * @param hint The hint for the command.
1560
 */
1561
void ccv_nnc_graph_exec_symbol_set_hint(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t exec, const ccv_nnc_hint_t hint);
1562
/**
1563
 * Manually delete a exec symbol off the symbolic graph.
1564
 * @param graph The symbolic graph.
1565
 * @param symbol The execution node symbol reference.
1566
 */
1567
void ccv_nnc_graph_exec_symbol_free(ccv_nnc_symbolic_graph_t* const graph, ccv_nnc_graph_exec_symbol_t symbol);
1568
enum {
1569
  CCV_NNC_AUTOGEN_ALL_EXECS = 0x1, /**< Automatic concatenation for all execution nodes */
1570
  CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS = 0x2, /**< Automatically find all source and destination nodes. */
1571
};
1572
/**
1573
 * Automatic concatenate these nodes together based on its inputs / outputs.
1574
 * Imagining this is to generate the execution flow based on input tensors and output tensors.
1575
 * nil for execs and 0 for exec_size means to loop over all the execs on the graph and autogen.
1576
 * @param graph The symbolic graph.
1577
 * @param execs The execution nodes array.
1578
 * @param exec_size The size of execution nodes array.
1579
 * @param flags The flags determines what operations to perform when concatenating.
1580
 * @return non-zero if cannot figure out.
1581
 */
1582
int ccv_nnc_graph_exec_symbol_autogen(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t* const execs, const int exec_size, const int flags);
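// Illustrative sketch (not part of the header): let autogen wire the edges
// from tensor dependencies alone. CMD_EWSUM_FORWARD() and CPU_TENSOR_NHWC are
// assumed helpers from ccv_nnc_easy.h.
static inline void ccv_nnc_example_autogen(ccv_nnc_symbolic_graph_t* const graph)
{
  const ccv_nnc_tensor_symbol_t a = ccv_nnc_tensor_symbol_new(graph, CPU_TENSOR_NHWC(32F, 1), "a");
  const ccv_nnc_tensor_symbol_t b = ccv_nnc_tensor_symbol_new(graph, CPU_TENSOR_NHWC(32F, 1), "b");
  const ccv_nnc_tensor_symbol_t c = ccv_nnc_tensor_symbol_new(graph, CPU_TENSOR_NHWC(32F, 1), "c");
  const ccv_nnc_tensor_symbol_t d = ccv_nnc_tensor_symbol_new(graph, CPU_TENSOR_NHWC(32F, 1), "d");
  const ccv_nnc_tensor_symbol_t sum0_inputs[] = {a, b};
  ccv_nnc_graph_exec_symbol_new(graph, CMD_EWSUM_FORWARD(), sum0_inputs, 2, &c, 1, "c = a + b");
  const ccv_nnc_tensor_symbol_t sum1_inputs[] = {c, c};
  ccv_nnc_graph_exec_symbol_new(graph, CMD_EWSUM_FORWARD(), sum1_inputs, 2, &d, 1, "d = c + c");
  // The first node writes c and the second reads c, so autogen concatenates
  // them and marks the first as a source, the second as a destination.
  ccv_nnc_graph_exec_symbol_autogen(graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS);
}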
/**
 * Set the default sources for a symbolic graph.
 * @param graph The symbolic graph.
 * @param sources The source execution nodes array.
 * @param source_size The size of the source execution nodes array.
 */
void ccv_nnc_symbolic_graph_set_sources(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size);
/**
 * Add one node to the default sources for a symbolic graph.
 * @param graph The symbolic graph.
 * @param source The source execution node.
 */
void ccv_nnc_symbolic_graph_add_source(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t source);
/**
 * Get the pointer to the default sources.
 * @param graph The symbolic graph.
 * @return The pointer to the source execution nodes array.
 */
ccv_nnc_graph_exec_symbol_t* ccv_nnc_symbolic_graph_sources(const ccv_nnc_symbolic_graph_t* const graph);
/**
 * Get the size of the default source nodes array.
 * @param graph The symbolic graph.
 * @return The size of the default source nodes array.
 */
int ccv_nnc_symbolic_graph_source_size(const ccv_nnc_symbolic_graph_t* const graph);
/**
 * Set the default destinations for a symbolic graph.
 * @param graph The symbolic graph.
 * @param destinations The destination execution nodes array.
 * @param destination_size The size of the destination execution nodes array.
 */
void ccv_nnc_symbolic_graph_set_destinations(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size);
/**
 * Add one node to the default destinations for a symbolic graph.
 * @param graph The symbolic graph.
 * @param destination The destination execution node.
 */
void ccv_nnc_symbolic_graph_add_destination(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t destination);
/**
 * Get the pointer to the default destinations.
 * @param graph The symbolic graph.
 * @return The pointer to the destination execution nodes array.
 */
ccv_nnc_graph_exec_symbol_t* ccv_nnc_symbolic_graph_destinations(const ccv_nnc_symbolic_graph_t* const graph);
/**
 * Get the size of the default destination nodes array.
 * @param graph The symbolic graph.
 * @return The size of the default destination nodes array.
 */
int ccv_nnc_symbolic_graph_destination_size(const ccv_nnc_symbolic_graph_t* const graph);
/**
 * Generate output that can be parsed by GraphViz (DOT language).
 * @param graph The symbolic graph.
 * @param flags Either CCV_NNC_SHORT_DOT_GRAPH or CCV_NNC_LONG_DOT_GRAPH.
 * @param out The output file stream.
 */
void ccv_nnc_symbolic_graph_dot(const ccv_nnc_symbolic_graph_t* const graph, const int flags, FILE* out);

/**
 * The data structure to wrap a tensor symbol and a concrete tensor together.
 */
typedef struct {
  ccv_nnc_tensor_symbol_t symbol;
  const ccv_nnc_tensor_t* tensor;
} ccv_nnc_tensor_bind_t;

typedef struct {
  void* (*alloc)(const int type, const int pinned_mem /* Currently only used to annotate CCV_TENSOR_PINNED_MEM, future can be expanded to generic flags */, const size_t size, void* const arg);
  void (*free)(void* const ptr, void* const arg);
} ccv_nnc_symbolic_graph_compile_allocator_vtab_t;

typedef struct {
  const ccv_nnc_symbolic_graph_compile_allocator_vtab_t* isa;
  struct {
    void* alloc;
    void* free;
  } context;
} ccv_nnc_symbolic_graph_compile_allocator_t;

typedef struct {
  ccv_nnc_symbolic_graph_compile_allocator_t allocator;
} ccv_nnc_symbolic_graph_compile_param_t;
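// Illustrative sketch (not part of the header): a pass-through CPU allocator
// plugged into the compile parameters. Assumes <stdlib.h>; a real allocator
// would dispatch on `type` for GPU-backed tensors.
static void* ccv_nnc_example_alloc(const int type, const int pinned_mem, const size_t size, void* const arg)
{
  (void)type; (void)pinned_mem; (void)arg;
  return malloc(size);
}
static void ccv_nnc_example_dealloc(void* const ptr, void* const arg)
{
  (void)arg;
  free(ptr);
}
static const ccv_nnc_symbolic_graph_compile_allocator_vtab_t ccv_nnc_example_allocator_isa = {
  .alloc = ccv_nnc_example_alloc,
  .free = ccv_nnc_example_dealloc,
};
// Usage sketch: pass this through the compile_params argument below.
// ccv_nnc_symbolic_graph_compile_param_t compile_params = {
//   .allocator = { .isa = &ccv_nnc_example_allocator_isa, .context = { .alloc = 0, .free = 0 } }
// };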

/**
 * Compile a symbolic graph into a graph that can be executed, and a set of tensors (an opaque data structure, the tensor arena) is allocated based on which tensor symbols are the inputs and which are the outputs. The tensor allocation is done to minimize the required storage.
 * tensor_binds provides custom bindings for these tensors. You are still responsible for managing the life-time of these tensors.
 * outputs marks the tensor symbols that need to be kept until the end of the graph.
 * @param graph The symbolic graph.
 * @param compile_params A ccv_nnc_symbolic_graph_compile_param_t struct that defines compilation parameters.
 * @param tensor_binds The binding array (a tensor symbol and a concrete tensor). We replace everywhere that uses the tensor symbol with the concrete tensor.
 * @param tensor_bind_size The size of the binding array.
 * @param outputs The output tensor symbols that we want to keep the value for.
 * @param output_size The size of the output tensor symbols array.
 * @param sources The sources for the graph.
 * @param source_size The size of the sources array. 0 to use default sources.
 * @param destinations The destinations for the graph.
 * @param destination_size The size of the destinations array. 0 to use default destinations.
 * @param graph_ref The pointer to store the generated concrete graph.
 * @param tensor_arena_ref The pointer to store ccv_nnc_tensor_arena_t.
 * @param graph_exec_arena_ref The pointer to store ccv_nnc_graph_exec_arena_t.
 */
void ccv_nnc_symbolic_graph_compile(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_symbolic_graph_compile_param_t compile_params, const ccv_nnc_tensor_bind_t* const tensor_binds, const int tensor_bind_size, const ccv_nnc_tensor_symbol_t* const outputs, const int output_size, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size, ccv_nnc_graph_t** const graph_ref, ccv_nnc_tensor_arena_t** const tensor_arena_ref, ccv_nnc_graph_exec_arena_t** const graph_exec_arena_ref);
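// Illustrative sketch (not part of the header): build c = a + b symbolically,
// compile, run, and tear down. CMD_EWSUM_FORWARD() and CPU_TENSOR_NHWC are
// assumed helpers from ccv_nnc_easy.h; ccv_nnc_default_compile_params and
// ccv_nnc_graph_run are assumed to be declared elsewhere in this header.
static inline void ccv_nnc_example_compile_and_run(void)
{
  ccv_nnc_symbolic_graph_t* const symbolic_graph = ccv_nnc_symbolic_graph_new();
  const ccv_nnc_tensor_symbol_t a = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 1), "a");
  const ccv_nnc_tensor_symbol_t b = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 1), "b");
  const ccv_nnc_tensor_symbol_t c = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 1), "c");
  const ccv_nnc_tensor_symbol_t inputs[] = {a, b};
  ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_EWSUM_FORWARD(), inputs, 2, &c, 1, "sum");
  ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS);
  ccv_nnc_graph_t* graph = 0;
  ccv_nnc_tensor_arena_t* tensor_arena = 0;
  ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0;
  ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, 0, 0, &c, 1,
    ccv_nnc_symbolic_graph_sources(symbolic_graph), ccv_nnc_symbolic_graph_source_size(symbolic_graph),
    ccv_nnc_symbolic_graph_destinations(symbolic_graph), ccv_nnc_symbolic_graph_destination_size(symbolic_graph),
    &graph, &tensor_arena, &graph_exec_arena);
  ccv_nnc_tensor_from_symbol(tensor_arena, a)->data.f32[0] = 1; // Declared below in this header.
  ccv_nnc_tensor_from_symbol(tensor_arena, b)->data.f32[0] = 2;
  ccv_nnc_graph_run(graph, 0, 0, 0, 0, 0, 0, 0); // Assumed Level-2 API; runs from default sources to destinations.
  // ccv_nnc_tensor_from_symbol(tensor_arena, c)->data.f32[0] is now 3.
  ccv_nnc_graph_free(graph);
  ccv_nnc_tensor_arena_free(tensor_arena);
  ccv_nnc_graph_exec_arena_free(graph_exec_arena);
  ccv_nnc_symbolic_graph_free(symbolic_graph);
}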
/**
 * Free the symbolic graph and its associated memory. Note that if you compiled a graph / tensor arena out of this symbolic graph, those won't be free'd.
 * @param graph The symbolic graph.
 */
void ccv_nnc_symbolic_graph_free(ccv_nnc_symbolic_graph_t* const graph);
/**
 * Find the corresponding tensor by a symbol from the tensor arena.
 * @param tensor_arena The tensor arena object generated through compilation.
 * @param symbol The tensor symbol reference. Because the tensor symbol reference is on the stack, it can still be used even after the original symbolic graph is free'd.
 * @return A concrete tensor from the tensor arena.
 */
CCV_WARN_UNUSED(ccv_nnc_tensor_t*) ccv_nnc_tensor_from_symbol(const ccv_nnc_tensor_arena_t* const tensor_arena, const ccv_nnc_tensor_symbol_t symbol);
/**
 * Bind a tensor to a symbol. You are still responsible for managing the life-time of the tensor, to make sure it is not freed until everything is done.
 * @param tensor_arena The tensor arena object generated through compilation.
 * @param symbol The tensor symbol reference. Because the tensor symbol reference is on the stack, it can still be used even after the original symbolic graph is free'd.
 * @param tensor The new tensor to bind to.
 */
void ccv_nnc_tensor_bind_symbol(ccv_nnc_tensor_arena_t* const tensor_arena, const ccv_nnc_tensor_symbol_t symbol, const ccv_nnc_tensor_t* const tensor);
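// Illustrative sketch (not part of the header): swap in an externally managed
// input tensor between runs. ccv_nnc_tensor_new / ccv_nnc_tensor_free are the
// Level-1 tensor APIs declared earlier in this header; CPU_TENSOR_NHWC is an
// assumed helper from ccv_nnc_easy.h.
static inline void ccv_nnc_example_bind(ccv_nnc_tensor_arena_t* const tensor_arena, const ccv_nnc_tensor_symbol_t a)
{
  ccv_nnc_tensor_t* const new_input = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 1), 0);
  new_input->data.f32[0] = 42;
  // Everywhere the compiled graph would read a's arena tensor, it now reads new_input.
  ccv_nnc_tensor_bind_symbol(tensor_arena, a, new_input);
  // ... run the graph ...
  ccv_nnc_tensor_arena_clear_bindings(tensor_arena); // The caller still owns new_input.
  ccv_nnc_tensor_free(new_input);
}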
/**
 * Clear existing bindings on the tensor arena.
 * @param tensor_arena The tensor arena object generated through compilation to clear bindings on.
 */
void ccv_nnc_tensor_arena_clear_bindings(ccv_nnc_tensor_arena_t* const tensor_arena);
/**
 * Free the data buffer of the tensor arena.
 * @param tensor_arena The tensor arena object generated through compilation.
 */
void ccv_nnc_tensor_arena_buffer_free(ccv_nnc_tensor_arena_t* const tensor_arena);
/**
 * Free the opaque tensor arena structure.
 * @param tensor_arena The tensor arena object generated through compilation.
 */
void ccv_nnc_tensor_arena_free(ccv_nnc_tensor_arena_t* const tensor_arena);
/**
 * Find the corresponding graph exec by an exec symbol from the graph exec arena.
 * @param graph_exec_arena The graph execution node arena object generated through compilation.
 * @param symbol The execution node symbol reference. Because the execution node symbol reference is on the stack, it can still be used even after the original symbolic graph is free'd.
 * @return An execution node reference to the concrete graph.
 */
CCV_WARN_UNUSED(ccv_nnc_graph_exec_t) ccv_nnc_graph_exec_from_symbol(const ccv_nnc_graph_exec_arena_t* const graph_exec_arena, const ccv_nnc_graph_exec_symbol_t symbol);
/**
 * Return the node that can drive all the source nodes from the compilation.
 * @param graph_exec_arena The graph execution node arena object generated through compilation.
 * @return An execution node reference that is the source.
 */
CCV_WARN_UNUSED(ccv_nnc_graph_exec_t) ccv_nnc_graph_exec_source(const ccv_nnc_graph_exec_arena_t* const graph_exec_arena);
/**
 * Return the node that can drain all the destination nodes from the compilation.
 * @param graph_exec_arena The graph execution node arena object generated through compilation.
 * @return An execution node reference that is the destination.
 */
CCV_WARN_UNUSED(ccv_nnc_graph_exec_t) ccv_nnc_graph_exec_destination(const ccv_nnc_graph_exec_arena_t* const graph_exec_arena);
/**
 * Free the opaque graph exec arena structure.
 * @param graph_exec_arena The graph execution node arena object generated through compilation.
 */
void ccv_nnc_graph_exec_arena_free(ccv_nnc_graph_exec_arena_t* const graph_exec_arena);
/**
 * Write the symbolic graph to disk, along with some binding tensors.
 * @param graph The symbolic graph.
 * @param tensor_binds The binding array (pairs of a tensor symbol and a concrete tensor).
 * @param tensor_bind_size The size of the binding array.
 * @param fn The file name.
 */
void ccv_nnc_symbolic_graph_write(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_bind_t* const tensor_binds, const int tensor_bind_size, const char* const fn);
/**
 * Read the symbolic graph from disk, with some binding tensors.
 * @param fn The file name.
 * @param graph_ref The pointer to store the symbolic graph.
 * @param tensor_binds_ref The pointer to store the binding array.
 * @param tensor_bind_size_ref The pointer to store the size of the binding array.
 */
void ccv_nnc_symbolic_graph_read(const char* const fn, ccv_nnc_symbolic_graph_t** const graph_ref, ccv_nnc_tensor_bind_t** const tensor_binds_ref, int* const tensor_bind_size_ref);
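// Illustrative sketch (not part of the header): persist a symbolic graph and
// load it back. The file name is hypothetical.
static inline void ccv_nnc_example_write_read(const ccv_nnc_symbolic_graph_t* const graph)
{
  ccv_nnc_symbolic_graph_write(graph, 0, 0, "/tmp/graph.bin"); // No binding tensors in this sketch.
  ccv_nnc_symbolic_graph_t* loaded_graph = 0;
  ccv_nnc_tensor_bind_t* tensor_binds = 0;
  int tensor_bind_size = 0;
  ccv_nnc_symbolic_graph_read("/tmp/graph.bin", &loaded_graph, &tensor_binds, &tensor_bind_size);
  ccv_nnc_symbolic_graph_free(loaded_graph);
}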

/**
 * The format callback function. Note that these are all integer ids. They can be filled into
 * ccv_nnc_graph_exec_symbol_t.d or ccv_nnc_tensor_symbol_t.d.
 * @param graph The symbolic graph.
 * @param node The id for the node. It is unique in the graph.
 * @param name The name for the node. It is either NULL or a \0 terminated string.
 * @param cmd The associated command for this node.
 * @param flags The flags that help to identify whether it is a sub-graph, and which type it is (P_WHILE or CASE_OF).
 * @param incomings The incoming nodes for execution.
 * @param incoming_size The number of incoming nodes for execution.
 * @param outgoings The outgoing nodes for execution.
 * @param outgoing_size The number of outgoing nodes for execution.
 * @param inputs The input tensor symbols.
 * @param input_size The number of the input tensor symbols.
 * @param outputs The output tensor symbols.
 * @param output_size The number of the output tensor symbols.
 * @param context The context passed through ccv_nnc_symbolic_graph_format.
 */
typedef void(*ccv_nnc_symbolic_graph_format_f)(const ccv_nnc_symbolic_graph_t* const graph, const int node, const char* const name, const ccv_nnc_cmd_t cmd, const int flags, const int* const incomings, const int incoming_size, const int* const outgoings, const int outgoing_size, const int* const inputs, const int input_size, const int* const outputs, const int output_size, void* const context);
/**
 * Provide a hook for the upper level to do custom formatting of a given symbolic graph. You can
 * implement logic to format the graph into protobuf, or json, or do persistence. However, this
 * is not the method for you to visit the graph or to do mutations on it. This function doesn't
 * recurse into sub-graphs. You need to inspect each node to know if it is a sub-graph and
 * handle it accordingly.
 * @param graph The symbolic graph.
 * @param sources The sources for the graph.
 * @param source_size The size of the sources array. 0 to use default sources.
 * @param destinations The destinations for the graph.
 * @param destination_size The size of the destinations array. 0 to use default destinations.
 * @param format_fn The format callback to be called on every node.
 * @param context The context that will be passed to the callback.
 */
void ccv_nnc_symbolic_graph_format(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size, const ccv_nnc_symbolic_graph_format_f format_fn, void* const context);
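// Illustrative sketch (not part of the header): a format callback that dumps
// one line per node; the context smuggles in a FILE*. Assumes <stdio.h>.
static void ccv_nnc_example_format_node(const ccv_nnc_symbolic_graph_t* const graph, const int node, const char* const name, const ccv_nnc_cmd_t cmd, const int flags, const int* const incomings, const int incoming_size, const int* const outgoings, const int outgoing_size, const int* const inputs, const int input_size, const int* const outputs, const int output_size, void* const context)
{
  FILE* const out = (FILE*)context;
  fprintf(out, "node %d (%s): %d incoming, %d outgoing, %d inputs, %d outputs\n", node, name ? name : "", incoming_size, outgoing_size, input_size, output_size);
}
// Usage: ccv_nnc_symbolic_graph_format(graph, 0, 0, 0, 0, ccv_nnc_example_format_node, stdout);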

/** @} */

/**
 * @defgroup level_3_others Others
 * @{
 */

/**
 * Return the symbol it aliases to.
 * @param graph The symbolic graph.
 * @param tensor_symbol The tensor symbol alias.
 * @return A tensor symbol reference to the original tensor symbol. If this symbol has no reference, return NO_SYMBOL (.graph = 0).
 */
CCV_WARN_UNUSED(ccv_nnc_tensor_symbol_t) ccv_nnc_tensor_symbol_alias_to(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor_symbol);
/**
 * Set the tensor symbol parameters.
 * @param graph The symbolic graph.
 * @param tensor The tensor symbol reference.
 * @param info The new tensor parameters.
 * @return non-zero if it encountered errors.
 */
int ccv_nnc_tensor_symbol_set(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor, const ccv_nnc_tensor_param_t info);
/**
 * Get the parameters for a tensor symbol.
 * @param graph The symbolic graph.
 * @param tensor The tensor symbol reference.
 * @return The tensor parameters.
 */
CCV_WARN_UNUSED(ccv_nnc_tensor_param_t) ccv_nnc_tensor_symbol_params(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor);
/**
 * Get the name for a tensor symbol.
 * @param graph The symbolic graph.
 * @param tensor The tensor symbol reference.
 * @return The tensor name if available. Otherwise 0. The memory is managed by the graph.
 */
CCV_WARN_UNUSED(const char*) ccv_nnc_tensor_symbol_name(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor);
/**
 * Set the tensor symbol alias parameters.
 * @param graph The symbolic graph.
 * @param tensor The tensor symbol reference.
 * @param ofs The offset on each of the dimensions.
 * @param stride The stride of each dimension.
 * @return non-zero if it is not a tensor alias.
 */
int ccv_nnc_tensor_symbol_alias_set(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor, const int ofs[CCV_NNC_MAX_DIM_ALLOC], const int stride[CCV_NNC_MAX_DIM_ALLOC]);
/**
 * Get the parameters for a tensor symbol alias.
 * @param graph The symbolic graph.
 * @param tensor The tensor symbol reference.
 * @param ofs The offset on each of the dimensions.
 * @param stride The stride of each dimension.
 * @return non-zero if it is not a tensor alias.
 */
int ccv_nnc_tensor_symbol_alias_params(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor, int ofs[CCV_NNC_MAX_DIM_ALLOC], int stride[CCV_NNC_MAX_DIM_ALLOC]);
/**
 * Set the flags for this tensor symbol. The flags are only used for the symbol, not for the tensor.
 * @param graph The symbolic graph.
 * @param tensor The tensor symbol reference.
 * @param flags A reserved field for flags.
 */
void ccv_nnc_tensor_symbol_set_flags(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor, const int flags);
/**
 * Get all the flags for a tensor symbol.
 * @param graph The symbolic graph.
 * @param tensor The tensor symbol reference.
 */
CCV_WARN_UNUSED(int) ccv_nnc_tensor_symbol_flags(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor);
/**
 * Set the cmd of this exec symbol.
 * @param graph The symbolic graph.
 * @param exec The execution node symbol reference.
 * @param cmd The new wrapped command.
 */
void ccv_nnc_graph_exec_symbol_set(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t exec, const ccv_nnc_cmd_t cmd);
/**
 * Set the flags for this exec symbol. The flags are only used for the symbol. Only the higher 16 bits can be set.
 * @param graph The symbolic graph.
 * @param exec The execution node symbol reference.
 * @param flags A reserved field for flags.
 */
void ccv_nnc_graph_exec_symbol_set_flags(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t exec, const int flags);
/**
 * Get the flags for an exec symbol. Only the higher 16 bits can be retrieved.
 * @param graph The symbolic graph.
 * @param exec The execution node symbol reference.
 */
CCV_WARN_UNUSED(int) ccv_nnc_graph_exec_symbol_flags(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t exec);
/**
 * Return the command on this exec symbol.
 * @param graph The symbolic graph.
 * @param exec The execution node symbol reference.
 * @return The wrapped command.
 */
CCV_WARN_UNUSED(ccv_nnc_cmd_t) ccv_nnc_graph_exec_symbol_cmd(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t exec);
/**
 * Return the name of this exec symbol.
 * @param graph The symbolic graph.
 * @param exec The execution node symbol reference.
 * @return The name for the exec symbol if available. The memory is managed by the graph.
 */
CCV_WARN_UNUSED(const char*) ccv_nnc_graph_exec_symbol_name(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t exec);
/**
 * Set the inputs / outputs for an exec symbol.
 * @param graph The symbolic graph.
 * @param exec The execution node symbol reference.
 * @param inputs The input tensor symbols array.
 * @param input_size The size of the input tensor symbols array.
 * @param outputs The output tensor symbols array.
 * @param output_size The size of the output tensor symbols array.
 */
void ccv_nnc_graph_exec_symbol_set_io(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t exec, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, const ccv_nnc_tensor_symbol_t* const outputs, const int output_size);
/**
 * Manually concatenate an input node with an output graph node.
 * @param graph The symbolic graph.
 * @param source The source execution node symbol to connect.
 * @param destination The destination execution node symbol to connect to.
 * @return non-zero if it cannot concatenate successfully.
 */
int ccv_nnc_graph_exec_symbol_concat(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t source, const ccv_nnc_graph_exec_symbol_t destination);
/**
 * Manually disconnect an input node from an output graph node for this graph.
 * @param graph The symbolic graph.
 * @param source The source execution node symbol to disconnect.
 * @param destination The destination execution node symbol to disconnect from.
 * @return non-zero if it cannot disjoin successfully.
 */
int ccv_nnc_graph_exec_symbol_disjoin(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t source, const ccv_nnc_graph_exec_symbol_t destination);
/**
 * Number of exec symbols.
 * @param graph The symbolic graph.
 */
CCV_WARN_UNUSED(int) ccv_nnc_graph_exec_symbol_count(const ccv_nnc_symbolic_graph_t* const graph);
/**
 * Number of active symbols of the given type.
 * @param graph The symbolic graph.
 * @param type The type of symbol, can be CCV_NNC_SYMBOL_TENSOR or CCV_NNC_SYMBOL_GRAPH_EXEC (will error out on CCV_NNC_SYMBOL_TENSOR_ALIAS).
 */
CCV_WARN_UNUSED(int) ccv_nnc_symbolic_graph_active_symbol_count(const ccv_nnc_symbolic_graph_t* const graph, const int type);
/**
 * Substitution function. Given an execution node symbol and a command, return a new command.
 */
typedef ccv_nnc_cmd_t(*ccv_nnc_symbolic_graph_subst_f)(const ccv_nnc_graph_exec_symbol_t symbol, const ccv_nnc_cmd_t cmd);
/**
 * Generate a duplicate of the provided graph.
 * While generating the duplicate, it calls the function pointer to re-process the node type.
 * @param graph The symbolic graph.
 * @param subst The substitution function.
 * @return The duplicated symbolic graph.
 */
CCV_WARN_UNUSED(ccv_nnc_symbolic_graph_t*) ccv_nnc_symbolic_graph_dup(const ccv_nnc_symbolic_graph_t* const graph, ccv_nnc_symbolic_graph_subst_f subst);
/**
 * Number of tensor symbols.
 * @param graph The symbolic graph.
 */
CCV_WARN_UNUSED(int) ccv_nnc_tensor_symbol_count(const ccv_nnc_symbolic_graph_t* const graph);
/**
 * Compute all the tensor shapes within this graph.
 * @param graph The symbolic graph.
 * @param sources The sources for the graph.
 * @param source_size The size of the sources array. 0 to use default sources.
 * @param destinations The destinations for the graph.
 * @param destination_size The size of the destinations array. 0 to use default destinations.
 */
void ccv_nnc_symbolic_graph_tensor_auto(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size);
/**
 * For a given tensor symbol, this method resolves its local reference inside the given graph.
 * This is related to the sub-graphs of symbolic graphs. A tensor symbol in a sub-graph can still have a
 * representation in the parent graph. This method is used to find the local reference in a given graph.
 * @param graph The symbolic graph.
 * @param tensor_symbol The tensor symbol we want to resolve.
 * @return A tensor symbol reference in the given graph.
 */
CCV_WARN_UNUSED(ccv_nnc_tensor_symbol_t) ccv_nnc_tensor_symbol_resolve(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor_symbol);
/**
 * Pass a graph's tensor symbol into its sub-graph. We will make the connection that the source tensor
 * symbol in the source symbolic graph is the destination tensor symbol in the destination symbolic graph.
 * The reason to do this inference is that a tensor symbol is local to a symbolic graph under the hood.
 * Although you can use tensor symbols from different graphs directly (it calls this method or the resolve
 * method above when creating an execution node symbol), sometimes you need this method to do it manually.
 * @param src_graph The source symbolic graph.
 * @param dest_graph The destination symbolic graph.
 * @param src_tensor_symbol The source tensor symbol we want to connect.
 * @param dest_tensor_symbol The destination tensor symbol we want to connect.
 */
void ccv_nnc_tensor_symbol_hookup(ccv_nnc_symbolic_graph_t* const src_graph, ccv_nnc_symbolic_graph_t* const dest_graph, const ccv_nnc_tensor_symbol_t src_tensor_symbol, const ccv_nnc_tensor_symbol_t dest_tensor_symbol);
/**
 * Set bypasses for a tensor symbol.
 * For case..of graphs, if the condition is not met, we will skip the execution of a sub-graph.
 * However, in that case, we cannot express easily which output tensor corresponds to which input tensor.
 * This method provides the way.
 * @param graph The symbolic graph.
 * @param symbol_map The pairs of tensors array; the source is the input tensor, the destination is the output tensor.
 * @param symbol_map_size The size of the tensor pairs array.
 */
void ccv_nnc_tensor_symbol_set_bypasses(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_map_t* const symbol_map, const int symbol_map_size);
/**
 * Fetch the inputs / outputs for an exec symbol. For efficiency considerations, this returns pointers directly.
 * @param graph The symbolic graph.
 * @param symbol The execution node symbol reference.
 * @param inputs The pointer to store the input tensor symbols array.
 * @param input_size The pointer to store the size of the input tensor symbols array.
 * @param outputs The pointer to store the output tensor symbols array.
 * @param output_size The pointer to store the size of the output tensor symbols array.
 */
void ccv_nnc_graph_exec_symbol_io(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t symbol, const int** const inputs, int* const input_size, const int** const outputs, int* const output_size);
/**
 * Replace an input / output tensor symbol on an exec symbol.
 * @param graph The symbolic graph.
 * @param symbol The execution node symbol reference.
 * @param old_symbol The old tensor symbol to be replaced.
 * @param new_symbol The new tensor symbol on input / output.
 */
void ccv_nnc_graph_exec_symbol_replace_io(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t symbol, const ccv_nnc_tensor_symbol_t old_symbol, const ccv_nnc_tensor_symbol_t new_symbol);
/**
 * Which exec symbols this one is connected to. For efficiency considerations, this returns a pointer directly.
 * @param graph The symbolic graph.
 * @param symbol The execution node symbol reference.
 * @param tos The pointer to store the outgoing indexes of the execution nodes.
 * @param to_size The pointer to store the number of outgoing indexes.
 */
void ccv_nnc_graph_exec_symbol_to(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t symbol, const int** const tos, int* const to_size);
/**
 * Find the size allocated on the opaque tensor arena structure.
 * @param tensor_arena The tensor arena object generated through compilation.
 * @return The total allocated size in bytes.
 */
CCV_WARN_UNUSED(uint64_t) ccv_nnc_tensor_arena_size(const ccv_nnc_tensor_arena_t* const tensor_arena);
/**
 * Query whether a set of sources are the ancestors of a set of destination nodes.
 * @param graph The symbolic graph.
 * @param sources The exec sources to check whether they can reach some of the destinations.
 * @param source_size How many sources are in the source list.
 * @param destinations The exec destinations to check whether the sources can reach.
 * @param destination_size How many destinations are in the destination list.
 * @param bitmask The bit return value; each bit represents a source, and 1 means it can reach some of the destinations.
 */
void ccv_nnc_symbolic_graph_sources_to_destinations(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size, uint64_t* const bitmask);
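// Illustrative sketch (not part of the header): test whether a single source
// can reach a destination. One uint64_t covers up to 64 sources; bit i
// corresponds to sources[i].
static inline int ccv_nnc_example_reaches(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t source, const ccv_nnc_graph_exec_symbol_t destination)
{
  uint64_t bitmask = 0;
  ccv_nnc_symbolic_graph_sources_to_destinations(graph, &source, 1, &destination, 1, &bitmask);
  return !!(bitmask & 1);
}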
/**
 * Re-init the tensor arena with an updated symbolic graph. This won't work if the symbolic graph requires
 * larger tensors than what's available. If you use this method properly, you can avoid re-compiling a graph
 * just because some tensor shapes changed.
 * @param tensor_arena The tensor arena object generated through compilation.
 * @param graph The updated symbolic graph with different tensor shapes.
 * @return 0 if successful, -1 if the tensor arena doesn't have enough space to just re-init.
 */
int ccv_nnc_tensor_arena_reinit(ccv_nnc_tensor_arena_t* const tensor_arena, const ccv_nnc_symbolic_graph_t* const graph);
/**
 * Re-init the graph exec arena with an updated symbolic graph. This updates some hyper-parameters of the
 * executions to match the updated symbolic graph. Note that this will try to keep the backend / algorithm
 * selection from the previous graph if possible (meaning, if the command still matches).
 * @param graph_exec_arena The graph exec arena object that provides the mapping between the symbolic and the concrete graph.
 * @param graph The concrete graph generated through the compile method.
 * @param symbolic_graph The updated symbolic graph.
 */
void ccv_nnc_graph_exec_reinit(ccv_nnc_graph_exec_arena_t* const graph_exec_arena, ccv_nnc_graph_t* const graph, const ccv_nnc_symbolic_graph_t* const symbolic_graph);
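// Illustrative sketch (not part of the header): after resizing a tensor symbol,
// try to reuse the existing arenas instead of recompiling. CPU_TENSOR_NHWC is
// an assumed helper from ccv_nnc_easy.h.
static inline int ccv_nnc_example_resize(ccv_nnc_symbolic_graph_t* const symbolic_graph, const ccv_nnc_tensor_symbol_t x, ccv_nnc_graph_t* const graph, ccv_nnc_tensor_arena_t* const tensor_arena, ccv_nnc_graph_exec_arena_t* const graph_exec_arena)
{
  ccv_nnc_tensor_symbol_set(symbolic_graph, x, CPU_TENSOR_NHWC(32F, 64, 64)); // The new shape.
  ccv_nnc_symbolic_graph_tensor_auto(symbolic_graph, 0, 0, 0, 0); // Re-propagate shapes downstream.
  if (ccv_nnc_tensor_arena_reinit(tensor_arena, symbolic_graph) != 0)
    return -1; // Not enough space; fall back to a full ccv_nnc_symbolic_graph_compile.
  ccv_nnc_graph_exec_reinit(graph_exec_arena, graph, symbolic_graph);
  return 0;
}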
/**
 * Function prototype for the tensor symbol creation callback.
 */
typedef void(*ccv_nnc_tensor_symbol_new_hook_f)(void* context, const ccv_nnc_tensor_symbol_t symbol, const ccv_nnc_tensor_param_t info, const char* const name);
/**
 * Hook into the call to ccv_nnc_tensor_symbol_new; returns the previously provided context when calling into this method.
 * @param graph The symbolic graph.
 * @param hook The function to be called if a new tensor symbol is created.
 * @param context The context associated with the callback function.
 * @param previous_hook Returns the previous hook, if provided.
 * @return The previous context associated with the previous hook function.
 */
void* ccv_nnc_tensor_symbol_new_hook(ccv_nnc_symbolic_graph_t* const graph, ccv_nnc_tensor_symbol_new_hook_f hook, void* context, ccv_nnc_tensor_symbol_new_hook_f* previous_hook);
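// Illustrative sketch (not part of the header): count tensor symbols as they
// are created, keeping the previous hook around so it could be chained.
static void ccv_nnc_example_on_tensor_symbol(void* context, const ccv_nnc_tensor_symbol_t symbol, const ccv_nnc_tensor_param_t info, const char* const name)
{
  ++*(int*)context;
}
// Usage:
// int count = 0;
// ccv_nnc_tensor_symbol_new_hook_f previous_hook = 0;
// void* const previous_context = ccv_nnc_tensor_symbol_new_hook(graph, ccv_nnc_example_on_tensor_symbol, &count, &previous_hook);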
/**
 * Function prototype for the tensor symbol alias creation callback.
 */
typedef void(*ccv_nnc_tensor_symbol_alias_new_hook_f)(void* context, const ccv_nnc_tensor_symbol_t symbol, const ccv_nnc_tensor_symbol_t from_symbol, const int ofs[CCV_NNC_MAX_DIM_ALLOC], const int stride[CCV_NNC_MAX_DIM_ALLOC], const ccv_nnc_tensor_param_t info, const char* const name);
/**
 * Hook into the call to ccv_nnc_tensor_symbol_alias_new; returns the previously provided context when calling into this method.
 * @param graph The symbolic graph.
 * @param hook The function to be called if a new tensor symbol alias is created.
 * @param context The context associated with the callback function.
 * @param previous_hook Returns the previous hook, if provided.
 * @return The previous context associated with the previous hook function.
 */
void* ccv_nnc_tensor_symbol_alias_new_hook(ccv_nnc_symbolic_graph_t* const graph, ccv_nnc_tensor_symbol_alias_new_hook_f hook, void* context, ccv_nnc_tensor_symbol_alias_new_hook_f* previous_hook);
/**
 * Set the pair reference for tensor symbols. Pair references for tensor symbols have a very specific meaning.
 * A backward pass may involve sub-graphs. The commands in the sub-graph could reference tensor symbols of
 * a different graph (its forward pass graph). That is not allowed (two graphs with no ancestral relationship
 * cannot share a tensor symbol). So we create a new tensor symbol, but set the pair reference.
 * @param graph The symbolic graph.
 * @param tensor_symbol The tensor symbol in the current graph.
 * @param pair_tensor_symbol The tensor symbol in the pair graph.
 */
void ccv_nnc_tensor_symbol_pair_with(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor_symbol, const ccv_nnc_tensor_symbol_t pair_tensor_symbol);
/**
 * Function prototype for the execution node symbol creation callback.
 */
typedef void(*ccv_nnc_graph_exec_symbol_new_hook_f)(void* context, const ccv_nnc_graph_exec_symbol_t symbol, const ccv_nnc_cmd_t cmd, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, const ccv_nnc_tensor_symbol_t* const outputs, const int output_size, const char* const name);
/**
 * Hook into the call to ccv_nnc_graph_exec_symbol_new; returns the previously provided context when calling into this method.
 * @param graph The symbolic graph.
 * @param hook The function to be called if a new execution node symbol is created.
 * @param context The context associated with the callback function.
 * @param previous_hook The previous hook function associated with this operation.
 * @return The previous context associated with the previous hook function.
 */
void* ccv_nnc_graph_exec_symbol_new_hook(ccv_nnc_symbolic_graph_t* const graph, ccv_nnc_graph_exec_symbol_new_hook_f hook, void* context, ccv_nnc_graph_exec_symbol_new_hook_f* previous_hook);
/**
 * Set the pair reference for an exec symbol. This is very similar to the one for the concrete graph. A pair reference
 * of a backward pass execution node is its forward pass counterpart.
 * @param graph The symbolic graph.
 * @param exec_symbol The execution node symbol in the current graph.
 * @param pair_exec_symbol The pairing execution node symbol.
 */
void ccv_nnc_graph_exec_symbol_pair_with(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t exec_symbol, const ccv_nnc_graph_exec_symbol_t pair_exec_symbol);

/** @} */

/** @} */

/**
 * @defgroup level_3_5 Level-3.5 API
 * @{
 */

/**
 * @defgroup level_3_5_autograd Automatic Differentiation
 * @{
 */

/**
 * Compute the backward graph, assuming the provided symbolic graph only contains the "forward" part from sources to destinations.
 * This is effectively what is called the "autograd" or automatic differentiation process (specifically, "reverse AD") in other libs.
 * For an expression y = f(x), to compute dx, x is the wrt_symbol and y is the f_symbol.
 * @param graph The symbolic graph.
 * @param f_symbols The tensor symbols array of the result (or loss).
 * @param f_symbol_size The size of the f symbols array.
 * @param wrt_symbols The tensor symbols array of the inputs.
 * @param wrt_symbol_size The size of the wrt symbols array.
 * @param sources The source execution nodes array for the computation.
 * @param source_size The size of the source nodes array.
 * @param destinations The destination execution nodes array for the computation.
 * @param destination_size The size of the destination nodes array.
 */
void ccv_nnc_symbolic_graph_backward(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const f_symbols, const int f_symbol_size, const ccv_nnc_tensor_symbol_t* const wrt_symbols, const int wrt_symbol_size, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size);
/**
 * Get the symbol that contains the gradient. The list will be flushed if the ccv_nnc_symbolic_graph_backward function is called again.
 * @param graph The symbolic graph.
 * @param symbol The tensor symbol we want to retrieve the gradient for (must be one of the wrt symbols or the f symbols).
 * @return A tensor symbol that represents the gradient.
 */
CCV_WARN_UNUSED(ccv_nnc_tensor_symbol_t) ccv_nnc_tensor_symbol_for_backward(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t symbol);
/**
 * Get the execution node symbol for a tensor symbol. This is used to retrieve the execution node for a gradient tensor symbol.
 * @param graph The symbolic graph.
 * @param symbol The tensor symbol that represents the gradient (must be one of the wrt symbols).
 * @return An execution node symbol that generates the gradient.
 */
CCV_WARN_UNUSED(ccv_nnc_graph_exec_symbol_t) ccv_nnc_graph_exec_symbol_for_backward(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t symbol);
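// Illustrative sketch (not part of the header): reverse-mode AD over a forward
// graph that computes y = f(x), then look up the gradient symbol and the node
// that produces it. Remember to seed the gradient of y (bind it or set it to 1)
// before running the compiled graph.
static inline void ccv_nnc_example_backward(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t y, const ccv_nnc_tensor_symbol_t x)
{
  ccv_nnc_symbolic_graph_backward(graph, &y, 1, &x, 1,
    ccv_nnc_symbolic_graph_sources(graph), ccv_nnc_symbolic_graph_source_size(graph),
    ccv_nnc_symbolic_graph_destinations(graph), ccv_nnc_symbolic_graph_destination_size(graph));
  const ccv_nnc_tensor_symbol_t dx = ccv_nnc_tensor_symbol_for_backward(graph, x); // dy/dx
  const ccv_nnc_graph_exec_symbol_t dx_node = ccv_nnc_graph_exec_symbol_for_backward(graph, dx);
  (void)dx_node; // E.g. concatenate it before an optimizer update node.
}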

/** @} */

/**
 * @defgroup level_3_5_while While Loop
 * @{
 */

/**
 * @page symbolic_while Construct a "while" loop in a symbolic graph
 *
 * (This document was written in 2016. Since then, Caffe2 added support for a while loop (as a sub-graph), and a similar
 * implementation was added for ONNX as well.)
 *
 * In NNC, a computation graph cannot contain cycles. Thus, there is no flexible way to express loops.
 *
 * A little survey on this problem:
 *
 * * Caffe2 supports a specific type of recurrent neural network.
 *
 * * TensorFlow, as it stands, supports a while construct. Its while construct is very straightforward: a body and
 *   a condition are provided, and you can construct whatever graph you want.
 *
 * * mxnet supports recurrent neural networks by unrolling them into a normal non-looped graph.
 *
 * * Theano supports the "scan" op, which is a terminable loop (with a loop variant, known as sequence).
 *
 * * CNTK supports this with custom BrainScript. Within BrainScript, you can access the previous state in a
 *   function, and therefore it effectively supports calling a method multiple times (looping over).
 *
 * Of the above, Caffe2 and mxnet gave up on supporting a generic loop for performance reasons. TensorFlow supports
 * a generic while loop, with all the trouble it may introduce (see the nested while loop bug in TensorFlow that
 * was recently fixed). Theano picked a point that seems pretty sweet, although there are limitations. CNTK's BrainScript
 * is a DSL; they can do whatever they want, with the drawback that they now need to implement a language runtime.
 * TensorFlow, Theano and CNTK all support auto-differentiation over the while loop with a tape (Wengert list).
 *
 * A simple way to support loops is to support conditional jumps. In fact, a conditional jump is a more generic way
 * of doing loops. However, if you take into consideration that a fully differentiable computation graph
 * is meant to be supported, it is terrible. With conditional jumps, it is really hard to know which tensor
 * is used where, and thus to keep track of it for reverse accumulation (backward propagation). There is no counter or
 * anything alike; it is pretty hard to trace back which line is executed how many times. Compound this with
 * NNC's promise that whatever shows on the graph as "parallel" computable will be parallel computed, and
 * it is close to impossible to keep track if conditional jumps are used in their raw form. Certain restrictions must be
 * applied to how the loop is done. The compromise comes from a closer examination of NNC's preferences.
 *
 * NNC prefers to have a graph without cycles. It also prefers to be fully differentiable. Another important
 * criterion is that most functions in NNC require an SSA (Static Single Assignment) representation. With these in
 * mind, support for the while loop has to be strict.
 *
 * Luckily, there are well-formalized ways of supporting this in the literature and in practice. Because it is
 * well-formalized, translating this into the existing NNC implementation is actually pretty straightforward. We
 * are going to introduce a special version of the while loop. In the literature that discusses SSA, it may be
 * called a parameterized loop. For us, it works like this:
 *
 * To construct a while loop for an existing NNC graph, you need to be able to separate the existing graph into
 * two sub-graphs.
 *
 * The while-loop sub-graph (WL sub-graph) contains a set of incoming nodes (I-nodes), condition false output
 * nodes (CFO-nodes) and end nodes (E-nodes). Each set has its own properties, but in short, all incoming edges
 * to the WL sub-graph connect to one of the I-nodes, but nothing else. All outgoing edges from the WL sub-graph
 * connect to one of the CFO-nodes, but nothing else. A node can be an I-node, CFO-node or E-node,
 * non-exclusively.
 *
 * There are also 3 types of tensors used by the nodes in a WL sub-graph: input tensors (I-tensors) are tensors
 * that are inputs to some nodes, and will never be outputs. Output tensors (O-tensors) are tensors that are
 * outputs from some nodes, but never inputs to any nodes. I-tensors can be outputs from some nodes
 * outside of the WL sub-graph. O-tensors can be inputs to some nodes outside of the WL sub-graph. Internal
 * tensors (IN-tensors) are not visible outside of the WL sub-graph; therefore, they can be both inputs and outputs
 * of some nodes inside the sub-graph. Some tensors can be fed back into the WL sub-graph, given either
 * O-tensors or IN-tensors. A parameter map can be given in these cases to describe which maps to what.
 *
 * The way to drive a WL sub-graph is like this: the WL sub-graph runs until all CFO-nodes are reached. At this
 * point, the while_f condition is checked. If true, we continue until all the end-nodes are reached. At this
 * point, we increase the counter, reconfigure the WL sub-graph with the parameter map, and run from the I-nodes all
 * over again. When all CFO-nodes are reached, the condition is checked again; if false, the WL sub-graph terminates,
 * and the graph continues from the nodes that are pointed to by the CFO-nodes.
 *
 * Given these constraints, doing automatic differentiation is not that hard any more. A WL sub-graph, from
 * the whole graph's point of view, is just a giant command that supports both forward / backward operations, with
 * some extra information passed around in the form of userdata (the tape).
 *
 * For the WL sub-graph, we can continue to leverage the compile / backward functions that are already written for
 * the symbolic graph as well.
 *
 * For the compile function, we just need to take care of the parameter maps (these need to be converted into bound
 * tensors).
 *
 * For the backward function, we need to convert the parameter maps from assigners (thus, y = x) to accumulators (x += y).
 *
 * This function will replace the nodes that it affects with one sub-graph node. Thus, how to drive this
 * sub-graph is opaque. Its backward form is opaque as well.
 *
 * There are no connections between its nodes and the outside graph nodes other than the three sets:
 *
 * 1. Incoming nodes, the set of nodes that contains the incoming edges from outside; they cannot have edges
 *    pointed to by inside nodes. The sub-graph computation starts from these incoming nodes;
 *
 * 2. Condition false output nodes; when the condition is false, we will break out of this while loop, and these
 *    nodes point to the outside nodes, but no inside nodes;
 *
 * 3. End nodes, the set of nodes that marks the end of the while body; after these nodes are executed,
 *    we will return to the incoming nodes. These end nodes shouldn't have any edges pointing to inside nodes
 *    (it is OK if end nodes are condition true output nodes as well);
 *
 * Since these will become a sub-graph (which, to its owner graph, is just a simple "node"), it will have inputs
 * and outputs. Besides that, the loop body needs to be parameterized to be SSA compliant (see:
 * https://www.cs.cmu.edu/~fp/courses/15411-f13/lectures/06-ssa.pdf). Thus, a list of body parameters needs to
 * be provided.
 */

/**
 * @defgroup level_3_5_while_essentials While Loop Essentials
 * @{
 */

/**
 * The given tensors contain all the common / input / output tensors specified in the sub-graph.
 */
typedef int(*ccv_nnc_graph_while_f)(ccv_nnc_tensor_t* const* const inputs, const int input_size, const void* const data);
/**
 * Create a tensor tape that can be used to record for a while loop or case..of.
 * @return A ccv_nnc_tensor_tape_t pointer.
 */
CCV_WARN_UNUSED(ccv_nnc_tensor_tape_t*) ccv_nnc_tensor_tape_new(void);
/**
 * Deallocate the tensor tape and all the memory it allocated.
 * @param tape The tensor tape object.
 */
void ccv_nnc_tensor_tape_free(ccv_nnc_tensor_tape_t* const tape);
/**
 * The API to operate on the symbolic graph is more involved than the one for the concrete graph for while loops.
 * The reason is that the symbolic graph operates in SSA form (static single assignment), and therefore, the while
 * loops for the symbolic graph have to be parameterized.
 * @param graph The symbolic graph.
 * @param cmd The command identifier, can be either CCV_NNC_GRAPH_FORWARD or CCV_NNC_GRAPH_BACKWARD.
 * @param while_graph The sub-graph to run the while loop.
 * @param name The name of the while loop. Optional.
 * @return A while loop execution symbol (backed by a sub-graph) of the given graph.
 */
ccv_nnc_graph_exec_symbol_t ccv_nnc_symbolic_graph_while(ccv_nnc_symbolic_graph_t* const graph, const uint32_t cmd, ccv_nnc_symbolic_graph_t* const while_graph, const char* const name);
/**
 * Set the expression to be evaluated, and at which nodes it should be evaluated.
 * @param while_graph The symbolic graph that will run the while loop.
 * @param while_expr The function pointer to the expression.
 * @param while_data Custom data provided to the expression evaluation function.
 * @param inputs The input tensor symbols array to the expression evaluation function.
 * @param input_size The size of the input tensor symbols array.
 * @param breakpoints The execution node symbols at which the while loop will pause, evaluate the expression, and choose to either break out or continue.
 * @param breakpoint_size The size of the execution node symbols array.
 */
void ccv_nnc_symbolic_graph_set_while_expr(ccv_nnc_symbolic_graph_t* const while_graph, const ccv_nnc_graph_while_f while_expr, const void* const while_data, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, const ccv_nnc_graph_exec_symbol_t* const breakpoints, const int breakpoint_size);
/**
 * Set the loop carry-over parameters (a parameterized loop; these will be carried over to the next iteration).
 * @param while_graph The symbolic graph that will run the while loop.
 * @param symbol_map An array of tensor symbol pairs, where the source tensor symbol is the output tensor symbol in this iteration, and the destination tensor symbol is the input tensor symbol in the next iteration.
 * @param symbol_map_size The size of the symbol map array.
 */
void ccv_nnc_symbolic_graph_set_carry_overs(ccv_nnc_symbolic_graph_t* const while_graph, const ccv_nnc_tensor_symbol_map_t* const symbol_map, const int symbol_map_size);
/**
 * Retrieve the special (magical) tensor symbol that retains the while loop counter (thus, with a dimension of 1x1x1, CCV_64S type).
 * @param while_graph The symbolic graph that will run the while loop.
 * @return A tensor symbol that represents the implicit loop count.
 */
CCV_WARN_UNUSED(ccv_nnc_tensor_symbol_t) ccv_nnc_tensor_symbol_for_while_count(const ccv_nnc_symbolic_graph_t* const while_graph);
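// Illustrative sketch (not part of the header): run y = x + x five times, with
// y carried back into x each iteration. CMD_EWSUM_FORWARD() and CPU_TENSOR_NHWC
// are assumed helpers from ccv_nnc_easy.h.
static int ccv_nnc_example_while_5(ccv_nnc_tensor_t* const* const inputs, const int input_size, const void* const data)
{
  return inputs[0]->data.i64[0] < 5; // inputs[0] is the 1x1x1 CCV_64S loop counter requested below.
}
static inline void ccv_nnc_example_symbolic_while(ccv_nnc_symbolic_graph_t* const graph)
{
  ccv_nnc_symbolic_graph_t* const while_graph = ccv_nnc_symbolic_graph_new();
  ccv_nnc_symbolic_graph_while(graph, CCV_NNC_GRAPH_FORWARD, while_graph, "while count < 5");
  const ccv_nnc_tensor_symbol_t x = ccv_nnc_tensor_symbol_new(while_graph, CPU_TENSOR_NHWC(32F, 1), "x");
  const ccv_nnc_tensor_symbol_t y = ccv_nnc_tensor_symbol_new(while_graph, CPU_TENSOR_NHWC(32F, 1), "y");
  const ccv_nnc_tensor_symbol_t inputs[] = {x, x};
  const ccv_nnc_graph_exec_symbol_t body = ccv_nnc_graph_exec_symbol_new(while_graph, CMD_EWSUM_FORWARD(), inputs, 2, &y, 1, "y = x + x");
  ccv_nnc_graph_exec_symbol_autogen(while_graph, 0, 0, CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS);
  const ccv_nnc_tensor_symbol_t count = ccv_nnc_tensor_symbol_for_while_count(while_graph);
  // Pause at the body node, evaluate the expression on the loop counter, and
  // either break out or run through to the end nodes.
  ccv_nnc_symbolic_graph_set_while_expr(while_graph, ccv_nnc_example_while_5, 0, &count, 1, &body, 1);
  // Parameterize the loop: this iteration's y is the next iteration's x.
  const ccv_nnc_tensor_symbol_map_t carry_over = { .source = y, .destination = x };
  ccv_nnc_symbolic_graph_set_carry_overs(while_graph, &carry_over, 1);
}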
/**
 * Extract the sub-graph of the while loop from a symbol.
 * @param graph The symbolic graph.
 * @param while_symbol The execution node symbol.
 * @return The sub-graph that represents a while loop.
 */
CCV_WARN_UNUSED(ccv_nnc_symbolic_graph_t*) ccv_nnc_symbolic_graph_from_while_symbol(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t while_symbol);
/**
 * Construct a looped concrete graph. Note that this interface is a little bit simpler than the one for the symbolic
 * graph. The reason is that a concrete graph operates on allocated tensors, thus, there is no mapping of tensor
 * symbols between the parent graph and the while graph. (The reason to have a mapping in symbolic graphs is to
 * constrain variable leaking between the sub-graph and the parent graph.)
 * @param graph The concrete graph.
 * @param cmd The command identifier, can be either CCV_NNC_GRAPH_FORWARD or CCV_NNC_GRAPH_BACKWARD.
 * @param while_graph The sub-graph to run the while loop.
 * @return An execution node that represents the sub-graph.
 */
CCV_WARN_UNUSED(ccv_nnc_graph_exec_t) ccv_nnc_graph_while(ccv_nnc_graph_t* const graph, const uint32_t cmd, ccv_nnc_graph_t* const while_graph);
/**
 * Set the evaluated expression for the while loop. The while loop will break out if the expression evaluates to 0.
 * @param while_graph The concrete graph that will run the while loop.
 * @param while_expr The function pointer to the expression.
 * @param while_data Custom data provided to the expression evaluation function.
 * @param inputs The input tensors array to the expression evaluation function.
 * @param input_size The size of the input tensors array.
 * @param breakpoints The execution nodes at which the while loop will pause, evaluate the expression, and choose to either break out or continue.
 * @param breakpoint_size The size of the execution nodes array.
 */
void ccv_nnc_graph_set_while_expr(ccv_nnc_graph_t* const while_graph, const ccv_nnc_graph_while_f while_expr, const void* const while_data, ccv_nnc_tensor_t* const* const inputs, const int input_size, const ccv_nnc_graph_exec_t* const breakpoints, const int breakpoint_size);
/**
 * Get the special tensor for the while loop count. It contains one uint64_t value. We keep an implicit count
 * when evaluating the while loop and you can access it with this tensor.
 * @param while_graph The concrete graph that will run the while loop.
 * @return A special tensor from which you can retrieve the loop count at .data.i64[0].
 */
CCV_WARN_UNUSED(ccv_nnc_tensor_t) ccv_nnc_tensor_for_while_count(const ccv_nnc_graph_t* const while_graph);
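// Illustrative sketch (not part of the header): the same break-out expression
// driven from the concrete graph's implicit counter. Note the counter tensor
// must outlive the graph run, since set_while_expr takes its address.
static inline void ccv_nnc_example_concrete_while_expr(ccv_nnc_graph_t* const while_graph, ccv_nnc_tensor_t* const count_tensor, const ccv_nnc_graph_exec_t* const breakpoints, const int breakpoint_size)
{
  *count_tensor = ccv_nnc_tensor_for_while_count(while_graph);
  ccv_nnc_tensor_t* inputs[] = { count_tensor };
  ccv_nnc_graph_set_while_expr(while_graph, ccv_nnc_example_while_5, 0, inputs, 1, breakpoints, breakpoint_size);
}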
2351
/**
2352
 * Retrieve the sub-graph from a execution node.
2353
 * @param graph The concrete graph.
2354
 * @param exec The execution node represents the sub-graph.
2355
 * @return The sub-graph.
2356
 */
2357
CCV_WARN_UNUSED(ccv_nnc_graph_t*) ccv_nnc_graph_from_while_exec(const ccv_nnc_graph_t* const graph, ccv_nnc_graph_exec_t exec);
2358
2359
/** @} */
2360
2361
/**
2362
 * @defgroup level_3_5_while_others While Loop Others
2363
 * @{
2364
 */
2365
2366
/**
2367
 * For a given tape on a given graph, update the input / output tensors so new version will be created (if needed).
2368
 * @param tape The tensor tape object.
2369
 * @param graph The concrete graph this tensor tape is executing in.
2370
 * @param input_flags The flags associated with input tensors.
2371
 * @param inputs The input tensors.
2372
 * @param input_size The size of input tensors array.
2373
 * @param output_flags The flags associated with output tensors.
2374
 * @param outputs The output tensors.
2375
 * @param output_size The size of output tensors array.
2376
 */
2377
void ccv_nnc_tensor_tape_io(ccv_nnc_tensor_tape_t* const tape, const ccv_nnc_graph_t* const graph, const int* const input_flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, const int* const output_flags, ccv_nnc_tensor_t* const* const outputs, const int output_size);
2378
/**
2379
 * Retrieve the number we associated with the execution node that recorded on the tape for a particular run of the graph.
2380
 * @param tape The tensor tape object.
2381
 * @param graph The concrete graph this tensor tape is executing in.
2382
 * @param exec The execution node.
2383
 * @return The number associated with the execution node.
2384
 */
2385
uint64_t ccv_nnc_tensor_tape_numbering(ccv_nnc_tensor_tape_t* const tape, const ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec);
2386
/**
2387
 * Set the number we associated with the execution node that recorded on the tape for a particular run of the graph.
2388
 * @param tape The tensor tape object.
2389
 * @param graph The concrete graph this tensor tape is executing in.
2390
 * @param exec The execution node.
2391
 * @param numbering The number associated with the execution node.
2392
 */
2393
void ccv_nnc_tensor_tape_set_numbering(ccv_nnc_tensor_tape_t* const tape, ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, const uint64_t numbering);
2394
/**
2395
 * Augmented tensor to run a graph with while loop (An obvious example is dynamic RNN).
2396
 */
2397
typedef struct ccv_nnc_tensor_multiview_s {
2398
  // This is an augmented ccv_nnc_tensor_view_t
2399
  // Namely, it can point to multiple versions of tensors.
2400
  int type; // This type is CCV_NNC_TENSOR_MULTI_VIEW
2401
  // kind specified how the multi-version tensors stored.
2402
  // See the comment on the follow up enums.
2403
  uint8_t kind;
2404
  uint16_t repeat;
2405
  intptr_t anchor; // on which graph this multi-view tensor is wrapped. This helps to determine on which level the multi-view tensor should be unwrapped.
2406
  // If this tensor points to a tensor view, data.u8 - offset is the real pointer start.
2407
  off_t offset;
2408
  struct ccv_nnc_tensor_multiview_s* p; // If this is wrapped with another multiview tensor. Get to the parent one.
2409
  ccv_nnc_tensor_t* it; // Current tensor (tensor in use), this is updated along with the graph computation.
2410
  // This is useful because by just traverse tv, I can get the latest up-to-date reference to this multi-view tensor.
2411
  ccv_array_t* sp; // Synchronized tensor views. This corresponds to ccv_nnc_tensor_synchronize_to_multiview method, that records all the tensors registered for updates.
2412
  ccv_nnc_tensor_t* _inline_data[4];
2413
  ccv_nnc_tensor_t** _heap_data;
2414
} ccv_nnc_tensor_multiview_t;
2415
3.40k
#define CCV_NNC_MULTIVIEW_DATA(x) ((x)->_heap_data ? 
(x)->_heap_data0
: (x)->_inline_data)
2416
234
#define CCV_NNC_MULTIVIEW_PHI (intptr_t)0x1 /**< Denote this is a phi multi-view tensor. */
2417
2418
enum {
2419
  CCV_NNC_MULTIVIEW_K0N = 0, /**< All of them are repeated. */
2420
  CCV_NNC_MULTIVIEW_K1N = 1, /**< The first one is the first, the second one starts to repeat. (0111111...) */
2421
};
2422
#define CCV_NNC_MULTIVIEW_K01(x) ((x)->kind == CCV_NNC_MULTIVIEW_K0N && (x)->repeat == 1)
2423
/**
2424
 * Setup a tensor multiview with a given set of tensors.
2425
 * A multiview tensor point to a list of tensors, and its access depends on the loop count.
2426
 * For example, if we have a multiview tensor with list of [a, b, c, d], and kind is 1N, repeat is 3.
2427
 * For loop count 0, 1, 2, 3, 4, 5, 6, the corresponding tensors used will be a, b, c, d, b, c, d. If kind
2428
 * is 0N, and repeat is 4, it will be a, b, c, d, a, b, c.
2429
 * @param data[] The pointer to the list of tensors the multiview object can point to.
2430
 * @param kind Can be either CCV_NNC_MULTIVIEW_K0N or CCV_NNC_MULTIVIEW_K1N, basically whether to keep the initial tensor.
2431
 * @param repeat The length of the repeat.
2432
 * @param graph Which graph this multiview object attaches to.
2433
 * @param tensor_multiview The tensor multiview object to be updated.
2434
 */
2435
void ccv_nnc_tensor_multiview(ccv_nnc_tensor_t* data[], const uint8_t kind, const uint16_t repeat, const ccv_nnc_graph_t* const graph, ccv_nnc_tensor_multiview_t* const tensor_multiview);
2436
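// Example (a hedged sketch, not part of the original header): set up a two-way
// multiview on a while-loop graph. `a` and `b` are assumed pre-allocated tensors
// of identical shape, and `while_graph` a sub-graph assumed created elsewhere.
static void example_multiview(ccv_nnc_tensor_t* const a, ccv_nnc_tensor_t* const b, const ccv_nnc_graph_t* const while_graph)
{
	ccv_nnc_tensor_t* data[2] = {a, b};
	ccv_nnc_tensor_multiview_t mv;
	// K0N with repeat 2: loop counts 0, 1, 2, 3 ... map to a, b, a, b ...
	ccv_nnc_tensor_multiview(data, CCV_NNC_MULTIVIEW_K0N, 2, while_graph, &mv);
	// CCV_NNC_MULTIVIEW_DATA(&mv)[0] == a, CCV_NNC_MULTIVIEW_DATA(&mv)[1] == b
	ccv_nnc_tensor_multiview_free(mv);
}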
/**
2437
 * Since tensor_multiview will never be allocated with a *_new method, the *_free method simply frees anything that is dynamically allocated afterwards (such as the reference items).
2438
 * @param tensor_multiview The tensor multiview object to be deallocated.
2439
 */
2440
void ccv_nnc_tensor_multiview_free(const ccv_nnc_tensor_multiview_t tensor_multiview);
2441
/**
2442
 * Set up a tensor as a reference to a tensor multiview, thus, when the tensor multiview's it (current tensor) updates, the tensor reference's data.u8 will get updated as well (pointing to the same memory region as it).
2443
 * @param tensor_multiview The tensor multiview object.
2444
 * @param tensor The tensor that will be updated along with the multiview object.
2445
 */
2446
void ccv_nnc_tensor_synchronize_to_multiview(ccv_nnc_tensor_multiview_t* const tensor_multiview, ccv_nnc_tensor_t* const tensor);
2447
/**
2448
 * Send a broadcast to subscribers of the multiview; call this at the beginning of exec.
2449
 * @param tensor_multiview The tensor multiview object.
2450
 */
2451
void ccv_nnc_tensor_multiview_synchronize(ccv_nnc_tensor_multiview_t* const tensor_multiview);
2452
2453
/** @} */
2454
2455
/** @} */
2456
2457
/**
2458
 * @defgroup level_3_5_case_of Branching
2459
 * @{
2460
 */
2461
2462
/**
2463
 * @page symbolic_switch Construct "switch" control structure in symbolic graph
2464
 *
2465
 * Here I use the keyword case_of. Providing a "switch" control structure within NNC has some nice properties,
2466
 * even though technically you can simulate it with a while loop.
2467
 *
2468
 * 1. More optimal memory allocation: with "switch" control structure, memory can be multiplexed for each code
2469
 *    path because they are mutually exclusive.
2470
 *
2471
 * 2. No tape should be used within each branch: if we simulate with a "while" loop, any results from within
2472
 *    the "switch" statement has to be kept on the tape, which is inefficient because you don't need any tape
2473
 *    for the "switch" statement other than record which path it is taken.
2474
 *
2475
 * The particular "switch" control structure provided here is a multi-way structured "switch". Each branch is a
2476
 * sub-graph, so it is well-scoped. A node branches out based on the case_of condition's return value to one of
2477
 * the branches (numbered from 0 to n; -1 means no path taken). If no path is taken, the output tensors will be
2478
 * assigned with the default tensors and continue. Otherwise the computation within the sub-graph will be
2479
 * carried out and the output tensors will be assigned with the tensors specified within that sub-graph and
2480
 * continue.
2481
 *
2482
 * If we want to consider speculative execution in the future, we need to revisit our memory allocation scheme.
2483
 */
2484
2485
/**
2486
 * Function prototype to evaluate a branch expression.
2487
 */
2488
typedef int(*ccv_nnc_graph_case_of_f)(ccv_nnc_tensor_t* const* const inputs, const int input_size, const void* const data);
2489
/**
2490
 * Create a new case..of execution node symbol.
2491
 * @param graph The symbolic graph.
2492
 * @param cmd The command identifier, can be either CCV_NNC_GRAPH_FORWARD or CCV_NNC_GRAPH_BACKWARD
2493
 * @param inputs The input tensor symbols array for the expression.
2494
 * @param input_size The size of the input tensor symbols array.
2495
 * @param symbol_map The pair of tensor symbols array where the source is the input tensor symbol and the destination is the output tensor symbol.
2496
 * @param symbol_map_size The size of symbol map array.
2497
 * @param name The name of the case..of graph. Optional.
2498
 * @return An execution node symbol that represents the case..of graph.
2499
 */
2500
CCV_WARN_UNUSED(ccv_nnc_graph_exec_symbol_t) ccv_nnc_symbolic_graph_case_of_new(ccv_nnc_symbolic_graph_t* const graph, const uint32_t cmd, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, const ccv_nnc_tensor_symbol_map_t* const symbol_map, const int symbol_map_size, const char* const name);
2501
/**
2502
 * Set the expression to be evaluated when choosing which sub-graph to branch to.
2503
 * @param graph The symbolic graph.
2504
 * @param exec The execution node symbol that represents the case..of graph.
2505
 * @param case_of The function pointer to evaluate.
2506
 * @param case_of_data The data associated with the function pointer.
2507
 */
2508
void ccv_nnc_symbolic_graph_set_case_of_expr(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t exec, ccv_nnc_graph_case_of_f case_of, const void* case_of_data);
2509
/**
2510
 * Set a sub-graph as one of the branches of the case..of graph.
2511
 * @param graph The symbolic graph.
2512
 * @param symbol The execution node symbol that represents the case..of graph.
2513
 * @param case_graph The sub-graph for one of the branches.
2514
 * @param case_of The index assigned to this sub-graph (expression returns this index to determine which sub-graph to execute).
2515
 * @param symbol_map The pair of tensor symbols array where the source is the output tensor symbol of the sub-graph, and the destination is the output tensor symbol of the execution node symbol.
2516
 * @param symbol_map_size The size of the symbol map array.
2517
 */
2518
void ccv_nnc_symbolic_graph_set_case_of(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t symbol, ccv_nnc_symbolic_graph_t* const case_graph, const int case_of, const ccv_nnc_tensor_symbol_map_t* const symbol_map, const int symbol_map_size);
2519
/**
2520
 * Create a new case..of execution node.
2521
 * @param graph The concrete graph.
2522
 * @param cmd The command identifier, can be either CCV_NNC_GRAPH_FORWARD or CCV_NNC_GRAPH_BACKWARD
2523
 * @param inputs The input tensors array supplied to the expression.
2524
 * @param input_size The size of the input tensors array.
2525
 * @param outputs The output tensors array.
2526
 * @param output_size The size of the output tensors array.
2527
 * @return An execution node that represents the case..of graph.
2528
 */
2529
CCV_WARN_UNUSED(ccv_nnc_graph_exec_t) ccv_nnc_graph_case_of_new(ccv_nnc_graph_t* const graph, const uint32_t cmd, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size);
2530
/**
2531
 * Set the expression to be evaluated when choosing which sub-graph to branch to.
2532
 * @param graph The concrete graph.
2533
 * @param exec The execution node that represents the case..of graph.
2534
 * @param case_of The function pointer to evaluate.
2535
 * @param case_of_data The data associated with the function pointer.
2536
 * @param offset An integer added to the expression output to help choose the index. Thus, real index = expression index + offset.
2537
 */
2538
void ccv_nnc_graph_set_case_of_expr(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, ccv_nnc_graph_case_of_f case_of, const void* case_of_data, const int offset);
2539
/**
2540
 * Set a sub-graph as one of the branches of the case..of graph.
2541
 * @param graph The concrete graph.
2542
 * @param exec The execution node that represents the case..of graph.
2543
 * @param case_graph The sub-graph for one of the branches.
2544
 * @param case_of The index assigned to this sub-graph (expression returns this index + offset to determine which sub-graph to execute).
2545
 */
2546
void ccv_nnc_graph_set_case_of(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, ccv_nnc_graph_t* const case_graph, const int case_of);
2547
2548
/** @} */
2549
2550
/**
2551
 * @defgroup level_3_5_minimizer Gradient-based Optimization
2552
 * @{
2553
 */
2554
2555
/**
2556
 * This is the part comparable to Caffe's solver or TensorFlow's optimizer. It takes a step further than just
2557
 * computing the gradient: it also applies the gradient to update parameters to minimize the loss.
2558
 * @param graph The symbolic graph.
2559
 * @param minimizer The wrapped command that represents a particular optimization strategy.
2560
 * @param losses The tensor symbols array of losses.
2561
 * @param loss_size The size of the loss symbols array.
2562
 * @param parameters The parameter tensor symbols to optimize.
2563
 * @param parameter_size The size of parameter symbols array.
2564
 * @param inputs The additional input symbols we compute gradient against.
2565
 * @param input_size The size of the additional input symbols array.
2566
 * @param sources The source execution nodes array.
2567
 * @param source_size The size of source nodes array.
2568
 * @param destinations The destinations execution nodes array.
2569
 * @param destination_size The size of destination nodes array.
2570
 * @param gradients The tensor symbols that represent the gradients for update; should be the same size as the parameters array size + input array size. This can be 0 (optional).
2571
 * @param updated_parameters The tensor symbols that represent the updated parameters, should be the same size as the parameters array.
2572
 * @param saved_aux The tensor symbols that are helpful for the particular optimization strategy.
2573
 * @param graph_exec_symbols The execution node symbols for the updates, should be the same size as the parameters array.
2574
 */
2575
void ccv_nnc_symbolic_graph_minimize(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_cmd_t minimizer, const ccv_nnc_tensor_symbol_t* const losses, const int loss_size, const ccv_nnc_tensor_symbol_t* const parameters, const int parameter_size, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size, ccv_nnc_tensor_symbol_t* const gradients, ccv_nnc_tensor_symbol_t* const updated_parameters, ccv_nnc_tensor_symbol_map_t* const saved_aux, ccv_nnc_graph_exec_symbol_t* const graph_exec_symbols);
2576
/**
2577
 * The number of extra saved aux per parameter depends only on the command. For example, SGD with momentum requires 1 aux (for momentum).
2578
 * Others require more.
2579
 * @param minimizer The wrapped command that represents a particular optimization strategy.
2580
 * @return the number of saved aux per parameter.
2581
 */
2582
CCV_WARN_UNUSED(int) ccv_nnc_minimizer_saved_aux_size(const ccv_nnc_cmd_t minimizer);
2583
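// Example (a hedged sketch, not part of the original header): the saved aux count
// drives how many extra tensor symbols you must reserve per parameter before calling
// ccv_nnc_symbolic_graph_minimize. The exact CMD_SGD_FORWARD argument list below is
// an assumption from the generated cmd/ccv_nnc_cmd.h macros.
//   const ccv_nnc_cmd_t sgd = CMD_SGD_FORWARD(0, 0.001, 1, 0.001, 0.9, 0.9);
//   const int aux_size = ccv_nnc_minimizer_saved_aux_size(sgd); // 1 for SGD with momentum
//   // reserve parameter_size * aux_size entries for the saved_aux argument.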
2584
/** @} */
2585
2586
/**
2587
 * @defgroup level_3_5_simplify Graph Simplification
2588
 * @{
2589
 */
2590
2591
/**
2592
 * @page symbolic_simplify Symbolic graph simplification
2593
 *
2594
 * We make a distinction between graph simplifications and optimizations (autotune).
2595
 *
2596
 * Simplification: rewrite the graph and the resulting graph will have less nodes. This is done on the symbolic
2597
 * graph only. Passes that are "simplification" include pruning, common sub-expression elimination, constant
2598
 * folding etc.
2599
 *
2600
 * Optimization (autotune): graph optimization can have more objectives. The most obvious objective is to reduce
2601
 * computation time. For the symbolic graph, passes that reduce computation time include data layout optimizations,
2602
 * auto parallel etc (in normal optimization implementations, they have a cost model to guide the optimization.
2603
 * NNC's implementation uses a cost database that profiles the time cost on the device to guide the optimization.
2604
 * We call it autotune to distinguish it from the normal optimization passes because we need device profile data).
2605
 * There could be other objectives, for example, in many deep learning applications, reducing memory footprint
2606
 * can be desirable. However, as always in computer science, memory and time are a typical trade-off. Memory
2607
 * optimization almost always results in longer computation time, and the objective is to trade between these two
2608
 * with a bias term (in other frameworks such as TensorFlow, the memory optimizer uses a list of "cheap ops" to
2609
 * bias between the time and memory footprint).
2610
 *
2611
 * For graph optimizations, it can happen on both the symbolic graph level as well as the concrete graph level.
2612
 * For NNC, symbolic graph is already very explicit (data layout, device allocation and data transfer between
2613
 * devices / nodes, even the command backend can all be specified on the symbolic graph), however, some
2614
 * information is unknown until it is compiled down to concrete graph (tensor addresses, tensor initialization
2615
 * etc.), and graph optimizations need all the information to optimize. Keeping the flexibility to do
2616
 * optimization on both the symbolic and concrete graph levels seems reasonable.
2617
 */
2618
2619
enum {
2620
  /**
2621
   * If two commands generate the same outputs, all the places where the newer output is used will be replaced by
2622
   * the old output. Later, in the graph pruning stage, the command that generates the newer output will be
2623
   * eliminated.
2624
   */
2625
  CCV_NNC_SIMPLIFY_COMMON_SUBEXPRESSION_ELIMINATION,
2626
  /**
2627
   * For the given outputs, eliminate unused input tensors, and then eliminate graph execs that don't contribute
2628
   * to the outputs.
2629
   */
2630
  CCV_NNC_SIMPLIFY_GRAPH_PRUNING,
2631
  /**
2632
   * For CCV_NNC_DATA_TRANSFER, if the input / output is the same (on the same device, no alias), we can skip.
2633
   * Similarly, if it is on the same device, but alias of some, for some cases we can skip as well (if neither
2634
   * are carry overs, bypasses etc.)
2635
   */
2636
  CCV_NNC_SIMPLIFY_DATA_TRANSFER_OPT,
2637
  /**
2638
   * Combine a few smaller ops into a bigger one. For now, this functionality is limited. I can only address ops
2639
   * that are sequential.
2640
   */
2641
  CCV_NNC_SIMPLIFY_OPS_FUSION,
2642
  // CCV_NNC_SIMPLIFY_CONSTANT_FOLDING, // This currently is not supported, because we don't have efficient way to express constant in symbolic graph.
2643
};
2644
/**
2645
 * Simplify a graph with given list of passes, in that particular order.
2646
 * Note, when a graph is simplified, its sources / destinations are changed as well.
2647
 * @param graph The symbolic graph.
2648
 * @param passes The array of passes we are going to apply.
2649
 * @param pass_size The size of the passes array.
2650
 * @param binds The tensor symbols we may bind to an input later (it doesn't prevent pruning any execution nodes).
2651
 * @param bind_size The size of the bind array.
2652
 * @param outputs The output tensor symbols we want to retain (we are going to prune any execution nodes that are not related to these outputs).
2653
 * @param output_size The size of the output array.
2654
 * @param sources The source execution node symbols array.
2655
 * @param source_size The size of source node symbols array.
2656
 * @param destinations The destinations execution node symbols array.
2657
 * @param destination_size The size of destination node symbols array.
2658
 */
2659
void ccv_nnc_symbolic_graph_simplify(ccv_nnc_symbolic_graph_t* const graph, const int* const passes, const int pass_size, const ccv_nnc_tensor_symbol_t* const binds, const int bind_size, const ccv_nnc_tensor_symbol_t* const outputs, const int output_size, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size);
2660
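// Example (a hedged sketch, not part of the original header): apply common
// sub-expression elimination, then prune anything that does not contribute to an
// assumed `loss` symbol; `source` / `destination` are assumed exec symbols.
//   const int passes[] = {
//     CCV_NNC_SIMPLIFY_COMMON_SUBEXPRESSION_ELIMINATION,
//     CCV_NNC_SIMPLIFY_GRAPH_PRUNING,
//   };
//   ccv_nnc_symbolic_graph_simplify(graph, passes, 2, 0, 0 /* no binds */, &loss, 1, &source, 1, &destination, 1);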
2661
/** @} */
2662
2663
/**
2664
 * @defgroup level_3_5_parallel Automatic Graph Parallelization
2665
 * @{
2666
 */
2667
2668
enum {
2669
  /**
2670
   * Op for reducer / allreducer. Currently only supports sum.
2671
   */
2672
  CCV_NNC_PARALLEL_REDUCE_OP_SUM,
2673
};
2674
2675
/**
2676
 * Turn the existing graph into one capable of running on several devices with different data inputs in parallel.
2677
 * With this method, additional tensor symbols will be created that run on different devices. That
2678
 * said, there are concepts of "broadcast" and "reduce". "broadcast" tensor symbols will be copied to
2679
 * different devices, while "reduce" tensors will be summed from different devices to the default device.
2680
 * The "allreducer" concept is simpler. The allreduce operation will be performed on these tensors and then
2681
 * be used on different devices again.
2682
 *
2683
 * Limitations: right now, the way to reduce / allreduce tensors only supports "sum". The data parallel
2684
 * only supports GPU; thus, the nodes that will be duplicated are GPU computations and GPU memory backed
2685
 * tensors. Also, right now, the tensors to be broadcasted / allreduced / reduced should have no aliases.
2686
 *
2687
 * @param graph The symbolic graph.
2688
 * @param parallel Number of devices we want to run on. 0 will use all devices available. 1 will skip.
2689
 * @param broadcasts The tensor symbols to be broadcasted.
2690
 * @param broadcast_size The size of the broadcast tensor symbols array.
2691
 * @param allreducers The tensor symbols to be allreduced.
2692
 * @param allreducer_size The size of the allreducer tensor symbols array.
2693
 * @param allreducer_outs Returns the tensor symbols for allreducers before they are allreduced. Optional, 0
2694
 *        means I don't care about this.
2695
 * @param reducers The tensor symbols to be reduced.
2696
 * @param reducer_size The size of the reducer tensor symbols array.
2697
 * @param reducer_outs Returns the tensor symbols for reducers after they are reduced. Optional, 0 means
2698
 *        I don't care about this.
2699
 * @param reduce_op_type The reduce op for reducer / allreducer.
2700
 * @param sources The source execution node symbols array.
2701
 * @param source_size The size of source node symbols array.
2702
 * @param destinations The destinations execution node symbols array.
2703
 * @param destination_size The size of destination node symbols array.
2704
 */
2705
void ccv_nnc_symbolic_graph_data_parallel(ccv_nnc_symbolic_graph_t* const graph, const int parallel, const ccv_nnc_tensor_symbol_t* const broadcasts, const int broadcast_size, const ccv_nnc_tensor_symbol_t* const allreducers, const int allreducer_size, ccv_nnc_tensor_symbol_t* const allreducer_outs, const ccv_nnc_tensor_symbol_t* const reducers, const int reducer_size, ccv_nnc_tensor_symbol_t* const reducer_outs, const int reduce_op_type, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size);
2706
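// Example (a hedged sketch, not part of the original header): replicate the graph
// across all visible GPUs, broadcasting an assumed input symbol `x` and allreducing
// an assumed gradient symbol `dw`.
//   ccv_nnc_symbolic_graph_data_parallel(graph, 0 /* all devices */,
//     &x, 1, /* broadcasts */
//     &dw, 1, 0, /* allreducers, skip the pre-allreduce outs */
//     0, 0, 0, /* no reducers */
//     CCV_NNC_PARALLEL_REDUCE_OP_SUM,
//     &source, 1, &destination, 1);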
/**
2707
 * Get the symbol that is on a device other than the default one. The list will be flushed if the
2708
 * ccv_nnc_symbolic_graph_data_parallel function is called again.
2709
 * @param graph The symbolic graph.
2710
 * @param symbol The tensor symbol we want to retrieve its counterpart on a different device.
2711
 * @param device_id The device numeric id for this symbol.
2712
 * @return A tensor symbol that is on a different device.
2713
 */
2714
CCV_WARN_UNUSED(ccv_nnc_tensor_symbol_t) ccv_nnc_tensor_symbol_copy(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t symbol, const int device_id);
2715
/**
2716
 * Set corresponding symbol for this symbol on another device. Thus, someone else can query this
2717
 * later with ccv_nnc_tensor_symbol_copy
2718
 * @param graph The symbolic graph.
2719
 * @param symbol The tensor symbol we want to set its counterpart on a different device.
2720
 * @param device_id The device numeric id for this symbol.
2721
 * @param copy The tensor symbol counterpart on a different device.
2722
 */
2723
void ccv_nnc_tensor_symbol_set_copy(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t symbol, const int device_id, const ccv_nnc_tensor_symbol_t copy);
2724
/**
2725
 * Get the execution node that is on a device other than the default one. The list will be flushed
2726
 * if the ccv_nnc_symbolic_graph_data_parallel function is called again.
2727
 * @param graph The symbolic graph.
2728
 * @param symbol The execution node we want to retrieve its counterpart on a different device.
2729
 * @param device_id The device numeric id for this symbol.
2730
 * @return An execution node that is on a different device.
2731
 */
2732
CCV_WARN_UNUSED(ccv_nnc_graph_exec_symbol_t) ccv_nnc_graph_exec_symbol_copy(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t symbol, const int device_id);
2733
/**
2734
 * Set corresponding symbol for this symbol on another device. Thus, someone else can query this
2735
 * later with ccv_nnc_graph_exec_symbol_copy
2736
 * @param graph The symbolic graph.
2737
 * @param symbol The execution node we want to set its counterpart on a different device.
2738
 * @param device_id The device numeric id for this symbol.
2739
 * @param copy The execution node counterpart on a different device.
2740
 */
2741
void ccv_nnc_graph_exec_symbol_set_copy(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t symbol, const int device_id, const ccv_nnc_graph_exec_symbol_t copy);
2742
2743
/** @} */
2744
2745
/**
2746
 * @defgroup level_3_5_memory_compression Memory Compression
2747
 * @{
2748
 */
2749
2750
/**
2751
 * Apply the LSSC memory compression algorithm to the convolution activations. This will compress the activation
2752
 * layers for convolution, therefore saving overall memory usage during training.
2753
 *
2754
 * @param graph The symbolic graph.
2755
 * @param sources The source execution node symbols array.
2756
 * @param source_size The size of source node symbols array.
2757
 * @param destinations The destinations execution node symbols array.
2758
 * @param destination_size The size of destination node symbols array.
2759
 */
2760
void ccv_nnc_symbolic_graph_memory_compression(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size);
2761
2762
/** @} */
2763
2764
/**
2765
 * @defgroup level_3_5_memory_reduction Memory Reduction
2766
 * @{
2767
 */
2768
2769
/**
2770
 * Investigate memory reduction opportunities on the graph. Right now, we are looking at datatype
2771
 * conversions that result in a larger datatype, where these larger ones are kept for the backward pass.
2772
 * For these cases, we will keep the smaller one instead, and reconvert to the larger datatype prior
2773
 * to the backward pass.
2774
 *
2775
 * @param graph The symbolic graph.
2776
 * @param sources The source execution node symbols array.
2777
 * @param source_size The size of source node symbols array.
2778
 * @param destinations The destinations execution node symbols array.
2779
 * @param destination_size The size of destination node symbols array.
2780
 */
2781
void ccv_nnc_symbolic_graph_memory_reduction(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size);
2782
2783
/** @} */
2784
2785
/** @} */
2786
2787
/**
2788
 * @defgroup level_4 Level-4 API
2789
 * @{
2790
 */
2791
2792
/**
2793
 * Opaque pointer to the dynamic graph structure.
2794
 */
2795
typedef struct ccv_nnc_dynamic_graph_s ccv_nnc_dynamic_graph_t;
2796
2797
/**
2798
 * Masquerade this as if it is an on-stack variable; there is a heap allocation, but it is managed by the dynamic graph.
2799
 * The fact that ccv_nnc_tensor_variable_t is a pointer is an implementation detail. It should be treated as an
2800
 * opaque type throughout. We may later extend this to be some on-stack information or even just a uid.
2801
 */
2802
typedef struct ccv_nnc_tensor_variable_s* ccv_nnc_tensor_variable_t;
2803
2804
/**
2805
 * Create a dynamic graph.
2806
 * @return A newly created dynamic graph.
2807
 */
2808
CCV_WARN_UNUSED(ccv_nnc_dynamic_graph_t*) ccv_nnc_dynamic_graph_new(void);
2809
2810
/** @cond ALL */
2811
// Get a new tensor variable.
2812
CCV_WARN_UNUSED(ccv_nnc_tensor_variable_t) ccv_nnc_tensor_variable_new_impl(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_param_t info);
2813
16.5k
#define CCV_NNC_TENSOR_VARIABLE_NEW_X_1(graph) ccv_nnc_tensor_variable_new_impl(graph, ccv_nnc_tensor_auto)
2814
14.6k
#define CCV_NNC_TENSOR_VARIABLE_NEW_X_SEL(_1, _2, _FX, ...) _FX
2815
// Make it so that this new method can be called either with no extra parameter or with a tensor_param.
2816
31.2k
#define ccv_nnc_tensor_variable_new(graph, ...) CCV_NNC_TENSOR_VARIABLE_NEW_X_SEL(graph, ##__VA_ARGS__, ccv_nnc_tensor_variable_new_impl, CCV_NNC_TENSOR_VARIABLE_NEW_X_1)(graph, ##__VA_ARGS__)
2817
CCV_WARN_UNUSED(ccv_nnc_tensor_variable_t) ccv_nnc_tensor_constant_new_impl(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_param_t info);
2818
#define CCV_NNC_TENSOR_CONSTANT_NEW_X_1(graph) ccv_nnc_tensor_constant_new_impl(graph, ccv_nnc_tensor_auto)
2819
37
#define CCV_NNC_TENSOR_CONSTANT_NEW_X_SEL(_1, _2, _FX, ...) _FX
2820
// Make it so that this new method can be called either with no extra parameter or with a tensor_param.
2821
37
#define ccv_nnc_tensor_constant_new(graph, ...) CCV_NNC_TENSOR_CONSTANT_NEW_X_SEL(graph, ##__VA_ARGS__, ccv_nnc_tensor_constant_new_impl, CCV_NNC_TENSOR_CONSTANT_NEW_X_1)(graph, ##__VA_ARGS__)
2822
/** @endcond */
2823
2824
/**
2825
 * Create a new tensor variable that is an alias of a given tensor variable. You can alias any tensor
2826
 * variable that is itself not an alias. You can also alias an alias, with some conditions: the tensor
2827
 * variable itself can be an alias, but it needs to be contiguous as well. For example, a vector is
2828
 * contiguous. If both conditions are satisfied, you can alias an alias.
2829
 * @param graph The dynamic graph.
2830
 * @param tensor_variable The tensor variable we are going to alias from.
2831
 * @param ofs The offset on each of the dimension.
2832
 * @param stride The stride of each dimension. If all 0, it matches the dimension of the tensor_variable.
2833
 * @param info The tensor parameters for the new alias.
2834
 * @return New tensor variable that is an alias.
2835
 */
2836
CCV_WARN_UNUSED(ccv_nnc_tensor_variable_t) ccv_nnc_tensor_variable_alias_new(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable, const int ofs[CCV_NNC_MAX_DIM_ALLOC], const int stride[CCV_NNC_MAX_DIM_ALLOC], const ccv_nnc_tensor_param_t info);
2837
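// Example (a hedged sketch, not part of the original header): alias the second row
// of a 2x3 variable as a 3-element vector. CPU_TENSOR_NHWC is assumed from the
// ccv_nnc_easy.h helpers.
//   ccv_nnc_tensor_variable_t m = ccv_nnc_tensor_variable_new(graph, CPU_TENSOR_NHWC(32F, 2, 3));
//   const int ofs[CCV_NNC_MAX_DIM_ALLOC] = {1, 0};
//   const int stride[CCV_NNC_MAX_DIM_ALLOC] = {0}; // all 0: match the dimensions of m
//   ccv_nnc_tensor_variable_t row = ccv_nnc_tensor_variable_alias_new(graph, m, ofs, stride, CPU_TENSOR_NHWC(32F, 3));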
/**
2838
 * Get the parameters for a tensor variable.
2839
 * @param graph The dynamic graph.
2840
 * @param tensor_variable The tensor variable reference.
2841
 * @return The tensor parameters.
2842
 */
2843
CCV_WARN_UNUSED(ccv_nnc_tensor_param_t) ccv_nnc_tensor_variable_params(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable);
2844
/**
2845
 * Get the parameters for a tensor variable alias.
2846
 * @param graph The dynamic graph.
2847
 * @param tensor_variable The tensor variable reference.
2848
 * @param ofs The offset on each of the dimension.
2849
 * @param stride The stride of each dimension.
2850
 * @return non-zero if it is not a tensor alias.
2851
 */
2852
int ccv_nnc_tensor_variable_alias_params(const ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable, int ofs[CCV_NNC_MAX_DIM_ALLOC], int stride[CCV_NNC_MAX_DIM_ALLOC]);
2853
2854
/** @cond ALL */
2855
/**
2856
 * Get the underlying tensor for the tensor variable. The tensor allocation may be performed when calling this
2857
 * method. If the tensor cannot be allocated (because no shape is specified), returns 0.
2858
 * @param graph The dynamic graph.
2859
 * @param tensor_variable The tensor variable to get the underlying tensor.
2860
 * @param stream_context Which stream this command will be executed upon.
2861
 * @return The underlying tensor.
2862
 */
2863
CCV_WARN_UNUSED(ccv_nnc_tensor_t*) ccv_nnc_tensor_from_variable_impl(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable, ccv_nnc_stream_context_t* const stream_context);
2864
8.52k
#define CCV_NNC_TENSOR_FROM_VARIABLE_X_1(graph, tensor_variable) ccv_nnc_tensor_from_variable_impl(graph, tensor_variable, 0)
2865
60.4k
#define CCV_NNC_TENSOR_FROM_VARIABLE_X_SEL(_1, _2, _3, _FX, ...) _FX
2866
// Make it so that this new method can be called either with no extra parameter or with a tensor_param.
2867
68.9k
#define ccv_nnc_tensor_from_variable(graph, tensor_variable, ...) CCV_NNC_TENSOR_FROM_VARIABLE_X_SEL(graph, tensor_variable, ##__VA_ARGS__, ccv_nnc_tensor_from_variable_impl, CCV_NNC_TENSOR_FROM_VARIABLE_X_1)(graph, tensor_variable, ##__VA_ARGS__)
2868
/** @endcond */
2869
/**
2870
 * Query whether a given tensor variable is a constant (no gradient).
2871
 * @param graph The dynamic graph.
2872
 * @param tensor_variable The tensor variable to query whether it is a constant.
2873
 */
2874
CCV_WARN_UNUSED(int) ccv_nnc_tensor_variable_is_constant(const ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable);
2875
/**
2876
 * Set a tensor on the tensor variable. The tensor variable doesn't take over life-cycle management of the tensor
2877
 * (in a similar way as tensor binds).
2878
 * @param graph The dynamic graph.
2879
 * @param tensor_variable The tensor variable to set.
2880
 * @param tensor The tensor that is going to be associated with the tensor variable.
2881
 */
2882
void ccv_nnc_tensor_variable_set(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable, ccv_nnc_tensor_t* const tensor);
2883
/**
2884
 * Detach the tensor variable from the current graph. It acts as if computed between
2885
 * ``ccv_nnc_dynamic_graph_set_no_grad``. Thus, there are a few requirements for this:
2886
 * 1. It cannot be an alias when detaching. You have to detach the original, not the alias.
2887
 * 2. Detaching a variable could impact correctness when computing gradients. This cuts off backprop, acting as if the
2888
 *    detached variable is a constant (it will be marked as such).
2889
 * After this call, the tensor variable will be marked as constant and you can query that through ``ccv_nnc_tensor_variable_is_constant``.
2890
 * Why this method rather than making the variable a constant to begin with? First, a constant
2891
 * cannot be the output. Second, you may not have wrapped your computation between no-grad calls, or not all inputs
2892
 * are constants, resulting in a tensor variable that is on a graph. This method is helpful to rescue from
2893
 * that situation.
2894
 * @param graph The dynamic graph.
2895
 * @param tensor_variable The tensor variable to be detached.
2896
 */
2897
void ccv_nnc_tensor_variable_detach(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable);
2898
/**
2899
 * A destructor function to be called when a tensor variable will be freed in the sense that no
2900
 * backward computation needs it any more.
2901
 * Thus, we pass in tensor rather than tensor variable for the destructor.
2902
 */
2903
typedef void (*ccv_nnc_tensor_variable_destructor_f)(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_t* const tensor, void* const context);
2904
/**
2905
 * Hook into a tensor variable such that when it is actually freed (destroyed), the callback will receive
2906
 * the update.
2907
 * @param graph The dynamic graph.
2908
 * @param tensor_variable The tensor variable to observe when it is destroyed.
2909
 * @param func The callback function.
2910
 * @param context The context to be passed along to the callback function.
2911
 **/
2912
void ccv_nnc_tensor_variable_destructor_hook(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable, ccv_nnc_tensor_variable_destructor_f func, void* const context);
2913
/**
2914
 * Check whether the given tensor variables have effects on another set of tensor variables.
2915
 * @param graph The dynamic graph.
2916
 * @param source_variables The tensor variables to check for effects on another set of variables.
2917
 * @param source_variable_size The size of source tensor variables.
2918
 * @param destination_variables The list of variables the source variables may have effects on.
2919
 * @param destination_variable_size The size of destination tensor variables.
2920
 * @param bitmask Bit return value; each bit represents a source tensor variable, and 1 means it can reach some of the destinations.
2921
 */
2922
void ccv_nnc_dynamic_graph_has_effect_to_tensor_variables(const ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t* const source_variables, const int source_variable_size, const ccv_nnc_tensor_variable_t* const destination_variables, const int destination_variable_size, uint64_t* const bitmask);
2923
/**
2924
 * Execute a command with given tensor variables, the output is in the output tensor variables.
2925
 * @param graph The dynamic graph.
2926
 * @param cmd The wrapped command.
2927
 * @param hint The hint associated with the command.
2928
 * @param flags A reserved field for flags.
2929
 * @param inputs The input tensor variables array.
2930
 * @param input_size The size of the input tensor variables array.
2931
 * @param outputs The output tensor variables array.
2932
 * @param output_size The size of the output tensor variables array.
2933
 * @param parallel The parallel parameter, how many concurrent computations we need to execute.
2934
 * @param stream_context Which stream this command will be executed upon.
2935
 */
2936
int ccv_nnc_dynamic_graph_exec(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, const ccv_nnc_tensor_variable_t* const inputs, const int input_size, ccv_nnc_tensor_variable_t* const outputs, const int output_size, const int parallel, ccv_nnc_stream_context_t* const stream_context);
2937
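// Example (a hedged sketch, not part of the original header): element-wise product
// of two scalars on a dynamic graph. CMD_EWPROD_FORWARD and CPU_TENSOR_NHWC are
// assumed from the generated command macros and ccv_nnc_easy.h helpers respectively.
//   ccv_nnc_dynamic_graph_t* const graph = ccv_nnc_dynamic_graph_new();
//   ccv_nnc_tensor_variable_t a = ccv_nnc_tensor_variable_new(graph, CPU_TENSOR_NHWC(32F, 1));
//   ccv_nnc_tensor_variable_t b = ccv_nnc_tensor_variable_new(graph, CPU_TENSOR_NHWC(32F, 1));
//   ccv_nnc_tensor_from_variable(graph, a)->data.f32[0] = 2;
//   ccv_nnc_tensor_from_variable(graph, b)->data.f32[0] = 3;
//   ccv_nnc_tensor_variable_t prod = ccv_nnc_tensor_variable_new(graph);
//   ccv_nnc_tensor_variable_t inputs[] = {a, b};
//   ccv_nnc_dynamic_graph_exec(graph, CMD_EWPROD_FORWARD(), ccv_nnc_no_hint, 0, inputs, 2, &prod, 1, 0, 0);
//   // ccv_nnc_tensor_from_variable(graph, prod)->data.f32[0] is now 6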
/**
2938
 * Compute the gradients of f with respect to the given input tensors. Thus, df / dt.
2939
 * @param dynamic_graph The dynamic graph.
2940
 * @param f_variables The output losses.
2941
 * @param f_variable_size The size of output losses array.
2942
 * @param df_optionals The custom gradients for f. If not provided, will default to 1.
2943
 * @param inputs The input variables.
2944
 * @param input_size The size of the input variables array.
2945
 * @param outputs The gradients with respect to the inputs. If a gradient already has a value, it will be
2946
 *        accumulated into the final value.
2947
 * @param output_size The size of the outputs array. Should be equal to the input_size.
2948
 * @param stream_context Which stream this computation will be executed upon.
2949
 */
2950
void ccv_nnc_dynamic_graph_backward(ccv_nnc_dynamic_graph_t* const dynamic_graph, const ccv_nnc_tensor_variable_t* const f_variables, const int f_variable_size, const ccv_nnc_tensor_variable_t* const df_optionals, const ccv_nnc_tensor_variable_t* const inputs, const int input_size, ccv_nnc_tensor_variable_t* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context);
2951
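// Example (a hedged sketch, not part of the original header), continuing the
// product example above: since prod = a * b, d(prod)/d(a) evaluates to the value of b.
//   ccv_nnc_tensor_variable_t da = ccv_nnc_tensor_variable_new(graph);
//   ccv_nnc_dynamic_graph_backward(graph, &prod, 1, 0 /* default df = 1 */, &a, 1, &da, 1, 0);
//   // ccv_nnc_tensor_from_variable(graph, da)->data.f32[0] is now 3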
/**
2952
 * Apply gradients to the set of parameters to update them with appropriate minimizer.
2953
 * @param dynamic_graph The dynamic graph.
2954
 * @param minimizer The wrapped command that represents a particular optimization strategy.
2955
 * @param gradients The computed gradients to be applied.
2956
 * @param gradient_size The size of gradients array.
2957
 * @param parameters The parameters to update.
2958
 * @param parameter_size The size of parameters array, should be the same length as gradients.
2959
 * @param saved_aux The aux variables to facilitate the minimizer. See ccv_nnc_minimizer_saved_aux_size.
2960
 * @param parallel The parallel parameter, how many concurrent computations we need to execute.
2961
 * @param stream_context Which stream this computation will be executed upon.
2962
 */
2963
void ccv_nnc_dynamic_graph_apply_gradients(ccv_nnc_dynamic_graph_t* const dynamic_graph, const ccv_nnc_cmd_t minimizer, const ccv_nnc_tensor_variable_t* const gradients, const int gradient_size, ccv_nnc_tensor_variable_t* const parameters, const int parameter_size, ccv_nnc_tensor_variable_t* const saved_aux, const int parallel, ccv_nnc_stream_context_t* const stream_context);
2964
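// Example (a hedged sketch, not part of the original header): one SGD step on a
// single parameter `w` with an assumed gradient variable `dw`. The CMD_SGD_FORWARD
// argument list is an assumption from the generated command macros.
//   const ccv_nnc_cmd_t sgd = CMD_SGD_FORWARD(0, 0.001, 1, 0.001, 0.9, 0.9);
//   ccv_nnc_tensor_variable_t saved_aux[1] = { ccv_nnc_tensor_variable_new(graph) }; // 1 == ccv_nnc_minimizer_saved_aux_size(sgd)
//   ccv_nnc_dynamic_graph_apply_gradients(graph, sgd, &dw, 1, &w, 1, saved_aux, 0, 0);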
/**
2965
 * Apply one step of minimization (most likely, a gradient descent) to the parameters with a given loss (or
2966
 * losses).
2967
 * @param dynamic_graph The dynamic graph.
2968
 * @param minimizer The wrapped command that represents a particular optimization strategy.
2969
 * @param losses The losses we are trying to minimize.
2970
 * @param loss_size The size of the losses array.
2971
 * @param dloss_optionals The custom gradient for losses. If not provided, will default to 1.
2972
 * @param parameters The parameters to update.
2973
 * @param parameter_size The size of parameters array.
2974
 * @param saved_aux The aux variables to facilitate the minimizer. See ccv_nnc_minimizer_saved_aux_size.
2975
 * @param parallel The parallel parameter, how many concurrent computations we need to execute.
2976
 * @param stream_context Which stream this computation will be executed upon.
2977
 */
2978
void ccv_nnc_dynamic_graph_minimize(ccv_nnc_dynamic_graph_t* const dynamic_graph, const ccv_nnc_cmd_t minimizer, const ccv_nnc_tensor_variable_t* const losses, const int loss_size, const ccv_nnc_tensor_variable_t* const dloss_optionals, ccv_nnc_tensor_variable_t* const parameters, const int parameter_size, ccv_nnc_tensor_variable_t* const saved_aux, const int parallel, ccv_nnc_stream_context_t* const stream_context);
2979
/**
2980
 * Read more in Level-5 API section.
2981
 */
2982
typedef struct ccv_cnnp_model_s ccv_cnnp_model_t;
2983
/**
2984
 * Evaluate a CNNP model on the dynamic graph with set of inputs / outputs.
2985
 * @param dynamic_graph The dynamic graph.
2986
 * @param model The CNNP model to be evaluated against. Note that ccv_nnc_dynamic_graph_backward /
2987
 *              ccv_nnc_dynamic_graph_apply_gradients / ccv_nnc_dynamic_graph_minimize all work with this
2988
 *              model. The dynamic graph takes over the life-cycle of the model, so you don't need to free it any more.
2989
 * @param is_test Whether we are in test mode or not.
2990
 * @param inputs The input variables.
2991
 * @param input_size The size of the input variables array.
2992
 * @param outputs The output variables.
2993
 * @param output_size The size of the outputs array.
2994
 * @param tensor_tape An opaque tensor tape object to "backpropagate through time".
2995
 * @param stream_context Which stream this computation will be executed upon.
2996
 */
2997
void ccv_nnc_dynamic_graph_evaluate(ccv_nnc_dynamic_graph_t* const dynamic_graph, ccv_cnnp_model_t* const model, const int is_test, const ccv_nnc_tensor_variable_t* const inputs, const int input_size, ccv_nnc_tensor_variable_t* const outputs, const int output_size, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const stream_context);
2998
/**
2999
 * Dry run a CNNP model on the dynamic graph with set of inputs up until the actual execution.
3000
 * @param dynamic_graph The dynamic graph.
3001
 * @param model The CNNP model to be evaluated against. Note that ccv_nnc_dynamic_graph_backward /
3002
 *              ccv_nnc_dynamic_graph_apply_gradients / ccv_nnc_dynamic_graph_minimize all work with this
3003
 *              model. The dynamic graph takes over the life-cycle of the model, so you don't need to free it any more.
3004
 * @param is_test Whether we are in test mode or not.
3005
 * @param inputs The input variables.
3006
 * @param input_size The size of the input variables array.
3007
 * @param stream_context Which stream this computation will be executed upon.
3008
 */
3009
void ccv_nnc_dynamic_graph_dry_run(ccv_nnc_dynamic_graph_t* const dynamic_graph, ccv_cnnp_model_t* const model, const int is_test, const ccv_nnc_tensor_variable_t* const inputs, const int input_size, ccv_nnc_stream_context_t* const stream_context);
3010
/**
3011
 * Set the maximum operator-level concurrency. This is a soft limit; e.g., if you have operations on
3012
 * different devices, they are concurrent.
3013
 * @param graph The dynamic graph.
3014
 * @param max_stream_count The maximum concurrency if the dynamic graph schedules internal streams. 0 is no limit.
3015
 */
3016
void ccv_nnc_dynamic_graph_set_max_concurrency(ccv_nnc_dynamic_graph_t* const graph, const int max_stream_count);
3017
/**
3018
 * Enable or disable gradient computation on a dynamic graph.
3019
 * @param dynamic_graph The dynamic graph.
3020
 * @param no_grad If it is 1, disable gradient computation on the dynamic graph.
3021
 * @return 0 if the flag was changed; non-zero if it was not changed.
3022
 */
3023
int ccv_nnc_dynamic_graph_set_no_grad(ccv_nnc_dynamic_graph_t* const dynamic_graph, const int no_grad);
3024
/**
3025
 * The dynamic graph will retain memory it allocated for efficient reuse. Triggering this method
3026
 * intentionally will force this memory to be collected. This is helpful if you know the existing
3027
 * allocation won't be enough for future use.
3028
 * @param dynamic_graph The dynamic graph.
3029
 */
3030
void ccv_nnc_dynamic_graph_gc(ccv_nnc_dynamic_graph_t* const dynamic_graph);
3031
/**
3032
 * Dispose a tensor variable. You cannot do any computation against this tensor variable afterwards.
3033
 * @param graph The dynamic graph.
3034
 * @param tensor_variable The tensor variable to be disposed.
3035
 */
3036
void ccv_nnc_tensor_variable_free(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable);
3037
/**
3038
 * Free the dynamic graph.
3039
 * @param graph The dynamic graph.
3040
 */
3041
void ccv_nnc_dynamic_graph_free(ccv_nnc_dynamic_graph_t* const graph);
3042
/**
3043
 * Generate output that can be parsed by GraphViz (DOT language).
3044
 * @param graph The dynamic graph.
3045
 * @param flags Either CCV_NNC_SHORT_DOT_GRAPH or CCV_NNC_LONG_DOT_GRAPH
3046
 * @param out The output file stream.
3047
 */
3048
void ccv_nnc_dynamic_graph_dot(const ccv_nnc_dynamic_graph_t* const graph, const int flags, FILE* out);
3049
/**
3050
 * Count how many ops we kept for gradient computation purposes. This method is useful when we
3051
 * want to assert that, at the end of some training loop, we shouldn't have any gradient computation left.
3052
 * @param graph The dynamic graph.
3053
 * @param type The type of variables to trace. CCV_NNC_SYMBOL_TENSOR / CCV_NNC_SYMBOL_GRAPH_EXEC
3054
 * @return How many gradient computations we kept.
3055
 */
3056
CCV_WARN_UNUSED(int) ccv_nnc_dynamic_graph_bookkeeping_count(const ccv_nnc_dynamic_graph_t* const graph, const int type);
3057
/**
3058
 * Provide a hook for an upper level to do custom formatting of a given dynamic graph for whatever is
3059
 * inside. You can implement logic to format the graph into protobuf, or JSON. However, this
3060
 * is not the method for you to visit the graph, and do mutations on it. If ops are not needed for
3061
 * gradient computation, these are likely not kept on the dynamic graph at all. You probably will
3062
 * get an empty graph. What's still available can be checked with the ccv_nnc_dynamic_graph_bookkeeping_count.
3063
 * @param graph The dynamic graph.
3064
 * @param format_fn The format callback to be called on every node.
3065
 * @param context The context that will be passed to the callback.
3066
 */
3067
void ccv_nnc_dynamic_graph_format(const ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_symbolic_graph_format_f format_fn, void* const context);
3068
3069
/** @} */
3070
3071
/**
3072
 * @defgroup level_5 Level-5 API
3073
 * @{
3074
 */
3075
3076
/**
3077
 * @page dataframe What is "dataframe" in ML?
3078
 *
3079
 * A large part of machine learning consists of going through data, processing it into a shape / form that makes sense,
3080
 * and passing that into the model to train. Deep learning frameworks such as TensorFlow or PyTorch provide some
3081
 * dataset APIs for this purpose. It is convenient for these frameworks because by being Python, people can use
3082
 * Pandas to process the data. In Pandas, this is called a Dataframe, which, again, imitates the R language.
3083
 *
3084
 * Another interesting observation comes from the recent (2018) release of the Create ML framework from Apple. It provides
3085
 * a data processing API (MLDataTable) very close in style to Pandas, but in Swift. This implementation is important because
3086
 * it provides a survey point other than Python.
3087
 *
3088
 * Compared to Python, Swift is a more strongly typed language. Though both are high-level, they both have pretty good
3089
 * string support (of course!), operator overloading, and polymorphism. String support makes column naming natural,
3090
 * operator overloading makes conditioning and filtering easier, and polymorphism makes column type representation
3091
 * straight-forward. These, unfortunately, are the challenges I need to face when implementing in C, with an eye
3092
 * towards later implementing similar ideas on top of this one in a high-level language.
3093
 *
3094
 * It seems I haven't answered the most crucial question yet: what's special about these data process APIs? It is
3095
 * easier to answer this by first seeing what Pandas or MLDataTable does.
3096
 *
3097
 * * They both represent data as tables. Each column represents a different type of data (time, nd-array, scalar
3098
 *   or string). As such, they both have API to add / remove / rename columns, and load tabular data from disk.
3099
 *
3100
 * * They both provide API to filter (remove / add) rows, and derive new column from existing columns.
3101
 *
3102
 * * Pandas provides more API for data alignment (merge columns from different tables into one table), and compute
3103
 *   statistics (group rows by some criteria, and compute min / max / std / mean within that group).
3104
 *
3105
 * * MLDataTable provides API to batching data (random split) which covered in TensorFlow / PyTorch's Dataset API
3106
 *   as well.
3107
 *
3108
 * It turns out that when you have a noisy dataset, these functionalities are useful for removing unwanted data quickly.
3109
 * If you have a relatively clean dataset, it also allows you to prepare data in a more elegant way. For NNC,
3110
 * the interesting requirements are:
3111
 *
3112
 * 1. Represents scalars, tensors, string as columns; columns can be named.
3113
 *
3114
 * 2. New columns can be derived from existing ones.
3115
 *
3116
 * 3. Rows can be filtered, grouped, and statistics can be computed.
3117
 *
3118
 * 4. Columns can be aligned, with some given indexes.
3119
 *
3120
 * 5. All these can be done efficiently, at the scale of hundreds of gigabytes of data.
3121
 */
3122
3123
/**
3124
 * @defgroup level_5_dataframe Dataframe API
3125
 * @{
3126
 */
3127
3128
/**
3129
 * A data enumeration function to supply data for given row indexes.
3130
 */
3131
typedef void (*ccv_cnnp_column_data_enum_f)(const int column_idx, const int* const row_idxs, const int row_size, void** const data, void* const context, ccv_nnc_stream_context_t* const stream_context);
3132
/**
3133
 * A destructor for data.
3134
 */
3135
typedef void (*ccv_cnnp_column_data_deinit_f)(void* const data, void* const context);
3136
/**
3137
 * A destructor for context.
3138
 */
3139
typedef void (*ccv_cnnp_column_data_context_deinit_f)(void* const context);
3140
/**
3141
 * Column data.
3142
 */
3143
typedef struct {
3144
  int stream_type; /**< The type of stream context for this column. Each column only compatible with one stream type. */
3145
  char* name; /**< The name of the column. */
3146
  ccv_cnnp_column_data_enum_f data_enum; /**< The data enumeration function for this column. */
3147
  ccv_cnnp_column_data_deinit_f data_deinit; /**< The deinit function that will be used to destroy the data. */
3148
  void* context; /**< The context go along with this column. */
3149
  ccv_cnnp_column_data_context_deinit_f context_deinit; /**< The deinit function that will be used to destroy the context. */
3150
} ccv_cnnp_column_data_t;
3151
/**
3152
 * An opaque structure point to the dataframe object.
3153
 */
3154
typedef struct ccv_cnnp_dataframe_s ccv_cnnp_dataframe_t;
3155
/**
3156
 * Create a dataframe object with given column data.
3157
 * @param column_data The column data that can be loaded.
3158
 * @param column_size The size of column data array.
3159
 * @param row_count The number of rows in this dataframe.
3160
 */
3161
CCV_WARN_UNUSED(ccv_cnnp_dataframe_t*) ccv_cnnp_dataframe_new(const ccv_cnnp_column_data_t* const column_data, const int column_size, const int row_count);
3162
/**
3163
 * Add a new column to the dataframe.
3164
 * @param dataframe The dataframe object to add column to.
3165
 * @param data_enum The data provider function for the new column.
3166
 * @param stream_type The type of stream context for this derived column.
3167
 * @param data_deinit The deinit function will be used to destroy the derived data.
3168
 * @param context The context that can be used to generate new column.
3169
 * @param context_deinit The deinit function will be used to destroy the context.
3170
 * @param name The name of the newly added column.
3171
 * @return The new column index.
3172
 */
3173
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_add(ccv_cnnp_dataframe_t* const dataframe, ccv_cnnp_column_data_enum_f data_enum, const int stream_type, ccv_cnnp_column_data_deinit_f data_deinit, void* const context, ccv_cnnp_column_data_context_deinit_f context_deinit, const char* name);
3174
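// Example (a hedged sketch, not part of the original header): a data enumeration
// function that hands out rows from a plain int array carried in `context`.
// Stream type 0 in the registration call below is assumed to mean CPU.
static void example_int_column(const int column_idx, const int* const row_idxs, const int row_size, void** const data, void* const context, ccv_nnc_stream_context_t* const stream_context)
{
	int* const backing = (int*)context;
	int i;
	for (i = 0; i < row_size; i++)
		data[i] = backing + row_idxs[i]; // hand out pointers into the backing store
}
// Registered with:
//   const int column = ccv_cnnp_dataframe_add(dataframe, example_int_column, 0, 0 /* no data deinit */, backing, 0 /* no context deinit */, "ints");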
/**
3175
 * A map function that takes the data from multiple columns and derive new data out of it.
3176
 */
3177
typedef void (*ccv_cnnp_column_data_map_f)(void* const* const* const column_data, const int column_size, const int batch_size, void** const data, void* const context, ccv_nnc_stream_context_t* const stream_context);
3178
/**
3179
 * Derive a new column out of existing columns in the dataframe.
3180
 * @param dataframe The dataframe object that contains existing columns.
3181
 * @param map The map function used to derive new column from existing columns.
3182
 * @param stream_type The type of stream context for this derived column.
3183
 * @param data_deinit The deinit function will be used to destroy the derived data.
3184
 * @param column_idxs The columns that will be used to derive new column.
3185
 * @param column_idx_size The size of existing columns array.
3186
 * @param context The context that can be used to generate new column.
3187
 * @param context_deinit The deinit function will be used to destroy the context.
3188
 * @param name The name of the new column.
3189
 * @return The new column index.
3190
 */
3191
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_map(ccv_cnnp_dataframe_t* const dataframe, ccv_cnnp_column_data_map_f map, const int stream_type, ccv_cnnp_column_data_deinit_f data_deinit, const int* const column_idxs, const int column_idx_size, void* const context, ccv_cnnp_column_data_context_deinit_f context_deinit, const char* name);
3192
/**
3193
 * Shuffle an existing dataframe.
3194
 * @param dataframe The dataframe that is about to be shuffled.
3195
 */
3196
void ccv_cnnp_dataframe_shuffle(ccv_cnnp_dataframe_t* const dataframe);
3197
/**
3198
 * Query row count of the dataframe.
3199
 * @param dataframe The dataframe we want to query row count.
3200
 * @return The row count of the dataframe.
3201
 */
3202
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_row_count(ccv_cnnp_dataframe_t* const dataframe);
3203
/**
3204
 * Query the column name of a given column on the dataframe.
3205
 * @param dataframe The dataframe we want to query the column name.
3206
 * @param column_idx The index of a column.
3207
 * @return The name of the column.
3208
 */
3209
CCV_WARN_UNUSED(const char*) ccv_cnnp_dataframe_column_name(ccv_cnnp_dataframe_t* const dataframe, const int column_idx);
3210
/**
3211
 * A sampling function that takes multiple rows of one column, and sample to one row.
3212
 */
3213
typedef void (*ccv_cnnp_column_data_sample_f)(void* const* const input_data, const int batch_size, void** const output_data, void* const context, ccv_nnc_stream_context_t* const stream_context);
3214
/**
3215
 * Sample a dataframe by batch size. Thus, n rows are sampled into 1 row by the sample function on
3216
 * one specific column. This will also sample the multi-column dataframe down to 1 column
3217
 * by selecting the one column to sample.
3218
 * @param dataframe The dataframe that is about to be sampled.
3219
 * @param sample The sample function used to sample n rows into 1.
3220
 * @param data_deinit The deinit function will be used to destroy the derived data.
3221
 * @param column_idx The column we selected to sample.
3222
 * @param batch_size How many rows will be sampled to 1 row from the original data.
3223
 * @param context The context that can be used in sample function.
3224
 * @param context_deinit The deinit function will be used to destroy the context.
3225
 * @return The sampled dataframe.
3226
 */
3227
CCV_WARN_UNUSED(ccv_cnnp_dataframe_t*) ccv_cnnp_dataframe_sample_new(ccv_cnnp_dataframe_t* const dataframe, ccv_cnnp_column_data_sample_f sample, ccv_cnnp_column_data_deinit_f data_deinit, const int column_idx, const int batch_size, void* const context, ccv_cnnp_column_data_context_deinit_f context_deinit);
3228
/**
3229
 * Extract a value out of a struct. Assuming the data points to a struct, this method extracts
3230
 * the value at a given offset within that struct. For example, if you have struct { ccv_nnc_tensor_t* a; ccv_nnc_tensor_t* b; } S;
3231
 * if you want to extract the b tensor to a different column, you can call this function with
3232
 * offsetof(S, b).
3233
 * @param dataframe The dataframe object to be extracted.
3234
 * @param column_idx The column that we want to extract value of.
3235
 * @param offset The offset. For example, offsetof(S, b).
3236
 * @param name The name of the new column.
3237
 * @return The new column that contains the extracted value.
3238
 */
3239
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_extract_value(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const off_t offset, const char* name);
3240
/**
3241
 * Make a tuple out of the columns specified. Thus, the new derived column will contain a tuple
3242
 * with data from all the columns specified here. A tuple here is represented as void* tuple[], an
3243
 * array of void* pointers.
3244
 * @param dataframe The dataframe that will contain the new column.
3245
 * @param column_idxs The columns to be tupled.
3246
 * @param column_idx_size The number of columns.
3247
 * @param name The name of the new column.
3248
 * @return The derived column with the tuple.
3249
 */
3250
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_make_tuple(ccv_cnnp_dataframe_t* const dataframe, const int* const column_idxs, const int column_idx_size, const char* name);
3251
/**
3252
 * The size of the tuple. It is equal to the number of columns we specified. The behavior of
3253
 * calling this method on a column that is not a tuple is undefined.
3254
 * @param dataframe The dataframe that contains the tuple column.
3255
 * @param column_idx The tuple column we are going to inspect.
3256
 * @return The tuple size of the column.
3257
 */
3258
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_tuple_size(const ccv_cnnp_dataframe_t* const dataframe, const int column_idx);
3259
/**
3260
 * Extract a data out of a tuple.
3261
 * @param dataframe The dataframe that will contain the new column.
3262
 * @param column_idx The column that is a tuple.
3263
 * @param index The index into the tuple.
3264
 * @param name The name of the new column.
3265
 * @return The derived column with the extracted value.
3266
 */
3267
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_extract_tuple(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const int index, const char* name);
3268
/**
3269
 * The opaque pointer to the iterator.
3270
 */
3271
typedef struct ccv_cnnp_dataframe_iter_s ccv_cnnp_dataframe_iter_t;
3272
/**
3273
 * Get a new iterator of the dataframe.
3274
 * @param dataframe The dataframe object to iterate through.
3275
 * @param column_idxs The columns that will be iterated.
3276
 * @param column_idx_size The size of columns array.
3277
 * @return The opaque iterator object.
3278
 */
3279
CCV_WARN_UNUSED(ccv_cnnp_dataframe_iter_t*) ccv_cnnp_dataframe_iter_new(ccv_cnnp_dataframe_t* const dataframe, const int* const column_idxs, const int column_idx_size);
3280
/**
3281
 * Get the next item from the iterator.
3282
 * @param iter The iterator to go through.
3283
 * @param data_ref The output for the data.
3284
 * @param column_idx_size The size of the data_ref array.
3285
 * @param stream_context The stream context to extract data asynchronously.
3286
 * @return 0 if the iteration is successful, -1 if there are no more rows, -2 if it has already ended.
3287
 */
3288
int ccv_cnnp_dataframe_iter_next(ccv_cnnp_dataframe_iter_t* const iter, void** const data_ref, const int column_idx_size, ccv_nnc_stream_context_t* const stream_context);
3289
/**
3290
 * Assuming the iterator is on the same row, peek into a potentially different column index.
3291
 * @param iter The iterator to go through.
3292
 * @param data_ref The output for the data.
3293
 * @param offset The offset for which column in this iterator to peek at.
3294
 * @param data_ref_size How many columns in this iterator to peek at.
3295
 * @param stream_context The stream context to extract data asynchronously.
3296
 */
3297
void ccv_cnnp_dataframe_iter_peek(ccv_cnnp_dataframe_iter_t* const iter, void** const data_ref, const int offset, const int data_ref_size, ccv_nnc_stream_context_t* const stream_context);
3298
/**
3299
 * Prefetch next item on the iterator with the given stream context. You can call this method multiple times
3300
 * to prefetch multiple items ahead of time.
3301
 * @param iter The iterator to go through.
3302
 * @param prefetch_count How many items ahead we should prefetch.
3303
 * @param stream_context The stream context to extract data asynchronously.
3304
 * @return 0 if the prefetch is successful, -1 if it is ended.
3305
 */
3306
int ccv_cnnp_dataframe_iter_prefetch(ccv_cnnp_dataframe_iter_t* const iter, const int prefetch_count, ccv_nnc_stream_context_t* const stream_context);
3307
/**
3308
 * Set the cursor of the iterator. When set to 0, the iterator effectively restarts.
3309
 * @param iter The iterator to go through.
3310
 * @param idx The index of the cursor.
3311
 * @return 0 if it is successful, -1 if it is not (exceeds the range).
3312
 */
3313
int ccv_cnnp_dataframe_iter_set_cursor(ccv_cnnp_dataframe_iter_t* const iter, const int idx);
3314
/**
3315
 * Free the dataframe iterator object.
3316
 * @param iter The dataframe iterator to be freed.
3317
 */
3318
void ccv_cnnp_dataframe_iter_free(ccv_cnnp_dataframe_iter_t* const iter);
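/* A sketch of the typical iteration loop (column indices are assumptions;
 * 0 is passed for the stream context to run synchronously):
 *
 *   const int cols[2] = { image_idx, label_idx };
 *   ccv_cnnp_dataframe_iter_t* const iter = ccv_cnnp_dataframe_iter_new(dataframe, cols, 2);
 *   void* data[2] = {0};
 *   ccv_cnnp_dataframe_iter_prefetch(iter, 1, 0); // keep one row ahead
 *   while (ccv_cnnp_dataframe_iter_next(iter, data, 2, 0) == 0) {
 *     ccv_nnc_tensor_t* const image = (ccv_nnc_tensor_t*)data[0];
 *     ccv_nnc_tensor_t* const label = (ccv_nnc_tensor_t*)data[1];
 *     // ... consume image / label ...
 *     ccv_cnnp_dataframe_iter_prefetch(iter, 1, 0);
 *   }
 *   ccv_cnnp_dataframe_iter_free(iter);
 */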
3319
/**
3320
 * Free the dataframe object.
3321
 * @param dataframe The dataframe object to be freed.
3322
 */
3323
void ccv_cnnp_dataframe_free(ccv_cnnp_dataframe_t* const dataframe);
3324
3325
/** @} */
3326
3327
/**
3328
 * @defgroup level_5_dataframe_add_ons Dataframe Add-ons
3329
 * @{
3330
 */
3331
3332
/**
3333
 * Turn a ccv_array_t to a dataframe object.
3334
 * @param array The array we want to turn into a dataframe object.
3335
 * @return The new dataframe object.
3336
 */
3337
CCV_WARN_UNUSED(ccv_cnnp_dataframe_t*) ccv_cnnp_dataframe_from_array_new(ccv_array_t* const array);
3338
/**
3339
 * Derive a new column that copies a tensor array from a given column to the derived column on GPU.
3340
 * @param dataframe The dataframe object that gets the derived column.
3341
 * @param column_idx The original column that contains the tensor array on CPU.
3342
 * @param tensor_offset Only copy as outputs[i] = inputs[i + tensor_offset].
3343
 * @param tensor_size How many tensors in the tensor array.
3344
 * @param device_id The device we want to copy the tensors to.
3345
 * @param name The name of the new column.
3346
 * @return The index of the newly derived column.
3347
 */
3348
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_copy_to_gpu(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const int tensor_offset, const int tensor_size, const int device_id, const char* name);
3349
/**
3350
 * Derive a new column by executing a generic command.
3351
 * @param dataframe The dataframe object that gets the derived column.
3352
 * @param column_idx The original column that contains the tensor array.
3353
 * @param cmd The command for this operation.
3354
 * @param hint The hint to run the command.
3355
 * @param flags The flags with the command.
3356
 * @param input_offset Use inputs[i + input_offset] to inputs[i + input_offset + input_size - 1] as the inputs.
3357
 * @param input_size How many tensors in the input array.
3358
 * @param output_params The parameters for the outputs.
3359
 * @param output_size How many tensors in the output array.
3360
 * @param stream_type The type of stream context we are going to use.
3361
 * @param name The name of the new column.
3362
 * @return The index of the newly derived column.
3363
 */
3364
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_cmd_exec(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, const int input_offset, const int input_size, const ccv_nnc_tensor_param_t* const output_params, const int output_size, const int stream_type, const char* name);
3365
/**
3366
 * Add a new column that contains some tensors. This will add a new column in which each row is a tensor with
3367
 * the specified parameters. It comes in handy when you want to have some auxiliary tensors along with each row.
3368
 * @param dataframe The dataframe object that gets the new column.
3369
 * @param params The parameters for the tensors.
3370
 * @param name The name of the new column.
3371
 * @return The index of the newly added column.
3372
 */
3373
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_add_aux(ccv_cnnp_dataframe_t* const dataframe, const ccv_nnc_tensor_param_t params, const char* name);
3374
/**
3375
 * Read an image off the given column. That column should contain the filename (as a char array). The new column
3376
 * will contain the ccv_dense_matrix_t / ccv_nnc_tensor_t (both are toll-free bridging) of the image.
3377
 * @param dataframe The dataframe object that loads the images.
3378
 * @param column_idx The column which contains the filename.
3379
 * @param structof The offset to the filename (as a char array) from that column. For example, the column
3380
 *        could be a struct and the filename could be one of its fields. In that case, you can pass offsetof(S, filename).
3381
 * @param name The name of the new column.
3382
 * @return The index of the newly derived column.
3383
 */
3384
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_read_image(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const off_t structof, const char* name);
3385
/**
3386
 * The structure to describe how to apply random jitter to the image.
3387
 */
3388
typedef struct {
3389
  float contrast; /**< The random contrast, the final contrast will be [1 / (1 + contrast), 1 + contrast] */
3390
  float saturation; /**< The saturation, the final saturation will be [1 / (1 + saturation), 1 + saturation] */
3391
  float brightness; /**< The brightness, the final brightness will be between [1 / (1 + brightness), 1 + brightness] */
3392
  float lighting; /**< AlexNet style PCA based image jitter */
3393
  float aspect_ratio; /**< Stretch aspect ratio between [1 / (1 + aspect_ratio), 1 + aspect_ratio] */
3394
  int symmetric; /**< Apply random flip on x-axis (around the y-axis). */
3395
  int seed; /**< The seed for random generator. */
3396
  int center_crop; /**< Enable crop to the center (otherwise do random crop). */
3397
  struct {
3398
    int min; /**< The minimal dimension of resize */
3399
    int max; /**< The maximal dimension of resize. The final resize can be computed from min + (max - min) * random_unit */
3400
    int roundup; /**< The dimension on both height / width are a multiple of roundup value. */
3401
  } resize;
3402
  struct {
3403
    int rows; /**< The height of the final image. */
3404
    int cols; /**< The width of the final image. */
3405
  } size;
3406
  struct {
3407
    int x; /**< The extra random offset on x-axis. */
3408
    int y; /**< The extra random offset on y-axis. */
3409
  } offset;
3410
  struct {
3411
    float mean[3]; /**< Normalize the image with mean. */
3412
  float std[3]; /**< Normalize the image with std. pixel = (pixel - mean) / std */
3413
  } normalize;
3414
} ccv_cnnp_random_jitter_t;
3415
/**
3416
 * Apply random jitter on an image to generate a new image.
3417
 * @param dataframe The dataframe object that contains the original image.
3418
 * @param column_idx The column which contains the original image.
3419
 * @param datatype The final datatype of the image. We only support CCV_32F right now.
3420
 * @param random_jitter The random jitter parameters to apply.
3421
 * @param name The name of the new column.
3422
 * @return The index of the newly derived column.
3423
 */
3424
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_image_random_jitter(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const int datatype, const ccv_cnnp_random_jitter_t random_jitter, const char* name);
3425
/**
3426
 * Generate a one-hot tensor off the label from a struct.
3427
 * @param dataframe The dataframe object that contains the label.
3428
 * @param column_idx The column which contains the label (as int).
3429
 * @param structof The offset to the label (as int) from that column. For example, the column
3430
 *        could be a struct and the label could be one of its fields. In that case, you can pass offsetof(S, label).
3431
 * @param range The range of the label, from [0...range - 1]
3432
 * @param onval The value when it hit.
3433
 * @param offval The value for the others.
3434
 * @param datatype The datatype of the tensor.
3435
 * @param format The format of the tensor.
3436
 * @param name The name of the new column.
3437
 * @return The index of the newly derived column.
3438
 */
3439
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_one_hot(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const off_t structof, const int range, const float onval, const float offval, const int datatype, const int format, const char* name);
3440
/**
3441
 * Generate a scalar tensor (a tensor with one value) off a value from a struct.
3442
 * @param dataframe The dataframe object that contains the value.
3443
 * @param column_idx The column which contains the value (as datatype).
3444
 * @param structof The offset to the value from that column. For example, the column
3445
 *        could be a struct and the value could be one of its fields. In that case, you can pass offsetof(S, value).
3446
 * @param from_dt The datatype of the value.
3447
 * @param to_dt The datatype of the tensor.
3448
 * @param format The format of the tensor.
3449
 * @param name The name of the new column.
3450
 * @return The index of the newly derived column.
3451
 */
3452
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_copy_scalar(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const off_t structof, const int from_dt, const int to_dt, const int format, const char* name);
3453
/**
3454
 * Generate a vector with ones up to a given length; the rest will be zeros. When applied to a batched lengths
3455
 * array, this will generate a matrix of these vectors, squared. The derived column will be a tuple of vectors
3456
 * for the given number of columns.
3457
 * @param dataframe The dataframe object that will contain the matrix.
3458
 * @param column_idxs The columns which contain the sequence lengths (a 1d tensor).
3459
 * @param column_idx_size The number of columns. The derived column will be a tuple of vectors.
3460
 * @param variable_size The size of the final vector can vary, depending on the max length of current batch.
3461
 * @param max_length The absolute max length for inputs.
3462
 * @param name The name of the new column.
3463
 * @return The index of the newly derived column.
3464
 */
3465
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_one_squared(ccv_cnnp_dataframe_t* const dataframe,  const int* const column_idxs, const int column_idx_size, const int variable_size, const int max_length, const char* name);
3466
/**
3467
 * Truncate a given matrix (as a list of vectors) to the given sizes provided by another vector. The truncated
3468
 * column will be a tuple of vectors for the given columns.
3469
 * @param dataframe The dataframe object that will contain the matrix.
3470
 * @param vec_idxs The columns of the given matrix to be truncated.
3471
 * @param vec_idx_size The number of columns for vec_idxs.
3472
 * @param len_idxs The columns of the given sizes as a vector.
3473
 * @param len_idx_size The number of columns for len_idxs.
3474
 * @param name The name of the new column.
3475
 * @return The index of the newly derived column.
3476
 */
3477
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_truncate(ccv_cnnp_dataframe_t* const dataframe, const int* const vec_idxs, const int vec_idx_size, const int* len_idxs, const int len_idx_size, const char* name);
3478
/**
3479
 * Combine multiple tensors in a column into one tensor. This method can take multiple columns, which
3480
 * will result in a tuple of tensors. Each tensor in the tuple is a batched one from a given column.
3481
 * @param dataframe The dataframe contains the columns of tensors to be batched.
3482
 * @param column_idxs The columns that contain the tensors.
3483
 * @param column_idx_size The number of columns that contain the tensors.
3484
 * @param batch_count How many tensors in one column to be batched together.
3485
 * @param group_count We can generate many groups of batched tensor. For example, if you have column A, B, C, each
3486
 *        have different tensors. If group_count is 1, the result tuple will be (A_b, B_b, C_b). If group count is
3487
 *        2, the result tuple will be (A_b1, B_b1, C_b1, A_b2, B_b2, C_b2). A_b1 etc. will still contain the same
3488
 *        number of batch_count tensors.
3489
 * @param format The result format of the tensor. We support a simple NCHW <=> NHWC transformation from the source tensor.
3490
 * @return The newly created dataframe with the 0-th column is the tuple of batched tensors.
3491
 */
3492
CCV_WARN_UNUSED(ccv_cnnp_dataframe_t*) ccv_cnnp_dataframe_combine_new(ccv_cnnp_dataframe_t* const dataframe, const int* const column_idxs, const int column_idx_size, const int batch_count, const int group_count, const int format);
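/* A sketch (image_idx / one_hot_idx are assumed columns): batch 64 rows per
 * tensor, one group, NCHW output, yielding a tuple (image_batch, label_batch)
 * in column 0 of the new dataframe.
 *
 *   const int cols[2] = { image_idx, one_hot_idx };
 *   ccv_cnnp_dataframe_t* const batched =
 *     ccv_cnnp_dataframe_combine_new(dataframe, cols, 2, 64, 1, CCV_TENSOR_FORMAT_NCHW);
 */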
3493
3494
/** @} */
3495
3496
/**
3497
 * @page dataframe_csv Why to support comma-separated-values files in dataframe?
3498
 *
3499
 * C can be used to write a parser, and it can usually be fast. But most such parsers are buggy, with bugs that can either crash, be
3500
 * exploited, or simply produce incorrect results. There really isn't much motivation for me to start writing a parser, even one as simple
3501
 * as a CSV parser.
3502
 *
3503
 * However, it was brought to my attention that a full-speed implementation (defined as saturating PCIe x4 for an SSD) would
3504
 * be beneficial. I have also started to use nnc in many places where it is handy to load a csv file and generate some tensors out
3505
 * of it.
3506
 *
3507
 * This implementation plans to use a variant of the two-pass approach documented in
3508
 * https://www.microsoft.com/en-us/research/uploads/prod/2019/04/chunker-sigmod19.pdf and first implemented in
3509
 * https://github.com/wiseio/paratext. It differs from those two in the following ways:
3510
 *
3511
 * 1. The first pass will not only find the quotes and even / odd CRLF, but also collect statistics on how many lines assuming
3512
 *    the first CRLF is within quote / outside of the quote;
3513
 *
3514
 * 2. The second pass will do a copy into a continuous page mirrors the original csv file, but null-terminate each column, and
3515
 *    assign the start pointer for each.
3516
 *
3517
 * The speculative approach, while interesting, can be challenging to implement on many-core systems, and its worst-case
3518
 * scenario is indeed worse.
3519
 *
3520
 * The implementation itself follows https://tools.ietf.org/html/rfc4180, with only customization of delimiters (so it can support
3521
 * tab-separated values) and quotes (so you can choose between " and '). Escaping only supports doubling whatever quote
3522
 * symbol you elect.
3523
 */
3524
3525
/**
3526
 * @defgroup level_5_dataframe_csv Dataframe for Comma-Separated-Values Files
3527
 * @{
3528
 */
3529
enum {
3530
  /* It is a file pointer. */
3531
  CCV_CNNP_DATAFRAME_CSV_FILE = 0,
3532
  /* It is a pointer to a memory. */
3533
  CCV_CNNP_DATAFRAME_CSV_MEMORY = 1,
3534
};
3535
3536
/**
3537
 * Create a dataframe object that reads a CSV file. This will eagerly load the file into memory and parse each row / column
3538
 * into null-terminated strings; you can later convert these into numerics if needed. Each column will be indexed
3539
 * from 0 to column_size - 1. If there are syntax errors, the parser will make guesses and continue to parse to the best of its knowledge.
3540
 * If it cannot, we will return null for the object. We support CRLF, LF, and LFCR line termination.
3541
 * @param input The FILE handle for on-disk file, or the pointer to the region of the memory we are going to use.
3542
 * @param type The type of either `CCV_CNNP_DATAFRAME_CSV_FILE` or `CCV_CNNP_DATAFRAME_CSV_MEMORY`
3543
 * @param len The length of the memory region, if it is `CCV_CNNP_DATAFRAME_CSV_MEMORY`.
3544
 * @param delim The delimiter; it is ',' by default (if you provide '\0').
3545
 * @param quote The quote for escaped strings; it is '"' by default (if you provide '\0').
3546
 * @param include_header Whether to parse the header separately. 1 means we treat the first line as the header.
3547
 * @param column_size The number of columns in the resulted dataframe.
3548
 * @return A dataframe that can represent the csv file. nullptr if failed.
3549
 */
3550
CCV_WARN_UNUSED(ccv_cnnp_dataframe_t*) ccv_cnnp_dataframe_from_csv_new(void* const input, const int type, const size_t len, const char delim, const char quote, const int include_header, int* const column_size);
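/* A sketch (the file name "train.csv" is an assumption): parse an on-disk CSV
 * with the default delimiter and quote, treating the first line as a header.
 *
 *   int column_size = 0;
 *   FILE* const file = fopen("train.csv", "r");
 *   ccv_cnnp_dataframe_t* const csv = ccv_cnnp_dataframe_from_csv_new(
 *     file, CCV_CNNP_DATAFRAME_CSV_FILE, 0, '\0', '\0', 1, &column_size);
 *   fclose(file);
 *   // On success, columns 0 .. column_size - 1 contain null-terminated strings.
 */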
3551
3552
/** @} */
3553
3554
/**
3555
 * @page model Models, layers, and Keras
3556
 *
3557
 * With Keras API in mind, this model implementation essentially is a light-weight way to group neural network layers
3558
 * together. This is a rare case in NNC (or ccv in general) where Object-Oriented programming makes sense. I borrowed
3559
 * heavily from Objective-C / C++ to implement this Object-Oriented interface.
3560
 *
3561
 * Now back to the elaboration of the Model interface. It is specifically designed with Keras in mind, asking the question:
3562
 * if we are going to build a Keras-style high-level API in any language (Ruby, Python, Swift, Julia), what would the underlying
3563
 * C interface look like? Here is your answer (hint: it looks very much like the Python Keras API).
3564
 *
3565
 * A model consists of a set of inputs and outputs. This sounds very much like what "Command" is in Level-1 APIs,
3566
 * however, they are different: a model is stateful. For example, a convolution command takes 3 inputs: image, kernel
3567
 * weight and bias, and has 1 output: image. A convolution model takes 1 input: image, and has 1 output: image. Kernel weight
3568
 * and bias are internal states of the model (in Keras, a convolution is called a "layer", and a model means a set of
3569
 * layers; in NNC, that kind of differentiation feels superficial, therefore a layer is a model).
3570
 *
3571
 * A model can be combined, and a new model can be a combination of other models.
3572
 *
3573
 * The simplest composed model is the sequential model. A sequential model is a model that consists of a sequence of models,
3574
 * each containing one input and one output. The output of the earlier model feeds into the later one, thus forming a sequential
3575
 * evaluation path.
3576
 */
3577
3578
/**
3579
 * @defgroup level_5_model Model API
3580
 * @{
3581
 */
3582
3583
/**
3584
 * The model type is an abstract type; you won't ever interact with a naked model.
3585
 */
3586
typedef struct ccv_cnnp_model_s ccv_cnnp_model_t;
3587
/**
3588
 * With this type, now in NNC, we have 4 types that represent a "tensor":
3589
 *
3590
 * 1. ccv_nnc_tensor_t / ccv_nnc_tensor_view_t / ccv_nnc_tensor_multiview_t: a concrete tensor with memory allocated.
3591
 *
3592
 * 2. ccv_nnc_tensor_symbol_t: a symbol representation of a tensor, with its data layout, device affinity, and type
3593
 *                             specified.
3594
 *
3595
 * 3. ccv_nnc_tensor_variable_t: in dynamic graph, this represents a concrete tensor with memory allocated, but also
3596
 *                               associated with a recorded execution.
3597
 *
3598
 * 4. ccv_cnnp_model_io_t: this is the most flexible one. No data layout, device affinity or type specified. It can even
3599
 *                         represent a list of tensors rather than just one. This is a handle used by the model API to
3600
 *                         associate model inputs / outputs.
3601
 */
3602
typedef struct ccv_cnnp_model_io_s* ccv_cnnp_model_io_t;
3603
/**
3604
 * Create a naked input.
3605
 * @return A ccv_cnnp_model_io_t represents an input.
3606
 */
3607
CCV_WARN_UNUSED(ccv_cnnp_model_io_t) ccv_cnnp_input(void);
3608
/**
3609
 * This method mimics Keras callable for model (thus, override __call__ method in Python class).
3610
 * @param model A model that we can apply a set of inputs to get one output.
3611
 * @param inputs The set of inputs.
3612
 * @param input_size The size of inputs array.
3613
 * @return A ccv_cnnp_model_io_t that represents the output of the given model.
3614
 */
3615
CCV_WARN_UNUSED(ccv_cnnp_model_io_t) ccv_cnnp_model_apply(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t* const inputs, const int input_size);
3616
/**
3617
 * This method adds non-functional dependencies for a model IO. "Non-functional dependencies" means that
3618
 * their outputs are not used by this IO; however, their existence establishes a partial ordering
3619
 * for the execution. In that way, they act as "inputs", but not functional ones.
3620
 * @param model_io A model IO for which we will add additional non-functional dependencies.
3621
 * @param dependencies The set of dependencies.
3622
 * @param dependency_size The size of dependencies array.
3623
 */
3624
void ccv_cnnp_model_add_dependencies(ccv_cnnp_model_io_t model_io, const ccv_cnnp_model_io_t* const dependencies, const int dependency_size);
3625
enum {
3626
  /* Select only weights, no bias terms. */
3627
  CCV_CNNP_PARAMETER_SELECT_WEIGHT = 0,
3628
  /* Select bias terms, no weights. */
3629
  CCV_CNNP_PARAMETER_SELECT_BIAS = 1,
3630
};
3631
/**
3632
 * This method exposes parameter for a model out as a potential input for another model. Since
3633
 * it is a ccv_cnnp_model_io_t, it can also be used by other methods.
3634
 * @param model A model that we can extract parameters out.
3635
 * @param selector The selector for a parameter. ALL_PARAMETERS means all parameters, or you can select CCV_CNNP_PARAMETER_SELECT_WEIGHT or CCV_CNNP_PARAMETER_SELECT_BIAS.
3636
 * @param index The index into a parameter. ALL_PARAMETERS means all parameters.
3637
 */
3638
CCV_WARN_UNUSED(ccv_cnnp_model_io_t) ccv_cnnp_model_parameters(ccv_cnnp_model_t* const model, const int selector, const int index);
3639
/**
3640
 * A notification function such that a model can be notified.
3641
 * This is useful to broadcast a message to all models as sub-model of someone else.
3642
 */
3643
typedef void (*ccv_cnnp_model_notify_f)(const ccv_cnnp_model_t* const model, const int tag, void* const payload, void* const context);
3644
/**
3645
 * Hook into a model such that when there is a notification, the callback will receive it.
3646
 * @param model A model that can be notified.
3647
 * @param func The callback function.
3648
 * @param context The context to be passed along to the callback function.
3649
 */
3650
void ccv_cnnp_model_notify_hook(ccv_cnnp_model_t* const model, ccv_cnnp_model_notify_f func, void* const context);
3651
/**
3652
 * Notify a model and its sub-models with a tag and a payload. This will be triggered
3653
 * synchronously.
3654
 * @param model A model that will be notified.
3655
 * @param tag An integer to help identify what kind of notification.
3656
 * @param payload A payload pointer that you can carry arbitrary information.
3657
 */
3658
void ccv_cnnp_model_notify(const ccv_cnnp_model_t* const model, const int tag, void* const payload);
3659
/**
3660
 * This method name is deceiving. It returns a composed model, not a naked model.
3661
 * This composed model takes a set of inputs, and runs them through various other models to arrive at
3662
 * the set of outputs.
3663
 * @param inputs The set of inputs.
3664
 * @param input_size The size of inputs array.
3665
 * @param outputs The set of outputs.
3666
 * @param output_size The size of outputs array.
3667
 * @param is_trainable Whether the parameters of this model can be trained. -1 means inherit from the parent.
3668
 * @param name The unique name of the model.
3669
 * @return A composed model that takes inputs, and generate the outputs.
3670
 */
3671
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_model_new(const ccv_cnnp_model_io_t* const inputs, const int input_size, const ccv_cnnp_model_io_t* const outputs, const int output_size, const int is_trainable, const char* const name);
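/* A sketch of functional composition (dense0 / dense1 are assumed to be
 * previously constructed one-input models; their constructors are not shown):
 *
 *   const ccv_cnnp_model_io_t input = ccv_cnnp_input();
 *   ccv_cnnp_model_io_t x = ccv_cnnp_model_apply(dense0, &input, 1);
 *   x = ccv_cnnp_model_apply(dense1, &x, 1);
 *   ccv_cnnp_model_t* const net = ccv_cnnp_model_new(&input, 1, &x, 1, 1, "net");
 */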
3672
/**
3673
 * This method returns a sequential model, which composed from a sequence of models.
3674
 * @param models The list of models, that takes one input, and emit one output, feeding into the subsequent one.
3675
 * @param model_size The size of the list.
3676
 * @param is_trainable Whether the parameters of this model can be trained.
3677
 * @param name The unique name of the model.
3678
 * @return A composed model that applies these models one by one in sequence.
3679
 */
3680
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_sequential_new(ccv_cnnp_model_t* const* const models, const int model_size, const int is_trainable, const char* const name);
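/* The same two-layer network as a sequential model (dense0 / dense1 are
 * assumed one-input models):
 *
 *   ccv_cnnp_model_t* const models[2] = { dense0, dense1 };
 *   ccv_cnnp_model_t* const net = ccv_cnnp_sequential_new(models, 2, 1, "net");
 */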
3681
/**
3682
 * A model generation function to be called for dynamic models.
3683
 */
3684
typedef ccv_cnnp_model_t* (*ccv_cnnp_model_dynamic_f)(const ccv_nnc_tensor_param_t* const inputs, const int input_size, void* const context);
3685
/**
3686
 * This method returns a model that will be recreated if it is recompiled. Put it this way: you can call
3687
 * ccv_cnnp_model_compile multiple times with different inputs and input sizes; however, the model will
3688
 * only be recompiled to some extent. For example, if you called ccv_cnnp_reshape, the shape is determined
3689
 * at the moment you create that model, and recompilation won't change it. There are two ways to work around this:
3690
 * 1. Use models that don't have an explicit shape specified, for example, ccv_cnnp_dense, and avoid models
3691
 *    that are not as flexible, such as ccv_cnnp_reshape or ccv_cnnp_cmd_exec.
3692
 * 2. Create with ccv_cnnp_dynamic_new such that the model will be recreated whenever it is recompiled.
3693
 * @param func The function to be called to create the model.
3694
 * @param context The context used along to create the model.
3695
 * @param name The unique name of the model.
3696
 * @return A model object that is yet to be created until build.
3697
 */
3698
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_dynamic_new(ccv_cnnp_model_dynamic_f func, void* const context, const char* const name);
3699
/**
3700
 * Prepare the model to be trained, the input specifies the batch size etc.
3701
 * The input size technically is not needed; it is here as a safety check.
3702
 * @param model The model to be compiled.
3703
 * @param inputs The tensor parameters for the model's inputs, that can be used to derive all tensor shapes.
3704
 * @param input_size The size of the inputs array.
3705
 * @param minimizer The wrapped command that represents a particular optimization strategy.
3706
 * @param loss The wrapped command that computes the loss function.
3707
 */
3708
void ccv_cnnp_model_compile(ccv_cnnp_model_t* const model, const ccv_nnc_tensor_param_t* const inputs, const int input_size, const ccv_nnc_cmd_t minimizer, const ccv_nnc_cmd_t loss);
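/* A compile sketch. CMD_SGD_FORWARD / CMD_CATEGORICAL_CROSSENTROPY_FORWARD and
 * CPU_TENSOR_NHWC are assumed from the generated command / tensor macros; the
 * SGD hyper-parameters shown are illustrative only.
 *
 *   const ccv_nnc_tensor_param_t input = CPU_TENSOR_NHWC(32F, 64, 28, 28, 1);
 *   ccv_cnnp_model_compile(net, &input, 1,
 *     CMD_SGD_FORWARD(0, 0.001, 1, 0.99, 0.9, 0.9),
 *     CMD_CATEGORICAL_CROSSENTROPY_FORWARD());
 */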
3709
/**
3710
 * Absorb a new model into the existing model. This requires the new model to have exactly the same parameters,
3711
 * but other dimensionalities can change. The new model must not be compiled yet; its life-cycle management
3712
 * will be taken over by the existing model. You don't need to free it separately.
3713
 * @param model The existing model.
3714
 * @param init The new model.
3715
 * @param inputs The tensor parameters for the model's inputs, that can be used to derive all tensor shapes.
3716
 * @param input_size The size of the inputs array.
3717
 */
3718
void ccv_cnnp_model_absorb(ccv_cnnp_model_t* const model, ccv_cnnp_model_t* const init, const ccv_nnc_tensor_param_t* const inputs, const int input_size);
3719
/**
3720
 * Create a copy of an existing model.
3721
 * @param model The existing model.
3722
 * @param is_trainable Whether the parameters of this model can be trained.
3723
 * @return The new model that is exactly the same copy of the old one.
3724
 */
3725
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_model_copy(const ccv_cnnp_model_t* const model, const int is_trainable);
3726
/**
3727
 * Get the output size of the model.
3728
 * @param model The existing model.
3729
 * @return The output size of the model.
3730
 */
3731
CCV_WARN_UNUSED(int) ccv_cnnp_model_output_size(const ccv_cnnp_model_t* const model);
3732
/**
3733
 * Get whether the model is trainable.
3734
 * @param model The existing model.
3735
 * @return Whether the model is trainable; -1 means it is inherited from its parent.
3736
 */
3737
CCV_WARN_UNUSED(int) ccv_cnnp_model_is_trainable(const ccv_cnnp_model_t* const model);
3738
/**
3739
 * Compute the shape of the output tensor after the model applied to the input.
3740
 * This can only be called after the model is compiled with proper input parameters.
3741
 * @param model The model to compute the output shapes.
3742
 * @param outputs The computed tensor parameters in the output.
3743
 * @param output_size The size of the output array, it has to match the model's output.
3744
 */
3745
void ccv_cnnp_model_tensor_auto(ccv_cnnp_model_t* const model, ccv_nnc_tensor_param_t* const outputs, const int output_size);
3746
/**
3747
 * Generate output that can be parsed by GraphViz (DOT language).
3748
 * @param model The composed model.
3749
 * @param flags Either CCV_NNC_SHORT_DOT_GRAPH or CCV_NNC_LONG_DOT_GRAPH
3750
 * @param outs The output file streams.
3751
 * @param out_size The size of output file stream array.
3752
 */
3753
void ccv_cnnp_model_dot(const ccv_cnnp_model_t* const model, const int flags, FILE** const outs, const int out_size);
3754
/**
3755
 * Provide a hook for the upper level to do custom formatting of a given model. You can implement logic
3756
 * to format the model into protobuf or json. This is only useful after the model is compiled.
3757
 * @param model The composed model.
3758
 * @param format_fn The format callback to be called on every node.
3759
 * @param context The context that will be passed to the callback.
3760
 */
3761
void ccv_cnnp_model_format(const ccv_cnnp_model_t* const model, const ccv_nnc_symbolic_graph_format_f format_fn, void* const context);
3762
/**
3763
 * Fit a model to a given input / output. This is a combination of running ccv_cnnp_model_evaluate /
3764
 * ccv_cnnp_model_backward / ccv_cnnp_model_apply_gradients. The difference is that when calling
3765
 * individual functions, the graph is compiled piece by piece, thus, is less efficient than calling
3766
 * ccv_cnnp_model_fit directly. However, having the separate functions makes this implementation much
3767
 * more versatile; for example, you can accumulate gradients for multiple batches, or use custom gradients,
3768
 * etc.
3769
 * @param model The composed model.
3770
 * @param inputs The input tensors.
3771
 * @param input_size The size of the input tensors array.
3772
 * @param fits The target tensors.
3773
 * @param fit_size The size of the target tensors array.
3774
 * @param outputs The actual outputs from the model.
3775
 * @param output_size The size of the outputs array.
3776
 * @param tensor_tape An opaque tensor tape object to "backpropagate through time".
3777
 * @param stream_context The stream where the fit can be executed upon.
3778
 */
3779
void ccv_cnnp_model_fit(ccv_cnnp_model_t* const model, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const fits, const int fit_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const stream_context);
3780
enum {
3781
  /**
3782
   * Don't disable any outgrad.
3783
   */
3784
  CCV_CNNP_DISABLE_OUTGRAD_NONE = (uint64_t)0,
3785
  /**
3786
   * Disable all inputs' outgrads.
3787
   */
3788
  CCV_CNNP_DISABLE_OUTGRAD_ALL = (uint64_t)(int64_t)-1,
3789
};
3790
/**
3791
 * The parameters for how evaluation should behave.
3792
 */
3793
typedef struct {
3794
  int requires_grad; /**< Whether we need to keep intermediate results for gradient computations. */
3795
  int is_test; /**< Whether we evaluate it as test, or just as forward pass of the training process. */
3796
  uint64_t disable_outgrad; /**< Whether we can compute outflow gradients when calling ccv_cnnp_model_backward later; this is a bitmask, and you can mark for which input the outgrad is disabled. */
3797
} ccv_cnnp_evaluate_param_t;
3798
/**
3799
 * Evaluate model with output.
3800
 * @param model The composed model.
3801
 * @param params The parameters for how evaluation should behave.
3802
 * @param inputs The input tensors.
3803
 * @param input_size The size of the input tensors array.
3804
 * @param outputs The actual outputs from the model.
3805
 * @param output_size The size of the outputs array.
3806
 * @param tensor_tape An opaque tensor tape object to "backpropagate through time".
3807
 * @param stream_context The stream where the evaluation can be executed upon.
3808
 */
3809
void ccv_cnnp_model_evaluate(ccv_cnnp_model_t* const model, const ccv_cnnp_evaluate_param_t params, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const stream_context);
3810
/**
3811
 * Dry-run the model with inputs / outputs. This runs the evaluation loop up until the actual execution.
3812
 * @param model The composed model.
3813
 * @param params The parameters for how evaluation should behave.
3814
 * @param inputs The input tensors.
3815
 * @param input_size The size of the input tensors array.
3816
 * @param outputs The actual outputs from the model.
3817
 * @param output_size The size of the outputs array.
3818
 */
3819
void ccv_cnnp_model_dry_run(ccv_cnnp_model_t* const model, const ccv_cnnp_evaluate_param_t params, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size);
3820
/**
3821
 * Based on the input gradients, compute the output gradients (w.r.t. the inputs). This also adds parameter gradients.
3822
 * @param model The composed model.
3823
 * @param ingrads The input gradients.
3824
 * @param ingrad_size The size of the input gradients array.
3825
 * @param outgrads The output gradients (w.r.t. the inputs).
3826
 * @param outgrad_size The size of the output gradients array.
3827
 * @param tensor_tape An opaque tensor tape object to "backpropagate through time".
3828
 * @param stream_context The stream where the gradient computation can be executed upon.
3829
 */
3830
void ccv_cnnp_model_backward(ccv_cnnp_model_t* const model, ccv_nnc_tensor_t* const* const ingrads, const int ingrad_size, ccv_nnc_tensor_t* const* const outgrads, const int outgrad_size, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const stream_context);
3831
/**
3832
 * Apply the computed gradients to the parameter tensors.
3833
 * @param model The composed model.
3834
 * @param stream_context The stream where the gradient computation can be executed upon.
3835
 */
3836
void ccv_cnnp_model_apply_gradients(ccv_cnnp_model_t* const model, ccv_nnc_stream_context_t* const stream_context);
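/* One training step using the separate phases (a sketch; input_tensor,
 * output_tensor and loss_grad are assumed to be prepared elsewhere):
 *
 *   const ccv_cnnp_evaluate_param_t params = {
 *     .requires_grad = 1,
 *     .is_test = 0,
 *     .disable_outgrad = CCV_CNNP_DISABLE_OUTGRAD_ALL, // no gradients w.r.t. inputs
 *   };
 *   ccv_cnnp_model_evaluate(net, params, &input_tensor, 1, &output_tensor, 1, 0, 0);
 *   ccv_cnnp_model_backward(net, &loss_grad, 1, 0, 0, 0, 0); // loss_grad computed from output
 *   ccv_cnnp_model_apply_gradients(net, 0);
 */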
3837
/**
3838
 * Cancel execution of a model, whether it is a forward / backward or gradient application pass. You need to make
3839
 * sure the model is currently executing when cancelling. This method will set a flag internally and the
3840
 * execution will check that flag when pushing compute onto the computation device, and abort if it is cancelled.
3841
 * When you call another model execution method again, this cancellation is no longer in effect and you need to call
3842
 * cancel again.
3843
 * @param model The composed model.
3844
 */
3845
void ccv_cnnp_model_cancel(ccv_cnnp_model_t* const model);
3846
/**
3847
 * Set flags for the exec symbols created by the model. See CCV_NNC_GRAPH_EXEC_* for details.
3848
 * Note that practically right now, only DISABLE_OPT is useful.
3849
 * @param model The composed model before apply / evaluate.
3850
 * @param flags The flags to set on all exec symbols potentially associated with this model.
3851
 */
3852
void ccv_cnnp_model_set_flags(ccv_cnnp_model_t* const model, const int flags);
3853
/**
3854
 * Get flags for the exec symbols created by the model. See CCV_NNC_GRAPH_EXEC_* for details.
3855
 * Note that practically right now, only DISABLE_OPT is useful.
3856
 * @param model The composed model before apply / evaluate.
3857
 */
3858
CCV_WARN_UNUSED(int) ccv_cnnp_model_flags(ccv_cnnp_model_t* const model);
3859
enum {
3860
  /**
3861
   * This is the default flag. If the model is not initialized, it will attempt to read from the disk.
3862
   * Otherwise, it will persist existing parameters to disk.
3863
   */
3864
  CCV_CNNP_MODEL_CHECKPOINT_READ_WRITE,
3865
  /**
3866
   * Only read parameters from disk, even if it is already initialized.
3867
   */
3868
  CCV_CNNP_MODEL_CHECKPOINT_READ_ONLY,
3869
  /**
3870
   * Only write parameters to disk.
3871
   */
3872
  CCV_CNNP_MODEL_CHECKPOINT_WRITE_ONLY,
3873
};
3874
/**
3875
 * Write model's tensors to a SQLite database with a given name. Note that we specifically say
3876
 * "model's tensors" because it doesn't persist the model's structure. Hence, you shouldn't
3877
 * expect us to take a name to then have a fully functional model restored from there. You still
3878
 * need to construct the model. This method only writes the tensors (weights and other internal ones)
3879
 * to disk.
3880
 * @param model The model.
3881
 * @param handle The SQLite handle.
3882
 * @param name The name to find the tensors related to the model in the database.
3883
 * @param options The IO options that can do data encode / decode before persistence.
3884
 * @return CCV_IO_FINAL for success, otherwise error.
3885
 */
3886
int ccv_cnnp_model_write(const ccv_cnnp_model_t* const model, void* const handle, const char* const name, const ccv_nnc_tensor_io_option_t* const options);
3887
/**
3888
 * Write model's tensors to a SQLite database implicitly with the "" name. This is a convenience method
3889
 * for ccv_cnnp_model_write, particularly useful at training time.
3890
 * @param model The composed model.
3891
 * @param fn The file name.
3892
 * @param options The IO options that can do data encode / decode before persistence.
3893
 */
3894
void ccv_cnnp_model_write_to_file(ccv_cnnp_model_t* const model, const char* const fn, const ccv_nnc_tensor_io_option_t* const options);
3895
/**
3896
 * The prototype for the writer function when exporting parameters out.
3897
 * @param tensor The tensor to be written to disk.
3898
 * @param sql The sql to be executed.
3899
 * @param handle The custom handle that you passed in from ``ccv_cnnp_model_write`` method.
3900
 * @param options The IO options that can do data encode / decode before persistence.
3901
 * @param name The name given to a particular parameter.
3902
 */
3903
typedef int (*ccv_cnnp_model_io_writer_f)(const ccv_nnc_tensor_t* const tensor, const char* const sql, void* const handle, const char* const name, const ccv_nnc_tensor_io_option_t* const options);
3904
/**
3905
 * The prototype for the reader function to load parameters.
3906
 * @param handle The custom handle that you passed in from ``ccv_cnnp_model_read`` method.
3907
 * @param name The name given to a particular parameter.
3908
 * @param options The IO options that can do data encode / decode before persistence.
3909
 * @param params The recommended tensor params.
3910
 * @param tensor_out The tensor to be loaded.
3911
 */
3912
typedef int (*ccv_cnnp_model_io_reader_f)(void* const handle, const char* const name, const ccv_nnc_tensor_io_option_t* const options, const ccv_nnc_tensor_param_t params, ccv_nnc_tensor_t** const tensor_out);
3913
/**
3914
 * Set the IO interceptor for loading / saving the model's weights, replacing the default SQLite reader / writer.
3915
 * @param model The model.
3916
 * @param reader The reader function for loading weights.
3917
 * @param writer The writer function for exporting weights out.
3918
 */
3919
void ccv_cnnp_model_set_io(ccv_cnnp_model_t* const model, ccv_cnnp_model_io_reader_f reader, ccv_cnnp_model_io_writer_f writer);
3920
/**
3921
 * Read model's tensors from a SQLite database with a given name.
3922
 * @param handle The SQLite handle.
3923
 * @param name The name to find the tensors related to the model in the database.
3924
 * @param options The IO options that can do data encode / decode before persistence.
3925
 * @param model_out The model which you want to restore the tensors. It should have the same
3926
 *                  structure as the one in write to.
3927
 * @return CCV_IO_FINAL for success, otherwise error.
3928
 */
3929
int ccv_cnnp_model_read(void* const handle, const char* const name, const ccv_nnc_tensor_io_option_t* const options, const ccv_cnnp_model_t* const model_out);
3930
/**
3931
 * Read model's tensors to a SQLite database implicitly with "" name. This is a convenience method
3932
 * to ccv_cnnp_model_read particularly useful at training time.
3933
 * @param fn The file name.
3934
 * @param options The IO options that can do data encode / decode before persistence.
3935
 * @param model_out The model which you want to restore the tensors. It should have the same
3936
 *                  structure as the one in write to.
3937
 */
3938
void ccv_cnnp_model_read_from_file(const char* const fn, const ccv_nnc_tensor_io_option_t* const options, const ccv_cnnp_model_t* const model_out);
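/* A checkpoint round-trip sketch ("net.checkpoint" is an assumed file name;
 * net2 must be constructed with the same structure as net):
 *
 *   ccv_cnnp_model_write_to_file(net, "net.checkpoint", 0);
 *   ccv_cnnp_model_read_from_file("net.checkpoint", 0, net2);
 */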
3939
/**
3940
 * Apply data parallel to the composed model. This method has to be called before we call either
3941
 * evaluate or fit and after the model is compiled.
3942
 * @param model The composed model.
3943
 * @param parallel Number of devices we want to run on. 0 will use all devices available. 1 will skip.
3944
 */
3945
void ccv_cnnp_model_set_data_parallel(ccv_cnnp_model_t* const model, const int parallel);
3946
/**
3947
 * Set the maximum operator-level concurrency. This is a soft-limit, e.g. if you have operations on
3948
 * different devices, they are concurrent.
3949
 * @param model The composed model.
3950
 * @param max_stream_count The maximum concurrency if the model schedules internal streams. 0 is no limit.
3951
 */
3952
void ccv_cnnp_model_set_max_concurrency(ccv_cnnp_model_t* const model, const int max_stream_count);
3953
/**
3954
 * Apply memory compression to the composed model. The memory compression technique can reduce memory
3955
 * usage by up to 75% compared with a raw mixed-precision model during training time.
3956
 * @param model The composed model.
3957
 * @param memory_compression Whether to enable the memory compression (1 - enable, 0 - disable (default))
3958
 */
3959
void ccv_cnnp_model_set_memory_compression(ccv_cnnp_model_t* const model, const int memory_compression);
3960
/**
3961
 * Apply memory reduction to the composed model. The memory reduction technique can reduce memory
3962
 * usage losslessly. Right now, the supported memory reduction technique is to redo datatype conversion.
3963
 * @param model The composed model.
3964
 * @param memory_reduction Whether to enable the memory reduction (1 - enable, 0 - disable (default))
3965
 */
3966
void ccv_cnnp_model_set_memory_reduction(ccv_cnnp_model_t* const model, const int memory_reduction);
3967
/**
3968
 * Set the computations in this model to use gradient checkpointing. This can be strategically applied
3969
 * to models within the higher-level composed models such that these models effectively save no
3970
 * intermediate results during backprop, at the cost of running the forward pass twice.
3971
 * @param model The model that will turn on gradient checkpointing.
3972
 * @param gradient_checkpointing Whether to enable gradient checkpointing (1 - enable, 0 - disable (default))
3973
 */
3974
void ccv_cnnp_model_set_gradient_checkpointing(ccv_cnnp_model_t* const model, const int gradient_checkpointing);
3975
/**
3976
 * Get whether gradient checkpointing is enabled or not for this model.
3977
 * @param model The model that will turn on gradient checkpointing.
3978
 */
3979
int ccv_cnnp_model_gradient_checkpointing(ccv_cnnp_model_t* const model);
3980
/**
3981
 * Set compile parameters on the model so it compiles the graph with the said parameters.
3982
 * @param model The composed model.
3983
 * @param compile_params A ccv_nnc_symbolic_graph_compile_param_t struct defines compilation parameters.
3984
 */
3985
void ccv_cnnp_model_set_compile_params(ccv_cnnp_model_t* const model, const ccv_nnc_symbolic_graph_compile_param_t compile_params);
3986
/**
3987
 * This method sets the max workspace size. If the graph is already compiled, it will re-run
3988
 * autotune to use the new workspace size to find the best algorithm.
3989
 * @param model The composed model.
3990
 * @param workspace_size The size in bytes that we can use as workspace (scratch memory).
3991
 */
3992
void ccv_cnnp_model_set_workspace_size(ccv_cnnp_model_t* const model, size_t workspace_size);
3993
/**
3994
 * This method returns the current max workspace size.
3995
 * @param model The composed model.
3996
 */
3997
size_t ccv_cnnp_model_workspace_size(ccv_cnnp_model_t* const model);
3998
/**
3999
 * Set a parameter that is specified by the parameter span. This will override whatever value is in that
4000
 * parameter. The given tensor should match the dimension of the parameter. It doesn't matter whether
4001
 * the given tensor is on CPU or GPU, it will be copied over. This method is limited: it can only set a
4002
 * tensor once the model is compiled.
4003
 * @param model The composed model.
4004
 * @param parameter The parameter that is used to specify which parameter to override.
4005
 * @param tensor The tensor contains the value we want to copy over.
4006
 */
4007
void ccv_cnnp_model_set_parameter(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameter, const ccv_nnc_tensor_t* const tensor);
4008
/**
4009
 * Copy a parameter that is specified by the parameter span out of a model. This will override the value
4010
 * in the tensor you provided. The given tensor should match the dimension of the parameter and should
4011
 * already be allocated. It doesn't matter whether the given tensor is on CPU or GPU.
4012
 * @param model The composed model.
4013
 * @param parameter The parameter that is used to specify which parameter to copy from.
4014
 * @param tensor The tensor that receives value.
4015
 */
4016
void ccv_cnnp_model_parameter_copy(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameter, ccv_nnc_tensor_t* const tensor);
4017
/**
4018
 * Get the ccv_nnc_tensor_param_t for a particular parameter of a model.
4019
 * @param model The composed model.
4020
 * @param parameter The parameter that is used to specify which parameter to retrieve ccv_nnc_tensor_param_t.
4021
 * @return The ccv_nnc_tensor_param_t structure that specifies a tensor shape.
4022
 */
4023
CCV_WARN_UNUSED(ccv_nnc_tensor_param_t) ccv_cnnp_model_parameter_tensor_params(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameter);
4024
/**
4025
 * Get the internal name for a particular parameter of a model.
4026
 * @param model The composed model.
4027
 * @param parameter The parameter that is used to specify which parameter's name to retrieve.
4028
 * @return The name string for the internal name; its life-cycle is managed by the model and it is valid until the next invocation of the model, whether another call or free.
4029
 */
4030
CCV_WARN_UNUSED(const char*) ccv_cnnp_model_parameter_name(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameter);
4031
/**
4032
 * This method returns the number of parameters for this particular model. Note that this is only available after
4033
 * model is compiled.
4034
 * @param model A model that is compiled.
4035
 * @return The number of parameters.
4036
 */
4037
CCV_WARN_UNUSED(int) ccv_cnnp_model_parameter_count(ccv_cnnp_model_t* const model);
4038
/**
4039
 * This method returns the total byte size of parameters for this particular model. Note that this is only available after
4040
 * model is compiled.
4041
 * @param model A model that is compiled.
4042
 * @return The total byte size of parameters.
4043
 */
4044
CCV_WARN_UNUSED(uint64_t) ccv_cnnp_model_parameters_size(ccv_cnnp_model_t* const model);
4045
/**
4046
 * This method moves parameters of this particular model to the designated device. It invalidates the parameters
4047
 * on the given model, and they need to be moved back if the model is to be used later.
4048
 * You can consider this as a counterpart of ccv_cnnp_model_parameter_copy, but it operates on the whole model.
4049
 * @param model A model that is compiled.
4050
 * @param names The name associated with the tensor parameter.
4051
 * @param tensors The tensor associated with this parameter.
4052
 * @param count The size of the array provided for names and tensors, this should match ccv_cnnp_model_parameter_count call.
4053
 * @param type Either CCV_TENSOR_GPU_MEMORY or CCV_TENSOR_CPU_MEMORY.
4054
 * @return 1 for success.
4055
 */
4056
CCV_WARN_UNUSED(int) ccv_cnnp_model_parameters_move(ccv_cnnp_model_t* const model, char** const names, ccv_nnc_tensor_t** const tensors, const int count, const int type);
4057
/**
4058
 * This method moves or copies parameters from the array into this particular model on the designated device.
4059
 * If it is a move, it invalidates the parameters in the array and leaves a "skeleton" tensor.
4060
 * You can consider this as a counterpart of ccv_cnnp_model_set_parameter, but it operates on the whole model.
4061
 * @param model A model that is compiled.
4062
 * @param names The name associated with the tensor parameter.
4063
 * @param tensors The tensor associated with this parameter.
4064
 * @param count The size of the array provided for names and tensors, this should match ccv_cnnp_model_parameter_count call.
4065
 * @param invalidates Whether to invalidate the original tensor (1 - to invalidate, use move semantics if possible).
4066
 */
4067
void ccv_cnnp_model_set_parameters_from_key_values(ccv_cnnp_model_t* const model, char* const* const names, ccv_nnc_tensor_t** const tensors, const int count, const int invalidates);
4068
/**
4069
 * Use this to loop over and if the parameter matches, return 1.
4070
 */
4071
typedef int (*ccv_cnnp_model_parameters_filter_f)(const ccv_cnnp_model_t* const model, const char* const name, void* const context);
4072
/**
4073
 * Loop over a compiled model to find a parameter to either write or modify.
4074
 * @param model A model that is compiled.
4075
 * @param filter The callback that determines whether this parameter matches.
4076
 * @param context The context to be passed along with the callback.
4077
 * @return an array of ccv_cnnp_model_io_t.
4078
 */
4079
CCV_WARN_UNUSED(ccv_array_t*) ccv_cnnp_model_parameters_filter(ccv_cnnp_model_t* const model, ccv_cnnp_model_parameters_filter_f filter, void* const context);
4080
/**
4081
 * Loop over a compiled model to find a parameter to either write or modify.
4082
 * @param model A model that is compiled.
4083
 * @param first The callback that determines whether a parameter is found.
4084
 * @param context The context to be passed along with the callback.
4085
 * @return a ccv_cnnp_model_io_t or 0 if not found.
4086
 */
4087
CCV_WARN_UNUSED(ccv_cnnp_model_io_t) ccv_cnnp_model_parameter_first(ccv_cnnp_model_t* const model, ccv_cnnp_model_parameters_filter_f first, void* const context);
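/* A filter sketch: find the first parameter whose internal name contains
 * "bias" (the callback and the strstr match are illustrative assumptions):
 *
 *   static int _match_bias(const ccv_cnnp_model_t* const model, const char* const name, void* const context)
 *   {
 *     return strstr(name, "bias") != 0;
 *   }
 *   // ...
 *   const ccv_cnnp_model_io_t bias = ccv_cnnp_model_parameter_first(net, _match_bias, 0);
 */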
4088
/**
4089
 * Loop over a compiled model to find a parameter that is not initialized.
4090
 * @param model A model that is compiled.
4091
 * @return a ccv_cnnp_model_io_t or 0 if not found.
4092
 */
4093
CCV_WARN_UNUSED(ccv_cnnp_model_io_t) ccv_cnnp_model_parameter_first_uninit(ccv_cnnp_model_t* const model);
4094
/**
4095
 * Set parameters from another model. This will override whatever values in these parameters. The
4096
 * given parameters from another model should match the dimension of the parameter. It doesn't matter
4097
 * whether the given tensor is on CPU or GPU. This method can only set when both models are compiled.
4098
 * @param model The composed model to be set on parameters.
4099
 * @param parameters The parameters to be overridden.
4100
 * @param from_model The model to copy parameters from.
4101
 * @param from_parameters The parameters to be copied from.
4102
 */
4103
void ccv_cnnp_model_set_parameters(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameters, const ccv_cnnp_model_t* const from_model, const ccv_cnnp_model_io_t from_parameters);
4104
4105
/**
4106
 * @param context The context passed to the share method.
4107
 * @param source_name The name of the parameter from the from_model.
4108
 * @param updated_name The name of the parameter from the model. You can update the value.
4109
 * @param provided_size The size of the updated_name buffer.
4110
 * @return 0 if it succeeds, -1 if it fails.
4111
 */
4112
typedef int(*ccv_cnnp_model_parameters_renamer_f)(void* const context, const char* const source_name, char* const updated_name, const size_t provided_size);
4113
/**
4114
 * Share parameters between two models. This is a very specific setup to enable memory optimization
4115
 * by sharing parameter weights between two models. The models can be different as long as the weights
4116
 * match. The caller is responsible for keeping from_model alive / not destroyed. There is no refcount.
4117
 * Besides using the parameters to identify, you can also use the given block to provide name match.
4118
 * @param model The composed model to be set on parameters.
4119
 * @param parameters The parameters to be overridden.
4120
 * @param from_model The model to copy parameters from.
4121
 * @param from_parameters The parameters to be shared from.
4122
 * @param renamer The provided rename function that can get the new name from the from_parameters.
4123
 * @param context The context for renamer function.
4124
 */
4125
void ccv_cnnp_model_share_parameters(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameters, const ccv_cnnp_model_t* const from_model, const ccv_cnnp_model_io_t from_parameters, ccv_cnnp_model_parameters_renamer_f renamer, void* const context);
4126
/**
4127
 * Process parameters such as exponential averaging.
4128
 * parameters = zip(from_parameters, to_parameters).map { cmd(to_parameter, from_parameter) }
4129
 * The order is selected in such a way because many of our commands only support an inplace op if the first
4130
 * parameter matches.
4131
 * @param model The composed model to have parameters zip mapped.
4132
 * @param parameters The parameters to be written (and read).
4133
 * @param cmd The command to apply on the parameters.
4134
 * @param hint The hint supplied to the cmd.
4135
 * @param flags The flags supplied to the cmd.
4136
 * @param aux_ins Additional inputs supplied to the cmd.
4137
 * @param aux_in_size The size of additional inputs supplied to the cmd.
4138
 * @param aux_outs Additional outputs supplied to the cmd.
4139
 * @param aux_out_size The size of additional outputs supplied to the cmd.
4140
 * @param stream_context The stream context to be associated with.
4141
 * @param from_model The other composed model to have parameters zipped.
4142
 * @param from_parameters The parameters to be read.
4143
 */
4144
void ccv_cnnp_model_parameters_zip_map(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameters, const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const aux_ins, const int aux_in_size, ccv_nnc_tensor_t* const* const aux_outs, const int aux_out_size, ccv_nnc_stream_context_t* const stream_context, const ccv_cnnp_model_t* const from_model, const ccv_cnnp_model_io_t from_parameters);
4145
/**
4146
 * Process parameters such as clipping. parameters = parameters.map { cmd(parameter) }
4147
 * @param model The composed model to have parameters mapped.
4148
 * @param parameters The parameters to be mapped.
4149
 * @param cmd The command to apply on the parameters.
4150
 * @param hint The hint supplied to the cmd.
4151
 * @param flags The flags supplied to the cmd.
4152
 * @param aux_ins Additional inputs supplied to the cmd.
4153
 * @param aux_in_size The size of additional inputs supplied to the cmd.
4154
 * @param aux_outs Additional outputs supplied to the cmd.
4155
 * @param aux_out_size The size of additional outputs supplied to the cmd.
4156
 * @param stream_context The stream context to be associated with.
4157
 */
4158
void ccv_cnnp_model_parameters_map(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameters, const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const aux_ins, const int aux_in_size, ccv_nnc_tensor_t* const* const aux_outs, const int aux_out_size, ccv_nnc_stream_context_t* const stream_context);
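For example, clipping every parameter value into [-1, 1] in place can be sketched as below, assuming the generated CMD_CLAMP_FORWARD macro:

ccv_cnnp_model_parameters_map(model,
	ccv_cnnp_model_parameters(model, ALL_PARAMETERS, ALL_PARAMETERS),
	CMD_CLAMP_FORWARD(-1, 1), ccv_nnc_no_hint, 0 /* flags */,
	0, 0, 0, 0 /* no aux ins / outs */, 0 /* default stream */);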
4159
/**
4160
 * Process parameter gradients such as normalization. parameters.grad = parameters.apply { cmd(parameter.grad) }
4161
 * @param model The composed model to have parameters mapped.
4162
 * @param parameters The parameters to be mapped.
4163
 * @param cmd The command to apply on the parameters.
4164
 * @param hint The hint supplied to the cmd.
4165
 * @param flags The flags supplied to the cmd.
4166
 * @param aux_ins Additional inputs supplied to the cmd.
4167
 * @param aux_in_size The size of additional inputs supplied to the cmd.
4168
 * @param aux_outs Additional outputs supplied to the cmd.
4169
 * @param aux_out_size The size of additional outputs supplied to the cmd.
4170
 * @param stream_context The stream context to be associated with.
4171
 */
4172
void ccv_cnnp_model_parameter_gradients_map(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameters, const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const aux_ins, const int aux_in_size, ccv_nnc_tensor_t* const* const aux_outs, const int aux_out_size, ccv_nnc_stream_context_t* const stream_context);
4173
/**
4174
 * Set a new minimizer for the model. This is useful when you need to update the learning rate for
4175
 * stochastic gradient descent, for example. This method can be called at any time during the training
4176
 * process (after compilation).
4177
 * @param model The composed model.
4178
 * @param minimizer The wrapped command that represents a new optimization strategy.
4179
 * @param reset Reset all previous states of minimizers. This only makes sense if both parameters and parameter_size are 0.
4180
 * @param parameters The parameters to apply the minimizer to. 0 means all.
4181
 * @param parameter_size The number of parameter spans.
4182
 */
4183
void ccv_cnnp_model_set_minimizer(ccv_cnnp_model_t* const model, const ccv_nnc_cmd_t minimizer, const int reset, const ccv_cnnp_model_io_t* const parameters, const int parameter_size);
4184
/**
4185
 * Retrieve the default minimizer for the model. This is set either when you compile the model or call
4186
 * ccv_cnnp_model_set_minimizer with no parameter spans.
4187
 * @param model The composed model.
4188
 * @return The minimizer command.
4189
 */
4190
CCV_WARN_UNUSED(ccv_nnc_cmd_t) ccv_cnnp_model_minimizer(ccv_cnnp_model_t* const model);
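For example, a step-wise learning-rate decay can fetch the current minimizer, scale its rate, and set it back. A sketch, assuming an SGD minimizer whose learning rate lives at info.sgd.rate:

ccv_nnc_cmd_t minimizer = ccv_cnnp_model_minimizer(model);
minimizer.info.sgd.rate *= 0.1; // Assumes SGD; the rate field depends on the minimizer type.
ccv_cnnp_model_set_minimizer(model, minimizer, 0 /* no reset */, 0, 0 /* all parameters */);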
4191
/**
4192
 * Get the default stream from a compiled model. If the model is not compiled, the default stream is
4193
 * 0.
4194
 * @param model The composed model.
4195
 * @return The default stream for this model.
4196
 */
4197
CCV_WARN_UNUSED(ccv_nnc_stream_context_t*) ccv_cnnp_model_default_stream(const ccv_cnnp_model_t* const model);
4198
/**
4199
 * Get the allocated memory size (excluding workspace) from a compiled model. If the model is not compiled,
4200
 * the size is 0.
4201
 * @param model The composed model.
4202
 * @return The number of bytes for memory allocated.
4203
 */
4204
CCV_WARN_UNUSED(uint64_t) ccv_cnnp_model_memory_size(const ccv_cnnp_model_t* const model);
4205
/**
4206
 * Free a given model.
4207
 * @param model The composed model.
4208
 */
4209
void ccv_cnnp_model_free(ccv_cnnp_model_t* const model);
4210
4211
/** @} */
4212
4213
/**
4214
 * @defgroup level_5_model_add_ons Model Add-ons
4215
 * @{
4216
 */
4217
4218
/**
4219
 * Process parameter gradients with norm clipping. Exactly the same as PyTorch's clip_grad_norm_.
4220
 * @param model The composed model to have parameters mapped.
4221
 * @param parameters The parameters to be mapped.
4222
 * @param norm_type Currently only supports 2.
4223
 * @param max_norm The max value for norm.
4224
 * @param stream_context The stream context to be associated with.
4225
 */
4226
void ccv_cnnp_model_parameters_clip_grad_norm(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameters, int norm_type, float max_norm, ccv_nnc_stream_context_t* const stream_context);
4227
/**
4228
 * Process parameter gradients to check whether any is NaN.
4229
 * @param model The composed model to have parameters mapped.
4230
 * @param parameters The parameters to be mapped.
4231
 * @param stream_context The stream context to be associated with.
4232
 * @return 1 if any gradient contains NaN, 0 otherwise.
4233
 */
4234
int ccv_cnnp_model_parameter_gradients_isnan(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameters, ccv_nnc_stream_context_t* const stream_context);
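Together with the norm clipping above, this supports a guarded update step that skips applying gradients when any of them overflowed. A sketch, assuming gradients were already accumulated with ccv_cnnp_model_backward and are applied with ccv_cnnp_model_apply_gradients from earlier in this header:

const ccv_cnnp_model_io_t all = ccv_cnnp_model_parameters(model, ALL_PARAMETERS, ALL_PARAMETERS);
if (ccv_cnnp_model_parameter_gradients_isnan(model, all, 0))
{
	// Skip this update (and, in mixed-precision training, reduce the loss scale).
} else {
	ccv_cnnp_model_parameters_clip_grad_norm(model, all, 2, 1.0, 0);
	ccv_cnnp_model_apply_gradients(model, 0);
}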
4235
4236
enum {
4237
  CCV_CNNP_IO, /**< The parameter is a ccv_cnnp_io_t. */
4238
  CCV_CNNP_NO_TENSOR, /**< The parameter is not used. */
4239
  CCV_CNNP_TENSOR_NOT_OUTPUT, /**< This parameter indicates this is a tensor parameter, but it is not an output reflected as ccv_cnnp_io_t */
4240
  CCV_CNNP_INIT_SHARED_TENSOR, /**< The parameter is a provided tensor for initialization. */
4241
  CCV_CNNP_INIT_SHARED_TENSOR_AS_TRAINABLE, /**< The parameter is a provided tensor that can be updated. */
4242
};
4243
4244
typedef void(*ccv_cnnp_state_initializer_f)(void* const context, const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const input, const ccv_nnc_tensor_symbol_t output_symbol);
4245
typedef void(*ccv_cnnp_cmd_exec_init_state_f)(const ccv_nnc_tensor_symbol_t tensor_symbol, const ccv_cnnp_state_initializer_f initializer, void* const initializer_context, void* const context);
4246
typedef void(*ccv_cnnp_cmd_exec_init_state_deinit_f)(void* const context);
4247
typedef void*(*ccv_cnnp_cmd_exec_init_state_copy_f)(void* const context);
4248
4249
typedef struct {
4250
  ccv_nnc_tensor_param_t info; /**< The tensor parameter for this one. */
4251
  void* context; /**< The context with which we initialize the tensor. */
4252
  ccv_cnnp_cmd_exec_init_state_f init; /**< The function to init state for a tensor. */
4253
  ccv_cnnp_cmd_exec_init_state_copy_f copy; /**< The function to make a copy of the context. */
4254
  ccv_cnnp_cmd_exec_init_state_deinit_f deinit; /**< The function to release the context. */
4255
} ccv_cnnp_cmd_exec_io_init_state_t;
4256
4257
typedef struct {
4258
  int type; /**< The type of the parameter; could be CCV_CNNP_IO, CCV_CNNP_NO_TENSOR, CCV_CNNP_INIT_SHARED_TENSOR, or CCV_CNNP_INIT_SHARED_TENSOR_AS_TRAINABLE. */
4259
  ccv_cnnp_cmd_exec_io_init_state_t init_state; /**< The state used to initialize the given tensor. */
4260
} ccv_cnnp_cmd_exec_io_t;
4261
/**
4262
 * A generic model based on the command. If a tensor is labeled as ccv_cnnp_io_t, it will participate
4263
 * as an input / output of the model. If it is an init tensor, the model will use this tensor for that parameter.
4264
 * Moreover, if it is marked as a parameter, that tensor will be differentiated against when you call
4265
 * ccv_cnnp_model_fit. This model, however, doesn't take ownership of the tensors. You should manage the life
4266
 * cycle of the given tensors; it is your responsibility to make sure they outlive the model. Also, all inputs and
4267
 * outputs marked as init tensors will be shared if you reuse this model in other places.
4268
 * @param cmd The command to generate this model.
4269
 * @param hint The hint to run the command.
4270
 * @param flags The flags with the command.
4271
 * @param inputs A list of ccv_cnnp_cmd_exec_io_t that identifies each input as either an init tensor or a ccv_cnnp_io_t.
4272
 * @param input_size The size of input list.
4273
 * @param outputs A list of types that identifies each output as a ccv_cnnp_io_t or no tensor.
4274
 * @param output_size The size of the outputs. There is no need to give ccv_nnc_tensor_param_t for outputs because
4275
 *        all of them are CCV_CNNP_IO type.
4276
 * @param is_trainable Whether the parameters of this model can be trained.
4277
 * @param name The unique name of the model.
4278
 * @return A model based on the given command.
4279
 */
4280
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_cmd_exec(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, const ccv_cnnp_cmd_exec_io_t* const inputs, const int input_size, const int* const outputs, const int output_size, const int is_trainable, const char* const name);
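A minimal sketch: wrap an element-wise sum command into a two-input, one-output model with no init tensors (CMD_EWSUM_FORWARD is from the generated command headers):

const ccv_cnnp_cmd_exec_io_t inputs[] = {
	{ .type = CCV_CNNP_IO },
	{ .type = CCV_CNNP_IO },
};
const int outputs[] = { CCV_CNNP_IO };
ccv_cnnp_model_t* const sum = ccv_cnnp_cmd_exec(CMD_EWSUM_FORWARD(), ccv_nnc_no_hint, 0,
	inputs, 2, outputs, 1, 0 /* not trainable */, "sum");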
4281
/**
4282
 * Copy a tensor as initialization for the given parameter.
4283
 * @param tensor The tensor to copy from.
4284
 * @return An init_state that can be passed to ccv_cnnp_cmd_exec_io_t.
4285
 */
4286
CCV_WARN_UNUSED(ccv_cnnp_cmd_exec_io_init_state_t) ccv_cnnp_cmd_exec_io_copy(const ccv_nnc_tensor_t* const tensor);
4287
/**
4288
 * Initialize a given parameter with the command.
4289
 * @param cmd The command to call when initialization is needed.
4290
 * @param hint The hint to accompany the command.
4291
 * @param flags The flags to accompany the command.
4292
 * @param params The tensor configuration.
4293
 * @return An init_state that can be passed to ccv_cnnp_cmd_exec_io_t.
4294
 */
4295
CCV_WARN_UNUSED(ccv_cnnp_cmd_exec_io_init_state_t) ccv_cnnp_cmd_exec_io_set_by(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, const ccv_nnc_tensor_param_t params);
4296
4297
typedef struct {
4298
  ccv_nnc_tensor_symbol_t symbol; /**< The tensor symbol this refers to. */
4299
  int type; /**< The type of the parameter; could be CCV_CNNP_IO, CCV_CNNP_INIT_SHARED_TENSOR, or CCV_CNNP_INIT_SHARED_TENSOR_AS_TRAINABLE. */
4300
  ccv_cnnp_cmd_exec_io_init_state_t init_state; /**< The state used to initialize the given tensor. */
4301
} ccv_cnnp_tensor_symbol_param_t;
4302
/**
4303
 * A generic model based on the symbolic graph we provided. Each tensor symbol in the list is labeled as
4304
 * ccv_cnnp_io_t or not (we identify whether it is an input or output based on whether it is in the graph).
4305
 * If it is not, we init it with a given tensor. If it is marked as a parameter, that tensor will be differentiated
4306
 * against when you call ccv_cnnp_model_fit. The model doesn't take ownership of the init tensors. You are
4307
 * responsible for making sure the init tensors outlive the model until initialization has occurred. Also, these
4308
 * tensors will be shared if the model is reused.
4309
 * @param graph The symbolic graph that is our blueprint for this model.
4310
 * @param tensor_symbol_params The list of tensor symbol parameters that labels a given symbol.
4311
 * @param tensor_symbol_param_size The size of the list.
4312
 * @param inputs The inputs to this graph. We can figure out which ones are inputs, but this gives us the order.
4313
 * @param input_size The size of the input list.
4314
 * @param outputs The outputs from this graph. We can figure out which ones are outputs, but this gives us the order.
4315
 * @param output_size The size of the output list.
4316
 * @param is_trainable Whether the parameters of this model can be trained.
4317
 * @param name The unique name of the model.
4318
 * @return A model based on the given symbolic graph.
4319
 */
4320
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_graph(const ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_tensor_symbol_param_t* const tensor_symbol_params, const int tensor_symbol_param_size, ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size, const int is_trainable, const char* const name);
4321
/**
4322
 * Sum multiple input tensors together.
4323
 * @param name The unique name of the model.
4324
 * @return A model that can be applied with multiple inputs, and generate output that is a sum of the inputs.
4325
 */
4326
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_sum(const char* const name);
4327
/**
4328
 * Concatenate input tensors together.
4329
 * @param axis Along this axis, we concatenate tensors together. Other dimensions need to be exactly the same.
4330
 * @param name The unique name of the model.
4331
 * @return A model that can be applied with multiple inputs, and generate output that is a concatenation of the inputs.
4332
 */
4333
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_concat(const int axis, const char* const name);
4334
/**
4335
 * Chunk the input tensor into n pieces.
4336
 * @param n How many pieces we chunk the tensor into.
4337
 * @param axis Along this axis, we chunk the tensor. Other dimensions need to be exactly the same.
4338
 * @param name The unique name of the model.
4339
 * @return A model that can be applied with one input, and generate outputs that are chunks of the input.
4340
 */
4341
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_chunk(const int n, const int axis, const char* const name);
4342
/**
4343
 * A convolution model.
4344
 * @param groups The number of kernel groups in the model.
4345
 * @param filters The total number of filters in the model (filters = groups * per group filters).
4346
 * @param kdim The dimensions of the kernel.
4347
 * @param dilation The dilation factor on each dimension.
4348
 * @param no_bias Whether to omit the bias term or not.
4349
 * @param hint The hint for alignment.
4350
 * @param format The format for weights. If 0, it will have the same format as the input.
4351
 * @param is_trainable Whether the parameters of this model can be trained.
4352
 * @param name The unique name of the model.
4353
 * @return A convolution model.
4354
 */
4355
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_convolution(const int groups, const int filters, const int kdim[CCV_NNC_MAX_DIM_ALLOC], const int dilation[CCV_NNC_MAX_DIM_ALLOC], const int no_bias, ccv_nnc_hint_t hint, const int format, const int is_trainable, const char* const name);
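For example, a standard 3x3 convolution with 64 filters, stride 1 and padding 1 might be constructed as in the sketch below, assuming the DIM_ALLOC and HINT helpers from earlier in this header:

ccv_cnnp_model_t* const conv = ccv_cnnp_convolution(
	1 /* groups */, 64 /* filters */, DIM_ALLOC(3, 3) /* kdim */, DIM_ALLOC() /* no dilation */,
	0 /* with bias */, HINT((1, 1), (1, 1)) /* stride 1, border 1 */,
	0 /* same format as input */, 1 /* trainable */, "conv3x3");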
4356
/**
4357
 * A convolution transpose model.
4358
 * @param groups The number of kernel groups in the model.
4359
 * @param filters The total number of filters in the model (filters = groups * per group filters).
4360
 * @param kdim The dimensions of the kernel.
4361
 * @param dilation The dilation factor on each dimension.
4362
 * @param output_padding The padding that helps to resolve shape ambiguity when this is the inverse of a convolution.
4363
 * @param no_bias Whether to omit the bias term or not.
4364
 * @param hint The hint for alignment.
4365
 * @param format The format for weights. If 0, it will have the same format as the input.
4366
 * @param is_trainable Whether the parameters of this model can be trained.
4367
 * @param name The unique name of the model.
4368
 * @return A convolution transpose model.
4369
 */
4370
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_convolution_transpose(const int groups, const int filters, const int kdim[CCV_NNC_MAX_DIM_ALLOC], const int dilation[CCV_NNC_MAX_DIM_ALLOC], const int output_padding, const int no_bias, ccv_nnc_hint_t hint, const int format, const int is_trainable, const char* const name);
4371
/**
4372
 * A dense layer model.
4373
 * @param count The output dimension.
4374
 * @param no_bias Whether to omit the bias term or not.
4375
 * @param flags The flags to disable / enable certain features.
4376
 * @param is_trainable Whether the parameters of this model can be trained.
4377
 * @param name The unique name of the model.
4378
 * @return A dense layer model.
4379
 */
4380
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_dense(const int count, const int no_bias, const int flags, const int is_trainable, const char* const name);
4381
/**
4382
 * A batch norm layer model.
4383
 * @param momentum The momentum in batch norm parameter.
4384
 * @param epsilon The epsilon in batch norm parameter.
4385
 * @param is_trainable Whether the parameters of this model can be trained.
4386
 * @param name The unique name of the model.
4387
 * @return A batch norm layer model.
4388
 */
4389
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_batch_norm(const float momentum, const float epsilon, const int is_trainable, const char* const name);
4390
/**
4391
 * A RELU activation layer model.
4392
 * @param name The unique name of the model.
4393
 * @return A RELU activation layer model.
4394
 */
4395
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_relu(const char* const name);
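These layer constructors compose through the functional API from earlier in this header (ccv_cnnp_input, ccv_cnnp_model_apply, ccv_cnnp_model_new and the MODEL_IO_LIST helper). A sketch of a two-layer MLP, with layer sizes chosen arbitrarily:

const ccv_cnnp_model_io_t input = ccv_cnnp_input();
ccv_cnnp_model_io_t x = ccv_cnnp_model_apply(ccv_cnnp_dense(128, 0, 0, 1, "fc1"), MODEL_IO_LIST(input));
x = ccv_cnnp_model_apply(ccv_cnnp_relu("relu1"), MODEL_IO_LIST(x));
x = ccv_cnnp_model_apply(ccv_cnnp_dense(10, 0, 0, 1, "fc2"), MODEL_IO_LIST(x));
ccv_cnnp_model_t* const mlp = ccv_cnnp_model_new(MODEL_IO_LIST(input), MODEL_IO_LIST(x), 1, "mlp");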
4396
/**
4397
 * A sigmoid activation layer model.
4398
 * @param name The unique name of the model.
4399
 * @return A sigmoid activation layer model.
4400
 */
4401
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_sigmoid(const char* const name);
4402
/**
4403
 * A tanh activation layer model.
4404
 * @param name The unique name of the model.
4405
 * @return A tanh activation layer model.
4406
 */
4407
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_tanh(const char* const name);
4408
/**
4409
 * A swish activation layer model.
4410
 * @param name The unique name of the model.
4411
 * @return A swish activation layer model.
4412
 */
4413
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_swish(const char* const name);
4414
/**
4415
 * A GELU activation layer model.
4416
 * @param tanh Whether to enable the fast (tanh) approximation of GELU.
4417
 * @param name The unique name of the model.
4418
 * @return A GELU activation layer model.
4419
 */
4420
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_gelu(const int tanh, const char* const name);
4421
/**
4422
 * A leaky ReLU activation layer model.
4423
 * @param negative_slope The coefficient applied to negative inputs.
4424
 * @param name The unique name of the model.
4425
 * @return A leaky ReLU activation layer model.
4426
 */
4427
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_leaky_relu(const float negative_slope, const char* const name);
4428
/**
4429
 * A softmax activation layer model.
4430
 * @param name The unique name of the model.
4431
 * @return A softmax activation layer model.
4432
 */
4433
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_softmax(const char* const name);
4434
/**
4435
 * A max pool model.
4436
 * @param kdim The pooling window dimension.
4437
 * @param hint The hint for alignment.
4438
 * @param name The unique name of the model.
4439
 * @return A max pool model.
4440
 */
4441
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_max_pool(const int kdim[CCV_NNC_MAX_DIM_ALLOC], const ccv_nnc_hint_t hint, const char* const name);
4442
/**
4443
 * An average pool model.
4444
 * @param kdim The pooling window dimension.
4445
 * @param hint The hint for alignment.
4446
 * @param name The unique name of the model.
4447
 * @return An average pool model.
4448
 */
4449
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_average_pool(const int kdim[CCV_NNC_MAX_DIM_ALLOC], const ccv_nnc_hint_t hint, const char* const name);
4450
/**
4451
 * Reshape an input into a different dimension.
4452
 * @param format Change the layout format for a given input; 0 means no change.
4453
 * @param dim The new dimensions for the input.
4454
 * @param ofs The offset on each of the dimensions.
4455
 * @param stride The line size of each dimension.
4456
 * @param name The unique name of the model.
4457
 * @return A reshape layer model.
4458
 */
4459
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_reshape(const int format, const int dim[CCV_NNC_MAX_DIM_ALLOC], const int ofs[CCV_NNC_MAX_DIM_ALLOC], const int stride[CCV_NNC_MAX_DIM_ALLOC], const char* const name);
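For instance, flattening a (N, C, H, W) activation into (N, C * H * W) without changing the layout can be sketched as below, assuming the DIM_ALLOC helper (batch_size, 3, 32, 32 are hypothetical; empty ofs / stride mean defaults):

ccv_cnnp_model_t* const to_2d = ccv_cnnp_reshape(0 /* keep format */,
	DIM_ALLOC(batch_size, 3 * 32 * 32), DIM_ALLOC() /* ofs */, DIM_ALLOC() /* stride */, "to_2d");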
4460
/**
4461
 * Pad the input with extra elements at the beginning or the end of each dimension. Padding should be > 0.
4462
 * @param type Two types of padding supported: zero and replication.
4463
 * @param begin How many elements to add at the beginning of each dimension.
4464
 * @param end How many elements to add at the end of each dimension.
4465
 * @param name The unique name of the model.
4466
 * @return A pad layer model.
4467
 */
4468
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_pad(const int type, const int begin[CCV_NNC_MAX_DIM_ALLOC], const int end[CCV_NNC_MAX_DIM_ALLOC], const char* const name);
4469
/**
4470
 * Identity op that simply copies from input to output without using any data transfer / format conversion methods.
4471
 * @param name The unique name of the model.
4472
 * @return An identity layer model.
4473
 */
4474
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_identity(const char* const name);
4475
/**
4476
 * Permute the input. For example, [2, 0, 1] means moving dimension 2 to 0, dimension 0 to 1, dimension 1 to 2.
4477
 * @param index The source dimension index for each output dimension.
4478
 * @param name The unique name of the model.
4479
 * @return A permute layer model.
4480
 */
4481
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_permute(const int index[CCV_NNC_MAX_DIM_ALLOC], const char* const name);
4482
/**
4483
 * Extract one of the multiple outputs. This is useful because a ccv_cnnp_model_io_t can contain multiple outputs;
4484
 * this helps to extract one of them for later use.
4485
 * @param index The index to the output you want to extract.
4486
 * @param name The unique name of the model.
4487
 * @return A model that can extract one output.
4488
 */
4489
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_extract(const int index, const char* const name);
4490
/**
4491
 * Flatten an input tensor into a one-dimensional array.
4492
 * @param name The unique name of the model.
4493
 * @return A flatten layer model.
4494
 */
4495
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_flatten(const char* const name);
4496
/**
4497
 * A layer norm model.
4498
 * @param epsilon The epsilon in layer norm parameter.
4499
 * @param axis The feature axes over which to compute the norm.
4500
 * @param axis_count How many axes we count as features.
4501
 * @param elementwise_affine Whether it contains scale / bias.
4502
 * @param is_trainable Whether the parameters of this model can be trained.
4503
 * @param name The unique name of the model.
4504
 * @return A layer norm model.
4505
 */
4506
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_layer_norm(const float epsilon, const int axis[CCV_NNC_MAX_DIM_ALLOC], const int axis_count, const int elementwise_affine, const int is_trainable, const char* const name);
4507
/**
4508
 * A group norm model.
4509
 * @param group_axis The feature axis along which to form groups and compute the norm.
4510
 * @param groups How many groups to split the group_axis into.
4511
 * @param epsilon The epsilon in the group norm parameter.
4512
 * @param reduce_axis The other axes to be reduced.
4513
 * @param axis_count The number of other axes to be reduced.
4514
 * @param elementwise_affine Whether it contains scale / bias.
4515
 * @param is_trainable Whether the parameters of this model can be trained.
4516
 * @param name The unique name of the model.
4517
 * @return A group norm model.
4518
 */
4519
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_group_norm(const int group_axis, const int groups, const float epsilon, const int reduce_axis[CCV_NNC_MAX_DIM_ALLOC], const int axis_count, const int elementwise_affine, const int is_trainable, const char* const name);
4520
/**
4521
 * An RMSNorm model.
4522
 * @param epsilon The epsilon in the rmsnorm parameter.
4523
 * @param axis The feature axes over which to compute the norm.
4524
 * @param axis_count How many axes we count as features.
4525
 * @param is_trainable Whether the parameters of this model can be trained.
4526
 * @param name The unique name of the model.
4527
 * @return An RMSNorm model.
4528
 */
4529
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_rmsnorm(const float epsilon, const int axis[CCV_NNC_MAX_DIM_ALLOC], const int axis_count, const int is_trainable, const char* const name);
4530
/**
4531
 * Add two input tensors together. Different from sum because this supports broadcasting.
4532
 * @param p The weight for the first input.
4533
 * @param q The weight for the second input.
4534
 * @param name The unique name of the model.
4535
 * @return A model that can be applied with two inputs, and generate output that is a weighted sum of the inputs.
4536
 */
4537
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_add(const float p, const float q, const char* const name);
4538
/**
4539
 * Multiply two input tensors together.
4540
 * @param p The weight for the output.
4541
 * @param name The unique name of the model.
4542
 * @return A model that can be applied with two inputs, and generate output that is a product of the inputs.
4543
 */
4544
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_mul(const float p, const char* const name);
4545
/**
4546
 * A scalar multiplication model. Y = aX where a is a scalar.
4547
 * @param a The scalar parameter.
4548
 * @param name The unique name of the model.
4549
 * @return A scalar multiplication model.
4550
 */
4551
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_scalar_mul(const float a, const char* const name);
4552
/**
4553
 * Divide one input tensor by another.
4554
 * @param reciprocal If set, take only one tensor input and effectively compute 1 / input.
4555
 * @param name The unique name of the model.
4556
 * @return A model that can be applied with two inputs, and generate output that is a division of the inputs.
4557
 */
4558
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_div(const int reciprocal, const char* const name);
4559
/**
4560
 * Square root of the input tensor.
4561
 * @param name The unique name of the model.
4562
 * @return A model that can be applied with one input, and generate output that is the square root of the input.
4563
 */
4564
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_sqrt(const char* const name);
4565
/**
4566
 * Multiply two input tensors together as if these are complex numbers.
4567
 * @param name The unique name of the model.
4568
 * @return A model that can be applied with two inputs, and generate output that is a product of the inputs.
4569
 */
4570
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_cmul(const char* const name);
4571
/**
4572
 * A matrix transpose model.
4573
 * @param axis_a The axis to be exchanged with axis_b
4574
 * @param axis_b The axis to be exchanged with axis_a
4575
 * @param name The unique name of the model.
4576
 * @return A matrix transpose model.
4577
 */
4578
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_transpose(const int axis_a, const int axis_b, const char* const name);
4579
/**
4580
 * A batched matrix multiplication model.
4581
 * @param transpose_a The two axes to be transposed (swapped) in the first matrix.
4582
 * @param transpose_b The two axes to be transposed (swapped) in the second matrix.
4583
 * @param flags The flags to disable / enable certain features.
4584
 * @param name The unique name of the model.
4585
 * @return A batched matrix multiplication model.
4586
 */
4587
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_matmul(const int transpose_a[2], const int transpose_b[2], const int flags, const char* const name);
4588
/**
4589
 * A dropout model.
4590
 * @param p The probability to drop the current value.
4591
 * @param entirety Drop the whole layer with the given probability.
4592
 * @param name The unique name of the model.
4593
 * @return A dropout model.
4594
 */
4595
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_dropout(const float p, const int entirety, const char* const name);
4596
/**
4597
 * A masked fill model.
4598
 * @param eq The value to compare against in the given mask tensor.
4599
 * @param fill The value to fill into the output tensor where the mask matches eq.
4600
 * @param name The unique name of the model.
4601
 * @return A masked fill model.
4602
 */
4603
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_masked_fill(const float eq, const float fill, const char* const name);
4604
/**
4605
 * An index select model.
4606
 * @param name The unique name of the model.
4607
 * @return An index select model.
4608
 */
4609
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_index_select(const char* const name);
4610
/**
4611
 * A dictionary embedding model. This can be thought of as an index select model, but the vocabulary
4612
 * tensor is within this model itself.
4613
 * @param datatype The data type of the vocabulary.
4614
 * @param vocab_size The size of the vocabulary.
4615
 * @param embed_size The size of the embedding.
4616
 * @param is_trainable Whether the parameters of this model can be trained.
4617
 * @param name The unique name of the model.
4618
 * @return An embedding model.
4619
 */
4620
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_embedding(const int datatype, const int vocab_size, const int embed_size, const int is_trainable, const char* const name);
4621
/**
4622
 * An upsample model.
4623
 * @param type The type of upsample, whether nearest or bilinear.
4624
 * @param width_scale The scale of the width of the input.
4625
 * @param height_scale The scale of the height of the input.
4626
 * @param align_corners Whether to align corners when doing upsample.
4627
 * @param name The unique name of the model.
4628
 * @return An upsample model.
4629
 */
4630
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_upsample(const int type, const float width_scale, const float height_scale, const int align_corners, const char* const name);
4631
/**
4632
 * A sum value reducer model.
4633
 * @param axis The axis to be reduced.
4634
 * @param axis_count The size of the axis array.
4635
 * @param name The unique name of the model.
4636
 * @return A sum value reducer model.
4637
 */
4638
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_reduce_sum(const int* const axis, const int axis_count, const char* const name);
4639
/**
4640
 * A mean value reducer model.
4641
 * @param axis The axis to be reduced.
4642
 * @param axis_count The size of the axis array.
4643
 * @param name The unique name of the model.
4644
 * @return A mean value reducer model.
4645
 */
4646
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_reduce_mean(const int* const axis, const int axis_count, const char* const name);
4647
/**
4648
 * A max value reducer model.
4649
 * @param axis The axis to be reduced.
4650
 * @param axis_count The size of the axis array.
4651
 * @param name The unique name of the model.
4652
 * @return A max value reducer model.
4653
 */
4654
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_reduce_max(const int* const axis, const int axis_count, const char* const name);
4655
/**
4656
 * A min value reducer model.
4657
 * @param axis The axis to be reduced.
4658
 * @param axis_count The size of the axis array.
4659
 * @param name The unique name of the model.
4660
 * @return A min value reducer model.
4661
 */
4662
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_reduce_min(const int* const axis, const int axis_count, const char* const name);
4663
/**
4664
 * A norm2 value reducer model.
4665
 * @param axis The axis to be reduced.
4666
 * @param axis_count The size of the axis array.
4667
 * @param name The unique name of the model.
4668
 * @return A norm2 value reducer model.
4669
 */
4670
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_reduce_norm2(const int* const axis, const int axis_count, const char* const name);
4671
/**
4672
 * An argmax model.
4673
 * @param axis The axis to be reduced.
4674
 * @param name The unique name of the model.
4675
 * @return A max indices model.
4676
 */
4677
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_argmax(const int axis, const char* const name);
4678
/**
4679
 * An argmin model.
4680
 * @param axis The axis to be reduced.
4681
 * @param name The unique name of the model.
4682
 * @return A min indices model.
4683
 */
4684
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_argmin(const int axis, const char* const name);
4685
/**
4686
 * An element-wise min model.
4687
 * @param name The unique name of the model.
4688
 * @return An element-wise min model.
4689
 */
4690
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_min(const char* const name);
4691
/**
4692
 * An element-wise max model.
4693
 * @param name The unique name of the model.
4694
 * @return An element-wise max model.
4695
 */
4696
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_max(const char* const name);
4697
/**
4698
 * A Long-Short Term Memory model.
4699
 * @param masked Whether a mask tensor is provided.
4700
 * @param hidden_size The number of features in the hidden state h.
4701
 * @param proj_size The projection size applied to the hidden state h.
4702
 * @param num_layers The number of layers for RNN.
4703
 * @param bias If 0, the layer won't use bias weights.
4704
 * @param batch_first If 1, the batch dimension comes before the sequence dimension.
4705
 * @param bidirectional Enable bidirectional mode of RNN.
4706
 * @param dropout If non-zero, enable dropout at each layer of RNN.
4707
 * @param is_trainable Whether the parameters of this model can be trained.
4708
 * @param name The unique name of the model.
4709
 * @return A LSTM model.
4710
 */
4711
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_lstm(const int masked, const int hidden_size, const int proj_size, const int num_layers, const int bias, const int batch_first, const int bidirectional, const float dropout, const int is_trainable, const char* const name);
4712
/**
4713
 * Perform datatype conversion for input tensors.
4714
 * @param datatype The desired datatype.
4715
 * @param ref_to_last If there are two inputs to the model, use the last one as a datatype reference.
4716
 * @param name The unique name of the model.
4717
 * @return A model that does data conversion.
4718
 */
4719
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_datatype_conversion(const int datatype, const int ref_to_last, const char* const name);
4720
/**
4721
 * Clamp input tensor to a range.
4722
 * @param min The lower bound; pass NAN to leave it unbounded.
4723
 * @param max The upper bound; pass NAN to leave it unbounded.
4724
 * @param name The unique name of the model.
4725
 * @return A model that does clamp.
4726
 */
4727
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_clamp(const float min, const float max, const char* const name);
4728
/**
4729
 * A parameter that can be initialized / loaded.
4730
 * @param params The tensor shape / information about this parameter.
4731
 * @param init_bound The bound for the initial values, drawn from a uniform distribution.
4732
 * @param name The unique name of the model.
4733
 * @param is_trainable Whether the parameters of this model can be trained.
4734
 * @return A model that can be applied and return the weight.
4735
 */
4736
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_parameter(const ccv_nnc_tensor_param_t params, const float init_bound, const int is_trainable, const char* const name);
4737
/**
4738
 * A scalar value that can be used.
4739
 * @param type The type of this scalar.
4740
 * @param format The format of this scalar.
4741
 * @param datatype The datatype of this scalar.
4742
 * @param value The value in float.
4743
 * @param name The unique name of the model.
4744
 * @return A model that can be applied and return the scalar.
4745
 */
4746
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_scalar(const int type, const int format, const int datatype, const float value, const char* const name);
4747
/**
4748
 * An empty variable that can be used. This is usually paired with ccv_cnnp_move to make this "input"
4749
 * the destination. It is also different from ccv_cnnp_parameter because a parameter will be persisted.
4750
 * @param params The parameters for the tensor.
4751
 * @param name The unique name of the model.
4752
 * @return A model that can be applied and return the variable.
4753
 */
4754
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_variable(const ccv_nnc_tensor_param_t params, const char* const name);
4755
/**
4756
 * A special model that takes two inputs and copies the value of the first input into the second. The
4757
 * second input is then returned as the output. This is special because it lets you violate the
4758
 * single static assignment rule, which is otherwise enforced. It earns its place because it
4759
 * enables hand-written optimizations that would otherwise require you to either implement a new
4760
 * optimization pass in nnc (difficult to do correctly) or, where that is not possible with CNNP
4761
 * models, drop down to the Level-3 API, which may not be exposed in high-level language bindings
4762
 * such as s4nnc.
4763
 * @param name The unique name of the model.
4764
 * @return A model that can be applied and copies first input to the second.
4765
 */
4766
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_move(const char* const name);
4767
/**
4768
 * If the input is not contiguous, this model will make it contiguous. Normally, such graph operation
4769
 * will be optimized away when calling ccv_nnc_symbolic_graph_simplify. In this case, we will disable
4770
 * such optimization on the generated node. If the input is already contiguous, the output of this model
4771
 * is the same as the input, hence the op is skipped.
4772
 * @param name The unique name of the model.
4773
 * @return A model that can be applied and making the input contiguous.
4774
 */
4775
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_contiguous(const char* const name);
4776
/**
4777
 * If the input is a reshape, this model will make it a copy. Normally, such graph operation
4778
 * will be optimized away when calling ccv_nnc_symbolic_graph_simplify. In this case, we will disable
4779
 * such optimization on the generated node. This is useful mainly for memory conservation. In case you
4780
 * are working with a reshape of part of the tensor, making an explicit copy makes sure the original
4781
 * tensor is not retained, therefore giving the compiler more optimization opportunities for
4782
 * memory conservation.
4783
 * @param name The unique name of the model.
4784
 * @return A model that can be applied and making a copy of the input.
4785
 */
4786
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_copy(const char* const name);
4787
/**
4788
 * Apply the scaled dot product attention to input. Accepting input in the form of (q, k, v)
4789
 * or (q, k, v, attn_mask) if has_attn_mask is 1.
4790
 * @param scale The scale to be applied to the qk dot product.
4791
 * @param is_causal Whether to apply a causal mask to it. If both attn_mask and is_causal are supplied, we will cut attn_mask to the upper-right triangle.
4792
 * @param has_attn_mask Whether the input would accept a 4th parameter the attention mask.
4793
 * @param flags Which precision the attention computation should be run at (FP16 or FP32).
4794
 * @param fused_unify_head_weights Whether we also have unifying head weight fused into it. The output would be in shape of (N, S, H * Ev).
4795
 * @param no_bias Whether we have bias or not for the unifying head output.
4796
 * @param is_trainable Whether or not it is trainable (if weight / bias provided).
4797
 * @param name The unique name of the model.
4798
 * @return A model that can apply scaled dot product attention compute.
4799
 */
4800
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_scaled_dot_product_attention(const float scale, const int is_causal, const int has_attn_mask, const int flags, const int fused_unify_head_weights, const int no_bias, const int is_trainable, const char* const name);
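For example, causal attention with the conventional 1 / sqrt(head_dim) scale, no mask input and no fused output projection can be sketched as:

#include <math.h>

const int head_dim = 64; // hypothetical head dimension
ccv_cnnp_model_t* const attn = ccv_cnnp_scaled_dot_product_attention(
	1. / sqrtf(head_dim), 1 /* is_causal */, 0 /* has_attn_mask */, 0 /* flags */,
	0 /* no fused head weights */, 1 /* no_bias */, 0 /* is_trainable */, "attn");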
4801
/**
4802
 * The function prototype to call during the model execution at this position.
4803
 */
4804
typedef void (*ccv_cnnp_model_debug_f)(ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_stream_context_t* const stream_context, void* const context);
4805
/**
4806
 * The function prototype to destruct the context.
4807
 */
4808
typedef void (*ccv_cnnp_model_debug_context_deinit_f)(void* const context);
4809
/**
4810
 * The function prototype to copy the context.
4811
 */
4812
typedef void* (*ccv_cnnp_model_debug_context_copy_f)(void* const context);
4813
/**
4814
 * A special model that takes n inputs and outputs the same values. This is a special model because it
4815
 * generates a graph that violates the single static assignment rule by giving the outputs the same symbols
4816
 * as the inputs. It also inserts a custom op that allows you to intercept the model execution and possibly
4817
 * output useful information from it (i.e. debug print tensors, generate stats like max / min / NaN
4818
 * etc.). This is safe to insert anywhere because it doesn't impact the graph execution process, but
4819
 * you are also advised not to use this method to modify the tensors during execution. There will
4820
 * be another method for you to insert a custom op in the model.
4821
 * @param func The func to call during the model execution.
4822
 * @param context The context object to be passed along the callback.
4823
 * @param deinit The deinit method to be used to free up the context.
4824
 * @param copy The copy method to make a duplicate of the context.
4825
 * @param name The unique name of the model.
4826
 * @return A model that can be applied and passes its inputs through while invoking the callback.
4827
 */
4828
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_debug(ccv_cnnp_model_debug_f func, void* const context, ccv_cnnp_model_debug_context_deinit_f deinit, ccv_cnnp_model_debug_context_copy_f copy, const char* const name);
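A sketch of a probe that prints per-tensor min / max, assuming CPU-resident FP32 tensors (ccv_nnc_tensor_count is the element-count helper from the Level-1 API):

#include <stdio.h>

static void print_stats(ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_stream_context_t* const stream_context, void* const context)
{
	int i;
	for (i = 0; i < input_size; i++)
	{
		const ccv_nnc_tensor_t* const t = inputs[i];
		const size_t count = ccv_nnc_tensor_count(t->info);
		float min = t->data.f32[0], max = t->data.f32[0]; // Assumes FP32 data on CPU.
		size_t j;
		for (j = 1; j < count; j++)
		{
			min = t->data.f32[j] < min ? t->data.f32[j] : min;
			max = t->data.f32[j] > max ? t->data.f32[j] : max;
		}
		printf("input %d: min %g max %g\n", i, min, max);
	}
}

// Insert anywhere in a model graph; inputs pass through unchanged:
ccv_cnnp_model_t* const probe = ccv_cnnp_debug(print_stats, 0, 0, 0, "probe");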
4829
/**
4830
 * A sort model. The results are two tensors: values and indices.
4831
 * @param along_axis Sort along which axis.
4832
 * @param descending Whether sort by descending order.
4833
 * @param name The unique name of the model.
4834
 * @return A sort model.
4835
 */
4836
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_sort(const int along_axis, const int descending, const char* const name);
4837
/**
4838
 * A partition model. The results are two tensors: values and indices.
4839
 * @param kth Take the kth elements.
4840
 * @param along_axis Partition along which axis.
4841
 * @param descending Whether partition by descending order.
4842
 * @param name The unique name of the model.
4843
 * @return A partition model.
4844
 */
4845
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_partition(const int kth, const int along_axis, const int descending, const char* const name);
4846
/**
4847
 * A unique consecutive model. Otherwise known as run-length encoding.
4848
 * @param bincount How many unique consecutive elements there are, 0 to match the original.
4849
 * @param name The unique name of the model.
4850
 * @return A unique consecutive model.
4851
 */
4852
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_unique_consecutive(const int bincount, const char* const name);
4853
/**
4854
 * A scatter add model.
4855
 * @param bincount How many original elements there will be; it needs to be non-zero.
4856
 * @param name The unique name of the model.
4857
 * @return A scatter add model.
4858
 */
4859
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_scatter_add(const int bincount, const char* const name);
4860
/**
4861
 * A segmented dense layer model. Note that the inputs would be activation, indices and count.
4862
 * @param segments How many segments (experts) in this layer.
4863
 * @param count The output dimension.
4864
 * @param no_bias Whether to omit the bias term or not.
4865
 * @param flags The flags to disable / enable certain features.
4866
 * @param is_trainable Whether the parameters of this model can be trained.
4867
 * @param name The unique name of the model.
4868
 * @return A segmented dense layer model.
4869
 */
4870
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_segmented_dense(const int segments, const int count, const int no_bias, const int flags, const int is_trainable, const char* const name);
4871
4872
/** @} */
4873
4874
/** @} */
4875
4876
#endif