Coverage Report

Created: 2025-05-28 16:14

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/home/liu/actions-runner/_work/ccv/ccv/lib/nnc/ccv_nnc.h
Line
Count
Source
1
/**********************************************************
2
 * C-based/Cached/Core Computer Vision Library
3
 * Liu Liu, 2010-02-01
4
 **********************************************************/
5
6
/**********************************************************
7
 * CCV - Neural Network Collection
8
 **********************************************************/
9
10
#ifndef GUARD_ccv_nnc_h
11
#define GUARD_ccv_nnc_h
12
13
#include "ccv.h"
14
#include <stddef.h>
15
16
// These are generated by cmd/build-cmd.rb
17
#include "cmd/ccv_nnc_cmd.h"
18
#include "cmd/ccv_nnc_backend.h"
19
20
/**
21
 * @defgroup level_0 Level-0 API
22
 * @{
23
 */
24
25
/**
26
 * Initialize the library.
27
 */
28
void ccv_nnc_init(void);
29
30
enum {
31
  CCV_NNC_DISABLE_MIXED_MPS_GEMM = 0x1,
32
  CCV_NNC_DISABLE_MIXED_MPS_SOFTMAX = 0x2,
33
  CCV_NNC_DISABLE_MMAP_MTL_BUFFER = 0x4,
34
  CCV_NNC_DISABLE_METAL_FLASH_ATTENTION = 0x8,
35
  CCV_NNC_DISABLE_MFA_GEMM = 0x10,
36
};
37
/**
38
 * Enable system-wide specific flag.
39
 */
40
void ccv_nnc_enable_flag(uint64_t flag);
41
/**
42
 * Disable system-wide specific flag.
43
 */
44
void ccv_nnc_disable_flag(uint64_t flag);
45
/**
46
 * Get system-wide specific flag to check.
47
 */
48
uint64_t ccv_nnc_flags(void);
49
50
/** @} */
51
52
/**
53
 * @defgroup level_1 Level-1 API
54
 * @{
55
 */
56
57
/**
58
 * @defgroup level_1_cmd Commands
59
 * @{
60
 */
61
enum {
62
  // Attributes that enable symbolic graph simplification
63
  CCV_NNC_CMD_ATTR_PASSTHROUGH  = 0x01, /**< This doesn't compute anything, but pass the first n tensors to the output (useful for backprop that is identical). */
64
  CCV_NNC_CMD_ATTR_OUTPUT_ONES  = 0x02, /**< All the output tensors are 1s (unit). */
65
  CCV_NNC_CMD_ATTR_NULL_IS_ONES = 0x04, /**< Accept nullptr input as if these are tensors with 1s (unit). */
66
};
67
68
// Flags pass into cmd when executing.
69
enum {
70
  CCV_NNC_ACCUMULATE_OUTPUT = 0x01, /**< Enable accumulate outputs (unsupported). */
71
  CCV_NNC_ZERO_MEMORY_ALLOC = 0x02, /**< Don't allocate any extra memory for this operation. */
72
};
73
74
enum {
75
  CCV_NNC_EXEC_SUCCESS   = 0, /**< Successfully executed the command. */
76
  CCV_NNC_EXEC_INVALID   = -1, /**< Invalid inputs. */
77
  CCV_NNC_EXEC_NO_KERNEL = -2, /**< No kernel available for a given command / backend. */
78
  CCV_NNC_EXEC_OOM       = -3, /**< Out of memory error. */
79
};
80
81
enum {
82
  CCV_NNC_MSE_REDUCE_MEAN = 0, /**< Reduce with mean when computing MSE loss. */
83
  CCV_NNC_MSE_REDUCE_SUM = 1, /**< Reduce with sum when computing MSE loss. */
84
};
85
86
enum {
87
  CCV_NNC_HISTOGRAM_EVEN = 0, /**< The bins are evenly distributed from min to max. */
88
  CCV_NNC_HISTOGRAM_LOGARITHMIC = 1, /**< The bins are distributed follow exponentially curve, growing from min to max with ratio. */
89
  CCV_NNC_HISTOGRAM_BINS = 2, /**< The bins range will be supplied, such as [0, 2, 3, 10]. For result, [-inf, 0, 2, 3, 10, inf] implied. */
90
};
91
92
enum {
93
  CCV_NNC_UPSAMPLE_NEAREST = 0, /**< Using nearest value. */
94
  CCV_NNC_UPSAMPLE_BILINEAR = 1, /**< Using bilinear interpolation. */
95
};
96
97
enum {
98
  CCV_NNC_PAD_ZERO = 0, /**< Pad 0s. */
99
  CCV_NNC_PAD_REPLICATE = 1, /**< Pad by replicating the edge. */
100
};
101
102
enum {
103
  CCV_NNC_GEMM_32F = 0x1, /**< For GEMM (or similar op), whether prefer to use FP32 for accumulator. */
104
  CCV_NNC_GEMM_32TF = 0x2, /**< For GEMM (or similar op), whether prefer to use TF32 for accumulator. */
105
  CCV_NNC_GEMM_16F = 0x4, /**< For GEMM (or similar op), whether prefer to use FP16 for accumulator. */
106
};
107
108
/**
109
 * Parameters for command.
110
 */
111
typedef struct {
112
  struct {
113
    int dim[CCV_NNC_MAX_DIM_ALLOC]; /**< [size.dim] The window size for the layer. For full connect layer, it is 1 because it is 1x1 convolutional layer with count of filters */
114
  } size;
115
  union {
116
    struct {
117
      int count; /**< [convolution.count] The number of filters for convolutional layer. */
118
      int groups; /**< [convolution.groups] The number of groups for convolutional layer. */
119
      int dilation[CCV_NNC_MAX_DIM_ALLOC]; /**< [convolution.dilation[]] The dilation factor for convolutional layer. Default to 1. */
120
    } convolution;
121
    struct {
122
      int count; /**< [convolution_transpose.count] The number of filters for convolutional layer. */
123
      int groups; /**< [convolution_transpose.groups] The number of groups for convolutional layer. */
124
      int dilation[CCV_NNC_MAX_DIM_ALLOC]; /**< [convolution_transpose.dilation[]] The dilation factor for convolutional layer. Default to 1. */
125
      int output_padding; /**< [convolution_transpose.output_padding] The output padding to resolve ambiguity when treat this as inverse of convolution. */
126
    } convolution_transpose;
127
    struct {
128
      int hidden_size; /**< [rnn.hidden_size] The number of features in the hidden state h. */
129
      int proj_size; /**< [rnn.proj_size] The number of features in the hidden state h. */
130
      int num_layers; /**< [rnn.num_layers] The number of layers for RNN. */
131
      int bias; /**< [rnn.bias] If 0, the layer won't use bias weights. */
132
      int batch_first; /**< [rnn.batch_first] If 1, will batch before sequence. */
133
      int bidirectional; /**< [rnn.bidrectional] Enable bidirectional mode of RNN.*/
134
      float dropout; /**< [rnn.dropout] If non-zero, enable dropout at each layer of RNN.*/
135
      int is_test; /**< [rnn.is_test] Whether running this kernel in test mode or not. */
136
    } rnn;
137
    struct {
138
      int reserved; /**< [pool.reserved] A reserved field. */
139
    } pool;
140
    struct {
141
      float kappa; /**< [rnorm.kappa] As of b[i] = a[i] / (rnorm.kappa + rnorm.alpha * sum(a, i - rnorm.size / 2, i + rnorm.size / 2)) ^ rnorm.beta */
142
      float alpha; /**< [rnorm.alpha] See **rnorm.kappa**. */
143
      float beta; /**< [rnorm.beta] See **rnorm.kappa**. */
144
    } rnorm;
145
    struct {
146
      int axis[CCV_NNC_MAX_DIM_ALLOC]; /**< [bnorm.axis[]] The axis selected to compute mean / variance. */
147
      int count; /**< [bnorm.count] The number of axis selected. */
148
      float epsilon; /**< [bnorm.epsilon] The epsilon for standard derivation. */
149
      int is_test; /**< [bnorm.is_test] Whether in test mode. */
150
      float momentum; /**< [bnorm.momentum] running_mean = running_mean * momentum + mean * (1 - momentum). */
151
    } bnorm;
152
    struct {
153
      int axis[CCV_NNC_MAX_DIM_ALLOC]; /**< [lnorm.axis[]] The axis selected to compute mean / variance. */
154
      int count; /**< [lnorm.count] The number of axis selected. */
155
      float epsilon; /**< [lnorm.epsilon] The epsilon for standard derivation. */
156
      int elementwise_affine; /**< [lnorm.elementwise_affine] Whether it supports scale / bias. */
157
    } lnorm;
158
    struct {
159
      int group_axis; /**< [gnorm.group_axis] The axis selected to be grouped. */
160
      int reduce_axis[CCV_NNC_MAX_DIM_ALLOC]; /**< [gnorm.reduce_axis[]] The other axis selected to compute mean / variance. */
161
      int reduce_count; /**< [gnorm.reduce_count] The number of other axis selected. */
162
      int groups; /**< [gnorm.group] The number of groups that separates channels. */
163
      float epsilon; /**< [gnorm.epsilon] The epsilon for standard derivation. */
164
      int elementwise_affine; /**< [lnorm.elementwise_affine] Whether it supports scale / bias. */
165
    } gnorm;
166
    struct {
167
      int axis[CCV_NNC_MAX_DIM_ALLOC]; /**< [rmsnorm.axis[]] The axis selected to compute mean / variance. */
168
      int count; /**< [rmsnorm.count] The number of axis selected. */
169
      float epsilon; /**< [rmsnorm.epsilon] The epsilon for standard derivation. */
170
    } rmsnorm;
171
    struct {
172
      int nesterov; /**< [sgd.nesterov] Nesterov accelerated gradient. */
173
      float rate; /**< [sgd.rate] The learning rate. */
174
      float scale; /**< [sgd.scale] The scale to be applied to the gradient before doing any minimization. */
175
      float decay; /**< [sgd.decay] This is the weight decay parameter, which represents L2 regularization after momentum applied. */
176
      float momentum; /**< [sgd.momentum] For SGD, this follows http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf. */
177
      float dampening; /**< [sgd.dampening] This usually == momentum, however, it can be changed. */
178
    } sgd;
179
    struct {
180
      int step; /**< [adam.step] Step t in adam optimizer. */
181
      float rate; /**< [adam.rate] The learning rate. */
182
      float scale; /**< [adam.scale] The scale to be applied to the gradient before doing any minimization. */
183
      float beta1; /**< [adam.beta1] The beta1 hyper-parameter in adam optimizer. */
184
      float beta2; /**< [adam.beta2] The beta2 hyper-parameter in adam optimizer. */
185
      float decay; /**< [adam.decay] This is the weight decay parameter, which represents L2 regularization. */
186
      float epsilon; /**< [adam.epsilon] The epsilon for standard derivation. */
187
      int amsgrad; /**< [adam.amsgrad] Whether use amsgrad. */
188
    } adam;
189
    struct {
190
      int step; /**< [lamb.step] Step t in lamb optimizer. */
191
      float rate; /**< [lamb.rate] The learning rate. */
192
      float scale; /**< [lamb.scale] The scale to be applied to the gradient before doing any minimization. */
193
      float beta1; /**< [lamb.beta1] The beta1 hyper-parameter in lamb optimizer. */
194
      float beta2; /**< [lamb.beta2] The beta2 hyper-parameter in lamb optimizer. */
195
      float decay; /**< [lamb.decay] This is the weight decay parameter, which represents L2 regularization. */
196
      float epsilon; /**< [lamb.epsilon] The epsilon for standard derivation. */
197
    } lamb;
198
    struct {
199
      float rate; /**< [rmsprop.rate] The learning rate. */
200
      float scale; /**< [rmsprop.scale] The scale to be applied to the gradient before doing any minimization. */
201
      float decay; /**< [rmsprop.decay] This is the weight decay parameter, which represents L2 regularization after momentum applied. */
202
      float alpha; /**< [rmsprop.momentum] The alpha hyper-parameter. */
203
      float momentum; /**< [rmsprop.momentum] The momentum hyper-parameter. */
204
      float epsilon; /**< [rmsprop.epsilon] The epsilon for standard derivation. */
205
    } rmsprop;
206
    struct {
207
      int transpose_a[2]; /**< [blas.transpose_a[2]] The axis we'd like to transpose for input a. */
208
      int transpose_b[2]; /**< [blas.transpose_b[2]] The axis we'd like to transpose for input b. */
209
      float a[3]; /**< [blas.a[3]] BLAS scalars. */
210
      int flags; /**< [blas.flags] Auxiliary flags to enable certain features for BLAS operation. */
211
    } blas;
212
    struct {
213
      float trim0; /**< [label_smoothing.trim0] The smoothed label for 0. */
214
      float trim1; /**< [label_smoothing.trim1] The smoothed label for 1. */
215
    } label_smoothing;
216
    struct {
217
      float pos_weight; /**< [binary_crossentropy.pos_weight] The pos_weight on the loss: -(pos_weight * y * log(x) + (1 - y) * log(1 - x)) */
218
    } binary_crossentropy;
219
    struct {
220
      float beta; /**< [smooth_l1.beta] The beta on the smooth L1 loss (or Huber loss) */
221
    } smooth_l1;
222
    struct {
223
      int reduce_op; /**< [mse.reduce_op] Whether reduce with mean or with sum */
224
    } mse;
225
    struct {
226
      int tanh; /**< [gelu.tanh] Use tanh approximation */
227
    } gelu;
228
    struct {
229
      int axis[CCV_NNC_MAX_DIM_ALLOC]; /**< [reduce.axis[]] The axis selected to reduce. */
230
      int count; /**< [reduce.count] The number of axis selected. */
231
    } reduce;
232
    struct {
233
      int axis[2]; /**< [transpose.axis[2]] The axis we'd like to transpose for input. */
234
    } transpose;
235
    struct {
236
      float p; /**< [dropout.p] Dropout probability. */
237
      int entirety; /**< [dropout.entirety] Drop the whole layer with the given probability. */
238
    } dropout;
239
    struct {
240
      int type; /**< [upsample.type] 0 - nearest, 1 - bilinear. */
241
      float width_scale; /**< [upsample.width_scale] scale for width parameter. It is between 1 and 2 at the moment. */
242
      float height_scale; /**< [upsample.height_scale] scale for height parameter. It is between 1 and 2 at the moment. */
243
      int align_corners; /**< [upsample.align_corners] Whether to scale to align corners. Thus, for 0...1, if false, it will align to -0.25, 0.25, 0.75, 1.25, if true, it will align to 0, 0.3333, 0.6666, 1.0 */
244
    } upsample;
245
    struct {
246
      float min; /**< [clamp.min] The minimum, NaN is no min. */
247
      float max; /**< [clamp.max] The maximum, NaN is no max. */
248
    } clamp;
249
    struct {
250
      float iou_threshold; /**< [nms.iou_threshold] Threshold between 0 to 1 for IoU threshold. */
251
    } nms;
252
    struct {
253
      int type; /**< [histogram.type] The type, can be even, logarithmic, or bins. */
254
      int bins; /**< [histogram.bins] The number of bins, only applied to even. */
255
      float min; /**< [histogram.min] The minimal number, for even or logarithmic. */
256
      float max; /**< [histogram.min] The maximal number, for even or logarithmic. */
257
      float rate; /**< [histogram.ratio] The rate from min to max, only applied to logarithmic. */
258
    } histogram;
259
    struct {
260
      float negative_slope; /**< [leaky_relu.negative_slop] The negative slope to be applied when activation < 0. */
261
    } leaky_relu;
262
    struct {
263
      float scale; /**< [scaled_dot_product_attention.scale] The scale we multiple to the dot product of Q & K */
264
      int is_causal; /**< [scaled_dot_product_attention.is_causal] Whether we have causal matrix associated with the attention. The attention mask will be cut to triangular if provided. */
265
      int flags; /**< [scaled_dot_product_attention.flags] Which precision is preferred for accumulator, FP16 or FP32. */
266
      int deterministic; /**< [scaled_dot_product_attention.deterministic] Whether we want the attention computation to be deterministic (CUDA only). */
267
    } scaled_dot_product_attention;
268
    struct {
269
      int type; /**< [pad.type] The type of pad, can be either zeros or replicating edge. */
270
      int end[CCV_NNC_MAX_DIM_ALLOC]; /**< [pad.end] Work together with size.dim. size.dim is how much to add at the beginning and pad.end is how much to add at the end. */
271
    } pad;
272
    struct {
273
      int along_axis; /**< [sort.along_axis] Which axis to sort along with. */
274
      int descending; /**< [sort.descending] Whether sorting by descending order. */
275
    } sort;
276
    struct {
277
      int kth; /**< [partition.kth] How many items to retain after partition. */
278
      int along_axis; /**< [partition.along_axis] Which axis to partition along with. */
279
      int descending; /**< [partition.descending] Whether partitioning by descending order. */
280
    } partition;
281
    struct {
282
      int bincount; /**< [unique_consecutive.bincount] Potentially how many unique items there will be, 0 if unknown. */
283
    } unique_consecutive;
284
    struct {
285
      int bincount; /**< [scatter_add.bincount] Potentially how many unique items there will be, 0 if unknown. */
286
    } scatter_add;
287
    void* userdata;
288
  };
289
} ccv_nnc_cmd_param_t;
290
291
/*
292
 * Hints for command.
293
 */
294
typedef struct {
295
  struct {
296
    int dim[CCV_NNC_MAX_DIM_ALLOC]; /**< Stride for each dimension. */
297
  } stride;
298
  struct {
299
    int begin[CCV_NNC_MAX_DIM_ALLOC]; /**< Padding at the beginning of a dimension. */
300
    int end[CCV_NNC_MAX_DIM_ALLOC]; /**< Padding at the end of a dimension. */
301
  } border;
302
} ccv_nnc_hint_t;
303
304
/**
305
 * Opaque pointer to a stream object.
306
 */
307
typedef struct ccv_nnc_stream_context_s ccv_nnc_stream_context_t;
308
309
typedef struct ccv_nnc_cmd_vtab_s ccv_nnc_cmd_vtab_t;
310
311
typedef struct ccv_nnc_cmd_s {
312
  uint32_t cmd; /**< The identifier for command. */
313
  uint32_t backend; /**< The identifier for backend. */
314
  int algorithm; /**< The algorithm selector (as defined by backend). */
315
  ccv_nnc_cmd_param_t info; /**< The command parameters. */
316
  /**
317
   * This is for type CCV_NNC_CUSTOM_FORWARD / CCV_NNC_CUSTOM_BACKWARD
318
   */
319
  ccv_nnc_cmd_vtab_t* isa;
320
  void* data;
321
} ccv_nnc_cmd_t;
322
323
/**
324
 * For forward functions, the input tensors and output tensors can be arbitrary.
325
 * However, for backward functions (backpropagation, or gradient functions in other libs),
326
 * the input is: 0~m-1: gradient for output tensors, 1~n: input tensors for forward functions, n+1~n+m: output tensors for forward functions,
327
 * the output is: 0~n-1: output gradients w.r.t. input tensors.
328
 * Which input / output tensors can be ignored can be specified in the cmd config structs.
329
 */
330
typedef int(*ccv_nnc_cmd_exec_f)(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context);
331
332
/**
333
 * The function prototype for autotune. The only difference is the max_workspace_size.
334
 * Whoever implement this function prototype means we handled over autotune task to the
335
 * command itself, you are responsible to select the best algorithm.
336
 * @return The selected algorithm.
337
 */
338
typedef int(*ccv_nnc_cmd_autotune_f)(const ccv_nnc_cmd_t cmd, const size_t max_workspace_size, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context);
339
340
/**
341
 * The function prototype is for automatically deduce tensor shapes.
342
 */
343
344
typedef struct ccv_nnc_cmd_vtab_s {
345
  ccv_nnc_cmd_exec_f exec;
346
  void (*tensor_auto)(const ccv_nnc_cmd_t cmd, const ccv_nnc_tensor_param_t* const inputs, const int input_size, const ccv_nnc_hint_t hint, ccv_nnc_tensor_param_t* const outputs, const int output_size);
347
} ccv_nnc_cmd_vtab_t;
348
349
/** @} */
350
351
/**
352
 * @defgroup level_1_uops Micro Commands to Define Commands
353
 * @{
354
 */
355
356
/**
357
 * @page micro_jittor The concept of meta-ops in Jittor is amazing
358
 *
359
 * NNC will never do JIT. Particularly, I will never do codegen and compile at runtime, especially with static shapes.
360
 * The reason is pretty simple. JIT would be too much architectural dependent and with that, almost impossible for NNC
361
 * to be this small embeddable library that you can carry everywhere. However, this shouldn't prevent NNC to generate
362
 * proper descriptions of each command so a JIT version can be built if there are architectural support for it. In this
363
 * way, the core of NNC can be small and embeddable, but a new backend (identified by the backend attribute) can implement
364
 * more sophisticated JIT mechanism.
365
 *
366
 * More over, I need to generate some code for reference implementations, ideally from some descriptions. This is important
367
 * because with 90+ ops, having a correctly implemented command turns out to be more challenging than I expected.
368
 * Especially if I want them to be compliant with the metadata describes it (what shape it accepts, what datatype works,
369
 * whether it can accept tensor views, and how in-place tensors supported). Many of reference commands are not supporting
370
 * all datatypes and tensor views, and this has to be rectified because these are "reference commands", they must be.
371
 *
372
 * Jittor introduced to the world the idea of meta-ops. Basically, it claims every ops (or macro ops) can be break down to
373
 * 3 types of micro ops (they call them meta-ops): a reindex op that can map tensor from one dimensionality to another, an
374
 * element-wise op that does element-wise primitive math, and finally, a reduce op that can reduce along particular axis
375
 * of a tensor with some elementary math. This feels rather limited initially, but when thinking through it, I am convinced
376
 * it should be enough to describe all commands presented in NNC (this shouldn't be a surprise actually).
377
 *
378
 * Thus, the plan now is to use the meta-ops idea, implementing new micro commands that can describe other commands in
379
 * NNC. In this way, I can generate reference implementation from these descriptions and hopefully have better coverage
380
 * than my existing CPU / GPU reference implementations.
381
 *
382
 * To build on-top what Jittor did, if you need to have my dynamism in the ops, it is essential to index with the provided
383
 * tensor. With just reindex, binary operands and reduce, you cannot do that. Thus, on top of these 3, we added the 4th
384
 * micro op (meta-op) that is "select". This will be sufficient to implement ops such as masking.
385
 *
386
 */
387
388
/**
389
 * Abstract vtab for different ccv_nnc_micro_io_t.
390
 */
391
typedef struct ccv_nnc_micro_io_vtab_s ccv_nnc_micro_io_vtab_t;
392
393
enum {
394
  // These could be much more unary ops.
395
  CCV_NNC_MICRO_UNARY_OP_NEG,
396
  CCV_NNC_MICRO_UNARY_OP_LOG,
397
  CCV_NNC_MICRO_UNARY_OP_EXP,
398
};
399
400
enum {
401
  CCV_NNC_MICRO_BINARY_OP_PLUS,
402
  CCV_NNC_MICRO_BINARY_OP_MINUS,
403
  CCV_NNC_MICRO_BINARY_OP_MUL,
404
  CCV_NNC_MICRO_BINARY_OP_DIV,
405
  CCV_NNC_MICRO_BINARY_OP_MAX,
406
  CCV_NNC_MICRO_BINARY_OP_MIN,
407
  CCV_NNC_MICRO_BINARY_OP_EQUAL_TO,
408
  CCV_NNC_MICRO_BINARY_OP_LESS_THAN,
409
};
410
411
enum {
412
  CCV_NNC_MICRO_REDUCE_OP_MAX,
413
  CCV_NNC_MICRO_REDUCE_OP_MIN,
414
  CCV_NNC_MICRO_REDUCE_OP_ARGMAX,
415
  CCV_NNC_MICRO_REDUCE_OP_ARGMIN,
416
  CCV_NNC_MICRO_REDUCE_OP_MEAN, // Mean is complicated, we need a way to compute total for loops after this. It has to be done statically, and that is "interesting".
417
  CCV_NNC_MICRO_REDUCE_OP_SUM,
418
  CCV_NNC_MICRO_REDUCE_OP_PROD,
419
};
420
421
/**
422
 * Abstract micro op representation.
423
 */
424
typedef struct ccv_nnc_micro_io_s* ccv_nnc_micro_io_t;
425
426
struct ccv_nnc_micro_io_s {
427
  const ccv_nnc_micro_io_vtab_t* isa;
428
  ccv_nnc_micro_io_t* inputs;
429
  int input_size;
430
  int dimensions;
431
  int id;
432
};
433
434
typedef struct {
435
  // Type of the scalar is about precision, nothing to restrict the tensor's type. For example, we may assign a int32_t 0
436
  // to a float16 tensor element, this is perfectly fine.
437
  int type;
438
  union {
439
    unsigned char u8;
440
    int i32;
441
    ccv_float16_t f16;
442
    float f32;
443
    int64_t i64;
444
    uint64_t u64;
445
    double f64;
446
  };
447
} ccv_nnc_micro_scalar_t;
448
449
/**
450
 * Create a free-form input that represent a tensor.
451
 * @param dimensions The maximum dimension of the input.
452
 */
453
CCV_WARN_UNUSED(ccv_nnc_micro_io_t) ccv_nnc_micro_input(const int dimensions);
454
/**
455
 * Use shape and reindex expression to reindex the given tensor into a different shape.
456
 * The expressions can bind integer parameters which starts with $.
457
 *
458
 * The expression follows specific pattern, integer parameters starts with $. Dimensions are represented as dXn, such
459
 * as dA0, dA1, dA2 ... Index into the provided tensor can be represented as i0, i1, i2. These are all 0-indexed.
460
 *
461
 * Constants are supported, such as 235, 431 etc. Operators supported currently are -, +, /, *.
462
 *
463
 * Thus, for broadcast a tensor x[w, h] to y[w, h, h], it can be represented as:
464
 * shape: { "dA0", "dA1", "dA1" }, reindex: { "i0", "i1", "0" }.
465
 * For example, transpose can be represented as:
466
 * shape: { "dA1", "dA0" }, reindex: { "i1", "i0" }
467
 *
468
 * @param shape The shape expressions per axis.
469
 * @param shape_count The dimensions of the output.
470
 * @param ss The tensors to reference shape dimensions.
471
 * @param s_count The number of tensors to reference shape dimensions.
472
 * @param reindex The reindex expressions per axis.
473
 * @param reindex_count The dimensions of the input.
474
 * @param x The input for reindex operation.
475
 * @return The reindexed tensor.
476
 */
477
CCV_WARN_UNUSED(ccv_nnc_micro_io_t) ccv_nnc_micro_reindex(const char* const* const shape, const int shape_count, const ccv_nnc_micro_io_t* const ss, const int s_count, const char* const* const reindex, const int reindex_count, const ccv_nnc_micro_io_t x);
478
/**
479
 * Apply element-wise computations with one tensor.
480
 * @param op The binary operand.
481
 * @param x The input.
482
 * @return The result tensor.
483
 */
484
CCV_WARN_UNUSED(ccv_nnc_micro_io_t) ccv_nnc_micro_unary(const uint32_t op, const ccv_nnc_micro_io_t x);
485
/**
486
 * Apply pair-wise computations with two tensors. They has to match shape exactly.
487
 * @param op The binary operand.
488
 * @param left The left input.
489
 * @param right The right input.
490
 * @return The result tensor.
491
 */
492
CCV_WARN_UNUSED(ccv_nnc_micro_io_t) ccv_nnc_micro_binary(const uint32_t op, const ccv_nnc_micro_io_t left, const ccv_nnc_micro_io_t right);
493
/**
494
 * Apply reduction computation against some dimensions and generate the final reduced tensor.
495
 * @param op The reduction operand.
496
 * @param axis The axis to reduce.
497
 * @param axis_count Number of axes.
498
 * @param x The input tensor.
499
 * @return The result tensor after reduction.
500
 */
501
CCV_WARN_UNUSED(ccv_nnc_micro_io_t) ccv_nnc_micro_reduce(const uint8_t op, const int* const axis, const int axis_count, const ccv_nnc_micro_io_t x);
502
/**
503
 * Use the index tensor to select one value from the x per axis.
504
 * @param axis The axis to select.
505
 * @param x The tensor to be indexed.
506
 * @param index The integer tensor of indexes.
507
 * @return The result tensor with values selected from x with index from index tensor.
508
 */
509
CCV_WARN_UNUSED(ccv_nnc_micro_io_t) ccv_nnc_micro_select(const int axis, const ccv_nnc_micro_io_t x, const ccv_nnc_micro_io_t index);
510
/**
511
 * Return the gradient for a particular output. For example, if x is ccv_nnc_micro_unary(exp, input),
512
 * this represents the gradient of x, not the input. This method is used to generate representation
513
 * of gradients for ccv_nnc_micro_combine_new method.
514
 * @param x The tensor to take a gradient of.
515
 * @return The result tensor that represents the gradient of x.
516
 */
517
CCV_WARN_UNUSED(ccv_nnc_micro_io_t) ccv_nnc_micro_grad(const ccv_nnc_micro_io_t x);
518
/**
519
 * The combined op from micro ops.
520
 */
521
typedef struct ccv_nnc_micro_combine_s ccv_nnc_micro_combine_t;
522
/**
523
 * Combine micro ops into one, and do some optimization passes. The combined one can be then processed to generate
524
 * optimized kernels. Particularly, we can processed the combined one into C code and CUDA code as reference
525
 * implementations.
526
 * @param inputs The inputs for the combined ops.
527
 * @param input_size The number of the inputs.
528
 * @param parameters The name of the parameters, this determines the order of the these parameters.
529
 * @param parameter_size The number of parameters.
530
 * @param outputs The outputs for the combined ops.
531
 * @param output_size The number of the outputs.
532
 * @param ingrads The gradient inputs for the combined ops, including any inputs / outputs if there are any.
533
 * @param ingrad_size The number of ingrads.
534
 * @param outgrads The gradient outputs for the combined ops.
535
 * @param outgrad_size The number of outgrads.
536
 */
537
CCV_WARN_UNUSED(ccv_nnc_micro_combine_t*) ccv_nnc_micro_combine_new(const ccv_nnc_micro_io_t* const inputs, const int input_size, const char* const* const parameters, const int parameter_size, const ccv_nnc_micro_io_t* const outputs, const int output_size, const ccv_nnc_micro_io_t* const ingrads, const int ingrad_size, const ccv_nnc_micro_io_t* const outgrads, const int outgrad_size);
538
/**
539
 * Free the combined op.
540
 * @param combine The op to be freed.
541
 */
542
void ccv_nnc_micro_combine_free(ccv_nnc_micro_combine_t* const combine);
543
/**
544
 * Run combined op in interpret mode. This is only useful for debug internals. Because this is for
545
 * generic combined op, there is no hint, or flags, or stream context, or cmd.
546
 * @param combine The op.
547
 * @param cmd Choice between CMD_CUSTOM_FORWARD and CMD_CUSTOM_BACKWARD.
548
 * @param inputs The input tensors.
549
 * @param input_size The size of input tensors.
550
 * @param values The value corresponding to the parameters when call ccv_nnc_micro_combine_new.
551
 * @param parameter_size How many parameters. It must match when called ccv_nnc_micro_combine_new.
552
 * @param outputs The output tensors.
553
 * @param output_size The size of output tensors.
554
 */
555
void ccv_nnc_micro_combine_interpret(ccv_nnc_micro_combine_t* const combine, const uint32_t cmd, ccv_nnc_tensor_t* const* const inputs, const int input_size, const ccv_nnc_micro_scalar_t* const values, const int parameter_size, ccv_nnc_tensor_t* const* const outputs, const int output_size);
556
/**
557
 * Generate C code from the combined op.
558
 * @param combine The combined op to generate some C code.
559
 * @return The generated C code string.
560
 */
561
char* ccv_nnc_micro_combine_c(ccv_nnc_micro_combine_t* const combine);
562
563
/** @} */
564
565
/**
566
 * @defgroup level_1_tensor Tensors
567
 * @{
568
 */
569
570
/**
571
 * Count the dimensionality of a tensor.
572
 */
573
static inline int ccv_nnc_tensor_nd(const int dim[CCV_NNC_MAX_DIM_ALLOC])
574
2.54M
{
575
2.54M
  int i;
576
6.75M
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++4.21M
)
577
6.75M
    if (dim[i] == 0)
578
2.54M
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
2.54M
}
Unexecuted instantiation: adam.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: cifar.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: cnnp.core.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: compare.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: compression.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: concat.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: cublas.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: cudnn.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: datatype.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: dense.net.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: dynamic.graph.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: gelu.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: graph.vgg.d.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: imdb.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: index.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: lamb.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: leaky_relu.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: loss.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: lstm.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: mpsblas.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: mpsdnn.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: nccl.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: nms.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: pad.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: palettize.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: parallel.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: partition.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: random.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: reduce.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: rmsprop.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: roi_align.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: scatter_add.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: schedule.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: sgd.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: smooth_l1.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: sort.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: swish.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: symbolic.graph.vgg.d.tests.c:ccv_nnc_tensor_nd
tensor.tests.c:ccv_nnc_tensor_nd
Line
Count
Source
574
18
{
575
18
  int i;
576
36
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++18
)
577
36
    if (dim[i] == 0)
578
18
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
18
}
Unexecuted instantiation: transform.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: unique_consecutive.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: upsample.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: attention.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: autograd.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: autograd.vector.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: backward.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: batch.norm.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: broadcast.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: case_of.backward.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: case_of.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: cblas.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: complex.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: crossentropy.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: custom.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: dataframe.addons.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: dataframe.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: dropout.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: forward.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: gemm.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: gradient.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: graph.io.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: graph.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: group.norm.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: histogram.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: layer.norm.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: micro.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: minimize.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: numa.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: rand.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: rmsnorm.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: simplify.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: symbolic.graph.compile.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: symbolic.graph.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: tape.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: tensor.bind.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: tfb.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: while.backward.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: while.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: winograd.tests.c:ccv_nnc_tensor_nd
ccv_nnc_cmd.c:ccv_nnc_tensor_nd
Line
Count
Source
574
347k
{
575
347k
  int i;
576
844k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++497k
)
577
844k
    if (dim[i] == 0)
578
347k
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
347k
}
ccv_nnc_tensor.c:ccv_nnc_tensor_nd
Line
Count
Source
574
365
{
575
365
  int i;
576
1.33k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++972
)
577
1.33k
    if (dim[i] == 0)
578
365
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
365
}
ccv_nnc_tensor_io.c:ccv_nnc_tensor_nd
Line
Count
Source
574
46
{
575
46
  int i;
576
175
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++129
)
577
175
    if (dim[i] == 0)
578
46
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
46
}
Unexecuted instantiation: ccv_nnc_stream.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_micro.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_micro_core.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_micro_interpret.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_micro_simplify.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_graph.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_symbolic_graph.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_symbolic_graph_io.c:ccv_nnc_tensor_nd
ccv_nnc_symbolic_graph_compile.c:ccv_nnc_tensor_nd
Line
Count
Source
574
4.66k
{
575
4.66k
  int i;
576
10.4k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++5.79k
)
577
10.4k
    if (dim[i] == 0)
578
4.66k
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
4.66k
}
ccv_nnc_symbolic_graph_backward.c:ccv_nnc_tensor_nd
Line
Count
Source
574
2.18k
{
575
2.18k
  int i;
576
4.62k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++2.44k
)
577
4.62k
    if (dim[i] == 0)
578
2.18k
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
2.18k
}
Unexecuted instantiation: ccv_nnc_symbolic_graph_while.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_graph_while.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_tensor_tape.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_symbolic_graph_case_of.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_graph_case_of.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_symbolic_graph_minimize.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_symbolic_graph_parallel.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_symbolic_graph_simplify.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_symbolic_graph_memory_compression.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_symbolic_graph_memory_reduction.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_graph_run.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_xpu_alloc.c:ccv_nnc_tensor_nd
ccv_nnc_dynamic_graph.c:ccv_nnc_tensor_nd
Line
Count
Source
574
1.05k
{
575
1.05k
  int i;
576
2.15k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++1.10k
)
577
2.15k
    if (dim[i] == 0)
578
1.05k
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
1.05k
}
Unexecuted instantiation: ccv_nnc_dynamic_graph_alloc.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_dynamic_graph_backward.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_dynamic_graph_apply_gradients.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_dynamic_graph_minimize.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_dynamic_graph_evaluate.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_cnnp_dataframe.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_cnnp_dataframe_core.c:ccv_nnc_tensor_nd
ccv_cnnp_dataframe_addons.c:ccv_nnc_tensor_nd
Line
Count
Source
574
183k
{
575
183k
  int i;
576
730k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++546k
)
577
730k
    if (dim[i] == 0)
578
183k
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
183k
}
Unexecuted instantiation: ccv_cnnp_dataframe_csv.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_cnnp_model.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_cnnp_model_io.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_cnnp_model_core.c:ccv_nnc_tensor_nd
ccv_cnnp_model_addons.c:ccv_nnc_tensor_nd
Line
Count
Source
574
3.97k
{
575
3.97k
  int i;
576
9.65k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++5.67k
)
577
9.65k
    if (dim[i] == 0)
578
3.97k
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
3.97k
}
Unexecuted instantiation: ccv_nnc_palettize.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_cnnp_model_gradient_checkpointing.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_symbolic_graph_chain_decomposition.c:ccv_nnc_tensor_nd
ccv_nnc_adam_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
16.1k
{
575
16.1k
  int i;
576
48.2k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++32.1k
)
577
48.2k
    if (dim[i] == 0)
578
16.1k
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
16.1k
}
ccv_nnc_adamw_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
16.1k
{
575
16.1k
  int i;
576
48.2k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++32.1k
)
577
48.2k
    if (dim[i] == 0)
578
16.1k
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
16.1k
}
ccv_nnc_gemm_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
359k
{
575
359k
  int i;
576
957k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++597k
)
577
957k
    if (dim[i] == 0)
578
359k
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
359k
}
ccv_nnc_gemm_cpu_opt.c:ccv_nnc_tensor_nd
Line
Count
Source
574
1.08k
{
575
1.08k
  int i;
576
2.66k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++1.57k
)
577
2.66k
    if (dim[i] == 0)
578
1.08k
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
1.08k
}
ccv_nnc_add_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
205k
{
575
205k
  int i;
576
479k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++274k
)
577
479k
    if (dim[i] == 0)
578
205k
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
205k
}
ccv_nnc_mul_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
109k
{
575
109k
  int i;
576
220k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++111k
)
577
220k
    if (dim[i] == 0)
578
109k
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
109k
}
ccv_nnc_cmul_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
173
{
575
173
  int i;
576
569
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++396
)
577
569
    if (dim[i] == 0)
578
173
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
173
}
ccv_nnc_segmented_gemm_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
97
{
575
97
  int i;
576
318
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++221
)
577
318
    if (dim[i] == 0)
578
97
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
97
}
ccv_nnc_min_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
82
{
575
82
  int i;
576
312
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++230
)
577
312
    if (dim[i] == 0)
578
82
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
82
}
ccv_nnc_max_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
82
{
575
82
  int i;
576
312
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++230
)
577
312
    if (dim[i] == 0)
578
82
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
82
}
ccv_nnc_lssc_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
96
{
575
96
  int i;
576
448
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++352
)
577
448
    if (dim[i] == 0)
578
96
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
96
}
ccv_nnc_conv_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
10.1k
{
575
10.1k
  int i;
576
47.1k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++37.0k
)
577
47.1k
    if (dim[i] == 0)
578
10.1k
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
10.1k
}
ccv_nnc_conv_cpu_opt.c:ccv_nnc_tensor_nd
Line
Count
Source
574
3.13k
{
575
3.13k
  int i;
576
14.8k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++11.7k
)
577
14.8k
    if (dim[i] == 0)
578
3.13k
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
3.13k
}
ccv_nnc_conv_transpose_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
40
{
575
40
  int i;
576
176
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++136
)
577
176
    if (dim[i] == 0)
578
40
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
40
}
ccv_nnc_dropout_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
24
{
575
24
  int i;
576
72
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++48
)
577
72
    if (dim[i] == 0)
578
24
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
24
}
ccv_nnc_ew_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
765k
{
575
765k
  int i;
576
1.89M
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++1.13M
)
577
1.89M
    if (dim[i] == 0)
578
765k
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
765k
}
Unexecuted instantiation: ccv_nnc_gelu_cpu_ref.c:ccv_nnc_tensor_nd
ccv_nnc_histogram_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
4
{
575
4
  int i;
576
20
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++16
)
577
20
    if (dim[i] == 0)
578
4
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
4
}
ccv_nnc_index_select_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
33
{
575
33
  int i;
576
84
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++51
)
577
84
    if (dim[i] == 0)
578
33
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
33
}
ccv_nnc_reduce_isnan_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
21
{
575
21
  int i;
576
55
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++34
)
577
55
    if (dim[i] == 0)
578
21
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
21
}
ccv_nnc_lamb_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
48
{
575
48
  int i;
576
96
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++48
)
577
96
    if (dim[i] == 0)
578
48
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
48
}
Unexecuted instantiation: ccv_nnc_leaky_relu_cpu_ref.c:ccv_nnc_tensor_nd
ccv_nnc_binary_crossentropy_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
194
{
575
194
  int i;
576
550
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++356
)
577
550
    if (dim[i] == 0)
578
194
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
194
}
ccv_nnc_categorical_crossentropy_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
35
{
575
35
  int i;
576
93
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++58
)
577
93
    if (dim[i] == 0)
578
35
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
35
}
ccv_nnc_mse_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
2.19k
{
575
2.19k
  int i;
576
4.48k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++2.28k
)
577
4.48k
    if (dim[i] == 0)
578
2.19k
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
2.19k
}
ccv_nnc_smooth_l1_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
63
{
575
63
  int i;
576
177
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++114
)
577
177
    if (dim[i] == 0)
578
63
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
63
}
ccv_nnc_nms_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
18
{
575
18
  int i;
576
48
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++30
)
577
48
    if (dim[i] == 0)
578
18
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
18
}
ccv_nnc_batch_norm_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
438
{
575
438
  int i;
576
1.41k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++975
)
577
1.41k
    if (dim[i] == 0)
578
438
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
438
}
ccv_nnc_layer_norm_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
208
{
575
208
  int i;
576
1.04k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++832
)
577
1.04k
    if (dim[i] == 0)
578
208
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
208
}
ccv_nnc_group_norm_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
331
{
575
331
  int i;
576
1.57k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++1.24k
)
577
1.57k
    if (dim[i] == 0)
578
331
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
331
}
ccv_nnc_rmsnorm_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
93
{
575
93
  int i;
576
465
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++372
)
577
465
    if (dim[i] == 0)
578
93
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
93
}
ccv_nnc_pad_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
105
{
575
105
  int i;
576
371
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++266
)
577
371
    if (dim[i] == 0)
578
105
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
105
}
ccv_nnc_partition_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
117
{
575
117
  int i;
576
369
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++252
)
577
369
    if (dim[i] == 0)
578
117
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
117
}
ccv_nnc_max_pool_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
4.25k
{
575
4.25k
  int i;
576
21.1k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++16.9k
)
577
21.1k
    if (dim[i] == 0)
578
4.25k
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
4.25k
}
ccv_nnc_avg_pool_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
5.47k
{
575
5.47k
  int i;
576
27.2k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++21.8k
)
577
27.2k
    if (dim[i] == 0)
578
5.47k
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
5.47k
}
Unexecuted instantiation: ccv_nnc_rand_uniform_cpu_ref.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_rand_normal_cpu_ref.c:ccv_nnc_tensor_nd
ccv_nnc_reduce_sum_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
78.6k
{
575
78.6k
  int i;
576
236k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++158k
)
577
236k
    if (dim[i] == 0)
578
78.6k
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
78.6k
}
ccv_nnc_reduce_mean_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
41
{
575
41
  int i;
576
112
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++71
)
577
112
    if (dim[i] == 0)
578
41
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
41
}
ccv_nnc_reduce_max_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
41
{
575
41
  int i;
576
99
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++58
)
577
99
    if (dim[i] == 0)
578
41
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
41
}
ccv_nnc_reduce_min_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
21
{
575
21
  int i;
576
59
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++38
)
577
59
    if (dim[i] == 0)
578
21
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
21
}
ccv_nnc_reduce_norm2_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
115
{
575
115
  int i;
576
299
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++184
)
577
299
    if (dim[i] == 0)
578
115
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
115
}
ccv_nnc_argmax_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
8
{
575
8
  int i;
576
27
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++19
)
577
27
    if (dim[i] == 0)
578
8
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
8
}
ccv_nnc_argmin_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
6
{
575
6
  int i;
576
21
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++15
)
577
21
    if (dim[i] == 0)
578
6
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
6
}
Unexecuted instantiation: ccv_nnc_relu_cpu_ref.c:ccv_nnc_tensor_nd
ccv_nnc_rmsprop_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
16.0k
{
575
16.0k
  int i;
576
48.0k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++32.0k
)
577
48.0k
    if (dim[i] == 0)
578
16.0k
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
16.0k
}
ccv_nnc_roi_align_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
104
{
575
104
  int i;
576
406
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++302
)
577
406
    if (dim[i] == 0)
578
104
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
104
}
ccv_nnc_scaled_dot_product_attention_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
145
{
575
145
  int i;
576
710
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++565
)
577
710
    if (dim[i] == 0)
578
145
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
145
}
ccv_nnc_scatter_add_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
27
{
575
27
  int i;
576
68
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++41
)
577
68
    if (dim[i] == 0)
578
27
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
27
}
ccv_nnc_sgd_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
220k
{
575
220k
  int i;
576
570k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++349k
)
577
570k
    if (dim[i] == 0)
578
220k
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
220k
}
Unexecuted instantiation: ccv_nnc_sigmoid_cpu_ref.c:ccv_nnc_tensor_nd
ccv_nnc_sigmoid_binary_crossentropy_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
238
{
575
238
  int i;
576
698
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++460
)
577
698
    if (dim[i] == 0)
578
238
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
238
}
ccv_nnc_softmax_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
514
{
575
514
  int i;
576
1.04k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++531
)
577
1.04k
    if (dim[i] == 0)
578
514
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
514
}
ccv_nnc_softmax_crossentropy_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
1.24k
{
575
1.24k
  int i;
576
3.12k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++1.87k
)
577
3.12k
    if (dim[i] == 0)
578
1.24k
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
1.24k
}
ccv_nnc_sort_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
51
{
575
51
  int i;
576
129
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++78
)
577
129
    if (dim[i] == 0)
578
51
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
51
}
Unexecuted instantiation: ccv_nnc_swish_cpu_ref.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_tanh_cpu_ref.c:ccv_nnc_tensor_nd
ccv_nnc_unique_consecutive_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
18
{
575
18
  int i;
576
36
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++18
)
577
36
    if (dim[i] == 0)
578
18
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
18
}
ccv_nnc_upsample_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
114
{
575
114
  int i;
576
456
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++342
)
577
456
    if (dim[i] == 0)
578
114
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
114
}
ccv_nnc_util_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
574
310
{
575
310
  int i;
576
1.30k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++994
)
577
1.30k
    if (dim[i] == 0)
578
310
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
310
}
Unexecuted instantiation: ccv_nnc_adam.c:ccv_nnc_tensor_nd
ccv_nnc_blas.c:ccv_nnc_tensor_nd
Line
Count
Source
574
120k
{
575
120k
  int i;
576
303k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++183k
)
577
303k
    if (dim[i] == 0)
578
120k
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
120k
}
_ccv_nnc_gemm_cpu_opt.c:ccv_nnc_tensor_nd
Line
Count
Source
574
597
{
575
597
  int i;
576
1.45k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++861
)
577
1.45k
    if (dim[i] == 0)
578
597
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
597
}
_ccv_nnc_gemm_cpu_sys.c:ccv_nnc_tensor_nd
Line
Count
Source
574
40.1k
{
575
40.1k
  int i;
576
104k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++64.2k
)
577
104k
    if (dim[i] == 0)
578
40.1k
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
40.1k
}
Unexecuted instantiation: ccv_nnc_comm.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_cmp.c:ccv_nnc_tensor_nd
ccv_nnc_compression.c:ccv_nnc_tensor_nd
Line
Count
Source
574
10
{
575
10
  int i;
576
48
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++38
)
577
48
    if (dim[i] == 0)
578
10
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
10
}
_ccv_nnc_conv_cpu_4x4_3x3_winograd.c:ccv_nnc_tensor_nd
Line
Count
Source
574
380
{
575
380
  int i;
576
1.52k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++1.14k
)
577
1.52k
    if (dim[i] == 0)
578
380
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
380
}
Unexecuted instantiation: _ccv_nnc_conv_cpu_fft.c:ccv_nnc_tensor_nd
_ccv_nnc_conv_cpu_gemm.c:ccv_nnc_tensor_nd
Line
Count
Source
574
8
{
575
8
  int i;
576
32
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++24
)
577
32
    if (dim[i] == 0)
578
8
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
8
}
_ccv_nnc_conv_cpu_opt.c:ccv_nnc_tensor_nd
Line
Count
Source
574
4.46k
{
575
4.46k
  int i;
576
21.8k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++17.3k
)
577
21.8k
    if (dim[i] == 0)
578
4.46k
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
4.46k
}
ccv_nnc_convolution.c:ccv_nnc_tensor_nd
Line
Count
Source
574
13.1k
{
575
13.1k
  int i;
576
61.2k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++48.0k
)
577
61.2k
    if (dim[i] == 0)
578
13.1k
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
13.1k
}
Unexecuted instantiation: ccv_nnc_dropout.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_ew.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_gelu.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_histogram.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_index_select.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_reduce_isnan.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_lamb.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_leaky_relu.c:ccv_nnc_tensor_nd
ccv_nnc_binary_crossentropy.c:ccv_nnc_tensor_nd
Line
Count
Source
574
12
{
575
12
  int i;
576
36
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++24
)
577
36
    if (dim[i] == 0)
578
12
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
12
}
ccv_nnc_categorical_crossentropy.c:ccv_nnc_tensor_nd
Line
Count
Source
574
31
{
575
31
  int i;
576
96
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++65
)
577
96
    if (dim[i] == 0)
578
31
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
31
}
ccv_nnc_mse.c:ccv_nnc_tensor_nd
Line
Count
Source
574
18
{
575
18
  int i;
576
36
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++18
)
577
36
    if (dim[i] == 0)
578
18
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
18
}
ccv_nnc_smooth_l1.c:ccv_nnc_tensor_nd
Line
Count
Source
574
4
{
575
4
  int i;
576
12
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++8
)
577
12
    if (dim[i] == 0)
578
4
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
4
}
Unexecuted instantiation: ccv_nnc_nms.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_norm.c:ccv_nnc_tensor_nd
ccv_nnc_pad.c:ccv_nnc_tensor_nd
Line
Count
Source
574
3
{
575
3
  int i;
576
15
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++12
)
577
15
    if (dim[i] == 0)
578
3
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
3
}
Unexecuted instantiation: ccv_nnc_partition.c:ccv_nnc_tensor_nd
ccv_nnc_pool.c:ccv_nnc_tensor_nd
Line
Count
Source
574
4.07k
{
575
4.07k
  int i;
576
18.3k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++14.2k
)
577
18.3k
    if (dim[i] == 0)
578
4.07k
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
4.07k
}
Unexecuted instantiation: ccv_nnc_rand.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_reduce.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_relu.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_rmsprop.c:ccv_nnc_tensor_nd
ccv_nnc_lstm.c:ccv_nnc_tensor_nd
Line
Count
Source
574
11
{
575
11
  int i;
576
44
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++33
)
577
44
    if (dim[i] == 0)
578
11
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
11
}
Unexecuted instantiation: ccv_nnc_roi_align.c:ccv_nnc_tensor_nd
ccv_nnc_scaled_dot_product_attention.c:ccv_nnc_tensor_nd
Line
Count
Source
574
57
{
575
57
  int i;
576
285
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++228
)
577
285
    if (dim[i] == 0)
578
57
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
57
}
Unexecuted instantiation: ccv_nnc_scatter_add.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_sgd.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_sigmoid.c:ccv_nnc_tensor_nd
ccv_nnc_sigmoid_binary_crossentropy.c:ccv_nnc_tensor_nd
Line
Count
Source
574
36
{
575
36
  int i;
576
108
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++72
)
577
108
    if (dim[i] == 0)
578
36
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
36
}
Unexecuted instantiation: ccv_nnc_softmax.c:ccv_nnc_tensor_nd
ccv_nnc_softmax_crossentropy.c:ccv_nnc_tensor_nd
Line
Count
Source
574
860
{
575
860
  int i;
576
2.56k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++1.70k
)
577
2.56k
    if (dim[i] == 0)
578
860
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
860
}
Unexecuted instantiation: ccv_nnc_sort.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_swish.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_tanh.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_unique_consecutive.c:ccv_nnc_tensor_nd
ccv_nnc_upsample.c:ccv_nnc_tensor_nd
Line
Count
Source
574
12
{
575
12
  int i;
576
60
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++48
)
577
60
    if (dim[i] == 0)
578
12
      return i;
579
0
  return CCV_NNC_MAX_DIM_ALLOC;
580
12
}
Unexecuted instantiation: ccv_nnc_util.c:ccv_nnc_tensor_nd
581
582
/**
583
 * Create a new tensor.
584
 * @param ptr If 0, nnc will allocate the tensor ourselves. Otherwise, will use the memory region referenced by 'ptr'.
585
 * @param params Tensor parameters.
586
 * @param flags Reserved flags for the allocation.
587
 * @return The newly created tensor.
588
 */
589
CCV_WARN_UNUSED(ccv_nnc_tensor_t*) ccv_nnc_tensor_new(const void* const ptr, const ccv_nnc_tensor_param_t params, const int flags);
590
enum {
591
  CCV_NNC_TENSOR_MEMORY_MAP_EAGER = 0x1, /**< Load tensor mapped directly. */
592
  CCV_NNC_TENSOR_MEMORY_MAP_ON_DEMAND = 0x2, /**< Defer tensor map until read on supported devices. */
593
};
594
/**
595
 * Create a new tensor with data from a file. This will create a mmap tensor if that is preferred.
596
 * @param params Tensor parameters.
597
 * @param filename The file to load tensor content from.
598
 * @param offset The offset to the tensor content from the file.
599
 * @param flags Reserved flags for this loading.
600
 * @return The newly created tensor.
601
 */
602
CCV_WARN_UNUSED(ccv_nnc_tensor_t*) ccv_nnc_tensor_new_from_file(const ccv_nnc_tensor_param_t params, const char* const filename, const off_t offset, const int flags);
603
/**
604
 * Create a new tensor with data from a pointer. This method handles copy to GPU implicitly.
605
 * @param params Tensor parameters.
606
 * @param bufptr The pointer to load tensor content from.
607
 * @param flags Reserved flags for this loading.
608
 * @return The newly created tensor.
609
 */
610
CCV_WARN_UNUSED(ccv_nnc_tensor_t*) ccv_nnc_tensor_new_from_raw(const ccv_nnc_tensor_param_t params, const void* const bufptr, const int flags);
611
/**
612
 * Create a new tensor on stack.
613
 * @param ptr If 0, nnc will allocate the tensor ourselves. Otherwise, will use the memory region referenced by 'ptr'.
614
 * @param params Tensor parameters.
615
 * @param flags Reserved flags for the allocation.
616
 * @return The tensor struct.
617
 */
618
CCV_WARN_UNUSED(ccv_nnc_tensor_t) ccv_nnc_tensor(const void* const ptr, const ccv_nnc_tensor_param_t params, const int flags);
619
/**
620
 * Resize an existing tensor to a new dimension.
621
 * @param tensor The old tensor to be resized.
622
 * @param params Tensor parameters.
623
 * @return Potentially a new tensor, but if the size is sufficient, it will be in-place operation.
624
 */
625
CCV_WARN_UNUSED(ccv_nnc_tensor_t*) ccv_nnc_tensor_resize(ccv_nnc_tensor_t* const tensor, const ccv_nnc_tensor_param_t params);
626
/**
627
 * Pin the tensor memory for faster access on GPU.
628
 * @param tensor A tensor that we want to pin the memory.
629
 * @return 0 for success.
630
 */
631
int ccv_nnc_tensor_pin_memory(ccv_nnc_tensor_t* const tensor);
632
/**
633
 * Free a tensor object.
634
 * @param tensor The tensor to be freed.
635
 */
636
void ccv_nnc_tensor_free(ccv_nnc_tensor_t* const tensor);
637
/**
638
 * Create a tensor view. A tensor view can be non-continuous. Essentially, it provides a view into a tensor.
639
 * @param tensor The tensor that we want to view into.
640
 * @param params The tensor parameters for the tensor view.
641
 * @param ofs The offset on each of the dimension.
642
 * @param stride The stride of each dimension.
643
 * @return The newly created tensor view.
644
 */
645
CCV_WARN_UNUSED(ccv_nnc_tensor_view_t*) ccv_nnc_tensor_view_new(const ccv_nnc_tensor_t* const tensor, const ccv_nnc_tensor_param_t params, const int ofs[CCV_NNC_MAX_DIM_ALLOC], const int stride[CCV_NNC_MAX_DIM_ALLOC]);
646
/**
647
 * Create a tensor view on stack.
648
 * @param tensor The tensor that we want to view into.
649
 * @param params The tensor parameters for the tensor view.
650
 * @param ofs The offset on each of the dimension.
651
 * @param stride The line size of each dimension.
652
 * @return The tensor view struct.
653
 */
654
CCV_WARN_UNUSED(ccv_nnc_tensor_view_t) ccv_nnc_tensor_view(const ccv_nnc_tensor_t* const tensor, const ccv_nnc_tensor_param_t params, const int ofs[CCV_NNC_MAX_DIM_ALLOC], const int stride[CCV_NNC_MAX_DIM_ALLOC]);
655
/**
656
 * Free a tensor view object.
657
 * @param tensor_view The tensor view to be freed.
658
 */
659
void ccv_nnc_tensor_view_free(ccv_nnc_tensor_view_t* const tensor_view);
660
/**
661
 * Zero out a given tensor.
662
 * @param tensor The tensor to be zero out.
663
 */
664
void ccv_nnc_tensor_zero(void* const tensor);
665
/**
666
 * Compare whether two tensors are equal. This will tolerant some floating point issues follow http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm
667
 * @param a Tensor a.
668
 * @param b Tensor b.
669
 * @return 0 if equal, -1 otherwise.
670
 */
671
CCV_WARN_UNUSED(int) ccv_nnc_tensor_eq(const ccv_nnc_tensor_t* const a, const ccv_nnc_tensor_t* const b);
672
/**
673
 * Format a tensor output to string so that it can be used as debug output for other languages. This will look like:
674
 * [
675
 *   0.13, 0.44, 0.24, 0.24
676
 * ]
677
 * And format closely to what numpy looks like.
678
 * @param a The input tensor, it can be a tensor or a tensor view. It has to be accessible on CPU.
679
 * @return An allocated string that you can call ccfree to free it.
680
 */
681
CCV_WARN_UNUSED(char*) ccv_nnc_tensor_format_new(const ccv_nnc_tensor_t* const a);
682
/**
683
 * Method to decode tensor into a give buffer.
684
 * @param data The encoded data that needs to be decoded.
685
 * @param data_size The size of the encoded data.
686
 * @param datatype The expected data type of the encoded data.
687
 * @param dimensions The expected dimension for the data.
688
 * @param dimension_count The number of dimensions for the data.
689
 * @param identifier The identifier saved along the encoder (non-zero) that used to identify this decoder.
690
 * @param context The context associated with this decoder.
691
 * @param tensor_params The tensor parameters for the final container. This can be different from the expected values above.
692
 * @param tensor_out The final container for the tensor. It can be nil and you need to initialize it in that case.
693
 * @param decoded The buffer for data to be decoded.
694
 * @param decoded_size The size of the buffer to be decoded.
695
 * @return 1 if it is processed, 0 otherwise.
696
 */
697
typedef int (*ccv_nnc_tensor_io_option_decode_f)(const void* const data, const size_t data_size, const int datatype, const int* const dimensions, const int dimension_count, const unsigned int identifier, void* const context, const ccv_nnc_tensor_param_t tensor_params, ccv_nnc_tensor_t** const tensor_out, void* const decoded, size_t* const decoded_size);
698
/**
699
 * Method to encode tensor into a give buffer.
700
 * @param data The data that needs to be encoded.
701
 * @param data_size The size of the data to be encoded.
702
 * @param datatype The expected data type of the data to be encoded.
703
 * @param dimensions The expected dimension for the data.
704
 * @param dimension_count The number of dimensions for the data.
705
 * @param context The context associated with this encoder.
706
 * @param encoded The buffer for encoded data.
707
 * @param encoded_size The size of the buffer.
708
 * @param tensor_params The tensor parameters that can be modified.
709
 * @param identifier The identifier identifies this encoder (non-zero).
710
 * @return 1 if it is processed, 0 otherwise.
711
 */
712
typedef int (*ccv_nnc_tensor_io_option_encode_f)(const void* const data, const size_t data_size, const int datatype, const int* const dimensions, const int dimension_count, void* const context, void* const encoded, size_t* const encoded_size, ccv_nnc_tensor_param_t* const tensor_params, unsigned int* const identifier);
713
/**
714
 * Additional options to regulate tensor write / read behavior. For example, you can pass
715
 * encryptor / compressor to encrypt / compress the data prior to write to disk. You can
716
 * also only store reference, and use external storage for tensors.
717
 */
718
typedef struct {
719
  ccv_nnc_tensor_io_option_decode_f decode;
720
  ccv_nnc_tensor_io_option_encode_f encode;
721
  void* context;
722
} ccv_nnc_tensor_io_option_t;
723
/**
724
 * Write tensor to a SQLite database with a given name.
725
 * @param tensor The tensor.
726
 * @param handle The SQLite handle.
727
 * @param name The name to find the tensor in the database.
728
 * @param options If provided, we will use this to encode tensor data.
729
 * @return CCV_IO_FINAL for success, otherwise error.
730
 */
731
int ccv_nnc_tensor_write(const ccv_nnc_tensor_t* const tensor, void* const handle, const char* const name, const ccv_nnc_tensor_io_option_t* const options);
732
733
enum {
734
  CCV_NNC_TENSOR_READ_METADATA_ONLY = CCV_NO_DATA_ALLOC, /**< Read tensor that data is nil, with only metadata. */
735
  CCV_NNC_TENSOR_READ_CPU_MEMORY = CCV_TENSOR_CPU_MEMORY, /**< Read tensor to CPU allocated buffer. */
736
};
737
/**
738
 * Read a tensor from a SQLite database with a given name.
739
 * @param handle The SQLite handle.
740
 * @param name The name to find the tensor in the database.
741
 * @param options If provided, we will use this to decode any data that identifier != 0.
742
 * @param flags Additional flag to configure how we read tensor.
743
 * @param tensor_params If provided, we will use this to create the tensor if tensor_out is not provided.
744
 * @param tensor_out The pointer to hold the tensor. If you supply the tensor yourself, we will read the data into the existing tensor.
745
 * @return CCV_IO_FINAL for success, otherwise error.
746
 */
747
int ccv_nnc_tensor_read(void* const handle, const char* const name, const ccv_nnc_tensor_io_option_t* const options, const int flags, const ccv_nnc_tensor_param_t* const tensor_params, ccv_nnc_tensor_t** const tensor_out);
748
/** @} */
749
750
/**
751
 * @addtogroup level_1_cmd
752
 * @{
753
 */
754
755
/**
756
 * Return a high precision time unit. What this time unit is is platform specific.
757
 * @return A monotonic increasing 64-bit integer w.r.t. passing of time.
758
 */
759
uint64_t ccv_nnc_cmd_mono_time(void);
760
/**
761
 * Return UTF-8 encoded name of a given command.
762
 * @return A UTF-8 string (pointing to a static constant).
763
 */
764
CCV_WARN_UNUSED(const char*) ccv_nnc_cmd_name(const uint32_t cmd);
765
/**
766
 * Return UTF-8 encoded name of a given backend.
767
 * @return A UTF-8 string (pointing to a static constant).
768
 */
769
CCV_WARN_UNUSED(const char*) ccv_nnc_cmd_backend_name(const uint32_t backend);
770
/**
771
 * Check whether a given backend is available for a given command.
772
 * @return 1 if it is available.
773
 */
774
CCV_WARN_UNUSED(int) ccv_nnc_cmd_ok(const uint32_t cmd, const uint32_t backend);
775
/**
776
 * Create a wrapped command with parameters.
777
 * @param cmd The command identifier.
778
 * @param isa If this is a CCV_NNC_CUSTOM_FORWARD / CCV_NNC_CUSTOM_BACKWARD command, this supplies the custom functions.
779
 * @param params The parameters for the command.
780
 * @param flags A reserved field for flags.
781
 * @return A wrapped ccv_nnc_cmd_t structure.
782
 */
783
CCV_WARN_UNUSED(ccv_nnc_cmd_t) ccv_nnc_cmd(const uint32_t cmd, ccv_nnc_cmd_vtab_t* const isa, const ccv_nnc_cmd_param_t params, const int flags);
784
/**
785
 * Verify whether a hint is compatible with a given command and a given input tensor parameters / output tensor parameters.
786
 * @param hint The hint for a given command. Hint defines things such as paddings, strides etc. for a given command.
787
 * @param cmd The wrapped command.
788
 * @param a The input tensor parameters.
789
 * @param b The output tensor parameters.
790
 * @return 1 if it passes.
791
 */
792
CCV_WARN_UNUSED(int) ccv_nnc_hint_verify(const ccv_nnc_hint_t hint, const ccv_nnc_cmd_param_t cmd, const ccv_nnc_tensor_param_t a, const ccv_nnc_tensor_param_t b);
793
/**
794
 * Automatically find the best hint for a given input / output (on forward pass only).
795
 * @param cmd The wrapped command.
796
 * @param a The input tensor parameters.
797
 * @param b The output tensor parameters.
798
 * @return Best hint we can guess.
799
 */
800
CCV_WARN_UNUSED(ccv_nnc_hint_t) ccv_nnc_hint_auto(const ccv_nnc_cmd_param_t cmd, const ccv_nnc_tensor_param_t a, const ccv_nnc_tensor_param_t b);
801
/**
802
 * Automatically find the outputs for the given inputs / hint.
803
 * @param cmd The wrapped command.
804
 * @param inputs An array of input tensor parameters.
805
 * @param input_size The size of input array.
806
 * @param hint The hint for the given command.
807
 * @param outputs An array for the output tensor parameters.
808
 * @param output_size The size of the output array.
809
 */
810
void ccv_nnc_hint_tensor_auto(const ccv_nnc_cmd_t cmd, const ccv_nnc_tensor_param_t* const inputs, const int input_size, const ccv_nnc_hint_t hint, ccv_nnc_tensor_param_t* const outputs, const int output_size);
811
/**
812
 * Find a suitable backend for a given command and tensor settings.
813
 * @param cmd The wrapped command.
814
 * @param tensor_memory The tensor memory setup (whether it is CPU or GPU).
815
 * @param tensor_formats The tensor layout format (NCHW, NHWC, CHWN etc.)
816
 * @param tensor_datatypes The datatype of a given tensor (FP32 etc.)
817
 * @return The backend identifier for the selected backend.
818
 */
819
CCV_WARN_UNUSED(uint32_t) ccv_nnc_cmd_find_backend(const ccv_nnc_cmd_t cmd, const int tensor_memory, const int tensor_formats, const int tensor_datatypes);
820
/**
821
 * Run autotune to find the best kernel and configuration for the given input.
822
 * @param cmd The original wrapped command.
823
 * @param max_workspace_size The maximum memory allowed for this command to execute.
824
 * @param hint The hint for the given command.
825
 * @param flags The reserved field for flags.
826
 * @param inputs An array of input tensors.
827
 * @param input_size The size of input array.
828
 * @param outputs An array of output tensors.
829
 * @param output_size The size of output array.
830
 * @param stream_context The stream we can do the autotune on. 0 uses default stream.
831
 * @return The modified cmd that contains the updated configuration.
832
 */
833
CCV_WARN_UNUSED(ccv_nnc_cmd_t) ccv_nnc_cmd_autotune(const ccv_nnc_cmd_t cmd, const size_t max_workspace_size, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context);
834
/**
835
 * Check whether a given tensor input / output pattern can be computed by the given command.
836
 * bitmasks encode whether a given input tensor / output tensor available at a position.
837
 * @param cmd The wrapped command to check.
838
 * @param input_size The intended size of the input tensor array.
839
 * @param output_size The intended size of the output tensor array.
840
 * @param input_bitmasks The input tensor array encoding in bitmap, 0: no tensor, 1: has a tensor.
841
 * @param input_bitmask_size The size of the input bitmask array.
842
 * @param output_bitmasks The output tensor array encoding in bitmap.
843
 * @param output_bitmask_size The size of the output bitmask array.
844
 * @return 1 if the command can be executed with the given input / output pattern.
845
 */
846
CCV_WARN_UNUSED(int) ccv_nnc_cmd_bitmask(const ccv_nnc_cmd_t cmd, const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size);
847
/**
848
 * Return auxillary information related to a particular command with a particular backend.
849
 * A backend is required to be useful for this method.
850
 * @param cmd The wrapped cmmand to check auxillary information.
851
 * @return The auxillary information specific to a particular command with a particular backend.
852
 */
853
CCV_WARN_UNUSED(void*) ccv_nnc_cmd_aux(const ccv_nnc_cmd_t cmd);
854
/**
855
 * Execute a given command.
856
 * @param cmd The wrapped command to be executed.
857
 * @param hint The hint provided for the command.
858
 * @param flags A reserved field for flags.
859
 * @param inputs The input tensor array.
860
 * @param input_size The size of input tensor array.
861
 * @param outputs The output tensor array.
862
 * @param output_size The size of output tensor array.
863
 * @param stream_context The stream which the command will be executed upon.
864
 * @return CCV_NNC_EXEC_SUCCESS if succeed.
865
 */
866
int ccv_nnc_cmd_exec(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context);
867
/**
868
 * Check whether the command is a forward pass or not.
869
 * @param cmd The wrapped command.
870
 * @return 1 if it is a forward pass.
871
 */
872
CCV_WARN_UNUSED(int) ccv_nnc_cmd_is_forward(const ccv_nnc_cmd_t cmd);
873
/**
874
 * Check whether the command is a backward pass or not.
875
 * @param cmd The wrapped command.
876
 * @return 1 if it is a backward pass.
877
 */
878
CCV_WARN_UNUSED(int) ccv_nnc_cmd_is_backward(const ccv_nnc_cmd_t cmd);
879
/**
880
 * Check this command against listed attributes.
881
 * @param cmd The wrapped command.
882
 * @param flags The flags to check against the command (unsupported).
883
 * @return 1 if the flag is supported by the command.
884
 */
885
CCV_WARN_UNUSED(int) ccv_nnc_cmd_attr(const ccv_nnc_cmd_t cmd, const int flags);
886
/**
887
 * Check whether this command allow inplace operation against a particular input and output (index from 0).
888
 * @param cmd The wrapped command.
889
 * @param input_idx The index of the input tensor we want to check.
890
 * @param input_size The total number of inputs.
891
 * @param output_idx the index of the output tensor we want to check.
892
 * @param output_size The total number of outputs.
893
 * @return 1 if the input tensor can be used as the output tensor.
894
 */
895
CCV_WARN_UNUSED(int) ccv_nnc_cmd_allow_inplace(const ccv_nnc_cmd_t cmd, const int input_idx, const int input_size, const int output_idx, const int output_size);
896
/**
897
 * Check whether this command need to enforce inplace operation against a particular input and output (index from 0).
898
 * @param cmd The wrapped command.
899
 * @param input_idx The index of the input tensor we want to check.
900
 * @param input_size The total number of inputs.
901
 * @param output_idx the index of the output tensor we want to check.
902
 * @param output_size The total number of outputs.
903
 * @return 1 if the input tensor is required to be used as the output tensor.
904
 */
905
CCV_WARN_UNUSED(int) ccv_nnc_cmd_enforce_inplace(const ccv_nnc_cmd_t cmd, const int input_idx, const int input_size, const int output_idx, const int output_size);
906
/**
907
 * Set for a profiler to be on or off. Right now, this just proxy call on to cudaProfilerStart / cudaProfilerStop.
908
 * @param state 1 is on, 0 is off.
909
 */
910
void ccv_nnc_set_profiler(int state);
911
/**
912
 * Set the queue watermark when queueing up GPU commands. This is a Metal-only option.
913
 * @param > 0 is how many in-flight GPU commands can have.
914
 */
915
void ccv_nnc_set_queue_watermark(int state);
916
/**
917
 * Get the queue watermark when queueing up GPU commands. This is a Metal-only option.
918
 * @return How many in-flight GPU commands can have.
919
 */
920
CCV_WARN_UNUSED(int) ccv_nnc_queue_watermark(void);
921
/**
922
 * Set the device mapping to use custom order for device rather than driver imposed order. This is helpful
923
 * to manage code where which GPU to use have no control over. The previous permutation is cleared up on
924
 * each call and you can set 0 size device map to clear up all custom mapping.
925
 * @param type Currently, only CCV_NNC_STREAM_CONTEXT_GPU on NVIDIA systems are supported.
926
 * @param device_map The array of device map, maximum 64 devices.
927
 * @param size The size of the array, only first 64 will be used.
928
 */
929
void ccv_nnc_set_device_permutation(const int type, const int* const device_map, const int size);
930
/**
931
 * Quantize a given memory region of a given datatype / memory resides, into nbits palette.
932
 * @param input The input memory region, it can be CCV_64F, CCV_32F or CCV_16F.
933
 * @param datatype The datatype, it can be CCV_64F, CCV_32F or CCV_16F.
934
 * @param memory_type Where the memory resides. Right now only support CPU_MEMORY.
935
 * @param input_length How many elements in the input.
936
 * @param qbits How many bits for the palette. Right now only 4 / 5 / 6 / 7 / 8 bits supported.
937
 * @param number_in_blocks How many elements share a palette.
938
 * @param output The output memory region.
939
 * @param output_length The maximum size of the output.
940
 * @return The actual length in bytes of the output.
941
 */
942
CCV_WARN_UNUSED(size_t) ccv_nnc_palettize(const void* input, const int datatype, const int memory_type, const size_t input_length, const int qbits, const int number_in_blocks, void* output, const size_t output_length);
943
/**
944
 * Dequantize a given memory region of a given datatype / memory resides, from built-in nbits palette.
945
 * @param input The input memory region.
946
 * @param datatype The datatype, it can be CCV_64F, CCV_32F or CCV_16F.
947
 * @param memory_type Where the memory resides. It can be either CPU_MEMORY or GPU_MEMORY.
948
 * @param input_length The size of the input in bytes.
949
 * @param qbits How many bits for the palette. Right now only 4 / 5 / 6 / 7 / 8 bits supported.
950
 * @param number_in_blocks How many elements share a palette.
951
 * @param output The output memory region, it can be CCV_64F, CCV_32F or CCV_16F.
952
 * @param output_length How many elements in the output.
953
 */
954
void ccv_nnc_depalettize(const void* input, const int datatype, const int memory_type, const size_t input_length, const int qbits, const int number_in_blocks, void* output, const size_t output_length);
955
956
/** @} */
957
958
/**
959
 * @defgroup level_1_stream Streams
960
 * @{
961
 */
962
963
// Control flow constructs
964
// Follow heavily based along CUDA's stream / event idea.
965
enum {
966
  CCV_STREAM_CONTEXT_CPU = 0x1, /**< A CPU based stream context (unsupported). */
967
  CCV_STREAM_CONTEXT_GPU = 0x2, /**< A GPU based stream context. */
968
};
969
211k
#define CCV_STREAM_GET_CONTEXT(type) ((type) & 0x3)
970
#define CCV_STREAM_GET_DEVICE(type) CCV_TENSOR_GET_DEVICE(type)
971
44.9k
#define CCV_STREAM_GET_DEVICE_ID(type) CCV_TENSOR_GET_DEVICE_ID(type)
972
3.26k
#define CCV_STREAM_SET_DEVICE_ID(type, device_id) CCV_TENSOR_SET_DEVICE_ID(type, device_id)
973
/**
974
 * Create a new stream context.
975
 * @param type A combination of CPU / GPU and DEVICE_ID.
976
 * @return The newly created stream context.
977
 */
978
CCV_WARN_UNUSED(ccv_nnc_stream_context_t*) ccv_nnc_stream_context_new(const int type);
979
/**
980
 * Get the type of the stream context.
981
 * @param stream_context The stream context we want to inspect.
982
 * @return The type of the stream context.
983
 */
984
CCV_WARN_UNUSED(int) ccv_nnc_stream_context_type(const ccv_nnc_stream_context_t* const stream_context);
985
/**
986
 * Get a stream context local workspace memory. This memory region will be reused
987
 * the next time when you call this method on the same stream context.
988
 * @param stream_context The stream context which provides the workspace memory.
989
 * @param workspace_size The size of the workspace memory.
990
 * @param mem The memory type of the said workspace memory (GPU or CPU).
991
 * @return A pointer to the workspace memory.
992
 */
993
CCV_WARN_UNUSED(void*) ccv_nnc_stream_context_get_workspace(ccv_nnc_stream_context_t* const stream_context, const size_t workspace_size, const int mem);
994
/**
995
 * Deallocate any workspace memory on the stream context.
996
 * @param stream The stream context to drain workspace memory.
997
 */
998
void ccv_nnc_stream_context_drain(ccv_nnc_stream_context_t* const stream);
999
/**
1000
 * The callback prototype on the stream context.
1001
 */
1002
typedef void(*ccv_nnc_callback_f)(void* const callback_context);
1003
/**
1004
 * Add a callback function to be called once stream executed to that point.
1005
 * @param stream The stream context to add callback.
1006
 * @param callback The callback function.
1007
 * @param callback_context The context to be called with the callback function.
1008
 */
1009
void ccv_nnc_stream_context_add_callback(ccv_nnc_stream_context_t* const stream, const ccv_nnc_callback_f callback, void* const callback_context);
1010
/**
1011
 * Wait until all tasks submitted (command, graph run etc.) on the stream context
1012
 * completed.
1013
 * @param stream The stream context to wait.
1014
 */
1015
void ccv_nnc_stream_context_wait(const ccv_nnc_stream_context_t* const stream);
1016
/**
1017
 * The hooks to be called when a stream context is destroyed.
1018
 * At the moment, the stream context will be destroyed at the time
1019
 * ccv_nnc_stream_context_free is called, so there is no tricks.
1020
 * This method is useful because we have some resources associated
1021
 * with stream pointer, hence, would be good to free these resources
1022
 * upon free the stream.
1023
 */
1024
typedef void (*ccv_nnc_stream_context_destructor_f)(const ccv_nnc_stream_context_t* const stream, void* const context);
1025
/**
1026
 * Add a new destructor hook callback when a stream is freed.
1027
 * @param stream The stream to be observed.
1028
 * @param destructor The new destructor callback method.
1029
 * @param context additional context.
1030
 * @return A integer identifier to help remove the hook.
1031
 */
1032
int ccv_nnc_stream_context_add_destructor_hook(ccv_nnc_stream_context_t* const stream, ccv_nnc_stream_context_destructor_f destructor, void* const context);
1033
/**
1034
 * Remove a destructor hook callback.
1035
 * @param stream The stream we observe.
1036
 * @param hook_id The returned integer when calling the add method.
1037
 */
1038
void ccv_nnc_stream_context_remove_destructor_hook(ccv_nnc_stream_context_t* const stream, const int hook_id);
1039
/**
1040
 * Deallocate the stream context.
1041
 * @param stream_context The stream context to be destroyed.
1042
 */
1043
void ccv_nnc_stream_context_free(ccv_nnc_stream_context_t* const stream_context);
1044
/**
1045
 * Set random seed for stream context.
1046
 * @param stream_context The stream context to set the seed. 0 means use the default stream context.
1047
 * @param seed The seed for the stream context.
1048
 */
1049
void ccv_nnc_stream_context_set_seed(ccv_nnc_stream_context_t* const stream_context, uint32_t seed);
1050
/**
1051
 * Generate uint32_t random number for stream context.
1052
 * These are usually used as seed for other high-performance random number generators.
1053
 * @param stream_context The stream context associated with random number generation.
1054
 */
1055
uint32_t ccv_nnc_stream_context_genrand_uint32(ccv_nnc_stream_context_t* const stream_context);
1056
1057
/**
1058
 * Opaque pointer to the signal object.
1059
 */
1060
typedef struct ccv_nnc_stream_signal_s ccv_nnc_stream_signal_t;
1061
1062
/**
1063
 * Create a new stream signal.
1064
 * @param type A composed type denotes whether it associated with a GPU or CPU stream context, and on which device.
1065
 * @return The newly created stream signal.
1066
 */
1067
CCV_WARN_UNUSED(ccv_nnc_stream_signal_t*) ccv_nnc_stream_signal_new(const int type);
1068
/**
1069
 * Get the type of the stream signal.
1070
 * @param signal The stream signal we want to inspect.
1071
 * @return The type of the stream signal.
1072
 */
1073
CCV_WARN_UNUSED(int) ccv_nnc_stream_signal_type(const ccv_nnc_stream_signal_t* const signal);
1074
/**
1075
 * Emit a signal on a stream.
1076
 * @param stream The stream context where the signal will be emitted.
1077
 * @param signal The signal to be emitted. It has to be on the same device as the stream.
1078
 */
1079
void ccv_nnc_stream_context_emit_signal(ccv_nnc_stream_context_t* const stream, ccv_nnc_stream_signal_t* const signal);
1080
/**
1081
 * Emit a signal on a stream directly. It will be managed by the stream. You have to use it immediately after return.
1082
 * @param stream The stream context where the signal will be emitted.
1083
 * @return The new signal emitted on the stream context.
1084
 */
1085
ccv_nnc_stream_signal_t* ccv_nnc_stream_context_emit_signal_new(ccv_nnc_stream_context_t* const stream);
1086
/**
1087
 * Wait a signal on a stream.
1088
 * @param stream The stream context that will be blocked by the signal.
1089
 * @param signal The signal to be waited. It can be on a different device of the stream.
1090
 */
1091
void ccv_nnc_stream_context_wait_signal(const ccv_nnc_stream_context_t* const stream, const ccv_nnc_stream_signal_t* const signal);
1092
/**
1093
 * Get on which stream context this signal is going to be emitted on.
1094
 * @param signal The signal we want to inspect.
1095
 * @return The most recent stream context you called ccv_nnc_stream_context_emit_signal with.
1096
 */
1097
CCV_WARN_UNUSED(ccv_nnc_stream_context_t*) ccv_nnc_stream_signal_get_emitter(const ccv_nnc_stream_signal_t* const signal);
1098
/**
1099
 * Deallocate the signal.
1100
 * @param signal The signal to be destroyed.
1101
 */
1102
void ccv_nnc_stream_signal_free(ccv_nnc_stream_signal_t* const signal);
1103
/**
1104
 * Return number of devices.
1105
 * @param type The type of devices (CCV_NNC_STREAM_CONTEXT_GPU / CCV_NNC_STREAM_CONTEXT_CPU)
1106
 * @return The number of devices.
1107
 */
1108
CCV_WARN_UNUSED(int) ccv_nnc_device_count(const int type);
1109
/**
1110
 * The neighbor discovery function that will be called with the device id.
1111
 */
1112
typedef ccv_nnc_stream_context_t*(*ccv_nnc_stream_context_neighbor_discovery_f)(const int device_id, void* const context);
1113
/**
1114
 * Set the neighbor stream context discovery mechanism. This method exposes how
1115
 * neighbor should be defined per stream context. This method is useful for
1116
 * commands that operates cross devices and need to find the correct stream
1117
 * context for these devices. Stream context itself is bounded to one device
1118
 * only.
1119
 * @param stream_context The stream context that bounds to a discovery mechanism.
1120
 * @param discovery The neighbor discovery function to invoke.
1121
 * @param context The associated context with the neighbor discovery function.
1122
 */
1123
void ccv_nnc_stream_context_set_neighbor_discovery(ccv_nnc_stream_context_t* const stream_context, ccv_nnc_stream_context_neighbor_discovery_f discovery, void* const context);
1124
/**
1125
 * Find a neighbor stream context on a given device id for current stream context.
1126
 * @param stream_context The stream context which we will look for neighbors.
1127
 * @param device_id On which device the stream context may exist.
1128
 * @return 0 if no stream context found. Otherwise, return the stream context on that device.
1129
 */
1130
CCV_WARN_UNUSED(ccv_nnc_stream_context_t*) ccv_nnc_stream_context_find_neighbor(ccv_nnc_stream_context_t* const stream_context, const int device_id);
1131
1132
/** @} */
1133
1134
/** @} */
1135
1136
/**
1137
 * @defgroup level_2 Level-2 API
1138
 * @{
1139
 */
1140
1141
/**
1142
 * @defgroup level_2_essentials Essentials
1143
 * @{
1144
 */
1145
1146
enum {
1147
  CCV_NNC_SHORT_DOT_GRAPH = 0x0, /**< Display a simplified graph. */
1148
  CCV_NNC_LONG_DOT_GRAPH  = 0x1, /**< Display a graph that contains all information. */
1149
};
1150
1151
/**
1152
 * Opaque pointer holds the concrete graph representation.
1153
 */
1154
typedef struct ccv_nnc_graph_s ccv_nnc_graph_t;
1155
1156
/**
1157
 * The opaque on stack object hold a reference to an execution node within a graph.
1158
 */
1159
typedef struct {
1160
  int32_t d; // This is int because sometimes I piggy-back on negatives to carry out some internal computations.
1161
  ccv_nnc_graph_t* graph;
1162
} ccv_nnc_graph_exec_t;
1163
1164
82.1k
#define CCV_NO_GRAPH_EXEC(exec) ((exec).graph == 0)
1165
1166
/**
1167
 * Create an empty graph.
1168
 * Note that all graph mutation methods are not thread-safe.
1169
 * You should only operate the graph in serial fashion.
1170
 * @return An opaque ccv_nnc_graph_t pointer.
1171
 */
1172
CCV_WARN_UNUSED(ccv_nnc_graph_t*) ccv_nnc_graph_new(void);
1173
/**
1174
 * Create a node with specific command execution, as well as its inputs & outputs.
1175
 * Underlying, the graph maintains the backing object for the node, and all you get is
1176
 * a on-stack object to index the backing object from the graph.
1177
 * @param graph The concrete graph.
1178
 * @param cmd The wrapped command.
1179
 * @param hint The hint for this command.
1180
 * @param inputs The input tensors array.
1181
 * @param input_size The size of input tensors array.
1182
 * @param outputs The output tensors array.
1183
 * @param output_size The size of output tensors array.
1184
 * @return An on-stack object that references a execution node.
1185
 */
1186
CCV_WARN_UNUSED(ccv_nnc_graph_exec_t) ccv_nnc_graph_exec_new(ccv_nnc_graph_t* const graph, const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size);
1187
/**
1188
 * Set the command for an existing execution node.
1189
 * @param graph The concrete graph.
1190
 * @param exec The execution node reference.
1191
 * @param cmd The new wrapped command.
1192
 */
1193
void ccv_nnc_graph_exec_set(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, const ccv_nnc_cmd_t cmd);
1194
/**
1195
 * Return the command on an existing execution node.
1196
 * @param graph The concrete graph.
1197
 * @param exec The execution node reference.
1198
 * @return The wrapped command.
1199
 */
1200
CCV_WARN_UNUSED(ccv_nnc_cmd_t) ccv_nnc_graph_exec_cmd(const ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec);
1201
/**
1202
 * Set hint for an existing execution node.
1203
 * @param graph The concrete graph.
1204
 * @param exec The execution node reference.
1205
 * @param hint The new hint.
1206
 */
1207
void ccv_nnc_graph_exec_set_hint(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, const ccv_nnc_hint_t hint);
1208
/**
1209
 * Set input / output tensors for an existing execution node.
1210
 * @param graph The concrete graph.
1211
 * @param exec The execution node reference.
1212
 * @param inputs The input tensors array.
1213
 * @param input_size The size of input tensors array.
1214
 * @param outputs The output tensors array.
1215
 * @param output_size The size of output tensors array.
1216
 */
1217
void ccv_nnc_graph_exec_set_io(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size);
1218
/**
1219
 * Concatenate input graph nodes with an output graph node to create a new graph.
1220
 * @param graph The concrete graph.
1221
 * @param source The execution node reference to connect.
1222
 * @param destination The execution node reference connect to.
1223
 * @return Non-zero if cannot concat successfully.
1224
 */
1225
int ccv_nnc_graph_exec_concat(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t source, const ccv_nnc_graph_exec_t destination);
1226
/**
1227
 * Disconnect input graph nodes with an output graph nodes in this graph.
1228
 * @param graph The concrete graph.
1229
 * @param source The execution node reference to disconnect.
1230
 * @param destination The execution node reference disconnect to.
1231
 * @return Non-zero if cannot disjoin successfully.
1232
 */
1233
int ccv_nnc_graph_exec_disjoin(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t source, const ccv_nnc_graph_exec_t destination);
1234
/**
1235
 * Count number of exec in the graph.
1236
 * @param graph The concrete graph.
1237
 * @return The number of execution nodes in the graph.
1238
 */
1239
int ccv_nnc_graph_exec_count(const ccv_nnc_graph_t* const graph);
1240
/**
1241
 * Generate output that can be parsed by GraphViz (DOT language).
1242
 * @param graph The concrete graph.
1243
 * @param flags Either CCV_NNC_SHORT_DOT_GRAPH or CCV_NNC_LONG_DOT_GRAPH
1244
 * @param out The output file stream.
1245
 */
1246
void ccv_nnc_graph_dot(const ccv_nnc_graph_t* const graph, const int flags, FILE* out);
1247
/**
1248
 * Run the autotune function on all execution node, and assign back with the optimized commands.
1249
 * @param graph The concrete graph.
1250
 * @param max_workspace_size The maximum allowed extra memory usage.
1251
 * @param flags A reserved field for flags.
1252
 * @param sources The source execution nodes to begin. 0 uses default sources.
1253
 * @param source_size The size of source execution nodes.
1254
 * @param destinations The destination execution nodes which we end. 0 uses default destinations.
1255
 * @param destination_size The size of destination execution nodes.
1256
 */
1257
void ccv_nnc_graph_autotune(ccv_nnc_graph_t* const graph, const size_t max_workspace_size, const int flags, const ccv_nnc_graph_exec_t* const sources, const int source_size, const ccv_nnc_graph_exec_t* const destinations, const int destination_size);
1258
/**
1259
 * Make the graph topsorted, thus, do a topological sort so when run the graph, no additional memory will be allocated.
1260
 * Otherwise when we run the graph, we need to allocate some memory on heap to faciliate.
1261
 * @param graph The concrete graph.
1262
 * @param exec_cvt The execution node assignments will change, and you can give an array to know the changes.
1263
 * @param exec_cvt_size The provided conversion array size.
1264
 */
1265
void ccv_nnc_graph_topsort(ccv_nnc_graph_t* const graph, int* const exec_cvt, const int exec_cvt_size);
1266
1267
/**
1268
 * Opaque pointer holds the graph schedule.
1269
 */
1270
typedef struct ccv_nnc_graph_static_schedule_s ccv_nnc_graph_static_schedule_t;
1271
/**
1272
 * Assuming the graph runs from the beginning to the end. Allocate a internal schedule object that will
1273
 * run the graph efficiently if it runs from the beginning to the end. It will basically call ccv_nnc_graph_static_schedule
1274
 * and save the end result to a internal schedule object to this graph.
1275
 * @param graph The concrete graph.
1276
 * @param stream_type The type of stream context we are going to use.
1277
 * @param max_stream_count The number of stream contexts to be allocated internally.
1278
 */
1279
void ccv_nnc_graph_set_default_static_schedule(ccv_nnc_graph_t* const graph, const int stream_type, const int max_stream_count);
1280
/**
1281
 * Allocate extra streams to make this graph parallel runnable. Note this requires the graph to be topsorted.
1282
 * After this is done, you can schedule a graph either on its default stream, or a new stream with the schedule
1283
 * object.
1284
 * @param graph The concrete graph.
1285
 * @param stream_type The type of stream context we are going to use.
1286
 * @param max_stream_count The number of stream contexts to be allocated internally.
1287
 * @param sources The source execution nodes to begin. 0 uses default sources.
1288
 * @param source_size The size of source execution nodes.
1289
 * @param destinations The destination execution nodes which we end. 0 uses default destinations.
1290
 * @param destination_size The size of destination execution nodes.
1291
 * @return An opaque schedule object that let the graph knows how to run itself efficiently.
1292
 */
1293
CCV_WARN_UNUSED(ccv_nnc_graph_static_schedule_t*) ccv_nnc_graph_static_schedule_new(ccv_nnc_graph_t* const graph, const int stream_type, const int max_stream_count, const ccv_nnc_graph_exec_t* const sources, const int source_size, const ccv_nnc_graph_exec_t* const destinations, const int destination_size);
1294
/**
1295
 * Free a schedule object for a graph.
1296
 * @param schedule The schedule object returned from ccv_nnc_graph_static_schedule_new.
1297
 */
1298
void ccv_nnc_graph_static_schedule_free(ccv_nnc_graph_static_schedule_t* const schedule);
1299
/**
1300
 * Query the default stream for a given graph.
1301
 * @param graph The concrete graph.
1302
 * @return The default stream context.
1303
 */
1304
CCV_WARN_UNUSED(ccv_nnc_stream_context_t*) ccv_nnc_graph_default_stream(const ccv_nnc_graph_t* const graph);
1305
/**
1306
 * Set default sources for a give graph.
1307
 * @param graph The concrete graph.
1308
 * @param sources The source execution nodes to begin.
1309
 * @param source_size The size of source execution nodes.
1310
 */
1311
void ccv_nnc_graph_set_sources(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t* const sources, const int source_size);
1312
/**
1313
 * Get the default source execution nodes pointer.
1314
 * @param graph The concrete graph.
1315
 * @return A pointer to an array of default source execution nodes.
1316
 */
1317
ccv_nnc_graph_exec_t* ccv_nnc_graph_sources(const ccv_nnc_graph_t* const graph);
1318
/**
1319
 * Get the number of default source execution nodes.
1320
 * @param graph The concrete graph.
1321
 * @return The number of default source execution nodes.
1322
 */
1323
int ccv_nnc_graph_source_size(const ccv_nnc_graph_t* const graph);
1324
/**
1325
 * Set default destinations for a give graph.
1326
 * @param graph The concrete graph.
1327
 * @param destinations The destination execution nodes which we end.
1328
 * @param destination_size The size of destination execution nodes.
1329
 */
1330
void ccv_nnc_graph_set_destinations(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t* const destinations, const int destination_size);
1331
/**
1332
 * Get the default destination execution nodes pointer.
1333
 * @param graph The concrete graph.
1334
 * @return A pointer to an array of default destination execution nodes.
1335
 */
1336
ccv_nnc_graph_exec_t* ccv_nnc_graph_destinations(const ccv_nnc_graph_t* const graph);
1337
/**
1338
 * Get the number of default destination execution nodes.
1339
 * @param graph The concrete graph.
1340
 * @return The number of default destination execution nodes.
1341
 */
1342
int ccv_nnc_graph_destination_size(const ccv_nnc_graph_t* const graph);
1343
/**
1344
 * This graph, and its relevant auxiliary objects (opaque to user) are deallocated.
1345
 * @param graph The concrete graph.
1346
 */
1347
void ccv_nnc_graph_free(ccv_nnc_graph_t* const graph);
1348
/**
1349
 * Opaque pointer to the tape of tensors. The tape are used by the while loop.
1350
 */
1351
typedef struct ccv_nnc_tensor_tape_s ccv_nnc_tensor_tape_t;
1352
/**
1353
 * Execute a computation graph with all bells and whistles. Need to supply a tensor tape if it contains backward pass
1354
 * for while loop or branches. With tensor tape, the tensors are versioned, so you can "backpropagate through time".
1355
 * @param graph The concrete graph.
1356
 * @param flags A reserved field for flags.
1357
 * @param sources The source execution nodes array.
1358
 * @param source_size The size of source execution nodes array. 0 uses default sources.
1359
 * @param destinations The destination execution nodes array.
1360
 * @param destination_size The size of destination execution nodes array. 0 uses default destinations.
1361
 * @param tensor_tape An opaque tensor tape object to "backpropagate through time".
1362
 * @param stream_context Which stream this graph will be executed upon.
1363
 * @return CCV_NNC_EXEC_SUCCESS if succeed.
1364
 */
1365
int ccv_nnc_graph_run(ccv_nnc_graph_t* const graph, const int flags, const ccv_nnc_graph_exec_t* const sources, const int source_size, const ccv_nnc_graph_exec_t* const destinations, const int destination_size, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const stream_context);
1366
/**
1367
 * Execute a computation graph with all bells and whistles. Need to supply a tensor tape if it contains backward pass
1368
 * for while loop or branches. With tensor tape, the tensors are versioned, so you can "backpropagate through time".
1369
 * Comparing with ccv_nnc_graph_run method, this method doesn't take sources / destinations node, rather, it takes the
1370
 * schedule object.
1371
 * @param graph The concrete graph.
1372
 * @param flags A reserved field for flags.
1373
 * @param schedule The schedule object specified the sources / destinations and how to efficiently run this.
1374
 * @param tensor_tape An opaque tensor tape object to "backpropagate through time".
1375
 * @param stream_context Which stream this graph will be executed upon.
1376
 * @return CCV_NNC_EXEC_SUCCESS if succeed.
1377
 */
1378
int ccv_nnc_graph_run_with_schedule(ccv_nnc_graph_t* const graph, const int flags, const ccv_nnc_graph_static_schedule_t* const schedule, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const stream_context);
1379
/**
1380
 * Cancel execution of a graph. You need to handle synchronization yourself when calling this method to make
1381
 * sure the graph is currently executing when cancelling. This method will set a flag internally and the
1382
 * graph execution will check that flag when push compute on the computation device and abort if it is cancelled.
1383
 * When you call ccv_nnc_graph_run again, this cancellation won't in effect and you need to call cancel again.
1384
 * @param graph The concrete graph.
1385
 */
1386
void ccv_nnc_graph_cancel(ccv_nnc_graph_t* const graph);
1387
1388
/** @} */
1389
1390
/**
1391
 * @defgroup level_2_others Others
1392
 * @{
1393
 */
1394
1395
/**
1396
 * Set input / output flags for an existing execution node.
1397
 * This must be called after set_io, set additional flags for tensors related to this exec.
1398
 * @param graph The concrete graph.
1399
 * @param exec The execution node reference.
1400
 * @param input_flags The input flags array.
1401
 * @param input_flag_size the size of input flags array, should be the same as input tensors array (or 0).
1402
 * @param output_flags The output flags array.
1403
 * @param output_flag_size the size of output flags array, should be the same as output tensors array (or 0).
1404
 */
1405
void ccv_nnc_graph_exec_set_io_flags(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, const int* const input_flags, const int input_flag_size, const int* const output_flags, const int output_flag_size);
1406
/**
1407
 * Set the pair reference for exec. In backward pass, an execution node's pair node is the forward pass node.
1408
 * @param graph The concrete graph.
1409
 * @param exec The execution node reference.
1410
 * @param pair_exec The pair execution node reference.
1411
 */
1412
void ccv_nnc_graph_exec_pair_with(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, const ccv_nnc_graph_exec_t pair_exec);
1413
/**
1414
 * Add tensor pair that can be used to "carry over". (carry over: passing a tensor from current loop to the next loop).
1415
 * @param graph The concrete graph.
1416
 * @param from The tensor we have output in this loop.
1417
 * @param to The tensor we will use as input in the next loop.
1418
 */
1419
void ccv_nnc_graph_add_carry_over(ccv_nnc_graph_t* const graph, const ccv_nnc_tensor_t* const from, const ccv_nnc_tensor_t* const to);
1420
/**
1421
 * Updates are the tensors that not directly involved in the computation, but its pointers need to get updated
1422
 * along with this exec, thus need to be "update" to other exec nodes.
1423
 * @param graph The concrete graph.
1424
 * @param exec The execution node reference.
1425
 * @param update The tensor need to be updated along the execution node.
1426
 */
1427
void ccv_nnc_graph_exec_add_as_affected(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, ccv_nnc_tensor_t* const update);
1428
1429
/** @} */
1430
1431
/** @} */
1432
1433
/**
1434
 * @defgroup level_3 Level-3 API
1435
 * @{
1436
 */
1437
1438
/**
1439
 * @defgroup level_3_essentials Essentials
1440
 * @{
1441
 */
1442
1443
/**
1444
 * Opaque pointer to the symbolic graph object.
1445
 */
1446
typedef struct ccv_nnc_symbolic_graph_s ccv_nnc_symbolic_graph_t;
1447
1448
/**
1449
 * Opaque pointer to an arena of allocated tensors.
1450
 */
1451
typedef struct ccv_nnc_tensor_arena_s ccv_nnc_tensor_arena_t;
1452
1453
/**
1454
 * Opaque pointer to an arena of allocated execs.
1455
 */
1456
typedef struct ccv_nnc_graph_exec_arena_s ccv_nnc_graph_exec_arena_t;
1457
1458
/**
1459
 * On stack object references a tensor symbol in the symbolic graph.
1460
 */
1461
typedef struct {
1462
  int32_t d;
1463
  const ccv_nnc_symbolic_graph_t* graph;
1464
} ccv_nnc_tensor_symbol_t;
1465
1466
/**
1467
 * On stack object references a execution node symbol in the symbolic graph.
1468
 */
1469
typedef struct {
1470
  int32_t d;
1471
  const ccv_nnc_symbolic_graph_t* graph;
1472
} ccv_nnc_graph_exec_symbol_t;
1473
1474
enum {
1475
  CCV_NNC_TENSOR_SYMBOL_INIT_ZEROS = 0x01, /**< Initialize underlying tensor for the symbol with zeros */
1476
  CCV_NNC_TENSOR_SYMBOL_INIT_ONES = 0x02, /**< Initialize underlying tensor for the symbol with ones */
1477
  CCV_NNC_TENSOR_SYMBOL_TAPE_VAR = 0x04, /**< Mark this as a tape variable (it cannot be folded, will contain flag CCV_TAPE_ALLOC) */
1478
  // The one below is special.
1479
  CCV_NNC_TENSOR_SYMBOL_DEAD = 0x80000000, /**< Mark this tensor symbol as dead, any future usage will cause assertion */
1480
};
1481
1482
147k
#define CCV_NNC_TENSOR_SYMBOL_IS_DEAD(x) ((x) & CCV_NNC_TENSOR_SYMBOL_DEAD)
1483
1484
enum {
1485
  CCV_NNC_GRAPH_EXEC_DEAD = 0x1, /**< Mark this node as dead. */
1486
  CCV_NNC_GRAPH_EXEC_P_WHILE = 0x10, /**< Mark this node keyword is while */
1487
  CCV_NNC_GRAPH_EXEC_CASE_OF = 0x20, /**< Mark this node keyword is case_of */
1488
  CCV_NNC_GRAPH_EXEC_DISABLE_OPT = 0x10000, /**< Mark this node to avoid optimization pass. */
1489
};
1490
1491
450k
#define CCV_NNC_GRAPH_EXEC_IS_DEAD(x) ((x) & CCV_NNC_GRAPH_EXEC_DEAD)
1492
25.1k
#define CCV_NNC_GRAPH_REF(x) ((x)->_heap_graph_ref ? 
(x)->_heap_graph_ref178
:
(x)->_inline_graph_ref24.9k
)
1493
1494
enum {
1495
  CCV_NNC_NO_TENSOR_SYMBOL = -1, /**< Special symbol reference for no tensor symbol. */
1496
  CCV_NNC_WHILE_COUNT_TENSOR_SYMBOL = -2, /**< Special symbol reference for while loop count tensor. */
1497
};
1498
1499
enum {
1500
  CCV_NNC_NO_GRAPH_EXEC_SYMBOL = -1, /**< Special symbol reference for no exec symbol. */
1501
};
1502
1503
1504
enum {
1505
  CCV_NNC_SYMBOL_TENSOR, /**< Identifier for tensor symbol */
1506
  CCV_NNC_SYMBOL_TENSOR_ALIAS, /**< Identifier for tensor alias symbol */
1507
  CCV_NNC_SYMBOL_GRAPH_EXEC, /**< Identifier for exec symbol */
1508
};
1509
1510
22
#define CCV_NNC_IS_WHILE_COUNT_TENSOR_SYMBOL(d) (((uint32_t)(d) & 0xf) == 0xe)
1511
1512
/**
1513
 * A data structure to pass in a pair of tensor symbols.
1514
 */
1515
typedef struct {
1516
  ccv_nnc_tensor_symbol_t source; /**< The 'from' tensor symbol. */
1517
  ccv_nnc_tensor_symbol_t destination; /**< The 'to' tensor symbol. */
1518
} ccv_nnc_tensor_symbol_map_t;
1519
1520
/**
1521
 * Create a new empty symbolic graph. It is an opaque data structure that maintains the whole graph of computation in its symbolic form.
1522
 * Note that all graph mutation methods are not thread-safe. You should only operate the graph in serial fashion.
1523
 */
1524
CCV_WARN_UNUSED(ccv_nnc_symbolic_graph_t*) ccv_nnc_symbolic_graph_new(void);
1525
/**
1526
 * Create an tensor symbol (thus, with no actual memory space allocation) in a symbolic graph.
1527
 * @param graph The symbolic graph.
1528
 * @param info The tensor parameters.
1529
 * @param name The name of the tensor symbol, it is optional.
1530
 * @return A tensor symbol reference.
1531
 */
1532
CCV_WARN_UNUSED(ccv_nnc_tensor_symbol_t) ccv_nnc_tensor_symbol_new(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_param_t info, const char* const name);
1533
/**
1534
 * Create an alias to the tensor symbol as tensor view (thus, pointing to the same memory region, but with a different header info and offset).
1535
 * @param graph The symbolic graph.
1536
 * @param tensor_symbol The tensor symbol we are going to reference to.
1537
 * @param ofs The offset on each of the dimension.
1538
 * @param stride The stride of each dimension.
1539
 * @param info The tensor parameters for the new alias.
1540
 * @param name The name of the tensor symbol alias, it is optional.
1541
 * @return A tensor symbol alias reference.
1542
 */
1543
CCV_WARN_UNUSED(ccv_nnc_tensor_symbol_t) ccv_nnc_tensor_symbol_alias_new(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor_symbol, const int ofs[CCV_NNC_MAX_DIM_ALLOC], const int stride[CCV_NNC_MAX_DIM_ALLOC], const ccv_nnc_tensor_param_t info, const char* const name);
1544
/**
1545
 * Manually delete a tensor symbol off the symbolic graph.
1546
 * @param graph The symbolic graph.
1547
 * @param tensor The tensor symbol reference.
1548
 */
1549
void ccv_nnc_tensor_symbol_free(ccv_nnc_symbolic_graph_t* const graph, ccv_nnc_tensor_symbol_t tensor);
1550
/**
1551
 * Create a graph execution node (an operation that takes a set of inputs and generates a set of outputs).
1552
 * @param graph The symbolic graph.
1553
 * @param cmd The wrapped command.
1554
 * @param inputs The input tensor symbols array.
1555
 * @param input_size The size of input tensor symbols array.
1556
 * @param outputs The output tensor symbols array.
1557
 * @param output_size The size of output tensor symbols array.
1558
 * @param name The name of this execution node, optional.
1559
 * @return The execution node symbol reference.
1560
 */
1561
ccv_nnc_graph_exec_symbol_t ccv_nnc_graph_exec_symbol_new(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_cmd_t cmd, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, const ccv_nnc_tensor_symbol_t* const outputs, const int output_size, const char* const name);
1562
/**
1563
 * ccv_nnc_graph_exec_symbol_new defaults to use `ccv_nnc_hint_auto` find the best hints for a set of inputs / outputs.
1564
 * However, you can also set your own hints.
1565
 * @param graph The symbolic graph.
1566
 * @param exec The execution node symbol reference.
1567
 * @param hint The hint for the command.
1568
 */
1569
void ccv_nnc_graph_exec_symbol_set_hint(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t exec, const ccv_nnc_hint_t hint);
1570
/**
1571
 * Manually delete a exec symbol off the symbolic graph.
1572
 * @param graph The symbolic graph.
1573
 * @param symbol The execution node symbol reference.
1574
 */
1575
void ccv_nnc_graph_exec_symbol_free(ccv_nnc_symbolic_graph_t* const graph, ccv_nnc_graph_exec_symbol_t symbol);
1576
enum {
1577
  CCV_NNC_AUTOGEN_ALL_EXECS = 0x1, /**< Automatic concatenation for all execution nodes */
1578
  CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS = 0x2, /**< Automatically find all source and destination nodes. */
1579
};
1580
/**
1581
 * Automatic concatenate these nodes together based on its inputs / outputs.
1582
 * Imagining this is to generate the execution flow based on input tensors and output tensors.
1583
 * nil for execs and 0 for exec_size means to loop over all the execs on the graph and autogen.
1584
 * @param graph The symbolic graph.
1585
 * @param execs The execution nodes array.
1586
 * @param exec_size The size of execution nodes array.
1587
 * @param flags The flags determines what operations to perform when concatenating.
1588
 * @return non-zero if cannot figure out.
1589
 */
1590
int ccv_nnc_graph_exec_symbol_autogen(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t* const execs, const int exec_size, const int flags);
1591
/**
1592
 * Set the default sources for a symbolic graph.
1593
 * @param graph The symbolic graph.
1594
 * @param sources The source execution nodes array.
1595
 * @param source_size The size of source execution nodes array.
1596
 */
1597
void ccv_nnc_symbolic_graph_set_sources(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size);
1598
/**
1599
 * Add one node to the default sources for a symbolic graph.
1600
 * @param graph The symbolic graph.
1601
 * @param source The source execution node.
1602
 */
1603
void ccv_nnc_symbolic_graph_add_source(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t source);
1604
/**
1605
 * Get the pointer to the default sources.
1606
 * @param graph The symbolic graph.
1607
 * @return The pointer to the source execution nodes array.
1608
 */
1609
ccv_nnc_graph_exec_symbol_t* ccv_nnc_symbolic_graph_sources(const ccv_nnc_symbolic_graph_t* const graph);
1610
/**
1611
 * Get the size of the default source nodes array.
1612
 * @param graph The symbolic graph.
1613
 * @return The size of the default source nodes array.
1614
 */
1615
int ccv_nnc_symbolic_graph_source_size(const ccv_nnc_symbolic_graph_t* const graph);
1616
/**
1617
 * Set the default destinations for a symbolic graph.
1618
 * @param graph The symbolic graph.
1619
 * @param destinations The destination execution nodes array.
1620
 * @param destination_size The size of destination execution nodes array.
1621
 */
1622
void ccv_nnc_symbolic_graph_set_destinations(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size);
1623
/**
1624
 * Add one node to the default destinations for a symbolic graph.
1625
 * @param graph The symbolic graph.
1626
 * @param destination The destination execution node.
1627
 */
1628
void ccv_nnc_symbolic_graph_add_destination(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t destination);
1629
/**
1630
 * Get the pointer to the default destinations.
1631
 * @param graph The symbolic graph.
1632
 * @return The pointer to the destinationsexecution nodes array.
1633
 */
1634
ccv_nnc_graph_exec_symbol_t* ccv_nnc_symbolic_graph_destinations(const ccv_nnc_symbolic_graph_t* const graph);
1635
/**
1636
 * Get the size of the default destination nodes array.
1637
 * @param graph The symbolic graph.
1638
 * @return The size of the default destination nodes array.
1639
 */
1640
int ccv_nnc_symbolic_graph_destination_size(const ccv_nnc_symbolic_graph_t* const graph);
1641
/**
1642
 * Generate output that can be parsed by GraphViz (DOT language).
1643
 * @param graph The symbolic graph.
1644
 * @param flags Either CCV_NNC_SHORT_DOT_GRAPH or CCV_NNC_LONG_DOT_GRAPH
1645
 * @param out The output file stream.
1646
 */
1647
void ccv_nnc_symbolic_graph_dot(const ccv_nnc_symbolic_graph_t* const graph, const int flags, FILE* out);
1648
1649
/**
1650
 * The data structure to wrap a tensor symbol and a concrete tensor together.
1651
 */
1652
typedef struct {
1653
  ccv_nnc_tensor_symbol_t symbol;
1654
  const ccv_nnc_tensor_t* tensor;
1655
} ccv_nnc_tensor_bind_t;
1656
1657
typedef struct {
1658
  void* (*alloc)(const int type, const int pinned_mem /* Currently only used to annotate CCV_TENSOR_PINNED_MEM, future can be expanded to generic flags */, const size_t size, void* const arg);
1659
  void (*free)(void* const ptr, void* const arg);
1660
} ccv_nnc_symbolic_graph_compile_allocator_vtab_t;
1661
1662
typedef struct {
1663
  const ccv_nnc_symbolic_graph_compile_allocator_vtab_t* isa;
1664
  struct {
1665
    void* alloc;
1666
    void* free;
1667
  } context;
1668
} ccv_nnc_symbolic_graph_compile_allocator_t;
1669
1670
typedef struct {
1671
  ccv_nnc_symbolic_graph_compile_allocator_t allocator;
1672
} ccv_nnc_symbolic_graph_compile_param_t;
1673
1674
/**
1675
 * Compile a symbolic graph into a graph that can be executed, and a set of tensors (opaque data structure tensor arena) are allocated based on which tensor symbols are the input and which are the outputs. The tensor allocation is done to minimize the required storage.
1676
 * tensor_binds provide custom binding for these tensors. You still responsible to manage the life-time of these tensors.
1677
 * outputs marks the tensor symbols that need to be kept til the end of the graph.
1678
 * @param graph The symbolic graph.
1679
 * @param compile_params A ccv_nnc_symbolic_graph_compile_param_t struct defines compilation parameters.
1680
 * @param tensor_binds The binding array (a tensor symbol and a concrete tensor). We replace everywhere that uses the tensor symbol with the concrete tensor.
1681
 * @param tensor_bind_size The size of the binding array.
1682
 * @param outputs The output tensor symbols that we want to keep the value.
1683
 * @param output_size The size of the output tensor symbols array.
1684
 * @param sources The sources for the graph.
1685
 * @param source_size The size of the sources array. 0 to use default sources.
1686
 * @param destinations The destinations for the graph.
1687
 * @param destination_size The size of the destinations array. 0 to use default destinations.
1688
 * @param graph_ref The pointer to store the generated concrete graph.
1689
 * @param tensor_arena_ref The pointer to store ccv_nnc_tensor_arena_t.
1690
 * @param graph_exec_arena_ref The pointer to store ccv_nnc_graph_exec_arena_t.
1691
 */
1692
void ccv_nnc_symbolic_graph_compile(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_symbolic_graph_compile_param_t compile_params, const ccv_nnc_tensor_bind_t* const tensor_binds, const int tensor_bind_size, const ccv_nnc_tensor_symbol_t* const outputs, const int output_size, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size, ccv_nnc_graph_t** const graph_ref, ccv_nnc_tensor_arena_t** const tensor_arena_ref, ccv_nnc_graph_exec_arena_t** const graph_exec_arena_ref);
1693
/**
1694
 * Free the symbolic graph and its associated memory. Note that if you compiled a graph / tensor arena out of this symbolic graph, these won't be free'd.
1695
 * @param graph The symbolic graph.
1696
 */
1697
void ccv_nnc_symbolic_graph_free(ccv_nnc_symbolic_graph_t* const graph);
1698
/**
1699
 * Find corresponding tensor by a symbol from the tensor arena.
1700
 * @param tensor_arena The tensor arena object generated through compilation,
1701
 * @param symbol The tensor symbol reference. Because tensor symbol reference is on stack. It can still be used even the original symbolic graph is free'd.
1702
 * @return A concrete tensor from the tensor arena.
1703
 */
1704
CCV_WARN_UNUSED(ccv_nnc_tensor_t*) ccv_nnc_tensor_from_symbol(const ccv_nnc_tensor_arena_t* const tensor_arena, const ccv_nnc_tensor_symbol_t symbol);
1705
/**
1706
 * Bind a tensor to a symbol. You still responsible to manage the life-time of the tensor to make sure it is not freed until everything is done.
1707
 * @param tensor_arena The tensor arena object generated through compilation.
1708
 * @param symbol The tensor symbol reference. Because tensor symbol reference is on stack. It can still be used even the original symbolic graph is free'd.
1709
 * @param tensor The new tensor to bind to.
1710
 */
1711
void ccv_nnc_tensor_bind_symbol(ccv_nnc_tensor_arena_t* const tensor_arena, const ccv_nnc_tensor_symbol_t symbol, const ccv_nnc_tensor_t* const tensor);
1712
/**
1713
 * Clear existing bindings on the tensor arena.
1714
 * @param tensor_arena The tensor arena object generated through compilation to clear bindings.
1715
 */
1716
void ccv_nnc_tensor_arena_clear_bindings(ccv_nnc_tensor_arena_t* const tensor_arena);
1717
/**
1718
 * Free the data buffer of the tensor arena.
1719
 * @param tensor_arena The tensor arena object generated through compilation.
1720
 */
1721
void ccv_nnc_tensor_arena_buffer_free(ccv_nnc_tensor_arena_t* const tensor_arena);
1722
/**
1723
 * Free the opaque tensor arena structure.
1724
 * @param tensor_arena The tensor arena object generated through compilation.
1725
 */
1726
void ccv_nnc_tensor_arena_free(ccv_nnc_tensor_arena_t* const tensor_arena);
1727
/**
1728
 * Find corresponding graph exec by a exec symbol from graph exec arena.
1729
 * @param graph_exec_arena The graph execution node arena object generated through compilation,
1730
 * @param symbol The execution node symbol reference. Because execution node symbol reference is on stack. It can still be used even the original symbolic graph is free'd.
1731
 * @return A execution node reference to the concrete graph.
1732
 */
1733
CCV_WARN_UNUSED(ccv_nnc_graph_exec_t) ccv_nnc_graph_exec_from_symbol(const ccv_nnc_graph_exec_arena_t* const graph_exec_arena, const ccv_nnc_graph_exec_symbol_t symbol);
1734
/**
1735
 * Return the node that can drive all the source nodes from the compilation.
1736
 * @param graph_exec_arena The graph execution node arena object generated through compilation,
1737
 * @return A execution node reference that is the source.
1738
 */
1739
CCV_WARN_UNUSED(ccv_nnc_graph_exec_t) ccv_nnc_graph_exec_source(const ccv_nnc_graph_exec_arena_t* const graph_exec_arena);
1740
/**
1741
 * Return the node that can drain all the destination nodes from the compilation.
1742
 * @param graph_exec_arena The graph execution node arena object generated through compilation,
1743
 * @return A execution node reference that is the destination.
1744
 */
1745
CCV_WARN_UNUSED(ccv_nnc_graph_exec_t) ccv_nnc_graph_exec_destination(const ccv_nnc_graph_exec_arena_t* const graph_exec_arena);
1746
/**
1747
 * Free the opaque graph exec arena structure.
1748
 * @param graph_exec_arena The graph execution node arena object generated through compilation,
1749
 */
1750
void ccv_nnc_graph_exec_arena_free(ccv_nnc_graph_exec_arena_t* const graph_exec_arena);
1751
/**
1752
 * Write symbolic graph to disk, along with some binding tensors.
1753
 * @param graph The symbolic graph.
1754
 * @param tensor_binds The binding array (pair of tensor symbol and concrete tensor).
1755
 * @param tensor_bind_size The size of the binding array.
1756
 * @param fn The file name.
1757
 */
1758
void ccv_nnc_symbolic_graph_write(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_bind_t* const tensor_binds, const int tensor_bind_size, const char* const fn);
1759
/**
1760
 * Read symbolic graph from disk, with some binding tensors.
1761
 * @param fn The file name.
1762
 * @param graph_ref The pointer to store symbolic graph.
1763
 * @param tensor_binds_ref The pointer to store the binding array.
1764
 * @param tensor_bind_size_ref The pointer to store the size of the binding array.
1765
 */
1766
void ccv_nnc_symbolic_graph_read(const char* const fn, ccv_nnc_symbolic_graph_t** const graph_ref, ccv_nnc_tensor_bind_t** const tensor_binds_ref, int* const tensor_bind_size_ref);
1767
1768
/**
1769
 * The format callback function. Note that these are all integer ids. They can be filled to
1770
 * ccv_nnc_graph_exec_symbol_t.d or ccv_nnc_tensor_symbol_t.d.
1771
 * @param graph The symbolic graph.
1772
 * @param node The id for the node. It is unique in the graph.
1773
 * @param name The name for the node. It is either NULL or \0 terminated string.
1774
 * @param cmd The associated command for this node.
1775
 * @param flags The flag that help to identify if it is a sub-graph, which type it is (P_WHILE or CASE_OF)
1776
 * @param incomings The incoming nodes for execution.
1777
 * @param incoming_size The number of incoming nodes for execution.
1778
 * @param outgoings The outgoing nodes for execution.
1779
 * @param outgoing_size The number of outgoing nodes for execution.
1780
 * @param inputs The input tensor symbols.
1781
 * @param input_size The number of the input tensor symbols.
1782
 * @param outputs The output tensor symbols.
1783
 * @param output_size The number of the output tensor symbols.
1784
 * @param context The context passed through ccv_nnc_symbolic_graph_format.
1785
 */
1786
typedef void(*ccv_nnc_symbolic_graph_format_f)(const ccv_nnc_symbolic_graph_t* const graph, const int node, const char* const name, const ccv_nnc_cmd_t cmd, const int flags, const int* const incomings, const int incoming_size, const int* const outgoings, const int outgoing_size, const int* const inputs, const int input_size, const int* const outputs, const int output_size, void* const context);
1787
/**
1788
 * Provide a hook for upper level to do custom formatting of a given symbolic graph. You can
1789
 * implement logic to format the graph into protobuf, or json, or doing persistence. However, this
1790
 * is not the method for you to visit the graph, and do mutations on it. This function doesn't
1791
 * recurse into sub-graphs. You need to inspect each node to know if these are sub-graphs and
1792
 * handle accordingly.
1793
 * @param graph The symbolic graph.
1794
 * @param sources The sources for the graph.
1795
 * @param source_size The size of the sources array. 0 to use default sources.
1796
 * @param destinations The destinations for the graph.
1797
 * @param destination_size The size of the destinations array. 0 to use default destinations.
1798
 * @param format_fn The format callback to be called on every node.
1799
 * @param context The context that will be passed to the callback.
1800
 */
1801
void ccv_nnc_symbolic_graph_format(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size, const ccv_nnc_symbolic_graph_format_f format_fn, void* const context);
1802
1803
/** @} */
1804
1805
/**
1806
 * @defgroup level_3_others Others
1807
 * @{
1808
 */
1809
1810
/**
1811
 * Return the symbol it alias to.
1812
 * @param graph The symbolic graph.
1813
 * @param tensor_symbol The tensor symbol alias.
1814
 * @return A tensor symbol reference to the original tensor symbol. If this symbol has no reference, return NO_SYMBOL (.graph = 0)
1815
 */
1816
CCV_WARN_UNUSED(ccv_nnc_tensor_symbol_t) ccv_nnc_tensor_symbol_alias_to(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor_symbol);
1817
/**
1818
 * Set the tensor symbol parameters.
1819
 * @param graph The symbolic graph.
1820
 * @param tensor The tensor symbol reference.
1821
 * @param info The new tensor parameters.
1822
 * @return non-zero if encountered errors.
1823
 */
1824
int ccv_nnc_tensor_symbol_set(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor, const ccv_nnc_tensor_param_t info);
1825
/**
1826
 * Get the parameters for a tensor symbol.
1827
 * @param graph The symbolic graph.
1828
 * @param tensor The tensor symbol reference.
1829
 * @return The tensor parameters.
1830
 */
1831
CCV_WARN_UNUSED(ccv_nnc_tensor_param_t) ccv_nnc_tensor_symbol_params(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor);
1832
/**
1833
 * Get the name for a tensor symbol.
1834
 * @param graph The symbolic graph.
1835
 * @param tensor The tensor symbol reference.
1836
 * @return The tensor name if available. Otherwise 0. The memory is managed by the graph.
1837
 */
1838
CCV_WARN_UNUSED(const char*) ccv_nnc_tensor_symbol_name(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor);
1839
/**
1840
 * Set the tensor symbol alias parameters.
1841
 * @param graph The symbolic graph.
1842
 * @param tensor The tensor symbol reference.
1843
 * @param ofs The offset on each of the dimension.
1844
 * @param stride The stride of each dimension.
1845
 * @return non-zero if it is not a tensor alias.
1846
 */
1847
int ccv_nnc_tensor_symbol_alias_set(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor, const int ofs[CCV_NNC_MAX_DIM_ALLOC], const int stride[CCV_NNC_MAX_DIM_ALLOC]);
1848
/**
1849
 * Get the parameters for a tensor symbol.
1850
 * @param graph The symbolic graph.
1851
 * @param tensor The tensor symbol reference.
1852
 * @param ofs The offset on each of the dimension.
1853
 * @param stride The stride of each dimension.
1854
 * @return non-zero if it is not a tensor alias.
1855
 */
1856
int ccv_nnc_tensor_symbol_alias_params(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor, int ofs[CCV_NNC_MAX_DIM_ALLOC], int stride[CCV_NNC_MAX_DIM_ALLOC]);
1857
/**
1858
 * Set the flags for this tensor symbol. The flags are only used for symbol, not for tensor.
1859
 * @param graph The symbolic graph.
1860
 * @param tensor The tensor symbol reference.
1861
 * @param flags A reserved field for flags.
1862
 */
1863
void ccv_nnc_tensor_symbol_set_flags(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor, const int flags);
1864
/**
1865
 * Get all the flags for a tensor.
1866
 * @param graph The symbolic graph.
1867
 * @param tensor The tensor symbol reference.
1868
 */
1869
CCV_WARN_UNUSED(int) ccv_nnc_tensor_symbol_flags(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor);
1870
/**
1871
 * Set the cmd of this exec symbol.
1872
 * @param graph The symbolic graph.
1873
 * @param exec The execution node symbol reference.
1874
 * @param cmd The new wrapped command.
1875
 */
1876
void ccv_nnc_graph_exec_symbol_set(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t exec, const ccv_nnc_cmd_t cmd);
1877
/**
1878
 * Set the flags for this exec symbol. The flags are only used for symbol. We can only set higher 16-bit.
1879
 * @param graph The symbolic graph.
1880
 * @param exec The execution node symbol reference.
1881
 * @param flags A reserved field for flags.
1882
 */
1883
void ccv_nnc_graph_exec_symbol_set_flags(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t exec, const int flags);
1884
/**
1885
 * Get the flags for a tensor. We can only retrieve the higher 16-bit.
1886
 * @param graph The symbolic graph.
1887
 * @param exec The execution node symbol reference.
1888
 */
1889
CCV_WARN_UNUSED(int) ccv_nnc_graph_exec_symbol_flags(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t exec);
1890
/**
1891
 * Return the command on this exec symbol.
1892
 * @param graph The symbolic graph.
1893
 * @param exec The execution node symbol reference.
1894
 * @return The wrapped command.
1895
 */
1896
CCV_WARN_UNUSED(ccv_nnc_cmd_t) ccv_nnc_graph_exec_symbol_cmd(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t exec);
1897
/**
1898
 * Return the command on this exec symbol.
1899
 * @param graph The symbolic graph.
1900
 * @param exec The execution node symbol reference.
1901
 * @return The name for the exec symbol if available. The memory is managed by the graph.
1902
 */
1903
CCV_WARN_UNUSED(const char*) ccv_nnc_graph_exec_symbol_name(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t exec);
1904
/**
1905
 * Set the inputs / outputs for a exec symbol.
1906
 * @param graph The symbolic graph.
1907
 * @param exec The execution node symbol reference.
1908
 * @param inputs The input tensor symbols array.
1909
 * @param input_size The size of input tensor symbols array.
1910
 * @param outputs The output tensor symbols array.
1911
 * @param output_size The size of output tensor symbols array.
1912
 */
1913
void ccv_nnc_graph_exec_symbol_set_io(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t exec, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, const ccv_nnc_tensor_symbol_t* const outputs, const int output_size);
1914
/**
1915
 * Manually concatenate input node with an output graph node.
1916
 * @param graph The symbolic graph.
1917
 * @param source The source execution node symbol to connect.
1918
 * @param destination The destination execution node symbol connect to.
1919
 * @return non-zero if cannot concat successfully.
1920
 */
1921
int ccv_nnc_graph_exec_symbol_concat(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t source, const ccv_nnc_graph_exec_symbol_t destination);
1922
/**
1923
 * Manually disconnect input node with an output graph node for this graph.
1924
 * @param graph The symbolic graph.
1925
 * @param source The source execution node symbol to disconnect.
1926
 * @param destination The destination execution node symbol disconnect to.
1927
 * @return non-zero if cannot disjoin successfully.
1928
 */
1929
int ccv_nnc_graph_exec_symbol_disjoin(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t source, const ccv_nnc_graph_exec_symbol_t destination);
1930
/**
1931
 * Number of exec symbols.
1932
 * @param graph The symbolic graph.
1933
 */
1934
CCV_WARN_UNUSED(int) ccv_nnc_graph_exec_symbol_count(const ccv_nnc_symbolic_graph_t* const graph);
1935
/**
1936
 * Number of active exec symbols.
1937
 * @param graph The symbolic graph.
1938
 * @param type The type of op, can be CCV_NNC_SYMBOL_TENSOR, CCV_NNC_SYMBOL_GRAPH_EXEC (will error out on CCV_NNC_SYMBOL_TENSOR_ALIAS)
1939
 */
1940
CCV_WARN_UNUSED(int) ccv_nnc_symbolic_graph_active_symbol_count(const ccv_nnc_symbolic_graph_t* const graph, const int type);
1941
/**
1942
 * Substitution function. Given an execution node symbol and a command, return a new command.
1943
 */
1944
typedef ccv_nnc_cmd_t(*ccv_nnc_symbolic_graph_subst_f)(const ccv_nnc_graph_exec_symbol_t symbol, const ccv_nnc_cmd_t cmd);
1945
/**
1946
 * Generate a duplicate of the provided graph.
1947
 * While generating the duplicate, it calls the function pointer to re-process the node type.
1948
 * @param graph The symbolic graph.
1949
 * @param subst The substitution function.
1950
 * @return The duplicated symbolic graph.
1951
 */
1952
CCV_WARN_UNUSED(ccv_nnc_symbolic_graph_t*) ccv_nnc_symbolic_graph_dup(const ccv_nnc_symbolic_graph_t* const graph, ccv_nnc_symbolic_graph_subst_f subst);
1953
/**
1954
 * Number of tensor symbols.
1955
 * @param graph The symbolic graph.
1956
 */
1957
CCV_WARN_UNUSED(int) ccv_nnc_tensor_symbol_count(const ccv_nnc_symbolic_graph_t* const graph);
1958
/**
1959
 * Compute all the tensor shapes within this graph.
1960
 * @param graph The symbolic graph.
1961
 * @param sources The sources for the graph.
1962
 * @param source_size The size of the sources array. 0 to use default sources.
1963
 * @param destinations The destinations for the graph.
1964
 * @param destination_size The size of the destinations array. 0 to use default destinations.
1965
 */
1966
void ccv_nnc_symbolic_graph_tensor_auto(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size);
1967
/**
1968
 * For a given tensor symbol, this method resolves to its local reference inside the given graph.
1969
 * This is related to the sub-graph of symbolic graphs. A tensor symbol in the sub-graph can still have a
1970
 * representation in the parent graph. This method used to find the local reference in any graph.
1971
 * @param graph The symbolic graph.
1972
 * @param tensor_symbol The tensor symbol we want to resolve.
1973
 * @return A tensor symbol reference in the given graph.
1974
 */
1975
CCV_WARN_UNUSED(ccv_nnc_tensor_symbol_t) ccv_nnc_tensor_symbol_resolve(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor_symbol);
1976
/**
1977
 * Pass graph's tensor symbol into its sub graph. We will make the connection that the source tensor
1978
 * symbol in the source symbolic graph is the destination tensor symbol in the destination symbolic graph.
1979
 * The reason to do this inference is because a tensor symbol is local to a symbolic graph under the hood.
1980
 * Although you can use tensor symbols from different graphs directly (it calls this method or the resolve
1981
 * method above when create an execution node symbol), sometimes you need this method to do it manually.
1982
 * @param src_graph The source symbolic graph.
1983
 * @param dest_graph The destination symbolic graph.
1984
 * @param src_tensor_symbol The tensor symbol we want to resolve.
1985
 * @param dest_tensor_symbol The tensor symbol we want to resolve.
1986
 */
1987
void ccv_nnc_tensor_symbol_hookup(ccv_nnc_symbolic_graph_t* const src_graph, ccv_nnc_symbolic_graph_t* const dest_graph, const ccv_nnc_tensor_symbol_t src_tensor_symbol, const ccv_nnc_tensor_symbol_t dest_tensor_symbol);
1988
/**
1989
 * Set bypasses for a tensor symbol.
1990
 * For case..of graphs, if the condition doesn't meet, we will skip the execution of a sub-graph.
1991
 * However, in that case, we cannot express easily which output tensor corresponds to which input tensor.
1992
 * This methods provides the way.
1993
 * @param graph The symbolic graph.
1994
 * @param symbol_map The pair of tensors array, source is the input tensor, destination is the output tensor.
1995
 * @param symbol_map_size The size of the tensor pairs array.
1996
 */
1997
void ccv_nnc_tensor_symbol_set_bypasses(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_map_t* const symbol_map, const int symbol_map_size);
1998
/**
1999
 * Fetch input / output for an exec symbol. For efficiency consideration, this returns pointer directly.
2000
 * @param graph The symbolic graph.
2001
 * @param symbol The execution node symbol reference.
2002
 * @param inputs The pointer to store input tensor symbols array.
2003
 * @param input_size The pointer to store the size of input tensor symbols array.
2004
 * @param outputs The pointer to store output tensor symbols array.
2005
 * @param output_size The pointer to store the size of output tensor symbols array.
2006
 */
2007
void ccv_nnc_graph_exec_symbol_io(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t symbol, const int** const inputs, int* const input_size, const int** const outputs, int* const output_size);
2008
/**
2009
 * Replace a input / output tensor symbol on an exec symbol.
2010
 * @param graph The symbolic graph.
2011
 * @param symbol The execution node symbol reference.
2012
 * @param old_symbol The old tensor symbol to be replaced.
2013
 * @param new_symbol The new tensor symbol on input / output.
2014
 */
2015
void ccv_nnc_graph_exec_symbol_replace_io(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t symbol, const ccv_nnc_tensor_symbol_t old_symbol, const ccv_nnc_tensor_symbol_t new_symbol);
2016
/**
2017
 * Which exec symbol this is connected to. For efficiency consideration, this returns pointer directly.
2018
 * @param graph The symbolic graph.
2019
 * @param symbol The execution node symbol reference.
2020
 * @param tos The pointer to store outgoing indexes of the execution nodes.
2021
 * @param to_size the pointer to store the number of outgoing indexes.
2022
 */
2023
void ccv_nnc_graph_exec_symbol_to(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t symbol, const int** const tos, int* const to_size);
2024
/**
2025
 * Find the size allocated on the opaque tensor arena structure.
2026
 * @param tensor_arena The tensor arena object generated through compilation.
2027
 * @return The total allocated size in bytes.
2028
 */
2029
CCV_WARN_UNUSED(uint64_t) ccv_nnc_tensor_arena_size(const ccv_nnc_tensor_arena_t* const tensor_arena);
2030
/**
2031
 * Query whether a set of sources are the ancestors to a set of destination nodes.
2032
 * @param graph The symbolic graph.
2033
 * @param sources The exec sources to check whether they can reach some of the destinations.
2034
 * @param source_size How many sources in the source list.
2035
 * @param destinations The exec destinations to check whether sources can reach.
2036
 * @param destination_size How many destinations in the destination list.
2037
 * @param bitmask Bit return value, each bit represents a source, and 1 meant it can reach some of the destinations.
2038
 */
2039
void ccv_nnc_symbolic_graph_sources_to_destinations(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size, uint64_t* const bitmask);
2040
/**
2041
 * Re-init the tensor arena with updated symbolic graph. This won't work if the symbolic graph requires
2042
 * larger tensors than what's available. Use this method properly, you can avoid re-compile a graph
2043
 * just because some tensor shape changed.
2044
 * @param tensor_arena The tensor arena object generated through compilation.
2045
 * @param graph The updated symbolic graph with different tensor shape.
2046
 * @return 0 if successful, -1 if the tensor arena doesn't have enough space to just re-init.
2047
 */
2048
int ccv_nnc_tensor_arena_reinit(ccv_nnc_tensor_arena_t* const tensor_arena, const ccv_nnc_symbolic_graph_t* const graph);
2049
/**
2050
 * Re-init the graph exec arena with updated symbolic graph. This updated some hyper-parameters of
2051
 * executions to match the updated symbolic graph. Note that this will try to keep the backend / algorithm
2052
 * selection from previous graph if possible (meaning if the command still match).
2053
 * @param graph_exec_arena The graph exec arena object provided mapping between symbolic and concrete graph.
2054
 * @param graph The concrete graph generated through compile method.
2055
 * @param symbolic_graph The updated symbolic graph.
2056
 */
2057
void ccv_nnc_graph_exec_reinit(ccv_nnc_graph_exec_arena_t* const graph_exec_arena, ccv_nnc_graph_t* const graph, const ccv_nnc_symbolic_graph_t* const symbolic_graph);
2058
/**
2059
 * Function prototype for tensor symbol creation callback.
2060
 */
2061
typedef void(*ccv_nnc_tensor_symbol_new_hook_f)(void* context, const ccv_nnc_tensor_symbol_t symbol, const ccv_nnc_tensor_param_t info, const char* const name);
2062
/**
2063
 * Hook into the call to ccv_nnc_tensor_symbol_new, return previous provided context if call into this method.
2064
 * @param graph The symbolic graph.
2065
 * @param hook The function to be called if a new tensor symbol created.
2066
 * @param context The context associated with the callback function.
2067
 * @param previous_hook Return the previous hook if provided.
2068
 * @return The previous context associated with the previous hook function.
2069
 */
2070
void* ccv_nnc_tensor_symbol_new_hook(ccv_nnc_symbolic_graph_t* const graph, ccv_nnc_tensor_symbol_new_hook_f hook, void* context, ccv_nnc_tensor_symbol_new_hook_f* previous_hook);
2071
/**
2072
 * Function prototype for tensor symbol alias creation callback.
2073
 */
2074
typedef void(*ccv_nnc_tensor_symbol_alias_new_hook_f)(void* context, const ccv_nnc_tensor_symbol_t symbol, const ccv_nnc_tensor_symbol_t from_symbol, const int ofs[CCV_NNC_MAX_DIM_ALLOC], const int stride[CCV_NNC_MAX_DIM_ALLOC], const ccv_nnc_tensor_param_t info, const char* const name);
2075
/**
2076
 * Hook into the call to ccv_nnc_tensor_symbol_alias_new, return previous provided context if call into this method.
2077
 * @param graph The symbolic graph.
2078
 * @param hook The function to be called if a new tensor symbol alias created.
2079
 * @param context The context associated with the callback function.
2080
 * @param previous_hook The function to be called if a new tensor symbol alias created.
2081
 * @return The previous context associated with the previous hook function.
2082
 */
2083
void* ccv_nnc_tensor_symbol_alias_new_hook(ccv_nnc_symbolic_graph_t* const graph, ccv_nnc_tensor_symbol_alias_new_hook_f hook, void* context, ccv_nnc_tensor_symbol_alias_new_hook_f* previous_hook);
2084
/**
2085
 * Set the pair reference for tensor symbols. Peer reference for tensor symbols has very specific meanings.
2086
 * For a backward pass involves sub-graphs. The commands in the sub-graph could reference to tensor symbols of
2087
 * a different graph (its forward pass graph). That is not allowed (two graph has no ancestral relationship
2088
 * cannot share a tensor symbol). So we create a new tensor symbol, but set the pair reference.
2089
 * @param graph The symbolic graph.
2090
 * @param tensor_symbol The tensor symbol in the current graph.
2091
 * @param pair_tensor_symbol The tensor symbol in the pair graph.
2092
 */
2093
void ccv_nnc_tensor_symbol_pair_with(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor_symbol, const ccv_nnc_tensor_symbol_t pair_tensor_symbol);
2094
/**
2095
 * Function prototype for execution node symbol creation callback.
2096
 */
2097
typedef void(*ccv_nnc_graph_exec_symbol_new_hook_f)(void* context, const ccv_nnc_graph_exec_symbol_t symbol, const ccv_nnc_cmd_t cmd, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, const ccv_nnc_tensor_symbol_t* const outputs, const int output_size, const char* const name);
2098
/**
2099
 * Hook into the call to ccv_nnc_graph_exec_symbol_new, return previous provided context if call into this method.
2100
 * @param graph The symbolic graph.
2101
 * @param hook The function to be called if a new execution node symbol created.
2102
 * @param context The context associated with the callback function.
2103
 * @param previous_hook The previous hook function associated with this operation.
2104
 * @return The previous context associated with the previous hook function.
2105
 */
2106
void* ccv_nnc_graph_exec_symbol_new_hook(ccv_nnc_symbolic_graph_t* const graph, ccv_nnc_graph_exec_symbol_new_hook_f hook, void* context, ccv_nnc_graph_exec_symbol_new_hook_f* previous_hook);
2107
/**
2108
 * Set the pair reference for exec. This is very similar to the one for concrete graph. A pair reference
2109
 * of a backward pass execution node is its forward pass counterpart.
2110
 * @param graph The symbolic graph.
2111
 * @param exec_symbol The execution node symbol in the current graph.
2112
 * @param pair_exec_symbol The pairing execution node symbol.
2113
 */
2114
void ccv_nnc_graph_exec_symbol_pair_with(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t exec_symbol, const ccv_nnc_graph_exec_symbol_t pair_exec_symbol);
2115
2116
/** @} */
2117
2118
/** @} */
2119
2120
/**
2121
 * @defgroup level_3_5 Level-3.5 API
2122
 * @{
2123
 */
2124
2125
/**
2126
 * @defgroup level_3_5_autograd Automatic Differentiation
2127
 * @{
2128
 */
2129
2130
/**
2131
 * Compute the backward graph, assuming the provided symbolic graph only contain the "forward" part from sources to destinations.
2132
 * This effectively is called the "autograd" or automatic differentiation process (specifically, "reverse AD") in other libs.
2133
 * For a expression y = f(x), to compute dx, x is the wrt_symbol, y is the f_symbol.
2134
 * @param graph The symbolic graph.
2135
 * @param f_symbols The tensor symbols array of the result (or loss).
2136
 * @param f_symbol_size The size of the f symbols array.
2137
 * @param wrt_symbols The tensor symbols array of the inputs.
2138
 * @param wrt_symbol_size The size of the wrt symbols array.
2139
 * @param sources The source execution nodes array for the computation.
2140
 * @param source_size The size of the source nodes array.
2141
 * @param destinations The destination execution nodes array for the computation.
2142
 * @param destination_size The size of the destination nodes array.
2143
 */
2144
void ccv_nnc_symbolic_graph_backward(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const f_symbols, const int f_symbol_size, const ccv_nnc_tensor_symbol_t* const wrt_symbols, const int wrt_symbol_size, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size);
2145
/**
2146
 * Get the symbol that contains the gradient. The list will be flushed if the ccv_nnc_symbolic_graph_backward function is called again.
2147
 * @param graph The symbolic graph.
2148
 * @param symbol The tensor symbol we want to retrieve its gradient (must be one of the wrt symbols or the f symbols).
2149
 * @return A tensor symbol that represents the gradient.
2150
 */
2151
CCV_WARN_UNUSED(ccv_nnc_tensor_symbol_t) ccv_nnc_tensor_symbol_for_backward(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t symbol);
2152
/**
2153
 * Get the execution node symbol for a tensor symbol. This used to retrieve the execution node for a gradient tensor symbol.
2154
 * @param graph The symbolic graph.
2155
 * @param symbol The tensor symbol that represents the gradient (must be one of the wrt symbols).
2156
 * @return A execution node symbol that generates the gradient.
2157
 */
2158
CCV_WARN_UNUSED(ccv_nnc_graph_exec_symbol_t) ccv_nnc_graph_exec_symbol_for_backward(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t symbol);
2159
2160
/** @} */
2161
2162
/**
2163
 * @defgroup level_3_5_while While Loop
2164
 * @{
2165
 */
2166
2167
/**
2168
 * @page symbolic_while Construct a "while" loop in a symbolic graph
2169
 *
2170
 * (This document was written in 2016, since then, Caffe2 added support for While loop (as sub-graph), similar
2171
 * implementation added for ONNX as well.)
2172
 *
2173
 * In NNC, a computation graph cannot allow cycles. Thus, there is no flexible way to express loops.
2174
 *
2175
 * A little survey on this problem:
2176
 *
2177
 * * Caffe2 supports specific type of recurrent neural network.
2178
 *
2179
 * * TensorFlow as it stands, supports while construct. Its while construct is very straightforward, a body and
2180
 *   a condition is provided, you can construct whatever graph as you want.
2181
 *
2182
 * * mxnet supports recurrent neural network by unrolling it into normal none-looped graph.
2183
 *
2184
 * * Theano supports "scan" ops, which is a terminable loop (with loop variant, known as sequence).
2185
 *
2186
 * * CNTK supports this with custom BrainScript. Within BrainScript, you can access the previous state in a
2187
 *   function, therefore, effectively supports calling a method multiple times (looping over).
2188
 *
2189
 * Of above, Caffe2 and mxnet gave up on supporting generic loop for performance reasons. TensorFlow supports
2190
 * generic while loop, with all the trouble it may introduce (see the Nested while loop bug in TensorFlow that
2191
 * recently fixed). Theano picked a point seems pretty sweet, although there are limitations. CNTK's BrainScript
2192
 * is a DSL, they can do whatever they want with the drawback now that they need to implement a language runtime.
2193
 * TensorFlow, Theano and CNTK all support auto-differentiation over the while loop with tape (Wengert list).
2194
 *
2195
 * A simple way to support loop is to support conditional jump. In fact, conditional jump is a more generic way
2196
 * of doing loops. However, if you put this into the consideration that fully differentiable computation graph
2197
 * wanna to be supported, it is terrible. With conditional jump, it is really hard for you to know which tensor
2198
 * is used where, thus keep track for reverse accumulation (backward propagation). There is no counter or
2199
 * whatsoever, it is pretty hard to trace back on which line is executed how many times. Compounding this with
2200
 * NNC's promise that as long as it shows on the graph can be "parallel" computed, it will be parallel computed,
2201
 * it is close to impossible to track if conditional jump used in its raw form. Certain restrictions must be
2202
 * applied to how to do the loop. The compromise comes from closer examination of NNC's preferences.
2203
 *
2204
 * NNC prefers to have the graph without cycles. It also prefers to be fully differentiable. Another important
2205
 * criteria is that most functions in NNC require SSA (Static Single Assignment) representation. With these in
2206
 * mind, supporting while loop has to be strict.
2207
 *
2208
 * Luckily, there are well-formalized way of supporting this in literature and practice. Because it is
2209
 * well-formalized, translating this into existing NNC implementation is actually pretty straightforward. We
2210
 * are going to introduce a special version of while loop. In literature that discussed about SSA, it may be
2211
 * called parameterized loop. For us, it works like this:
2212
 *
2213
 * To construct a while loop for existing NNC graph, you need to be able to separate the existing graph into
2214
 * two sub-graphs.
2215
 *
2216
 * The while-loop sub-graph (WL sub-graph) contains a set of incoming nodes (I-nodes), Condition false output
2217
 * nodes (CFO-nodes) and end nodes (E-nodes). Each set have its own properties, but in short, all incoming edges
2218
 * to the WL sub-graph connect to one of the I-nodes, but nothing else. All outgoing edges from the WL sub-graph
2219
 * connect to one of the CFO-nodes, but nothing else. A nodes can be either a I-node, CFO-node or E-node,
2220
 * non-exclusively.
2221
 *
2222
 * There are also 3 types of tensors used for all nodes in WL sub-graph: Input tensors (I-tensors) are tensors
2223
 * that are inputs to some nodes, and will never be outputs. Output tensors (O-tensors) are tensors that are
2224
 * outputs from some nodes, but never be inputs to any nodes. I-tensors can be outputs from some nodes that
2225
 * outside of WL sub-graph. O-tensors can be inputs to some nodes that outside of WL sub-graph. Internal
2226
 * tensors (IN-tensors) are not visible outside of WL sub-graph, therefore, they can be both inputs and outputs
2227
 * of some nodes inside the sub-graph. Some tensors can be feedback into the WL sub-graph, given either
2228
 * O-tensors or IN-tensors. A parameter map can be given in these cases to describe which maps to what.
2229
 *
2230
 * The way to drive a WL sub-graph like this: the WL sub-graph runs until all CFO-nodes are reached. At this
2231
 * point, the while_f condition is checked. If true, we continue until all the end-nodes are reached. At this
2232
 * point, we increase the counter, reconfigure the WL sub-graph with parameter map, and run from I-nodes all
2233
 * over again. When reached all CFO-nodes, the condition is checked again, if false, WL sub-graph terminates,
2234
 * and the graph continues from the nodes that are pointed by CFO-nodes.
2235
 *
2236
 * Given these constraints, doing automatic differentiation is not that hard any more. A WL sub-graph, from
2237
 * the whole graph's point of view, is just a giant command supports both forward / backward operations, with
2238
 * some extra information passed around in the form of userdata (tape).
2239
 *
2240
 * For WL sub-graph, we can continue to leverage the compile / backward function that already written for
2241
 * symbolic graph as well.
2242
 *
2243
 * For compile function, we just need to take care of parameter maps (these need to be converted into binded
2244
 * tensors).
2245
 *
2246
 * For backward function, we need to convert parameter maps from assigner (thus, y = x) to accumulator (x += y).
2247
 *
2248
 * This function will replace the nodes that it affects to one sub-graph node. Thus, how to drive this
2249
 * sub-graph is opaque. Its backward form is opaque as well.
2250
 *
2251
 * There are no connection between its nodes and the outside graph nodes other than the three sets:
2252
 *
2253
 * 1. Incoming nodes, the set of nodes that contains the incoming edges from outside, they cannot have edges
2254
 *    points by inside nodes. The sub-graph computation starts from these incoming nodes;
2255
 *
2256
 * 2. Condition false output nodes, when condition is false, we will break out of this while loop, these
2257
 *    nodes pointing to the outside nodes, but no inside nodes;
2258
 *
2259
 * 3. End nodes, the set of nodes that marks the end of the while body, and after these nodes are executed,
2260
 *    we will return to the incoming nodes. These end nodes shouldn't have any edges pointing to inside nodes
2261
 *    (OK if end nodes are condition true output nodes as well);
2262
 *
2263
 * Since these will become a sub-graph (which, to its owner graph, just simple "node"), it will have inputs
2264
 * and outputs. Besides that, the loop body needs to be parameterized to be SSA compliant (see:
2265
 * https://www.cs.cmu.edu/~fp/courses/15411-f13/lectures/06-ssa.pdf). Thus, a list of body parameters need to
2266
 * be provided.
2267
 */
2268
2269
/**
2270
 * @defgroup level_3_5_while_essentials While Loop Essentials
2271
 * @{
2272
 */
2273
2274
/**
2275
 * The given tensors contains all the common / input / output tensors specified in the sub-graph.
2276
 */
2277
typedef int(*ccv_nnc_graph_while_f)(ccv_nnc_tensor_t* const* const inputs, const int input_size, const void* const data);
2278
/**
2279
 * Create a tensor tape that can be used to record for while loop or case..of.
2280
 * @return A ccv_nnc_tensor_tape_t pointer.
2281
 */
2282
CCV_WARN_UNUSED(ccv_nnc_tensor_tape_t*) ccv_nnc_tensor_tape_new(void);
2283
/**
2284
 * Deallocate the tensor tape and all the memory it allocated.
2285
 * @param tape The tensor tape object.
2286
 */
2287
void ccv_nnc_tensor_tape_free(ccv_nnc_tensor_tape_t* const tape);
2288
/**
2289
 * The API to operate on the symbolic graph is more involved than the concrete graph for while loops.
2290
 * The reason is because symbolic graph operates in SSA form (static single assignment), therefore, the while
2291
 * loops for the symbolic graph has to be parameterized.
2292
 * @param graph The symbolic graph.
2293
 * @param cmd The command idenfitier, can be either CCV_NNC_GRAPH_FORWARD or CCV_NNC_GRAPH_BACKWARD
2294
 * @param while_graph The sub-graph to run the while loop.
2295
 * @param name The name of the while loop. Optional.
2296
 * @return A while loop execution symbol (backed by a sub-graph) of the giving graph.
2297
 */
2298
ccv_nnc_graph_exec_symbol_t ccv_nnc_symbolic_graph_while(ccv_nnc_symbolic_graph_t* const graph, const uint32_t cmd, ccv_nnc_symbolic_graph_t* const while_graph, const char* const name);
2299
/**
2300
 * Set the expression to be evaluated, and at which nodes to be evaluated.
2301
 * @param while_graph The symbolic graph that will run the while loop.
2302
 * @param while_expr The function pointer to the expression.
2303
 * @param while_data A custom data provided to the expression evaluation function.
2304
 * @param inputs The input tensor symbols array to the expression evaluation function.
2305
 * @param input_size The size of the input tensor symbols array.
2306
 * @param breakpoints The execution node symbols at which the while loop will pause, evaluate the expression, and choose to either break out or continue.
2307
 * @param breakpoint_size The size of the execution node symbols array.
2308
 */
2309
void ccv_nnc_symbolic_graph_set_while_expr(ccv_nnc_symbolic_graph_t* const while_graph, const ccv_nnc_graph_while_f while_expr, const void* const while_data, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, const ccv_nnc_graph_exec_symbol_t* const breakpoints, const int breakpoint_size);
2310
/**
2311
 * Set the loop carry parameters when reuse. (parameterized loop, these will be carried over to the next loop).
2312
 * @param while_graph The symbolic graph that will run the while loop.
2313
 * @param symbol_map A pair of tensor symbols array, where the source tensor symbol is the output tensor symbol in this loop, the destination tensor symbol is the input tensor symbol in the next loop.
2314
 * @param symbol_map_size The size of the symbol map array.
2315
 */
2316
void ccv_nnc_symbolic_graph_set_carry_overs(ccv_nnc_symbolic_graph_t* const while_graph, const ccv_nnc_tensor_symbol_map_t* const symbol_map, const int symbol_map_size);
2317
/**
2318
 * Retrieve the special (magical) tensor symbol that retains the while loop counter (thus, dimension of 1x1x1, CCV_64S type).
2319
 * @param while_graph The symbolic graph that will run the while loop.
2320
 * @return A tensor symbol represents the implicit loop count.
2321
 */
2322
CCV_WARN_UNUSED(ccv_nnc_tensor_symbol_t) ccv_nnc_tensor_symbol_for_while_count(const ccv_nnc_symbolic_graph_t* const while_graph);
2323
/**
2324
 * Extract the sub-graph of the while loop from a symbol.
2325
 * @param graph The symbolic graph.
2326
 * @param while_symbol The execution node symbol.
2327
 * @return The sub-graph that represents a while loop.
2328
 */
2329
CCV_WARN_UNUSED(ccv_nnc_symbolic_graph_t*) ccv_nnc_symbolic_graph_from_while_symbol(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t while_symbol);
2330
/**
2331
 * Constructing looped concrete graph. Note that this interface is a little bit simpler than the one for symbolic
2332
 * graph. The reason is that a concrete graph operates on allocated tensors, thus, there is no mapping of tensor
2333
 * symbols between the parent graph and the while graph. (The reason to have a mapping in symbolic graphs is to
2334
 * constraint the variable leaking between the sub graph and parent graph).
2335
 * @param graph The concrete graph.
2336
 * @param cmd The command idenfitier, can be either CCV_NNC_GRAPH_FORWARD or CCV_NNC_GRAPH_BACKWARD
2337
 * @param while_graph The sub-graph to run the while loop.
2338
 * @return A execution node that represents the sub-graph.
2339
 */
2340
CCV_WARN_UNUSED(ccv_nnc_graph_exec_t) ccv_nnc_graph_while(ccv_nnc_graph_t* const graph, const uint32_t cmd, ccv_nnc_graph_t* const while_graph);
2341
/**
2342
 * Set the evaluated expression for the while loop. The while loop will break out if the expression evaluates to 0.
2343
 * @param while_graph The concrete graph that will run the while loop.
2344
 * @param while_expr The function pointer to the expression.
2345
 * @param while_data A custom data provided to the expression evaluation function.
2346
 * @param inputs The input tensors array to the expression evaluation function.
2347
 * @param input_size The size of the input tensors array.
2348
 * @param breakpoints The execution nodes at which the while loop will pause, evaluate the expression, and choose to either break out or continue.
2349
 * @param breakpoint_size The size of the execution nodes array.
2350
 */
2351
void ccv_nnc_graph_set_while_expr(ccv_nnc_graph_t* const while_graph, const ccv_nnc_graph_while_f while_expr, const void* const while_data, ccv_nnc_tensor_t* const* const inputs, const int input_size, const ccv_nnc_graph_exec_t* const breakpoints, const int breakpoint_size);
2352
/**
2353
 * Get the special tensor for the while loop count. It contains one uint64_t value. We keep an implicit count
2354
 * when evaluate the while loop and you can access it with this tensor.
2355
 * @param while_graph The concrete graph that will run the while loop.
2356
 * @return A special tensor that you can retrieve the loop count at .data.i64[0].
2357
 */
2358
CCV_WARN_UNUSED(ccv_nnc_tensor_t) ccv_nnc_tensor_for_while_count(const ccv_nnc_graph_t* const while_graph);
2359
/**
2360
 * Retrieve the sub-graph from a execution node.
2361
 * @param graph The concrete graph.
2362
 * @param exec The execution node represents the sub-graph.
2363
 * @return The sub-graph.
2364
 */
2365
CCV_WARN_UNUSED(ccv_nnc_graph_t*) ccv_nnc_graph_from_while_exec(const ccv_nnc_graph_t* const graph, ccv_nnc_graph_exec_t exec);
2366
2367
/** @} */
2368
2369
/**
2370
 * @defgroup level_3_5_while_others While Loop Others
2371
 * @{
2372
 */
2373
2374
/**
2375
 * For a given tape on a given graph, update the input / output tensors so new version will be created (if needed).
2376
 * @param tape The tensor tape object.
2377
 * @param graph The concrete graph this tensor tape is executing in.
2378
 * @param input_flags The flags associated with input tensors.
2379
 * @param inputs The input tensors.
2380
 * @param input_size The size of input tensors array.
2381
 * @param output_flags The flags associated with output tensors.
2382
 * @param outputs The output tensors.
2383
 * @param output_size The size of output tensors array.
2384
 */
2385
void ccv_nnc_tensor_tape_io(ccv_nnc_tensor_tape_t* const tape, const ccv_nnc_graph_t* const graph, const int* const input_flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, const int* const output_flags, ccv_nnc_tensor_t* const* const outputs, const int output_size);
2386
/**
2387
 * Retrieve the number we associated with the execution node that recorded on the tape for a particular run of the graph.
2388
 * @param tape The tensor tape object.
2389
 * @param graph The concrete graph this tensor tape is executing in.
2390
 * @param exec The execution node.
2391
 * @return The number associated with the execution node.
2392
 */
2393
uint64_t ccv_nnc_tensor_tape_numbering(ccv_nnc_tensor_tape_t* const tape, const ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec);
2394
/**
2395
 * Set the number we associated with the execution node that recorded on the tape for a particular run of the graph.
2396
 * @param tape The tensor tape object.
2397
 * @param graph The concrete graph this tensor tape is executing in.
2398
 * @param exec The execution node.
2399
 * @param numbering The number associated with the execution node.
2400
 */
2401
void ccv_nnc_tensor_tape_set_numbering(ccv_nnc_tensor_tape_t* const tape, ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, const uint64_t numbering);
2402
/**
2403
 * Augmented tensor to run a graph with while loop (An obvious example is dynamic RNN).
2404
 */
2405
typedef struct ccv_nnc_tensor_multiview_s {
2406
  // This is an augmented ccv_nnc_tensor_view_t
2407
  // Namely, it can point to multiple versions of tensors.
2408
  int type; // This type is CCV_NNC_TENSOR_MULTI_VIEW
2409
  // kind specified how the multi-version tensors stored.
2410
  // See the comment on the follow up enums.
2411
  uint8_t kind;
2412
  uint16_t repeat;
2413
  intptr_t anchor; // on which graph this multi-view tensor is wrapped. This helps to determine on which level the multi-view tensor should be unwrapped.
2414
  // If this tensor points to a tensor view, data.u8 - offset is the real pointer start.
2415
  off_t offset;
2416
  struct ccv_nnc_tensor_multiview_s* p; // If this is wrapped with another multiview tensor. Get to the parent one.
2417
  ccv_nnc_tensor_t* it; // Current tensor (tensor in use), this is updated along with the graph computation.
2418
  // This is useful because by just traverse tv, I can get the latest up-to-date reference to this multi-view tensor.
2419
  ccv_array_t* sp; // Synchronized tensor views. This corresponds to ccv_nnc_tensor_synchronize_to_multiview method, that records all the tensors registered for updates.
2420
  ccv_nnc_tensor_t* _inline_data[4];
2421
  ccv_nnc_tensor_t** _heap_data;
2422
} ccv_nnc_tensor_multiview_t;
2423
3.40k
#define CCV_NNC_MULTIVIEW_DATA(x) ((x)->_heap_data ? 
(x)->_heap_data0
: (x)->_inline_data)
2424
234
#define CCV_NNC_MULTIVIEW_PHI (intptr_t)0x1 /**< Denote this is a phi multi-view tensor. */
2425
2426
enum {
2427
  CCV_NNC_MULTIVIEW_K0N = 0, /**< All of them are repeated. */
2428
  CCV_NNC_MULTIVIEW_K1N = 1, /**< The first one is the first, the second one starts to repeat. (0111111...) */
2429
};
2430
#define CCV_NNC_MULTIVIEW_K01(x) ((x)->kind == CCV_NNC_MULTIVIEW_K0N && (x)->repeat == 1)
2431
/**
2432
 * Setup a tensor multiview with a given set of tensors.
2433
 * A multiview tensor point to a list of tensors, and its access depends on the loop count.
2434
 * For example, if we have a multiview tensor with list of [a, b, c, d], and kind is 1N, repeat is 3.
2435
 * For loop count 0, 1, 2, 3, 4, 5, 6, the corresponding tensors used will be a, b, c, d, b, c. If kind
2436
 * is 0N, and repeat is 4, it will be a, b, c, d, a, b.
2437
 * @param data[] The pointer to the list of tensors the multiview object can point to.
2438
 * @param kind Can be either CCV_NNC_MULTIVIEW_K0N or CCV_NNC_MULTIVIEW_K1N, basically whether to keep the initial tensor.
2439
 * @param repeat The length of the repeat.
2440
 * @param graph Which graph this multiview object attaches to.
2441
 * @param tensor_multiview The tensor multiview object to be updated.
2442
 */
2443
void ccv_nnc_tensor_multiview(ccv_nnc_tensor_t* data[], const uint8_t kind, const uint16_t repeat, const ccv_nnc_graph_t* const graph, ccv_nnc_tensor_multiview_t* const tensor_multiview);
2444
/**
2445
 * Since tensor_multiview will never be allocated with *_new method, the *_free method simply frees anything that is dynamically allocated afterwards (such as the reference items).
2446
 * @param tensor_multiview The tensor multiview object to be deallocated.
2447
 */
2448
void ccv_nnc_tensor_multiview_free(const ccv_nnc_tensor_multiview_t tensor_multiview);
2449
/**
2450
 * Setup a tensor as a reference to a tensor multiview, thus, when tensor multiview's tu (current tensor) updates, the tensor reference's data.u8 will get update as well (point to the same memory region as the tu).
2451
 * @param tensor_multiview The tensor multiview object.
2452
 * @param tensor The tensor that will be updated along with the multiview object.
2453
 */
2454
void ccv_nnc_tensor_synchronize_to_multiview(ccv_nnc_tensor_multiview_t* const tensor_multiview, ccv_nnc_tensor_t* const tensor);
2455
/**
2456
 * Send broadcast to subscribers of the multiview, call this in the beginning of exec.
2457
 * @param tensor_multiview The tensor multiview object.
2458
 */
2459
void ccv_nnc_tensor_multiview_synchronize(ccv_nnc_tensor_multiview_t* const tensor_multiview);
2460
2461
/** @} */
2462
2463
/** @} */
2464
2465
/**
2466
 * @defgroup level_3_5_case_of Branching
2467
 * @{
2468
 */
2469
2470
/**
2471
 * @page symbolic_switch Construct "switch" control structure in symbolic graph
2472
 *
2473
 * Here I use the keyword case_of. To provide a "switch" control structure within NNC has some nice properties
2474
 * even though you can simulate this with a while loop technically.
2475
 *
2476
 * 1. More optimal memory allocation: with "switch" control structure, memory can be multiplexed for each code
2477
 *    path because they are mutually exclusive.
2478
 *
2479
 * 2. No tape should be used within each branch: if we simulate with a "while" loop, any results from within
2480
 *    the "switch" statement has to be kept on the tape, which is inefficient because you don't need any tape
2481
 *    for the "switch" statement other than record which path it is taken.
2482
 *
2483
 * The particular "switch" control structure provided here is a multi-way structured "switch". Each branch is a
2484
 * sub-graph, so it is well-scoped. A node branch out based on the case_of condition return value to either of
2485
 * the branch (numbering from 0 to n, -1 means no path taken). If no path taken, the output tensors will be
2486
 * assigned with the default tensors and continue. Otherwise the computation within the sub-graph will be
2487
 * carried out and the output tensors will be assigned with the tensors specified within that sub-graph and
2488
 * continue.
2489
 *
2490
 * If we want to consider speculative execution in the future, we need to revisit our memory allocation scheme.
2491
 */
2492
2493
/**
2494
 * Function prototype to evaluate a branch expression.
2495
 */
2496
typedef int(*ccv_nnc_graph_case_of_f)(ccv_nnc_tensor_t* const* const inputs, const int input_size, const void* const data);
2497
/**
2498
 * Create a new case..of execution node symbol.
2499
 * @param graph The symbolic graph.
2500
 * @param cmd The command idenfitier, can be either CCV_NNC_GRAPH_FORWARD or CCV_NNC_GRAPH_BACKWARD
2501
 * @param inputs The input tensor symbols array for the expression.
2502
 * @param input_size The size of the input tensor symbols array.
2503
 * @param symbol_map The pair of tensor symbols array where the source is the input tensor symbol and the destination is the output tensor symbol.
2504
 * @param symbol_map_size The size of symbol map array.
2505
 * @param name The name of the case..of graph. Optional.
2506
 * @return A execution node symbol that represents the case..of graph.
2507
 */
2508
CCV_WARN_UNUSED(ccv_nnc_graph_exec_symbol_t) ccv_nnc_symbolic_graph_case_of_new(ccv_nnc_symbolic_graph_t* const graph, const uint32_t cmd, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, const ccv_nnc_tensor_symbol_map_t* const symbol_map, const int symbol_map_size, const char* const name);
2509
/**
2510
 * Set the expression to be evaluated when choose which sub-graph to branch to.
2511
 * @param graph The symbolic graph.
2512
 * @param exec The execution node symbol that represents the case..of graph.
2513
 * @param case_of The function pointer to evaluate.
2514
 * @param case_of_data The data associated with the function pointer.
2515
 */
2516
void ccv_nnc_symbolic_graph_set_case_of_expr(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t exec, ccv_nnc_graph_case_of_f case_of, const void* case_of_data);
2517
/**
2518
 * Set a sub-graph as one of the branch for the case..of graph.
2519
 * @param graph The symbolic graph.
2520
 * @param symbol The execution node symbol that represents the case..of graph.
2521
 * @param case_graph The sub-graph for one of the branch.
2522
 * @param case_of The index assigned to this sub-graph (expression returns this index to determine which sub-graph to execute).
2523
 * @param symbol_map The pair of tensor symbols array where the source is the output tensor symbol of the sub-graph, and the destination is the output tensor symbol of the execution node symbol.
2524
 * @param symbol_map_size The size of the symbol map array.
2525
 */
2526
void ccv_nnc_symbolic_graph_set_case_of(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t symbol, ccv_nnc_symbolic_graph_t* const case_graph, const int case_of, const ccv_nnc_tensor_symbol_map_t* const symbol_map, const int symbol_map_size);
2527
/**
2528
 * Create a new case..of execution node.
2529
 * @param graph The concrete graph.
2530
 * @param cmd The command idenfitier, can be either CCV_NNC_GRAPH_FORWARD or CCV_NNC_GRAPH_BACKWARD
2531
 * @param inputs The input tensors array supplied to the expression.
2532
 * @param input_size The size of the input tensors array.
2533
 * @param outputs The output tensors array.
2534
 * @param output_size The size of the output tensors array.
2535
 * @return A execution node that represents the case..of graph.
2536
 */
2537
CCV_WARN_UNUSED(ccv_nnc_graph_exec_t) ccv_nnc_graph_case_of_new(ccv_nnc_graph_t* const graph, const uint32_t cmd, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size);
2538
/**
2539
 * Set the expression to be evaluated when choose which sub-graph to branch to.
2540
 * @param graph The concrete graph.
2541
 * @param exec The execution node that represents the case..of graph.
2542
 * @param case_of The function pointer to evaluate.
2543
 * @param case_of_data The data associated with the function pointer.
2544
 * @param offset A integer added to the expression output to help choose the index. Thus, real index = expression index + offset.
2545
 */
2546
void ccv_nnc_graph_set_case_of_expr(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, ccv_nnc_graph_case_of_f case_of, const void* case_of_data, const int offset);
2547
/**
2548
 * Set a sub-graph as one of the branch for the case..of graph.
2549
 * @param graph The concrete graph.
2550
 * @param exec The execution node that represents the case..of graph.
2551
 * @param case_graph The sub-graph for one of the branch.
2552
 * @param case_of The index assigned to this sub-graph (expression returns this index + offset to determine which sub-graph to execute).
2553
 */
2554
void ccv_nnc_graph_set_case_of(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, ccv_nnc_graph_t* const case_graph, const int case_of);
2555
2556
/** @} */
2557
2558
/**
2559
 * @defgroup level_3_5_minimizer Gradient-based Optimization
2560
 * @{
2561
 */
2562
2563
/**
2564
 * This is the comparable part to Caffe's solver or TensorFlow's optimizer. It took a step further than just
2565
 * compute the gradient, but also apply the gradient to update parameters to minimize the loss.
2566
 * @param graph The symbolic graph.
2567
 * @param minimizer The wrapped command that represents a particular optimization strategy.
2568
 * @param losses The tensor symbols array of losses.
2569
 * @param loss_size The size of the loss symbols array.
2570
 * @param parameters The parameter tensor symbols to optimize.
2571
 * @param parameter_size The size of parameter symbols array.
2572
 * @param inputs The additional input symbols we compute gradient against.
2573
 * @param input_size The size of the additional input symbols array.
2574
 * @param sources The source execution nodes array.
2575
 * @param source_size The size of source nodes array.
2576
 * @param destinations The destinations execution nodes array.
2577
 * @param destination_size The size of destination nodes array.
2578
 * @param gradients The tensor symbols that represents the gradient for update, should be the same size as the parameters array + input array size. This can be 0 (optional).
2579
 * @param updated_parameters The tensor symbols that represents the updated parameters, should be the same size as the parameters array.
2580
 * @param saved_aux The tensor symbols that is helpful for particular optimization strategy.
2581
 * @param graph_exec_symbols The execution node symbols for the updates, should be the same size as the parameters array.
2582
 */
2583
void ccv_nnc_symbolic_graph_minimize(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_cmd_t minimizer, const ccv_nnc_tensor_symbol_t* const losses, const int loss_size, const ccv_nnc_tensor_symbol_t* const parameters, const int parameter_size, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size, ccv_nnc_tensor_symbol_t* const gradients, ccv_nnc_tensor_symbol_t* const updated_parameters, ccv_nnc_tensor_symbol_map_t* const saved_aux, ccv_nnc_graph_exec_symbol_t* const graph_exec_symbols);
2584
/**
2585
 * The number of extra saved aux per parameter only depends on the commands. For example, SGD with momentum requires 1 aux (for momentum).
2586
 * Others require more.
2587
 * @param minimizer The wrapped command that represents a particular optimization strategy.
2588
 * @return the number of saved aux per parameter.
2589
 */
2590
CCV_WARN_UNUSED(int) ccv_nnc_minimizer_saved_aux_size(const ccv_nnc_cmd_t minimizer);
2591
2592
/** @} */
2593
2594
/**
2595
 * @defgroup level_3_5_simplify Graph Simplification
2596
 * @{
2597
 */
2598
2599
/**
2600
 * @page symbolic_simplify Symbolic graph simplification
2601
 *
2602
 * We make a distinction between graph simplifications and optimizations (autotune).
2603
 *
2604
 * Simplification: rewrite the graph and the resulting graph will have less nodes. This is done on the symbolic
2605
 * graph only. Passes that is "simplification" include pruning, common sub-expression eliminations, constant
2606
 * folding etc.
2607
 *
2608
 * Optimization (autotune): graph optimization can have more objectives. The most obvious objective is to reduce
2609
 * computation time. For symbolic graph, passes that reduces computation time include data layout optimizations,
2610
 * auto parallel etc (in normal optimization implementations, they have a cost model to guide the optimization.
2611
 * NNC's implementation uses a cost database that profiles the time cost on the device to guide the optimization.
2612
 * We call it autotune to distinguish with the normal optimization passes because we need device profile data).
2613
 * There could be other objectives, for example, in many deep learning applications, reducing memory footprint
2614
 * can be desirable. However, as always in computer science, memory and time is a typical trade-off. Memory
2615
 * optimization almost always results longer computation time, and the objective is to trade between these two
2616
 * with a bias term (in other frameworks such as TensorFlow, the memory optimizer uses a list of "cheap ops" to
2617
 * bias between the time and memory footprint).
2618
 *
2619
 * For graph optimizations, it can happen on both the symbolic graph level as well as the concrete graph level.
2620
 * For NNC, symbolic graph is already very explicit (data layout, device allocation and data transfer between
2621
 * devices / nodes, even the command backend can all be specified on the symbolic graph), however, some
2622
 * information is unknown until it is compiled down to concrete graph (tensor addresses, tensor initialization
2623
 * etc.), and since graph optimizations need all the information to optimize. Keeping the flexibility to do
2624
 * optimization on both symbolic and concrete graph level seems reasonable.
2625
 */
2626
2627
enum {
2628
  /**
2629
   * If two commands generated the same outputs, all the places where the newer output used will be replaced by
2630
   * the old output. Later on the graph pruning stage, the command that generate the newer output will be
2631
   * eliminated.
2632
   */
2633
  CCV_NNC_SIMPLIFY_COMMON_SUBEXPRESSION_ELIMINATION,
2634
  /**
2635
   * For the given outputs, eliminate unused input tensors, and then eliminate graph execs that don't contribute
2636
   * to the outputs.
2637
   */
2638
  CCV_NNC_SIMPLIFY_GRAPH_PRUNING,
2639
  /**
2640
   * For CCV_NNC_DATA_TRANSFER, if the input / output is the same (on the same device, no alias), we can skip.
2641
   * Similarly, if it is on the same device, but alias of some, for some cases we can skip as well (if neither
2642
   * are carry overs, bypasses etc.)
2643
   */
2644
  CCV_NNC_SIMPLIFY_DATA_TRANSFER_OPT,
2645
  /**
2646
   * Combine a few smaller ops into bigger one. For now, this functionality is limited. I can only address ops
2647
   * that are sequential.
2648
   */
2649
  CCV_NNC_SIMPLIFY_OPS_FUSION,
2650
  // CCV_NNC_SIMPLIFY_CONSTANT_FOLDING, // This currently is not supported, because we don't have efficient way to express constant in symbolic graph.
2651
};
2652
/**
2653
 * Simplify a graph with given list of passes, in that particular order.
2654
 * Note, when a graph is simplified, its sources / destinations are changed as well.
2655
 * @param graph The symbolic graph.
2656
 * @param passes The array of passes we are going to apply.
2657
 * @param pass_size The size of the passes array.
2658
 * @param binds The tensor symbols we may bind to an input later (it doesn't prevent pruning any execution nodes).
2659
 * @param bind_size The size of the bind array.
2660
 * @param outputs The output tensor symbols we want to retain (we are going to prune any execution nodes that is not related to these outputs).
2661
 * @param output_size The size of the output array.
2662
 * @param sources The source execution node symbols array.
2663
 * @param source_size The size of source node symbols array.
2664
 * @param destinations The destinations execution node symbols array.
2665
 * @param destination_size The size of destination node symbols array.
2666
 */
2667
void ccv_nnc_symbolic_graph_simplify(ccv_nnc_symbolic_graph_t* const graph, const int* const passes, const int pass_size, const ccv_nnc_tensor_symbol_t* const binds, const int bind_size, const ccv_nnc_tensor_symbol_t* const outputs, const int output_size, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size);
2668
2669
/** @} */
2670
2671
/**
2672
 * @defgroup level_3_5_parallel Automatic Graph Parallelization
2673
 * @{
2674
 */
2675
2676
enum {
2677
  /**
2678
   * Op for reducer / allreducer. Currently only supports sum.
2679
   */
2680
  CCV_NNC_PARALLEL_REDUCE_OP_SUM,
2681
};
2682
2683
/**
2684
 * Turn the existing graph to be capable to run on several devices with different data inputs at parallel.
2685
 * With this method, additional tensor symbols will be created that runs on different devices. That has
2686
 * been said, there are concepts of "broadcast" and "reduce". "broadcast" tensor symbols will be copied to
2687
 * different devices, while "reduce" tensors will be summed from different devices to the default device.
2688
 * "allreducer" concept is simpler. The allreduce operation will be performed on these tensors and then
2689
 * be used on different devices again.
2690
 *
2691
 * Limitations: right now, the way to reduce / allreduce tensors only supports "sum". The data parallel
2692
 * only supports GPU, thus, the nodes will be duplicated are GPU computations and GPU memory backed
2693
 * tensors. Also, right now, the tensors to be broadcasted / allreduced / reduced should have no aliases.
2694
 *
2695
 * @param graph The symbolic graph.
2696
 * @param parallel Number of devices we want to run on. 0 will use all devices available. 1 will skip.
2697
 * @param broadcasts The tensor symbols to be broadcasted.
2698
 * @param broadcast_size The size of the broadcast tensor symbols array.
2699
 * @param allreducers The tensor symbols that to be allreduced.
2700
 * @param allreducer_size The size of the allreducer tensor symbols array.
2701
 * @param allreducer_outs Return the tensor symbols for allreducers that before allreduced. Optional, 0
2702
 *        means I don't care about this.
2703
 * @param reducers The tensor symbols to be reduced.
2704
 * @param reducer_size The size of the reducer tensor symbols array.
2705
 * @param reducer_outs Return the tensor symbols for reducers that after reduced. Optional, 0 means
2706
 *        I don't care about this.
2707
 * @param reduce_op_type The reduce op for reducer / allreducer.
2708
 * @param sources The source execution node symbols array.
2709
 * @param source_size The size of source node symbols array.
2710
 * @param destinations The destinations execution node symbols array.
2711
 * @param destination_size The size of destination node symbols array.
2712
 */
2713
void ccv_nnc_symbolic_graph_data_parallel(ccv_nnc_symbolic_graph_t* const graph, const int parallel, const ccv_nnc_tensor_symbol_t* const broadcasts, const int broadcast_size, const ccv_nnc_tensor_symbol_t* const allreducers, const int allreducer_size, ccv_nnc_tensor_symbol_t* const allreducer_outs, const ccv_nnc_tensor_symbol_t* const reducers, const int reducer_size, ccv_nnc_tensor_symbol_t* const reducer_outs, const int reduce_op_type, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size);
2714
/**
2715
 * Get the symbol that is on a device other than the default one. The list will be flushed if the
2716
 * ccv_nnc_symbolic_graph_data_parallel function is called again.
2717
 * @param graph The symbolic graph.
2718
 * @param symbol The tensor symbol we want to retrieve its counterpart on a different device.
2719
 * @param device_id The device numeric id for this symbol.
2720
 * @return A tensor symbol that is on a different device.
2721
 */
2722
CCV_WARN_UNUSED(ccv_nnc_tensor_symbol_t) ccv_nnc_tensor_symbol_copy(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t symbol, const int device_id);
2723
/**
2724
 * Set corresponding symbol for this symbol on another device. Thus, someone else can query this
2725
 * later with ccv_nnc_tensor_symbol_copy
2726
 * @param graph The symbolic graph.
2727
 * @param symbol The tensor symbol we want to set its counterpart on a different device.
2728
 * @param device_id The device numeric id for this symbol.
2729
 * @param copy The tensor symbol counterpart on a different device.
2730
 */
2731
void ccv_nnc_tensor_symbol_set_copy(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t symbol, const int device_id, const ccv_nnc_tensor_symbol_t copy);
2732
/**
2733
 * Get the execution node that is on a device other than the default one. The list will be flushed
2734
 * if the ccv_nnc_symbolic_graph_data_parallel function is called again.
2735
 * @param graph The symbolic graph.
2736
 * @param symbol The execution node we want to retrieve its counterpart on a different device.
2737
 * @param device_id The device numeric id for this symbol.
2738
 * @return A execution node that is on a different device.
2739
 */
2740
CCV_WARN_UNUSED(ccv_nnc_graph_exec_symbol_t) ccv_nnc_graph_exec_symbol_copy(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t symbol, const int device_id);
2741
/**
2742
 * Set corresponding symbol for this symbol on another device. Thus, someone else can query this
2743
 * later with ccv_nnc_graph_exec_symbol_copy
2744
 * @param graph The symbolic graph.
2745
 * @param symbol The execution node we want to set its counterpart on a different device.
2746
 * @param device_id The device numeric id for this symbol.
2747
 * @param copy The execution node counterpart on a different device.
2748
 */
2749
void ccv_nnc_graph_exec_symbol_set_copy(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t symbol, const int device_id, const ccv_nnc_graph_exec_symbol_t copy);
2750
2751
/** @} */
2752
2753
/**
2754
 * @defgroup level_3_5_memory_compression Memory Compression
2755
 * @{
2756
 */
2757
2758
/**
2759
 * Apply LSSC memory compression algorithm to the convolution activations. This will compress the activation
2760
 * layer for convolution, therefore, save the overall memory usage during training time.
2761
 *
2762
 * @param graph The symbolic graph.
2763
 * @param sources The source execution node symbols array.
2764
 * @param source_size The size of source node symbols array.
2765
 * @param destinations The destinations execution node symbols array.
2766
 * @param destination_size The size of destination node symbols array.
2767
 */
2768
void ccv_nnc_symbolic_graph_memory_compression(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size);
2769
2770
/** @} */
2771
2772
/**
2773
 * @defgroup level_3_5_memory_reduction Memory Reduction
2774
 * @{
2775
 */
2776
2777
/**
2778
 * Investigate memory reduction opportunities on the graph. Right now, we are looking at datatype
2779
 * conversions that resulted larger datatype, and these larger ones kept during backward pass.
2780
 * For these cases, we will keep the smaller one instead, and reconvert to larger datatype prior
2781
 * to the backward pass.
2782
 *
2783
 * @param graph The symbolic graph.
2784
 * @param sources The source execution node symbols array.
2785
 * @param source_size The size of source node symbols array.
2786
 * @param destinations The destinations execution node symbols array.
2787
 * @param destination_size The size of destination node symbols array.
2788
 */
2789
void ccv_nnc_symbolic_graph_memory_reduction(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size);
2790
2791
/** @} */
2792
2793
/** @} */
2794
2795
/**
2796
 * @defgroup level_4 Level-4 API
2797
 * @{
2798
 */
2799
2800
/**
2801
 * Opaque pointer to the dynamic graph structure.
2802
 */
2803
typedef struct ccv_nnc_dynamic_graph_s ccv_nnc_dynamic_graph_t;
2804
2805
/**
2806
 * Masquerade this as if it is a on stack variable, there is a heap allocation but managed by the dynamic graph.
2807
 * The fact that ccv_nnc_tensor_variable_t is a pointer is an implementation detail. It should be treated as an
2808
 * opaque type throughout. We may later extends this to be some on-stack information or even just a uid.
2809
 */
2810
typedef struct ccv_nnc_tensor_variable_s* ccv_nnc_tensor_variable_t;
2811
2812
/**
2813
 * Create a dynamic graph.
2814
 * @return A newly created dynamic graph.
2815
 */
2816
CCV_WARN_UNUSED(ccv_nnc_dynamic_graph_t*) ccv_nnc_dynamic_graph_new(void);
2817
2818
/** @cond ALL */
2819
// Get a new tensor variable.
2820
CCV_WARN_UNUSED(ccv_nnc_tensor_variable_t) ccv_nnc_tensor_variable_new_impl(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_param_t info);
2821
16.5k
#define CCV_NNC_TENSOR_VARIABLE_NEW_X_1(graph) ccv_nnc_tensor_variable_new_impl(graph, ccv_nnc_tensor_auto)
2822
14.6k
#define CCV_NNC_TENSOR_VARIABLE_NEW_X_SEL(_1, _2, _FX, ...) _FX
2823
// Making so that this new method can take parameters for both no parameter or with tensor_param.
2824
31.2k
#define ccv_nnc_tensor_variable_new(graph, ...) CCV_NNC_TENSOR_VARIABLE_NEW_X_SEL(graph, ##__VA_ARGS__, ccv_nnc_tensor_variable_new_impl, 
CCV_NNC_TENSOR_VARIABLE_NEW_X_116.5k
)(graph, ##
__VA_ARGS__8.32k
)
2825
CCV_WARN_UNUSED(ccv_nnc_tensor_variable_t) ccv_nnc_tensor_constant_new_impl(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_param_t info);
2826
#define CCV_NNC_TENSOR_CONSTANT_NEW_X_1(graph) ccv_nnc_tensor_constant_new_impl(graph, ccv_nnc_tensor_auto)
2827
37
#define CCV_NNC_TENSOR_CONSTANT_NEW_X_SEL(_1, _2, _FX, ...) _FX
2828
// Making so that this new method can take parameters for both no parameter or with tensor_param.
2829
37
#define ccv_nnc_tensor_constant_new(graph, ...) CCV_NNC_TENSOR_CONSTANT_NEW_X_SEL(graph, ##__VA_ARGS__, ccv_nnc_tensor_constant_new_impl, CCV_NNC_TENSOR_CONSTANT_NEW_X_1)(graph, ##
__VA_ARGS__5
)
2830
/** @endcond */
2831
2832
/**
2833
 * Create a new tensor variable that is an alias of a given tensor variable. You can alias any tensor
2834
 * variable that itself not an alias. You can also alias an alias, with some conditions: The tensor
2835
 * variable itself can be alias, but it needs to be contiguous as well. For example, a vector is
2836
 * contiguous. If both conditions satisfied, you can alias an alias.
2837
 * @param graph The dynamic graph.
2838
 * @param tensor_variable The tensor variable we are going to alias from.
2839
 * @param ofs The offset on each of the dimension.
2840
 * @param stride The stride of each dimension. If all 0, it matches the dimension of the tensor_variable.
2841
 * @param info The tensor parameters for the new alias.
2842
 * @return New tensor variable that is an alias.
2843
 */
2844
CCV_WARN_UNUSED(ccv_nnc_tensor_variable_t) ccv_nnc_tensor_variable_alias_new(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable, const int ofs[CCV_NNC_MAX_DIM_ALLOC], const int stride[CCV_NNC_MAX_DIM_ALLOC], const ccv_nnc_tensor_param_t info);
2845
/**
2846
 * Get the parameters for a tensor variable.
2847
 * @param graph The dynamic graph.
2848
 * @param tensor_variable The tensor variable reference.
2849
 * @return The tensor parameters.
2850
 */
2851
CCV_WARN_UNUSED(ccv_nnc_tensor_param_t) ccv_nnc_tensor_variable_params(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable);
2852
/**
2853
 * Get the parameters for a tensor variable alias.
2854
 * @param graph The symbolic graph.
2855
 * @param tensor_variable The tensor variable reference.
2856
 * @param ofs The offset on each of the dimension.
2857
 * @param stride The stride of each dimension.
2858
 * @return non-zero if it is not a tensor alias.
2859
 */
2860
int ccv_nnc_tensor_variable_alias_params(const ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable, int ofs[CCV_NNC_MAX_DIM_ALLOC], int stride[CCV_NNC_MAX_DIM_ALLOC]);
2861
2862
/** @cond ALL */
2863
/**
2864
 * Get the underlying tensor for the tensor variable. The tensor allocation may be performed when calling this
2865
 * method. If the tensor cannot be allocated (because no shape specified), return 0.
2866
 * @param graph The dynamic graph.
2867
 * @param tensor_variable The tensor variable to get the underlying tensor.
2868
 * @param stream_context Which stream this command will be executed upon.
2869
 * @return The underlying tensor.
2870
 */
2871
CCV_WARN_UNUSED(ccv_nnc_tensor_t*) ccv_nnc_tensor_from_variable_impl(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable, ccv_nnc_stream_context_t* const stream_context);
2872
8.54k
#define CCV_NNC_TENSOR_FROM_VARIABLE_X_1(graph, tensor_variable) ccv_nnc_tensor_from_variable_impl(graph, tensor_variable, 0)
2873
60.4k
#define CCV_NNC_TENSOR_FROM_VARIABLE_X_SEL(_1, _2, _3, _FX, ...) _FX
2874
// Making so that this new method can take parameters for both no parameter or with tensor_param.
2875
69.0k
#define ccv_nnc_tensor_from_variable(graph, tensor_variable, ...) CCV_NNC_TENSOR_FROM_VARIABLE_X_SEL
(graph, tensor_variable, ##__VA_ARGS__, ccv_nnc_tensor_from_variable_impl, 46.0k
CCV_NNC_TENSOR_FROM_VARIABLE_X_18.54k
)(graph, tensor_variable, ##__VA_ARGS__)
2876
/** @endcond */
2877
/**
2878
 * Query whether a given tensor variable is a constant (no gradient).
2879
 * @param graph The dynamic graph.
2880
 * @param tensor_variable The tensor variable to query whether it is a constant.
2881
 */
2882
CCV_WARN_UNUSED(int) ccv_nnc_tensor_variable_is_constant(const ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable);
2883
/**
2884
 * Set a tensor on the tensor variable. Tensor variable doesn't take over the life-cycle management of the tensor
2885
 * (in similar way as the tensor binds).
2886
 * @param graph The dynamic graph.
2887
 * @param tensor_variable The tensor variable to set.
2888
 * @param tensor The tensor that is going to be associated with the tensor variable.
2889
 */
2890
void ccv_nnc_tensor_variable_set(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable, ccv_nnc_tensor_t* const tensor);
2891
/**
2892
 * Detach the tensor variable from current graph. It acts as if computed between
2893
 * ``ccv_nnc_dynamic_graph_set_no_grad``. Thus, there are a few requirements for this:
2894
 * 1. It cannot be an alias when detach. You have to detach the original, not the alias.
2895
 * 2. When detach a variable, it could impact correctness when computing gradients. This cut off backprop, acting as if the
2896
 *    detached variable is a constant (it will be marked as is).
2897
 * After this call, the tensor variable will be marked as constant and you can query that through ``ccv_nnc_tensor_variable_is_constant``.
2898
 * Why this method rather than making this variable as constant to begin with? First, an constant
2899
 * cannot be the output. Second, you may not wrap your computation between no grad, or not all inputs
2900
 * are constants, resulting a tensor variable that is on a graph. This method is helpful to rescue from
2901
 * that situation.
2902
 * @param graph The dynamic graph.
2903
 * @param tensor_variable The tensor variable to be detached.
2904
 */
2905
void ccv_nnc_tensor_variable_detach(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable);
2906
/**
2907
 * A destructor function to be called when a tensor variable will be freed in the sense that no
2908
 * backward computation need it no more.
2909
 * Thus, we pass in tensor rather than tensor variable for the destructor.
2910
 */
2911
typedef void (*ccv_nnc_tensor_variable_destructor_f)(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_t* const tensor, void* const context);
2912
/**
2913
 * Hook into a tensor variable such that when it is actually freed (destroyed), the callback will receive
2914
 * the update.
2915
 * @param graph The dynamic graph.
2916
 * @param tensor_variable The tensor variable to observe when it is destroyed.
2917
 * @param func The callback function.
2918
 * @param context The context to be passed along to the callback function.
2919
 **/
2920
void ccv_nnc_tensor_variable_destructor_hook(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable, ccv_nnc_tensor_variable_destructor_f func, void* const context);
2921
/**
2922
 * Check given tensor variables whether have effects to another set of tensor variables.
2923
 * @param graph The dynamic graph.
2924
 * @param source_variables The tensor variables to check whether it has effect to another set of variables.
2925
 * @param source_variable_size The size of source tensor variables.
2926
 * @param destination_variables Whether the source variables has effect to this list of variables.
2927
 * @param destination_variable_size The size of destination tensor variables.
2928
 * @param bitmask Bit return value, each bit represents a source tensor variable, and 1 meant it can reach some of the destinations.
2929
 */
2930
void ccv_nnc_dynamic_graph_has_effect_to_tensor_variables(const ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t* const source_variables, const int source_variable_size, const ccv_nnc_tensor_variable_t* const destination_variables, const int destination_variable_size, uint64_t* const bitmask);
2931
/**
2932
 * Execute a command with given tensor variables, the output is in the output tensor variables.
2933
 * @param graph The dynamic graph.
2934
 * @param cmd The wrapped command.
2935
 * @param hint The hint associated with the command.
2936
 * @param flags A reserved field for flags.
2937
 * @param inputs The input tensor variables array.
2938
 * @param input_size The size of the input tensor variables array.
2939
 * @param outputs The output tensor variables array.
2940
 * @param output_size The size of the output tensor variables array.
2941
 * @param parallel The parallel parameter, how many concurrent computations we need to execute.
2942
 * @param stream_context Which stream this command will be executed upon.
2943
 */
2944
int ccv_nnc_dynamic_graph_exec(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, const ccv_nnc_tensor_variable_t* const inputs, const int input_size, ccv_nnc_tensor_variable_t* const outputs, const int output_size, const int parallel, ccv_nnc_stream_context_t* const stream_context);
2945
/**
2946
 * Compute the gradient of given tensor, with respect to the f. Thus, df / dt.
2947
 * @param dynamic_graph The dynamic graph.
2948
 * @param f_variables The output losses.
2949
 * @param f_variable_size The size of output losses array.
2950
 * @param df_optionals The custom gradients for f. If not provided, will default to 1.
2951
 * @param inputs The input variables.
2952
 * @param input_size The size of the input variables array.
2953
 * @param outputs The gradients with respect to the inputs. If the gradient already have value exist, it will be
2954
 *        accumulated into the final value.
2955
 * @param output_size The size of the outputs array. Should be equal to the input_size.
2956
 * @param stream_context Which stream this computation will be executed upon.
2957
 */
2958
void ccv_nnc_dynamic_graph_backward(ccv_nnc_dynamic_graph_t* const dynamic_graph, const ccv_nnc_tensor_variable_t* const f_variables, const int f_variable_size, const ccv_nnc_tensor_variable_t* const df_optionals, const ccv_nnc_tensor_variable_t* const inputs, const int input_size, ccv_nnc_tensor_variable_t* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context);
2959
/**
2960
 * Apply gradients to the set of parameters to update them with appropriate minimizer.
2961
 * @param dynamic_graph The dynamic graph.
2962
 * @param minimizer The wrapped command that represents a particular optimization strategy.
2963
 * @param gradients The computed gradients to be applied.
2964
 * @param gradient_size The size of gradients array.
2965
 * @param parameters The parameters to update.
2966
 * @param parameter_size The size of parameters array, should be the same length as gradients.
2967
 * @param saved_aux The aux variables to faciliate the minimizer. See ccv_nnc_minimizer_saved_aux_size.
2968
 * @param parallel The parallel parameter, how many concurrent computations we need to execute.
2969
 * @param stream_context Which stream this computation will be executed upon.
2970
 */
2971
void ccv_nnc_dynamic_graph_apply_gradients(ccv_nnc_dynamic_graph_t* const dynamic_graph, const ccv_nnc_cmd_t minimizer, const ccv_nnc_tensor_variable_t* const gradients, const int gradient_size, ccv_nnc_tensor_variable_t* const parameters, const int parameter_size, ccv_nnc_tensor_variable_t* const saved_aux, const int parallel, ccv_nnc_stream_context_t* const stream_context);
2972
/**
2973
 * Apply one step of minimization (most likely, a gradient descent) to the parameters with a given loss (or
2974
 * losses).
2975
 * @param dynamic_graph The dynamic graph.
2976
 * @param minimizer The wrapped command that represents a particular optimization strategy.
2977
 * @param losses The losses we are trying to minimize.
2978
 * @param loss_size The size of the losses array.
2979
 * @param dloss_optionals The custom gradient for losses. If not provided, will default to 1.
2980
 * @param parameters The parameters to update.
2981
 * @param parameter_size The size of parameters array.
2982
 * @param saved_aux The aux variables to faciliate the minimizer. See ccv_nnc_minimizer_saved_aux_size.
2983
 * @param parallel The parallel parameter, how many concurrent computations we need to execute.
2984
 * @param stream_context Which stream this computation will be executed upon.
2985
 */
2986
void ccv_nnc_dynamic_graph_minimize(ccv_nnc_dynamic_graph_t* const dynamic_graph, const ccv_nnc_cmd_t minimizer, const ccv_nnc_tensor_variable_t* const losses, const int loss_size, const ccv_nnc_tensor_variable_t* const dloss_optionals, ccv_nnc_tensor_variable_t* const parameters, const int parameter_size, ccv_nnc_tensor_variable_t* const saved_aux, const int parallel, ccv_nnc_stream_context_t* const stream_context);
2987
/**
2988
 * Read more in Level-5 API section.
2989
 */
2990
typedef struct ccv_cnnp_model_s ccv_cnnp_model_t;
2991
/**
2992
 * Evaluate a CNNP model on the dynamic graph with set of inputs / outputs.
2993
 * @param dynamic_graph The dynamic graph.
2994
 * @param model The CNNP model to be evaluated against. Note that ccv_nnc_dynamic_graph_backward /
2995
 *              ccv_nnc_dynamic_graph_apply_gradients / ccv_nnc_dynamic_graph_minimize all works with this
2996
 *              model. It takes over the life-cycle of the model, and now you don't need to free it any more.
2997
 * @param is_test Whether we are in test mode or not.
2998
 * @param inputs The input variables.
2999
 * @param input_size The size of the input variables array.
3000
 * @param outputs The gradients with respect to the inputs.
3001
 * @param output_size The size of the outputs array.
3002
 * @param tensor_tape An opaque tensor tape object to "backpropagate through time".
3003
 * @param stream_context Which stream this computation will be executed upon.
3004
 */
3005
void ccv_nnc_dynamic_graph_evaluate(ccv_nnc_dynamic_graph_t* const dynamic_graph, ccv_cnnp_model_t* const model, const int is_test, const ccv_nnc_tensor_variable_t* const inputs, const int input_size, ccv_nnc_tensor_variable_t* const outputs, const int output_size, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const stream_context);
3006
/**
3007
 * Dry run a CNNP model on the dynamic graph with set of inputs up until the actual execution.
3008
 * @param dynamic_graph The dynamic graph.
3009
 * @param model The CNNP model to be evaluated against. Note that ccv_nnc_dynamic_graph_backward /
3010
 *              ccv_nnc_dynamic_graph_apply_gradients / ccv_nnc_dynamic_graph_minimize all works with this
3011
 *              model. It takes over the life-cycle of the model, and now you don't need to free it any more.
3012
 * @param is_test Whether we are in test mode or not.
3013
 * @param inputs The input variables.
3014
 * @param input_size The size of the input variables array.
3015
 * @param stream_context Which stream this computation will be executed upon.
3016
 */
3017
void ccv_nnc_dynamic_graph_dry_run(ccv_nnc_dynamic_graph_t* const dynamic_graph, ccv_cnnp_model_t* const model, const int is_test, const ccv_nnc_tensor_variable_t* const inputs, const int input_size, ccv_nnc_stream_context_t* const stream_context);
3018
/**
3019
 * Set the maximum operator-level concurrency. This is a soft-limit, e.g. if you have operations on
3020
 * different devices, they are concurrent.
3021
 * @param graph The dynamic graph.
3022
 * @param max_stream_count The maximum concurrency if the dynamic graph schedules internal streams. 0 is no limit.
3023
 */
3024
void ccv_nnc_dynamic_graph_set_max_concurrency(ccv_nnc_dynamic_graph_t* const graph, const int max_stream_count);
3025
/**
3026
 * Enable or disable gradient computation on a dynamic graph.
3027
 * @param dynamic_graph The dynamic graph.
3028
 * @param no_grad If it is 1, disable gradient computation on the dynamic graph.
3029
 * @return 0 if it turned, otherwise it is not turned.
3030
 */
3031
int ccv_nnc_dynamic_graph_set_no_grad(ccv_nnc_dynamic_graph_t* const dynamic_graph, const int no_grad);
3032
/**
3033
 * Dynamic graph will retain a memory it allocated for efficient reuse. Triggering this method
3034
 * intentionally will force these memory to be collected. This is helpful if you know the existing
3035
 * allocation won't be enough for the future use.
3036
 * @param dynamic_graph The dynamic graph.
3037
 */
3038
void ccv_nnc_dynamic_graph_gc(ccv_nnc_dynamic_graph_t* const dynamic_graph);
3039
/**
3040
 * Dispose a tensor variable. You cannot do any computation against this tensor variable afterwards.
3041
 * @param graph The dynamic graph.
3042
 * @param tensor_variable The tensor variable to be disposed.
3043
 */
3044
void ccv_nnc_tensor_variable_free(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable);
3045
/**
3046
 * Free the dynamic graph.
3047
 * @param graph The dynamic graph.
3048
 */
3049
void ccv_nnc_dynamic_graph_free(ccv_nnc_dynamic_graph_t* const graph);
3050
/**
3051
 * Generate output that can be parsed by GraphViz (DOT language).
3052
 * @param graph The dynamic graph.
3053
 * @param flags Either CCV_NNC_SHORT_DOT_GRAPH or CCV_NNC_LONG_DOT_GRAPH
3054
 * @param out The output file stream.
3055
 */
3056
void ccv_nnc_dynamic_graph_dot(const ccv_nnc_dynamic_graph_t* const graph, const int flags, FILE* out);
3057
/**
3058
 * Count how many ops we kept for gradient computation purpose. This method is useful when we
3059
 * want to assert at end of some train loop, we shouldn't have any gradient computation left.
3060
 * @param graph The dynamic graph.
3061
 * @param type The type of variables to trace. CCV_NNC_SYMBOL_TENSOR / CCV_NNC_SYMBOL_GRAPH_EXEC
3062
 * @return How many gradient computations we kept.
3063
 */
3064
CCV_WARN_UNUSED(int) ccv_nnc_dynamic_graph_bookkeeping_count(const ccv_nnc_dynamic_graph_t* const graph, const int type);
3065
/**
3066
 * Provide a hook for upper level to do custom formatting of a given dynamic graph for whatever
3067
 * inside. You can implement logic to format the graph into protobuf, or json. However, this
3068
 * is not the method for you to visit the graph, and do mutations on it. If ops are not needed for
3069
 * gradient computation, likely these are not kept on the dynamic graph at all. You probably will
3070
 * get an empty graph. What's still available can be checked with the ccv_nnc_dynamic_graph_bookkeeping_count.
3071
 * @param graph The dynamic graph.
3072
 * @param format_fn The format callback to be called on every node.
3073
 * @param context The context that will be passed to the callback.
3074
 */
3075
void ccv_nnc_dynamic_graph_format(const ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_symbolic_graph_format_f format_fn, void* const context);
3076
3077
/** @} */
3078
3079
/**
3080
 * @defgroup level_5 Level-5 API
3081
 * @{
3082
 */
3083
3084
/**
3085
 * @page dataframe What is "dataframe" in ML?
3086
 *
3087
 * A large part of machine learning consists of go through data, process them to a shape / form that makes sense,
3088
 * and pass that into the model to train. Deep learning frameworks such as TensorFlow or PyTorch provides some
3089
 * dataset APIs for this purpose. It is convenient for these frameworks because by being Python, people can use
3090
 * Pandas to process the data. In Pandas, this is called Dataframe, which again, imitates R language.
3091
 *
3092
 * Another interesting observation comes from recent (2018) release of Create ML framework from Apple. It provides
3093
 * a very close to Pandas style data process API (MLDataTable) but in Swift. This implementation is important because
3094
 * it provides a survey point other than Python.
3095
 *
3096
 * Comparing to Python, Swift is a stronger typed language. Though all being high-level, they all have pretty good
3097
 * string support (of course!), operator overloading, and polymorphism. String support makes column naming natural,
3098
 * Operator overloading makes conditioning and filtering easier, and polymorphism makes column type representation
3099
 * straight-forward. These, unfortunately, are the challenges I need to face when implementing in C with the eye
3100
 * towards that later the similar ideas can be implemented on top on a high-level language based on this one.
3101
 *
3102
 * It seems I haven't answered the most crucial question yet: what's special about these data process APIs? It is
3103
 * easier to answer this to first see what Pandas or MLDataTable does.
3104
 *
3105
 * * They both represent data as tables. Each column represents different type of the data (time, nd-array, scalar
3106
 *   or string). As such, they both have API to add / remove / rename columns, and load tabular data from disk.
3107
 *
3108
 * * They both provide API to filter (remove / add) rows, and derive new column from existing columns.
3109
 *
3110
 * * Pandas provides more API for data alignment (merge columns from different tables into one table), and compute
3111
 *   statistics (group rows by some criteria, and compute min / max / std / mean within that group).
3112
 *
3113
 * * MLDataTable provides API to batching data (random split) which covered in TensorFlow / PyTorch's Dataset API
3114
 *   as well.
3115
 *
3116
 * It turns out when you have a noisy dataset, these functionalities are useful to remove unwanted data quickly.
3117
 * If you have a relatively clean dataset, it also allows you to prepare data in a more elegant way. For NNC,
3118
 * the interesting requirements are:
3119
 *
3120
 * 1. Represents scalars, tensors, string as columns; columns can be named.
3121
 *
3122
 * 2. New columns can be derived, from existing ones.
3123
 *
3124
 * 3. Rows can be filtered, grouped, and statistics can be computed.
3125
 *
3126
 * 4. Columns can be aligned, with some given indexes.
3127
 *
3128
 * 5. All these can be done efficiently, on a scale of hundreds of Gigabytes data.
3129
 */
3130
3131
/**
3132
 * @defgroup level_5_dataframe Dataframe API
3133
 * @{
3134
 */
3135
3136
/**
3137
 * A data enumeration function to supply data for given row indexes.
3138
 */
3139
typedef void (*ccv_cnnp_column_data_enum_f)(const int column_idx, const int* const row_idxs, const int row_size, void** const data, void* const context, ccv_nnc_stream_context_t* const stream_context);
3140
/**
3141
 * A destructor for data.
3142
 */
3143
typedef void (*ccv_cnnp_column_data_deinit_f)(void* const data, void* const context);
3144
/**
3145
 * A destructor for context.
3146
 */
3147
typedef void (*ccv_cnnp_column_data_context_deinit_f)(void* const context);
3148
/**
3149
 * Column data.
3150
 */
3151
typedef struct {
3152
  int stream_type; /**< The type of stream context for this column. Each column only compatible with one stream type. */
3153
  char* name; /**< The name of the column. */
3154
  ccv_cnnp_column_data_enum_f data_enum; /**< The data enumeration function for this column. */
3155
  ccv_cnnp_column_data_deinit_f data_deinit; /**< The deinit function that will be used to destroy the data. */
3156
  void* context; /**< The context go along with this column. */
3157
  ccv_cnnp_column_data_context_deinit_f context_deinit; /**< The deinit function that will be used to destroy the context. */
3158
} ccv_cnnp_column_data_t;
3159
/**
3160
 * An opaque structure point to the dataframe object.
3161
 */
3162
typedef struct ccv_cnnp_dataframe_s ccv_cnnp_dataframe_t;
3163
/**
3164
 * Create a dataframe object with given column data.
3165
 * @param column_data The column data that can be loaded.
3166
 * @param column_size The size of column data array.
3167
 * @param row_count The number of rows in this dataframe.
3168
 */
3169
CCV_WARN_UNUSED(ccv_cnnp_dataframe_t*) ccv_cnnp_dataframe_new(const ccv_cnnp_column_data_t* const column_data, const int column_size, const int row_count);
3170
/**
3171
 * Add a new column to the dataframe.
3172
 * @param dataframe The dataframe object to add column to.
3173
 * @param data_enum The data provider function for the new column.
3174
 * @param stream_type The type of stream context for this derived column.
3175
 * @param data_deinit The deinit function will be used to destroy the derived data.
3176
 * @param context The context that can be used to generate new column.
3177
 * @param context_deinit The deinit function will be used to destroy the context.
3178
 * @param name The name of the newly added column.
3179
 * @return The new column index.
3180
 */
3181
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_add(ccv_cnnp_dataframe_t* const dataframe, ccv_cnnp_column_data_enum_f data_enum, const int stream_type, ccv_cnnp_column_data_deinit_f data_deinit, void* const context, ccv_cnnp_column_data_context_deinit_f context_deinit, const char* name);
3182
/**
3183
 * A map function that takes the data from multiple columns and derive new data out of it.
3184
 */
3185
typedef void (*ccv_cnnp_column_data_map_f)(void* const* const* const column_data, const int column_size, const int batch_size, void** const data, void* const context, ccv_nnc_stream_context_t* const stream_context);
3186
/**
3187
 * Derive a new column out of existing columns in the dataframe.
3188
 * @param dataframe The dataframe object that contains existing columns.
3189
 * @param map The map function used to derive new column from existing columns.
3190
 * @param stream_type The type of stream context for this derived column.
3191
 * @param data_deinit The deinit function will be used to destroy the derived data.
3192
 * @param column_idxs The columns that will be used to derive new column.
3193
 * @param column_idx_size The size of existing columns array.
3194
 * @param context The context that can be used to generate new column.
3195
 * @param context_deinit The deinit function will be used to destroy the context.
3196
 * @param name The name of the new column.
3197
 * @return The new column index.
3198
 */
3199
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_map(ccv_cnnp_dataframe_t* const dataframe, ccv_cnnp_column_data_map_f map, const int stream_type, ccv_cnnp_column_data_deinit_f data_deinit, const int* const column_idxs, const int column_idx_size, void* const context, ccv_cnnp_column_data_context_deinit_f context_deinit, const char* name);
3200
/**
3201
 * Shuffle an existing dataframe.
3202
 * @param dataframe The dataframe that is about to be shuffled.
3203
 */
3204
void ccv_cnnp_dataframe_shuffle(ccv_cnnp_dataframe_t* const dataframe);
3205
/**
3206
 * Query row count of the dataframe.
3207
 * @param dataframe The dataframe we want to query row count.
3208
 * @return The row count of the dataframe.
3209
 */
3210
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_row_count(ccv_cnnp_dataframe_t* const dataframe);
3211
/**
3212
 * Query the column name of a given column on the dataframe.
3213
 * @param dataframe The dataframe we want to query the column name.
3214
 * @param column_idx The index of a column.
3215
 * @return The name of the column.
3216
 */
3217
CCV_WARN_UNUSED(const char*) ccv_cnnp_dataframe_column_name(ccv_cnnp_dataframe_t* const dataframe, const int column_idx);
3218
/**
3219
 * A sampling function that takes multiple rows of one column, and sample to one row.
3220
 */
3221
typedef void (*ccv_cnnp_column_data_sample_f)(void* const* const input_data, const int batch_size, void** const output_data, void* const context, ccv_nnc_stream_context_t* const stream_context);
3222
/**
3223
 * Sample a dataframe by batch size. Thus, n rows are sampled to 1 row per sample function on
3224
 * one specific column. This will also sample the multi-column dataframe down to 1 column
3225
 * by selecting the one column to sample.
3226
 * @param dataframe The dataframe that is about to be sampled.
3227
 * @param sample The sample function used to sample n rows into 1.
3228
 * @param data_deinit The deinit function will be used to destroy the derived data.
3229
 * @param column_idx The column we selected to sample.
3230
 * @param batch_size How many rows will be sampled to 1 row from the original data.
3231
 * @param context The context that can be used in sample function.
3232
 * @param context_deinit The deinit function will be used to destroy the context.
3233
 * @return The sampled dataframe.
3234
 */
3235
CCV_WARN_UNUSED(ccv_cnnp_dataframe_t*) ccv_cnnp_dataframe_sample_new(ccv_cnnp_dataframe_t* const dataframe, ccv_cnnp_column_data_sample_f sample, ccv_cnnp_column_data_deinit_f data_deinit, const int column_idx, const int batch_size, void* const context, ccv_cnnp_column_data_context_deinit_f context_deinit);
3236
/**
3237
 * Extract a value out of a struct. Assuming the data points to a struct. This method extract
3238
 * n-offset value of that struct. For example, if you have struct { ccv_nnc_tensor_t* a; ccv_nnc_tensor_t* b; } S;
3239
 * if you want to extract the b tensor to a different column, you can call this function with
3240
 * offsetof(S, b).
3241
 * @param dataframe The dataframe object to be extracted.
3242
 * @param column_idx The column that we want to extract value of.
3243
 * @param offset The offset. For example, offsetof(S, b).
3244
 * @param name The name of the new column.
3245
 * @return The new column that contains the extracted value.
3246
 */
3247
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_extract_value(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const off_t offset, const char* name);
3248
/**
3249
 * Make a tuple out of columns specified. Thus, the new derived column will contains a tuple
3250
 * with data from all the columns specified here. Tuple here represented as void* tuple[], an
3251
 * array of void* pointers.
3252
 * @param dataframe The dataframe that will contain the new column.
3253
 * @param column_idxs The columns to be tupled.
3254
 * @param column_idx_size The number of columns.
3255
 * @param name The name of the new column.
3256
 * @return The derived column with the tuple.
3257
 */
3258
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_make_tuple(ccv_cnnp_dataframe_t* const dataframe, const int* const column_idxs, const int column_idx_size, const char* name);
3259
/**
3260
 * The size of the tuple. It is equal to the number of columns we specified. The behavior of
3261
 * calling this method on a column that is not a tuple is undefined.
3262
 * @param dataframe The dataframe that contains the tuple column.
3263
 * @param column_idx The tuple column we are going to inspect.
3264
 * @return The tuple size of the column.
3265
 */
3266
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_tuple_size(const ccv_cnnp_dataframe_t* const dataframe, const int column_idx);
3267
/**
3268
 * Extract a data out of a tuple.
3269
 * @param dataframe The dataframe that will contain the new column.
3270
 * @param column_idx The column that is a tuple.
3271
 * @param index The index into the tuple.
3272
 * @param name The name of the new column.
3273
 * @return The derived column with the extracted value.
3274
 */
3275
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_extract_tuple(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const int index, const char* name);
3276
/**
3277
 * The opaque pointer to the iterator.
3278
 */
3279
typedef struct ccv_cnnp_dataframe_iter_s ccv_cnnp_dataframe_iter_t;
3280
/**
3281
 * Get a new iterator of the dataframe.
3282
 * @param dataframe The dataframe object to iterate through.
3283
 * @param column_idxs The columns that will be iterated.
3284
 * @param column_idx_size The size of columns array.
3285
 * @return The opaque iterator object.
3286
 */
3287
CCV_WARN_UNUSED(ccv_cnnp_dataframe_iter_t*) ccv_cnnp_dataframe_iter_new(ccv_cnnp_dataframe_t* const dataframe, const int* const column_idxs, const int column_idx_size);
3288
/**
3289
 * Get the next item from the iterator.
3290
 * @param iter The iterator to go through.
3291
 * @param data_ref The output for the data.
3292
 * @param column_idx_size The size of the data_ref array.
3293
 * @param stream_context The stream context to extract data asynchronously.
3294
 * @return 0 if the iteration is successful, -1 if there is no more row. -2 if it is already ended.
3295
 */
3296
int ccv_cnnp_dataframe_iter_next(ccv_cnnp_dataframe_iter_t* const iter, void** const data_ref, const int column_idx_size, ccv_nnc_stream_context_t* const stream_context);
3297
/**
3298
 * Assuming iterator is on the same row, peek into potentially different column index.
3299
 * @param iter The iterator to go through.
3300
 * @param data_ref The output for the data.
3301
 * @param offset The offset for which column in this iterator to peek at.
3302
 * @param data_ref_size How many columns in this iterator to peek at.
3303
 * @param stream_context The stream context to extract data asynchronously.
3304
 */
3305
void ccv_cnnp_dataframe_iter_peek(ccv_cnnp_dataframe_iter_t* const iter, void** const data_ref, const int offset, const int data_ref_size, ccv_nnc_stream_context_t* const stream_context);
3306
/**
3307
 * Prefetch next item on the iterator with the given stream context. You can call this method multiple times
3308
 * to prefetch multiple items ahead of time.
3309
 * @param iter The iterator to go through.
3310
 * @param prefetch_count How much ahead we should advance for.
3311
 * @param stream_context The stream context to extract data asynchronously.
3312
 * @return 0 if the prefetch is successful, -1 if it is ended.
3313
 */
3314
int ccv_cnnp_dataframe_iter_prefetch(ccv_cnnp_dataframe_iter_t* const iter, const int prefetch_count, ccv_nnc_stream_context_t* const stream_context);
3315
/**
3316
 * Set the cursor of the iterator. When set to 0, the iterator effectively restarts.
3317
 * @param iter The iterator to go through.
3318
 * @param idx The index of the cursor.
3319
 * @return 0 if it is successful, -1 if it is not (exceed the range).
3320
 */
3321
int ccv_cnnp_dataframe_iter_set_cursor(ccv_cnnp_dataframe_iter_t* const iter, const int idx);
3322
/**
3323
 * Free the dataframe iterator object.
3324
 * @param iter The dataframe iterator to be freed.
3325
 */
3326
void ccv_cnnp_dataframe_iter_free(ccv_cnnp_dataframe_iter_t* const iter);
3327
/**
3328
 * Free the dataframe object.
3329
 * @param dataframe The dataframe object to be freed.
3330
 */
3331
void ccv_cnnp_dataframe_free(ccv_cnnp_dataframe_t* const dataframe);
3332
3333
/** @} */
3334
3335
/**
3336
 * @defgroup level_5_dataframe_add_ons Dataframe Add-ons
3337
 * @{
3338
 */
3339
3340
/**
3341
 * Turn a ccv_array_t to a dataframe object.
3342
 * @param array The array we want to turn into a dataframe object.
3343
 * @return The new dataframe object.
3344
 */
3345
CCV_WARN_UNUSED(ccv_cnnp_dataframe_t*) ccv_cnnp_dataframe_from_array_new(ccv_array_t* const array);
3346
/**
3347
 * Derive a new column that copies a tensor array from given column to the derived column on GPU.
3348
 * @param dataframe The dataframe object that get the derived column.
3349
 * @param column_idx The original column contains tensor array on CPU.
3350
 * @param tensor_offset Only copy as outputs[i] = inputs[i + tensor_offset].
3351
 * @param tensor_size How many tensors in the tensor array.
3352
 * @param device_id The device we want to copy the tensors to.
3353
 * @param name The name of the new column.
3354
 * @return The index of the newly derived column.
3355
 */
3356
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_copy_to_gpu(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const int tensor_offset, const int tensor_size, const int device_id, const char* name);
3357
/**
3358
 * Derive a new column by executing a generic command.
3359
 * @param dataframe The dataframe object that get the derived column.
3360
 * @param column_idx The original column contains tensor array.
3361
 * @param cmd The command for this operation.
3362
 * @param hint The hint to run the command.
3363
 * @param flags The flags with the command.
3364
 * @param input_offset Use inputs[i + input_offset] to inputs[i + input_offset + input_size - 1] as the inputs
3365
 * @param input_size How many tensors in the input array.
3366
 * @param output_params The parameters for the outputs.
3367
 * @param output_size How many tensors in the output array.
3368
 * @param stream_type The type of stream context we are going to use.
3369
 * @param name The name of the new column.
3370
 * @return The index of the newly derived column.
3371
 */
3372
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_cmd_exec(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, const int input_offset, const int input_size, const ccv_nnc_tensor_param_t* const output_params, const int output_size, const int stream_type, const char* name);
3373
/**
3374
 * Add a new column contains some tensors. This will add a new column that each row is the tensor specified
3375
 * as the parameters. It comes handy when you want to have some auxiliary tensors along with each row.
3376
 * @param dataframe The dataframe object that get the new column.
3377
 * @param params The parameters for the tensors.
3378
 * @param name The name of the new column.
3379
 * @return The index of the newly added column.
3380
 */
3381
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_add_aux(ccv_cnnp_dataframe_t* const dataframe, const ccv_nnc_tensor_param_t params, const char* name);
3382
/**
3383
 * Read image off a said column. That column should contain the filename (as char array). The new column
3384
 * will contain the ccv_dense_matrix_t / ccv_nnc_tensor_t (both are toll-free bridging) of the image.
3385
 * @param dataframe The dataframe object that loads the images.
3386
 * @param column_idx The column which contains the filename.
3387
 * @param structof The offset to the filename (as char array) from that column. For example, the column
3388
 *        could be a struct and filename could be one of the field. In that case, you can pass offsetof(S, filename)
3389
 * @param name The name of the new column.
3390
 * @return The index of the newly derived column.
3391
 */
3392
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_read_image(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const off_t structof, const char* name);
3393
/**
3394
 * The structure to describe how to apply random jitter to the image.
3395
 */
3396
typedef struct {
3397
  float contrast; /**< The random contrast, the final contrast will be [1 / (1 + contrast), 1 + contrast] */
3398
  float saturation; /**< The saturation, the final saturation will be [1 / (1 + saturation), 1 + saturation] */
3399
  float brightness; /**< The brightness, the final brightness will be between [1 / (1 + brightness), 1 + brightness] */
3400
  float lighting; /**< AlexNet style PCA based image jitter */
3401
  float aspect_ratio; /**< Stretch aspect ratio between [1 / (1 + asepct_ratio), 1 + aspect_ratio] */
3402
  int symmetric; /**< Apply random flip on x-axis (around y-axis */
3403
  int seed; /**< The seed for random generator. */
3404
  int center_crop; /**< Enable crop to the center (otherwise do random crop). */
3405
  struct {
3406
    int min; /**< The minimal dimension of resize */
3407
    int max; /**< The maximal dimension of resize. The final resize can be computed from min + (max - min) * random_unit */
3408
    int roundup; /**< The dimension on both height / width are a multiple of roundup value. */
3409
  } resize;
3410
  struct {
3411
    int rows; /**< The height of the final image. */
3412
    int cols; /**< The width of the final image. */
3413
  } size;
3414
  struct {
3415
    int x; /**< The extra random offset on x-axis. */
3416
    int y; /**< The extra random offset on y-axis. */
3417
  } offset;
3418
  struct {
3419
    float mean[3]; /**< Normalize the image with mean. */
3420
    float std[3];/**< Normalize the image with std. pixel = (pixel - mean) / std */
3421
  } normalize;
3422
} ccv_cnnp_random_jitter_t;
3423
/**
3424
 * Apply random jitter on a image to generate a new image.
3425
 * @param dataframe The dataframe object that contains the original image.
3426
 * @param column_idx The column which contains the original image.
3427
 * @param datatype The final datatype of the image. We only support CCV_32F right now.
3428
 * @param random_jitter The random jitter parameters to be applied to.
3429
 * @param name The name of the new column.
3430
 * @return The index of the newly derived column.
3431
 */
3432
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_image_random_jitter(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const int datatype, const ccv_cnnp_random_jitter_t random_jitter, const char* name);
3433
/**
3434
 * Generate a one-hot tensor off the label from a struct.
3435
 * @param dataframe The dataframe object that contains the label.
3436
 * @param column_idx The column which contains the label (as int).
3437
 * @param structof The offset to the label (as int) from that column. For example, the column
3438
 *        could be a struct and label could be one of the field. You can pass offsetof(S, filename)
3439
 * @param range The range of the label, from [0...range - 1]
3440
 * @param onval The value when it hit.
3441
 * @param offval The value for the others.
3442
 * @param datatype The datatype of the tensor.
3443
 * @param format The format of the tensor.
3444
 * @param name The name of the new column.
3445
 * @return The index of the newly derived column.
3446
 */
3447
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_one_hot(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const off_t structof, const int range, const float onval, const float offval, const int datatype, const int format, const char* name);
3448
/**
3449
 * Generate a scalar tensor (a tensor with one value) off a value from a struct.
3450
 * @param dataframe The dataframe object that contains the value.
3451
 * @param column_idx The column which contains the value (as datatype).
3452
 * @param structof The offset to the label (as int) from that column. For example, the column
3453
 *        could be a struct and label could be one of the field. You can pass offsetof(S, filename)
3454
 * @param from_dt The datatype of the value.
3455
 * @param to_dt The datatype of the tensor.
3456
 * @param format The format of the tensor.
3457
 * @param name The name of the new column.
3458
 * @return The index of the newly derived column.
3459
 */
3460
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_copy_scalar(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const off_t structof, const int from_dt, const int to_dt, const int format, const char* name);
3461
/**
3462
 * Generate vector with ones up to a given length, the rest will be zeros. When applied to batched lengths
3463
 * array, this will generate a matrix of these vectors, squared. The derived column will be a tuple of vectors
3464
 * for the given number of columns.
3465
 * @param dataframe The dataframe object that will contain the matrix.
3466
 * @param column_idxs The columns which contain the sequence lengths (a 1d tensor).
3467
 * @param column_idx_size The number of columns. The derived column will be a tuple of vectors.
3468
 * @param variable_size The size of the final vector can vary, depending on the max length of current batch.
3469
 * @param max_length The absolute max length for inputs.
3470
 * @param name The name of the new column.
3471
 * @return The index of the newly derived column.
3472
 */
3473
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_one_squared(ccv_cnnp_dataframe_t* const dataframe,  const int* const column_idxs, const int column_idx_size, const int variable_size, const int max_length, const char* name);
3474
/**
3475
 * Truncate a given matrix (as a list of vector) to the given size provided by another vector. The truncated
3476
 * column will be a tuple of vectors for the given columns.
3477
 * @param dataframe The dataframe object that will contain the matrix.
3478
 * @param vec_idxs The columns of the given matrix to be truncated.
3479
 * @param vec_idx_size The number of columns for vec_idxs.
3480
 * @param len_idxs The columns of the given sizes as a vector.
3481
 * @param len_idx_size The number of columns for len_idxs.
3482
 * @param name The name of the new column.
3483
 * @return The index of the newly derived column.
3484
 */
3485
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_truncate(ccv_cnnp_dataframe_t* const dataframe, const int* const vec_idxs, const int vec_idx_size, const int* len_idxs, const int len_idx_size, const char* name);
3486
/**
3487
 * Combine multiple tensors in a column into one tensor. This method can take multiple columns, which
3488
 * will result a tuple of tensors. Each tensor in the tuple is a batched one from a given column.
3489
 * @param dataframe The dataframe contains the columns of tensors to be batched.
3490
 * @param column_idxs The columns that contain the tensors.
3491
 * @param column_idx_size The number of columns that contain the tensors.
3492
 * @param batch_count How many tensors in one column to be batched together.
3493
 * @param group_count We can generate many groups of batched tensor. For example, if you have column A, B, C, each
3494
 *        have different tensors. If group_count is 1, the result tuple will be (A_b, B_b, C_b). If group count is
3495
 *        2, the result tuple will be (A_b1, B_b1, C_b1, A_b2, B_b2, C_b2). A_b1 etc. will still contain the same
3496
 *        number of batch_count tensors.
3497
 * @param format The result format of the tensor. We support simply transformation NCHW <=> NHWC with the source tensor.
3498
 * @return The newly created dataframe with the 0-th column is the tuple of batched tensors.
3499
 */
3500
CCV_WARN_UNUSED(ccv_cnnp_dataframe_t*) ccv_cnnp_dataframe_combine_new(ccv_cnnp_dataframe_t* const dataframe, const int* const column_idxs, const int column_idx_size, const int batch_count, const int group_count, const int format);
3501
3502
/** @} */
3503
3504
/**
3505
 * @page dataframe_csv Why to support comma-separated-values files in dataframe?
3506
 *
3507
 * C can be used as a parser. It usually can be fast. But most of them can be buggy and has bugs that can either crash, be
3508
 * exploited, or simply incorrect. There really isn't much motivation for me to start write a parser, even as simple as
3509
 * for CSV files.
3510
 *
3511
 * However, it does brought to my attention that a full-speed (defined by saturating the PCIx4 for SSD) implementation would
3512
 * be beneficial. I am also started to use nnc in many places that is handy to load a csv file and generate some tensors out
3513
 * of it.
3514
 *
3515
 * This implementation plan to use a variant of the two-pass approach documented in
3516
 * https://www.microsoft.com/en-us/research/uploads/prod/2019/04/chunker-sigmod19.pdf while first implemented in
3517
 * https://github.com/wiseio/paratext. It is differentiated from these two in these particular ways:
3518
 *
3519
 * 1. The first pass will not only find the quotes and even / odd CRLF, but also collect statistics on how many lines assuming
3520
 *    the first CRLF is within quote / outside of the quote;
3521
 *
3522
 * 2. The second pass will do a copy into a continuous page mirrors the original csv file, but null-terminate each column, and
3523
 *    assign the start pointer for each.
3524
 *
3525
 * The speculative approach while interesting, for many-core system implementation, it can be challenging and the worse-case
3526
 * scenario is indeed worse.
3527
 *
3528
 * The implementation itself follows https://tools.ietf.org/html/rfc4180, with only customization of delimiters (so it can support
3529
 * table-separated-values) and quotes (so you can choose between " and '). Escaping only supports double-quotes for whatever quote
3530
 * symbol you elect.
3531
 */
3532
3533
/**
3534
 * @defgroup level_5_dataframe_csv Dataframe for Comma-Separated-Values Files
3535
 * @{
3536
 */
3537
enum {
3538
  /* It is a file pointer. */
3539
  CCV_CNNP_DATAFRAME_CSV_FILE = 0,
3540
  /* It is a pointer to a memory. */
3541
  CCV_CNNP_DATAFRAME_CSV_MEMORY = 1,
3542
};
3543
3544
/**
3545
 * Create a dataframe object that read a CSV file. This will eagerly load the file into memory, parse each row / column
3546
 * into null-terminated strings, you can later convert these into numerics if needed. Each column will be a column indexed
3547
 * from 0 to column_size - 1. If there are syntax errors, the parser will make guesses and continue to parse to its best knowledge.
3548
 * If it cannot, we will return null for the object. We support both CRLF, LF, and LFCR termination.
3549
 * @param input The FILE handle for on-disk file, or the pointer to the region of the memory we are going to use.
3550
 * @param type The type of either `CCV_CNNP_DATAFRAME_CSV_FILE` or `CCV_CNNP_DATAFRAME_CSV_MEMORY`
3551
 * @param len The length of the memory region, if it is `CCV_CNNP_DATAFRAME_CSV_MEMORY`.
3552
 * @param delim The delim, it is ',' by default (if you provided '\0')
3553
 * @param quote The quote for escape strings, it is '"' by default (if you provided '\0')
3554
 * @param include_header whether to parse the header seperately. 1 means we treat the first line as header.
3555
 * @param column_size The number of columns in the resulted dataframe.
3556
 * @return A dataframe that can represent the csv file. nullptr if failed.
3557
 */
3558
CCV_WARN_UNUSED(ccv_cnnp_dataframe_t*) ccv_cnnp_dataframe_from_csv_new(void* const input, const int type, const size_t len, const char delim, const char quote, const int include_header, int* const column_size);
3559
3560
/** @} */
3561
3562
/**
3563
 * @page model Models, layers, and Keras
3564
 *
3565
 * With Keras API in mind, this model implementation essentially is a light-weight way to group neural network layers
3566
 * together. This is a rare case in NNC (or ccv in general) where Object-Oriented programming makes sense. I borrowed
3567
 * heavily from Objective-C / C++ to implement this Object-Oriented interface.
3568
 *
3569
 * Now back to elaboration of the Model interface. It is specifically designed with Keras in mind, asking question:
3570
 * If we are going to build Keras high-level API in any languages (Ruby, Python, Swift, Julia), what's the underlying
3571
 * C interface would look like? Here is your answer (hint: it looks very much like just Python Keras API).
3572
 *
3573
 * A model consists of a set of inputs and outputs. This sounds very much like what "Command" is in Level-1 APIs,
3574
 * however, they are different: a model is stateful. For example, a convolution command takes 3 inputs: image, kernel
3575
 * weight and bias, has 1 output: image. A convolution model takes 1 input: image, and 1 output: image. kernel weight
3576
 * and bias are internal states to the model (in Keras, it is called "layer" for convolution, and model means a set of
3577
 * layers. In NNC, that kind of differentiation feels superficial, therefore, a layer is a model).
3578
 *
3579
 * A model can be combined, and a new model can be a combination of other models.
3580
 *
3581
 * The simpler composed model is the sequential model. A sequential model is a model that consists a sequence of models
3582
 * that contains one input and one output. The output of the earlier model feed into the later one, thus, a sequential
3583
 * evaluation path.
3584
 */
3585
3586
/**
3587
 * @defgroup level_5_model Model API
3588
 * @{
3589
 */
3590
3591
/**
3592
 * model type is an abstract type, you won't interact with a naked model ever.
3593
 */
3594
typedef struct ccv_cnnp_model_s ccv_cnnp_model_t;
3595
/**
3596
 * With this type, now in NNC, we have 4 types that represents a "tensor":
3597
 *
3598
 * 1. ccv_nnc_tensor_t / ccv_nnc_tensor_view_t / ccv_nnc_tensor_multiview_t: a concrete tensor with memory allocated.
3599
 *
3600
 * 2. ccv_nnc_tensor_symbol_t: a symbol representation of a tensor, with its data layout, device affinity, and type
3601
 *                             specified.
3602
 *
3603
 * 3. ccv_nnc_tensor_variable_t: in dynamic graph, this represents a concrete tensor with memory allocated, but also
3604
 *                               associated with a recorded execution.
3605
 *
3606
 * 4. ccv_cnnp_model_io_t: this is the most flexible one. No data layout, device affinity or type specified. It can even
3607
 *                         represent a list of tensors rather than just one. This is a handle used by model API to
3608
 *                         associates model inputs / outputs.
3609
 */
3610
typedef struct ccv_cnnp_model_io_s* ccv_cnnp_model_io_t;
3611
/**
3612
 * Create a naked input.
3613
 * @return A ccv_cnnp_model_io_t represents an input.
3614
 */
3615
CCV_WARN_UNUSED(ccv_cnnp_model_io_t) ccv_cnnp_input(void);
3616
/**
3617
 * This method mimics Keras callable for model (thus, override __call__ method in Python class).
3618
 * @param model A model that we can apply a set of inputs to get one output.
3619
 * @param inputs The set of inputs.
3620
 * @param input_size The size of inputs array.
3621
 * @return A ccv_cnnp_model_io_t that represents the output of the given model.
3622
 */
3623
CCV_WARN_UNUSED(ccv_cnnp_model_io_t) ccv_cnnp_model_apply(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t* const inputs, const int input_size);
3624
/**
3625
 * This method adds non-functional dependencies for a model IO. "Non-functional dependencies" means
3626
 * their outputs are not used for this IO, however, their existence establishes a partial ordering
3627
 * for the execution. In that way, they act as "inputs" but not functional.
3628
 * @param model_io A model IO for which we will add additional non-functional dependencies.
3629
 * @param dependencies The set of dependencies.
3630
 * @param dependency_size The size of dependencies array.
3631
 */
3632
void ccv_cnnp_model_add_dependencies(ccv_cnnp_model_io_t model_io, const ccv_cnnp_model_io_t* const dependencies, const int dependency_size);
3633
enum {
3634
  /* Select only weights, no bias terms. */
3635
  CCV_CNNP_PARAMETER_SELECT_WEIGHT = 0,
3636
  /* Select bias terms, no weights. */
3637
  CCV_CNNP_PARAMETER_SELECT_BIAS = 1,
3638
};
3639
/**
3640
 * This method exposes parameter for a model out as a potential input for another model. Since
3641
 * it is a ccv_cnnp_model_io_t, it can also be used by other methods.
3642
 * @param model A model that we can extract parameters out.
3643
 * @param selector The selector for a parameter. ALL_PARAMETERS means all parameters, or you can select CCV_CNNP_PARAMETER_SELECT_WEIGHT or CCV_CNNP_PARAMETER_SELECT_BIAS.
3644
 * @param index The index into a parameter. ALL_PARAMETERS means all parameters.
3645
 */
3646
CCV_WARN_UNUSED(ccv_cnnp_model_io_t) ccv_cnnp_model_parameters(ccv_cnnp_model_t* const model, const int selector, const int index);
3647
/**
3648
 * A notification function such that a model can be notified.
3649
 * This is useful to broadcast a message to all models as sub-model of someone else.
3650
 */
3651
typedef void (*ccv_cnnp_model_notify_f)(const ccv_cnnp_model_t* const model, const int tag, void* const payload, void* const context);
3652
/**
3653
 * Hook into a model such that when there is a notification, the callback will receive it.
3654
 * @param model A model that can be notified.
3655
 * @param func The callback function.
3656
 * @param context The context to be passed along to the callback function.
3657
 **/
3658
void ccv_cnnp_model_notify_hook(ccv_cnnp_model_t* const model, ccv_cnnp_model_notify_f func, void* const context);
3659
/**
3660
 * Notify a model and its sub-models with a tag and a payload. This will be triggered
3661
 * synchronously.
3662
 * @param model A model that will be notified.
3663
 * @param tag An integer to help identify what kind of notification.
3664
 * @param payload A payload pointer that you can carry arbitrary information.
3665
 */
3666
void ccv_cnnp_model_notify(const ccv_cnnp_model_t* const model, const int tag, void* const payload);
3667
/**
3668
 * This method name is deceiving. It return a composed model, not a naked model.
3669
 * This composed model takes set of inputs, and run through various other models to arrive at
3670
 * the set of outputs.
3671
 * @param inputs The set of inputs.
3672
 * @param input_size The size of inputs array.
3673
 * @param outputs The set of outputs.
3674
 * @param output_size The size of outputs array.
3675
 * @param is_trainable Whether the parameters of this model can be trained. -1 means inherent from parent.
3676
 * @param name The unique name of the model.
3677
 * @return A composed model that takes inputs, and generate the outputs.
3678
 */
3679
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_model_new(const ccv_cnnp_model_io_t* const inputs, const int input_size, const ccv_cnnp_model_io_t* const outputs, const int output_size, const int is_trainable, const char* const name);
3680
/**
3681
 * This method returns a sequential model, which composed from a sequence of models.
3682
 * @param models The list of models, that takes one input, and emit one output, feeding into the subsequent one.
3683
 * @param model_size The size of the list.
3684
 * @param is_trainable Whether the parameters of this model can be trained.
3685
 * @param name The unique name of the model.
3686
 * @return A composed model that applies these models one by one in sequence.
3687
 */
3688
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_sequential_new(ccv_cnnp_model_t* const* const models, const int model_size, const int is_trainable, const char* const name);
3689
/**
3690
 * A model generation function to be called for dynamic models.
3691
 */
3692
typedef ccv_cnnp_model_t* (*ccv_cnnp_model_dynamic_f)(const ccv_nnc_tensor_param_t* const inputs, const int input_size, void* const context);
3693
/**
3694
 * This method returns a model that will be recreated if it is recompiled. Put it this way, you can call
3695
 * ccv_cnnp_model_compile multiple times with different inputs and input size, however, the model will
3696
 * only be recompiled to some extent. For example, if you called ccv_cnnp_reshape, the shape is determined
3697
 * at the moment you create that model, recompilation won't change. There are two ways to workaround this:
3698
 * 1. Use models that doesn't have explicit shape specified, for example, ccv_cnnp_dense, and avoid models
3699
 *    that is not as flexible, such as ccv_cnnp_reshape, or ccv_cnnp_cmd_exec.
3700
 * 2. Create with ccv_cnnp_dynamic_new such that the model will be recreated again whenever recompile.
3701
 * @param func The function to be called to create the model.
3702
 * @param context The context used along to create the model.
3703
 * @param name The unique name of the model.
3704
 * @return A model object that is yet to be created until build.
3705
 */
3706
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_dynamic_new(ccv_cnnp_model_dynamic_f func, void* const context, const char* const name);
3707
/**
3708
 * Prepare the model to be trained, the input specifies the batch size etc.
3709
 * Input size technically is not needed, here is a safety check.
3710
 * @param model The model to be compiled.
3711
 * @param inputs The tensor parameters for the model's inputs, that can be used to derive all tensor shapes.
3712
 * @param input_size The size of the inputs array.
3713
 * @param minimizer The wrapped command that represents a particular optimization strategy.
3714
 * @param loss The wrapped command that computes the loss function.
3715
 */
3716
void ccv_cnnp_model_compile(ccv_cnnp_model_t* const model, const ccv_nnc_tensor_param_t* const inputs, const int input_size, const ccv_nnc_cmd_t minimizer, const ccv_nnc_cmd_t loss);
3717
/**
3718
 * Absorb a new model into the existing model. This requires the new model has exactly the same parameters
3719
 * but other dimensionality's can change. The new model has to not be compiled yet, its life-cycle management
3720
 * will be take over by the existing model. You don't need to free it separately.
3721
 * @param model The existing model.
3722
 * @param init The new model.
3723
 * @param inputs The tensor parameters for the model's inputs, that can be used to derive all tensor shapes.
3724
 * @param input_size The size of the inputs array.
3725
 */
3726
void ccv_cnnp_model_absorb(ccv_cnnp_model_t* const model, ccv_cnnp_model_t* const init, const ccv_nnc_tensor_param_t* const inputs, const int input_size);
3727
/**
3728
 * Create a copy of an existing model.
3729
 * @param model The existing model.
3730
 * @param is_trainable Whether the parameters of this model can be trained.
3731
 * @return The new model that is exactly the same copy of the old one.
3732
 */
3733
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_model_copy(const ccv_cnnp_model_t* const model, const int is_trainable);
3734
/**
3735
 * Get the output size of the model.
3736
 * @param model The existing model.
3737
 * @return The output size of the model.
3738
 */
3739
CCV_WARN_UNUSED(int) ccv_cnnp_model_output_size(const ccv_cnnp_model_t* const model);
3740
/**
3741
 * Get whether the model is trainable.
3742
 * @param model The existing model.
3743
 * @return Whether the model is trainable, -1 is inherited from its parent.
3744
 */
3745
CCV_WARN_UNUSED(int) ccv_cnnp_model_is_trainable(const ccv_cnnp_model_t* const model);
3746
/**
3747
 * Compute the shape of the output tensor after the model applied to the input.
3748
 * This can only be called after the model is compiled with proper input parameters.
3749
 * @param model The model to compute the output shapes.
3750
 * @param outputs The computed tensor parameters in the output.
3751
 * @param output_size The size of the output array, it has to match the model's output.
3752
 */
3753
void ccv_cnnp_model_tensor_auto(ccv_cnnp_model_t* const model, ccv_nnc_tensor_param_t* const outputs, const int output_size);
3754
/**
3755
 * Generate output that can be parsed by GraphViz (DOT language).
3756
 * @param model The composed model.
3757
 * @param flags Either CCV_NNC_SHORT_DOT_GRAPH or CCV_NNC_LONG_DOT_GRAPH
3758
 * @param outs The output file streams.
3759
 * @param out_size The size of output file stream array.
3760
 */
3761
void ccv_cnnp_model_dot(const ccv_cnnp_model_t* const model, const int flags, FILE** const outs, const int out_size);
3762
/**
3763
 * Provide a hook for upper level to do custom formatting of a given model. You can implement logic
3764
 * to format the model into protobuf, or json. This is only useful after model is compiled.
3765
 * @param model The composed model.
3766
 * @param format_fn The format callback to be called on every node.
3767
 * @param context The context that will be passed to the callback.
3768
 */
3769
void ccv_cnnp_model_format(const ccv_cnnp_model_t* const model, const ccv_nnc_symbolic_graph_format_f format_fn, void* const context);
3770
/**
3771
 * Fit a model to a given input / output. This is a combination of running ccv_cnnp_model_evaluate /
3772
 * ccv_cnnp_model_backward / ccv_cnnp_model_apply_gradients. The difference is that when calling
3773
 * individual functions, the graph is compiled piece by piece, thus, is less efficient than calling
3774
 * ccv_cnnp_model_fit directly. However, having the separate functions makes this implementation much
3775
 * more versatile, for example, can accumulate gradients for multiple batches, or using custom gradients
3776
 * etc.
3777
 * @param model The composed model.
3778
 * @param inputs The input tensors.
3779
 * @param input_size The size of the input tensors array.
3780
 * @param fits The target tensors.
3781
 * @param fit_size The size of the target tensors array.
3782
 * @param outputs The actual outputs from the model.
3783
 * @param output_size The size of the outputs array.
3784
 * @param tensor_tape An opaque tensor tape object to "backpropagate through time".
3785
 * @param stream_context The stream where the fit can be executed upon.
3786
 */
3787
void ccv_cnnp_model_fit(ccv_cnnp_model_t* const model, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const fits, const int fit_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const stream_context);
3788
enum {
3789
  /**
3790
   * Don't disable any outgrad.
3791
   */
3792
  CCV_CNNP_DISABLE_OUTGRAD_NONE = (uint64_t)0,
3793
  /**
3794
   * Disable all inputs' outgrads.
3795
   */
3796
  CCV_CNNP_DISABLE_OUTGRAD_ALL = (uint64_t)(int64_t)-1,
3797
};
3798
/**
3799
 * The parameters for how evaluation should behave.
3800
 */
3801
typedef struct {
3802
  int requires_grad; /**< Whether we need to keep intermediate results for gradient computations. */
3803
  int is_test; /**< Whether we evaluate it as test, or just as forward pass of the training process. */
3804
  uint64_t disable_outgrad; /**< Whether we can compute outflow gradients when call ccv_cnnp_model_backward later, this is a bitmask, you can mark for which input the outgrad is disabled. */
3805
} ccv_cnnp_evaluate_param_t;
3806
/**
3807
 * Evaluate model with output.
3808
 * @param model The composed model.
3809
 * @param params The parameters for how evaluation should behave.
3810
 * @param inputs The input tensors.
3811
 * @param input_size The size of the input tensors array.
3812
 * @param outputs The actual outputs from the model.
3813
 * @param output_size The size of the outputs array.
3814
 * @param tensor_tape An opaque tensor tape object to "backpropagate through time".
3815
 * @param stream_context The stream where the evaluation can be executed upon.
3816
 */
3817
void ccv_cnnp_model_evaluate(ccv_cnnp_model_t* const model, const ccv_cnnp_evaluate_param_t params, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const stream_context);
3818
/**
3819
 * Dryrun the model with inputs / outputs. This runs the evaluation loop up until the actual execution.
3820
 * @param model The composed model.
3821
 * @param params The parameters for how evaluation should behave.
3822
 * @param inputs The input tensors.
3823
 * @param input_size The size of the input tensors array.
3824
 * @param outputs The actual outputs from the model.
3825
 * @param output_size The size of the outputs array.
3826
 */
3827
void ccv_cnnp_model_dry_run(ccv_cnnp_model_t* const model, const ccv_cnnp_evaluate_param_t params, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size);
3828
/**
3829
 * Based on the input gradients, compute the output gradients (w.r.t. the inputs). This also adds parameter gradients.
3830
 * @param model The composed model.
3831
 * @param ingrads The input gradients.
3832
 * @param ingrad_size The size of the input gradients array.
3833
 * @param outgrads The output gradients (w.r.t. the inputs).
3834
 * @param outgrad_size The size of the output gradients array.
3835
 * @param tensor_tape An opaque tensor tape object to "backpropagate through time".
3836
 * @param stream_context The stream where the gradient computation can be executed upon.
3837
 */
3838
void ccv_cnnp_model_backward(ccv_cnnp_model_t* const model, ccv_nnc_tensor_t* const* const ingrads, const int ingrad_size, ccv_nnc_tensor_t* const* const outgrads, const int outgrad_size, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const stream_context);
3839
/**
3840
 * Apply the computed gradients to the parameter tensors.
3841
 * @param model The composed model.
3842
 * @param stream_context The stream where the gradient computation can be executed upon.
3843
 */
3844
void ccv_cnnp_model_apply_gradients(ccv_cnnp_model_t* const model, ccv_nnc_stream_context_t* const stream_context);
3845
/**
3846
 * Cancel execution of a model, whether it is forward / backward or gradient application pass. You need to make
3847
 * sure the model is currently executing when cancelling. This method will set a flag internally and the
3848
 * execution will check that flag when push compute on the computation device and abort if it is cancelled.
3849
 * When you call other model execution method again, this cancellation won't in effect and you need to call
3850
 * cancel again.
3851
 * @param model The composed model.
3852
 */
3853
void ccv_cnnp_model_cancel(ccv_cnnp_model_t* const model);
3854
/**
3855
 * Set flags for the exec symbols created by the model. See CCV_NNC_GRAPH_EXEC_* for details.
3856
 * Note that practically right now, only DISABLE_OPT is useful.
3857
 * @param model The composed model before apply / evaluate.
3858
 * @param flags The flags to set on all exec symbols potentially associated with this model.
3859
 */
3860
void ccv_cnnp_model_set_flags(ccv_cnnp_model_t* const model, const int flags);
3861
/**
3862
 * Get flags for the exec symbols created by the model. See CCV_NNC_GRAPH_EXEC_* for details.
3863
 * Note that practically right now, only DISABLE_OPT is useful.
3864
 * @param model The composed model before apply / evaluate.
3865
 */
3866
CCV_WARN_UNUSED(int) ccv_cnnp_model_flags(ccv_cnnp_model_t* const model);
3867
enum {
3868
  /**
3869
   * This is the default flag, if the model is not initialized, will attempt to read from the disk.
3870
   * Otherwise, will persist existing parameters to disk.
3871
   */
3872
  CCV_CNNP_MODEL_CHECKPOINT_READ_WRITE,
3873
  /**
3874
   * Only read parameters out of disk, even it is already initialized.
3875
   */
3876
  CCV_CNNP_MODEL_CHECKPOINT_READ_ONLY,
3877
  /**
3878
   * Only write parameters to disk.
3879
   */
3880
  CCV_CNNP_MODEL_CHECKPOINT_WRITE_ONLY,
3881
};
3882
/**
3883
 * Write model's tensors to a SQLite database with a given name. Note that we specifically say
3884
 * "model's tensors" because it doesn't persist the model's structure. Hence, you shouldn't
3885
 * expect us to take a name to then have a fully functional model restored from there. You still
3886
 * need to construct the model. This method only write the tensors (weights and other internal ones)
3887
 * to disk.
3888
 * @param model The model.
3889
 * @param handle The SQLite handle.
3890
 * @param name The name to find the tensors related to the model in the database.
3891
 * @param options The IO options that can do data encode / decode before persistence.
3892
 * @return CCV_IO_FINAL for success, otherwise error.
3893
 */
3894
int ccv_cnnp_model_write(const ccv_cnnp_model_t* const model, void* const handle, const char* const name, const ccv_nnc_tensor_io_option_t* const options);
3895
/**
3896
 * Write model's tensors to a SQLite database implicitly with "" name. This is a convenience method
3897
 * to ccv_cnnp_model_write particularly useful at training time.
3898
 * @param model The composed model.
3899
 * @param fn The file name.
3900
 * @param options The IO options that can do data encode / decode before persistence.
3901
 */
3902
void ccv_cnnp_model_write_to_file(ccv_cnnp_model_t* const model, const char* const fn, const ccv_nnc_tensor_io_option_t* const options);
3903
/**
3904
 * The prototype for the writer function when exporting parameters out.
3905
 * @param tensor The tensor to be written to disk.
3906
 * @param sql The sql to be executed.
3907
 * @param handle The custom handle that you passed in from ``ccv_cnnp_model_write`` method.
3908
 * @param options The IO options that can do data encode / decode before persistence.
3909
 * @param name The name give to a particular parameter.
3910
 */
3911
typedef int (*ccv_cnnp_model_io_writer_f)(const ccv_nnc_tensor_t* const tensor, const char* const sql, void* const handle, const char* const name, const ccv_nnc_tensor_io_option_t* const options);
3912
/**
3913
 * The prototype for the reader function to load parameters.
3914
 * @param handle The custom handle that you passed in from ``ccv_cnnp_model_read`` method.
3915
 * @param name The name give to a particular parameter.
3916
 * @param options The IO options that can do data encode / decode before persistence.
3917
 * @param params The recommended tensor params.
3918
 * @param tensor_out The tensor to be loaded.
3919
 */
3920
typedef int (*ccv_cnnp_model_io_reader_f)(void* const handle, const char* const name, const ccv_nnc_tensor_io_option_t* const options, const ccv_nnc_tensor_param_t params, ccv_nnc_tensor_t** const tensor_out);
3921
/**
3922
 * Set IO interceptor for loading weights from / to the model to replace the default SQLite reader / writer.
3923
 * @param model The model.
3924
 * @param reader The reader function for loading weights.
3925
 * @param writer The writer function for exporting weights out.
3926
 */
3927
void ccv_cnnp_model_set_io(ccv_cnnp_model_t* const model, ccv_cnnp_model_io_reader_f reader, ccv_cnnp_model_io_writer_f writer);
3928
/**
3929
 * Read model's tensors from a SQLite database with a given name.
3930
 * @param handle The SQLite handle.
3931
 * @param name The name to find the tensors related to the model in the database.
3932
 * @param options The IO options that can do data encode / decode before persistence.
3933
 * @param model_out The model which you want to restore the tensors. It should have the same
3934
 *                  structure as the one in write to.
3935
 * @return CCV_IO_FINAL for success, otherwise error.
3936
 */
3937
int ccv_cnnp_model_read(void* const handle, const char* const name, const ccv_nnc_tensor_io_option_t* const options, const ccv_cnnp_model_t* const model_out);
3938
/**
3939
 * Read model's tensors to a SQLite database implicitly with "" name. This is a convenience method
3940
 * to ccv_cnnp_model_read particularly useful at training time.
3941
 * @param fn The file name.
3942
 * @param options The IO options that can do data encode / decode before persistence.
3943
 * @param model_out The model which you want to restore the tensors. It should have the same
3944
 *                  structure as the one in write to.
3945
 */
3946
void ccv_cnnp_model_read_from_file(const char* const fn, const ccv_nnc_tensor_io_option_t* const options, const ccv_cnnp_model_t* const model_out);
3947
/**
3948
 * Apply data parallel to the composed model. This method has to be called before we call either
3949
 * evaluate or fit and after the model is compiled.
3950
 * @param model The composed model.
3951
 * @param parallel Number of devices we want to run on. 0 will use all devices available. 1 will skip.
3952
 */
3953
void ccv_cnnp_model_set_data_parallel(ccv_cnnp_model_t* const model, const int parallel);
3954
/**
3955
 * Set the maximum operator-level concurrency. This is a soft-limit, e.g. if you have operations on
3956
 * different devices, they are concurrent.
3957
 * @param model The composed model.
3958
 * @param max_stream_count The maximum concurrency if the model schedules internal streams. 0 is no limit.
3959
 */
3960
void ccv_cnnp_model_set_max_concurrency(ccv_cnnp_model_t* const model, const int max_stream_count);
3961
/**
3962
 * Apply memory compression to the composed model. The memory compression technique can reduce memory
3963
 * usage up to 75% comparing with raw mix-precision model during training time.
3964
 * @param model The composed model.
3965
 * @param memory_compression Whether to enable the memory compression (1 - enable, 0 - disable (default))
3966
 */
3967
void ccv_cnnp_model_set_memory_compression(ccv_cnnp_model_t* const model, const int memory_compression);
3968
/**
3969
 * Apply memory reduction to the composed model. The memory reduction technique can reduce memory
3970
 * usage losslessly. Right now, the supported memory reduction technique is to redo datatype conversion.
3971
 * @param model The composed model.
3972
 * @param memory_reduction Whether to enable the memory reduction (1 - enable, 0 - disable (default))
3973
 */
3974
void ccv_cnnp_model_set_memory_reduction(ccv_cnnp_model_t* const model, const int memory_reduction);
3975
/**
3976
 * Set the computations in this model to be gradient checkpointing. This can be strategically applied
3977
 * to models within the higher-level composed models such that these models can effectively save 0
3978
 * gradients during backprop with the cost of running forward pass twice.
3979
 * @param model The model that will turn on gradient checkpointing.
3980
 * @param gradient_checkpointing Whether to enable gradient checkpointing (1 - enable, 0 - disable (default))
3981
 */
3982
void ccv_cnnp_model_set_gradient_checkpointing(ccv_cnnp_model_t* const model, const int gradient_checkpointing);
3983
/**
3984
 * Get whether gradient checkpointing is enabled or not for this model.
3985
 * @param model The model that will turn on gradient checkpointing.
3986
 */
3987
int ccv_cnnp_model_gradient_checkpointing(ccv_cnnp_model_t* const model);
3988
/**
3989
 * Set compile parameters on the model so it compiles the graph with the said parameters.
3990
 * @param model The composed model.
3991
 * @param compile_params A ccv_nnc_symbolic_graph_compile_param_t struct defines compilation parameters.
3992
 */
3993
void ccv_cnnp_model_set_compile_params(ccv_cnnp_model_t* const model, const ccv_nnc_symbolic_graph_compile_param_t compile_params);
3994
/**
3995
 * This method set the max workspace size. If the graph is already compiled. It will re-run
3996
 * autotune to use the new workspace size to find the best algorithm.
3997
 * @param model The composed model.
3998
 * @param workspace_size The size in bytes that we can use as workspace (scratch memory).
3999
 */
4000
void ccv_cnnp_model_set_workspace_size(ccv_cnnp_model_t* const model, size_t workspace_size);
4001
/**
4002
 * This method returns the current max workspace size.
4003
 * @param model The composed model.
4004
 */
4005
size_t ccv_cnnp_model_workspace_size(ccv_cnnp_model_t* const model);
4006
/**
4007
 * Set a parameter that is specified by the parameter span. This will override whatever value in that
4008
 * parameter. The given tensor should match the dimension of the parameter. It doesn't matter whether
4009
 * the given tensor is on CPU or GPU, it will be copied over. This method is limited, it can only set
4010
 * tensor once the model is compiled.
4011
 * @param model The composed model.
4012
 * @param parameter The parameter that is used to specify which parameter to override.
4013
 * @param tensor The tensor contains the value we want to copy over.
4014
 */
4015
void ccv_cnnp_model_set_parameter(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameter, const ccv_nnc_tensor_t* const tensor);
4016
/**
4017
 * Copy a parameter that is specified by the parameter span out of a model. This will override the value
4018
 * in the tensor you provided. The given tensor should match the dimension of the parameter and should
4019
 * already be allocated. It doesn't matter whether the given tensor is on CPU or GPU.
4020
 * @param model The composed model.
4021
 * @param parameter The parameter that is used to specify which parameter to copy from.
4022
 * @param tensor The tensor that receives value.
4023
 */
4024
void ccv_cnnp_model_parameter_copy(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameter, ccv_nnc_tensor_t* const tensor);
4025
/**
4026
 * Get the ccv_nnc_tensor_param_t for a particular parameter of a model.
4027
 * @param model The composed model.
4028
 * @param parameter The parameter that is used to specify which parameter to retrieve ccv_nnc_tensor_param_t.
4029
 * @return The ccv_nnc_tensor_param_t structure that specifies a tensor shape.
4030
 */
4031
CCV_WARN_UNUSED(ccv_nnc_tensor_param_t) ccv_cnnp_model_parameter_tensor_params(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameter);
4032
/**
4033
 * Get the internal name for a particular parameter of a model.
4034
 * @param model The composed model.
4035
 * @param parameter The parameter that is used to specify which parameter to retrieve ccv_nnc_tensor_param_t.
4036
 * @return The name string for internal name, its life-cycle is managed by the model, and valid until the next invocation of the model either another call or free.
4037
 */
4038
CCV_WARN_UNUSED(const char*) ccv_cnnp_model_parameter_name(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameter);
4039
/**
4040
 * This method returns the number of parameters for this particular model. Note that this is only available after
4041
 * model is compiled.
4042
 * @param model A model that is compiled.
4043
 * @return The number of parameters.
4044
 */
4045
CCV_WARN_UNUSED(int) ccv_cnnp_model_parameter_count(ccv_cnnp_model_t* const model);
4046
/**
4047
 * This method returns the total byte size of parameters for this particular model. Note that this is only available after
4048
 * model is compiled.
4049
 * @param model A model that is compiled.
4050
 * @return The total byte size of parameters.
4051
 */
4052
CCV_WARN_UNUSED(uint64_t) ccv_cnnp_model_parameters_size(ccv_cnnp_model_t* const model);
4053
/**
4054
 * This method moved parameters of this particular model to designated device. It invalidates the parameters
4055
 * on a given model and requires to move back if the model needs to be used later.
4056
 * You can consider this as a counterpart for ccv_cnnp_model_parameter_copy, but operates on the whole model.
4057
 * @param model A model that is compiled.
4058
 * @param names The name associated with the tensor parameter.
4059
 * @param tensors The tensor associated with this parameter.
4060
 * @param count The size of the array provided for names and tensors, this should match ccv_cnnp_model_parameter_count call.
4061
 * @param type Either CCV_TENSOR_GPU_MEMORY or CCV_TENSOR_CPU_MEMORY.
4062
 * @return 1 for success.
4063
 */
4064
CCV_WARN_UNUSED(int) ccv_cnnp_model_parameters_move(ccv_cnnp_model_t* const model, char** const names, ccv_nnc_tensor_t** const tensors, const int count, const int type);
4065
/**
4066
 * This method moves or copies parameters from the array to this particular model to designated device.
4067
 * If it is a move, it invalidates the parameters in the array and leaves a "skeleton" tensor.
4068
 * You can consider this as a counterpart for ccv_cnnp_model_set_parameter, but operates on the whole model.
4069
 * @param model A model that is compiled.
4070
 * @param names The name associated with the tensor parameter.
4071
 * @param tensors The tensor associated with this parameter.
4072
 * @param count The size of the array provided for names and tensors, this should match ccv_cnnp_model_parameter_count call.
4073
 * @param invalidates Whether to invalidate the original tensor (1 - to invalidate, use move semantics if possible).
4074
 */
4075
void ccv_cnnp_model_set_parameters_from_key_values(ccv_cnnp_model_t* const model, char* const* const names, ccv_nnc_tensor_t** const tensors, const int count, const int invalidates);
4076
/**
4077
 * Use this to loop over and if the parameter matches, return 1.
4078
 */
4079
typedef int (*ccv_cnnp_model_parameters_filter_f)(const ccv_cnnp_model_t* const model, const char* const name, void* const context);
4080
/**
4081
 * Loop over a compiled model to find a parameter to either write or modify.
4082
 * @param model A model that is compiled.
4083
 * @param filter The callback that determines whether this parameter matches.
4084
 * @param context The context to be passed along with the callback.
4085
 * @return an array of ccv_cnnp_model_io_t.
4086
 */
4087
CCV_WARN_UNUSED(ccv_array_t*) ccv_cnnp_model_parameters_filter(ccv_cnnp_model_t* const model, ccv_cnnp_model_parameters_filter_f filter, void* const context);
4088
/**
4089
 * Loop over a compiled model to find a parameter to either write or modify.
4090
 * @param model A model that is compiled.
4091
 * @param first The callback that determines whether a parameter is found.
4092
 * @param context The context to be passed along with the callback.
4093
 * @return a ccv_cnnp_model_io_t or 0 if not found.
4094
 */
4095
CCV_WARN_UNUSED(ccv_cnnp_model_io_t) ccv_cnnp_model_parameter_first(ccv_cnnp_model_t* const model, ccv_cnnp_model_parameters_filter_f first, void* const context);
4096
/**
4097
 * Loop over a compiled model to find a parameter that is not initialized.
4098
 * @param model A model that is compiled.
4099
 * @return a ccv_cnnp_model_io_t or 0 if not found.
4100
 */
4101
CCV_WARN_UNUSED(ccv_cnnp_model_io_t) ccv_cnnp_model_parameter_first_uninit(ccv_cnnp_model_t* const model);
4102
/**
4103
 * Set parameters from another model. This will override whatever values in these parameters. The
4104
 * given parameters from another model should match the dimension of the parameter. It doesn't matter
4105
 * whether the given tensor is on CPU or GPU. This method can only set when both models are compiled.
4106
 * @param model The composed model to be set on parameters.
4107
 * @param parameters The parameters to be override.
4108
 * @param from_model The model to copy parameters from.
4109
 * @param from_parameters The parameters to be copied from.
4110
 */
4111
void ccv_cnnp_model_set_parameters(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameters, const ccv_cnnp_model_t* const from_model, const ccv_cnnp_model_io_t from_parameters);
4112
4113
/**
4114
 * @param context The context pass to the share method.
4115
 * @param source_name The name of the parameter from the from model.
4116
 * @param updated_name The name of the parameter from the model. You can update the value.
4117
 * @param provided_size The size of the updated_name buffer.
4118
 * @return 0 if succeed. -1 if failed.
4119
 */
4120
typedef int(*ccv_cnnp_model_parameters_renamer_f)(void* const context, const char* const source_name, char* const updated_name, const size_t provided_size);
4121
/**
4122
 * Share parameters between two models. This is a very specific setup to enable memory optimization
4123
 * by sharing parameter weights between two models. The models can be different as long as the weights
4124
 * match. The model is responsible to keep from_model alive / from destroyed. There is no refcount.
4125
 * Besides using the parameters to identify, you can also use the given block to provide name match.
4126
 * @param model The composed model to be set on parameters.
4127
 * @param parameters The parameters to be override.
4128
 * @param from_model The model to copy parameters from.
4129
 * @param from_parameters The parameters to be shared from.
4130
 * @param renamer The provided rename function that can get the new name from the from_parameters.
4131
 * @param context The context for renamer function.
4132
 */
4133
void ccv_cnnp_model_share_parameters(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameters, const ccv_cnnp_model_t* const from_model, const ccv_cnnp_model_io_t from_parameters, ccv_cnnp_model_parameters_renamer_f renamer, void* const context);
4134
/**
4135
 * Process parameters such as exponential averaging.
4136
 * parameters = zip(from_parameters, to_parameters).map { cmd(to_parameter, from_parameter) }
4137
 * The order is selected in such way because many of our commands only support inplace op if the first
4138
 * parameter matches.
4139
 * @param model The composed model to have parameters zip mapped.
4140
 * @param parameters The parameters to be written (and read).
4141
 * @param cmd The command to apply on the parameters.
4142
 * @param hint The hint supplied to the cmd.
4143
 * @param flags The flags supplied to the cmd.
4144
 * @param aux_ins Additional inputs supplied to the cmd.
4145
 * @param aux_in_size The size of additional inputs supplied to the cmd.
4146
 * @param aux_outs Additional outputs supplied to the cmd.
4147
 * @param aux_out_size The size of additional outputs supplied to the cmd.
4148
 * @param stream_context The stream context to be associated with.
4149
 * @param from_model The other composed model to have parameters zipped.
4150
 * @param from_parameters The parameters to be read.
4151
 */
4152
void ccv_cnnp_model_parameters_zip_map(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameters, const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const aux_ins, const int aux_in_size, ccv_nnc_tensor_t* const* const aux_outs, const int aux_out_size, ccv_nnc_stream_context_t* const stream_context, const ccv_cnnp_model_t* const from_model, const ccv_cnnp_model_io_t from_parameters);
4153
/**
4154
 * Process parameters such as clipping. parameters = parameters.map { cmd(parameter) }
4155
 * @param model The composed model to have parameters mapped.
4156
 * @param parameters The parameters to be mapped.
4157
 * @param cmd The command to apply on the parameters.
4158
 * @param hint The hint supplied to the cmd.
4159
 * @param flags The flags supplied to the cmd.
4160
 * @param aux_ins Additional inputs supplied to the cmd.
4161
 * @param aux_in_size The size of additional inputs supplied to the cmd.
4162
 * @param aux_outs Additional outputs supplied to the cmd.
4163
 * @param aux_out_size The size of additional outputs supplied to the cmd.
4164
 * @param stream_context The stream context to be associated with.
4165
 */
4166
void ccv_cnnp_model_parameters_map(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameters, const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const aux_ins, const int aux_in_size, ccv_nnc_tensor_t* const* const aux_outs, const int aux_out_size, ccv_nnc_stream_context_t* const stream_context);
4167
/**
4168
 * Process parameter gradients such as normalization. parameters.grad = parameters.apply { cmd(parameter.grad) }
4169
 * @param model The composed model to have parameters mapped.
4170
 * @param parameters The parameters to be mapped.
4171
 * @param cmd The command to apply on the parameters.
4172
 * @param hint The hint supplied to the cmd.
4173
 * @param flags The flags supplied to the cmd.
4174
 * @param aux_ins Additional inputs supplied to the cmd.
4175
 * @param aux_in_size The size of additional inputs supplied to the cmd.
4176
 * @param aux_outs Additional outputs supplied to the cmd.
4177
 * @param aux_out_size The size of additional outputs supplied to the cmd.
4178
 * @param stream_context The stream context to be associated with.
4179
 */
4180
void ccv_cnnp_model_parameter_gradients_map(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameters, const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const aux_ins, const int aux_in_size, ccv_nnc_tensor_t* const* const aux_outs, const int aux_out_size, ccv_nnc_stream_context_t* const stream_context);
4181
/**
4182
 * Set a new minimizer for the model. This is useful when you need to update learn rate for stochastic
4183
 * gradient descent for example. This method can be called any time during the training process (after
4184
 * compilation).
4185
 * @param model The composed model.
4186
 * @param minimizer The wrapped command that represents a new optimization strategy.
4187
 * @param reset Reset all previous states of minimizers. This only makes sense if both parameters and parameter_size is 0.
4188
 * @param parameters The parameters to be applied the minimizer on. 0 meant for all.
4189
 * @param parameter_size The number of parameter spans.
4190
 */
4191
void ccv_cnnp_model_set_minimizer(ccv_cnnp_model_t* const model, const ccv_nnc_cmd_t minimizer, const int reset, const ccv_cnnp_model_io_t* const parameters, const int parameter_size);
4192
/**
4193
 * Retrieve the default minimizer for the model. This is set either you call model compile or
4194
 * ccv_cnnp_model_set_minimizer with no parameter spans.
4195
 * @param model The composed model.
4196
 * @return The minimizer command.
4197
 */
4198
CCV_WARN_UNUSED(ccv_nnc_cmd_t) ccv_cnnp_model_minimizer(ccv_cnnp_model_t* const model);
4199
/**
4200
 * Get the default stream from a compiled model. If the model is not compiled, the default stream is
4201
 * 0.
4202
 * @param model The composed model.
4203
 * @return The default stream for this model.
4204
 */
4205
CCV_WARN_UNUSED(ccv_nnc_stream_context_t*) ccv_cnnp_model_default_stream(const ccv_cnnp_model_t* const model);
4206
/**
4207
 * Get the allocated memory size (exclude workspace) from a compiled model. If the model is not compiled
4208
 * the size is 0.
4209
 * @param model The composed model.
4210
 * @return The number of bytes for memory allocated.
4211
 */
4212
CCV_WARN_UNUSED(uint64_t) ccv_cnnp_model_memory_size(const ccv_cnnp_model_t* const model);
4213
/**
4214
 * Free a given model.
4215
 * @param model The composed model.
4216
 */
4217
void ccv_cnnp_model_free(ccv_cnnp_model_t* const model);
4218
4219
/** @} */
4220
4221
/**
4222
 * @defgroup level_5_model_add_ons Model Add-ons
4223
 * @{
4224
 */
4225
4226
/**
4227
 * Process parameter gradients with normalization. Exactly the same as PyTorch's clip_grad_norm_
4228
 * @param model The composed model to have parameters mapped.
4229
 * @param parameters The parameters to be mapped.
4230
 * @param norm_type Currently only support 2.
4231
 * @param max_norm The max value for norm.
4232
 * @param stream_context The stream context to be associated with.
4233
 */
4234
void ccv_cnnp_model_parameters_clip_grad_norm(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameters, int norm_type, float max_norm, ccv_nnc_stream_context_t* const stream_context);
4235
/**
4236
 * Process parameter gradients to check if any is nan.
4237
 * @param model The composed model to have parameters mapped.
4238
 * @param parameters The parameters to be mapped.
4239
 * @param stream_context The stream context to be associated with.
4240
 * @return 1 if it has any nan, 0 otherwise.
4241
 */
4242
int ccv_cnnp_model_parameter_gradients_isnan(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameters, ccv_nnc_stream_context_t* const stream_context);
4243
4244
enum {
4245
  CCV_CNNP_IO, /**< The parameter is a ccv_cnnp_io_t. */
4246
  CCV_CNNP_NO_TENSOR, /**< The parameter is not used. */
4247
  CCV_CNNP_TENSOR_NOT_OUTPUT, /**< This parameter indicates this is a tensor parameter, but it is not an output reflected as ccv_cnnp_io_t */
4248
  CCV_CNNP_INIT_SHARED_TENSOR, /**< The parameter is a provided tensor for initialization. */
4249
  CCV_CNNP_INIT_SHARED_TENSOR_AS_TRAINABLE, /**< The parameter is a provided tensor that can be updated. */
4250
};
4251
4252
typedef void(*ccv_cnnp_state_initializer_f)(void* const context, const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const input, const ccv_nnc_tensor_symbol_t output_symbol);
4253
typedef void(*ccv_cnnp_cmd_exec_init_state_f)(const ccv_nnc_tensor_symbol_t tensor_symbol, const ccv_cnnp_state_initializer_f initializer, void* const initializer_context, void* const context);
4254
typedef void(*ccv_cnnp_cmd_exec_init_state_deinit_f)(void* const context);
4255
typedef void*(*ccv_cnnp_cmd_exec_init_state_copy_f)(void* const context);
4256
4257
typedef struct {
4258
  ccv_nnc_tensor_param_t info; /**< The tensor parameter for this one. */
4259
  void* context; /**< The context for which we initialize tensor. */
4260
  ccv_cnnp_cmd_exec_init_state_f init; /**< The function to init state for a tensor. */
4261
  ccv_cnnp_cmd_exec_init_state_copy_f copy; /**< The function to make a copy of the context. */
4262
  ccv_cnnp_cmd_exec_init_state_deinit_f deinit; /**< The function to release the context. */
4263
} ccv_cnnp_cmd_exec_io_init_state_t;
4264
4265
typedef struct {
4266
  int type; /**< The type of the parameter, could be CCV_CNNP_IO, NO_TENSOR, INIT_SHARED_TENSOR, or INIT_SHARED_TENSOR_TRAINABLE */
4267
  ccv_cnnp_cmd_exec_io_init_state_t init_state; /** The set of state to initialize the given tensor. */
4268
} ccv_cnnp_cmd_exec_io_t;
4269
/**
4270
 * A generic model based on the command. If the tensors are labeled as ccv_cnnp_io_t, it will participate
4271
 * as the input / output of the model. If it is a init tensor, the model will use this tensor for that parameter.
4272
 * More over, if it is marked as parameter, that tensor will be differentiated against when you call
4273
 * ccv_cnnp_model_fit. This model however doesn't take over ownership of the tensor. You should manage the life
4274
 * cycle of the given tensor and it is your responsibility to make sure they outlive the model. Also, all inputs and
4275
 * outputs marked as init tensors will be shared if you reuse this model in other places.
4276
 * @param cmd The command to generate this model.
4277
 * @param hint The hint to run the command.
4278
 * @param flags The flags with the command.
4279
 * @param inputs A list of ccv_cnnp_cmd_exec_io_t identify each input as either a init tensor or a ccv_cnnp_io_t.
4280
 * @param input_size The size of input list.
4281
 * @param outputs A list of types identify each output as ccv_cnnp_io_t or a none tensor.
4282
 * @param output_size The size of the outputs. There is no need to give ccv_cnnp_tensor_param_t for outputs because
4283
 *        all of them are CCV_CNNP_IO type.
4284
 * @param is_trainable Whether the parameters of this model can be trained.
4285
 * @param name The unique name of the model.
4286
 * @return A model based on the given command.
4287
 */
4288
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_cmd_exec(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, const ccv_cnnp_cmd_exec_io_t* const inputs, const int input_size, const int* const outputs, const int output_size, const int is_trainable, const char* const name);
4289
/**
4290
 * Copy a tensor as initialization for the given parameter.
4291
 * @param tensor The tensor to copy from.
4292
 * @return A init_state that can be passed to ccv_cnnp_cmd_exec_io_t
4293
 */
4294
CCV_WARN_UNUSED(ccv_cnnp_cmd_exec_io_init_state_t) ccv_cnnp_cmd_exec_io_copy(const ccv_nnc_tensor_t* const tensor);
4295
/**
4296
 * Initialize a given parameter with the command.
4297
 * @param cmd The command to call when need to initialize.
4298
 * @param hint The hint to accompany the command.
4299
 * @param flags The flags to accompany the command.
4300
 * @param params The tensor configuration.
4301
 * @return A init_state that can be passed to ccv_cnnp_cmd_exec_io_t
4302
 */
4303
CCV_WARN_UNUSED(ccv_cnnp_cmd_exec_io_init_state_t) ccv_cnnp_cmd_exec_io_set_by(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, const ccv_nnc_tensor_param_t params);
4304
4305
typedef struct {
4306
  ccv_nnc_tensor_symbol_t symbol; /**< The tensor symbol this is reference to. */
4307
  int type; /**< The type of the parameter, could be CCV_CNNP_IO, INIT_SHARED_TENSOR, or INIT_SHARED_TENSOR_TRAINABLE */
4308
  ccv_cnnp_cmd_exec_io_init_state_t init_state; /** The set of state to initialize the given tensor. */
4309
} ccv_cnnp_tensor_symbol_param_t;
4310
/**
4311
 * A generic model based on the symbolic graph we provided. A list of tensor symbols are labeled whether it
4312
 * is ccv_cnnp_io_t or not (we identify whether this is a input or output based on whether it is in the graph).
4313
 * If it is not, we init it with a given tensor. If it is marked as parameter, that tensor will be differentiated
4314
 * against when you call ccv_cnnp_model_fit. The model doesn't take ownership over the init tensors. You are
4315
 * responsible to make sure the init tensors outlive the model until the initialization occurred. Also, these
4316
 * tensors will be shared if the model is reused.
4317
 * @param graph The symbolic graph that is our blue print for this model.
4318
 * @param tensor_symbol_params The list of tensor symbol parameters that labels a given symbol.
4319
 * @param tensor_symbol_param_size The size of the list.
4320
 * @param inputs The inputs to this graph. We can figure out which ones are inputs, but this gives us the order.
4321
 * @param input_size The size of the input list.
4322
 * @param outputs The outputs from this graph. We can figure out which ones are outputs, but this gives us the order.
4323
 * @param output_size The size of the output list.
4324
 * @param is_trainable Whether the parameters of this model can be trained.
4325
 * @param name The unique name of the model.
4326
 * @return A model based on the given symbolic graph.
4327
 */
4328
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_graph(const ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_tensor_symbol_param_t* const tensor_symbol_params, const int tensor_symbol_param_size, ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size, const int is_trainable, const char* const name);
4329
/**
4330
 * Sum multiple input tensors together.
4331
 * @param name The unique name of the model.
4332
 * @return A model that can be applied with multiple inputs, and generate output that is a sum of the inputs.
4333
 */
4334
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_sum(const char* const name);
4335
/**
4336
 * Concatenate input tensors together.
4337
 * @param axis Along this axis, we concatenate tensors together. Other dimensions need to be exactly the same.
4338
 * @param name The unique name of the model.
4339
 * @return A model that can be applied with multiple inputs, and generate output that is a concatenation of the inputs.
4340
 */
4341
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_concat(const int axis, const char* const name);
4342
/**
4343
 * Chunk the input tensor into n pieces.
4344
 * @param n How many pieces we chunk the tensor into.
4345
 * @param axis Along this axis, we chunk the tensor. Other dimensions need to be exactly the same.
4346
 * @param name The unique name of the model.
4347
 * @return A model that can be applied with one input, and generate outputs that are chunks of the input.
4348
 */
4349
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_chunk(const int n, const int axis, const char* const name);
4350
/**
4351
 * A convolution model.
4352
 * @param groups The number of kernel groups in the model.
4353
 * @param filters The total number of filters in the model (filters = groups * per group filters).
4354
 * @param kdim The dimensions of the kernel.
4355
 * @param dilation The dilation factor on each dimension.
4356
 * @param no_bias Whether has bias term or not.
4357
 * @param hint The hint for alignment.
4358
 * @param format The format for weights. If 0, it will have the same format as the input.
4359
 * @param is_trainable Whether the parameters of this model can be trained.
4360
 * @param name The unique name of the model.
4361
 * @return A convolution model.
4362
 */
4363
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_convolution(const int groups, const int filters, const int kdim[CCV_NNC_MAX_DIM_ALLOC], const int dilation[CCV_NNC_MAX_DIM_ALLOC], const int no_bias, ccv_nnc_hint_t hint, const int format, const int is_trainable, const char* const name);
4364
/**
4365
 * A convolution transpose model.
4366
 * @param groups The number of kernel groups in the model.
4367
 * @param filters The total number of filters in the model (filters = groups * per group filters).
4368
 * @param kdim The dimensions of the kernel.
4369
 * @param dilation The dilation factor on each dimension.
4370
 * @param output_padding The padding helps to resolve shape ambiguity when this is inverse of convolution.
4371
 * @param no_bias Whether has bias term or not.
4372
 * @param hint The hint for alignment.
4373
 * @param format The format for weights. If 0, it will have the same format as the input.
4374
 * @param is_trainable Whether the parameters of this model can be trained.
4375
 * @param name The unique name of the model.
4376
 * @return A convolution transpose model.
4377
 */
4378
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_convolution_transpose(const int groups, const int filters, const int kdim[CCV_NNC_MAX_DIM_ALLOC], const int dilation[CCV_NNC_MAX_DIM_ALLOC], const int output_padding, const int no_bias, ccv_nnc_hint_t hint, const int format, const int is_trainable, const char* const name);
4379
/**
4380
 * A dense layer model.
4381
 * @param count The output dimension.
4382
 * @param no_bias Whether has a bias term or not.
4383
 * @param flags The flags to disable / enable certain features.
4384
 * @param is_trainable Whether the parameters of this model can be trained.
4385
 * @param name The unique name of the model.
4386
 * @return A dense layer model.
4387
 */
4388
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_dense(const int count, const int no_bias, const int flags, const int is_trainable, const char* const name);
4389
/**
4390
 * A batch norm layer model.
4391
 * @param momentum The momentum in batch norm parameter.
4392
 * @param epsilon The epsilon in batch norm parameter.
4393
 * @param is_trainable Whether the parameters of this model can be trained.
4394
 * @param name The unique name of the model.
4395
 * @return A batch norm layer model.
4396
 */
4397
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_batch_norm(const float momentum, const float epsilon, const int is_trainable, const char* const name);
4398
/**
4399
 * A RELU activation layer model.
4400
 * @param name The unique name of the model.
4401
 * @return A RELU activation layer model.
4402
 */
4403
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_relu(const char* const name);
4404
/**
4405
 * A sigmoid activation layer model.
4406
 * @param name The unique name of the model.
4407
 * @return A sigmoid activation layer model.
4408
 */
4409
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_sigmoid(const char* const name);
4410
/**
4411
 * A tanh activation layer model.
4412
 * @param name The unique name of the model.
4413
 * @return A tanh activation layer model.
4414
 */
4415
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_tanh(const char* const name);
4416
/**
4417
 * A swish activation layer model.
4418
 * @param name The unique name of the model.
4419
 * @return A swish activation layer model.
4420
 */
4421
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_swish(const char* const name);
4422
/**
4423
 * A GELU activation layer model.
4424
 * @param tanh Whether enable fast approximate GELU.
4425
 * @param name The unique name of the model.
4426
 * @return A GELU activation layer model.
4427
 */
4428
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_gelu(const int tanh, const char* const name);
4429
/**
4430
 * A leaky ReLU activation layer model.
4431
 * @param negative_slope The coefficient to be applied when it is negative.
4432
 * @param name The unique name of the model.
4433
 * @return A leaky ReLU activation layer model.
4434
 */
4435
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_leaky_relu(const float negative_slope, const char* const name);
4436
/**
4437
 * A softmax activation layer model.
4438
 * @param name The unique name of the model.
4439
 * @return A softmax activation layer model.
4440
 */
4441
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_softmax(const char* const name);
4442
/**
4443
 * A max pool model.
4444
 * @param kdim The pooling window dimension.
4445
 * @param hint The hint for alignment.
4446
 * @param name The unique name of the model.
4447
 * @return A max pool model.
4448
 */
4449
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_max_pool(const int kdim[CCV_NNC_MAX_DIM_ALLOC], const ccv_nnc_hint_t hint, const char* const name);
4450
/**
4451
 * An average pool model.
4452
 * @param kdim The pooling window dimension.
4453
 * @param hint The hint for alignment.
4454
 * @param name The unique name of the model.
4455
 * @return An average pool model.
4456
 */
4457
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_average_pool(const int kdim[CCV_NNC_MAX_DIM_ALLOC], const ccv_nnc_hint_t hint, const char* const name);
4458
/**
4459
 * Reshape an input into a different dimension.
4460
 * @param format Change the layout format for a given input, 0 is not to change.
4461
 * @param dim The new dimension for the input.
4462
 * @param ofs The offset on each of the dimension.
4463
 * @param stride The line size of each dimension.
4464
 * @param name The unique name of the model.
4465
 * @return A reshape layer model.
4466
 */
4467
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_reshape(const int format, const int dim[CCV_NNC_MAX_DIM_ALLOC], const int ofs[CCV_NNC_MAX_DIM_ALLOC], const int stride[CCV_NNC_MAX_DIM_ALLOC], const char* const name);
4468
/**
4469
 * Pad the input with extra dimensions at beginning or the ends. Padding should be > 0.
4470
 * @param type Two types of padding supported: zero and replication.
4471
 * @param begin How many elements to add at the beginning of each dimension.
4472
 * @param end How many elements to add at the end of each dimension.
4473
 * @param name The unique name of the model.
4474
 * @return A pad layer model.
4475
 */
4476
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_pad(const int type, const int begin[CCV_NNC_MAX_DIM_ALLOC], const int end[CCV_NNC_MAX_DIM_ALLOC], const char* const name);
4477
/**
4478
 * Identity op that simply copy from input to output without using any data transfer / format conversion methods.
4479
 * @param name The unique name of the model.
4480
 * @return An identity layer model.
4481
 */
4482
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_identity(const char* const name);
4483
/**
4484
 * Permute the input. For example, [2, 0, 1] means moving dimension 2 to 0, dimension 0 to 1, dimension 1 to 2.
4485
 * @param index The index for each dimensions from.
4486
 * @param name The unique name of the model.
4487
 * @return A permute layer model.
4488
 */
4489
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_permute(const int index[CCV_NNC_MAX_DIM_ALLOC], const char* const name);
4490
/**
4491
 * Extract one of the multi-outputs. This is useful because ccv_cnnp_model_io_t can contain multiple outputs, this
4492
 * helps to extract one of them out to be used later.
4493
 * @param index The index to the output you want to extract.
4494
 * @param name The unique name of the model.
4495
 * @return A model that can extract one output.
4496
 */
4497
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_extract(const int index, const char* const name);
4498
/**
4499
 * Flatten an input tensor into a one dimensional array.
4500
 * @param name The unique name of the model.
4501
 * @return A flatten layer model.
4502
 */
4503
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_flatten(const char* const name);
4504
/**
4505
 * A layer norm model.
4506
 * @param epsilon The epsilon in layer norm parameter.
4507
 * @param axis The axis are the feature axis to compute norm.
4508
 * @param axis_count How many axis we count as feature.
4509
 * @param elementwise_affine Whether it contains scale / bias.
4510
 * @param is_trainable Whether the parameters of this model can be trained.
4511
 * @param name The unique name of the model.
4512
 * @return A layer norm model.
4513
 */
4514
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_layer_norm(const float epsilon, const int axis[CCV_NNC_MAX_DIM_ALLOC], const int axis_count, const int elementwise_affine, const int is_trainable, const char* const name);
4515
/**
4516
 * A group norm model.
4517
 * @param group_axis The axis are the feature axis to compute norm.
4518
 * @param groups How many groups per axis channel.
4519
 * @param epsilon The epsilon in layer norm parameter.
4520
 * @param reduce_axis The other axes to be reduced.
4521
 * @param axis_count The number of other axes to be reduced.
4522
 * @param elementwise_affine Whether it contains scale / bias.
4523
 * @param is_trainable Whether the parameters of this model can be trained.
4524
 * @param name The unique name of the model.
4525
 * @return A group norm model.
4526
 */
4527
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_group_norm(const int group_axis, const int groups, const float epsilon, const int reduce_axis[CCV_NNC_MAX_DIM_ALLOC], const int axis_count, const int elementwise_affine, const int is_trainable, const char* const name);
4528
/**
4529
 * A rmsnorm model.
4530
 * @param epsilon The epsilon in layer norm parameter.
4531
 * @param axis The axis are the feature axis to compute norm.
4532
 * @param axis_count How many axis we count as feature.
4533
 * @param is_trainable Whether the parameters of this model can be trained.
4534
 * @param name The unique name of the model.
4535
 * @return A rmsnorm model.
4536
 */
4537
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_rmsnorm(const float epsilon, const int axis[CCV_NNC_MAX_DIM_ALLOC], const int axis_count, const int is_trainable, const char* const name);
4538
/**
4539
 * Add two input tensors together. Different from sum because this support broadcasting.
4540
 * @param p The weight for the first input.
4541
 * @param q The weight for the second input.
4542
 * @param name The unique name of the model.
4543
 * @return A model that can be applied with two inputs, and generate output that is a product of the inputs.
4544
 */
4545
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_add(const float p, const float q, const char* const name);
4546
/**
4547
 * Multiply two input tensors together.
4548
 * @param p The weight for the output.
4549
 * @param name The unique name of the model.
4550
 * @return A model that can be applied with two inputs, and generate output that is a product of the inputs.
4551
 */
4552
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_mul(const float p, const char* const name);
4553
/**
4554
 * A scalar multiplication model. Y = aX where a is a scalar.
4555
 * @param a The scalar parameter.
4556
 * @param name The unique name of the model.
4557
 * @return A scalar multiplication model.
4558
 */
4559
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_scalar_mul(const float a, const char* const name);
4560
/**
4561
 * Divide two input tensors together.
4562
 * @param reciprocal Only take one tensor input, effectively compute 1 / input.
4563
 * @param name The unique name of the model.
4564
 * @return A model that can be applied with two inputs, and generate output that is a division of the inputs.
4565
 */
4566
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_div(const int reciprocal, const char* const name);
4567
/**
4568
 * Square root of the input tensor.
4569
 * @param name The unique name of the model.
4570
 * @return A model that can be applied with one input, and generate output that is the square root of the input.
4571
 */
4572
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_sqrt(const char* const name);
4573
/**
4574
 * Multiply two input tensors together as if these are complex numbers.
4575
 * @param name The unique name of the model.
4576
 * @return A model that can be applied with two inputs, and generate output that is a product of the inputs.
4577
 */
4578
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_cmul(const char* const name);
4579
/**
4580
 * A matrix transpose model.
4581
 * @param axis_a The axis to be exchanged with axis_b
4582
 * @param axis_b The axis to be exchanged with axis_a
4583
 * @param name The unique name of the model.
4584
 * @return A matrix transpose model.
4585
 */
4586
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_transpose(const int axis_a, const int axis_b, const char* const name);
4587
/**
4588
 * A batched matrix multiplication model.
4589
 * @param transpose_a The axis to be transposed in the first matrix.
4590
 * @param transpose_b The axis to be transposed in the second matrix.
4591
 * @param flags The flags to disable / enable certain features.
4592
 * @param name The unique name of the model.
4593
 * @return A batched matrix multiplication model.
4594
 */
4595
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_matmul(const int transpose_a[2], const int transpose_b[2], const int flags, const char* const name);
4596
/**
4597
 * A dropout model.
4598
 * @param p The probability to drop the current value.
4599
 * @param entirety Drop the whole layer with the given probability.
4600
 * @param name The unique name of the model.
4601
 * @return A dropout model.
4602
 */
4603
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_dropout(const float p, const int entirety, const char* const name);
4604
/**
4605
 * A masked fill model.
4606
 * @param eq If a value in the given mask tensor is equal to this.
4607
 * @param fill Fill in this value to the output tensor.
4608
 * @param name The unique name of the model.
4609
 * @return A masked fill model.
4610
 */
4611
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_masked_fill(const float eq, const float fill, const char* const name);
4612
/**
4613
 * A index select model.
4614
 * @param name The unique name of the model.
4615
 * @return A index select model.
4616
 */
4617
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_index_select(const char* const name);
4618
/**
4619
 * An dictionary embedding model. This can be thought as index select model but the vocabulary
4620
 * tensor is within this model itself.
4621
 * @param datatype The data type of the vocabulary.
4622
 * @param vocab_size The size of the vocabulary.
4623
 * @param embed_size The size of the embedding.
4624
 * @param is_trainable Whether the parameters of this model can be trained.
4625
 * @param name The unique name of the model.
4626
 * @return A index select model.
4627
 */
4628
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_embedding(const int datatype, const int vocab_size, const int embed_size, const int is_trainable, const char* const name);
4629
/**
4630
 * A upsample model.
4631
 * @param type The type of upsample, whether nearest or bilinear.
4632
 * @param width_scale The scale of the width of the input.
4633
 * @param height_scale The scale of the height of the input.
4634
 * @param align_corners Whether to align corners when doing upsample.
4635
 * @param name The unique name of the model.
4636
 * @return A upsample model.
4637
 */
4638
ccv_cnnp_model_t* ccv_cnnp_upsample(const int type, const float width_scale, const float height_scale, const int align_corners, const char* const name);
4639
/**
4640
 * A sum value reducer model.
4641
 * @param axis The axis to be reduced.
4642
 * @param axis_count The size of the axis array.
4643
 * @param name The unique name of the model.
4644
 * @return A sum value reducer model.
4645
 */
4646
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_reduce_sum(const int* const axis, const int axis_count, const char* const name);
4647
/**
4648
 * A mean value reducer model.
4649
 * @param axis The axis to be reduced.
4650
 * @param axis_count The size of the axis array.
4651
 * @param name The unique name of the model.
4652
 * @return A sum value reducer model.
4653
 */
4654
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_reduce_mean(const int* const axis, const int axis_count, const char* const name);
4655
/**
4656
 * A max value reducer model.
4657
 * @param axis The axis to be reduced.
4658
 * @param axis_count The size of the axis array.
4659
 * @param name The unique name of the model.
4660
 * @return A max value reducer model.
4661
 */
4662
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_reduce_max(const int* const axis, const int axis_count, const char* const name);
4663
/**
4664
 * A min value reducer model.
4665
 * @param axis The axis to be reduced.
4666
 * @param axis_count The size of the axis array.
4667
 * @param name The unique name of the model.
4668
 * @return A min value reducer model.
4669
 */
4670
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_reduce_min(const int* const axis, const int axis_count, const char* const name);
4671
/**
4672
 * A norm2 value reducer model.
4673
 * @param axis The axis to be reduced.
4674
 * @param axis_count The size of the axis array.
4675
 * @param name The unique name of the model.
4676
 * @return A norm2 value reducer model.
4677
 */
4678
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_reduce_norm2(const int* const axis, const int axis_count, const char* const name);
4679
/**
4680
 * A argmax model.
4681
 * @param axis The axis to be reduced.
4682
 * @param name The unique name of the model.
4683
 * @return A max indices model.
4684
 */
4685
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_argmax(const int axis, const char* const name);
4686
/**
4687
 * A argmin model.
4688
 * @param axis The axis to be reduced.
4689
 * @param name The unique name of the model.
4690
 * @return A min indices model.
4691
 */
4692
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_argmin(const int axis, const char* const name);
4693
/**
4694
 * A element-wise min model.
4695
 * @param name The unique name of the model.
4696
 * @return A element-wise min model.
4697
 */
4698
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_min(const char* const name);
4699
/**
4700
 * A element-wise max model.
4701
 * @param name The unique name of the model.
4702
 * @return A element-wise max model.
4703
 */
4704
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_max(const char* const name);
4705
/**
4706
 * A Long-Short Term Memory model.
4707
 * @param masked Whether a mask tensor provided.
4708
 * @param hidden_size The number of features in the hidden state h.
4709
 * @param proj_size The number of features in the hidden state h.
4710
 * @param num_layers The number of layers for RNN.
4711
 * @param bias If 0, the layer won't use bias weights.
4712
 * @param batch_first If 1, will batch before sequence.
4713
 * @param bidirectional Enable bidirectional mode of RNN.
4714
 * @param dropout If non-zero, enable dropout at each layer of RNN.
4715
 * @param is_trainable Whether the parameters of this model can be trained.
4716
 * @param name The unique name of the model.
4717
 * @return A LSTM model.
4718
 */
4719
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_lstm(const int masked, const int hidden_size, const int proj_size, const int num_layers, const int bias, const int batch_first, const int bidirectional, const float dropout, const int is_trainable, const char* const name);
4720
/**
4721
 * Perform datatype conversion for input tensors.
4722
 * @param datatype The desired datatype.
4723
 * @param ref_to_last If there are two inputs to the model, use the last one as a datatype reference.
4724
 * @param name The unique name of the model.
4725
 * @return A model that does data conversion.
4726
 */
4727
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_datatype_conversion(const int datatype, const int ref_to_last, const char* const name);
4728
/**
4729
 * Clamp input tensor to a range.
4730
 * @param min NAN will ignore this.
4731
 * @param max NAN will ignore this.
4732
 * @param name The unique name of the model.
4733
 * @return A model that does clamp.
4734
 */
4735
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_clamp(const float min, const float max, const char* const name);
4736
/**
4737
 * A parameter that can be initialized / loaded.
4738
 * @param params The tensor shape / information about this parameter.
4739
 * @param init_bound The bound for the initial values, in uniform distribution.
4740
 * @param name The unique name of the model.
4741
 * @param is_trainable Whether the parameters of this model can be trained.
4742
 * @return A model that can be applied and return the weight.
4743
 */
4744
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_parameter(const ccv_nnc_tensor_param_t params, const float init_bound, const int is_trainable, const char* const name);
4745
/**
4746
 * A scalar value that can be used.
4747
 * @param type The type of this scalar.
4748
 * @param format The format of this scalar.
4749
 * @param datatype The datatype of this scalar.
4750
 * @param value The value in float.
4751
 * @param name The unique name of the model.
4752
 * @return A model that can be applied and return the scalar.
4753
 */
4754
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_scalar(const int type, const int format, const int datatype, const float value, const char* const name);
4755
/**
4756
 * An empty variable that can be used. This is usually paired to ccv_cnnp_move to make this "input"
4757
 * as destination. This is also different from ccv_cnnp_parameter because that will be persisted.
4758
 * @param params The parameters for the tensor.
4759
 * @param name The unique name of the model.
4760
 * @return A model that can be applied and return the variable.
4761
 */
4762
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_variable(const ccv_nnc_tensor_param_t params, const char* const name);
4763
/**
4764
 * A special model that takes two inputs but copies value in the first input to the second. The
4765
 * second input then returned as the output. This is special because it enables you to violate
4766
 * single-static assignment rule otherwise without using this method, it won't trigger. However,
4767
 * it does have a special place because it enables hand-written optimizations that otherwise require
4768
 * you to either implement a new optimization pass in nnc (difficult to do it correctly) or it is
4769
 * not possible to do with CNNP models and you have to go to Level-3 API, which may not be exposed
4770
 * on high-level language bindings such as s4nnc.
4771
 * @param name The unique name of the model.
4772
 * @return A model that can be applied and copies first input to the second.
4773
 */
4774
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_move(const char* const name);
4775
/**
4776
 * If the input is not contiguous, this model will make it contiguous. Normally, such graph operation
4777
 * will be optimized away when calling ccv_nnc_symbolic_graph_simplify. In this case, we will disable
4778
 * such optimization on the generated node. If the input is not contiguous, the output of this model
4779
 * is the same as the input, hence, skipped.
4780
 * @param name The unique name of the model.
4781
 * @return A model that can be applied and making the input contiguous.
4782
 */
4783
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_contiguous(const char* const name);
4784
/**
4785
 * If the input is a reshape, this model will make it a copy. Normally, such graph operation
4786
 * will be optimized away when calling ccv_nnc_symbolic_graph_simplify. In this case, we will disable
4787
 * such optimization on the generated node. This is useful mainly for memory conservation. In case you
4788
 * are working with a reshape of part of the tensor, make a explicit copy would make sure the original
4789
 * tensor is not retained therefore you can now give the compiler more optimization opportunities on
4790
 * memory conservation.
4791
 * @param name The unique name of the model.
4792
 * @return A model that can be applied and making a copy of the input.
4793
 */
4794
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_copy(const char* const name);
4795
/**
4796
 * Apply the scaled dot product attention to input. Accepting input in the form of (q, k, v)
4797
 * or (q, k, v, attn_mask) if has_attn_mask is 1.
4798
 * @param scale The scale to be applied to the qk dot product.
4799
 * @param is_causal Whether to apply is_causal mask to it. If both attn_mask and is_causal supplied, we will cut attn_mask to upper right triangle.
4800
 * @param has_attn_mask Whether the input would accept a 4th parameter the attention mask.
4801
 * @param flags Which precision is preferred for the attention computation be run at (FP16 or FP32).
4802
 * @param fused_unify_head_weights Whether we also have unifying head weight fused into it. The output would be in shape of (N, S, H * Ev).
4803
 * @param no_bias Whether we have bias or not for the unifying head output.
4804
 * @param is_trainable Whether or not it is trainable (if weight / bias provided).
4805
 * @param name The unique name of the model.
4806
 * @return A model that can apply scaled dot product attention compute.
4807
 */
4808
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_scaled_dot_product_attention(const float scale, const int is_causal, const int has_attn_mask, const int flags, const int fused_unify_head_weights, const int no_bias, const int is_trainable, const char* const name);
4809
/**
4810
 * The function prototype to call during the model execution at this position.
4811
 */
4812
typedef void (*ccv_cnnp_model_debug_f)(ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_stream_context_t* const stream_context, void* const context);
4813
/**
4814
 * The function prototype to destruct the context.
4815
 */
4816
typedef void (*ccv_cnnp_model_debug_context_deinit_f)(void* const context);
4817
/**
4818
 * The function prototype to copy the context.
4819
 */
4820
typedef void* (*ccv_cnnp_model_debug_context_copy_f)(void* const context);
4821
/**
4822
 * A special model that takes n inputs and output the first values. This is a special model because it
4823
 * generates a graph violates single-static assignment rule by having the outputs the same symbol
4824
 * as the input. It also inserts a custom op allows you to intercept the model execution and possibly
4825
 * output useful information from it (i.e. debug print tensors, generate stats like max / min / nan
4826
 * etc.). This is safe to insert anywhere because it doesn't impact the graph execution process but
4827
 * you are also advised to not use this method to modify the tensors during the execution. There will
4828
 * be another method for you to insert custom op in the model.
4829
 * @param func The func to call during the model execution.
4830
 * @param context The context object to be passed along the callback.
4831
 * @param deinit The deinit method to be used to free up the context.
4832
 * @param copy The copy method to make a duplicate of the context.
4833
 * @param name The unique name of the model.
4834
 * @return A model that can be applied and copies first input to the second.
4835
 */
4836
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_debug(ccv_cnnp_model_debug_f func, void* const context, ccv_cnnp_model_debug_context_deinit_f deinit, ccv_cnnp_model_debug_context_copy_f copy, const char* const name);
4837
/**
4838
 * A sort model. The result are two tensors: values and indices.
4839
 * @param along_axis Sort along which axis.
4840
 * @param descending Whether sort by descending order.
4841
 * @param name The unique name of the model.
4842
 * @return A sort model.
4843
 */
4844
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_sort(const int along_axis, const int descending, const char* const name);
4845
/**
4846
 * A partition model. The result are two tensors: values and indices.
4847
 * @param kth Took kth elements.
4848
 * @param along_axis Partition along which axis.
4849
 * @param descending Whether partition by descending order.
4850
 * @param name The unique name of the model.
4851
 * @return A partition model.
4852
 */
4853
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_partition(const int kth, const int along_axis, const int descending, const char* const name);
4854
/**
4855
 * A unique consecutive model. Otherwise known as run-length encode.
4856
 * @param bincount How many unique consecutive elements there are, 0 to match the original.
4857
 * @param name The unique name of the model.
4858
 * @return A unique consecutive model.
4859
 */
4860
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_unique_consecutive(const int bincount, const char* const name);
4861
/**
4862
 * A scatter add model.
4863
 * @param name The unique name of the model.
4864
 * @param bincount How many original elements will be, it needs to be non-zero.
4865
 * @return A scatter add model.
4866
 */
4867
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_scatter_add(const int bincount, const char* const name);
4868
/**
4869
 * A segmented dense layer model. Note that the input would be activation, indices and count.
4870
 * @param segments / experts How many segments in this layer.
4871
 * @param count The output dimension.
4872
 * @param no_bias Whether has a bias term or not.
4873
 * @param flags The flags to disable / enable certain features.
4874
 * @param is_trainable Whether the parameters of this model can be trained.
4875
 * @param name The unique name of the model.
4876
 * @return A segmented dense layer model.
4877
 */
4878
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_segmented_dense(const int segments, const int count, const int no_bias, const int flags, const int is_trainable, const char* const name);
4879
4880
/** @} */
4881
4882
/** @} */
4883
4884
#endif