Coverage Report

Created: 2024-06-21 10:32

/home/liu/actions-runner/_work/ccv/ccv/lib/nnc/ccv_nnc.h
/**********************************************************
 * C-based/Cached/Core Computer Vision Library
 * Liu Liu, 2010-02-01
 **********************************************************/

/**********************************************************
 * CCV - Neural Network Collection
 **********************************************************/

#ifndef GUARD_ccv_nnc_h
#define GUARD_ccv_nnc_h

#include "ccv.h"
#include <stddef.h>

// These are generated by cmd/build-cmd.rb
#include "cmd/ccv_nnc_cmd.h"
#include "cmd/ccv_nnc_backend.h"

/**
 * @defgroup level_0 Level-0 API
 * @{
 */

/**
 * Initialize the library.
 */
void ccv_nnc_init(void);

enum {
  CCV_NNC_DISABLE_MIXED_MPS_GEMM = 0x1,
  CCV_NNC_DISABLE_MIXED_MPS_SOFTMAX = 0x2,
  CCV_NNC_DISABLE_MMAP_MTL_BUFFER = 0x4,
  CCV_NNC_DISABLE_METAL_FLASH_ATTENTION = 0x8,
  CCV_NNC_DISABLE_MFA_GEMM = 0x10,
};
/**
 * Enable a specific system-wide flag.
 */
void ccv_nnc_enable_flag(uint64_t flag);
/**
 * Disable a specific system-wide flag.
 */
void ccv_nnc_disable_flag(uint64_t flag);
/**
 * Get the system-wide flags, to check against.
 */
uint64_t ccv_nnc_flags(void);
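
A minimal usage sketch (editor's addition, not part of the header): the flags form a bitmask, so they can be combined and tested with bitwise operators.

    ccv_nnc_init();
    // Opt out of Metal FlashAttention and MFA GEMM, e.g. while debugging numerics.
    ccv_nnc_enable_flag(CCV_NNC_DISABLE_METAL_FLASH_ATTENTION | CCV_NNC_DISABLE_MFA_GEMM);
    if (ccv_nnc_flags() & CCV_NNC_DISABLE_MFA_GEMM)
      { /* the flag is set system-wide */ }
    ccv_nnc_disable_flag(CCV_NNC_DISABLE_MFA_GEMM); // clear it again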

/** @} */

/**
 * @defgroup level_1 Level-1 API
 * @{
 */

/**
 * @defgroup level_1_cmd Commands
 * @{
 */
enum {
  // Attributes that enable symbolic graph simplification
  CCV_NNC_CMD_ATTR_PASSTHROUGH  = 0x01, /**< This doesn't compute anything, but passes the first n tensors to the output (useful for a backprop that is the identity). */
  CCV_NNC_CMD_ATTR_OUTPUT_ONES  = 0x02, /**< All the output tensors are 1s (unit). */
  CCV_NNC_CMD_ATTR_NULL_IS_ONES = 0x04, /**< Accept nullptr inputs as if they were tensors of 1s (unit). */
};

// Flags passed into cmd when executing.
enum {
  CCV_NNC_ACCUMULATE_OUTPUT = 0x01, /**< Enable accumulating outputs (unsupported). */
  CCV_NNC_ZERO_MEMORY_ALLOC = 0x02, /**< Don't allocate any extra memory for this operation. */
};

enum {
  CCV_NNC_EXEC_SUCCESS   = 0, /**< Successfully executed the command. */
  CCV_NNC_EXEC_INVALID   = -1, /**< Invalid inputs. */
  CCV_NNC_EXEC_NO_KERNEL = -2, /**< No kernel available for a given command / backend. */
  CCV_NNC_EXEC_OOM       = -3, /**< Out of memory error. */
};

enum {
  CCV_NNC_MSE_REDUCE_MEAN = 0, /**< Reduce with mean when computing MSE loss. */
  CCV_NNC_MSE_REDUCE_SUM = 1, /**< Reduce with sum when computing MSE loss. */
};

enum {
  CCV_NNC_HISTOGRAM_EVEN = 0, /**< The bins are evenly distributed from min to max. */
  CCV_NNC_HISTOGRAM_LOGARITHMIC = 1, /**< The bins are distributed following an exponential curve, growing from min to max by a fixed ratio. */
  CCV_NNC_HISTOGRAM_BINS = 2, /**< The bin ranges will be supplied, such as [0, 2, 3, 10]. For the result, [-inf, 0, 2, 3, 10, inf] is implied. */
};

enum {
  CCV_NNC_UPSAMPLE_NEAREST = 0, /**< Using the nearest value. */
  CCV_NNC_UPSAMPLE_BILINEAR = 1, /**< Using bilinear interpolation. */
};

enum {
  CCV_NNC_PAD_ZERO = 0, /**< Pad 0s. */
  CCV_NNC_PAD_REPLICATE = 1, /**< Pad by replicating the edge. */
};

/**
 * Parameters for command.
 */
typedef struct {
  struct {
    int dim[CCV_NNC_MAX_DIM_ALLOC]; /**< [size.dim] The window size for the layer. For a fully connected layer, it is 1 because it is a 1x1 convolutional layer with count of filters. */
  } size;
  union {
    struct {
      int count; /**< [convolution.count] The number of filters for the convolutional layer. */
      int groups; /**< [convolution.groups] The number of groups for the convolutional layer. */
      int dilation[CCV_NNC_MAX_DIM_ALLOC]; /**< [convolution.dilation[]] The dilation factor for the convolutional layer. Defaults to 1. */
    } convolution;
    struct {
      int count; /**< [convolution_transpose.count] The number of filters for the convolutional layer. */
      int groups; /**< [convolution_transpose.groups] The number of groups for the convolutional layer. */
      int dilation[CCV_NNC_MAX_DIM_ALLOC]; /**< [convolution_transpose.dilation[]] The dilation factor for the convolutional layer. Defaults to 1. */
      int output_padding; /**< [convolution_transpose.output_padding] The output padding to resolve ambiguity when treating this as the inverse of convolution. */
    } convolution_transpose;
    struct {
      int hidden_size; /**< [rnn.hidden_size] The number of features in the hidden state h. */
      int proj_size; /**< [rnn.proj_size] The projected size of the hidden state h. */
      int num_layers; /**< [rnn.num_layers] The number of layers for the RNN. */
      int bias; /**< [rnn.bias] If 0, the layer won't use bias weights. */
      int batch_first; /**< [rnn.batch_first] If 1, will batch before sequence. */
      int bidirectional; /**< [rnn.bidirectional] Enable bidirectional mode of the RNN. */
      float dropout; /**< [rnn.dropout] If non-zero, enable dropout at each layer of the RNN. */
      int is_test; /**< [rnn.is_test] Whether running this kernel in test mode or not. */
    } rnn;
    struct {
      int reserved; /**< [pool.reserved] A reserved field. */
    } pool;
    struct {
      float kappa; /**< [rnorm.kappa] As in b[i] = a[i] / (rnorm.kappa + rnorm.alpha * sum(a, i - rnorm.size / 2, i + rnorm.size / 2)) ^ rnorm.beta */
      float alpha; /**< [rnorm.alpha] See **rnorm.kappa**. */
      float beta; /**< [rnorm.beta] See **rnorm.kappa**. */
    } rnorm;
    struct {
      int axis[CCV_NNC_MAX_DIM_ALLOC]; /**< [bnorm.axis[]] The axes selected to compute mean / variance. */
      int count; /**< [bnorm.count] The number of axes selected. */
      float epsilon; /**< [bnorm.epsilon] The epsilon for the standard deviation. */
      int is_test; /**< [bnorm.is_test] Whether in test mode. */
      float momentum; /**< [bnorm.momentum] running_mean = running_mean * momentum + mean * (1 - momentum). */
    } bnorm;
    struct {
      int axis[CCV_NNC_MAX_DIM_ALLOC]; /**< [lnorm.axis[]] The axes selected to compute mean / variance. */
      int count; /**< [lnorm.count] The number of axes selected. */
      float epsilon; /**< [lnorm.epsilon] The epsilon for the standard deviation. */
      int elementwise_affine; /**< [lnorm.elementwise_affine] Whether it supports scale / bias. */
    } lnorm;
    struct {
      int group_axis; /**< [gnorm.group_axis] The axis selected to be grouped. */
      int reduce_axis[CCV_NNC_MAX_DIM_ALLOC]; /**< [gnorm.reduce_axis[]] The other axes selected to compute mean / variance. */
      int reduce_count; /**< [gnorm.reduce_count] The number of other axes selected. */
      int groups; /**< [gnorm.groups] The number of groups that separate the channels. */
      float epsilon; /**< [gnorm.epsilon] The epsilon for the standard deviation. */
      int elementwise_affine; /**< [gnorm.elementwise_affine] Whether it supports scale / bias. */
    } gnorm;
    struct {
      int axis[CCV_NNC_MAX_DIM_ALLOC]; /**< [rmsnorm.axis[]] The axes selected to compute mean / variance. */
      int count; /**< [rmsnorm.count] The number of axes selected. */
      float epsilon; /**< [rmsnorm.epsilon] The epsilon for the standard deviation. */
    } rmsnorm;
    struct {
      int nesterov; /**< [sgd.nesterov] Nesterov accelerated gradient. */
      float rate; /**< [sgd.rate] The learning rate. */
      float scale; /**< [sgd.scale] The scale to be applied to the gradient before doing any minimization. */
      float decay; /**< [sgd.decay] This is the weight decay parameter, which represents L2 regularization after momentum is applied. */
      float momentum; /**< [sgd.momentum] For SGD, this follows http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf. */
      float dampening; /**< [sgd.dampening] This usually equals momentum; however, it can be changed. */
    } sgd;
    struct {
      int step; /**< [adam.step] Step t in the adam optimizer. */
      float rate; /**< [adam.rate] The learning rate. */
      float scale; /**< [adam.scale] The scale to be applied to the gradient before doing any minimization. */
      float beta1; /**< [adam.beta1] The beta1 hyper-parameter in the adam optimizer. */
      float beta2; /**< [adam.beta2] The beta2 hyper-parameter in the adam optimizer. */
      float decay; /**< [adam.decay] This is the weight decay parameter, which represents L2 regularization. */
      float epsilon; /**< [adam.epsilon] The epsilon for the standard deviation. */
      int amsgrad; /**< [adam.amsgrad] Whether to use amsgrad. */
    } adam;
    struct {
      int step; /**< [lamb.step] Step t in the lamb optimizer. */
      float rate; /**< [lamb.rate] The learning rate. */
      float scale; /**< [lamb.scale] The scale to be applied to the gradient before doing any minimization. */
      float beta1; /**< [lamb.beta1] The beta1 hyper-parameter in the lamb optimizer. */
      float beta2; /**< [lamb.beta2] The beta2 hyper-parameter in the lamb optimizer. */
      float decay; /**< [lamb.decay] This is the weight decay parameter, which represents L2 regularization. */
      float epsilon; /**< [lamb.epsilon] The epsilon for the standard deviation. */
    } lamb;
    struct {
      float rate; /**< [rmsprop.rate] The learning rate. */
      float scale; /**< [rmsprop.scale] The scale to be applied to the gradient before doing any minimization. */
      float decay; /**< [rmsprop.decay] This is the weight decay parameter, which represents L2 regularization after momentum is applied. */
      float alpha; /**< [rmsprop.alpha] The alpha hyper-parameter. */
      float momentum; /**< [rmsprop.momentum] The momentum hyper-parameter. */
      float epsilon; /**< [rmsprop.epsilon] The epsilon for the standard deviation. */
    } rmsprop;
    struct {
      int transpose_a[2]; /**< [blas.transpose_a[2]] The axes we'd like to transpose for input a. */
      int transpose_b[2]; /**< [blas.transpose_b[2]] The axes we'd like to transpose for input b. */
      float a[3]; /**< [blas.a[3]] BLAS scalars. */
    } blas;
    struct {
      float trim0; /**< [label_smoothing.trim0] The smoothed label for 0. */
      float trim1; /**< [label_smoothing.trim1] The smoothed label for 1. */
    } label_smoothing;
    struct {
      float pos_weight; /**< [binary_crossentropy.pos_weight] The pos_weight on the loss: -(pos_weight * y * log(x) + (1 - y) * log(1 - x)) */
    } binary_crossentropy;
    struct {
      float beta; /**< [smooth_l1.beta] The beta on the smooth L1 loss (or Huber loss). */
    } smooth_l1;
    struct {
      int reduce_op; /**< [mse.reduce_op] Whether to reduce with mean or with sum. */
    } mse;
    struct {
      int tanh; /**< [gelu.tanh] Use the tanh approximation. */
    } gelu;
    struct {
      int axis[CCV_NNC_MAX_DIM_ALLOC]; /**< [reduce.axis[]] The axes selected to reduce. */
      int count; /**< [reduce.count] The number of axes selected. */
    } reduce;
    struct {
      int axis[2]; /**< [transpose.axis[2]] The axes we'd like to transpose for the input. */
    } transpose;
    struct {
      float p; /**< [dropout.p] Dropout probability. */
      int entirety; /**< [dropout.entirety] Drop the whole layer with the given probability. */
    } dropout;
    struct {
      int type; /**< [upsample.type] 0 - nearest, 1 - bilinear. */
      float width_scale; /**< [upsample.width_scale] Scale for the width parameter. It is between 1 and 2 at the moment. */
      float height_scale; /**< [upsample.height_scale] Scale for the height parameter. It is between 1 and 2 at the moment. */
      int align_corners; /**< [upsample.align_corners] Whether to scale to align corners. Thus, for 0...1, if false, it will align to -0.25, 0.25, 0.75, 1.25; if true, it will align to 0, 0.3333, 0.6666, 1.0. */
    } upsample;
    struct {
      float min; /**< [clamp.min] The minimum; NaN means no min. */
      float max; /**< [clamp.max] The maximum; NaN means no max. */
    } clamp;
    struct {
      float iou_threshold; /**< [nms.iou_threshold] Threshold between 0 and 1 for the IoU threshold. */
    } nms;
    struct {
      int type; /**< [histogram.type] The type, can be even, logarithmic, or bins. */
      int bins; /**< [histogram.bins] The number of bins, only applied to even. */
      float min; /**< [histogram.min] The minimal number, for even or logarithmic. */
      float max; /**< [histogram.max] The maximal number, for even or logarithmic. */
      float rate; /**< [histogram.rate] The rate from min to max, only applied to logarithmic. */
    } histogram;
    struct {
      float negative_slope; /**< [leaky_relu.negative_slope] The negative slope to be applied when the activation < 0. */
    } leaky_relu;
    struct {
      float scale; /**< [scaled_dot_product_attention.scale] The scale we multiply with the dot product of Q & K. */
      int is_causal; /**< [scaled_dot_product_attention.is_causal] Whether we have a causal mask associated with the attention. The attention mask will be cut to triangular if provided. */
      int upcast; /**< [scaled_dot_product_attention.upcast] Whether we want to run the attention computation at higher precision (from FP16 to FP32). */
      int deterministic; /**< [scaled_dot_product_attention.deterministic] Whether we want the attention computation to be deterministic (CUDA only). */
    } scaled_dot_product_attention;
    struct {
      int type; /**< [pad.type] The type of pad, can be either zeros or replicating the edge. */
      int end[CCV_NNC_MAX_DIM_ALLOC]; /**< [pad.end] Works together with size.dim. size.dim is how much to add at the beginning and pad.end is how much to add at the end. */
    } pad;
    void* userdata;
  };
} ccv_nnc_cmd_param_t;
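
To make the union concrete, here is a sketch (editor's addition, not library code) of the parameters for a 3x3 convolution over 3 input channels with 128 filters in a single group; my reading is that size.dim carries the full filter window, and fields left out are zero-initialized.

    ccv_nnc_cmd_param_t conv_params = {
      .size = { .dim = { 3, 3, 3 } },               // the filter window, per size.dim above
      .convolution = { .count = 128, .groups = 1 }, // 128 filters, 1 group (dilation left at its default)
    };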

/*
 * Hints for command.
 */
typedef struct {
  struct {
    int dim[CCV_NNC_MAX_DIM_ALLOC]; /**< Stride for each dimension. */
  } stride;
  struct {
    int begin[CCV_NNC_MAX_DIM_ALLOC]; /**< Padding at the beginning of a dimension. */
    int end[CCV_NNC_MAX_DIM_ALLOC]; /**< Padding at the end of a dimension. */
  } border;
} ccv_nnc_hint_t;
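
And a matching hint sketch (editor's addition): a stride of 2 with one pixel of padding on each border, as one might pair with the convolution above.

    ccv_nnc_hint_t hint = {
      .stride = { .dim = { 2, 2 } },                    // move the window 2 pixels per step
      .border = { .begin = { 1, 1 }, .end = { 1, 1 } }, // pad 1 at both ends of each dimension
    };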

/**
 * Opaque pointer to a stream object.
 */
typedef struct ccv_nnc_stream_context_s ccv_nnc_stream_context_t;

typedef struct ccv_nnc_cmd_vtab_s ccv_nnc_cmd_vtab_t;

typedef struct ccv_nnc_cmd_s {
  uint32_t cmd; /**< The identifier for the command. */
  uint32_t backend; /**< The identifier for the backend. */
  int algorithm; /**< The algorithm selector (as defined by the backend). */
  ccv_nnc_cmd_param_t info; /**< The command parameters. */
  /**
   * This is for type CCV_NNC_CUSTOM_FORWARD / CCV_NNC_CUSTOM_BACKWARD
   */
  ccv_nnc_cmd_vtab_t* isa;
  void* data;
} ccv_nnc_cmd_t;

/**
 * For forward functions, the input tensors and output tensors can be arbitrary.
 * However, for backward functions (backpropagation, or gradient functions in other libs),
 * the input is: 0~m-1: gradients for the forward function's output tensors, m~m+n-1: the forward function's input tensors, m+n~2m+n-1: the forward function's output tensors;
 * the output is: 0~n-1: output gradients w.r.t. the forward function's input tensors.
 * Which input / output tensors can be ignored can be specified in the cmd config structs.
 */
typedef int(*ccv_nnc_cmd_exec_f)(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context);
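
To make the backward index convention concrete, here is a sketch (editor's addition, not library code) of a custom backward function for an element-wise multiply c = a * b, i.e. n = 2 forward inputs and m = 1 forward output; ccv_nnc_tensor_count is assumed to come from ccv_nnc_easy.h.

    static int mul_backward(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
    {
      ccv_nnc_tensor_t* const g = inputs[0]; // 0~m-1: gradient of the forward output c
      ccv_nnc_tensor_t* const a = inputs[1]; // m~m+n-1: the forward inputs a, b
      ccv_nnc_tensor_t* const b = inputs[2];
      // inputs[3] would be the forward output c (m+n~2m+n-1); unused here.
      ccv_nnc_tensor_t* const da = outputs[0]; // 0~n-1: gradients w.r.t. the forward inputs
      ccv_nnc_tensor_t* const db = outputs[1];
      int i;
      const int count = (int)ccv_nnc_tensor_count(a->info);
      for (i = 0; i < count; i++)
      {
        da->data.f32[i] = g->data.f32[i] * b->data.f32[i]; // d(a*b)/da = b
        db->data.f32[i] = g->data.f32[i] * a->data.f32[i]; // d(a*b)/db = a
      }
      return CCV_NNC_EXEC_SUCCESS;
    }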

/**
 * The function prototype for autotune. The only difference is the max_workspace_size.
 * Implementing this function prototype means the autotune task is handed over to the
 * command itself; you are responsible for selecting the best algorithm.
 * @return The selected algorithm.
 */
typedef int(*ccv_nnc_cmd_autotune_f)(const ccv_nnc_cmd_t cmd, const size_t max_workspace_size, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context);

/**
 * The function prototype for automatically deducing tensor shapes.
 */

typedef struct ccv_nnc_cmd_vtab_s {
  ccv_nnc_cmd_exec_f exec;
  void (*tensor_auto)(const ccv_nnc_cmd_t cmd, const ccv_nnc_tensor_param_t* const inputs, const int input_size, const ccv_nnc_hint_t hint, ccv_nnc_tensor_param_t* const outputs, const int output_size);
} ccv_nnc_cmd_vtab_t;
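
Continuing the sketch above (editor's addition), the function can be wired into a vtab; per the ccv_nnc_cmd_t comment, the isa field carries this for CCV_NNC_CUSTOM_BACKWARD commands.

    static void mul_backward_tensor_auto(const ccv_nnc_cmd_t cmd, const ccv_nnc_tensor_param_t* const inputs, const int input_size, const ccv_nnc_hint_t hint, ccv_nnc_tensor_param_t* const outputs, const int output_size)
    {
      int i;
      for (i = 0; i < output_size; i++)
        outputs[i] = inputs[1 + i]; // gradients take the shapes of the corresponding forward inputs
    }

    static ccv_nnc_cmd_vtab_t mul_backward_vtab = {
      .exec = mul_backward,
      .tensor_auto = mul_backward_tensor_auto,
    };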

/** @} */

/**
 * @defgroup level_1_uops Micro Commands to Define Commands
 * @{
 */

/**
 * @page micro_jittor The concept of meta-ops in Jittor is amazing
 *
 * NNC will never do JIT. Particularly, I will never do codegen and compile at runtime, especially with static shapes.
 * The reason is pretty simple. JIT would be too architecture-dependent, and with that, it would be almost impossible for NNC
 * to be this small embeddable library that you can carry everywhere. However, this shouldn't prevent NNC from generating
 * proper descriptions of each command so a JIT version can be built if there is architectural support for it. In this
 * way, the core of NNC can be small and embeddable, but a new backend (identified by the backend attribute) can implement
 * a more sophisticated JIT mechanism.
 *
 * Moreover, I need to generate some code for reference implementations, ideally from some descriptions. This is important
 * because with 90+ ops, having a correctly implemented command turns out to be more challenging than I expected.
 * Especially if I want them to be compliant with the metadata that describes them (what shapes they accept, what datatypes work,
 * whether they can accept tensor views, and how in-place tensors are supported). Many of the reference commands do not support
 * all datatypes and tensor views, and this has to be rectified because these are "reference commands"; they must.
 *
 * Jittor introduced to the world the idea of meta-ops. Basically, it claims every op (or macro op) can be broken down into
 * 3 types of micro ops (they call them meta-ops): a reindex op that can map a tensor from one dimensionality to another, an
 * element-wise op that does element-wise primitive math, and finally, a reduce op that can reduce along particular axes
 * of a tensor with some elementary math. This feels rather limited initially, but thinking it through, I am convinced
 * it should be enough to describe all commands present in NNC (this shouldn't be a surprise, actually).
 *
 * Thus, the plan now is to use the meta-ops idea, implementing new micro commands that can describe the other commands in
 * NNC. In this way, I can generate reference implementations from these descriptions and hopefully get better coverage
 * than my existing CPU / GPU reference implementations.
 *
 * To build on top of what Jittor did: if you need any dynamism in the ops, it is essential to index with a provided
 * tensor. With just reindex, binary operands and reduce, you cannot do that. Thus, on top of these 3, we added a 4th
 * micro op (meta-op): "select". This is sufficient to implement ops such as masking.
 *
 */

/**
 * Abstract vtab for different ccv_nnc_micro_io_t.
 */
typedef struct ccv_nnc_micro_io_vtab_s ccv_nnc_micro_io_vtab_t;

enum {
  // There could be many more unary ops.
  CCV_NNC_MICRO_UNARY_OP_NEG,
  CCV_NNC_MICRO_UNARY_OP_LOG,
  CCV_NNC_MICRO_UNARY_OP_EXP,
};

enum {
  CCV_NNC_MICRO_BINARY_OP_PLUS,
  CCV_NNC_MICRO_BINARY_OP_MINUS,
  CCV_NNC_MICRO_BINARY_OP_MUL,
  CCV_NNC_MICRO_BINARY_OP_DIV,
  CCV_NNC_MICRO_BINARY_OP_MAX,
  CCV_NNC_MICRO_BINARY_OP_MIN,
  CCV_NNC_MICRO_BINARY_OP_EQUAL_TO,
  CCV_NNC_MICRO_BINARY_OP_LESS_THAN,
};

enum {
  CCV_NNC_MICRO_REDUCE_OP_MAX,
  CCV_NNC_MICRO_REDUCE_OP_MIN,
  CCV_NNC_MICRO_REDUCE_OP_ARGMAX,
  CCV_NNC_MICRO_REDUCE_OP_ARGMIN,
  CCV_NNC_MICRO_REDUCE_OP_MEAN, // Mean is complicated: we need a way to compute the total for loops after this. It has to be done statically, and that is "interesting".
  CCV_NNC_MICRO_REDUCE_OP_SUM,
  CCV_NNC_MICRO_REDUCE_OP_PROD,
};

/**
 * Abstract micro op representation.
 */
typedef struct ccv_nnc_micro_io_s* ccv_nnc_micro_io_t;

struct ccv_nnc_micro_io_s {
  const ccv_nnc_micro_io_vtab_t* isa;
  ccv_nnc_micro_io_t* inputs;
  int input_size;
  int dimensions;
  int id;
};

typedef struct {
  // The type of the scalar is about precision; it doesn't restrict the tensor's type. For example, we may assign an int32_t 0
  // to a float16 tensor element, and this is perfectly fine.
  int type;
  union {
    unsigned char u8;
    int i32;
    ccv_float16_t f16;
    float f32;
    int64_t i64;
    uint64_t u64;
    double f64;
  };
} ccv_nnc_micro_scalar_t;

/**
 * Create a free-form input that represents a tensor.
 * @param dimensions The maximum dimension of the input.
 */
CCV_WARN_UNUSED(ccv_nnc_micro_io_t) ccv_nnc_micro_input(const int dimensions);
/**
 * Use shape and reindex expressions to reindex the given tensor into a different shape.
 * The expressions can bind integer parameters, which start with $.
 *
 * The expressions follow a specific pattern. Dimensions are represented as dXn, such
 * as dA0, dA1, dA2 ... Indices into the provided tensor are represented as i0, i1, i2. These are all 0-indexed.
 *
 * Constants are supported, such as 235, 431 etc. The operators currently supported are -, +, /, *.
 *
 * Thus, to broadcast a tensor x[w, h] to y[w, h, h], it can be represented as:
 * shape: { "dA0", "dA1", "dA1" }, reindex: { "i0", "i1", "0" }.
 * For example, transpose can be represented as:
 * shape: { "dA1", "dA0" }, reindex: { "i1", "i0" }
 *
 * @param shape The shape expressions per axis.
 * @param shape_count The dimensions of the output.
 * @param ss The tensors to reference shape dimensions.
 * @param s_count The number of tensors to reference shape dimensions.
 * @param reindex The reindex expressions per axis.
 * @param reindex_count The dimensions of the input.
 * @param x The input for the reindex operation.
 * @return The reindexed tensor.
 */
CCV_WARN_UNUSED(ccv_nnc_micro_io_t) ccv_nnc_micro_reindex(const char* const* const shape, const int shape_count, const ccv_nnc_micro_io_t* const ss, const int s_count, const char* const* const reindex, const int reindex_count, const ccv_nnc_micro_io_t x);
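
For instance, the transpose case from the comment above can be written as follows (editor's sketch; my assumption is that ss lists the tensors whose dimensions the dA* expressions refer to):

    ccv_nnc_micro_io_t x = ccv_nnc_micro_input(2); // a free-form 2-d input, "A" in the expressions
    const char* shape[] = { "dA1", "dA0" };        // output shape: the input dims, swapped
    const char* reindex[] = { "i1", "i0" };        // y[i0, i1] reads x[i1, i0]
    ccv_nnc_micro_io_t y = ccv_nnc_micro_reindex(shape, 2, &x, 1, reindex, 2, x);
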
/**
 * Apply element-wise computations with one tensor.
 * @param op The unary operand.
 * @param x The input.
 * @return The result tensor.
 */
CCV_WARN_UNUSED(ccv_nnc_micro_io_t) ccv_nnc_micro_unary(const uint32_t op, const ccv_nnc_micro_io_t x);
/**
 * Apply pair-wise computations with two tensors. They have to match shapes exactly.
 * @param op The binary operand.
 * @param left The left input.
 * @param right The right input.
 * @return The result tensor.
 */
CCV_WARN_UNUSED(ccv_nnc_micro_io_t) ccv_nnc_micro_binary(const uint32_t op, const ccv_nnc_micro_io_t left, const ccv_nnc_micro_io_t right);
/**
 * Apply a reduction computation against some dimensions and generate the final reduced tensor.
 * @param op The reduction operand.
 * @param axis The axes to reduce.
 * @param axis_count The number of axes.
 * @param x The input tensor.
 * @return The result tensor after reduction.
 */
CCV_WARN_UNUSED(ccv_nnc_micro_io_t) ccv_nnc_micro_reduce(const uint8_t op, const int* const axis, const int axis_count, const ccv_nnc_micro_io_t x);
/**
 * Use the index tensor to select one value from x per axis.
 * @param axis The axis to select.
 * @param x The tensor to be indexed.
 * @param index The integer tensor of indexes.
 * @return The result tensor with values selected from x with indices from the index tensor.
 */
CCV_WARN_UNUSED(ccv_nnc_micro_io_t) ccv_nnc_micro_select(const int axis, const ccv_nnc_micro_io_t x, const ccv_nnc_micro_io_t index);
/**
 * Return the gradient for a particular output. For example, if x is ccv_nnc_micro_unary(exp, input),
 * this represents the gradient of x, not of the input. This method is used to generate representations
 * of gradients for the ccv_nnc_micro_combine_new method.
 * @param x The tensor to take a gradient of.
 * @return The result tensor that represents the gradient of x.
 */
CCV_WARN_UNUSED(ccv_nnc_micro_io_t) ccv_nnc_micro_grad(const ccv_nnc_micro_io_t x);
/**
 * The combined op from micro ops.
 */
typedef struct ccv_nnc_micro_combine_s ccv_nnc_micro_combine_t;
/**
 * Combine micro ops into one, and do some optimization passes. The combined one can then be processed to generate
 * optimized kernels. Particularly, we can process the combined one into C code and CUDA code as reference
 * implementations.
 * @param inputs The inputs for the combined ops.
 * @param input_size The number of the inputs.
 * @param parameters The names of the parameters; this determines the order of these parameters.
 * @param parameter_size The number of parameters.
 * @param outputs The outputs for the combined ops.
 * @param output_size The number of the outputs.
 * @param ingrads The gradient inputs for the combined ops, including any inputs / outputs if there are any.
 * @param ingrad_size The number of ingrads.
 * @param outgrads The gradient outputs for the combined ops.
 * @param outgrad_size The number of outgrads.
 */
CCV_WARN_UNUSED(ccv_nnc_micro_combine_t*) ccv_nnc_micro_combine_new(const ccv_nnc_micro_io_t* const inputs, const int input_size, const char* const* const parameters, const int parameter_size, const ccv_nnc_micro_io_t* const outputs, const int output_size, const ccv_nnc_micro_io_t* const ingrads, const int ingrad_size, const ccv_nnc_micro_io_t* const outgrads, const int outgrad_size);
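
Putting the micro ops together, a sketch (editor's addition) that builds y = exp(x), requests its gradient, and emits the C reference implementation; that empty parameter lists are accepted and that the returned string is caller-owned are my assumptions.

    ccv_nnc_micro_io_t x = ccv_nnc_micro_input(2);
    ccv_nnc_micro_io_t y = ccv_nnc_micro_unary(CCV_NNC_MICRO_UNARY_OP_EXP, x);
    ccv_nnc_micro_io_t dy = ccv_nnc_micro_grad(y); // incoming gradient w.r.t. y
    ccv_nnc_micro_io_t dx = ccv_nnc_micro_grad(x); // requested gradient w.r.t. x
    ccv_nnc_micro_combine_t* combine = ccv_nnc_micro_combine_new(&x, 1, 0, 0, &y, 1, &dy, 1, &dx, 1);
    char* source = ccv_nnc_micro_combine_c(combine); // C code for forward and backward
    free(source); // needs <stdlib.h>; assuming the string is caller-owned
    ccv_nnc_micro_combine_free(combine);
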
/**
 * Free the combined op.
 * @param combine The op to be freed.
 */
void ccv_nnc_micro_combine_free(ccv_nnc_micro_combine_t* const combine);
/**
 * Run the combined op in interpret mode. This is only useful for debugging internals. Because this is a
 * generic combined op, there is no hint, flags, stream context, or cmd.
 * @param combine The op.
 * @param cmd Choice between CMD_CUSTOM_FORWARD and CMD_CUSTOM_BACKWARD.
 * @param inputs The input tensors.
 * @param input_size The size of the input tensors.
 * @param values The values corresponding to the parameters when calling ccv_nnc_micro_combine_new.
 * @param parameter_size The number of parameters. It must match the one given to ccv_nnc_micro_combine_new.
 * @param outputs The output tensors.
 * @param output_size The size of the output tensors.
 */
void ccv_nnc_micro_combine_interpret(ccv_nnc_micro_combine_t* const combine, const uint32_t cmd, ccv_nnc_tensor_t* const* const inputs, const int input_size, const ccv_nnc_micro_scalar_t* const values, const int parameter_size, ccv_nnc_tensor_t* const* const outputs, const int output_size);
/**
 * Generate C code from the combined op.
 * @param combine The combined op to generate C code from.
 * @return The generated C code string.
 */
char* ccv_nnc_micro_combine_c(ccv_nnc_micro_combine_t* const combine);

/** @} */

/**
 * @defgroup level_1_tensor Tensors
 * @{
 */

/**
 * Count the dimensionality of a tensor.
 */
Line  | Count | Source
  551 |       | static inline int ccv_nnc_tensor_nd(const int dim[CCV_NNC_MAX_DIM_ALLOC])
  552 | 2.41M | {
  553 | 2.41M |   int i;
  554 | 6.42M |   for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; i++)  /* the i++ increment ran 4.01M times */
  555 | 6.42M |     if (dim[i] == 0)
  556 | 2.41M |       return i;
  557 |     0 |   return CCV_NNC_MAX_DIM_ALLOC;
  558 | 2.41M | }
The report then expands this inline function once per translation unit that instantiates it. The expanded source is identical to lines 552-558 above in every instantiation, so only the per-file function entry counts are kept here:

tensor.tests.c: 18; ccv_nnc_cmd.c: 223k; ccv_nnc_tensor.c: 340; ccv_nnc_tensor_io.c: 46; ccv_nnc_symbolic_graph_compile.c: 4.65k; ccv_nnc_symbolic_graph_backward.c: 2.16k; ccv_nnc_dynamic_graph.c: 1.05k; ccv_cnnp_dataframe_addons.c: 183k; ccv_cnnp_model_addons.c: 3.97k; ccv_nnc_adam_cpu_ref.c: 16.1k; ccv_nnc_adamw_cpu_ref.c: 16.1k; ccv_nnc_gemm_cpu_ref.c: 355k; ccv_nnc_gemm_cpu_opt.c: 1.34k; ccv_nnc_add_cpu_ref.c: 204k; ccv_nnc_mul_cpu_ref.c: 109k; ccv_nnc_cmul_cpu_ref.c: 143; ccv_nnc_min_cpu_ref.c: 82; ccv_nnc_max_cpu_ref.c: 82; ccv_nnc_lssc_cpu_ref.c: 96; ccv_nnc_conv_cpu_ref.c: 9.45k; ccv_nnc_conv_cpu_opt.c: 3.32k; ccv_nnc_conv_transpose_cpu_ref.c: 36; ccv_nnc_dropout_cpu_ref.c: 24; ccv_nnc_ew_cpu_ref.c: 763k; ccv_nnc_histogram_cpu_ref.c: 4; ccv_nnc_index_select_cpu_ref.c: 33; ccv_nnc_reduce_isnan_cpu_ref.c: 21; ccv_nnc_lamb_cpu_ref.c: 48; ccv_nnc_binary_crossentropy_cpu_ref.c: 194; ccv_nnc_categorical_crossentropy_cpu_ref.c: 35; ccv_nnc_mse_cpu_ref.c: 2.19k; ccv_nnc_smooth_l1_cpu_ref.c: 63; ccv_nnc_nms_cpu_ref.c: 18; ccv_nnc_batch_norm_cpu_ref.c: 438; ccv_nnc_layer_norm_cpu_ref.c: 208; ccv_nnc_group_norm_cpu_ref.c: 331; ccv_nnc_rmsnorm_cpu_ref.c: 93; ccv_nnc_pad_cpu_ref.c: 105; ccv_nnc_max_pool_cpu_ref.c: 4.25k; ccv_nnc_avg_pool_cpu_ref.c: 5.47k; ccv_nnc_reduce_sum_cpu_ref.c: 78.6k; ccv_nnc_reduce_mean_cpu_ref.c: 41; ccv_nnc_reduce_max_cpu_ref.c: 41; ccv_nnc_reduce_min_cpu_ref.c: 21; ccv_nnc_reduce_norm2_cpu_ref.c: 115; ccv_nnc_argmax_cpu_ref.c: 8; ccv_nnc_argmin_cpu_ref.c: 6; ccv_nnc_rmsprop_cpu_ref.c: 16.0k; ccv_nnc_roi_align_cpu_ref.c: 104; ccv_nnc_scaled_dot_product_attention_cpu_ref.c: 469; ccv_nnc_sgd_cpu_ref.c: 220k; ccv_nnc_sigmoid_binary_crossentropy_cpu_ref.c: 238; ccv_nnc_softmax_cpu_ref.c: 514; ccv_nnc_softmax_crossentropy_cpu_ref.c: 1.24k; ccv_nnc_upsample_cpu_ref.c: 114; ccv_nnc_util_cpu_ref.c: 310; ccv_nnc_blas.c: 120k; _ccv_nnc_gemm_cpu_opt.c: 705; _ccv_nnc_gemm_cpu_sys.c: 47.2k; ccv_nnc_compression.c: 10; _ccv_nnc_conv_cpu_4x4_3x3_winograd.c: 476; _ccv_nnc_conv_cpu_gemm.c: 8; _ccv_nnc_conv_cpu_opt.c: 4.56k; ccv_nnc_convolution.c: 8.76k; ccv_nnc_binary_crossentropy.c: 12; ccv_nnc_categorical_crossentropy.c: 31; ccv_nnc_mse.c: 18; ccv_nnc_smooth_l1.c: 4; ccv_nnc_pad.c: 3; ccv_nnc_pool.c: 2.03k; ccv_nnc_lstm.c: 11; ccv_nnc_scaled_dot_product_attention.c: 57; ccv_nnc_sigmoid_binary_crossentropy.c: 36; ccv_nnc_softmax_crossentropy.c: 860; ccv_nnc_upsample.c: 12

Unexecuted instantiations: gradient.tests.c, upsample.tests.c, palettize.tests.c, concat.tests.c, pad.tests.c, tensor.bind.tests.c, backward.tests.c, graph.tests.c, case_of.backward.tests.c, while.backward.tests.c, autograd.vector.tests.c, dropout.tests.c, custom.tests.c, reduce.tests.c, tfb.tests.c, batch.norm.tests.c, crossentropy.tests.c, cnnp.core.tests.c, symbolic.graph.tests.c, group.norm.tests.c, case_of.tests.c, micro.tests.c, compression.tests.c, transform.tests.c, dataframe.tests.c, gemm.tests.c, roi_align.tests.c, complex.tests.c, swish.tests.c, index.tests.c, minimize.tests.c, symbolic.graph.compile.tests.c, histogram.tests.c, rmsnorm.tests.c, autograd.tests.c, rand.tests.c, while.tests.c, nms.tests.c, graph.io.tests.c, cblas.tests.c, simplify.tests.c, gelu.tests.c, numa.tests.c, loss.tests.c, tape.tests.c, dynamic.graph.tests.c, layer.norm.tests.c, parallel.tests.c, winograd.tests.c, dataframe.addons.tests.c, attention.tests.c, broadcast.tests.c, compare.tests.c, smooth_l1.tests.c, forward.tests.c, cublas.tests.c, mpsblas.tests.c, symbolic.graph.vgg.d.tests.c, imdb.tests.c, lstm.tests.c, datatype.tests.c, graph.vgg.d.tests.c, leaky_relu.tests.c, random.tests.c, cudnn.tests.c, dense.net.tests.c, cifar.tests.c, rmsprop.tests.c, sgd.tests.c, nccl.tests.c, schedule.tests.c, mpsdnn.tests.c, adam.tests.c, lamb.tests.c, ccv_nnc_stream.c, ccv_nnc_micro.c, ccv_nnc_micro_core.c, ccv_nnc_micro_interpret.c, ccv_nnc_micro_simplify.c, ccv_nnc_graph.c, ccv_nnc_symbolic_graph.c, ccv_nnc_symbolic_graph_io.c, ccv_nnc_symbolic_graph_while.c, ccv_nnc_graph_while.c, ccv_nnc_tensor_tape.c, ccv_nnc_symbolic_graph_case_of.c, ccv_nnc_graph_case_of.c, ccv_nnc_symbolic_graph_minimize.c, ccv_nnc_symbolic_graph_parallel.c, ccv_nnc_symbolic_graph_simplify.c, ccv_nnc_symbolic_graph_memory_compression.c, ccv_nnc_symbolic_graph_memory_reduction.c, ccv_nnc_graph_run.c, ccv_nnc_xpu_alloc.c, ccv_nnc_dynamic_graph_alloc.c, ccv_nnc_dynamic_graph_backward.c, ccv_nnc_dynamic_graph_apply_gradients.c, ccv_nnc_dynamic_graph_minimize.c, ccv_nnc_dynamic_graph_evaluate.c, ccv_cnnp_dataframe.c, ccv_cnnp_dataframe_core.c, ccv_cnnp_dataframe_csv.c, ccv_cnnp_model.c, ccv_cnnp_model_io.c, ccv_cnnp_model_core.c, ccv_nnc_palettize.c, ccv_cnnp_model_gradient_checkpointing.c, ccv_nnc_gelu_cpu_ref.c, ccv_nnc_leaky_relu_cpu_ref.c, ccv_nnc_rand_uniform_cpu_ref.c, ccv_nnc_rand_normal_cpu_ref.c, ccv_nnc_relu_cpu_ref.c, ccv_nnc_sigmoid_cpu_ref.c, ccv_nnc_swish_cpu_ref.c, ccv_nnc_tanh_cpu_ref.c, ccv_nnc_adam.c, ccv_nnc_comm.c, ccv_nnc_cmp.c, _ccv_nnc_conv_cpu_fft.c, ccv_nnc_dropout.c, ccv_nnc_ew.c, ccv_nnc_gelu.c, ccv_nnc_histogram.c, ccv_nnc_index_select.c, ccv_nnc_reduce_isnan.c, ccv_nnc_lamb.c, ccv_nnc_leaky_relu.c, ccv_nnc_nms.c, ccv_nnc_norm.c, ccv_nnc_rand.c, ccv_nnc_reduce.c, ccv_nnc_relu.c, ccv_nnc_rmsprop.c, ccv_nnc_roi_align.c, ccv_nnc_sgd.c, ccv_nnc_sigmoid.c, ccv_nnc_softmax.c, ccv_nnc_swish.c, ccv_nnc_tanh.c, ccv_nnc_util.c.
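
Since the dim array is zero-terminated (up to CCV_NNC_MAX_DIM_ALLOC entries), the function is easy to exercise in isolation; a small sketch (editor's addition):

    int dim[CCV_NNC_MAX_DIM_ALLOC] = { 128, 128, 3 }; // remaining entries stay 0
    assert(ccv_nnc_tensor_nd(dim) == 3); // counting stops at the first 0 (needs <assert.h>)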
559
560
/**
561
 * Create a new tensor.
562
 * @param ptr If 0, nnc will allocate the tensor ourselves. Otherwise, will use the memory region referenced by 'ptr'.
563
 * @param params Tensor parameters.
564
 * @param flags Reserved flags for the allocation.
565
 * @return The newly created tensor.
566
 */
567
CCV_WARN_UNUSED(ccv_nnc_tensor_t*) ccv_nnc_tensor_new(const void* const ptr, const ccv_nnc_tensor_param_t params, const int flags);
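A minimal sketch of the allocation contract above, assuming the standard CCV_TENSOR_CPU_MEMORY / CCV_TENSOR_FORMAT_NHWC / CCV_32F constants and the public fields of ccv_nnc_tensor_param_t:
ccv_nnc_init(); // must be called once before any other NNC API
ccv_nnc_tensor_param_t params = {
  .type = CCV_TENSOR_CPU_MEMORY,
  .format = CCV_TENSOR_FORMAT_NHWC,
  .datatype = CCV_32F,
  .dim = {2, 2},
};
// Pass 0 as ptr so NNC allocates the 2x2 FP32 buffer itself.
ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, params, 0);
int i;
for (i = 0; i < 4; i++)
  a->data.f32[i] = (float)i; // data.f32 is the FP32 view of the underlying buffer
ccv_nnc_tensor_free(a);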
568
enum {
569
  CCV_NNC_TENSOR_MEMORY_MAP_EAGER = 0x1, /**< Load tensor mapped directly. */
570
  CCV_NNC_TENSOR_MEMORY_MAP_ON_DEMAND = 0x2, /**< Defer tensor map until read on supported devices. */
571
};
572
/**
573
 * Create a new tensor with data from a file. This will create a mmap-backed tensor if that is preferred.
574
 * @param params Tensor parameters.
575
 * @param filename The file to load tensor content from.
576
 * @param offset The offset to the tensor content from the file.
577
 * @param flags Reserved flags for this loading.
578
 * @return The newly created tensor.
579
 */
580
CCV_WARN_UNUSED(ccv_nnc_tensor_t*) ccv_nnc_tensor_new_from_file(const ccv_nnc_tensor_param_t params, const char* const filename, const off_t offset, const int flags);
581
/**
582
 * Create a new tensor on the stack.
583
 * @param ptr If 0, NNC will allocate the tensor memory itself. Otherwise, the tensor will use the memory region referenced by 'ptr'.
584
 * @param params Tensor parameters.
585
 * @param flags Reserved flags for the allocation.
586
 * @return The tensor struct.
587
 */
588
CCV_WARN_UNUSED(ccv_nnc_tensor_t) ccv_nnc_tensor(const void* const ptr, const ccv_nnc_tensor_param_t params, const int flags);
589
/**
590
 * Resize an existing tensor to a new dimension.
591
 * @param tensor The old tensor to be resized.
592
 * @param params Tensor parameters.
593
 * @return Potentially a new tensor, but if the existing allocation is sufficient, the resize is done in place and the same tensor is returned.
594
 */
595
CCV_WARN_UNUSED(ccv_nnc_tensor_t*) ccv_nnc_tensor_resize(ccv_nnc_tensor_t* const tensor, const ccv_nnc_tensor_param_t params);
596
/**
597
 * Pin the tensor memory for faster access on GPU.
598
 * @param tensor The tensor whose memory we want to pin.
599
 * @return 0 for success.
600
 */
601
int ccv_nnc_tensor_pin_memory(ccv_nnc_tensor_t* const tensor);
602
/**
603
 * Free a tensor object.
604
 * @param tensor The tensor to be freed.
605
 */
606
void ccv_nnc_tensor_free(ccv_nnc_tensor_t* const tensor);
607
/**
608
 * Create a tensor view. A tensor view can be non-contiguous; essentially, it provides a view into a tensor.
609
 * @param tensor The tensor that we want to view into.
610
 * @param params The tensor parameters for the tensor view.
611
 * @param ofs The offset on each of the dimension.
612
 * @param stride The stride of each dimension.
613
 * @return The newly created tensor view.
614
 */
615
CCV_WARN_UNUSED(ccv_nnc_tensor_view_t*) ccv_nnc_tensor_view_new(const ccv_nnc_tensor_t* const tensor, const ccv_nnc_tensor_param_t params, const int ofs[CCV_NNC_MAX_DIM_ALLOC], const int stride[CCV_NNC_MAX_DIM_ALLOC]);
616
/**
617
 * Create a tensor view on the stack.
618
 * @param tensor The tensor that we want to view into.
619
 * @param params The tensor parameters for the tensor view.
620
 * @param ofs The offset on each of the dimension.
621
 * @param stride The stride of each dimension.
622
 * @return The tensor view struct.
623
 */
624
CCV_WARN_UNUSED(ccv_nnc_tensor_view_t) ccv_nnc_tensor_view(const ccv_nnc_tensor_t* const tensor, const ccv_nnc_tensor_param_t params, const int ofs[CCV_NNC_MAX_DIM_ALLOC], const int stride[CCV_NNC_MAX_DIM_ALLOC]);
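As a sketch of how ofs / stride compose, the view below selects the second row of a 3x4 matrix without copying; the strides given are the row-major strides of the base tensor, and reusing the base params for the view is an assumption of this example:
ccv_nnc_tensor_param_t mat = {
  .type = CCV_TENSOR_CPU_MEMORY, .format = CCV_TENSOR_FORMAT_NHWC,
  .datatype = CCV_32F, .dim = {3, 4},
};
ccv_nnc_tensor_t* const m = ccv_nnc_tensor_new(0, mat, 0);
ccv_nnc_tensor_param_t row = mat;
row.dim[0] = 1; // the view is a 1x4 slice
const int ofs[CCV_NNC_MAX_DIM_ALLOC] = {1, 0};    // start at row 1, column 0
const int stride[CCV_NNC_MAX_DIM_ALLOC] = {4, 1}; // 4 floats to the next row
ccv_nnc_tensor_view_t* const rv = ccv_nnc_tensor_view_new(m, row, ofs, stride);
// ... use rv wherever a (possibly non-contiguous) tensor is accepted ...
ccv_nnc_tensor_view_free(rv);
ccv_nnc_tensor_free(m);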
625
/**
626
 * Free a tensor view object.
627
 * @param tensor_view The tensor view to be freed.
628
 */
629
void ccv_nnc_tensor_view_free(ccv_nnc_tensor_view_t* const tensor_view);
630
/**
631
 * Zero out a given tensor.
632
 * @param tensor The tensor to be zeroed out.
633
 */
634
void ccv_nnc_tensor_zero(void* const tensor);
635
/**
636
 * Compare whether two tensors are equal. This tolerates some floating point imprecision, following http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm
637
 * @param a Tensor a.
638
 * @param b Tensor b.
639
 * @return 0 if equal, -1 otherwise.
640
 */
641
CCV_WARN_UNUSED(int) ccv_nnc_tensor_eq(const ccv_nnc_tensor_t* const a, const ccv_nnc_tensor_t* const b);
642
/**
643
 * Format a tensor to a string so that it can be used as debug output in other languages. The output will look like:
644
 * [
645
 *   0.13, 0.44, 0.24, 0.24
646
 * ]
647
 * and is formatted closely to what numpy output looks like.
648
 * @param a The input tensor, it can be a tensor or a tensor view. It has to be accessible on CPU.
649
 * @return An allocated string; call ccfree to free it.
650
 */
651
CCV_WARN_UNUSED(char*) ccv_nnc_tensor_format_new(const ccv_nnc_tensor_t* const a);
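A short usage sketch, reusing the CPU tensor a from the earlier example:
char* const str = ccv_nnc_tensor_format_new(a);
printf("%s\n", str); // numpy-like rendering of the tensor content
ccfree(str);         // the caller owns the returned string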
652
/**
653
 * Method to decode a tensor into a given buffer.
654
 * @param data The encoded data that needs to be decoded.
655
 * @param data_size The size of the encoded data.
656
 * @param datatype The expected data type of the encoded data.
657
 * @param dimensions The expected dimension for the data.
658
 * @param dimension_count The number of dimensions for the data.
659
 * @param identifier The identifier saved by the encoder (non-zero) that is used to identify this decoder.
660
 * @param context The context associated with this decoder.
661
 * @param tensor_params The tensor parameters for the final container. This can be different from the expected values above.
662
 * @param tensor_out The final container for the tensor. It can be nil, in which case you need to initialize it yourself.
663
 * @param decoded The buffer for data to be decoded.
664
 * @param decoded_size The size of the buffer to be decoded.
665
 * @return 1 if it is processed, 0 otherwise.
666
 */
667
typedef int (*ccv_nnc_tensor_io_option_decode_f)(const void* const data, const size_t data_size, const int datatype, const int* const dimensions, const int dimension_count, const unsigned int identifier, void* const context, const ccv_nnc_tensor_param_t tensor_params, ccv_nnc_tensor_t** const tensor_out, void* const decoded, size_t* const decoded_size);
668
/**
669
 * Method to encode a tensor into a given buffer.
670
 * @param data The data that needs to be encoded.
671
 * @param data_size The size of the data to be encoded.
672
 * @param datatype The expected data type of the data to be encoded.
673
 * @param dimensions The expected dimension for the data.
674
 * @param dimension_count The number of dimensions for the data.
675
 * @param context The context associated with this encoder.
676
 * @param encoded The buffer for encoded data.
677
 * @param encoded_size The size of the buffer.
678
 * @param tensor_params The tensor parameters that can be modified.
679
 * @param identifier The non-zero identifier that identifies this encoder.
680
 * @return 1 if it is processed, 0 otherwise.
681
 */
682
typedef int (*ccv_nnc_tensor_io_option_encode_f)(const void* const data, const size_t data_size, const int datatype, const int* const dimensions, const int dimension_count, void* const context, void* const encoded, size_t* const encoded_size, ccv_nnc_tensor_param_t* const tensor_params, unsigned int* const identifier);
683
/**
684
 * Additional options to regulate tensor write / read behavior. For example, you can pass
685
 * an encryptor / compressor to encrypt / compress the data prior to writing to disk. You can
686
 * also store only a reference, and use external storage for tensors.
687
 */
688
typedef struct {
689
  ccv_nnc_tensor_io_option_decode_f decode;
690
  ccv_nnc_tensor_io_option_encode_f encode;
691
  void* context;
692
} ccv_nnc_tensor_io_option_t;
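A pass-through codec sketch matching the two prototypes above. The 0x1 identifier is an arbitrary non-zero tag chosen for this example; a real implementation would compress or encrypt instead of memcpy, and whether filling decoded suffices when tensor_out is pre-allocated is an assumption here.
// needs <string.h> for memcpy
static int example_encode(const void* const data, const size_t data_size, const int datatype,
  const int* const dimensions, const int dimension_count, void* const context,
  void* const encoded, size_t* const encoded_size, ccv_nnc_tensor_param_t* const tensor_params,
  unsigned int* const identifier)
{
  if (*encoded_size < data_size)
    return 0; // not enough room; decline so the library takes the raw path
  memcpy(encoded, data, data_size);
  *encoded_size = data_size;
  *identifier = 0x1; // non-zero tag so the matching decoder recognizes this data
  return 1;
}

static int example_decode(const void* const data, const size_t data_size, const int datatype,
  const int* const dimensions, const int dimension_count, const unsigned int identifier,
  void* const context, const ccv_nnc_tensor_param_t tensor_params,
  ccv_nnc_tensor_t** const tensor_out, void* const decoded, size_t* const decoded_size)
{
  if (identifier != 0x1)
    return 0; // not encoded by example_encode
  const size_t size = data_size < *decoded_size ? data_size : *decoded_size;
  memcpy(decoded, data, size);
  *decoded_size = size;
  return 1;
}

// Wire them up: ccv_nnc_tensor_io_option_t options = { example_decode, example_encode, 0 };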
693
/**
694
 * Write tensor to a SQLite database with a given name.
695
 * @param tensor The tensor.
696
 * @param handle The SQLite handle.
697
 * @param name The name to find the tensor in the database.
698
 * @param options If provided, we will use this to encode tensor data.
699
 * @return CCV_IO_FINAL for success, otherwise error.
700
 */
701
int ccv_nnc_tensor_write(const ccv_nnc_tensor_t* const tensor, void* const handle, const char* const name, const ccv_nnc_tensor_io_option_t* const options);
702
703
enum {
704
  CCV_NNC_TENSOR_READ_METADATA_ONLY = CCV_NO_DATA_ALLOC, /**< Read the tensor with nil data, i.e., metadata only. */
705
};
706
/**
707
 * Read a tensor from a SQLite database with a given name.
708
 * @param handle The SQLite handle.
709
 * @param name The name to find the tensor in the database.
710
 * @param options If provided, we will use this to decode any data whose identifier != 0.
711
 * @param flags Additional flag to configure how we read tensor.
712
 * @param tensor_params If provided, we will use this to create the tensor if tensor_out is not provided.
713
 * @param tensor_out The pointer to hold the tensor. If you supply the tensor yourself, we will read the data into the existing tensor.
714
 * @return CCV_IO_FINAL for success, otherwise error.
715
 */
716
int ccv_nnc_tensor_read(void* const handle, const char* const name, const ccv_nnc_tensor_io_option_t* const options, const int flags, const ccv_nnc_tensor_param_t* const tensor_params, ccv_nnc_tensor_t** const tensor_out);
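A write / read round-trip sketch; that the opaque handle is a sqlite3* connection is an assumption of this example:
#include <sqlite3.h>
sqlite3* db = 0;
sqlite3_open("weights.db", &db);
ccv_nnc_tensor_write(a, db, "layer0.weight", 0); // 0: no custom encoder
ccv_nnc_tensor_t* b = 0;                         // nil: let the read allocate it
ccv_nnc_tensor_read(db, "layer0.weight", 0, 0, 0, &b);
sqlite3_close(db);
ccv_nnc_tensor_free(b);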
717
/** @} */
718
719
/**
720
 * @addtogroup level_1_cmd
721
 * @{
722
 */
723
724
/**
725
 * Return a high precision time measurement. What the time unit represents is platform specific.
726
 * @return A monotonic increasing 64-bit integer w.r.t. passing of time.
727
 */
728
uint64_t ccv_nnc_cmd_mono_time(void);
729
/**
730
 * Return UTF-8 encoded name of a given command.
731
 * @return A UTF-8 string (pointing to a static constant).
732
 */
733
CCV_WARN_UNUSED(const char*) ccv_nnc_cmd_name(const uint32_t cmd);
734
/**
735
 * Return UTF-8 encoded name of a given backend.
736
 * @return A UTF-8 string (pointing to a static constant).
737
 */
738
CCV_WARN_UNUSED(const char*) ccv_nnc_cmd_backend_name(const uint32_t backend);
739
/**
740
 * Check whether a given backend is available for a given command.
741
 * @return 1 if it is available.
742
 */
743
CCV_WARN_UNUSED(int) ccv_nnc_cmd_ok(const uint32_t cmd, const uint32_t backend);
744
/**
745
 * Create a wrapped command with parameters.
746
 * @param cmd The command identifier.
747
 * @param isa If this is a CCV_NNC_CUSTOM_FORWARD / CCV_NNC_CUSTOM_BACKWARD command, this supplies the custom functions.
748
 * @param params The parameters for the command.
749
 * @param flags A reserved field for flags.
750
 * @return A wrapped ccv_nnc_cmd_t structure.
751
 */
752
CCV_WARN_UNUSED(ccv_nnc_cmd_t) ccv_nnc_cmd(const uint32_t cmd, ccv_nnc_cmd_vtab_t* const isa, const ccv_nnc_cmd_param_t params, const int flags);
753
/**
754
 * Verify whether a hint is compatible with a given command and given input / output tensor parameters.
755
 * @param hint The hint for a given command. Hint defines things such as paddings, strides etc. for a given command.
756
 * @param cmd The wrapped command.
757
 * @param a The input tensor parameters.
758
 * @param b The output tensor parameters.
759
 * @return 1 if it passes.
760
 */
761
CCV_WARN_UNUSED(int) ccv_nnc_hint_verify(const ccv_nnc_hint_t hint, const ccv_nnc_cmd_param_t cmd, const ccv_nnc_tensor_param_t a, const ccv_nnc_tensor_param_t b);
762
/**
763
 * Automatically find the best hint for a given input / output (on forward pass only).
764
 * @param cmd The wrapped command.
765
 * @param a The input tensor parameters.
766
 * @param b The output tensor parameters.
767
 * @return Best hint we can guess.
768
 */
769
CCV_WARN_UNUSED(ccv_nnc_hint_t) ccv_nnc_hint_auto(const ccv_nnc_cmd_param_t cmd, const ccv_nnc_tensor_param_t a, const ccv_nnc_tensor_param_t b);
770
/**
771
 * Automatically find the outputs for the given inputs / hint.
772
 * @param cmd The wrapped command.
773
 * @param inputs An array of input tensor parameters.
774
 * @param input_size The size of input array.
775
 * @param hint The hint for the given command.
776
 * @param outputs An array for the output tensor parameters.
777
 * @param output_size The size of the output array.
778
 */
779
void ccv_nnc_hint_tensor_auto(const ccv_nnc_cmd_t cmd, const ccv_nnc_tensor_param_t* const inputs, const int input_size, const ccv_nnc_hint_t hint, ccv_nnc_tensor_param_t* const outputs, const int output_size);
780
/**
781
 * Find a suitable backend for a given command and tensor settings.
782
 * @param cmd The wrapped command.
783
 * @param tensor_memory The tensor memory setup (whether it is CPU or GPU).
784
 * @param tensor_formats The tensor layout format (NCHW, NHWC, CHWN etc.)
785
 * @param tensor_datatypes The datatype of a given tensor (FP32 etc.)
786
 * @return The backend identifier for the selected backend.
787
 */
788
CCV_WARN_UNUSED(uint32_t) ccv_nnc_cmd_find_backend(const ccv_nnc_cmd_t cmd, const int tensor_memory, const int tensor_formats, const int tensor_datatypes);
789
/**
790
 * Run autotune to find the best kernel and configuration for the given input.
791
 * @param cmd The original wrapped command.
792
 * @param max_workspace_size The maximum memory allowed for this command to execute.
793
 * @param hint The hint for the given command.
794
 * @param flags The reserved field for flags.
795
 * @param inputs An array of input tensors.
796
 * @param input_size The size of input array.
797
 * @param outputs An array of output tensors.
798
 * @param output_size The size of output array.
799
 * @param stream_context The stream we can do the autotune on. 0 uses default stream.
800
 * @return The modified cmd that contains the updated configuration.
801
 */
802
CCV_WARN_UNUSED(ccv_nnc_cmd_t) ccv_nnc_cmd_autotune(const ccv_nnc_cmd_t cmd, const size_t max_workspace_size, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context);
803
/**
804
 * Check whether a given tensor input / output pattern can be computed by the given command.
805
 * Bitmasks encode whether a given input / output tensor is available at a position.
806
 * @param cmd The wrapped command to check.
807
 * @param input_size The intended size of the input tensor array.
808
 * @param output_size The intended size of the output tensor array.
809
 * @param input_bitmasks The input tensor array encoding in bitmap, 0: no tensor, 1: has a tensor.
810
 * @param input_bitmask_size The size of the input bitmask array.
811
 * @param output_bitmasks The output tensor array encoding in bitmap.
812
 * @param output_bitmask_size The size of the output bitmask array.
813
 * @return 1 if the command can be executed with the given input / output pattern.
814
 */
815
CCV_WARN_UNUSED(int) ccv_nnc_cmd_bitmask(const ccv_nnc_cmd_t cmd, const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size);
816
/**
817
 * Return auxiliary information related to a particular command with a particular backend.
818
 * A specific backend is required for this method to be useful.
819
 * @param cmd The wrapped command to check auxiliary information for.
820
 * @return The auxiliary information specific to a particular command with a particular backend.
821
 */
822
CCV_WARN_UNUSED(void*) ccv_nnc_cmd_aux(const ccv_nnc_cmd_t cmd);
823
/**
824
 * Execute a given command.
825
 * @param cmd The wrapped command to be executed.
826
 * @param hint The hint provided for the command.
827
 * @param flags A reserved field for flags.
828
 * @param inputs The input tensor array.
829
 * @param input_size The size of input tensor array.
830
 * @param outputs The output tensor array.
831
 * @param output_size The size of output tensor array.
832
 * @param stream_context The stream which the command will be executed upon.
833
 * @return CCV_NNC_EXEC_SUCCESS if succeed.
834
 */
835
int ccv_nnc_cmd_exec(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context);
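A minimal synchronous execution sketch; CMD_RELU_FORWARD, TENSOR_LIST and ccv_nnc_no_hint are convenience helpers assumed to come from ccv_nnc_easy.h, and params is the earlier 2x2 FP32 CPU parameter struct:
ccv_nnc_tensor_t* const x = ccv_nnc_tensor_new(0, params, 0);
ccv_nnc_tensor_t* const y = ccv_nnc_tensor_new(0, params, 0);
x->data.f32[0] = -1.f;
x->data.f32[1] = 2.f;
// Run ReLU forward on the default stream (0); this blocks until complete.
const int status = ccv_nnc_cmd_exec(CMD_RELU_FORWARD(), ccv_nnc_no_hint, 0,
  TENSOR_LIST(x), TENSOR_LIST(y), 0);
assert(status == CCV_NNC_EXEC_SUCCESS); // y now holds {0, 2, ...}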
836
/**
837
 * Check whether the command is a forward pass or not.
838
 * @param cmd The wrapped command.
839
 * @return 1 if it is a forward pass.
840
 */
841
CCV_WARN_UNUSED(int) ccv_nnc_cmd_is_forward(const ccv_nnc_cmd_t cmd);
842
/**
843
 * Check whether the command is a backward pass or not.
844
 * @param cmd The wrapped command.
845
 * @return 1 if it is a backward pass.
846
 */
847
CCV_WARN_UNUSED(int) ccv_nnc_cmd_is_backward(const ccv_nnc_cmd_t cmd);
848
/**
849
 * Check this command against listed attributes.
850
 * @param cmd The wrapped command.
851
 * @param flags The flags to check against the command (unsupported).
852
 * @return 1 if the flag is supported by the command.
853
 */
854
CCV_WARN_UNUSED(int) ccv_nnc_cmd_attr(const ccv_nnc_cmd_t cmd, const int flags);
855
/**
856
 * Check whether this command allow inplace operation against a particular input and output (index from 0).
857
 * @param cmd The wrapped command.
858
 * @param input_idx The index of the input tensor we want to check.
859
 * @param input_size The total number of inputs.
860
 * @param output_idx The index of the output tensor we want to check.
861
 * @param output_size The total number of outputs.
862
 * @return 1 if the input tensor can be used as the output tensor.
863
 */
864
CCV_WARN_UNUSED(int) ccv_nnc_cmd_allow_inplace(const ccv_nnc_cmd_t cmd, const int input_idx, const int input_size, const int output_idx, const int output_size);
865
/**
866
 * Check whether this command need to enforce inplace operation against a particular input and output (index from 0).
867
 * @param cmd The wrapped command.
868
 * @param input_idx The index of the input tensor we want to check.
869
 * @param input_size The total number of inputs.
870
 * @param output_idx The index of the output tensor we want to check.
871
 * @param output_size The total number of outputs.
872
 * @return 1 if the input tensor is required to be used as the output tensor.
873
 */
874
CCV_WARN_UNUSED(int) ccv_nnc_cmd_enforce_inplace(const ccv_nnc_cmd_t cmd, const int input_idx, const int input_size, const int output_idx, const int output_size);
875
/**
876
 * Set the profiler on or off. Right now, this just proxies the call to cudaProfilerStart / cudaProfilerStop.
877
 * @param state 1 is on, 0 is off.
878
 */
879
void ccv_nnc_set_profiler(int state);
880
/**
881
 * When there is a choice between ways of doing things, prefer to be more memory efficient and take the performance hit. This is relevant to MPSGraph because if we dispatch all command buffers at full speed, we risk holding a lot of resources until all of them have executed. Alternatively, we can wait for the previous one to finish before proceeding, with obvious performance penalties.
882
 * @param state 1 is on, 0 is off. Default to off.
883
 */
884
void ccv_nnc_set_memory_efficient(int state);
885
/**
886
 * Quantize a given memory region of a given datatype / memory residence into an nbits palette.
887
 * @param input The input memory region, it can be CCV_64F, CCV_32F or CCV_16F.
888
 * @param datatype The datatype, it can be CCV_64F, CCV_32F or CCV_16F.
889
 * @param memory_type Where the memory resides. Right now only CPU_MEMORY is supported.
890
 * @param input_length How many elements in the input.
891
 * @param qbits How many bits for the palette. Right now only 4 / 5 / 6 / 7 / 8 bits supported.
892
 * @param number_in_blocks How many elements share a palette.
893
 * @param output The output memory region.
894
 * @param output_length The maximum size of the output.
895
 * @return The actual length in bytes of the output.
896
 */
897
CCV_WARN_UNUSED(size_t) ccv_nnc_palettize(const void* input, const int datatype, const int memory_type, const size_t input_length, const int qbits, const int number_in_blocks, void* output, const size_t output_length);
898
/**
899
 * Dequantize a given memory region of a given datatype / memory residence from the built-in nbits palette.
900
 * @param input The input memory region.
901
 * @param datatype The datatype, it can be CCV_64F, CCV_32F or CCV_16F.
902
 * @param memory_type Where the memory resides. It can be either CPU_MEMORY or GPU_MEMORY.
903
 * @param input_length The size of the input in bytes.
904
 * @param qbits How many bits for the palette. Right now only 4 / 5 / 6 / 7 / 8 bits supported.
905
 * @param number_in_blocks How many elements share a palette.
906
 * @param output The output memory region, it can be CCV_64F, CCV_32F or CCV_16F.
907
 * @param output_length How many elements in the output.
908
 */
909
void ccv_nnc_depalettize(const void* input, const int datatype, const int memory_type, const size_t input_length, const int qbits, const int number_in_blocks, void* output, const size_t output_length);
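A round-trip sketch for the two calls above; the encoded buffer size is deliberately generous (an assumption of this example), and the return value of ccv_nnc_palettize reports the actual encoded byte count:
enum { N = 512 };
float original[N], restored[N];
unsigned char encoded[8192]; // generous upper bound for this example
int i;
for (i = 0; i < N; i++)
  original[i] = (float)i / N;
const size_t bytes = ccv_nnc_palettize(original, CCV_32F, CCV_TENSOR_CPU_MEMORY,
  N /* elements */, 8 /* qbits */, 128 /* elements per palette */,
  encoded, sizeof(encoded));
ccv_nnc_depalettize(encoded, CCV_32F, CCV_TENSOR_CPU_MEMORY,
  bytes /* input bytes */, 8, 128, restored, N /* elements */);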
910
911
/** @} */
912
913
/**
914
 * @defgroup level_1_stream Streams
915
 * @{
916
 */
917
918
// Control flow constructs
919
// Follow heavily based along CUDA's stream / event idea.
920
enum {
921
  CCV_STREAM_CONTEXT_CPU = 0x1, /**< A CPU based stream context (unsupported). */
922
  CCV_STREAM_CONTEXT_GPU = 0x2, /**< A GPU based stream context. */
923
};
924
211k
#define CCV_STREAM_GET_CONTEXT(type) ((type) & 0x3)
925
#define CCV_STREAM_GET_DEVICE(type) CCV_TENSOR_GET_DEVICE(type)
926
44.9k
#define CCV_STREAM_GET_DEVICE_ID(type) CCV_TENSOR_GET_DEVICE_ID(type)
927
3.21k
#define CCV_STREAM_SET_DEVICE_ID(type, device_id) CCV_TENSOR_SET_DEVICE_ID(type, device_id)
928
/**
929
 * Create a new stream context.
930
 * @param type A combination of CPU / GPU and DEVICE_ID.
931
 * @return The newly created stream context.
932
 */
933
CCV_WARN_UNUSED(ccv_nnc_stream_context_t*) ccv_nnc_stream_context_new(const int type);
934
/**
935
 * Get the type of the stream context.
936
 * @param stream_context The stream context we want to inspect.
937
 * @return The type of the stream context.
938
 */
939
CCV_WARN_UNUSED(int) ccv_nnc_stream_context_type(const ccv_nnc_stream_context_t* const stream_context);
940
/**
941
 * Get a stream context local workspace memory. This memory region will be reused
942
 * the next time when you call this method on the same stream context.
943
 * @param stream_context The stream context which provides the workspace memory.
944
 * @param workspace_size The size of the workspace memory.
945
 * @param mem The memory type of the workspace memory (GPU or CPU).
946
 * @return A pointer to the workspace memory.
947
 */
948
CCV_WARN_UNUSED(void*) ccv_nnc_stream_context_get_workspace(ccv_nnc_stream_context_t* const stream_context, const size_t workspace_size, const int mem);
949
/**
950
 * Deallocate any workspace memory on the stream context.
951
 * @param stream The stream context to drain workspace memory.
952
 */
953
void ccv_nnc_stream_context_drain(ccv_nnc_stream_context_t* const stream);
954
/**
955
 * The callback prototype on the stream context.
956
 */
957
typedef void(*ccv_nnc_callback_f)(void* const callback_context);
958
/**
959
 * Add a callback function to be called once stream executed to that point.
960
 * @param stream The stream context to add callback.
961
 * @param callback The callback function.
962
 * @param callback_context The context to be called with the callback function.
963
 */
964
void ccv_nnc_stream_context_add_callback(ccv_nnc_stream_context_t* const stream, const ccv_nnc_callback_f callback, void* const callback_context);
965
/**
966
 * Wait until all tasks submitted (command, graph run etc.) on the stream context
967
 * completed.
968
 * @param stream The stream context to wait.
969
 */
970
void ccv_nnc_stream_context_wait(const ccv_nnc_stream_context_t* const stream);
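An asynchronous dispatch sketch; it requires a GPU build, and cmd plus the GPU-resident tensor arrays are placeholders:
int type = CCV_STREAM_CONTEXT_GPU;
CCV_STREAM_SET_DEVICE_ID(type, 0); // run on device 0
ccv_nnc_stream_context_t* const stream = ccv_nnc_stream_context_new(type);
// Returns as soon as the work is queued on the stream.
ccv_nnc_cmd_exec(cmd, ccv_nnc_no_hint, 0, inputs, input_size, outputs, output_size, stream);
ccv_nnc_stream_context_wait(stream); // block until everything queued has finished
ccv_nnc_stream_context_free(stream);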
971
/**
972
 * The hooks to be called when a stream context is destroyed.
973
 * At the moment, the stream context will be destroyed at the time
974
 * ccv_nnc_stream_context_free is called, so there are no tricks.
975
 * This method is useful because we have some resources associated
976
 * with the stream pointer, hence, it is good to free these resources
977
 * when the stream is freed.
978
 */
979
typedef void (*ccv_nnc_stream_context_destructor_f)(const ccv_nnc_stream_context_t* const stream, void* const context);
980
/**
981
 * Add a new destructor hook callback when a stream is freed.
982
 * @param stream The stream to be observed.
983
 * @param destructor The new destructor callback method.
984
 * @param context additional context.
985
 * @return An integer identifier to help remove the hook.
986
 */
987
int ccv_nnc_stream_context_add_destructor_hook(ccv_nnc_stream_context_t* const stream, ccv_nnc_stream_context_destructor_f destructor, void* const context);
988
/**
989
 * Remove a destructor hook callback.
990
 * @param stream The stream we observe.
991
 * @param hook_id The returned integer when calling the add method.
992
 */
993
void ccv_nnc_stream_context_remove_destructor_hook(ccv_nnc_stream_context_t* const stream, const int hook_id);
994
/**
995
 * Deallocate the stream context.
996
 * @param stream_context The stream context to be destroyed.
997
 */
998
void ccv_nnc_stream_context_free(ccv_nnc_stream_context_t* const stream_context);
999
/**
1000
 * Set random seed for stream context.
1001
 * @param stream_context The stream context to set the seed. 0 means use the default stream context.
1002
 * @param seed The seed for the stream context.
1003
 */
1004
void ccv_nnc_stream_context_set_seed(ccv_nnc_stream_context_t* const stream_context, uint32_t seed);
1005
/**
1006
 * Generate uint32_t random number for stream context.
1007
 * These are usually used as seeds for other high-performance random number generators.
1008
 * @param stream_context The stream context associated with random number generation.
1009
 */
1010
uint32_t ccv_nnc_stream_context_genrand_uint32(ccv_nnc_stream_context_t* const stream_context);
1011
1012
/**
1013
 * Opaque pointer to the signal object.
1014
 */
1015
typedef struct ccv_nnc_stream_signal_s ccv_nnc_stream_signal_t;
1016
1017
/**
1018
 * Create a new stream signal.
1019
 * @param type A composed type that denotes whether it is associated with a GPU or CPU stream context, and on which device.
1020
 * @return The newly created stream signal.
1021
 */
1022
CCV_WARN_UNUSED(ccv_nnc_stream_signal_t*) ccv_nnc_stream_signal_new(const int type);
1023
/**
1024
 * Get the type of the stream signal.
1025
 * @param signal The stream signal we want to inspect.
1026
 * @return The type of the stream signal.
1027
 */
1028
CCV_WARN_UNUSED(int) ccv_nnc_stream_signal_type(const ccv_nnc_stream_signal_t* const signal);
1029
/**
1030
 * Emit a signal on a stream.
1031
 * @param stream The stream context where the signal will be emitted.
1032
 * @param signal The signal to be emitted. It has to be on the same device as the stream.
1033
 */
1034
void ccv_nnc_stream_context_emit_signal(ccv_nnc_stream_context_t* const stream, ccv_nnc_stream_signal_t* const signal);
1035
/**
1036
 * Emit a signal on a stream directly. The signal is managed by the stream; you have to use it immediately after this returns.
1037
 * @param stream The stream context where the signal will be emitted.
1038
 * @return The new signal emitted on the stream context.
1039
 */
1040
ccv_nnc_stream_signal_t* ccv_nnc_stream_context_emit_signal_new(ccv_nnc_stream_context_t* const stream);
1041
/**
1042
 * Wait a signal on a stream.
1043
 * @param stream The stream context that will be blocked by the signal.
1044
 * @param signal The signal to be waited. It can be on a different device of the stream.
1045
 */
1046
void ccv_nnc_stream_context_wait_signal(const ccv_nnc_stream_context_t* const stream, const ccv_nnc_stream_signal_t* const signal);
1047
/**
1048
 * Get on which stream context this signal is going to be emitted on.
1049
 * @param signal The signal we want to inspect.
1050
 * @return The most recent stream context you called ccv_nnc_stream_context_emit_signal with.
1051
 */
1052
CCV_WARN_UNUSED(ccv_nnc_stream_context_t*) ccv_nnc_stream_signal_get_emitter(const ccv_nnc_stream_signal_t* const signal);
1053
/**
1054
 * Deallocate the signal.
1055
 * @param signal The signal to be destroyed.
1056
 */
1057
void ccv_nnc_stream_signal_free(ccv_nnc_stream_signal_t* const signal);
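A cross-stream ordering sketch in the CUDA event style; stream_a / stream_b and the queued commands are placeholders:
ccv_nnc_stream_signal_t* const sig = ccv_nnc_stream_signal_new(type);
// ... queue producer work on stream_a ...
ccv_nnc_stream_context_emit_signal(stream_a, sig); // mark this point on stream_a
ccv_nnc_stream_context_wait_signal(stream_b, sig); // stream_b blocks at this point
// ... queue consumer work on stream_b; it now runs after the producer ...
ccv_nnc_stream_signal_free(sig);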
1058
/**
1059
 * Return number of devices.
1060
 * @param type The type of devices (CCV_NNC_STREAM_CONTEXT_GPU / CCV_NNC_STREAM_CONTEXT_CPU)
1061
 * @return The number of devices.
1062
 */
1063
CCV_WARN_UNUSED(int) ccv_nnc_device_count(const int type);
1064
/**
1065
 * Remap a source device as the destination device.
1066
 * @param type The type of devices (CCV_NNC_STREAM_CONTEXT_GPU / CCV_NNC_STREAM_CONTEXT_CPU)
1067
 * @param source The original device id.
1068
 * @param destination The new device id.
1069
 * @return 0 if the device remap is successful, -1 if it is not.
1070
 */
1071
CCV_WARN_UNUSED(int) ccv_nnc_device_remap(const int type, const int source, const int destination);
1072
/**
1073
 * The neighbor discovery function that will be called with the device id.
1074
 */
1075
typedef ccv_nnc_stream_context_t*(*ccv_nnc_stream_context_neighbor_discovery_f)(const int device_id, void* const context);
1076
/**
1077
 * Set the neighbor stream context discovery mechanism. This method exposes how
1078
 * neighbor should be defined per stream context. This method is useful for
1079
 * commands that operate across devices and need to find the correct stream
1080
 * context for these devices. A stream context itself is bound to one device
1081
 * only.
1082
 * @param stream_context The stream context that bounds to a discovery mechanism.
1083
 * @param discovery The neighbor discovery function to invoke.
1084
 * @param context The associated context with the neighbor discovery function.
1085
 */
1086
void ccv_nnc_stream_context_set_neighbor_discovery(ccv_nnc_stream_context_t* const stream_context, ccv_nnc_stream_context_neighbor_discovery_f discovery, void* const context);
1087
/**
1088
 * Find a neighbor stream context on a given device id for current stream context.
1089
 * @param stream_context The stream context which we will look for neighbors.
1090
 * @param device_id On which device the stream context may exist.
1091
 * @return 0 if no stream context is found. Otherwise, return the stream context on that device.
1092
 */
1093
CCV_WARN_UNUSED(ccv_nnc_stream_context_t*) ccv_nnc_stream_context_find_neighbor(ccv_nnc_stream_context_t* const stream_context, const int device_id);
1094
1095
/** @} */
1096
1097
/** @} */
1098
1099
/**
1100
 * @defgroup level_2 Level-2 API
1101
 * @{
1102
 */
1103
1104
/**
1105
 * @defgroup level_2_essentials Essentials
1106
 * @{
1107
 */
1108
1109
enum {
1110
  CCV_NNC_SHORT_DOT_GRAPH = 0x0, /**< Display a simplified graph. */
1111
  CCV_NNC_LONG_DOT_GRAPH  = 0x1, /**< Display a graph that contains all information. */
1112
};
1113
1114
/**
1115
 * Opaque pointer holds the concrete graph representation.
1116
 */
1117
typedef struct ccv_nnc_graph_s ccv_nnc_graph_t;
1118
1119
/**
1120
 * The opaque on-stack object that holds a reference to an execution node within a graph.
1121
 */
1122
typedef struct {
1123
  int32_t d; // This is int because sometimes I piggy-back on negatives to carry out some internal computations.
1124
  ccv_nnc_graph_t* graph;
1125
} ccv_nnc_graph_exec_t;
1126
1127
82.1k
#define CCV_NO_GRAPH_EXEC(exec) ((exec).graph == 0)
1128
1129
/**
1130
 * Create an empty graph.
1131
 * Note that all graph mutation methods are not thread-safe.
1132
 * You should only operate the graph in serial fashion.
1133
 * @return An opaque ccv_nnc_graph_t pointer.
1134
 */
1135
CCV_WARN_UNUSED(ccv_nnc_graph_t*) ccv_nnc_graph_new(void);
1136
/**
1137
 * Create a node with specific command execution, as well as its inputs & outputs.
1138
 * Underneath, the graph maintains the backing object for the node, and all you get is
1139
 * an on-stack object to index the backing object from the graph.
1140
 * @param graph The concrete graph.
1141
 * @param cmd The wrapped command.
1142
 * @param hint The hint for this command.
1143
 * @param inputs The input tensors array.
1144
 * @param input_size The size of input tensors array.
1145
 * @param outputs The output tensors array.
1146
 * @param output_size The size of output tensors array.
1147
 * @return An on-stack object that references an execution node.
1148
 */
1149
CCV_WARN_UNUSED(ccv_nnc_graph_exec_t) ccv_nnc_graph_exec_new(ccv_nnc_graph_t* const graph, const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size);
1150
/**
1151
 * Set the command for an existing execution node.
1152
 * @param graph The concrete graph.
1153
 * @param exec The execution node reference.
1154
 * @param cmd The new wrapped command.
1155
 */
1156
void ccv_nnc_graph_exec_set(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, const ccv_nnc_cmd_t cmd);
1157
/**
1158
 * Return the command on an existing execution node.
1159
 * @param graph The concrete graph.
1160
 * @param exec The execution node reference.
1161
 * @return The wrapped command.
1162
 */
1163
CCV_WARN_UNUSED(ccv_nnc_cmd_t) ccv_nnc_graph_exec_cmd(const ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec);
1164
/**
1165
 * Set hint for an existing execution node.
1166
 * @param graph The concrete graph.
1167
 * @param exec The execution node reference.
1168
 * @param hint The new hint.
1169
 */
1170
void ccv_nnc_graph_exec_set_hint(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, const ccv_nnc_hint_t hint);
1171
/**
1172
 * Set input / output tensors for an existing execution node.
1173
 * @param graph The concrete graph.
1174
 * @param exec The execution node reference.
1175
 * @param inputs The input tensors array.
1176
 * @param input_size The size of input tensors array.
1177
 * @param outputs The output tensors array.
1178
 * @param output_size The size of output tensors array.
1179
 */
1180
void ccv_nnc_graph_exec_set_io(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size);
1181
/**
1182
 * Concatenate an input graph node with an output graph node, extending the execution flow of the graph.
1183
 * @param graph The concrete graph.
1184
 * @param source The execution node reference to connect.
1185
 * @param destination The execution node reference to connect to.
1186
 * @return Non-zero if cannot concat successfully.
1187
 */
1188
int ccv_nnc_graph_exec_concat(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t source, const ccv_nnc_graph_exec_t destination);
1189
/**
1190
 * Disconnect an input graph node from an output graph node in this graph.
1191
 * @param graph The concrete graph.
1192
 * @param source The execution node reference to disconnect.
1193
 * @param destination The execution node reference to disconnect from.
1194
 * @return Non-zero if cannot disjoin successfully.
1195
 */
1196
int ccv_nnc_graph_exec_disjoin(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t source, const ccv_nnc_graph_exec_t destination);
1197
/**
1198
 * Count the number of execution nodes in the graph.
1199
 * @param graph The concrete graph.
1200
 * @return The number of execution nodes in the graph.
1201
 */
1202
int ccv_nnc_graph_exec_count(const ccv_nnc_graph_t* const graph);
1203
/**
1204
 * Generate output that can be parsed by GraphViz (DOT language).
1205
 * @param graph The concrete graph.
1206
 * @param flags Either CCV_NNC_SHORT_DOT_GRAPH or CCV_NNC_LONG_DOT_GRAPH
1207
 * @param out The output file stream.
1208
 */
1209
void ccv_nnc_graph_dot(const ccv_nnc_graph_t* const graph, const int flags, FILE* out);
1210
/**
1211
 * Run the autotune function on all execution node, and assign back with the optimized commands.
1212
 * @param graph The concrete graph.
1213
 * @param max_workspace_size The maximum allowed extra memory usage.
1214
 * @param flags A reserved field for flags.
1215
 * @param sources The source execution nodes to begin. 0 uses default sources.
1216
 * @param source_size The size of source execution nodes.
1217
 * @param destinations The destination execution nodes which we end. 0 uses default destinations.
1218
 * @param destination_size The size of destination execution nodes.
1219
 */
1220
void ccv_nnc_graph_autotune(ccv_nnc_graph_t* const graph, const size_t max_workspace_size, const int flags, const ccv_nnc_graph_exec_t* const sources, const int source_size, const ccv_nnc_graph_exec_t* const destinations, const int destination_size);
1221
/**
1222
 * Make the graph topsorted, that is, do a topological sort so that when the graph runs, no additional memory will be allocated.
1223
 * Otherwise, when we run the graph, we need to allocate some memory on the heap to facilitate execution.
1224
 * @param graph The concrete graph.
1225
 * @param exec_cvt The execution node assignments will change; you can pass an array to learn the changes.
1226
 * @param exec_cvt_size The provided conversion array size.
1227
 */
1228
void ccv_nnc_graph_topsort(ccv_nnc_graph_t* const graph, int* const exec_cvt, const int exec_cvt_size);
1229
1230
/**
1231
 * Opaque pointer holds the graph schedule.
1232
 */
1233
typedef struct ccv_nnc_graph_static_schedule_s ccv_nnc_graph_static_schedule_t;
1234
/**
1235
 * Assuming the graph runs from beginning to end, allocate an internal schedule object that will
1236
 * run the graph efficiently in that case. It will basically call ccv_nnc_graph_static_schedule
1237
 * and save the result to an internal schedule object on this graph.
1238
 * @param graph The concrete graph.
1239
 * @param stream_type The type of stream context we are going to use.
1240
 * @param max_stream_count The number of stream contexts to be allocated internally.
1241
 */
1242
void ccv_nnc_graph_set_default_static_schedule(ccv_nnc_graph_t* const graph, const int stream_type, const int max_stream_count);
1243
/**
1244
 * Allocate extra streams to make this graph parallel runnable. Note this requires the graph to be topsorted.
1245
 * After this is done, you can schedule a graph either on its default stream, or a new stream with the schedule
1246
 * object.
1247
 * @param graph The concrete graph.
1248
 * @param stream_type The type of stream context we are going to use.
1249
 * @param max_stream_count The number of stream contexts to be allocated internally.
1250
 * @param sources The source execution nodes to begin. 0 uses default sources.
1251
 * @param source_size The size of source execution nodes.
1252
 * @param destinations The destination execution nodes which we end. 0 uses default destinations.
1253
 * @param destination_size The size of destination execution nodes.
1254
 * @return An opaque schedule object that let the graph knows how to run itself efficiently.
1255
 */
1256
CCV_WARN_UNUSED(ccv_nnc_graph_static_schedule_t*) ccv_nnc_graph_static_schedule_new(ccv_nnc_graph_t* const graph, const int stream_type, const int max_stream_count, const ccv_nnc_graph_exec_t* const sources, const int source_size, const ccv_nnc_graph_exec_t* const destinations, const int destination_size);
1257
/**
1258
 * Free a schedule object for a graph.
1259
 * @param schedule The schedule object returned from ccv_nnc_graph_static_schedule_new.
1260
 */
1261
void ccv_nnc_graph_static_schedule_free(ccv_nnc_graph_static_schedule_t* const schedule);
1262
/**
1263
 * Query the default stream for a given graph.
1264
 * @param graph The concrete graph.
1265
 * @return The default stream context.
1266
 */
1267
CCV_WARN_UNUSED(ccv_nnc_stream_context_t*) ccv_nnc_graph_default_stream(const ccv_nnc_graph_t* const graph);
1268
/**
1269
 * Set default sources for a given graph.
1270
 * @param graph The concrete graph.
1271
 * @param sources The source execution nodes to begin.
1272
 * @param source_size The size of source execution nodes.
1273
 */
1274
void ccv_nnc_graph_set_sources(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t* const sources, const int source_size);
1275
/**
1276
 * Get the default source execution nodes pointer.
1277
 * @param graph The concrete graph.
1278
 * @return A pointer to an array of default source execution nodes.
1279
 */
1280
ccv_nnc_graph_exec_t* ccv_nnc_graph_sources(const ccv_nnc_graph_t* const graph);
1281
/**
1282
 * Get the number of default source execution nodes.
1283
 * @param graph The concrete graph.
1284
 * @return The number of default source execution nodes.
1285
 */
1286
int ccv_nnc_graph_source_size(const ccv_nnc_graph_t* const graph);
1287
/**
1288
 * Set default destinations for a given graph.
1289
 * @param graph The concrete graph.
1290
 * @param destinations The destination execution nodes which we end.
1291
 * @param destination_size The size of destination execution nodes.
1292
 */
1293
void ccv_nnc_graph_set_destinations(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t* const destinations, const int destination_size);
1294
/**
1295
 * Get the default destination execution nodes pointer.
1296
 * @param graph The concrete graph.
1297
 * @return A pointer to an array of default destination execution nodes.
1298
 */
1299
ccv_nnc_graph_exec_t* ccv_nnc_graph_destinations(const ccv_nnc_graph_t* const graph);
1300
/**
1301
 * Get the number of default destination execution nodes.
1302
 * @param graph The concrete graph.
1303
 * @return The number of default destination execution nodes.
1304
 */
1305
int ccv_nnc_graph_destination_size(const ccv_nnc_graph_t* const graph);
1306
/**
1307
 * Deallocate this graph and its relevant auxiliary objects (opaque to the user).
1308
 * @param graph The concrete graph.
1309
 */
1310
void ccv_nnc_graph_free(ccv_nnc_graph_t* const graph);
1311
/**
1312
 * Opaque pointer to the tape of tensors. The tape is used by the while loop.
1313
 */
1314
typedef struct ccv_nnc_tensor_tape_s ccv_nnc_tensor_tape_t;
1315
/**
1316
 * Execute a computation graph with all bells and whistles. You need to supply a tensor tape if the graph contains a backward pass
1317
 * for while loops or branches. With a tensor tape, the tensors are versioned, so you can "backpropagate through time".
1318
 * @param graph The concrete graph.
1319
 * @param flags A reserved field for flags.
1320
 * @param sources The source execution nodes array.
1321
 * @param source_size The size of source execution nodes array. 0 uses default sources.
1322
 * @param destinations The destination execution nodes array.
1323
 * @param destination_size The size of destination execution nodes array. 0 uses default destinations.
1324
 * @param tensor_tape An opaque tensor tape object to "backpropagate through time".
1325
 * @param stream_context Which stream this graph will be executed upon.
1326
 * @return CCV_NNC_EXEC_SUCCESS if succeed.
1327
 */
1328
int ccv_nnc_graph_run(ccv_nnc_graph_t* const graph, const int flags, const ccv_nnc_graph_exec_t* const sources, const int source_size, const ccv_nnc_graph_exec_t* const destinations, const int destination_size, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const stream_context);
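A two-node concrete graph sketch; relu_cmd / scale_cmd and the tensors x, y, z are placeholders, and TENSOR_LIST is assumed from ccv_nnc_easy.h:
ccv_nnc_graph_t* const graph = ccv_nnc_graph_new();
ccv_nnc_graph_exec_t n0 = ccv_nnc_graph_exec_new(graph, relu_cmd, ccv_nnc_no_hint,
  TENSOR_LIST(x), TENSOR_LIST(y));
ccv_nnc_graph_exec_t n1 = ccv_nnc_graph_exec_new(graph, scale_cmd, ccv_nnc_no_hint,
  TENSOR_LIST(y), TENSOR_LIST(z));
ccv_nnc_graph_exec_concat(graph, n0, n1); // n0 must run before n1
// Explicit source / destination; no tape, default stream.
ccv_nnc_graph_run(graph, 0, &n0, 1, &n1, 1, 0, 0);
ccv_nnc_graph_free(graph);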
1329
/**
1330
 * Execute a computation graph with all bells and whistles. You need to supply a tensor tape if the graph contains a backward pass
1331
 * for while loops or branches. With a tensor tape, the tensors are versioned, so you can "backpropagate through time".
1332
 * Compared with the ccv_nnc_graph_run method, this method doesn't take sources / destinations nodes; rather, it takes the
1333
 * schedule object.
1334
 * @param graph The concrete graph.
1335
 * @param flags A reserved field for flags.
1336
 * @param schedule The schedule object that specifies the sources / destinations and how to efficiently run this.
1337
 * @param tensor_tape An opaque tensor tape object to "backpropagate through time".
1338
 * @param stream_context Which stream this graph will be executed upon.
1339
 * @return CCV_NNC_EXEC_SUCCESS if succeed.
1340
 */
1341
int ccv_nnc_graph_run_with_schedule(ccv_nnc_graph_t* const graph, const int flags, const ccv_nnc_graph_static_schedule_t* const schedule, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const stream_context);
1342
1343
/** @} */
1344
1345
/**
1346
 * @defgroup level_2_others Others
1347
 * @{
1348
 */
1349
1350
/**
1351
 * Set input / output flags for an existing execution node.
1352
 * This must be called after set_io; it sets additional flags for tensors related to this exec.
1353
 * @param graph The concrete graph.
1354
 * @param exec The execution node reference.
1355
 * @param input_flags The input flags array.
1356
 * @param input_flag_size the size of input flags array, should be the same as input tensors array (or 0).
1357
 * @param output_flags The output flags array.
1358
 * @param output_flag_size the size of output flags array, should be the same as output tensors array (or 0).
1359
 */
1360
void ccv_nnc_graph_exec_set_io_flags(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, const int* const input_flags, const int input_flag_size, const int* const output_flags, const int output_flag_size);
1361
/**
1362
 * Set the pair reference for exec. In backward pass, an execution node's pair node is the forward pass node.
1363
 * @param graph The concrete graph.
1364
 * @param exec The execution node reference.
1365
 * @param pair_exec The pair execution node reference.
1366
 */
1367
void ccv_nnc_graph_exec_pair_with(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, const ccv_nnc_graph_exec_t pair_exec);
1368
/**
1369
 * Add a tensor pair that can be used to "carry over" (carry over: passing a tensor from the current loop iteration to the next).
1370
 * @param graph The concrete graph.
1371
 * @param from The tensor we have output in this loop.
1372
 * @param to The tensor we will use as input in the next loop.
1373
 */
1374
void ccv_nnc_graph_add_carry_over(ccv_nnc_graph_t* const graph, const ccv_nnc_tensor_t* const from, const ccv_nnc_tensor_t* const to);
1375
/**
1376
 * Updates are the tensors that are not directly involved in the computation, but whose pointers need to get updated
1377
 * along with this exec, thus need to be "updated" alongside other exec nodes.
1378
 * @param graph The concrete graph.
1379
 * @param exec The execution node reference.
1380
 * @param update The tensor need to be updated along the execution node.
1381
 */
1382
void ccv_nnc_graph_exec_add_as_affected(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, ccv_nnc_tensor_t* const update);
1383
1384
/** @} */
1385
1386
/** @} */
1387
1388
/**
1389
 * @defgroup level_3 Level-3 API
1390
 * @{
1391
 */
1392
1393
/**
1394
 * @defgroup level_3_essentials Essentials
1395
 * @{
1396
 */
1397
1398
/**
1399
 * Opaque pointer to the symbolic graph object.
1400
 */
1401
typedef struct ccv_nnc_symbolic_graph_s ccv_nnc_symbolic_graph_t;
1402
1403
/**
1404
 * Opaque pointer to an arena of allocated tensors.
1405
 */
1406
typedef struct ccv_nnc_tensor_arena_s ccv_nnc_tensor_arena_t;
1407
1408
/**
1409
 * Opaque pointer to an arena of allocated execs.
1410
 */
1411
typedef struct ccv_nnc_graph_exec_arena_s ccv_nnc_graph_exec_arena_t;
1412
1413
/**
1414
 * On-stack object that references a tensor symbol in the symbolic graph.
1415
 */
1416
typedef struct {
1417
  int32_t d;
1418
  const ccv_nnc_symbolic_graph_t* graph;
1419
} ccv_nnc_tensor_symbol_t;
1420
1421
/**
1422
 * On-stack object that references an execution node symbol in the symbolic graph.
1423
 */
1424
typedef struct {
1425
  int32_t d;
1426
  const ccv_nnc_symbolic_graph_t* graph;
1427
} ccv_nnc_graph_exec_symbol_t;
1428
1429
enum {
1430
  CCV_NNC_TENSOR_SYMBOL_INIT_ZEROS = 0x01, /**< Initialize underlying tensor for the symbol with zeros */
1431
  CCV_NNC_TENSOR_SYMBOL_INIT_ONES = 0x02, /**< Initialize underlying tensor for the symbol with ones */
1432
  CCV_NNC_TENSOR_SYMBOL_TAPE_VAR = 0x04, /**< Mark this as a tape variable (it cannot be folded, will contain flag CCV_TAPE_ALLOC) */
1433
  // The one below is special.
1434
  CCV_NNC_TENSOR_SYMBOL_DEAD = 0x80000000, /**< Mark this tensor symbol as dead, any future usage will cause assertion */
1435
};
1436
1437
147k
#define CCV_NNC_TENSOR_SYMBOL_IS_DEAD(x) ((x) & CCV_NNC_TENSOR_SYMBOL_DEAD)
1438
1439
enum {
1440
  CCV_NNC_GRAPH_EXEC_DEAD = 0x1, /**< Mark this node as dead. */
1441
  CCV_NNC_GRAPH_EXEC_P_WHILE = 0x10, /**< Mark this node keyword is while */
1442
  CCV_NNC_GRAPH_EXEC_CASE_OF = 0x20, /**< Mark this node keyword is case_of */
1443
  CCV_NNC_GRAPH_EXEC_DISABLE_OPT = 0x10000, /**< Mark this node to avoid optimization pass. */
1444
};
1445
1446
449k
#define CCV_NNC_GRAPH_EXEC_IS_DEAD(x) ((x) & CCV_NNC_GRAPH_EXEC_DEAD)
1447
25.0k
#define CCV_NNC_GRAPH_REF(x) ((x)->_heap_graph_ref ? (x)->_heap_graph_ref : (x)->_inline_graph_ref) /* region counts: 178 : 24.8k */
1448
1449
enum {
1450
  CCV_NNC_NO_TENSOR_SYMBOL = -1, /**< Special symbol reference for no tensor symbol. */
1451
  CCV_NNC_WHILE_COUNT_TENSOR_SYMBOL = -2, /**< Special symbol reference for while loop count tensor. */
1452
};
1453
1454
enum {
1455
  CCV_NNC_NO_GRAPH_EXEC_SYMBOL = -1, /**< Special symbol reference for no exec symbol. */
1456
};
1457
1458
1459
enum {
1460
  CCV_NNC_SYMBOL_TENSOR, /**< Identifier for tensor symbol */
1461
  CCV_NNC_SYMBOL_TENSOR_ALIAS, /**< Identifier for tensor alias symbol */
1462
  CCV_NNC_SYMBOL_GRAPH_EXEC, /**< Identifier for exec symbol */
1463
};
1464
1465
22
#define CCV_NNC_IS_WHILE_COUNT_TENSOR_SYMBOL(d) (((uint32_t)(d) & 0xf) == 0xe)
1466
1467
/**
1468
 * A data structure to pass in a pair of tensor symbols.
1469
 */
1470
typedef struct {
1471
  ccv_nnc_tensor_symbol_t source; /**< The 'from' tensor symbol. */
1472
  ccv_nnc_tensor_symbol_t destination; /**< The 'to' tensor symbol. */
1473
} ccv_nnc_tensor_symbol_map_t;
1474
1475
/**
1476
 * Create a new empty symbolic graph. It is an opaque data structure that maintains the whole graph of computation in its symbolic form.
1477
 * Note that all graph mutation methods are not thread-safe. You should only operate the graph in serial fashion.
1478
 */
1479
CCV_WARN_UNUSED(ccv_nnc_symbolic_graph_t*) ccv_nnc_symbolic_graph_new(void);
1480
/**
1481
 * Create a tensor symbol (thus, with no actual memory space allocation) in a symbolic graph.
1482
 * @param graph The symbolic graph.
1483
 * @param info The tensor parameters.
1484
 * @param name The name of the tensor symbol, it is optional.
1485
 * @return A tensor symbol reference.
1486
 */
1487
CCV_WARN_UNUSED(ccv_nnc_tensor_symbol_t) ccv_nnc_tensor_symbol_new(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_param_t info, const char* const name);
1488
/**
1489
 * Create an alias to the tensor symbol as a tensor view (thus, pointing to the same memory region, but with different header info and offset).
1490
 * @param graph The symbolic graph.
1491
 * @param tensor_symbol The tensor symbol we are going to reference to.
1492
 * @param ofs The offset on each of the dimension.
1493
 * @param stride The stride of each dimension.
1494
 * @param info The tensor parameters for the new alias.
1495
 * @param name The name of the tensor symbol alias, it is optional.
1496
 * @return A tensor symbol alias reference.
1497
 */
1498
CCV_WARN_UNUSED(ccv_nnc_tensor_symbol_t) ccv_nnc_tensor_symbol_alias_new(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor_symbol, const int ofs[CCV_NNC_MAX_DIM_ALLOC], const int stride[CCV_NNC_MAX_DIM_ALLOC], const ccv_nnc_tensor_param_t info, const char* const name);
1499
/**
1500
 * Manually delete a tensor symbol off the symbolic graph.
1501
 * @param graph The symbolic graph.
1502
 * @param tensor The tensor symbol reference.
1503
 */
1504
void ccv_nnc_tensor_symbol_free(ccv_nnc_symbolic_graph_t* const graph, ccv_nnc_tensor_symbol_t tensor);
1505
/**
1506
 * Create a graph execution node (an operation that takes a set of inputs and generates a set of outputs).
1507
 * @param graph The symbolic graph.
1508
 * @param cmd The wrapped command.
1509
 * @param inputs The input tensor symbols array.
1510
 * @param input_size The size of input tensor symbols array.
1511
 * @param outputs The output tensor symbols array.
1512
 * @param output_size The size of output tensor symbols array.
1513
 * @param name The name of this execution node, optional.
1514
 * @return The execution node symbol reference.
1515
 */
1516
ccv_nnc_graph_exec_symbol_t ccv_nnc_graph_exec_symbol_new(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_cmd_t cmd, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, const ccv_nnc_tensor_symbol_t* const outputs, const int output_size, const char* const name);
1517
/**
1518
 * ccv_nnc_graph_exec_symbol_new defaults to using `ccv_nnc_hint_auto` to find the best hints for a set of inputs / outputs.
1519
 * However, you can also set your own hints.
1520
 * @param graph The symbolic graph.
1521
 * @param exec The execution node symbol reference.
1522
 * @param hint The hint for the command.
1523
 */
1524
void ccv_nnc_graph_exec_symbol_set_hint(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t exec, const ccv_nnc_hint_t hint);
1525
/**
1526
 * Manually delete an exec symbol off the symbolic graph.
1527
 * @param graph The symbolic graph.
1528
 * @param symbol The execution node symbol reference.
1529
 */
1530
void ccv_nnc_graph_exec_symbol_free(ccv_nnc_symbolic_graph_t* const graph, ccv_nnc_graph_exec_symbol_t symbol);
1531
enum {
1532
  CCV_NNC_AUTOGEN_ALL_EXECS = 0x1, /**< Automatic concatenation for all execution nodes */
1533
  CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS = 0x2, /**< Automatically find all source and destination nodes. */
1534
};
1535
/**
1536
 * Automatically concatenate these nodes together based on their inputs / outputs.
1537
 * Imagine this as generating the execution flow based on input tensors and output tensors.
1538
 * nil for execs and 0 for exec_size mean to loop over all the execs on the graph and autogen.
1539
 * @param graph The symbolic graph.
1540
 * @param execs The execution nodes array.
1541
 * @param exec_size The size of execution nodes array.
1542
 * @param flags The flags determines what operations to perform when concatenating.
1543
 * @return non-zero if cannot figure out.
1544
 */
1545
int ccv_nnc_graph_exec_symbol_autogen(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t* const execs, const int exec_size, const int flags);
1546
/**
1547
 * Set the default sources for a symbolic graph.
1548
 * @param graph The symbolic graph.
1549
 * @param sources The source execution nodes array.
1550
 * @param source_size The size of source execution nodes array.
1551
 */
1552
void ccv_nnc_symbolic_graph_set_sources(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size);
1553
/**
1554
 * Add one node to the default sources for a symbolic graph.
1555
 * @param graph The symbolic graph.
1556
 * @param source The source execution node.
1557
 */
1558
void ccv_nnc_symbolic_graph_add_source(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t source);
1559
/**
1560
 * Get the pointer to the default sources.
1561
 * @param graph The symbolic graph.
1562
 * @return The pointer to the source execution nodes array.
1563
 */
1564
ccv_nnc_graph_exec_symbol_t* ccv_nnc_symbolic_graph_sources(const ccv_nnc_symbolic_graph_t* const graph);
1565
/**
1566
 * Get the size of the default source nodes array.
1567
 * @param graph The symbolic graph.
1568
 * @return The size of the default source nodes array.
1569
 */
1570
int ccv_nnc_symbolic_graph_source_size(const ccv_nnc_symbolic_graph_t* const graph);
1571
/**
1572
 * Set the default destinations for a symbolic graph.
1573
 * @param graph The symbolic graph.
1574
 * @param destinations The destination execution nodes array.
1575
 * @param destination_size The size of destination execution nodes array.
1576
 */
1577
void ccv_nnc_symbolic_graph_set_destinations(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size);
1578
/**
1579
 * Add one node to the default destinations for a symbolic graph.
1580
 * @param graph The symbolic graph.
1581
 * @param destination The destination execution node.
1582
 */
1583
void ccv_nnc_symbolic_graph_add_destination(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t destination);
1584
/**
1585
 * Get the pointer to the default destinations.
1586
 * @param graph The symbolic graph.
1587
 * @return The pointer to the destination execution nodes array.
1588
 */
1589
ccv_nnc_graph_exec_symbol_t* ccv_nnc_symbolic_graph_destinations(const ccv_nnc_symbolic_graph_t* const graph);
1590
/**
1591
 * Get the size of the default destination nodes array.
1592
 * @param graph The symbolic graph.
1593
 * @return The size of the default destination nodes array.
1594
 */
1595
int ccv_nnc_symbolic_graph_destination_size(const ccv_nnc_symbolic_graph_t* const graph);
1596
/**
1597
 * Generate output that can be parsed by GraphViz (DOT language).
1598
 * @param graph The symbolic graph.
1599
 * @param flags Either CCV_NNC_SHORT_DOT_GRAPH or CCV_NNC_LONG_DOT_GRAPH.
1600
 * @param out The output file stream.
1601
 */
1602
void ccv_nnc_symbolic_graph_dot(const ccv_nnc_symbolic_graph_t* const graph, const int flags, FILE* out);
1603
1604
/**
1605
 * The data structure to wrap a tensor symbol and a concrete tensor together.
1606
 */
1607
typedef struct {
1608
  ccv_nnc_tensor_symbol_t symbol;
1609
  const ccv_nnc_tensor_t* tensor;
1610
} ccv_nnc_tensor_bind_t;
1611
1612
typedef struct {
1613
  void* (*alloc)(const int type, const int pinned_mem /* Currently only used to annotate CCV_TENSOR_PINNED_MEM, future can be expanded to generic flags */, const size_t size, void* const arg);
1614
  void (*free)(void* const ptr, void* const arg);
1615
} ccv_nnc_symbolic_graph_compile_allocator_vtab_t;
1616
1617
typedef struct {
1618
  const ccv_nnc_symbolic_graph_compile_allocator_vtab_t* isa;
1619
  struct {
1620
    void* alloc;
1621
    void* free;
1622
  } context;
1623
} ccv_nnc_symbolic_graph_compile_allocator_t;
1624
1625
typedef struct {
1626
  ccv_nnc_symbolic_graph_compile_allocator_t allocator;
1627
} ccv_nnc_symbolic_graph_compile_param_t;
1628
1629
/**
1630
 * Compile a symbolic graph into a graph that can be executed, and allocate a set of tensors (an opaque data structure, the tensor arena) based on which tensor symbols are the inputs and which are the outputs. The tensor allocation is done to minimize the required storage.
1631
 * tensor_binds provides custom bindings for these tensors. You are still responsible for managing the life-time of these tensors.
1632
 * outputs marks the tensor symbols that need to be kept until the end of the graph.
1633
 * @param graph The symbolic graph.
1634
 * @param compile_params A ccv_nnc_symbolic_graph_compile_param_t struct that defines compilation parameters.
1635
 * @param tensor_binds The binding array (a tensor symbol and a concrete tensor). We replace everywhere that uses the tensor symbol with the concrete tensor.
1636
 * @param tensor_bind_size The size of the binding array.
1637
 * @param outputs The output tensor symbols whose values we want to keep.
1638
 * @param output_size The size of the output tensor symbols array.
1639
 * @param sources The sources for the graph.
1640
 * @param source_size The size of the sources array. 0 to use default sources.
1641
 * @param destinations The destinations for the graph.
1642
 * @param destination_size The size of the destinations array. 0 to use default destinations.
1643
 * @param graph_ref The pointer to store the generated concrete graph.
1644
 * @param tensor_arena_ref The pointer to store ccv_nnc_tensor_arena_t.
1645
 * @param graph_exec_arena_ref The pointer to store ccv_nnc_graph_exec_arena_t.
1646
 */
1647
void ccv_nnc_symbolic_graph_compile(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_symbolic_graph_compile_param_t compile_params, const ccv_nnc_tensor_bind_t* const tensor_binds, const int tensor_bind_size, const ccv_nnc_tensor_symbol_t* const outputs, const int output_size, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size, ccv_nnc_graph_t** const graph_ref, ccv_nnc_tensor_arena_t** const tensor_arena_ref, ccv_nnc_graph_exec_arena_t** const graph_exec_arena_ref);
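/**
 * Example (a sketch, not part of the original header): compile a graph built as in the autogen
 * example above, assuming ccv_nnc_default_compile_params and the SYMBOLIC_GRAPH_SOURCES /
 * SYMBOLIC_GRAPH_DESTINATIONS conveniences from ccv_nnc_easy.h.
 * @code
 * ccv_nnc_graph_t* run_graph = 0;
 * ccv_nnc_tensor_arena_t* tensor_arena = 0;
 * ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0;
 * ccv_nnc_symbolic_graph_compile(graph, ccv_nnc_default_compile_params,
 *   0, 0, // no custom tensor bindings
 *   TENSOR_SYMBOL_LIST(c), // keep c until the end of the graph
 *   SYMBOLIC_GRAPH_SOURCES(graph), SYMBOLIC_GRAPH_DESTINATIONS(graph),
 *   &run_graph, &tensor_arena, &graph_exec_arena);
 * @endcode
 */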
1648
/**
1649
 * Free the symbolic graph and its associated memory. Note that if you compiled a graph / tensor arena out of this symbolic graph, these won't be freed.
1650
 * @param graph The symbolic graph.
1651
 */
1652
void ccv_nnc_symbolic_graph_free(ccv_nnc_symbolic_graph_t* const graph);
1653
/**
1654
 * Find the corresponding tensor by a symbol from the tensor arena.
1655
 * @param tensor_arena The tensor arena object generated through compilation.
1656
 * @param symbol The tensor symbol reference. Because the tensor symbol reference lives on the stack, it can still be used even after the original symbolic graph is freed.
1657
 * @return A concrete tensor from the tensor arena.
1658
 */
1659
CCV_WARN_UNUSED(ccv_nnc_tensor_t*) ccv_nnc_tensor_from_symbol(const ccv_nnc_tensor_arena_t* const tensor_arena, const ccv_nnc_tensor_symbol_t symbol);
1660
/**
1661
 * Bind a tensor to a symbol. You are still responsible for managing the life-time of the tensor, making sure it is not freed until everything is done.
1662
 * @param tensor_arena The tensor arena object generated through compilation.
1663
 * @param symbol The tensor symbol reference. Because the tensor symbol reference lives on the stack, it can still be used even after the original symbolic graph is freed.
1664
 * @param tensor The new tensor to bind to.
1665
 */
1666
void ccv_nnc_tensor_bind_symbol(ccv_nnc_tensor_arena_t* const tensor_arena, const ccv_nnc_tensor_symbol_t symbol, const ccv_nnc_tensor_t* const tensor);
1667
/**
1668
 * Clear existing bindings on the tensor arena.
1669
 * @param tensor_arena The tensor arena object generated through compilation to clear bindings.
1670
 */
1671
void ccv_nnc_tensor_arena_clear_bindings(ccv_nnc_tensor_arena_t* const tensor_arena);
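/**
 * Example (a sketch, not part of the original header): rebind a symbol to a caller-owned
 * tensor between runs, then clear the binding. The names (tensor_arena, a) are assumed from
 * the compile sketch above; the shape macro is illustrative.
 * @code
 * ccv_nnc_tensor_t* const replacement = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 1), 0);
 * ccv_nnc_tensor_bind_symbol(tensor_arena, a, replacement); // a now reads from replacement
 * // ... run the graph ...
 * ccv_nnc_tensor_arena_clear_bindings(tensor_arena);
 * ccv_nnc_tensor_free(replacement); // the caller manages the life-time
 * @endcode
 */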
1672
/**
1673
 * Free the data buffer of the tensor arena.
1674
 * @param tensor_arena The tensor arena object generated through compilation.
1675
 */
1676
void ccv_nnc_tensor_arena_buffer_free(ccv_nnc_tensor_arena_t* const tensor_arena);
1677
/**
1678
 * Free the opaque tensor arena structure.
1679
 * @param tensor_arena The tensor arena object generated through compilation.
1680
 */
1681
void ccv_nnc_tensor_arena_free(ccv_nnc_tensor_arena_t* const tensor_arena);
1682
/**
1683
 * Find the corresponding graph exec by an exec symbol from the graph exec arena.
1684
 * @param graph_exec_arena The graph execution node arena object generated through compilation.
1685
 * @param symbol The execution node symbol reference. Because the execution node symbol reference lives on the stack, it can still be used even after the original symbolic graph is freed.
1686
 * @return An execution node reference to the concrete graph.
1687
 */
1688
CCV_WARN_UNUSED(ccv_nnc_graph_exec_t) ccv_nnc_graph_exec_from_symbol(const ccv_nnc_graph_exec_arena_t* const graph_exec_arena, const ccv_nnc_graph_exec_symbol_t symbol);
1689
/**
1690
 * Return the node that can drive all the source nodes from the compilation.
1691
 * @param graph_exec_arena The graph execution node arena object generated through compilation.
1692
 * @return An execution node reference that is the source.
1693
 */
1694
CCV_WARN_UNUSED(ccv_nnc_graph_exec_t) ccv_nnc_graph_exec_source(const ccv_nnc_graph_exec_arena_t* const graph_exec_arena);
1695
/**
1696
 * Return the node that can drain all the destination nodes from the compilation.
1697
 * @param graph_exec_arena The graph execution node arena object generated through compilation.
1698
 * @return An execution node reference that is the destination.
1699
 */
1700
CCV_WARN_UNUSED(ccv_nnc_graph_exec_t) ccv_nnc_graph_exec_destination(const ccv_nnc_graph_exec_arena_t* const graph_exec_arena);
1701
/**
1702
 * Free the opaque graph exec arena structure.
1703
 * @param graph_exec_arena The graph execution node arena object generated through compilation.
1704
 */
1705
void ccv_nnc_graph_exec_arena_free(ccv_nnc_graph_exec_arena_t* const graph_exec_arena);
1706
/**
1707
 * Write symbolic graph to disk, along with some binding tensors.
1708
 * @param graph The symbolic graph.
1709
 * @param tensor_binds The binding array (pair of tensor symbol and concrete tensor).
1710
 * @param tensor_bind_size The size of the binding array.
1711
 * @param fn The file name.
1712
 */
1713
void ccv_nnc_symbolic_graph_write(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_bind_t* const tensor_binds, const int tensor_bind_size, const char* const fn);
1714
/**
1715
 * Read symbolic graph from disk, with some binding tensors.
1716
 * @param fn The file name.
1717
 * @param graph_ref The pointer to store symbolic graph.
1718
 * @param tensor_binds_ref The pointer to store the binding array.
1719
 * @param tensor_bind_size_ref The pointer to store the size of the binding array.
1720
 */
1721
void ccv_nnc_symbolic_graph_read(const char* const fn, ccv_nnc_symbolic_graph_t** const graph_ref, ccv_nnc_tensor_bind_t** const tensor_binds_ref, int* const tensor_bind_size_ref);
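/**
 * Example (a sketch, not part of the original header): round-trip a graph through disk with
 * no binding tensors. The file path is arbitrary.
 * @code
 * ccv_nnc_symbolic_graph_write(graph, 0, 0, "/tmp/graph.checkpoint");
 * ccv_nnc_symbolic_graph_t* read_graph = 0;
 * ccv_nnc_tensor_bind_t* tensor_binds = 0;
 * int tensor_bind_size = 0;
 * ccv_nnc_symbolic_graph_read("/tmp/graph.checkpoint", &read_graph, &tensor_binds, &tensor_bind_size);
 * @endcode
 */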
1722
1723
/**
1724
 * The format callback function. Note that these are all integer ids. They can be filled into
1725
 * ccv_nnc_graph_exec_symbol_t.d or ccv_nnc_tensor_symbol_t.d.
1726
 * @param graph The symbolic graph.
1727
 * @param node The id for the node. It is unique in the graph.
1728
 * @param name The name for the node. It is either NULL or a \0-terminated string.
1729
 * @param cmd The associated command for this node.
1730
 * @param flags The flag that helps to identify whether it is a sub-graph and which type it is (P_WHILE or CASE_OF).
1731
 * @param incomings The incoming nodes for execution.
1732
 * @param incoming_size The number of incoming nodes for execution.
1733
 * @param outgoings The outgoing nodes for execution.
1734
 * @param outgoing_size The number of outgoing nodes for execution.
1735
 * @param inputs The input tensor symbols.
1736
 * @param input_size The number of the input tensor symbols.
1737
 * @param outputs The output tensor symbols.
1738
 * @param output_size The number of the output tensor symbols.
1739
 * @param context The context passed through ccv_nnc_symbolic_graph_format.
1740
 */
1741
typedef void(*ccv_nnc_symbolic_graph_format_f)(const ccv_nnc_symbolic_graph_t* const graph, const int node, const char* const name, const ccv_nnc_cmd_t cmd, const int flags, const int* const incomings, const int incoming_size, const int* const outgoings, const int outgoing_size, const int* const inputs, const int input_size, const int* const outputs, const int output_size, void* const context);
1742
/**
1743
 * Provide a hook for an upper level to do custom formatting of a given symbolic graph. You can
1744
 * implement logic to format the graph into protobuf or JSON, or to do persistence. However, this
1745
 * is not the method for you to visit the graph and do mutations on it. This function doesn't
1746
 * recurse into sub-graphs. You need to inspect each node to know whether it is a sub-graph and
1747
 * handle it accordingly.
1748
 * @param graph The symbolic graph.
1749
 * @param sources The sources for the graph.
1750
 * @param source_size The size of the sources array. 0 to use default sources.
1751
 * @param destinations The destinations for the graph.
1752
 * @param destination_size The size of the destinations array. 0 to use default destinations.
1753
 * @param format_fn The format callback to be called on every node.
1754
 * @param context The context that will be passed to the callback.
1755
 */
1756
void ccv_nnc_symbolic_graph_format(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size, const ccv_nnc_symbolic_graph_format_f format_fn, void* const context);
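/**
 * Example (a sketch, not part of the original header): a minimal callback that dumps one line
 * per node, wired up with the default sources / destinations.
 * @code
 * static void print_node(const ccv_nnc_symbolic_graph_t* const graph, const int node, const char* const name, const ccv_nnc_cmd_t cmd, const int flags, const int* const incomings, const int incoming_size, const int* const outgoings, const int outgoing_size, const int* const inputs, const int input_size, const int* const outputs, const int output_size, void* const context)
 * {
 *   // context is the FILE* passed through ccv_nnc_symbolic_graph_format below.
 *   fprintf((FILE*)context, "node %d (%s): %d input(s), %d output(s)\n", node, name ? name : "", input_size, output_size);
 * }
 * // ...
 * ccv_nnc_symbolic_graph_format(graph, 0, 0, 0, 0, print_node, stderr);
 * @endcode
 */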
1757
1758
/** @} */
1759
1760
/**
1761
 * @defgroup level_3_others Others
1762
 * @{
1763
 */
1764
1765
/**
1766
 * Return the symbol it aliases to.
1767
 * @param graph The symbolic graph.
1768
 * @param tensor_symbol The tensor symbol alias.
1769
 * @return A tensor symbol reference to the original tensor symbol. If this symbol has no reference, returns NO_SYMBOL (.graph = 0).
1770
 */
1771
CCV_WARN_UNUSED(ccv_nnc_tensor_symbol_t) ccv_nnc_tensor_symbol_alias_to(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor_symbol);
1772
/**
1773
 * Set the tensor symbol parameters.
1774
 * @param graph The symbolic graph.
1775
 * @param tensor The tensor symbol reference.
1776
 * @param info The new tensor parameters.
1777
 * @return non-zero if encountered errors.
1778
 */
1779
int ccv_nnc_tensor_symbol_set(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor, const ccv_nnc_tensor_param_t info);
1780
/**
1781
 * Get the parameters for a tensor symbol.
1782
 * @param graph The symbolic graph.
1783
 * @param tensor The tensor symbol reference.
1784
 * @return The tensor parameters.
1785
 */
1786
CCV_WARN_UNUSED(ccv_nnc_tensor_param_t) ccv_nnc_tensor_symbol_params(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor);
1787
/**
1788
 * Get the name for a tensor symbol.
1789
 * @param graph The symbolic graph.
1790
 * @param tensor The tensor symbol reference.
1791
 * @return The tensor name if available. Otherwise 0. The memory is managed by the graph.
1792
 */
1793
CCV_WARN_UNUSED(const char*) ccv_nnc_tensor_symbol_name(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor);
1794
/**
1795
 * Set the tensor symbol alias parameters.
1796
 * @param graph The symbolic graph.
1797
 * @param tensor The tensor symbol reference.
1798
 * @param ofs The offset on each of the dimensions.
1799
 * @param stride The stride of each dimension.
1800
 * @return non-zero if it is not a tensor alias.
1801
 */
1802
int ccv_nnc_tensor_symbol_alias_set(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor, const int ofs[CCV_NNC_MAX_DIM_ALLOC], const int stride[CCV_NNC_MAX_DIM_ALLOC]);
1803
/**
1804
 * Get the alias parameters (offset and stride) for a tensor symbol alias.
1805
 * @param graph The symbolic graph.
1806
 * @param tensor The tensor symbol reference.
1807
 * @param ofs The offset on each of the dimensions.
1808
 * @param stride The stride of each dimension.
1809
 * @return non-zero if it is not a tensor alias.
1810
 */
1811
int ccv_nnc_tensor_symbol_alias_params(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor, int ofs[CCV_NNC_MAX_DIM_ALLOC], int stride[CCV_NNC_MAX_DIM_ALLOC]);
1812
/**
1813
 * Set the flags for this tensor symbol. The flags are only used for the symbol, not for the tensor.
1814
 * @param graph The symbolic graph.
1815
 * @param tensor The tensor symbol reference.
1816
 * @param flags A reserved field for flags.
1817
 */
1818
int ccv_nnc_tensor_symbol_set_flags(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor, const int flags);
1819
/**
1820
 * Get all the flags for a tensor symbol.
1821
 * @param graph The symbolic graph.
1822
 * @param tensor The tensor symbol reference.
1823
 */
1824
CCV_WARN_UNUSED(int) ccv_nnc_tensor_symbol_flags(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor);
1825
/**
1826
 * Set the cmd of this exec symbol.
1827
 * @param graph The symbolic graph.
1828
 * @param exec The execution node symbol reference.
1829
 * @param cmd The new wrapped command.
1830
 */
1831
void ccv_nnc_graph_exec_symbol_set(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t exec, const ccv_nnc_cmd_t cmd);
1832
/**
1833
 * Set the flags for this exec symbol. The flags are only used for the symbol. Only the higher 16 bits can be set.
1834
 * @param graph The symbolic graph.
1835
 * @param exec The execution node symbol reference.
1836
 * @param flags A reserved field for flags.
1837
 */
1838
void ccv_nnc_graph_exec_symbol_set_flags(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t exec, const int flags);
1839
/**
1840
 * Get the flags for an exec symbol. Only the higher 16 bits can be retrieved.
1841
 * @param graph The symbolic graph.
1842
 * @param exec The execution node symbol reference.
1843
 */
1844
CCV_WARN_UNUSED(int) ccv_nnc_graph_exec_symbol_flags(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t exec);
1845
/**
1846
 * Return the command on this exec symbol.
1847
 * @param graph The symbolic graph.
1848
 * @param exec The execution node symbol reference.
1849
 * @return The wrapped command.
1850
 */
1851
CCV_WARN_UNUSED(ccv_nnc_cmd_t) ccv_nnc_graph_exec_symbol_cmd(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t exec);
1852
/**
1853
 * Return the name of this exec symbol.
1854
 * @param graph The symbolic graph.
1855
 * @param exec The execution node symbol reference.
1856
 * @return The name for the exec symbol if available. The memory is managed by the graph.
1857
 */
1858
CCV_WARN_UNUSED(const char*) ccv_nnc_graph_exec_symbol_name(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t exec);
1859
/**
1860
 * Set the inputs / outputs for an exec symbol.
1861
 * @param graph The symbolic graph.
1862
 * @param exec The execution node symbol reference.
1863
 * @param inputs The input tensor symbols array.
1864
 * @param input_size The size of input tensor symbols array.
1865
 * @param outputs The output tensor symbols array.
1866
 * @param output_size The size of output tensor symbols array.
1867
 */
1868
void ccv_nnc_graph_exec_symbol_set_io(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t exec, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, const ccv_nnc_tensor_symbol_t* const outputs, const int output_size);
1869
/**
1870
 * Manually concatenate an input node with an output graph node.
1871
 * @param graph The symbolic graph.
1872
 * @param source The source execution node symbol to connect.
1873
 * @param destination The destination execution node symbol to connect to.
1874
 * @return non-zero if it cannot concatenate successfully.
1875
 */
1876
int ccv_nnc_graph_exec_symbol_concat(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t source, const ccv_nnc_graph_exec_symbol_t destination);
1877
/**
1878
 * Manually disconnect an input node from an output graph node for this graph.
1879
 * @param graph The symbolic graph.
1880
 * @param source The source execution node symbol to disconnect.
1881
 * @param destination The destination execution node symbol to disconnect from.
1882
 * @return non-zero if it cannot disjoin successfully.
1883
 */
1884
int ccv_nnc_graph_exec_symbol_disjoin(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t source, const ccv_nnc_graph_exec_symbol_t destination);
1885
/**
1886
 * Number of exec symbols.
1887
 * @param graph The symbolic graph.
1888
 */
1889
CCV_WARN_UNUSED(int) ccv_nnc_graph_exec_symbol_count(const ccv_nnc_symbolic_graph_t* const graph);
1890
/**
1891
 * Number of active exec symbols.
1892
 * @param graph The symbolic graph.
1893
 * @param type The type of op, either CCV_NNC_SYMBOL_TENSOR or CCV_NNC_SYMBOL_GRAPH_EXEC (will error out on CCV_NNC_SYMBOL_TENSOR_ALIAS).
1894
 */
1895
CCV_WARN_UNUSED(int) ccv_nnc_symbolic_graph_active_symbol_count(const ccv_nnc_symbolic_graph_t* const graph, const int type);
1896
/**
1897
 * Substitution function. Given an execution node symbol and a command, return a new command.
1898
 */
1899
typedef ccv_nnc_cmd_t(*ccv_nnc_symbolic_graph_subst_f)(const ccv_nnc_graph_exec_symbol_t symbol, const ccv_nnc_cmd_t cmd);
1900
/**
1901
 * Generate a duplicate of the provided graph.
1902
 * While generating the duplicate, it calls the function pointer to re-process the node type.
1903
 * @param graph The symbolic graph.
1904
 * @param subst The substitution function.
1905
 * @return The duplicated symbolic graph.
1906
 */
1907
CCV_WARN_UNUSED(ccv_nnc_symbolic_graph_t*) ccv_nnc_symbolic_graph_dup(const ccv_nnc_symbolic_graph_t* const graph, ccv_nnc_symbolic_graph_subst_f subst);
1908
/**
1909
 * Number of tensor symbols.
1910
 * @param graph The symbolic graph.
1911
 */
1912
CCV_WARN_UNUSED(int) ccv_nnc_tensor_symbol_count(const ccv_nnc_symbolic_graph_t* const graph);
1913
/**
1914
 * Compute all the tensor shapes within this graph.
1915
 * @param graph The symbolic graph.
1916
 * @param sources The sources for the graph.
1917
 * @param source_size The size of the sources array. 0 to use default sources.
1918
 * @param destinations The destinations for the graph.
1919
 * @param destination_size The size of the destinations array. 0 to use default destinations.
1920
 */
1921
void ccv_nnc_symbolic_graph_tensor_auto(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size);
1922
/**
1923
 * For a given tensor symbol, this method resolves to its local reference inside the given graph.
1924
 * This is related to the sub-graphs of symbolic graphs. A tensor symbol in the sub-graph can still have a
1925
 * representation in the parent graph. This method is used to find the local reference in any graph.
1926
 * @param graph The symbolic graph.
1927
 * @param tensor_symbol The tensor symbol we want to resolve.
1928
 * @return A tensor symbol reference in the given graph.
1929
 */
1930
CCV_WARN_UNUSED(ccv_nnc_tensor_symbol_t) ccv_nnc_tensor_symbol_resolve(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor_symbol);
1931
/**
1932
 * Pass a graph's tensor symbol into its sub-graph. We will make the connection that the source tensor
1933
 * symbol in the source symbolic graph is the destination tensor symbol in the destination symbolic graph.
1934
 * The reason to do this inference is because a tensor symbol is local to a symbolic graph under the hood.
1935
 * Although you can use tensor symbols from different graphs directly (this method or the resolve
1936
 * method above is called when creating an execution node symbol), sometimes you need this method to do it manually.
1937
 * @param src_graph The source symbolic graph.
1938
 * @param dest_graph The destination symbolic graph.
1939
 * @param src_tensor_symbol The tensor symbol in the source graph.
1940
 * @param dest_tensor_symbol The tensor symbol in the destination graph.
1941
 */
1942
void ccv_nnc_tensor_symbol_hookup(ccv_nnc_symbolic_graph_t* const src_graph, ccv_nnc_symbolic_graph_t* const dest_graph, const ccv_nnc_tensor_symbol_t src_tensor_symbol, const ccv_nnc_tensor_symbol_t dest_tensor_symbol);
1943
/**
1944
 * Set bypasses for a tensor symbol.
1945
 * For case..of graphs, if the condition isn't met, we will skip the execution of a sub-graph.
1946
 * However, in that case, we cannot express easily which output tensor corresponds to which input tensor.
1947
 * This method provides the way.
1948
 * @param graph The symbolic graph.
1949
 * @param symbol_map The pair of tensors array, source is the input tensor, destination is the output tensor.
1950
 * @param symbol_map_size The size of the tensor pairs array.
1951
 */
1952
void ccv_nnc_tensor_symbol_set_bypasses(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_map_t* const symbol_map, const int symbol_map_size);
1953
/**
1954
 * Fetch the inputs / outputs for an exec symbol. For efficiency considerations, this returns pointers directly.
1955
 * @param graph The symbolic graph.
1956
 * @param symbol The execution node symbol reference.
1957
 * @param inputs The pointer to store input tensor symbols array.
1958
 * @param input_size The pointer to store the size of input tensor symbols array.
1959
 * @param outputs The pointer to store output tensor symbols array.
1960
 * @param output_size The pointer to store the size of output tensor symbols array.
1961
 */
1962
void ccv_nnc_graph_exec_symbol_io(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t symbol, const int** const inputs, int* const input_size, const int** const outputs, int* const output_size);
1963
/**
1964
 * Replace an input / output tensor symbol on an exec symbol.
1965
 * @param graph The symbolic graph.
1966
 * @param symbol The execution node symbol reference.
1967
 * @param old_symbol The old tensor symbol to be replaced.
1968
 * @param new_symbol The new tensor symbol on input / output.
1969
 */
1970
void ccv_nnc_graph_exec_symbol_replace_io(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t symbol, const ccv_nnc_tensor_symbol_t old_symbol, const ccv_nnc_tensor_symbol_t new_symbol);
1971
/**
1972
 * Which exec symbols this one is connected to. For efficiency considerations, this returns a pointer directly.
1973
 * @param graph The symbolic graph.
1974
 * @param symbol The execution node symbol reference.
1975
 * @param tos The pointer to store outgoing indexes of the execution nodes.
1976
 * @param to_size The pointer to store the number of outgoing indexes.
1977
 */
1978
void ccv_nnc_graph_exec_symbol_to(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t symbol, const int** const tos, int* const to_size);
1979
/**
1980
 * Find the size allocated on the opaque tensor arena structure.
1981
 * @param tensor_arena The tensor arena object generated through compilation.
1982
 * @return The total allocated size in bytes.
1983
 */
1984
CCV_WARN_UNUSED(uint64_t) ccv_nnc_tensor_arena_size(const ccv_nnc_tensor_arena_t* const tensor_arena);
1985
/**
1986
 * Query whether a set of sources are the ancestors to a set of destination nodes.
1987
 * @param graph The symbolic graph.
1988
 * @param sources The exec sources to check whether they can reach some of the destinations.
1989
 * @param source_size How many sources in the source list.
1990
 * @param destinations The exec destinations to check whether sources can reach.
1991
 * @param destination_size How many destinations in the destination list.
1992
 * @param bitmask Bit return value; each bit represents a source, and 1 means it can reach some of the destinations.
1993
 */
1994
void ccv_nnc_symbolic_graph_sources_to_destinations(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size, uint64_t* const bitmask);
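/**
 * Example (a sketch, not part of the original header): check which of up to 64 sources can
 * reach some destination (with more sources, pass an array of uint64_t words).
 * @code
 * uint64_t bitmask = 0;
 * ccv_nnc_symbolic_graph_sources_to_destinations(graph, sources, source_size, destinations, destination_size, &bitmask);
 * int i;
 * for (i = 0; i < source_size; i++)
 *   if (bitmask & ((uint64_t)1 << i))
 *     printf("source %d reaches some destination\n", i);
 * @endcode
 */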
1995
/**
1996
 * Re-init the tensor arena with an updated symbolic graph. This won't work if the symbolic graph requires
1997
 * larger tensors than what's available. Used properly, this method lets you avoid re-compiling a graph
1998
 * just because some tensor shapes changed.
1999
 * @param tensor_arena The tensor arena object generated through compilation.
2000
 * @param graph The updated symbolic graph with different tensor shape.
2001
 * @return 0 if successful, -1 if the tensor arena doesn't have enough space to just re-init.
2002
 */
2003
int ccv_nnc_tensor_arena_reinit(ccv_nnc_tensor_arena_t* const tensor_arena, const ccv_nnc_symbolic_graph_t* const graph);
2004
/**
2005
 * Re-init the graph exec arena with an updated symbolic graph. This updates some hyper-parameters of the
2006
 * executions to match the updated symbolic graph. Note that this will try to keep the backend / algorithm
2007
 * selection from the previous graph if possible (meaning if the command still matches).
2008
 * @param graph_exec_arena The graph exec arena object that provides the mapping between the symbolic and concrete graphs.
2009
 * @param graph The concrete graph generated through compile method.
2010
 * @param symbolic_graph The updated symbolic graph.
2011
 */
2012
void ccv_nnc_graph_exec_reinit(ccv_nnc_graph_exec_arena_t* const graph_exec_arena, ccv_nnc_graph_t* const graph, const ccv_nnc_symbolic_graph_t* const symbolic_graph);
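/**
 * Example (a sketch, not part of the original header): shrink a tensor symbol's shape in place
 * and re-init both arenas instead of recompiling. The names (graph, a, tensor_arena, run_graph,
 * graph_exec_arena) are assumed from the compile sketch above; the smaller shape is illustrative.
 * @code
 * ccv_nnc_tensor_symbol_set(graph, a, CPU_TENSOR_NHWC(32F, 1)); // new, smaller shape
 * if (ccv_nnc_tensor_arena_reinit(tensor_arena, graph) == 0)
 *   ccv_nnc_graph_exec_reinit(graph_exec_arena, run_graph, graph);
 * @endcode
 */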
2013
/**
2014
 * Function prototype for tensor symbol creation callback.
2015
 */
2016
typedef void(*ccv_nnc_tensor_symbol_new_hook_f)(void* context, const ccv_nnc_tensor_symbol_t symbol, const ccv_nnc_tensor_param_t info, const char* const name);
2017
/**
2018
 * Hook into the call to ccv_nnc_tensor_symbol_new. Returns the previously provided context if you call into this method again.
2019
 * @param graph The symbolic graph.
2020
 * @param hook The function to be called when a new tensor symbol is created.
2021
 * @param context The context associated with the callback function.
2022
 * @param previous_hook Return the previous hook if provided.
2023
 * @return The previous context associated with the previous hook function.
2024
 */
2025
void* ccv_nnc_tensor_symbol_new_hook(ccv_nnc_symbolic_graph_t* const graph, ccv_nnc_tensor_symbol_new_hook_f hook, void* context, ccv_nnc_tensor_symbol_new_hook_f* previous_hook);
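/**
 * Example (a sketch, not part of the original header): count every tensor symbol created on a
 * graph, remembering whatever hook was installed before.
 * @code
 * static void count_symbol(void* context, const ccv_nnc_tensor_symbol_t symbol, const ccv_nnc_tensor_param_t info, const char* const name)
 * {
 *   ++*(int*)context; // context is the int* passed below
 * }
 * // ...
 * int count = 0;
 * ccv_nnc_tensor_symbol_new_hook_f previous_hook = 0;
 * void* const previous_context = ccv_nnc_tensor_symbol_new_hook(graph, count_symbol, &count, &previous_hook);
 * @endcode
 */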
2026
/**
2027
 * Function prototype for tensor symbol alias creation callback.
2028
 */
2029
typedef void(*ccv_nnc_tensor_symbol_alias_new_hook_f)(void* context, const ccv_nnc_tensor_symbol_t symbol, const ccv_nnc_tensor_symbol_t from_symbol, const int ofs[CCV_NNC_MAX_DIM_ALLOC], const int stride[CCV_NNC_MAX_DIM_ALLOC], const ccv_nnc_tensor_param_t info, const char* const name);
2030
/**
2031
 * Hook into the call to ccv_nnc_tensor_symbol_alias_new. Returns the previously provided context if you call into this method again.
2032
 * @param graph The symbolic graph.
2033
 * @param hook The function to be called when a new tensor symbol alias is created.
2034
 * @param context The context associated with the callback function.
2035
 * @param previous_hook Return the previous hook if provided.
2036
 * @return The previous context associated with the previous hook function.
2037
 */
2038
void* ccv_nnc_tensor_symbol_alias_new_hook(ccv_nnc_symbolic_graph_t* const graph, ccv_nnc_tensor_symbol_alias_new_hook_f hook, void* context, ccv_nnc_tensor_symbol_alias_new_hook_f* previous_hook);
2039
/**
2040
 * Set the pair reference for tensor symbols. A pair reference for tensor symbols has a very specific meaning.
2041
 * For a backward pass that involves sub-graphs, the commands in the sub-graph could reference tensor symbols of
2042
 * a different graph (its forward pass graph). That is not allowed (two graphs with no ancestral relationship
2043
 * cannot share a tensor symbol). So we create a new tensor symbol, but set the pair reference.
2044
 * @param graph The symbolic graph.
2045
 * @param tensor_symbol The tensor symbol in the current graph.
2046
 * @param pair_tensor_symbol The tensor symbol in the pair graph.
2047
 */
2048
void ccv_nnc_tensor_symbol_pair_with(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor_symbol, const ccv_nnc_tensor_symbol_t pair_tensor_symbol);
2049
/**
2050
 * Function prototype for execution node symbol creation callback.
2051
 */
2052
typedef void(*ccv_nnc_graph_exec_symbol_new_hook_f)(void* context, const ccv_nnc_graph_exec_symbol_t symbol, const ccv_nnc_cmd_t cmd, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, const ccv_nnc_tensor_symbol_t* const outputs, const int output_size, const char* const name);
2053
/**
2054
 * Hook into the call to ccv_nnc_graph_exec_symbol_new. Returns the previously provided context if you call into this method again.
2055
 * @param graph The symbolic graph.
2056
 * @param hook The function to be called when a new execution node symbol is created.
2057
 * @param context The context associated with the callback function.
2058
 * @param previous_hook The previous hook function associated with this operation.
2059
 * @return The previous context associated with the previous hook function.
2060
 */
2061
void* ccv_nnc_graph_exec_symbol_new_hook(ccv_nnc_symbolic_graph_t* const graph, ccv_nnc_graph_exec_symbol_new_hook_f hook, void* context, ccv_nnc_graph_exec_symbol_new_hook_f* previous_hook);
2062
/**
2063
 * Set the pair reference for an exec. This is very similar to the one for the concrete graph. A pair reference
2064
 * of a backward pass execution node is its forward pass counterpart.
2065
 * @param graph The symbolic graph.
2066
 * @param exec_symbol The execution node symbol in the current graph.
2067
 * @param pair_exec_symbol The pairing execution node symbol.
2068
 */
2069
void ccv_nnc_graph_exec_symbol_pair_with(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t exec_symbol, const ccv_nnc_graph_exec_symbol_t pair_exec_symbol);
2070
2071
/** @} */
2072
2073
/** @} */
2074
2075
/**
2076
 * @defgroup level_3_5 Level-3.5 API
2077
 * @{
2078
 */
2079
2080
/**
2081
 * @defgroup level_3_5_autograd Automatic Differentiation
2082
 * @{
2083
 */
2084
2085
/**
2086
 * Compute the backward graph, assuming the provided symbolic graph only contains the "forward" part from sources to destinations.
2087
 * This is effectively what other libs call the "autograd" or automatic differentiation process (specifically, "reverse AD").
2088
 * For an expression y = f(x), to compute dx, x is the wrt_symbol and y is the f_symbol.
2089
 * @param graph The symbolic graph.
2090
 * @param f_symbols The tensor symbols array of the result (or loss).
2091
 * @param f_symbol_size The size of the f symbols array.
2092
 * @param wrt_symbols The tensor symbols array of the inputs.
2093
 * @param wrt_symbol_size The size of the wrt symbols array.
2094
 * @param sources The source execution nodes array for the computation.
2095
 * @param source_size The size of the source nodes array.
2096
 * @param destinations The destination execution nodes array for the computation.
2097
 * @param destination_size The size of the destination nodes array.
2098
 */
2099
void ccv_nnc_symbolic_graph_backward(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const f_symbols, const int f_symbol_size, const ccv_nnc_tensor_symbol_t* const wrt_symbols, const int wrt_symbol_size, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size);
2100
/**
2101
 * Get the symbol that contains the gradient. The list will be flushed if the ccv_nnc_symbolic_graph_backward function is called again.
2102
 * @param graph The symbolic graph.
2103
 * @param symbol The tensor symbol we want to retrieve its gradient (must be one of the wrt symbols or the f symbols).
2104
 * @return A tensor symbol that represents the gradient.
2105
 */
2106
CCV_WARN_UNUSED(ccv_nnc_tensor_symbol_t) ccv_nnc_tensor_symbol_for_backward(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t symbol);
2107
/**
2108
 * Get the execution node symbol for a tensor symbol. This is used to retrieve the execution node for a gradient tensor symbol.
2109
 * @param graph The symbolic graph.
2110
 * @param symbol The tensor symbol that represents the gradient (must be one of the wrt symbols).
2111
 * @return An execution node symbol that generates the gradient.
2112
 */
2113
CCV_WARN_UNUSED(ccv_nnc_graph_exec_symbol_t) ccv_nnc_graph_exec_symbol_for_backward(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t symbol);
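/**
 * Example (a sketch, not part of the original header): run reverse AD over a forward-only
 * graph, then fetch the gradient symbol for an input. The symbols loss and w are assumed to
 * exist in the graph; the list helpers are assumed from ccv_nnc_easy.h.
 * @code
 * ccv_nnc_symbolic_graph_backward(graph,
 *   TENSOR_SYMBOL_LIST(loss), TENSOR_SYMBOL_LIST(w),
 *   SYMBOLIC_GRAPH_SOURCES(graph), SYMBOLIC_GRAPH_DESTINATIONS(graph));
 * ccv_nnc_tensor_symbol_t dw = ccv_nnc_tensor_symbol_for_backward(graph, w);
 * ccv_nnc_graph_exec_symbol_t dw_exec = ccv_nnc_graph_exec_symbol_for_backward(graph, dw);
 * @endcode
 */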
2114
2115
/** @} */
2116
2117
/**
2118
 * @defgroup level_3_5_while While Loop
2119
 * @{
2120
 */
2121
2122
/**
2123
 * @page symbolic_while Construct a "while" loop in a symbolic graph
2124
 *
2125
 * (This document was written in 2016; since then, Caffe2 added support for a While loop (as a sub-graph), and a similar
2126
 * implementation was added for ONNX as well.)
2127
 *
2128
 * In NNC, a computation graph cannot contain cycles. Thus, there is no flexible way to express loops.
2129
 *
2130
 * A little survey on this problem:
2131
 *
2132
 * * Caffe2 supports a specific type of recurrent neural network.
2133
 *
2134
 * * TensorFlow, as it stands, supports a while construct. Its while construct is very straightforward: a body and
2135
 *   a condition are provided, and you can construct whatever graph you want.
2136
 *
2137
 * * mxnet supports recurrent neural networks by unrolling them into a normal non-looped graph.
2138
 *
2139
 * * Theano supports "scan" ops, which are a terminable loop (with a loop variant, known as a sequence).
2140
 *
2141
 * * CNTK supports this with custom BrainScript. Within BrainScript, you can access the previous state in a
2142
 *   function, which therefore effectively supports calling a method multiple times (looping over).
2143
 *
2144
 * Of the above, Caffe2 and mxnet gave up on supporting a generic loop for performance reasons. TensorFlow supports
2145
 * a generic while loop, with all the trouble it may introduce (see the nested while loop bug in TensorFlow that
2146
 * was recently fixed). Theano picked a point that seems pretty sweet, although there are limitations. CNTK's BrainScript
2147
 * is a DSL; they can do whatever they want, with the drawback that they need to implement a language runtime.
2148
 * TensorFlow, Theano and CNTK all support auto-differentiation over the while loop with a tape (Wengert list).
2149
 *
2150
 * A simple way to support loops is to support conditional jumps. In fact, conditional jump is a more generic way
2151
 * of doing loops. However, once you take into consideration that a fully differentiable computation graph
2152
 * needs to be supported, it is terrible. With conditional jumps, it is really hard to know which tensor
2153
 * is used where, and thus to keep track for reverse accumulation (backward propagation). There is no counter or
2154
 * anything of the sort, so it is pretty hard to trace back which line is executed how many times. Compounding this with
2155
 * NNC's promise that whatever shows on the graph as "parallel" computable will be parallel computed,
2156
 * it is close to impossible to keep track if conditional jumps are used in their raw form. Certain restrictions must be
2157
 * applied to how loops are done. The compromise comes from a closer examination of NNC's preferences.
2158
 *
2159
 * NNC prefers to have a graph without cycles. It also prefers to be fully differentiable. Another important
2160
 * criterion is that most functions in NNC require SSA (Static Single Assignment) representation. With these in
2161
 * mind, support for the while loop has to be strict.
2162
 *
2163
 * Luckily, there are well-formalized ways of supporting this in literature and practice. Because it is
2164
 * well-formalized, translating this into the existing NNC implementation is actually pretty straightforward. We
2165
 * are going to introduce a special version of the while loop. In literature that discusses SSA, it may be
2166
 * called a parameterized loop. For us, it works like this:
2167
 *
2168
 * To construct a while loop for existing NNC graph, you need to be able to separate the existing graph into
2169
 * two sub-graphs.
2170
 *
2171
 * The while-loop sub-graph (WL sub-graph) contains a set of incoming nodes (I-nodes), condition false output
2172
 * nodes (CFO-nodes) and end nodes (E-nodes). Each set has its own properties, but in short, all incoming edges
2173
 * to the WL sub-graph connect to one of the I-nodes, but nothing else. All outgoing edges from the WL sub-graph
2174
 * connect to one of the CFO-nodes, but nothing else. A node can be an I-node, a CFO-node or an E-node,
2175
 * non-exclusively.
2176
 *
2177
 * There are also 3 types of tensors used for all nodes in the WL sub-graph: input tensors (I-tensors) are tensors
2178
 * that are inputs to some nodes, and will never be outputs. Output tensors (O-tensors) are tensors that are
2179
 * outputs from some nodes, but never inputs to any nodes. I-tensors can be outputs from some nodes
2180
 * outside of the WL sub-graph. O-tensors can be inputs to some nodes outside of the WL sub-graph. Internal
2181
 * tensors (IN-tensors) are not visible outside of the WL sub-graph; therefore, they can be both inputs and outputs
2182
 * of some nodes inside the sub-graph. Some tensors can be fed back into the WL sub-graph, given either as
2183
 * O-tensors or IN-tensors. A parameter map can be given in these cases to describe which maps to what.
2184
 *
2185
 * A WL sub-graph is driven like this: the WL sub-graph runs until all CFO-nodes are reached. At this
2186
 * point, the while_f condition is checked. If true, we continue until all the end-nodes are reached. At this
2187
 * point, we increase the counter, reconfigure the WL sub-graph with the parameter map, and run from the I-nodes all
2188
 * over again. When all CFO-nodes are reached, the condition is checked again; if false, the WL sub-graph terminates,
2189
 * and the graph continues from the nodes that are pointed to by the CFO-nodes.
2190
 *
2191
 * Given these constraints, doing automatic differentiation is not that hard any more. A WL sub-graph, from
2192
 * the whole graph's point of view, is just a giant command that supports both forward / backward operations, with
2193
 * some extra information passed around in the form of userdata (tape).
2194
 *
2195
 * For the WL sub-graph, we can continue to leverage the compile / backward functions already written for
2196
 * the symbolic graph as well.
2197
 *
2198
 * For the compile function, we just need to take care of parameter maps (these need to be converted into bound
2199
 * tensors).
2200
 *
2201
 * For the backward function, we need to convert parameter maps from assigners (thus, y = x) to accumulators (x += y).
2202
 *
2203
 * This function will replace the nodes that it affects with one sub-graph node. Thus, how to drive this
2204
 * sub-graph is opaque. Its backward form is opaque as well.
2205
 *
2206
 * There are no connections between its nodes and the outside graph nodes other than the three sets:
2207
 *
2208
 * 1. Incoming nodes, the set of nodes that receive the incoming edges from outside; they cannot have edges
2209
 *    pointed to by inside nodes. The sub-graph computation starts from these incoming nodes;
2210
 *
2211
 * 2. Condition false output nodes; when the condition is false, we will break out of this while loop. These
2212
 *    nodes point to the outside nodes, but no inside nodes;
2213
 *
2214
 * 3. End nodes, the set of nodes that mark the end of the while body; after these nodes are executed,
2215
 *    we will return to the incoming nodes. These end nodes shouldn't have any edges pointing to inside nodes
2216
 *    (OK if end nodes are condition true output nodes as well);
2217
 *
2218
 * Since these will become a sub-graph (which, to its owner graph, is just a simple "node"), it will have inputs
2219
 * and outputs. Besides that, the loop body needs to be parameterized to be SSA compliant (see:
2220
 * https://www.cs.cmu.edu/~fp/courses/15411-f13/lectures/06-ssa.pdf). Thus, a list of body parameters needs to
2221
 * be provided.
2222
 */
2223
2224
/**
2225
 * @defgroup level_3_5_while_essentials While Loop Essentials
2226
 * @{
2227
 */
2228
2229
/**
2230
 * The given tensors contain all the common / input / output tensors specified in the sub-graph.
2231
 */
2232
typedef int(*ccv_nnc_graph_while_f)(ccv_nnc_tensor_t* const* const inputs, const int input_size, const void* const data);
2233
/**
2234
 * Create a tensor tape that can be used to record for while loop or case..of.
2235
 * @return A ccv_nnc_tensor_tape_t pointer.
2236
 */
2237
CCV_WARN_UNUSED(ccv_nnc_tensor_tape_t*) ccv_nnc_tensor_tape_new(void);
2238
/**
2239
 * Deallocate the tensor tape and all the memory it allocated.
2240
 * @param tape The tensor tape object.
2241
 */
2242
void ccv_nnc_tensor_tape_free(ccv_nnc_tensor_tape_t* const tape);
2243
/**
2244
 * The API to operate on the symbolic graph is more involved than the one for the concrete graph for while loops.
2246
 * The reason is that the symbolic graph operates in SSA form (static single assignment); therefore, the while
2247
 * loops for the symbolic graph have to be parameterized.
2247
 * @param graph The symbolic graph.
2248
 * @param cmd The command identifier, can be either CCV_NNC_GRAPH_FORWARD or CCV_NNC_GRAPH_BACKWARD.
2249
 * @param while_graph The sub-graph to run the while loop.
2250
 * @param name The name of the while loop. Optional.
2251
 * @return A while loop execution symbol (backed by a sub-graph) of the given graph.
2252
 */
2253
ccv_nnc_graph_exec_symbol_t ccv_nnc_symbolic_graph_while(ccv_nnc_symbolic_graph_t* const graph, const uint32_t cmd, ccv_nnc_symbolic_graph_t* const while_graph, const char* const name);
2254
/**
2255
 * Set the expression to be evaluated, and the nodes at which it is evaluated.
2256
 * @param while_graph The symbolic graph that will run the while loop.
2257
 * @param while_expr The function pointer to the expression.
2258
 * @param while_data A custom data provided to the expression evaluation function.
2259
 * @param inputs The input tensor symbols array to the expression evaluation function.
2260
 * @param input_size The size of the input tensor symbols array.
2261
 * @param breakpoints The execution node symbols at which the while loop will pause, evaluate the expression, and choose to either break out or continue.
2262
 * @param breakpoint_size The size of the execution node symbols array.
2263
 */
2264
void ccv_nnc_symbolic_graph_set_while_expr(ccv_nnc_symbolic_graph_t* const while_graph, const ccv_nnc_graph_while_f while_expr, const void* const while_data, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, const ccv_nnc_graph_exec_symbol_t* const breakpoints, const int breakpoint_size);
2265
/**
2266
 * Set the loop carry parameters for reuse (parameterized loop; these will be carried over to the next loop).
2267
 * @param while_graph The symbolic graph that will run the while loop.
2268
 * @param symbol_map A pair of tensor symbols array, where the source tensor symbol is the output tensor symbol in this loop, the destination tensor symbol is the input tensor symbol in the next loop.
2269
 * @param symbol_map_size The size of the symbol map array.
2270
 */
2271
void ccv_nnc_symbolic_graph_set_carry_overs(ccv_nnc_symbolic_graph_t* const while_graph, const ccv_nnc_tensor_symbol_map_t* const symbol_map, const int symbol_map_size);
2272
/**
2273
 * Retrieve the special (magical) tensor symbol that retains the while loop counter (thus, dimension of 1x1x1, CCV_64S type).
2274
 * @param while_graph The symbolic graph that will run the while loop.
2275
 * @return A tensor symbol represents the implicit loop count.
2276
 */
2277
CCV_WARN_UNUSED(ccv_nnc_tensor_symbol_t) ccv_nnc_tensor_symbol_for_while_count(const ccv_nnc_symbolic_graph_t* const while_graph);
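/**
 * Example (a sketch, not part of the original header): a loop keyed off the implicit loop
 * counter that runs 5 times. The loop body and its breakpoint exec (body_exec) are assumed to
 * be built in while_graph; TENSOR_SYMBOL_LIST / GRAPH_EXEC_SYMBOL_LIST are assumed from
 * ccv_nnc_easy.h.
 * @code
 * static int run_5_times(ccv_nnc_tensor_t* const* const inputs, const int input_size, const void* const data)
 * {
 *   return inputs[0]->data.i64[0] < 5; // non-zero continues the loop, 0 breaks out
 * }
 * // ...
 * ccv_nnc_symbolic_graph_t* const while_graph = ccv_nnc_symbolic_graph_new();
 * ccv_nnc_graph_exec_symbol_t loop = ccv_nnc_symbolic_graph_while(graph, CCV_NNC_GRAPH_FORWARD, while_graph, "loop");
 * // ... build the loop body in while_graph, with body_exec as its breakpoint ...
 * ccv_nnc_tensor_symbol_t count = ccv_nnc_tensor_symbol_for_while_count(while_graph);
 * ccv_nnc_symbolic_graph_set_while_expr(while_graph, run_5_times, 0, TENSOR_SYMBOL_LIST(count), GRAPH_EXEC_SYMBOL_LIST(body_exec));
 * @endcode
 */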
2278
/**
2279
 * Extract the sub-graph of the while loop from a symbol.
2280
 * @param graph The symbolic graph.
2281
 * @param while_symbol The execution node symbol.
2282
 * @return The sub-graph that represents a while loop.
2283
 */
2284
CCV_WARN_UNUSED(ccv_nnc_symbolic_graph_t*) ccv_nnc_symbolic_graph_from_while_symbol(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t while_symbol);
2285
/**
2286
 * Construct a looped concrete graph. Note that this interface is a little bit simpler than the one for the symbolic
2287
 * graph. The reason is that a concrete graph operates on allocated tensors; thus, there is no mapping of tensor
2288
 * symbols between the parent graph and the while graph. (The reason to have a mapping in symbolic graphs is to
2289
 * constrain variable leaking between the sub-graph and the parent graph.)
2290
 * @param graph The concrete graph.
2291
 * @param cmd The command identifier, can be either CCV_NNC_GRAPH_FORWARD or CCV_NNC_GRAPH_BACKWARD.
2292
 * @param while_graph The sub-graph to run the while loop.
2293
 * @return An execution node that represents the sub-graph.
2294
 */
2295
CCV_WARN_UNUSED(ccv_nnc_graph_exec_t) ccv_nnc_graph_while(ccv_nnc_graph_t* const graph, const uint32_t cmd, ccv_nnc_graph_t* const while_graph);
2296
/**
2297
 * Set the evaluated expression for the while loop. The while loop will break out if the expression evaluates to 0.
2298
 * @param while_graph The concrete graph that will run the while loop.
2299
 * @param while_expr The function pointer to the expression.
2300
 * @param while_data A custom data provided to the expression evaluation function.
2301
 * @param inputs The input tensors array to the expression evaluation function.
2302
 * @param input_size The size of the input tensors array.
2303
 * @param breakpoints The execution nodes at which the while loop will pause, evaluate the expression, and choose to either break out or continue.
2304
 * @param breakpoint_size The size of the execution nodes array.
2305
 */
2306
void ccv_nnc_graph_set_while_expr(ccv_nnc_graph_t* const while_graph, const ccv_nnc_graph_while_f while_expr, const void* const while_data, ccv_nnc_tensor_t* const* const inputs, const int input_size, const ccv_nnc_graph_exec_t* const breakpoints, const int breakpoint_size);
2307
/**
2308
 * Get the special tensor for the while loop count. It contains one uint64_t value. We keep an implicit count
2309
 * when evaluating the while loop, and you can access it with this tensor.
2310
 * @param while_graph The concrete graph that will run the while loop.
2311
 * @return A special tensor from which you can retrieve the loop count at .data.i64[0].
2312
 */
2313
CCV_WARN_UNUSED(ccv_nnc_tensor_t) ccv_nnc_tensor_for_while_count(const ccv_nnc_graph_t* const while_graph);
2314
/**
2315
 * Retrieve the sub-graph from an execution node.
2316
 * @param graph The concrete graph.
2317
 * @param exec The execution node represents the sub-graph.
2318
 * @return The sub-graph.
2319
 */
2320
CCV_WARN_UNUSED(ccv_nnc_graph_t*) ccv_nnc_graph_from_while_exec(const ccv_nnc_graph_t* const graph, ccv_nnc_graph_exec_t exec);
2321
2322
/** @} */
2323
2324
/**
2325
 * @defgroup level_3_5_while_others While Loop Others
2326
 * @{
2327
 */
2328
2329
/**
2330
 * For a given tape on a given graph, update the input / output tensors so new versions will be created (if needed).
2331
 * @param tape The tensor tape object.
2332
 * @param graph The concrete graph this tensor tape is executing in.
2333
 * @param input_flags The flags associated with input tensors.
2334
 * @param inputs The input tensors.
2335
 * @param input_size The size of input tensors array.
2336
 * @param output_flags The flags associated with output tensors.
2337
 * @param outputs The output tensors.
2338
 * @param output_size The size of output tensors array.
2339
 */
2340
void ccv_nnc_tensor_tape_io(ccv_nnc_tensor_tape_t* const tape, const ccv_nnc_graph_t* const graph, const int* const input_flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, const int* const output_flags, ccv_nnc_tensor_t* const* const outputs, const int output_size);
2341
/**
2342
 * Retrieve the number we associated with the execution node as recorded on the tape for a particular run of the graph.
2343
 * @param tape The tensor tape object.
2344
 * @param graph The concrete graph this tensor tape is executing in.
2345
 * @param exec The execution node.
2346
 * @return The number associated with the execution node.
2347
 */
2348
uint64_t ccv_nnc_tensor_tape_numbering(ccv_nnc_tensor_tape_t* const tape, const ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec);
2349
/**
2350
 * Set the number we associated with the execution node as recorded on the tape for a particular run of the graph.
2351
 * @param tape The tensor tape object.
2352
 * @param graph The concrete graph this tensor tape is executing in.
2353
 * @param exec The execution node.
2354
 * @param numbering The number associated with the execution node.
2355
 */
2356
void ccv_nnc_tensor_tape_set_numbering(ccv_nnc_tensor_tape_t* const tape, ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, const uint64_t numbering);
2357
/**
2358
 * Augmented tensor to run a graph with a while loop (an obvious example is a dynamic RNN).
2359
 */
2360
typedef struct ccv_nnc_tensor_multiview_s {
2361
  // This is an augmented ccv_nnc_tensor_view_t
2362
  // Namely, it can point to multiple versions of tensors.
2363
  int type; // This type is CCV_NNC_TENSOR_MULTI_VIEW
2364
  // kind specifies how the multi-version tensors are stored.
2365
  // See the comment on the follow-up enums.
2366
  uint8_t kind;
2367
  uint16_t repeat;
2368
  intptr_t anchor; // on which graph this multi-view tensor is wrapped. This helps to determine on which level the multi-view tensor should be unwrapped.
2369
  // If this tensor points to a tensor view, data.u8 - offset is the real pointer start.
2370
  off_t offset;
2371
  struct ccv_nnc_tensor_multiview_s* p; // If this is wrapped with another multiview tensor. Get to the parent one.
2372
  ccv_nnc_tensor_t* it; // Current tensor (tensor in use), this is updated along with the graph computation.
2373
  // This is useful because by just traversing it, I can get the latest up-to-date reference to this multi-view tensor.
2374
  ccv_array_t* sp; // Synchronized tensor views. This corresponds to ccv_nnc_tensor_synchronize_to_multiview method, that records all the tensors registered for updates.
2375
  ccv_nnc_tensor_t* _inline_data[4];
2376
  ccv_nnc_tensor_t** _heap_data;
2377
} ccv_nnc_tensor_multiview_t;
2378
3.40k
#define CCV_NNC_MULTIVIEW_DATA(x) ((x)->_heap_data ? (x)->_heap_data : (x)->_inline_data)
2379
234
#define CCV_NNC_MULTIVIEW_PHI (intptr_t)0x1 /**< Denote this is a phi multi-view tensor. */
2380
2381
enum {
2382
  CCV_NNC_MULTIVIEW_K0N = 0, /**< All of them are repeated. */
2383
  CCV_NNC_MULTIVIEW_K1N = 1, /**< The first one is the first, the second one starts to repeat. (0111111...) */
2384
};
2385
#define CCV_NNC_MULTIVIEW_K01(x) ((x)->kind == CCV_NNC_MULTIVIEW_K0N && (x)->repeat == 1)
2386
/**
2387
 * Setup a tensor multiview with a given set of tensors.
2388
 * A multiview tensor points to a list of tensors, and its access depends on the loop count.
2389
 * For example, if we have a multiview tensor with a list of [a, b, c, d], kind is 1N and repeat is 3,
2390
 * then for loop counts 0, 1, 2, 3, 4, 5 the corresponding tensors used will be a, b, c, d, b, c. If kind
2391
 * is 0N and repeat is 4, they will be a, b, c, d, a, b.
2392
 * @param data[] The pointer to the list of tensors the multiview object can point to.
2393
 * @param kind Can be either CCV_NNC_MULTIVIEW_K0N or CCV_NNC_MULTIVIEW_K1N, basically whether to keep the initial tensor.
2394
 * @param repeat The length of the repeat.
2395
 * @param graph Which graph this multiview object attaches to.
2396
 * @param tensor_multiview The tensor multiview object to be updated.
2397
 */
2398
void ccv_nnc_tensor_multiview(ccv_nnc_tensor_t* data[], const uint8_t kind, const uint16_t repeat, const ccv_nnc_graph_t* const graph, ccv_nnc_tensor_multiview_t* const tensor_multiview);
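/**
 * Example (a sketch, not part of the original header): two pre-allocated tensors rotated by
 * the loop counter. With kind K0N and repeat 2, loop counts 0, 1, 2, 3 map to t0, t1, t0, t1.
 * t0 / t1 and the concrete while graph are assumed to exist already.
 * @code
 * ccv_nnc_tensor_t* data[2] = { t0, t1 }; // assumed pre-allocated tensors
 * ccv_nnc_tensor_multiview_t mv;
 * ccv_nnc_tensor_multiview(data, CCV_NNC_MULTIVIEW_K0N, 2, while_graph, &mv);
 * // ... use mv inside the while graph ...
 * ccv_nnc_tensor_multiview_free(mv);
 * @endcode
 */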
2399
/**
2400
 * Since tensor_multiview will never be allocated with a *_new method, the *_free method simply frees anything that is dynamically allocated afterwards (such as the reference items).
2401
 * @param tensor_multiview The tensor multiview object to be deallocated.
2402
 */
2403
void ccv_nnc_tensor_multiview_free(const ccv_nnc_tensor_multiview_t tensor_multiview);
2404
/**
2405
 * Setup a tensor as a reference to a tensor multiview; thus, when the tensor multiview's it (current tensor) updates, the tensor reference's data.u8 will get updated as well (pointing to the same memory region as it).
2406
 * @param tensor_multiview The tensor multiview object.
2407
 * @param tensor The tensor that will be updated along with the multiview object.
2408
 */
2409
void ccv_nnc_tensor_synchronize_to_multiview(ccv_nnc_tensor_multiview_t* const tensor_multiview, ccv_nnc_tensor_t* const tensor);
2410
/**
2411
 * Send a broadcast to the subscribers of the multiview; call this at the beginning of exec.
2412
 * @param tensor_multiview The tensor multiview object.
2413
 */
2414
void ccv_nnc_tensor_multiview_synchronize(ccv_nnc_tensor_multiview_t* const tensor_multiview);
2415
2416
/** @} */
2417
2418
/** @} */
2419
2420
/**
2421
 * @defgroup level_3_5_case_of Branching
2422
 * @{
2423
 */
2424
2425
/**
2426
 * @page symbolic_switch Construct "switch" control structure in symbolic graph
2427
 *
2428
 * Here I use the keyword case_of. Providing a "switch" control structure within NNC has some nice properties,
2429
 * even though you can technically simulate this with a while loop.
2430
 *
2431
 * 1. More optimal memory allocation: with a "switch" control structure, memory can be multiplexed for each code
2432
 *    path because they are mutually exclusive.
2433
 *
2434
 * 2. No tape should be used within each branch: if we simulate with a "while" loop, any results from within
2435
 *    the "switch" statement have to be kept on the tape, which is inefficient because you don't need any tape
2436
 *    for the "switch" statement other than to record which path is taken.
2437
 *
2438
 * The particular "switch" control structure provided here is a multi-way structured "switch". Each branch is a
2439
 * sub-graph, so it is well-scoped. A node branches out based on the case_of condition's return value to one of
2440
 * the branches (numbered from 0 to n; -1 means no path taken). If no path is taken, the output tensors will be
2441
 * assigned the default tensors and execution continues. Otherwise, the computation within the sub-graph will be
2442
 * carried out, the output tensors will be assigned the tensors specified within that sub-graph, and execution
2443
 * continues.
2444
 *
2445
 * If we want to consider speculative execution in the future, we need to revisit our memory allocation scheme.
2446
 */
2447
2448
/**
2449
 * Function prototype to evaluate a branch expression.
2450
 */
2451
typedef int(*ccv_nnc_graph_case_of_f)(ccv_nnc_tensor_t* const* const inputs, const int input_size, const void* const data);
2452
/**
2453
 * Create a new case..of execution node symbol.
2454
 * @param graph The symbolic graph.
2455
 * @param cmd The command identifier, can be either CCV_NNC_GRAPH_FORWARD or CCV_NNC_GRAPH_BACKWARD.
2456
 * @param inputs The input tensor symbols array for the expression.
2457
 * @param input_size The size of the input tensor symbols array.
2458
 * @param symbol_map The pair of tensor symbols array where the source is the input tensor symbol and the destination is the output tensor symbol.
2459
 * @param symbol_map_size The size of symbol map array.
2460
 * @param name The name of the case..of graph. Optional.
2461
 * @return An execution node symbol that represents the case..of graph.
2462
 */
2463
CCV_WARN_UNUSED(ccv_nnc_graph_exec_symbol_t) ccv_nnc_symbolic_graph_case_of_new(ccv_nnc_symbolic_graph_t* const graph, const uint32_t cmd, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, const ccv_nnc_tensor_symbol_map_t* const symbol_map, const int symbol_map_size, const char* const name);
2464
/**
2465
 * Set the expression to be evaluated when choosing which sub-graph to branch to.
2466
 * @param graph The symbolic graph.
2467
 * @param exec The execution node symbol that represents the case..of graph.
2468
 * @param case_of The function pointer to evaluate.
2469
 * @param case_of_data The data associated with the function pointer.
2470
 */
2471
void ccv_nnc_symbolic_graph_set_case_of_expr(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t exec, ccv_nnc_graph_case_of_f case_of, const void* case_of_data);
2472
/**
2473
 * Set a sub-graph as one of the branches for the case..of graph.
2474
 * @param graph The symbolic graph.
2475
 * @param symbol The execution node symbol that represents the case..of graph.
2476
 * @param case_graph The sub-graph for one of the branches.
2477
 * @param case_of The index assigned to this sub-graph (expression returns this index to determine which sub-graph to execute).
2478
 * @param symbol_map The pair of tensor symbols array where the source is the output tensor symbol of the sub-graph, and the destination is the output tensor symbol of the execution node symbol.
2479
 * @param symbol_map_size The size of the symbol map array.
2480
 */
2481
void ccv_nnc_symbolic_graph_set_case_of(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t symbol, ccv_nnc_symbolic_graph_t* const case_graph, const int case_of, const ccv_nnc_tensor_symbol_map_t* const symbol_map, const int symbol_map_size);
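/**
 * A minimal sketch of how the three case..of calls above fit together, assuming
 * ccv_nnc_symbolic_graph_new, ccv_nnc_tensor_symbol_new and the CPU_TENSOR_NHWC parameter
 * macro from elsewhere in this library; the shapes are made up for illustration.
 * @code
 * static int pick_branch(ccv_nnc_tensor_t* const* const inputs, const int input_size, const void* const data)
 * {
 * 	// Take branch 0 when the first input is non-negative, otherwise take no path (-1).
 * 	return inputs[0]->data.f32[0] >= 0 ? 0 : -1;
 * }
 *
 * ccv_nnc_symbolic_graph_t* const graph = ccv_nnc_symbolic_graph_new();
 * const ccv_nnc_tensor_symbol_t x = ccv_nnc_tensor_symbol_new(graph, CPU_TENSOR_NHWC(32F, 1), "x");
 * const ccv_nnc_tensor_symbol_t y = ccv_nnc_tensor_symbol_new(graph, CPU_TENSOR_NHWC(32F, 1), "y");
 * // If no path is taken, y is assigned from x per this default symbol map.
 * const ccv_nnc_tensor_symbol_map_t default_map = { .source = x, .destination = y };
 * const ccv_nnc_graph_exec_symbol_t case_of = ccv_nnc_symbolic_graph_case_of_new(graph, CCV_NNC_GRAPH_FORWARD, &x, 1, &default_map, 1, "case_of");
 * ccv_nnc_symbolic_graph_set_case_of_expr(graph, case_of, pick_branch, 0);
 * // branch_0 is a separately-built sub-graph; its output y0 maps back to y.
 * ccv_nnc_symbolic_graph_t* const branch_0 = ccv_nnc_symbolic_graph_new();
 * const ccv_nnc_tensor_symbol_t y0 = ccv_nnc_tensor_symbol_new(branch_0, CPU_TENSOR_NHWC(32F, 1), "y0");
 * const ccv_nnc_tensor_symbol_map_t branch_map = { .source = y0, .destination = y };
 * ccv_nnc_symbolic_graph_set_case_of(graph, case_of, branch_0, 0, &branch_map, 1);
 * @endcode
 */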
2482
/**
2483
 * Create a new case..of execution node.
2484
 * @param graph The concrete graph.
2485
 * @param cmd The command identifier, can be either CCV_NNC_GRAPH_FORWARD or CCV_NNC_GRAPH_BACKWARD.
2486
 * @param inputs The input tensors array supplied to the expression.
2487
 * @param input_size The size of the input tensors array.
2488
 * @param outputs The output tensors array.
2489
 * @param output_size The size of the output tensors array.
2490
 * @return An execution node that represents the case..of graph.
2491
 */
2492
CCV_WARN_UNUSED(ccv_nnc_graph_exec_t) ccv_nnc_graph_case_of_new(ccv_nnc_graph_t* const graph, const uint32_t cmd, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size);
2493
/**
2494
 * Set the expression to be evaluated when choosing which sub-graph to branch to.
2495
 * @param graph The concrete graph.
2496
 * @param exec The execution node that represents the case..of graph.
2497
 * @param case_of The function pointer to evaluate.
2498
 * @param case_of_data The data associated with the function pointer.
2499
 * @param offset An integer added to the expression output to help choose the index. Thus, real index = expression index + offset.
2500
 */
2501
void ccv_nnc_graph_set_case_of_expr(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, ccv_nnc_graph_case_of_f case_of, const void* case_of_data, const int offset);
2502
/**
2503
 * Set a sub-graph as one of the branches for the case..of graph.
2504
 * @param graph The concrete graph.
2505
 * @param exec The execution node that represents the case..of graph.
2506
 * @param case_graph The sub-graph for one of the branches.
2507
 * @param case_of The index assigned to this sub-graph (expression returns this index + offset to determine which sub-graph to execute).
2508
 */
2509
void ccv_nnc_graph_set_case_of(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, ccv_nnc_graph_t* const case_graph, const int case_of);
2510
2511
/** @} */
2512
2513
/**
2514
 * @defgroup level_3_5_minimizer Gradient-based Optimization
2515
 * @{
2516
 */
2517
2518
/**
2519
 * This is comparable to Caffe's solver or TensorFlow's optimizer. It goes a step further than just
2520
 * computing the gradients: it also applies the gradients to update parameters to minimize the loss.
2521
 * @param graph The symbolic graph.
2522
 * @param minimizer The wrapped command that represents a particular optimization strategy.
2523
 * @param losses The tensor symbols array of losses.
2524
 * @param loss_size The size of the loss symbols array.
2525
 * @param parameters The parameter tensor symbols to optimize.
2526
 * @param parameter_size The size of parameter symbols array.
2527
 * @param inputs The additional input symbols we compute gradient against.
2528
 * @param input_size The size of the additional input symbols array.
2529
 * @param sources The source execution nodes array.
2530
 * @param source_size The size of source nodes array.
2531
 * @param destinations The destinations execution nodes array.
2532
 * @param destination_size The size of destination nodes array.
2533
 * @param gradients The tensor symbols that represent the gradients for the update; should be the same size as the parameters array size + input array size. This can be 0 (optional).
2534
 * @param updated_parameters The tensor symbols that represents the updated parameters, should be the same size as the parameters array.
2535
 * @param saved_aux The tensor symbols that are helpful for the particular optimization strategy.
2536
 * @param graph_exec_symbols The execution node symbols for the updates, should be the same size as the parameters array.
2537
 */
2538
void ccv_nnc_symbolic_graph_minimize(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_cmd_t minimizer, const ccv_nnc_tensor_symbol_t* const losses, const int loss_size, const ccv_nnc_tensor_symbol_t* const parameters, const int parameter_size, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size, ccv_nnc_tensor_symbol_t* const gradients, ccv_nnc_tensor_symbol_t* const updated_parameters, ccv_nnc_tensor_symbol_map_t* const saved_aux, ccv_nnc_graph_exec_symbol_t* const graph_exec_symbols);
2539
/**
2540
 * The number of extra saved aux per parameter depends only on the command. For example, SGD with momentum requires 1 aux (for momentum).
2541
 * Others require more.
2542
 * @param minimizer The wrapped command that represents a particular optimization strategy.
2543
 * @return the number of saved aux per parameter.
2544
 */
2545
CCV_WARN_UNUSED(int) ccv_nnc_minimizer_saved_aux_size(const ccv_nnc_cmd_t minimizer);
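/**
 * A minimal sketch of sizing the output arrays for ccv_nnc_symbolic_graph_minimize, assuming
 * an already-built graph with a loss symbol, parameters, sources and destinations. The
 * CMD_SGD_FORWARD macro comes from the generated command headers; its exact parameter list
 * is an assumption here.
 * @code
 * const ccv_nnc_cmd_t minimizer = CMD_SGD_FORWARD(0, 0.001, 1, 0.99, 0.9, 0.9); // parameter order assumed
 * const int saved_aux_size = ccv_nnc_minimizer_saved_aux_size(minimizer); // 1 for SGD (the momentum)
 * ccv_nnc_tensor_symbol_t updated_parameters[parameter_size];
 * ccv_nnc_tensor_symbol_map_t saved_aux[parameter_size * saved_aux_size];
 * ccv_nnc_graph_exec_symbol_t update_execs[parameter_size];
 * ccv_nnc_symbolic_graph_minimize(graph, minimizer, &loss, 1, parameters, parameter_size,
 * 	0, 0, // no additional inputs to compute gradients against
 * 	sources, source_size, destinations, destination_size,
 * 	0, // don't need the gradient symbols back
 * 	updated_parameters, saved_aux, update_execs);
 * @endcode
 */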
2546
2547
/** @} */
2548
2549
/**
2550
 * @defgroup level_3_5_simplify Graph Simplification
2551
 * @{
2552
 */
2553
2554
/**
2555
 * @page symbolic_simplify Symbolic graph simplification
2556
 *
2557
 * We make a distinction between graph simplifications and optimizations (autotune).
2558
 *
2559
 * Simplification: rewrite the graph so that the resulting graph has fewer nodes. This is done on the symbolic
2560
 * graph only. Passes that are "simplifications" include pruning, common sub-expression elimination, constant
2561
 * folding etc.
2562
 *
2563
 * Optimization (autotune): graph optimization can have more objectives. The most obvious objective is to reduce
2564
 * computation time. For the symbolic graph, passes that reduce computation time include data layout optimizations,
2565
 * auto parallel etc (in normal optimization implementations, they have a cost model to guide the optimization.
2566
 * NNC's implementation uses a cost database that profiles the time cost on the device to guide the optimization.
2567
 * We call it autotune to distinguish it from the normal optimization passes because we need device profile data).
2568
 * There could be other objectives, for example, in many deep learning applications, reducing memory footprint
2569
 * can be desirable. However, as always in computer science, memory and time are a typical trade-off. Memory
2570
 * optimization almost always results in longer computation time, and the objective is to trade between these two
2571
 * with a bias term (in other frameworks such as TensorFlow, the memory optimizer uses a list of "cheap ops" to
2572
 * bias between the time and memory footprint).
2573
 *
2574
 * For graph optimizations, it can happen on both the symbolic graph level as well as the concrete graph level.
2575
 * For NNC, symbolic graph is already very explicit (data layout, device allocation and data transfer between
2576
 * devices / nodes, even the command backend can all be specified on the symbolic graph), however, some
2577
 * information is unknown until it is compiled down to concrete graph (tensor addresses, tensor initialization
2578
 * etc.), and graph optimizations need all the information to optimize. Keeping the flexibility to do
2579
 * optimization on both the symbolic and the concrete graph level therefore seems reasonable.
2580
 */
2581
2582
enum {
2583
  /**
2584
   * If two commands generate the same outputs, all the places where the newer output is used will be replaced by
2585
   * the older output. Later, in the graph pruning stage, the command that generates the newer output will be
2586
   * eliminated.
2587
   */
2588
  CCV_NNC_SIMPLIFY_COMMON_SUBEXPRESSION_ELIMINATION,
2589
  /**
2590
   * For the given outputs, eliminate unused input tensors, and then eliminate graph execs that don't contribute
2591
   * to the outputs.
2592
   */
2593
  CCV_NNC_SIMPLIFY_GRAPH_PRUNING,
2594
  /**
2595
   * For CCV_NNC_DATA_TRANSFER, if the input / output is the same (on the same device, no alias), we can skip.
2596
   * Similarly, if it is on the same device but an alias of some tensor, in some cases we can skip as well (if neither
2597
   * is a carry-over, bypass etc.)
2598
   */
2599
  CCV_NNC_SIMPLIFY_DATA_TRANSFER_OPT,
2600
  /**
2601
   * Combine a few smaller ops into a bigger one. For now, this functionality is limited. I can only address ops
2602
   * that are sequential.
2603
   */
2604
  CCV_NNC_SIMPLIFY_OPS_FUSION,
2605
  // CCV_NNC_SIMPLIFY_CONSTANT_FOLDING, // This currently is not supported, because we don't have efficient way to express constant in symbolic graph.
2606
};
2607
/**
2608
 * Simplify a graph with given list of passes, in that particular order.
2609
 * Note, when a graph is simplified, its sources / destinations are changed as well.
2610
 * @param graph The symbolic graph.
2611
 * @param passes The array of passes we are going to apply.
2612
 * @param pass_size The size of the passes array.
2613
 * @param binds The tensor symbols we may bind to an input later (it doesn't prevent pruning any execution nodes).
2614
 * @param bind_size The size of the bind array.
2615
 * @param outputs The output tensor symbols we want to retain (we are going to prune any execution nodes that are not related to these outputs).
2616
 * @param output_size The size of the output array.
2617
 * @param sources The source execution node symbols array.
2618
 * @param source_size The size of source node symbols array.
2619
 * @param destinations The destinations execution node symbols array.
2620
 * @param destination_size The size of destination node symbols array.
2621
 */
2622
void ccv_nnc_symbolic_graph_simplify(ccv_nnc_symbolic_graph_t* const graph, const int* const passes, const int pass_size, const ccv_nnc_tensor_symbol_t* const binds, const int bind_size, const ccv_nnc_tensor_symbol_t* const outputs, const int output_size, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size);
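/**
 * A minimal sketch of running a few passes in order, assuming an already-built graph with an
 * output symbol. The SYMBOLIC_GRAPH_SOURCES / SYMBOLIC_GRAPH_DESTINATIONS convenience macros
 * are assumed from elsewhere in this header; explicit arrays work just as well.
 * @code
 * const int passes[] = {
 * 	CCV_NNC_SIMPLIFY_OPS_FUSION,
 * 	CCV_NNC_SIMPLIFY_COMMON_SUBEXPRESSION_ELIMINATION,
 * 	CCV_NNC_SIMPLIFY_GRAPH_PRUNING, // prune last, after the other passes created dead nodes
 * };
 * ccv_nnc_symbolic_graph_simplify(graph, passes, sizeof(passes) / sizeof(passes[0]),
 * 	0, 0, // nothing to bind later
 * 	&output, 1, // retain everything that contributes to this output
 * 	SYMBOLIC_GRAPH_SOURCES(graph), SYMBOLIC_GRAPH_DESTINATIONS(graph));
 * @endcode
 */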
2623
2624
/** @} */
2625
2626
/**
2627
 * @defgroup level_3_5_parallel Automatic Graph Parallelization
2628
 * @{
2629
 */
2630
2631
enum {
2632
  /**
2633
   * Op for reducer / allreducer. Currently only supports sum.
2634
   */
2635
  CCV_NNC_PARALLEL_REDUCE_OP_SUM,
2636
};
2637
2638
/**
2639
 * Turn the existing graph into one capable of running on several devices with different data inputs in parallel.
2640
 * With this method, additional tensor symbols that run on different devices will be created. That
2641
 * said, there are concepts of "broadcast" and "reduce": "broadcast" tensor symbols will be copied to
2642
 * different devices, while "reduce" tensors will be summed from different devices to the default device.
2643
 * "allreducer" concept is simpler. The allreduce operation will be performed on these tensors and then
2644
 * be used on different devices again.
2645
 *
2646
 * Limitations: right now, the way to reduce / allreduce tensors only supports "sum". The data parallel
2647
 * only supports GPU; thus, the nodes that will be duplicated are GPU computations and GPU-memory-backed
2648
 * tensors. Also, right now, the tensors to be broadcasted / allreduced / reduced should have no aliases.
2649
 *
2650
 * @param graph The symbolic graph.
2651
 * @param parallel Number of devices we want to run on. 0 will use all devices available. 1 will skip.
2652
 * @param broadcasts The tensor symbols to be broadcasted.
2653
 * @param broadcast_size The size of the broadcast tensor symbols array.
2654
 * @param allreducers The tensor symbols to be allreduced.
2655
 * @param allreducer_size The size of the allreducer tensor symbols array.
2656
 * @param allreducer_outs Return the tensor symbols for the allreducers before they are allreduced. Optional, 0
2657
 *        means I don't care about this.
2658
 * @param reducers The tensor symbols to be reduced.
2659
 * @param reducer_size The size of the reducer tensor symbols array.
2660
 * @param reducer_outs Return the tensor symbols for the reducers after they are reduced. Optional, 0 means
2661
 *        I don't care about this.
2662
 * @param reduce_op_type The reduce op for reducer / allreducer.
2663
 * @param sources The source execution node symbols array.
2664
 * @param source_size The size of source node symbols array.
2665
 * @param destinations The destinations execution node symbols array.
2666
 * @param destination_size The size of destination node symbols array.
2667
 */
2668
void ccv_nnc_symbolic_graph_data_parallel(ccv_nnc_symbolic_graph_t* const graph, const int parallel, const ccv_nnc_tensor_symbol_t* const broadcasts, const int broadcast_size, const ccv_nnc_tensor_symbol_t* const allreducers, const int allreducer_size, ccv_nnc_tensor_symbol_t* const allreducer_outs, const ccv_nnc_tensor_symbol_t* const reducers, const int reducer_size, ccv_nnc_tensor_symbol_t* const reducer_outs, const int reduce_op_type, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size);
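/**
 * A minimal sketch: duplicate a training graph across all available GPUs and allreduce the
 * parameter gradients ("sum" being the only reduce op right now). The gradients array, the
 * w symbol and the sources / destinations are assumed from an already-built graph.
 * @code
 * ccv_nnc_symbolic_graph_data_parallel(graph,
 * 	0, // 0: use all devices available
 * 	0, 0, // nothing to broadcast
 * 	gradients, gradient_size, 0, // allreduce the gradients; pre-allreduce symbols not needed
 * 	0, 0, 0, // nothing to reduce
 * 	CCV_NNC_PARALLEL_REDUCE_OP_SUM,
 * 	sources, source_size, destinations, destination_size);
 * // The per-device counterpart of a symbol can be looked up afterwards:
 * const ccv_nnc_tensor_symbol_t w_on_device_1 = ccv_nnc_tensor_symbol_copy(graph, w, 1);
 * @endcode
 */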
2669
/**
2670
 * Get the symbol that is on a device other than the default one. The list will be flushed if the
2671
 * ccv_nnc_symbolic_graph_data_parallel function is called again.
2672
 * @param graph The symbolic graph.
2673
 * @param symbol The tensor symbol we want to retrieve its counterpart on a different device.
2674
 * @param device_id The device numeric id for this symbol.
2675
 * @return A tensor symbol that is on a different device.
2676
 */
2677
CCV_WARN_UNUSED(ccv_nnc_tensor_symbol_t) ccv_nnc_tensor_symbol_copy(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t symbol, const int device_id);
2678
/**
2679
 * Set the corresponding symbol for this symbol on another device. Thus, someone else can query it
2680
 * later with ccv_nnc_tensor_symbol_copy.
2681
 * @param graph The symbolic graph.
2682
 * @param symbol The tensor symbol we want to set its counterpart on a different device.
2683
 * @param device_id The device numeric id for this symbol.
2684
 * @param copy The tensor symbol counterpart on a different device.
2685
 */
2686
void ccv_nnc_tensor_symbol_set_copy(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t symbol, const int device_id, const ccv_nnc_tensor_symbol_t copy);
2687
/**
2688
 * Get the execution node that is on a device other than the default one. The list will be flushed
2689
 * if the ccv_nnc_symbolic_graph_data_parallel function is called again.
2690
 * @param graph The symbolic graph.
2691
 * @param symbol The execution node we want to retrieve its counterpart on a different device.
2692
 * @param device_id The device numeric id for this symbol.
2693
 * @return An execution node that is on a different device.
2694
 */
2695
CCV_WARN_UNUSED(ccv_nnc_graph_exec_symbol_t) ccv_nnc_graph_exec_symbol_copy(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t symbol, const int device_id);
2696
/**
2697
 * Set the corresponding symbol for this symbol on another device. Thus, someone else can query it
2698
 * later with ccv_nnc_graph_exec_symbol_copy.
2699
 * @param graph The symbolic graph.
2700
 * @param symbol The execution node we want to set its counterpart on a different device.
2701
 * @param device_id The device numeric id for this symbol.
2702
 * @param copy The execution node counterpart on a different device.
2703
 */
2704
void ccv_nnc_graph_exec_symbol_set_copy(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t symbol, const int device_id, const ccv_nnc_graph_exec_symbol_t copy);
2705
2706
/** @} */
2707
2708
/**
2709
 * @defgroup level_3_5_memory_compression Memory Compression
2710
 * @{
2711
 */
2712
2713
/**
2714
 * Apply the LSSC memory compression algorithm to the convolution activations. This will compress the activation
2715
 * layers for convolutions, therefore saving overall memory usage during training.
2716
 *
2717
 * @param graph The symbolic graph.
2718
 * @param sources The source execution node symbols array.
2719
 * @param source_size The size of source node symbols array.
2720
 * @param destinations The destinations execution node symbols array.
2721
 * @param destination_size The size of destination node symbols array.
2722
 */
2723
void ccv_nnc_symbolic_graph_memory_compression(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size);
2724
2725
/** @} */
2726
2727
/**
2728
 * @defgroup level_3_5_memory_reduction Memory Reduction
2729
 * @{
2730
 */
2731
2732
/**
2733
 * Investigate memory reduction opportunities on the graph. Right now, we are looking at datatype
2734
 * conversions that result in a larger datatype that is kept around for the backward pass.
2735
 * For these cases, we will keep the smaller one instead, and reconvert to the larger datatype just prior
2736
 * to the backward pass.
2737
 *
2738
 * @param graph The symbolic graph.
2739
 * @param sources The source execution node symbols array.
2740
 * @param source_size The size of source node symbols array.
2741
 * @param destinations The destinations execution node symbols array.
2742
 * @param destination_size The size of destination node symbols array.
2743
 */
2744
void ccv_nnc_symbolic_graph_memory_reduction(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size);
2745
2746
/** @} */
2747
2748
/** @} */
2749
2750
/**
2751
 * @defgroup level_4 Level-4 API
2752
 * @{
2753
 */
2754
2755
/**
2756
 * Opaque pointer to the dynamic graph structure.
2757
 */
2758
typedef struct ccv_nnc_dynamic_graph_s ccv_nnc_dynamic_graph_t;
2759
2760
/**
2761
 * Masquerade this as if it is an on-stack variable; there is a heap allocation, but it is managed by the dynamic graph.
2762
 * The fact that ccv_nnc_tensor_variable_t is a pointer is an implementation detail. It should be treated as an
2763
 * opaque type throughout. We may later extend this to be some on-stack information or even just a uid.
2764
 */
2765
typedef struct ccv_nnc_tensor_variable_s* ccv_nnc_tensor_variable_t;
2766
2767
/**
2768
 * Create a dynamic graph.
2769
 * @return A newly created dynamic graph.
2770
 */
2771
CCV_WARN_UNUSED(ccv_nnc_dynamic_graph_t*) ccv_nnc_dynamic_graph_new(void);
2772
2773
/** @cond ALL */
2774
// Get a new tensor variable.
2775
CCV_WARN_UNUSED(ccv_nnc_tensor_variable_t) ccv_nnc_tensor_variable_new_impl(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_param_t info);
2776
16.5k
#define CCV_NNC_TENSOR_VARIABLE_NEW_X_1(graph) ccv_nnc_tensor_variable_new_impl(graph, ccv_nnc_tensor_auto)
2777
14.6k
#define CCV_NNC_TENSOR_VARIABLE_NEW_X_SEL(_1, _2, _FX, ...) _FX
2778
// Make it so that this new method can be called either with no parameter or with a tensor_param.
2779
31.2k
#define ccv_nnc_tensor_variable_new(graph, ...) CCV_NNC_TENSOR_VARIABLE_NEW_X_SEL(graph, ##__VA_ARGS__, ccv_nnc_tensor_variable_new_impl, CCV_NNC_TENSOR_VARIABLE_NEW_X_1)(graph, ##__VA_ARGS__)
2780
CCV_WARN_UNUSED(ccv_nnc_tensor_variable_t) ccv_nnc_tensor_constant_new_impl(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_param_t info);
2781
#define CCV_NNC_TENSOR_CONSTANT_NEW_X_1(graph) ccv_nnc_tensor_constant_new_impl(graph, ccv_nnc_tensor_auto)
2782
37
#define CCV_NNC_TENSOR_CONSTANT_NEW_X_SEL(_1, _2, _FX, ...) _FX
2783
// Make it so that this new method can be called either with no parameter or with a tensor_param.
2784
37
#define ccv_nnc_tensor_constant_new(graph, ...) CCV_NNC_TENSOR_CONSTANT_NEW_X_SEL(graph, ##__VA_ARGS__, ccv_nnc_tensor_constant_new_impl, CCV_NNC_TENSOR_CONSTANT_NEW_X_1)(graph, ##__VA_ARGS__)
2785
/** @endcond */
2786
2787
/**
2788
 * Create a new tensor variable that is an alias of a given tensor variable. You can alias any tensor
2789
 * variable that is itself not an alias. You can also alias an alias, with some conditions: the tensor
2790
 * variable itself can be an alias, but it needs to be contiguous as well. For example, a vector is
2791
 * contiguous. If both conditions are satisfied, you can alias an alias.
2792
 * @param graph The dynamic graph.
2793
 * @param tensor_variable The tensor variable we are going to alias from.
2794
 * @param ofs The offset on each of the dimension.
2795
 * @param stride The stride of each dimension. If all 0, it matches the dimension of the tensor_variable.
2796
 * @param info The tensor parameters for the new alias.
2797
 * @return New tensor variable that is an alias.
2798
 */
2799
CCV_WARN_UNUSED(ccv_nnc_tensor_variable_t) ccv_nnc_tensor_variable_alias_new(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable, const int ofs[CCV_NNC_MAX_DIM_ALLOC], const int stride[CCV_NNC_MAX_DIM_ALLOC], const ccv_nnc_tensor_param_t info);
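/**
 * A minimal sketch of aliasing: view the second row of a 2x3 variable as a 3-element vector.
 * ccv_nnc_tensor_variable_new and the CPU_TENSOR_NHWC parameter macro are assumed from
 * elsewhere in this header; graph is an existing dynamic graph.
 * @code
 * ccv_nnc_tensor_variable_t m = ccv_nnc_tensor_variable_new(graph, CPU_TENSOR_NHWC(32F, 2, 3));
 * const int ofs[CCV_NNC_MAX_DIM_ALLOC] = {1, 0}; // skip the first row
 * const int stride[CCV_NNC_MAX_DIM_ALLOC] = {0}; // all 0: match the dimensions of m
 * ccv_nnc_tensor_variable_t row1 = ccv_nnc_tensor_variable_alias_new(graph, m, ofs, stride, CPU_TENSOR_NHWC(32F, 3));
 * @endcode
 */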
2800
/**
2801
 * Get the parameters for a tensor variable.
2802
 * @param graph The dynamic graph.
2803
 * @param tensor_variable The tensor variable reference.
2804
 * @return The tensor parameters.
2805
 */
2806
CCV_WARN_UNUSED(ccv_nnc_tensor_param_t) ccv_nnc_tensor_variable_params(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable);
2807
/**
2808
 * Get the parameters for a tensor variable alias.
2809
 * @param graph The dynamic graph.
2810
 * @param tensor_variable The tensor variable reference.
2811
 * @param ofs The offset on each of the dimension.
2812
 * @param stride The stride of each dimension.
2813
 * @return non-zero if it is not a tensor alias.
2814
 */
2815
int ccv_nnc_tensor_variable_alias_params(const ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable, int ofs[CCV_NNC_MAX_DIM_ALLOC], int stride[CCV_NNC_MAX_DIM_ALLOC]);
2816
2817
/** @cond ALL */
2818
/**
2819
 * Get the underlying tensor for the tensor variable. The tensor allocation may be performed when calling this
2820
 * method. If the tensor cannot be allocated (because no shape was specified), 0 is returned.
2821
 * @param graph The dynamic graph.
2822
 * @param tensor_variable The tensor variable to get the underlying tensor.
2823
 * @param stream_context Which stream this command will be executed upon.
2824
 * @return The underlying tensor.
2825
 */
2826
CCV_WARN_UNUSED(ccv_nnc_tensor_t*) ccv_nnc_tensor_from_variable_impl(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable, ccv_nnc_stream_context_t* const stream_context);
2827
8.52k
#define CCV_NNC_TENSOR_FROM_VARIABLE_X_1(graph, tensor_variable) ccv_nnc_tensor_from_variable_impl(graph, tensor_variable, 0)
2828
60.4k
#define CCV_NNC_TENSOR_FROM_VARIABLE_X_SEL(_1, _2, _3, _FX, ...) _FX
2829
// Make it so that this new method can be called either with no parameter or with a tensor_param.
2830
68.9k
#define ccv_nnc_tensor_from_variable(graph, tensor_variable, ...) CCV_NNC_TENSOR_FROM_VARIABLE_X_SEL(graph, tensor_variable, ##__VA_ARGS__, ccv_nnc_tensor_from_variable_impl, CCV_NNC_TENSOR_FROM_VARIABLE_X_1)(graph, tensor_variable, ##__VA_ARGS__)
2831
/** @endcond */
2832
/**
2833
 * Query whether a given tensor variable is a constant (no gradient).
2834
 * @param graph The dynamic graph.
2835
 * @param tensor_variable The tensor variable to query whether it is a constant.
2836
 */
2837
CCV_WARN_UNUSED(int) ccv_nnc_tensor_variable_is_constant(const ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable);
2838
/**
2839
 * Set a tensor on the tensor variable. The tensor variable doesn't take over the life-cycle management of the tensor
2840
 * (in a similar way to tensor binds).
2841
 * @param graph The dynamic graph.
2842
 * @param tensor_variable The tensor variable to set.
2843
 * @param tensor The tensor that is going to be associated with the tensor variable.
2844
 */
2845
void ccv_nnc_tensor_variable_set(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable, ccv_nnc_tensor_t* const tensor);
2846
/**
2847
 * Detach the tensor variable from the current graph. It acts as if it were computed between
2848
 * ``ccv_nnc_dynamic_graph_set_no_grad`` calls. Thus, there are a few requirements for this:
2849
 * 1. It cannot be an alias when detached. You have to detach the original, not the alias.
2850
 * 2. Detaching a variable could impact correctness when computing gradients. It cuts off backprop, acting as if the
2851
 *    detached variable were a constant (it will be marked as such).
2852
 * After this call, the tensor variable will be marked as constant and you can query that through ``ccv_nnc_tensor_variable_is_constant``.
2853
 * Why this method rather than making the variable a constant to begin with? First, a constant
2854
 * cannot be an output. Second, you may not have wrapped your computation between no-grad calls, or not all inputs
2855
 * are constants, resulting in a tensor variable that is on a graph. This method is helpful to rescue you from
2856
 * that situation.
2857
 * @param graph The dynamic graph.
2858
 * @param tensor_variable The tensor variable to be detached.
2859
 */
2860
void ccv_nnc_tensor_variable_detach(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable);
2861
/**
2862
 * A destructor function to be called when a tensor variable will be freed, in the sense that no
2863
 * backward computation needs it any more.
2864
 * Thus, we pass in the tensor rather than the tensor variable for the destructor.
2865
 */
2866
typedef void (*ccv_nnc_tensor_variable_destructor_f)(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_t* const tensor, void* const context);
2867
/**
2868
 * Hook into a tensor variable such that when it is actually freed (destroyed), the callback will receive
2869
 * the update.
2870
 * @param graph The dynamic graph.
2871
 * @param tensor_variable The tensor variable to observe when it is destroyed.
2872
 * @param func The callback function.
2873
 * @param context The context to be passed along to the callback function.
2874
 **/
2875
void ccv_nnc_tensor_variable_destructor_hook(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable, ccv_nnc_tensor_variable_destructor_f func, void* const context);
2876
/**
2877
 * Check whether the given tensor variables have effects on another set of tensor variables.
2878
 * @param graph The dynamic graph.
2879
 * @param source_variables The tensor variables to check for effects on another set of variables.
2880
 * @param source_variable_size The size of source tensor variables.
2881
 * @param destination_variables The list of variables the source variables may have effects on.
2882
 * @param destination_variable_size The size of destination tensor variables.
2883
 * @param bitmask Bit return value; each bit represents a source tensor variable, and 1 means it can reach some of the destinations.
2884
 */
2885
void ccv_nnc_dynamic_graph_has_effect_to_tensor_variables(const ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t* const source_variables, const int source_variable_size, const ccv_nnc_tensor_variable_t* const destination_variables, const int destination_variable_size, uint64_t* const bitmask);
2886
/**
2887
 * Execute a command with given tensor variables, the output is in the output tensor variables.
2888
 * @param graph The dynamic graph.
2889
 * @param cmd The wrapped command.
2890
 * @param hint The hint associated with the command.
2891
 * @param flags A reserved field for flags.
2892
 * @param inputs The input tensor variables array.
2893
 * @param input_size The size of the input tensor variables array.
2894
 * @param outputs The output tensor variables array.
2895
 * @param output_size The size of the output tensor variables array.
2896
 * @param parallel The parallel parameter, how many concurrent computations we need to execute.
2897
 * @param stream_context Which stream this command will be executed upon.
2898
 */
2899
int ccv_nnc_dynamic_graph_exec(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, const ccv_nnc_tensor_variable_t* const inputs, const int input_size, ccv_nnc_tensor_variable_t* const outputs, const int output_size, const int parallel, ccv_nnc_stream_context_t* const stream_context);
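/**
 * A minimal sketch of eager execution: elementwise-multiply two scalars. CMD_EWPROD_FORWARD
 * comes from the generated command headers and TENSOR_VARIABLE_LIST is a convenience macro
 * assumed from elsewhere in this header.
 * @code
 * ccv_nnc_dynamic_graph_t* const graph = ccv_nnc_dynamic_graph_new();
 * ccv_nnc_tensor_variable_t a = ccv_nnc_tensor_variable_new(graph, CPU_TENSOR_NHWC(32F, 1));
 * ccv_nnc_tensor_variable_t b = ccv_nnc_tensor_variable_new(graph, CPU_TENSOR_NHWC(32F, 1));
 * ccv_nnc_tensor_from_variable(graph, a)->data.f32[0] = 2;
 * ccv_nnc_tensor_from_variable(graph, b)->data.f32[0] = 3;
 * ccv_nnc_tensor_variable_t c = ccv_nnc_tensor_variable_new(graph); // shape will be inferred
 * ccv_nnc_dynamic_graph_exec(graph, CMD_EWPROD_FORWARD(), ccv_nnc_no_hint, 0,
 * 	TENSOR_VARIABLE_LIST(a, b), TENSOR_VARIABLE_LIST(c), 0, 0);
 * // ccv_nnc_tensor_from_variable(graph, c)->data.f32[0] is now 6
 * @endcode
 */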
2900
/**
2901
 * Compute the gradients of the given tensors with respect to f. Thus, df / dt.
2902
 * @param dynamic_graph The dynamic graph.
2903
 * @param f_variables The output losses.
2904
 * @param f_variable_size The size of output losses array.
2905
 * @param df_optionals The custom gradients for f. If not provided, will default to 1.
2906
 * @param inputs The input variables.
2907
 * @param input_size The size of the input variables array.
2908
 * @param outputs The gradients with respect to the inputs. If a gradient already has a value, the new gradient will be
2909
 *        accumulated into it.
2910
 * @param output_size The size of the outputs array. Should be equal to the input_size.
2911
 * @param stream_context Which stream this computation will be executed upon.
2912
 */
2913
void ccv_nnc_dynamic_graph_backward(ccv_nnc_dynamic_graph_t* const dynamic_graph, const ccv_nnc_tensor_variable_t* const f_variables, const int f_variable_size, const ccv_nnc_tensor_variable_t* const df_optionals, const ccv_nnc_tensor_variable_t* const inputs, const int input_size, ccv_nnc_tensor_variable_t* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context);
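/**
 * Continuing the sketch above: compute dc/da for c = a * b. Since d(a * b)/da = b, the
 * gradient variable da ends up holding b's value.
 * @code
 * ccv_nnc_tensor_variable_t da = ccv_nnc_tensor_variable_new(graph);
 * ccv_nnc_dynamic_graph_backward(graph, &c, 1,
 * 	0, // df defaults to 1
 * 	&a, 1, &da, 1, 0);
 * // ccv_nnc_tensor_from_variable(graph, da)->data.f32[0] is now 3
 * ccv_nnc_dynamic_graph_free(graph);
 * @endcode
 */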
2914
/**
2915
 * Apply gradients to the set of parameters to update them with appropriate minimizer.
2916
 * @param dynamic_graph The dynamic graph.
2917
 * @param minimizer The wrapped command that represents a particular optimization strategy.
2918
 * @param gradients The computed gradients to be applied.
2919
 * @param gradient_size The size of gradients array.
2920
 * @param parameters The parameters to update.
2921
 * @param parameter_size The size of parameters array, should be the same length as gradients.
2922
 * @param saved_aux The aux variables to facilitate the minimizer. See ccv_nnc_minimizer_saved_aux_size.
2923
 * @param parallel The parallel parameter, how many concurrent computations we need to execute.
2924
 * @param stream_context Which stream this computation will be executed upon.
2925
 */
2926
void ccv_nnc_dynamic_graph_apply_gradients(ccv_nnc_dynamic_graph_t* const dynamic_graph, const ccv_nnc_cmd_t minimizer, const ccv_nnc_tensor_variable_t* const gradients, const int gradient_size, ccv_nnc_tensor_variable_t* const parameters, const int parameter_size, ccv_nnc_tensor_variable_t* const saved_aux, const int parallel, ccv_nnc_stream_context_t* const stream_context);
2927
/**
2928
 * Apply one step of minimization (most likely, a gradient descent) to the parameters with a given loss (or
2929
 * losses).
2930
 * @param dynamic_graph The dynamic graph.
2931
 * @param minimizer The wrapped command that represents a particular optimization strategy.
2932
 * @param losses The losses we are trying to minimize.
2933
 * @param loss_size The size of the losses array.
2934
 * @param dloss_optionals The custom gradient for losses. If not provided, will default to 1.
2935
 * @param parameters The parameters to update.
2936
 * @param parameter_size The size of parameters array.
2937
 * @param saved_aux The aux variables to facilitate the minimizer. See ccv_nnc_minimizer_saved_aux_size.
2938
 * @param parallel The parallel parameter, how many concurrent computations we need to execute.
2939
 * @param stream_context Which stream this computation will be executed upon.
2940
 */
2941
void ccv_nnc_dynamic_graph_minimize(ccv_nnc_dynamic_graph_t* const dynamic_graph, const ccv_nnc_cmd_t minimizer, const ccv_nnc_tensor_variable_t* const losses, const int loss_size, const ccv_nnc_tensor_variable_t* const dloss_optionals, ccv_nnc_tensor_variable_t* const parameters, const int parameter_size, ccv_nnc_tensor_variable_t* const saved_aux, const int parallel, ccv_nnc_stream_context_t* const stream_context);
2942
/**
2943
 * Read more in Level-5 API section.
2944
 */
2945
typedef struct ccv_cnnp_model_s ccv_cnnp_model_t;
2946
/**
2947
 * Evaluate a CNNP model on the dynamic graph with a set of inputs / outputs.
2948
 * @param dynamic_graph The dynamic graph.
2949
 * @param model The CNNP model to be evaluated against. Note that ccv_nnc_dynamic_graph_backward /
2950
 *              ccv_nnc_dynamic_graph_apply_gradients / ccv_nnc_dynamic_graph_minimize all work with this
2951
 *              model. The dynamic graph takes over the life-cycle of the model, so you don't need to free it any more.
2952
 * @param is_test Whether we are in test mode or not.
2953
 * @param inputs The input variables.
2954
 * @param input_size The size of the input variables array.
2955
 * @param outputs The output variables from the model evaluation.
2956
 * @param output_size The size of the outputs array.
2957
 * @param tensor_tape An opaque tensor tape object to "backpropagate through time".
2958
 * @param stream_context Which stream this computation will be executed upon.
2959
 */
2960
void ccv_nnc_dynamic_graph_evaluate(ccv_nnc_dynamic_graph_t* const dynamic_graph, ccv_cnnp_model_t* const model, const int is_test, const ccv_nnc_tensor_variable_t* const inputs, const int input_size, ccv_nnc_tensor_variable_t* const outputs, const int output_size, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const stream_context);
2961
/**
2962
 * Dry run a CNNP model on the dynamic graph with a set of inputs, up until the actual execution.
2963
 * @param dynamic_graph The dynamic graph.
2964
 * @param model The CNNP model to be evaluated against. Note that ccv_nnc_dynamic_graph_backward /
2965
 *              ccv_nnc_dynamic_graph_apply_gradients / ccv_nnc_dynamic_graph_minimize all work with this
2966
 *              model. The dynamic graph takes over the life-cycle of the model, so you don't need to free it any more.
2967
 * @param is_test Whether we are in test mode or not.
2968
 * @param inputs The input variables.
2969
 * @param input_size The size of the input variables array.
2970
 * @param stream_context Which stream this computation will be executed upon.
2971
 */
2972
void ccv_nnc_dynamic_graph_dry_run(ccv_nnc_dynamic_graph_t* const dynamic_graph, ccv_cnnp_model_t* const model, const int is_test, const ccv_nnc_tensor_variable_t* const inputs, const int input_size, ccv_nnc_stream_context_t* const stream_context);
2973
/**
2974
 * Set the maximum operator-level concurrency. This is a soft-limit, e.g. if you have operations on
2975
 * different devices, they are concurrent.
2976
 * @param graph The dynamic graph.
2977
 * @param max_stream_count The maximum concurrency if the dynamic graph schedules internal streams. 0 is no limit.
2978
 */
2979
void ccv_nnc_dynamic_graph_set_max_concurrency(ccv_nnc_dynamic_graph_t* const graph, const int max_stream_count);
2980
/**
2981
 * Enable or disable gradient computation on a dynamic graph.
2982
 * @param dynamic_graph The dynamic graph.
2983
 * @param no_grad If it is 1, disable gradient computation on the dynamic graph.
2984
 * @return 0 if the setting was changed, otherwise it was not changed.
2985
 */
2986
int ccv_nnc_dynamic_graph_set_no_grad(ccv_nnc_dynamic_graph_t* const dynamic_graph, const int no_grad);
2987
/**
2988
 * The dynamic graph will retain memory it allocated for efficient reuse. Triggering this method
2989
 * intentionally will force this memory to be collected. This is helpful if you know the existing
2990
 * allocation won't be enough for future use.
2991
 * @param dynamic_graph The dynamic graph.
2992
 */
2993
void ccv_nnc_dynamic_graph_gc(ccv_nnc_dynamic_graph_t* const dynamic_graph);
2994
/**
2995
 * Dispose a tensor variable. You cannot do any computation against this tensor variable afterwards.
2996
 * @param graph The dynamic graph.
2997
 * @param tensor_variable The tensor variable to be disposed.
2998
 */
2999
void ccv_nnc_tensor_variable_free(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable);
3000
/**
3001
 * Free the dynamic graph.
3002
 * @param graph The dynamic graph.
3003
 */
3004
void ccv_nnc_dynamic_graph_free(ccv_nnc_dynamic_graph_t* const graph);
3005
/**
3006
 * Generate output that can be parsed by GraphViz (DOT language).
3007
 * @param graph The dynamic graph.
3008
 * @param flags Either CCV_NNC_SHORT_DOT_GRAPH or CCV_NNC_LONG_DOT_GRAPH
3009
 * @param out The output file stream.
3010
 */
3011
void ccv_nnc_dynamic_graph_dot(const ccv_nnc_dynamic_graph_t* const graph, const int flags, FILE* out);
3012
/**
3013
 * Count how many ops we kept for gradient computation purposes. This method is useful when we
3014
 * want to assert that at the end of a training loop we shouldn't have any gradient computation left.
3015
 * @param graph The dynamic graph.
3016
 * @param type The type of variables to trace. CCV_NNC_SYMBOL_TENSOR / CCV_NNC_SYMBOL_GRAPH_EXEC
3017
 * @return How many gradient computations we kept.
3018
 */
3019
CCV_WARN_UNUSED(int) ccv_nnc_dynamic_graph_bookkeeping_count(const ccv_nnc_dynamic_graph_t* const graph, const int type);
3020
/**
3021
 * Provide a hook for an upper level to do custom formatting of whatever is inside a given
3022
 * dynamic graph. You can implement logic to format the graph into protobuf, or JSON. However, this
3023
 * is not a method for you to visit the graph and do mutations on it. If ops are not needed for
3024
 * gradient computation, they are likely not kept on the dynamic graph at all. You probably will
3025
 * get an empty graph. What's still available can be checked with ccv_nnc_dynamic_graph_bookkeeping_count.
3026
 * @param graph The dynamic graph.
3027
 * @param format_fn The format callback to be called on every node.
3028
 * @param context The context that will be passed to the callback.
3029
 */
3030
void ccv_nnc_dynamic_graph_format(const ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_symbolic_graph_format_f format_fn, void* const context);
3031
3032
/** @} */
3033
3034
/**
3035
 * @defgroup level_5 Level-5 API
3036
 * @{
3037
 */
3038
3039
/**
3040
 * @page dataframe What is "dataframe" in ML?
3041
 *
3042
 * A large part of machine learning consists of going through data, processing it into a shape / form that makes sense,
3043
 * and passing that into the model to train. Deep learning frameworks such as TensorFlow or PyTorch provide
3044
 * dataset APIs for this purpose. It is convenient for these frameworks because, being Python, people can use
3045
 * Pandas to process the data. In Pandas, this is called a Dataframe, which, again, imitates the R language.
3046
 *
3047
 * Another interesting observation comes from the recent (2018) release of the Create ML framework from Apple. It provides
3048
 * a data processing API (MLDataTable) very close in style to Pandas, but in Swift. This implementation is important because
3049
 * it provides a survey point other than Python.
3050
 *
3051
 * Compared to Python, Swift is a more strongly typed language. Though both are high-level, they both have pretty good
3052
 * string support (of course!), operator overloading, and polymorphism. String support makes column naming natural,
3053
 * operator overloading makes conditioning and filtering easier, and polymorphism makes column type representation
3054
 * straightforward. These, unfortunately, are the challenges I need to face when implementing in C, with an eye
3055
 * towards later implementing similar ideas on top of a high-level language based on this one.
3056
 *
3057
 * It seems I haven't answered the most crucial question yet: what's special about these data process APIs? It is
3058
 * easier to answer this by first seeing what Pandas or MLDataTable does.
3059
 *
3060
 * * They both represent data as tables. Each column represents a different type of data (time, nd-array, scalar
3061
 *   or string). As such, they both have APIs to add / remove / rename columns, and to load tabular data from disk.
3062
 *
3063
 * * They both provide APIs to filter (remove / add) rows, and to derive new columns from existing columns.
3064
 *
3065
 * * Pandas provides more APIs for data alignment (merge columns from different tables into one table), and for computing
3066
 *   statistics (group rows by some criteria, and compute min / max / std / mean within that group).
3067
 *
3068
 * * MLDataTable provides an API for batching data (random split), which is covered in TensorFlow / PyTorch's Dataset APIs
3069
 *   as well.
3070
 *
3071
 * It turns out that when you have a noisy dataset, these functionalities are useful to remove unwanted data quickly.
3072
 * If you have a relatively clean dataset, they also allow you to prepare data in a more elegant way. For NNC,
3073
 * the interesting requirements are:
3074
 *
3075
 * 1. Represent scalars, tensors, and strings as columns; columns can be named.
3076
 *
3077
 * 2. New columns can be derived from existing ones.
3078
 *
3079
 * 3. Rows can be filtered, grouped, and statistics can be computed.
3080
 *
3081
 * 4. Columns can be aligned, with some given indexes.
3082
 *
3083
 * 5. All these can be done efficiently, at a scale of hundreds of gigabytes of data.
3084
 */
3085
3086
/**
3087
 * @defgroup level_5_dataframe Dataframe API
3088
 * @{
3089
 */
3090
3091
/**
3092
 * A data enumeration function to supply data for given row indexes.
3093
 */
3094
typedef void (*ccv_cnnp_column_data_enum_f)(const int column_idx, const int* const row_idxs, const int row_size, void** const data, void* const context, ccv_nnc_stream_context_t* const stream_context);
3095
/**
3096
 * A destructor for data.
3097
 */
3098
typedef void (*ccv_cnnp_column_data_deinit_f)(void* const data, void* const context);
3099
/**
3100
 * A destructor for context.
3101
 */
3102
typedef void (*ccv_cnnp_column_data_context_deinit_f)(void* const context);
3103
/**
3104
 * Column data.
3105
 */
3106
typedef struct {
3107
  int stream_type; /**< The type of stream context for this column. Each column is only compatible with one stream type. */
3108
  char* name; /**< The name of the column. */
3109
  ccv_cnnp_column_data_enum_f data_enum; /**< The data enumeration function for this column. */
3110
  ccv_cnnp_column_data_deinit_f data_deinit; /**< The deinit function that will be used to destroy the data. */
3111
  void* context; /**< The context that goes along with this column. */
3112
  ccv_cnnp_column_data_context_deinit_f context_deinit; /**< The deinit function that will be used to destroy the context. */
3113
} ccv_cnnp_column_data_t;
3114
/**
3115
 * An opaque structure point to the dataframe object.
3116
 */
3117
typedef struct ccv_cnnp_dataframe_s ccv_cnnp_dataframe_t;
3118
/**
3119
 * Create a dataframe object with given column data.
3120
 * @param column_data The column data that can be loaded.
3121
 * @param column_size The size of column data array.
3122
 * @param row_count The number of rows in this dataframe.
3123
 */
3124
CCV_WARN_UNUSED(ccv_cnnp_dataframe_t*) ccv_cnnp_dataframe_new(const ccv_cnnp_column_data_t* const column_data, const int column_size, const int row_count);
3125
/**
3126
 * Add a new column to the dataframe.
3127
 * @param dataframe The dataframe object to add column to.
3128
 * @param data_enum The data provider function for the new column.
3129
 * @param stream_type The type of stream context for this derived column.
3130
 * @param data_deinit The deinit function that will be used to destroy the derived data.
3132
 * @param context The context that can be used to generate the new column.
3133
 * @param context_deinit The deinit function that will be used to destroy the context.
3133
 * @param name The name of the newly added column.
3134
 * @return The new column index.
3135
 */
3136
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_add(ccv_cnnp_dataframe_t* const dataframe, ccv_cnnp_column_data_enum_f data_enum, const int stream_type, ccv_cnnp_column_data_deinit_f data_deinit, void* const context, ccv_cnnp_column_data_context_deinit_f context_deinit, const char* name);
3137
/**
3138
 * A map function that takes the data from multiple columns and derive new data out of it.
3139
 */
3140
typedef void (*ccv_cnnp_column_data_map_f)(void* const* const* const column_data, const int column_size, const int batch_size, void** const data, void* const context, ccv_nnc_stream_context_t* const stream_context);
3141
/**
3142
 * Derive a new column out of existing columns in the dataframe.
3143
 * @param dataframe The dataframe object that contains existing columns.
3144
 * @param map The map function used to derive new column from existing columns.
3145
 * @param stream_type The type of stream context for this derived column.
3146
 * @param data_deinit The deinit function that will be used to destroy the derived data.
3147
 * @param column_idxs The columns that will be used to derive the new column.
3148
 * @param column_idx_size The size of the existing columns array.
3149
 * @param context The context that can be used to generate the new column.
3150
 * @param context_deinit The deinit function that will be used to destroy the context.
3151
 * @param name The name of the new column.
3152
 * @return The new column index.
3153
 */
3154
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_map(ccv_cnnp_dataframe_t* const dataframe, ccv_cnnp_column_data_map_f map, const int stream_type, ccv_cnnp_column_data_deinit_f data_deinit, const int* const column_idxs, const int column_idx_size, void* const context, ccv_cnnp_column_data_context_deinit_f context_deinit, const char* name);
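/**
 * A minimal sketch of deriving a column: double a float stored per row of an existing
 * column. The dataframe and the float_column index (whose rows are assumed to hand out
 * float* data) are placeholders for illustration.
 * @code
 * #include <stdlib.h>
 *
 * static void double_it(void* const* const* const column_data, const int column_size, const int batch_size, void** const data, void* const context, ccv_nnc_stream_context_t* const stream_context)
 * {
 * 	int i;
 * 	for (i = 0; i < batch_size; i++)
 * 	{
 * 		if (!data[i]) // the output slot may be reused across iterations; allocate once
 * 			data[i] = malloc(sizeof(float));
 * 		*(float*)data[i] = *(float*)column_data[0][i] * 2;
 * 	}
 * }
 *
 * static void free_it(void* const data, void* const context)
 * {
 * 	free(data);
 * }
 *
 * const int doubled = ccv_cnnp_dataframe_map(dataframe, double_it, 0, free_it, &float_column, 1, 0, 0, "doubled");
 * @endcode
 */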
3155
/**
3156
 * Shuffle an existing dataframe.
3157
 * @param dataframe The dataframe that is about to be shuffled.
3158
 */
3159
void ccv_cnnp_dataframe_shuffle(ccv_cnnp_dataframe_t* const dataframe);
3160
/**
3161
 * Query row count of the dataframe.
3162
 * @param dataframe The dataframe we want to query row count.
3163
 * @return The row count of the dataframe.
3164
 */
3165
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_row_count(ccv_cnnp_dataframe_t* const dataframe);
3166
/**
3167
 * Query the column name of a given column on the dataframe.
3168
 * @param dataframe The dataframe we want to query the column name.
3169
 * @param column_idx The index of a column.
3170
 * @return The name of the column.
3171
 */
3172
CCV_WARN_UNUSED(const char*) ccv_cnnp_dataframe_column_name(ccv_cnnp_dataframe_t* const dataframe, const int column_idx);
3173
/**
3174
 * A sampling function that takes multiple rows of one column, and samples them down to one row.
3175
 */
3176
typedef void (*ccv_cnnp_column_data_sample_f)(void* const* const input_data, const int batch_size, void** const output_data, void* const context, ccv_nnc_stream_context_t* const stream_context);
3177
/**
3178
 * Sample a dataframe by batch size. Thus, n rows are sampled down to 1 row by the sample function on
3179
 * one specific column. This will also sample the multi-column dataframe down to 1 column
3180
 * by selecting the one column to sample.
3181
 * @param dataframe The dataframe that is about to be sampled.
3182
 * @param sample The sample function used to sample n rows into 1.
3183
 * @param data_deinit The deinit function that will be used to destroy the derived data.
3184
 * @param column_idx The column we selected to sample.
3185
 * @param batch_size How many rows will be sampled to 1 row from the original data.
3186
 * @param context The context that can be used in the sample function.
3187
 * @param context_deinit The deinit function that will be used to destroy the context.
3188
 * @return The sampled dataframe.
3189
 */
3190
CCV_WARN_UNUSED(ccv_cnnp_dataframe_t*) ccv_cnnp_dataframe_sample_new(ccv_cnnp_dataframe_t* const dataframe, ccv_cnnp_column_data_sample_f sample, ccv_cnnp_column_data_deinit_f data_deinit, const int column_idx, const int batch_size, void* const context, ccv_cnnp_column_data_context_deinit_f context_deinit);
3191
/**
3192
 * Extract a value out of a struct, assuming the data points to a struct. This method extracts the
3193
 * value at a given offset of that struct. For example, if you have struct { ccv_nnc_tensor_t* a; ccv_nnc_tensor_t* b; } S;
3194
 * if you want to extract the b tensor to a different column, you can call this function with
3195
 * offsetof(S, b).
3196
 * @param dataframe The dataframe object to be extracted.
3197
 * @param column_idx The column that we want to extract value of.
3198
 * @param offset The offset. For example, offsetof(S, b).
3199
 * @param name The name of the new column.
3200
 * @return The new column that contains the extracted value.
3201
 */
3202
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_extract_value(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const off_t offset, const char* name);
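/**
 * A minimal sketch of the offsetof pattern described above (offsetof comes from stddef.h,
 * which this header already includes); s_column is an assumed column whose rows point to S.
 * @code
 * typedef struct {
 * 	ccv_nnc_tensor_t* a;
 * 	ccv_nnc_tensor_t* b;
 * } S;
 * const int b_column = ccv_cnnp_dataframe_extract_value(dataframe, s_column, offsetof(S, b), "b");
 * @endcode
 */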
3203
/**
3204
 * Make a tuple out of the columns specified. Thus, the new derived column will contain a tuple
3205
 * with data from all the columns specified here. A tuple here is represented as void* tuple[], an
3206
 * array of void* pointers.
3207
 * @param dataframe The dataframe that will contain the new column.
3208
 * @param column_idxs The columns to be tupled.
3209
 * @param column_idx_size The number of columns.
3210
 * @param name The name of the new column.
3211
 * @return The derived column with the tuple.
3212
 */
3213
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_make_tuple(ccv_cnnp_dataframe_t* const dataframe, const int* const column_idxs, const int column_idx_size, const char* name);
3214
/**
3215
 * The size of the tuple. It is equal to the number of columns we specified. The behavior of
3216
 * calling this method on a column that is not a tuple is undefined.
3217
 * @param dataframe The dataframe that contains the tuple column.
3218
 * @param column_idx The tuple column we are going to inspect.
3219
 * @return The tuple size of the column.
3220
 */
3221
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_tuple_size(const ccv_cnnp_dataframe_t* const dataframe, const int column_idx);
3222
/**
3223
 * Extract a data out of a tuple.
3224
 * @param dataframe The dataframe that will contain the new column.
3225
 * @param column_idx The column that is a tuple.
3226
 * @param index The index into the tuple.
3227
 * @param name The name of the new column.
3228
 * @return The derived column with the extracted value.
3229
 */
3230
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_extract_tuple(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const int index, const char* name);
3231
/**
3232
 * The opaque pointer to the iterator.
3233
 */
3234
typedef struct ccv_cnnp_dataframe_iter_s ccv_cnnp_dataframe_iter_t;
3235
/**
3236
 * Get a new iterator of the dataframe.
3237
 * @param dataframe The dataframe object to iterate through.
3238
 * @param column_idxs The columns that will be iterated.
3239
 * @param column_idx_size The size of columns array.
3240
 * @return The opaque iterator object.
3241
 */
3242
CCV_WARN_UNUSED(ccv_cnnp_dataframe_iter_t*) ccv_cnnp_dataframe_iter_new(ccv_cnnp_dataframe_t* const dataframe, const int* const column_idxs, const int column_idx_size);
3243
/**
3244
 * Get the next item from the iterator.
3245
 * @param iter The iterator to go through.
3246
 * @param data_ref The output for the data.
3247
 * @param column_idx_size The size of the data_ref array.
3248
 * @param stream_context The stream context to extract data asynchronously.
3249
 * @return 0 if the iteration is successful, -1 if there are no more rows, -2 if it already ended.
3250
 */
3251
int ccv_cnnp_dataframe_iter_next(ccv_cnnp_dataframe_iter_t* const iter, void** const data_ref, const int column_idx_size, ccv_nnc_stream_context_t* const stream_context);
3252
/**
3253
 * Assuming the iterator is on the same row, peek into a potentially different column index.
3254
 * @param iter The iterator to go through.
3255
 * @param data_ref The output for the data.
3256
 * @param offset The offset for which column in this iterator to peek at.
3257
 * @param data_ref_size How many columns in this iterator to peek at.
3258
 * @param stream_context The stream context to extract data asynchronously.
3259
 */
3260
void ccv_cnnp_dataframe_iter_peek(ccv_cnnp_dataframe_iter_t* const iter, void** const data_ref, const int offset, const int data_ref_size, ccv_nnc_stream_context_t* const stream_context);
3261
/**
3262
 * Prefetch the next item on the iterator with the given stream context. You can call this method multiple times
3263
 * to prefetch multiple items ahead of time.
3264
 * @param iter The iterator to go through.
3265
 * @param prefetch_count How many items ahead we should prefetch.
3266
 * @param stream_context The stream context to extract data asynchronously.
3267
 * @return 0 if the prefetch is successful, -1 if it is ended.
3268
 */
3269
int ccv_cnnp_dataframe_iter_prefetch(ccv_cnnp_dataframe_iter_t* const iter, const int prefetch_count, ccv_nnc_stream_context_t* const stream_context);
3270
/**
3271
 * Set the cursor of the iterator. When set to 0, the iterator effectively restarts.
3272
 * @param iter The iterator to go through.
3273
 * @param idx The index of the cursor.
3274
 * @return 0 if it is successful, -1 if it is not (exceeds the range).
3275
 */
3276
int ccv_cnnp_dataframe_iter_set_cursor(ccv_cnnp_dataframe_iter_t* const iter, const int idx);
3277
/**
3278
 * Free the dataframe iterator object.
3279
 * @param iter The dataframe iterator to be freed.
3280
 */
3281
void ccv_cnnp_dataframe_iter_free(ccv_cnnp_dataframe_iter_t* const iter);
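/**
 * A minimal sketch of a full pass over one column; ccv_cnnp_dataframe_iter_next returns 0
 * while rows remain. The dataframe and column_idx are assumed to exist already.
 * @code
 * ccv_cnnp_dataframe_iter_t* const iter = ccv_cnnp_dataframe_iter_new(dataframe, &column_idx, 1);
 * void* data = 0;
 * while (ccv_cnnp_dataframe_iter_next(iter, &data, 1, 0) == 0)
 * {
 * 	// data now points to the current row of column_idx
 * }
 * ccv_cnnp_dataframe_iter_free(iter);
 * @endcode
 */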
3282
/**
3283
 * Free the dataframe object.
3284
 * @param dataframe The dataframe object to be freed.
3285
 */
3286
void ccv_cnnp_dataframe_free(ccv_cnnp_dataframe_t* const dataframe);
3287
3288
/** @} */
3289
3290
/**
3291
 * @defgroup level_5_dataframe_add_ons Dataframe Add-ons
3292
 * @{
3293
 */
3294
3295
/**
3296
 * Turn a ccv_array_t into a dataframe object.
3297
 * @param array The array we want to turn into a dataframe object.
3298
 * @return The new dataframe object.
3299
 */
3300
CCV_WARN_UNUSED(ccv_cnnp_dataframe_t*) ccv_cnnp_dataframe_from_array_new(ccv_array_t* const array);
3301
/**
3302
 * Derive a new column that copies a tensor array from the given column to the derived column on GPU.
3303
 * @param dataframe The dataframe object that get the derived column.
3304
 * @param column_idx The original column contains tensor array on CPU.
3305
 * @param tensor_offset Only copy as outputs[i] = inputs[i + tensor_offset].
3306
 * @param tensor_size How many tensors in the tensor array.
3307
 * @param device_id The device we want to copy the tensors to.
3308
 * @param name The name of the new column.
3309
 * @return The index of the newly derived column.
3310
 */
3311
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_copy_to_gpu(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const int tensor_offset, const int tensor_size, const int device_id, const char* name);
3312
/**
3313
 * Derive a new column by executing a generic command.
3314
 * @param dataframe The dataframe object that gets the derived column.
3315
 * @param column_idx The original column that contains the tensor array.
3316
 * @param cmd The command for this operation.
3317
 * @param hint The hint to run the command.
3318
 * @param flags The flags with the command.
3319
 * @param input_offset Use inputs[i + input_offset] to inputs[i + input_offset + input_size - 1] as the inputs.
3320
 * @param input_size How many tensors in the input array.
3321
 * @param output_params The parameters for the outputs.
3322
 * @param output_size How many tensors in the output array.
3323
 * @param stream_type The type of stream context we are going to use.
3324
 * @param name The name of the new column.
3325
 * @return The index of the newly derived column.
3326
 */
3327
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_cmd_exec(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, const int input_offset, const int input_size, const ccv_nnc_tensor_param_t* const output_params, const int output_size, const int stream_type, const char* name);
3328
/**
3329
 * Add a new column that contains some tensors. This will add a new column where each row is the tensor specified
3330
 * by the parameters. It comes in handy when you want to have some auxiliary tensors along with each row.
3331
 * @param dataframe The dataframe object that gets the new column.
3332
 * @param params The parameters for the tensors.
3333
 * @param name The name of the new column.
3334
 * @return The index of the newly added column.
3335
 */
3336
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_add_aux(ccv_cnnp_dataframe_t* const dataframe, const ccv_nnc_tensor_param_t params, const char* name);
3337
/**
3338
 * Read an image off a given column. That column should contain the filename (as a char array). The new column
3339
 * will contain the ccv_dense_matrix_t / ccv_nnc_tensor_t (the two are toll-free bridged) of the image.
3340
 * @param dataframe The dataframe object that loads the images.
3341
 * @param column_idx The column which contains the filename.
3342
 * @param structof The offset to the filename (as a char array) from that column. For example, the column
3343
 *        could be a struct and filename could be one of the fields. In that case, you can pass offsetof(S, filename).
3344
 * @param name The name of the new column.
3345
 * @return The index of the newly derived column.
3346
 */
3347
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_read_image(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const off_t structof, const char* name);
3348
/**
3349
 * The structure to describe how to apply random jitter to the image.
3350
 */
3351
typedef struct {
3352
  float contrast; /**< The random contrast, the final contrast will be [1 / (1 + contrast), 1 + contrast] */
3353
  float saturation; /**< The saturation, the final saturation will be [1 / (1 + saturation), 1 + saturation] */
3354
  float brightness; /**< The brightness, the final brightness will be between [1 / (1 + brightness), 1 + brightness] */
3355
  float lighting; /**< AlexNet style PCA based image jitter */
3356
  float aspect_ratio; /**< Stretch aspect ratio between [1 / (1 + aspect_ratio), 1 + aspect_ratio] */
3357
  int symmetric; /**< Apply random flip on the x-axis (around the y-axis). */
3358
  int seed; /**< The seed for random generator. */
3359
  int center_crop; /**< Enable crop to the center (otherwise do random crop). */
3360
  struct {
3361
    int min; /**< The minimal dimension of resize */
3362
    int max; /**< The maximal dimension of resize. The final resize can be computed from min + (max - min) * random_unit */
3363
    int roundup; /**< The dimension on both height / width are a multiple of roundup value. */
3364
  } resize;
3365
  struct {
3366
    int rows; /**< The height of the final image. */
3367
    int cols; /**< The width of the final image. */
3368
  } size;
3369
  struct {
3370
    int x; /**< The extra random offset on x-axis. */
3371
    int y; /**< The extra random offset on y-axis. */
3372
  } offset;
3373
  struct {
3374
    float mean[3]; /**< Normalize the image with mean. */
3375
    float std[3];/**< Normalize the image with std. pixel = (pixel - mean) / std */
3376
  } normalize;
3377
} ccv_cnnp_random_jitter_t;
3378
/**
3379
 * Apply random jitter on an image to generate a new image.
3380
 * @param dataframe The dataframe object that contains the original image.
3381
 * @param column_idx The column which contains the original image.
3382
 * @param datatype The final datatype of the image. We only support CCV_32F right now.
3383
 * @param random_jitter The random jitter parameters to be applied.
3384
 * @param name The name of the new column.
3385
 * @return The index of the newly derived column.
3386
 */
3387
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_image_random_jitter(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const int datatype, const ccv_cnnp_random_jitter_t random_jitter, const char* name);
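/**
 * A configuration sketch (an illustrative example; `df`, the `image` column index, and all jitter
 * values are assumptions, loosely in the range of common ImageNet-style augmentation):
 * @code
 * const ccv_cnnp_random_jitter_t random_jitter = {
 *   .contrast = 0.4, .saturation = 0.4, .brightness = 0.4, .lighting = 0.1,
 *   .symmetric = 1, .seed = 1, .center_crop = 0,
 *   .resize = { .min = 256, .max = 480 },
 *   .size = { .rows = 224, .cols = 224 },
 *   .normalize = { .mean = { 123.68, 116.779, 103.939 }, .std = { 58.393, 57.12, 57.375 } },
 * };
 * const int jittered = ccv_cnnp_dataframe_image_random_jitter(df, image, CCV_32F, random_jitter, "jitter");
 * @endcode
 */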
3388
/**
3389
 * Generate a one-hot tensor off the label from a struct.
3390
 * @param dataframe The dataframe object that contains the label.
3391
 * @param column_idx The column which contains the label (as int).
3392
 * @param structof The offset to the label (as int) from that column. For example, the column
3393
 *        could be a struct and label could be one of the fields. In that case, you can pass offsetof(S, label).
3394
 * @param range The range of the label, from [0...range - 1]
3395
 * @param onval The value when it hit.
3396
 * @param offval The value for the others.
3397
 * @param datatype The datatype of the tensor.
3398
 * @param format The format of the tensor.
3399
 * @param name The name of the new column.
3400
 * @return The index of the newly derived column.
3401
 */
3402
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_one_hot(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const off_t structof, const int range, const float onval, const float offval, const int datatype, const int format, const char* name);
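/**
 * A pipeline sketch combining the methods above (an illustrative example; the row struct S, the
 * 1000-category range, and the backing `array` are assumptions):
 * @code
 * typedef struct {
 *   char filename[1024]; // Path to the image on disk.
 *   int label; // The category index, 0 to 999.
 * } S;
 * ccv_cnnp_dataframe_t* const df = ccv_cnnp_dataframe_from_array_new(array); // array holds S rows.
 * const int image = ccv_cnnp_dataframe_read_image(df, 0, offsetof(S, filename), "image");
 * const int one_hot = ccv_cnnp_dataframe_one_hot(df, 0, offsetof(S, label), 1000, 1, 0, CCV_32F, CCV_TENSOR_FORMAT_NCHW, "one_hot");
 * @endcode
 */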
3403
/**
3404
 * Generate a scalar tensor (a tensor with one value) off a value from a struct.
3405
 * @param dataframe The dataframe object that contains the value.
3406
 * @param column_idx The column which contains the value (as datatype).
3407
 * @param structof The offset to the value (as from_dt) from that column. For example, the column
3408
 *        could be a struct and the value could be one of the fields. In that case, you can pass offsetof(S, value).
3409
 * @param from_dt The datatype of the value.
3410
 * @param to_dt The datatype of the tensor.
3411
 * @param format The format of the tensor.
3412
 * @param name The name of the new column.
3413
 * @return The index of the newly derived column.
3414
 */
3415
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_copy_scalar(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const off_t structof, const int from_dt, const int to_dt, const int format, const char* name);
3416
/**
3417
 * Generate a vector with ones up to a given length; the rest will be zeros. When applied to a batched lengths
3418
 * array, this will generate a matrix of these vectors, squared. The derived column will be a tuple of vectors
3419
 * for the given number of columns.
3420
 * @param dataframe The dataframe object that will contain the matrix.
3421
 * @param column_idxs The columns which contain the sequence lengths (a 1d tensor).
3422
 * @param column_idx_size The number of columns. The derived column will be a tuple of vectors.
3423
 * @param variable_size Whether the size of the final vector can vary, depending on the max length of the current batch.
3424
 * @param max_length The absolute max length for inputs.
3425
 * @param name The name of the new column.
3426
 * @return The index of the newly derived column.
3427
 */
3428
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_one_squared(ccv_cnnp_dataframe_t* const dataframe, const int* const column_idxs, const int column_idx_size, const int variable_size, const int max_length, const char* name);
3429
/**
3430
 * Truncate a given matrix (as a list of vectors) to the given size provided by another vector. The truncated
3431
 * column will be a tuple of vectors for the given columns.
3432
 * @param dataframe The dataframe object that will contain the matrix.
3433
 * @param vec_idxs The columns of the given matrix to be truncated.
3434
 * @param vec_idx_size The number of columns for vec_idxs.
3435
 * @param len_idxs The columns of the given sizes as a vector.
3436
 * @param len_idx_size The number of columns for len_idxs.
3437
 * @param name The name of the new column.
3438
 * @return The index of the newly derived column.
3439
 */
3440
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_truncate(ccv_cnnp_dataframe_t* const dataframe, const int* const vec_idxs, const int vec_idx_size, const int* len_idxs, const int len_idx_size, const char* name);
3441
/**
3442
 * Combine multiple tensors in a column into one tensor. This method can take multiple columns, which
3443
 * will result in a tuple of tensors. Each tensor in the tuple is a batched one from a given column.
3444
 * @param dataframe The dataframe contains the columns of tensors to be batched.
3445
 * @param column_idxs The columns that contain the tensors.
3446
 * @param column_idx_size The number of columns that contain the tensors.
3447
 * @param batch_count How many tensors in one column to be batched together.
3448
 * @param group_count We can generate many groups of batched tensor. For example, if you have column A, B, C, each
3449
 *        have different tensors. If group_count is 1, the result tuple will be (A_b, B_b, C_b). If group count is
3450
 *        2, the result tuple will be (A_b1, B_b1, C_b1, A_b2, B_b2, C_b2). A_b1 etc. will still contain the same
3451
 *        number of batch_count tensors.
3452
 * @param format The result format of the tensor. We support simple NCHW <=> NHWC transformations from the source tensor.
3453
 * @return The newly created dataframe with the 0-th column is the tuple of batched tensors.
3454
 */
3455
CCV_WARN_UNUSED(ccv_cnnp_dataframe_t*) ccv_cnnp_dataframe_combine_new(ccv_cnnp_dataframe_t* const dataframe, const int* const column_idxs, const int column_idx_size, const int batch_count, const int group_count, const int format);
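/**
 * A batching sketch (an illustrative example, reusing the hypothetical `image` and `one_hot`
 * column indices from the sketch above):
 * @code
 * const int columns[] = { image, one_hot };
 * ccv_cnnp_dataframe_t* const batched = ccv_cnnp_dataframe_combine_new(df, columns, 2, 64, 1, CCV_TENSOR_FORMAT_NCHW);
 * // Column 0 of batched is a tuple of two tensors, each batching 64 rows of the source column.
 * @endcode
 */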
3456
3457
/** @} */
3458
3459
/**
3460
 * @page dataframe_csv Why support comma-separated-values files in dataframe?
3461
 *
3462
 * A CSV parser written in C can usually be fast. But many such parsers are buggy, with bugs that can crash, be
3463
 * exploited, or simply produce incorrect results. There really isn't much motivation for me to start writing a parser,
3464
 * even one as simple as a CSV parser.
3465
 *
3466
 * However, it was brought to my attention that a full-speed implementation (defined by saturating PCIe x4 for a SSD) would
3467
 * be beneficial. I have also started to use nnc in many places where it is handy to load a csv file and generate some tensors out
3468
 * of it.
3469
 *
3470
 * This implementation plans to use a variant of the two-pass approach documented in
3471
 * https://www.microsoft.com/en-us/research/uploads/prod/2019/04/chunker-sigmod19.pdf and first implemented in
3472
 * https://github.com/wiseio/paratext. It differs from these two in the following particular ways:
3473
 *
3474
 * 1. The first pass will not only find the quotes and even / odd CRLFs, but also collect statistics on how many lines there are,
3475
 *    assuming the first CRLF is within the quote / outside of the quote;
3476
 *
3477
 * 2. The second pass will copy into a contiguous page that mirrors the original csv file, but null-terminates each column and
3478
 *    assigns the start pointer for each.
3479
 *
3480
 * The speculative approach, while interesting, can be challenging to implement on many-core systems, and its worst-case
3481
 * scenario is indeed worse.
3482
 *
3483
 * The implementation itself follows https://tools.ietf.org/html/rfc4180, with only customization of the delimiter (so it can support
3484
 * tab-separated values) and the quote (so you can choose between " and '). Escaping is only supported by doubling whatever quote
3485
 * symbol you elect.
3486
 */
3487
3488
/**
3489
 * @defgroup level_5_dataframe_csv Dataframe for Comma-Separated-Values Files
3490
 * @{
3491
 */
3492
enum {
3493
  /* It is a file pointer. */
3494
  CCV_CNNP_DATAFRAME_CSV_FILE = 0,
3495
  /* It is a pointer to a memory. */
3496
  CCV_CNNP_DATAFRAME_CSV_MEMORY = 1,
3497
};
3498
3499
/**
3500
 * Create a dataframe object that reads a CSV file. This will eagerly load the file into memory and parse each row / column
3501
 * into null-terminated strings; you can later convert these into numerics if needed. Each column will be a column indexed
3502
 * from 0 to column_size - 1. If there are syntax errors, the parser will make guesses and continue to parse to the best of its
3503
 * knowledge. If it cannot, we will return null for the object. We support CRLF, LF, and LFCR termination.
3504
 * @param input The FILE handle for on-disk file, or the pointer to the region of the memory we are going to use.
3505
 * @param type The type of either `CCV_CNNP_DATAFRAME_CSV_FILE` or `CCV_CNNP_DATAFRAME_CSV_MEMORY`
3506
 * @param len The length of the memory region, if it is `CCV_CNNP_DATAFRAME_CSV_MEMORY`.
3507
 * @param delim The delim, it is ',' by default (if you provided '\0')
3508
 * @param quote The quote for escape strings, it is '"' by default (if you provided '\0')
3509
 * @param include_header Whether to parse the header separately. 1 means we treat the first line as the header.
3510
 * @param column_size The number of columns in the resulted dataframe.
3511
 * @return A dataframe that can represent the csv file. nullptr if failed.
3512
 */
3513
CCV_WARN_UNUSED(ccv_cnnp_dataframe_t*) ccv_cnnp_dataframe_from_csv_new(void* const input, const int type, const size_t len, const char delim, const char quote, const int include_header, int* const column_size);
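/**
 * A loading sketch (an illustrative example; the file name is an assumption):
 * @code
 * FILE* const file = fopen("train.csv", "r");
 * int column_size = 0;
 * ccv_cnnp_dataframe_t* const df = ccv_cnnp_dataframe_from_csv_new(file, CCV_CNNP_DATAFRAME_CSV_FILE, 0, ',', '"', 1, &column_size);
 * fclose(file); // The file is eagerly loaded; the handle is no longer needed.
 * // Each of the column_size columns now yields a null-terminated string per row.
 * @endcode
 */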
3514
3515
/** @} */
3516
3517
/**
3518
 * @page model Models, layers, and Keras
3519
 *
3520
 * With Keras API in mind, this model implementation essentially is a light-weight way to group neural network layers
3521
 * together. This is a rare case in NNC (or ccv in general) where Object-Oriented programming makes sense. I borrowed
3522
 * heavily from Objective-C / C++ to implement this Object-Oriented interface.
3523
 *
3524
 * Now back to elaboration of the Model interface. It is specifically designed with Keras in mind, asking the question:
3525
 * if we are going to build a Keras-like high-level API in any language (Ruby, Python, Swift, Julia), what would the underlying
3526
 * C interface look like? Here is your answer (hint: it looks very much like the Python Keras API).
3527
 *
3528
 * A model consists of a set of inputs and outputs. This sounds very much like what "Command" is in Level-1 APIs,
3529
 * however, they are different: a model is stateful. For example, a convolution command takes 3 inputs: image, kernel
3530
 * weight and bias, and has 1 output: image. A convolution model takes 1 input: image, and has 1 output: image. Kernel weight
3531
 * and bias are internal states to the model (in Keras, it is called a "layer" for convolution, and a model means a set of
3532
 * layers. In NNC, that kind of differentiation feels superficial; therefore, a layer is a model).
3533
 *
3534
 * A model can be combined, and a new model can be a combination of other models.
3535
 *
3536
 * The simplest composed model is the sequential model. A sequential model consists of a sequence of models, each of which
3537
 * has one input and one output. The output of the earlier model feeds into the later one, thus forming a sequential
3538
 * evaluation path.
3539
 */
3540
3541
/**
3542
 * @defgroup level_5_model Model API
3543
 * @{
3544
 */
3545
3546
/**
3547
 * The model type is an abstract type; you won't ever interact with a naked model.
3548
 */
3549
typedef struct ccv_cnnp_model_s ccv_cnnp_model_t;
3550
/**
3551
 * With this type, now in NNC, we have 4 types that represent a "tensor":
3552
 *
3553
 * 1. ccv_nnc_tensor_t / ccv_nnc_tensor_view_t / ccv_nnc_tensor_multiview_t: a concrete tensor with memory allocated.
3554
 *
3555
 * 2. ccv_nnc_tensor_symbol_t: a symbol representation of a tensor, with its data layout, device affinity, and type
3556
 *                             specified.
3557
 *
3558
 * 3. ccv_nnc_tensor_variable_t: in dynamic graph, this represents a concrete tensor with memory allocated, but also
3559
 *                               associated with a recorded execution.
3560
 *
3561
 * 4. ccv_cnnp_model_io_t: this is the most flexible one. No data layout, device affinity or type specified. It can even
3562
 *                         represent a list of tensors rather than just one. This is a handle used by the model API to
3563
 *                         associate model inputs / outputs.
3564
 */
3565
typedef struct ccv_cnnp_model_io_s* ccv_cnnp_model_io_t;
3566
/**
3567
 * Create a naked input.
3568
 * @return A ccv_cnnp_model_io_t represents an input.
3569
 */
3570
CCV_WARN_UNUSED(ccv_cnnp_model_io_t) ccv_cnnp_input(void);
3571
/**
3572
 * This method mimics Keras callable for model (thus, override __call__ method in Python class).
3573
 * @param model A model that we can apply a set of inputs to get one output.
3574
 * @param inputs The set of inputs.
3575
 * @param input_size The size of inputs array.
3576
 * @return A ccv_cnnp_model_io_t that represents the output of the given model.
3577
 */
3578
CCV_WARN_UNUSED(ccv_cnnp_model_io_t) ccv_cnnp_model_apply(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t* const inputs, const int input_size);
3579
/**
3580
 * This method adds non-functional dependencies for a model IO. "Non-functional dependencies" means
3581
 * their outputs are not used for this IO; however, their existence establishes a partial ordering
3582
 * for the execution. In that way, they act as "inputs", but are not functional.
3583
 * @param model_io A model IO for which we will add additional non-functional dependencies.
3584
 * @param dependencies The set of dependencies.
3585
 * @param dependency_size The size of dependencies array.
3586
 */
3587
void ccv_cnnp_model_add_dependencies(ccv_cnnp_model_io_t model_io, const ccv_cnnp_model_io_t* const dependencies, const int dependency_size);
3588
enum {
3589
  /* Select only weights, no bias terms. */
3590
  CCV_CNNP_PARAMETER_SELECT_WEIGHT = 0,
3591
  /* Select bias terms, no weights. */
3592
  CCV_CNNP_PARAMETER_SELECT_BIAS = 1,
3593
};
3594
/**
3595
 * This method exposes parameter for a model out as a potential input for another model. Since
3596
 * it is a ccv_cnnp_model_io_t, it can also be used by other methods.
3597
 * @param model A model that we can extract parameters out.
3598
 * @param selector The selector for a parameter. ALL_PARAMETERS means all parameters, or you can select CCV_CNNP_PARAMETER_SELECT_WEIGHT or CCV_CNNP_PARAMETER_SELECT_BIAS.
3599
 * @param index The index into a parameter. ALL_PARAMETERS means all parameters.
3600
 */
3601
CCV_WARN_UNUSED(ccv_cnnp_model_io_t) ccv_cnnp_model_parameters(ccv_cnnp_model_t* const model, const int selector, const int index);
3602
/**
3603
 * A notification function such that a model can be notified.
3604
 * This is useful to broadcast a message to all models as sub-model of someone else.
3605
 */
3606
typedef void (*ccv_cnnp_model_notify_f)(const ccv_cnnp_model_t* const model, const int tag, void* const payload, void* const context);
3607
/**
3608
 * Hook into a model such that when there is a notification, the callback will receive it.
3609
 * @param model A model that can be notified.
3610
 * @param func The callback function.
3611
 * @param context The context to be passed along to the callback function.
3612
 */
3613
void ccv_cnnp_model_notify_hook(ccv_cnnp_model_t* const model, ccv_cnnp_model_notify_f func, void* const context);
3614
/**
3615
 * Notify a model and its sub-models with a tag and a payload. This will be triggered
3616
 * synchronously.
3617
 * @param model A model that will be notified.
3618
 * @param tag An integer to help identify what kind of notification.
3619
 * @param payload A payload pointer that you can carry arbitrary information.
3620
 */
3621
void ccv_cnnp_model_notify(const ccv_cnnp_model_t* const model, const int tag, void* const payload);
3622
/**
3623
 * This method's name is deceiving: it returns a composed model, not a naked model.
3624
 * This composed model takes a set of inputs and runs them through various other models to arrive at
3625
 * the set of outputs.
3626
 * @param inputs The set of inputs.
3627
 * @param input_size The size of inputs array.
3628
 * @param outputs The set of outputs.
3629
 * @param output_size The size of outputs array.
3630
 * @param is_trainable Whether the parameters of this model can be trained. -1 means inherit from the parent.
3631
 * @param name The unique name of the model.
3632
 * @return A composed model that takes inputs, and generate the outputs.
3633
 */
3634
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_model_new(const ccv_cnnp_model_io_t* const inputs, const int input_size, const ccv_cnnp_model_io_t* const outputs, const int output_size, const int is_trainable, const char* const name);
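/**
 * A composition sketch (an illustrative example; `layer0` and `layer1` are hypothetical
 * single-input / single-output sub-models created elsewhere):
 * @code
 * const ccv_cnnp_model_io_t x = ccv_cnnp_input();
 * const ccv_cnnp_model_io_t h = ccv_cnnp_model_apply(layer0, &x, 1);
 * const ccv_cnnp_model_io_t y = ccv_cnnp_model_apply(layer1, &h, 1);
 * // The composed model owns the wiring from x through layer0 and layer1 to y.
 * ccv_cnnp_model_t* const mlp = ccv_cnnp_model_new(&x, 1, &y, 1, 1, "mlp");
 * @endcode
 */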
3635
/**
3636
 * This method returns a sequential model, which is composed from a sequence of models.
3637
 * @param models The list of models, each taking one input and emitting one output that feeds into the subsequent one.
3638
 * @param model_size The size of the list.
3639
 * @param is_trainable Whether the parameters of this model can be trained.
3640
 * @param name The unique name of the model.
3641
 * @return A composed model that applies these models one by one in sequence.
3642
 */
3643
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_sequential_new(ccv_cnnp_model_t* const* const models, const int model_size, const int is_trainable, const char* const name);
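/**
 * A sequential sketch, equivalent to chaining the models by hand (an illustrative example;
 * the three layers are hypothetical single-input / single-output models):
 * @code
 * ccv_cnnp_model_t* const layers[] = { layer0, layer1, layer2 };
 * ccv_cnnp_model_t* const seq = ccv_cnnp_sequential_new(layers, 3, 1, "seq");
 * @endcode
 */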
3644
/**
3645
 * A model generation function to be called for dynamic models.
3646
 */
3647
typedef ccv_cnnp_model_t* (*ccv_cnnp_model_dynamic_f)(const ccv_nnc_tensor_param_t* const inputs, const int input_size, void* const context);
3648
/**
3649
 * This method returns a model that will be recreated if it is recompiled. Put it this way: you can call
3650
 * ccv_cnnp_model_compile multiple times with different inputs and input sizes; however, the model will
3651
 * only be recompiled to some extent. For example, if you called ccv_cnnp_reshape, the shape is determined
3652
 * at the moment you create that model, and recompilation won't change it. There are two ways to work around this:
3653
 * 1. Use models that don't have an explicit shape specified, for example, ccv_cnnp_dense, and avoid models
3654
 *    that are not as flexible, such as ccv_cnnp_reshape, or ccv_cnnp_cmd_exec.
3655
 * 2. Create with ccv_cnnp_dynamic_new such that the model will be recreated whenever it is recompiled.
3656
 * @param func The function to be called to create the model.
3657
 * @param context The context used along to create the model.
3658
 * @param name The unique name of the model.
3659
 * @return A model object that is yet to be created until build.
3660
 */
3661
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_dynamic_new(ccv_cnnp_model_dynamic_f func, void* const context, const char* const name);
3662
/**
3663
 * Prepare the model to be trained; the inputs specify the batch size etc.
3664
 * The input size technically is not needed; it is here as a safety check.
3665
 * @param model The model to be compiled.
3666
 * @param inputs The tensor parameters for the model's inputs, that can be used to derive all tensor shapes.
3667
 * @param input_size The size of the inputs array.
3668
 * @param minimizer The wrapped command that represents a particular optimization strategy.
3669
 * @param loss The wrapped command that computes the loss function.
3670
 */
3671
void ccv_cnnp_model_compile(ccv_cnnp_model_t* const model, const ccv_nnc_tensor_param_t* const inputs, const int input_size, const ccv_nnc_cmd_t minimizer, const ccv_nnc_cmd_t loss);
3672
/**
3673
 * Absorb a new model into the existing model. This requires the new model to have exactly the same parameters,
3674
 * but other dimensionalities can change. The new model must not be compiled yet; its life-cycle management
3675
 * will be taken over by the existing model. You don't need to free it separately.
3676
 * @param model The existing model.
3677
 * @param init The new model.
3678
 * @param inputs The tensor parameters for the model's inputs, that can be used to derive all tensor shapes.
3679
 * @param input_size The size of the inputs array.
3680
 */
3681
void ccv_cnnp_model_absorb(ccv_cnnp_model_t* const model, ccv_cnnp_model_t* const init, const ccv_nnc_tensor_param_t* const inputs, const int input_size);
3682
/**
3683
 * Create a copy of an existing model.
3684
 * @param model The existing model.
3685
 * @param is_trainable Whether the parameters of this model can be trained.
3686
 * @return The new model that is exactly the same copy of the old one.
3687
 */
3688
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_model_copy(const ccv_cnnp_model_t* const model, const int is_trainable);
3689
/**
3690
 * Get the output size of the model.
3691
 * @param model The existing model.
3692
 * @return The output size of the model.
3693
 */
3694
CCV_WARN_UNUSED(int) ccv_cnnp_model_output_size(const ccv_cnnp_model_t* const model);
3695
/**
3696
 * Get whether the model is trainable.
3697
 * @param model The existing model.
3698
 * @return Whether the model is trainable; -1 means inherited from its parent.
3699
 */
3700
CCV_WARN_UNUSED(int) ccv_cnnp_model_is_trainable(const ccv_cnnp_model_t* const model);
3701
/**
3702
 * Compute the shape of the output tensor after the model applied to the input.
3703
 * This can only be called after the model is compiled with proper input parameters.
3704
 * @param model The model to compute the output shapes.
3705
 * @param outputs The computed tensor parameters in the output.
3706
 * @param output_size The size of the output array, it has to match the model's output.
3707
 */
3708
void ccv_cnnp_model_tensor_auto(ccv_cnnp_model_t* const model, ccv_nnc_tensor_param_t* const outputs, const int output_size);
3709
/**
3710
 * Generate output that can be parsed by GraphViz (DOT language).
3711
 * @param model The composed model.
3712
 * @param flags Either CCV_NNC_SHORT_DOT_GRAPH or CCV_NNC_LONG_DOT_GRAPH
3713
 * @param outs The output file streams.
3714
 * @param out_size The size of output file stream array.
3715
 */
3716
void ccv_cnnp_model_dot(const ccv_cnnp_model_t* const model, const int flags, FILE** const outs, const int out_size);
3717
/**
3718
 * Provide a hook for the upper level to do custom formatting of a given model. You can implement logic
3719
 * to format the model into protobuf or json. This is only useful after the model is compiled.
3720
 * @param model The composed model.
3721
 * @param format_fn The format callback to be called on every node.
3722
 * @param context The context that will be passed to the callback.
3723
 */
3724
void ccv_cnnp_model_format(const ccv_cnnp_model_t* const model, const ccv_nnc_symbolic_graph_format_f format_fn, void* const context);
3725
/**
3726
 * Fit a model to a given input / output. This is a combination of running ccv_cnnp_model_evaluate /
3727
 * ccv_cnnp_model_backward / ccv_cnnp_model_apply_gradients. The difference is that when calling
3728
 * individual functions, the graph is compiled piece by piece and thus is less efficient than calling
3729
 * ccv_cnnp_model_fit directly. However, having the separate functions makes this implementation much
3730
 * more versatile: for example, you can accumulate gradients for multiple batches, or use custom gradients,
3731
 * etc.
3732
 * @param model The composed model.
3733
 * @param inputs The input tensors.
3734
 * @param input_size The size of the input tensors array.
3735
 * @param fits The target tensors.
3736
 * @param fit_size The size of the target tensors array.
3737
 * @param outputs The actual outputs from the model.
3738
 * @param output_size The size of the outputs array.
3739
 * @param tensor_tape An opaque tensor tape object to "backpropagate through time".
3740
 * @param stream_context The stream where the fit can be executed upon.
3741
 */
3742
void ccv_cnnp_model_fit(ccv_cnnp_model_t* const model, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const fits, const int fit_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const stream_context);
3743
enum {
3744
  /**
3745
   * Don't disable any outgrad.
3746
   */
3747
  CCV_CNNP_DISABLE_OUTGRAD_NONE = (uint64_t)0,
3748
  /**
3749
   * Disable all inputs' outgrads.
3750
   */
3751
  CCV_CNNP_DISABLE_OUTGRAD_ALL = (uint64_t)(int64_t)-1,
3752
};
3753
/**
3754
 * The parameters for how evaluation should behave.
3755
 */
3756
typedef struct {
3757
  int requires_grad; /**< Whether we need to keep intermediate results for gradient computations. */
3758
  int is_test; /**< Whether we evaluate it as test, or just as forward pass of the training process. */
3759
  uint64_t disable_outgrad; /**< Whether we can compute outflow gradients when calling ccv_cnnp_model_backward later; this is a bitmask, and you can mark for which inputs the outgrad is disabled. */
3760
} ccv_cnnp_evaluate_param_t;
3761
/**
3762
 * Evaluate model with output.
3763
 * @param model The composed model.
3764
 * @param params The parameters for how evaluation should behave.
3765
 * @param inputs The input tensors.
3766
 * @param input_size The size of the input tensors array.
3767
 * @param outputs The actual outputs from the model.
3768
 * @param output_size The size of the outputs array.
3769
 * @param tensor_tape An opaque tensor tape object to "backpropagate through time".
3770
 * @param stream_context The stream where the evaluation can be executed upon.
3771
 */
3772
void ccv_cnnp_model_evaluate(ccv_cnnp_model_t* const model, const ccv_cnnp_evaluate_param_t params, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const stream_context);
3773
/**
3774
 * Dry-run the model with inputs / outputs. This runs the evaluation loop up until the actual execution.
3775
 * @param model The composed model.
3776
 * @param params The parameters for how evaluation should behave.
3777
 * @param inputs The input tensors.
3778
 * @param input_size The size of the input tensors array.
3779
 * @param outputs The actual outputs from the model.
3780
 * @param output_size The size of the outputs array.
3781
 */
3782
void ccv_cnnp_model_dry_run(ccv_cnnp_model_t* const model, const ccv_cnnp_evaluate_param_t params, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size);
3783
/**
3784
 * Based on the input gradients, compute the output gradients (w.r.t. the inputs). This also adds parameter gradients.
3785
 * @param model The composed model.
3786
 * @param ingrads The input gradients.
3787
 * @param ingrad_size The size of the input gradients array.
3788
 * @param outgrads The output gradients (w.r.t. the inputs).
3789
 * @param outgrad_size The size of the output gradients array.
3790
 * @param tensor_tape An opaque tensor tape object to "backpropagate through time".
3791
 * @param stream_context The stream where the gradient computation can be executed upon.
3792
 */
3793
void ccv_cnnp_model_backward(ccv_cnnp_model_t* const model, ccv_nnc_tensor_t* const* const ingrads, const int ingrad_size, ccv_nnc_tensor_t* const* const outgrads, const int outgrad_size, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const stream_context);
3794
/**
3795
 * Apply the computed gradients to the parameter tensors.
3796
 * @param model The composed model.
3797
 * @param stream_context The stream where the gradient computation can be executed upon.
3798
 */
3799
void ccv_cnnp_model_apply_gradients(ccv_cnnp_model_t* const model, ccv_nnc_stream_context_t* const stream_context);
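/**
 * A manual training-step sketch, the moral equivalent of a single ccv_cnnp_model_fit call
 * (an illustrative example; the compiled model and the input / output / ingrad tensors are
 * assumed to exist):
 * @code
 * const ccv_cnnp_evaluate_param_t params = {
 *   .requires_grad = 1, // Keep intermediate results for the backward pass.
 *   .is_test = 0,
 *   .disable_outgrad = CCV_CNNP_DISABLE_OUTGRAD_ALL, // No gradients w.r.t. the inputs.
 * };
 * ccv_cnnp_model_evaluate(model, params, &input, 1, &output, 1, 0, 0);
 * // ... compute the loss gradient from output into ingrad here ...
 * ccv_cnnp_model_backward(model, &ingrad, 1, 0, 0, 0, 0);
 * ccv_cnnp_model_apply_gradients(model, 0);
 * @endcode
 */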
3800
enum {
3801
  /**
3802
   * This is the default flag; if the model is not initialized, it will attempt to read from the disk.
3803
   * Otherwise, it will persist existing parameters to disk.
3804
   */
3805
  CCV_CNNP_MODEL_CHECKPOINT_READ_WRITE,
3806
  /**
3807
   * Only read parameters from disk, even if the model is already initialized.
3808
   */
3809
  CCV_CNNP_MODEL_CHECKPOINT_READ_ONLY,
3810
  /**
3811
   * Only write parameters to disk.
3812
   */
3813
  CCV_CNNP_MODEL_CHECKPOINT_WRITE_ONLY,
3814
};
3815
/**
3816
 * Write model's tensors to a SQLite database with a given name. Note that we specifically say
3817
 * "model's tensors" because it doesn't persist the model's structure. Hence, you shouldn't
3818
 * expect us to take a name to then have a fully functional model restored from there. You still
3819
 * need to construct the model. This method only writes the tensors (weights and other internal ones)
3820
 * to disk.
3821
 * @param model The model.
3822
 * @param handle The SQLite handle.
3823
 * @param name The name to find the tensors related to the model in the database.
3824
 * @param options The IO options that can do data encode / decode before persistence.
3825
 * @return CCV_IO_FINAL for success, otherwise error.
3826
 */
3827
int ccv_cnnp_model_write(const ccv_cnnp_model_t* const model, void* const handle, const char* const name, const ccv_nnc_tensor_io_option_t* const options);
3828
/**
3829
 * Write the model's tensors to a SQLite database implicitly with the "" name. This is a convenience method
3830
 * over ccv_cnnp_model_write, particularly useful at training time.
3831
 * @param model The composed model.
3832
 * @param fn The file name.
3833
 * @param options The IO options that can do data encode / decode before persistence.
3834
 */
3835
void ccv_cnnp_model_write_to_file(ccv_cnnp_model_t* const model, const char* const fn, const ccv_nnc_tensor_io_option_t* const options);
3836
/**
3837
 * The prototype for the writer function when exporting parameters out.
3838
 * @param tensor The tensor to be written to disk.
3839
 * @param sql The sql to be executed.
3840
 * @param handle The custom handle that you passed in from ``ccv_cnnp_model_write`` method.
3841
 * @param options The IO options that can do data encode / decode before persistence.
3842
 * @param name The name given to a particular parameter.
3843
 */
3844
typedef int (*ccv_cnnp_model_io_writer_f)(const ccv_nnc_tensor_t* const tensor, const char* const sql, void* const handle, const char* const name, const ccv_nnc_tensor_io_option_t* const options);
3845
/**
3846
 * The prototype for the reader function to load parameters.
3847
 * @param handle The custom handle that you passed in from ``ccv_cnnp_model_read`` method.
3848
 * @param name The name given to a particular parameter.
3849
 * @param options The IO options that can do data encode / decode before persistence.
3850
 * @param params The recommended tensor params.
3851
 * @param tensor_out The tensor to be loaded.
3852
 */
3853
typedef int (*ccv_cnnp_model_io_reader_f)(void* const handle, const char* const name, const ccv_nnc_tensor_io_option_t* const options, const ccv_nnc_tensor_param_t params, ccv_nnc_tensor_t** const tensor_out);
3854
/**
3855
 * Set IO interceptor for loading weights from / to the model to replace the default SQLite reader / writer.
3856
 * @param model The model.
3857
 * @param reader The reader function for loading weights.
3858
 * @param writer The writer function for exporting weights out.
3859
 */
3860
void ccv_cnnp_model_set_io(ccv_cnnp_model_t* const model, ccv_cnnp_model_io_reader_f reader, ccv_cnnp_model_io_writer_f writer);
3861
/**
3862
 * Read model's tensors from a SQLite database with a given name.
3863
 * @param handle The SQLite handle.
3864
 * @param name The name to find the tensors related to the model in the database.
3865
 * @param options The IO options that can do data encode / decode before persistence.
3866
 * @param model_out The model to which you want to restore the tensors. It should have the same
3867
 *                  structure as the one used in the write.
3868
 * @return CCV_IO_FINAL for success, otherwise error.
3869
 */
3870
int ccv_cnnp_model_read(void* const handle, const char* const name, const ccv_nnc_tensor_io_option_t* const options, const ccv_cnnp_model_t* const model_out);
3871
/**
3872
 * Read the model's tensors from a SQLite database implicitly with the "" name. This is a convenience method
3873
 * over ccv_cnnp_model_read, particularly useful at training time.
3874
 * @param fn The file name.
3875
 * @param options The IO options that can do data encode / decode before persistence.
3876
 * @param model_out The model to which you want to restore the tensors. It should have the same
3877
 *                  structure as the one used in the write.
3878
 */
3879
void ccv_cnnp_model_read_from_file(const char* const fn, const ccv_nnc_tensor_io_option_t* const options, const ccv_cnnp_model_t* const model_out);
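/**
 * A checkpointing sketch (an illustrative example; the file name is an assumption):
 * @code
 * // Persist the compiled model's tensors with no encode / decode options.
 * ccv_cnnp_model_write_to_file(model, "checkpoint.sqlite3", 0);
 * // ... later, restore into a model constructed with the same structure:
 * ccv_cnnp_model_read_from_file("checkpoint.sqlite3", 0, model);
 * @endcode
 */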
3880
/**
3881
 * Apply data parallel to the composed model. This method has to be called before we call either
3882
 * evaluate or fit and after the model is compiled.
3883
 * @param model The composed model.
3884
 * @param parallel Number of devices we want to run on. 0 will use all devices available. 1 will skip.
3885
 */
3886
void ccv_cnnp_model_set_data_parallel(ccv_cnnp_model_t* const model, const int parallel);
3887
/**
3888
 * Set the maximum operator-level concurrency. This is a soft-limit, e.g. if you have operations on
3889
 * different devices, they are concurrent.
3890
 * @param model The composed model.
3891
 * @param max_stream_count The maximum concurrency if the model schedules internal streams. 0 is no limit.
3892
 */
3893
void ccv_cnnp_model_set_max_concurrency(ccv_cnnp_model_t* const model, const int max_stream_count);
3894
/**
3895
 * Apply memory compression to the composed model. The memory compression technique can reduce memory
3896
 * usage by up to 75% compared with a raw mixed-precision model during training.
3897
 * @param model The composed model.
3898
 * @param memory_compression Whether to enable the memory compression (1 - enable, 0 - disable (default))
3899
 */
3900
void ccv_cnnp_model_set_memory_compression(ccv_cnnp_model_t* const model, const int memory_compression);
3901
/**
3902
 * Apply memory reduction to the composed model. The memory reduction technique can reduce memory
3903
 * usage losslessly. Right now, the supported memory reduction technique is to redo datatype conversion.
3904
 * @param model The composed model.
3905
 * @param memory_reduction Whether to enable the memory reduction (1 - enable, 0 - disable (default))
3906
 */
3907
void ccv_cnnp_model_set_memory_reduction(ccv_cnnp_model_t* const model, const int memory_reduction);
3908
/**
3909
 * Set the computations in this model to be gradient checkpointing. This can be strategically applied
3910
 * to models within the higher-level composed models such that these models can effectively save 0
3911
 * gradients during backprop, at the cost of running the forward pass twice.
3912
 * @param model The model that will turn on gradient checkpointing.
3913
 * @param gradient_checkpointing Whether to enable gradient checkpointing (1 - enable, 0 - disable (default))
3914
 */
3915
void ccv_cnnp_model_set_gradient_checkpointing(ccv_cnnp_model_t* const model, const int gradient_checkpointing);
3916
/**
3917
 * Get whether gradient checkpointing is enabled or not for this model.
3918
 * @param model The model that will turn on gradient checkpointing.
3919
 */
3920
int ccv_cnnp_model_gradient_checkpointing(ccv_cnnp_model_t* const model);
3921
/**
3922
 * Set compile parameters on the model so it compiles the graph with the said parameters.
3923
 * @param model The composed model.
3924
 * @param compile_params A ccv_nnc_symbolic_graph_compile_param_t struct defines compilation parameters.
3925
 */
3926
void ccv_cnnp_model_set_compile_params(ccv_cnnp_model_t* const model, const ccv_nnc_symbolic_graph_compile_param_t compile_params);
3927
/**
3928
 * This method sets the max workspace size. If the graph is already compiled, it will re-run
3929
 * autotune to use the new workspace size to find the best algorithm.
3930
 * @param model The composed model.
3931
 * @param workspace_size The size in bytes that we can use as workspace (scratch memory).
3932
 */
3933
void ccv_cnnp_model_set_workspace_size(ccv_cnnp_model_t* const model, size_t workspace_size);
3934
/**
3935
 * This method returns the current max workspace size.
3936
 * @param model The composed model.
3937
 */
3938
size_t ccv_cnnp_model_workspace_size(ccv_cnnp_model_t* const model);
3939
/**
3940
 * Set a parameter that is specified by the parameter span. This will override whatever value in that
3941
 * parameter. The given tensor should match the dimension of the parameter. It doesn't matter whether
3942
 * the given tensor is on CPU or GPU; it will be copied over. This method is limited: it can only set
3943
 * the tensor once the model is compiled.
3944
 * @param model The composed model.
3945
 * @param parameter The parameter that is used to specify which parameter to override.
3946
 * @param tensor The tensor contains the value we want to copy over.
3947
 */
3948
void ccv_cnnp_model_set_parameter(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameter, const ccv_nnc_tensor_t* const tensor);
3949
/**
3950
 * Copy a parameter that is specified by the parameter span out of a model. This will override the value
3951
 * in the tensor you provided. The given tensor should match the dimension of the parameter and should
3952
 * already be allocated. It doesn't matter whether the given tensor is on CPU or GPU.
3953
 * @param model The composed model.
3954
 * @param parameter The parameter that is used to specify which parameter to copy from.
3955
 * @param tensor The tensor that receives value.
3956
 */
3957
void ccv_cnnp_model_parameter_copy(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameter, ccv_nnc_tensor_t* const tensor);
3958
/**
3959
 * Get the ccv_nnc_tensor_param_t for a particular parameter of a model.
3960
 * @param model The composed model.
3961
 * @param parameter The parameter that is used to specify which parameter to retrieve ccv_nnc_tensor_param_t.
3962
 * @return The ccv_nnc_tensor_param_t structure that specifies a tensor shape.
3963
 */
3964
CCV_WARN_UNUSED(ccv_nnc_tensor_param_t) ccv_cnnp_model_parameter_tensor_params(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameter);
3965
/**
3966
 * Get the internal name for a particular parameter of a model.
3967
 * @param model The composed model.
3968
 * @param parameter The parameter that is used to specify which parameter to retrieve ccv_nnc_tensor_param_t.
3969
 * @return The name string for the internal name; its life-cycle is managed by the model and is valid until the next invocation of the model (either another call or free).
3970
 */
3971
CCV_WARN_UNUSED(const char*) ccv_cnnp_model_parameter_name(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameter);
3972
/**
3973
 * This method returns the number of parameters for this particular model. Note that this is only available after
3974
 * the model is compiled.
3975
 * @param model A model that is compiled.
3976
 * @return The number of parameters.
3977
 */
3978
CCV_WARN_UNUSED(int) ccv_cnnp_model_parameter_count(ccv_cnnp_model_t* const model);
3979
/**
3980
 * Use this to loop over the parameters; if a parameter matches, return 1.
3981
 */
3982
typedef int (*ccv_cnnp_model_parameters_filter_f)(const ccv_cnnp_model_t* const model, const char* const name, void* const context);
3983
/**
3984
 * Loop over a compiled model to find a parameter to either write or modify.
3985
 * @param model A model that is compiled.
3986
 * @param filter The callback that determines whether this parameter matches.
3987
 * @param context The context to be passed along with the callback.
3988
 * @return An array of ccv_cnnp_model_io_t.
3989
 */
3990
CCV_WARN_UNUSED(ccv_array_t*) ccv_cnnp_model_parameters_filter(ccv_cnnp_model_t* const model, ccv_cnnp_model_parameters_filter_f filter, void* const context);
3991
/**
3992
 * Loop over a compiled model to find a parameter to either write or modify.
3993
 * @param model A model that is compiled.
3994
 * @param first The callback that determines whether a parameter is found.
3995
 * @param context The context to be passed along with the callback.
3996
 * @return A ccv_cnnp_model_io_t, or 0 if not found.
3997
 */
3998
CCV_WARN_UNUSED(ccv_cnnp_model_io_t) ccv_cnnp_model_parameter_first(ccv_cnnp_model_t* const model, ccv_cnnp_model_parameters_filter_f first, void* const context);
3999
/**
4000
 * Set parameters from another model. This will override whatever values are in these parameters. The
4001
 * given parameters from another model should match the dimension of the parameter. It doesn't matter
4002
 * whether the given tensor is on CPU or GPU. This method can only set when both models are compiled.
4003
 * @param model The composed model to be set on parameters.
4004
 * @param parameters The parameters to be overridden.
4005
 * @param from_model The model to copy parameters from.
4006
 * @param from_parameters The parameters to be copied from.
4007
 */
4008
void ccv_cnnp_model_set_parameters(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameters, const ccv_cnnp_model_t* const from_model, const ccv_cnnp_model_io_t from_parameters);
4009
4010
/**
4011
 * @param context The context pass to the share method.
4012
 * @param source_name The name of the parameter from the from_model.
4013
 * @param updated_name The name of the parameter from the model. You can update the value.
4014
 * @param provided_size The size of the updated_name buffer.
4015
 * @return 0 if succeeded, -1 if failed.
4016
 */
4017
typedef int(*ccv_cnnp_model_parameters_renamer_f)(void* const context, const char* const source_name, char* const updated_name, const size_t provided_size);
4018
/**
4019
 * Share parameters between two models. This is a very specific setup to enable memory optimization
4020
 * by sharing parameter weights between two models. The models can be different as long as the weights
4021
 * match. The model is responsible for keeping from_model alive / not destroyed. There is no refcount.
4022
 * Besides using the parameters to identify them, you can also use the given renamer to provide a name match.
4023
 * @param model The composed model to be set on parameters.
4024
 * @param parameters The parameters to be overridden.
4025
 * @param from_model The model to copy parameters from.
4026
 * @param from_parameters The parameters to be shared from.
4027
 * @param renamer The provided rename function that can get the new name from the from_parameters.
4028
 * @param context The context for renamer function.
4029
 */
4030
void ccv_cnnp_model_share_parameters(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameters, const ccv_cnnp_model_t* const from_model, const ccv_cnnp_model_io_t from_parameters, ccv_cnnp_model_parameters_renamer_f renamer, void* const context);
4031
/**
4032
 * Process parameters such as exponential averaging.
4033
 * parameters = zip(from_parameters, to_parameters).map { cmd(to_parameter, from_parameter) }
4034
 * The order is selected in such a way because many of our commands only support an in-place op if the first
4035
 * parameter matches.
4036
 * @param model The composed model to have parameters zip mapped.
4037
 * @param parameters The parameters to be written (and read).
4038
 * @param cmd The command to apply on the parameters.
4039
 * @param hint The hint supplied to the cmd.
4040
 * @param flags The flags supplied to the cmd.
4041
 * @param aux_ins Additional inputs supplied to the cmd.
4042
 * @param aux_in_size The size of additional inputs supplied to the cmd.
4043
 * @param aux_outs Additional outputs supplied to the cmd.
4044
 * @param aux_out_size The size of additional outputs supplied to the cmd.
4045
 * @param stream_context The stream context to be associated with.
4046
 * @param from_model The other composed model to have parameters zipped.
4047
 * @param from_parameters The parameters to be read.
4048
 */
4049
void ccv_cnnp_model_parameters_zip_map(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameters, const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const aux_ins, const int aux_in_size, ccv_nnc_tensor_t* const* const aux_outs, const int aux_out_size, ccv_nnc_stream_context_t* const stream_context, const ccv_cnnp_model_t* const from_model, const ccv_cnnp_model_io_t from_parameters);
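/**
 * An exponential-averaging sketch following the zip map formula above (an illustrative example;
 * it assumes CMD_ADD_FORWARD(p, q) computes p * a + q * b as elsewhere in NNC, and that
 * ema_model and live_model share the same parameter layout):
 * @code
 * // ema = 0.999 * ema + 0.001 * live, for every parameter pair.
 * ccv_cnnp_model_parameters_zip_map(ema_model,
 *   ccv_cnnp_model_parameters(ema_model, ALL_PARAMETERS, ALL_PARAMETERS),
 *   CMD_ADD_FORWARD(0.999, 0.001), ccv_nnc_no_hint, 0,
 *   0, 0, 0, 0, // No auxiliary inputs / outputs.
 *   0, // Run on the default stream.
 *   live_model, ccv_cnnp_model_parameters(live_model, ALL_PARAMETERS, ALL_PARAMETERS));
 * @endcode
 */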
4050
/**
4051
 * Process parameters such as clipping. parameters = parameters.map { cmd(parameter) }
4052
 * @param model The composed model to have parameters mapped.
4053
 * @param parameters The parameters to be mapped.
4054
 * @param cmd The command to apply on the parameters.
4055
 * @param hint The hint supplied to the cmd.
4056
 * @param flags The flags supplied to the cmd.
4057
 * @param aux_ins Additional inputs supplied to the cmd.
4058
 * @param aux_in_size The size of additional inputs supplied to the cmd.
4059
 * @param aux_outs Additional outputs supplied to the cmd.
4060
 * @param aux_out_size The size of additional outputs supplied to the cmd.
4061
 * @param stream_context The stream context to be associated with.
4062
 */
4063
void ccv_cnnp_model_parameters_map(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameters, const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const aux_ins, const int aux_in_size, ccv_nnc_tensor_t* const* const aux_outs, const int aux_out_size, ccv_nnc_stream_context_t* const stream_context);
4064
/**
4065
 * Process parameter gradients such as normalization. parameters.grad = parameters.apply { cmd(parameter.grad) }
4066
 * @param model The composed model to have parameters mapped.
4067
 * @param parameters The parameters to be mapped.
4068
 * @param cmd The command to apply on the parameters.
4069
 * @param hint The hint supplied to the cmd.
4070
 * @param flags The flags supplied to the cmd.
4071
 * @param aux_ins Additional inputs supplied to the cmd.
4072
 * @param aux_in_size The size of additional inputs supplied to the cmd.
4073
 * @param aux_outs Additional outputs supplied to the cmd.
4074
 * @param aux_out_size The size of additional outputs supplied to the cmd.
4075
 * @param stream_context The stream context to be associated with.
4076
 */
4077
void ccv_cnnp_model_parameter_gradients_map(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameters, const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const aux_ins, const int aux_in_size, ccv_nnc_tensor_t* const* const aux_outs, const int aux_out_size, ccv_nnc_stream_context_t* const stream_context);
4078
/**
4079
 * Set a new minimizer for the model. This is useful when you need to update learn rate for stochastic
4080
 * gradient descent for example. This method can be called any time during the training process (after
4081
 * compilation).
4082
 * @param model The composed model.
4083
 * @param minimizer The wrapped command that represents a new optimization strategy.
4084
 * @param reset Reset all previous states of minimizers. This only makes sense if both parameters and parameter_size are 0.
4085
 * @param parameters The parameters to apply the minimizer on. 0 means all.
4086
 * @param parameter_size The number of parameter spans.
4087
 */
4088
void ccv_cnnp_model_set_minimizer(ccv_cnnp_model_t* const model, const ccv_nnc_cmd_t minimizer, const int reset, const ccv_cnnp_model_io_t* const parameters, const int parameter_size);
4089
/**
4090
 * Retrieve the default minimizer for the model. This is set either when you call ccv_cnnp_model_compile or
4091
 * ccv_cnnp_model_set_minimizer with no parameter spans.
4092
 * @param model The composed model.
4093
 * @return The minimizer command.
4094
 */
4095
CCV_WARN_UNUSED(ccv_nnc_cmd_t) ccv_cnnp_model_minimizer(ccv_cnnp_model_t* const model);
4096
/**
4097
 * Get the default stream from a compiled model. If the model is not compiled, the default stream is
4098
 * 0.
4099
 * @param model The composed model.
4100
 * @return The default stream for this model.
4101
 */
4102
CCV_WARN_UNUSED(ccv_nnc_stream_context_t*) ccv_cnnp_model_default_stream(const ccv_cnnp_model_t* const model);
4103
/**
4104
 * Get the allocated memory size (excluding workspace) from a compiled model. If the model is not compiled,
4105
 * the size is 0.
4106
 * @param model The composed model.
4107
 * @return The number of bytes for memory allocated.
4108
 */
4109
CCV_WARN_UNUSED(uint64_t) ccv_cnnp_model_memory_size(const ccv_cnnp_model_t* const model);
4110
/**
4111
 * Free a given model.
4112
 * @param model The composed model.
4113
 */
4114
void ccv_cnnp_model_free(ccv_cnnp_model_t* const model);
4115
4116
/** @} */
4117
4118
/**
4119
 * @defgroup level_5_model_add_ons Model Add-ons
4120
 * @{
4121
 */
4122
4123
/**
4124
 * Process parameter gradients with normalization. Exactly the same as PyTorch's clip_grad_norm_
4125
 * @param model The composed model to have parameters mapped.
4126
 * @param parameters The parameters to be mapped.
4127
 * @param norm_type Currently only 2 is supported.
4128
 * @param max_norm The max value for norm.
4129
 * @param stream_context The stream context to be associated with.
4130
 */
4131
void ccv_cnnp_model_parameters_clip_grad_norm(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameters, int norm_type, float max_norm, ccv_nnc_stream_context_t* const stream_context);
4132
/**
4133
 * Process parameter gradients to check if any is NaN.
4134
 * @param model The composed model to have parameters mapped.
4135
 * @param parameters The parameters to be mapped.
4136
 * @param stream_context The stream context to be associated with.
4137
 * @return 1 if it has any NaN, 0 otherwise.
4138
 */
4139
int ccv_cnnp_model_parameter_gradients_isnan(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameters, ccv_nnc_stream_context_t* const stream_context);
4140
4141
enum {
4142
  CCV_CNNP_IO, /**< The parameter is a ccv_cnnp_io_t. */
4143
  CCV_CNNP_NO_TENSOR, /**< The parameter is not used. */
4144
  CCV_CNNP_TENSOR_NOT_OUTPUT, /**< This parameter indicates this is a tensor parameter, but it is not an output reflected as ccv_cnnp_io_t */
4145
  CCV_CNNP_INIT_SHARED_TENSOR, /**< The parameter is a provided tensor for initialization. */
4146
  CCV_CNNP_INIT_SHARED_TENSOR_AS_TRAINABLE, /**< The parameter is a provided tensor that can be updated. */
4147
};
4148
4149
typedef void(*ccv_cnnp_state_initializer_f)(void* const context, const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const input, const ccv_nnc_tensor_symbol_t output_symbol);
4150
typedef void(*ccv_cnnp_cmd_exec_init_state_f)(const ccv_nnc_tensor_symbol_t tensor_symbol, const ccv_cnnp_state_initializer_f initializer, void* const initializer_context, void* const context);
4151
typedef void(*ccv_cnnp_cmd_exec_init_state_deinit_f)(void* const context);
4152
typedef void*(*ccv_cnnp_cmd_exec_init_state_copy_f)(void* const context);
4153
4154
typedef struct {
4155
  ccv_nnc_tensor_param_t info; /**< The tensor parameter for this one. */
4156
  void* context; /**< The context for which we initialize tensor. */
4157
  ccv_cnnp_cmd_exec_init_state_f init; /**< The function to init state for a tensor. */
4158
  ccv_cnnp_cmd_exec_init_state_copy_f copy; /**< The function to make a copy of the context. */
4159
  ccv_cnnp_cmd_exec_init_state_deinit_f deinit; /**< The function to release the context. */
4160
} ccv_cnnp_cmd_exec_io_init_state_t;
4161
4162
typedef struct {
4163
  int type; /**< The type of the parameter; could be CCV_CNNP_IO, CCV_CNNP_NO_TENSOR, CCV_CNNP_INIT_SHARED_TENSOR, or CCV_CNNP_INIT_SHARED_TENSOR_AS_TRAINABLE */
4164
  ccv_cnnp_cmd_exec_io_init_state_t init_state; /**< The set of state to initialize the given tensor. */
4165
} ccv_cnnp_cmd_exec_io_t;
4166
/**
4167
 * A generic model based on the command. If the tensors are labeled as ccv_cnnp_io_t, it will participate
4168
 * as the input / output of the model. If it is an init tensor, the model will use this tensor for that parameter.
4169
 * Moreover, if it is marked as a parameter, that tensor will be differentiated against when you call
4170
 * ccv_cnnp_model_fit. This model however doesn't take over ownership of the tensor. You should manage the life
4171
 * cycle of the given tensor and it is your responsibility to make sure they outlive the model. Also, all inputs and
4172
 * outputs marked as init tensors will be shared if you reuse this model in other places.
4173
 * @param cmd The command to generate this model.
4174
 * @param hint The hint to run the command.
4175
 * @param flags The flags with the command.
4176
 * @param inputs A list of ccv_cnnp_cmd_exec_io_t identify each input as either a init tensor or a ccv_cnnp_io_t.
4177
 * @param input_size The size of input list.
4178
 * @param outputs A list of types identify each output as ccv_cnnp_io_t or a none tensor.
4179
 * @param output_size The size of the outputs. There is no need to give ccv_cnnp_tensor_param_t for outputs because
4180
 *        all of them are CCV_CNNP_IO type.
4181
 * @param is_trainable Whether the parameters of this model can be trained.
4182
 * @param name The unique name of the model.
4183
 * @return A model based on the given command.
4184
 */
4185
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_cmd_exec(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, const ccv_cnnp_cmd_exec_io_t* const inputs, const int input_size, const int* const outputs, const int output_size, const int is_trainable, const char* const name);
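/* A sketch of wrapping a single command as a model: both the input and the output are
 * plain ccv_cnnp_io_t, so this model simply multiplies its input by 2
 * (CMD_SCALAR_MUL_FORWARD and ccv_nnc_no_hint are assumed from the generated command
 * set and the Level-1 API):
 *
 *   const ccv_cnnp_cmd_exec_io_t ins[] = { { .type = CCV_CNNP_IO } };
 *   const int outs[] = { CCV_CNNP_IO };
 *   ccv_cnnp_model_t* const mul2 = ccv_cnnp_cmd_exec(CMD_SCALAR_MUL_FORWARD(2),
 *     ccv_nnc_no_hint, 0, ins, 1, outs, 1, 0, "mul2");
 */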
/**
 * Copy a tensor as initialization for the given parameter.
 * @param tensor The tensor to copy from.
 * @return An init_state that can be passed to ccv_cnnp_cmd_exec_io_t.
 */
CCV_WARN_UNUSED(ccv_cnnp_cmd_exec_io_init_state_t) ccv_cnnp_cmd_exec_io_copy(const ccv_nnc_tensor_t* const tensor);
/**
 * Initialize a given parameter with the command.
 * @param cmd The command to call when initialization is needed.
 * @param hint The hint to accompany the command.
 * @param flags The flags to accompany the command.
 * @param params The tensor configuration.
 * @return An init_state that can be passed to ccv_cnnp_cmd_exec_io_t.
 */
CCV_WARN_UNUSED(ccv_cnnp_cmd_exec_io_init_state_t) ccv_cnnp_cmd_exec_io_set_by(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, const ccv_nnc_tensor_param_t params);

typedef struct {
  ccv_nnc_tensor_symbol_t symbol; /**< The tensor symbol this refers to. */
  int type; /**< The type of the parameter; one of CCV_CNNP_IO, CCV_CNNP_INIT_SHARED_TENSOR, or CCV_CNNP_INIT_SHARED_TENSOR_AS_TRAINABLE. */
  ccv_cnnp_cmd_exec_io_init_state_t init_state; /**< The set of state to initialize the given tensor. */
} ccv_cnnp_tensor_symbol_param_t;
/**
 * A generic model based on the symbolic graph we provided. Each tensor symbol in the list is labeled
 * as a ccv_cnnp_io_t or not (we identify whether it is an input or an output based on its position in
 * the graph). If it is not a ccv_cnnp_io_t, we init it with a given tensor. If it is marked as a
 * parameter, that tensor will be differentiated against when you call ccv_cnnp_model_fit. The model
 * doesn't take ownership over the init tensors. You are responsible for making sure the init tensors
 * outlive the model until initialization has occurred. Also, these tensors will be shared if the model
 * is reused.
 * @param graph The symbolic graph that is our blueprint for this model.
 * @param tensor_symbol_params The list of tensor symbol parameters that labels a given symbol.
 * @param tensor_symbol_param_size The size of the list.
 * @param inputs The inputs to this graph. We can figure out which ones are inputs, but this gives us the order.
 * @param input_size The size of the input list.
 * @param outputs The outputs from this graph. We can figure out which ones are outputs, but this gives us the order.
 * @param output_size The size of the output list.
 * @param is_trainable Whether the parameters of this model can be trained.
 * @param name The unique name of the model.
 * @return A model based on the given symbolic graph.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_graph(const ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_tensor_symbol_param_t* const tensor_symbol_params, const int tensor_symbol_param_size, ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size, const int is_trainable, const char* const name);
/**
 * Sum multiple input tensors together.
 * @param name The unique name of the model.
 * @return A model that can be applied with multiple inputs, and generate output that is a sum of the inputs.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_sum(const char* const name);
/**
 * Concatenate input tensors together.
 * @param axis Along this axis, we concatenate tensors together. Other dimensions need to be exactly the same.
 * @param name The unique name of the model.
 * @return A model that can be applied with multiple inputs, and generate output that is a concatenation of the inputs.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_concat(const int axis, const char* const name);
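/* A sketch of composing these models with the functional-style API (ccv_cnnp_input,
 * ccv_cnnp_model_apply, and ccv_cnnp_model_new are assumed from earlier in this header):
 *
 *   const ccv_cnnp_model_io_t x0 = ccv_cnnp_input();
 *   const ccv_cnnp_model_io_t x1 = ccv_cnnp_input();
 *   ccv_cnnp_model_io_t xs[] = { x0, x1 };
 *   // Concatenate the two inputs along axis 1 (the channel axis here, by assumption).
 *   const ccv_cnnp_model_io_t y = ccv_cnnp_model_apply(ccv_cnnp_concat(1, "concat"), xs, 2);
 *   ccv_cnnp_model_t* const model = ccv_cnnp_model_new(xs, 2, &y, 1, 1, "pair_concat");
 */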
/**
 * Chunk the input tensor into n pieces.
 * @param n How many pieces we chunk the tensor into.
 * @param axis Along this axis, we chunk the tensor. Other dimensions need to be exactly the same.
 * @param name The unique name of the model.
 * @return A model that can be applied with one input, and generate outputs that are chunks of the input.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_chunk(const int n, const int axis, const char* const name);
/**
 * A convolution model.
 * @param groups The number of kernel groups in the model.
 * @param filters The total number of filters in the model (filters = groups * per group filters).
 * @param kdim The dimensions of the kernel.
 * @param dilation The dilation factor on each dimension.
 * @param no_bias Whether it has a bias term or not.
 * @param hint The hint for alignment.
 * @param format The format for weights. If 0, it will have the same format as the input.
 * @param is_trainable Whether the parameters of this model can be trained.
 * @param name The unique name of the model.
 * @return A convolution model.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_convolution(const int groups, const int filters, const int kdim[CCV_NNC_MAX_DIM_ALLOC], const int dilation[CCV_NNC_MAX_DIM_ALLOC], const int no_bias, ccv_nnc_hint_t hint, const int format, const int is_trainable, const char* const name);
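/* A sketch of a 3x3 convolution with 32 filters, stride 1 and a 1-pixel border (DIM_ALLOC
 * and HINT are the array / hint helper macros assumed from the Level-1 API):
 *
 *   ccv_cnnp_model_t* const conv = ccv_cnnp_convolution(1, 32, DIM_ALLOC(3, 3), DIM_ALLOC(),
 *     0, HINT((1, 1), (1, 1)), 0, 1, "conv3x3");
 */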
/**
 * A convolution transpose model.
 * @param groups The number of kernel groups in the model.
 * @param filters The total number of filters in the model (filters = groups * per group filters).
 * @param kdim The dimensions of the kernel.
 * @param dilation The dilation factor on each dimension.
 * @param output_padding The padding that helps to resolve shape ambiguity when this is the inverse of a convolution.
 * @param no_bias Whether it has a bias term or not.
 * @param hint The hint for alignment.
 * @param format The format for weights. If 0, it will have the same format as the input.
 * @param is_trainable Whether the parameters of this model can be trained.
 * @param name The unique name of the model.
 * @return A convolution transpose model.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_convolution_transpose(const int groups, const int filters, const int kdim[CCV_NNC_MAX_DIM_ALLOC], const int dilation[CCV_NNC_MAX_DIM_ALLOC], const int output_padding, const int no_bias, ccv_nnc_hint_t hint, const int format, const int is_trainable, const char* const name);
/**
 * A dense layer model.
 * @param count The output dimension.
 * @param no_bias Whether it has a bias term or not.
 * @param is_trainable Whether the parameters of this model can be trained.
 * @param name The unique name of the model.
 * @return A dense layer model.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_dense(const int count, const int no_bias, const int is_trainable, const char* const name);
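/* A sketch of a small 2-layer MLP built from these pieces (ccv_cnnp_sequential_new and
 * its is_trainable parameter are assumed from earlier in this header):
 *
 *   ccv_cnnp_model_t* const mlp = ccv_cnnp_sequential_new((ccv_cnnp_model_t* []){
 *     ccv_cnnp_dense(128, 0, 1, "fc1"),
 *     ccv_cnnp_relu("relu1"),
 *     ccv_cnnp_dense(10, 0, 1, "fc2"),
 *   }, 3, 1, "mlp");
 */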
/**
 * A batch norm layer model.
 * @param momentum The momentum in the batch norm parameter.
 * @param epsilon The epsilon in the batch norm parameter.
 * @param is_trainable Whether the parameters of this model can be trained.
 * @param name The unique name of the model.
 * @return A batch norm layer model.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_batch_norm(const float momentum, const float epsilon, const int is_trainable, const char* const name);
/**
 * A RELU activation layer model.
 * @param name The unique name of the model.
 * @return A RELU activation layer model.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_relu(const char* const name);
/**
 * A sigmoid activation layer model.
 * @param name The unique name of the model.
 * @return A sigmoid activation layer model.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_sigmoid(const char* const name);
/**
 * A tanh activation layer model.
 * @param name The unique name of the model.
 * @return A tanh activation layer model.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_tanh(const char* const name);
/**
 * A swish activation layer model.
 * @param name The unique name of the model.
 * @return A swish activation layer model.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_swish(const char* const name);
/**
 * A GELU activation layer model.
 * @param tanh Whether to enable the fast, tanh-based approximate GELU.
 * @param name The unique name of the model.
 * @return A GELU activation layer model.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_gelu(const int tanh, const char* const name);
/**
 * A leaky ReLU activation layer model.
 * @param negative_slope The coefficient to be applied when the input is negative.
 * @param name The unique name of the model.
 * @return A leaky ReLU activation layer model.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_leaky_relu(const float negative_slope, const char* const name);
/**
 * A softmax activation layer model.
 * @param name The unique name of the model.
 * @return A softmax activation layer model.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_softmax(const char* const name);
/**
 * A max pool model.
 * @param kdim The pooling window dimension.
 * @param hint The hint for alignment.
 * @param name The unique name of the model.
 * @return A max pool model.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_max_pool(const int kdim[CCV_NNC_MAX_DIM_ALLOC], const ccv_nnc_hint_t hint, const char* const name);
/**
 * An average pool model.
 * @param kdim The pooling window dimension.
 * @param hint The hint for alignment.
 * @param name The unique name of the model.
 * @return An average pool model.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_average_pool(const int kdim[CCV_NNC_MAX_DIM_ALLOC], const ccv_nnc_hint_t hint, const char* const name);
/**
 * Reshape an input into a different dimension.
 * @param format Change the layout format for a given input; 0 is not to change.
 * @param dim The new dimension for the input.
 * @param ofs The offset on each of the dimensions.
 * @param stride The line size of each dimension.
 * @param name The unique name of the model.
 * @return A reshape layer model.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_reshape(const int format, const int dim[CCV_NNC_MAX_DIM_ALLOC], const int ofs[CCV_NNC_MAX_DIM_ALLOC], const int stride[CCV_NNC_MAX_DIM_ALLOC], const char* const name);
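/* A sketch of viewing a flat 784-element feature vector as a 28x28x1 HWC image
 * (format 0 keeps the current layout; empty ofs / stride describe a dense, zero-offset view):
 *
 *   ccv_cnnp_model_t* const as_image = ccv_cnnp_reshape(0, DIM_ALLOC(28, 28, 1), DIM_ALLOC(), DIM_ALLOC(), "as_image");
 */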
/**
 * Pad the input with extra elements at the beginning or the end of each dimension. Padding should be > 0.
 * @param type Two types of padding are supported: zero and replication.
 * @param begin How many elements to add at the beginning of each dimension.
 * @param end How many elements to add at the end of each dimension.
 * @param name The unique name of the model.
 * @return A pad layer model.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_pad(const int type, const int begin[CCV_NNC_MAX_DIM_ALLOC], const int end[CCV_NNC_MAX_DIM_ALLOC], const char* const name);
/**
 * An identity op that simply copies from input to output without using any data transfer / format conversion methods.
 * @param name The unique name of the model.
 * @return An identity layer model.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_identity(const char* const name);
/**
 * Permute the input. For example, [2, 0, 1] means moving dimension 2 to 0, dimension 0 to 1, and dimension 1 to 2.
 * @param index The dimension index each output dimension comes from.
 * @param name The unique name of the model.
 * @return A permute layer model.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_permute(const int index[CCV_NNC_MAX_DIM_ALLOC], const char* const name);
/**
 * Extract one of the multiple outputs. This is useful because a ccv_cnnp_model_io_t can contain multiple
 * outputs; this helps to extract one of them out to be used later.
 * @param index The index of the output you want to extract.
 * @param name The unique name of the model.
 * @return A model that can extract one output.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_extract(const int index, const char* const name);
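/* A sketch of pairing ccv_cnnp_chunk with ccv_cnnp_extract: split a tensor into two
 * halves along axis 1, then pull each half out as its own io (ccv_cnnp_input and
 * ccv_cnnp_model_apply are assumed from earlier in this header):
 *
 *   const ccv_cnnp_model_io_t x = ccv_cnnp_input();
 *   const ccv_cnnp_model_io_t halves = ccv_cnnp_model_apply(ccv_cnnp_chunk(2, 1, "halves"), &x, 1);
 *   const ccv_cnnp_model_io_t first = ccv_cnnp_model_apply(ccv_cnnp_extract(0, "first"), &halves, 1);
 *   const ccv_cnnp_model_io_t second = ccv_cnnp_model_apply(ccv_cnnp_extract(1, "second"), &halves, 1);
 */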
/**
 * Flatten an input tensor into a one dimensional array.
 * @param name The unique name of the model.
 * @return A flatten layer model.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_flatten(const char* const name);
/**
 * A layer norm model.
 * @param epsilon The epsilon in the layer norm parameter.
 * @param axis The feature axes over which to compute the norm.
 * @param axis_count How many axes we count as features.
 * @param elementwise_affine Whether it contains a scale / bias.
 * @param is_trainable Whether the parameters of this model can be trained.
 * @param name The unique name of the model.
 * @return A layer norm model.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_layer_norm(const float epsilon, const int axis[CCV_NNC_MAX_DIM_ALLOC], const int axis_count, const int elementwise_affine, const int is_trainable, const char* const name);
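/* A sketch of layer norm over the last axis of a rank-4 tensor (axis 3), with
 * elementwise scale / bias, as commonly used in transformer blocks:
 *
 *   ccv_cnnp_model_t* const ln = ccv_cnnp_layer_norm(1e-5, DIM_ALLOC(3), 1, 1, 1, "ln");
 */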
/**
 * A group norm model.
 * @param group_axis The feature axis on which groups are formed to compute the norm.
 * @param groups How many groups to split the group axis into.
 * @param epsilon The epsilon in the group norm parameter.
 * @param reduce_axis The other axes to be reduced.
 * @param axis_count The number of other axes to be reduced.
 * @param elementwise_affine Whether it contains a scale / bias.
 * @param is_trainable Whether the parameters of this model can be trained.
 * @param name The unique name of the model.
 * @return A group norm model.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_group_norm(const int group_axis, const int groups, const float epsilon, const int reduce_axis[CCV_NNC_MAX_DIM_ALLOC], const int axis_count, const int elementwise_affine, const int is_trainable, const char* const name);
/**
 * An RMSNorm model.
 * @param epsilon The epsilon in the norm parameter.
 * @param axis The feature axes over which to compute the norm.
 * @param axis_count How many axes we count as features.
 * @param is_trainable Whether the parameters of this model can be trained.
 * @param name The unique name of the model.
 * @return An RMSNorm model.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_rmsnorm(const float epsilon, const int axis[CCV_NNC_MAX_DIM_ALLOC], const int axis_count, const int is_trainable, const char* const name);
/**
 * Add two input tensors together. Different from sum because this supports broadcasting.
 * @param p The weight for the first input.
 * @param q The weight for the second input.
 * @param name The unique name of the model.
 * @return A model that can be applied with two inputs, and generate output that is a weighted sum of the inputs.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_add(const float p, const float q, const char* const name);
/**
 * Multiply two input tensors together.
 * @param p The weight for the output.
 * @param name The unique name of the model.
 * @return A model that can be applied with two inputs, and generate output that is a product of the inputs.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_mul(const float p, const char* const name);
/**
 * A scalar multiplication model. Y = aX where a is a scalar.
 * @param a The scalar parameter.
 * @param name The unique name of the model.
 * @return A scalar multiplication model.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_scalar_mul(const float a, const char* const name);
/**
 * Divide one input tensor by another.
 * @param reciprocal If 1, only take one tensor input and effectively compute 1 / input.
 * @param name The unique name of the model.
 * @return A model that can be applied with two inputs, and generate output that is a division of the inputs.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_div(const int reciprocal, const char* const name);
/**
 * Square root of the input tensor.
 * @param name The unique name of the model.
 * @return A model that can be applied with one input, and generate output that is the square root of the input.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_sqrt(const char* const name);
/**
 * Multiply two input tensors together as if they are complex numbers.
 * @param name The unique name of the model.
 * @return A model that can be applied with two inputs, and generate output that is a product of the inputs.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_cmul(const char* const name);
/**
 * A matrix transpose model.
 * @param axis_a The axis to be exchanged with axis_b.
 * @param axis_b The axis to be exchanged with axis_a.
 * @param name The unique name of the model.
 * @return A matrix transpose model.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_transpose(const int axis_a, const int axis_b, const char* const name);
/**
 * A batched matrix multiplication model.
 * @param transpose_a The pair of axes to be transposed in the first matrix.
 * @param transpose_b The pair of axes to be transposed in the second matrix.
 * @param name The unique name of the model.
 * @return A batched matrix multiplication model.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_matmul(const int transpose_a[2], const int transpose_b[2], const char* const name);
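/* A sketch of a batched q @ k^T product to form attention scores on [batch, heads, len, dim]
 * tensors (TRANSPOSE / NO_TRANSPOSE are the helper macros assumed from the Level-1 API):
 *
 *   ccv_cnnp_model_t* const scores = ccv_cnnp_matmul(NO_TRANSPOSE, TRANSPOSE(2, 3), "qk");
 */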
/**
 * A dropout model.
 * @param p The probability to drop the current value.
 * @param entirety Drop the whole layer with the given probability.
 * @param name The unique name of the model.
 * @return A dropout model.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_dropout(const float p, const int entirety, const char* const name);
/**
 * A masked fill model.
 * @param eq Wherever a value in the given mask tensor equals this value, fill.
 * @param fill The value to fill into the output tensor.
 * @param name The unique name of the model.
 * @return A masked fill model.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_masked_fill(const float eq, const float fill, const char* const name);
/**
 * An index select model.
 * @param name The unique name of the model.
 * @return An index select model.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_index_select(const char* const name);
/**
 * A dictionary embedding model. This can be thought of as an index select model where the vocabulary
 * tensor is kept within the model itself.
 * @param datatype The data type of the vocabulary.
 * @param vocab_size The size of the vocabulary.
 * @param embed_size The size of the embedding.
 * @param is_trainable Whether the parameters of this model can be trained.
 * @param name The unique name of the model.
 * @return An embedding model.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_embedding(const int datatype, const int vocab_size, const int embed_size, const int is_trainable, const char* const name);
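/* A sketch of a trainable token embedding for a 32,000-entry vocabulary with
 * 512-dimensional vectors in 32-bit float (CCV_32F comes from ccv.h):
 *
 *   ccv_cnnp_model_t* const embed = ccv_cnnp_embedding(CCV_32F, 32000, 512, 1, "tok_embed");
 */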
/**
 * An upsample model.
 * @param type The type of upsample, whether nearest or bilinear.
 * @param width_scale The scale of the width of the input.
 * @param height_scale The scale of the height of the input.
 * @param align_corners Whether to align corners when doing upsample.
 * @param name The unique name of the model.
 * @return An upsample model.
 */
ccv_cnnp_model_t* ccv_cnnp_upsample(const int type, const float width_scale, const float height_scale, const int align_corners, const char* const name);
/**
 * A sum value reducer model.
 * @param axis The axis to be reduced.
 * @param axis_count The size of the axis array.
 * @param name The unique name of the model.
 * @return A sum value reducer model.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_reduce_sum(const int* const axis, const int axis_count, const char* const name);
/**
 * A mean value reducer model.
 * @param axis The axis to be reduced.
 * @param axis_count The size of the axis array.
 * @param name The unique name of the model.
 * @return A mean value reducer model.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_reduce_mean(const int* const axis, const int axis_count, const char* const name);
/**
 * A max value reducer model.
 * @param axis The axis to be reduced.
 * @param axis_count The size of the axis array.
 * @param name The unique name of the model.
 * @return A max value reducer model.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_reduce_max(const int* const axis, const int axis_count, const char* const name);
/**
 * A min value reducer model.
 * @param axis The axis to be reduced.
 * @param axis_count The size of the axis array.
 * @param name The unique name of the model.
 * @return A min value reducer model.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_reduce_min(const int* const axis, const int axis_count, const char* const name);
/**
 * A norm2 value reducer model.
 * @param axis The axis to be reduced.
 * @param axis_count The size of the axis array.
 * @param name The unique name of the model.
 * @return A norm2 value reducer model.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_reduce_norm2(const int* const axis, const int axis_count, const char* const name);
/**
 * An argmax model.
 * @param axis The axis to be reduced.
 * @param name The unique name of the model.
 * @return A max indices model.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_argmax(const int axis, const char* const name);
/**
 * An argmin model.
 * @param axis The axis to be reduced.
 * @param name The unique name of the model.
 * @return A min indices model.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_argmin(const int axis, const char* const name);
/**
 * An element-wise min model.
 * @param name The unique name of the model.
 * @return An element-wise min model.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_min(const char* const name);
/**
 * An element-wise max model.
 * @param name The unique name of the model.
 * @return An element-wise max model.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_max(const char* const name);
/**
 * A Long Short-Term Memory model.
 * @param masked Whether a mask tensor is provided.
 * @param hidden_size The number of features in the hidden state h.
 * @param proj_size The projection size for the hidden state h (follows PyTorch's proj_size semantics).
 * @param num_layers The number of layers for the RNN.
 * @param bias If 0, the layer won't use bias weights.
 * @param batch_first If 1, the batch dimension comes before the sequence dimension.
 * @param bidirectional Enable bidirectional mode of the RNN.
 * @param dropout If non-zero, enable dropout at each layer of the RNN.
 * @param is_trainable Whether the parameters of this model can be trained.
 * @param name The unique name of the model.
 * @return An LSTM model.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_lstm(const int masked, const int hidden_size, const int proj_size, const int num_layers, const int bias, const int batch_first, const int bidirectional, const float dropout, const int is_trainable, const char* const name);
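/* A sketch of a 2-layer unidirectional LSTM with 256 hidden features, batch-major input,
 * bias enabled, and no projection / mask / dropout:
 *
 *   ccv_cnnp_model_t* const lstm = ccv_cnnp_lstm(0, 256, 0, 2, 1, 1, 0, 0, 1, "lstm");
 */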
/**
 * Perform datatype conversion for input tensors.
 * @param datatype The desired datatype.
 * @param ref_to_last If there are two inputs to the model, use the last one as the datatype reference.
 * @param name The unique name of the model.
 * @return A model that does data conversion.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_datatype_conversion(const int datatype, const int ref_to_last, const char* const name);
/**
 * Clamp the input tensor to a range.
 * @param min The lower bound; pass NAN to leave this end unclamped.
 * @param max The upper bound; pass NAN to leave this end unclamped.
 * @param name The unique name of the model.
 * @return A model that does clamp.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_clamp(const float min, const float max, const char* const name);
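/* A sketch of one-sided clamping: with min = 0 and max = NAN this behaves like a ReLU
 * (NAN comes from <math.h> and disables the corresponding bound):
 *
 *   ccv_cnnp_model_t* const clamp0 = ccv_cnnp_clamp(0, NAN, "clamp0");
 */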
/**
 * A parameter that can be initialized / loaded.
 * @param params The tensor shape / information about this parameter.
 * @param init_bound The bound for the initial values, in uniform distribution.
 * @param is_trainable Whether the parameters of this model can be trained.
 * @param name The unique name of the model.
 * @return A model that can be applied and returns the weight.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_parameter(const ccv_nnc_tensor_param_t params, const float init_bound, const int is_trainable, const char* const name);
/**
 * A scalar value that can be used.
 * @param type The type of this scalar.
 * @param format The format of this scalar.
 * @param datatype The datatype of this scalar.
 * @param value The value in float.
 * @param name The unique name of the model.
 * @return A model that can be applied and returns the scalar.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_scalar(const int type, const int format, const int datatype, const float value, const char* const name);
/**
 * An empty variable that can be used. This is usually paired with ccv_cnnp_move to make this "input"
 * a destination. This is also different from ccv_cnnp_parameter because a parameter will be persisted.
 * @param params The parameters for the tensor.
 * @param name The unique name of the model.
 * @return A model that can be applied and returns the variable.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_variable(const ccv_nnc_tensor_param_t params, const char* const name);
/**
 * A special model that takes two inputs but copies the value in the first input to the second. The
 * second input is then returned as the output. This is special because it enables you to violate
 * the static single assignment (SSA) rule, which cannot otherwise be done with CNNP models. It has
 * a special place because it enables hand-written optimizations that would otherwise require you to
 * either implement a new optimization pass in nnc (difficult to do correctly) or drop down to the
 * Level-3 API, which may not be exposed on high-level language bindings such as s4nnc.
 * @param name The unique name of the model.
 * @return A model that can be applied and copies the first input to the second.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_move(const char* const name);
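/* A sketch of pairing ccv_cnnp_variable with ccv_cnnp_move to write a result into a
 * scratch destination (the shape, the zero-input apply call, and CPU_TENSOR_NHWC are
 * illustrative assumptions):
 *
 *   const ccv_cnnp_model_io_t x = ccv_cnnp_input();
 *   const ccv_cnnp_model_io_t dst = ccv_cnnp_model_apply(
 *     ccv_cnnp_variable(CPU_TENSOR_NHWC(32F, 128), "scratch"), 0, 0);
 *   ccv_cnnp_model_io_t pair[] = { x, dst };
 *   const ccv_cnnp_model_io_t moved = ccv_cnnp_model_apply(ccv_cnnp_move("move"), pair, 2);
 */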
/**
 * If the input is not contiguous, this model will make it contiguous. Normally, such a graph operation
 * would be optimized away when calling ccv_nnc_symbolic_graph_simplify; in this case, we disable that
 * optimization on the generated node. If the input is already contiguous, the output of this model
 * is the same as the input, hence the copy is skipped.
 * @param name The unique name of the model.
 * @return A model that can be applied to make the input contiguous.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_contiguous(const char* const name);
/**
 * Apply scaled dot product attention to the input. Accepts input in the form of (q, k, v),
 * or (q, k, v, attn_mask) if has_attn_mask is 1.
 * @param scale The scale to be applied to the qk dot product.
 * @param is_causal Whether to apply a causal mask. If both attn_mask and is_causal are supplied, we will cut attn_mask to the upper right triangle.
 * @param has_attn_mask Whether the input accepts a 4th parameter, the attention mask.
 * @param upcast Whether the attention computation will be run at higher precision (from FP16 to FP32).
 * @param fused_unify_head_weights Whether we also have the unifying head weight fused into it. The output would be in the shape of (N, S, H * Ev).
 * @param no_bias Whether we have a bias or not for the unifying head output.
 * @param is_trainable Whether or not it is trainable (if weight / bias provided).
 * @param name The unique name of the model.
 * @return A model that can apply scaled dot product attention compute.
 */
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_scaled_dot_product_attention(const float scale, const int is_causal, const int has_attn_mask, const int upcast, const int fused_unify_head_weights, const int no_bias, const int is_trainable, const char* const name);
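/* A sketch of causal attention for 64-dimensional heads, without an explicit mask and
 * without the fused output projection (sqrtf requires <math.h>):
 *
 *   ccv_cnnp_model_t* const sdpa = ccv_cnnp_scaled_dot_product_attention(
 *     1.0 / sqrtf(64), 1, 0, 0, 0, 1, 0, "sdpa"); // is_causal = 1, no_bias = 1
 */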

/** @} */

/** @} */

#endif