Coverage Report

Created: 2021-04-05 01:08

/home/liu/buildslave/linux-x64-runtests/build/lib/nnc/ccv_nnc.h
Line
Count
Source (jump to first uncovered line)
1
/**********************************************************
2
 * C-based/Cached/Core Computer Vision Library
3
 * Liu Liu, 2010-02-01
4
 **********************************************************/
5
6
/**********************************************************
7
 * CCV - Neural Network Collection
8
 **********************************************************/
9
10
#ifndef GUARD_ccv_nnc_h
11
#define GUARD_ccv_nnc_h
12
13
#include "ccv.h"
14
#include <stddef.h>
15
16
// These are generated by cmd/build-cmd.rb
17
#include "cmd/ccv_nnc_cmd.h"
18
#include "cmd/ccv_nnc_backend.h"
19
20
/**
21
 * @defgroup level_0 Level-0 API
22
 * @{
23
 */
24
25
/**
26
 * Initialize the library.
27
 */
28
void ccv_nnc_init(void);
29
30
/** @} */
31
32
/**
33
 * @defgroup level_1 Level-1 API
34
 * @{
35
 */
36
37
/**
38
 * @defgroup level_1_cmd Commands
39
 * @{
40
 */
41
enum {
42
  // Attributes that enable symbolic graph simplification
43
  CCV_NNC_CMD_ATTR_PASSTHROUGH  = 0x01, /**< This doesn't compute anything, but pass the first n tensors to the output (useful for backprop that is identical). */
44
  CCV_NNC_CMD_ATTR_OUTPUT_ONES  = 0x02, /**< All the output tensors are 1s (unit). */
45
  CCV_NNC_CMD_ATTR_NULL_IS_ONES = 0x04, /**< Accept nullptr input as if these are tensors with 1s (unit). */
46
};
47
48
// Flags pass into cmd when executing.
49
enum {
50
  CCV_NNC_ACCUMULATE_OUTPUT = 0x01, /**< Enable accumulate outputs (unsupported). */
51
  CCV_NNC_ZERO_MEMORY_ALLOC = 0x02, /**< Don't allocate any extra memory for this operation. */
52
};
53
54
enum {
55
  CCV_NNC_EXEC_SUCCESS   = 0, /**< Successfully executed the command. */
56
  CCV_NNC_EXEC_INVALID   = -1, /**< Invalid inputs. */
57
  CCV_NNC_EXEC_NO_KERNEL = -2, /**< No kernel available for a given command / backend. */
58
  CCV_NNC_EXEC_OOM       = -3, /**< Out of memory error. */
59
};
60
61
/**
62
 * Parameters for command.
63
 */
64
typedef struct {
65
  struct {
66
    int dim[CCV_NNC_MAX_DIM_ALLOC]; /**< [size.dim] The window size for the layer. For full connect layer, it is 1 because it is 1x1 convolutional layer with count of filters */
67
  } size;
68
  union {
69
    struct {
70
      int count; /**< [convolution.count] The number of filters for convolutional layer. */
71
      int groups; /**< [convolution.groups] The number of groups for convolutional layer. */
72
    } convolution;
73
    struct {
74
      int reserved; /**< [pool.reserved] A reserved field. */
75
    } pool;
76
    struct {
77
      float kappa; /**< [rnorm.kappa] As of b[i] = a[i] / (rnorm.kappa + rnorm.alpha * sum(a, i - rnorm.size / 2, i + rnorm.size / 2)) ^ rnorm.beta */
78
      float alpha; /**< [rnorm.alpha] See **rnorm.kappa**. */
79
      float beta; /**< [rnorm.beta] See **rnorm.kappa**. */
80
    } rnorm;
81
    struct {
82
      int axis[CCV_NNC_MAX_DIM_ALLOC]; /**< [bnorm.axis[]] The axis selected to compute mean / variance. */
83
      int count; /**< [bnorm.count] The number of axis selected. */
84
      float epsilon; /**< [bnorm.epsilon] The epsilon for standard derivation. */
85
      int is_test; /**< [bnorm.is_test] Whether in test mode. */
86
      float momentum; /**< [bnorm.momentum] running_mean = running_mean * momentum + mean * (1 - momentum). */
87
    } bnorm;
88
    struct {
89
      int axis[CCV_NNC_MAX_DIM_ALLOC]; /**< [lnorm.axis[]] The axis selected to compute mean / variance. */
90
      int count; /**< [lnorm.count] The number of axis selected. */
91
      float epsilon; /**< [lnorm.epsilon] The epsilon for standard derivation. */
92
    } lnorm;
93
    struct {
94
      int nesterov; /**< [sgd.nesterov] Nesterov accelerated gradient. */
95
      float rate; /**< [sgd.rate] The learning rate. */
96
      float scale; /**< [sgd.scale] The scale to be applied to the gradient before doing any minimization. */
97
      float decay; /**< [sgd.decay] This is the weight decay parameter, which represents L2 regularization after momentum applied. */
98
      float momentum; /**< [sgd.momentum] For SGD, this follows http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf. */
99
      float dampening; /**< [sgd.dampening] This usually == momentum, however, it can be changed. */
100
    } sgd;
101
    struct {
102
      int step; /**< [adam.step] Step t in adam optimizer. */
103
      float rate; /**< [adam.rate] The learning rate. */
104
      float beta1; /**< [adam.beta1] The beta1 hyper-parameter in adam optimizer. */
105
      float beta2; /**< [adam.beta2] The beta2 hyper-parameter in adam optimizer. */
106
      float decay; /**< [adam.decay] This is the weight decay parameter, which represents L2 regularization. */
107
      float epsilon; /**< [adam.epsilon] The epsilon for standard derivation. */
108
    } adam;
109
    struct {
110
      float rate; /**< [rmsprop.rate] The learning rate. */
111
      float decay; /**< [rmsprop.decay] This is the weight decay parameter, which represents L2 regularization after momentum applied. */
112
      float alpha; /**< [rmsprop.momentum] The alpha hyper-parameter. */
113
      float momentum; /**< [rmsprop.momentum] The momentum hyper-parameter. */
114
      float epsilon; /**< [rmsprop.epsilon] The epsilon for standard derivation. */
115
    } rmsprop;
116
    struct {
117
      int transpose_a[2]; /**< [blas.transpose_a[2]] The axis we'd like to transpose for input a. */
118
      int transpose_b[2]; /**< [blas.transpose_b[2]] The axis we'd like to transpose for input b. */
119
      float a[3]; /**< [blas.a[3]] BLAS scalars. */
120
    } blas;
121
    struct {
122
      float trim0; /**< [label_smoothing.trim0] The smoothed label for 0. */
123
      float trim1; /**< [label_smoothing.trim1] The smoothed label for 1. */
124
    } label_smoothing;
125
    struct {
126
      float pos_weight; /**< [binary_crossentropy.pos_weight] The pos_weight on the loss: -(pos_weight * y * log(x) + (1 - y) * log(1 - x)) */
127
    } binary_crossentropy;
128
    struct {
129
      float beta; /**< [smooth_l1.beta] The beta on the smooth L1 loss (or Huber loss) */
130
    } smooth_l1;
131
    struct {
132
      int axis[CCV_NNC_MAX_DIM_ALLOC]; /**< [reduce.axis[]] The axis selected to reduce. */
133
      int count; /**< [reduce.count] The number of axis selected. */
134
    } reduce;
135
    struct {
136
      int axis[2]; /**< [transpose.axis[2]] The axis we'd like to transpose for input. */
137
    } transpose;
138
    struct {
139
      float p; /**< [dropout.p] Dropout probability. */
140
      int entirety; /**< [dropout.entirety] Drop the whole layer with the given probability. */
141
    } dropout;
142
    struct {
143
      float width_scale; /**< [upsample.width_scale] scale for width parameter. It is between 1 and 2 at the moment. */
144
      float height_scale; /**< [upsample.height_scale] scale for height parameter. It is between 1 and 2 at the moment. */
145
    } upsample;
146
    struct {
147
      float min; /**< [clamp.min] The minimum, NaN is no min. */
148
      float max; /**< [clamp.max] The maximum, NaN is no max. */
149
    } clamp;
150
    struct {
151
      float iou_threshold; /**< [nms.iou_threshold] Threshold between 0 to 1 for IoU threshold. */
152
    } nms;
153
    void* userdata;
154
  };
155
} ccv_nnc_cmd_param_t;
156
157
/*
158
 * Hints for command.
159
 */
160
typedef struct {
161
  struct {
162
    int dim[CCV_NNC_MAX_DIM_ALLOC]; /**< Stride for each dimension. */
163
  } stride;
164
  struct {
165
    int begin[CCV_NNC_MAX_DIM_ALLOC]; /**< Padding at the beginning of a dimension. */
166
    int end[CCV_NNC_MAX_DIM_ALLOC]; /**< Padding at the end of a dimension. */
167
  } border;
168
} ccv_nnc_hint_t;
169
170
/**
171
 * Opaque pointer to a stream object.
172
 */
173
typedef struct ccv_nnc_stream_context_s ccv_nnc_stream_context_t;
174
175
typedef struct ccv_nnc_cmd_vtab_s ccv_nnc_cmd_vtab_t;
176
177
typedef struct ccv_nnc_cmd_s {
178
  uint32_t cmd; /**< The identifier for command. */
179
  uint32_t backend; /**< The identifier for backend. */
180
  int algorithm; /**< The algorithm selector (as defined by backend). */
181
  ccv_nnc_cmd_param_t info; /**< The command parameters. */
182
  /**
183
   * This is for type CCV_NNC_CUSTOM_FORWARD / CCV_NNC_CUSTOM_BACKWARD
184
   */
185
  ccv_nnc_cmd_vtab_t* isa;
186
  void* data;
187
} ccv_nnc_cmd_t;
188
189
/**
190
 * For forward functions, the input tensors and output tensors can be arbitrary.
191
 * However, for backward functions (backpropagation, or gradient functions in other libs),
192
 * the input is: 0~m-1: gradient for output tensors, 1~n: input tensors for forward functions, n+1~n+m: output tensors for forward functions,
193
 * the output is: 0~n-1: output gradients w.r.t. input tensors.
194
 * Which input / output tensors can be ignored can be specified in the cmd config structs.
195
 */
196
typedef int(*ccv_nnc_cmd_exec_f)(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context);
197
198
/**
199
 * The function prototype for autotune. The only difference is the max_workspace_size.
200
 * Whoever implement this function prototype means we handled over autotune task to the
201
 * command itself, you are responsible to select the best algorithm.
202
 * @return The selected algorithm.
203
 */
204
typedef int(*ccv_nnc_cmd_autotune_f)(const ccv_nnc_cmd_t cmd, const size_t max_workspace_size, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context);
205
206
/**
207
 * The function prototype is for automatically deduce tensor shapes.
208
 */
209
210
typedef struct ccv_nnc_cmd_vtab_s {
211
  ccv_nnc_cmd_exec_f exec;
212
  void (*tensor_auto)(const ccv_nnc_cmd_t cmd, const ccv_nnc_tensor_param_t* const inputs, const int input_size, const ccv_nnc_hint_t hint, ccv_nnc_tensor_param_t* const outputs, const int output_size);
213
} ccv_nnc_cmd_vtab_t;
214
215
/** @} */
216
217
/**
218
 * @defgroup level_1_uops Micro Commands to Define Commands
219
 * @{
220
 */
221
222
/**
223
 * @page micro_jittor The concept of meta-ops in Jittor is amazing
224
 *
225
 * NNC will never do JIT. Particularly, I will never do codegen and compile at runtime, especially with static shapes.
226
 * The reason is pretty simple. JIT would be too much architectural dependent and with that, almost impossible for NNC
227
 * to be this small embeddable library that you can carry everywhere. However, this shouldn't prevent NNC to generate
228
 * proper descriptions of each command so a JIT version can be built if there are architectural support for it. In this
229
 * way, the core of NNC can be small and embeddable, but a new backend (identified by the backend attribute) can implement
230
 * more sophisticated JIT mechanism.
231
 *
232
 * More over, I need to generate some code for reference implementations, ideally from some descriptions. This is important
233
 * because with 90+ ops, having a correctly implemented command turns out to be more challenging than I expected.
234
 * Especially if I want them to be compliant with the metadata describes it (what shape it accepts, what datatype works,
235
 * whether it can accept tensor views, and how in-place tensors supported). Many of reference commands are not supporting
236
 * all datatypes and tensor views, and this has to be rectified because these are "reference commands", they must be.
237
 *
238
 * Jittor introduced to the world the idea of meta-ops. Basically, it claims every ops (or macro ops) can be break down to
239
 * 3 types of micro ops (they call them meta-ops): a reindex op that can map tensor from one dimensionality to another, an
240
 * element-wise op that does element-wise primitive math, and finally, a reduce op that can reduce along particular axis
241
 * of a tensor with some elementary math. This feels rather limited initially, but when thinking through it, I am convinced
242
 * it should be enough to describe all commands presented in NNC (this shouldn't be a surprise actually).
243
 *
244
 * Thus, the plan now is to use the meta-ops idea, implementing new micro commands that can describe other commands in
245
 * NNC. In this way, I can generate reference implementation from these descriptions and hopefully have better coverage
246
 * than my existing CPU / GPU reference implementations.
247
 *
248
 * To build on-top what Jittor did, if you need to have my dynamism in the ops, it is essential to index with the provided
249
 * tensor. With just reindex, binary operands and reduce, you cannot do that. Thus, on top of these 3, we added the 4th
250
 * micro op (meta-op) that is "select". This will be sufficient to implement ops such as masking.
251
 *
252
 */
253
254
/**
255
 * Abstract vtab for different ccv_nnc_micro_io_t.
256
 */
257
typedef struct ccv_nnc_micro_io_vtab_s ccv_nnc_micro_io_vtab_t;
258
259
enum {
260
  // These could be much more unary ops.
261
  CCV_NNC_MICRO_UNARY_OP_NEG,
262
  CCV_NNC_MICRO_UNARY_OP_LOG,
263
  CCV_NNC_MICRO_UNARY_OP_EXP,
264
};
265
266
enum {
267
  CCV_NNC_MICRO_BINARY_OP_PLUS,
268
  CCV_NNC_MICRO_BINARY_OP_MINUS,
269
  CCV_NNC_MICRO_BINARY_OP_MUL,
270
  CCV_NNC_MICRO_BINARY_OP_DIV,
271
  CCV_NNC_MICRO_BINARY_OP_MAX,
272
  CCV_NNC_MICRO_BINARY_OP_MIN,
273
  CCV_NNC_MICRO_BINARY_OP_EQUAL_TO,
274
  CCV_NNC_MICRO_BINARY_OP_LESS_THAN,
275
};
276
277
enum {
278
  CCV_NNC_MICRO_REDUCE_OP_MAX,
279
  CCV_NNC_MICRO_REDUCE_OP_MIN,
280
  CCV_NNC_MICRO_REDUCE_OP_ARGMAX,
281
  CCV_NNC_MICRO_REDUCE_OP_ARGMIN,
282
  CCV_NNC_MICRO_REDUCE_OP_MEAN, // Mean is complicated, we need a way to compute total for loops after this. It has to be done statically, and that is "interesting".
283
  CCV_NNC_MICRO_REDUCE_OP_SUM,
284
  CCV_NNC_MICRO_REDUCE_OP_PROD,
285
};
286
287
/**
288
 * Abstract micro op representation.
289
 */
290
typedef struct ccv_nnc_micro_io_s* ccv_nnc_micro_io_t;
291
292
struct ccv_nnc_micro_io_s {
293
  const ccv_nnc_micro_io_vtab_t* isa;
294
  ccv_nnc_micro_io_t* inputs;
295
  int input_size;
296
  int dimensions;
297
  int id;
298
};
299
300
typedef struct {
301
  // Type of the scalar is about precision, nothing to restrict the tensor's type. For example, we may assign a int32_t 0
302
  // to a float16 tensor element, this is perfectly fine.
303
  int type;
304
  union {
305
    unsigned char u8;
306
    int i32;
307
    ccv_float16_t f16;
308
    float f32;
309
    int64_t i64;
310
    uint64_t u64;
311
    double f64;
312
  };
313
} ccv_nnc_micro_scalar_t;
314
315
/**
316
 * Create a free-form input that represent a tensor.
317
 * @param dimensions The maximum dimension of the input.
318
 */
319
CCV_WARN_UNUSED(ccv_nnc_micro_io_t) ccv_nnc_micro_input(const int dimensions);
320
/**
321
 * Use shape and reindex expression to reindex the given tensor into a different shape.
322
 * The expressions can bind integer parameters which starts with $.
323
 *
324
 * The expression follows specific pattern, integer parameters starts with $. Dimensions are represented as dn, such
325
 * as d0, d1, d2 ... Index into the provided tensor can be represented as i0, i1, i2. These are all 0-indexed.
326
 *
327
 * Constants are supported, such as 235, 431 etc. Operators supported currently are -, +, /, *.
328
 *
329
 * Thus, for broadcast a tensor x[w, h] to y[w, h, h], it can be represented as:
330
 * shape: { "d0", "d1", "d1" }, reindex: { "i0", "i1", "0" }.
331
 * For example, transpose can be represented as:
332
 * shape: { "d1", "d0" }, reindex: { "i1", "i0" }
333
 *
334
 * @param shape The shape expressions per axis.
335
 * @param shape_count The dimensions of the output.
336
 * @param s The tensor to reference shape dimensions.
337
 * @param reindex The reindex expressions per axis.
338
 * @param reindex_count The dimensions of the input.
339
 * @param x The input for reindex operation.
340
 * @return The reindexed tensor.
341
 */
342
CCV_WARN_UNUSED(ccv_nnc_micro_io_t) ccv_nnc_micro_reindex(const char* const* const shape, const int shape_count, const ccv_nnc_micro_io_t s, const char* const* const reindex, const int reindex_count, const ccv_nnc_micro_io_t x);
343
/**
344
 * Apply element-wise computations with one tensor.
345
 * @param op The binary operand.
346
 * @param x The input.
347
 * @return The result tensor.
348
 */
349
CCV_WARN_UNUSED(ccv_nnc_micro_io_t) ccv_nnc_micro_unary(const uint32_t op, const ccv_nnc_micro_io_t x);
350
/**
351
 * Apply pair-wise computations with two tensors. They has to match shape exactly.
352
 * @param op The binary operand.
353
 * @param left The left input.
354
 * @param right The right input.
355
 * @return The result tensor.
356
 */
357
CCV_WARN_UNUSED(ccv_nnc_micro_io_t) ccv_nnc_micro_binary(const uint32_t op, const ccv_nnc_micro_io_t left, const ccv_nnc_micro_io_t right);
358
/**
359
 * Apply reduction computation against some dimensions and generate the final reduced tensor.
360
 * @param op The reduction operand.
361
 * @param axis The axis to reduce.
362
 * @param axis_count Number of axes.
363
 * @param x The input tensor.
364
 * @return The result tensor after reduction.
365
 */
366
CCV_WARN_UNUSED(ccv_nnc_micro_io_t) ccv_nnc_micro_reduce(const uint8_t op, const int* const axis, const int axis_count, const ccv_nnc_micro_io_t x);
367
/**
368
 * Use the index tensor to select one value from the x per axis.
369
 * @param axis The axis to select.
370
 * @param x The tensor to be indexed.
371
 * @param index The integer tensor of indexes.
372
 * @return The result tensor with values selected from x with index from index tensor.
373
 */
374
CCV_WARN_UNUSED(ccv_nnc_micro_io_t) ccv_nnc_micro_select(const int axis, const ccv_nnc_micro_io_t x, const ccv_nnc_micro_io_t index);
375
/**
376
 * Return the gradient for a particular output. For example, if x is ccv_nnc_micro_unary(exp, input),
377
 * this represents the gradient of x, not the input. This method is used to generate representation
378
 * of gradients for ccv_nnc_micro_combine_new method.
379
 * @param x The tensor to take a gradient of.
380
 * @return The result tensor that represents the gradient of x.
381
 */
382
CCV_WARN_UNUSED(ccv_nnc_micro_io_t) ccv_nnc_micro_grad(const ccv_nnc_micro_io_t x);
383
/**
384
 * The combined op from micro ops.
385
 */
386
typedef struct ccv_nnc_micro_combine_s ccv_nnc_micro_combine_t;
387
/**
388
 * Combine micro ops into one, and do some optimization passes. The combined one can be then processed to generate
389
 * optimized kernels. Particularly, we can processed the combined one into C code and CUDA code as reference
390
 * implementations.
391
 * @param inputs The inputs for the combined ops.
392
 * @param input_size The number of the inputs.
393
 * @param parameters The name of the parameters, this determines the order of the these parameters.
394
 * @param parameter_size The number of parameters.
395
 * @param outputs The outputs for the combined ops.
396
 * @param output_size The number of the outputs.
397
 * @param ingrads The gradient inputs for the combined ops, including any inputs / outputs if there are any.
398
 * @param ingrad_size The number of ingrads.
399
 * @param outgrads The gradient outputs for the combined ops.
400
 * @param outgrad_size The number of outgrads.
401
 */
402
CCV_WARN_UNUSED(ccv_nnc_micro_combine_t*) ccv_nnc_micro_combine_new(const ccv_nnc_micro_io_t* const inputs, const int input_size, const char* const* const parameters, const int parameter_size, const ccv_nnc_micro_io_t* const outputs, const int output_size, const ccv_nnc_micro_io_t* const ingrads, const int ingrad_size, const ccv_nnc_micro_io_t* const outgrads, const int outgrad_size);
403
/**
404
 * Free the combined op.
405
 * @param combine The op to be freed.
406
 */
407
void ccv_nnc_micro_combine_free(ccv_nnc_micro_combine_t* const combine);
408
/**
409
 * Run combined op in interpret mode. This is only useful for debug internals. Because this is for
410
 * generic combined op, there is no hint, or flags, or stream context, or cmd.
411
 * @param combine The op.
412
 * @param cmd Choice between CMD_CUSTOM_FORWARD and CMD_CUSTOM_BACKWARD.
413
 * @param inputs The input tensors.
414
 * @param input_size The size of input tensors.
415
 * @param values The value corresponding to the parameters when call ccv_nnc_micro_combine_new.
416
 * @param parameter_size How many parameters. It must match when called ccv_nnc_micro_combine_new.
417
 * @param outputs The output tensors.
418
 * @param output_size The size of output tensors.
419
 */
420
void ccv_nnc_micro_combine_interpret(ccv_nnc_micro_combine_t* const combine, const uint32_t cmd, ccv_nnc_tensor_t* const* const inputs, const int input_size, const ccv_nnc_micro_scalar_t* const values, const int parameter_size, ccv_nnc_tensor_t* const* const outputs, const int output_size);
421
/**
422
 * Generate C code from the combined op.
423
 * @param combine The combined op to generate some C code.
424
 * @return The generated C code string.
425
 */
426
char* ccv_nnc_micro_combine_c(ccv_nnc_micro_combine_t* const combine);
427
428
/** @} */
429
430
/**
431
 * @defgroup level_1_tensor Tensors
432
 * @{
433
 */
434
435
/**
436
 * Count the dimensionality of a tensor.
437
 */
438
static inline int ccv_nnc_tensor_nd(const int dim[CCV_NNC_MAX_DIM_ALLOC])
439
2.00M
{
440
2.00M
  int i;
441
5.26M
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++3.26M
)
442
5.27M
    if (dim[i] == 0)
443
2.01M
      return i;
444
18.4E
  return CCV_NNC_MAX_DIM_ALLOC;
445
2.00M
}
Unexecuted instantiation: gradient.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: upsample.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: concat.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: tensor.bind.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: backward.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: graph.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: case_of.backward.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: while.backward.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: autograd.vector.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: dropout.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: custom.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: reduce.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: tfb.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: batch.norm.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: crossentropy.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: cnnp.core.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: symbolic.graph.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: case_of.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: micro.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: compression.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: transform.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: dataframe.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: gemm.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: roi_align.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: swish.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: index.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: minimize.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: symbolic.graph.compile.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: autograd.tests.c:ccv_nnc_tensor_nd
tensor.tests.c:ccv_nnc_tensor_nd
Line
Count
Source
439
1
{
440
1
  int i;
441
2
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++1
)
442
2
    if (dim[i] == 0)
443
1
      return i;
444
1
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
1
}
Unexecuted instantiation: rand.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: while.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: nms.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: graph.io.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: cblas.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: simplify.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: numa.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: tape.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: dynamic.graph.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: layer.norm.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: parallel.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: winograd.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: dataframe.addons.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: broadcast.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: compare.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: smooth_l1.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: forward.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: cublas.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: symbolic.graph.vgg.d.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: imdb.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: graph.vgg.d.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: random.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: cudnn.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: dense.net.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: cifar.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: rmsprop.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: sgd.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: nccl.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: schedule.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: loss.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: adam.tests.c:ccv_nnc_tensor_nd
ccv_nnc_cmd.c:ccv_nnc_tensor_nd
Line
Count
Source
439
206k
{
440
206k
  int i;
441
506k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++300k
)
442
506k
    if (dim[i] == 0)
443
206k
      return i;
444
206k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
206k
}
ccv_nnc_tensor.c:ccv_nnc_tensor_nd
Line
Count
Source
439
140
{
440
140
  int i;
441
422
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++282
)
442
422
    if (dim[i] == 0)
443
140
      return i;
444
140
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
140
}
Unexecuted instantiation: ccv_nnc_tensor_io.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_stream.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_micro.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_micro_core.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_micro_interpret.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_micro_simplify.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_graph.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_symbolic_graph.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_symbolic_graph_io.c:ccv_nnc_tensor_nd
ccv_nnc_symbolic_graph_compile.c:ccv_nnc_tensor_nd
Line
Count
Source
439
1.00k
{
440
1.00k
  int i;
441
2.00k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++1.00k
)
442
2.00k
    if (dim[i] == 0)
443
1.00k
      return i;
444
1.00k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
1.00k
}
Unexecuted instantiation: ccv_nnc_symbolic_graph_backward.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_symbolic_graph_while.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_graph_while.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_tensor_tape.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_symbolic_graph_case_of.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_graph_case_of.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_symbolic_graph_minimize.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_symbolic_graph_parallel.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_symbolic_graph_simplify.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_symbolic_graph_memory_compression.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_graph_run.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_dynamic_graph.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_dynamic_graph_alloc.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_dynamic_graph_backward.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_dynamic_graph_apply_gradients.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_dynamic_graph_minimize.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_dynamic_graph_evaluate.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_cnnp_dataframe.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_cnnp_dataframe_core.c:ccv_nnc_tensor_nd
ccv_cnnp_dataframe_addons.c:ccv_nnc_tensor_nd
Line
Count
Source
439
145k
{
440
145k
  int i;
441
573k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++427k
)
442
579k
    if (dim[i] == 0)
443
151k
      return i;
444
18.4E
  return CCV_NNC_MAX_DIM_ALLOC;
445
145k
}
Unexecuted instantiation: ccv_cnnp_dataframe_csv.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_cnnp_model.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_cnnp_model_io.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_cnnp_model_core.c:ccv_nnc_tensor_nd
ccv_cnnp_model_addons.c:ccv_nnc_tensor_nd
Line
Count
Source
439
2.76k
{
440
2.76k
  int i;
441
7.06k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++4.30k
)
442
7.06k
    if (dim[i] == 0)
443
2.76k
      return i;
444
2.76k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
2.76k
}
Unexecuted instantiation: ccv_nnc_rand_uniform_cpu_ref.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_rand_normal_cpu_ref.c:ccv_nnc_tensor_nd
ccv_nnc_conv_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
439
4.69k
{
440
4.69k
  int i;
441
22.0k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++17.4k
)
442
22.0k
    if (dim[i] == 0)
443
4.69k
      return i;
444
4.69k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
4.69k
}
ccv_nnc_conv_cpu_opt.c:ccv_nnc_tensor_nd
Line
Count
Source
439
3.24k
{
440
3.24k
  int i;
441
15.3k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++12.0k
)
442
15.3k
    if (dim[i] == 0)
443
3.24k
      return i;
444
3.24k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
3.24k
}
Unexecuted instantiation: ccv_nnc_swish_cpu_ref.c:ccv_nnc_tensor_nd
ccv_nnc_dropout_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
439
24
{
440
24
  int i;
441
72
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++48
)
442
72
    if (dim[i] == 0)
443
24
      return i;
444
24
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
24
}
ccv_nnc_softmax_crossentropy_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
439
1.24k
{
440
1.24k
  int i;
441
3.12k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++1.87k
)
442
3.12k
    if (dim[i] == 0)
443
1.24k
      return i;
444
1.24k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
1.24k
}
ccv_nnc_sgd_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
439
212k
{
440
212k
  int i;
441
546k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++333k
)
442
546k
    if (dim[i] == 0)
443
212k
      return i;
444
212k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
212k
}
ccv_nnc_max_pool_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
439
2.12k
{
440
2.12k
  int i;
441
10.5k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++8.46k
)
442
10.5k
    if (dim[i] == 0)
443
2.12k
      return i;
444
2.12k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
2.12k
}
ccv_nnc_avg_pool_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
439
2.73k
{
440
2.73k
  int i;
441
13.6k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++10.9k
)
442
13.6k
    if (dim[i] == 0)
443
2.73k
      return i;
444
2.73k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
2.73k
}
ccv_nnc_sigmoid_binary_crossentropy_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
439
238
{
440
238
  int i;
441
698
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++460
)
442
698
    if (dim[i] == 0)
443
238
      return i;
444
238
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
238
}
ccv_nnc_lssc_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
439
72
{
440
72
  int i;
441
336
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++264
)
442
336
    if (dim[i] == 0)
443
72
      return i;
444
72
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
72
}
ccv_nnc_min_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
439
82
{
440
82
  int i;
441
312
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++230
)
442
312
    if (dim[i] == 0)
443
82
      return i;
444
82
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
82
}
ccv_nnc_max_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
439
82
{
440
82
  int i;
441
312
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++230
)
442
312
    if (dim[i] == 0)
443
82
      return i;
444
82
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
82
}
ccv_nnc_softmax_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
439
510
{
440
510
  int i;
441
1.03k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++523
)
442
1.03k
    if (dim[i] == 0)
443
510
      return i;
444
510
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
510
}
ccv_nnc_binary_crossentropy_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
439
194
{
440
194
  int i;
441
550
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++356
)
442
550
    if (dim[i] == 0)
443
194
      return i;
444
194
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
194
}
ccv_nnc_categorical_crossentropy_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
439
35
{
440
35
  int i;
441
93
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++58
)
442
93
    if (dim[i] == 0)
443
35
      return i;
444
35
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
35
}
ccv_nnc_smooth_l1_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
439
63
{
440
63
  int i;
441
177
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++114
)
442
177
    if (dim[i] == 0)
443
63
      return i;
444
63
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
63
}
Unexecuted instantiation: ccv_nnc_relu_cpu_ref.c:ccv_nnc_tensor_nd
ccv_nnc_adam_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
439
16.0k
{
440
16.0k
  int i;
441
48.0k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++32.0k
)
442
48.0k
    if (dim[i] == 0)
443
16.0k
      return i;
444
16.0k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
16.0k
}
ccv_nnc_nms_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
439
18
{
440
18
  int i;
441
48
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++30
)
442
48
    if (dim[i] == 0)
443
18
      return i;
444
18
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
18
}
ccv_nnc_gemm_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
439
64.3k
{
440
64.3k
  int i;
441
176k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++112k
)
442
176k
    if (dim[i] == 0)
443
64.3k
      return i;
444
64.3k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
64.3k
}
ccv_nnc_gemm_cpu_opt.c:ccv_nnc_tensor_nd
Line
Count
Source
439
912
{
440
912
  int i;
441
2.32k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++1.41k
)
442
2.32k
    if (dim[i] == 0)
443
912
      return i;
444
912
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
912
}
ccv_nnc_add_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
439
204k
{
440
204k
  int i;
441
477k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++272k
)
442
477k
    if (dim[i] == 0)
443
204k
      return i;
444
204k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
204k
}
ccv_nnc_mul_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
439
104k
{
440
104k
  int i;
441
210k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++105k
)
442
210k
    if (dim[i] == 0)
443
104k
      return i;
444
104k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
104k
}
ccv_nnc_upsample_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
439
24
{
440
24
  int i;
441
96
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++72
)
442
96
    if (dim[i] == 0)
443
24
      return i;
444
24
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
24
}
ccv_nnc_util_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
439
200
{
440
200
  int i;
441
734
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++534
)
442
734
    if (dim[i] == 0)
443
200
      return i;
444
200
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
200
}
ccv_nnc_roi_align_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
439
65
{
440
65
  int i;
441
261
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++196
)
442
261
    if (dim[i] == 0)
443
65
      return i;
444
65
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
65
}
Unexecuted instantiation: ccv_nnc_sigmoid_cpu_ref.c:ccv_nnc_tensor_nd
ccv_nnc_index_select_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
439
27
{
440
27
  int i;
441
68
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++41
)
442
68
    if (dim[i] == 0)
443
27
      return i;
444
27
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
27
}
ccv_nnc_rmsprop_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
439
16.0k
{
440
16.0k
  int i;
441
48.0k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++32.0k
)
442
48.0k
    if (dim[i] == 0)
443
16.0k
      return i;
444
16.0k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
16.0k
}
ccv_nnc_ew_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
439
725k
{
440
725k
  int i;
441
1.78M
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++1.06M
)
442
1.78M
    if (dim[i] == 0)
443
725k
      return i;
444
725k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
725k
}
ccv_nnc_reduce_sum_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
439
65.3k
{
440
65.3k
  int i;
441
196k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++131k
)
442
196k
    if (dim[i] == 0)
443
65.3k
      return i;
444
65.3k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
65.3k
}
ccv_nnc_reduce_max_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
439
41
{
440
41
  int i;
441
99
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++58
)
442
99
    if (dim[i] == 0)
443
41
      return i;
444
41
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
41
}
ccv_nnc_batch_norm_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
439
438
{
440
438
  int i;
441
1.41k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++975
)
442
1.41k
    if (dim[i] == 0)
443
438
      return i;
444
438
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
438
}
ccv_nnc_layer_norm_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
439
90
{
440
90
  int i;
441
450
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++360
)
442
450
    if (dim[i] == 0)
443
90
      return i;
444
90
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
90
}
Unexecuted instantiation: ccv_nnc_rand.c:ccv_nnc_tensor_nd
_ccv_nnc_conv_cpu_4x4_3x3_winograd.c:ccv_nnc_tensor_nd
Line
Count
Source
439
226
{
440
226
  int i;
441
904
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++678
)
442
904
    if (dim[i] == 0)
443
226
      return i;
444
226
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
226
}
Unexecuted instantiation: _ccv_nnc_conv_cpu_fft.c:ccv_nnc_tensor_nd
Unexecuted instantiation: _ccv_nnc_conv_cpu_gemm.c:ccv_nnc_tensor_nd
_ccv_nnc_conv_cpu_opt.c:ccv_nnc_tensor_nd
Line
Count
Source
439
2.26k
{
440
2.26k
  int i;
441
11.0k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++8.74k
)
442
11.0k
    if (dim[i] == 0)
443
2.26k
      return i;
444
2.26k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
2.26k
}
ccv_nnc_convolution.c:ccv_nnc_tensor_nd
Line
Count
Source
439
8.70k
{
440
8.70k
  int i;
441
43.4k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++34.7k
)
442
43.4k
    if (dim[i] == 0)
443
8.70k
      return i;
444
8.70k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
8.70k
}
Unexecuted instantiation: ccv_nnc_swish.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_dropout.c:ccv_nnc_tensor_nd
ccv_nnc_softmax_crossentropy.c:ccv_nnc_tensor_nd
Line
Count
Source
439
860
{
440
860
  int i;
441
2.56k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++1.70k
)
442
2.56k
    if (dim[i] == 0)
443
860
      return i;
444
860
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
860
}
Unexecuted instantiation: ccv_nnc_sgd.c:ccv_nnc_tensor_nd
ccv_nnc_pool.c:ccv_nnc_tensor_nd
Line
Count
Source
439
2.06k
{
440
2.06k
  int i;
441
10.3k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++8.23k
)
442
10.3k
    if (dim[i] == 0)
443
2.06k
      return i;
444
2.06k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
2.06k
}
ccv_nnc_sigmoid_binary_crossentropy.c:ccv_nnc_tensor_nd
Line
Count
Source
439
28
{
440
28
  int i;
441
84
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++56
)
442
84
    if (dim[i] == 0)
443
28
      return i;
444
28
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
28
}
ccv_nnc_compression.c:ccv_nnc_tensor_nd
Line
Count
Source
439
10
{
440
10
  int i;
441
48
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++38
)
442
48
    if (dim[i] == 0)
443
10
      return i;
444
10
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
10
}
Unexecuted instantiation: ccv_nnc_cmp.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_softmax.c:ccv_nnc_tensor_nd
ccv_nnc_binary_crossentropy.c:ccv_nnc_tensor_nd
Line
Count
Source
439
12
{
440
12
  int i;
441
36
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++24
)
442
36
    if (dim[i] == 0)
443
12
      return i;
444
12
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
12
}
ccv_nnc_categorical_crossentropy.c:ccv_nnc_tensor_nd
Line
Count
Source
439
31
{
440
31
  int i;
441
96
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++65
)
442
96
    if (dim[i] == 0)
443
31
      return i;
444
31
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
31
}
ccv_nnc_smooth_l1.c:ccv_nnc_tensor_nd
Line
Count
Source
439
4
{
440
4
  int i;
441
12
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++8
)
442
12
    if (dim[i] == 0)
443
4
      return i;
444
4
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
4
}
Unexecuted instantiation: ccv_nnc_relu.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_adam.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_nms.c:ccv_nnc_tensor_nd
ccv_nnc_blas.c:ccv_nnc_tensor_nd
Line
Count
Source
439
96.5k
{
440
96.5k
  int i;
441
232k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++135k
)
442
232k
    if (dim[i] == 0)
443
96.5k
      return i;
444
96.5k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
96.5k
}
_ccv_nnc_gemm_cpu_opt.c:ccv_nnc_tensor_nd
Line
Count
Source
439
534
{
440
534
  int i;
441
1.41k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++885
)
442
1.41k
    if (dim[i] == 0)
443
534
      return i;
444
534
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
534
}
_ccv_nnc_gemm_cpu_sys.c:ccv_nnc_tensor_nd
Line
Count
Source
439
114k
{
440
114k
  int i;
441
313k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++199k
)
442
313k
    if (dim[i] == 0)
443
114k
      return i;
444
114k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
114k
}
ccv_nnc_upsample.c:ccv_nnc_tensor_nd
Line
Count
Source
439
12
{
440
12
  int i;
441
60
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++48
)
442
60
    if (dim[i] == 0)
443
12
      return i;
444
12
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
445
12
}
Unexecuted instantiation: ccv_nnc_comm.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_util.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_roi_align.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_sigmoid.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_index_select.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_rmsprop.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_ew.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_reduce.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_norm.c:ccv_nnc_tensor_nd
446
447
/**
448
 * Create a new tensor.
449
 * @param ptr If 0, nnc will allocate the tensor ourselves. Otherwise, will use the memory region referenced by 'ptr'.
450
 * @param params Tensor parameters.
451
 * @param flags Reserved flags for the allocation.
452
 * @return The newly created tensor.
453
 */
454
CCV_WARN_UNUSED(ccv_nnc_tensor_t*) ccv_nnc_tensor_new(const void* const ptr, const ccv_nnc_tensor_param_t params, const int flags);
455
/**
456
 * Create a new tensor on stack.
457
 * @param ptr If 0, nnc will allocate the tensor ourselves. Otherwise, will use the memory region referenced by 'ptr'.
458
 * @param params Tensor parameters.
459
 * @param flags Reserved flags for the allocation.
460
 * @return The tensor struct.
461
 */
462
CCV_WARN_UNUSED(ccv_nnc_tensor_t) ccv_nnc_tensor(const void* const ptr, const ccv_nnc_tensor_param_t params, const int flags);
463
/**
464
 * Resize an existing tensor to a new dimension.
465
 * @param tensor The old tensor to be resized.
466
 * @param params Tensor parameters.
467
 * @return Potentially a new tensor, but if the size is sufficient, it will be in-place operation.
468
 */
469
CCV_WARN_UNUSED(ccv_nnc_tensor_t*) ccv_nnc_tensor_resize(ccv_nnc_tensor_t* const tensor, const ccv_nnc_tensor_param_t params);
470
/**
471
 * Pin the tensor memory for faster access on GPU.
472
 * @param tensor A tensor that we want to pin the memory.
473
 * @return 0 for success.
474
 */
475
int ccv_nnc_tensor_pin_memory(ccv_nnc_tensor_t* const tensor);
476
/**
477
 * Free a tensor object.
478
 * @param tensor The tensor to be freed.
479
 */
480
void ccv_nnc_tensor_free(ccv_nnc_tensor_t* const tensor);
481
/**
482
 * Create a tensor view. A tensor view can be non-continuous. Essentially, it provides a view into a tensor.
483
 * @param tensor The tensor that we want to view into.
484
 * @param params The tensor parameters for the tensor view.
485
 * @param ofs The offset on each of the dimension.
486
 * @param inc The line size of each dimension.
487
 * @return The newly created tensor view.
488
 */
489
CCV_WARN_UNUSED(ccv_nnc_tensor_view_t*) ccv_nnc_tensor_view_new(const ccv_nnc_tensor_t* const tensor, const ccv_nnc_tensor_param_t params, const int ofs[CCV_NNC_MAX_DIM_ALLOC], const int inc[CCV_NNC_MAX_DIM_ALLOC]);
490
/**
491
 * Create a tensor view on stack.
492
 * @param tensor The tensor that we want to view into.
493
 * @param params The tensor parameters for the tensor view.
494
 * @param ofs The offset on each of the dimension.
495
 * @param inc The line size of each dimension.
496
 * @return The tensor view struct.
497
 */
498
CCV_WARN_UNUSED(ccv_nnc_tensor_view_t) ccv_nnc_tensor_view(const ccv_nnc_tensor_t* const tensor, const ccv_nnc_tensor_param_t params, const int ofs[CCV_NNC_MAX_DIM_ALLOC], const int inc[CCV_NNC_MAX_DIM_ALLOC]);
499
/**
500
 * Free a tensor view object.
501
 * @param tensor_view The tensor view to be freed.
502
 */
503
void ccv_nnc_tensor_view_free(ccv_nnc_tensor_view_t* const tensor_view);
504
/**
505
 * Zero out a given tensor.
506
 * @param tensor The tensor to be zero out.
507
 */
508
void ccv_nnc_tensor_zero(void* const tensor);
509
/**
510
 * Compare whether two tensors are equal. This will tolerant some floating point issues follow http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm
511
 * @param a Tensor a.
512
 * @param b Tensor b.
513
 * @return 0 if equal, -1 otherwise.
514
 */
515
CCV_WARN_UNUSED(int) ccv_nnc_tensor_eq(const ccv_nnc_tensor_t* const a, const ccv_nnc_tensor_t* const b);
516
/**
517
 * Write tensor to a SQLite database with a given name.
518
 * @param tensor The tensor.
519
 * @param handle The SQLite handle.
520
 * @param name The name to find the tensor in the database.
521
 * @return CCV_IO_FINAL for success, otherwise error.
522
 */
523
int ccv_nnc_tensor_write(const ccv_nnc_tensor_t* const tensor, void* const handle, const char* const name);
524
/**
525
 * Read a tensor from a SQLite database with a given name.
526
 * @param handle The SQLite handle.
527
 * @param name The name to find the tensor in the database.
528
 * @param tensor_out The pointer to hold the tensor. If you supply the tensor yourself, we will read the data into the existing tensor.
529
 * @return CCV_IO_FINAL for success, otherwise error.
530
 */
531
int ccv_nnc_tensor_read(void* const handle, const char* const name, ccv_nnc_tensor_t** const tensor_out);
532
533
/** @} */
534
535
/**
536
 * @addtogroup level_1_cmd
537
 * @{
538
 */
539
540
/**
541
 * Return a high precision time unit. What this time unit is is platform specific.
542
 * @return A monotonic increasing 64-bit integer w.r.t. passing of time.
543
 */
544
uint64_t ccv_nnc_cmd_mono_time(void);
545
/**
546
 * Return UTF-8 encoded name of a given command.
547
 * @return A UTF-8 string (pointing to a static constant).
548
 */
549
CCV_WARN_UNUSED(const char*) ccv_nnc_cmd_name(const uint32_t cmd);
550
/**
551
 * Return UTF-8 encoded name of a given backend.
552
 * @return A UTF-8 string (pointing to a static constant).
553
 */
554
CCV_WARN_UNUSED(const char*) ccv_nnc_cmd_backend_name(const uint32_t backend);
555
/**
556
 * Check whether a given backend is available for a given command.
557
 * @return 1 if it is available.
558
 */
559
CCV_WARN_UNUSED(int) ccv_nnc_cmd_ok(const uint32_t cmd, const uint32_t backend);
560
/**
561
 * Create a wrapped command with parameters.
562
 * @param cmd The command identifier.
563
 * @param isa If this is a CCV_NNC_CUSTOM_FORWARD / CCV_NNC_CUSTOM_BACKWARD command, this supplies the custom functions.
564
 * @param params The parameters for the command.
565
 * @param flags A reserved field for flags.
566
 * @return A wrapped ccv_nnc_cmd_t structure.
567
 */
568
CCV_WARN_UNUSED(ccv_nnc_cmd_t) ccv_nnc_cmd(const uint32_t cmd, ccv_nnc_cmd_vtab_t* const isa, const ccv_nnc_cmd_param_t params, const int flags);
569
/**
570
 * Verify whether a hint is compatible with a given command and a given input tensor parameters / output tensor parameters.
571
 * @param hint The hint for a given command. Hint defines things such as paddings, strides etc. for a given command.
572
 * @param cmd The wrapped command.
573
 * @param a The input tensor parameters.
574
 * @param b The output tensor parameters.
575
 * @return 1 if it passes.
576
 */
577
CCV_WARN_UNUSED(int) ccv_nnc_hint_verify(const ccv_nnc_hint_t hint, const ccv_nnc_cmd_param_t cmd, const ccv_nnc_tensor_param_t a, const ccv_nnc_tensor_param_t b);
578
/**
579
 * Automatically find the best hint for a given input / output (on forward pass only).
580
 * @param cmd The wrapped command.
581
 * @param a The input tensor parameters.
582
 * @param b The output tensor parameters.
583
 * @return Best hint we can guess.
584
 */
585
CCV_WARN_UNUSED(ccv_nnc_hint_t) ccv_nnc_hint_auto(const ccv_nnc_cmd_param_t cmd, const ccv_nnc_tensor_param_t a, const ccv_nnc_tensor_param_t b);
586
/**
587
 * Automatically find the outputs for the given inputs / hint.
588
 * @param cmd The wrapped command.
589
 * @param inputs An array of input tensor parameters.
590
 * @param input_size The size of input array.
591
 * @param hint The hint for the given command.
592
 * @param outputs An array for the output tensor parameters.
593
 * @param output_size The size of the output array.
594
 */
595
void ccv_nnc_hint_tensor_auto(const ccv_nnc_cmd_t cmd, const ccv_nnc_tensor_param_t* const inputs, const int input_size, const ccv_nnc_hint_t hint, ccv_nnc_tensor_param_t* const outputs, const int output_size);
596
/**
597
 * Find a suitable backend for a given command and tensor settings.
598
 * @param cmd The wrapped command.
599
 * @param tensor_memory The tensor memory setup (whether it is CPU or GPU).
600
 * @param tensor_formats The tensor layout format (NCHW, NHWC, CHWN etc.)
601
 * @param tensor_datatypes The datatype of a given tensor (FP32 etc.)
602
 * @return The backend identifier for the selected backend.
603
 */
604
CCV_WARN_UNUSED(uint32_t) ccv_nnc_cmd_find_backend(const ccv_nnc_cmd_t cmd, const int tensor_memory, const int tensor_formats, const int tensor_datatypes);
605
/**
606
 * Run autotune to find the best kernel and configuration for the given input.
607
 * @param cmd The original wrapped command.
608
 * @param max_workspace_size The maximum memory allowed for this command to execute.
609
 * @param hint The hint for the given command.
610
 * @param flags The reserved field for flags.
611
 * @param inputs An array of input tensors.
612
 * @param input_size The size of input array.
613
 * @param outputs An array of output tensors.
614
 * @param output_size The size of output array.
615
 * @param stream_context The stream we can do the autotune on. 0 uses default stream.
616
 * @return The modified cmd that contains the updated configuration.
617
 */
618
CCV_WARN_UNUSED(ccv_nnc_cmd_t) ccv_nnc_cmd_autotune(const ccv_nnc_cmd_t cmd, const size_t max_workspace_size, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context);
619
/**
620
 * Check whether a given tensor input / output pattern can be computed by the given command.
621
 * bitmasks encode whether a given input tensor / output tensor available at a position.
622
 * @param cmd The wrapped command to check.
623
 * @param input_size The intended size of the input tensor array.
624
 * @param output_size The intended size of the output tensor array.
625
 * @param input_bitmasks The input tensor array encoding in bitmap, 0: no tensor, 1: has a tensor.
626
 * @param input_bitmask_size The size of the input bitmask array.
627
 * @param output_bitmasks The output tensor array encoding in bitmap.
628
 * @param output_bitmask_size The size of the output bitmask array.
629
 * @return 1 if the command can be executed with the given input / output pattern.
630
 */
631
CCV_WARN_UNUSED(int) ccv_nnc_cmd_bitmask(const ccv_nnc_cmd_t cmd, const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size);
632
/**
633
 * Execute a given command.
634
 * @param cmd The wrapped command to be executed.
635
 * @param hint The hint provided for the command.
636
 * @param flags A reserved field for flags.
637
 * @param inputs The input tensor array.
638
 * @param input_size The size of input tensor array.
639
 * @param outputs The output tensor array.
640
 * @param output_size The size of output tensor array.
641
 * @param stream_context The stream which the command will be executed upon.
642
 * @return CCV_NNC_EXEC_SUCCESS if succeed.
643
 */
644
int ccv_nnc_cmd_exec(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context);
645
/**
646
 * Check whether the command is a forward pass or not.
647
 * @param cmd The wrapped command.
648
 * @return 1 if it is a forward pass.
649
 */
650
CCV_WARN_UNUSED(int) ccv_nnc_cmd_is_forward(const ccv_nnc_cmd_t cmd);
651
/**
652
 * Check whether the command is a backward pass or not.
653
 * @param cmd The wrapped command.
654
 * @return 1 if it is a backward pass.
655
 */
656
CCV_WARN_UNUSED(int) ccv_nnc_cmd_is_backward(const ccv_nnc_cmd_t cmd);
657
/**
658
 * Check this command against listed attributes.
659
 * @param cmd The wrapped command.
660
 * @param flags The flags to check against the command (unsupported).
661
 * @return 1 if the flag is supported by the command.
662
 */
663
CCV_WARN_UNUSED(int) ccv_nnc_cmd_attr(const ccv_nnc_cmd_t cmd, const int flags);
664
/**
665
 * Check whether this command allow inplace operation against a particular input and output (index from 0).
666
 * @param cmd The wrapped command.
667
 * @param input_idx The index of the input tensor we want to check.
668
 * @param input_size The total number of inputs.
669
 * @param output_idx the index of the output tensor we want to check.
670
 * @param output_size The total number of outputs.
671
 * @return 1 if the input tensor can be used as the output tensor.
672
 */
673
CCV_WARN_UNUSED(int) ccv_nnc_cmd_allow_inplace(const ccv_nnc_cmd_t cmd, const int input_idx, const int input_size, const int output_idx, const int output_size);
674
/**
675
 * Check whether this command need to enforce inplace operation against a particular input and output (index from 0).
676
 * @param cmd The wrapped command.
677
 * @param input_idx The index of the input tensor we want to check.
678
 * @param input_size The total number of inputs.
679
 * @param output_idx the index of the output tensor we want to check.
680
 * @param output_size The total number of outputs.
681
 * @return 1 if the input tensor is required to be used as the output tensor.
682
 */
683
CCV_WARN_UNUSED(int) ccv_nnc_cmd_enforce_inplace(const ccv_nnc_cmd_t cmd, const int input_idx, const int input_size, const int output_idx, const int output_size);
684
685
/** @} */
686
687
/**
688
 * @defgroup level_1_stream Streams
689
 * @{
690
 */
691
692
// Control flow constructs
693
// Follow heavily based along CUDA's stream / event idea.
694
enum {
695
  CCV_STREAM_CONTEXT_CPU = 0x1, /**< A CPU based stream context (unsupported). */
696
  CCV_STREAM_CONTEXT_GPU = 0x2, /**< A GPU based stream context. */
697
};
698
206k
#define CCV_STREAM_GET_CONTEXT(type) ((type) & 0x3)
699
#define CCV_STREAM_GET_DEVICE(type) CCV_TENSOR_GET_DEVICE(type)
700
44.9k
#define CCV_STREAM_GET_DEVICE_ID(type) CCV_TENSOR_GET_DEVICE_ID(type)
701
3.06k
#define CCV_STREAM_SET_DEVICE_ID(type, device_id) CCV_TENSOR_SET_DEVICE_ID(type, device_id)
702
/**
703
 * Create a new stream context.
704
 * @param type A combination of CPU / GPU and DEVICE_ID.
705
 * @return The newly created stream context.
706
 */
707
CCV_WARN_UNUSED(ccv_nnc_stream_context_t*) ccv_nnc_stream_context_new(const int type);
708
/**
709
 * Get the type of the stream context.
710
 * @param stream_context The stream context we want to inspect.
711
 * @return The type of the stream context.
712
 */
713
CCV_WARN_UNUSED(int) ccv_nnc_stream_context_type(const ccv_nnc_stream_context_t* const stream_context);
714
/**
715
 * Get a stream context local workspace memory. This memory region will be reused
716
 * the next time when you call this method on the same stream context.
717
 * @param stream_context The stream context which provides the workspace memory.
718
 * @param workspace_size The size of the workspace memory.
719
 * @param mem The memory type of the said workspace memory (GPU or CPU).
720
 * @return A pointer to the workspace memory.
721
 */
722
CCV_WARN_UNUSED(void*) ccv_nnc_stream_context_get_workspace(ccv_nnc_stream_context_t* const stream_context, const size_t workspace_size, const int mem);
723
/**
724
 * Deallocate any workspace memory on the stream context.
725
 * @param stream The stream context to drain workspace memory.
726
 */
727
void ccv_nnc_stream_context_drain(ccv_nnc_stream_context_t* const stream);
728
/**
729
 * The callback prototype on the stream context.
730
 */
731
typedef void(*ccv_nnc_callback_f)(void* const callback_context);
732
/**
733
 * Add a callback function to be called once stream executed to that point.
734
 * @param stream The stream context to add callback.
735
 * @param callback The callback function.
736
 * @param callback_context The context to be called with the callback function.
737
 */
738
void ccv_nnc_stream_context_add_callback(ccv_nnc_stream_context_t* const stream, const ccv_nnc_callback_f callback, void* const callback_context);
739
/**
740
 * Wait until all tasks submitted (command, graph run etc.) on the stream context
741
 * completed.
742
 * @param stream The stream context to wait.
743
 */
744
void ccv_nnc_stream_context_wait(const ccv_nnc_stream_context_t* const stream);
745
/**
746
 * The hooks to be called when a stream context is destroyed.
747
 * At the moment, the stream context will be destroyed at the time
748
 * ccv_nnc_stream_context_free is called, so there is no tricks.
749
 * This method is useful because we have some resources associated
750
 * with stream pointer, hence, would be good to free these resources
751
 * upon free the stream.
752
 */
753
typedef void (*ccv_nnc_stream_context_destructor_f)(const ccv_nnc_stream_context_t* const stream, void* const context);
754
/**
755
 * Add a new destructor hook callback when a stream is freed.
756
 * @param stream The stream to be observed.
757
 * @param destructor The new destructor callback method.
758
 * @param context additional context.
759
 * @return A integer identifier to help remove the hook.
760
 */
761
int ccv_nnc_stream_context_add_destructor_hook(ccv_nnc_stream_context_t* const stream, ccv_nnc_stream_context_destructor_f destructor, void* const context);
762
/**
763
 * Remove a destructor hook callback.
764
 * @param stream The stream we observe.
765
 * @param hook_id The returned integer when calling the add method.
766
 */
767
void ccv_nnc_stream_context_remove_destructor_hook(ccv_nnc_stream_context_t* const stream, const int hook_id);
768
/**
769
 * Deallocate the stream context.
770
 * @param stream_context The stream context to be destroyed.
771
 */
772
void ccv_nnc_stream_context_free(ccv_nnc_stream_context_t* const stream_context);
773
774
/**
775
 * Opaque pointer to the signal object.
776
 */
777
typedef struct ccv_nnc_stream_signal_s ccv_nnc_stream_signal_t;
778
779
/**
780
 * Create a new stream signal.
781
 * @param type A composed type denotes whether it associated with a GPU or CPU stream context, and on which device.
782
 * @return The newly created stream signal.
783
 */
784
CCV_WARN_UNUSED(ccv_nnc_stream_signal_t*) ccv_nnc_stream_signal_new(const int type);
785
/**
786
 * Get the type of the stream signal.
787
 * @param signal The stream signal we want to inspect.
788
 * @return The type of the stream signal.
789
 */
790
CCV_WARN_UNUSED(int) ccv_nnc_stream_signal_type(const ccv_nnc_stream_signal_t* const signal);
791
/**
792
 * Emit a signal on a stream.
793
 * @param stream The stream context where the signal will be emitted.
794
 * @param signal The signal to be emitted. It has to be on the same device as the stream.
795
 */
796
void ccv_nnc_stream_context_emit_signal(ccv_nnc_stream_context_t* const stream, ccv_nnc_stream_signal_t* const signal);
797
/**
798
 * Wait a signal on a stream.
799
 * @param stream The stream context that will be blocked by the signal.
800
 * @param signal The signal to be waited. It can be on a different device of the stream.
801
 */
802
void ccv_nnc_stream_context_wait_signal(const ccv_nnc_stream_context_t* const stream, const ccv_nnc_stream_signal_t* const signal);
803
/**
804
 * Get on which stream context this signal is going to be emitted on.
805
 * @param signal The signal we want to inspect.
806
 * @return The most recent stream context you called ccv_nnc_stream_context_emit_signal with.
807
 */
808
CCV_WARN_UNUSED(ccv_nnc_stream_context_t*) ccv_nnc_stream_signal_get_emitter(const ccv_nnc_stream_signal_t* const signal);
809
/**
810
 * Deallocate the signal.
811
 * @param signal The signal to be destroyed.
812
 */
813
void ccv_nnc_stream_signal_free(ccv_nnc_stream_signal_t* const signal);
814
/**
815
 * Return number of devices.
816
 * @param type The type of devices (CCV_NNC_STREAM_CONTEXT_GPU / CCV_NNC_STREAM_CONTEXT_CPU)
817
 * @return The number of devices.
818
 */
819
CCV_WARN_UNUSED(int) ccv_nnc_device_count(const int type);
820
/**
821
 * Remap a source device as the destination device.
822
 * @param type The type of devices (CCV_NNC_STREAM_CONTEXT_GPU / CCV_NNC_STREAM_CONTEXT_CPU)
823
 * @param source The original device id.
824
 * @param destination The new device id.
825
 * @return 0 if the device remap is successful, -1 if it is not.
826
 */
827
CCV_WARN_UNUSED(int) ccv_nnc_device_remap(const int type, const int source, const int destination);
828
/**
829
 * The neighbor discovery function that will be called with the device id.
830
 */
831
typedef ccv_nnc_stream_context_t*(*ccv_nnc_stream_context_neighbor_discovery_f)(const int device_id, void* const context);
832
/**
833
 * Set the neighbor stream context discovery mechanism. This method exposes how
834
 * neighbor should be defined per stream context. This method is useful for
835
 * commands that operates cross devices and need to find the correct stream
836
 * context for these devices. Stream context itself is bounded to one device
837
 * only.
838
 * @param stream_context The stream context that bounds to a discovery mechanism.
839
 * @param discovery The neighbor discovery function to invoke.
840
 * @param context The associated context with the neighbor discovery function.
841
 */
842
void ccv_nnc_stream_context_set_neighbor_discovery(ccv_nnc_stream_context_t* const stream_context, ccv_nnc_stream_context_neighbor_discovery_f discovery, void* const context);
843
/**
844
 * Find a neighbor stream context on a given device id for current stream context.
845
 * @param stream_context The stream context which we will look for neighbors.
846
 * @param device_id On which device the stream context may exist.
847
 * @return 0 if no stream context found. Otherwise, return the stream context on that device.
848
 */
849
CCV_WARN_UNUSED(ccv_nnc_stream_context_t*) ccv_nnc_stream_context_find_neighbor(ccv_nnc_stream_context_t* const stream_context, const int device_id);
850
851
/** @} */
852
853
/** @} */
854
855
/**
856
 * @defgroup level_2 Level-2 API
857
 * @{
858
 */
859
860
/**
861
 * @defgroup level_2_essentials Essentials
862
 * @{
863
 */
864
865
enum {
866
  CCV_NNC_SHORT_DOT_GRAPH = 0x0, /**< Display a simplified graph. */
867
  CCV_NNC_LONG_DOT_GRAPH  = 0x1, /**< Display a graph that contains all information. */
868
};
869
870
/**
871
 * Opaque pointer holds the concrete graph representation.
872
 */
873
typedef struct ccv_nnc_graph_s ccv_nnc_graph_t;
874
875
/**
876
 * The opaque on stack object hold a reference to an execution node within a graph.
877
 */
878
typedef struct {
879
  int32_t d; // This is int because sometimes I piggy-back on negatives to carry out some internal computations.
880
  ccv_nnc_graph_t* graph;
881
} ccv_nnc_graph_exec_t;
882
883
81.9k
#define CCV_NO_GRAPH_EXEC(exec) ((exec).graph == 0)
884
885
/**
886
 * Create an empty graph.
887
 * Note that all graph mutation methods are not thread-safe.
888
 * You should only operate the graph in serial fashion.
889
 * @return An opaque ccv_nnc_graph_t pointer.
890
 */
891
CCV_WARN_UNUSED(ccv_nnc_graph_t*) ccv_nnc_graph_new(void);
892
/**
893
 * Create a node with specific command execution, as well as its inputs & outputs.
894
 * Underlying, the graph maintains the backing object for the node, and all you get is
895
 * a on-stack object to index the backing object from the graph.
896
 * @param graph The concrete graph.
897
 * @param cmd The wrapped command.
898
 * @param hint The hint for this command.
899
 * @param inputs The input tensors array.
900
 * @param input_size The size of input tensors array.
901
 * @param outputs The output tensors array.
902
 * @param output_size The size of output tensors array.
903
 * @return An on-stack object that references a execution node.
904
 */
905
CCV_WARN_UNUSED(ccv_nnc_graph_exec_t) ccv_nnc_graph_exec_new(ccv_nnc_graph_t* const graph, const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size);
906
/**
907
 * Set the command for an existing execution node.
908
 * @param graph The concrete graph.
909
 * @param exec The execution node reference.
910
 * @param cmd The new wrapped command.
911
 */
912
void ccv_nnc_graph_exec_set(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, const ccv_nnc_cmd_t cmd);
913
/**
914
 * Set hint for an existing execution node.
915
 * @param graph The concrete graph.
916
 * @param exec The execution node reference.
917
 * @param hint The new hint.
918
 */
919
void ccv_nnc_graph_exec_set_hint(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, const ccv_nnc_hint_t hint);
920
/**
921
 * Set input / output tensors for an existing execution node.
922
 * @param graph The concrete graph.
923
 * @param exec The execution node reference.
924
 * @param inputs The input tensors array.
925
 * @param input_size The size of input tensors array.
926
 * @param outputs The output tensors array.
927
 * @param output_size The size of output tensors array.
928
 */
929
void ccv_nnc_graph_exec_set_io(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size);
930
/**
931
 * Concatenate input graph nodes with an output graph node to create a new graph.
932
 * @param graph The concrete graph.
933
 * @param source The execution node reference to connect.
934
 * @param destination The execution node reference connect to.
935
 * @return Non-zero if cannot concat successfully.
936
 */
937
int ccv_nnc_graph_exec_concat(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t source, const ccv_nnc_graph_exec_t destination);
938
/**
939
 * Disconnect input graph nodes with an output graph nodes in this graph.
940
 * @param graph The concrete graph.
941
 * @param source The execution node reference to disconnect.
942
 * @param destination The execution node reference disconnect to.
943
 * @return Non-zero if cannot disjoin successfully.
944
 */
945
int ccv_nnc_graph_exec_disjoin(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t source, const ccv_nnc_graph_exec_t destination);
946
/**
947
 * Count number of exec in the graph.
948
 * @param graph The concrete graph.
949
 * @return The number of execution nodes in the graph.
950
 */
951
int ccv_nnc_graph_exec_count(const ccv_nnc_graph_t* const graph);
952
/**
953
 * Generate output that can be parsed by GraphViz (DOT language).
954
 * @param graph The concrete graph.
955
 * @param flags Either CCV_NNC_SHORT_DOT_GRAPH or CCV_NNC_LONG_DOT_GRAPH
956
 * @param out The output file stream.
957
 */
958
void ccv_nnc_graph_dot(const ccv_nnc_graph_t* const graph, const int flags, FILE* out);
959
/**
960
 * Run the autotune function on all execution node, and assign back with the optimized commands.
961
 * @param graph The concrete graph.
962
 * @param max_workspace_size The maximum allowed extra memory usage.
963
 * @param flags A reserved field for flags.
964
 * @param sources The source execution nodes to begin. 0 uses default sources.
965
 * @param source_size The size of source execution nodes.
966
 * @param destinations The destination execution nodes which we end. 0 uses default destinations.
967
 * @param destination_size The size of destination execution nodes.
968
 */
969
void ccv_nnc_graph_autotune(ccv_nnc_graph_t* const graph, const size_t max_workspace_size, const int flags, const ccv_nnc_graph_exec_t* const sources, const int source_size, const ccv_nnc_graph_exec_t* const destinations, const int destination_size);
970
/**
971
 * Make the graph topsorted, thus, do a topological sort so when run the graph, no additional memory will be allocated.
972
 * Otherwise when we run the graph, we need to allocate some memory on heap to faciliate.
973
 * @param graph The concrete graph.
974
 * @param exec_cvt The execution node assignments will change, and you can give an array to know the changes.
975
 * @param exec_cvt_size The provided conversion array size.
976
 */
977
void ccv_nnc_graph_topsort(ccv_nnc_graph_t* const graph, int* const exec_cvt, const int exec_cvt_size);
978
979
/**
980
 * Opaque pointer holds the graph schedule.
981
 */
982
typedef struct ccv_nnc_graph_static_schedule_s ccv_nnc_graph_static_schedule_t;
983
/**
984
 * Assuming the graph runs from the beginning to the end. Allocate a internal schedule object that will
985
 * run the graph efficiently if it runs from the beginning to the end. It will basically call ccv_nnc_graph_static_schedule
986
 * and save the end result to a internal schedule object to this graph.
987
 * @param graph The concrete graph.
988
 * @param stream_type The type of stream context we are going to use.
989
 */
990
void ccv_nnc_graph_set_default_static_schedule(ccv_nnc_graph_t* const graph, const int stream_type);
991
/**
992
 * Allocate extra streams to make this graph parallel runnable. Note this requires the graph to be topsorted.
993
 * After this is done, you can schedule a graph either on its default stream, or a new stream with the schedule
994
 * object.
995
 * @param graph The concrete graph.
996
 * @param stream_type The type of stream context we are going to use.
997
 * @param sources The source execution nodes to begin. 0 uses default sources.
998
 * @param source_size The size of source execution nodes.
999
 * @param destinations The destination execution nodes which we end. 0 uses default destinations.
1000
 * @param destination_size The size of destination execution nodes.
1001
 * @return An opaque schedule object that let the graph knows how to run itself efficiently.
1002
 */
1003
CCV_WARN_UNUSED(ccv_nnc_graph_static_schedule_t*) ccv_nnc_graph_static_schedule_new(ccv_nnc_graph_t* const graph, const int stream_type, const ccv_nnc_graph_exec_t* const sources, const int source_size, const ccv_nnc_graph_exec_t* const destinations, const int destination_size);
1004
/**
1005
 * Free a schedule object for a graph.
1006
 * @param schedule The schedule object returned from ccv_nnc_graph_static_schedule_new.
1007
 */
1008
void ccv_nnc_graph_static_schedule_free(ccv_nnc_graph_static_schedule_t* const schedule);
1009
/**
1010
 * Query the default stream for a given graph.
1011
 * @param graph The concrete graph.
1012
 * @return The default stream context.
1013
 */
1014
CCV_WARN_UNUSED(ccv_nnc_stream_context_t*) ccv_nnc_graph_default_stream(const ccv_nnc_graph_t* const graph);
1015
/**
1016
 * Set default sources for a give graph.
1017
 * @param graph The concrete graph.
1018
 * @param sources The source execution nodes to begin.
1019
 * @param source_size The size of source execution nodes.
1020
 */
1021
void ccv_nnc_graph_set_sources(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t* const sources, const int source_size);
1022
/**
1023
 * Get the default source execution nodes pointer.
1024
 * @param graph The concrete graph.
1025
 * @return A pointer to an array of default source execution nodes.
1026
 */
1027
ccv_nnc_graph_exec_t* ccv_nnc_graph_sources(const ccv_nnc_graph_t* const graph);
1028
/**
1029
 * Get the number of default source execution nodes.
1030
 * @param graph The concrete graph.
1031
 * @return The number of default source execution nodes.
1032
 */
1033
int ccv_nnc_graph_source_size(const ccv_nnc_graph_t* const graph);
1034
/**
1035
 * Set default destinations for a give graph.
1036
 * @param graph The concrete graph.
1037
 * @param destinations The destination execution nodes which we end.
1038
 * @param destination_size The size of destination execution nodes.
1039
 */
1040
void ccv_nnc_graph_set_destinations(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t* const destinations, const int destination_size);
1041
/**
1042
 * Get the default destination execution nodes pointer.
1043
 * @param graph The concrete graph.
1044
 * @return A pointer to an array of default destination execution nodes.
1045
 */
1046
ccv_nnc_graph_exec_t* ccv_nnc_graph_destinations(const ccv_nnc_graph_t* const graph);
1047
/**
1048
 * Get the number of default destination execution nodes.
1049
 * @param graph The concrete graph.
1050
 * @return The number of default destination execution nodes.
1051
 */
1052
int ccv_nnc_graph_destination_size(const ccv_nnc_graph_t* const graph);
1053
/**
1054
 * This graph, and its relevant auxiliary objects (opaque to user) are deallocated.
1055
 * @param graph The concrete graph.
1056
 */
1057
void ccv_nnc_graph_free(ccv_nnc_graph_t* const graph);
1058
/**
1059
 * Opaque pointer to the tape of tensors. The tape are used by the while loop.
1060
 */
1061
typedef struct ccv_nnc_tensor_tape_s ccv_nnc_tensor_tape_t;
1062
/**
1063
 * Execute a computation graph with all bells and whistles. Need to supply a tensor tape if it contains backward pass
1064
 * for while loop or branches. With tensor tape, the tensors are versioned, so you can "backpropagate through time".
1065
 * @param graph The concrete graph.
1066
 * @param flags A reserved field for flags.
1067
 * @param sources The source execution nodes array.
1068
 * @param source_size The size of source execution nodes array. 0 uses default sources.
1069
 * @param destinations The destination execution nodes array.
1070
 * @param destination_size The size of destination execution nodes array. 0 uses default destinations.
1071
 * @param tensor_tape An opaque tensor tape object to "backpropagate through time".
1072
 * @param stream_context Which stream this graph will be executed upon.
1073
 * @return CCV_NNC_EXEC_SUCCESS if succeed.
1074
 */
1075
int ccv_nnc_graph_run(ccv_nnc_graph_t* const graph, const int flags, const ccv_nnc_graph_exec_t* const sources, const int source_size, const ccv_nnc_graph_exec_t* const destinations, const int destination_size, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const stream_context);
1076
/**
1077
 * Execute a computation graph with all bells and whistles. Need to supply a tensor tape if it contains backward pass
1078
 * for while loop or branches. With tensor tape, the tensors are versioned, so you can "backpropagate through time".
1079
 * Comparing with ccv_nnc_graph_run method, this method doesn't take sources / destinations node, rather, it takes the
1080
 * schedule object.
1081
 * @param graph The concrete graph.
1082
 * @param flags A reserved field for flags.
1083
 * @param schedule The schedule object specified the sources / destinations and how to efficiently run this.
1084
 * @param tensor_tape An opaque tensor tape object to "backpropagate through time".
1085
 * @param stream_context Which stream this graph will be executed upon.
1086
 * @return CCV_NNC_EXEC_SUCCESS if succeed.
1087
 */
1088
int ccv_nnc_graph_run_with_schedule(ccv_nnc_graph_t* const graph, const int flags, const ccv_nnc_graph_static_schedule_t* const schedule, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const stream_context);
1089
1090
/** @} */
1091
1092
/**
1093
 * @defgroup level_2_others Others
1094
 * @{
1095
 */
1096
1097
/**
1098
 * Set input / output flags for an existing execution node.
1099
 * This must be called after set_io, set additional flags for tensors related to this exec.
1100
 * @param graph The concrete graph.
1101
 * @param exec The execution node reference.
1102
 * @param input_flags The input flags array.
1103
 * @param input_flag_size the size of input flags array, should be the same as input tensors array (or 0).
1104
 * @param output_flags The output flags array.
1105
 * @param output_flag_size the size of output flags array, should be the same as output tensors array (or 0).
1106
 */
1107
void ccv_nnc_graph_exec_set_io_flags(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, const int* const input_flags, const int input_flag_size, const int* const output_flags, const int output_flag_size);
1108
/**
1109
 * Set the pair reference for exec. In backward pass, an execution node's pair node is the forward pass node.
1110
 * @param graph The concrete graph.
1111
 * @param exec The execution node reference.
1112
 * @param pair_exec The pair execution node reference.
1113
 */
1114
void ccv_nnc_graph_exec_pair_with(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, const ccv_nnc_graph_exec_t pair_exec);
1115
/**
1116
 * Add tensor pair that can be used to "carry over". (carry over: passing a tensor from current loop to the next loop).
1117
 * @param graph The concrete graph.
1118
 * @param from The tensor we have output in this loop.
1119
 * @param to The tensor we will use as input in the next loop.
1120
 */
1121
void ccv_nnc_graph_add_carry_over(ccv_nnc_graph_t* const graph, const ccv_nnc_tensor_t* const from, const ccv_nnc_tensor_t* const to);
1122
/**
1123
 * Updates are the tensors that not directly involved in the computation, but its pointers need to get updated
1124
 * along with this exec, thus need to be "update" to other exec nodes.
1125
 * @param graph The concrete graph.
1126
 * @param exec The execution node reference.
1127
 * @param update The tensor need to be updated along the execution node.
1128
 */
1129
void ccv_nnc_graph_exec_add_as_affected(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, ccv_nnc_tensor_t* const update);
1130
1131
/** @} */
1132
1133
/** @} */
1134
1135
/**
1136
 * @defgroup level_3 Level-3 API
1137
 * @{
1138
 */
1139
1140
/**
1141
 * @defgroup level_3_essentials Essentials
1142
 * @{
1143
 */
1144
1145
/**
1146
 * Opaque pointer to the symbolic graph object.
1147
 */
1148
typedef struct ccv_nnc_symbolic_graph_s ccv_nnc_symbolic_graph_t;
1149
1150
/**
1151
 * Opaque pointer to an arena of allocated tensors.
1152
 */
1153
typedef struct ccv_nnc_tensor_arena_s ccv_nnc_tensor_arena_t;
1154
1155
/**
1156
 * Opaque pointer to an arena of allocated execs.
1157
 */
1158
typedef struct ccv_nnc_graph_exec_arena_s ccv_nnc_graph_exec_arena_t;
1159
1160
/**
1161
 * On stack object references a tensor symbol in the symbolic graph.
1162
 */
1163
typedef struct {
1164
  int32_t d;
1165
  const ccv_nnc_symbolic_graph_t* graph;
1166
} ccv_nnc_tensor_symbol_t;
1167
1168
/**
1169
 * On stack object references a execution node symbol in the symbolic graph.
1170
 */
1171
typedef struct {
1172
  int32_t d;
1173
  const ccv_nnc_symbolic_graph_t* graph;
1174
} ccv_nnc_graph_exec_symbol_t;
1175
1176
enum {
1177
  CCV_NNC_TENSOR_SYMBOL_INIT_ZEROS = 0x01, /**< Initialize underlying tensor for the symbol with zeros */
1178
  CCV_NNC_TENSOR_SYMBOL_INIT_ONES = 0x02, /**< Initialize underlying tensor for the symbol with ones */
1179
  CCV_NNC_TENSOR_SYMBOL_TAPE_VAR = 0x04, /**< Mark this as a tape variable (it cannot be folded, will contain flag CCV_TAPE_ALLOC) */
1180
  // The one below is special.
1181
  CCV_NNC_TENSOR_SYMBOL_DEAD = 0x80000000, /**< Mark this tensor symbol as dead, any future usage will cause assertion */
1182
};
1183
1184
151k
#define CCV_NNC_TENSOR_SYMBOL_IS_DEAD(x) ((x) & CCV_NNC_TENSOR_SYMBOL_DEAD)
1185
1186
enum {
1187
  CCV_NNC_GRAPH_EXEC_DEAD = 0x1, /**< Mark this node as dead. */
1188
  CCV_NNC_GRAPH_EXEC_P_WHILE = 0x10, /**< Mark this node keyword is while */
1189
  CCV_NNC_GRAPH_EXEC_CASE_OF = 0x20, /**< Mark this node keyword is case_of */
1190
};
1191
1192
422k
#define CCV_NNC_GRAPH_EXEC_IS_DEAD(x) ((x) & CCV_NNC_GRAPH_EXEC_DEAD)
1193
18.6k
#define CCV_NNC_GRAPH_REF(x) ((x)->_heap_graph_ref ? 
(x)->_heap_graph_ref178
:
(x)->_inline_graph_ref18.4k
)
1194
1195
enum {
1196
  CCV_NNC_NO_TENSOR_SYMBOL = -1, /**< Special symbol reference for no tensor symbol. */
1197
  CCV_NNC_WHILE_COUNT_TENSOR_SYMBOL = -2, /**< Special symbol reference for while loop count tensor. */
1198
};
1199
1200
enum {
1201
  CCV_NNC_NO_GRAPH_EXEC_SYMBOL = -1, /**< Special symbol reference for no exec symbol. */
1202
};
1203
1204
1205
enum {
1206
  CCV_NNC_SYMBOL_TENSOR, /**< Identifier for tensor symbol */
1207
  CCV_NNC_SYMBOL_TENSOR_ALIAS, /**< Identifier for tensor alias symbol */
1208
  CCV_NNC_SYMBOL_GRAPH_EXEC, /**< Identifier for exec symbol */
1209
};
1210
1211
22
#define CCV_NNC_IS_WHILE_COUNT_TENSOR_SYMBOL(d) (((uint32_t)(d) & 0xf) == 0xe)
1212
1213
/**
1214
 * A data structure to pass in a pair of tensor symbols.
1215
 */
1216
typedef struct {
1217
  ccv_nnc_tensor_symbol_t source; /**< The 'from' tensor symbol. */
1218
  ccv_nnc_tensor_symbol_t destination; /**< The 'to' tensor symbol. */
1219
} ccv_nnc_tensor_symbol_map_t;
1220
1221
/**
1222
 * Create a new empty symbolic graph. It is an opaque data structure that maintains the whole graph of computation in its symbolic form.
1223
 * Note that all graph mutation methods are not thread-safe. You should only operate the graph in serial fashion.
1224
 */
1225
CCV_WARN_UNUSED(ccv_nnc_symbolic_graph_t*) ccv_nnc_symbolic_graph_new(void);
1226
/**
1227
 * Create an tensor symbol (thus, with no actual memory space allocation) in a symbolic graph.
1228
 * @param graph The symbolic graph.
1229
 * @param info The tensor parameters.
1230
 * @param name The name of the tensor symbol, it is optional.
1231
 * @return A tensor symbol reference.
1232
 */
1233
CCV_WARN_UNUSED(ccv_nnc_tensor_symbol_t) ccv_nnc_tensor_symbol_new(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_param_t info, const char* const name);
1234
/**
1235
 * Create an alias to the tensor symbol as tensor view (thus, pointing to the same memory region, but with a different header info and offset).
1236
 * @param graph The symbolic graph.
1237
 * @param tensor_symbol The tensor symbol we are going to reference to.
1238
 * @param ofs The offset on each of the dimension.
1239
 * @param inc The line size of each dimension.
1240
 * @param info The tensor parameters for the new alias.
1241
 * @param name The name of the tensor symbol alias, it is optional.
1242
 * @return A tensor symbol alias reference.
1243
 */
1244
CCV_WARN_UNUSED(ccv_nnc_tensor_symbol_t) ccv_nnc_tensor_symbol_alias_new(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor_symbol, const int ofs[CCV_NNC_MAX_DIM_ALLOC], const int inc[CCV_NNC_MAX_DIM_ALLOC], const ccv_nnc_tensor_param_t info, const char* const name);
1245
/**
1246
 * Manually delete a tensor symbol off the symbolic graph.
1247
 * @param graph The symbolic graph.
1248
 * @param tensor The tensor symbol reference.
1249
 */
1250
void ccv_nnc_tensor_symbol_free(ccv_nnc_symbolic_graph_t* const graph, ccv_nnc_tensor_symbol_t tensor);
1251
/**
1252
 * Create a graph execution node (an operation that takes a set of inputs and generates a set of outputs).
1253
 * @param graph The symbolic graph.
1254
 * @param cmd The wrapped command.
1255
 * @param inputs The input tensor symbols array.
1256
 * @param input_size The size of input tensor symbols array.
1257
 * @param outputs The output tensor symbols array.
1258
 * @param output_size The size of output tensor symbols array.
1259
 * @param name The name of this execution node, optional.
1260
 * @return The execution node symbol reference.
1261
 */
1262
ccv_nnc_graph_exec_symbol_t ccv_nnc_graph_exec_symbol_new(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_cmd_t cmd, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, const ccv_nnc_tensor_symbol_t* const outputs, const int output_size, const char* const name);
1263
/**
1264
 * ccv_nnc_graph_exec_symbol_new defaults to use `ccv_nnc_hint_auto` find the best hints for a set of inputs / outputs.
1265
 * However, you can also set your own hints.
1266
 * @param graph The symbolic graph.
1267
 * @param exec The execution node symbol reference.
1268
 * @param hint The hint for the command.
1269
 */
1270
void ccv_nnc_graph_exec_symbol_set_hint(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t exec, const ccv_nnc_hint_t hint);
1271
/**
1272
 * Manually delete a exec symbol off the symbolic graph.
1273
 * @param graph The symbolic graph.
1274
 * @param symbol The execution node symbol reference.
1275
 */
1276
void ccv_nnc_graph_exec_symbol_free(ccv_nnc_symbolic_graph_t* const graph, ccv_nnc_graph_exec_symbol_t symbol);
1277
enum {
1278
  CCV_NNC_AUTOGEN_ALL_EXECS = 0x1, /**< Automatic concatenation for all execution nodes */
1279
  CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS = 0x2, /**< Automatically find all source and destination nodes. */
1280
};
1281
/**
1282
 * Automatic concatenate these nodes together based on its inputs / outputs.
1283
 * Imagining this is to generate the execution flow based on input tensors and output tensors.
1284
 * nil for execs and 0 for exec_size means to loop over all the execs on the graph and autogen.
1285
 * @param graph The symbolic graph.
1286
 * @param execs The execution nodes array.
1287
 * @param exec_size The size of execution nodes array.
1288
 * @param flags The flags determines what operations to perform when concatenating.
1289
 * @return non-zero if cannot figure out.
1290
 */
1291
int ccv_nnc_graph_exec_symbol_autogen(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t* const execs, const int exec_size, const int flags);
1292
/**
1293
 * Set the default sources for a symbolic graph.
1294
 * @param graph The symbolic graph.
1295
 * @param sources The source execution nodes array.
1296
 * @param source_size The size of source execution nodes array.
1297
 */
1298
void ccv_nnc_symbolic_graph_set_sources(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size);
1299
/**
1300
 * Add one node to the default sources for a symbolic graph.
1301
 * @param graph The symbolic graph.
1302
 * @param source The source execution node.
1303
 */
1304
void ccv_nnc_symbolic_graph_add_source(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t source);
1305
/**
1306
 * Get the pointer to the default sources.
1307
 * @param graph The symbolic graph.
1308
 * @return The pointer to the source execution nodes array.
1309
 */
1310
ccv_nnc_graph_exec_symbol_t* ccv_nnc_symbolic_graph_sources(const ccv_nnc_symbolic_graph_t* const graph);
1311
/**
1312
 * Get the size of the default source nodes array.
1313
 * @param graph The symbolic graph.
1314
 * @return The size of the default source nodes array.
1315
 */
1316
int ccv_nnc_symbolic_graph_source_size(const ccv_nnc_symbolic_graph_t* const graph);
1317
/**
1318
 * Set the default destinations for a symbolic graph.
1319
 * @param graph The symbolic graph.
1320
 * @param destinations The destination execution nodes array.
1321
 * @param destination_size The size of destination execution nodes array.
1322
 */
1323
void ccv_nnc_symbolic_graph_set_destinations(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size);
1324
/**
1325
 * Add one node to the default destinations for a symbolic graph.
1326
 * @param graph The symbolic graph.
1327
 * @param destination The destination execution node.
1328
 */
1329
void ccv_nnc_symbolic_graph_add_destination(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t destination);
1330
/**
1331
 * Get the pointer to the default destinations.
1332
 * @param graph The symbolic graph.
1333
 * @return The pointer to the destinationsexecution nodes array.
1334
 */
1335
ccv_nnc_graph_exec_symbol_t* ccv_nnc_symbolic_graph_destinations(const ccv_nnc_symbolic_graph_t* const graph);
1336
/**
1337
 * Get the size of the default destination nodes array.
1338
 * @param graph The symbolic graph.
1339
 * @return The size of the default destination nodes array.
1340
 */
1341
int ccv_nnc_symbolic_graph_destination_size(const ccv_nnc_symbolic_graph_t* const graph);
1342
/**
1343
 * Generate output that can be parsed by GraphViz (DOT language).
1344
 * @param graph The symbolic graph.
1345
 * @param flags Either CCV_NNC_SHORT_DOT_GRAPH or CCV_NNC_LONG_DOT_GRAPH
1346
 * @param out The output file stream.
1347
 */
1348
void ccv_nnc_symbolic_graph_dot(const ccv_nnc_symbolic_graph_t* const graph, const int flags, FILE* out);
1349
1350
/**
1351
 * The data structure to wrap a tensor symbol and a concrete tensor together.
1352
 */
1353
typedef struct {
1354
  ccv_nnc_tensor_symbol_t symbol;
1355
  const ccv_nnc_tensor_t* tensor;
1356
} ccv_nnc_tensor_bind_t;
1357
1358
typedef struct {
1359
  void* (*alloc)(const int type, const int pinned_mem /* Currently only used to annotate CCV_TENSOR_PINNED_MEM, future can be expanded to generic flags */, const size_t size, void* const arg);
1360
  void (*free)(void* const ptr, void* const arg);
1361
} ccv_nnc_symbolic_graph_compile_allocator_vtab_t;
1362
1363
typedef struct {
1364
  const ccv_nnc_symbolic_graph_compile_allocator_vtab_t* isa;
1365
  struct {
1366
    void* alloc;
1367
    void* free;
1368
  } context;
1369
} ccv_nnc_symbolic_graph_compile_allocator_t;
1370
1371
typedef struct {
1372
  ccv_nnc_symbolic_graph_compile_allocator_t allocator;
1373
} ccv_nnc_symbolic_graph_compile_param_t;
1374
1375
/**
1376
 * Compile a symbolic graph into a graph that can be executed, and a set of tensors (opaque data structure tensor arena) are allocated based on which tensor symbols are the input and which are the outputs. The tensor allocation is done to minimize the required storage.
1377
 * tensor_binds provide custom binding for these tensors. You still responsible to manage the life-time of these tensors.
1378
 * outputs marks the tensor symbols that need to be kept til the end of the graph.
1379
 * @param graph The symbolic graph.
1380
 * @param compile_params A ccv_nnc_symbolic_graph_compile_param_t struct defines compilation parameters.
1381
 * @param tensor_binds The binding array (a tensor symbol and a concrete tensor). We replace everywhere that uses the tensor symbol with the concrete tensor.
1382
 * @param tensor_bind_size The size of the binding array.
1383
 * @param outputs The output tensor symbols that we want to keep the value.
1384
 * @param output_size The size of the output tensor symbols array.
1385
 * @param sources The sources for the graph.
1386
 * @param source_size The size of the sources array. 0 to use default sources.
1387
 * @param destinations The destinations for the graph.
1388
 * @param destination_size The size of the destinations array. 0 to use default destinations.
1389
 * @param graph_ref The pointer to store the generated concrete graph.
1390
 * @param tensor_arena_ref The pointer to store ccv_nnc_tensor_arena_t.
1391
 * @param graph_exec_arena_ref The pointer to store ccv_nnc_graph_exec_arena_t.
1392
 */
1393
void ccv_nnc_symbolic_graph_compile(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_symbolic_graph_compile_param_t compile_params, const ccv_nnc_tensor_bind_t* const tensor_binds, const int tensor_bind_size, const ccv_nnc_tensor_symbol_t* const outputs, const int output_size, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size, ccv_nnc_graph_t** const graph_ref, ccv_nnc_tensor_arena_t** const tensor_arena_ref, ccv_nnc_graph_exec_arena_t** const graph_exec_arena_ref);
1394
/**
1395
 * Free the symbolic graph and its associated memory. Note that if you compiled a graph / tensor arena out of this symbolic graph, these won't be free'd.
1396
 * @param graph The symbolic graph.
1397
 */
1398
void ccv_nnc_symbolic_graph_free(ccv_nnc_symbolic_graph_t* const graph);
1399
/**
1400
 * Find corresponding tensor by a symbol from the tensor arena.
1401
 * @param tensor_arena The tensor arena object generated through compilation,
1402
 * @param symbol The tensor symbol reference. Because tensor symbol reference is on stack. It can still be used even the original symbolic graph is free'd.
1403
 * @return A concrete tensor from the tensor arena.
1404
 */
1405
CCV_WARN_UNUSED(ccv_nnc_tensor_t*) ccv_nnc_tensor_from_symbol(const ccv_nnc_tensor_arena_t* const tensor_arena, const ccv_nnc_tensor_symbol_t symbol);
1406
/**
1407
 * Bind a tensor to a symbol. You still responsible to manage the life-time of the tensor to make sure it is not freed until everything is done.
1408
 * @param tensor_arena The tensor arena object generated through compilation.
1409
 * @param symbol The tensor symbol reference. Because tensor symbol reference is on stack. It can still be used even the original symbolic graph is free'd.
1410
 * @param tensor The new tensor to bind to.
1411
 */
1412
void ccv_nnc_tensor_bind_symbol(ccv_nnc_tensor_arena_t* const tensor_arena, const ccv_nnc_tensor_symbol_t symbol, const ccv_nnc_tensor_t* const tensor);
1413
/**
1414
 * Clear existing bindings on the tensor arena.
1415
 * @param tensor_arena The tensor arena object generated through compilation to clear bindings.
1416
 */
1417
void ccv_nnc_tensor_arena_clear_bindings(ccv_nnc_tensor_arena_t* const tensor_arena);
1418
/**
1419
 * Free the data buffer of the tensor arena.
1420
 * @param tensor_arena The tensor arena object generated through compilation.
1421
 */
1422
void ccv_nnc_tensor_arena_buffer_free(ccv_nnc_tensor_arena_t* const tensor_arena);
1423
/**
1424
 * Free the opaque tensor arena structure.
1425
 * @param tensor_arena The tensor arena object generated through compilation.
1426
 */
1427
void ccv_nnc_tensor_arena_free(ccv_nnc_tensor_arena_t* const tensor_arena);
1428
/**
1429
 * Find corresponding graph exec by a exec symbol from graph exec arena.
1430
 * @param graph_exec_arena The graph execution node arena object generated through compilation,
1431
 * @param symbol The execution node symbol reference. Because execution node symbol reference is on stack. It can still be used even the original symbolic graph is free'd.
1432
 * @return A execution node reference to the concrete graph.
1433
 */
1434
CCV_WARN_UNUSED(ccv_nnc_graph_exec_t) ccv_nnc_graph_exec_from_symbol(const ccv_nnc_graph_exec_arena_t* const graph_exec_arena, const ccv_nnc_graph_exec_symbol_t symbol);
1435
/**
1436
 * Return the node that can drive all the source nodes from the compilation.
1437
 * @param graph_exec_arena The graph execution node arena object generated through compilation,
1438
 * @return A execution node reference that is the source.
1439
 */
1440
CCV_WARN_UNUSED(ccv_nnc_graph_exec_t) ccv_nnc_graph_exec_source(const ccv_nnc_graph_exec_arena_t* const graph_exec_arena);
1441
/**
1442
 * Return the node that can drain all the destination nodes from the compilation.
1443
 * @param graph_exec_arena The graph execution node arena object generated through compilation,
1444
 * @return A execution node reference that is the destination.
1445
 */
1446
CCV_WARN_UNUSED(ccv_nnc_graph_exec_t) ccv_nnc_graph_exec_destination(const ccv_nnc_graph_exec_arena_t* const graph_exec_arena);
1447
/**
1448
 * Free the opaque graph exec arena structure.
1449
 * @param graph_exec_arena The graph execution node arena object generated through compilation,
1450
 */
1451
void ccv_nnc_graph_exec_arena_free(ccv_nnc_graph_exec_arena_t* const graph_exec_arena);
1452
/**
1453
 * Write symbolic graph to disk, along with some binding tensors.
1454
 * @param graph The symbolic graph.
1455
 * @param tensor_binds The binding array (pair of tensor symbol and concrete tensor).
1456
 * @param tensor_bind_size The size of the binding array.
1457
 * @param fn The file name.
1458
 */
1459
void ccv_nnc_symbolic_graph_write(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_bind_t* const tensor_binds, const int tensor_bind_size, const char* const fn);
1460
/**
1461
 * Read symbolic graph from disk, with some binding tensors.
1462
 * @param fn The file name.
1463
 * @param graph_ref The pointer to store symbolic graph.
1464
 * @param tensor_binds_ref The pointer to store the binding array.
1465
 * @param tensor_bind_size_ref The pointer to store the size of the binding array.
1466
 */
1467
void ccv_nnc_symbolic_graph_read(const char* const fn, ccv_nnc_symbolic_graph_t** const graph_ref, ccv_nnc_tensor_bind_t** const tensor_binds_ref, int* const tensor_bind_size_ref);
1468
1469
/** @} */
1470
1471
/**
1472
 * @defgroup level_3_others Others
1473
 * @{
1474
 */
1475
1476
/**
1477
 * Return the symbol it alias to.
1478
 * @param graph The symbolic graph.
1479
 * @param tensor_symbol The tensor symbol alias.
1480
 * @return A tensor symbol reference to the original tensor symbol. If this symbol has no reference, return NO_SYMBOL (.graph = 0)
1481
 */
1482
CCV_WARN_UNUSED(ccv_nnc_tensor_symbol_t) ccv_nnc_tensor_symbol_alias_to(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor_symbol);
1483
/**
1484
 * Set the tensor symbol parameters.
1485
 * @param graph The symbolic graph.
1486
 * @param tensor The tensor symbol reference.
1487
 * @param info The new tensor parameters.
1488
 * @return non-zero if encountered errors.
1489
 */
1490
int ccv_nnc_tensor_symbol_set(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor, const ccv_nnc_tensor_param_t info);
1491
/**
1492
 * Get the parameters for a tensor symbol.
1493
 * @param graph The symbolic graph.
1494
 * @param tensor The tensor symbol reference.
1495
 * @return The tensor parameters.
1496
 */
1497
CCV_WARN_UNUSED(ccv_nnc_tensor_param_t) ccv_nnc_tensor_symbol_params(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor);
1498
/**
1499
 * Set the tensor symbol alias parameters.
1500
 * @param graph The symbolic graph.
1501
 * @param tensor The tensor symbol reference.
1502
 * @param ofs The offset on each of the dimension.
1503
 * @param inc The line size of each dimension.
1504
 * @return non-zero if it is not a tensor alias.
1505
 */
1506
int ccv_nnc_tensor_symbol_alias_set(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor, const int ofs[CCV_NNC_MAX_DIM_ALLOC], const int inc[CCV_NNC_MAX_DIM_ALLOC]);
1507
/**
1508
 * Get the parameters for a tensor symbol.
1509
 * @param graph The symbolic graph.
1510
 * @param tensor The tensor symbol reference.
1511
 * @param ofs The offset on each of the dimension.
1512
 * @param inc The line size of each dimension.
1513
 * @return non-zero if it is not a tensor alias.
1514
 */
1515
int ccv_nnc_tensor_symbol_alias_params(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor, int ofs[CCV_NNC_MAX_DIM_ALLOC], int inc[CCV_NNC_MAX_DIM_ALLOC]);
1516
/**
1517
 * Set the flags for this tensor symbol. The flags are only used for symbol, not for tensor.
1518
 * @param graph The symbolic graph.
1519
 * @param tensor The tensor symbol reference.
1520
 * @param flags A reserved field for flags.
1521
 */
1522
int ccv_nnc_tensor_symbol_set_flags(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor, const int flags);
1523
/**
1524
 * Get all the flags for a tensor.
1525
 * @param graph The symbolic graph.
1526
 * @param tensor The tensor symbol reference.
1527
 */
1528
CCV_WARN_UNUSED(int) ccv_nnc_tensor_symbol_flags(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor);
1529
/**
1530
 * Set the cmd of this exec symbol.
1531
 * @param graph The symbolic graph.
1532
 * @param exec The execution node symbol reference.
1533
 * @param cmd The new wrapped command.
1534
 */
1535
void ccv_nnc_graph_exec_symbol_set(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t exec, const ccv_nnc_cmd_t cmd);
1536
/**
1537
 * Return the command on this exec symbol.
1538
 * @param graph The symbolic graph.
1539
 * @param exec The execution node symbol reference.
1540
 * @return The wrapped command.
1541
 */
1542
CCV_WARN_UNUSED(ccv_nnc_cmd_t) ccv_nnc_graph_exec_symbol_cmd(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t exec);
1543
/**
1544
 * Set the inputs / outputs for a exec symbol.
1545
 * @param graph The symbolic graph.
1546
 * @param exec The execution node symbol reference.
1547
 * @param inputs The input tensor symbols array.
1548
 * @param input_size The size of input tensor symbols array.
1549
 * @param outputs The output tensor symbols array.
1550
 * @param output_size The size of output tensor symbols array.
1551
 */
1552
void ccv_nnc_graph_exec_symbol_set_io(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t exec, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, const ccv_nnc_tensor_symbol_t* const outputs, const int output_size);
1553
/**
1554
 * Manually concatenate input node with an output graph node.
1555
 * @param graph The symbolic graph.
1556
 * @param source The source execution node symbol to connect.
1557
 * @param destination The destination execution node symbol connect to.
1558
 * @return non-zero if cannot concat successfully.
1559
 */
1560
int ccv_nnc_graph_exec_symbol_concat(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t source, const ccv_nnc_graph_exec_symbol_t destination);
1561
/**
1562
 * Manually disconnect input node with an output graph node for this graph.
1563
 * @param graph The symbolic graph.
1564
 * @param source The source execution node symbol to disconnect.
1565
 * @param destination The destination execution node symbol disconnect to.
1566
 * @return non-zero if cannot disjoin successfully.
1567
 */
1568
int ccv_nnc_graph_exec_symbol_disjoin(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t source, const ccv_nnc_graph_exec_symbol_t destination);
1569
/**
1570
 * Number of exec symbols.
1571
 * @param graph The symbolic graph.
1572
 */
1573
CCV_WARN_UNUSED(int) ccv_nnc_graph_exec_symbol_count(const ccv_nnc_symbolic_graph_t* const graph);
1574
/**
1575
 * Number of active exec symbols.
1576
 * @param graph The symbolic graph.
1577
 * @param type The type of op, can be CCV_NNC_SYMBOL_TENSOR, CCV_NNC_SYMBOL_GRAPH_EXEC (will error out on CCV_NNC_SYMBOL_TENSOR_ALIAS)
1578
 */
1579
CCV_WARN_UNUSED(int) ccv_nnc_symbolic_graph_active_symbol_count(const ccv_nnc_symbolic_graph_t* const graph, const int type);
1580
/**
1581
 * Substitution function. Given an execution node symbol and a command, return a new command.
1582
 */
1583
typedef ccv_nnc_cmd_t(*ccv_nnc_symbolic_graph_subst_f)(const ccv_nnc_graph_exec_symbol_t symbol, const ccv_nnc_cmd_t cmd);
1584
/**
1585
 * Generate a duplicate of the provided graph.
1586
 * While generating the duplicate, it calls the function pointer to re-process the node type.
1587
 * @param graph The symbolic graph.
1588
 * @param subst The substitution function.
1589
 * @return The duplicated symbolic graph.
1590
 */
1591
CCV_WARN_UNUSED(ccv_nnc_symbolic_graph_t*) ccv_nnc_symbolic_graph_dup(const ccv_nnc_symbolic_graph_t* const graph, ccv_nnc_symbolic_graph_subst_f subst);
1592
/**
1593
 * Number of tensor symbols.
1594
 * @param graph The symbolic graph.
1595
 */
1596
CCV_WARN_UNUSED(int) ccv_nnc_tensor_symbol_count(const ccv_nnc_symbolic_graph_t* const graph);
1597
/**
1598
 * Compute all the tensor shapes within this graph.
1599
 * @param graph The symbolic graph.
1600
 * @param sources The sources for the graph.
1601
 * @param source_size The size of the sources array. 0 to use default sources.
1602
 * @param destinations The destinations for the graph.
1603
 * @param destination_size The size of the destinations array. 0 to use default destinations.
1604
 */
1605
void ccv_nnc_symbolic_graph_tensor_auto(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size);
1606
/**
1607
 * For a given tensor symbol, this method resolves to its local reference inside the given graph.
1608
 * This is related to the sub-graph of symbolic graphs. A tensor symbol in the sub-graph can still have a
1609
 * representation in the parent graph. This method used to find the local reference in any graph.
1610
 * @param graph The symbolic graph.
1611
 * @param tensor_symbol The tensor symbol we want to resolve.
1612
 * @return A tensor symbol reference in the given graph.
1613
 */
1614
CCV_WARN_UNUSED(ccv_nnc_tensor_symbol_t) ccv_nnc_tensor_symbol_resolve(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor_symbol);
1615
/**
1616
 * Pass graph's tensor symbol into its sub graph. We will make the connection that the source tensor
1617
 * symbol in the source symbolic graph is the destination tensor symbol in the destination symbolic graph.
1618
 * The reason to do this inference is because a tensor symbol is local to a symbolic graph under the hood.
1619
 * Although you can use tensor symbols from different graphs directly (it calls this method or the resolve
1620
 * method above when create an execution node symbol), sometimes you need this method to do it manually.
1621
 * @param src_graph The source symbolic graph.
1622
 * @param dest_graph The destination symbolic graph.
1623
 * @param src_tensor_symbol The tensor symbol we want to resolve.
1624
 * @param dest_tensor_symbol The tensor symbol we want to resolve.
1625
 */
1626
void ccv_nnc_tensor_symbol_hookup(ccv_nnc_symbolic_graph_t* const src_graph, ccv_nnc_symbolic_graph_t* const dest_graph, const ccv_nnc_tensor_symbol_t src_tensor_symbol, const ccv_nnc_tensor_symbol_t dest_tensor_symbol);
1627
/**
1628
 * Set bypasses for a tensor symbol.
1629
 * For case..of graphs, if the condition doesn't meet, we will skip the execution of a sub-graph.
1630
 * However, in that case, we cannot express easily which output tensor corresponds to which input tensor.
1631
 * This methods provides the way.
1632
 * @param graph The symbolic graph.
1633
 * @param symbol_map The pair of tensors array, source is the input tensor, destination is the output tensor.
1634
 * @param symbol_map_size The size of the tensor pairs array.
1635
 */
1636
void ccv_nnc_tensor_symbol_set_bypasses(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_map_t* const symbol_map, const int symbol_map_size);
1637
/**
1638
 * Fetch input / output for an exec symbol. For efficiency consideration, this returns pointer directly.
1639
 * @param graph The symbolic graph.
1640
 * @param symbol The execution node symbol reference.
1641
 * @param inputs The pointer to store input tensor symbols array.
1642
 * @param input_size The pointer to store the size of input tensor symbols array.
1643
 * @param outputs The pointer to store output tensor symbols array.
1644
 * @param output_size The pointer to store the size of output tensor symbols array.
1645
 */
1646
void ccv_nnc_graph_exec_symbol_io(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t symbol, const int** const inputs, int* const input_size, const int** const outputs, int* const output_size);
1647
/**
1648
 * Replace a input / output tensor symbol on an exec symbol.
1649
 * @param graph The symbolic graph.
1650
 * @param symbol The execution node symbol reference.
1651
 * @param old_symbol The old tensor symbol to be replaced.
1652
 * @param new_symbol The new tensor symbol on input / output.
1653
 */
1654
void ccv_nnc_graph_exec_symbol_replace_io(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t symbol, const ccv_nnc_tensor_symbol_t old_symbol, const ccv_nnc_tensor_symbol_t new_symbol);
1655
/**
1656
 * Which exec symbol this is connected to. For efficiency consideration, this returns pointer directly.
1657
 * @param graph The symbolic graph.
1658
 * @param symbol The execution node symbol reference.
1659
 * @param tos The pointer to store outgoing indexes of the execution nodes.
1660
 * @param to_size the pointer to store the number of outgoing indexes.
1661
 */
1662
void ccv_nnc_graph_exec_symbol_to(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t symbol, const int** const tos, int* const to_size);
1663
/**
1664
 * Find the size allocated on the opaque tensor arena structure.
1665
 * @param tensor_arena The tensor arena object generated through compilation.
1666
 * @return The total allocated size in bytes.
1667
 */
1668
CCV_WARN_UNUSED(uint64_t) ccv_nnc_tensor_arena_size(const ccv_nnc_tensor_arena_t* const tensor_arena);
1669
/**
1670
 * Query whether a set of sources are the ancestors to a set of destination nodes.
1671
 * @param graph The symbolic graph.
1672
 * @param sources The exec sources to check whether they can reach some of the destinations.
1673
 * @param source_size How many sources in the source list.
1674
 * @param destinations The exec destinations to check whether sources can reach.
1675
 * @param destination_size How many destinations in the destination list.
1676
 * @param bitmask Bit return value, each bit represents a source, and 1 meant it can reach some of the destinations.
1677
 */
1678
void ccv_nnc_symbolic_graph_sources_to_destinations(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size, uint64_t* const bitmask);
1679
/**
1680
 * Re-init the tensor arena with updated symbolic graph. This won't work if the symbolic graph requires
1681
 * larger tensors than what's available. Use this method properly, you can avoid re-compile a graph
1682
 * just because some tensor shape changed.
1683
 * @param tensor_arena The tensor arena object generated through compilation.
1684
 * @param graph The updated symbolic graph with different tensor shape.
1685
 * @return 0 if successful, -1 if the tensor arena doesn't have enough space to just re-init.
1686
 */
1687
int ccv_nnc_tensor_arena_reinit(ccv_nnc_tensor_arena_t* const tensor_arena, const ccv_nnc_symbolic_graph_t* const graph);
1688
/**
1689
 * Re-init the graph exec arena with updated symbolic graph. This updated some hyper-parameters of
1690
 * executions to match the updated symbolic graph.
1691
 * @param graph_exec_arena The graph exec arena object provided mapping between symbolic and concrete graph.
1692
 * @param graph The concrete graph generated through compile method.
1693
 * @param symbolic_graph The updated symbolic graph.
1694
 */
1695
void ccv_nnc_graph_exec_reinit(ccv_nnc_graph_exec_arena_t* const graph_exec_arena, ccv_nnc_graph_t* const graph, const ccv_nnc_symbolic_graph_t* const symbolic_graph);
1696
/**
1697
 * Function prototype for tensor symbol creation callback.
1698
 */
1699
typedef void(*ccv_nnc_tensor_symbol_new_hook_f)(void* context, const ccv_nnc_tensor_symbol_t symbol, const ccv_nnc_tensor_param_t info, const char* const name);
1700
/**
1701
 * Hook into the call to ccv_nnc_tensor_symbol_new, return previous provided context if call into this method.
1702
 * @param graph The symbolic graph.
1703
 * @param hook The function to be called if a new tensor symbol created.
1704
 * @param context The context associated with the callback function.
1705
 */
1706
void* ccv_nnc_tensor_symbol_new_hook(ccv_nnc_symbolic_graph_t* const graph, ccv_nnc_tensor_symbol_new_hook_f hook, void* context);
1707
/**
1708
 * Function prototype for tensor symbol alias creation callback.
1709
 */
1710
typedef void(*ccv_nnc_tensor_symbol_alias_new_hook_f)(void* context, const ccv_nnc_tensor_symbol_t symbol, const ccv_nnc_tensor_symbol_t from_symbol, const int ofs[CCV_NNC_MAX_DIM_ALLOC], const int inc[CCV_NNC_MAX_DIM_ALLOC], const ccv_nnc_tensor_param_t info, const char* const name);
1711
/**
1712
 * Hook into the call to ccv_nnc_tensor_symbol_alias_new, return previous provided context if call into this method.
1713
 * @param graph The symbolic graph.
1714
 * @param hook The function to be called if a new tensor symbol alias created.
1715
 * @param context The context associated with the callback function.
1716
 */
1717
void* ccv_nnc_tensor_symbol_alias_new_hook(ccv_nnc_symbolic_graph_t* const graph, ccv_nnc_tensor_symbol_alias_new_hook_f hook, void* context);
1718
/**
1719
 * Set the pair reference for tensor symbols. Peer reference for tensor symbols has very specific meanings.
1720
 * For a backward pass involves sub-graphs. The commands in the sub-graph could reference to tensor symbols of
1721
 * a different graph (its forward pass graph). That is not allowed (two graph has no ancestral relationship
1722
 * cannot share a tensor symbol). So we create a new tensor symbol, but set the pair reference.
1723
 * @param graph The symbolic graph.
1724
 * @param tensor_symbol The tensor symbol in the current graph.
1725
 * @param pair_tensor_symbol The tensor symbol in the pair graph.
1726
 */
1727
void ccv_nnc_tensor_symbol_pair_with(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor_symbol, const ccv_nnc_tensor_symbol_t pair_tensor_symbol);
1728
/**
1729
 * Function prototype for execution node symbol creation callback.
1730
 */
1731
typedef void(*ccv_nnc_graph_exec_symbol_new_hook_f)(void* context, const ccv_nnc_graph_exec_symbol_t symbol, const ccv_nnc_cmd_t cmd, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, const ccv_nnc_tensor_symbol_t* const outputs, const int output_size, const char* const name);
1732
/**
1733
 * Hook into the call to ccv_nnc_graph_exec_symbol_new, return previous provided context if call into this method.
1734
 * @param graph The symbolic graph.
1735
 * @param hook The function to be called if a new execution node symbol created.
1736
 * @param context The context associated with the callback function.
1737
 */
1738
void* ccv_nnc_graph_exec_symbol_new_hook(ccv_nnc_symbolic_graph_t* const graph, ccv_nnc_graph_exec_symbol_new_hook_f hook, void* context);
1739
/**
1740
 * Set the pair reference for exec. This is very similar to the one for concrete graph. A pair reference
1741
 * of a backward pass execution node is its forward pass counterpart.
1742
 * @param graph The symbolic graph.
1743
 * @param exec_symbol The execution node symbol in the current graph.
1744
 * @param pair_exec_symbol The pairing execution node symbol.
1745
 */
1746
void ccv_nnc_graph_exec_symbol_pair_with(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t exec_symbol, const ccv_nnc_graph_exec_symbol_t pair_exec_symbol);
1747
1748
/** @} */
1749
1750
/** @} */
1751
1752
/**
1753
 * @defgroup level_3_5 Level-3.5 API
1754
 * @{
1755
 */
1756
1757
/**
1758
 * @defgroup level_3_5_autograd Automatic Differentiation
1759
 * @{
1760
 */
1761
1762
/**
1763
 * Compute the backward graph, assuming the provided symbolic graph only contain the "forward" part from sources to destinations.
1764
 * This effectively is called the "autograd" or automatic differentiation process (specifically, "reverse AD") in other libs.
1765
 * For a expression y = f(x), to compute dx, x is the wrt_symbol, y is the f_symbol.
1766
 * @param graph The symbolic graph.
1767
 * @param f_symbols The tensor symbols array of the result (or loss).
1768
 * @param f_symbol_size The size of the f symbols array.
1769
 * @param wrt_symbols The tensor symbols array of the inputs.
1770
 * @param wrt_symbol_size The size of the wrt symbols array.
1771
 * @param sources The source execution nodes array for the computation.
1772
 * @param source_size The size of the source nodes array.
1773
 * @param destinations The destination execution nodes array for the computation.
1774
 * @param destination_size The size of the destination nodes array.
1775
 */
1776
void ccv_nnc_symbolic_graph_backward(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const f_symbols, const int f_symbol_size, const ccv_nnc_tensor_symbol_t* const wrt_symbols, const int wrt_symbol_size, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size);
1777
/**
1778
 * Get the symbol that contains the gradient. The list will be flushed if the ccv_nnc_symbolic_graph_backward function is called again.
1779
 * @param graph The symbolic graph.
1780
 * @param symbol The tensor symbol we want to retrieve its gradient (must be one of the wrt symbols or the f symbols).
1781
 * @return A tensor symbol that represents the gradient.
1782
 */
1783
CCV_WARN_UNUSED(ccv_nnc_tensor_symbol_t) ccv_nnc_tensor_symbol_for_backward(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t symbol);
1784
/**
1785
 * Get the execution node symbol for a tensor symbol. This used to retrieve the execution node for a gradient tensor symbol.
1786
 * @param graph The symbolic graph.
1787
 * @param symbol The tensor symbol that represents the gradient (must be one of the wrt symbols).
1788
 * @return A execution node symbol that generates the gradient.
1789
 */
1790
CCV_WARN_UNUSED(ccv_nnc_graph_exec_symbol_t) ccv_nnc_graph_exec_symbol_for_backward(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t symbol);
1791
1792
/** @} */
1793
1794
/**
1795
 * @defgroup level_3_5_while While Loop
1796
 * @{
1797
 */
1798
1799
/**
1800
 * @page symbolic_while Construct a "while" loop in a symbolic graph
1801
 *
1802
 * (This document was written in 2016, since then, Caffe2 added support for While loop (as sub-graph), similar
1803
 * implementation added for ONNX as well.)
1804
 *
1805
 * In NNC, a computation graph cannot allow cycles. Thus, there is no flexible way to express loops.
1806
 *
1807
 * A little survey on this problem:
1808
 *
1809
 * * Caffe2 supports specific type of recurrent neural network.
1810
 *
1811
 * * TensorFlow as it stands, supports while construct. Its while construct is very straightforward, a body and
1812
 *   a condition is provided, you can construct whatever graph as you want.
1813
 *
1814
 * * mxnet supports recurrent neural network by unrolling it into normal none-looped graph.
1815
 *
1816
 * * Theano supports "scan" ops, which is a terminable loop (with loop variant, known as sequence).
1817
 *
1818
 * * CNTK supports this with custom BrainScript. Within BrainScript, you can access the previous state in a
1819
 *   function, therefore, effectively supports calling a method multiple times (looping over).
1820
 *
1821
 * Of above, Caffe2 and mxnet gave up on supporting generic loop for performance reasons. TensorFlow supports
1822
 * generic while loop, with all the trouble it may introduce (see the Nested while loop bug in TensorFlow that
1823
 * recently fixed). Theano picked a point seems pretty sweet, although there are limitations. CNTK's BrainScript
1824
 * is a DSL, they can do whatever they want with the drawback now that they need to implement a language runtime.
1825
 * TensorFlow, Theano and CNTK all support auto-differentiation over the while loop with tape (Wengert list).
1826
 *
1827
 * A simple way to support loop is to support conditional jump. In fact, conditional jump is a more generic way
1828
 * of doing loops. However, if you put this into the consideration that fully differentiable computation graph
1829
 * wanna to be supported, it is terrible. With conditional jump, it is really hard for you to know which tensor
1830
 * is used where, thus keep track for reverse accumulation (backward propagation). There is no counter or
1831
 * whatsoever, it is pretty hard to trace back on which line is executed how many times. Compounding this with
1832
 * NNC's promise that as long as it shows on the graph can be "parallel" computed, it will be parallel computed,
1833
 * it is close to impossible to track if conditional jump used in its raw form. Certain restrictions must be
1834
 * applied to how to do the loop. The compromise comes from closer examination of NNC's preferences.
1835
 *
1836
 * NNC prefers to have the graph without cycles. It also prefers to be fully differentiable. Another important
1837
 * criteria is that most functions in NNC require SSA (Static Single Assignment) representation. With these in
1838
 * mind, supporting while loop has to be strict.
1839
 *
1840
 * Luckily, there are well-formalized way of supporting this in literature and practice. Because it is
1841
 * well-formalized, translating this into existing NNC implementation is actually pretty straightforward. We
1842
 * are going to introduce a special version of while loop. In literature that discussed about SSA, it may be
1843
 * called parameterized loop. For us, it works like this:
1844
 *
1845
 * To construct a while loop for existing NNC graph, you need to be able to separate the existing graph into
1846
 * two sub-graphs.
1847
 *
1848
 * The while-loop sub-graph (WL sub-graph) contains a set of incoming nodes (I-nodes), Condition false output
1849
 * nodes (CFO-nodes) and end nodes (E-nodes). Each set have its own properties, but in short, all incoming edges
1850
 * to the WL sub-graph connect to one of the I-nodes, but nothing else. All outgoing edges from the WL sub-graph
1851
 * connect to one of the CFO-nodes, but nothing else. A nodes can be either a I-node, CFO-node or E-node,
1852
 * non-exclusively.
1853
 *
1854
 * There are also 3 types of tensors used for all nodes in WL sub-graph: Input tensors (I-tensors) are tensors
1855
 * that are inputs to some nodes, and will never be outputs. Output tensors (O-tensors) are tensors that are
1856
 * outputs from some nodes, but never be inputs to any nodes. I-tensors can be outputs from some nodes that
1857
 * outside of WL sub-graph. O-tensors can be inputs to some nodes that outside of WL sub-graph. Internal
1858
 * tensors (IN-tensors) are not visible outside of WL sub-graph, therefore, they can be both inputs and outputs
1859
 * of some nodes inside the sub-graph. Some tensors can be feedback into the WL sub-graph, given either
1860
 * O-tensors or IN-tensors. A parameter map can be given in these cases to describe which maps to what.
1861
 *
1862
 * The way to drive a WL sub-graph like this: the WL sub-graph runs until all CFO-nodes are reached. At this
1863
 * point, the while_f condition is checked. If true, we continue until all the end-nodes are reached. At this
1864
 * point, we increase the counter, reconfigure the WL sub-graph with parameter map, and run from I-nodes all
1865
 * over again. When reached all CFO-nodes, the condition is checked again, if false, WL sub-graph terminates,
1866
 * and the graph continues from the nodes that are pointed by CFO-nodes.
1867
 *
1868
 * Given these constraints, doing automatic differentiation is not that hard any more. A WL sub-graph, from
1869
 * the whole graph's point of view, is just a giant command supports both forward / backward operations, with
1870
 * some extra information passed around in the form of userdata (tape).
1871
 *
1872
 * For WL sub-graph, we can continue to leverage the compile / backward function that already written for
1873
 * symbolic graph as well.
1874
 *
1875
 * For compile function, we just need to take care of parameter maps (these need to be converted into binded
1876
 * tensors).
1877
 *
1878
 * For backward function, we need to convert parameter maps from assigner (thus, y = x) to accumulator (x += y).
1879
 *
1880
 * This function will replace the nodes that it affects to one sub-graph node. Thus, how to drive this
1881
 * sub-graph is opaque. Its backward form is opaque as well.
1882
 *
1883
 * There are no connection between its nodes and the outside graph nodes other than the three sets:
1884
 *
1885
 * 1. Incoming nodes, the set of nodes that contains the incoming edges from outside, they cannot have edges
1886
 *    points by inside nodes. The sub-graph computation starts from these incoming nodes;
1887
 *
1888
 * 2. Condition false output nodes, when condition is false, we will break out of this while loop, these
1889
 *    nodes pointing to the outside nodes, but no inside nodes;
1890
 *
1891
 * 3. End nodes, the set of nodes that marks the end of the while body, and after these nodes are executed,
1892
 *    we will return to the incoming nodes. These end nodes shouldn't have any edges pointing to inside nodes
1893
 *    (OK if end nodes are condition true output nodes as well);
1894
 *
1895
 * Since these will become a sub-graph (which, to its owner graph, just simple "node"), it will have inputs
1896
 * and outputs. Besides that, the loop body needs to be parameterized to be SSA compliant (see:
1897
 * https://www.cs.cmu.edu/~fp/courses/15411-f13/lectures/06-ssa.pdf). Thus, a list of body parameters need to
1898
 * be provided.
1899
 */
1900
1901
/**
1902
 * @defgroup level_3_5_while_essentials While Loop Essentials
1903
 * @{
1904
 */
1905
1906
/**
1907
 * The given tensors contains all the common / input / output tensors specified in the sub-graph.
1908
 */
1909
typedef int(*ccv_nnc_graph_while_f)(ccv_nnc_tensor_t* const* const inputs, const int input_size, const void* const data);
1910
/**
1911
 * Create a tensor tape that can be used to record for while loop or case..of.
1912
 * @return A ccv_nnc_tensor_tape_t pointer.
1913
 */
1914
CCV_WARN_UNUSED(ccv_nnc_tensor_tape_t*) ccv_nnc_tensor_tape_new(void);
1915
/**
1916
 * Deallocate the tensor tape and all the memory it allocated.
1917
 * @param tape The tensor tape object.
1918
 */
1919
void ccv_nnc_tensor_tape_free(ccv_nnc_tensor_tape_t* const tape);
1920
/**
1921
 * The API to operate on the symbolic graph is more involved than the concrete graph for while loops.
1922
 * The reason is because symbolic graph operates in SSA form (static single assignment), therefore, the while
1923
 * loops for the symbolic graph has to be parameterized.
1924
 * @param graph The symbolic graph.
1925
 * @param cmd The command idenfitier, can be either CCV_NNC_GRAPH_FORWARD or CCV_NNC_GRAPH_BACKWARD
1926
 * @param while_graph The sub-graph to run the while loop.
1927
 * @param name The name of the while loop. Optional.
1928
 * @return A while loop execution symbol (backed by a sub-graph) of the giving graph.
1929
 */
1930
ccv_nnc_graph_exec_symbol_t ccv_nnc_symbolic_graph_while(ccv_nnc_symbolic_graph_t* const graph, const uint32_t cmd, ccv_nnc_symbolic_graph_t* const while_graph, const char* const name);
1931
/**
1932
 * Set the expression to be evaluated, and at which nodes to be evaluated.
1933
 * @param while_graph The symbolic graph that will run the while loop.
1934
 * @param while_expr The function pointer to the expression.
1935
 * @param while_data A custom data provided to the expression evaluation function.
1936
 * @param inputs The input tensor symbols array to the expression evaluation function.
1937
 * @param input_size The size of the input tensor symbols array.
1938
 * @param breakpoints The execution node symbols at which the while loop will pause, evaluate the expression, and choose to either break out or continue.
1939
 * @param breakpoint_size The size of the execution node symbols array.
1940
 */
1941
void ccv_nnc_symbolic_graph_set_while_expr(ccv_nnc_symbolic_graph_t* const while_graph, const ccv_nnc_graph_while_f while_expr, const void* const while_data, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, const ccv_nnc_graph_exec_symbol_t* const breakpoints, const int breakpoint_size);
1942
/**
1943
 * Set the loop carry parameters when reuse. (parameterized loop, these will be carried over to the next loop).
1944
 * @param while_graph The symbolic graph that will run the while loop.
1945
 * @param symbol_map A pair of tensor symbols array, where the source tensor symbol is the output tensor symbol in this loop, the destination tensor symbol is the input tensor symbol in the next loop.
1946
 * @param symbol_map_size The size of the symbol map array.
1947
 */
1948
void ccv_nnc_symbolic_graph_set_carry_overs(ccv_nnc_symbolic_graph_t* const while_graph, const ccv_nnc_tensor_symbol_map_t* const symbol_map, const int symbol_map_size);
1949
/**
1950
 * Retrieve the special (magical) tensor symbol that retains the while loop counter (thus, dimension of 1x1x1, CCV_64S type).
1951
 * @param while_graph The symbolic graph that will run the while loop.
1952
 * @return A tensor symbol represents the implicit loop count.
1953
 */
1954
CCV_WARN_UNUSED(ccv_nnc_tensor_symbol_t) ccv_nnc_tensor_symbol_for_while_count(const ccv_nnc_symbolic_graph_t* const while_graph);
1955
/**
1956
 * Extract the sub-graph of the while loop from a symbol.
1957
 * @param graph The symbolic graph.
1958
 * @param while_symbol The execution node symbol.
1959
 * @return The sub-graph that represents a while loop.
1960
 */
1961
CCV_WARN_UNUSED(ccv_nnc_symbolic_graph_t*) ccv_nnc_symbolic_graph_from_while_symbol(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t while_symbol);
1962
/**
1963
 * Constructing looped concrete graph. Note that this interface is a little bit simpler than the one for symbolic
1964
 * graph. The reason is that a concrete graph operates on allocated tensors, thus, there is no mapping of tensor
1965
 * symbols between the parent graph and the while graph. (The reason to have a mapping in symbolic graphs is to
1966
 * constraint the variable leaking between the sub graph and parent graph).
1967
 * @param graph The concrete graph.
1968
 * @param cmd The command idenfitier, can be either CCV_NNC_GRAPH_FORWARD or CCV_NNC_GRAPH_BACKWARD
1969
 * @param while_graph The sub-graph to run the while loop.
1970
 * @return A execution node that represents the sub-graph.
1971
 */
1972
CCV_WARN_UNUSED(ccv_nnc_graph_exec_t) ccv_nnc_graph_while(ccv_nnc_graph_t* const graph, const uint32_t cmd, ccv_nnc_graph_t* const while_graph);
1973
/**
1974
 * Set the evaluated expression for the while loop. The while loop will break out if the expression evaluates to 0.
1975
 * @param while_graph The concrete graph that will run the while loop.
1976
 * @param while_expr The function pointer to the expression.
1977
 * @param while_data A custom data provided to the expression evaluation function.
1978
 * @param inputs The input tensors array to the expression evaluation function.
1979
 * @param input_size The size of the input tensors array.
1980
 * @param breakpoints The execution nodes at which the while loop will pause, evaluate the expression, and choose to either break out or continue.
1981
 * @param breakpoint_size The size of the execution nodes array.
1982
 */
1983
void ccv_nnc_graph_set_while_expr(ccv_nnc_graph_t* const while_graph, const ccv_nnc_graph_while_f while_expr, const void* const while_data, ccv_nnc_tensor_t* const* const inputs, const int input_size, const ccv_nnc_graph_exec_t* const breakpoints, const int breakpoint_size);
1984
/**
1985
 * Get the special tensor for the while loop count. It contains one uint64_t value. We keep an implicit count
1986
 * when evaluate the while loop and you can access it with this tensor.
1987
 * @param while_graph The concrete graph that will run the while loop.
1988
 * @return A special tensor that you can retrieve the loop count at .data.i64[0].
1989
 */
1990
CCV_WARN_UNUSED(ccv_nnc_tensor_t) ccv_nnc_tensor_for_while_count(const ccv_nnc_graph_t* const while_graph);
1991
/**
1992
 * Retrieve the sub-graph from a execution node.
1993
 * @param graph The concrete graph.
1994
 * @param exec The execution node represents the sub-graph.
1995
 * @return The sub-graph.
1996
 */
1997
CCV_WARN_UNUSED(ccv_nnc_graph_t*) ccv_nnc_graph_from_while_exec(const ccv_nnc_graph_t* const graph, ccv_nnc_graph_exec_t exec);
1998
1999
/** @} */
2000
2001
/**
2002
 * @defgroup level_3_5_while_others While Loop Others
2003
 * @{
2004
 */
2005
2006
/**
2007
 * For a given tape on a given graph, update the input / output tensors so new version will be created (if needed).
2008
 * @param tape The tensor tape object.
2009
 * @param graph The concrete graph this tensor tape is executing in.
2010
 * @param input_flags The flags associated with input tensors.
2011
 * @param inputs The input tensors.
2012
 * @param input_size The size of input tensors array.
2013
 * @param output_flags The flags associated with output tensors.
2014
 * @param outputs The output tensors.
2015
 * @param output_size The size of output tensors array.
2016
 */
2017
void ccv_nnc_tensor_tape_io(ccv_nnc_tensor_tape_t* const tape, const ccv_nnc_graph_t* const graph, const int* const input_flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, const int* const output_flags, ccv_nnc_tensor_t* const* const outputs, const int output_size);
2018
/**
2019
 * Retrieve the number we associated with the execution node that recorded on the tape for a particular run of the graph.
2020
 * @param tape The tensor tape object.
2021
 * @param graph The concrete graph this tensor tape is executing in.
2022
 * @param exec The execution node.
2023
 * @return The number associated with the execution node.
2024
 */
2025
uint64_t ccv_nnc_tensor_tape_numbering(ccv_nnc_tensor_tape_t* const tape, const ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec);
2026
/**
2027
 * Set the number we associated with the execution node that recorded on the tape for a particular run of the graph.
2028
 * @param tape The tensor tape object.
2029
 * @param graph The concrete graph this tensor tape is executing in.
2030
 * @param exec The execution node.
2031
 * @param numbering The number associated with the execution node.
2032
 */
2033
void ccv_nnc_tensor_tape_set_numbering(ccv_nnc_tensor_tape_t* const tape, ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, const uint64_t numbering);
2034
/**
2035
 * Augmented tensor to run a graph with while loop (An obvious example is dynamic RNN).
2036
 */
2037
typedef struct ccv_nnc_tensor_multiview_s {
2038
  // This is an augmented ccv_nnc_tensor_view_t
2039
  // Namely, it can point to multiple versions of tensors.
2040
  int type; // This type is CCV_NNC_TENSOR_MULTI_VIEW
2041
  // kind specified how the multi-version tensors stored.
2042
  // See the comment on the follow up enums.
2043
  uint8_t kind;
2044
  uint16_t repeat;
2045
  intptr_t anchor; // on which graph this multi-view tensor is wrapped. This helps to determine on which level the multi-view tensor should be unwrapped.
2046
  // If this tensor points to a tensor view, data.u8 - offset is the real pointer start.
2047
  off_t offset;
2048
  struct ccv_nnc_tensor_multiview_s* p; // If this is wrapped with another multiview tensor. Get to the parent one.
2049
  ccv_nnc_tensor_t* it; // Current tensor (tensor in use), this is updated along with the graph computation.
2050
  // This is useful because by just traverse tv, I can get the latest up-to-date reference to this multi-view tensor.
2051
  ccv_array_t* sp; // Synchronized tensor views. This corresponds to ccv_nnc_tensor_synchronize_to_multiview method, that records all the tensors registered for updates.
2052
  ccv_nnc_tensor_t* _inline_data[4];
2053
  ccv_nnc_tensor_t** _heap_data;
2054
} ccv_nnc_tensor_multiview_t;
2055
3.40k
#define CCV_NNC_MULTIVIEW_DATA(x) ((x)->_heap_data ? 
(x)->_heap_data0
: (x)->_inline_data)
2056
234
#define CCV_NNC_MULTIVIEW_PHI (intptr_t)0x1 /**< Denote this is a phi multi-view tensor. */
2057
2058
enum {
2059
  CCV_NNC_MULTIVIEW_K0N = 0, /**< All of them are repeated. */
2060
  CCV_NNC_MULTIVIEW_K1N = 1, /**< The first one is the first, the second one starts to repeat. (0111111...) */
2061
};
2062
#define CCV_NNC_MULTIVIEW_K01(x) ((x)->kind == CCV_NNC_MULTIVIEW_K0N && (x)->repeat == 1)
2063
/**
2064
 * Setup a tensor multiview with a given set of tensors.
2065
 * A multiview tensor point to a list of tensors, and its access depends on the loop count.
2066
 * For example, if we have a multiview tensor with list of [a, b, c, d], and kind is 1N, repeat is 3.
2067
 * For loop count 0, 1, 2, 3, 4, 5, 6, the corresponding tensors used will be a, b, c, d, b, c. If kind
2068
 * is 0N, and repeat is 4, it will be a, b, c, d, a, b.
2069
 * @param data[] The pointer to the list of tensors the multiview object can point to.
2070
 * @param kind Can be either CCV_NNC_MULTIVIEW_K0N or CCV_NNC_MULTIVIEW_K1N, basically whether to keep the initial tensor.
2071
 * @param repeat The length of the repeat.
2072
 * @param graph Which graph this multiview object attaches to.
2073
 * @param tensor_multiview The tensor multiview object to be updated.
2074
 */
2075
void ccv_nnc_tensor_multiview(ccv_nnc_tensor_t* data[], const uint8_t kind, const uint16_t repeat, const ccv_nnc_graph_t* const graph, ccv_nnc_tensor_multiview_t* const tensor_multiview);
2076
/**
2077
 * Since tensor_multiview will never be allocated with *_new method, the *_free method simply frees anything that is dynamically allocated afterwards (such as the reference items).
2078
 * @param tensor_multiview The tensor multiview object to be deallocated.
2079
 */
2080
void ccv_nnc_tensor_multiview_free(const ccv_nnc_tensor_multiview_t tensor_multiview);
2081
/**
2082
 * Setup a tensor as a reference to a tensor multiview, thus, when tensor multiview's tu (current tensor) updates, the tensor reference's data.u8 will get update as well (point to the same memory region as the tu).
2083
 * @param tensor_multiview The tensor multiview object.
2084
 * @param tensor The tensor that will be updated along with the multiview object.
2085
 */
2086
void ccv_nnc_tensor_synchronize_to_multiview(ccv_nnc_tensor_multiview_t* const tensor_multiview, ccv_nnc_tensor_t* const tensor);
2087
/**
2088
 * Send broadcast to subscribers of the multiview, call this in the beginning of exec.
2089
 * @param tensor_multiview The tensor multiview object.
2090
 */
2091
void ccv_nnc_tensor_multiview_synchronize(ccv_nnc_tensor_multiview_t* const tensor_multiview);
2092
2093
/** @} */
2094
2095
/** @} */
2096
2097
/**
2098
 * @defgroup level_3_5_case_of Branching
2099
 * @{
2100
 */
2101
2102
/**
2103
 * @page symbolic_switch Construct "switch" control structure in symbolic graph
2104
 *
2105
 * Here I use the keyword case_of. To provide a "switch" control structure within NNC has some nice properties
2106
 * even though you can simulate this with a while loop technically.
2107
 *
2108
 * 1. More optimal memory allocation: with "switch" control structure, memory can be multiplexed for each code
2109
 *    path because they are mutually exclusive.
2110
 *
2111
 * 2. No tape should be used within each branch: if we simulate with a "while" loop, any results from within
2112
 *    the "switch" statement has to be kept on the tape, which is inefficient because you don't need any tape
2113
 *    for the "switch" statement other than record which path it is taken.
2114
 *
2115
 * The particular "switch" control structure provided here is a multi-way structured "switch". Each branch is a
2116
 * sub-graph, so it is well-scoped. A node branch out based on the case_of condition return value to either of
2117
 * the branch (numbering from 0 to n, -1 means no path taken). If no path taken, the output tensors will be
2118
 * assigned with the default tensors and continue. Otherwise the computation within the sub-graph will be
2119
 * carried out and the output tensors will be assigned with the tensors specified within that sub-graph and
2120
 * continue.
2121
 *
2122
 * If we want to consider speculative execution in the future, we need to revisit our memory allocation scheme.
2123
 */
2124
2125
/**
2126
 * Function prototype to evaluate a branch expression.
2127
 */
2128
typedef int(*ccv_nnc_graph_case_of_f)(ccv_nnc_tensor_t* const* const inputs, const int input_size, const void* const data);
2129
/**
2130
 * Create a new case..of execution node symbol.
2131
 * @param graph The symbolic graph.
2132
 * @param cmd The command idenfitier, can be either CCV_NNC_GRAPH_FORWARD or CCV_NNC_GRAPH_BACKWARD
2133
 * @param inputs The input tensor symbols array for the expression.
2134
 * @param input_size The size of the input tensor symbols array.
2135
 * @param symbol_map The pair of tensor symbols array where the source is the input tensor symbol and the destination is the output tensor symbol.
2136
 * @param symbol_map_size The size of symbol map array.
2137
 * @param name The name of the case..of graph. Optional.
2138
 * @return A execution node symbol that represents the case..of graph.
2139
 */
2140
CCV_WARN_UNUSED(ccv_nnc_graph_exec_symbol_t) ccv_nnc_symbolic_graph_case_of_new(ccv_nnc_symbolic_graph_t* const graph, const uint32_t cmd, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, const ccv_nnc_tensor_symbol_map_t* const symbol_map, const int symbol_map_size, const char* const name);
2141
/**
2142
 * Set the expression to be evaluated when choose which sub-graph to branch to.
2143
 * @param graph The symbolic graph.
2144
 * @param exec The execution node symbol that represents the case..of graph.
2145
 * @param case_of The function pointer to evaluate.
2146
 * @param case_of_data The data associated with the function pointer.
2147
 */
2148
void ccv_nnc_symbolic_graph_set_case_of_expr(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t exec, ccv_nnc_graph_case_of_f case_of, const void* case_of_data);
2149
/**
2150
 * Set a sub-graph as one of the branch for the case..of graph.
2151
 * @param graph The symbolic graph.
2152
 * @param symbol The execution node symbol that represents the case..of graph.
2153
 * @param case_graph The sub-graph for one of the branch.
2154
 * @param case_of The index assigned to this sub-graph (expression returns this index to determine which sub-graph to execute).
2155
 * @param symbol_map The pair of tensor symbols array where the source is the output tensor symbol of the sub-graph, and the destination is the output tensor symbol of the execution node symbol.
2156
 * @param symbol_map_size The size of the symbol map array.
2157
 */
2158
void ccv_nnc_symbolic_graph_set_case_of(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t symbol, ccv_nnc_symbolic_graph_t* const case_graph, const int case_of, const ccv_nnc_tensor_symbol_map_t* const symbol_map, const int symbol_map_size);
2159
/**
2160
 * Create a new case..of execution node.
2161
 * @param graph The concrete graph.
2162
 * @param cmd The command idenfitier, can be either CCV_NNC_GRAPH_FORWARD or CCV_NNC_GRAPH_BACKWARD
2163
 * @param inputs The input tensors array supplied to the expression.
2164
 * @param input_size The size of the input tensors array.
2165
 * @param outputs The output tensors array.
2166
 * @param output_size The size of the output tensors array.
2167
 * @return A execution node that represents the case..of graph.
2168
 */
2169
CCV_WARN_UNUSED(ccv_nnc_graph_exec_t) ccv_nnc_graph_case_of_new(ccv_nnc_graph_t* const graph, const uint32_t cmd, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size);
2170
/**
2171
 * Set the expression to be evaluated when choose which sub-graph to branch to.
2172
 * @param graph The concrete graph.
2173
 * @param exec The execution node that represents the case..of graph.
2174
 * @param case_of The function pointer to evaluate.
2175
 * @param case_of_data The data associated with the function pointer.
2176
 * @param offset A integer added to the expression output to help choose the index. Thus, real index = expression index + offset.
2177
 */
2178
void ccv_nnc_graph_set_case_of_expr(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, ccv_nnc_graph_case_of_f case_of, const void* case_of_data, const int offset);
2179
/**
2180
 * Set a sub-graph as one of the branch for the case..of graph.
2181
 * @param graph The concrete graph.
2182
 * @param exec The execution node that represents the case..of graph.
2183
 * @param case_graph The sub-graph for one of the branch.
2184
 * @param case_of The index assigned to this sub-graph (expression returns this index + offset to determine which sub-graph to execute).
2185
 */
2186
void ccv_nnc_graph_set_case_of(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, ccv_nnc_graph_t* const case_graph, const int case_of);
2187
2188
/** @} */
2189
2190
/**
2191
 * @defgroup level_3_5_minimizer Gradient-based Optimization
2192
 * @{
2193
 */
2194
2195
/**
2196
 * This is the comparable part to Caffe's solver or TensorFlow's optimizer. It took a step further than just
2197
 * compute the gradient, but also apply the gradient to update parameters to minimize the loss.
2198
 * @param graph The symbolic graph.
2199
 * @param minimizer The wrapped command that represents a particular optimization strategy.
2200
 * @param losses The tensor symbols array of losses.
2201
 * @param loss_size The size of the loss symbols array.
2202
 * @param parameters The parameter tensor symbols to optimize.
2203
 * @param parameter_size The size of parameter symbols array.
2204
 * @param inputs The additional input symbols we compute gradient against.
2205
 * @param input_size The size of the additional input symbols array.
2206
 * @param sources The source execution nodes array.
2207
 * @param source_size The size of source nodes array.
2208
 * @param destinations The destinations execution nodes array.
2209
 * @param destination_size The size of destination nodes array.
2210
 * @param gradients The tensor symbols that represents the gradient for update, should be the same size as the parameters array + input array size. This can be 0 (optional).
2211
 * @param updated_parameters The tensor symbols that represents the updated parameters, should be the same size as the parameters array.
2212
 * @param saved_aux The tensor symbols that is helpful for particular optimization strategy.
2213
 * @param graph_exec_symbols The execution node symbols for the updates, should be the same size as the parameters array.
2214
 */
2215
void ccv_nnc_symbolic_graph_minimize(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_cmd_t minimizer, const ccv_nnc_tensor_symbol_t* const losses, const int loss_size, const ccv_nnc_tensor_symbol_t* const parameters, const int parameter_size, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size, ccv_nnc_tensor_symbol_t* const gradients, ccv_nnc_tensor_symbol_t* const updated_parameters, ccv_nnc_tensor_symbol_map_t* const saved_aux, ccv_nnc_graph_exec_symbol_t* const graph_exec_symbols);
2216
/**
2217
 * The number of extra saved aux per parameter only depends on the commands. For example, SGD with momentum requires 1 aux (for momentum).
2218
 * Others require more.
2219
 * @param minimizer The wrapped command that represents a particular optimization strategy.
2220
 * @return the number of saved aux per parameter.
2221
 */
2222
CCV_WARN_UNUSED(int) ccv_nnc_minimizer_saved_aux_size(const ccv_nnc_cmd_t minimizer);
2223
2224
/** @} */
2225
2226
/**
2227
 * @defgroup level_3_5_simplify Graph Simplification
2228
 * @{
2229
 */
2230
2231
/**
2232
 * @page symbolic_simplify Symbolic graph simplification
2233
 *
2234
 * We make a distinction between graph simplifications and optimizations (autotune).
2235
 *
2236
 * Simplification: rewrite the graph and the resulting graph will have less nodes. This is done on the symbolic
2237
 * graph only. Passes that is "simplification" include pruning, common sub-expression eliminations, constant
2238
 * folding etc.
2239
 *
2240
 * Optimization (autotune): graph optimization can have more objectives. The most obvious objective is to reduce
2241
 * computation time. For symbolic graph, passes that reduces computation time include data layout optimizations,
2242
 * auto parallel etc (in normal optimization implementations, they have a cost model to guide the optimization.
2243
 * NNC's implementation uses a cost database that profiles the time cost on the device to guide the optimization.
2244
 * We call it autotune to distinguish with the normal optimization passes because we need device profile data).
2245
 * There could be other objectives, for example, in many deep learning applications, reducing memory footprint
2246
 * can be desirable. However, as always in computer science, memory and time is a typical trade-off. Memory
2247
 * optimization almost always results longer computation time, and the objective is to trade between these two
2248
 * with a bias term (in other frameworks such as TensorFlow, the memory optimizer uses a list of "cheap ops" to
2249
 * bias between the time and memory footprint).
2250
 *
2251
 * For graph optimizations, it can happen on both the symbolic graph level as well as the concrete graph level.
2252
 * For NNC, symbolic graph is already very explicit (data layout, device allocation and data transfer between
2253
 * devices / nodes, even the command backend can all be specified on the symbolic graph), however, some
2254
 * information is unknown until it is compiled down to concrete graph (tensor addresses, tensor initialization
2255
 * etc.), and since graph optimizations need all the information to optimize. Keeping the flexibility to do
2256
 * optimization on both symbolic and concrete graph level seems reasonable.
2257
 */
2258
2259
enum {
2260
  /**
2261
   * If two commands generated the same outputs, all the places where the newer output used will be replaced by
2262
   * the old output. Later on the graph pruning stage, the command that generate the newer output will be
2263
   * eliminated.
2264
   */
2265
  CCV_NNC_SIMPLIFY_COMMON_SUBEXPRESSION_ELIMINATION,
2266
  /**
2267
   * For the given outputs, eliminate unused input tensors, and then eliminate graph execs that don't contribute
2268
   * to the outputs.
2269
   */
2270
  CCV_NNC_SIMPLIFY_GRAPH_PRUNING,
2271
  /**
2272
   * For CCV_NNC_DATA_TRANSFER, if the input / output is the same (on the same device, no alias), we can skip.
2273
   * Similarly, if it is on the same device, but alias of some, for some cases we can skip as well (if neither
2274
   * are carry overs, bypasses etc.)
2275
   */
2276
  CCV_NNC_SIMPLIFY_DATA_TRANSFER_OPT,
2277
  /**
2278
   * Combine a few smaller ops into bigger one. For now, this functionality is limited. I can only address ops
2279
   * that are sequential.
2280
   */
2281
  CCV_NNC_SIMPLIFY_OPS_FUSION,
2282
  // CCV_NNC_SIMPLIFY_CONSTANT_FOLDING, // This currently is not supported, because we don't have efficient way to express constant in symbolic graph.
2283
};
2284
/**
2285
 * Simplify a graph with given list of passes, in that particular order.
2286
 * Note, when a graph is simplified, its sources / destinations are changed as well.
2287
 * @param graph The symbolic graph.
2288
 * @param passes The array of passes we are going to apply.
2289
 * @param pass_size The size of the passes array.
2290
 * @param binds The tensor symbols we may bind to an input later (it doesn't prevent pruning any execution nodes).
2291
 * @param bind_size The size of the bind array.
2292
 * @param outputs The output tensor symbols we want to retain (we are going to prune any execution nodes that is not related to these outputs).
2293
 * @param output_size The size of the output array.
2294
 * @param sources The source execution node symbols array.
2295
 * @param source_size The size of source node symbols array.
2296
 * @param destinations The destinations execution node symbols array.
2297
 * @param destination_size The size of destination node symbols array.
2298
 */
2299
void ccv_nnc_symbolic_graph_simplify(ccv_nnc_symbolic_graph_t* const graph, const int* const passes, const int pass_size, const ccv_nnc_tensor_symbol_t* const binds, const int bind_size, const ccv_nnc_tensor_symbol_t* const outputs, const int output_size, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size);
2300
2301
/** @} */
2302
2303
/**
2304
 * @defgroup level_3_5_parallel Automatic Graph Parallelization
2305
 * @{
2306
 */
2307
2308
enum {
2309
  /**
2310
   * Op for reducer / allreducer. Currently only supports sum.
2311
   */
2312
  CCV_NNC_PARALLEL_REDUCE_OP_SUM,
2313
};
2314
2315
/**
2316
 * Turn the existing graph to be capable to run on several devices with different data inputs at parallel.
2317
 * With this method, additional tensor symbols will be created that runs on different devices. That has
2318
 * been said, there are concepts of "broadcast" and "reduce". "broadcast" tensor symbols will be copied to
2319
 * different devices, while "reduce" tensors will be summed from different devices to the default device.
2320
 * "allreducer" concept is simpler. The allreduce operation will be performed on these tensors and then
2321
 * be used on different devices again.
2322
 *
2323
 * Limitations: right now, the way to reduce / allreduce tensors only supports "sum". The data parallel
2324
 * only supports GPU, thus, the nodes will be duplicated are GPU computations and GPU memory backed
2325
 * tensors. Also, right now, the tensors to be broadcasted / allreduced / reduced should have no aliases.
2326
 *
2327
 * @param graph The symbolic graph.
2328
 * @param parallel Number of devices we want to run on. 0 will use all devices available. 1 will skip.
2329
 * @param broadcasts The tensor symbols to be broadcasted.
2330
 * @param broadcast_size The size of the broadcast tensor symbols array.
2331
 * @param allreducers The tensor symbols that to be allreduced.
2332
 * @param allreducer_size The size of the allreducer tensor symbols array.
2333
 * @param allreducer_outs Return the tensor symbols for allreducers that before allreduced. Optional, 0
2334
 *        means I don't care about this.
2335
 * @param reducers The tensor symbols to be reduced.
2336
 * @param reducer_size The size of the reducer tensor symbols array.
2337
 * @param reducer_outs Return the tensor symbols for reducers that after reduced. Optional, 0 means
2338
 *        I don't care about this.
2339
 * @param reduce_op_type The reduce op for reducer / allreducer.
2340
 * @param sources The source execution node symbols array.
2341
 * @param source_size The size of source node symbols array.
2342
 * @param destinations The destinations execution node symbols array.
2343
 * @param destination_size The size of destination node symbols array.
2344
 */
2345
void ccv_nnc_symbolic_graph_data_parallel(ccv_nnc_symbolic_graph_t* const graph, const int parallel, const ccv_nnc_tensor_symbol_t* const broadcasts, const int broadcast_size, const ccv_nnc_tensor_symbol_t* const allreducers, const int allreducer_size, ccv_nnc_tensor_symbol_t* const allreducer_outs, const ccv_nnc_tensor_symbol_t* const reducers, const int reducer_size, ccv_nnc_tensor_symbol_t* const reducer_outs, const int reduce_op_type, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size);
2346
/**
2347
 * Get the symbol that is on a device other than the default one. The list will be flushed if the
2348
 * ccv_nnc_symbolic_graph_data_parallel function is called again.
2349
 * @param graph The symbolic graph.
2350
 * @param symbol The tensor symbol we want to retrieve its counterpart on a different device.
2351
 * @param device_id The device numeric id for this symbol.
2352
 * @return A tensor symbol that is on a different device.
2353
 */
2354
CCV_WARN_UNUSED(ccv_nnc_tensor_symbol_t) ccv_nnc_tensor_symbol_copy(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t symbol, const int device_id);
2355
/**
2356
 * Set corresponding symbol for this symbol on another device. Thus, someone else can query this
2357
 * later with ccv_nnc_tensor_symbol_copy
2358
 * @param graph The symbolic graph.
2359
 * @param symbol The tensor symbol we want to set its counterpart on a different device.
2360
 * @param device_id The device numeric id for this symbol.
2361
 * @param copy The tensor symbol counterpart on a different device.
2362
 */
2363
void ccv_nnc_tensor_symbol_set_copy(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t symbol, const int device_id, const ccv_nnc_tensor_symbol_t copy);
2364
/**
2365
 * Get the execution node that is on a device other than the default one. The list will be flushed
2366
 * if the ccv_nnc_symbolic_graph_data_parallel function is called again.
2367
 * @param graph The symbolic graph.
2368
 * @param symbol The execution node we want to retrieve its counterpart on a different device.
2369
 * @param device_id The device numeric id for this symbol.
2370
 * @return A execution node that is on a different device.
2371
 */
2372
CCV_WARN_UNUSED(ccv_nnc_graph_exec_symbol_t) ccv_nnc_graph_exec_symbol_copy(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t symbol, const int device_id);
2373
/**
2374
 * Set corresponding symbol for this symbol on another device. Thus, someone else can query this
2375
 * later with ccv_nnc_graph_exec_symbol_copy
2376
 * @param graph The symbolic graph.
2377
 * @param symbol The execution node we want to set its counterpart on a different device.
2378
 * @param device_id The device numeric id for this symbol.
2379
 * @param copy The execution node counterpart on a different device.
2380
 */
2381
void ccv_nnc_graph_exec_symbol_set_copy(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t symbol, const int device_id, const ccv_nnc_graph_exec_symbol_t copy);
2382
2383
/** @} */
2384
2385
/**
2386
 * @defgroup level_3_5_memory_compression Memory Compression
2387
 * @{
2388
 */
2389
2390
/**
2391
 * Apply LSSC memory compression algorithm to the convolution activations. This will compress the activation
2392
 * layer for convolution, therefore, save the overall memory usage during training time.
2393
 *
2394
 * @param graph The symbolic graph.
2395
 * @param sources The source execution node symbols array.
2396
 * @param source_size The size of source node symbols array.
2397
 * @param destinations The destinations execution node symbols array.
2398
 * @param destination_size The size of destination node symbols array.
2399
 */
2400
void ccv_nnc_symbolic_graph_memory_compression(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size);
2401
2402
/** @} */
2403
2404
/** @} */
2405
2406
/**
2407
 * @defgroup level_4 Level-4 API
2408
 * @{
2409
 */
2410
2411
/**
2412
 * Opaque pointer to the dynamic graph structure.
2413
 */
2414
typedef struct ccv_nnc_dynamic_graph_s ccv_nnc_dynamic_graph_t;
2415
2416
/**
2417
 * Masquerade this as if it is a on stack variable, there is a heap allocation but managed by the dynamic graph.
2418
 * The fact that ccv_nnc_tensor_variable_t is a pointer is an implementation detail. It should be treated as an
2419
 * opaque type throughout. We may later extends this to be some on-stack information or even just a uid.
2420
 */
2421
typedef struct ccv_nnc_tensor_variable_s* ccv_nnc_tensor_variable_t;
2422
2423
/**
2424
 * Create a dynamic graph.
2425
 * @return A newly created dynamic graph.
2426
 */
2427
CCV_WARN_UNUSED(ccv_nnc_dynamic_graph_t*) ccv_nnc_dynamic_graph_new(void);
2428
2429
/** @cond ALL */
2430
// Get a new tensor variable.
2431
CCV_WARN_UNUSED(ccv_nnc_tensor_variable_t) ccv_nnc_tensor_variable_new_impl(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_param_t info);
2432
16.5k
#define CCV_NNC_TENSOR_VARIABLE_NEW_X_1(graph) ccv_nnc_tensor_variable_new_impl(graph, ccv_nnc_tensor_auto)
2433
14.6k
#define CCV_NNC_TENSOR_VARIABLE_NEW_X_SEL(_1, _2, _FX, ...) _FX
2434
// Making so that this new method can take parameters for both no parameter or with tensor_param.
2435
31.2k
#define ccv_nnc_tensor_variable_new(graph, ...) CCV_NNC_TENSOR_VARIABLE_NEW_X_SEL(graph, ##__VA_ARGS__, ccv_nnc_tensor_variable_new_impl, 
CCV_NNC_TENSOR_VARIABLE_NEW_X_116.5k
)(graph, ##
__VA_ARGS__8.30k
)
2436
CCV_WARN_UNUSED(ccv_nnc_tensor_variable_t) ccv_nnc_tensor_constant_new_impl(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_param_t info);
2437
#define CCV_NNC_TENSOR_CONSTANT_NEW_X_1(graph) ccv_nnc_tensor_constant_new_impl(graph, ccv_nnc_tensor_auto)
2438
29
#define CCV_NNC_TENSOR_CONSTANT_NEW_X_SEL(_1, _2, _FX, ...) _FX
2439
// Making so that this new method can take parameters for both no parameter or with tensor_param.
2440
29
#define ccv_nnc_tensor_constant_new(graph, ...) CCV_NNC_TENSOR_CONSTANT_NEW_X_SEL(graph, ##__VA_ARGS__, ccv_nnc_tensor_constant_new_impl, CCV_NNC_TENSOR_CONSTANT_NEW_X_1)(graph, ##
__VA_ARGS__5
)
2441
/** @endcond */
2442
2443
/**
2444
 * Create a new tensor variable that is an alias of a given tensor variable.
2445
 * @param graph The dynamic graph.
2446
 * @param tensor_variable The tensor variable we are going to alias from.
2447
 * @param ofs The offset on each of the dimension.
2448
 * @param inc The line size of each dimension.
2449
 * @param info The tensor parameters for the new alias.
2450
 * @return New tensor variable that is an alias.
2451
 */
2452
CCV_WARN_UNUSED(ccv_nnc_tensor_variable_t) ccv_nnc_tensor_variable_alias_new(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable, const int ofs[CCV_NNC_MAX_DIM_ALLOC], const int inc[CCV_NNC_MAX_DIM_ALLOC], const ccv_nnc_tensor_param_t info);
2453
/**
2454
 * Get the parameters for a tensor variable.
2455
 * @param graph The dynamic graph.
2456
 * @param tensor_variable The tensor variable reference.
2457
 * @return The tensor parameters.
2458
 */
2459
CCV_WARN_UNUSED(ccv_nnc_tensor_param_t) ccv_nnc_tensor_variable_params(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable);
2460
2461
/** @cond ALL */
2462
/**
2463
 * Get the underlying tensor for the tensor variable. The tensor allocation may be performed when calling this
2464
 * method. If the tensor cannot be allocated (because no shape specified), return 0.
2465
 * @param graph The dynamic graph.
2466
 * @param tensor_variable The tensor variable to get the underlying tensor.
2467
 * @param stream_context Which stream this command will be executed upon.
2468
 * @return The underlying tensor.
2469
 */
2470
CCV_WARN_UNUSED(ccv_nnc_tensor_t*) ccv_nnc_tensor_from_variable_impl(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable, ccv_nnc_stream_context_t* const stream_context);
2471
8.48k
#define CCV_NNC_TENSOR_FROM_VARIABLE_X_1(graph, tensor_variable) ccv_nnc_tensor_from_variable_impl(graph, tensor_variable, 0)
2472
60.3k
#define CCV_NNC_TENSOR_FROM_VARIABLE_X_SEL(_1, _2, _3, _FX, ...) _FX
2473
// Making so that this new method can take parameters for both no parameter or with tensor_param.
2474
68.8k
#define ccv_nnc_tensor_from_variable(graph, tensor_variable, ...) CCV_NNC_TENSOR_FROM_VARIABLE_X_SEL
(graph, tensor_variable, ##__VA_ARGS__, ccv_nnc_tensor_from_variable_impl, 45.9k
CCV_NNC_TENSOR_FROM_VARIABLE_X_18.48k
)(graph, tensor_variable, ##__VA_ARGS__)
2475
/** @endcond */
2476
/**
2477
 * Query whether a given tensor variable is a constant (no gradient).
2478
 * @param graph The dynamic graph.
2479
 * @param tensor_variable The tensor variable to query whether it is a constant.
2480
 */
2481
CCV_WARN_UNUSED(int) ccv_nnc_tensor_variable_is_constant(const ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable);
2482
/**
2483
 * Set a tensor on the tensor variable. Tensor variable doesn't take over the life-cycle management of the tensor
2484
 * (in similar way as the tensor binds).
2485
 * @param graph The dynamic graph.
2486
 * @param tensor_variable The tensor variable to set.
2487
 * @param tensor The tensor that is going to be associated with the tensor variable.
2488
 */
2489
void ccv_nnc_tensor_variable_set(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable, ccv_nnc_tensor_t* const tensor);
2490
/**
2491
 * A destructor function to be called when a tensor variable will be freed in the sense that no
2492
 * backward computation need it no more.
2493
 * Thus, we pass in tensor rather than tensor variable for the destructor.
2494
 */
2495
typedef void (*ccv_nnc_tensor_variable_destructor_f)(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_t* const tensor, void* const context);
2496
/**
2497
 * Hook into a tensor variable such that when it is actually freed (destroyed), the callback will receive
2498
 * the update.
2499
 * @param graph The dynamic graph.
2500
 * @param tensor_variable The tensor variable to observe when it is destroyed.
2501
 * @param func The callback function.
2502
 * @param context The context to be passed along to the callback function.
2503
 **/
2504
void ccv_nnc_tensor_variable_destructor_hook(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable, ccv_nnc_tensor_variable_destructor_f func, void* const context);
2505
/**
2506
 * Check given tensor variables whether have effects to another set of tensor variables.
2507
 * @param graph The dynamic graph.
2508
 * @param source_variables The tensor variables to check whether it has effect to another set of variables.
2509
 * @param source_variable_size The size of source tensor variables.
2510
 * @param destination_variables Whether the source variables has effect to this list of variables.
2511
 * @param destination_variable_size The size of destination tensor variables.
2512
 * @param bitmask Bit return value, each bit represents a source tensor variable, and 1 meant it can reach some of the destinations.
2513
 */
2514
void ccv_nnc_dynamic_graph_has_effect_to_tensor_variables(const ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t* const source_variables, const int source_variable_size, const ccv_nnc_tensor_variable_t* const destination_variables, const int destination_variable_size, uint64_t* const bitmask);
2515
/**
2516
 * Execute a command with given tensor variables, the output is in the output tensor variables.
2517
 * @param graph The dynamic graph.
2518
 * @param cmd The wrapped command.
2519
 * @param hint The hint associated with the command.
2520
 * @param flags A reserved field for flags.
2521
 * @param inputs The input tensor variables array.
2522
 * @param input_size The size of the input tensor variables array.
2523
 * @param outputs The output tensor variables array.
2524
 * @param output_size The size of the output tensor variables array.
2525
 * @param parallel The parallel parameter, how many concurrent computations we need to execute.
2526
 * @param stream_context Which stream this command will be executed upon.
2527
 */
2528
int ccv_nnc_dynamic_graph_exec(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, const ccv_nnc_tensor_variable_t* const inputs, const int input_size, ccv_nnc_tensor_variable_t* const outputs, const int output_size, const int parallel, ccv_nnc_stream_context_t* const stream_context);
2529
/**
2530
 * Compute the gradient of given tensor, with respect to the f. Thus, df / dt.
2531
 * @param dynamic_graph The dynamic graph.
2532
 * @param f_variables The output losses.
2533
 * @param f_variable_size The size of output losses array.
2534
 * @param df_optionals The custom gradients for f. If not provided, will default to 1.
2535
 * @param inputs The input variables.
2536
 * @param input_size The size of the input variables array.
2537
 * @param outputs The gradients with respect to the inputs. If the gradient already have value exist, it will be
2538
 *        accumulated into the final value.
2539
 * @param output_size The size of the outputs array. Should be equal to the input_size.
2540
 * @param stream_context Which stream this computation will be executed upon.
2541
 */
2542
void ccv_nnc_dynamic_graph_backward(ccv_nnc_dynamic_graph_t* const dynamic_graph, const ccv_nnc_tensor_variable_t* const f_variables, const int f_variable_size, const ccv_nnc_tensor_variable_t* const df_optionals, const ccv_nnc_tensor_variable_t* const inputs, const int input_size, ccv_nnc_tensor_variable_t* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context);
2543
/**
2544
 * Apply gradients to the set of parameters to update them with appropriate minimizer.
2545
 * @param dynamic_graph The dynamic graph.
2546
 * @param minimizer The wrapped command that represents a particular optimization strategy.
2547
 * @param gradients The computed gradients to be applied.
2548
 * @param gradient_size The size of gradients array.
2549
 * @param parameters The parameters to update.
2550
 * @param parameter_size The size of parameters array, should be the same length as gradients.
2551
 * @param saved_aux The aux variables to faciliate the minimizer. See ccv_nnc_minimizer_saved_aux_size.
2552
 * @param parallel The parallel parameter, how many concurrent computations we need to execute.
2553
 * @param stream_context Which stream this computation will be executed upon.
2554
 */
2555
void ccv_nnc_dynamic_graph_apply_gradients(ccv_nnc_dynamic_graph_t* const dynamic_graph, const ccv_nnc_cmd_t minimizer, const ccv_nnc_tensor_variable_t* const gradients, const int gradient_size, ccv_nnc_tensor_variable_t* const parameters, const int parameter_size, ccv_nnc_tensor_variable_t* const saved_aux, const int parallel, ccv_nnc_stream_context_t* const stream_context);
2556
/**
2557
 * Apply one step of minimization (most likely, a gradient descent) to the parameters with a given loss (or
2558
 * losses).
2559
 * @param dynamic_graph The dynamic graph.
2560
 * @param minimizer The wrapped command that represents a particular optimization strategy.
2561
 * @param losses The losses we are trying to minimize.
2562
 * @param loss_size The size of the losses array.
2563
 * @param dloss_optionals The custom gradient for losses. If not provided, will default to 1.
2564
 * @param parameters The parameters to update.
2565
 * @param parameter_size The size of parameters array.
2566
 * @param saved_aux The aux variables to faciliate the minimizer. See ccv_nnc_minimizer_saved_aux_size.
2567
 * @param parallel The parallel parameter, how many concurrent computations we need to execute.
2568
 * @param stream_context Which stream this computation will be executed upon.
2569
 */
2570
void ccv_nnc_dynamic_graph_minimize(ccv_nnc_dynamic_graph_t* const dynamic_graph, const ccv_nnc_cmd_t minimizer, const ccv_nnc_tensor_variable_t* const losses, const int loss_size, const ccv_nnc_tensor_variable_t* const dloss_optionals, ccv_nnc_tensor_variable_t* const parameters, const int parameter_size, ccv_nnc_tensor_variable_t* const saved_aux, const int parallel, ccv_nnc_stream_context_t* const stream_context);
2571
/**
2572
 * Read more in Level-5 API section.
2573
 */
2574
typedef struct ccv_cnnp_model_s ccv_cnnp_model_t;
2575
/**
2576
 * Evaluate a CNNP model on the dynamic graph with set of inputs / outputs.
2577
 * @param dynamic_graph The dynamic graph.
2578
 * @param model The CNNP model to be evaluated against. Note that ccv_nnc_dynamic_graph_backward /
2579
 *              ccv_nnc_dynamic_graph_apply_gradients / ccv_nnc_dynamic_graph_minimize all works with this
2580
 *              model. It takes over the life-cycle of the model, and now you don't need to free it any more.
2581
 * @param is_test Whether we are in test mode or not.
2582
 * @param inputs The input variables.
2583
 * @param input_size The size of the input variables array.
2584
 * @param outputs The gradients with respect to the inputs.
2585
 * @param output_size The size of the outputs array.
2586
 * @param tensor_tape An opaque tensor tape object to "backpropagate through time".
2587
 * @param stream_context Which stream this computation will be executed upon.
2588
 */
2589
void ccv_nnc_dynamic_graph_evaluate(ccv_nnc_dynamic_graph_t* const dynamic_graph, ccv_cnnp_model_t* const model, const int is_test, const ccv_nnc_tensor_variable_t* const inputs, const int input_size, ccv_nnc_tensor_variable_t* const outputs, const int output_size, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const stream_context);
2590
/**
2591
 * Enable or disable gradient computation on a dynamic graph.
2592
 * @param dynamic_graph The dynamic graph.
2593
 * @param no_grad If it is 1, disable gradient computation on the dynamic graph.
2594
 */
2595
void ccv_nnc_dynamic_graph_set_no_grad(ccv_nnc_dynamic_graph_t* const dynamic_graph, const int no_grad);
2596
/**
2597
 * Dynamic graph will retain a memory it allocated for efficient reuse. Triggering this method
2598
 * intentionally will force these memory to be collected. This is helpful if you know the existing
2599
 * allocation won't be enough for the future use.
2600
 * @param dynamic_graph The dynamic graph.
2601
 */
2602
void ccv_nnc_dynamic_graph_gc(ccv_nnc_dynamic_graph_t* const dynamic_graph);
2603
/**
2604
 * Dispose a tensor variable. You cannot do any computation against this tensor variable afterwards.
2605
 * @param graph The dynamic graph.
2606
 * @param tensor_variable The tensor variable to be disposed.
2607
 */
2608
void ccv_nnc_tensor_variable_free(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable);
2609
/**
2610
 * Free the dynamic graph.
2611
 * @param graph The dynamic graph.
2612
 */
2613
void ccv_nnc_dynamic_graph_free(ccv_nnc_dynamic_graph_t* const graph);
2614
/**
2615
 * Generate output that can be parsed by GraphViz (DOT language).
2616
 * @param graph The dynamic graph.
2617
 * @param flags Either CCV_NNC_SHORT_DOT_GRAPH or CCV_NNC_LONG_DOT_GRAPH
2618
 * @param out The output file stream.
2619
 */
2620
void ccv_nnc_dynamic_graph_dot(const ccv_nnc_dynamic_graph_t* const graph, const int flags, FILE* out);
2621
/**
2622
 * Count how many ops we kept for gradient computation purpose. This method is useful when we
2623
 * want to assert at end of some train loop, we shouldn't have any gradient computation left.
2624
 * @param graph The dynamic graph.
2625
 * @param type The type of variables to trace. CCV_NNC_SYMBOL_TENSOR / CCV_NNC_SYMBOL_GRAPH_EXEC
2626
 * @return How many gradient computations we kept.
2627
 */
2628
CCV_WARN_UNUSED(int) ccv_nnc_dynamic_graph_bookkeeping_count(const ccv_nnc_dynamic_graph_t* const graph, const int type);
2629
2630
/** @} */
2631
2632
/**
2633
 * @defgroup level_5 Level-5 API
2634
 * @{
2635
 */
2636
2637
/**
2638
 * @page dataframe What is "dataframe" in ML?
2639
 *
2640
 * A large part of machine learning consists of go through data, process them to a shape / form that makes sense,
2641
 * and pass that into the model to train. Deep learning frameworks such as TensorFlow or PyTorch provides some
2642
 * dataset APIs for this purpose. It is convenient for these frameworks because by being Python, people can use
2643
 * Pandas to process the data. In Pandas, this is called Dataframe, which again, imitates R language.
2644
 *
2645
 * Another interesting observation comes from recent (2018) release of Create ML framework from Apple. It provides
2646
 * a very close to Pandas style data process API (MLDataTable) but in Swift. This implementation is important because
2647
 * it provides a survey point other than Python.
2648
 *
2649
 * Comparing to Python, Swift is a stronger typed language. Though all being high-level, they all have pretty good
2650
 * string support (of course!), operator overloading, and polymorphism. String support makes column naming natural,
2651
 * Operator overloading makes conditioning and filtering easier, and polymorphism makes column type representation
2652
 * straight-forward. These, unfortunately, are the challenges I need to face when implementing in C with the eye
2653
 * towards that later the similar ideas can be implemented on top on a high-level language based on this one.
2654
 *
2655
 * It seems I haven't answered the most crucial question yet: what's special about these data process APIs? It is
2656
 * easier to answer this to first see what Pandas or MLDataTable does.
2657
 *
2658
 * * They both represent data as tables. Each column represents different type of the data (time, nd-array, scalar
2659
 *   or string). As such, they both have API to add / remove / rename columns, and load tabular data from disk.
2660
 *
2661
 * * They both provide API to filter (remove / add) rows, and derive new column from existing columns.
2662
 *
2663
 * * Pandas provides more API for data alignment (merge columns from different tables into one table), and compute
2664
 *   statistics (group rows by some criteria, and compute min / max / std / mean within that group).
2665
 *
2666
 * * MLDataTable provides API to batching data (random split) which covered in TensorFlow / PyTorch's Dataset API
2667
 *   as well.
2668
 *
2669
 * It turns out when you have a noisy dataset, these functionalities are useful to remove unwanted data quickly.
2670
 * If you have a relatively clean dataset, it also allows you to prepare data in a more elegant way. For NNC,
2671
 * the interesting requirements are:
2672
 *
2673
 * 1. Represents scalars, tensors, string as columns; columns can be named.
2674
 *
2675
 * 2. New columns can be derived, from existing ones.
2676
 *
2677
 * 3. Rows can be filtered, grouped, and statistics can be computed.
2678
 *
2679
 * 4. Columns can be aligned, with some given indexes.
2680
 *
2681
 * 5. All these can be done efficiently, on a scale of hundreds of Gigabytes data.
2682
 */
2683
2684
/**
2685
 * @defgroup level_5_dataframe Dataframe API
2686
 * @{
2687
 */
2688
2689
/**
2690
 * A data enumeration function to supply data for given row indexes.
2691
 */
2692
typedef void (*ccv_cnnp_column_data_enum_f)(const int column_idx, const int* const row_idxs, const int row_size, void** const data, void* const context, ccv_nnc_stream_context_t* const stream_context);
2693
/**
2694
 * A destructor for data.
2695
 */
2696
typedef void (*ccv_cnnp_column_data_deinit_f)(void* const data, void* const context);
2697
/**
2698
 * A destructor for context.
2699
 */
2700
typedef void (*ccv_cnnp_column_data_context_deinit_f)(void* const context);
2701
/**
2702
 * Column data.
2703
 */
2704
typedef struct {
2705
  int stream_type; /**< The type of stream context for this column. Each column only compatible with one stream type. */
2706
  char* name; /**< The name of the column. */
2707
  ccv_cnnp_column_data_enum_f data_enum; /**< The data enumeration function for this column. */
2708
  ccv_cnnp_column_data_deinit_f data_deinit; /**< The deinit function that will be used to destroy the data. */
2709
  void* context; /**< The context go along with this column. */
2710
  ccv_cnnp_column_data_context_deinit_f context_deinit; /**< The deinit function that will be used to destroy the context. */
2711
} ccv_cnnp_column_data_t;
2712
/**
2713
 * An opaque structure point to the dataframe object.
2714
 */
2715
typedef struct ccv_cnnp_dataframe_s ccv_cnnp_dataframe_t;
2716
/**
2717
 * Create a dataframe object with given column data.
2718
 * @param column_data The column data that can be loaded.
2719
 * @param column_size The size of column data array.
2720
 * @param row_count The number of rows in this dataframe.
2721
 */
2722
CCV_WARN_UNUSED(ccv_cnnp_dataframe_t*) ccv_cnnp_dataframe_new(const ccv_cnnp_column_data_t* const column_data, const int column_size, const int row_count);
2723
/**
2724
 * Add a new column to the dataframe.
2725
 * @param dataframe The dataframe object to add column to.
2726
 * @param data_enum The data provider function for the new column.
2727
 * @param stream_type The type of stream context for this derived column.
2728
 * @param data_deinit The deinit function will be used to destroy the derived data.
2729
 * @param context The context that can be used to generate new column.
2730
 * @param context_deinit The deinit function will be used to destroy the context.
2731
 * @param name The name of the newly added column.
2732
 * @return The new column index.
2733
 */
2734
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_add(ccv_cnnp_dataframe_t* const dataframe, ccv_cnnp_column_data_enum_f data_enum, const int stream_type, ccv_cnnp_column_data_deinit_f data_deinit, void* const context, ccv_cnnp_column_data_context_deinit_f context_deinit, const char* name);
2735
/**
2736
 * A map function that takes the data from multiple columns and derive new data out of it.
2737
 */
2738
typedef void (*ccv_cnnp_column_data_map_f)(void* const* const* const column_data, const int column_size, const int batch_size, void** const data, void* const context, ccv_nnc_stream_context_t* const stream_context);
2739
/**
2740
 * Derive a new column out of existing columns in the dataframe.
2741
 * @param dataframe The dataframe object that contains existing columns.
2742
 * @param map The map function used to derive new column from existing columns.
2743
 * @param stream_type The type of stream context for this derived column.
2744
 * @param data_deinit The deinit function will be used to destroy the derived data.
2745
 * @param column_idxs The columns that will be used to derive new column.
2746
 * @param column_idx_size The size of existing columns array.
2747
 * @param context The context that can be used to generate new column.
2748
 * @param context_deinit The deinit function will be used to destroy the context.
2749
 * @param name The name of the new column.
2750
 * @return The new column index.
2751
 */
2752
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_map(ccv_cnnp_dataframe_t* const dataframe, ccv_cnnp_column_data_map_f map, const int stream_type, ccv_cnnp_column_data_deinit_f data_deinit, const int* const column_idxs, const int column_idx_size, void* const context, ccv_cnnp_column_data_context_deinit_f context_deinit, const char* name);
2753
/**
2754
 * Shuffle an existing dataframe.
2755
 * @param dataframe The dataframe that is about to be shuffled.
2756
 */
2757
void ccv_cnnp_dataframe_shuffle(ccv_cnnp_dataframe_t* const dataframe);
2758
/**
2759
 * Query row count of the dataframe.
2760
 * @param dataframe The dataframe we want to query row count.
2761
 * @return The row count of the dataframe.
2762
 */
2763
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_row_count(ccv_cnnp_dataframe_t* const dataframe);
2764
/**
2765
 * Query the column name of a given column on the dataframe.
2766
 * @param dataframe The dataframe we want to query the column name.
2767
 * @param column_idx The index of a column.
2768
 * @return The name of the column.
2769
 */
2770
CCV_WARN_UNUSED(const char*) ccv_cnnp_dataframe_column_name(ccv_cnnp_dataframe_t* const dataframe, const int column_idx);
2771
/**
2772
 * A sampling function that takes multiple rows of one column, and sample to one row.
2773
 */
2774
typedef void (*ccv_cnnp_column_data_sample_f)(void* const* const input_data, const int batch_size, void** const output_data, void* const context, ccv_nnc_stream_context_t* const stream_context);
2775
/**
2776
 * Sample a dataframe by batch size. Thus, n rows are sampled to 1 row per sample function on
2777
 * one specific column. This will also sample the multi-column dataframe down to 1 column
2778
 * by selecting the one column to sample.
2779
 * @param dataframe The dataframe that is about to be sampled.
2780
 * @param sample The sample function used to sample n rows into 1.
2781
 * @param data_deinit The deinit function will be used to destroy the derived data.
2782
 * @param column_idx The column we selected to sample.
2783
 * @param batch_size How many rows will be sampled to 1 row from the original data.
2784
 * @param context The context that can be used in sample function.
2785
 * @param context_deinit The deinit function will be used to destroy the context.
2786
 * @return The sampled dataframe.
2787
 */
2788
CCV_WARN_UNUSED(ccv_cnnp_dataframe_t*) ccv_cnnp_dataframe_sample_new(ccv_cnnp_dataframe_t* const dataframe, ccv_cnnp_column_data_sample_f sample, ccv_cnnp_column_data_deinit_f data_deinit, const int column_idx, const int batch_size, void* const context, ccv_cnnp_column_data_context_deinit_f context_deinit);
2789
/**
2790
 * Extract a value out of a struct. Assuming the data points to a struct. This method extract
2791
 * n-offset value of that struct. For example, if you have struct { ccv_nnc_tensor_t* a; ccv_nnc_tensor_t* b; } S;
2792
 * if you want to extract the b tensor to a different column, you can call this function with
2793
 * offsetof(S, b).
2794
 * @param dataframe The dataframe object to be extracted.
2795
 * @param column_idx The column that we want to extract value of.
2796
 * @param offset The offset. For example, offsetof(S, b).
2797
 * @param name The name of the new column.
2798
 * @return The new column that contains the extracted value.
2799
 */
2800
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_extract_value(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const off_t offset, const char* name);
2801
/**
2802
 * Make a tuple out of columns specified. Thus, the new derived column will contains a tuple
2803
 * with data from all the columns specified here. Tuple here represented as void* tuple[], an
2804
 * array of void* pointers.
2805
 * @param dataframe The dataframe that will contain the new column.
2806
 * @param column_idxs The columns to be tupled.
2807
 * @param column_idx_size The number of columns.
2808
 * @param name The name of the new column.
2809
 * @return The derived column with the tuple.
2810
 */
2811
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_make_tuple(ccv_cnnp_dataframe_t* const dataframe, const int* const column_idxs, const int column_idx_size, const char* name);
2812
/**
2813
 * The size of the tuple. It is equal to the number of columns we specified. The behavior of
2814
 * calling this method on a column that is not a tuple is undefined.
2815
 * @param dataframe The dataframe that contains the tuple column.
2816
 * @param column_idx The tuple column we are going to inspect.
2817
 * @return The tuple size of the column.
2818
 */
2819
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_tuple_size(const ccv_cnnp_dataframe_t* const dataframe, const int column_idx);
2820
/**
2821
 * Extract a data out of a tuple.
2822
 * @param dataframe The dataframe that will contain the new column.
2823
 * @param column_idx The column that is a tuple.
2824
 * @param index The index into the tuple.
2825
 * @param name The name of the new column.
2826
 * @return The derived column with the extracted value.
2827
 */
2828
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_extract_tuple(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const int index, const char* name);
2829
/**
2830
 * The opaque pointer to the iterator.
2831
 */
2832
typedef struct ccv_cnnp_dataframe_iter_s ccv_cnnp_dataframe_iter_t;
2833
/**
2834
 * Get a new iterator of the dataframe.
2835
 * @param dataframe The dataframe object to iterate through.
2836
 * @param column_idxs The columns that will be iterated.
2837
 * @param column_idx_size The size of columns array.
2838
 * @return The opaque iterator object.
2839
 */
2840
CCV_WARN_UNUSED(ccv_cnnp_dataframe_iter_t*) ccv_cnnp_dataframe_iter_new(ccv_cnnp_dataframe_t* const dataframe, const int* const column_idxs, const int column_idx_size);
2841
/**
2842
 * Get the next item from the iterator.
2843
 * @param iter The iterator to go through.
2844
 * @param data_ref The output for the data.
2845
 * @param column_idx_size The size of the data_ref array.
2846
 * @param stream_context The stream context to extract data asynchronously.
2847
 * @return 0 if the iteration is successful, -1 if there is no more row. -2 if it is already ended.
2848
 */
2849
int ccv_cnnp_dataframe_iter_next(ccv_cnnp_dataframe_iter_t* const iter, void** const data_ref, const int column_idx_size, ccv_nnc_stream_context_t* const stream_context);
2850
/**
2851
 * Assuming iterator is on the same row, peek into potentially different column index.
2852
 * @param iter The iterator to go through.
2853
 * @param data_ref The output for the data.
2854
 * @param offset The offset for which column in this iterator to peek at.
2855
 * @param data_ref_size How many columns in this iterator to peek at.
2856
 * @param stream_context The stream context to extract data asynchronously.
2857
 */
2858
void ccv_cnnp_dataframe_iter_peek(ccv_cnnp_dataframe_iter_t* const iter, void** const data_ref, const int offset, const int data_ref_size, ccv_nnc_stream_context_t* const stream_context);
2859
/**
2860
 * Prefetch next item on the iterator with the given stream context. You can call this method multiple times
2861
 * to prefetch multiple items ahead of time.
2862
 * @param iter The iterator to go through.
2863
 * @param prefetch_count How much ahead we should advance for.
2864
 * @param stream_context The stream context to extract data asynchronously.
2865
 * @return 0 if the prefetch is successful, -1 if it is ended.
2866
 */
2867
int ccv_cnnp_dataframe_iter_prefetch(ccv_cnnp_dataframe_iter_t* const iter, const int prefetch_count, ccv_nnc_stream_context_t* const stream_context);
2868
/**
2869
 * Set the cursor of the iterator. When set to 0, the iterator effectively restarts.
2870
 * @param iter The iterator to go through.
2871
 * @param idx The index of the cursor.
2872
 * @return 0 if it is successful, -1 if it is not (exceed the range).
2873
 */
2874
int ccv_cnnp_dataframe_iter_set_cursor(ccv_cnnp_dataframe_iter_t* const iter, const int idx);
2875
/**
2876
 * Free the dataframe iterator object.
2877
 * @param iter The dataframe iterator to be freed.
2878
 */
2879
void ccv_cnnp_dataframe_iter_free(ccv_cnnp_dataframe_iter_t* const iter);
2880
/**
2881
 * Free the dataframe object.
2882
 * @param dataframe The dataframe object to be freed.
2883
 */
2884
void ccv_cnnp_dataframe_free(ccv_cnnp_dataframe_t* const dataframe);
2885
2886
/** @} */
2887
2888
/**
2889
 * @defgroup level_5_dataframe_add_ons Dataframe Add-ons
2890
 * @{
2891
 */
2892
2893
/**
2894
 * Turn a ccv_array_t to a dataframe object.
2895
 * @param array The array we want to turn into a dataframe object.
2896
 * @return The new dataframe object.
2897
 */
2898
CCV_WARN_UNUSED(ccv_cnnp_dataframe_t*) ccv_cnnp_dataframe_from_array_new(ccv_array_t* const array);
2899
/**
2900
 * Derive a new column that copies a tensor array from given column to the derived column on GPU.
2901
 * @param dataframe The dataframe object that get the derived column.
2902
 * @param column_idx The original column contains tensor array on CPU.
2903
 * @param tensor_offset Only copy as outputs[i] = inputs[i + tensor_offset].
2904
 * @param tensor_size How many tensors in the tensor array.
2905
 * @param device_id The device we want to copy the tensors to.
2906
 * @param name The name of the new column.
2907
 * @return The index of the newly derived column.
2908
 */
2909
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_copy_to_gpu(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const int tensor_offset, const int tensor_size, const int device_id, const char* name);
2910
/**
2911
 * Derive a new column by executing a generic command.
2912
 * @param dataframe The dataframe object that get the derived column.
2913
 * @param column_idx The original column contains tensor array.
2914
 * @param cmd The command for this operation.
2915
 * @param hint The hint to run the command.
2916
 * @param flags The flags with the command.
2917
 * @param input_offset Use inputs[i + input_offset] to inputs[i + input_offset + input_size - 1] as the inputs
2918
 * @param input_size How many tensors in the input array.
2919
 * @param output_params The parameters for the outputs.
2920
 * @param output_size How many tensors in the output array.
2921
 * @param stream_type The type of stream context we are going to use.
2922
 * @param name The name of the new column.
2923
 * @return The index of the newly derived column.
2924
 */
2925
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_cmd_exec(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, const int input_offset, const int input_size, const ccv_nnc_tensor_param_t* const output_params, const int output_size, const int stream_type, const char* name);
2926
/**
2927
 * Add a new column contains some tensors. This will add a new column that each row is the tensor specified
2928
 * as the parameters. It comes handy when you want to have some auxiliary tensors along with each row.
2929
 * @param dataframe The dataframe object that get the new column.
2930
 * @param params The parameters for the tensors.
2931
 * @param name The name of the new column.
2932
 * @return The index of the newly added column.
2933
 */
2934
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_add_aux(ccv_cnnp_dataframe_t* const dataframe, const ccv_nnc_tensor_param_t params, const char* name);
2935
/**
2936
 * Read image off a said column. That column should contain the filename (as char array). The new column
2937
 * will contain the ccv_dense_matrix_t / ccv_nnc_tensor_t (both are toll-free bridging) of the image.
2938
 * @param dataframe The dataframe object that loads the images.
2939
 * @param column_idx The column which contains the filename.
2940
 * @param structof The offset to the filename (as char array) from that column. For example, the column
2941
 *        could be a struct and filename could be one of the field. In that case, you can pass offsetof(S, filename)
2942
 * @param name The name of the new column.
2943
 * @return The index of the newly derived column.
2944
 */
2945
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_read_image(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const off_t structof, const char* name);
2946
/**
2947
 * The structure to describe how to apply random jitter to the image.
2948
 */
2949
typedef struct {
2950
  float contrast; /**< The random contrast, the final contrast will be [1 / (1 + contrast), 1 + contrast] */
2951
  float saturation; /**< The saturation, the final saturation will be [1 / (1 + saturation), 1 + saturation] */
2952
  float brightness; /**< The brightness, the final brightness will be between [1 / (1 + brightness), 1 + brightness] */
2953
  float lighting; /**< AlexNet style PCA based image jitter */
2954
  float aspect_ratio; /**< Stretch aspect ratio between [1 / (1 + asepct_ratio), 1 + aspect_ratio] */
2955
  int symmetric; /**< Apply random flip on x-axis (around y-axis */
2956
  int seed; /**< The seed for random generator. */
2957
  int center_crop; /**< Enable crop to the center (otherwise do random crop). */
2958
  struct {
2959
    int min; /**< The minimal dimension of resize */
2960
    int max; /**< The maximal dimension of resize. The final resize can be computed from min + (max - min) * random_unit */
2961
    int roundup; /**< The dimension on both height / width are a multiple of roundup value. */
2962
  } resize;
2963
  struct {
2964
    int rows; /**< The height of the final image. */
2965
    int cols; /**< The width of the final image. */
2966
  } size;
2967
  struct {
2968
    int x; /**< The extra random offset on x-axis. */
2969
    int y; /**< The extra random offset on y-axis. */
2970
  } offset;
2971
  struct {
2972
    float mean[3]; /**< Normalize the image with mean. */
2973
    float std[3];/**< Normalize the image with std. pixel = (pixel - mean) / std */
2974
  } normalize;
2975
} ccv_cnnp_random_jitter_t;
2976
/**
2977
 * Apply random jitter on a image to generate a new image.
2978
 * @param dataframe The dataframe object that contains the original image.
2979
 * @param column_idx The column which contains the original image.
2980
 * @param datatype The final datatype of the image. We only support CCV_32F right now.
2981
 * @param random_jitter The random jitter parameters to be applied to.
2982
 * @param name The name of the new column.
2983
 * @return The index of the newly derived column.
2984
 */
2985
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_image_random_jitter(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const int datatype, const ccv_cnnp_random_jitter_t random_jitter, const char* name);
2986
/**
2987
 * Generate a one-hot tensor off the label from a struct.
2988
 * @param dataframe The dataframe object that contains the label.
2989
 * @param column_idx The column which contains the label (as int).
2990
 * @param structof The offset to the label (as int) from that column. For example, the column
2991
 *        could be a struct and label could be one of the field. You can pass offsetof(S, filename)
2992
 * @param range The range of the label, from [0...range - 1]
2993
 * @param onval The value when it hit.
2994
 * @param offval The value for the others.
2995
 * @param datatype The datatype of the tensor.
2996
 * @param format The format of the tensor.
2997
 * @param name The name of the new column.
2998
 * @return The index of the newly derived column.
2999
 */
3000
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_one_hot(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const off_t structof, const int range, const float onval, const float offval, const int datatype, const int format, const char* name);
3001
/**
3002
 * Generate a scalar tensor (a tensor with one value) off a value from a struct.
3003
 * @param dataframe The dataframe object that contains the value.
3004
 * @param column_idx The column which contains the value (as datatype).
3005
 * @param structof The offset to the label (as int) from that column. For example, the column
3006
 *        could be a struct and label could be one of the field. You can pass offsetof(S, filename)
3007
 * @param from_dt The datatype of the value.
3008
 * @param to_dt The datatype of the tensor.
3009
 * @param format The format of the tensor.
3010
 * @param name The name of the new column.
3011
 * @return The index of the newly derived column.
3012
 */
3013
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_copy_scalar(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const off_t structof, const int from_dt, const int to_dt, const int format, const char* name);
3014
/**
3015
 * Generate vector with ones up to a given length, the rest will be zeros. When applied to batched lengths
3016
 * array, this will generate a matrix of these vectors, squared. The derived column will be a tuple of vectors
3017
 * for the given number of columns.
3018
 * @param dataframe The dataframe object that will contain the matrix.
3019
 * @param column_idxs The columns which contain the sequence lengths (a 1d tensor).
3020
 * @param column_idx_size The number of columns. The derived column will be a tuple of vectors.
3021
 * @param variable_size The size of the final vector can vary, depending on the max length of current batch.
3022
 * @param max_length The absolute max length for inputs.
3023
 * @param name The name of the new column.
3024
 * @return The index of the newly derived column.
3025
 */
3026
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_one_squared(ccv_cnnp_dataframe_t* const dataframe,  const int* const column_idxs, const int column_idx_size, const int variable_size, const int max_length, const char* name);
3027
/**
3028
 * Truncate a given matrix (as a list of vector) to the given size provided by another vector. The truncated
3029
 * column will be a tuple of vectors for the given columns.
3030
 * @param dataframe The dataframe object that will contain the matrix.
3031
 * @param vec_idxs The columns of the given matrix to be truncated.
3032
 * @param vec_idx_size The number of columns for vec_idxs.
3033
 * @param len_idxs The columns of the given sizes as a vector.
3034
 * @param len_idx_size The number of columns for len_idxs.
3035
 * @param name The name of the new column.
3036
 * @return The index of the newly derived column.
3037
 */
3038
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_truncate(ccv_cnnp_dataframe_t* const dataframe, const int* const vec_idxs, const int vec_idx_size, const int* len_idxs, const int len_idx_size, const char* name);
3039
/**
3040
 * Combine multiple tensors in a column into one tensor. This method can take multiple columns, which
3041
 * will result a tuple of tensors. Each tensor in the tuple is a batched one from a given column.
3042
 * @param dataframe The dataframe contains the columns of tensors to be batched.
3043
 * @param column_idxs The columns that contain the tensors.
3044
 * @param column_idx_size The number of columns that contain the tensors.
3045
 * @param batch_count How many tensors in one column to be batched together.
3046
 * @param group_count We can generate many groups of batched tensor. For example, if you have column A, B, C, each
3047
 *        have different tensors. If group_count is 1, the result tuple will be (A_b, B_b, C_b). If group count is
3048
 *        2, the result tuple will be (A_b1, B_b1, C_b1, A_b2, B_b2, C_b2). A_b1 etc. will still contain the same
3049
 *        number of batch_count tensors.
3050
 * @param format The result format of the tensor. We support simply transformation NCHW <=> NHWC with the source tensor.
3051
 * @return The newly created dataframe with the 0-th column is the tuple of batched tensors.
3052
 */
3053
CCV_WARN_UNUSED(ccv_cnnp_dataframe_t*) ccv_cnnp_dataframe_combine_new(ccv_cnnp_dataframe_t* const dataframe, const int* const column_idxs, const int column_idx_size, const int batch_count, const int group_count, const int format);
3054
3055
/** @} */
3056
3057
/**
3058
 * @page dataframe_csv Why to support comma-separated-values files in dataframe?
3059
 *
3060
 * C can be used as a parser. It usually can be fast. But most of them can be buggy and has bugs that can either crash, be
3061
 * exploited, or simply incorrect. There really isn't much motivation for me to start write a parser, even as simple as
3062
 * for CSV files.
3063
 *
3064
 * However, it does brought to my attention that a full-speed (defined by saturating the PCIx4 for SSD) implementation would
3065
 * be beneficial. I am also started to use nnc in many places that is handy to load a csv file and generate some tensors out
3066
 * of it.
3067
 *
3068
 * This implementation plan to use a variant of the two-pass approach documented in
3069
 * https://www.microsoft.com/en-us/research/uploads/prod/2019/04/chunker-sigmod19.pdf while first implemented in
3070
 * https://github.com/wiseio/paratext. It is differentiated from these two in these particular ways:
3071
 *
3072
 * 1. The first pass will not only find the quotes and even / odd CRLF, but also collect statistics on how many lines assuming
3073
 *    the first CRLF is within quote / outside of the quote;
3074
 *
3075
 * 2. The second pass will do a copy into a continuous page mirrors the original csv file, but null-terminate each column, and
3076
 *    assign the start pointer for each.
3077
 *
3078
 * The speculative approach while interesting, for many-core system implementation, it can be challenging and the worse-case
3079
 * scenario is indeed worse.
3080
 *
3081
 * The implementation itself follows https://tools.ietf.org/html/rfc4180, with only customization of delimiters (so it can support
3082
 * table-separated-values) and quotes (so you can choose between " and '). Escaping only supports double-quotes for whatever quote
3083
 * symbol you elect.
3084
 */
3085
3086
/**
3087
 * @defgroup level_5_dataframe_csv Dataframe for Comma-Separated-Values Files
3088
 * @{
3089
 */
3090
enum {
3091
  /* It is a file pointer. */
3092
  CCV_CNNP_DATAFRAME_CSV_FILE = 0,
3093
  /* It is a pointer to a memory. */
3094
  CCV_CNNP_DATAFRAME_CSV_MEMORY = 1,
3095
};
3096
3097
/**
3098
 * Create a dataframe object that read a CSV file. This will eagerly load the file into memory, parse each row / column
3099
 * into null-terminated strings, you can later convert these into numerics if needed. Each column will be a column indexed
3100
 * from 0 to column_size - 1. If there are syntax errors, the parser will make guesses and continue to parse to its best knowledge.
3101
 * If it cannot, we will return null for the object. We support both CRLF, LF, and LFCR termination.
3102
 * @param input The FILE handle for on-disk file, or the pointer to the region of the memory we are going to use.
3103
 * @param type The type of either `CCV_CNNP_DATAFRAME_CSV_FILE` or `CCV_CNNP_DATAFRAME_CSV_MEMORY`
3104
 * @param len The length of the memory region, if it is `CCV_CNNP_DATAFRAME_CSV_MEMORY`.
3105
 * @param delim The delim, it is ',' by default (if you provided '\0')
3106
 * @param quote The quote for escape strings, it is '"' by default (if you provided '\0')
3107
 * @param include_header whether to parse the header seperately. 1 means we treat the first line as header.
3108
 * @param column_size The number of columns in the resulted dataframe.
3109
 * @return A dataframe that can represent the csv file. nullptr if failed.
3110
 */
3111
CCV_WARN_UNUSED(ccv_cnnp_dataframe_t*) ccv_cnnp_dataframe_from_csv_new(void* const input, const int type, const size_t len, const char delim, const char quote, const int include_header, int* const column_size);
3112
3113
/** @} */
3114
3115
/**
3116
 * @page model Models, layers, and Keras
3117
 *
3118
 * With Keras API in mind, this model implementation essentially is a light-weight way to group neural network layers
3119
 * together. This is a rare case in NNC (or ccv in general) where Object-Oriented programming makes sense. I borrowed
3120
 * heavily from Objective-C / C++ to implement this Object-Oriented interface.
3121
 *
3122
 * Now back to elaboration of the Model interface. It is specifically designed with Keras in mind, asking question:
3123
 * If we are going to build Keras high-level API in any languages (Ruby, Python, Swift, Julia), what's the underlying
3124
 * C interface would look like? Here is your answer (hint: it looks very much like just Python Keras API).
3125
 *
3126
 * A model consists of a set of inputs and outputs. This sounds very much like what "Command" is in Level-1 APIs,
3127
 * however, they are different: a model is stateful. For example, a convolution command takes 3 inputs: image, kernel
3128
 * weight and bias, has 1 output: image. A convolution model takes 1 input: image, and 1 output: image. kernel weight
3129
 * and bias are internal states to the model (in Keras, it is called "layer" for convolution, and model means a set of
3130
 * layers. In NNC, that kind of differentiation feels superficial, therefore, a layer is a model).
3131
 *
3132
 * A model can be combined, and a new model can be a combination of other models.
3133
 *
3134
 * The simpler composed model is the sequential model. A sequential model is a model that consists a sequence of models
3135
 * that contains one input and one output. The output of the earlier model feed into the later one, thus, a sequential
3136
 * evaluation path.
3137
 */
3138
3139
/**
3140
 * @defgroup level_5_model Model API
3141
 * @{
3142
 */
3143
3144
/**
3145
 * model type is an abstract type, you won't interact with a naked model ever.
3146
 */
3147
typedef struct ccv_cnnp_model_s ccv_cnnp_model_t;
3148
/**
3149
 * With this type, now in NNC, we have 4 types that represents a "tensor":
3150
 *
3151
 * 1. ccv_nnc_tensor_t / ccv_nnc_tensor_view_t / ccv_nnc_tensor_multiview_t: a concrete tensor with memory allocated.
3152
 *
3153
 * 2. ccv_nnc_tensor_symbol_t: a symbol representation of a tensor, with its data layout, device affinity, and type
3154
 *                             specified.
3155
 *
3156
 * 3. ccv_nnc_tensor_variable_t: in dynamic graph, this represents a concrete tensor with memory allocated, but also
3157
 *                               associated with a recorded execution.
3158
 *
3159
 * 4. ccv_cnnp_model_io_t: this is the most flexible one. No data layout, device affinity or type specified. It can even
3160
 *                         represent a list of tensors rather than just one. This is a handle used by model API to
3161
 *                         associates model inputs / outputs.
3162
 */
3163
typedef struct ccv_cnnp_model_io_s* ccv_cnnp_model_io_t;
3164
/**
3165
 * Create a naked input.
3166
 * @return A ccv_cnnp_model_io_t represents an input.
3167
 */
3168
CCV_WARN_UNUSED(ccv_cnnp_model_io_t) ccv_cnnp_input(void);
3169
/**
3170
 * This method mimics Keras callable for model (thus, override __call__ method in Python class).
3171
 * @param model A model that we can apply a set of inputs to get one output.
3172
 * @param inputs The set of inputs.
3173
 * @param input_size The size of inputs array.
3174
 * @return A ccv_cnnp_model_io_t that represents the output of the given model.
3175
 */
3176
CCV_WARN_UNUSED(ccv_cnnp_model_io_t) ccv_cnnp_model_apply(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t* const inputs, const int input_size);
3177
enum {
3178
  /* Select only weights, no bias terms. */
3179
  CCV_CNNP_PARAMETER_SELECT_WEIGHT = 0,
3180
  /* Select bias terms, no weights. */
3181
  CCV_CNNP_PARAMETER_SELECT_BIAS = 1,
3182
};
3183
/**
3184
 * This method exposes parameter for a model out as a potential input for another model. Since
3185
 * it is a ccv_cnnp_model_io_t, it can also be used by other methods.
3186
 * @param model A model that we can extract parameters out.
3187
 * @param selector The selector for a parameter. ALL_PARAMETERS means all parameters, or you can select CCV_CNNP_PARAMETER_SELECT_WEIGHT or CCV_CNNP_PARAMETER_SELECT_BIAS.
3188
 * @param index The index into a parameter. ALL_PARAMETERS means all parameters.
3189
 */
3190
CCV_WARN_UNUSED(ccv_cnnp_model_io_t) ccv_cnnp_model_parameters(ccv_cnnp_model_t* const model, const int selector, const int index);
3191
/**
3192
 * A notification function such that a model can be notified.
3193
 * This is useful to broadcast a message to all models as sub-model of someone else.
3194
 */
3195
typedef void (*ccv_cnnp_model_notify_f)(const ccv_cnnp_model_t* const model, const int tag, void* const payload, void* const context);
3196
/**
3197
 * Hook into a model such that when there is a notification, the callback will receive it.
3198
 * @param model A model that can be notified.
3199
 * @param func The callback function.
3200
 * @param context The context to be passed along to the callback function.
3201
 **/
3202
void ccv_cnnp_model_notify_hook(ccv_cnnp_model_t* const model, ccv_cnnp_model_notify_f func, void* const context);
3203
/**
3204
 * Notify a model and its sub-models with a tag and a payload. This will be triggered
3205
 * synchronously.
3206
 * @param model A model that will be notified.
3207
 * @param tag An integer to help identify what kind of notification.
3208
 * @param payload A payload pointer that you can carry arbitrary information.
3209
 */
3210
void ccv_cnnp_model_notify(const ccv_cnnp_model_t* const model, const int tag, void* const payload);
3211
/**
3212
 * This method name is deceiving. It return a composed model, not a naked model.
3213
 * This composed model takes set of inputs, and run through various other models to arrive at
3214
 * the set of outputs.
3215
 * @param inputs The set of inputs.
3216
 * @param input_size The size of inputs array.
3217
 * @param outputs The set of outputs.
3218
 * @param output_size The size of outputs array.
3219
 * @param name The unique name of the model.
3220
 * @return A composed model that takes inputs, and generate the outputs.
3221
 */
3222
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_model_new(const ccv_cnnp_model_io_t* const inputs, const int input_size, const ccv_cnnp_model_io_t* const outputs, const int output_size, const char* const name);
3223
/**
3224
 * This method returns a sequential model, which composed from a sequence of models.
3225
 * @param models The list of models, that takes one input, and emit one output, feeding into the subsequent one.
3226
 * @param model_size The size of the list.
3227
 * @param name The unique name of the model.
3228
 * @return A composed model that applies these models one by one in sequence.
3229
 */
3230
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_sequential_new(ccv_cnnp_model_t* const* const models, const int model_size, const char* const name);
3231
/**
3232
 * A model generation function to be called for dynamic models.
3233
 */
3234
typedef ccv_cnnp_model_t* (*ccv_cnnp_model_dynamic_f)(const ccv_nnc_tensor_param_t* const inputs, const int input_size, void* const context);
3235
/**
3236
 * This method returns a model that will be recreated if it is recompiled. Put it this way, you can call
3237
 * ccv_cnnp_model_compile multiple times with different inputs and input size, however, the model will
3238
 * only be recompiled to some extent. For example, if you called ccv_cnnp_reshape, the shape is determined
3239
 * at the moment you create that model, recompilation won't change. There are two ways to workaround this:
3240
 * 1. Use models that doesn't have explicit shape specified, for example, ccv_cnnp_dense, and avoid models
3241
 *    that is not as flexible, such as ccv_cnnp_reshape, or ccv_cnnp_cmd_exec.
3242
 * 2. Create with ccv_cnnp_dynamic_new such that the model will be recreated again whenever recompile.
3243
 * @param func The function to be called to create the model.
3244
 * @param context The context used along to create the model.
3245
 * @param name The unique name of the model.
3246
 * @return A model object that is yet to be created until build.
3247
 */
3248
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_dynamic_new(ccv_cnnp_model_dynamic_f func, void* const context, const char* const name);
3249
/**
3250
 * Prepare the model to be trained, the input specifies the batch size etc.
3251
 * Input size technically is not needed, here is a safety check.
3252
 * @param model The model to be compiled.
3253
 * @param inputs The tensor parameters for the model's inputs, that can be used to derive all tensor shapes.
3254
 * @param input_size The size of the inputs array.
3255
 * @param minimizer The wrapped command that represents a particular optimization strategy.
3256
 * @param loss The wrapped command that computes the loss function.
3257
 */
3258
void ccv_cnnp_model_compile(ccv_cnnp_model_t* const model, const ccv_nnc_tensor_param_t* const inputs, const int input_size, const ccv_nnc_cmd_t minimizer, const ccv_nnc_cmd_t loss);
3259
/**
3260
 * Absorb a new model into the existing model. This requires the new model has exactly the same parameters
3261
 * but other dimensionality's can change. The new model has to not be compiled yet, its life-cycle management
3262
 * will be take over by the existing model. You don't need to free it separately.
3263
 * @param model The existing model.
3264
 * @param init The new model.
3265
 * @param inputs The tensor parameters for the model's inputs, that can be used to derive all tensor shapes.
3266
 * @param input_size The size of the inputs array.
3267
 */
3268
void ccv_cnnp_model_absorb(ccv_cnnp_model_t* const model, ccv_cnnp_model_t* const init, const ccv_nnc_tensor_param_t* const inputs, const int input_size);
3269
/**
3270
 * Create a copy of an existing model.
3271
 * @param model The existing model.
3272
 * @return The new model that is exactly the same copy of the old one.
3273
 */
3274
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_model_copy(const ccv_cnnp_model_t* const model);
3275
/**
3276
 * Get the output size of the model.
3277
 * @param model The existing model.
3278
 * @return The output size of the model.
3279
 */
3280
CCV_WARN_UNUSED(int) ccv_cnnp_model_output_size(const ccv_cnnp_model_t* const model);
3281
/**
3282
 * Compute the shape of the output tensor after the model applied to the input.
3283
 * This can only be called after the model is compiled with proper input parameters.
3284
 * @param model The model to compute the output shapes.
3285
 * @param outputs The computed tensor parameters in the output.
3286
 * @param output_size The size of the output array, it has to match the model's output.
3287
 */
3288
void ccv_cnnp_model_tensor_auto(ccv_cnnp_model_t* const model, ccv_nnc_tensor_param_t* const outputs, const int output_size);
3289
/**
3290
 * Generate output that can be parsed by GraphViz (DOT language).
3291
 * @param model The composed model.
3292
 * @param flags Either CCV_NNC_SHORT_DOT_GRAPH or CCV_NNC_LONG_DOT_GRAPH
3293
 * @param outs The output file streams.
3294
 * @param out_size The size of output file stream array.
3295
 */
3296
void ccv_cnnp_model_dot(const ccv_cnnp_model_t* const model, const int flags, FILE** const outs, const int out_size);
3297
/**
3298
 * Fit a model to a given input / output. This is a combination of running ccv_cnnp_model_evaluate /
3299
 * ccv_cnnp_model_backward / ccv_cnnp_model_apply_gradients. The difference is that when calling
3300
 * individual functions, the graph is compiled piece by piece, thus, is less efficient than calling
3301
 * ccv_cnnp_model_fit directly. However, having the separate functions makes this implementation much
3302
 * more versatile, for example, can accumulate gradients for multiple batches, or using custom gradients
3303
 * etc.
3304
 * @param model The composed model.
3305
 * @param inputs The input tensors.
3306
 * @param input_size The size of the input tensors array.
3307
 * @param fits The target tensors.
3308
 * @param fit_size The size of the target tensors array.
3309
 * @param outputs The actual outputs from the model.
3310
 * @param output_size The size of the outputs array.
3311
 * @param tensor_tape An opaque tensor tape object to "backpropagate through time".
3312
 * @param stream_context The stream where the fit can be executed upon.
3313
 */
3314
void ccv_cnnp_model_fit(ccv_cnnp_model_t* const model, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const fits, const int fit_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const stream_context);
3315
enum {
3316
  /**
3317
   * Don't disable any outgrad.
3318
   */
3319
  CCV_CNNP_DISABLE_OUTGRAD_NONE = (uint64_t)0,
3320
  /**
3321
   * Disable all inputs' outgrads.
3322
   */
3323
  CCV_CNNP_DISABLE_OUTGRAD_ALL = (uint64_t)(int64_t)-1,
3324
};
3325
/**
3326
 * The parameters for how evaluation should behave.
3327
 */
3328
typedef struct {
3329
  int requires_grad; /**< Whether we need to keep intermediate results for gradient computations. */
3330
  int is_test; /**< Whether we evaluate it as test, or just as forward pass of the training process. */
3331
  uint64_t disable_outgrad; /**< Whether we can compute outflow gradients when call ccv_cnnp_model_backward later, this is a bitmask, you can mark for which input the outgrad is disabled. */
3332
} ccv_cnnp_evaluate_param_t;
3333
/**
3334
 * Evaluate model with output.
3335
 * @param model The composed model.
3336
 * @param params The parameters for how evaluation should behave.
3337
 * @param inputs The input tensors.
3338
 * @param input_size The size of the input tensors array.
3339
 * @param outputs The actual outputs from the model.
3340
 * @param output_size The size of the outputs array.
3341
 * @param tensor_tape An opaque tensor tape object to "backpropagate through time".
3342
 * @param stream_context The stream where the evaluation can be executed upon.
3343
 */
3344
void ccv_cnnp_model_evaluate(ccv_cnnp_model_t* const model, const ccv_cnnp_evaluate_param_t params, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const stream_context);
3345
/**
3346
 * Based on the input gradients, compute the output gradients (w.r.t. the inputs). This also adds parameter gradients.
3347
 * @param model The composed model.
3348
 * @param ingrads The input gradients.
3349
 * @param ingrad_size The size of the input gradients array.
3350
 * @param outgrads The output gradients (w.r.t. the inputs).
3351
 * @param outgrad_size The size of the output gradients array.
3352
 * @param tensor_tape An opaque tensor tape object to "backpropagate through time".
3353
 * @param stream_context The stream where the gradient computation can be executed upon.
3354
 */
3355
void ccv_cnnp_model_backward(ccv_cnnp_model_t* const model, ccv_nnc_tensor_t* const* const ingrads, const int ingrad_size, ccv_nnc_tensor_t* const* const outgrads, const int outgrad_size, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const stream_context);
3356
/**
3357
 * Apply the computed gradients to the parameter tensors.
3358
 * @param model The composed model.
3359
 * @param stream_context The stream where the gradient computation can be executed upon.
3360
 */
3361
void ccv_cnnp_model_apply_gradients(ccv_cnnp_model_t* const model, ccv_nnc_stream_context_t* const stream_context);
3362
enum {
3363
  /**
3364
   * This is the default flag, if the model is not initialized, will attempt to read from the disk.
3365
   * Otherwise, will persist existing parameters to disk.
3366
   */
3367
  CCV_CNNP_MODEL_CHECKPOINT_READ_WRITE,
3368
  /**
3369
   * Only read parameters out of disk, even it is already initialized.
3370
   */
3371
  CCV_CNNP_MODEL_CHECKPOINT_READ_ONLY,
3372
  /**
3373
   * Only write parameters to disk.
3374
   */
3375
  CCV_CNNP_MODEL_CHECKPOINT_WRITE_ONLY,
3376
};
3377
/**
3378
 * This method checkpoint the given model. If the model is initialized, it will persist all parameters
3379
 * to the given file path. If it is not initialized, this method will try to load tensors off the
3380
 * disk. Under the hood, it calls ccv_cnnp_model_write / ccv_cnnp_model_read when appropriate.
3381
 * @param model The composed model.
3382
 * @param fn The file name.
3383
 * @param flags Whether we perform read / write on this checkpoint, or read only / write only.
3384
 */
3385
void ccv_cnnp_model_checkpoint(ccv_cnnp_model_t* const model, const char* const fn, const int flags);
3386
/**
3387
 * Write model's tensors to a SQLite database with a given name. Note that we specifically say
3388
 * "model's tensors" because it doesn't persist the model's structure. Hence, you shouldn't
3389
 * expect us to take a name to then have a fully functional model restored from there. You still
3390
 * need to construct the model. This method only write the tensors (weights and other internal ones)
3391
 * to disk.
3392
 * @param model The model.
3393
 * @param handle The SQLite handle.
3394
 * @param name The name to find the tensors related to the model in the database.
3395
 * @return CCV_IO_FINAL for success, otherwise error.
3396
 */
3397
int ccv_cnnp_model_write(const ccv_cnnp_model_t* const model, void* const handle, const char* const name);
3398
/**
3399
 * Read model's tensors from a SQLite database with a given name.
3400
 * @param handle The SQLite handle.
3401
 * @param name The name to find the tensors related to the model in the database.
3402
 * @param model_out The model which you want to restore the tensors. It should have the same
3403
 *                  structure as the one in write to.
3404
 * @return CCV_IO_FINAL for success, otherwise error.
3405
 */
3406
int ccv_cnnp_model_read(void* const handle, const char* const name, const ccv_cnnp_model_t* const model_out);
3407
/**
3408
 * Apply data parallel to the composed model. This method has to be called before we call either
3409
 * evaluate or fit and after the model is compiled.
3410
 * @param model The composed model.
3411
 * @param parallel Number of devices we want to run on. 0 will use all devices available. 1 will skip.
3412
 */
3413
void ccv_cnnp_model_set_data_parallel(ccv_cnnp_model_t* const model, const int parallel);
3414
/**
3415
 * Apply memory compression to the composed model. The memory compression technique can reduce memory
3416
 * usage up to 75% comparing with raw mix-precision model during training time.
3417
 * @param model The composed model.
3418
 * @param memory_compression Whether to enable the memory compression (1 - enable, 0 - disable (default))
3419
 */
3420
void ccv_cnnp_model_set_memory_compression(ccv_cnnp_model_t* const model, const int memory_compression);
3421
/**
3422
 * Set compile parameters on the model so it compiles the graph with the said parameters.
3423
 * @param model The composed model.
3424
 * @param compile_params A ccv_nnc_symbolic_graph_compile_param_t struct defines compilation parameters.
3425
 */
3426
void ccv_cnnp_model_set_compile_params(ccv_cnnp_model_t* const model, const ccv_nnc_symbolic_graph_compile_param_t compile_params);
3427
/**
3428
 * This method set the max workspace size. If the graph is already compiled. It will re-run
3429
 * autotune to use the new workspace size to find the best algorithm.
3430
 * @param model The composed model.
3431
 * @param workspace_size The size in bytes that we can use as workspace (scratch memory).
3432
 */
3433
void ccv_cnnp_model_set_workspace_size(ccv_cnnp_model_t* const model, size_t workspace_size);
3434
/**
3435
 * Set a parameter that is specified by the parameter span. This will override whatever value in that
3436
 * parameter. The given tensor should match the dimension of the parameter. It doesn't matter whether
3437
 * the given tensor is on CPU or GPU, it will be copied over. This method is limited, it can only set
3438
 * tensor once the model is compiled.
3439
 * @param model The composed model.
3440
 * @param parameter The parameter that is used to specify which parameter to override.
3441
 * @param tensor The tensor contains the value we want to copy over.
3442
 */
3443
void ccv_cnnp_model_set_parameter(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameter, const ccv_nnc_tensor_t* const tensor);
3444
/**
3445
 * Copy a parameter that is specified by the parameter span out of a model. This will override the value
3446
 * in the tensor you provided. The given tensor should match the dimension of the parameter and should
3447
 * already be allocated. It doesn't matter whether the given tensor is on CPU or GPU.
3448
 * @param model The composed model.
3449
 * @param parameter The parameter that is used to specify which parameter to override.
3450
 * @param tensor The tensor that receives value.
3451
 */
3452
void ccv_cnnp_model_parameter_copy(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameter, ccv_nnc_tensor_t* const tensor);
3453
/**
3454
 * Set parameters from another model. This will override whatever values in these parameters. The
3455
 * given parameters from another model should match the dimension of the parameter. It doesn't matter
3456
 * whether the given tensor is on CPU or GPU. This method can only set when both models are compiled.
3457
 * @param model The composed model to be set on parameters.
3458
 * @param parameters The parameters to be override.
3459
 * @param from_model The model to copy parameters from.
3460
 * @param from_parameters The parameters to be copied from.
3461
 */
3462
void ccv_cnnp_model_set_parameters(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameters, const ccv_cnnp_model_t* const from_model, const ccv_cnnp_model_io_t from_parameters);
3463
/**
3464
 * Process parameters such as exponential averaging.
3465
 * parameters = zip(from_parameters, to_parameters).map { cmd(to_parameter, from_parameter) }
3466
 * The order is selected in such way because many of our commands only support inplace op if the first
3467
 * parameter matches.
3468
 * @param model The composed model to have parameters zip mapped.
3469
 * @param parameters The parameters to be written (and read).
3470
 * @param cmd The command to apply on the parameters.
3471
 * @param hint The hint supplied to the cmd.
3472
 * @param flags The flags supplied to the cmd.
3473
 * @param stream_context The stream context to be associated with.
3474
 * @param from_model The other composed model to have parameters zipped.
3475
 * @param from_parameters The parameters to be read.
3476
 */
3477
void ccv_cnnp_model_parameters_zip_map(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameters, const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_stream_context_t* const stream_context, const ccv_cnnp_model_t* const from_model, const ccv_cnnp_model_io_t from_parameters);
3478
/**
3479
 * Process parameters such as clipping. parameters = parameters.map { cmd(parameter) }
3480
 * @param model The composed model to have parameters mapped.
3481
 * @param parameters The parameters to be mapped.
3482
 * @param cmd The command to apply on the parameters.
3483
 * @param hint The hint supplied to the cmd.
3484
 * @param flags The flags supplied to the cmd.
3485
 * @param stream_context The stream context to be associated with.
3486
 */
3487
void ccv_cnnp_model_parameters_map(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameters, const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_stream_context_t* const stream_context);
3488
/**
3489
 * Set a new minimizer for the model. This is useful when you need to update learn rate for stochastic
3490
 * gradient descent for example. This method can be called any time during the training process (after
3491
 * compilation).
3492
 * @param model The composed model.
3493
 * @param minimizer The wrapped command that represents a new optimization strategy.
3494
 * @param reset Reset all previous states of minimizers. This only makes sense if both parameters and parameter_size is 0.
3495
 * @param parameters The parameters to be applied the minimizer on. 0 meant for all.
3496
 * @param parameter_size The number of parameter spans.
3497
 */
3498
void ccv_cnnp_model_set_minimizer(ccv_cnnp_model_t* const model, const ccv_nnc_cmd_t minimizer, const int reset, const ccv_cnnp_model_io_t* const parameters, const int parameter_size);
3499
/**
3500
 * Retrieve the default minimizer for the model. This is set either you call model compile or
3501
 * ccv_cnnp_model_set_minimizer with no parameter spans.
3502
 * @param model The composed model.
3503
 * @return The minimizer command.
3504
 */
3505
CCV_WARN_UNUSED(ccv_nnc_cmd_t) ccv_cnnp_model_minimizer(ccv_cnnp_model_t* const model);
3506
/**
3507
 * Get the default stream from a compiled model. If the model is not compiled, the default stream is
3508
 * 0.
3509
 * @param model The composed model.
3510
 * @return The default stream for this model.
3511
 */
3512
CCV_WARN_UNUSED(ccv_nnc_stream_context_t*) ccv_cnnp_model_default_stream(const ccv_cnnp_model_t* const model);
3513
/**
3514
 * Get the allocated memory size (exclude workspace) from a compiled model. If the model is not compiled
3515
 * the size is 0.
3516
 * @param model The composed model.
3517
 * @return The number of bytes for memory allocated.
3518
 */
3519
CCV_WARN_UNUSED(uint64_t) ccv_cnnp_model_memory_size(const ccv_cnnp_model_t* const model);
3520
/**
3521
 * Free a given model.
3522
 * @param model The composed model.
3523
 */
3524
void ccv_cnnp_model_free(ccv_cnnp_model_t* const model);
3525
3526
/** @} */
3527
3528
/**
3529
 * @defgroup level_5_model_add_ons Model Add-ons
3530
 * @{
3531
 */
3532
3533
enum {
3534
  CCV_CNNP_IO, /**< The parameter is a ccv_cnnp_io_t. */
3535
  CCV_CNNP_NO_TENSOR, /**< The parameter is not used. */
3536
  CCV_CNNP_TENSOR_NOT_OUTPUT, /**< This parameter indicates this is a tensor parameter, but it is not an output reflected as ccv_cnnp_io_t */
3537
  CCV_CNNP_INIT_SHARED_TENSOR, /**< The parameter is a provided tensor for initialization. */
3538
  CCV_CNNP_INIT_SHARED_TENSOR_AS_TRAINABLE, /**< The parameter is a provided tensor that can be updated. */
3539
};
3540
3541
typedef void(*ccv_cnnp_state_initializer_f)(void* const context, const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const input, const ccv_nnc_tensor_symbol_t output_symbol);
3542
typedef void(*ccv_cnnp_cmd_exec_init_state_f)(const ccv_nnc_tensor_symbol_t tensor_symbol, const ccv_cnnp_state_initializer_f initializer, void* const initializer_context, void* const context);
3543
typedef void(*ccv_cnnp_cmd_exec_init_state_deinit_f)(void* const context);
3544
typedef void*(*ccv_cnnp_cmd_exec_init_state_copy_f)(void* const context);
3545
3546
typedef struct {
3547
  ccv_nnc_tensor_param_t info; /**< The tensor parameter for this one. */
3548
  void* context; /**< The context for which we initialize tensor. */
3549
  ccv_cnnp_cmd_exec_init_state_f init; /**< The function to init state for a tensor. */
3550
  ccv_cnnp_cmd_exec_init_state_copy_f copy; /**< The function to make a copy of the context. */
3551
  ccv_cnnp_cmd_exec_init_state_deinit_f deinit; /**< The function to release the context. */
3552
} ccv_cnnp_cmd_exec_io_init_state_t;
3553
3554
typedef struct {
3555
  int type; /**< The type of the parameter, could be CCV_CNNP_IO, NO_TENSOR, INIT_SHARED_TENSOR, or INIT_SHARED_TENSOR_TRAINABLE */
3556
  ccv_cnnp_cmd_exec_io_init_state_t init_state; /** The set of state to initialize the given tensor. */
3557
} ccv_cnnp_cmd_exec_io_t;
3558
/**
3559
 * A generic model based on the command. If the tensors are labeled as ccv_cnnp_io_t, it will participate
3560
 * as the input / output of the model. If it is a init tensor, the model will use this tensor for that parameter.
3561
 * More over, if it is marked as parameter, that tensor will be differentiated against when you call
3562
 * ccv_cnnp_model_fit. This model however doesn't take over ownership of the tensor. You should manage the life
3563
 * cycle of the given tensor and it is your responsibility to make sure they outlive the model. Also, all inputs and
3564
 * outputs marked as init tensors will be shared if you reuse this model in other places.
3565
 * @param cmd The command to generate this model.
3566
 * @param hint The hint to run the command.
3567
 * @param flags The flags with the command.
3568
 * @param inputs A list of ccv_cnnp_cmd_exec_io_t identify each input as either a init tensor or a ccv_cnnp_io_t.
3569
 * @param input_size The size of input list.
3570
 * @param outputs A list of types identify each output as ccv_cnnp_io_t or a none tensor.
3571
 * @param output_size The size of the outputs. There is no need to give ccv_cnnp_tensor_param_t for outputs because
3572
 *        all of them are CCV_CNNP_IO type.
3573
 * @param name The unique name of the model.
3574
 * @return A model based on the given command.
3575
 */
3576
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_cmd_exec(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, const ccv_cnnp_cmd_exec_io_t* const inputs, const int input_size, const int* const outputs, const int output_size, const char* const name);
3577
/**
3578
 * Copy a tensor as initialization for the given parameter.
3579
 * @param tensor The tensor to copy from.
3580
 * @return A init_state that can be passed to ccv_cnnp_cmd_exec_io_t
3581
 */
3582
CCV_WARN_UNUSED(ccv_cnnp_cmd_exec_io_init_state_t) ccv_cnnp_cmd_exec_io_copy(const ccv_nnc_tensor_t* const tensor);
3583
/**
3584
 * Initialize a given parameter with the command.
3585
 * @param cmd The command to call when need to initialize.
3586
 * @param hint The hint to accompany the command.
3587
 * @param flags The flags to accompany the command.
3588
 * @param params The tensor configuration.
3589
 * @return A init_state that can be passed to ccv_cnnp_cmd_exec_io_t
3590
 */
3591
CCV_WARN_UNUSED(ccv_cnnp_cmd_exec_io_init_state_t) ccv_cnnp_cmd_exec_io_set_by(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, const ccv_nnc_tensor_param_t params);
3592
3593
typedef struct {
3594
  ccv_nnc_tensor_symbol_t symbol; /**< The tensor symbol this is reference to. */
3595
  int type; /**< The type of the parameter, could be CCV_CNNP_IO, INIT_SHARED_TENSOR, or INIT_SHARED_TENSOR_TRAINABLE */
3596
  ccv_cnnp_cmd_exec_io_init_state_t init_state; /** The set of state to initialize the given tensor. */
3597
} ccv_cnnp_tensor_symbol_param_t;
3598
/**
3599
 * A generic model based on the symbolic graph we provided. A list of tensor symbols are labeled whether it
3600
 * is ccv_cnnp_io_t or not (we identify whether this is a input or output based on whether it is in the graph).
3601
 * If it is not, we init it with a given tensor. If it is marked as parameter, that tensor will be differentiated
3602
 * against when you call ccv_cnnp_model_fit. The model doesn't take ownership over the init tensors. You are
3603
 * responsible to make sure the init tensors outlive the model until the initialization occurred. Also, these
3604
 * tensors will be shared if the model is reused.
3605
 * @param graph The symbolic graph that is our blue print for this model.
3606
 * @param tensor_symbol_params The list of tensor symbol parameters that labels a given symbol.
3607
 * @param tensor_symbol_param_size The size of the list.
3608
 * @param inputs The inputs to this graph. We can figure out which ones are inputs, but this gives us the order.
3609
 * @param input_size The size of the input list.
3610
 * @param outputs The outputs from this graph. We can figure out which ones are outputs, but this gives us the order.
3611
 * @param output_size The size of the output list.
3612
 * @param name The unique name of the model.
3613
 * @return A model based on the given symbolic graph.
3614
 */
3615
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_graph(const ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_tensor_symbol_param_t* const tensor_symbol_params, const int tensor_symbol_param_size, ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size, const char* const name);
3616
/**
3617
 * Sum multiple input tensors together.
3618
 * @param name The unique name of the model.
3619
 * @return A model that can be applied with multiple inputs, and generate output that is a sum of the inputs.
3620
 */
3621
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_sum(const char* const name);
3622
/**
3623
 * Concatenate input tensors together.
3624
 * @param axis Along this axis, we concatenate tensors together. Other dimensions need to be exactly the same.
3625
 * @param name The unique name of the model.
3626
 * @return A model that can be applied with multiple inputs, and generate output that is a concatenation of the inputs.
3627
 */
3628
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_concat(const int axis, const char* const name);
3629
/**
3630
 * A convolution model.
3631
 * @param groups The number of kernel groups in the model.
3632
 * @param filters The total number of filters in the model (filters = groups * per group filters).
3633
 * @param kdim The dimensions of the kernel.
3634
 * @param no_bias Whether has bias term or not.
3635
 * @param hint The hint for alignment.
3636
 * @param name The unique name of the model.
3637
 * @return A convolution model.
3638
 */
3639
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_convolution(const int groups, const int filters, const int kdim[CCV_NNC_MAX_DIM_ALLOC], const int no_bias, ccv_nnc_hint_t hint, const char* const name);
3640
/**
3641
 * A dense layer model.
3642
 * @param count The output dimension.
3643
 * @param no_bias Whether has a bias term or not.
3644
 * @param name The unique name of the model.
3645
 * @return A dense layer model.
3646
 */
3647
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_dense(const int count, const int no_bias, const char* const name);
3648
/**
3649
 * A batch norm layer model.
3650
 * @param momentum The momentum in batch norm parameter.
3651
 * @param epsilon The epsilon in batch norm parameter.
3652
 * @param name The unique name of the model.
3653
 * @return A batch norm layer model.
3654
 */
3655
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_batch_norm(const float momentum, const float epsilon, const char* const name);
3656
/**
3657
 * A RELU activation layer model.
3658
 * @return A RELU activation layer model.
3659
 */
3660
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_relu(const char* const name);
3661
/**
3662
 * A sigmoid activation layer model.
3663
 * @return A sigmoid activation layer model.
3664
 */
3665
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_sigmoid(const char* const name);
3666
/**
3667
 * A swish activation layer model.
3668
 * @return A swish activation layer model.
3669
 */
3670
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_swish(const char* const name);
3671
/**
3672
 * A softmax activation layer model.
3673
 * @return A softmax activation layer model.
3674
 */
3675
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_softmax(const char* const name);
3676
/**
3677
 * A max pool model.
3678
 * @param kdim The pooling window dimension.
3679
 * @param hint The hint for alignment.
3680
 * @param name The unique name of the model.
3681
 * @return A max pool model.
3682
 */
3683
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_max_pool(const int kdim[CCV_NNC_MAX_DIM_ALLOC], const ccv_nnc_hint_t hint, const char* const name);
3684
/**
3685
 * An average pool model.
3686
 * @param kdim The pooling window dimension.
3687
 * @param hint The hint for alignment.
3688
 * @param name The unique name of the model.
3689
 * @return An average pool model.
3690
 */
3691
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_average_pool(const int kdim[CCV_NNC_MAX_DIM_ALLOC], const ccv_nnc_hint_t hint, const char* const name);
3692
/**
3693
 * Reshape an input into a different dimension.
3694
 * @param dim The new dimension for the input.
3695
 * @param ofs The offset on each of the dimension.
3696
 * @param inc The line size of each dimension.
3697
 * @param name The unique name of the model.
3698
 * @return A reshape layer model.
3699
 */
3700
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_reshape(const int dim[CCV_NNC_MAX_DIM_ALLOC], const int ofs[CCV_NNC_MAX_DIM_ALLOC], const int inc[CCV_NNC_MAX_DIM_ALLOC], const char* const name);
3701
/**
3702
 * Flatten an input tensor into a one dimensional array.
3703
 * @param name The unique name of the model.
3704
 * @return A flatten layer model.
3705
 */
3706
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_flatten(const char* const name);
3707
/**
3708
 * A layer norm model.
3709
 * @param epsilon The epsilon in layer norm parameter.
3710
 * @param axis The axis are the feature axis to compute norm.
3711
 * @param axis_count How many axis we count as feature.
3712
 * @param name The unique name of the model.
3713
 * @return A layer norm model.
3714
 */
3715
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_layer_norm(const float epsilon, const int axis[CCV_NNC_MAX_DIM_ALLOC], const int axis_count, const char* const name);
3716
/**
3717
 * Add two input tensors together. Different from sum because this support broadcasting.
3718
 * @param p The weight for the first input.
3719
 * @param q The weight for the second input.
3720
 * @param name The unique name of the model.
3721
 * @return A model that can be applied with two inputs, and generate output that is a product of the inputs.
3722
 */
3723
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_add(const float p, const float q, const char* const name);
3724
/**
3725
 * Multiple two input tensors together.
3726
 * @param p The weight for the output.
3727
 * @param name The unique name of the model.
3728
 * @return A model that can be applied with two inputs, and generate output that is a product of the inputs.
3729
 */
3730
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_mul(const float p, const char* const name);
3731
/**
3732
 * A scalar multiplication model. Y = aX where a is a scalar.
3733
 * @param a The scalar parameter.
3734
 * @param name The unique name of the model.
3735
 * @return A scalar multiplication model.
3736
 */
3737
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_scalar_mul(const float a, const char* const name);
3738
/**
3739
 * A matrix transpose model.
3740
 * @param axis_a The axis to be exchanged with axis_b
3741
 * @param axis_b The axis to be exchanged with axis_a
3742
 * @param name The unique name of the model.
3743
 * @return A matrix transpose model.
3744
 */
3745
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_transpose(const int axis_a, const int axis_b, const char* const name);
3746
/**
3747
 * A batched matrix multiplication model.
3748
 * @param transpose_a The axis to be transposed in the first matrix.
3749
 * @param transpose_b The axis to be transposed in the second matrix.
3750
 * @param name The unique name of the model.
3751
 * @return A batched matrix multiplication model.
3752
 */
3753
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_matmul(const int transpose_a[2], const int transpose_b[2], const char* const name);
3754
/**
3755
 * A dropout model.
3756
 * @param p The probability to drop the current value.
3757
 * @param entirety Drop the whole layer with the given probability.
3758
 * @param name The unique name of the model.
3759
 * @return A dropout model.
3760
 */
3761
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_dropout(const float p, const int entirety, const char* const name);
3762
/**
3763
 * A masked fill model.
3764
 * @param eq If a value in the given mask tensor is equal to this.
3765
 * @param fill Fill in this value to the output tensor.
3766
 * @param name The unique name of the model.
3767
 * @return A masked fill model.
3768
 */
3769
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_masked_fill(const float eq, const float fill, const char* const name);
3770
/**
3771
 * A index select model.
3772
 * @param datatype The data type of the vocabulary.
3773
 * @param vocab_size The size of the vocabulary.
3774
 * @param embed_size The size of the embedding.
3775
 * @param name The unique name of the model.
3776
 * @return A index select model.
3777
 */
3778
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_index_select(const int datatype, const int vocab_size, const int embed_size, const char* const name);
3779
/**
3780
 * A upsample model.
3781
 * @param width_scale The scale of the width of the input.
3782
 * @param height_scale The scale of the height of the input.
3783
 * @param name The unique name of the model.
3784
 * @return A upsample model.
3785
 */
3786
ccv_cnnp_model_t* ccv_cnnp_upsample(const float width_scale, const float height_scale, const char* const name);
3787
/**
3788
 * A sum value reducer model.
3789
 * @param axis The axis to be reduced.
3790
 * @param axis_count The size of the axis array.
3791
 * @param name The unique name of the model.
3792
 * @return A sum value reducer model.
3793
 */
3794
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_reduce_sum(const int* const axis, const int axis_count, const char* const name);
3795
/**
3796
 * A max value reducer model.
3797
 * @param axis The axis to be reduced.
3798
 * @param axis_count The size of the axis array.
3799
 * @param name The unique name of the model.
3800
 * @return A max value reducer model.
3801
 */
3802
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_reduce_max(const int* const axis, const int axis_count, const char* const name);
3803
/**
3804
 * A element-wise min model.
3805
 * @param name The unique name of the model.
3806
 * @return A element-wise min model.
3807
 */
3808
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_min(const char* const name);
3809
/**
3810
 * A element-wise max model.
3811
 * @param name The unique name of the model.
3812
 * @return A element-wise max model.
3813
 */
3814
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_max(const char* const name);
3815
3816
/** @} */
3817
3818
/** @} */
3819
3820
#endif