Coverage Report

Created: 2021-04-12 03:25

/home/liu/buildslave/linux-x64-runtests/build/lib/nnc/ccv_nnc.h
Line
Count
Source (jump to first uncovered line)
1
/**********************************************************
2
 * C-based/Cached/Core Computer Vision Library
3
 * Liu Liu, 2010-02-01
4
 **********************************************************/
5
6
/**********************************************************
7
 * CCV - Neural Network Collection
8
 **********************************************************/
9
10
#ifndef GUARD_ccv_nnc_h
11
#define GUARD_ccv_nnc_h
12
13
#include "ccv.h"
14
#include <stddef.h>
15
16
// These are generated by cmd/build-cmd.rb
17
#include "cmd/ccv_nnc_cmd.h"
18
#include "cmd/ccv_nnc_backend.h"
19
20
/**
21
 * @defgroup level_0 Level-0 API
22
 * @{
23
 */
24
25
/**
26
 * Initialize the library.
27
 */
28
void ccv_nnc_init(void);
29
30
/** @} */
31
32
/**
33
 * @defgroup level_1 Level-1 API
34
 * @{
35
 */
36
37
/**
38
 * @defgroup level_1_cmd Commands
39
 * @{
40
 */
41
enum {
42
  // Attributes that enable symbolic graph simplification
43
  CCV_NNC_CMD_ATTR_PASSTHROUGH  = 0x01, /**< This doesn't compute anything, but pass the first n tensors to the output (useful for backprop that is identical). */
44
  CCV_NNC_CMD_ATTR_OUTPUT_ONES  = 0x02, /**< All the output tensors are 1s (unit). */
45
  CCV_NNC_CMD_ATTR_NULL_IS_ONES = 0x04, /**< Accept nullptr input as if these are tensors with 1s (unit). */
46
};
47
48
// Flags pass into cmd when executing.
49
enum {
50
  CCV_NNC_ACCUMULATE_OUTPUT = 0x01, /**< Enable accumulate outputs (unsupported). */
51
  CCV_NNC_ZERO_MEMORY_ALLOC = 0x02, /**< Don't allocate any extra memory for this operation. */
52
};
53
54
enum {
55
  CCV_NNC_EXEC_SUCCESS   = 0, /**< Successfully executed the command. */
56
  CCV_NNC_EXEC_INVALID   = -1, /**< Invalid inputs. */
57
  CCV_NNC_EXEC_NO_KERNEL = -2, /**< No kernel available for a given command / backend. */
58
  CCV_NNC_EXEC_OOM       = -3, /**< Out of memory error. */
59
};
60
61
/**
62
 * Parameters for command.
63
 */
64
typedef struct {
65
  struct {
66
    int dim[CCV_NNC_MAX_DIM_ALLOC]; /**< [size.dim] The window size for the layer. For full connect layer, it is 1 because it is 1x1 convolutional layer with count of filters */
67
  } size;
68
  union {
69
    struct {
70
      int count; /**< [convolution.count] The number of filters for convolutional layer. */
71
      int groups; /**< [convolution.groups] The number of groups for convolutional layer. */
72
    } convolution;
73
    struct {
74
      int reserved; /**< [pool.reserved] A reserved field. */
75
    } pool;
76
    struct {
77
      float kappa; /**< [rnorm.kappa] As of b[i] = a[i] / (rnorm.kappa + rnorm.alpha * sum(a, i - rnorm.size / 2, i + rnorm.size / 2)) ^ rnorm.beta */
78
      float alpha; /**< [rnorm.alpha] See **rnorm.kappa**. */
79
      float beta; /**< [rnorm.beta] See **rnorm.kappa**. */
80
    } rnorm;
81
    struct {
82
      int axis[CCV_NNC_MAX_DIM_ALLOC]; /**< [bnorm.axis[]] The axis selected to compute mean / variance. */
83
      int count; /**< [bnorm.count] The number of axis selected. */
84
      float epsilon; /**< [bnorm.epsilon] The epsilon for standard derivation. */
85
      int is_test; /**< [bnorm.is_test] Whether in test mode. */
86
      float momentum; /**< [bnorm.momentum] running_mean = running_mean * momentum + mean * (1 - momentum). */
87
    } bnorm;
88
    struct {
89
      int axis[CCV_NNC_MAX_DIM_ALLOC]; /**< [lnorm.axis[]] The axis selected to compute mean / variance. */
90
      int count; /**< [lnorm.count] The number of axis selected. */
91
      float epsilon; /**< [lnorm.epsilon] The epsilon for standard derivation. */
92
    } lnorm;
93
    struct {
94
      int nesterov; /**< [sgd.nesterov] Nesterov accelerated gradient. */
95
      float rate; /**< [sgd.rate] The learning rate. */
96
      float scale; /**< [sgd.scale] The scale to be applied to the gradient before doing any minimization. */
97
      float decay; /**< [sgd.decay] This is the weight decay parameter, which represents L2 regularization after momentum applied. */
98
      float momentum; /**< [sgd.momentum] For SGD, this follows http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf. */
99
      float dampening; /**< [sgd.dampening] This usually == momentum, however, it can be changed. */
100
    } sgd;
101
    struct {
102
      int step; /**< [adam.step] Step t in adam optimizer. */
103
      float rate; /**< [adam.rate] The learning rate. */
104
      float beta1; /**< [adam.beta1] The beta1 hyper-parameter in adam optimizer. */
105
      float beta2; /**< [adam.beta2] The beta2 hyper-parameter in adam optimizer. */
106
      float decay; /**< [adam.decay] This is the weight decay parameter, which represents L2 regularization. */
107
      float epsilon; /**< [adam.epsilon] The epsilon for standard derivation. */
108
    } adam;
109
    struct {
110
      float rate; /**< [rmsprop.rate] The learning rate. */
111
      float decay; /**< [rmsprop.decay] This is the weight decay parameter, which represents L2 regularization after momentum applied. */
112
      float alpha; /**< [rmsprop.momentum] The alpha hyper-parameter. */
113
      float momentum; /**< [rmsprop.momentum] The momentum hyper-parameter. */
114
      float epsilon; /**< [rmsprop.epsilon] The epsilon for standard derivation. */
115
    } rmsprop;
116
    struct {
117
      int transpose_a[2]; /**< [blas.transpose_a[2]] The axis we'd like to transpose for input a. */
118
      int transpose_b[2]; /**< [blas.transpose_b[2]] The axis we'd like to transpose for input b. */
119
      float a[3]; /**< [blas.a[3]] BLAS scalars. */
120
    } blas;
121
    struct {
122
      float trim0; /**< [label_smoothing.trim0] The smoothed label for 0. */
123
      float trim1; /**< [label_smoothing.trim1] The smoothed label for 1. */
124
    } label_smoothing;
125
    struct {
126
      float pos_weight; /**< [binary_crossentropy.pos_weight] The pos_weight on the loss: -(pos_weight * y * log(x) + (1 - y) * log(1 - x)) */
127
    } binary_crossentropy;
128
    struct {
129
      float beta; /**< [smooth_l1.beta] The beta on the smooth L1 loss (or Huber loss) */
130
    } smooth_l1;
131
    struct {
132
      int axis[CCV_NNC_MAX_DIM_ALLOC]; /**< [reduce.axis[]] The axis selected to reduce. */
133
      int count; /**< [reduce.count] The number of axis selected. */
134
    } reduce;
135
    struct {
136
      int axis[2]; /**< [transpose.axis[2]] The axis we'd like to transpose for input. */
137
    } transpose;
138
    struct {
139
      float p; /**< [dropout.p] Dropout probability. */
140
      int entirety; /**< [dropout.entirety] Drop the whole layer with the given probability. */
141
    } dropout;
142
    struct {
143
      float width_scale; /**< [upsample.width_scale] scale for width parameter. It is between 1 and 2 at the moment. */
144
      float height_scale; /**< [upsample.height_scale] scale for height parameter. It is between 1 and 2 at the moment. */
145
    } upsample;
146
    struct {
147
      float min; /**< [clamp.min] The minimum, NaN is no min. */
148
      float max; /**< [clamp.max] The maximum, NaN is no max. */
149
    } clamp;
150
    struct {
151
      float iou_threshold; /**< [nms.iou_threshold] Threshold between 0 to 1 for IoU threshold. */
152
    } nms;
153
    void* userdata;
154
  };
155
} ccv_nnc_cmd_param_t;
156
157
/*
158
 * Hints for command.
159
 */
160
typedef struct {
161
  struct {
162
    int dim[CCV_NNC_MAX_DIM_ALLOC]; /**< Stride for each dimension. */
163
  } stride;
164
  struct {
165
    int begin[CCV_NNC_MAX_DIM_ALLOC]; /**< Padding at the beginning of a dimension. */
166
    int end[CCV_NNC_MAX_DIM_ALLOC]; /**< Padding at the end of a dimension. */
167
  } border;
168
} ccv_nnc_hint_t;
169
170
/**
171
 * Opaque pointer to a stream object.
172
 */
173
typedef struct ccv_nnc_stream_context_s ccv_nnc_stream_context_t;
174
175
typedef struct ccv_nnc_cmd_vtab_s ccv_nnc_cmd_vtab_t;
176
177
typedef struct ccv_nnc_cmd_s {
178
  uint32_t cmd; /**< The identifier for command. */
179
  uint32_t backend; /**< The identifier for backend. */
180
  int algorithm; /**< The algorithm selector (as defined by backend). */
181
  ccv_nnc_cmd_param_t info; /**< The command parameters. */
182
  /**
183
   * This is for type CCV_NNC_CUSTOM_FORWARD / CCV_NNC_CUSTOM_BACKWARD
184
   */
185
  ccv_nnc_cmd_vtab_t* isa;
186
  void* data;
187
} ccv_nnc_cmd_t;
188
189
/**
190
 * For forward functions, the input tensors and output tensors can be arbitrary.
191
 * However, for backward functions (backpropagation, or gradient functions in other libs),
192
 * the input is: 0~m-1: gradient for output tensors, 1~n: input tensors for forward functions, n+1~n+m: output tensors for forward functions,
193
 * the output is: 0~n-1: output gradients w.r.t. input tensors.
194
 * Which input / output tensors can be ignored can be specified in the cmd config structs.
195
 */
196
typedef int(*ccv_nnc_cmd_exec_f)(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context);
197
198
/**
199
 * The function prototype for autotune. The only difference is the max_workspace_size.
200
 * Whoever implement this function prototype means we handled over autotune task to the
201
 * command itself, you are responsible to select the best algorithm.
202
 * @return The selected algorithm.
203
 */
204
typedef int(*ccv_nnc_cmd_autotune_f)(const ccv_nnc_cmd_t cmd, const size_t max_workspace_size, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context);
205
206
/**
207
 * The function prototype is for automatically deduce tensor shapes.
208
 */
209
210
typedef struct ccv_nnc_cmd_vtab_s {
211
  ccv_nnc_cmd_exec_f exec;
212
  void (*tensor_auto)(const ccv_nnc_cmd_t cmd, const ccv_nnc_tensor_param_t* const inputs, const int input_size, const ccv_nnc_hint_t hint, ccv_nnc_tensor_param_t* const outputs, const int output_size);
213
} ccv_nnc_cmd_vtab_t;
214
215
/** @} */
216
217
/**
218
 * @defgroup level_1_uops Micro Commands to Define Commands
219
 * @{
220
 */
221
222
/**
223
 * @page micro_jittor The concept of meta-ops in Jittor is amazing
224
 *
225
 * NNC will never do JIT. Particularly, I will never do codegen and compile at runtime, especially with static shapes.
226
 * The reason is pretty simple. JIT would be too much architectural dependent and with that, almost impossible for NNC
227
 * to be this small embeddable library that you can carry everywhere. However, this shouldn't prevent NNC to generate
228
 * proper descriptions of each command so a JIT version can be built if there are architectural support for it. In this
229
 * way, the core of NNC can be small and embeddable, but a new backend (identified by the backend attribute) can implement
230
 * more sophisticated JIT mechanism.
231
 *
232
 * More over, I need to generate some code for reference implementations, ideally from some descriptions. This is important
233
 * because with 90+ ops, having a correctly implemented command turns out to be more challenging than I expected.
234
 * Especially if I want them to be compliant with the metadata describes it (what shape it accepts, what datatype works,
235
 * whether it can accept tensor views, and how in-place tensors supported). Many of reference commands are not supporting
236
 * all datatypes and tensor views, and this has to be rectified because these are "reference commands", they must be.
237
 *
238
 * Jittor introduced to the world the idea of meta-ops. Basically, it claims every ops (or macro ops) can be break down to
239
 * 3 types of micro ops (they call them meta-ops): a reindex op that can map tensor from one dimensionality to another, an
240
 * element-wise op that does element-wise primitive math, and finally, a reduce op that can reduce along particular axis
241
 * of a tensor with some elementary math. This feels rather limited initially, but when thinking through it, I am convinced
242
 * it should be enough to describe all commands presented in NNC (this shouldn't be a surprise actually).
243
 *
244
 * Thus, the plan now is to use the meta-ops idea, implementing new micro commands that can describe other commands in
245
 * NNC. In this way, I can generate reference implementation from these descriptions and hopefully have better coverage
246
 * than my existing CPU / GPU reference implementations.
247
 *
248
 * To build on-top what Jittor did, if you need to have my dynamism in the ops, it is essential to index with the provided
249
 * tensor. With just reindex, binary operands and reduce, you cannot do that. Thus, on top of these 3, we added the 4th
250
 * micro op (meta-op) that is "select". This will be sufficient to implement ops such as masking.
251
 *
252
 */
253
254
/**
255
 * Abstract vtab for different ccv_nnc_micro_io_t.
256
 */
257
typedef struct ccv_nnc_micro_io_vtab_s ccv_nnc_micro_io_vtab_t;
258
259
enum {
260
  // These could be much more unary ops.
261
  CCV_NNC_MICRO_UNARY_OP_NEG,
262
  CCV_NNC_MICRO_UNARY_OP_LOG,
263
  CCV_NNC_MICRO_UNARY_OP_EXP,
264
};
265
266
enum {
267
  CCV_NNC_MICRO_BINARY_OP_PLUS,
268
  CCV_NNC_MICRO_BINARY_OP_MINUS,
269
  CCV_NNC_MICRO_BINARY_OP_MUL,
270
  CCV_NNC_MICRO_BINARY_OP_DIV,
271
  CCV_NNC_MICRO_BINARY_OP_MAX,
272
  CCV_NNC_MICRO_BINARY_OP_MIN,
273
  CCV_NNC_MICRO_BINARY_OP_EQUAL_TO,
274
  CCV_NNC_MICRO_BINARY_OP_LESS_THAN,
275
};
276
277
enum {
278
  CCV_NNC_MICRO_REDUCE_OP_MAX,
279
  CCV_NNC_MICRO_REDUCE_OP_MIN,
280
  CCV_NNC_MICRO_REDUCE_OP_ARGMAX,
281
  CCV_NNC_MICRO_REDUCE_OP_ARGMIN,
282
  CCV_NNC_MICRO_REDUCE_OP_MEAN, // Mean is complicated, we need a way to compute total for loops after this. It has to be done statically, and that is "interesting".
283
  CCV_NNC_MICRO_REDUCE_OP_SUM,
284
  CCV_NNC_MICRO_REDUCE_OP_PROD,
285
};
286
287
/**
288
 * Abstract micro op representation.
289
 */
290
typedef struct ccv_nnc_micro_io_s* ccv_nnc_micro_io_t;
291
292
struct ccv_nnc_micro_io_s {
293
  const ccv_nnc_micro_io_vtab_t* isa;
294
  ccv_nnc_micro_io_t* inputs;
295
  int input_size;
296
  int dimensions;
297
  int id;
298
};
299
300
typedef struct {
301
  // Type of the scalar is about precision, nothing to restrict the tensor's type. For example, we may assign a int32_t 0
302
  // to a float16 tensor element, this is perfectly fine.
303
  int type;
304
  union {
305
    unsigned char u8;
306
    int i32;
307
    ccv_float16_t f16;
308
    float f32;
309
    int64_t i64;
310
    uint64_t u64;
311
    double f64;
312
  };
313
} ccv_nnc_micro_scalar_t;
314
315
/**
316
 * Create a free-form input that represent a tensor.
317
 * @param dimensions The maximum dimension of the input.
318
 */
319
CCV_WARN_UNUSED(ccv_nnc_micro_io_t) ccv_nnc_micro_input(const int dimensions);
320
/**
321
 * Use shape and reindex expression to reindex the given tensor into a different shape.
322
 * The expressions can bind integer parameters which starts with $.
323
 *
324
 * The expression follows specific pattern, integer parameters starts with $. Dimensions are represented as dXn, such
325
 * as dA0, dA1, dA2 ... Index into the provided tensor can be represented as i0, i1, i2. These are all 0-indexed.
326
 *
327
 * Constants are supported, such as 235, 431 etc. Operators supported currently are -, +, /, *.
328
 *
329
 * Thus, for broadcast a tensor x[w, h] to y[w, h, h], it can be represented as:
330
 * shape: { "dA0", "dA1", "dA1" }, reindex: { "i0", "i1", "0" }.
331
 * For example, transpose can be represented as:
332
 * shape: { "dA1", "dA0" }, reindex: { "i1", "i0" }
333
 *
334
 * @param shape The shape expressions per axis.
335
 * @param shape_count The dimensions of the output.
336
 * @param ss The tensors to reference shape dimensions.
337
 * @param s_count The number of tensors to reference shape dimensions.
338
 * @param reindex The reindex expressions per axis.
339
 * @param reindex_count The dimensions of the input.
340
 * @param x The input for reindex operation.
341
 * @return The reindexed tensor.
342
 */
343
CCV_WARN_UNUSED(ccv_nnc_micro_io_t) ccv_nnc_micro_reindex(const char* const* const shape, const int shape_count, const ccv_nnc_micro_io_t* const ss, const int s_count, const char* const* const reindex, const int reindex_count, const ccv_nnc_micro_io_t x);
344
/**
345
 * Apply element-wise computations with one tensor.
346
 * @param op The binary operand.
347
 * @param x The input.
348
 * @return The result tensor.
349
 */
350
CCV_WARN_UNUSED(ccv_nnc_micro_io_t) ccv_nnc_micro_unary(const uint32_t op, const ccv_nnc_micro_io_t x);
351
/**
352
 * Apply pair-wise computations with two tensors. They has to match shape exactly.
353
 * @param op The binary operand.
354
 * @param left The left input.
355
 * @param right The right input.
356
 * @return The result tensor.
357
 */
358
CCV_WARN_UNUSED(ccv_nnc_micro_io_t) ccv_nnc_micro_binary(const uint32_t op, const ccv_nnc_micro_io_t left, const ccv_nnc_micro_io_t right);
359
/**
360
 * Apply reduction computation against some dimensions and generate the final reduced tensor.
361
 * @param op The reduction operand.
362
 * @param axis The axis to reduce.
363
 * @param axis_count Number of axes.
364
 * @param x The input tensor.
365
 * @return The result tensor after reduction.
366
 */
367
CCV_WARN_UNUSED(ccv_nnc_micro_io_t) ccv_nnc_micro_reduce(const uint8_t op, const int* const axis, const int axis_count, const ccv_nnc_micro_io_t x);
368
/**
369
 * Use the index tensor to select one value from the x per axis.
370
 * @param axis The axis to select.
371
 * @param x The tensor to be indexed.
372
 * @param index The integer tensor of indexes.
373
 * @return The result tensor with values selected from x with index from index tensor.
374
 */
375
CCV_WARN_UNUSED(ccv_nnc_micro_io_t) ccv_nnc_micro_select(const int axis, const ccv_nnc_micro_io_t x, const ccv_nnc_micro_io_t index);
376
/**
377
 * Return the gradient for a particular output. For example, if x is ccv_nnc_micro_unary(exp, input),
378
 * this represents the gradient of x, not the input. This method is used to generate representation
379
 * of gradients for ccv_nnc_micro_combine_new method.
380
 * @param x The tensor to take a gradient of.
381
 * @return The result tensor that represents the gradient of x.
382
 */
383
CCV_WARN_UNUSED(ccv_nnc_micro_io_t) ccv_nnc_micro_grad(const ccv_nnc_micro_io_t x);
384
/**
385
 * The combined op from micro ops.
386
 */
387
typedef struct ccv_nnc_micro_combine_s ccv_nnc_micro_combine_t;
388
/**
389
 * Combine micro ops into one, and do some optimization passes. The combined one can be then processed to generate
390
 * optimized kernels. Particularly, we can processed the combined one into C code and CUDA code as reference
391
 * implementations.
392
 * @param inputs The inputs for the combined ops.
393
 * @param input_size The number of the inputs.
394
 * @param parameters The name of the parameters, this determines the order of the these parameters.
395
 * @param parameter_size The number of parameters.
396
 * @param outputs The outputs for the combined ops.
397
 * @param output_size The number of the outputs.
398
 * @param ingrads The gradient inputs for the combined ops, including any inputs / outputs if there are any.
399
 * @param ingrad_size The number of ingrads.
400
 * @param outgrads The gradient outputs for the combined ops.
401
 * @param outgrad_size The number of outgrads.
402
 */
403
CCV_WARN_UNUSED(ccv_nnc_micro_combine_t*) ccv_nnc_micro_combine_new(const ccv_nnc_micro_io_t* const inputs, const int input_size, const char* const* const parameters, const int parameter_size, const ccv_nnc_micro_io_t* const outputs, const int output_size, const ccv_nnc_micro_io_t* const ingrads, const int ingrad_size, const ccv_nnc_micro_io_t* const outgrads, const int outgrad_size);
404
/**
405
 * Free the combined op.
406
 * @param combine The op to be freed.
407
 */
408
void ccv_nnc_micro_combine_free(ccv_nnc_micro_combine_t* const combine);
409
/**
410
 * Run combined op in interpret mode. This is only useful for debug internals. Because this is for
411
 * generic combined op, there is no hint, or flags, or stream context, or cmd.
412
 * @param combine The op.
413
 * @param cmd Choice between CMD_CUSTOM_FORWARD and CMD_CUSTOM_BACKWARD.
414
 * @param inputs The input tensors.
415
 * @param input_size The size of input tensors.
416
 * @param values The value corresponding to the parameters when call ccv_nnc_micro_combine_new.
417
 * @param parameter_size How many parameters. It must match when called ccv_nnc_micro_combine_new.
418
 * @param outputs The output tensors.
419
 * @param output_size The size of output tensors.
420
 */
421
void ccv_nnc_micro_combine_interpret(ccv_nnc_micro_combine_t* const combine, const uint32_t cmd, ccv_nnc_tensor_t* const* const inputs, const int input_size, const ccv_nnc_micro_scalar_t* const values, const int parameter_size, ccv_nnc_tensor_t* const* const outputs, const int output_size);
422
/**
423
 * Generate C code from the combined op.
424
 * @param combine The combined op to generate some C code.
425
 * @return The generated C code string.
426
 */
427
char* ccv_nnc_micro_combine_c(ccv_nnc_micro_combine_t* const combine);
428
429
/** @} */
430
431
/**
432
 * @defgroup level_1_tensor Tensors
433
 * @{
434
 */
435
436
/**
437
 * Count the dimensionality of a tensor.
438
 */
439
static inline int ccv_nnc_tensor_nd(const int dim[CCV_NNC_MAX_DIM_ALLOC])
440
1.98M
{
441
1.98M
  int i;
442
5.14M
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++3.15M
)
443
5.17M
    if (dim[i] == 0)
444
2.01M
      return i;
445
18.4E
  return CCV_NNC_MAX_DIM_ALLOC;
446
1.98M
}
Unexecuted instantiation: gradient.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: upsample.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: concat.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: tensor.bind.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: backward.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: graph.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: case_of.backward.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: while.backward.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: autograd.vector.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: dropout.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: custom.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: reduce.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: tfb.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: batch.norm.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: crossentropy.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: cnnp.core.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: symbolic.graph.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: case_of.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: micro.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: compression.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: transform.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: dataframe.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: gemm.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: roi_align.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: swish.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: index.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: minimize.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: symbolic.graph.compile.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: autograd.tests.c:ccv_nnc_tensor_nd
tensor.tests.c:ccv_nnc_tensor_nd
Line
Count
Source
440
1
{
441
1
  int i;
442
2
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++1
)
443
2
    if (dim[i] == 0)
444
1
      return i;
445
1
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
1
}
Unexecuted instantiation: rand.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: while.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: nms.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: graph.io.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: cblas.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: simplify.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: numa.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: tape.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: dynamic.graph.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: layer.norm.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: parallel.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: winograd.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: dataframe.addons.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: broadcast.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: compare.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: smooth_l1.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: forward.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: cublas.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: symbolic.graph.vgg.d.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: imdb.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: graph.vgg.d.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: random.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: cudnn.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: dense.net.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: cifar.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: rmsprop.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: sgd.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: nccl.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: schedule.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: loss.tests.c:ccv_nnc_tensor_nd
Unexecuted instantiation: adam.tests.c:ccv_nnc_tensor_nd
ccv_nnc_cmd.c:ccv_nnc_tensor_nd
Line
Count
Source
440
206k
{
441
206k
  int i;
442
506k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++300k
)
443
506k
    if (dim[i] == 0)
444
206k
      return i;
445
206k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
206k
}
ccv_nnc_tensor.c:ccv_nnc_tensor_nd
Line
Count
Source
440
140
{
441
140
  int i;
442
422
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++282
)
443
422
    if (dim[i] == 0)
444
140
      return i;
445
140
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
140
}
Unexecuted instantiation: ccv_nnc_tensor_io.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_stream.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_micro.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_micro_core.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_micro_interpret.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_micro_simplify.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_graph.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_symbolic_graph.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_symbolic_graph_io.c:ccv_nnc_tensor_nd
ccv_nnc_symbolic_graph_compile.c:ccv_nnc_tensor_nd
Line
Count
Source
440
1.00k
{
441
1.00k
  int i;
442
2.00k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++1.00k
)
443
2.00k
    if (dim[i] == 0)
444
1.00k
      return i;
445
1.00k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
1.00k
}
Unexecuted instantiation: ccv_nnc_symbolic_graph_backward.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_symbolic_graph_while.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_graph_while.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_tensor_tape.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_symbolic_graph_case_of.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_graph_case_of.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_symbolic_graph_minimize.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_symbolic_graph_parallel.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_symbolic_graph_simplify.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_symbolic_graph_memory_compression.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_graph_run.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_dynamic_graph.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_dynamic_graph_alloc.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_dynamic_graph_backward.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_dynamic_graph_apply_gradients.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_dynamic_graph_minimize.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_dynamic_graph_evaluate.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_cnnp_dataframe.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_cnnp_dataframe_core.c:ccv_nnc_tensor_nd
ccv_cnnp_dataframe_addons.c:ccv_nnc_tensor_nd
Line
Count
Source
440
119k
{
441
119k
  int i;
442
445k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++325k
)
443
476k
    if (dim[i] == 0)
444
150k
      return i;
445
18.4E
  return CCV_NNC_MAX_DIM_ALLOC;
446
119k
}
Unexecuted instantiation: ccv_cnnp_dataframe_csv.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_cnnp_model.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_cnnp_model_io.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_cnnp_model_core.c:ccv_nnc_tensor_nd
ccv_cnnp_model_addons.c:ccv_nnc_tensor_nd
Line
Count
Source
440
2.76k
{
441
2.76k
  int i;
442
7.06k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++4.30k
)
443
7.06k
    if (dim[i] == 0)
444
2.76k
      return i;
445
2.76k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
2.76k
}
Unexecuted instantiation: ccv_nnc_rand_uniform_cpu_ref.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_rand_normal_cpu_ref.c:ccv_nnc_tensor_nd
ccv_nnc_conv_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
440
4.71k
{
441
4.71k
  int i;
442
22.1k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++17.4k
)
443
22.1k
    if (dim[i] == 0)
444
4.71k
      return i;
445
4.71k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
4.71k
}
ccv_nnc_conv_cpu_opt.c:ccv_nnc_tensor_nd
Line
Count
Source
440
3.23k
{
441
3.23k
  int i;
442
15.2k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++12.0k
)
443
15.2k
    if (dim[i] == 0)
444
3.23k
      return i;
445
3.23k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
3.23k
}
Unexecuted instantiation: ccv_nnc_swish_cpu_ref.c:ccv_nnc_tensor_nd
ccv_nnc_dropout_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
440
24
{
441
24
  int i;
442
72
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++48
)
443
72
    if (dim[i] == 0)
444
24
      return i;
445
24
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
24
}
ccv_nnc_softmax_crossentropy_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
440
1.24k
{
441
1.24k
  int i;
442
3.12k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++1.87k
)
443
3.12k
    if (dim[i] == 0)
444
1.24k
      return i;
445
1.24k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
1.24k
}
ccv_nnc_sgd_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
440
212k
{
441
212k
  int i;
442
546k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++333k
)
443
546k
    if (dim[i] == 0)
444
212k
      return i;
445
212k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
212k
}
ccv_nnc_max_pool_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
440
2.12k
{
441
2.12k
  int i;
442
10.5k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++8.46k
)
443
10.5k
    if (dim[i] == 0)
444
2.12k
      return i;
445
2.12k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
2.12k
}
ccv_nnc_avg_pool_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
440
2.73k
{
441
2.73k
  int i;
442
13.6k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++10.9k
)
443
13.6k
    if (dim[i] == 0)
444
2.73k
      return i;
445
2.73k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
2.73k
}
ccv_nnc_sigmoid_binary_crossentropy_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
440
238
{
441
238
  int i;
442
698
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++460
)
443
698
    if (dim[i] == 0)
444
238
      return i;
445
238
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
238
}
ccv_nnc_lssc_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
440
72
{
441
72
  int i;
442
336
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++264
)
443
336
    if (dim[i] == 0)
444
72
      return i;
445
72
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
72
}
ccv_nnc_min_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
440
82
{
441
82
  int i;
442
312
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++230
)
443
312
    if (dim[i] == 0)
444
82
      return i;
445
82
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
82
}
ccv_nnc_max_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
440
82
{
441
82
  int i;
442
312
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++230
)
443
312
    if (dim[i] == 0)
444
82
      return i;
445
82
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
82
}
ccv_nnc_softmax_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
440
510
{
441
510
  int i;
442
1.03k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++523
)
443
1.03k
    if (dim[i] == 0)
444
510
      return i;
445
510
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
510
}
ccv_nnc_binary_crossentropy_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
440
194
{
441
194
  int i;
442
550
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++356
)
443
550
    if (dim[i] == 0)
444
194
      return i;
445
194
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
194
}
ccv_nnc_categorical_crossentropy_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
440
35
{
441
35
  int i;
442
93
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++58
)
443
93
    if (dim[i] == 0)
444
35
      return i;
445
35
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
35
}
ccv_nnc_smooth_l1_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
440
63
{
441
63
  int i;
442
177
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++114
)
443
177
    if (dim[i] == 0)
444
63
      return i;
445
63
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
63
}
Unexecuted instantiation: ccv_nnc_relu_cpu_ref.c:ccv_nnc_tensor_nd
ccv_nnc_adam_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
440
16.0k
{
441
16.0k
  int i;
442
48.0k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++32.0k
)
443
48.0k
    if (dim[i] == 0)
444
16.0k
      return i;
445
16.0k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
16.0k
}
ccv_nnc_nms_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
440
18
{
441
18
  int i;
442
48
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++30
)
443
48
    if (dim[i] == 0)
444
18
      return i;
445
18
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
18
}
ccv_nnc_gemm_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
440
64.3k
{
441
64.3k
  int i;
442
176k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++112k
)
443
176k
    if (dim[i] == 0)
444
64.3k
      return i;
445
64.3k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
64.3k
}
ccv_nnc_gemm_cpu_opt.c:ccv_nnc_tensor_nd
Line
Count
Source
440
912
{
441
912
  int i;
442
2.32k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++1.41k
)
443
2.32k
    if (dim[i] == 0)
444
912
      return i;
445
912
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
912
}
ccv_nnc_add_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
440
204k
{
441
204k
  int i;
442
477k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++272k
)
443
477k
    if (dim[i] == 0)
444
204k
      return i;
445
204k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
204k
}
ccv_nnc_mul_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
440
104k
{
441
104k
  int i;
442
210k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++105k
)
443
210k
    if (dim[i] == 0)
444
104k
      return i;
445
104k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
104k
}
ccv_nnc_upsample_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
440
24
{
441
24
  int i;
442
96
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++72
)
443
96
    if (dim[i] == 0)
444
24
      return i;
445
24
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
24
}
ccv_nnc_util_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
440
200
{
441
200
  int i;
442
734
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++534
)
443
734
    if (dim[i] == 0)
444
200
      return i;
445
200
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
200
}
ccv_nnc_roi_align_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
440
65
{
441
65
  int i;
442
261
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++196
)
443
261
    if (dim[i] == 0)
444
65
      return i;
445
65
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
65
}
Unexecuted instantiation: ccv_nnc_sigmoid_cpu_ref.c:ccv_nnc_tensor_nd
ccv_nnc_index_select_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
440
27
{
441
27
  int i;
442
68
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++41
)
443
68
    if (dim[i] == 0)
444
27
      return i;
445
27
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
27
}
ccv_nnc_rmsprop_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
440
16.0k
{
441
16.0k
  int i;
442
48.0k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++32.0k
)
443
48.0k
    if (dim[i] == 0)
444
16.0k
      return i;
445
16.0k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
16.0k
}
ccv_nnc_ew_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
440
725k
{
441
725k
  int i;
442
1.78M
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++1.06M
)
443
1.78M
    if (dim[i] == 0)
444
725k
      return i;
445
725k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
725k
}
ccv_nnc_reduce_sum_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
440
65.3k
{
441
65.3k
  int i;
442
196k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++131k
)
443
196k
    if (dim[i] == 0)
444
65.3k
      return i;
445
65.3k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
65.3k
}
ccv_nnc_reduce_max_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
440
41
{
441
41
  int i;
442
99
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++58
)
443
99
    if (dim[i] == 0)
444
41
      return i;
445
41
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
41
}
ccv_nnc_batch_norm_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
440
438
{
441
438
  int i;
442
1.41k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++975
)
443
1.41k
    if (dim[i] == 0)
444
438
      return i;
445
438
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
438
}
ccv_nnc_layer_norm_cpu_ref.c:ccv_nnc_tensor_nd
Line
Count
Source
440
90
{
441
90
  int i;
442
450
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++360
)
443
450
    if (dim[i] == 0)
444
90
      return i;
445
90
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
90
}
Unexecuted instantiation: ccv_nnc_rand.c:ccv_nnc_tensor_nd
_ccv_nnc_conv_cpu_4x4_3x3_winograd.c:ccv_nnc_tensor_nd
Line
Count
Source
440
206
{
441
206
  int i;
442
824
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++618
)
443
824
    if (dim[i] == 0)
444
206
      return i;
445
206
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
206
}
Unexecuted instantiation: _ccv_nnc_conv_cpu_fft.c:ccv_nnc_tensor_nd
Unexecuted instantiation: _ccv_nnc_conv_cpu_gemm.c:ccv_nnc_tensor_nd
_ccv_nnc_conv_cpu_opt.c:ccv_nnc_tensor_nd
Line
Count
Source
440
2.26k
{
441
2.26k
  int i;
442
11.0k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++8.75k
)
443
11.0k
    if (dim[i] == 0)
444
2.26k
      return i;
445
2.26k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
2.26k
}
ccv_nnc_convolution.c:ccv_nnc_tensor_nd
Line
Count
Source
440
8.70k
{
441
8.70k
  int i;
442
43.4k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++34.7k
)
443
43.4k
    if (dim[i] == 0)
444
8.70k
      return i;
445
8.70k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
8.70k
}
Unexecuted instantiation: ccv_nnc_swish.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_dropout.c:ccv_nnc_tensor_nd
ccv_nnc_softmax_crossentropy.c:ccv_nnc_tensor_nd
Line
Count
Source
440
860
{
441
860
  int i;
442
2.56k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++1.70k
)
443
2.56k
    if (dim[i] == 0)
444
860
      return i;
445
860
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
860
}
Unexecuted instantiation: ccv_nnc_sgd.c:ccv_nnc_tensor_nd
ccv_nnc_pool.c:ccv_nnc_tensor_nd
Line
Count
Source
440
2.06k
{
441
2.06k
  int i;
442
10.3k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++8.23k
)
443
10.3k
    if (dim[i] == 0)
444
2.06k
      return i;
445
2.06k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
2.06k
}
ccv_nnc_sigmoid_binary_crossentropy.c:ccv_nnc_tensor_nd
Line
Count
Source
440
28
{
441
28
  int i;
442
84
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++56
)
443
84
    if (dim[i] == 0)
444
28
      return i;
445
28
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
28
}
ccv_nnc_compression.c:ccv_nnc_tensor_nd
Line
Count
Source
440
10
{
441
10
  int i;
442
48
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++38
)
443
48
    if (dim[i] == 0)
444
10
      return i;
445
10
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
10
}
Unexecuted instantiation: ccv_nnc_cmp.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_softmax.c:ccv_nnc_tensor_nd
ccv_nnc_binary_crossentropy.c:ccv_nnc_tensor_nd
Line
Count
Source
440
12
{
441
12
  int i;
442
36
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++24
)
443
36
    if (dim[i] == 0)
444
12
      return i;
445
12
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
12
}
ccv_nnc_categorical_crossentropy.c:ccv_nnc_tensor_nd
Line
Count
Source
440
31
{
441
31
  int i;
442
96
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++65
)
443
96
    if (dim[i] == 0)
444
31
      return i;
445
31
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
31
}
ccv_nnc_smooth_l1.c:ccv_nnc_tensor_nd
Line
Count
Source
440
4
{
441
4
  int i;
442
12
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++8
)
443
12
    if (dim[i] == 0)
444
4
      return i;
445
4
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
4
}
Unexecuted instantiation: ccv_nnc_relu.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_adam.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_nms.c:ccv_nnc_tensor_nd
ccv_nnc_blas.c:ccv_nnc_tensor_nd
Line
Count
Source
440
96.5k
{
441
96.5k
  int i;
442
232k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++135k
)
443
232k
    if (dim[i] == 0)
444
96.5k
      return i;
445
96.5k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
96.5k
}
_ccv_nnc_gemm_cpu_opt.c:ccv_nnc_tensor_nd
Line
Count
Source
440
534
{
441
534
  int i;
442
1.41k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++885
)
443
1.41k
    if (dim[i] == 0)
444
534
      return i;
445
534
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
534
}
_ccv_nnc_gemm_cpu_sys.c:ccv_nnc_tensor_nd
Line
Count
Source
440
114k
{
441
114k
  int i;
442
313k
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++199k
)
443
313k
    if (dim[i] == 0)
444
114k
      return i;
445
114k
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
114k
}
ccv_nnc_upsample.c:ccv_nnc_tensor_nd
Line
Count
Source
440
12
{
441
12
  int i;
442
60
  for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC; 
i++48
)
443
60
    if (dim[i] == 0)
444
12
      return i;
445
12
  
return 0
CCV_NNC_MAX_DIM_ALLOC0
;
446
12
}
Unexecuted instantiation: ccv_nnc_comm.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_util.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_roi_align.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_sigmoid.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_index_select.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_rmsprop.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_ew.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_reduce.c:ccv_nnc_tensor_nd
Unexecuted instantiation: ccv_nnc_norm.c:ccv_nnc_tensor_nd
447
448
/**
449
 * Create a new tensor.
450
 * @param ptr If 0, nnc will allocate the tensor ourselves. Otherwise, will use the memory region referenced by 'ptr'.
451
 * @param params Tensor parameters.
452
 * @param flags Reserved flags for the allocation.
453
 * @return The newly created tensor.
454
 */
455
CCV_WARN_UNUSED(ccv_nnc_tensor_t*) ccv_nnc_tensor_new(const void* const ptr, const ccv_nnc_tensor_param_t params, const int flags);
456
/**
457
 * Create a new tensor on stack.
458
 * @param ptr If 0, nnc will allocate the tensor ourselves. Otherwise, will use the memory region referenced by 'ptr'.
459
 * @param params Tensor parameters.
460
 * @param flags Reserved flags for the allocation.
461
 * @return The tensor struct.
462
 */
463
CCV_WARN_UNUSED(ccv_nnc_tensor_t) ccv_nnc_tensor(const void* const ptr, const ccv_nnc_tensor_param_t params, const int flags);
464
/**
465
 * Resize an existing tensor to a new dimension.
466
 * @param tensor The old tensor to be resized.
467
 * @param params Tensor parameters.
468
 * @return Potentially a new tensor, but if the size is sufficient, it will be in-place operation.
469
 */
470
CCV_WARN_UNUSED(ccv_nnc_tensor_t*) ccv_nnc_tensor_resize(ccv_nnc_tensor_t* const tensor, const ccv_nnc_tensor_param_t params);
471
/**
472
 * Pin the tensor memory for faster access on GPU.
473
 * @param tensor A tensor that we want to pin the memory.
474
 * @return 0 for success.
475
 */
476
int ccv_nnc_tensor_pin_memory(ccv_nnc_tensor_t* const tensor);
477
/**
478
 * Free a tensor object.
479
 * @param tensor The tensor to be freed.
480
 */
481
void ccv_nnc_tensor_free(ccv_nnc_tensor_t* const tensor);
482
/**
483
 * Create a tensor view. A tensor view can be non-continuous. Essentially, it provides a view into a tensor.
484
 * @param tensor The tensor that we want to view into.
485
 * @param params The tensor parameters for the tensor view.
486
 * @param ofs The offset on each of the dimension.
487
 * @param inc The line size of each dimension.
488
 * @return The newly created tensor view.
489
 */
490
CCV_WARN_UNUSED(ccv_nnc_tensor_view_t*) ccv_nnc_tensor_view_new(const ccv_nnc_tensor_t* const tensor, const ccv_nnc_tensor_param_t params, const int ofs[CCV_NNC_MAX_DIM_ALLOC], const int inc[CCV_NNC_MAX_DIM_ALLOC]);
491
/**
492
 * Create a tensor view on stack.
493
 * @param tensor The tensor that we want to view into.
494
 * @param params The tensor parameters for the tensor view.
495
 * @param ofs The offset on each of the dimension.
496
 * @param inc The line size of each dimension.
497
 * @return The tensor view struct.
498
 */
499
CCV_WARN_UNUSED(ccv_nnc_tensor_view_t) ccv_nnc_tensor_view(const ccv_nnc_tensor_t* const tensor, const ccv_nnc_tensor_param_t params, const int ofs[CCV_NNC_MAX_DIM_ALLOC], const int inc[CCV_NNC_MAX_DIM_ALLOC]);
500
/**
501
 * Free a tensor view object.
502
 * @param tensor_view The tensor view to be freed.
503
 */
504
void ccv_nnc_tensor_view_free(ccv_nnc_tensor_view_t* const tensor_view);
505
/**
506
 * Zero out a given tensor.
507
 * @param tensor The tensor to be zero out.
508
 */
509
void ccv_nnc_tensor_zero(void* const tensor);
510
/**
511
 * Compare whether two tensors are equal. This will tolerant some floating point issues follow http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm
512
 * @param a Tensor a.
513
 * @param b Tensor b.
514
 * @return 0 if equal, -1 otherwise.
515
 */
516
CCV_WARN_UNUSED(int) ccv_nnc_tensor_eq(const ccv_nnc_tensor_t* const a, const ccv_nnc_tensor_t* const b);
517
/**
518
 * Write tensor to a SQLite database with a given name.
519
 * @param tensor The tensor.
520
 * @param handle The SQLite handle.
521
 * @param name The name to find the tensor in the database.
522
 * @return CCV_IO_FINAL for success, otherwise error.
523
 */
524
int ccv_nnc_tensor_write(const ccv_nnc_tensor_t* const tensor, void* const handle, const char* const name);
525
/**
526
 * Read a tensor from a SQLite database with a given name.
527
 * @param handle The SQLite handle.
528
 * @param name The name to find the tensor in the database.
529
 * @param tensor_out The pointer to hold the tensor. If you supply the tensor yourself, we will read the data into the existing tensor.
530
 * @return CCV_IO_FINAL for success, otherwise error.
531
 */
532
int ccv_nnc_tensor_read(void* const handle, const char* const name, ccv_nnc_tensor_t** const tensor_out);
533
534
/** @} */
535
536
/**
537
 * @addtogroup level_1_cmd
538
 * @{
539
 */
540
541
/**
542
 * Return a high precision time unit. What this time unit is is platform specific.
543
 * @return A monotonic increasing 64-bit integer w.r.t. passing of time.
544
 */
545
uint64_t ccv_nnc_cmd_mono_time(void);
546
/**
547
 * Return UTF-8 encoded name of a given command.
548
 * @return A UTF-8 string (pointing to a static constant).
549
 */
550
CCV_WARN_UNUSED(const char*) ccv_nnc_cmd_name(const uint32_t cmd);
551
/**
552
 * Return UTF-8 encoded name of a given backend.
553
 * @return A UTF-8 string (pointing to a static constant).
554
 */
555
CCV_WARN_UNUSED(const char*) ccv_nnc_cmd_backend_name(const uint32_t backend);
556
/**
557
 * Check whether a given backend is available for a given command.
558
 * @return 1 if it is available.
559
 */
560
CCV_WARN_UNUSED(int) ccv_nnc_cmd_ok(const uint32_t cmd, const uint32_t backend);
561
/**
562
 * Create a wrapped command with parameters.
563
 * @param cmd The command identifier.
564
 * @param isa If this is a CCV_NNC_CUSTOM_FORWARD / CCV_NNC_CUSTOM_BACKWARD command, this supplies the custom functions.
565
 * @param params The parameters for the command.
566
 * @param flags A reserved field for flags.
567
 * @return A wrapped ccv_nnc_cmd_t structure.
568
 */
569
CCV_WARN_UNUSED(ccv_nnc_cmd_t) ccv_nnc_cmd(const uint32_t cmd, ccv_nnc_cmd_vtab_t* const isa, const ccv_nnc_cmd_param_t params, const int flags);
570
/**
571
 * Verify whether a hint is compatible with a given command and a given input tensor parameters / output tensor parameters.
572
 * @param hint The hint for a given command. Hint defines things such as paddings, strides etc. for a given command.
573
 * @param cmd The wrapped command.
574
 * @param a The input tensor parameters.
575
 * @param b The output tensor parameters.
576
 * @return 1 if it passes.
577
 */
578
CCV_WARN_UNUSED(int) ccv_nnc_hint_verify(const ccv_nnc_hint_t hint, const ccv_nnc_cmd_param_t cmd, const ccv_nnc_tensor_param_t a, const ccv_nnc_tensor_param_t b);
579
/**
580
 * Automatically find the best hint for a given input / output (on forward pass only).
581
 * @param cmd The wrapped command.
582
 * @param a The input tensor parameters.
583
 * @param b The output tensor parameters.
584
 * @return Best hint we can guess.
585
 */
586
CCV_WARN_UNUSED(ccv_nnc_hint_t) ccv_nnc_hint_auto(const ccv_nnc_cmd_param_t cmd, const ccv_nnc_tensor_param_t a, const ccv_nnc_tensor_param_t b);
587
/**
588
 * Automatically find the outputs for the given inputs / hint.
589
 * @param cmd The wrapped command.
590
 * @param inputs An array of input tensor parameters.
591
 * @param input_size The size of input array.
592
 * @param hint The hint for the given command.
593
 * @param outputs An array for the output tensor parameters.
594
 * @param output_size The size of the output array.
595
 */
596
void ccv_nnc_hint_tensor_auto(const ccv_nnc_cmd_t cmd, const ccv_nnc_tensor_param_t* const inputs, const int input_size, const ccv_nnc_hint_t hint, ccv_nnc_tensor_param_t* const outputs, const int output_size);
597
/**
598
 * Find a suitable backend for a given command and tensor settings.
599
 * @param cmd The wrapped command.
600
 * @param tensor_memory The tensor memory setup (whether it is CPU or GPU).
601
 * @param tensor_formats The tensor layout format (NCHW, NHWC, CHWN etc.)
602
 * @param tensor_datatypes The datatype of a given tensor (FP32 etc.)
603
 * @return The backend identifier for the selected backend.
604
 */
605
CCV_WARN_UNUSED(uint32_t) ccv_nnc_cmd_find_backend(const ccv_nnc_cmd_t cmd, const int tensor_memory, const int tensor_formats, const int tensor_datatypes);
606
/**
607
 * Run autotune to find the best kernel and configuration for the given input.
608
 * @param cmd The original wrapped command.
609
 * @param max_workspace_size The maximum memory allowed for this command to execute.
610
 * @param hint The hint for the given command.
611
 * @param flags The reserved field for flags.
612
 * @param inputs An array of input tensors.
613
 * @param input_size The size of input array.
614
 * @param outputs An array of output tensors.
615
 * @param output_size The size of output array.
616
 * @param stream_context The stream we can do the autotune on. 0 uses default stream.
617
 * @return The modified cmd that contains the updated configuration.
618
 */
619
CCV_WARN_UNUSED(ccv_nnc_cmd_t) ccv_nnc_cmd_autotune(const ccv_nnc_cmd_t cmd, const size_t max_workspace_size, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context);
620
/**
621
 * Check whether a given tensor input / output pattern can be computed by the given command.
622
 * bitmasks encode whether a given input tensor / output tensor available at a position.
623
 * @param cmd The wrapped command to check.
624
 * @param input_size The intended size of the input tensor array.
625
 * @param output_size The intended size of the output tensor array.
626
 * @param input_bitmasks The input tensor array encoding in bitmap, 0: no tensor, 1: has a tensor.
627
 * @param input_bitmask_size The size of the input bitmask array.
628
 * @param output_bitmasks The output tensor array encoding in bitmap.
629
 * @param output_bitmask_size The size of the output bitmask array.
630
 * @return 1 if the command can be executed with the given input / output pattern.
631
 */
632
CCV_WARN_UNUSED(int) ccv_nnc_cmd_bitmask(const ccv_nnc_cmd_t cmd, const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size);
633
/**
634
 * Execute a given command.
635
 * @param cmd The wrapped command to be executed.
636
 * @param hint The hint provided for the command.
637
 * @param flags A reserved field for flags.
638
 * @param inputs The input tensor array.
639
 * @param input_size The size of input tensor array.
640
 * @param outputs The output tensor array.
641
 * @param output_size The size of output tensor array.
642
 * @param stream_context The stream which the command will be executed upon.
643
 * @return CCV_NNC_EXEC_SUCCESS if succeed.
644
 */
645
int ccv_nnc_cmd_exec(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context);
646
/**
647
 * Check whether the command is a forward pass or not.
648
 * @param cmd The wrapped command.
649
 * @return 1 if it is a forward pass.
650
 */
651
CCV_WARN_UNUSED(int) ccv_nnc_cmd_is_forward(const ccv_nnc_cmd_t cmd);
652
/**
653
 * Check whether the command is a backward pass or not.
654
 * @param cmd The wrapped command.
655
 * @return 1 if it is a backward pass.
656
 */
657
CCV_WARN_UNUSED(int) ccv_nnc_cmd_is_backward(const ccv_nnc_cmd_t cmd);
658
/**
659
 * Check this command against listed attributes.
660
 * @param cmd The wrapped command.
661
 * @param flags The flags to check against the command (unsupported).
662
 * @return 1 if the flag is supported by the command.
663
 */
664
CCV_WARN_UNUSED(int) ccv_nnc_cmd_attr(const ccv_nnc_cmd_t cmd, const int flags);
665
/**
666
 * Check whether this command allow inplace operation against a particular input and output (index from 0).
667
 * @param cmd The wrapped command.
668
 * @param input_idx The index of the input tensor we want to check.
669
 * @param input_size The total number of inputs.
670
 * @param output_idx the index of the output tensor we want to check.
671
 * @param output_size The total number of outputs.
672
 * @return 1 if the input tensor can be used as the output tensor.
673
 */
674
CCV_WARN_UNUSED(int) ccv_nnc_cmd_allow_inplace(const ccv_nnc_cmd_t cmd, const int input_idx, const int input_size, const int output_idx, const int output_size);
675
/**
676
 * Check whether this command need to enforce inplace operation against a particular input and output (index from 0).
677
 * @param cmd The wrapped command.
678
 * @param input_idx The index of the input tensor we want to check.
679
 * @param input_size The total number of inputs.
680
 * @param output_idx the index of the output tensor we want to check.
681
 * @param output_size The total number of outputs.
682
 * @return 1 if the input tensor is required to be used as the output tensor.
683
 */
684
CCV_WARN_UNUSED(int) ccv_nnc_cmd_enforce_inplace(const ccv_nnc_cmd_t cmd, const int input_idx, const int input_size, const int output_idx, const int output_size);
685
686
/** @} */
687
688
/**
689
 * @defgroup level_1_stream Streams
690
 * @{
691
 */
692
693
// Control flow constructs
694
// Follow heavily based along CUDA's stream / event idea.
695
enum {
696
  CCV_STREAM_CONTEXT_CPU = 0x1, /**< A CPU based stream context (unsupported). */
697
  CCV_STREAM_CONTEXT_GPU = 0x2, /**< A GPU based stream context. */
698
};
699
205k
#define CCV_STREAM_GET_CONTEXT(type) ((type) & 0x3)
700
#define CCV_STREAM_GET_DEVICE(type) CCV_TENSOR_GET_DEVICE(type)
701
44.9k
#define CCV_STREAM_GET_DEVICE_ID(type) CCV_TENSOR_GET_DEVICE_ID(type)
702
3.06k
#define CCV_STREAM_SET_DEVICE_ID(type, device_id) CCV_TENSOR_SET_DEVICE_ID(type, device_id)
703
/**
704
 * Create a new stream context.
705
 * @param type A combination of CPU / GPU and DEVICE_ID.
706
 * @return The newly created stream context.
707
 */
708
CCV_WARN_UNUSED(ccv_nnc_stream_context_t*) ccv_nnc_stream_context_new(const int type);
709
/**
710
 * Get the type of the stream context.
711
 * @param stream_context The stream context we want to inspect.
712
 * @return The type of the stream context.
713
 */
714
CCV_WARN_UNUSED(int) ccv_nnc_stream_context_type(const ccv_nnc_stream_context_t* const stream_context);
715
/**
716
 * Get a stream context local workspace memory. This memory region will be reused
717
 * the next time when you call this method on the same stream context.
718
 * @param stream_context The stream context which provides the workspace memory.
719
 * @param workspace_size The size of the workspace memory.
720
 * @param mem The memory type of the said workspace memory (GPU or CPU).
721
 * @return A pointer to the workspace memory.
722
 */
723
CCV_WARN_UNUSED(void*) ccv_nnc_stream_context_get_workspace(ccv_nnc_stream_context_t* const stream_context, const size_t workspace_size, const int mem);
724
/**
725
 * Deallocate any workspace memory on the stream context.
726
 * @param stream The stream context to drain workspace memory.
727
 */
728
void ccv_nnc_stream_context_drain(ccv_nnc_stream_context_t* const stream);
729
/**
730
 * The callback prototype on the stream context.
731
 */
732
typedef void(*ccv_nnc_callback_f)(void* const callback_context);
733
/**
734
 * Add a callback function to be called once stream executed to that point.
735
 * @param stream The stream context to add callback.
736
 * @param callback The callback function.
737
 * @param callback_context The context to be called with the callback function.
738
 */
739
void ccv_nnc_stream_context_add_callback(ccv_nnc_stream_context_t* const stream, const ccv_nnc_callback_f callback, void* const callback_context);
740
/**
741
 * Wait until all tasks submitted (command, graph run etc.) on the stream context
742
 * completed.
743
 * @param stream The stream context to wait.
744
 */
745
void ccv_nnc_stream_context_wait(const ccv_nnc_stream_context_t* const stream);
746
/**
747
 * The hooks to be called when a stream context is destroyed.
748
 * At the moment, the stream context will be destroyed at the time
749
 * ccv_nnc_stream_context_free is called, so there is no tricks.
750
 * This method is useful because we have some resources associated
751
 * with stream pointer, hence, would be good to free these resources
752
 * upon free the stream.
753
 */
754
typedef void (*ccv_nnc_stream_context_destructor_f)(const ccv_nnc_stream_context_t* const stream, void* const context);
755
/**
756
 * Add a new destructor hook callback when a stream is freed.
757
 * @param stream The stream to be observed.
758
 * @param destructor The new destructor callback method.
759
 * @param context additional context.
760
 * @return A integer identifier to help remove the hook.
761
 */
762
int ccv_nnc_stream_context_add_destructor_hook(ccv_nnc_stream_context_t* const stream, ccv_nnc_stream_context_destructor_f destructor, void* const context);
763
/**
764
 * Remove a destructor hook callback.
765
 * @param stream The stream we observe.
766
 * @param hook_id The returned integer when calling the add method.
767
 */
768
void ccv_nnc_stream_context_remove_destructor_hook(ccv_nnc_stream_context_t* const stream, const int hook_id);
769
/**
770
 * Deallocate the stream context.
771
 * @param stream_context The stream context to be destroyed.
772
 */
773
void ccv_nnc_stream_context_free(ccv_nnc_stream_context_t* const stream_context);
774
775
/**
776
 * Opaque pointer to the signal object.
777
 */
778
typedef struct ccv_nnc_stream_signal_s ccv_nnc_stream_signal_t;
779
780
/**
781
 * Create a new stream signal.
782
 * @param type A composed type denotes whether it associated with a GPU or CPU stream context, and on which device.
783
 * @return The newly created stream signal.
784
 */
785
CCV_WARN_UNUSED(ccv_nnc_stream_signal_t*) ccv_nnc_stream_signal_new(const int type);
786
/**
787
 * Get the type of the stream signal.
788
 * @param signal The stream signal we want to inspect.
789
 * @return The type of the stream signal.
790
 */
791
CCV_WARN_UNUSED(int) ccv_nnc_stream_signal_type(const ccv_nnc_stream_signal_t* const signal);
792
/**
793
 * Emit a signal on a stream.
794
 * @param stream The stream context where the signal will be emitted.
795
 * @param signal The signal to be emitted. It has to be on the same device as the stream.
796
 */
797
void ccv_nnc_stream_context_emit_signal(ccv_nnc_stream_context_t* const stream, ccv_nnc_stream_signal_t* const signal);
798
/**
799
 * Wait a signal on a stream.
800
 * @param stream The stream context that will be blocked by the signal.
801
 * @param signal The signal to be waited. It can be on a different device of the stream.
802
 */
803
void ccv_nnc_stream_context_wait_signal(const ccv_nnc_stream_context_t* const stream, const ccv_nnc_stream_signal_t* const signal);
804
/**
805
 * Get on which stream context this signal is going to be emitted on.
806
 * @param signal The signal we want to inspect.
807
 * @return The most recent stream context you called ccv_nnc_stream_context_emit_signal with.
808
 */
809
CCV_WARN_UNUSED(ccv_nnc_stream_context_t*) ccv_nnc_stream_signal_get_emitter(const ccv_nnc_stream_signal_t* const signal);
810
/**
811
 * Deallocate the signal.
812
 * @param signal The signal to be destroyed.
813
 */
814
void ccv_nnc_stream_signal_free(ccv_nnc_stream_signal_t* const signal);
815
/**
816
 * Return number of devices.
817
 * @param type The type of devices (CCV_NNC_STREAM_CONTEXT_GPU / CCV_NNC_STREAM_CONTEXT_CPU)
818
 * @return The number of devices.
819
 */
820
CCV_WARN_UNUSED(int) ccv_nnc_device_count(const int type);
821
/**
822
 * Remap a source device as the destination device.
823
 * @param type The type of devices (CCV_NNC_STREAM_CONTEXT_GPU / CCV_NNC_STREAM_CONTEXT_CPU)
824
 * @param source The original device id.
825
 * @param destination The new device id.
826
 * @return 0 if the device remap is successful, -1 if it is not.
827
 */
828
CCV_WARN_UNUSED(int) ccv_nnc_device_remap(const int type, const int source, const int destination);
829
/**
830
 * The neighbor discovery function that will be called with the device id.
831
 */
832
typedef ccv_nnc_stream_context_t*(*ccv_nnc_stream_context_neighbor_discovery_f)(const int device_id, void* const context);
833
/**
834
 * Set the neighbor stream context discovery mechanism. This method exposes how
835
 * neighbor should be defined per stream context. This method is useful for
836
 * commands that operates cross devices and need to find the correct stream
837
 * context for these devices. Stream context itself is bounded to one device
838
 * only.
839
 * @param stream_context The stream context that bounds to a discovery mechanism.
840
 * @param discovery The neighbor discovery function to invoke.
841
 * @param context The associated context with the neighbor discovery function.
842
 */
843
void ccv_nnc_stream_context_set_neighbor_discovery(ccv_nnc_stream_context_t* const stream_context, ccv_nnc_stream_context_neighbor_discovery_f discovery, void* const context);
844
/**
845
 * Find a neighbor stream context on a given device id for current stream context.
846
 * @param stream_context The stream context which we will look for neighbors.
847
 * @param device_id On which device the stream context may exist.
848
 * @return 0 if no stream context found. Otherwise, return the stream context on that device.
849
 */
850
CCV_WARN_UNUSED(ccv_nnc_stream_context_t*) ccv_nnc_stream_context_find_neighbor(ccv_nnc_stream_context_t* const stream_context, const int device_id);
851
852
/** @} */
853
854
/** @} */
855
856
/**
857
 * @defgroup level_2 Level-2 API
858
 * @{
859
 */
860
861
/**
862
 * @defgroup level_2_essentials Essentials
863
 * @{
864
 */
865
866
enum {
867
  CCV_NNC_SHORT_DOT_GRAPH = 0x0, /**< Display a simplified graph. */
868
  CCV_NNC_LONG_DOT_GRAPH  = 0x1, /**< Display a graph that contains all information. */
869
};
870
871
/**
872
 * Opaque pointer holds the concrete graph representation.
873
 */
874
typedef struct ccv_nnc_graph_s ccv_nnc_graph_t;
875
876
/**
877
 * The opaque on stack object hold a reference to an execution node within a graph.
878
 */
879
typedef struct {
880
  int32_t d; // This is int because sometimes I piggy-back on negatives to carry out some internal computations.
881
  ccv_nnc_graph_t* graph;
882
} ccv_nnc_graph_exec_t;
883
884
81.9k
#define CCV_NO_GRAPH_EXEC(exec) ((exec).graph == 0)
885
886
/**
887
 * Create an empty graph.
888
 * Note that all graph mutation methods are not thread-safe.
889
 * You should only operate the graph in serial fashion.
890
 * @return An opaque ccv_nnc_graph_t pointer.
891
 */
892
CCV_WARN_UNUSED(ccv_nnc_graph_t*) ccv_nnc_graph_new(void);
893
/**
894
 * Create a node with specific command execution, as well as its inputs & outputs.
895
 * Underlying, the graph maintains the backing object for the node, and all you get is
896
 * a on-stack object to index the backing object from the graph.
897
 * @param graph The concrete graph.
898
 * @param cmd The wrapped command.
899
 * @param hint The hint for this command.
900
 * @param inputs The input tensors array.
901
 * @param input_size The size of input tensors array.
902
 * @param outputs The output tensors array.
903
 * @param output_size The size of output tensors array.
904
 * @return An on-stack object that references a execution node.
905
 */
906
CCV_WARN_UNUSED(ccv_nnc_graph_exec_t) ccv_nnc_graph_exec_new(ccv_nnc_graph_t* const graph, const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size);
907
/**
908
 * Set the command for an existing execution node.
909
 * @param graph The concrete graph.
910
 * @param exec The execution node reference.
911
 * @param cmd The new wrapped command.
912
 */
913
void ccv_nnc_graph_exec_set(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, const ccv_nnc_cmd_t cmd);
914
/**
915
 * Set hint for an existing execution node.
916
 * @param graph The concrete graph.
917
 * @param exec The execution node reference.
918
 * @param hint The new hint.
919
 */
920
void ccv_nnc_graph_exec_set_hint(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, const ccv_nnc_hint_t hint);
921
/**
922
 * Set input / output tensors for an existing execution node.
923
 * @param graph The concrete graph.
924
 * @param exec The execution node reference.
925
 * @param inputs The input tensors array.
926
 * @param input_size The size of input tensors array.
927
 * @param outputs The output tensors array.
928
 * @param output_size The size of output tensors array.
929
 */
930
void ccv_nnc_graph_exec_set_io(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size);
931
/**
932
 * Concatenate input graph nodes with an output graph node to create a new graph.
933
 * @param graph The concrete graph.
934
 * @param source The execution node reference to connect.
935
 * @param destination The execution node reference connect to.
936
 * @return Non-zero if cannot concat successfully.
937
 */
938
int ccv_nnc_graph_exec_concat(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t source, const ccv_nnc_graph_exec_t destination);
939
/**
940
 * Disconnect input graph nodes with an output graph nodes in this graph.
941
 * @param graph The concrete graph.
942
 * @param source The execution node reference to disconnect.
943
 * @param destination The execution node reference disconnect to.
944
 * @return Non-zero if cannot disjoin successfully.
945
 */
946
int ccv_nnc_graph_exec_disjoin(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t source, const ccv_nnc_graph_exec_t destination);
947
/**
948
 * Count number of exec in the graph.
949
 * @param graph The concrete graph.
950
 * @return The number of execution nodes in the graph.
951
 */
952
int ccv_nnc_graph_exec_count(const ccv_nnc_graph_t* const graph);
953
/**
954
 * Generate output that can be parsed by GraphViz (DOT language).
955
 * @param graph The concrete graph.
956
 * @param flags Either CCV_NNC_SHORT_DOT_GRAPH or CCV_NNC_LONG_DOT_GRAPH
957
 * @param out The output file stream.
958
 */
959
void ccv_nnc_graph_dot(const ccv_nnc_graph_t* const graph, const int flags, FILE* out);
960
/**
961
 * Run the autotune function on all execution node, and assign back with the optimized commands.
962
 * @param graph The concrete graph.
963
 * @param max_workspace_size The maximum allowed extra memory usage.
964
 * @param flags A reserved field for flags.
965
 * @param sources The source execution nodes to begin. 0 uses default sources.
966
 * @param source_size The size of source execution nodes.
967
 * @param destinations The destination execution nodes which we end. 0 uses default destinations.
968
 * @param destination_size The size of destination execution nodes.
969
 */
970
void ccv_nnc_graph_autotune(ccv_nnc_graph_t* const graph, const size_t max_workspace_size, const int flags, const ccv_nnc_graph_exec_t* const sources, const int source_size, const ccv_nnc_graph_exec_t* const destinations, const int destination_size);
971
/**
972
 * Make the graph topsorted, thus, do a topological sort so when run the graph, no additional memory will be allocated.
973
 * Otherwise when we run the graph, we need to allocate some memory on heap to faciliate.
974
 * @param graph The concrete graph.
975
 * @param exec_cvt The execution node assignments will change, and you can give an array to know the changes.
976
 * @param exec_cvt_size The provided conversion array size.
977
 */
978
void ccv_nnc_graph_topsort(ccv_nnc_graph_t* const graph, int* const exec_cvt, const int exec_cvt_size);
979
980
/**
981
 * Opaque pointer holds the graph schedule.
982
 */
983
typedef struct ccv_nnc_graph_static_schedule_s ccv_nnc_graph_static_schedule_t;
984
/**
985
 * Assuming the graph runs from the beginning to the end. Allocate a internal schedule object that will
986
 * run the graph efficiently if it runs from the beginning to the end. It will basically call ccv_nnc_graph_static_schedule
987
 * and save the end result to a internal schedule object to this graph.
988
 * @param graph The concrete graph.
989
 * @param stream_type The type of stream context we are going to use.
990
 */
991
void ccv_nnc_graph_set_default_static_schedule(ccv_nnc_graph_t* const graph, const int stream_type);
992
/**
993
 * Allocate extra streams to make this graph parallel runnable. Note this requires the graph to be topsorted.
994
 * After this is done, you can schedule a graph either on its default stream, or a new stream with the schedule
995
 * object.
996
 * @param graph The concrete graph.
997
 * @param stream_type The type of stream context we are going to use.
998
 * @param sources The source execution nodes to begin. 0 uses default sources.
999
 * @param source_size The size of source execution nodes.
1000
 * @param destinations The destination execution nodes which we end. 0 uses default destinations.
1001
 * @param destination_size The size of destination execution nodes.
1002
 * @return An opaque schedule object that let the graph knows how to run itself efficiently.
1003
 */
1004
CCV_WARN_UNUSED(ccv_nnc_graph_static_schedule_t*) ccv_nnc_graph_static_schedule_new(ccv_nnc_graph_t* const graph, const int stream_type, const ccv_nnc_graph_exec_t* const sources, const int source_size, const ccv_nnc_graph_exec_t* const destinations, const int destination_size);
1005
/**
1006
 * Free a schedule object for a graph.
1007
 * @param schedule The schedule object returned from ccv_nnc_graph_static_schedule_new.
1008
 */
1009
void ccv_nnc_graph_static_schedule_free(ccv_nnc_graph_static_schedule_t* const schedule);
1010
/**
1011
 * Query the default stream for a given graph.
1012
 * @param graph The concrete graph.
1013
 * @return The default stream context.
1014
 */
1015
CCV_WARN_UNUSED(ccv_nnc_stream_context_t*) ccv_nnc_graph_default_stream(const ccv_nnc_graph_t* const graph);
1016
/**
1017
 * Set default sources for a give graph.
1018
 * @param graph The concrete graph.
1019
 * @param sources The source execution nodes to begin.
1020
 * @param source_size The size of source execution nodes.
1021
 */
1022
void ccv_nnc_graph_set_sources(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t* const sources, const int source_size);
1023
/**
1024
 * Get the default source execution nodes pointer.
1025
 * @param graph The concrete graph.
1026
 * @return A pointer to an array of default source execution nodes.
1027
 */
1028
ccv_nnc_graph_exec_t* ccv_nnc_graph_sources(const ccv_nnc_graph_t* const graph);
1029
/**
1030
 * Get the number of default source execution nodes.
1031
 * @param graph The concrete graph.
1032
 * @return The number of default source execution nodes.
1033
 */
1034
int ccv_nnc_graph_source_size(const ccv_nnc_graph_t* const graph);
1035
/**
1036
 * Set default destinations for a give graph.
1037
 * @param graph The concrete graph.
1038
 * @param destinations The destination execution nodes which we end.
1039
 * @param destination_size The size of destination execution nodes.
1040
 */
1041
void ccv_nnc_graph_set_destinations(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t* const destinations, const int destination_size);
1042
/**
1043
 * Get the default destination execution nodes pointer.
1044
 * @param graph The concrete graph.
1045
 * @return A pointer to an array of default destination execution nodes.
1046
 */
1047
ccv_nnc_graph_exec_t* ccv_nnc_graph_destinations(const ccv_nnc_graph_t* const graph);
1048
/**
1049
 * Get the number of default destination execution nodes.
1050
 * @param graph The concrete graph.
1051
 * @return The number of default destination execution nodes.
1052
 */
1053
int ccv_nnc_graph_destination_size(const ccv_nnc_graph_t* const graph);
1054
/**
1055
 * This graph, and its relevant auxiliary objects (opaque to user) are deallocated.
1056
 * @param graph The concrete graph.
1057
 */
1058
void ccv_nnc_graph_free(ccv_nnc_graph_t* const graph);
1059
/**
1060
 * Opaque pointer to the tape of tensors. The tape are used by the while loop.
1061
 */
1062
typedef struct ccv_nnc_tensor_tape_s ccv_nnc_tensor_tape_t;
1063
/**
1064
 * Execute a computation graph with all bells and whistles. Need to supply a tensor tape if it contains backward pass
1065
 * for while loop or branches. With tensor tape, the tensors are versioned, so you can "backpropagate through time".
1066
 * @param graph The concrete graph.
1067
 * @param flags A reserved field for flags.
1068
 * @param sources The source execution nodes array.
1069
 * @param source_size The size of source execution nodes array. 0 uses default sources.
1070
 * @param destinations The destination execution nodes array.
1071
 * @param destination_size The size of destination execution nodes array. 0 uses default destinations.
1072
 * @param tensor_tape An opaque tensor tape object to "backpropagate through time".
1073
 * @param stream_context Which stream this graph will be executed upon.
1074
 * @return CCV_NNC_EXEC_SUCCESS if succeed.
1075
 */
1076
int ccv_nnc_graph_run(ccv_nnc_graph_t* const graph, const int flags, const ccv_nnc_graph_exec_t* const sources, const int source_size, const ccv_nnc_graph_exec_t* const destinations, const int destination_size, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const stream_context);
1077
/**
1078
 * Execute a computation graph with all bells and whistles. Need to supply a tensor tape if it contains backward pass
1079
 * for while loop or branches. With tensor tape, the tensors are versioned, so you can "backpropagate through time".
1080
 * Comparing with ccv_nnc_graph_run method, this method doesn't take sources / destinations node, rather, it takes the
1081
 * schedule object.
1082
 * @param graph The concrete graph.
1083
 * @param flags A reserved field for flags.
1084
 * @param schedule The schedule object specified the sources / destinations and how to efficiently run this.
1085
 * @param tensor_tape An opaque tensor tape object to "backpropagate through time".
1086
 * @param stream_context Which stream this graph will be executed upon.
1087
 * @return CCV_NNC_EXEC_SUCCESS if succeed.
1088
 */
1089
int ccv_nnc_graph_run_with_schedule(ccv_nnc_graph_t* const graph, const int flags, const ccv_nnc_graph_static_schedule_t* const schedule, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const stream_context);
1090
1091
/** @} */
1092
1093
/**
1094
 * @defgroup level_2_others Others
1095
 * @{
1096
 */
1097
1098
/**
1099
 * Set input / output flags for an existing execution node.
1100
 * This must be called after set_io, set additional flags for tensors related to this exec.
1101
 * @param graph The concrete graph.
1102
 * @param exec The execution node reference.
1103
 * @param input_flags The input flags array.
1104
 * @param input_flag_size the size of input flags array, should be the same as input tensors array (or 0).
1105
 * @param output_flags The output flags array.
1106
 * @param output_flag_size the size of output flags array, should be the same as output tensors array (or 0).
1107
 */
1108
void ccv_nnc_graph_exec_set_io_flags(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, const int* const input_flags, const int input_flag_size, const int* const output_flags, const int output_flag_size);
1109
/**
1110
 * Set the pair reference for exec. In backward pass, an execution node's pair node is the forward pass node.
1111
 * @param graph The concrete graph.
1112
 * @param exec The execution node reference.
1113
 * @param pair_exec The pair execution node reference.
1114
 */
1115
void ccv_nnc_graph_exec_pair_with(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, const ccv_nnc_graph_exec_t pair_exec);
1116
/**
1117
 * Add tensor pair that can be used to "carry over". (carry over: passing a tensor from current loop to the next loop).
1118
 * @param graph The concrete graph.
1119
 * @param from The tensor we have output in this loop.
1120
 * @param to The tensor we will use as input in the next loop.
1121
 */
1122
void ccv_nnc_graph_add_carry_over(ccv_nnc_graph_t* const graph, const ccv_nnc_tensor_t* const from, const ccv_nnc_tensor_t* const to);
1123
/**
1124
 * Updates are the tensors that not directly involved in the computation, but its pointers need to get updated
1125
 * along with this exec, thus need to be "update" to other exec nodes.
1126
 * @param graph The concrete graph.
1127
 * @param exec The execution node reference.
1128
 * @param update The tensor need to be updated along the execution node.
1129
 */
1130
void ccv_nnc_graph_exec_add_as_affected(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, ccv_nnc_tensor_t* const update);
1131
1132
/** @} */
1133
1134
/** @} */
1135
1136
/**
1137
 * @defgroup level_3 Level-3 API
1138
 * @{
1139
 */
1140
1141
/**
1142
 * @defgroup level_3_essentials Essentials
1143
 * @{
1144
 */
1145
1146
/**
1147
 * Opaque pointer to the symbolic graph object.
1148
 */
1149
typedef struct ccv_nnc_symbolic_graph_s ccv_nnc_symbolic_graph_t;
1150
1151
/**
1152
 * Opaque pointer to an arena of allocated tensors.
1153
 */
1154
typedef struct ccv_nnc_tensor_arena_s ccv_nnc_tensor_arena_t;
1155
1156
/**
1157
 * Opaque pointer to an arena of allocated execs.
1158
 */
1159
typedef struct ccv_nnc_graph_exec_arena_s ccv_nnc_graph_exec_arena_t;
1160
1161
/**
1162
 * On stack object references a tensor symbol in the symbolic graph.
1163
 */
1164
typedef struct {
1165
  int32_t d;
1166
  const ccv_nnc_symbolic_graph_t* graph;
1167
} ccv_nnc_tensor_symbol_t;
1168
1169
/**
1170
 * On stack object references a execution node symbol in the symbolic graph.
1171
 */
1172
typedef struct {
1173
  int32_t d;
1174
  const ccv_nnc_symbolic_graph_t* graph;
1175
} ccv_nnc_graph_exec_symbol_t;
1176
1177
enum {
1178
  CCV_NNC_TENSOR_SYMBOL_INIT_ZEROS = 0x01, /**< Initialize underlying tensor for the symbol with zeros */
1179
  CCV_NNC_TENSOR_SYMBOL_INIT_ONES = 0x02, /**< Initialize underlying tensor for the symbol with ones */
1180
  CCV_NNC_TENSOR_SYMBOL_TAPE_VAR = 0x04, /**< Mark this as a tape variable (it cannot be folded, will contain flag CCV_TAPE_ALLOC) */
1181
  // The one below is special.
1182
  CCV_NNC_TENSOR_SYMBOL_DEAD = 0x80000000, /**< Mark this tensor symbol as dead, any future usage will cause assertion */
1183
};
1184
1185
151k
#define CCV_NNC_TENSOR_SYMBOL_IS_DEAD(x) ((x) & CCV_NNC_TENSOR_SYMBOL_DEAD)
1186
1187
enum {
1188
  CCV_NNC_GRAPH_EXEC_DEAD = 0x1, /**< Mark this node as dead. */
1189
  CCV_NNC_GRAPH_EXEC_P_WHILE = 0x10, /**< Mark this node keyword is while */
1190
  CCV_NNC_GRAPH_EXEC_CASE_OF = 0x20, /**< Mark this node keyword is case_of */
1191
};
1192
1193
422k
#define CCV_NNC_GRAPH_EXEC_IS_DEAD(x) ((x) & CCV_NNC_GRAPH_EXEC_DEAD)
1194
18.6k
#define CCV_NNC_GRAPH_REF(x) ((x)->_heap_graph_ref ? 
(x)->_heap_graph_ref178
:
(x)->_inline_graph_ref18.4k
)
1195
1196
enum {
1197
  CCV_NNC_NO_TENSOR_SYMBOL = -1, /**< Special symbol reference for no tensor symbol. */
1198
  CCV_NNC_WHILE_COUNT_TENSOR_SYMBOL = -2, /**< Special symbol reference for while loop count tensor. */
1199
};
1200
1201
enum {
1202
  CCV_NNC_NO_GRAPH_EXEC_SYMBOL = -1, /**< Special symbol reference for no exec symbol. */
1203
};
1204
1205
1206
enum {
1207
  CCV_NNC_SYMBOL_TENSOR, /**< Identifier for tensor symbol */
1208
  CCV_NNC_SYMBOL_TENSOR_ALIAS, /**< Identifier for tensor alias symbol */
1209
  CCV_NNC_SYMBOL_GRAPH_EXEC, /**< Identifier for exec symbol */
1210
};
1211
1212
22
#define CCV_NNC_IS_WHILE_COUNT_TENSOR_SYMBOL(d) (((uint32_t)(d) & 0xf) == 0xe)
1213
1214
/**
1215
 * A data structure to pass in a pair of tensor symbols.
1216
 */
1217
typedef struct {
1218
  ccv_nnc_tensor_symbol_t source; /**< The 'from' tensor symbol. */
1219
  ccv_nnc_tensor_symbol_t destination; /**< The 'to' tensor symbol. */
1220
} ccv_nnc_tensor_symbol_map_t;
1221
1222
/**
1223
 * Create a new empty symbolic graph. It is an opaque data structure that maintains the whole graph of computation in its symbolic form.
1224
 * Note that all graph mutation methods are not thread-safe. You should only operate the graph in serial fashion.
1225
 */
1226
CCV_WARN_UNUSED(ccv_nnc_symbolic_graph_t*) ccv_nnc_symbolic_graph_new(void);
1227
/**
1228
 * Create an tensor symbol (thus, with no actual memory space allocation) in a symbolic graph.
1229
 * @param graph The symbolic graph.
1230
 * @param info The tensor parameters.
1231
 * @param name The name of the tensor symbol, it is optional.
1232
 * @return A tensor symbol reference.
1233
 */
1234
CCV_WARN_UNUSED(ccv_nnc_tensor_symbol_t) ccv_nnc_tensor_symbol_new(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_param_t info, const char* const name);
1235
/**
1236
 * Create an alias to the tensor symbol as tensor view (thus, pointing to the same memory region, but with a different header info and offset).
1237
 * @param graph The symbolic graph.
1238
 * @param tensor_symbol The tensor symbol we are going to reference to.
1239
 * @param ofs The offset on each of the dimension.
1240
 * @param inc The line size of each dimension.
1241
 * @param info The tensor parameters for the new alias.
1242
 * @param name The name of the tensor symbol alias, it is optional.
1243
 * @return A tensor symbol alias reference.
1244
 */
1245
CCV_WARN_UNUSED(ccv_nnc_tensor_symbol_t) ccv_nnc_tensor_symbol_alias_new(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor_symbol, const int ofs[CCV_NNC_MAX_DIM_ALLOC], const int inc[CCV_NNC_MAX_DIM_ALLOC], const ccv_nnc_tensor_param_t info, const char* const name);
1246
/**
1247
 * Manually delete a tensor symbol off the symbolic graph.
1248
 * @param graph The symbolic graph.
1249
 * @param tensor The tensor symbol reference.
1250
 */
1251
void ccv_nnc_tensor_symbol_free(ccv_nnc_symbolic_graph_t* const graph, ccv_nnc_tensor_symbol_t tensor);
1252
/**
1253
 * Create a graph execution node (an operation that takes a set of inputs and generates a set of outputs).
1254
 * @param graph The symbolic graph.
1255
 * @param cmd The wrapped command.
1256
 * @param inputs The input tensor symbols array.
1257
 * @param input_size The size of input tensor symbols array.
1258
 * @param outputs The output tensor symbols array.
1259
 * @param output_size The size of output tensor symbols array.
1260
 * @param name The name of this execution node, optional.
1261
 * @return The execution node symbol reference.
1262
 */
1263
ccv_nnc_graph_exec_symbol_t ccv_nnc_graph_exec_symbol_new(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_cmd_t cmd, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, const ccv_nnc_tensor_symbol_t* const outputs, const int output_size, const char* const name);
1264
/**
1265
 * ccv_nnc_graph_exec_symbol_new defaults to use `ccv_nnc_hint_auto` find the best hints for a set of inputs / outputs.
1266
 * However, you can also set your own hints.
1267
 * @param graph The symbolic graph.
1268
 * @param exec The execution node symbol reference.
1269
 * @param hint The hint for the command.
1270
 */
1271
void ccv_nnc_graph_exec_symbol_set_hint(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t exec, const ccv_nnc_hint_t hint);
1272
/**
1273
 * Manually delete a exec symbol off the symbolic graph.
1274
 * @param graph The symbolic graph.
1275
 * @param symbol The execution node symbol reference.
1276
 */
1277
void ccv_nnc_graph_exec_symbol_free(ccv_nnc_symbolic_graph_t* const graph, ccv_nnc_graph_exec_symbol_t symbol);
1278
enum {
1279
  CCV_NNC_AUTOGEN_ALL_EXECS = 0x1, /**< Automatic concatenation for all execution nodes */
1280
  CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS = 0x2, /**< Automatically find all source and destination nodes. */
1281
};
1282
/**
1283
 * Automatic concatenate these nodes together based on its inputs / outputs.
1284
 * Imagining this is to generate the execution flow based on input tensors and output tensors.
1285
 * nil for execs and 0 for exec_size means to loop over all the execs on the graph and autogen.
1286
 * @param graph The symbolic graph.
1287
 * @param execs The execution nodes array.
1288
 * @param exec_size The size of execution nodes array.
1289
 * @param flags The flags determines what operations to perform when concatenating.
1290
 * @return non-zero if cannot figure out.
1291
 */
1292
int ccv_nnc_graph_exec_symbol_autogen(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t* const execs, const int exec_size, const int flags);
1293
/**
1294
 * Set the default sources for a symbolic graph.
1295
 * @param graph The symbolic graph.
1296
 * @param sources The source execution nodes array.
1297
 * @param source_size The size of source execution nodes array.
1298
 */
1299
void ccv_nnc_symbolic_graph_set_sources(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size);
1300
/**
1301
 * Add one node to the default sources for a symbolic graph.
1302
 * @param graph The symbolic graph.
1303
 * @param source The source execution node.
1304
 */
1305
void ccv_nnc_symbolic_graph_add_source(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t source);
1306
/**
1307
 * Get the pointer to the default sources.
1308
 * @param graph The symbolic graph.
1309
 * @return The pointer to the source execution nodes array.
1310
 */
1311
ccv_nnc_graph_exec_symbol_t* ccv_nnc_symbolic_graph_sources(const ccv_nnc_symbolic_graph_t* const graph);
1312
/**
1313
 * Get the size of the default source nodes array.
1314
 * @param graph The symbolic graph.
1315
 * @return The size of the default source nodes array.
1316
 */
1317
int ccv_nnc_symbolic_graph_source_size(const ccv_nnc_symbolic_graph_t* const graph);
1318
/**
1319
 * Set the default destinations for a symbolic graph.
1320
 * @param graph The symbolic graph.
1321
 * @param destinations The destination execution nodes array.
1322
 * @param destination_size The size of destination execution nodes array.
1323
 */
1324
void ccv_nnc_symbolic_graph_set_destinations(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size);
1325
/**
1326
 * Add one node to the default destinations for a symbolic graph.
1327
 * @param graph The symbolic graph.
1328
 * @param destination The destination execution node.
1329
 */
1330
void ccv_nnc_symbolic_graph_add_destination(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t destination);
1331
/**
1332
 * Get the pointer to the default destinations.
1333
 * @param graph The symbolic graph.
1334
 * @return The pointer to the destinationsexecution nodes array.
1335
 */
1336
ccv_nnc_graph_exec_symbol_t* ccv_nnc_symbolic_graph_destinations(const ccv_nnc_symbolic_graph_t* const graph);
1337
/**
1338
 * Get the size of the default destination nodes array.
1339
 * @param graph The symbolic graph.
1340
 * @return The size of the default destination nodes array.
1341
 */
1342
int ccv_nnc_symbolic_graph_destination_size(const ccv_nnc_symbolic_graph_t* const graph);
1343
/**
1344
 * Generate output that can be parsed by GraphViz (DOT language).
1345
 * @param graph The symbolic graph.
1346
 * @param flags Either CCV_NNC_SHORT_DOT_GRAPH or CCV_NNC_LONG_DOT_GRAPH
1347
 * @param out The output file stream.
1348
 */
1349
void ccv_nnc_symbolic_graph_dot(const ccv_nnc_symbolic_graph_t* const graph, const int flags, FILE* out);
1350
1351
/**
1352
 * The data structure to wrap a tensor symbol and a concrete tensor together.
1353
 */
1354
typedef struct {
1355
  ccv_nnc_tensor_symbol_t symbol;
1356
  const ccv_nnc_tensor_t* tensor;
1357
} ccv_nnc_tensor_bind_t;
1358
1359
typedef struct {
1360
  void* (*alloc)(const int type, const int pinned_mem /* Currently only used to annotate CCV_TENSOR_PINNED_MEM, future can be expanded to generic flags */, const size_t size, void* const arg);
1361
  void (*free)(void* const ptr, void* const arg);
1362
} ccv_nnc_symbolic_graph_compile_allocator_vtab_t;
1363
1364
typedef struct {
1365
  const ccv_nnc_symbolic_graph_compile_allocator_vtab_t* isa;
1366
  struct {
1367
    void* alloc;
1368
    void* free;
1369
  } context;
1370
} ccv_nnc_symbolic_graph_compile_allocator_t;
1371
1372
typedef struct {
1373
  ccv_nnc_symbolic_graph_compile_allocator_t allocator;
1374
} ccv_nnc_symbolic_graph_compile_param_t;
1375
1376
/**
1377
 * Compile a symbolic graph into a graph that can be executed, and a set of tensors (opaque data structure tensor arena) are allocated based on which tensor symbols are the input and which are the outputs. The tensor allocation is done to minimize the required storage.
1378
 * tensor_binds provide custom binding for these tensors. You still responsible to manage the life-time of these tensors.
1379
 * outputs marks the tensor symbols that need to be kept til the end of the graph.
1380
 * @param graph The symbolic graph.
1381
 * @param compile_params A ccv_nnc_symbolic_graph_compile_param_t struct defines compilation parameters.
1382
 * @param tensor_binds The binding array (a tensor symbol and a concrete tensor). We replace everywhere that uses the tensor symbol with the concrete tensor.
1383
 * @param tensor_bind_size The size of the binding array.
1384
 * @param outputs The output tensor symbols that we want to keep the value.
1385
 * @param output_size The size of the output tensor symbols array.
1386
 * @param sources The sources for the graph.
1387
 * @param source_size The size of the sources array. 0 to use default sources.
1388
 * @param destinations The destinations for the graph.
1389
 * @param destination_size The size of the destinations array. 0 to use default destinations.
1390
 * @param graph_ref The pointer to store the generated concrete graph.
1391
 * @param tensor_arena_ref The pointer to store ccv_nnc_tensor_arena_t.
1392
 * @param graph_exec_arena_ref The pointer to store ccv_nnc_graph_exec_arena_t.
1393
 */
1394
void ccv_nnc_symbolic_graph_compile(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_symbolic_graph_compile_param_t compile_params, const ccv_nnc_tensor_bind_t* const tensor_binds, const int tensor_bind_size, const ccv_nnc_tensor_symbol_t* const outputs, const int output_size, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size, ccv_nnc_graph_t** const graph_ref, ccv_nnc_tensor_arena_t** const tensor_arena_ref, ccv_nnc_graph_exec_arena_t** const graph_exec_arena_ref);
1395
/**
1396
 * Free the symbolic graph and its associated memory. Note that if you compiled a graph / tensor arena out of this symbolic graph, these won't be free'd.
1397
 * @param graph The symbolic graph.
1398
 */
1399
void ccv_nnc_symbolic_graph_free(ccv_nnc_symbolic_graph_t* const graph);
1400
/**
1401
 * Find corresponding tensor by a symbol from the tensor arena.
1402
 * @param tensor_arena The tensor arena object generated through compilation,
1403
 * @param symbol The tensor symbol reference. Because tensor symbol reference is on stack. It can still be used even the original symbolic graph is free'd.
1404
 * @return A concrete tensor from the tensor arena.
1405
 */
1406
CCV_WARN_UNUSED(ccv_nnc_tensor_t*) ccv_nnc_tensor_from_symbol(const ccv_nnc_tensor_arena_t* const tensor_arena, const ccv_nnc_tensor_symbol_t symbol);
1407
/**
1408
 * Bind a tensor to a symbol. You still responsible to manage the life-time of the tensor to make sure it is not freed until everything is done.
1409
 * @param tensor_arena The tensor arena object generated through compilation.
1410
 * @param symbol The tensor symbol reference. Because tensor symbol reference is on stack. It can still be used even the original symbolic graph is free'd.
1411
 * @param tensor The new tensor to bind to.
1412
 */
1413
void ccv_nnc_tensor_bind_symbol(ccv_nnc_tensor_arena_t* const tensor_arena, const ccv_nnc_tensor_symbol_t symbol, const ccv_nnc_tensor_t* const tensor);
1414
/**
1415
 * Clear existing bindings on the tensor arena.
1416
 * @param tensor_arena The tensor arena object generated through compilation to clear bindings.
1417
 */
1418
void ccv_nnc_tensor_arena_clear_bindings(ccv_nnc_tensor_arena_t* const tensor_arena);
1419
/**
1420
 * Free the data buffer of the tensor arena.
1421
 * @param tensor_arena The tensor arena object generated through compilation.
1422
 */
1423
void ccv_nnc_tensor_arena_buffer_free(ccv_nnc_tensor_arena_t* const tensor_arena);
1424
/**
1425
 * Free the opaque tensor arena structure.
1426
 * @param tensor_arena The tensor arena object generated through compilation.
1427
 */
1428
void ccv_nnc_tensor_arena_free(ccv_nnc_tensor_arena_t* const tensor_arena);
1429
/**
1430
 * Find corresponding graph exec by a exec symbol from graph exec arena.
1431
 * @param graph_exec_arena The graph execution node arena object generated through compilation,
1432
 * @param symbol The execution node symbol reference. Because execution node symbol reference is on stack. It can still be used even the original symbolic graph is free'd.
1433
 * @return A execution node reference to the concrete graph.
1434
 */
1435
CCV_WARN_UNUSED(ccv_nnc_graph_exec_t) ccv_nnc_graph_exec_from_symbol(const ccv_nnc_graph_exec_arena_t* const graph_exec_arena, const ccv_nnc_graph_exec_symbol_t symbol);
1436
/**
1437
 * Return the node that can drive all the source nodes from the compilation.
1438
 * @param graph_exec_arena The graph execution node arena object generated through compilation,
1439
 * @return A execution node reference that is the source.
1440
 */
1441
CCV_WARN_UNUSED(ccv_nnc_graph_exec_t) ccv_nnc_graph_exec_source(const ccv_nnc_graph_exec_arena_t* const graph_exec_arena);
1442
/**
1443
 * Return the node that can drain all the destination nodes from the compilation.
1444
 * @param graph_exec_arena The graph execution node arena object generated through compilation,
1445
 * @return A execution node reference that is the destination.
1446
 */
1447
CCV_WARN_UNUSED(ccv_nnc_graph_exec_t) ccv_nnc_graph_exec_destination(const ccv_nnc_graph_exec_arena_t* const graph_exec_arena);
1448
/**
1449
 * Free the opaque graph exec arena structure.
1450
 * @param graph_exec_arena The graph execution node arena object generated through compilation,
1451
 */
1452
void ccv_nnc_graph_exec_arena_free(ccv_nnc_graph_exec_arena_t* const graph_exec_arena);
1453
/**
1454
 * Write symbolic graph to disk, along with some binding tensors.
1455
 * @param graph The symbolic graph.
1456
 * @param tensor_binds The binding array (pair of tensor symbol and concrete tensor).
1457
 * @param tensor_bind_size The size of the binding array.
1458
 * @param fn The file name.
1459
 */
1460
void ccv_nnc_symbolic_graph_write(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_bind_t* const tensor_binds, const int tensor_bind_size, const char* const fn);
1461
/**
1462
 * Read symbolic graph from disk, with some binding tensors.
1463
 * @param fn The file name.
1464
 * @param graph_ref The pointer to store symbolic graph.
1465
 * @param tensor_binds_ref The pointer to store the binding array.
1466
 * @param tensor_bind_size_ref The pointer to store the size of the binding array.
1467
 */
1468
void ccv_nnc_symbolic_graph_read(const char* const fn, ccv_nnc_symbolic_graph_t** const graph_ref, ccv_nnc_tensor_bind_t** const tensor_binds_ref, int* const tensor_bind_size_ref);
1469
1470
/** @} */
1471
1472
/**
1473
 * @defgroup level_3_others Others
1474
 * @{
1475
 */
1476
1477
/**
1478
 * Return the symbol it alias to.
1479
 * @param graph The symbolic graph.
1480
 * @param tensor_symbol The tensor symbol alias.
1481
 * @return A tensor symbol reference to the original tensor symbol. If this symbol has no reference, return NO_SYMBOL (.graph = 0)
1482
 */
1483
CCV_WARN_UNUSED(ccv_nnc_tensor_symbol_t) ccv_nnc_tensor_symbol_alias_to(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor_symbol);
1484
/**
1485
 * Set the tensor symbol parameters.
1486
 * @param graph The symbolic graph.
1487
 * @param tensor The tensor symbol reference.
1488
 * @param info The new tensor parameters.
1489
 * @return non-zero if encountered errors.
1490
 */
1491
int ccv_nnc_tensor_symbol_set(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor, const ccv_nnc_tensor_param_t info);
1492
/**
1493
 * Get the parameters for a tensor symbol.
1494
 * @param graph The symbolic graph.
1495
 * @param tensor The tensor symbol reference.
1496
 * @return The tensor parameters.
1497
 */
1498
CCV_WARN_UNUSED(ccv_nnc_tensor_param_t) ccv_nnc_tensor_symbol_params(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor);
1499
/**
1500
 * Set the tensor symbol alias parameters.
1501
 * @param graph The symbolic graph.
1502
 * @param tensor The tensor symbol reference.
1503
 * @param ofs The offset on each of the dimension.
1504
 * @param inc The line size of each dimension.
1505
 * @return non-zero if it is not a tensor alias.
1506
 */
1507
int ccv_nnc_tensor_symbol_alias_set(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor, const int ofs[CCV_NNC_MAX_DIM_ALLOC], const int inc[CCV_NNC_MAX_DIM_ALLOC]);
1508
/**
1509
 * Get the parameters for a tensor symbol.
1510
 * @param graph The symbolic graph.
1511
 * @param tensor The tensor symbol reference.
1512
 * @param ofs The offset on each of the dimension.
1513
 * @param inc The line size of each dimension.
1514
 * @return non-zero if it is not a tensor alias.
1515
 */
1516
int ccv_nnc_tensor_symbol_alias_params(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor, int ofs[CCV_NNC_MAX_DIM_ALLOC], int inc[CCV_NNC_MAX_DIM_ALLOC]);
1517
/**
1518
 * Set the flags for this tensor symbol. The flags are only used for symbol, not for tensor.
1519
 * @param graph The symbolic graph.
1520
 * @param tensor The tensor symbol reference.
1521
 * @param flags A reserved field for flags.
1522
 */
1523
int ccv_nnc_tensor_symbol_set_flags(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor, const int flags);
1524
/**
1525
 * Get all the flags for a tensor.
1526
 * @param graph The symbolic graph.
1527
 * @param tensor The tensor symbol reference.
1528
 */
1529
CCV_WARN_UNUSED(int) ccv_nnc_tensor_symbol_flags(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor);
1530
/**
1531
 * Set the cmd of this exec symbol.
1532
 * @param graph The symbolic graph.
1533
 * @param exec The execution node symbol reference.
1534
 * @param cmd The new wrapped command.
1535
 */
1536
void ccv_nnc_graph_exec_symbol_set(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t exec, const ccv_nnc_cmd_t cmd);
1537
/**
1538
 * Return the command on this exec symbol.
1539
 * @param graph The symbolic graph.
1540
 * @param exec The execution node symbol reference.
1541
 * @return The wrapped command.
1542
 */
1543
CCV_WARN_UNUSED(ccv_nnc_cmd_t) ccv_nnc_graph_exec_symbol_cmd(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t exec);
1544
/**
1545
 * Set the inputs / outputs for a exec symbol.
1546
 * @param graph The symbolic graph.
1547
 * @param exec The execution node symbol reference.
1548
 * @param inputs The input tensor symbols array.
1549
 * @param input_size The size of input tensor symbols array.
1550
 * @param outputs The output tensor symbols array.
1551
 * @param output_size The size of output tensor symbols array.
1552
 */
1553
void ccv_nnc_graph_exec_symbol_set_io(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t exec, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, const ccv_nnc_tensor_symbol_t* const outputs, const int output_size);
1554
/**
1555
 * Manually concatenate input node with an output graph node.
1556
 * @param graph The symbolic graph.
1557
 * @param source The source execution node symbol to connect.
1558
 * @param destination The destination execution node symbol connect to.
1559
 * @return non-zero if cannot concat successfully.
1560
 */
1561
int ccv_nnc_graph_exec_symbol_concat(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t source, const ccv_nnc_graph_exec_symbol_t destination);
1562
/**
1563
 * Manually disconnect input node with an output graph node for this graph.
1564
 * @param graph The symbolic graph.
1565
 * @param source The source execution node symbol to disconnect.
1566
 * @param destination The destination execution node symbol disconnect to.
1567
 * @return non-zero if cannot disjoin successfully.
1568
 */
1569
int ccv_nnc_graph_exec_symbol_disjoin(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t source, const ccv_nnc_graph_exec_symbol_t destination);
1570
/**
1571
 * Number of exec symbols.
1572
 * @param graph The symbolic graph.
1573
 */
1574
CCV_WARN_UNUSED(int) ccv_nnc_graph_exec_symbol_count(const ccv_nnc_symbolic_graph_t* const graph);
1575
/**
1576
 * Number of active exec symbols.
1577
 * @param graph The symbolic graph.
1578
 * @param type The type of op, can be CCV_NNC_SYMBOL_TENSOR, CCV_NNC_SYMBOL_GRAPH_EXEC (will error out on CCV_NNC_SYMBOL_TENSOR_ALIAS)
1579
 */
1580
CCV_WARN_UNUSED(int) ccv_nnc_symbolic_graph_active_symbol_count(const ccv_nnc_symbolic_graph_t* const graph, const int type);
1581
/**
1582
 * Substitution function. Given an execution node symbol and a command, return a new command.
1583
 */
1584
typedef ccv_nnc_cmd_t(*ccv_nnc_symbolic_graph_subst_f)(const ccv_nnc_graph_exec_symbol_t symbol, const ccv_nnc_cmd_t cmd);
1585
/**
1586
 * Generate a duplicate of the provided graph.
1587
 * While generating the duplicate, it calls the function pointer to re-process the node type.
1588
 * @param graph The symbolic graph.
1589
 * @param subst The substitution function.
1590
 * @return The duplicated symbolic graph.
1591
 */
1592
CCV_WARN_UNUSED(ccv_nnc_symbolic_graph_t*) ccv_nnc_symbolic_graph_dup(const ccv_nnc_symbolic_graph_t* const graph, ccv_nnc_symbolic_graph_subst_f subst);
1593
/**
1594
 * Number of tensor symbols.
1595
 * @param graph The symbolic graph.
1596
 */
1597
CCV_WARN_UNUSED(int) ccv_nnc_tensor_symbol_count(const ccv_nnc_symbolic_graph_t* const graph);
1598
/**
1599
 * Compute all the tensor shapes within this graph.
1600
 * @param graph The symbolic graph.
1601
 * @param sources The sources for the graph.
1602
 * @param source_size The size of the sources array. 0 to use default sources.
1603
 * @param destinations The destinations for the graph.
1604
 * @param destination_size The size of the destinations array. 0 to use default destinations.
1605
 */
1606
void ccv_nnc_symbolic_graph_tensor_auto(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size);
1607
/**
1608
 * For a given tensor symbol, this method resolves to its local reference inside the given graph.
1609
 * This is related to the sub-graph of symbolic graphs. A tensor symbol in the sub-graph can still have a
1610
 * representation in the parent graph. This method used to find the local reference in any graph.
1611
 * @param graph The symbolic graph.
1612
 * @param tensor_symbol The tensor symbol we want to resolve.
1613
 * @return A tensor symbol reference in the given graph.
1614
 */
1615
CCV_WARN_UNUSED(ccv_nnc_tensor_symbol_t) ccv_nnc_tensor_symbol_resolve(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor_symbol);
1616
/**
1617
 * Pass graph's tensor symbol into its sub graph. We will make the connection that the source tensor
1618
 * symbol in the source symbolic graph is the destination tensor symbol in the destination symbolic graph.
1619
 * The reason to do this inference is because a tensor symbol is local to a symbolic graph under the hood.
1620
 * Although you can use tensor symbols from different graphs directly (it calls this method or the resolve
1621
 * method above when create an execution node symbol), sometimes you need this method to do it manually.
1622
 * @param src_graph The source symbolic graph.
1623
 * @param dest_graph The destination symbolic graph.
1624
 * @param src_tensor_symbol The tensor symbol we want to resolve.
1625
 * @param dest_tensor_symbol The tensor symbol we want to resolve.
1626
 */
1627
void ccv_nnc_tensor_symbol_hookup(ccv_nnc_symbolic_graph_t* const src_graph, ccv_nnc_symbolic_graph_t* const dest_graph, const ccv_nnc_tensor_symbol_t src_tensor_symbol, const ccv_nnc_tensor_symbol_t dest_tensor_symbol);
1628
/**
1629
 * Set bypasses for a tensor symbol.
1630
 * For case..of graphs, if the condition doesn't meet, we will skip the execution of a sub-graph.
1631
 * However, in that case, we cannot express easily which output tensor corresponds to which input tensor.
1632
 * This methods provides the way.
1633
 * @param graph The symbolic graph.
1634
 * @param symbol_map The pair of tensors array, source is the input tensor, destination is the output tensor.
1635
 * @param symbol_map_size The size of the tensor pairs array.
1636
 */
1637
void ccv_nnc_tensor_symbol_set_bypasses(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_map_t* const symbol_map, const int symbol_map_size);
1638
/**
1639
 * Fetch input / output for an exec symbol. For efficiency consideration, this returns pointer directly.
1640
 * @param graph The symbolic graph.
1641
 * @param symbol The execution node symbol reference.
1642
 * @param inputs The pointer to store input tensor symbols array.
1643
 * @param input_size The pointer to store the size of input tensor symbols array.
1644
 * @param outputs The pointer to store output tensor symbols array.
1645
 * @param output_size The pointer to store the size of output tensor symbols array.
1646
 */
1647
void ccv_nnc_graph_exec_symbol_io(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t symbol, const int** const inputs, int* const input_size, const int** const outputs, int* const output_size);
1648
/**
1649
 * Replace a input / output tensor symbol on an exec symbol.
1650
 * @param graph The symbolic graph.
1651
 * @param symbol The execution node symbol reference.
1652
 * @param old_symbol The old tensor symbol to be replaced.
1653
 * @param new_symbol The new tensor symbol on input / output.
1654
 */
1655
void ccv_nnc_graph_exec_symbol_replace_io(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t symbol, const ccv_nnc_tensor_symbol_t old_symbol, const ccv_nnc_tensor_symbol_t new_symbol);
1656
/**
1657
 * Which exec symbol this is connected to. For efficiency consideration, this returns pointer directly.
1658
 * @param graph The symbolic graph.
1659
 * @param symbol The execution node symbol reference.
1660
 * @param tos The pointer to store outgoing indexes of the execution nodes.
1661
 * @param to_size the pointer to store the number of outgoing indexes.
1662
 */
1663
void ccv_nnc_graph_exec_symbol_to(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t symbol, const int** const tos, int* const to_size);
1664
/**
1665
 * Find the size allocated on the opaque tensor arena structure.
1666
 * @param tensor_arena The tensor arena object generated through compilation.
1667
 * @return The total allocated size in bytes.
1668
 */
1669
CCV_WARN_UNUSED(uint64_t) ccv_nnc_tensor_arena_size(const ccv_nnc_tensor_arena_t* const tensor_arena);
1670
/**
1671
 * Query whether a set of sources are the ancestors to a set of destination nodes.
1672
 * @param graph The symbolic graph.
1673
 * @param sources The exec sources to check whether they can reach some of the destinations.
1674
 * @param source_size How many sources in the source list.
1675
 * @param destinations The exec destinations to check whether sources can reach.
1676
 * @param destination_size How many destinations in the destination list.
1677
 * @param bitmask Bit return value, each bit represents a source, and 1 meant it can reach some of the destinations.
1678
 */
1679
void ccv_nnc_symbolic_graph_sources_to_destinations(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size, uint64_t* const bitmask);
1680
/**
1681
 * Re-init the tensor arena with updated symbolic graph. This won't work if the symbolic graph requires
1682
 * larger tensors than what's available. Use this method properly, you can avoid re-compile a graph
1683
 * just because some tensor shape changed.
1684
 * @param tensor_arena The tensor arena object generated through compilation.
1685
 * @param graph The updated symbolic graph with different tensor shape.
1686
 * @return 0 if successful, -1 if the tensor arena doesn't have enough space to just re-init.
1687
 */
1688
int ccv_nnc_tensor_arena_reinit(ccv_nnc_tensor_arena_t* const tensor_arena, const ccv_nnc_symbolic_graph_t* const graph);
1689
/**
1690
 * Re-init the graph exec arena with updated symbolic graph. This updated some hyper-parameters of
1691
 * executions to match the updated symbolic graph.
1692
 * @param graph_exec_arena The graph exec arena object provided mapping between symbolic and concrete graph.
1693
 * @param graph The concrete graph generated through compile method.
1694
 * @param symbolic_graph The updated symbolic graph.
1695
 */
1696
void ccv_nnc_graph_exec_reinit(ccv_nnc_graph_exec_arena_t* const graph_exec_arena, ccv_nnc_graph_t* const graph, const ccv_nnc_symbolic_graph_t* const symbolic_graph);
1697
/**
1698
 * Function prototype for tensor symbol creation callback.
1699
 */
1700
typedef void(*ccv_nnc_tensor_symbol_new_hook_f)(void* context, const ccv_nnc_tensor_symbol_t symbol, const ccv_nnc_tensor_param_t info, const char* const name);
1701
/**
1702
 * Hook into the call to ccv_nnc_tensor_symbol_new, return previous provided context if call into this method.
1703
 * @param graph The symbolic graph.
1704
 * @param hook The function to be called if a new tensor symbol created.
1705
 * @param context The context associated with the callback function.
1706
 */
1707
void* ccv_nnc_tensor_symbol_new_hook(ccv_nnc_symbolic_graph_t* const graph, ccv_nnc_tensor_symbol_new_hook_f hook, void* context);
1708
/**
1709
 * Function prototype for tensor symbol alias creation callback.
1710
 */
1711
typedef void(*ccv_nnc_tensor_symbol_alias_new_hook_f)(void* context, const ccv_nnc_tensor_symbol_t symbol, const ccv_nnc_tensor_symbol_t from_symbol, const int ofs[CCV_NNC_MAX_DIM_ALLOC], const int inc[CCV_NNC_MAX_DIM_ALLOC], const ccv_nnc_tensor_param_t info, const char* const name);
1712
/**
1713
 * Hook into the call to ccv_nnc_tensor_symbol_alias_new, return previous provided context if call into this method.
1714
 * @param graph The symbolic graph.
1715
 * @param hook The function to be called if a new tensor symbol alias created.
1716
 * @param context The context associated with the callback function.
1717
 */
1718
void* ccv_nnc_tensor_symbol_alias_new_hook(ccv_nnc_symbolic_graph_t* const graph, ccv_nnc_tensor_symbol_alias_new_hook_f hook, void* context);
1719
/**
1720
 * Set the pair reference for tensor symbols. Peer reference for tensor symbols has very specific meanings.
1721
 * For a backward pass involves sub-graphs. The commands in the sub-graph could reference to tensor symbols of
1722
 * a different graph (its forward pass graph). That is not allowed (two graph has no ancestral relationship
1723
 * cannot share a tensor symbol). So we create a new tensor symbol, but set the pair reference.
1724
 * @param graph The symbolic graph.
1725
 * @param tensor_symbol The tensor symbol in the current graph.
1726
 * @param pair_tensor_symbol The tensor symbol in the pair graph.
1727
 */
1728
void ccv_nnc_tensor_symbol_pair_with(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t tensor_symbol, const ccv_nnc_tensor_symbol_t pair_tensor_symbol);
1729
/**
1730
 * Function prototype for execution node symbol creation callback.
1731
 */
1732
typedef void(*ccv_nnc_graph_exec_symbol_new_hook_f)(void* context, const ccv_nnc_graph_exec_symbol_t symbol, const ccv_nnc_cmd_t cmd, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, const ccv_nnc_tensor_symbol_t* const outputs, const int output_size, const char* const name);
1733
/**
1734
 * Hook into the call to ccv_nnc_graph_exec_symbol_new, return previous provided context if call into this method.
1735
 * @param graph The symbolic graph.
1736
 * @param hook The function to be called if a new execution node symbol created.
1737
 * @param context The context associated with the callback function.
1738
 */
1739
void* ccv_nnc_graph_exec_symbol_new_hook(ccv_nnc_symbolic_graph_t* const graph, ccv_nnc_graph_exec_symbol_new_hook_f hook, void* context);
1740
/**
1741
 * Set the pair reference for exec. This is very similar to the one for concrete graph. A pair reference
1742
 * of a backward pass execution node is its forward pass counterpart.
1743
 * @param graph The symbolic graph.
1744
 * @param exec_symbol The execution node symbol in the current graph.
1745
 * @param pair_exec_symbol The pairing execution node symbol.
1746
 */
1747
void ccv_nnc_graph_exec_symbol_pair_with(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t exec_symbol, const ccv_nnc_graph_exec_symbol_t pair_exec_symbol);
1748
1749
/** @} */
1750
1751
/** @} */
1752
1753
/**
1754
 * @defgroup level_3_5 Level-3.5 API
1755
 * @{
1756
 */
1757
1758
/**
1759
 * @defgroup level_3_5_autograd Automatic Differentiation
1760
 * @{
1761
 */
1762
1763
/**
1764
 * Compute the backward graph, assuming the provided symbolic graph only contain the "forward" part from sources to destinations.
1765
 * This effectively is called the "autograd" or automatic differentiation process (specifically, "reverse AD") in other libs.
1766
 * For a expression y = f(x), to compute dx, x is the wrt_symbol, y is the f_symbol.
1767
 * @param graph The symbolic graph.
1768
 * @param f_symbols The tensor symbols array of the result (or loss).
1769
 * @param f_symbol_size The size of the f symbols array.
1770
 * @param wrt_symbols The tensor symbols array of the inputs.
1771
 * @param wrt_symbol_size The size of the wrt symbols array.
1772
 * @param sources The source execution nodes array for the computation.
1773
 * @param source_size The size of the source nodes array.
1774
 * @param destinations The destination execution nodes array for the computation.
1775
 * @param destination_size The size of the destination nodes array.
1776
 */
1777
void ccv_nnc_symbolic_graph_backward(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t* const f_symbols, const int f_symbol_size, const ccv_nnc_tensor_symbol_t* const wrt_symbols, const int wrt_symbol_size, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size);
1778
/**
1779
 * Get the symbol that contains the gradient. The list will be flushed if the ccv_nnc_symbolic_graph_backward function is called again.
1780
 * @param graph The symbolic graph.
1781
 * @param symbol The tensor symbol we want to retrieve its gradient (must be one of the wrt symbols or the f symbols).
1782
 * @return A tensor symbol that represents the gradient.
1783
 */
1784
CCV_WARN_UNUSED(ccv_nnc_tensor_symbol_t) ccv_nnc_tensor_symbol_for_backward(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t symbol);
1785
/**
1786
 * Get the execution node symbol for a tensor symbol. This used to retrieve the execution node for a gradient tensor symbol.
1787
 * @param graph The symbolic graph.
1788
 * @param symbol The tensor symbol that represents the gradient (must be one of the wrt symbols).
1789
 * @return A execution node symbol that generates the gradient.
1790
 */
1791
CCV_WARN_UNUSED(ccv_nnc_graph_exec_symbol_t) ccv_nnc_graph_exec_symbol_for_backward(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t symbol);
1792
1793
/** @} */
1794
1795
/**
1796
 * @defgroup level_3_5_while While Loop
1797
 * @{
1798
 */
1799
1800
/**
1801
 * @page symbolic_while Construct a "while" loop in a symbolic graph
1802
 *
1803
 * (This document was written in 2016, since then, Caffe2 added support for While loop (as sub-graph), similar
1804
 * implementation added for ONNX as well.)
1805
 *
1806
 * In NNC, a computation graph cannot allow cycles. Thus, there is no flexible way to express loops.
1807
 *
1808
 * A little survey on this problem:
1809
 *
1810
 * * Caffe2 supports specific type of recurrent neural network.
1811
 *
1812
 * * TensorFlow as it stands, supports while construct. Its while construct is very straightforward, a body and
1813
 *   a condition is provided, you can construct whatever graph as you want.
1814
 *
1815
 * * mxnet supports recurrent neural network by unrolling it into normal none-looped graph.
1816
 *
1817
 * * Theano supports "scan" ops, which is a terminable loop (with loop variant, known as sequence).
1818
 *
1819
 * * CNTK supports this with custom BrainScript. Within BrainScript, you can access the previous state in a
1820
 *   function, therefore, effectively supports calling a method multiple times (looping over).
1821
 *
1822
 * Of above, Caffe2 and mxnet gave up on supporting generic loop for performance reasons. TensorFlow supports
1823
 * generic while loop, with all the trouble it may introduce (see the Nested while loop bug in TensorFlow that
1824
 * recently fixed). Theano picked a point seems pretty sweet, although there are limitations. CNTK's BrainScript
1825
 * is a DSL, they can do whatever they want with the drawback now that they need to implement a language runtime.
1826
 * TensorFlow, Theano and CNTK all support auto-differentiation over the while loop with tape (Wengert list).
1827
 *
1828
 * A simple way to support loop is to support conditional jump. In fact, conditional jump is a more generic way
1829
 * of doing loops. However, if you put this into the consideration that fully differentiable computation graph
1830
 * wanna to be supported, it is terrible. With conditional jump, it is really hard for you to know which tensor
1831
 * is used where, thus keep track for reverse accumulation (backward propagation). There is no counter or
1832
 * whatsoever, it is pretty hard to trace back on which line is executed how many times. Compounding this with
1833
 * NNC's promise that as long as it shows on the graph can be "parallel" computed, it will be parallel computed,
1834
 * it is close to impossible to track if conditional jump used in its raw form. Certain restrictions must be
1835
 * applied to how to do the loop. The compromise comes from closer examination of NNC's preferences.
1836
 *
1837
 * NNC prefers to have the graph without cycles. It also prefers to be fully differentiable. Another important
1838
 * criteria is that most functions in NNC require SSA (Static Single Assignment) representation. With these in
1839
 * mind, supporting while loop has to be strict.
1840
 *
1841
 * Luckily, there are well-formalized way of supporting this in literature and practice. Because it is
1842
 * well-formalized, translating this into existing NNC implementation is actually pretty straightforward. We
1843
 * are going to introduce a special version of while loop. In literature that discussed about SSA, it may be
1844
 * called parameterized loop. For us, it works like this:
1845
 *
1846
 * To construct a while loop for existing NNC graph, you need to be able to separate the existing graph into
1847
 * two sub-graphs.
1848
 *
1849
 * The while-loop sub-graph (WL sub-graph) contains a set of incoming nodes (I-nodes), Condition false output
1850
 * nodes (CFO-nodes) and end nodes (E-nodes). Each set have its own properties, but in short, all incoming edges
1851
 * to the WL sub-graph connect to one of the I-nodes, but nothing else. All outgoing edges from the WL sub-graph
1852
 * connect to one of the CFO-nodes, but nothing else. A nodes can be either a I-node, CFO-node or E-node,
1853
 * non-exclusively.
1854
 *
1855
 * There are also 3 types of tensors used for all nodes in WL sub-graph: Input tensors (I-tensors) are tensors
1856
 * that are inputs to some nodes, and will never be outputs. Output tensors (O-tensors) are tensors that are
1857
 * outputs from some nodes, but never be inputs to any nodes. I-tensors can be outputs from some nodes that
1858
 * outside of WL sub-graph. O-tensors can be inputs to some nodes that outside of WL sub-graph. Internal
1859
 * tensors (IN-tensors) are not visible outside of WL sub-graph, therefore, they can be both inputs and outputs
1860
 * of some nodes inside the sub-graph. Some tensors can be feedback into the WL sub-graph, given either
1861
 * O-tensors or IN-tensors. A parameter map can be given in these cases to describe which maps to what.
1862
 *
1863
 * The way to drive a WL sub-graph like this: the WL sub-graph runs until all CFO-nodes are reached. At this
1864
 * point, the while_f condition is checked. If true, we continue until all the end-nodes are reached. At this
1865
 * point, we increase the counter, reconfigure the WL sub-graph with parameter map, and run from I-nodes all
1866
 * over again. When reached all CFO-nodes, the condition is checked again, if false, WL sub-graph terminates,
1867
 * and the graph continues from the nodes that are pointed by CFO-nodes.
1868
 *
1869
 * Given these constraints, doing automatic differentiation is not that hard any more. A WL sub-graph, from
1870
 * the whole graph's point of view, is just a giant command supports both forward / backward operations, with
1871
 * some extra information passed around in the form of userdata (tape).
1872
 *
1873
 * For WL sub-graph, we can continue to leverage the compile / backward function that already written for
1874
 * symbolic graph as well.
1875
 *
1876
 * For compile function, we just need to take care of parameter maps (these need to be converted into binded
1877
 * tensors).
1878
 *
1879
 * For backward function, we need to convert parameter maps from assigner (thus, y = x) to accumulator (x += y).
1880
 *
1881
 * This function will replace the nodes that it affects to one sub-graph node. Thus, how to drive this
1882
 * sub-graph is opaque. Its backward form is opaque as well.
1883
 *
1884
 * There are no connection between its nodes and the outside graph nodes other than the three sets:
1885
 *
1886
 * 1. Incoming nodes, the set of nodes that contains the incoming edges from outside, they cannot have edges
1887
 *    points by inside nodes. The sub-graph computation starts from these incoming nodes;
1888
 *
1889
 * 2. Condition false output nodes, when condition is false, we will break out of this while loop, these
1890
 *    nodes pointing to the outside nodes, but no inside nodes;
1891
 *
1892
 * 3. End nodes, the set of nodes that marks the end of the while body, and after these nodes are executed,
1893
 *    we will return to the incoming nodes. These end nodes shouldn't have any edges pointing to inside nodes
1894
 *    (OK if end nodes are condition true output nodes as well);
1895
 *
1896
 * Since these will become a sub-graph (which, to its owner graph, just simple "node"), it will have inputs
1897
 * and outputs. Besides that, the loop body needs to be parameterized to be SSA compliant (see:
1898
 * https://www.cs.cmu.edu/~fp/courses/15411-f13/lectures/06-ssa.pdf). Thus, a list of body parameters need to
1899
 * be provided.
1900
 */
1901
1902
/**
1903
 * @defgroup level_3_5_while_essentials While Loop Essentials
1904
 * @{
1905
 */
1906
1907
/**
1908
 * The given tensors contains all the common / input / output tensors specified in the sub-graph.
1909
 */
1910
typedef int(*ccv_nnc_graph_while_f)(ccv_nnc_tensor_t* const* const inputs, const int input_size, const void* const data);
1911
/**
1912
 * Create a tensor tape that can be used to record for while loop or case..of.
1913
 * @return A ccv_nnc_tensor_tape_t pointer.
1914
 */
1915
CCV_WARN_UNUSED(ccv_nnc_tensor_tape_t*) ccv_nnc_tensor_tape_new(void);
1916
/**
1917
 * Deallocate the tensor tape and all the memory it allocated.
1918
 * @param tape The tensor tape object.
1919
 */
1920
void ccv_nnc_tensor_tape_free(ccv_nnc_tensor_tape_t* const tape);
1921
/**
1922
 * The API to operate on the symbolic graph is more involved than the concrete graph for while loops.
1923
 * The reason is because symbolic graph operates in SSA form (static single assignment), therefore, the while
1924
 * loops for the symbolic graph has to be parameterized.
1925
 * @param graph The symbolic graph.
1926
 * @param cmd The command idenfitier, can be either CCV_NNC_GRAPH_FORWARD or CCV_NNC_GRAPH_BACKWARD
1927
 * @param while_graph The sub-graph to run the while loop.
1928
 * @param name The name of the while loop. Optional.
1929
 * @return A while loop execution symbol (backed by a sub-graph) of the giving graph.
1930
 */
1931
ccv_nnc_graph_exec_symbol_t ccv_nnc_symbolic_graph_while(ccv_nnc_symbolic_graph_t* const graph, const uint32_t cmd, ccv_nnc_symbolic_graph_t* const while_graph, const char* const name);
1932
/**
1933
 * Set the expression to be evaluated, and at which nodes to be evaluated.
1934
 * @param while_graph The symbolic graph that will run the while loop.
1935
 * @param while_expr The function pointer to the expression.
1936
 * @param while_data A custom data provided to the expression evaluation function.
1937
 * @param inputs The input tensor symbols array to the expression evaluation function.
1938
 * @param input_size The size of the input tensor symbols array.
1939
 * @param breakpoints The execution node symbols at which the while loop will pause, evaluate the expression, and choose to either break out or continue.
1940
 * @param breakpoint_size The size of the execution node symbols array.
1941
 */
1942
void ccv_nnc_symbolic_graph_set_while_expr(ccv_nnc_symbolic_graph_t* const while_graph, const ccv_nnc_graph_while_f while_expr, const void* const while_data, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, const ccv_nnc_graph_exec_symbol_t* const breakpoints, const int breakpoint_size);
1943
/**
1944
 * Set the loop carry parameters when reuse. (parameterized loop, these will be carried over to the next loop).
1945
 * @param while_graph The symbolic graph that will run the while loop.
1946
 * @param symbol_map A pair of tensor symbols array, where the source tensor symbol is the output tensor symbol in this loop, the destination tensor symbol is the input tensor symbol in the next loop.
1947
 * @param symbol_map_size The size of the symbol map array.
1948
 */
1949
void ccv_nnc_symbolic_graph_set_carry_overs(ccv_nnc_symbolic_graph_t* const while_graph, const ccv_nnc_tensor_symbol_map_t* const symbol_map, const int symbol_map_size);
1950
/**
1951
 * Retrieve the special (magical) tensor symbol that retains the while loop counter (thus, dimension of 1x1x1, CCV_64S type).
1952
 * @param while_graph The symbolic graph that will run the while loop.
1953
 * @return A tensor symbol represents the implicit loop count.
1954
 */
1955
CCV_WARN_UNUSED(ccv_nnc_tensor_symbol_t) ccv_nnc_tensor_symbol_for_while_count(const ccv_nnc_symbolic_graph_t* const while_graph);
1956
/**
1957
 * Extract the sub-graph of the while loop from a symbol.
1958
 * @param graph The symbolic graph.
1959
 * @param while_symbol The execution node symbol.
1960
 * @return The sub-graph that represents a while loop.
1961
 */
1962
CCV_WARN_UNUSED(ccv_nnc_symbolic_graph_t*) ccv_nnc_symbolic_graph_from_while_symbol(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t while_symbol);
1963
/**
1964
 * Constructing looped concrete graph. Note that this interface is a little bit simpler than the one for symbolic
1965
 * graph. The reason is that a concrete graph operates on allocated tensors, thus, there is no mapping of tensor
1966
 * symbols between the parent graph and the while graph. (The reason to have a mapping in symbolic graphs is to
1967
 * constraint the variable leaking between the sub graph and parent graph).
1968
 * @param graph The concrete graph.
1969
 * @param cmd The command idenfitier, can be either CCV_NNC_GRAPH_FORWARD or CCV_NNC_GRAPH_BACKWARD
1970
 * @param while_graph The sub-graph to run the while loop.
1971
 * @return A execution node that represents the sub-graph.
1972
 */
1973
CCV_WARN_UNUSED(ccv_nnc_graph_exec_t) ccv_nnc_graph_while(ccv_nnc_graph_t* const graph, const uint32_t cmd, ccv_nnc_graph_t* const while_graph);
1974
/**
1975
 * Set the evaluated expression for the while loop. The while loop will break out if the expression evaluates to 0.
1976
 * @param while_graph The concrete graph that will run the while loop.
1977
 * @param while_expr The function pointer to the expression.
1978
 * @param while_data A custom data provided to the expression evaluation function.
1979
 * @param inputs The input tensors array to the expression evaluation function.
1980
 * @param input_size The size of the input tensors array.
1981
 * @param breakpoints The execution nodes at which the while loop will pause, evaluate the expression, and choose to either break out or continue.
1982
 * @param breakpoint_size The size of the execution nodes array.
1983
 */
1984
void ccv_nnc_graph_set_while_expr(ccv_nnc_graph_t* const while_graph, const ccv_nnc_graph_while_f while_expr, const void* const while_data, ccv_nnc_tensor_t* const* const inputs, const int input_size, const ccv_nnc_graph_exec_t* const breakpoints, const int breakpoint_size);
1985
/**
1986
 * Get the special tensor for the while loop count. It contains one uint64_t value. We keep an implicit count
1987
 * when evaluate the while loop and you can access it with this tensor.
1988
 * @param while_graph The concrete graph that will run the while loop.
1989
 * @return A special tensor that you can retrieve the loop count at .data.i64[0].
1990
 */
1991
CCV_WARN_UNUSED(ccv_nnc_tensor_t) ccv_nnc_tensor_for_while_count(const ccv_nnc_graph_t* const while_graph);
1992
/**
1993
 * Retrieve the sub-graph from a execution node.
1994
 * @param graph The concrete graph.
1995
 * @param exec The execution node represents the sub-graph.
1996
 * @return The sub-graph.
1997
 */
1998
CCV_WARN_UNUSED(ccv_nnc_graph_t*) ccv_nnc_graph_from_while_exec(const ccv_nnc_graph_t* const graph, ccv_nnc_graph_exec_t exec);
1999
2000
/** @} */
2001
2002
/**
2003
 * @defgroup level_3_5_while_others While Loop Others
2004
 * @{
2005
 */
2006
2007
/**
2008
 * For a given tape on a given graph, update the input / output tensors so new version will be created (if needed).
2009
 * @param tape The tensor tape object.
2010
 * @param graph The concrete graph this tensor tape is executing in.
2011
 * @param input_flags The flags associated with input tensors.
2012
 * @param inputs The input tensors.
2013
 * @param input_size The size of input tensors array.
2014
 * @param output_flags The flags associated with output tensors.
2015
 * @param outputs The output tensors.
2016
 * @param output_size The size of output tensors array.
2017
 */
2018
void ccv_nnc_tensor_tape_io(ccv_nnc_tensor_tape_t* const tape, const ccv_nnc_graph_t* const graph, const int* const input_flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, const int* const output_flags, ccv_nnc_tensor_t* const* const outputs, const int output_size);
2019
/**
2020
 * Retrieve the number we associated with the execution node that recorded on the tape for a particular run of the graph.
2021
 * @param tape The tensor tape object.
2022
 * @param graph The concrete graph this tensor tape is executing in.
2023
 * @param exec The execution node.
2024
 * @return The number associated with the execution node.
2025
 */
2026
uint64_t ccv_nnc_tensor_tape_numbering(ccv_nnc_tensor_tape_t* const tape, const ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec);
2027
/**
2028
 * Set the number we associated with the execution node that recorded on the tape for a particular run of the graph.
2029
 * @param tape The tensor tape object.
2030
 * @param graph The concrete graph this tensor tape is executing in.
2031
 * @param exec The execution node.
2032
 * @param numbering The number associated with the execution node.
2033
 */
2034
void ccv_nnc_tensor_tape_set_numbering(ccv_nnc_tensor_tape_t* const tape, ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, const uint64_t numbering);
2035
/**
2036
 * Augmented tensor to run a graph with while loop (An obvious example is dynamic RNN).
2037
 */
2038
typedef struct ccv_nnc_tensor_multiview_s {
2039
  // This is an augmented ccv_nnc_tensor_view_t
2040
  // Namely, it can point to multiple versions of tensors.
2041
  int type; // This type is CCV_NNC_TENSOR_MULTI_VIEW
2042
  // kind specified how the multi-version tensors stored.
2043
  // See the comment on the follow up enums.
2044
  uint8_t kind;
2045
  uint16_t repeat;
2046
  intptr_t anchor; // on which graph this multi-view tensor is wrapped. This helps to determine on which level the multi-view tensor should be unwrapped.
2047
  // If this tensor points to a tensor view, data.u8 - offset is the real pointer start.
2048
  off_t offset;
2049
  struct ccv_nnc_tensor_multiview_s* p; // If this is wrapped with another multiview tensor. Get to the parent one.
2050
  ccv_nnc_tensor_t* it; // Current tensor (tensor in use), this is updated along with the graph computation.
2051
  // This is useful because by just traverse tv, I can get the latest up-to-date reference to this multi-view tensor.
2052
  ccv_array_t* sp; // Synchronized tensor views. This corresponds to ccv_nnc_tensor_synchronize_to_multiview method, that records all the tensors registered for updates.
2053
  ccv_nnc_tensor_t* _inline_data[4];
2054
  ccv_nnc_tensor_t** _heap_data;
2055
} ccv_nnc_tensor_multiview_t;
2056
3.40k
#define CCV_NNC_MULTIVIEW_DATA(x) ((x)->_heap_data ? 
(x)->_heap_data0
: (x)->_inline_data)
2057
234
#define CCV_NNC_MULTIVIEW_PHI (intptr_t)0x1 /**< Denote this is a phi multi-view tensor. */
2058
2059
enum {
2060
  CCV_NNC_MULTIVIEW_K0N = 0, /**< All of them are repeated. */
2061
  CCV_NNC_MULTIVIEW_K1N = 1, /**< The first one is the first, the second one starts to repeat. (0111111...) */
2062
};
2063
#define CCV_NNC_MULTIVIEW_K01(x) ((x)->kind == CCV_NNC_MULTIVIEW_K0N && (x)->repeat == 1)
2064
/**
2065
 * Setup a tensor multiview with a given set of tensors.
2066
 * A multiview tensor point to a list of tensors, and its access depends on the loop count.
2067
 * For example, if we have a multiview tensor with list of [a, b, c, d], and kind is 1N, repeat is 3.
2068
 * For loop count 0, 1, 2, 3, 4, 5, 6, the corresponding tensors used will be a, b, c, d, b, c. If kind
2069
 * is 0N, and repeat is 4, it will be a, b, c, d, a, b.
2070
 * @param data[] The pointer to the list of tensors the multiview object can point to.
2071
 * @param kind Can be either CCV_NNC_MULTIVIEW_K0N or CCV_NNC_MULTIVIEW_K1N, basically whether to keep the initial tensor.
2072
 * @param repeat The length of the repeat.
2073
 * @param graph Which graph this multiview object attaches to.
2074
 * @param tensor_multiview The tensor multiview object to be updated.
2075
 */
2076
void ccv_nnc_tensor_multiview(ccv_nnc_tensor_t* data[], const uint8_t kind, const uint16_t repeat, const ccv_nnc_graph_t* const graph, ccv_nnc_tensor_multiview_t* const tensor_multiview);
2077
/**
2078
 * Since tensor_multiview will never be allocated with *_new method, the *_free method simply frees anything that is dynamically allocated afterwards (such as the reference items).
2079
 * @param tensor_multiview The tensor multiview object to be deallocated.
2080
 */
2081
void ccv_nnc_tensor_multiview_free(const ccv_nnc_tensor_multiview_t tensor_multiview);
2082
/**
2083
 * Setup a tensor as a reference to a tensor multiview, thus, when tensor multiview's tu (current tensor) updates, the tensor reference's data.u8 will get update as well (point to the same memory region as the tu).
2084
 * @param tensor_multiview The tensor multiview object.
2085
 * @param tensor The tensor that will be updated along with the multiview object.
2086
 */
2087
void ccv_nnc_tensor_synchronize_to_multiview(ccv_nnc_tensor_multiview_t* const tensor_multiview, ccv_nnc_tensor_t* const tensor);
2088
/**
2089
 * Send broadcast to subscribers of the multiview, call this in the beginning of exec.
2090
 * @param tensor_multiview The tensor multiview object.
2091
 */
2092
void ccv_nnc_tensor_multiview_synchronize(ccv_nnc_tensor_multiview_t* const tensor_multiview);
2093
2094
/** @} */
2095
2096
/** @} */
2097
2098
/**
2099
 * @defgroup level_3_5_case_of Branching
2100
 * @{
2101
 */
2102
2103
/**
2104
 * @page symbolic_switch Construct "switch" control structure in symbolic graph
2105
 *
2106
 * Here I use the keyword case_of. To provide a "switch" control structure within NNC has some nice properties
2107
 * even though you can simulate this with a while loop technically.
2108
 *
2109
 * 1. More optimal memory allocation: with "switch" control structure, memory can be multiplexed for each code
2110
 *    path because they are mutually exclusive.
2111
 *
2112
 * 2. No tape should be used within each branch: if we simulate with a "while" loop, any results from within
2113
 *    the "switch" statement has to be kept on the tape, which is inefficient because you don't need any tape
2114
 *    for the "switch" statement other than record which path it is taken.
2115
 *
2116
 * The particular "switch" control structure provided here is a multi-way structured "switch". Each branch is a
2117
 * sub-graph, so it is well-scoped. A node branch out based on the case_of condition return value to either of
2118
 * the branch (numbering from 0 to n, -1 means no path taken). If no path taken, the output tensors will be
2119
 * assigned with the default tensors and continue. Otherwise the computation within the sub-graph will be
2120
 * carried out and the output tensors will be assigned with the tensors specified within that sub-graph and
2121
 * continue.
2122
 *
2123
 * If we want to consider speculative execution in the future, we need to revisit our memory allocation scheme.
2124
 */
2125
2126
/**
2127
 * Function prototype to evaluate a branch expression.
2128
 */
2129
typedef int(*ccv_nnc_graph_case_of_f)(ccv_nnc_tensor_t* const* const inputs, const int input_size, const void* const data);
2130
/**
2131
 * Create a new case..of execution node symbol.
2132
 * @param graph The symbolic graph.
2133
 * @param cmd The command idenfitier, can be either CCV_NNC_GRAPH_FORWARD or CCV_NNC_GRAPH_BACKWARD
2134
 * @param inputs The input tensor symbols array for the expression.
2135
 * @param input_size The size of the input tensor symbols array.
2136
 * @param symbol_map The pair of tensor symbols array where the source is the input tensor symbol and the destination is the output tensor symbol.
2137
 * @param symbol_map_size The size of symbol map array.
2138
 * @param name The name of the case..of graph. Optional.
2139
 * @return A execution node symbol that represents the case..of graph.
2140
 */
2141
CCV_WARN_UNUSED(ccv_nnc_graph_exec_symbol_t) ccv_nnc_symbolic_graph_case_of_new(ccv_nnc_symbolic_graph_t* const graph, const uint32_t cmd, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, const ccv_nnc_tensor_symbol_map_t* const symbol_map, const int symbol_map_size, const char* const name);
2142
/**
2143
 * Set the expression to be evaluated when choose which sub-graph to branch to.
2144
 * @param graph The symbolic graph.
2145
 * @param exec The execution node symbol that represents the case..of graph.
2146
 * @param case_of The function pointer to evaluate.
2147
 * @param case_of_data The data associated with the function pointer.
2148
 */
2149
void ccv_nnc_symbolic_graph_set_case_of_expr(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t exec, ccv_nnc_graph_case_of_f case_of, const void* case_of_data);
2150
/**
2151
 * Set a sub-graph as one of the branch for the case..of graph.
2152
 * @param graph The symbolic graph.
2153
 * @param symbol The execution node symbol that represents the case..of graph.
2154
 * @param case_graph The sub-graph for one of the branch.
2155
 * @param case_of The index assigned to this sub-graph (expression returns this index to determine which sub-graph to execute).
2156
 * @param symbol_map The pair of tensor symbols array where the source is the output tensor symbol of the sub-graph, and the destination is the output tensor symbol of the execution node symbol.
2157
 * @param symbol_map_size The size of the symbol map array.
2158
 */
2159
void ccv_nnc_symbolic_graph_set_case_of(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t symbol, ccv_nnc_symbolic_graph_t* const case_graph, const int case_of, const ccv_nnc_tensor_symbol_map_t* const symbol_map, const int symbol_map_size);
2160
/**
2161
 * Create a new case..of execution node.
2162
 * @param graph The concrete graph.
2163
 * @param cmd The command idenfitier, can be either CCV_NNC_GRAPH_FORWARD or CCV_NNC_GRAPH_BACKWARD
2164
 * @param inputs The input tensors array supplied to the expression.
2165
 * @param input_size The size of the input tensors array.
2166
 * @param outputs The output tensors array.
2167
 * @param output_size The size of the output tensors array.
2168
 * @return A execution node that represents the case..of graph.
2169
 */
2170
CCV_WARN_UNUSED(ccv_nnc_graph_exec_t) ccv_nnc_graph_case_of_new(ccv_nnc_graph_t* const graph, const uint32_t cmd, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size);
2171
/**
2172
 * Set the expression to be evaluated when choose which sub-graph to branch to.
2173
 * @param graph The concrete graph.
2174
 * @param exec The execution node that represents the case..of graph.
2175
 * @param case_of The function pointer to evaluate.
2176
 * @param case_of_data The data associated with the function pointer.
2177
 * @param offset A integer added to the expression output to help choose the index. Thus, real index = expression index + offset.
2178
 */
2179
void ccv_nnc_graph_set_case_of_expr(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, ccv_nnc_graph_case_of_f case_of, const void* case_of_data, const int offset);
2180
/**
2181
 * Set a sub-graph as one of the branch for the case..of graph.
2182
 * @param graph The concrete graph.
2183
 * @param exec The execution node that represents the case..of graph.
2184
 * @param case_graph The sub-graph for one of the branch.
2185
 * @param case_of The index assigned to this sub-graph (expression returns this index + offset to determine which sub-graph to execute).
2186
 */
2187
void ccv_nnc_graph_set_case_of(ccv_nnc_graph_t* const graph, const ccv_nnc_graph_exec_t exec, ccv_nnc_graph_t* const case_graph, const int case_of);
2188
2189
/** @} */
2190
2191
/**
2192
 * @defgroup level_3_5_minimizer Gradient-based Optimization
2193
 * @{
2194
 */
2195
2196
/**
2197
 * This is the comparable part to Caffe's solver or TensorFlow's optimizer. It took a step further than just
2198
 * compute the gradient, but also apply the gradient to update parameters to minimize the loss.
2199
 * @param graph The symbolic graph.
2200
 * @param minimizer The wrapped command that represents a particular optimization strategy.
2201
 * @param losses The tensor symbols array of losses.
2202
 * @param loss_size The size of the loss symbols array.
2203
 * @param parameters The parameter tensor symbols to optimize.
2204
 * @param parameter_size The size of parameter symbols array.
2205
 * @param inputs The additional input symbols we compute gradient against.
2206
 * @param input_size The size of the additional input symbols array.
2207
 * @param sources The source execution nodes array.
2208
 * @param source_size The size of source nodes array.
2209
 * @param destinations The destinations execution nodes array.
2210
 * @param destination_size The size of destination nodes array.
2211
 * @param gradients The tensor symbols that represents the gradient for update, should be the same size as the parameters array + input array size. This can be 0 (optional).
2212
 * @param updated_parameters The tensor symbols that represents the updated parameters, should be the same size as the parameters array.
2213
 * @param saved_aux The tensor symbols that is helpful for particular optimization strategy.
2214
 * @param graph_exec_symbols The execution node symbols for the updates, should be the same size as the parameters array.
2215
 */
2216
void ccv_nnc_symbolic_graph_minimize(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_cmd_t minimizer, const ccv_nnc_tensor_symbol_t* const losses, const int loss_size, const ccv_nnc_tensor_symbol_t* const parameters, const int parameter_size, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size, ccv_nnc_tensor_symbol_t* const gradients, ccv_nnc_tensor_symbol_t* const updated_parameters, ccv_nnc_tensor_symbol_map_t* const saved_aux, ccv_nnc_graph_exec_symbol_t* const graph_exec_symbols);
2217
/**
2218
 * The number of extra saved aux per parameter only depends on the commands. For example, SGD with momentum requires 1 aux (for momentum).
2219
 * Others require more.
2220
 * @param minimizer The wrapped command that represents a particular optimization strategy.
2221
 * @return the number of saved aux per parameter.
2222
 */
2223
CCV_WARN_UNUSED(int) ccv_nnc_minimizer_saved_aux_size(const ccv_nnc_cmd_t minimizer);
2224
2225
/** @} */
2226
2227
/**
2228
 * @defgroup level_3_5_simplify Graph Simplification
2229
 * @{
2230
 */
2231
2232
/**
2233
 * @page symbolic_simplify Symbolic graph simplification
2234
 *
2235
 * We make a distinction between graph simplifications and optimizations (autotune).
2236
 *
2237
 * Simplification: rewrite the graph and the resulting graph will have less nodes. This is done on the symbolic
2238
 * graph only. Passes that is "simplification" include pruning, common sub-expression eliminations, constant
2239
 * folding etc.
2240
 *
2241
 * Optimization (autotune): graph optimization can have more objectives. The most obvious objective is to reduce
2242
 * computation time. For symbolic graph, passes that reduces computation time include data layout optimizations,
2243
 * auto parallel etc (in normal optimization implementations, they have a cost model to guide the optimization.
2244
 * NNC's implementation uses a cost database that profiles the time cost on the device to guide the optimization.
2245
 * We call it autotune to distinguish with the normal optimization passes because we need device profile data).
2246
 * There could be other objectives, for example, in many deep learning applications, reducing memory footprint
2247
 * can be desirable. However, as always in computer science, memory and time is a typical trade-off. Memory
2248
 * optimization almost always results longer computation time, and the objective is to trade between these two
2249
 * with a bias term (in other frameworks such as TensorFlow, the memory optimizer uses a list of "cheap ops" to
2250
 * bias between the time and memory footprint).
2251
 *
2252
 * For graph optimizations, it can happen on both the symbolic graph level as well as the concrete graph level.
2253
 * For NNC, symbolic graph is already very explicit (data layout, device allocation and data transfer between
2254
 * devices / nodes, even the command backend can all be specified on the symbolic graph), however, some
2255
 * information is unknown until it is compiled down to concrete graph (tensor addresses, tensor initialization
2256
 * etc.), and since graph optimizations need all the information to optimize. Keeping the flexibility to do
2257
 * optimization on both symbolic and concrete graph level seems reasonable.
2258
 */
2259
2260
enum {
2261
  /**
2262
   * If two commands generated the same outputs, all the places where the newer output used will be replaced by
2263
   * the old output. Later on the graph pruning stage, the command that generate the newer output will be
2264
   * eliminated.
2265
   */
2266
  CCV_NNC_SIMPLIFY_COMMON_SUBEXPRESSION_ELIMINATION,
2267
  /**
2268
   * For the given outputs, eliminate unused input tensors, and then eliminate graph execs that don't contribute
2269
   * to the outputs.
2270
   */
2271
  CCV_NNC_SIMPLIFY_GRAPH_PRUNING,
2272
  /**
2273
   * For CCV_NNC_DATA_TRANSFER, if the input / output is the same (on the same device, no alias), we can skip.
2274
   * Similarly, if it is on the same device, but alias of some, for some cases we can skip as well (if neither
2275
   * are carry overs, bypasses etc.)
2276
   */
2277
  CCV_NNC_SIMPLIFY_DATA_TRANSFER_OPT,
2278
  /**
2279
   * Combine a few smaller ops into bigger one. For now, this functionality is limited. I can only address ops
2280
   * that are sequential.
2281
   */
2282
  CCV_NNC_SIMPLIFY_OPS_FUSION,
2283
  // CCV_NNC_SIMPLIFY_CONSTANT_FOLDING, // This currently is not supported, because we don't have efficient way to express constant in symbolic graph.
2284
};
2285
/**
2286
 * Simplify a graph with given list of passes, in that particular order.
2287
 * Note, when a graph is simplified, its sources / destinations are changed as well.
2288
 * @param graph The symbolic graph.
2289
 * @param passes The array of passes we are going to apply.
2290
 * @param pass_size The size of the passes array.
2291
 * @param binds The tensor symbols we may bind to an input later (it doesn't prevent pruning any execution nodes).
2292
 * @param bind_size The size of the bind array.
2293
 * @param outputs The output tensor symbols we want to retain (we are going to prune any execution nodes that is not related to these outputs).
2294
 * @param output_size The size of the output array.
2295
 * @param sources The source execution node symbols array.
2296
 * @param source_size The size of source node symbols array.
2297
 * @param destinations The destinations execution node symbols array.
2298
 * @param destination_size The size of destination node symbols array.
2299
 */
2300
void ccv_nnc_symbolic_graph_simplify(ccv_nnc_symbolic_graph_t* const graph, const int* const passes, const int pass_size, const ccv_nnc_tensor_symbol_t* const binds, const int bind_size, const ccv_nnc_tensor_symbol_t* const outputs, const int output_size, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size);
2301
2302
/** @} */
2303
2304
/**
2305
 * @defgroup level_3_5_parallel Automatic Graph Parallelization
2306
 * @{
2307
 */
2308
2309
enum {
2310
  /**
2311
   * Op for reducer / allreducer. Currently only supports sum.
2312
   */
2313
  CCV_NNC_PARALLEL_REDUCE_OP_SUM,
2314
};
2315
2316
/**
2317
 * Turn the existing graph to be capable to run on several devices with different data inputs at parallel.
2318
 * With this method, additional tensor symbols will be created that runs on different devices. That has
2319
 * been said, there are concepts of "broadcast" and "reduce". "broadcast" tensor symbols will be copied to
2320
 * different devices, while "reduce" tensors will be summed from different devices to the default device.
2321
 * "allreducer" concept is simpler. The allreduce operation will be performed on these tensors and then
2322
 * be used on different devices again.
2323
 *
2324
 * Limitations: right now, the way to reduce / allreduce tensors only supports "sum". The data parallel
2325
 * only supports GPU, thus, the nodes will be duplicated are GPU computations and GPU memory backed
2326
 * tensors. Also, right now, the tensors to be broadcasted / allreduced / reduced should have no aliases.
2327
 *
2328
 * @param graph The symbolic graph.
2329
 * @param parallel Number of devices we want to run on. 0 will use all devices available. 1 will skip.
2330
 * @param broadcasts The tensor symbols to be broadcasted.
2331
 * @param broadcast_size The size of the broadcast tensor symbols array.
2332
 * @param allreducers The tensor symbols that to be allreduced.
2333
 * @param allreducer_size The size of the allreducer tensor symbols array.
2334
 * @param allreducer_outs Return the tensor symbols for allreducers that before allreduced. Optional, 0
2335
 *        means I don't care about this.
2336
 * @param reducers The tensor symbols to be reduced.
2337
 * @param reducer_size The size of the reducer tensor symbols array.
2338
 * @param reducer_outs Return the tensor symbols for reducers that after reduced. Optional, 0 means
2339
 *        I don't care about this.
2340
 * @param reduce_op_type The reduce op for reducer / allreducer.
2341
 * @param sources The source execution node symbols array.
2342
 * @param source_size The size of source node symbols array.
2343
 * @param destinations The destinations execution node symbols array.
2344
 * @param destination_size The size of destination node symbols array.
2345
 */
2346
void ccv_nnc_symbolic_graph_data_parallel(ccv_nnc_symbolic_graph_t* const graph, const int parallel, const ccv_nnc_tensor_symbol_t* const broadcasts, const int broadcast_size, const ccv_nnc_tensor_symbol_t* const allreducers, const int allreducer_size, ccv_nnc_tensor_symbol_t* const allreducer_outs, const ccv_nnc_tensor_symbol_t* const reducers, const int reducer_size, ccv_nnc_tensor_symbol_t* const reducer_outs, const int reduce_op_type, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size);
2347
/**
2348
 * Get the symbol that is on a device other than the default one. The list will be flushed if the
2349
 * ccv_nnc_symbolic_graph_data_parallel function is called again.
2350
 * @param graph The symbolic graph.
2351
 * @param symbol The tensor symbol we want to retrieve its counterpart on a different device.
2352
 * @param device_id The device numeric id for this symbol.
2353
 * @return A tensor symbol that is on a different device.
2354
 */
2355
CCV_WARN_UNUSED(ccv_nnc_tensor_symbol_t) ccv_nnc_tensor_symbol_copy(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t symbol, const int device_id);
2356
/**
2357
 * Set corresponding symbol for this symbol on another device. Thus, someone else can query this
2358
 * later with ccv_nnc_tensor_symbol_copy
2359
 * @param graph The symbolic graph.
2360
 * @param symbol The tensor symbol we want to set its counterpart on a different device.
2361
 * @param device_id The device numeric id for this symbol.
2362
 * @param copy The tensor symbol counterpart on a different device.
2363
 */
2364
void ccv_nnc_tensor_symbol_set_copy(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_tensor_symbol_t symbol, const int device_id, const ccv_nnc_tensor_symbol_t copy);
2365
/**
2366
 * Get the execution node that is on a device other than the default one. The list will be flushed
2367
 * if the ccv_nnc_symbolic_graph_data_parallel function is called again.
2368
 * @param graph The symbolic graph.
2369
 * @param symbol The execution node we want to retrieve its counterpart on a different device.
2370
 * @param device_id The device numeric id for this symbol.
2371
 * @return A execution node that is on a different device.
2372
 */
2373
CCV_WARN_UNUSED(ccv_nnc_graph_exec_symbol_t) ccv_nnc_graph_exec_symbol_copy(const ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t symbol, const int device_id);
2374
/**
2375
 * Set corresponding symbol for this symbol on another device. Thus, someone else can query this
2376
 * later with ccv_nnc_graph_exec_symbol_copy
2377
 * @param graph The symbolic graph.
2378
 * @param symbol The execution node we want to set its counterpart on a different device.
2379
 * @param device_id The device numeric id for this symbol.
2380
 * @param copy The execution node counterpart on a different device.
2381
 */
2382
void ccv_nnc_graph_exec_symbol_set_copy(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t symbol, const int device_id, const ccv_nnc_graph_exec_symbol_t copy);
2383
2384
/** @} */
2385
2386
/**
2387
 * @defgroup level_3_5_memory_compression Memory Compression
2388
 * @{
2389
 */
2390
2391
/**
2392
 * Apply LSSC memory compression algorithm to the convolution activations. This will compress the activation
2393
 * layer for convolution, therefore, save the overall memory usage during training time.
2394
 *
2395
 * @param graph The symbolic graph.
2396
 * @param sources The source execution node symbols array.
2397
 * @param source_size The size of source node symbols array.
2398
 * @param destinations The destinations execution node symbols array.
2399
 * @param destination_size The size of destination node symbols array.
2400
 */
2401
void ccv_nnc_symbolic_graph_memory_compression(ccv_nnc_symbolic_graph_t* const graph, const ccv_nnc_graph_exec_symbol_t* const sources, const int source_size, const ccv_nnc_graph_exec_symbol_t* const destinations, const int destination_size);
2402
2403
/** @} */
2404
2405
/** @} */
2406
2407
/**
2408
 * @defgroup level_4 Level-4 API
2409
 * @{
2410
 */
2411
2412
/**
2413
 * Opaque pointer to the dynamic graph structure.
2414
 */
2415
typedef struct ccv_nnc_dynamic_graph_s ccv_nnc_dynamic_graph_t;
2416
2417
/**
2418
 * Masquerade this as if it is a on stack variable, there is a heap allocation but managed by the dynamic graph.
2419
 * The fact that ccv_nnc_tensor_variable_t is a pointer is an implementation detail. It should be treated as an
2420
 * opaque type throughout. We may later extends this to be some on-stack information or even just a uid.
2421
 */
2422
typedef struct ccv_nnc_tensor_variable_s* ccv_nnc_tensor_variable_t;
2423
2424
/**
2425
 * Create a dynamic graph.
2426
 * @return A newly created dynamic graph.
2427
 */
2428
CCV_WARN_UNUSED(ccv_nnc_dynamic_graph_t*) ccv_nnc_dynamic_graph_new(void);
2429
2430
/** @cond ALL */
2431
// Get a new tensor variable.
2432
CCV_WARN_UNUSED(ccv_nnc_tensor_variable_t) ccv_nnc_tensor_variable_new_impl(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_param_t info);
2433
16.5k
#define CCV_NNC_TENSOR_VARIABLE_NEW_X_1(graph) ccv_nnc_tensor_variable_new_impl(graph, ccv_nnc_tensor_auto)
2434
14.6k
#define CCV_NNC_TENSOR_VARIABLE_NEW_X_SEL(_1, _2, _FX, ...) _FX
2435
// Making so that this new method can take parameters for both no parameter or with tensor_param.
2436
31.2k
#define ccv_nnc_tensor_variable_new(graph, ...) CCV_NNC_TENSOR_VARIABLE_NEW_X_SEL(graph, ##__VA_ARGS__, ccv_nnc_tensor_variable_new_impl, 
CCV_NNC_TENSOR_VARIABLE_NEW_X_116.5k
)(graph, ##
__VA_ARGS__8.30k
)
2437
CCV_WARN_UNUSED(ccv_nnc_tensor_variable_t) ccv_nnc_tensor_constant_new_impl(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_param_t info);
2438
#define CCV_NNC_TENSOR_CONSTANT_NEW_X_1(graph) ccv_nnc_tensor_constant_new_impl(graph, ccv_nnc_tensor_auto)
2439
29
#define CCV_NNC_TENSOR_CONSTANT_NEW_X_SEL(_1, _2, _FX, ...) _FX
2440
// Making so that this new method can take parameters for both no parameter or with tensor_param.
2441
29
#define ccv_nnc_tensor_constant_new(graph, ...) CCV_NNC_TENSOR_CONSTANT_NEW_X_SEL(graph, ##__VA_ARGS__, ccv_nnc_tensor_constant_new_impl, CCV_NNC_TENSOR_CONSTANT_NEW_X_1)(graph, ##
__VA_ARGS__5
)
2442
/** @endcond */
2443
2444
/**
2445
 * Create a new tensor variable that is an alias of a given tensor variable.
2446
 * @param graph The dynamic graph.
2447
 * @param tensor_variable The tensor variable we are going to alias from.
2448
 * @param ofs The offset on each of the dimension.
2449
 * @param inc The line size of each dimension.
2450
 * @param info The tensor parameters for the new alias.
2451
 * @return New tensor variable that is an alias.
2452
 */
2453
CCV_WARN_UNUSED(ccv_nnc_tensor_variable_t) ccv_nnc_tensor_variable_alias_new(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable, const int ofs[CCV_NNC_MAX_DIM_ALLOC], const int inc[CCV_NNC_MAX_DIM_ALLOC], const ccv_nnc_tensor_param_t info);
2454
/**
2455
 * Get the parameters for a tensor variable.
2456
 * @param graph The dynamic graph.
2457
 * @param tensor_variable The tensor variable reference.
2458
 * @return The tensor parameters.
2459
 */
2460
CCV_WARN_UNUSED(ccv_nnc_tensor_param_t) ccv_nnc_tensor_variable_params(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable);
2461
2462
/** @cond ALL */
2463
/**
2464
 * Get the underlying tensor for the tensor variable. The tensor allocation may be performed when calling this
2465
 * method. If the tensor cannot be allocated (because no shape specified), return 0.
2466
 * @param graph The dynamic graph.
2467
 * @param tensor_variable The tensor variable to get the underlying tensor.
2468
 * @param stream_context Which stream this command will be executed upon.
2469
 * @return The underlying tensor.
2470
 */
2471
CCV_WARN_UNUSED(ccv_nnc_tensor_t*) ccv_nnc_tensor_from_variable_impl(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable, ccv_nnc_stream_context_t* const stream_context);
2472
8.48k
#define CCV_NNC_TENSOR_FROM_VARIABLE_X_1(graph, tensor_variable) ccv_nnc_tensor_from_variable_impl(graph, tensor_variable, 0)
2473
60.3k
#define CCV_NNC_TENSOR_FROM_VARIABLE_X_SEL(_1, _2, _3, _FX, ...) _FX
2474
// Making so that this new method can take parameters for both no parameter or with tensor_param.
2475
68.8k
#define ccv_nnc_tensor_from_variable(graph, tensor_variable, ...) CCV_NNC_TENSOR_FROM_VARIABLE_X_SEL
(graph, tensor_variable, ##__VA_ARGS__, ccv_nnc_tensor_from_variable_impl, 45.9k
CCV_NNC_TENSOR_FROM_VARIABLE_X_18.48k
)(graph, tensor_variable, ##__VA_ARGS__)
2476
/** @endcond */
2477
/**
2478
 * Query whether a given tensor variable is a constant (no gradient).
2479
 * @param graph The dynamic graph.
2480
 * @param tensor_variable The tensor variable to query whether it is a constant.
2481
 */
2482
CCV_WARN_UNUSED(int) ccv_nnc_tensor_variable_is_constant(const ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable);
2483
/**
2484
 * Set a tensor on the tensor variable. Tensor variable doesn't take over the life-cycle management of the tensor
2485
 * (in similar way as the tensor binds).
2486
 * @param graph The dynamic graph.
2487
 * @param tensor_variable The tensor variable to set.
2488
 * @param tensor The tensor that is going to be associated with the tensor variable.
2489
 */
2490
void ccv_nnc_tensor_variable_set(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable, ccv_nnc_tensor_t* const tensor);
2491
/**
2492
 * A destructor function to be called when a tensor variable will be freed in the sense that no
2493
 * backward computation need it no more.
2494
 * Thus, we pass in tensor rather than tensor variable for the destructor.
2495
 */
2496
typedef void (*ccv_nnc_tensor_variable_destructor_f)(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_t* const tensor, void* const context);
2497
/**
2498
 * Hook into a tensor variable such that when it is actually freed (destroyed), the callback will receive
2499
 * the update.
2500
 * @param graph The dynamic graph.
2501
 * @param tensor_variable The tensor variable to observe when it is destroyed.
2502
 * @param func The callback function.
2503
 * @param context The context to be passed along to the callback function.
2504
 **/
2505
void ccv_nnc_tensor_variable_destructor_hook(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable, ccv_nnc_tensor_variable_destructor_f func, void* const context);
2506
/**
2507
 * Check given tensor variables whether have effects to another set of tensor variables.
2508
 * @param graph The dynamic graph.
2509
 * @param source_variables The tensor variables to check whether it has effect to another set of variables.
2510
 * @param source_variable_size The size of source tensor variables.
2511
 * @param destination_variables Whether the source variables has effect to this list of variables.
2512
 * @param destination_variable_size The size of destination tensor variables.
2513
 * @param bitmask Bit return value, each bit represents a source tensor variable, and 1 meant it can reach some of the destinations.
2514
 */
2515
void ccv_nnc_dynamic_graph_has_effect_to_tensor_variables(const ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t* const source_variables, const int source_variable_size, const ccv_nnc_tensor_variable_t* const destination_variables, const int destination_variable_size, uint64_t* const bitmask);
2516
/**
2517
 * Execute a command with given tensor variables, the output is in the output tensor variables.
2518
 * @param graph The dynamic graph.
2519
 * @param cmd The wrapped command.
2520
 * @param hint The hint associated with the command.
2521
 * @param flags A reserved field for flags.
2522
 * @param inputs The input tensor variables array.
2523
 * @param input_size The size of the input tensor variables array.
2524
 * @param outputs The output tensor variables array.
2525
 * @param output_size The size of the output tensor variables array.
2526
 * @param parallel The parallel parameter, how many concurrent computations we need to execute.
2527
 * @param stream_context Which stream this command will be executed upon.
2528
 */
2529
int ccv_nnc_dynamic_graph_exec(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, const ccv_nnc_tensor_variable_t* const inputs, const int input_size, ccv_nnc_tensor_variable_t* const outputs, const int output_size, const int parallel, ccv_nnc_stream_context_t* const stream_context);
2530
/**
2531
 * Compute the gradient of given tensor, with respect to the f. Thus, df / dt.
2532
 * @param dynamic_graph The dynamic graph.
2533
 * @param f_variables The output losses.
2534
 * @param f_variable_size The size of output losses array.
2535
 * @param df_optionals The custom gradients for f. If not provided, will default to 1.
2536
 * @param inputs The input variables.
2537
 * @param input_size The size of the input variables array.
2538
 * @param outputs The gradients with respect to the inputs. If the gradient already have value exist, it will be
2539
 *        accumulated into the final value.
2540
 * @param output_size The size of the outputs array. Should be equal to the input_size.
2541
 * @param stream_context Which stream this computation will be executed upon.
2542
 */
2543
void ccv_nnc_dynamic_graph_backward(ccv_nnc_dynamic_graph_t* const dynamic_graph, const ccv_nnc_tensor_variable_t* const f_variables, const int f_variable_size, const ccv_nnc_tensor_variable_t* const df_optionals, const ccv_nnc_tensor_variable_t* const inputs, const int input_size, ccv_nnc_tensor_variable_t* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context);
2544
/**
2545
 * Apply gradients to the set of parameters to update them with appropriate minimizer.
2546
 * @param dynamic_graph The dynamic graph.
2547
 * @param minimizer The wrapped command that represents a particular optimization strategy.
2548
 * @param gradients The computed gradients to be applied.
2549
 * @param gradient_size The size of gradients array.
2550
 * @param parameters The parameters to update.
2551
 * @param parameter_size The size of parameters array, should be the same length as gradients.
2552
 * @param saved_aux The aux variables to faciliate the minimizer. See ccv_nnc_minimizer_saved_aux_size.
2553
 * @param parallel The parallel parameter, how many concurrent computations we need to execute.
2554
 * @param stream_context Which stream this computation will be executed upon.
2555
 */
2556
void ccv_nnc_dynamic_graph_apply_gradients(ccv_nnc_dynamic_graph_t* const dynamic_graph, const ccv_nnc_cmd_t minimizer, const ccv_nnc_tensor_variable_t* const gradients, const int gradient_size, ccv_nnc_tensor_variable_t* const parameters, const int parameter_size, ccv_nnc_tensor_variable_t* const saved_aux, const int parallel, ccv_nnc_stream_context_t* const stream_context);
2557
/**
2558
 * Apply one step of minimization (most likely, a gradient descent) to the parameters with a given loss (or
2559
 * losses).
2560
 * @param dynamic_graph The dynamic graph.
2561
 * @param minimizer The wrapped command that represents a particular optimization strategy.
2562
 * @param losses The losses we are trying to minimize.
2563
 * @param loss_size The size of the losses array.
2564
 * @param dloss_optionals The custom gradient for losses. If not provided, will default to 1.
2565
 * @param parameters The parameters to update.
2566
 * @param parameter_size The size of parameters array.
2567
 * @param saved_aux The aux variables to faciliate the minimizer. See ccv_nnc_minimizer_saved_aux_size.
2568
 * @param parallel The parallel parameter, how many concurrent computations we need to execute.
2569
 * @param stream_context Which stream this computation will be executed upon.
2570
 */
2571
void ccv_nnc_dynamic_graph_minimize(ccv_nnc_dynamic_graph_t* const dynamic_graph, const ccv_nnc_cmd_t minimizer, const ccv_nnc_tensor_variable_t* const losses, const int loss_size, const ccv_nnc_tensor_variable_t* const dloss_optionals, ccv_nnc_tensor_variable_t* const parameters, const int parameter_size, ccv_nnc_tensor_variable_t* const saved_aux, const int parallel, ccv_nnc_stream_context_t* const stream_context);
2572
/**
2573
 * Read more in Level-5 API section.
2574
 */
2575
typedef struct ccv_cnnp_model_s ccv_cnnp_model_t;
2576
/**
2577
 * Evaluate a CNNP model on the dynamic graph with set of inputs / outputs.
2578
 * @param dynamic_graph The dynamic graph.
2579
 * @param model The CNNP model to be evaluated against. Note that ccv_nnc_dynamic_graph_backward /
2580
 *              ccv_nnc_dynamic_graph_apply_gradients / ccv_nnc_dynamic_graph_minimize all works with this
2581
 *              model. It takes over the life-cycle of the model, and now you don't need to free it any more.
2582
 * @param is_test Whether we are in test mode or not.
2583
 * @param inputs The input variables.
2584
 * @param input_size The size of the input variables array.
2585
 * @param outputs The gradients with respect to the inputs.
2586
 * @param output_size The size of the outputs array.
2587
 * @param tensor_tape An opaque tensor tape object to "backpropagate through time".
2588
 * @param stream_context Which stream this computation will be executed upon.
2589
 */
2590
void ccv_nnc_dynamic_graph_evaluate(ccv_nnc_dynamic_graph_t* const dynamic_graph, ccv_cnnp_model_t* const model, const int is_test, const ccv_nnc_tensor_variable_t* const inputs, const int input_size, ccv_nnc_tensor_variable_t* const outputs, const int output_size, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const stream_context);
2591
/**
2592
 * Enable or disable gradient computation on a dynamic graph.
2593
 * @param dynamic_graph The dynamic graph.
2594
 * @param no_grad If it is 1, disable gradient computation on the dynamic graph.
2595
 */
2596
void ccv_nnc_dynamic_graph_set_no_grad(ccv_nnc_dynamic_graph_t* const dynamic_graph, const int no_grad);
2597
/**
2598
 * Dynamic graph will retain a memory it allocated for efficient reuse. Triggering this method
2599
 * intentionally will force these memory to be collected. This is helpful if you know the existing
2600
 * allocation won't be enough for the future use.
2601
 * @param dynamic_graph The dynamic graph.
2602
 */
2603
void ccv_nnc_dynamic_graph_gc(ccv_nnc_dynamic_graph_t* const dynamic_graph);
2604
/**
2605
 * Dispose a tensor variable. You cannot do any computation against this tensor variable afterwards.
2606
 * @param graph The dynamic graph.
2607
 * @param tensor_variable The tensor variable to be disposed.
2608
 */
2609
void ccv_nnc_tensor_variable_free(ccv_nnc_dynamic_graph_t* const graph, const ccv_nnc_tensor_variable_t tensor_variable);
2610
/**
2611
 * Free the dynamic graph.
2612
 * @param graph The dynamic graph.
2613
 */
2614
void ccv_nnc_dynamic_graph_free(ccv_nnc_dynamic_graph_t* const graph);
2615
/**
2616
 * Generate output that can be parsed by GraphViz (DOT language).
2617
 * @param graph The dynamic graph.
2618
 * @param flags Either CCV_NNC_SHORT_DOT_GRAPH or CCV_NNC_LONG_DOT_GRAPH
2619
 * @param out The output file stream.
2620
 */
2621
void ccv_nnc_dynamic_graph_dot(const ccv_nnc_dynamic_graph_t* const graph, const int flags, FILE* out);
2622
/**
2623
 * Count how many ops we kept for gradient computation purpose. This method is useful when we
2624
 * want to assert at end of some train loop, we shouldn't have any gradient computation left.
2625
 * @param graph The dynamic graph.
2626
 * @param type The type of variables to trace. CCV_NNC_SYMBOL_TENSOR / CCV_NNC_SYMBOL_GRAPH_EXEC
2627
 * @return How many gradient computations we kept.
2628
 */
2629
CCV_WARN_UNUSED(int) ccv_nnc_dynamic_graph_bookkeeping_count(const ccv_nnc_dynamic_graph_t* const graph, const int type);
2630
2631
/** @} */
2632
2633
/**
2634
 * @defgroup level_5 Level-5 API
2635
 * @{
2636
 */
2637
2638
/**
2639
 * @page dataframe What is "dataframe" in ML?
2640
 *
2641
 * A large part of machine learning consists of go through data, process them to a shape / form that makes sense,
2642
 * and pass that into the model to train. Deep learning frameworks such as TensorFlow or PyTorch provides some
2643
 * dataset APIs for this purpose. It is convenient for these frameworks because by being Python, people can use
2644
 * Pandas to process the data. In Pandas, this is called Dataframe, which again, imitates R language.
2645
 *
2646
 * Another interesting observation comes from recent (2018) release of Create ML framework from Apple. It provides
2647
 * a very close to Pandas style data process API (MLDataTable) but in Swift. This implementation is important because
2648
 * it provides a survey point other than Python.
2649
 *
2650
 * Comparing to Python, Swift is a stronger typed language. Though all being high-level, they all have pretty good
2651
 * string support (of course!), operator overloading, and polymorphism. String support makes column naming natural,
2652
 * Operator overloading makes conditioning and filtering easier, and polymorphism makes column type representation
2653
 * straight-forward. These, unfortunately, are the challenges I need to face when implementing in C with the eye
2654
 * towards that later the similar ideas can be implemented on top on a high-level language based on this one.
2655
 *
2656
 * It seems I haven't answered the most crucial question yet: what's special about these data process APIs? It is
2657
 * easier to answer this to first see what Pandas or MLDataTable does.
2658
 *
2659
 * * They both represent data as tables. Each column represents different type of the data (time, nd-array, scalar
2660
 *   or string). As such, they both have API to add / remove / rename columns, and load tabular data from disk.
2661
 *
2662
 * * They both provide API to filter (remove / add) rows, and derive new column from existing columns.
2663
 *
2664
 * * Pandas provides more API for data alignment (merge columns from different tables into one table), and compute
2665
 *   statistics (group rows by some criteria, and compute min / max / std / mean within that group).
2666
 *
2667
 * * MLDataTable provides API to batching data (random split) which covered in TensorFlow / PyTorch's Dataset API
2668
 *   as well.
2669
 *
2670
 * It turns out when you have a noisy dataset, these functionalities are useful to remove unwanted data quickly.
2671
 * If you have a relatively clean dataset, it also allows you to prepare data in a more elegant way. For NNC,
2672
 * the interesting requirements are:
2673
 *
2674
 * 1. Represents scalars, tensors, string as columns; columns can be named.
2675
 *
2676
 * 2. New columns can be derived, from existing ones.
2677
 *
2678
 * 3. Rows can be filtered, grouped, and statistics can be computed.
2679
 *
2680
 * 4. Columns can be aligned, with some given indexes.
2681
 *
2682
 * 5. All these can be done efficiently, on a scale of hundreds of Gigabytes data.
2683
 */
2684
2685
/**
2686
 * @defgroup level_5_dataframe Dataframe API
2687
 * @{
2688
 */
2689
2690
/**
2691
 * A data enumeration function to supply data for given row indexes.
2692
 */
2693
typedef void (*ccv_cnnp_column_data_enum_f)(const int column_idx, const int* const row_idxs, const int row_size, void** const data, void* const context, ccv_nnc_stream_context_t* const stream_context);
2694
/**
2695
 * A destructor for data.
2696
 */
2697
typedef void (*ccv_cnnp_column_data_deinit_f)(void* const data, void* const context);
2698
/**
2699
 * A destructor for context.
2700
 */
2701
typedef void (*ccv_cnnp_column_data_context_deinit_f)(void* const context);
2702
/**
2703
 * Column data.
2704
 */
2705
typedef struct {
2706
  int stream_type; /**< The type of stream context for this column. Each column only compatible with one stream type. */
2707
  char* name; /**< The name of the column. */
2708
  ccv_cnnp_column_data_enum_f data_enum; /**< The data enumeration function for this column. */
2709
  ccv_cnnp_column_data_deinit_f data_deinit; /**< The deinit function that will be used to destroy the data. */
2710
  void* context; /**< The context go along with this column. */
2711
  ccv_cnnp_column_data_context_deinit_f context_deinit; /**< The deinit function that will be used to destroy the context. */
2712
} ccv_cnnp_column_data_t;
2713
/**
2714
 * An opaque structure point to the dataframe object.
2715
 */
2716
typedef struct ccv_cnnp_dataframe_s ccv_cnnp_dataframe_t;
2717
/**
2718
 * Create a dataframe object with given column data.
2719
 * @param column_data The column data that can be loaded.
2720
 * @param column_size The size of column data array.
2721
 * @param row_count The number of rows in this dataframe.
2722
 */
2723
CCV_WARN_UNUSED(ccv_cnnp_dataframe_t*) ccv_cnnp_dataframe_new(const ccv_cnnp_column_data_t* const column_data, const int column_size, const int row_count);
2724
/**
2725
 * Add a new column to the dataframe.
2726
 * @param dataframe The dataframe object to add column to.
2727
 * @param data_enum The data provider function for the new column.
2728
 * @param stream_type The type of stream context for this derived column.
2729
 * @param data_deinit The deinit function will be used to destroy the derived data.
2730
 * @param context The context that can be used to generate new column.
2731
 * @param context_deinit The deinit function will be used to destroy the context.
2732
 * @param name The name of the newly added column.
2733
 * @return The new column index.
2734
 */
2735
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_add(ccv_cnnp_dataframe_t* const dataframe, ccv_cnnp_column_data_enum_f data_enum, const int stream_type, ccv_cnnp_column_data_deinit_f data_deinit, void* const context, ccv_cnnp_column_data_context_deinit_f context_deinit, const char* name);
2736
/**
2737
 * A map function that takes the data from multiple columns and derive new data out of it.
2738
 */
2739
typedef void (*ccv_cnnp_column_data_map_f)(void* const* const* const column_data, const int column_size, const int batch_size, void** const data, void* const context, ccv_nnc_stream_context_t* const stream_context);
2740
/**
2741
 * Derive a new column out of existing columns in the dataframe.
2742
 * @param dataframe The dataframe object that contains existing columns.
2743
 * @param map The map function used to derive new column from existing columns.
2744
 * @param stream_type The type of stream context for this derived column.
2745
 * @param data_deinit The deinit function will be used to destroy the derived data.
2746
 * @param column_idxs The columns that will be used to derive new column.
2747
 * @param column_idx_size The size of existing columns array.
2748
 * @param context The context that can be used to generate new column.
2749
 * @param context_deinit The deinit function will be used to destroy the context.
2750
 * @param name The name of the new column.
2751
 * @return The new column index.
2752
 */
2753
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_map(ccv_cnnp_dataframe_t* const dataframe, ccv_cnnp_column_data_map_f map, const int stream_type, ccv_cnnp_column_data_deinit_f data_deinit, const int* const column_idxs, const int column_idx_size, void* const context, ccv_cnnp_column_data_context_deinit_f context_deinit, const char* name);
2754
/**
2755
 * Shuffle an existing dataframe.
2756
 * @param dataframe The dataframe that is about to be shuffled.
2757
 */
2758
void ccv_cnnp_dataframe_shuffle(ccv_cnnp_dataframe_t* const dataframe);
2759
/**
2760
 * Query row count of the dataframe.
2761
 * @param dataframe The dataframe we want to query row count.
2762
 * @return The row count of the dataframe.
2763
 */
2764
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_row_count(ccv_cnnp_dataframe_t* const dataframe);
2765
/**
2766
 * Query the column name of a given column on the dataframe.
2767
 * @param dataframe The dataframe we want to query the column name.
2768
 * @param column_idx The index of a column.
2769
 * @return The name of the column.
2770
 */
2771
CCV_WARN_UNUSED(const char*) ccv_cnnp_dataframe_column_name(ccv_cnnp_dataframe_t* const dataframe, const int column_idx);
2772
/**
2773
 * A sampling function that takes multiple rows of one column, and sample to one row.
2774
 */
2775
typedef void (*ccv_cnnp_column_data_sample_f)(void* const* const input_data, const int batch_size, void** const output_data, void* const context, ccv_nnc_stream_context_t* const stream_context);
2776
/**
2777
 * Sample a dataframe by batch size. Thus, n rows are sampled to 1 row per sample function on
2778
 * one specific column. This will also sample the multi-column dataframe down to 1 column
2779
 * by selecting the one column to sample.
2780
 * @param dataframe The dataframe that is about to be sampled.
2781
 * @param sample The sample function used to sample n rows into 1.
2782
 * @param data_deinit The deinit function will be used to destroy the derived data.
2783
 * @param column_idx The column we selected to sample.
2784
 * @param batch_size How many rows will be sampled to 1 row from the original data.
2785
 * @param context The context that can be used in sample function.
2786
 * @param context_deinit The deinit function will be used to destroy the context.
2787
 * @return The sampled dataframe.
2788
 */
2789
CCV_WARN_UNUSED(ccv_cnnp_dataframe_t*) ccv_cnnp_dataframe_sample_new(ccv_cnnp_dataframe_t* const dataframe, ccv_cnnp_column_data_sample_f sample, ccv_cnnp_column_data_deinit_f data_deinit, const int column_idx, const int batch_size, void* const context, ccv_cnnp_column_data_context_deinit_f context_deinit);
2790
/**
2791
 * Extract a value out of a struct. Assuming the data points to a struct. This method extract
2792
 * n-offset value of that struct. For example, if you have struct { ccv_nnc_tensor_t* a; ccv_nnc_tensor_t* b; } S;
2793
 * if you want to extract the b tensor to a different column, you can call this function with
2794
 * offsetof(S, b).
2795
 * @param dataframe The dataframe object to be extracted.
2796
 * @param column_idx The column that we want to extract value of.
2797
 * @param offset The offset. For example, offsetof(S, b).
2798
 * @param name The name of the new column.
2799
 * @return The new column that contains the extracted value.
2800
 */
2801
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_extract_value(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const off_t offset, const char* name);
2802
/**
2803
 * Make a tuple out of columns specified. Thus, the new derived column will contains a tuple
2804
 * with data from all the columns specified here. Tuple here represented as void* tuple[], an
2805
 * array of void* pointers.
2806
 * @param dataframe The dataframe that will contain the new column.
2807
 * @param column_idxs The columns to be tupled.
2808
 * @param column_idx_size The number of columns.
2809
 * @param name The name of the new column.
2810
 * @return The derived column with the tuple.
2811
 */
2812
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_make_tuple(ccv_cnnp_dataframe_t* const dataframe, const int* const column_idxs, const int column_idx_size, const char* name);
2813
/**
2814
 * The size of the tuple. It is equal to the number of columns we specified. The behavior of
2815
 * calling this method on a column that is not a tuple is undefined.
2816
 * @param dataframe The dataframe that contains the tuple column.
2817
 * @param column_idx The tuple column we are going to inspect.
2818
 * @return The tuple size of the column.
2819
 */
2820
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_tuple_size(const ccv_cnnp_dataframe_t* const dataframe, const int column_idx);
2821
/**
2822
 * Extract a data out of a tuple.
2823
 * @param dataframe The dataframe that will contain the new column.
2824
 * @param column_idx The column that is a tuple.
2825
 * @param index The index into the tuple.
2826
 * @param name The name of the new column.
2827
 * @return The derived column with the extracted value.
2828
 */
2829
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_extract_tuple(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const int index, const char* name);
2830
/**
2831
 * The opaque pointer to the iterator.
2832
 */
2833
typedef struct ccv_cnnp_dataframe_iter_s ccv_cnnp_dataframe_iter_t;
2834
/**
2835
 * Get a new iterator of the dataframe.
2836
 * @param dataframe The dataframe object to iterate through.
2837
 * @param column_idxs The columns that will be iterated.
2838
 * @param column_idx_size The size of columns array.
2839
 * @return The opaque iterator object.
2840
 */
2841
CCV_WARN_UNUSED(ccv_cnnp_dataframe_iter_t*) ccv_cnnp_dataframe_iter_new(ccv_cnnp_dataframe_t* const dataframe, const int* const column_idxs, const int column_idx_size);
2842
/**
2843
 * Get the next item from the iterator.
2844
 * @param iter The iterator to go through.
2845
 * @param data_ref The output for the data.
2846
 * @param column_idx_size The size of the data_ref array.
2847
 * @param stream_context The stream context to extract data asynchronously.
2848
 * @return 0 if the iteration is successful, -1 if there is no more row. -2 if it is already ended.
2849
 */
2850
int ccv_cnnp_dataframe_iter_next(ccv_cnnp_dataframe_iter_t* const iter, void** const data_ref, const int column_idx_size, ccv_nnc_stream_context_t* const stream_context);
2851
/**
2852
 * Assuming iterator is on the same row, peek into potentially different column index.
2853
 * @param iter The iterator to go through.
2854
 * @param data_ref The output for the data.
2855
 * @param offset The offset for which column in this iterator to peek at.
2856
 * @param data_ref_size How many columns in this iterator to peek at.
2857
 * @param stream_context The stream context to extract data asynchronously.
2858
 */
2859
void ccv_cnnp_dataframe_iter_peek(ccv_cnnp_dataframe_iter_t* const iter, void** const data_ref, const int offset, const int data_ref_size, ccv_nnc_stream_context_t* const stream_context);
2860
/**
2861
 * Prefetch next item on the iterator with the given stream context. You can call this method multiple times
2862
 * to prefetch multiple items ahead of time.
2863
 * @param iter The iterator to go through.
2864
 * @param prefetch_count How much ahead we should advance for.
2865
 * @param stream_context The stream context to extract data asynchronously.
2866
 * @return 0 if the prefetch is successful, -1 if it is ended.
2867
 */
2868
int ccv_cnnp_dataframe_iter_prefetch(ccv_cnnp_dataframe_iter_t* const iter, const int prefetch_count, ccv_nnc_stream_context_t* const stream_context);
2869
/**
2870
 * Set the cursor of the iterator. When set to 0, the iterator effectively restarts.
2871
 * @param iter The iterator to go through.
2872
 * @param idx The index of the cursor.
2873
 * @return 0 if it is successful, -1 if it is not (exceed the range).
2874
 */
2875
int ccv_cnnp_dataframe_iter_set_cursor(ccv_cnnp_dataframe_iter_t* const iter, const int idx);
2876
/**
2877
 * Free the dataframe iterator object.
2878
 * @param iter The dataframe iterator to be freed.
2879
 */
2880
void ccv_cnnp_dataframe_iter_free(ccv_cnnp_dataframe_iter_t* const iter);
2881
/**
2882
 * Free the dataframe object.
2883
 * @param dataframe The dataframe object to be freed.
2884
 */
2885
void ccv_cnnp_dataframe_free(ccv_cnnp_dataframe_t* const dataframe);
2886
2887
/** @} */
2888
2889
/**
2890
 * @defgroup level_5_dataframe_add_ons Dataframe Add-ons
2891
 * @{
2892
 */
2893
2894
/**
2895
 * Turn a ccv_array_t to a dataframe object.
2896
 * @param array The array we want to turn into a dataframe object.
2897
 * @return The new dataframe object.
2898
 */
2899
CCV_WARN_UNUSED(ccv_cnnp_dataframe_t*) ccv_cnnp_dataframe_from_array_new(ccv_array_t* const array);
2900
/**
2901
 * Derive a new column that copies a tensor array from given column to the derived column on GPU.
2902
 * @param dataframe The dataframe object that get the derived column.
2903
 * @param column_idx The original column contains tensor array on CPU.
2904
 * @param tensor_offset Only copy as outputs[i] = inputs[i + tensor_offset].
2905
 * @param tensor_size How many tensors in the tensor array.
2906
 * @param device_id The device we want to copy the tensors to.
2907
 * @param name The name of the new column.
2908
 * @return The index of the newly derived column.
2909
 */
2910
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_copy_to_gpu(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const int tensor_offset, const int tensor_size, const int device_id, const char* name);
2911
/**
2912
 * Derive a new column by executing a generic command.
2913
 * @param dataframe The dataframe object that get the derived column.
2914
 * @param column_idx The original column contains tensor array.
2915
 * @param cmd The command for this operation.
2916
 * @param hint The hint to run the command.
2917
 * @param flags The flags with the command.
2918
 * @param input_offset Use inputs[i + input_offset] to inputs[i + input_offset + input_size - 1] as the inputs
2919
 * @param input_size How many tensors in the input array.
2920
 * @param output_params The parameters for the outputs.
2921
 * @param output_size How many tensors in the output array.
2922
 * @param stream_type The type of stream context we are going to use.
2923
 * @param name The name of the new column.
2924
 * @return The index of the newly derived column.
2925
 */
2926
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_cmd_exec(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, const int input_offset, const int input_size, const ccv_nnc_tensor_param_t* const output_params, const int output_size, const int stream_type, const char* name);
2927
/**
2928
 * Add a new column contains some tensors. This will add a new column that each row is the tensor specified
2929
 * as the parameters. It comes handy when you want to have some auxiliary tensors along with each row.
2930
 * @param dataframe The dataframe object that get the new column.
2931
 * @param params The parameters for the tensors.
2932
 * @param name The name of the new column.
2933
 * @return The index of the newly added column.
2934
 */
2935
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_add_aux(ccv_cnnp_dataframe_t* const dataframe, const ccv_nnc_tensor_param_t params, const char* name);
2936
/**
2937
 * Read image off a said column. That column should contain the filename (as char array). The new column
2938
 * will contain the ccv_dense_matrix_t / ccv_nnc_tensor_t (both are toll-free bridging) of the image.
2939
 * @param dataframe The dataframe object that loads the images.
2940
 * @param column_idx The column which contains the filename.
2941
 * @param structof The offset to the filename (as char array) from that column. For example, the column
2942
 *        could be a struct and filename could be one of the field. In that case, you can pass offsetof(S, filename)
2943
 * @param name The name of the new column.
2944
 * @return The index of the newly derived column.
2945
 */
2946
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_read_image(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const off_t structof, const char* name);
2947
/**
2948
 * The structure to describe how to apply random jitter to the image.
2949
 */
2950
typedef struct {
2951
  float contrast; /**< The random contrast, the final contrast will be [1 / (1 + contrast), 1 + contrast] */
2952
  float saturation; /**< The saturation, the final saturation will be [1 / (1 + saturation), 1 + saturation] */
2953
  float brightness; /**< The brightness, the final brightness will be between [1 / (1 + brightness), 1 + brightness] */
2954
  float lighting; /**< AlexNet style PCA based image jitter */
2955
  float aspect_ratio; /**< Stretch aspect ratio between [1 / (1 + asepct_ratio), 1 + aspect_ratio] */
2956
  int symmetric; /**< Apply random flip on x-axis (around y-axis */
2957
  int seed; /**< The seed for random generator. */
2958
  int center_crop; /**< Enable crop to the center (otherwise do random crop). */
2959
  struct {
2960
    int min; /**< The minimal dimension of resize */
2961
    int max; /**< The maximal dimension of resize. The final resize can be computed from min + (max - min) * random_unit */
2962
    int roundup; /**< The dimension on both height / width are a multiple of roundup value. */
2963
  } resize;
2964
  struct {
2965
    int rows; /**< The height of the final image. */
2966
    int cols; /**< The width of the final image. */
2967
  } size;
2968
  struct {
2969
    int x; /**< The extra random offset on x-axis. */
2970
    int y; /**< The extra random offset on y-axis. */
2971
  } offset;
2972
  struct {
2973
    float mean[3]; /**< Normalize the image with mean. */
2974
    float std[3];/**< Normalize the image with std. pixel = (pixel - mean) / std */
2975
  } normalize;
2976
} ccv_cnnp_random_jitter_t;
2977
/**
2978
 * Apply random jitter on a image to generate a new image.
2979
 * @param dataframe The dataframe object that contains the original image.
2980
 * @param column_idx The column which contains the original image.
2981
 * @param datatype The final datatype of the image. We only support CCV_32F right now.
2982
 * @param random_jitter The random jitter parameters to be applied to.
2983
 * @param name The name of the new column.
2984
 * @return The index of the newly derived column.
2985
 */
2986
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_image_random_jitter(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const int datatype, const ccv_cnnp_random_jitter_t random_jitter, const char* name);
2987
/**
2988
 * Generate a one-hot tensor off the label from a struct.
2989
 * @param dataframe The dataframe object that contains the label.
2990
 * @param column_idx The column which contains the label (as int).
2991
 * @param structof The offset to the label (as int) from that column. For example, the column
2992
 *        could be a struct and label could be one of the field. You can pass offsetof(S, filename)
2993
 * @param range The range of the label, from [0...range - 1]
2994
 * @param onval The value when it hit.
2995
 * @param offval The value for the others.
2996
 * @param datatype The datatype of the tensor.
2997
 * @param format The format of the tensor.
2998
 * @param name The name of the new column.
2999
 * @return The index of the newly derived column.
3000
 */
3001
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_one_hot(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const off_t structof, const int range, const float onval, const float offval, const int datatype, const int format, const char* name);
3002
/**
3003
 * Generate a scalar tensor (a tensor with one value) off a value from a struct.
3004
 * @param dataframe The dataframe object that contains the value.
3005
 * @param column_idx The column which contains the value (as datatype).
3006
 * @param structof The offset to the label (as int) from that column. For example, the column
3007
 *        could be a struct and label could be one of the field. You can pass offsetof(S, filename)
3008
 * @param from_dt The datatype of the value.
3009
 * @param to_dt The datatype of the tensor.
3010
 * @param format The format of the tensor.
3011
 * @param name The name of the new column.
3012
 * @return The index of the newly derived column.
3013
 */
3014
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_copy_scalar(ccv_cnnp_dataframe_t* const dataframe, const int column_idx, const off_t structof, const int from_dt, const int to_dt, const int format, const char* name);
3015
/**
3016
 * Generate vector with ones up to a given length, the rest will be zeros. When applied to batched lengths
3017
 * array, this will generate a matrix of these vectors, squared. The derived column will be a tuple of vectors
3018
 * for the given number of columns.
3019
 * @param dataframe The dataframe object that will contain the matrix.
3020
 * @param column_idxs The columns which contain the sequence lengths (a 1d tensor).
3021
 * @param column_idx_size The number of columns. The derived column will be a tuple of vectors.
3022
 * @param variable_size The size of the final vector can vary, depending on the max length of current batch.
3023
 * @param max_length The absolute max length for inputs.
3024
 * @param name The name of the new column.
3025
 * @return The index of the newly derived column.
3026
 */
3027
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_one_squared(ccv_cnnp_dataframe_t* const dataframe,  const int* const column_idxs, const int column_idx_size, const int variable_size, const int max_length, const char* name);
3028
/**
3029
 * Truncate a given matrix (as a list of vector) to the given size provided by another vector. The truncated
3030
 * column will be a tuple of vectors for the given columns.
3031
 * @param dataframe The dataframe object that will contain the matrix.
3032
 * @param vec_idxs The columns of the given matrix to be truncated.
3033
 * @param vec_idx_size The number of columns for vec_idxs.
3034
 * @param len_idxs The columns of the given sizes as a vector.
3035
 * @param len_idx_size The number of columns for len_idxs.
3036
 * @param name The name of the new column.
3037
 * @return The index of the newly derived column.
3038
 */
3039
CCV_WARN_UNUSED(int) ccv_cnnp_dataframe_truncate(ccv_cnnp_dataframe_t* const dataframe, const int* const vec_idxs, const int vec_idx_size, const int* len_idxs, const int len_idx_size, const char* name);
3040
/**
3041
 * Combine multiple tensors in a column into one tensor. This method can take multiple columns, which
3042
 * will result a tuple of tensors. Each tensor in the tuple is a batched one from a given column.
3043
 * @param dataframe The dataframe contains the columns of tensors to be batched.
3044
 * @param column_idxs The columns that contain the tensors.
3045
 * @param column_idx_size The number of columns that contain the tensors.
3046
 * @param batch_count How many tensors in one column to be batched together.
3047
 * @param group_count We can generate many groups of batched tensor. For example, if you have column A, B, C, each
3048
 *        have different tensors. If group_count is 1, the result tuple will be (A_b, B_b, C_b). If group count is
3049
 *        2, the result tuple will be (A_b1, B_b1, C_b1, A_b2, B_b2, C_b2). A_b1 etc. will still contain the same
3050
 *        number of batch_count tensors.
3051
 * @param format The result format of the tensor. We support simply transformation NCHW <=> NHWC with the source tensor.
3052
 * @return The newly created dataframe with the 0-th column is the tuple of batched tensors.
3053
 */
3054
CCV_WARN_UNUSED(ccv_cnnp_dataframe_t*) ccv_cnnp_dataframe_combine_new(ccv_cnnp_dataframe_t* const dataframe, const int* const column_idxs, const int column_idx_size, const int batch_count, const int group_count, const int format);
3055
3056
/** @} */
3057
3058
/**
3059
 * @page dataframe_csv Why to support comma-separated-values files in dataframe?
3060
 *
3061
 * C can be used as a parser. It usually can be fast. But most of them can be buggy and has bugs that can either crash, be
3062
 * exploited, or simply incorrect. There really isn't much motivation for me to start write a parser, even as simple as
3063
 * for CSV files.
3064
 *
3065
 * However, it does brought to my attention that a full-speed (defined by saturating the PCIx4 for SSD) implementation would
3066
 * be beneficial. I am also started to use nnc in many places that is handy to load a csv file and generate some tensors out
3067
 * of it.
3068
 *
3069
 * This implementation plan to use a variant of the two-pass approach documented in
3070
 * https://www.microsoft.com/en-us/research/uploads/prod/2019/04/chunker-sigmod19.pdf while first implemented in
3071
 * https://github.com/wiseio/paratext. It is differentiated from these two in these particular ways:
3072
 *
3073
 * 1. The first pass will not only find the quotes and even / odd CRLF, but also collect statistics on how many lines assuming
3074
 *    the first CRLF is within quote / outside of the quote;
3075
 *
3076
 * 2. The second pass will do a copy into a continuous page mirrors the original csv file, but null-terminate each column, and
3077
 *    assign the start pointer for each.
3078
 *
3079
 * The speculative approach while interesting, for many-core system implementation, it can be challenging and the worse-case
3080
 * scenario is indeed worse.
3081
 *
3082
 * The implementation itself follows https://tools.ietf.org/html/rfc4180, with only customization of delimiters (so it can support
3083
 * table-separated-values) and quotes (so you can choose between " and '). Escaping only supports double-quotes for whatever quote
3084
 * symbol you elect.
3085
 */
3086
3087
/**
3088
 * @defgroup level_5_dataframe_csv Dataframe for Comma-Separated-Values Files
3089
 * @{
3090
 */
3091
enum {
3092
  /* It is a file pointer. */
3093
  CCV_CNNP_DATAFRAME_CSV_FILE = 0,
3094
  /* It is a pointer to a memory. */
3095
  CCV_CNNP_DATAFRAME_CSV_MEMORY = 1,
3096
};
3097
3098
/**
3099
 * Create a dataframe object that read a CSV file. This will eagerly load the file into memory, parse each row / column
3100
 * into null-terminated strings, you can later convert these into numerics if needed. Each column will be a column indexed
3101
 * from 0 to column_size - 1. If there are syntax errors, the parser will make guesses and continue to parse to its best knowledge.
3102
 * If it cannot, we will return null for the object. We support both CRLF, LF, and LFCR termination.
3103
 * @param input The FILE handle for on-disk file, or the pointer to the region of the memory we are going to use.
3104
 * @param type The type of either `CCV_CNNP_DATAFRAME_CSV_FILE` or `CCV_CNNP_DATAFRAME_CSV_MEMORY`
3105
 * @param len The length of the memory region, if it is `CCV_CNNP_DATAFRAME_CSV_MEMORY`.
3106
 * @param delim The delim, it is ',' by default (if you provided '\0')
3107
 * @param quote The quote for escape strings, it is '"' by default (if you provided '\0')
3108
 * @param include_header whether to parse the header seperately. 1 means we treat the first line as header.
3109
 * @param column_size The number of columns in the resulted dataframe.
3110
 * @return A dataframe that can represent the csv file. nullptr if failed.
3111
 */
3112
CCV_WARN_UNUSED(ccv_cnnp_dataframe_t*) ccv_cnnp_dataframe_from_csv_new(void* const input, const int type, const size_t len, const char delim, const char quote, const int include_header, int* const column_size);
3113
3114
/** @} */
3115
3116
/**
3117
 * @page model Models, layers, and Keras
3118
 *
3119
 * With Keras API in mind, this model implementation essentially is a light-weight way to group neural network layers
3120
 * together. This is a rare case in NNC (or ccv in general) where Object-Oriented programming makes sense. I borrowed
3121
 * heavily from Objective-C / C++ to implement this Object-Oriented interface.
3122
 *
3123
 * Now back to elaboration of the Model interface. It is specifically designed with Keras in mind, asking question:
3124
 * If we are going to build Keras high-level API in any languages (Ruby, Python, Swift, Julia), what's the underlying
3125
 * C interface would look like? Here is your answer (hint: it looks very much like just Python Keras API).
3126
 *
3127
 * A model consists of a set of inputs and outputs. This sounds very much like what "Command" is in Level-1 APIs,
3128
 * however, they are different: a model is stateful. For example, a convolution command takes 3 inputs: image, kernel
3129
 * weight and bias, has 1 output: image. A convolution model takes 1 input: image, and 1 output: image. kernel weight
3130
 * and bias are internal states to the model (in Keras, it is called "layer" for convolution, and model means a set of
3131
 * layers. In NNC, that kind of differentiation feels superficial, therefore, a layer is a model).
3132
 *
3133
 * A model can be combined, and a new model can be a combination of other models.
3134
 *
3135
 * The simpler composed model is the sequential model. A sequential model is a model that consists a sequence of models
3136
 * that contains one input and one output. The output of the earlier model feed into the later one, thus, a sequential
3137
 * evaluation path.
3138
 */
3139
3140
/**
3141
 * @defgroup level_5_model Model API
3142
 * @{
3143
 */
3144
3145
/**
3146
 * model type is an abstract type, you won't interact with a naked model ever.
3147
 */
3148
typedef struct ccv_cnnp_model_s ccv_cnnp_model_t;
3149
/**
3150
 * With this type, now in NNC, we have 4 types that represents a "tensor":
3151
 *
3152
 * 1. ccv_nnc_tensor_t / ccv_nnc_tensor_view_t / ccv_nnc_tensor_multiview_t: a concrete tensor with memory allocated.
3153
 *
3154
 * 2. ccv_nnc_tensor_symbol_t: a symbol representation of a tensor, with its data layout, device affinity, and type
3155
 *                             specified.
3156
 *
3157
 * 3. ccv_nnc_tensor_variable_t: in dynamic graph, this represents a concrete tensor with memory allocated, but also
3158
 *                               associated with a recorded execution.
3159
 *
3160
 * 4. ccv_cnnp_model_io_t: this is the most flexible one. No data layout, device affinity or type specified. It can even
3161
 *                         represent a list of tensors rather than just one. This is a handle used by model API to
3162
 *                         associates model inputs / outputs.
3163
 */
3164
typedef struct ccv_cnnp_model_io_s* ccv_cnnp_model_io_t;
3165
/**
3166
 * Create a naked input.
3167
 * @return A ccv_cnnp_model_io_t represents an input.
3168
 */
3169
CCV_WARN_UNUSED(ccv_cnnp_model_io_t) ccv_cnnp_input(void);
3170
/**
3171
 * This method mimics Keras callable for model (thus, override __call__ method in Python class).
3172
 * @param model A model that we can apply a set of inputs to get one output.
3173
 * @param inputs The set of inputs.
3174
 * @param input_size The size of inputs array.
3175
 * @return A ccv_cnnp_model_io_t that represents the output of the given model.
3176
 */
3177
CCV_WARN_UNUSED(ccv_cnnp_model_io_t) ccv_cnnp_model_apply(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t* const inputs, const int input_size);
3178
enum {
3179
  /* Select only weights, no bias terms. */
3180
  CCV_CNNP_PARAMETER_SELECT_WEIGHT = 0,
3181
  /* Select bias terms, no weights. */
3182
  CCV_CNNP_PARAMETER_SELECT_BIAS = 1,
3183
};
3184
/**
3185
 * This method exposes parameter for a model out as a potential input for another model. Since
3186
 * it is a ccv_cnnp_model_io_t, it can also be used by other methods.
3187
 * @param model A model that we can extract parameters out.
3188
 * @param selector The selector for a parameter. ALL_PARAMETERS means all parameters, or you can select CCV_CNNP_PARAMETER_SELECT_WEIGHT or CCV_CNNP_PARAMETER_SELECT_BIAS.
3189
 * @param index The index into a parameter. ALL_PARAMETERS means all parameters.
3190
 */
3191
CCV_WARN_UNUSED(ccv_cnnp_model_io_t) ccv_cnnp_model_parameters(ccv_cnnp_model_t* const model, const int selector, const int index);
3192
/**
3193
 * A notification function such that a model can be notified.
3194
 * This is useful to broadcast a message to all models as sub-model of someone else.
3195
 */
3196
typedef void (*ccv_cnnp_model_notify_f)(const ccv_cnnp_model_t* const model, const int tag, void* const payload, void* const context);
3197
/**
3198
 * Hook into a model such that when there is a notification, the callback will receive it.
3199
 * @param model A model that can be notified.
3200
 * @param func The callback function.
3201
 * @param context The context to be passed along to the callback function.
3202
 **/
3203
void ccv_cnnp_model_notify_hook(ccv_cnnp_model_t* const model, ccv_cnnp_model_notify_f func, void* const context);
3204
/**
3205
 * Notify a model and its sub-models with a tag and a payload. This will be triggered
3206
 * synchronously.
3207
 * @param model A model that will be notified.
3208
 * @param tag An integer to help identify what kind of notification.
3209
 * @param payload A payload pointer that you can carry arbitrary information.
3210
 */
3211
void ccv_cnnp_model_notify(const ccv_cnnp_model_t* const model, const int tag, void* const payload);
3212
/**
3213
 * This method name is deceiving. It return a composed model, not a naked model.
3214
 * This composed model takes set of inputs, and run through various other models to arrive at
3215
 * the set of outputs.
3216
 * @param inputs The set of inputs.
3217
 * @param input_size The size of inputs array.
3218
 * @param outputs The set of outputs.
3219
 * @param output_size The size of outputs array.
3220
 * @param name The unique name of the model.
3221
 * @return A composed model that takes inputs, and generate the outputs.
3222
 */
3223
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_model_new(const ccv_cnnp_model_io_t* const inputs, const int input_size, const ccv_cnnp_model_io_t* const outputs, const int output_size, const char* const name);
3224
/**
3225
 * This method returns a sequential model, which composed from a sequence of models.
3226
 * @param models The list of models, that takes one input, and emit one output, feeding into the subsequent one.
3227
 * @param model_size The size of the list.
3228
 * @param name The unique name of the model.
3229
 * @return A composed model that applies these models one by one in sequence.
3230
 */
3231
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_sequential_new(ccv_cnnp_model_t* const* const models, const int model_size, const char* const name);
3232
/**
3233
 * A model generation function to be called for dynamic models.
3234
 */
3235
typedef ccv_cnnp_model_t* (*ccv_cnnp_model_dynamic_f)(const ccv_nnc_tensor_param_t* const inputs, const int input_size, void* const context);
3236
/**
3237
 * This method returns a model that will be recreated if it is recompiled. Put it this way, you can call
3238
 * ccv_cnnp_model_compile multiple times with different inputs and input size, however, the model will
3239
 * only be recompiled to some extent. For example, if you called ccv_cnnp_reshape, the shape is determined
3240
 * at the moment you create that model, recompilation won't change. There are two ways to workaround this:
3241
 * 1. Use models that doesn't have explicit shape specified, for example, ccv_cnnp_dense, and avoid models
3242
 *    that is not as flexible, such as ccv_cnnp_reshape, or ccv_cnnp_cmd_exec.
3243
 * 2. Create with ccv_cnnp_dynamic_new such that the model will be recreated again whenever recompile.
3244
 * @param func The function to be called to create the model.
3245
 * @param context The context used along to create the model.
3246
 * @param name The unique name of the model.
3247
 * @return A model object that is yet to be created until build.
3248
 */
3249
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_dynamic_new(ccv_cnnp_model_dynamic_f func, void* const context, const char* const name);
3250
/**
3251
 * Prepare the model to be trained, the input specifies the batch size etc.
3252
 * Input size technically is not needed, here is a safety check.
3253
 * @param model The model to be compiled.
3254
 * @param inputs The tensor parameters for the model's inputs, that can be used to derive all tensor shapes.
3255
 * @param input_size The size of the inputs array.
3256
 * @param minimizer The wrapped command that represents a particular optimization strategy.
3257
 * @param loss The wrapped command that computes the loss function.
3258
 */
3259
void ccv_cnnp_model_compile(ccv_cnnp_model_t* const model, const ccv_nnc_tensor_param_t* const inputs, const int input_size, const ccv_nnc_cmd_t minimizer, const ccv_nnc_cmd_t loss);
3260
/**
3261
 * Absorb a new model into the existing model. This requires the new model has exactly the same parameters
3262
 * but other dimensionality's can change. The new model has to not be compiled yet, its life-cycle management
3263
 * will be take over by the existing model. You don't need to free it separately.
3264
 * @param model The existing model.
3265
 * @param init The new model.
3266
 * @param inputs The tensor parameters for the model's inputs, that can be used to derive all tensor shapes.
3267
 * @param input_size The size of the inputs array.
3268
 */
3269
void ccv_cnnp_model_absorb(ccv_cnnp_model_t* const model, ccv_cnnp_model_t* const init, const ccv_nnc_tensor_param_t* const inputs, const int input_size);
3270
/**
3271
 * Create a copy of an existing model.
3272
 * @param model The existing model.
3273
 * @return The new model that is exactly the same copy of the old one.
3274
 */
3275
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_model_copy(const ccv_cnnp_model_t* const model);
3276
/**
3277
 * Get the output size of the model.
3278
 * @param model The existing model.
3279
 * @return The output size of the model.
3280
 */
3281
CCV_WARN_UNUSED(int) ccv_cnnp_model_output_size(const ccv_cnnp_model_t* const model);
3282
/**
3283
 * Compute the shape of the output tensor after the model applied to the input.
3284
 * This can only be called after the model is compiled with proper input parameters.
3285
 * @param model The model to compute the output shapes.
3286
 * @param outputs The computed tensor parameters in the output.
3287
 * @param output_size The size of the output array, it has to match the model's output.
3288
 */
3289
void ccv_cnnp_model_tensor_auto(ccv_cnnp_model_t* const model, ccv_nnc_tensor_param_t* const outputs, const int output_size);
3290
/**
3291
 * Generate output that can be parsed by GraphViz (DOT language).
3292
 * @param model The composed model.
3293
 * @param flags Either CCV_NNC_SHORT_DOT_GRAPH or CCV_NNC_LONG_DOT_GRAPH
3294
 * @param outs The output file streams.
3295
 * @param out_size The size of output file stream array.
3296
 */
3297
void ccv_cnnp_model_dot(const ccv_cnnp_model_t* const model, const int flags, FILE** const outs, const int out_size);
3298
/**
3299
 * Fit a model to a given input / output. This is a combination of running ccv_cnnp_model_evaluate /
3300
 * ccv_cnnp_model_backward / ccv_cnnp_model_apply_gradients. The difference is that when calling
3301
 * individual functions, the graph is compiled piece by piece, thus, is less efficient than calling
3302
 * ccv_cnnp_model_fit directly. However, having the separate functions makes this implementation much
3303
 * more versatile, for example, can accumulate gradients for multiple batches, or using custom gradients
3304
 * etc.
3305
 * @param model The composed model.
3306
 * @param inputs The input tensors.
3307
 * @param input_size The size of the input tensors array.
3308
 * @param fits The target tensors.
3309
 * @param fit_size The size of the target tensors array.
3310
 * @param outputs The actual outputs from the model.
3311
 * @param output_size The size of the outputs array.
3312
 * @param tensor_tape An opaque tensor tape object to "backpropagate through time".
3313
 * @param stream_context The stream where the fit can be executed upon.
3314
 */
3315
void ccv_cnnp_model_fit(ccv_cnnp_model_t* const model, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const fits, const int fit_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const stream_context);
3316
enum {
3317
  /**
3318
   * Don't disable any outgrad.
3319
   */
3320
  CCV_CNNP_DISABLE_OUTGRAD_NONE = (uint64_t)0,
3321
  /**
3322
   * Disable all inputs' outgrads.
3323
   */
3324
  CCV_CNNP_DISABLE_OUTGRAD_ALL = (uint64_t)(int64_t)-1,
3325
};
3326
/**
3327
 * The parameters for how evaluation should behave.
3328
 */
3329
typedef struct {
3330
  int requires_grad; /**< Whether we need to keep intermediate results for gradient computations. */
3331
  int is_test; /**< Whether we evaluate it as test, or just as forward pass of the training process. */
3332
  uint64_t disable_outgrad; /**< Whether we can compute outflow gradients when call ccv_cnnp_model_backward later, this is a bitmask, you can mark for which input the outgrad is disabled. */
3333
} ccv_cnnp_evaluate_param_t;
3334
/**
3335
 * Evaluate model with output.
3336
 * @param model The composed model.
3337
 * @param params The parameters for how evaluation should behave.
3338
 * @param inputs The input tensors.
3339
 * @param input_size The size of the input tensors array.
3340
 * @param outputs The actual outputs from the model.
3341
 * @param output_size The size of the outputs array.
3342
 * @param tensor_tape An opaque tensor tape object to "backpropagate through time".
3343
 * @param stream_context The stream where the evaluation can be executed upon.
3344
 */
3345
void ccv_cnnp_model_evaluate(ccv_cnnp_model_t* const model, const ccv_cnnp_evaluate_param_t params, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const stream_context);
3346
/**
3347
 * Based on the input gradients, compute the output gradients (w.r.t. the inputs). This also adds parameter gradients.
3348
 * @param model The composed model.
3349
 * @param ingrads The input gradients.
3350
 * @param ingrad_size The size of the input gradients array.
3351
 * @param outgrads The output gradients (w.r.t. the inputs).
3352
 * @param outgrad_size The size of the output gradients array.
3353
 * @param tensor_tape An opaque tensor tape object to "backpropagate through time".
3354
 * @param stream_context The stream where the gradient computation can be executed upon.
3355
 */
3356
void ccv_cnnp_model_backward(ccv_cnnp_model_t* const model, ccv_nnc_tensor_t* const* const ingrads, const int ingrad_size, ccv_nnc_tensor_t* const* const outgrads, const int outgrad_size, ccv_nnc_tensor_tape_t* const tensor_tape, ccv_nnc_stream_context_t* const stream_context);
3357
/**
3358
 * Apply the computed gradients to the parameter tensors.
3359
 * @param model The composed model.
3360
 * @param stream_context The stream where the gradient computation can be executed upon.
3361
 */
3362
void ccv_cnnp_model_apply_gradients(ccv_cnnp_model_t* const model, ccv_nnc_stream_context_t* const stream_context);
3363
enum {
3364
  /**
3365
   * This is the default flag, if the model is not initialized, will attempt to read from the disk.
3366
   * Otherwise, will persist existing parameters to disk.
3367
   */
3368
  CCV_CNNP_MODEL_CHECKPOINT_READ_WRITE,
3369
  /**
3370
   * Only read parameters out of disk, even it is already initialized.
3371
   */
3372
  CCV_CNNP_MODEL_CHECKPOINT_READ_ONLY,
3373
  /**
3374
   * Only write parameters to disk.
3375
   */
3376
  CCV_CNNP_MODEL_CHECKPOINT_WRITE_ONLY,
3377
};
3378
/**
3379
 * This method checkpoint the given model. If the model is initialized, it will persist all parameters
3380
 * to the given file path. If it is not initialized, this method will try to load tensors off the
3381
 * disk. Under the hood, it calls ccv_cnnp_model_write / ccv_cnnp_model_read when appropriate.
3382
 * @param model The composed model.
3383
 * @param fn The file name.
3384
 * @param flags Whether we perform read / write on this checkpoint, or read only / write only.
3385
 */
3386
void ccv_cnnp_model_checkpoint(ccv_cnnp_model_t* const model, const char* const fn, const int flags);
3387
/**
3388
 * Write model's tensors to a SQLite database with a given name. Note that we specifically say
3389
 * "model's tensors" because it doesn't persist the model's structure. Hence, you shouldn't
3390
 * expect us to take a name to then have a fully functional model restored from there. You still
3391
 * need to construct the model. This method only write the tensors (weights and other internal ones)
3392
 * to disk.
3393
 * @param model The model.
3394
 * @param handle The SQLite handle.
3395
 * @param name The name to find the tensors related to the model in the database.
3396
 * @return CCV_IO_FINAL for success, otherwise error.
3397
 */
3398
int ccv_cnnp_model_write(const ccv_cnnp_model_t* const model, void* const handle, const char* const name);
3399
/**
3400
 * Read model's tensors from a SQLite database with a given name.
3401
 * @param handle The SQLite handle.
3402
 * @param name The name to find the tensors related to the model in the database.
3403
 * @param model_out The model which you want to restore the tensors. It should have the same
3404
 *                  structure as the one in write to.
3405
 * @return CCV_IO_FINAL for success, otherwise error.
3406
 */
3407
int ccv_cnnp_model_read(void* const handle, const char* const name, const ccv_cnnp_model_t* const model_out);
3408
/**
3409
 * Apply data parallel to the composed model. This method has to be called before we call either
3410
 * evaluate or fit and after the model is compiled.
3411
 * @param model The composed model.
3412
 * @param parallel Number of devices we want to run on. 0 will use all devices available. 1 will skip.
3413
 */
3414
void ccv_cnnp_model_set_data_parallel(ccv_cnnp_model_t* const model, const int parallel);
3415
/**
3416
 * Apply memory compression to the composed model. The memory compression technique can reduce memory
3417
 * usage up to 75% comparing with raw mix-precision model during training time.
3418
 * @param model The composed model.
3419
 * @param memory_compression Whether to enable the memory compression (1 - enable, 0 - disable (default))
3420
 */
3421
void ccv_cnnp_model_set_memory_compression(ccv_cnnp_model_t* const model, const int memory_compression);
3422
/**
3423
 * Set compile parameters on the model so it compiles the graph with the said parameters.
3424
 * @param model The composed model.
3425
 * @param compile_params A ccv_nnc_symbolic_graph_compile_param_t struct defines compilation parameters.
3426
 */
3427
void ccv_cnnp_model_set_compile_params(ccv_cnnp_model_t* const model, const ccv_nnc_symbolic_graph_compile_param_t compile_params);
3428
/**
3429
 * This method set the max workspace size. If the graph is already compiled. It will re-run
3430
 * autotune to use the new workspace size to find the best algorithm.
3431
 * @param model The composed model.
3432
 * @param workspace_size The size in bytes that we can use as workspace (scratch memory).
3433
 */
3434
void ccv_cnnp_model_set_workspace_size(ccv_cnnp_model_t* const model, size_t workspace_size);
3435
/**
3436
 * Set a parameter that is specified by the parameter span. This will override whatever value in that
3437
 * parameter. The given tensor should match the dimension of the parameter. It doesn't matter whether
3438
 * the given tensor is on CPU or GPU, it will be copied over. This method is limited, it can only set
3439
 * tensor once the model is compiled.
3440
 * @param model The composed model.
3441
 * @param parameter The parameter that is used to specify which parameter to override.
3442
 * @param tensor The tensor contains the value we want to copy over.
3443
 */
3444
void ccv_cnnp_model_set_parameter(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameter, const ccv_nnc_tensor_t* const tensor);
3445
/**
3446
 * Copy a parameter that is specified by the parameter span out of a model. This will override the value
3447
 * in the tensor you provided. The given tensor should match the dimension of the parameter and should
3448
 * already be allocated. It doesn't matter whether the given tensor is on CPU or GPU.
3449
 * @param model The composed model.
3450
 * @param parameter The parameter that is used to specify which parameter to override.
3451
 * @param tensor The tensor that receives value.
3452
 */
3453
void ccv_cnnp_model_parameter_copy(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameter, ccv_nnc_tensor_t* const tensor);
3454
/**
3455
 * Set parameters from another model. This will override whatever values in these parameters. The
3456
 * given parameters from another model should match the dimension of the parameter. It doesn't matter
3457
 * whether the given tensor is on CPU or GPU. This method can only set when both models are compiled.
3458
 * @param model The composed model to be set on parameters.
3459
 * @param parameters The parameters to be override.
3460
 * @param from_model The model to copy parameters from.
3461
 * @param from_parameters The parameters to be copied from.
3462
 */
3463
void ccv_cnnp_model_set_parameters(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameters, const ccv_cnnp_model_t* const from_model, const ccv_cnnp_model_io_t from_parameters);
3464
/**
3465
 * Process parameters such as exponential averaging.
3466
 * parameters = zip(from_parameters, to_parameters).map { cmd(to_parameter, from_parameter) }
3467
 * The order is selected in such way because many of our commands only support inplace op if the first
3468
 * parameter matches.
3469
 * @param model The composed model to have parameters zip mapped.
3470
 * @param parameters The parameters to be written (and read).
3471
 * @param cmd The command to apply on the parameters.
3472
 * @param hint The hint supplied to the cmd.
3473
 * @param flags The flags supplied to the cmd.
3474
 * @param stream_context The stream context to be associated with.
3475
 * @param from_model The other composed model to have parameters zipped.
3476
 * @param from_parameters The parameters to be read.
3477
 */
3478
void ccv_cnnp_model_parameters_zip_map(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameters, const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_stream_context_t* const stream_context, const ccv_cnnp_model_t* const from_model, const ccv_cnnp_model_io_t from_parameters);
3479
/**
3480
 * Process parameters such as clipping. parameters = parameters.map { cmd(parameter) }
3481
 * @param model The composed model to have parameters mapped.
3482
 * @param parameters The parameters to be mapped.
3483
 * @param cmd The command to apply on the parameters.
3484
 * @param hint The hint supplied to the cmd.
3485
 * @param flags The flags supplied to the cmd.
3486
 * @param stream_context The stream context to be associated with.
3487
 */
3488
void ccv_cnnp_model_parameters_map(ccv_cnnp_model_t* const model, const ccv_cnnp_model_io_t parameters, const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_stream_context_t* const stream_context);
3489
/**
3490
 * Set a new minimizer for the model. This is useful when you need to update learn rate for stochastic
3491
 * gradient descent for example. This method can be called any time during the training process (after
3492
 * compilation).
3493
 * @param model The composed model.
3494
 * @param minimizer The wrapped command that represents a new optimization strategy.
3495
 * @param reset Reset all previous states of minimizers. This only makes sense if both parameters and parameter_size is 0.
3496
 * @param parameters The parameters to be applied the minimizer on. 0 meant for all.
3497
 * @param parameter_size The number of parameter spans.
3498
 */
3499
void ccv_cnnp_model_set_minimizer(ccv_cnnp_model_t* const model, const ccv_nnc_cmd_t minimizer, const int reset, const ccv_cnnp_model_io_t* const parameters, const int parameter_size);
3500
/**
3501
 * Retrieve the default minimizer for the model. This is set either you call model compile or
3502
 * ccv_cnnp_model_set_minimizer with no parameter spans.
3503
 * @param model The composed model.
3504
 * @return The minimizer command.
3505
 */
3506
CCV_WARN_UNUSED(ccv_nnc_cmd_t) ccv_cnnp_model_minimizer(ccv_cnnp_model_t* const model);
3507
/**
3508
 * Get the default stream from a compiled model. If the model is not compiled, the default stream is
3509
 * 0.
3510
 * @param model The composed model.
3511
 * @return The default stream for this model.
3512
 */
3513
CCV_WARN_UNUSED(ccv_nnc_stream_context_t*) ccv_cnnp_model_default_stream(const ccv_cnnp_model_t* const model);
3514
/**
3515
 * Get the allocated memory size (exclude workspace) from a compiled model. If the model is not compiled
3516
 * the size is 0.
3517
 * @param model The composed model.
3518
 * @return The number of bytes for memory allocated.
3519
 */
3520
CCV_WARN_UNUSED(uint64_t) ccv_cnnp_model_memory_size(const ccv_cnnp_model_t* const model);
3521
/**
3522
 * Free a given model.
3523
 * @param model The composed model.
3524
 */
3525
void ccv_cnnp_model_free(ccv_cnnp_model_t* const model);
3526
3527
/** @} */
3528
3529
/**
3530
 * @defgroup level_5_model_add_ons Model Add-ons
3531
 * @{
3532
 */
3533
3534
enum {
3535
  CCV_CNNP_IO, /**< The parameter is a ccv_cnnp_io_t. */
3536
  CCV_CNNP_NO_TENSOR, /**< The parameter is not used. */
3537
  CCV_CNNP_TENSOR_NOT_OUTPUT, /**< This parameter indicates this is a tensor parameter, but it is not an output reflected as ccv_cnnp_io_t */
3538
  CCV_CNNP_INIT_SHARED_TENSOR, /**< The parameter is a provided tensor for initialization. */
3539
  CCV_CNNP_INIT_SHARED_TENSOR_AS_TRAINABLE, /**< The parameter is a provided tensor that can be updated. */
3540
};
3541
3542
typedef void(*ccv_cnnp_state_initializer_f)(void* const context, const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const input, const ccv_nnc_tensor_symbol_t output_symbol);
3543
typedef void(*ccv_cnnp_cmd_exec_init_state_f)(const ccv_nnc_tensor_symbol_t tensor_symbol, const ccv_cnnp_state_initializer_f initializer, void* const initializer_context, void* const context);
3544
typedef void(*ccv_cnnp_cmd_exec_init_state_deinit_f)(void* const context);
3545
typedef void*(*ccv_cnnp_cmd_exec_init_state_copy_f)(void* const context);
3546
3547
typedef struct {
3548
  ccv_nnc_tensor_param_t info; /**< The tensor parameter for this one. */
3549
  void* context; /**< The context for which we initialize tensor. */
3550
  ccv_cnnp_cmd_exec_init_state_f init; /**< The function to init state for a tensor. */
3551
  ccv_cnnp_cmd_exec_init_state_copy_f copy; /**< The function to make a copy of the context. */
3552
  ccv_cnnp_cmd_exec_init_state_deinit_f deinit; /**< The function to release the context. */
3553
} ccv_cnnp_cmd_exec_io_init_state_t;
3554
3555
typedef struct {
3556
  int type; /**< The type of the parameter, could be CCV_CNNP_IO, NO_TENSOR, INIT_SHARED_TENSOR, or INIT_SHARED_TENSOR_TRAINABLE */
3557
  ccv_cnnp_cmd_exec_io_init_state_t init_state; /** The set of state to initialize the given tensor. */
3558
} ccv_cnnp_cmd_exec_io_t;
3559
/**
3560
 * A generic model based on the command. If the tensors are labeled as ccv_cnnp_io_t, it will participate
3561
 * as the input / output of the model. If it is a init tensor, the model will use this tensor for that parameter.
3562
 * More over, if it is marked as parameter, that tensor will be differentiated against when you call
3563
 * ccv_cnnp_model_fit. This model however doesn't take over ownership of the tensor. You should manage the life
3564
 * cycle of the given tensor and it is your responsibility to make sure they outlive the model. Also, all inputs and
3565
 * outputs marked as init tensors will be shared if you reuse this model in other places.
3566
 * @param cmd The command to generate this model.
3567
 * @param hint The hint to run the command.
3568
 * @param flags The flags with the command.
3569
 * @param inputs A list of ccv_cnnp_cmd_exec_io_t identify each input as either a init tensor or a ccv_cnnp_io_t.
3570
 * @param input_size The size of input list.
3571
 * @param outputs A list of types identify each output as ccv_cnnp_io_t or a none tensor.
3572
 * @param output_size The size of the outputs. There is no need to give ccv_cnnp_tensor_param_t for outputs because
3573
 *        all of them are CCV_CNNP_IO type.
3574
 * @param name The unique name of the model.
3575
 * @return A model based on the given command.
3576
 */
3577
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_cmd_exec(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, const ccv_cnnp_cmd_exec_io_t* const inputs, const int input_size, const int* const outputs, const int output_size, const char* const name);
3578
/**
3579
 * Copy a tensor as initialization for the given parameter.
3580
 * @param tensor The tensor to copy from.
3581
 * @return A init_state that can be passed to ccv_cnnp_cmd_exec_io_t
3582
 */
3583
CCV_WARN_UNUSED(ccv_cnnp_cmd_exec_io_init_state_t) ccv_cnnp_cmd_exec_io_copy(const ccv_nnc_tensor_t* const tensor);
3584
/**
3585
 * Initialize a given parameter with the command.
3586
 * @param cmd The command to call when need to initialize.
3587
 * @param hint The hint to accompany the command.
3588
 * @param flags The flags to accompany the command.
3589
 * @param params The tensor configuration.
3590
 * @return A init_state that can be passed to ccv_cnnp_cmd_exec_io_t
3591
 */
3592
CCV_WARN_UNUSED(ccv_cnnp_cmd_exec_io_init_state_t) ccv_cnnp_cmd_exec_io_set_by(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, const ccv_nnc_tensor_param_t params);
3593
3594
typedef struct {
3595
  ccv_nnc_tensor_symbol_t symbol; /**< The tensor symbol this is reference to. */
3596
  int type; /**< The type of the parameter, could be CCV_CNNP_IO, INIT_SHARED_TENSOR, or INIT_SHARED_TENSOR_TRAINABLE */
3597
  ccv_cnnp_cmd_exec_io_init_state_t init_state; /** The set of state to initialize the given tensor. */
3598
} ccv_cnnp_tensor_symbol_param_t;
3599
/**
3600
 * A generic model based on the symbolic graph we provided. A list of tensor symbols are labeled whether it
3601
 * is ccv_cnnp_io_t or not (we identify whether this is a input or output based on whether it is in the graph).
3602
 * If it is not, we init it with a given tensor. If it is marked as parameter, that tensor will be differentiated
3603
 * against when you call ccv_cnnp_model_fit. The model doesn't take ownership over the init tensors. You are
3604
 * responsible to make sure the init tensors outlive the model until the initialization occurred. Also, these
3605
 * tensors will be shared if the model is reused.
3606
 * @param graph The symbolic graph that is our blue print for this model.
3607
 * @param tensor_symbol_params The list of tensor symbol parameters that labels a given symbol.
3608
 * @param tensor_symbol_param_size The size of the list.
3609
 * @param inputs The inputs to this graph. We can figure out which ones are inputs, but this gives us the order.
3610
 * @param input_size The size of the input list.
3611
 * @param outputs The outputs from this graph. We can figure out which ones are outputs, but this gives us the order.
3612
 * @param output_size The size of the output list.
3613
 * @param name The unique name of the model.
3614
 * @return A model based on the given symbolic graph.
3615
 */
3616
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_graph(const ccv_nnc_symbolic_graph_t* const graph, const ccv_cnnp_tensor_symbol_param_t* const tensor_symbol_params, const int tensor_symbol_param_size, ccv_nnc_tensor_symbol_t* const inputs, const int input_size, ccv_nnc_tensor_symbol_t* const outputs, const int output_size, const char* const name);
3617
/**
3618
 * Sum multiple input tensors together.
3619
 * @param name The unique name of the model.
3620
 * @return A model that can be applied with multiple inputs, and generate output that is a sum of the inputs.
3621
 */
3622
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_sum(const char* const name);
3623
/**
3624
 * Concatenate input tensors together.
3625
 * @param axis Along this axis, we concatenate tensors together. Other dimensions need to be exactly the same.
3626
 * @param name The unique name of the model.
3627
 * @return A model that can be applied with multiple inputs, and generate output that is a concatenation of the inputs.
3628
 */
3629
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_concat(const int axis, const char* const name);
3630
/**
3631
 * A convolution model.
3632
 * @param groups The number of kernel groups in the model.
3633
 * @param filters The total number of filters in the model (filters = groups * per group filters).
3634
 * @param kdim The dimensions of the kernel.
3635
 * @param no_bias Whether has bias term or not.
3636
 * @param hint The hint for alignment.
3637
 * @param name The unique name of the model.
3638
 * @return A convolution model.
3639
 */
3640
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_convolution(const int groups, const int filters, const int kdim[CCV_NNC_MAX_DIM_ALLOC], const int no_bias, ccv_nnc_hint_t hint, const char* const name);
3641
/**
3642
 * A dense layer model.
3643
 * @param count The output dimension.
3644
 * @param no_bias Whether has a bias term or not.
3645
 * @param name The unique name of the model.
3646
 * @return A dense layer model.
3647
 */
3648
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_dense(const int count, const int no_bias, const char* const name);
3649
/**
3650
 * A batch norm layer model.
3651
 * @param momentum The momentum in batch norm parameter.
3652
 * @param epsilon The epsilon in batch norm parameter.
3653
 * @param name The unique name of the model.
3654
 * @return A batch norm layer model.
3655
 */
3656
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_batch_norm(const float momentum, const float epsilon, const char* const name);
3657
/**
3658
 * A RELU activation layer model.
3659
 * @return A RELU activation layer model.
3660
 */
3661
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_relu(const char* const name);
3662
/**
3663
 * A sigmoid activation layer model.
3664
 * @return A sigmoid activation layer model.
3665
 */
3666
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_sigmoid(const char* const name);
3667
/**
3668
 * A swish activation layer model.
3669
 * @return A swish activation layer model.
3670
 */
3671
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_swish(const char* const name);
3672
/**
3673
 * A softmax activation layer model.
3674
 * @return A softmax activation layer model.
3675
 */
3676
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_softmax(const char* const name);
3677
/**
3678
 * A max pool model.
3679
 * @param kdim The pooling window dimension.
3680
 * @param hint The hint for alignment.
3681
 * @param name The unique name of the model.
3682
 * @return A max pool model.
3683
 */
3684
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_max_pool(const int kdim[CCV_NNC_MAX_DIM_ALLOC], const ccv_nnc_hint_t hint, const char* const name);
3685
/**
3686
 * An average pool model.
3687
 * @param kdim The pooling window dimension.
3688
 * @param hint The hint for alignment.
3689
 * @param name The unique name of the model.
3690
 * @return An average pool model.
3691
 */
3692
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_average_pool(const int kdim[CCV_NNC_MAX_DIM_ALLOC], const ccv_nnc_hint_t hint, const char* const name);
3693
/**
3694
 * Reshape an input into a different dimension.
3695
 * @param dim The new dimension for the input.
3696
 * @param ofs The offset on each of the dimension.
3697
 * @param inc The line size of each dimension.
3698
 * @param name The unique name of the model.
3699
 * @return A reshape layer model.
3700
 */
3701
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_reshape(const int dim[CCV_NNC_MAX_DIM_ALLOC], const int ofs[CCV_NNC_MAX_DIM_ALLOC], const int inc[CCV_NNC_MAX_DIM_ALLOC], const char* const name);
3702
/**
3703
 * Flatten an input tensor into a one dimensional array.
3704
 * @param name The unique name of the model.
3705
 * @return A flatten layer model.
3706
 */
3707
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_flatten(const char* const name);
3708
/**
3709
 * A layer norm model.
3710
 * @param epsilon The epsilon in layer norm parameter.
3711
 * @param axis The axis are the feature axis to compute norm.
3712
 * @param axis_count How many axis we count as feature.
3713
 * @param name The unique name of the model.
3714
 * @return A layer norm model.
3715
 */
3716
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_layer_norm(const float epsilon, const int axis[CCV_NNC_MAX_DIM_ALLOC], const int axis_count, const char* const name);
3717
/**
3718
 * Add two input tensors together. Different from sum because this support broadcasting.
3719
 * @param p The weight for the first input.
3720
 * @param q The weight for the second input.
3721
 * @param name The unique name of the model.
3722
 * @return A model that can be applied with two inputs, and generate output that is a product of the inputs.
3723
 */
3724
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_add(const float p, const float q, const char* const name);
3725
/**
3726
 * Multiple two input tensors together.
3727
 * @param p The weight for the output.
3728
 * @param name The unique name of the model.
3729
 * @return A model that can be applied with two inputs, and generate output that is a product of the inputs.
3730
 */
3731
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_mul(const float p, const char* const name);
3732
/**
3733
 * A scalar multiplication model. Y = aX where a is a scalar.
3734
 * @param a The scalar parameter.
3735
 * @param name The unique name of the model.
3736
 * @return A scalar multiplication model.
3737
 */
3738
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_scalar_mul(const float a, const char* const name);
3739
/**
3740
 * A matrix transpose model.
3741
 * @param axis_a The axis to be exchanged with axis_b
3742
 * @param axis_b The axis to be exchanged with axis_a
3743
 * @param name The unique name of the model.
3744
 * @return A matrix transpose model.
3745
 */
3746
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_transpose(const int axis_a, const int axis_b, const char* const name);
3747
/**
3748
 * A batched matrix multiplication model.
3749
 * @param transpose_a The axis to be transposed in the first matrix.
3750
 * @param transpose_b The axis to be transposed in the second matrix.
3751
 * @param name The unique name of the model.
3752
 * @return A batched matrix multiplication model.
3753
 */
3754
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_matmul(const int transpose_a[2], const int transpose_b[2], const char* const name);
3755
/**
3756
 * A dropout model.
3757
 * @param p The probability to drop the current value.
3758
 * @param entirety Drop the whole layer with the given probability.
3759
 * @param name The unique name of the model.
3760
 * @return A dropout model.
3761
 */
3762
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_dropout(const float p, const int entirety, const char* const name);
3763
/**
3764
 * A masked fill model.
3765
 * @param eq If a value in the given mask tensor is equal to this.
3766
 * @param fill Fill in this value to the output tensor.
3767
 * @param name The unique name of the model.
3768
 * @return A masked fill model.
3769
 */
3770
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_masked_fill(const float eq, const float fill, const char* const name);
3771
/**
3772
 * A index select model.
3773
 * @param datatype The data type of the vocabulary.
3774
 * @param vocab_size The size of the vocabulary.
3775
 * @param embed_size The size of the embedding.
3776
 * @param name The unique name of the model.
3777
 * @return A index select model.
3778
 */
3779
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_index_select(const int datatype, const int vocab_size, const int embed_size, const char* const name);
3780
/**
3781
 * A upsample model.
3782
 * @param width_scale The scale of the width of the input.
3783
 * @param height_scale The scale of the height of the input.
3784
 * @param name The unique name of the model.
3785
 * @return A upsample model.
3786
 */
3787
ccv_cnnp_model_t* ccv_cnnp_upsample(const float width_scale, const float height_scale, const char* const name);
3788
/**
3789
 * A sum value reducer model.
3790
 * @param axis The axis to be reduced.
3791
 * @param axis_count The size of the axis array.
3792
 * @param name The unique name of the model.
3793
 * @return A sum value reducer model.
3794
 */
3795
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_reduce_sum(const int* const axis, const int axis_count, const char* const name);
3796
/**
3797
 * A max value reducer model.
3798
 * @param axis The axis to be reduced.
3799
 * @param axis_count The size of the axis array.
3800
 * @param name The unique name of the model.
3801
 * @return A max value reducer model.
3802
 */
3803
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_reduce_max(const int* const axis, const int axis_count, const char* const name);
3804
/**
3805
 * A element-wise min model.
3806
 * @param name The unique name of the model.
3807
 * @return A element-wise min model.
3808
 */
3809
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_min(const char* const name);
3810
/**
3811
 * A element-wise max model.
3812
 * @param name The unique name of the model.
3813
 * @return A element-wise max model.
3814
 */
3815
CCV_WARN_UNUSED(ccv_cnnp_model_t*) ccv_cnnp_max(const char* const name);
3816
3817
/** @} */
3818
3819
/** @} */
3820
3821
#endif