Bug Summary

File: nnc/ccv_cnnp_model_gradient_checkpointing.c
Warning: line 74, column 1
Assigned value is garbage or undefined
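
The flagged location is the KHASH_SET_INIT_INT(ccv_cnnp_tensor_symbol_set) macro on line 74 of the listing below; the checker reaches the report inside the generated kh_resize rehash loop after two kh_put calls on a freshly created set (path steps 10-59). The following is a minimal sketch of that same usage pattern, assuming the stock klib khash.h header rather than the actual ccv build; the type name demo_set and the driver main are purely illustrative, and whether this stripped-down version reproduces the exact diagnostic depends on the analyzer version.

#include <stdio.h>
#include "khash.h" /* assumes klib's khash.h is available on the include path */

/* An int-keyed set, the same shape as ccv_cnnp_tensor_symbol_set on line 74. */
KHASH_SET_INIT_INT(demo_set)

int main(void)
{
	khash_t(demo_set)* const set = kh_init(demo_set);
	const int keys[] = { 3, 7 };
	int i;
	for (i = 0; i < 2; i++)
	{
		int ret;
		/* kh_put may call kh_resize; in the report's path, the rehash loop of the
		 * second resize is where "Assigned value is garbage or undefined" is
		 * reported, at the generated statement khint32_t key = h->keys[j]. */
		kh_put(demo_set, set, keys[i], &ret);
	}
	printf("%d keys stored\n", (int)kh_size(set));
	kh_destroy(demo_set, set);
	return 0;
}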

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-unknown-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name ccv_cnnp_model_gradient_checkpointing.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 2 -pic-is-pie -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -target-feature +sse2 -tune-cpu generic -debugger-tuning=gdb -fdebug-compilation-dir=/home/liu/actions-runner/_work/ccv/ccv/lib/nnc -fcoverage-compilation-dir=/home/liu/actions-runner/_work/ccv/ccv/lib/nnc -resource-dir /usr/local/lib/clang/19 -I ../ -I /usr/local/cuda/include -D HAVE_CBLAS -D HAVE_LIBPNG -D HAVE_LIBJPEG -D HAVE_FFTW3 -D HAVE_PTHREAD -D HAVE_LIBLINEAR -D HAVE_TESSERACT -D HAVE_AVCODEC -D HAVE_AVFORMAT -D HAVE_AVUTIL -D HAVE_SWSCALE -D HAVE_SSE2 -D HAVE_GSL -D HAVE_CUDA -D HAVE_CUDNN -D HAVE_NCCL -D USE_SYSTEM_CUB -I /usr/local/include -internal-isystem /usr/local/lib/clang/19/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/12/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -ferror-limit 19 -fgnuc-version=4.2.1 -fskip-odr-check-in-gmf -vectorize-loops -vectorize-slp -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/liu/actions-runner/_work/ccv/ccv/_analyze/2024-10-18-174139-54102-1 -x c ccv_cnnp_model_gradient_checkpointing.c
1#include "ccv_nnc.h"
2#include "ccv_nnc_easy.h"
3#include "ccv_nnc_internal.h"
4#include "ccv_internal.h"
5#include "_ccv_cnnp_model.h"
6// This can be removed once we organized ccv_cnnp_apply_gradient_checkpoints better.
7#include "_ccv_nnc_symbolic_graph.h"
8
9void ccv_cnnp_model_gradient_checkpoints_cleanup_after_build(ccv_cnnp_compiled_data_t* const compiled_data, ccv_nnc_symbolic_graph_t* const graph)
10{
11 ccv_array_t* const gradient_checkpoints = compiled_data->gradient_checkpoints;
12 if (!gradient_checkpoints || gradient_checkpoints->rnum == 0) // No saved gradient checkpoints, this is an easy way out.
13 return;
14 int i, j;
15 const ccv_nnc_tensor_symbol_info_t* const tensor_symbol_info = (const ccv_nnc_tensor_symbol_info_t*)ccv_array_get(graph->tensor_symbol_info, 0)((void*)(((char*)((graph->tensor_symbol_info)->data)) +
(size_t)(graph->tensor_symbol_info)->rsize * (size_t)(
0)))
;
16 // Go through to check if any tensors that supposes in this map is removed.
17 for (i = 0; i < gradient_checkpoints->rnum; i++)
18 {
19 ccv_cnnp_model_gradient_checkpoint_t* const checkpoint = (ccv_cnnp_model_gradient_checkpoint_t*)ccv_array_get(gradient_checkpoints, i)((void*)(((char*)((gradient_checkpoints)->data)) + (size_t
)(gradient_checkpoints)->rsize * (size_t)(i)))
;
20 for (j = 0; j < checkpoint->tensor_symbols->rnum; j++)
21 {
22 ccv_nnc_tensor_symbol_t* const symbol = ((ccv_nnc_tensor_symbol_t*)ccv_array_get(checkpoint->tensor_symbols, j)((void*)(((char*)((checkpoint->tensor_symbols)->data)) +
(size_t)(checkpoint->tensor_symbols)->rsize * (size_t)
(j)))
);
23 if (symbol->d >= 0 && symbol->d < graph->tensor_symbol_info->rnum)
24 // If it is dead, we need to remove this symbol.
25 if (CCV_NNC_TENSOR_SYMBOL_IS_DEAD(tensor_symbol_info[symbol->d].flags)((tensor_symbol_info[symbol->d].flags) & CCV_NNC_TENSOR_SYMBOL_DEAD
)
)
26 {
27 symbol->d = -1;
28 symbol->graph = 0;
29 }
30 }
31 }
32}
33
34typedef struct {
35 ccv_array_t* outgoings;
36} ccv_nnc_graph_exec_symbol_reverse_t;
37
38typedef struct {
39 ccv_array_t* tensor_symbols;
40 void* old_tensor_symbol_new_hook_context;
41 ccv_nnc_tensor_symbol_new_hook_f old_tensor_symbol_new_hook;
42 void* old_tensor_symbol_alias_new_hook_context;
43 ccv_nnc_tensor_symbol_alias_new_hook_f old_tensor_symbol_alias_new_hook;
44 ccv_array_t* graph_exec_symbols;
45 ccv_nnc_graph_exec_symbol_new_hook_f old_graph_exec_symbol_new_hook;
46 void* old_graph_exec_symbol_new_hook_context;
47} ccv_cnnp_gradient_checkpoint_build_t;
48
49static void _ccv_cnnp_gradient_checkpoint_tensor_symbol_new_hook(void* context, const ccv_nnc_tensor_symbol_t symbol, const ccv_nnc_tensor_param_t info, const char* const name)
50{
51 ccv_cnnp_gradient_checkpoint_build_t* const build_context = (ccv_cnnp_gradient_checkpoint_build_t*)context;
52 ccv_array_push(build_context->tensor_symbols, &symbol);
53 if (build_context->old_tensor_symbol_new_hook)
54 build_context->old_tensor_symbol_new_hook(build_context->old_tensor_symbol_new_hook_context, symbol, info, name);
55}
56
57static void _ccv_cnnp_gradient_checkpoint_tensor_symbol_alias_new_hook(void* context, const ccv_nnc_tensor_symbol_t symbol, const ccv_nnc_tensor_symbol_t from_symbol, const int ofs[CCV_NNC_MAX_DIM_ALLOC(12)], const int inc[CCV_NNC_MAX_DIM_ALLOC(12)], const ccv_nnc_tensor_param_t info, const char* const name)
58{
59 ccv_cnnp_gradient_checkpoint_build_t* const build_context = (ccv_cnnp_gradient_checkpoint_build_t*)context;
60 ccv_array_push(build_context->tensor_symbols, &symbol);
61 if (build_context->old_tensor_symbol_alias_new_hook)
62 build_context->old_tensor_symbol_alias_new_hook(build_context->old_tensor_symbol_alias_new_hook_context, symbol, from_symbol, ofs, inc, info, name);
63}
64
65static void _ccv_cnnp_model_gradient_checkpoint_graph_exec_symbol_new_hook(void* context, const ccv_nnc_graph_exec_symbol_t symbol, const ccv_nnc_cmd_t cmd, const ccv_nnc_tensor_symbol_t* const inputs, const int input_size, const ccv_nnc_tensor_symbol_t* const outputs, const int output_size, const char* const name)
66{
67 ccv_cnnp_gradient_checkpoint_build_t* const build = (ccv_cnnp_gradient_checkpoint_build_t*)context;
68 ccv_array_push(build->graph_exec_symbols, &symbol);
69 if (build->old_graph_exec_symbol_new_hook)
70 build->old_graph_exec_symbol_new_hook(build->old_graph_exec_symbol_new_hook_context, symbol, cmd, inputs, input_size, outputs, output_size, name);
71}
72
73KHASH_MAP_INIT_INT(ccv_cnnp_tensor_symbol_map, int)
74KHASH_SET_INIT_INT(ccv_cnnp_tensor_symbol_set)typedef struct kh_ccv_cnnp_tensor_symbol_set_s { khint_t n_buckets
, size, n_occupied, upper_bound; khint32_t *flags; khint32_t *
keys; char *vals; } kh_ccv_cnnp_tensor_symbol_set_t; static inline
__attribute__ ((__unused__)) kh_ccv_cnnp_tensor_symbol_set_t
*kh_init_ccv_cnnp_tensor_symbol_set(void) { return (kh_ccv_cnnp_tensor_symbol_set_t
*)calloc(1,sizeof(kh_ccv_cnnp_tensor_symbol_set_t)); } static
inline __attribute__ ((__unused__)) void kh_destroy_ccv_cnnp_tensor_symbol_set
(kh_ccv_cnnp_tensor_symbol_set_t *h) { if (h) { free((void *)
h->keys); free(h->flags); free((void *)h->vals); free
(h); } } static inline __attribute__ ((__unused__)) void kh_clear_ccv_cnnp_tensor_symbol_set
(kh_ccv_cnnp_tensor_symbol_set_t *h) { if (h && h->
flags) { memset(h->flags, 0xaa, ((h->n_buckets) < 16
? 1 : (h->n_buckets)>>4) * sizeof(khint32_t)); h->
size = h->n_occupied = 0; } } static inline __attribute__ (
(__unused__)) khint_t kh_get_ccv_cnnp_tensor_symbol_set(const
kh_ccv_cnnp_tensor_symbol_set_t *h, khint32_t key) { if (h->
n_buckets) { khint_t k, i, last, mask, step = 0; mask = h->
n_buckets - 1; k = (khint32_t)(key); i = k & mask; last =
i; while (!((h->flags[i>>4]>>((i&0xfU)<<
1))&2) && (((h->flags[i>>4]>>((i&
0xfU)<<1))&1) || !((h->keys[i]) == (key)))) { i =
(i + (++step)) & mask; if (i == last) return h->n_buckets
; } return ((h->flags[i>>4]>>((i&0xfU)<<
1))&3)? h->n_buckets : i; } else return 0; } static inline
__attribute__ ((__unused__)) int kh_resize_ccv_cnnp_tensor_symbol_set
(kh_ccv_cnnp_tensor_symbol_set_t *h, khint_t new_n_buckets) {
khint32_t *new_flags = 0; khint_t j = 1; { (--(new_n_buckets
), (new_n_buckets)|=(new_n_buckets)>>1, (new_n_buckets)
|=(new_n_buckets)>>2, (new_n_buckets)|=(new_n_buckets)>>
4, (new_n_buckets)|=(new_n_buckets)>>8, (new_n_buckets)
|=(new_n_buckets)>>16, ++(new_n_buckets)); if (new_n_buckets
< 4) new_n_buckets = 4; if (h->size >= (khint_t)(new_n_buckets
* __ac_HASH_UPPER + 0.5)) j = 0; else { new_flags = (khint32_t
*)malloc(((new_n_buckets) < 16? 1 : (new_n_buckets)>>
4) * sizeof(khint32_t)); if (!new_flags) return -1; memset(new_flags
, 0xaa, ((new_n_buckets) < 16? 1 : (new_n_buckets)>>
4) * sizeof(khint32_t)); if (h->n_buckets < new_n_buckets
) { khint32_t *new_keys = (khint32_t*)realloc((void *)h->keys
,new_n_buckets * sizeof(khint32_t)); if (!new_keys) { free(new_flags
); return -1; } h->keys = new_keys; if (0) { char *new_vals
= (char*)realloc((void *)h->vals,new_n_buckets * sizeof(char
)); if (!new_vals) { free(new_flags); return -1; } h->vals
= new_vals; } } } } if (j) { for (j = 0; j != h->n_buckets
; ++j) { if (((h->flags[j>>4]>>((j&0xfU)<<
1))&3) == 0) { khint32_t key = h->keys[j]; char val; khint_t
new_mask; new_mask = new_n_buckets - 1; if (0) val = h->vals
[j]; (h->flags[j>>4]|=1ul<<((j&0xfU)<<
1)); while (1) { khint_t k, i, step = 0; k = (khint32_t)(key)
; i = k & new_mask; while (!((new_flags[i>>4]>>
((i&0xfU)<<1))&2)) i = (i + (++step)) & new_mask
; (new_flags[i>>4]&=~(2ul<<((i&0xfU)<<
1))); if (i < h->n_buckets && ((h->flags[i>>
4]>>((i&0xfU)<<1))&3) == 0) { { khint32_t
tmp = h->keys[i]; h->keys[i] = key; key = tmp; } if (0
) { char tmp = h->vals[i]; h->vals[i] = val; val = tmp;
} (h->flags[i>>4]|=1ul<<((i&0xfU)<<
1)); } else { h->keys[i] = key; if (0) h->vals[i] = val
; break; } } } } if (h->n_buckets > new_n_buckets) { h->
keys = (khint32_t*)realloc((void *)h->keys,new_n_buckets *
sizeof(khint32_t)); if (0) h->vals = (char*)realloc((void
*)h->vals,new_n_buckets * sizeof(char)); } free(h->flags
); h->flags = new_flags; h->n_buckets = new_n_buckets; h
->n_occupied = h->size; h->upper_bound = (khint_t)(h
->n_buckets * __ac_HASH_UPPER + 0.5); } return 0; } static
inline __attribute__ ((__unused__)) khint_t kh_put_ccv_cnnp_tensor_symbol_set
(kh_ccv_cnnp_tensor_symbol_set_t *h, khint32_t key, int *ret)
{ khint_t x; if (h->n_occupied >= h->upper_bound) {
if (h->n_buckets > (h->size<<1)) { if (kh_resize_ccv_cnnp_tensor_symbol_set
(h, h->n_buckets - 1) < 0) { *ret = -1; return h->n_buckets
; } } else if (kh_resize_ccv_cnnp_tensor_symbol_set(h, h->
n_buckets + 1) < 0) { *ret = -1; return h->n_buckets; }
} { khint_t k, i, site, last, mask = h->n_buckets - 1, step
= 0; x = site = h->n_buckets; k = (khint32_t)(key); i = k
& mask; if (((h->flags[i>>4]>>((i&0xfU
)<<1))&2)) x = i; else { last = i; while (!((h->
flags[i>>4]>>((i&0xfU)<<1))&2) &&
(((h->flags[i>>4]>>((i&0xfU)<<1))&
1) || !((h->keys[i]) == (key)))) { if (((h->flags[i>>
4]>>((i&0xfU)<<1))&1)) site = i; i = (i +
(++step)) & mask; if (i == last) { x = site; break; } } if
(x == h->n_buckets) { if (((h->flags[i>>4]>>
((i&0xfU)<<1))&2) && site != h->n_buckets
) x = site; else x = i; } } } if (((h->flags[x>>4]>>
((x&0xfU)<<1))&2)) { h->keys[x] = key; (h->
flags[x>>4]&=~(3ul<<((x&0xfU)<<1)))
; ++h->size; ++h->n_occupied; *ret = 1; } else if (((h->
flags[x>>4]>>((x&0xfU)<<1))&1)) { h
->keys[x] = key; (h->flags[x>>4]&=~(3ul<<
((x&0xfU)<<1))); ++h->size; *ret = 2; } else *ret
= 0; return x; } static inline __attribute__ ((__unused__)) void
kh_del_ccv_cnnp_tensor_symbol_set(kh_ccv_cnnp_tensor_symbol_set_t
*h, khint_t x) { if (x != h->n_buckets && !((h->
flags[x>>4]>>((x&0xfU)<<1))&3)) { (
h->flags[x>>4]|=1ul<<((x&0xfU)<<1));
--h->size; } }
11. Taking true branch
12. Taking false branch
13. Calling 'kh_resize_ccv_cnnp_tensor_symbol_set'
14. Taking true branch
15. Assuming the condition is false
16. Taking false branch
17. '?' condition is true
18. Assuming 'new_flags' is non-null
19. Taking false branch
20. '?' condition is true
21. Taking true branch
22. Storing uninitialized value
23. Assuming 'new_keys' is non-null
24. Taking false branch
25. Taking false branch
26. Taking true branch
27. Loop condition is false. Execution continues on line 74
28. Taking false branch
29. Returning from 'kh_resize_ccv_cnnp_tensor_symbol_set'
30. Taking false branch
31. Assuming right operand of bit shift is less than 32
32. Assuming the condition is true
33. Taking true branch
34. Assuming the condition is false
35. Taking false branch
36. Assuming the condition is false
37. Taking false branch
42. Assuming field 'n_occupied' is >= field 'upper_bound'
43. Taking true branch
44. Taking true branch
45. Calling 'kh_resize_ccv_cnnp_tensor_symbol_set'
46. Taking false branch
47. Assuming the condition is false
48. Taking false branch
49. '?' condition is true
50. Assuming 'new_flags' is non-null
51. Taking false branch
52. '?' condition is true
53. Taking false branch
54. Taking true branch
55. The value 0 is assigned to 'j'
56. Loop condition is true. Entering loop body
57. Assuming the condition is true
58. Taking true branch
59. Assigned value is garbage or undefined
75
76void ccv_cnnp_model_apply_gradient_checkpoints(ccv_cnnp_compiled_data_t* const compiled_data, ccv_nnc_symbolic_graph_t* const graph)
77{
78 ccv_array_t* const gradient_checkpoints = compiled_data->gradient_checkpoints;
79 if (!gradient_checkpoints || gradient_checkpoints->rnum == 0) // No saved gradient checkpoints, this is an easy way out.
1. Assuming 'gradient_checkpoints' is non-null
2. Assuming field 'rnum' is not equal to 0
3. Taking false branch
80 return;
81 // Otherwise, for each gradient checkpoint, there are 3 steps:
82 // 1. Find currently, what execs exists from inputs to outputs.
83 // 2. Find execs that generates the outputs, and their corresponding backward execs.
84 // 3. Find all backward execs flow from outputs back to inputs.
85 // 4. Generate new ops by calling build again with old inputs, record all new tensors / execs.
86 // 5. Replace inputs in backward execs with the new tensors.
87 // 6. Hook the execs takes inputs with edge from parents of backward execs in step 2.
88 // 7. Delete newly generated execs that has no use (i.e. its outputs are not used by backward pass).
89 // 8. Mark all new execs with DISABLE_OPT to avoid common sub-expression elimination pass.
90 int i, j, k, l;
91 ccv_array_t* input_execs = ccv_array_new(sizeof(ccv_nnc_graph_exec_symbol_t), 0, 0);
92 ccv_array_t* output_execs = ccv_array_new(sizeof(ccv_nnc_graph_exec_symbol_t), 0, 0);
93 ccv_array_t* input_gradient_execs = ccv_array_new(sizeof(ccv_nnc_graph_exec_symbol_t), 0, 0);
94 ccv_array_t* output_gradient_execs = ccv_array_new(sizeof(ccv_nnc_graph_exec_symbol_t), 0, 0);
95 ccv_array_t* visited_backward_execs = ccv_array_new(sizeof(int), 0, 0);
96 ccv_array_t* replaced_backward_execs = ccv_array_new(sizeof(int), 0, 0);
97 const int exec_rnum = graph->exec_symbol_info->rnum;
98 ccv_nnc_graph_exec_symbol_reverse_t* const reversed_nodes = cccalloccalloc(exec_rnum, sizeof(ccv_nnc_graph_exec_symbol_reverse_t));
99 for (i = 0; i < exec_rnum; i++)
4. Assuming 'i' is >= 'exec_rnum'
5. Loop condition is false. Execution continues on line 115
100 {
101 const int* tos = 0;
102 int to_size = 0;
103 ccv_nnc_graph_exec_symbol_to(graph, (ccv_nnc_graph_exec_symbol_t){
104 .graph = graph,
105 .d = i
106 }, &tos, &to_size);
107 if (tos)
108 for (j = 0; j < to_size; j++)
109 {
110 if (!reversed_nodes[tos[j]].outgoings)
111 reversed_nodes[tos[j]].outgoings = ccv_array_new(sizeof(int), 1, 0);
112 ccv_array_add_unique_int(reversed_nodes[tos[j]].outgoings, i);
113 }
114 }
115 uint32_t* const maskbit = cccalloccalloc((exec_rnum + 31) >> 5, sizeof(uint32_t));
116 // Temporary for build_data.
117 ccv_array_t* const parameters = ccv_array_new(sizeof(ccv_nnc_tensor_symbol_t), 0, 0);
118 ccv_array_t* const parameter_ids = ccv_array_new(sizeof(char*), 0, 0);
119 ccv_array_t* const parameter_trainables = ccv_array_new(sizeof(int), 0, 0);
120 ccv_array_t* const internals = ccv_array_new(sizeof(ccv_nnc_tensor_symbol_t), 0, 0);
121 ccv_array_t* const internal_ids = ccv_array_new(sizeof(char*), 0, 0);
122 ccv_array_t* const buf = ccv_array_new(sizeof(int), 0, 0);
123 int max_output_size = 0;
124 for (i = 0; i < gradient_checkpoints->rnum; i++)
6. Assuming 'i' is >= field 'rnum'
7. Loop condition is false. Execution continues on line 129
125 {
126 ccv_cnnp_model_gradient_checkpoint_t* const checkpoint = (ccv_cnnp_model_gradient_checkpoint_t*)ccv_array_get(gradient_checkpoints, i)((void*)(((char*)((gradient_checkpoints)->data)) + (size_t
)(gradient_checkpoints)->rsize * (size_t)(i)))
;
127 max_output_size = ccv_max(checkpoint->output_size, max_output_size)({ typeof (checkpoint->output_size) _a = (checkpoint->output_size
); typeof (max_output_size) _b = (max_output_size); (_a > _b
) ? _a : _b; })
;
128 }
129 ccv_nnc_tensor_symbol_t* max_outputs = ccmallocmalloc(sizeof(ccv_nnc_tensor_symbol_t) * max_output_size);
130 ccv_array_t* newly_used_outputs = ccv_array_new(sizeof(int), 0, 0);
131 khash_t(ccv_cnnp_tensor_symbol_set)kh_ccv_cnnp_tensor_symbol_set_t* const parameters_or_internals = kh_init(ccv_cnnp_tensor_symbol_set)kh_init_ccv_cnnp_tensor_symbol_set();
132 for (i = 0; i < compiled_data->parameters->rnum; i++)
8. Assuming 'i' is < field 'rnum'
9. Loop condition is true. Entering loop body
39. Assuming 'i' is < field 'rnum'
40. Loop condition is true. Entering loop body
133 {
134 const ccv_nnc_tensor_symbol_t* const symbol = (const ccv_nnc_tensor_symbol_t*)ccv_array_get(compiled_data->parameters, i)((void*)(((char*)((compiled_data->parameters)->data)) +
(size_t)(compiled_data->parameters)->rsize * (size_t)(
i)))
;
135 int ret;
136 kh_put(ccv_cnnp_tensor_symbol_set, parameters_or_internals, symbol->d, &ret)kh_put_ccv_cnnp_tensor_symbol_set(parameters_or_internals, symbol
->d, &ret)
;
10. Calling 'kh_put_ccv_cnnp_tensor_symbol_set'
38. Returning from 'kh_put_ccv_cnnp_tensor_symbol_set'
41. Calling 'kh_put_ccv_cnnp_tensor_symbol_set'
137 }
138 for (i = 0; i < compiled_data->internals->rnum; i++)
139 {
140 const ccv_nnc_tensor_symbol_t* const symbol = (const ccv_nnc_tensor_symbol_t*)ccv_array_get(compiled_data->parameters, i)((void*)(((char*)((compiled_data->parameters)->data)) +
(size_t)(compiled_data->parameters)->rsize * (size_t)(
i)))
;
141 int ret;
142 kh_put(ccv_cnnp_tensor_symbol_set, parameters_or_internals, symbol->d, &ret)kh_put_ccv_cnnp_tensor_symbol_set(parameters_or_internals, symbol
->d, &ret)
;
143 }
144 khash_t(ccv_cnnp_tensor_symbol_set)kh_ccv_cnnp_tensor_symbol_set_t* const newly_created_tensor_symbols = kh_init(ccv_cnnp_tensor_symbol_set)kh_init_ccv_cnnp_tensor_symbol_set();
145 khash_t(ccv_cnnp_tensor_symbol_map)kh_ccv_cnnp_tensor_symbol_map_t* symbol_map = kh_init(ccv_cnnp_tensor_symbol_map)kh_init_ccv_cnnp_tensor_symbol_map();
146 for (i = 0; i < gradient_checkpoints->rnum; i++)
147 {
148 ccv_cnnp_model_gradient_checkpoint_t* const checkpoint = (ccv_cnnp_model_gradient_checkpoint_t*)ccv_array_get(gradient_checkpoints, i)((void*)(((char*)((gradient_checkpoints)->data)) + (size_t
)(gradient_checkpoints)->rsize * (size_t)(i)))
;
149 kh_clear(ccv_cnnp_tensor_symbol_set, newly_created_tensor_symbols)kh_clear_ccv_cnnp_tensor_symbol_set(newly_created_tensor_symbols
)
;
150 for (j = 0; j < checkpoint->tensor_symbols->rnum; j++)
151 {
152 const int idx = ((ccv_nnc_tensor_symbol_t*)ccv_array_get(checkpoint->tensor_symbols, j)((void*)(((char*)((checkpoint->tensor_symbols)->data)) +
(size_t)(checkpoint->tensor_symbols)->rsize * (size_t)
(j)))
)->d;
153 if (idx < 0)
154 continue;
155 // Skip parameters or internals.
156 if (kh_get(ccv_cnnp_tensor_symbol_set, parameters_or_internals, idx)kh_get_ccv_cnnp_tensor_symbol_set(parameters_or_internals, idx
)
!= kh_end(parameters_or_internals)((parameters_or_internals)->n_buckets))
157 continue;
158 int ret;
159 kh_put(ccv_cnnp_tensor_symbol_set, newly_created_tensor_symbols, idx, &ret)kh_put_ccv_cnnp_tensor_symbol_set(newly_created_tensor_symbols
, idx, &ret)
;
160 }
161 ccv_array_clear(input_execs);
162 ccv_array_clear(output_execs);
163 ccv_nnc_graph_exec_symbol_info_t* exec_info = (ccv_nnc_graph_exec_symbol_info_t*)ccv_array_get(graph->exec_symbol_info, 0)((void*)(((char*)((graph->exec_symbol_info)->data)) + (
size_t)(graph->exec_symbol_info)->rsize * (size_t)(0)))
;
164 for (j = 0; j < exec_rnum; j++)
165 {
166 if (CCV_NNC_GRAPH_EXEC_IS_DEAD(exec_info[j].flags)((exec_info[j].flags) & CCV_NNC_GRAPH_EXEC_DEAD))
167 continue;
168 const int* inputs = exec_info[j].inputs;
169 int input_size = exec_info[j].input_size;
170 const int* outputs = exec_info[j].outputs;
171 int output_size = exec_info[j].output_size;
172 if (input_size == 0 && output_size == 0)
173 continue;
174 // Only go through forward pass.
175 if (ccv_nnc_cmd_is_backward(exec_info[j].cmd))
176 continue;
177 const ccv_nnc_graph_exec_symbol_t symbol = {
178 .graph = graph,
179 .d = j
180 };
181 int flag = 0;
182 for (k = 0; inputs && k < input_size && !flag; k++)
183 if (inputs[k] >= 0)
184 for (l = 0; l < checkpoint->input_size && !flag; l++)
185 if (checkpoint->inputs[l].d >= 0 && inputs[k] == checkpoint->inputs[l].d)
186 flag = 1;
187 // Input logic is different from output logic. We need to filter out these exec that contains inputs from within the graph.
188 for (k = 0; inputs && k < input_size && flag; k++)
189 if (inputs[k] >= 0 && kh_get(ccv_cnnp_tensor_symbol_set, newly_created_tensor_symbols, inputs[k])kh_get_ccv_cnnp_tensor_symbol_set(newly_created_tensor_symbols
, inputs[k])
!= kh_end(newly_created_tensor_symbols)((newly_created_tensor_symbols)->n_buckets))
190 flag = 0;
191 if (flag)
192 ccv_array_push(input_execs, &symbol);
193 flag = 0;
194 for (k = 0; outputs && k < output_size && !flag; k++)
195 if (outputs[k] >= 0)
196 for (l = 0; l < checkpoint->output_size && !flag; l++)
197 if (checkpoint->outputs[l].d >= 0 && outputs[k] == checkpoint->outputs[l].d)
198 flag = 1;
199 if (flag)
200 ccv_array_push(output_execs, &symbol);
201 }
202 if (input_execs->rnum <= 0 || output_execs->rnum <= 0)
203 continue;
204 // Fill in blanks (i.e. the backward ops that are not showing in above, but should be included to avoid excluding necessary ones). This is done by flowing gradients from outputs back all the way to inputs.
205 ccv_array_clear(input_gradient_execs);
206 ccv_array_clear(output_gradient_execs);
207 for (j = 0; j < input_execs->rnum; j++)
208 {
209 const int d = ((ccv_nnc_graph_exec_symbol_t*)ccv_array_get(input_execs, j)((void*)(((char*)((input_execs)->data)) + (size_t)(input_execs
)->rsize * (size_t)(j)))
)->d;
210 for (k = 0; k < exec_info[d].input_size; k++)
211 if (exec_info[d].inputs[k] >= 0)
212 {
213 const ccv_nnc_tensor_symbol_t gradient_symbol = ccv_nnc_tensor_symbol_for_backward(graph, (ccv_nnc_tensor_symbol_t){
214 .graph = graph,
215 .d = exec_info[d].inputs[k]
216 });
217 if (gradient_symbol.d < 0)
218 continue;
219 const ccv_nnc_graph_exec_symbol_t backward = ccv_nnc_graph_exec_symbol_for_backward(graph, gradient_symbol);
220 if (backward.d < 0)
221 continue;
222 if (CCV_NNC_GRAPH_EXEC_IS_DEAD(exec_info[backward.d].flags)((exec_info[backward.d].flags) & CCV_NNC_GRAPH_EXEC_DEAD))
223 continue;
224 int flag = 0;
225 for (l = 0; !flag && l < output_gradient_execs->rnum; l++)
226 if (((ccv_nnc_graph_exec_symbol_t*)ccv_array_get(output_gradient_execs, l)((void*)(((char*)((output_gradient_execs)->data)) + (size_t
)(output_gradient_execs)->rsize * (size_t)(l)))
)->d == backward.d)
227 flag = 1;
228 if (!flag)
229 ccv_array_push(output_gradient_execs, &backward);
230 }
231 if (exec_info[d].outgoings && exec_info[d].outgoings->rnum > 0)
232 for (k = 0; k < exec_info[d].outgoings->rnum; k++)
233 {
234 const int to_d = *(int*)ccv_array_get(exec_info[d].outgoings, k)((void*)(((char*)((exec_info[d].outgoings)->data)) + (size_t
)(exec_info[d].outgoings)->rsize * (size_t)(k)))
;
235 if (!ccv_nnc_cmd_is_backward(exec_info[to_d].cmd))
236 continue;
237 int flag = 0;
238 for (l = 0; !flag && l < output_gradient_execs->rnum; l++)
239 if (((ccv_nnc_graph_exec_symbol_t*)ccv_array_get(output_gradient_execs, l)((void*)(((char*)((output_gradient_execs)->data)) + (size_t
)(output_gradient_execs)->rsize * (size_t)(l)))
)->d == to_d)
240 flag = 1;
241 if (!flag)
242 {
243 const ccv_nnc_graph_exec_symbol_t backward = {
244 .graph = graph,
245 .d = to_d
246 };
247 ccv_array_push(output_gradient_execs, &backward);
248 }
249 }
250 }
251 // For output_gradient_execs, we can be opportunistic and use the wrt symbols (if exists) to find relevant bits.
252 // For input_gradient_execs, there is no other way but to loop over all outgoings, find the ones are direct link as backward execs.
253 for (j = 0; j < output_execs->rnum; j++)
254 {
255 const int d = ((ccv_nnc_graph_exec_symbol_t*)ccv_array_get(output_execs, j)((void*)(((char*)((output_execs)->data)) + (size_t)(output_execs
)->rsize * (size_t)(j)))
)->d;
256 if (exec_info[d].outgoings && exec_info[d].outgoings->rnum > 0)
257 for (k = 0; k < exec_info[d].outgoings->rnum; k++)
258 {
259 const int to_d = *(int*)ccv_array_get(exec_info[d].outgoings, k)((void*)(((char*)((exec_info[d].outgoings)->data)) + (size_t
)(exec_info[d].outgoings)->rsize * (size_t)(k)))
;
260 if (!ccv_nnc_cmd_is_backward(exec_info[to_d].cmd))
261 continue;
262 int flag = 0;
263 for (l = 0; !flag && l < input_gradient_execs->rnum; l++)
264 if (((ccv_nnc_graph_exec_symbol_t*)ccv_array_get(input_gradient_execs, l)((void*)(((char*)((input_gradient_execs)->data)) + (size_t
)(input_gradient_execs)->rsize * (size_t)(l)))
)->d == to_d)
265 flag = 1;
266 if (!flag)
267 {
268 const ccv_nnc_graph_exec_symbol_t backward = {
269 .graph = graph,
270 .d = to_d
271 };
272 ccv_array_push(input_gradient_execs, &backward);
273 }
274 }
275 }
276 // Note that we have to use up-to-date ones because the exec_info might have outgoings that is up-to-date.
277 ccv_nnc_graph_visit_t* const visit = ccv_nnc_graph_visit_new(graph, exec_info, graph->exec_symbol_info->rnum, (ccv_nnc_graph_exec_symbol_t*)ccv_array_get(input_gradient_execs, 0), input_gradient_execs->rnum, (ccv_nnc_graph_exec_symbol_t*)ccv_array_get(output_gradient_execs, 0), output_gradient_execs->rnum, 1);
278 ccv_nnc_graph_visit_for(visit, exec_info, node, idx){ int _i_; for (_i_ = 0; _i_ < (visit)->size; _i_++) { const
int idx __attribute__((unused)) = (visit)->node[_i_].index
; const int _node_unused_ __attribute__((unused)) = (visit)->
node[_i_].term; typeof ((exec_info)) const node __attribute__
((unused)) = (exec_info) + idx;
{
279 if (idx < exec_rnum && !CCV_NNC_GRAPH_EXEC_IS_DEAD(node->flags)((node->flags) & CCV_NNC_GRAPH_EXEC_DEAD))
280 maskbit[idx >> 5] |= (1u << (idx & 0x1f));
281 } ccv_nnc_graph_visit_endfor} }
282 ccv_array_clear(visited_backward_execs);
283 // Add more backward pass to the list. Note that we don't add everything, particularly there are new nodes created through gradient checkpointing are ignored.
284#define visitor(node, idx, _) \
285 if (idx < exec_rnum && !CCV_NNC_GRAPH_EXEC_IS_DEAD(exec_info[idx].flags)((exec_info[idx].flags) & CCV_NNC_GRAPH_EXEC_DEAD) && maskbit[idx >> 5] & (1u << (idx & 0x1f))) \
286 ccv_array_add_unique_int(visited_backward_execs, idx);
287 CCV_NNC_GRAPH_VISIT(graph, reversed_nodes, exec_rnum, (ccv_nnc_graph_exec_symbol_t*)ccv_array_get(output_gradient_execs, 0), output_gradient_execs->rnum, (ccv_nnc_graph_exec_symbol_t*)ccv_array_get(input_gradient_execs, 0), input_gradient_execs->rnum, 0, visitor);
288 for (j = 0; j < input_gradient_execs->rnum; j++)
289 ccv_array_add_unique_int(visited_backward_execs, ((ccv_nnc_graph_exec_symbol_t*)ccv_array_get(input_gradient_execs, j)((void*)(((char*)((input_gradient_execs)->data)) + (size_t
)(input_gradient_execs)->rsize * (size_t)(j)))
)->d);
290#undef visitor
291 ccv_cnnp_gradient_checkpoint_build_t build = {
292 .tensor_symbols = ccv_array_new(sizeof(ccv_nnc_tensor_symbol_t), 0, 0),
293 .graph_exec_symbols = ccv_array_new(sizeof(ccv_nnc_graph_exec_symbol_t), 0, 0),
294 };
295 build.old_tensor_symbol_new_hook_context = ccv_nnc_tensor_symbol_new_hook(graph, _ccv_cnnp_gradient_checkpoint_tensor_symbol_new_hook, &build, &build.old_tensor_symbol_new_hook);
296 build.old_tensor_symbol_alias_new_hook_context = ccv_nnc_tensor_symbol_alias_new_hook(graph, _ccv_cnnp_gradient_checkpoint_tensor_symbol_alias_new_hook, &build, &build.old_tensor_symbol_alias_new_hook);
297 build.old_graph_exec_symbol_new_hook_context = ccv_nnc_graph_exec_symbol_new_hook(graph, _ccv_cnnp_model_gradient_checkpoint_graph_exec_symbol_new_hook, &build, &build.old_graph_exec_symbol_new_hook);
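/* Editor's note: illustrative sketch only, not part of the analyzed file. It shows the
 * save/install/restore pattern used by the three hook calls above: the previous hook and its
 * context are captured so they can be reinstated after the checkpoint body has been rebuilt.
 * All names below (hook_t, install_hook, g_hook, g_hook_context, record_symbol) are hypothetical. */
#include <stdio.h>

typedef void (*hook_t)(void* context, int symbol);

static hook_t g_hook = 0;
static void* g_hook_context = 0;

/* Install a new hook, returning the previous hook and handing back its context. */
static hook_t install_hook(hook_t hook, void* context, void** prev_context)
{
    hook_t prev = g_hook;
    if (prev_context)
        *prev_context = g_hook_context;
    g_hook = hook;
    g_hook_context = context;
    return prev;
}

static void record_symbol(void* context, int symbol)
{
    printf("recorded symbol %d\n", symbol);
    (void)context;
}

int main(void)
{
    void* old_context = 0;
    hook_t old_hook = install_hook(record_symbol, 0, &old_context); /* capture the old hook */
    if (g_hook)
        g_hook(g_hook_context, 42); /* newly created symbols are reported to the recording hook */
    install_hook(old_hook, old_context, 0); /* restore the previous hook afterwards */
    return 0;
}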
298 ccv_array_clear(parameters);
299 ccv_array_clear(parameter_ids);
300 ccv_array_clear(parameter_trainables);
301 ccv_array_clear(internals);
302 ccv_array_clear(internal_ids);
303 ccv_cnnp_model_sequence_t model_sequence = {
304 .bank = kh_init(ccv_cnnp_model_name_bank)kh_init_ccv_cnnp_model_name_bank()
305 };
306 ccv_cnnp_model_add_to_array_context_t add_to_parameter_context = {
307 .sequence = &model_sequence,
308 .prefix = 't',
309 .symbols = parameters,
310 .ids = parameter_ids,
311 .trainables = parameter_trainables,
312 };
313 ccv_cnnp_model_add_to_array_context_t add_to_output_context = {
314 .sequence = &model_sequence,
315 .prefix = 'r',
316 .symbols = internals,
317 .ids = internal_ids,
318 .trainables = 0,
319 };
320 ccv_cnnp_model_build_data_t build_data = {
321 .is_trainable = checkpoint->is_trainable,
322 .model_sequence = &model_sequence,
323 .add_to_array = ccv_cnnp_model_add_to_array,
324 .parameters = parameters,
325 .context = {
326 .add_to_parameter = &add_to_parameter_context,
327 .add_to_output = &add_to_output_context,
328 },
329 .is_gradient_checkpointing = 1, // Mark this as true so we don't allocate gradient_checkpoints array or override the hooks.
330 .gradient_checkpoints = 0,
331 };
332 checkpoint->model->data = &build_data;
333 checkpoint->build(checkpoint->model, graph, checkpoint->inputs, checkpoint->input_size, max_outputs, checkpoint->output_size);
334 checkpoint->model->data = 0;
335 kh_destroy(ccv_cnnp_model_name_bank, model_sequence.bank)kh_destroy_ccv_cnnp_model_name_bank(model_sequence.bank);
336 if (model_sequence.sequences)
337 ccv_array_free(model_sequence.sequences);
338 ccv_nnc_tensor_symbol_new_hook(graph, build.old_tensor_symbol_new_hook, build.old_tensor_symbol_new_hook_context, 0);
339 ccv_nnc_tensor_symbol_alias_new_hook(graph, build.old_tensor_symbol_alias_new_hook, build.old_tensor_symbol_alias_new_hook_context, 0);
340 ccv_nnc_graph_exec_symbol_autogen(graph, (ccv_nnc_graph_exec_symbol_t*)ccv_array_get(build.graph_exec_symbols, 0)((void*)(((char*)((build.graph_exec_symbols)->data)) + (size_t
)(build.graph_exec_symbols)->rsize * (size_t)(0)))
, build.graph_exec_symbols->rnum, 0);
341 for (j = 0; j < parameter_ids->rnum; j++)
342 ccfreefree(*(char**)ccv_array_get(parameter_ids, j)((void*)(((char*)((parameter_ids)->data)) + (size_t)(parameter_ids
)->rsize * (size_t)(j)))
);
343 for (j = 0; j < internal_ids->rnum; j++)
344 ccfreefree(*(char**)ccv_array_get(internal_ids, j)((void*)(((char*)((internal_ids)->data)) + (size_t)(internal_ids
)->rsize * (size_t)(j)))
);
345 // Note that there is no graph optimization applied here.
346 exec_info = (ccv_nnc_graph_exec_symbol_info_t*)ccv_array_get(graph->exec_symbol_info, 0)((void*)(((char*)((graph->exec_symbol_info)->data)) + (
size_t)(graph->exec_symbol_info)->rsize * (size_t)(0)))
;
347 // Reuse the existing set.
348 kh_clear(ccv_cnnp_tensor_symbol_set, newly_created_tensor_symbols)kh_clear_ccv_cnnp_tensor_symbol_set(newly_created_tensor_symbols
)
;
349 for (j = 0; j < build.tensor_symbols->rnum; j++)
350 {
351 const int idx = ((ccv_nnc_tensor_symbol_t*)ccv_array_get(build.tensor_symbols, j)((void*)(((char*)((build.tensor_symbols)->data)) + (size_t
)(build.tensor_symbols)->rsize * (size_t)(j)))
)->d;
352 if (idx < 0)
353 continue;
354 if (kh_get(ccv_cnnp_tensor_symbol_set, parameters_or_internals, idx)kh_get_ccv_cnnp_tensor_symbol_set(parameters_or_internals, idx
)
!= kh_end(parameters_or_internals)((parameters_or_internals)->n_buckets))
355 continue;
356 int ret;
357 kh_put(ccv_cnnp_tensor_symbol_set, newly_created_tensor_symbols, idx, &ret)kh_put_ccv_cnnp_tensor_symbol_set(newly_created_tensor_symbols
, idx, &ret)
;
358 }
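/* Editor's note: illustrative sketch only, not part of the analyzed file. It mirrors the loop
 * above: collect the ids of newly created tensor symbols, skipping negative (removed) ids and
 * ids that already belong to the parameters/internals set. A plain linear-scan int set stands in
 * for the khash set; the contains helper and the sample arrays are hypothetical. */
#include <stdio.h>

static int contains(const int* set, int size, int v)
{
    int i;
    for (i = 0; i < size; i++)
        if (set[i] == v)
            return 1;
    return 0;
}

int main(void)
{
    const int parameters_or_internals[] = { 3, 7 };
    const int built_symbols[] = { 5, -1, 3, 9 }; /* -1 marks a removed symbol */
    int newly_created[4], count = 0, i;
    for (i = 0; i < 4; i++)
    {
        const int idx = built_symbols[i];
        if (idx < 0)
            continue; /* removed symbol */
        if (contains(parameters_or_internals, 2, idx))
            continue; /* parameter or internal, not "newly created" */
        if (!contains(newly_created, count, idx))
            newly_created[count++] = idx;
    }
    for (i = 0; i < count; i++)
        printf("newly created: %d\n", newly_created[i]); /* prints 5 and 9 */
    return 0;
}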
359 ccv_array_t* const newly_input_execs = input_execs;
360 ccv_array_t* const newly_output_execs = output_execs;
361 ccv_array_clear(newly_input_execs);
362 ccv_array_clear(newly_output_execs);
363 for (j = 0; j < build.graph_exec_symbols->rnum; j++)
364 {
365 const int idx = ((ccv_nnc_graph_exec_symbol_t*)ccv_array_get(build.graph_exec_symbols, j)((void*)(((char*)((build.graph_exec_symbols)->data)) + (size_t
)(build.graph_exec_symbols)->rsize * (size_t)(j)))
)->d;
366 if (idx < 0)
367 continue;
368 if (CCV_NNC_GRAPH_EXEC_IS_DEAD(exec_info[idx].flags)((exec_info[idx].flags) & CCV_NNC_GRAPH_EXEC_DEAD))
369 continue;
370 const ccv_nnc_graph_exec_symbol_t symbol = {
371 .graph = graph,
372 .d = idx
373 };
374 const int* inputs = exec_info[idx].inputs;
375 int input_size = exec_info[idx].input_size;
376 // Only go through forward pass.
377 assert(!ccv_nnc_cmd_is_backward(exec_info[idx].cmd))((void) sizeof ((!ccv_nnc_cmd_is_backward(exec_info[idx].cmd)
) ? 1 : 0), __extension__ ({ if (!ccv_nnc_cmd_is_backward(exec_info
[idx].cmd)) ; else __assert_fail ("!ccv_nnc_cmd_is_backward(exec_info[idx].cmd)"
, "ccv_cnnp_model_gradient_checkpointing.c", 377, __extension__
__PRETTY_FUNCTION__); }))
;
378 int flag = 0;
379 for (k = 0; inputs && k < input_size && !flag; k++)
380 if (inputs[k] >= 0)
381 for (l = 0; l < checkpoint->input_size && !flag; l++)
382 if (checkpoint->inputs[l].d >= 0 && inputs[k] == checkpoint->inputs[l].d)
383 flag = 1;
384 // Input logic is different from output logic. We need to filter out those execs that take inputs from within the graph.
385 for (k = 0; inputs && k < input_size && flag; k++)
386 if (inputs[k] >= 0 && kh_get(ccv_cnnp_tensor_symbol_set, newly_created_tensor_symbols, inputs[k])kh_get_ccv_cnnp_tensor_symbol_set(newly_created_tensor_symbols
, inputs[k])
!= kh_end(newly_created_tensor_symbols)((newly_created_tensor_symbols)->n_buckets))
387 flag = 0;
388 if (flag)
389 ccv_array_push(newly_input_execs, &symbol);
390 flag = 0;
391 const int* outputs = exec_info[idx].outputs;
392 int output_size = exec_info[idx].output_size;
393 for (k = 0; outputs && k < output_size && !flag; k++)
394 if (outputs[k] >= 0)
395 for (l = 0; l < checkpoint->output_size && !flag; l++)
396 if (max_outputs[l].d >= 0 && outputs[k] == max_outputs[l].d)
397 flag = 1;
398 if (flag)
399 ccv_array_push(newly_output_execs, &symbol);
400 }
401 for (j = 0; j < checkpoint->input_size; j++)
402 if (checkpoint->inputs[j].d >= 0)
403 ccv_array_push(parameters, checkpoint->inputs + j);
404 ccv_nnc_symbolic_graph_simplify(graph,
405 SYMBOLIC_GRAPH_PASSES(CCV_NNC_SIMPLIFY_COMMON_SUBEXPRESSION_ELIMINATION,(const int []){CCV_NNC_SIMPLIFY_COMMON_SUBEXPRESSION_ELIMINATION
, CCV_NNC_SIMPLIFY_DATA_TRANSFER_OPT, CCV_NNC_SIMPLIFY_OPS_FUSION
}, (1 +1 +1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 -1)
406 CCV_NNC_SIMPLIFY_DATA_TRANSFER_OPT,(const int []){CCV_NNC_SIMPLIFY_COMMON_SUBEXPRESSION_ELIMINATION
, CCV_NNC_SIMPLIFY_DATA_TRANSFER_OPT, CCV_NNC_SIMPLIFY_OPS_FUSION
}, (1 +1 +1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 -1)
407 CCV_NNC_SIMPLIFY_OPS_FUSION)(const int []){CCV_NNC_SIMPLIFY_COMMON_SUBEXPRESSION_ELIMINATION
, CCV_NNC_SIMPLIFY_DATA_TRANSFER_OPT, CCV_NNC_SIMPLIFY_OPS_FUSION
}, (1 +1 +1 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +
0 +0 +0 +0 -1)
,
408 ccv_array_get(parameters, 0)((void*)(((char*)((parameters)->data)) + (size_t)(parameters
)->rsize * (size_t)(0)))
, parameters->rnum,
409 max_outputs, checkpoint->output_size,
410 ccv_array_get(newly_input_execs, 0)((void*)(((char*)((newly_input_execs)->data)) + (size_t)(newly_input_execs
)->rsize * (size_t)(0)))
, newly_input_execs->rnum, ccv_array_get(newly_output_execs, 0)((void*)(((char*)((newly_output_execs)->data)) + (size_t)(
newly_output_execs)->rsize * (size_t)(0)))
, newly_output_execs->rnum);
411 ccv_nnc_graph_exec_symbol_new_hook(graph, build.old_graph_exec_symbol_new_hook, build.old_graph_exec_symbol_new_hook_context, 0);
412 // Need to autogen and redo source / destination.
413 ccv_nnc_graph_exec_symbol_autogen(graph, (ccv_nnc_graph_exec_symbol_t*)ccv_array_get(build.graph_exec_symbols, 0)((void*)(((char*)((build.graph_exec_symbols)->data)) + (size_t
)(build.graph_exec_symbols)->rsize * (size_t)(0)))
, build.graph_exec_symbols->rnum, 0);
414 ccv_nnc_tensor_symbol_info_t* const tensor_symbol_info = (ccv_nnc_tensor_symbol_info_t*)ccv_array_get(graph->tensor_symbol_info, 0)((void*)(((char*)((graph->tensor_symbol_info)->data)) +
(size_t)(graph->tensor_symbol_info)->rsize * (size_t)(
0)))
;
415 exec_info = (ccv_nnc_graph_exec_symbol_info_t*)ccv_array_get(graph->exec_symbol_info, 0)((void*)(((char*)((graph->exec_symbol_info)->data)) + (
size_t)(graph->exec_symbol_info)->rsize * (size_t)(0)))
;
416 ccv_array_clear(newly_input_execs);
417 for (j = 0; j < build.graph_exec_symbols->rnum; j++)
418 {
419 const int idx = ((ccv_nnc_graph_exec_symbol_t*)ccv_array_get(build.graph_exec_symbols, j)((void*)(((char*)((build.graph_exec_symbols)->data)) + (size_t
)(build.graph_exec_symbols)->rsize * (size_t)(j)))
)->d;
420 if (idx < 0)
421 continue;
422 if (CCV_NNC_GRAPH_EXEC_IS_DEAD(exec_info[idx].flags)((exec_info[idx].flags) & CCV_NNC_GRAPH_EXEC_DEAD))
423 continue;
424 const ccv_nnc_graph_exec_symbol_t symbol = {
425 .graph = graph,
426 .d = idx
427 };
428 const int* inputs = exec_info[idx].inputs;
429 int input_size = exec_info[idx].input_size;
430 // Only go through forward pass.
431 assert(!ccv_nnc_cmd_is_backward(exec_info[idx].cmd))((void) sizeof ((!ccv_nnc_cmd_is_backward(exec_info[idx].cmd)
) ? 1 : 0), __extension__ ({ if (!ccv_nnc_cmd_is_backward(exec_info
[idx].cmd)) ; else __assert_fail ("!ccv_nnc_cmd_is_backward(exec_info[idx].cmd)"
, "ccv_cnnp_model_gradient_checkpointing.c", 431, __extension__
__PRETTY_FUNCTION__); }))
;
432 int flag = 0;
433 for (k = 0; inputs && k < input_size && !flag; k++)
434 if (inputs[k] >= 0)
435 for (l = 0; l < checkpoint->input_size && !flag; l++)
436 if (checkpoint->inputs[l].d >= 0 && inputs[k] == checkpoint->inputs[l].d)
437 flag = 1;
438 for (k = 0; inputs && k < input_size && flag; k++)
439 if (inputs[k] >= 0 && kh_get(ccv_cnnp_tensor_symbol_set, newly_created_tensor_symbols, inputs[k])kh_get_ccv_cnnp_tensor_symbol_set(newly_created_tensor_symbols
, inputs[k])
!= kh_end(newly_created_tensor_symbols)((newly_created_tensor_symbols)->n_buckets))
440 flag = 0;
441 if (flag)
442 ccv_array_push(newly_input_execs, &symbol);
443 }
444 // Build a map between old tensor symbols and new tensor symbols.
445 assert(build.tensor_symbols->rnum <= checkpoint->tensor_symbols->rnum)((void) sizeof ((build.tensor_symbols->rnum <= checkpoint
->tensor_symbols->rnum) ? 1 : 0), __extension__ ({ if (
build.tensor_symbols->rnum <= checkpoint->tensor_symbols
->rnum) ; else __assert_fail ("build.tensor_symbols->rnum <= checkpoint->tensor_symbols->rnum"
, "ccv_cnnp_model_gradient_checkpointing.c", 445, __extension__
__PRETTY_FUNCTION__); }))
;
446 // Build a map to potentially map from old input to new input.
447 kh_clear(ccv_cnnp_tensor_symbol_map, symbol_map)kh_clear_ccv_cnnp_tensor_symbol_map(symbol_map);
448 for (j = 0, k = 0; j < build.tensor_symbols->rnum && k < checkpoint->tensor_symbols->rnum;)
449 {
450 const int from_d = ((ccv_nnc_tensor_symbol_t*)ccv_array_get(checkpoint->tensor_symbols, k)((void*)(((char*)((checkpoint->tensor_symbols)->data)) +
(size_t)(checkpoint->tensor_symbols)->rsize * (size_t)
(k)))
)->d;
451 if (from_d < 0) // This is removed, move to the next one.
452 {
453 ++j;
454 ++k;
455 continue;
456 }
457 const int to_d = ((ccv_nnc_tensor_symbol_t*)ccv_array_get(build.tensor_symbols, j)((void*)(((char*)((build.tensor_symbols)->data)) + (size_t
)(build.tensor_symbols)->rsize * (size_t)(j)))
)->d;
458 assert(to_d >= 0)((void) sizeof ((to_d >= 0) ? 1 : 0), __extension__ ({ if (
to_d >= 0) ; else __assert_fail ("to_d >= 0", "ccv_cnnp_model_gradient_checkpointing.c"
, 458, __extension__ __PRETTY_FUNCTION__); }))
;
459 int from_flag = kh_get(ccv_cnnp_tensor_symbol_set, parameters_or_internals, from_d)kh_get_ccv_cnnp_tensor_symbol_set(parameters_or_internals, from_d
)
!= kh_end(parameters_or_internals)((parameters_or_internals)->n_buckets);
460 int to_flag = kh_get(ccv_cnnp_tensor_symbol_set, parameters_or_internals, to_d)kh_get_ccv_cnnp_tensor_symbol_set(parameters_or_internals, to_d
)
!= kh_end(parameters_or_internals)((parameters_or_internals)->n_buckets);
461 if (from_flag)
462 ++k;
463 if (to_flag)
464 ++j;
465 if (from_flag || to_flag)
466 continue;
467 ++k;
468 ++j;
469 // Skip if from_d is an output.
470 for (l = 0; !from_flag && l < checkpoint->output_size; l++)
471 if (checkpoint->outputs[l].d == from_d)
472 from_flag = 1;
473 if (from_flag)
474 continue;
475 // Skip if to_d is an output.
476 for (l = 0; !to_flag && l < checkpoint->output_size; l++)
477 if (checkpoint->outputs[l].d == to_d)
478 to_flag = 1;
479 if (to_flag)
480 continue;
481 int ret = 0;
482 khiter_t h = kh_put(ccv_cnnp_tensor_symbol_map, symbol_map, from_d, &ret)kh_put_ccv_cnnp_tensor_symbol_map(symbol_map, from_d, &ret
)
;
483 kh_val(symbol_map, h)((symbol_map)->vals[h]) = to_d;
484 }
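/* Editor's note: illustrative sketch only, not part of the analyzed file. It shows the
 * two-cursor pairing used in the loop above: walk the old and new symbol lists in lockstep,
 * skip entries that belong to the exclusion set (parameters/internals or outputs), and record
 * old->new pairs for the rest. The is_excluded helper and the sample ids are hypothetical. */
#include <stdio.h>

static int is_excluded(int d)
{
    return d == 7; /* pretend id 7 is a parameter/internal */
}

int main(void)
{
    const int old_ids[] = { 2, 7, 4 };
    const int new_ids[] = { 12, 14, 7 };
    int j = 0, k = 0;
    while (j < 3 && k < 3)
    {
        const int from_d = old_ids[k];
        const int to_d = new_ids[j];
        const int from_flag = is_excluded(from_d);
        const int to_flag = is_excluded(to_d);
        if (from_flag)
            ++k; /* skip the excluded old entry */
        if (to_flag)
            ++j; /* skip the excluded new entry */
        if (from_flag || to_flag)
            continue;
        ++k, ++j;
        printf("map old %d -> new %d\n", from_d, to_d); /* prints 2 -> 12 and 4 -> 14 */
    }
    return 0;
}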
485 // Now go over all backward passes to replace inputs with the ones from the symbol map. Record the ones that are used.
486 ccv_array_clear(newly_used_outputs);
487 ccv_array_clear(replaced_backward_execs);
488 for (j = 0; j < visited_backward_execs->rnum; j++)
489 {
490 const int idx = *(int*)ccv_array_get(visited_backward_execs, j)((void*)(((char*)((visited_backward_execs)->data)) + (size_t
)(visited_backward_execs)->rsize * (size_t)(j)))
;
491 if (CCV_NNC_GRAPH_EXEC_IS_DEAD(exec_info[idx].flags)((exec_info[idx].flags) & CCV_NNC_GRAPH_EXEC_DEAD))
492 continue;
493 assert(idx >= 0)((void) sizeof ((idx >= 0) ? 1 : 0), __extension__ ({ if (
idx >= 0) ; else __assert_fail ("idx >= 0", "ccv_cnnp_model_gradient_checkpointing.c"
, 493, __extension__ __PRETTY_FUNCTION__); }))
;
494 assert(idx < exec_rnum)((void) sizeof ((idx < exec_rnum) ? 1 : 0), __extension__ (
{ if (idx < exec_rnum) ; else __assert_fail ("idx < exec_rnum"
, "ccv_cnnp_model_gradient_checkpointing.c", 494, __extension__
__PRETTY_FUNCTION__); }))
;
495 if (!ccv_nnc_cmd_is_backward(exec_info[idx].cmd))
496 continue;
497 for (k = 0; k < exec_info[idx].input_size; k++)
498 if (exec_info[idx].inputs[k] >= 0)
499 {
500 const khiter_t h = kh_get(ccv_cnnp_tensor_symbol_map, symbol_map, exec_info[idx].inputs[k])kh_get_ccv_cnnp_tensor_symbol_map(symbol_map, exec_info[idx].
inputs[k])
;
501 if (h != kh_end(symbol_map)((symbol_map)->n_buckets)) // Replacing it.
502 {
503 int newly_created_output = kh_val(symbol_map, h)((symbol_map)->vals[h]);
504 exec_info[idx].inputs[k] = newly_created_output;
505 ccv_array_add_unique_int(newly_used_outputs, newly_created_output);
506 if (tensor_symbol_info[newly_created_output].alias_ref > 0)
507 {
508 newly_created_output = tensor_symbol_info[newly_created_output].alias_ref - 1;
509 ccv_array_add_unique_int(newly_used_outputs, newly_created_output);
510 }
511 ccv_array_add_unique_int(replaced_backward_execs, idx);
512 }
513 }
514 }
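/* Editor's note: illustrative sketch only, not part of the analyzed file. It shows the
 * replacement step performed in the loop above: for every backward exec input, look the id up in
 * the old->new map, rewrite it when a mapping exists, and remember which new outputs are now
 * actually used. The fixed-size map and sample inputs are hypothetical. */
#include <stdio.h>

#define MAP_SIZE 2

int main(void)
{
    const int map_from[MAP_SIZE] = { 4, 9 };
    const int map_to[MAP_SIZE] = { 14, 19 };
    int inputs[] = { 4, -1, 6, 9 }; /* -1 is an unused slot, 6 has no mapping */
    int used[4], used_count = 0, k, m;
    for (k = 0; k < 4; k++)
    {
        if (inputs[k] < 0)
            continue;
        for (m = 0; m < MAP_SIZE; m++)
            if (map_from[m] == inputs[k])
            {
                inputs[k] = map_to[m]; /* rewrite to the recomputed symbol */
                used[used_count++] = map_to[m]; /* record that this output is needed */
                break;
            }
    }
    for (k = 0; k < 4; k++)
        printf("input[%d] = %d\n", k, inputs[k]); /* 14, -1, 6, 19 */
    for (k = 0; k < used_count; k++)
        printf("used output: %d\n", used[k]); /* 14 and 19 */
    return 0;
}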
515 for (j = 0; j < build.graph_exec_symbols->rnum; j++)
516 {
517 ccv_nnc_graph_exec_symbol_t* const symbol = (ccv_nnc_graph_exec_symbol_t*)ccv_array_get(build.graph_exec_symbols, j)((void*)(((char*)((build.graph_exec_symbols)->data)) + (size_t
)(build.graph_exec_symbols)->rsize * (size_t)(j)))
;
518 if (symbol->d < 0)
519 continue;
520 if (CCV_NNC_GRAPH_EXEC_IS_DEAD(exec_info[symbol->d].flags)((exec_info[symbol->d].flags) & CCV_NNC_GRAPH_EXEC_DEAD
)
)
521 continue;
522 int x, y;
523 for (k = 0; k < replaced_backward_execs->rnum; k++)
524 {
525 const int idx = *(int*)ccv_array_get(replaced_backward_execs, k)((void*)(((char*)((replaced_backward_execs)->data)) + (size_t
)(replaced_backward_execs)->rsize * (size_t)(k)))
;
526 assert(idx >= 0)((void) sizeof ((idx >= 0) ? 1 : 0), __extension__ ({ if (
idx >= 0) ; else __assert_fail ("idx >= 0", "ccv_cnnp_model_gradient_checkpointing.c"
, 526, __extension__ __PRETTY_FUNCTION__); }))
;
527 assert(idx < exec_rnum)((void) sizeof ((idx < exec_rnum) ? 1 : 0), __extension__ (
{ if (idx < exec_rnum) ; else __assert_fail ("idx < exec_rnum"
, "ccv_cnnp_model_gradient_checkpointing.c", 527, __extension__
__PRETTY_FUNCTION__); }))
;
528 assert(ccv_nnc_cmd_is_backward(exec_info[idx].cmd))((void) sizeof ((ccv_nnc_cmd_is_backward(exec_info[idx].cmd))
? 1 : 0), __extension__ ({ if (ccv_nnc_cmd_is_backward(exec_info
[idx].cmd)) ; else __assert_fail ("ccv_nnc_cmd_is_backward(exec_info[idx].cmd)"
, "ccv_cnnp_model_gradient_checkpointing.c", 528, __extension__
__PRETTY_FUNCTION__); }))
;
529 int flag = 0;
530 for (x = 0; !flag && x < exec_info[idx].input_size; x++)
531 {
532 int x_d = exec_info[idx].inputs[x];
533 if (x_d < 0)
534 continue;
535 if (tensor_symbol_info[x_d].alias_ref > 0)
536 x_d = tensor_symbol_info[x_d].alias_ref - 1;
537 for (y = 0; !flag && y < exec_info[symbol->d].output_size; y++)
538 {
539 int y_d = exec_info[symbol->d].outputs[y];
540 if (y_d < 0)
541 continue;
542 if (tensor_symbol_info[y_d].alias_ref > 0)
543 y_d = tensor_symbol_info[y_d].alias_ref - 1;
544 if (x_d == y_d)
545 flag = 1;
546 }
547 }
548 if (flag)
549 ccv_nnc_graph_exec_symbol_concat(graph, *symbol, (ccv_nnc_graph_exec_symbol_t){
550 .graph = graph,
551 .d = idx
552 });
553 }
554 }
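/* Editor's note: illustrative sketch only, not part of the analyzed file. The loops above
 * compare tensors through their alias base: if alias_ref > 0 the symbol is an alias and
 * alias_ref - 1 is the index of the underlying tensor symbol. The struct and the sample data
 * below are hypothetical. */
#include <stdio.h>

typedef struct {
    int alias_ref; /* 0: not an alias; otherwise 1 + index of the base symbol */
} symbol_info_t;

static int resolve_alias(const symbol_info_t* info, int d)
{
    return info[d].alias_ref > 0 ? info[d].alias_ref - 1 : d;
}

int main(void)
{
    const symbol_info_t info[3] = { { 0 }, { 0 }, { 1 } }; /* symbol 2 aliases symbol 0 */
    const int x_d = 2, y_d = 0;
    if (resolve_alias(info, x_d) == resolve_alias(info, y_d))
        printf("symbols %d and %d refer to the same underlying tensor\n", x_d, y_d);
    return 0;
}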
555 // Find the parents of the visited_backward_execs, and use them as the starting points for all newly added graph_exec_symbols. Use the visited backward execs as the sources and all their parents as the destinations, then go through with a graph visit.
556 ccv_sparse_matrix_t* const exec_dep = ccv_sparse_matrix_new(graph->exec_symbol_info->rnum, graph->exec_symbol_info->rnum, CCV_8U | CCV_C1, CCV_SPARSE_ROW_MAJOR, 0);
557#define for_block(x, val) \
558 do { \
559 if (((uint8_t*)val)[0] != 0) \
560 ccv_array_push(buf, &x); \
561 } while (0)
562 const uint8_t one = 1;
563 // Build the dependency matrix exec_dep: each node's outgoing nodes inherit a dependency on the node itself and on everything it already depends on.
564 ccv_nnc_graph_visit_for(visit, exec_info, node, idx){ int _i_; for (_i_ = 0; _i_ < (visit)->size; _i_++) { const
int idx __attribute__((unused)) = (visit)->node[_i_].index
; const int _node_unused_ __attribute__((unused)) = (visit)->
node[_i_].term; typeof ((exec_info)) const node __attribute__
((unused)) = (exec_info) + idx;
{
565 if (idx < exec_rnum && !CCV_NNC_GRAPH_EXEC_IS_DEAD(node->flags)((node->flags) & CCV_NNC_GRAPH_EXEC_DEAD) && maskbit[idx >> 5] & (1u << (idx & 0x1f)))
566 {
567 ccv_array_clear(buf);
568 ccv_sparse_matrix_vector_t* vector = ccv_get_sparse_matrix_vector(exec_dep, idx);
569 if (vector)
570 CCV_SPARSE_VECTOR_FOREACH(exec_dep, vector, for_block)do { switch ((((exec_dep)->type) & 0xFF000)) { case CCV_32S
: { do { int _i_; __attribute__((unused)) const size_t _c_ = (
((exec_dep)->type) & 0xFFF); if ((exec_dep)->type &
CCV_DENSE_VECTOR) { for (_i_ = 0; _i_ < (vector)->size
; _i_++) { for_block((_i_), ((vector)->data.i32 + (_i_ * _c_
))); } } else { const size_t _idx_size_ = sizeof(ccv_sparse_matrix_index_t
) + ((_ccv_get_data_type_size[(((exec_dep)->type) & 0xFF000
) >> 12] * (((exec_dep)->type) & 0xFFF) + 3) &
-4); uint8_t* const _vidx_ = (uint8_t*)(vector)->index; for
(_i_ = 0; _i_ < (vector)->size; _i_++) { ccv_sparse_matrix_index_t
* const _idx_i_ = (ccv_sparse_matrix_index_t*)(_vidx_ + _idx_size_
* _i_); if (_idx_i_->ifbit <= 1) continue; ccv_numeric_data_t
_d_ = { .u8 = (uint8_t*)(_idx_i_ + 1) }; for_block((_idx_i_->
i), (_d_.i32 + (0))); } } } while (0); break; } case CCV_32F:
{ do { int _i_; __attribute__((unused)) const size_t _c_ = (
((exec_dep)->type) & 0xFFF); if ((exec_dep)->type &
CCV_DENSE_VECTOR) { for (_i_ = 0; _i_ < (vector)->size
; _i_++) { for_block((_i_), ((vector)->data.f32 + (_i_ * _c_
))); } } else { const size_t _idx_size_ = sizeof(ccv_sparse_matrix_index_t
) + ((_ccv_get_data_type_size[(((exec_dep)->type) & 0xFF000
) >> 12] * (((exec_dep)->type) & 0xFFF) + 3) &
-4); uint8_t* const _vidx_ = (uint8_t*)(vector)->index; for
(_i_ = 0; _i_ < (vector)->size; _i_++) { ccv_sparse_matrix_index_t
* const _idx_i_ = (ccv_sparse_matrix_index_t*)(_vidx_ + _idx_size_
* _i_); if (_idx_i_->ifbit <= 1) continue; ccv_numeric_data_t
_d_ = { .u8 = (uint8_t*)(_idx_i_ + 1) }; for_block((_idx_i_->
i), (_d_.f32 + (0))); } } } while (0); break; } case CCV_64S:
{ do { int _i_; __attribute__((unused)) const size_t _c_ = (
((exec_dep)->type) & 0xFFF); if ((exec_dep)->type &
CCV_DENSE_VECTOR) { for (_i_ = 0; _i_ < (vector)->size
; _i_++) { for_block((_i_), ((vector)->data.i64 + (_i_ * _c_
))); } } else { const size_t _idx_size_ = sizeof(ccv_sparse_matrix_index_t
) + ((_ccv_get_data_type_size[(((exec_dep)->type) & 0xFF000
) >> 12] * (((exec_dep)->type) & 0xFFF) + 3) &
-4); uint8_t* const _vidx_ = (uint8_t*)(vector)->index; for
(_i_ = 0; _i_ < (vector)->size; _i_++) { ccv_sparse_matrix_index_t
* const _idx_i_ = (ccv_sparse_matrix_index_t*)(_vidx_ + _idx_size_
* _i_); if (_idx_i_->ifbit <= 1) continue; ccv_numeric_data_t
_d_ = { .u8 = (uint8_t*)(_idx_i_ + 1) }; for_block((_idx_i_->
i), (_d_.i64 + (0))); } } } while (0); break; } case CCV_64F:
{ do { int _i_; __attribute__((unused)) const size_t _c_ = (
((exec_dep)->type) & 0xFFF); if ((exec_dep)->type &
CCV_DENSE_VECTOR) { for (_i_ = 0; _i_ < (vector)->size
; _i_++) { for_block((_i_), ((vector)->data.f64 + (_i_ * _c_
))); } } else { const size_t _idx_size_ = sizeof(ccv_sparse_matrix_index_t
) + ((_ccv_get_data_type_size[(((exec_dep)->type) & 0xFF000
) >> 12] * (((exec_dep)->type) & 0xFFF) + 3) &
-4); uint8_t* const _vidx_ = (uint8_t*)(vector)->index; for
(_i_ = 0; _i_ < (vector)->size; _i_++) { ccv_sparse_matrix_index_t
* const _idx_i_ = (ccv_sparse_matrix_index_t*)(_vidx_ + _idx_size_
* _i_); if (_idx_i_->ifbit <= 1) continue; ccv_numeric_data_t
_d_ = { .u8 = (uint8_t*)(_idx_i_ + 1) }; for_block((_idx_i_->
i), (_d_.f64 + (0))); } } } while (0); break; } default: { do
{ int _i_; __attribute__((unused)) const size_t _c_ = (((exec_dep
)->type) & 0xFFF); if ((exec_dep)->type & CCV_DENSE_VECTOR
) { for (_i_ = 0; _i_ < (vector)->size; _i_++) { for_block
((_i_), ((vector)->data.u8 + (_i_ * _c_))); } } else { const
size_t _idx_size_ = sizeof(ccv_sparse_matrix_index_t) + ((_ccv_get_data_type_size
[(((exec_dep)->type) & 0xFF000) >> 12] * (((exec_dep
)->type) & 0xFFF) + 3) & -4); uint8_t* const _vidx_
= (uint8_t*)(vector)->index; for (_i_ = 0; _i_ < (vector
)->size; _i_++) { ccv_sparse_matrix_index_t* const _idx_i_
= (ccv_sparse_matrix_index_t*)(_vidx_ + _idx_size_ * _i_); if
(_idx_i_->ifbit <= 1) continue; ccv_numeric_data_t _d_
= { .u8 = (uint8_t*)(_idx_i_ + 1) }; for_block((_idx_i_->
i), (_d_.u8 + (0))); } } } while (0); } } } while (0)
;
571 if (node->outgoings && node->outgoings->rnum > 0)
572 {
573 ccv_array_t* const outgoings = node->outgoings;
574 for (k = 0; k < outgoings->rnum; k++)
575 {
576 const int outgoing_d = *(int*)ccv_array_get(outgoings, k)((void*)(((char*)((outgoings)->data)) + (size_t)(outgoings
)->rsize * (size_t)(k)))
;
577 if (outgoing_d >= exec_rnum)
578 continue;
579 int l;
580 // We cannot skip the ones already visited, because they may not yet contain all the deps.
581 ccv_set_sparse_matrix_cell(exec_dep, outgoing_d, idx, &one);
582 for (l = 0; l < buf->rnum; l++)
583 ccv_set_sparse_matrix_cell(exec_dep, outgoing_d, *(int*)ccv_array_get(buf, l)((void*)(((char*)((buf)->data)) + (size_t)(buf)->rsize *
(size_t)(l)))
, &one);
584 }
585 }
586 }
587 } ccv_nnc_graph_visit_endfor} }
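/* Editor's note: illustrative sketch only, not part of the analyzed file. It shows the idea
 * behind the exec_dep construction above: visiting nodes in topological order, every outgoing
 * node inherits a dependency on the current node plus everything the current node already
 * depends on. A dense byte matrix stands in for the sparse matrix; N and the edges are
 * hypothetical. */
#include <stdio.h>

#define N 4

int main(void)
{
    /* Chain 0 -> 1 -> 2 -> 3, listed in topological order. */
    const int edges[][2] = { { 0, 1 }, { 1, 2 }, { 2, 3 } };
    unsigned char dep[N][N] = { { 0 } }; /* dep[a][b] != 0 means a depends on b */
    int e, b;
    for (e = 0; e < 3; e++)
    {
        const int from = edges[e][0], to = edges[e][1];
        dep[to][from] = 1; /* direct dependency */
        for (b = 0; b < N; b++)
            if (dep[from][b])
                dep[to][b] = 1; /* inherit the transitive dependencies */
    }
    printf("node 3 depends on node 0: %d\n", dep[3][0]); /* prints 1 */
    return 0;
}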
588 // Now go from outputs to inputs, unmark visited ones.
589 ccv_nnc_graph_visit_for(visit, exec_info, node, idx){ int _i_; for (_i_ = 0; _i_ < (visit)->size; _i_++) { const
int idx __attribute__((unused)) = (visit)->node[_i_].index
; const int _node_unused_ __attribute__((unused)) = (visit)->
node[_i_].term; typeof ((exec_info)) const node __attribute__
((unused)) = (exec_info) + idx;
{
590 if (idx < exec_rnum)
591 maskbit[idx >> 5] &= ~(1u << (idx & 0x1f));
592 } ccv_nnc_graph_visit_endfor} }
593 ccv_nnc_graph_visit_free(visit);
594#undef for_block
595 // Go through visited backward execs, remove the ones that have no dependency on any replaced backward execs.
596 for (j = 0; j < visited_backward_execs->rnum;)
597 {
598 const int idx = *(int*)ccv_array_get(visited_backward_execs, j)((void*)(((char*)((visited_backward_execs)->data)) + (size_t
)(visited_backward_execs)->rsize * (size_t)(j)))
;
599 if (ccv_array_contain_int(replaced_backward_execs, idx))
600 {
601 ++j;
602 continue;
603 }
604 ccv_sparse_matrix_vector_t* vector = ccv_get_sparse_matrix_vector(exec_dep, idx);
605 int flag = 0;
606#define for_block(x, val) \
607 do { \
608 if (((uint8_t*)val)[0] != 0) \
609 if (ccv_array_contain_int(replaced_backward_execs, x)) \
610 flag = 1; \
611 } while (0)
612 if (vector)
613 CCV_SPARSE_VECTOR_FOREACH(exec_dep, vector, for_block)do { switch ((((exec_dep)->type) & 0xFF000)) { case CCV_32S
: { do { int _i_; __attribute__((unused)) const size_t _c_ = (
((exec_dep)->type) & 0xFFF); if ((exec_dep)->type &
CCV_DENSE_VECTOR) { for (_i_ = 0; _i_ < (vector)->size
; _i_++) { for_block((_i_), ((vector)->data.i32 + (_i_ * _c_
))); } } else { const size_t _idx_size_ = sizeof(ccv_sparse_matrix_index_t
) + ((_ccv_get_data_type_size[(((exec_dep)->type) & 0xFF000
) >> 12] * (((exec_dep)->type) & 0xFFF) + 3) &
-4); uint8_t* const _vidx_ = (uint8_t*)(vector)->index; for
(_i_ = 0; _i_ < (vector)->size; _i_++) { ccv_sparse_matrix_index_t
* const _idx_i_ = (ccv_sparse_matrix_index_t*)(_vidx_ + _idx_size_
* _i_); if (_idx_i_->ifbit <= 1) continue; ccv_numeric_data_t
_d_ = { .u8 = (uint8_t*)(_idx_i_ + 1) }; for_block((_idx_i_->
i), (_d_.i32 + (0))); } } } while (0); break; } case CCV_32F:
{ do { int _i_; __attribute__((unused)) const size_t _c_ = (
((exec_dep)->type) & 0xFFF); if ((exec_dep)->type &
CCV_DENSE_VECTOR) { for (_i_ = 0; _i_ < (vector)->size
; _i_++) { for_block((_i_), ((vector)->data.f32 + (_i_ * _c_
))); } } else { const size_t _idx_size_ = sizeof(ccv_sparse_matrix_index_t
) + ((_ccv_get_data_type_size[(((exec_dep)->type) & 0xFF000
) >> 12] * (((exec_dep)->type) & 0xFFF) + 3) &
-4); uint8_t* const _vidx_ = (uint8_t*)(vector)->index; for
(_i_ = 0; _i_ < (vector)->size; _i_++) { ccv_sparse_matrix_index_t
* const _idx_i_ = (ccv_sparse_matrix_index_t*)(_vidx_ + _idx_size_
* _i_); if (_idx_i_->ifbit <= 1) continue; ccv_numeric_data_t
_d_ = { .u8 = (uint8_t*)(_idx_i_ + 1) }; for_block((_idx_i_->
i), (_d_.f32 + (0))); } } } while (0); break; } case CCV_64S:
{ do { int _i_; __attribute__((unused)) const size_t _c_ = (
((exec_dep)->type) & 0xFFF); if ((exec_dep)->type &
CCV_DENSE_VECTOR) { for (_i_ = 0; _i_ < (vector)->size
; _i_++) { for_block((_i_), ((vector)->data.i64 + (_i_ * _c_
))); } } else { const size_t _idx_size_ = sizeof(ccv_sparse_matrix_index_t
) + ((_ccv_get_data_type_size[(((exec_dep)->type) & 0xFF000
) >> 12] * (((exec_dep)->type) & 0xFFF) + 3) &
-4); uint8_t* const _vidx_ = (uint8_t*)(vector)->index; for
(_i_ = 0; _i_ < (vector)->size; _i_++) { ccv_sparse_matrix_index_t
* const _idx_i_ = (ccv_sparse_matrix_index_t*)(_vidx_ + _idx_size_
* _i_); if (_idx_i_->ifbit <= 1) continue; ccv_numeric_data_t
_d_ = { .u8 = (uint8_t*)(_idx_i_ + 1) }; for_block((_idx_i_->
i), (_d_.i64 + (0))); } } } while (0); break; } case CCV_64F:
{ do { int _i_; __attribute__((unused)) const size_t _c_ = (
((exec_dep)->type) & 0xFFF); if ((exec_dep)->type &
CCV_DENSE_VECTOR) { for (_i_ = 0; _i_ < (vector)->size
; _i_++) { for_block((_i_), ((vector)->data.f64 + (_i_ * _c_
))); } } else { const size_t _idx_size_ = sizeof(ccv_sparse_matrix_index_t
) + ((_ccv_get_data_type_size[(((exec_dep)->type) & 0xFF000
) >> 12] * (((exec_dep)->type) & 0xFFF) + 3) &
-4); uint8_t* const _vidx_ = (uint8_t*)(vector)->index; for
(_i_ = 0; _i_ < (vector)->size; _i_++) { ccv_sparse_matrix_index_t
* const _idx_i_ = (ccv_sparse_matrix_index_t*)(_vidx_ + _idx_size_
* _i_); if (_idx_i_->ifbit <= 1) continue; ccv_numeric_data_t
_d_ = { .u8 = (uint8_t*)(_idx_i_ + 1) }; for_block((_idx_i_->
i), (_d_.f64 + (0))); } } } while (0); break; } default: { do
{ int _i_; __attribute__((unused)) const size_t _c_ = (((exec_dep
)->type) & 0xFFF); if ((exec_dep)->type & CCV_DENSE_VECTOR
) { for (_i_ = 0; _i_ < (vector)->size; _i_++) { for_block
((_i_), ((vector)->data.u8 + (_i_ * _c_))); } } else { const
size_t _idx_size_ = sizeof(ccv_sparse_matrix_index_t) + ((_ccv_get_data_type_size
[(((exec_dep)->type) & 0xFF000) >> 12] * (((exec_dep
)->type) & 0xFFF) + 3) & -4); uint8_t* const _vidx_
= (uint8_t*)(vector)->index; for (_i_ = 0; _i_ < (vector
)->size; _i_++) { ccv_sparse_matrix_index_t* const _idx_i_
= (ccv_sparse_matrix_index_t*)(_vidx_ + _idx_size_ * _i_); if
(_idx_i_->ifbit <= 1) continue; ccv_numeric_data_t _d_
= { .u8 = (uint8_t*)(_idx_i_ + 1) }; for_block((_idx_i_->
i), (_d_.u8 + (0))); } } } while (0); } } } while (0)
;
614#undef for_block
615 if (!flag)
616 {
617 if (j < visited_backward_execs->rnum - 1)
618 *(int*)ccv_array_get(visited_backward_execs, j)((void*)(((char*)((visited_backward_execs)->data)) + (size_t
)(visited_backward_execs)->rsize * (size_t)(j)))
= *(int*)ccv_array_get(visited_backward_execs, visited_backward_execs->rnum - 1)((void*)(((char*)((visited_backward_execs)->data)) + (size_t
)(visited_backward_execs)->rsize * (size_t)(visited_backward_execs
->rnum - 1)))
;
619 --visited_backward_execs->rnum;
620 continue;
621 }
622 ++j;
623 }
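/* Editor's note: illustrative sketch only, not part of the analyzed file. The loop above removes
 * entries from visited_backward_execs in place with the "swap with last and shrink" idiom, which
 * is why j only advances when the current entry is kept. The keep predicate and the sample data
 * are hypothetical. */
#include <stdio.h>

static int keep(int v)
{
    return v % 2 == 0; /* keep even values */
}

int main(void)
{
    int values[] = { 2, 5, 8, 7, 4 };
    int rnum = 5, j = 0, i;
    while (j < rnum)
    {
        if (keep(values[j]))
        {
            ++j;
            continue;
        }
        if (j < rnum - 1)
            values[j] = values[rnum - 1]; /* overwrite with the last element */
        --rnum; /* shrink; do not advance j, the swapped-in element must be examined too */
    }
    for (i = 0; i < rnum; i++)
        printf("%d ", values[i]); /* 2 4 8 (order is not preserved) */
    printf("\n");
    return 0;
}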
624 // Now go through all replaced_backward_execs to find the ones that have no dependencies in visited_backward_execs.
625 for (j = 0; j < replaced_backward_execs->rnum; j++)
626 {
627 const int idx = *(int*)ccv_array_get(replaced_backward_execs, j)((void*)(((char*)((replaced_backward_execs)->data)) + (size_t
)(replaced_backward_execs)->rsize * (size_t)(j)))
;
628 ccv_sparse_matrix_vector_t* vector = ccv_get_sparse_matrix_vector(exec_dep, idx);
629 int flag = 0;
630#define for_block(x, val) \
631 do { \
632 if (((uint8_t*)val)[0] != 0) \
633 if (ccv_array_contain_int(visited_backward_execs, x)) \
634 flag = 1; \
635 } while (0)
636 if (vector)
637 CCV_SPARSE_VECTOR_FOREACH(exec_dep, vector, for_block)do { switch ((((exec_dep)->type) & 0xFF000)) { case CCV_32S
: { do { int _i_; __attribute__((unused)) const size_t _c_ = (
((exec_dep)->type) & 0xFFF); if ((exec_dep)->type &
CCV_DENSE_VECTOR) { for (_i_ = 0; _i_ < (vector)->size
; _i_++) { for_block((_i_), ((vector)->data.i32 + (_i_ * _c_
))); } } else { const size_t _idx_size_ = sizeof(ccv_sparse_matrix_index_t
) + ((_ccv_get_data_type_size[(((exec_dep)->type) & 0xFF000
) >> 12] * (((exec_dep)->type) & 0xFFF) + 3) &
-4); uint8_t* const _vidx_ = (uint8_t*)(vector)->index; for
(_i_ = 0; _i_ < (vector)->size; _i_++) { ccv_sparse_matrix_index_t
* const _idx_i_ = (ccv_sparse_matrix_index_t*)(_vidx_ + _idx_size_
* _i_); if (_idx_i_->ifbit <= 1) continue; ccv_numeric_data_t
_d_ = { .u8 = (uint8_t*)(_idx_i_ + 1) }; for_block((_idx_i_->
i), (_d_.i32 + (0))); } } } while (0); break; } case CCV_32F:
{ do { int _i_; __attribute__((unused)) const size_t _c_ = (
((exec_dep)->type) & 0xFFF); if ((exec_dep)->type &
CCV_DENSE_VECTOR) { for (_i_ = 0; _i_ < (vector)->size
; _i_++) { for_block((_i_), ((vector)->data.f32 + (_i_ * _c_
))); } } else { const size_t _idx_size_ = sizeof(ccv_sparse_matrix_index_t
) + ((_ccv_get_data_type_size[(((exec_dep)->type) & 0xFF000
) >> 12] * (((exec_dep)->type) & 0xFFF) + 3) &
-4); uint8_t* const _vidx_ = (uint8_t*)(vector)->index; for
(_i_ = 0; _i_ < (vector)->size; _i_++) { ccv_sparse_matrix_index_t
* const _idx_i_ = (ccv_sparse_matrix_index_t*)(_vidx_ + _idx_size_
* _i_); if (_idx_i_->ifbit <= 1) continue; ccv_numeric_data_t
_d_ = { .u8 = (uint8_t*)(_idx_i_ + 1) }; for_block((_idx_i_->
i), (_d_.f32 + (0))); } } } while (0); break; } case CCV_64S:
{ do { int _i_; __attribute__((unused)) const size_t _c_ = (
((exec_dep)->type) & 0xFFF); if ((exec_dep)->type &
CCV_DENSE_VECTOR) { for (_i_ = 0; _i_ < (vector)->size
; _i_++) { for_block((_i_), ((vector)->data.i64 + (_i_ * _c_
))); } } else { const size_t _idx_size_ = sizeof(ccv_sparse_matrix_index_t
) + ((_ccv_get_data_type_size[(((exec_dep)->type) & 0xFF000
) >> 12] * (((exec_dep)->type) & 0xFFF) + 3) &
-4); uint8_t* const _vidx_ = (uint8_t*)(vector)->index; for
(_i_ = 0; _i_ < (vector)->size; _i_++) { ccv_sparse_matrix_index_t
* const _idx_i_ = (ccv_sparse_matrix_index_t*)(_vidx_ + _idx_size_
* _i_); if (_idx_i_->ifbit <= 1) continue; ccv_numeric_data_t
_d_ = { .u8 = (uint8_t*)(_idx_i_ + 1) }; for_block((_idx_i_->
i), (_d_.i64 + (0))); } } } while (0); break; } case CCV_64F:
{ do { int _i_; __attribute__((unused)) const size_t _c_ = (
((exec_dep)->type) & 0xFFF); if ((exec_dep)->type &
CCV_DENSE_VECTOR) { for (_i_ = 0; _i_ < (vector)->size
; _i_++) { for_block((_i_), ((vector)->data.f64 + (_i_ * _c_
))); } } else { const size_t _idx_size_ = sizeof(ccv_sparse_matrix_index_t
) + ((_ccv_get_data_type_size[(((exec_dep)->type) & 0xFF000
) >> 12] * (((exec_dep)->type) & 0xFFF) + 3) &
-4); uint8_t* const _vidx_ = (uint8_t*)(vector)->index; for
(_i_ = 0; _i_ < (vector)->size; _i_++) { ccv_sparse_matrix_index_t
* const _idx_i_ = (ccv_sparse_matrix_index_t*)(_vidx_ + _idx_size_
* _i_); if (_idx_i_->ifbit <= 1) continue; ccv_numeric_data_t
_d_ = { .u8 = (uint8_t*)(_idx_i_ + 1) }; for_block((_idx_i_->
i), (_d_.f64 + (0))); } } } while (0); break; } default: { do
{ int _i_; __attribute__((unused)) const size_t _c_ = (((exec_dep
)->type) & 0xFFF); if ((exec_dep)->type & CCV_DENSE_VECTOR
) { for (_i_ = 0; _i_ < (vector)->size; _i_++) { for_block
((_i_), ((vector)->data.u8 + (_i_ * _c_))); } } else { const
size_t _idx_size_ = sizeof(ccv_sparse_matrix_index_t) + ((_ccv_get_data_type_size
[(((exec_dep)->type) & 0xFF000) >> 12] * (((exec_dep
)->type) & 0xFFF) + 3) & -4); uint8_t* const _vidx_
= (uint8_t*)(vector)->index; for (_i_ = 0; _i_ < (vector
)->size; _i_++) { ccv_sparse_matrix_index_t* const _idx_i_
= (ccv_sparse_matrix_index_t*)(_vidx_ + _idx_size_ * _i_); if
(_idx_i_->ifbit <= 1) continue; ccv_numeric_data_t _d_
= { .u8 = (uint8_t*)(_idx_i_ + 1) }; for_block((_idx_i_->
i), (_d_.u8 + (0))); } } } while (0); } } } while (0)
;
638#undef for_block
639 // If this one has no parents within the visited_backward_execs, it is a good place for us to add all its parents as dependencies for the input_execs.
640 if (!flag)
641 {
642 assert(idx < exec_rnum)((void) sizeof ((idx < exec_rnum) ? 1 : 0), __extension__ (
{ if (idx < exec_rnum) ; else __assert_fail ("idx < exec_rnum"
, "ccv_cnnp_model_gradient_checkpointing.c", 642, __extension__
__PRETTY_FUNCTION__); }))
;
643 ccv_array_t* const outgoings = reversed_nodes[idx].outgoings;
644 assert(outgoings)((void) sizeof ((outgoings) ? 1 : 0), __extension__ ({ if (outgoings
) ; else __assert_fail ("outgoings", "ccv_cnnp_model_gradient_checkpointing.c"
, 644, __extension__ __PRETTY_FUNCTION__); }))
;
645 for (k = 0; k < outgoings->rnum; k++)
646 {
647 const int d = *(int*)ccv_array_get(outgoings, k)((void*)(((char*)((outgoings)->data)) + (size_t)(outgoings
)->rsize * (size_t)(k)))
;
648 for (l = 0; l < newly_input_execs->rnum; l++)
649 {
650 ccv_nnc_graph_exec_symbol_concat(graph, (ccv_nnc_graph_exec_symbol_t){
651 .graph = graph,
652 .d = d
653 }, *(ccv_nnc_graph_exec_symbol_t*)ccv_array_get(newly_input_execs, l)((void*)(((char*)((newly_input_execs)->data)) + (size_t)(newly_input_execs
)->rsize * (size_t)(l)))
);
654 }
655 }
656 }
657 }
658 ccv_matrix_free(exec_dep);
659 // Go through all execs and free the ones whose outputs are not used.
660 // Reuse this array since its original contents are no longer needed.
661 ccv_array_t* forward_pass_inputs = visited_backward_execs;
662 int any_deleted;
663 do {
664 // Build a map of still active inputs.
665 ccv_array_clear(forward_pass_inputs);
666 for (j = 0; j < build.graph_exec_symbols->rnum; j++)
667 {
668 ccv_nnc_graph_exec_symbol_t* const symbol = (ccv_nnc_graph_exec_symbol_t*)ccv_array_get(build.graph_exec_symbols, j)((void*)(((char*)((build.graph_exec_symbols)->data)) + (size_t
)(build.graph_exec_symbols)->rsize * (size_t)(j)))
;
669 if (symbol->d < 0)
670 continue;
671 if (CCV_NNC_GRAPH_EXEC_IS_DEAD(exec_info[symbol->d].flags)((exec_info[symbol->d].flags) & CCV_NNC_GRAPH_EXEC_DEAD
)
)
672 continue;
673 int* const inputs = exec_info[symbol->d].inputs;
674 const int input_size = exec_info[symbol->d].input_size;
675 for (k = 0; k < input_size; k++)
676 {
677 int d = inputs[k];
678 if (d < 0)
679 continue;
680 ccv_array_add_unique_int(forward_pass_inputs, d);
681 if (tensor_symbol_info[d].alias_ref > 0)
682 {
683 d = tensor_symbol_info[d].alias_ref - 1;
684 ccv_array_add_unique_int(forward_pass_inputs, d);
685 }
686 }
687 }
688 any_deleted = 0;
689 for (j = 0; j < build.graph_exec_symbols->rnum; j++)
690 {
691 ccv_nnc_graph_exec_symbol_t* const symbol = (ccv_nnc_graph_exec_symbol_t*)ccv_array_get(build.graph_exec_symbols, j)((void*)(((char*)((build.graph_exec_symbols)->data)) + (size_t
)(build.graph_exec_symbols)->rsize * (size_t)(j)))
;
692 if (symbol->d < 0)
693 continue;
694 if (CCV_NNC_GRAPH_EXEC_IS_DEAD(exec_info[symbol->d].flags)((exec_info[symbol->d].flags) & CCV_NNC_GRAPH_EXEC_DEAD
)
)
695 continue;
696 int* const outputs = exec_info[symbol->d].outputs;
697 const int output_size = exec_info[symbol->d].output_size;
698 int flag = 0;
699 for (k = 0; !flag && k < output_size; k++)
700 {
701 int d = outputs[k];
702 if (d < 0)
703 continue;
704 flag = ccv_array_contain_int(newly_used_outputs, d) || ccv_array_contain_int(forward_pass_inputs, d);
705 if (!flag && tensor_symbol_info[d].alias_ref > 0)
706 {
707 d = tensor_symbol_info[d].alias_ref - 1;
708 flag = ccv_array_contain_int(newly_used_outputs, d) || ccv_array_contain_int(forward_pass_inputs, d);
709 }
710 }
711 if (flag)
712 continue;
713 ccv_nnc_graph_exec_symbol_free(graph, *symbol);
714 symbol->d = -1;
715 symbol->graph = 0;
716 any_deleted = 1;
717 }
718 } while (any_deleted);
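/* Editor's note: illustrative sketch only, not part of the analyzed file. It shows the fixed
 * point idea of the do/while above: keep deleting nodes whose outputs nobody consumes, and
 * repeat, because each deletion can strand the output of an earlier producer. The arrays below
 * are hypothetical; node i produces value i and consumes the value produced by uses[i]. */
#include <stdio.h>

#define N 4

int main(void)
{
    /* Node i consumes the value produced by uses[i] (-1 means no input). Only node 3's
     * output is required externally. */
    const int uses[N] = { -1, 0, 1, -1 };
    const int externally_used[N] = { 0, 0, 0, 1 };
    int alive[N] = { 1, 1, 1, 1 };
    int any_deleted, i, j;
    do {
        any_deleted = 0;
        for (i = 0; i < N; i++)
        {
            int used = externally_used[i];
            if (!alive[i])
                continue;
            for (j = 0; j < N && !used; j++)
                if (alive[j] && uses[j] == i)
                    used = 1; /* some live node still consumes node i's output */
            if (!used)
            {
                alive[i] = 0; /* delete the node, possibly exposing more dead nodes */
                any_deleted = 1;
            }
        }
    } while (any_deleted);
    for (i = 0; i < N; i++)
        printf("node %d alive: %d\n", i, alive[i]); /* only node 3 remains alive */
    return 0;
}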
719 ccv_array_clear(forward_pass_inputs);
720 for (j = 0; j < build.graph_exec_symbols->rnum; j++)
721 {
722 ccv_nnc_graph_exec_symbol_t* const symbol = (ccv_nnc_graph_exec_symbol_t*)ccv_array_get(build.graph_exec_symbols, j)((void*)(((char*)((build.graph_exec_symbols)->data)) + (size_t
)(build.graph_exec_symbols)->rsize * (size_t)(j)))
;
723 if (symbol->d < 0)
724 continue;
725 if (CCV_NNC_GRAPH_EXEC_IS_DEAD(exec_info[symbol->d].flags)((exec_info[symbol->d].flags) & CCV_NNC_GRAPH_EXEC_DEAD
)
)
726 continue;
727 int* const inputs = exec_info[symbol->d].inputs;
728 const int input_size = exec_info[symbol->d].input_size;
729 for (k = 0; k < input_size; k++)
730 {
731 if (inputs[k] < 0)
732 continue;
733 ccv_array_add_unique_int(forward_pass_inputs, inputs[k]);
734 if (tensor_symbol_info[inputs[k]].alias_ref > 0)
735 ccv_array_add_unique_int(forward_pass_inputs, tensor_symbol_info[inputs[k]].alias_ref - 1);
736 }
737 int* const outputs = exec_info[symbol->d].outputs;
738 const int output_size = exec_info[symbol->d].output_size;
739 for (k = 0; k < output_size; k++)
740 {
741 if (outputs[k] < 0)
742 continue;
743 ccv_array_add_unique_int(forward_pass_inputs, outputs[k]);
744 if (tensor_symbol_info[outputs[k]].alias_ref > 0)
745 ccv_array_add_unique_int(forward_pass_inputs, tensor_symbol_info[outputs[k]].alias_ref - 1);
746 }
747 }
748 // Free unused tensor symbols.
749 for (j = 0; j < build.tensor_symbols->rnum; j++)
750 {
751 const ccv_nnc_tensor_symbol_t* symbol = ((ccv_nnc_tensor_symbol_t*)ccv_array_get(build.tensor_symbols, j)((void*)(((char*)((build.tensor_symbols)->data)) + (size_t
)(build.tensor_symbols)->rsize * (size_t)(j)))
);
752 if (ccv_array_contain_int(newly_used_outputs, symbol->d) || ccv_array_contain_int(forward_pass_inputs, symbol->d))
753 continue;
754 if (tensor_symbol_info[symbol->d].alias_ref > 0)
755 {
756 const int d = tensor_symbol_info[symbol->d].alias_ref - 1;
757 if (ccv_array_contain_int(newly_used_outputs, d) || ccv_array_contain_int(forward_pass_inputs, d))
758 continue;
759 }
760 ccv_nnc_tensor_symbol_free(graph, *symbol);
761 }
762 for (j = 0; j < build.graph_exec_symbols->rnum; j++)
763 {
764 ccv_nnc_graph_exec_symbol_t* const symbol = (ccv_nnc_graph_exec_symbol_t*)ccv_array_get(build.graph_exec_symbols, j)((void*)(((char*)((build.graph_exec_symbols)->data)) + (size_t
)(build.graph_exec_symbols)->rsize * (size_t)(j)))
;
765 if (symbol->d < 0)
766 continue;
767 if (CCV_NNC_GRAPH_EXEC_IS_DEAD(exec_info[symbol->d].flags)((exec_info[symbol->d].flags) & CCV_NNC_GRAPH_EXEC_DEAD
)
)
768 continue;
769 ccv_nnc_graph_exec_symbol_set_flags(graph, *symbol, CCV_NNC_GRAPH_EXEC_DISABLE_OPT);
770 }
771 // Free the arrays that tracked these newly created execs and tensor symbols.
772 ccv_array_free(build.tensor_symbols);
773 ccv_array_free(build.graph_exec_symbols);
774 }
775 kh_destroy(ccv_cnnp_tensor_symbol_map, symbol_map)kh_destroy_ccv_cnnp_tensor_symbol_map(symbol_map);
776 kh_destroy(ccv_cnnp_tensor_symbol_set, newly_created_tensor_symbols)kh_destroy_ccv_cnnp_tensor_symbol_set(newly_created_tensor_symbols
)
;
777 kh_destroy(ccv_cnnp_tensor_symbol_set, parameters_or_internals)kh_destroy_ccv_cnnp_tensor_symbol_set(parameters_or_internals
)
;
778 ccfreefree(max_outputs);
779 ccv_array_free(buf);
780 ccv_array_free(newly_used_outputs);
781 ccv_array_free(parameters);
782 ccv_array_free(parameter_ids);
783 ccv_array_free(parameter_trainables);
784 ccv_array_free(internals);
785 ccv_array_free(internal_ids);
786 ccfreefree(maskbit);
787 ccv_array_free(input_gradient_execs);
788 ccv_array_free(output_gradient_execs);
789 ccv_array_free(input_execs);
790 ccv_array_free(output_execs);
791 ccv_array_free(replaced_backward_execs);
792 ccv_array_free(visited_backward_execs);
793 for (i = 0; i < exec_rnum; i++)
794 if (reversed_nodes[i].outgoings)
795 ccv_array_free(reversed_nodes[i].outgoings);
796 ccfreefree(reversed_nodes);
797}