Bug Summary

File: nnc/ccv_nnc_xpu_alloc.c
Warning: line 20, column 1
Access to field 'node' results in a dereference of a null pointer (loaded from variable 'nodep')

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-unknown-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name ccv_nnc_xpu_alloc.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 2 -pic-is-pie -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -target-feature +sse2 -tune-cpu generic -debugger-tuning=gdb -fdebug-compilation-dir=/home/liu/actions-runner/_work/ccv/ccv/lib/nnc -fcoverage-compilation-dir=/home/liu/actions-runner/_work/ccv/ccv/lib/nnc -resource-dir /usr/local/lib/clang/19 -I ../ -I /usr/local/cuda/include -D HAVE_CBLAS -D HAVE_LIBPNG -D HAVE_LIBJPEG -D HAVE_FFTW3 -D HAVE_PTHREAD -D HAVE_LIBLINEAR -D HAVE_TESSERACT -D HAVE_AVCODEC -D HAVE_AVFORMAT -D HAVE_AVUTIL -D HAVE_SWSCALE -D HAVE_SSE2 -D HAVE_GSL -D HAVE_CUDA -D HAVE_CUDNN -D HAVE_NCCL -D USE_SYSTEM_CUB -I /usr/local/include -internal-isystem /usr/local/lib/clang/19/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/12/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -ferror-limit 19 -fgnuc-version=4.2.1 -fskip-odr-check-in-gmf -vectorize-loops -vectorize-slp -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/liu/actions-runner/_work/ccv/ccv/_analyze/2024-12-04-153127-42938-1 -x c ccv_nnc_xpu_alloc.c
1 #include "ccv_nnc.h"
2 #include "ccv_nnc_easy.h"
3 #include "ccv_nnc_internal.h"
4 #include "ccv_nnc_easy.h"
5 #include "ccv_internal.h"
6 #include "_ccv_nnc_xpu_alloc.h"
7 #if defined(HAVE_CUDA) || defined(HAVE_MPS)
8 #ifdef HAVE_CUDA
9 #include "gpu/ccv_nnc_compat.h"
10 #else
11 #include "mps/ccv_nnc_mps.h"
12 #endif
13 #include <stdbool.h>
14
15 static int dy_alloc_tree_cmp(const dy_alloc_metadata_t* const a_node, const dy_alloc_metadata_t* const b_node)
16 {
17 	return (a_node->size > b_node->size) - (b_node->size > a_node->size);
18 }
19
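Editor's note: dy_alloc_tree_cmp orders metadata nodes purely by size, using the branch-free sign-comparison idiom, so the tree generated below behaves as a size-sorted free list and dy_alloc_tree_nsearch (used later in ccv_nnc_xpu_alloc) returns the smallest cached block whose size is at least the requested size. A standalone sketch of the idiom, with illustrative names only:

	#include <stdio.h>
	#include <stddef.h>

	/* Same idiom as dy_alloc_tree_cmp: yields -1, 0 or +1 without branches
	 * and without the overflow risk of returning a - b. */
	static int size_cmp(size_t a, size_t b)
	{
		return (a > b) - (b > a);
	}

	int main(void)
	{
		printf("%d %d %d\n", size_cmp(64, 256), size_cmp(128, 128), size_cmp(512, 64));
		/* prints: -1 0 1 */
		return 0;
	}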
20 rb_gen(, dy_alloc_tree_, dy_alloc_tree_t, dy_alloc_metadata_t, link, dy_alloc_tree_cmp)

The rb_gen macro on line 20 expands inline to the complete red-black tree implementation (dy_alloc_tree_new, _empty, _first, _last, _next, _prev, _search, _nsearch, _psearch, _insert, _remove, the forward and reverse iterators, and _destroy). The report attributes the entire expansion to line 20, which is why the warning and every assertion in it point there. The flagged dereference sits inside the expansion of dy_alloc_tree_remove; cleaned up, the relevant part of that expansion reads:

	void dy_alloc_tree_remove(dy_alloc_tree_t *rbtree, dy_alloc_metadata_t *node)
	{
		struct { dy_alloc_metadata_t *node; int cmp; } *pathp, *nodep, path[sizeof(void *) << 4];
		nodep = NULL;                           /* step 1: null pointer value stored to 'nodep' */
		path->node = rbtree->rbt_root;          /* step 2: 'path->node' assumed to be null      */
		for (pathp = path; pathp->node != NULL; pathp++) {   /* step 3: loop condition is false */
			int cmp = pathp->cmp = dy_alloc_tree_cmp(node, pathp->node);
			if (cmp < 0) {
				pathp[1].node = pathp->node->link.rbn_left;
			} else {
				pathp[1].node = (dy_alloc_metadata_t *)((intptr_t)pathp->node->link.rbn_right_red & (ssize_t)-2);
				if (cmp == 0) {
					pathp->cmp = 1;
					nodep = pathp;          /* the only assignment that makes 'nodep' non-null */
					for (pathp++; pathp->node != NULL; pathp++) {
						pathp->cmp = -1;
						pathp[1].node = pathp->node->link.rbn_left;
					}
					break;
				}
			}
		}
		assert(nodep->node == node);            /* step 4: 'nodep' is dereferenced while still NULL */
		/* ... rebalancing and unlinking elided ... */
	}
The analyzer's path through the expansion (steps marked in the excerpt above):

1. Null pointer value stored to 'nodep'
2. Assuming field 'node' is equal to null
3. Loop condition is false. Execution continues on line 20
4. Access to field 'node' results in a dereference of a null pointer (loaded from variable 'nodep')
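For reference, a minimal, hypothetical driver that exercises the path the analyzer describes: removing a node from an empty dy_alloc_tree_t leaves nodep NULL, and the assert's condition nodep->node == node is exactly the flagged access. Note that within this file the only call to dy_alloc_tree_remove (line 106 of ccv_nnc_xpu_alloc) passes a node that dy_alloc_tree_nsearch just returned from the same tree, so the tree cannot be empty there, which suggests the diagnosed path may not be reachable from this translation unit's own callers.

	/* Hypothetical illustration only, not taken from the report: assumes the
	 * internal header that defines dy_alloc_tree_t / dy_alloc_metadata_t and
	 * declares the rb_gen-generated functions is visible to the caller. */
	#include "_ccv_nnc_xpu_alloc.h"

	static void remove_from_empty_tree(void)
	{
		dy_alloc_tree_t tree;
		dy_alloc_tree_new(&tree);                 /* rbt_root == NULL */
		dy_alloc_metadata_t stray = { .size = 64 };
		/* nodep starts NULL (step 1), the loop body never runs because the
		 * root is NULL (steps 2-3), and assert(nodep->node == node)
		 * dereferences the NULL nodep (step 4). */
		dy_alloc_tree_remove(&tree, &stray);
	}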
21
22 static void _ccv_nnc_xpu_metadata_free(dy_alloc_metadata_t* node, void* arg)
23 {
24 	do {
25 		dy_alloc_metadata_t* const next = node->next;
26 #ifdef HAVE_CUDA
27 		cufree(node->device, node->ptr);
28 #elif defined(HAVE_MPS)
29 		mpobjfree(node->device, node->ptr);
30 #endif
31 		ccfree(node);
32 		node = next;
33 	} while (node);
34 }
35
36 static void _ccv_nnc_xpu_alloc_drain(const int device, khash_t(dy_dev)* const dev, const ccv_nnc_stream_context_t* const stream)
37 {
38 	// Wait until the stream is free, and then do the free.
39 	if (stream)
40 		ccv_nnc_stream_context_wait(stream);
41 	khiter_t k;
42 	if (device >= 0)
43 	{
44 		k = kh_get(dy_dev, dev, device);
45 		if (k != kh_end(dev))
46 		{
47 			dy_alloc_tree_t* const tree = &kh_val(dev, k);
48 			dy_alloc_tree_destroy(tree, _ccv_nnc_xpu_metadata_free, 0);
49 			kh_del(dy_dev, dev, k);
50 		}
51 		return;
52 	}
53 	for (k = kh_begin(dev); k != kh_end(dev); ++k)
54 	{
55 		if (!kh_exist(dev, k))
56 			continue;
57 		dy_alloc_tree_t* const tree = &kh_val(dev, k);
58 		dy_alloc_tree_destroy(tree, _ccv_nnc_xpu_metadata_free, 0);
59 		kh_del(dy_dev, dev, k);
60 	}
61 }
62
63 static void _ccv_nnc_xpu_stream_destructor_hook(const ccv_nnc_stream_context_t* const stream, void* const context)
64 {
65 	ccv_nnc_xpu_alloc_t* const xpu_alloc = (ccv_nnc_xpu_alloc_t*)context;
66 	khash_t(dy_str)* const freed = xpu_alloc->freed;
67 	const int64_t str = (int64_t)(intptr_t)stream;
68 	khiter_t i = kh_get(dy_str, freed, str);
69 	assert(i != kh_end(freed));
70 	khash_t(dy_dev)* const dev = kh_val(freed, i).dev;
71 	_ccv_nnc_xpu_alloc_drain(-1, dev, stream);
72 	kh_destroy(dy_dev, dev);
73 	kh_del(dy_str, freed, i);
74 }
75
76 void* ccv_nnc_xpu_alloc(ccv_nnc_xpu_alloc_t* const xpu_alloc, const int device, ccv_nnc_stream_context_t* const stream, const size_t size)
77 {
78 	khash_t(dy_str)* const freed = xpu_alloc->freed;
79 	const int64_t str = (int64_t)(intptr_t)stream;
80 	int ret;
81 	khiter_t i = kh_put(dy_str, freed, str, &ret);
82 	assert(ret >= 0);
83 	dy_alloc_metadata_t* node = 0;
84 	if (ret == 0)
85 	{
86 		// If we can find stream related allocations, try to
87 		// find the suitable ones.
88 		khash_t(dy_dev)* const dev = kh_val(freed, i).dev;
89 		assert(dev);
90 		khiter_t j = kh_get(dy_dev, dev, device);
91 		if (j != kh_end(dev))
92 		{
93 			dy_alloc_tree_t* const tree = &kh_val(dev, j);
94 			dy_alloc_metadata_t key = {
95 				.size = size
96 			};
97 			node = dy_alloc_tree_nsearch(tree, &key);
98 			if (node)
99 			{
100 				if (node->next) // If it is a linked list, select the one.
101 				{
102 					dy_alloc_metadata_t* next_node = node->next;
103 					node->next = node->next->next;
104 					node = next_node;
105 				} else
106 					dy_alloc_tree_remove(tree, node);
107 			}
108 		}
109 	} else {
110 		// Otherwise, create it.
111 		kh_val(freed, i).dev = kh_init(dy_dev);
112 		kh_val(freed, i).hook_id = stream ? ccv_nnc_stream_context_add_destructor_hook(stream, _ccv_nnc_xpu_stream_destructor_hook, xpu_alloc) : -1;
113
114 	}
115 	if (!node)
116 	{
117 		node = (dy_alloc_metadata_t*)ccmalloc(sizeof(dy_alloc_metadata_t));
118 #ifdef HAVE_CUDA
119 		if (xpu_alloc->mp_hdr < 0)
120 			xpu_alloc->mp_hdr = curegmp(device, (cump_f)ccv_nnc_xpu_gc, xpu_alloc);
121 		node->ptr = cumalloc(device, size);
122 #elif defined(HAVE_MPS)
123 		if (xpu_alloc->mp_hdr < 0)
124 			xpu_alloc->mp_hdr = mpregmp(device, (mpmp_f)ccv_nnc_xpu_gc, xpu_alloc);
125 		node->ptr = mpobjmalloc(device, size);
126 #endif
127 		if (!node->ptr) // If cannot allocate, drain the pool first and then allocate.
128 		{
129 			ccfree(node);
130 			return 0;
131 		}
132 		node->device = device;
133 		node->size = size;
134 		node->str = str;
135 	} else {
136 		assert(node->size >= size);
137 		assert(node->device == device);
138 		assert(node->str == str);
139 	}
140 	node->next = 0;
141 	khash_t(dy_alloc)* const allocd = xpu_alloc->allocd;
142 	i = kh_put(dy_alloc, allocd, (int64_t)(intptr_t)node->ptr, &ret);
143 	assert(ret > 0);
144 	kh_val(allocd, i) = node;
145 	return node->ptr;
146 }
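Editor's note: together with ccv_nnc_xpu_free below, this implements a per-stream, per-device best-fit pool: a freed block is parked in the size-ordered tree and handed back to a later request of equal or smaller size on the same stream and device. A minimal usage sketch, assuming an xpu_alloc that has been initialized elsewhere (the initializer is not part of this file) and a CUDA or MPS build; the function name and header path in the sketch are assumptions:

	/* Hypothetical usage sketch, not taken from the report. */
	#include "ccv_nnc.h"
	#include "_ccv_nnc_xpu_alloc.h"

	void xpu_pool_roundtrip(ccv_nnc_xpu_alloc_t* const xpu_alloc,
	                        ccv_nnc_stream_context_t* const stream)
	{
		/* First allocation on device 0: nothing is cached yet, so a real
		 * device allocation (cumalloc/mpobjmalloc) is made. */
		void* a = ccv_nnc_xpu_alloc(xpu_alloc, 0, stream, 1 << 20);
		/* Returning it parks the 1 MiB block in the per-stream, per-device tree. */
		ccv_nnc_xpu_free(xpu_alloc, a);
		/* A smaller request on the same device and stream is served from the
		 * cached block (node->size >= size), so no new device allocation runs. */
		void* b = ccv_nnc_xpu_alloc(xpu_alloc, 0, stream, 1 << 19);
		ccv_nnc_xpu_free(xpu_alloc, b);
		/* Tearing down releases any still-outstanding allocations and every cached block. */
		ccv_nnc_xpu_alloc_destroy(xpu_alloc);
	}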
147
148 void ccv_nnc_xpu_free(ccv_nnc_xpu_alloc_t* const xpu_alloc, void* const ptr)
149 {
150 	khash_t(dy_alloc)* const allocd = xpu_alloc->allocd;
151 	khiter_t i = kh_get(dy_alloc, allocd, (int64_t)(intptr_t)ptr);
152 	assert(i != kh_end(allocd));
153 	dy_alloc_metadata_t* const node = kh_val(allocd, i);
154 	kh_del(dy_alloc, allocd, i);
155 	assert(node->ptr == ptr);
156 	khash_t(dy_str)* const freed = xpu_alloc->freed;
157 	i = kh_get(dy_str, freed, node->str);
158 	// If cannot find associated stream, that means this allocation associated
159 	// stream has been freed. I have to do synchronous free of this pointer.
160 	if (i == kh_end(freed))
161 	{
162 #ifdef HAVE_CUDA
163 		cufree(node->device, node->ptr);
164 #elif defined(HAVE_MPS)
165 		mpobjfree(node->device, node->ptr);
166 #endif
167 		ccfree(node);
168 		return;
169 	}
170 	khash_t(dy_dev)* const dev = kh_val(freed, i).dev;
171 	int ret;
172 	khiter_t j = kh_put(dy_dev, dev, node->device, &ret);
173 	assert(ret >= 0);
174 	dy_alloc_tree_t* const tree = &kh_val(dev, j);
175 	if (ret != 0)
176 		dy_alloc_tree_new(tree);
177 	dy_alloc_metadata_t* const canon_node = dy_alloc_tree_search(tree, node);
178 	if (!canon_node)
179 		dy_alloc_tree_insert(tree, node);
180 	else { // Insert into the linked list.
181 		node->next = canon_node->next;
182 		canon_node->next = node;
183 	}
184 }
185
186 void ccv_nnc_xpu_alloc_destroy(ccv_nnc_xpu_alloc_t* const xpu_alloc)
187 {
188 	khash_t(dy_alloc)* const allocd = xpu_alloc->allocd;
189 	khiter_t k;
190 	for (k = kh_begin(allocd); k != kh_end(allocd); ++k)
191 	{
192 		if (!kh_exist(allocd, k))
193 			continue;
194 		_ccv_nnc_xpu_metadata_free(kh_val(allocd, k), 0);
195 	}
196 	kh_destroy(dy_alloc, allocd);
197 	khash_t(dy_str)* const freed = xpu_alloc->freed;
198 	for (k = kh_begin(freed); k != kh_end(freed); ++k)
199 	{
200 		if (!kh_exist(freed, k))
201 			continue;
202 		khash_t(dy_dev)* const dev = kh_val(freed, k).dev;
203 		ccv_nnc_stream_context_t* const stream = (ccv_nnc_stream_context_t*)(intptr_t)kh_key(freed, k);
204 		_ccv_nnc_xpu_alloc_drain(-1, dev, stream);
205 		if (stream)
206 		{
207 			const int hook_id = kh_val(freed, k).hook_id;
208 			ccv_nnc_stream_context_remove_destructor_hook(stream, hook_id);
209 		}
210 		kh_destroy(dy_dev, dev);
211 	}
212 	kh_destroy(dy_str, freed);
213 #ifdef HAVE_CUDA
214 	if (xpu_alloc->mp_hdr >= 0)
215 		cuunregmp(xpu_alloc->mp_hdr);
216 #elif defined(HAVE_MPS)
217 	if (xpu_alloc->mp_hdr >= 0)
218 		mpunregmp(xpu_alloc->mp_hdr);
219 #endif
220 }
221
222 void ccv_nnc_xpu_gc(const int device, ccv_nnc_xpu_alloc_t* const xpu_alloc)
223 {
224 	khash_t(dy_str)* const freed = xpu_alloc->freed;
225 	khiter_t k;
226 	for (k = kh_begin(freed); k != kh_end(freed); ++k)
227 	{
228 		if (!kh_exist(freed, k))
229 			continue;
230 		khash_t(dy_dev)* const dev = kh_val(freed, k).dev;
231 		ccv_nnc_stream_context_t* const stream = (ccv_nnc_stream_context_t*)(intptr_t)kh_key(freed, k);
232 		_ccv_nnc_xpu_alloc_drain(device, dev, stream);
233 	}
234 }
235 #else
236 void* ccv_nnc_xpu_alloc(ccv_nnc_xpu_alloc_t* const xpu_alloc, const int device, ccv_nnc_stream_context_t* const stream, const size_t size)
237 {
238 	return 0;
239 }
240
241 void ccv_nnc_xpu_free(ccv_nnc_xpu_alloc_t* const xpu_alloc, void* const ptr)
242 {
243 }
244
245 void ccv_nnc_xpu_alloc_destroy(ccv_nnc_xpu_alloc_t* const xpu_alloc)
246 {
247 }
248
249 void ccv_nnc_xpu_gc(const int device, ccv_nnc_xpu_alloc_t* const dynamic_graph)
250 {
251 }
252 #endif