Bug Summary

File: nnc/cmd/scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c
Warning: line 243, column 28
Array access (from variable 'amp2') results in a null pointer dereference

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-unknown-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name ccv_nnc_scaled_dot_product_attention_cpu_ref.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 2 -pic-is-pie -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -target-feature +sse2 -tune-cpu generic -debugger-tuning=gdb -fdebug-compilation-dir=/home/liu/actions-runner/_work/ccv/ccv/lib/nnc/cmd -fcoverage-compilation-dir=/home/liu/actions-runner/_work/ccv/ccv/lib/nnc/cmd -resource-dir /usr/local/lib/clang/19 -I ../../ -I .. 
-I /usr/local/cuda/include -D HAVE_CBLAS -D HAVE_LIBPNG -D HAVE_LIBJPEG -D HAVE_FFTW3 -D HAVE_PTHREAD -D HAVE_LIBLINEAR -D HAVE_TESSERACT -D HAVE_AVCODEC -D HAVE_AVFORMAT -D HAVE_AVUTIL -D HAVE_SWSCALE -D HAVE_SSE2 -D HAVE_GSL -D HAVE_CUDA -D HAVE_CUDNN -D HAVE_NCCL -D USE_SYSTEM_CUB -I /usr/local/include -internal-isystem /usr/local/lib/clang/19/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/12/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -ferror-limit 19 -fgnuc-version=4.2.1 -fskip-odr-check-in-gmf -vectorize-loops -vectorize-slp -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/liu/actions-runner/_work/ccv/ccv/_analyze/2026-05-02-115646-1519401-1 -x c scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c
1#include "ccv.h"
2#include "ccv_internal.h"
3#include "nnc/ccv_nnc.h"
4#include "nnc/ccv_nnc_easy.h"
5#include "nnc/ccv_nnc_internal.h"
6#ifdef USE_OPENMP
7#include <omp.h>
8#endif
9#ifdef USE_DISPATCH
10#include <dispatch/dispatch.h>
11#endif
12
13// Shared methods.
14#include "../_ccv_nnc_cpu_ref.h"
15
16static int _ccv_nnc_scaled_dot_product_attention_forw(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
17{
18 assert(input_size >= 3)((void) sizeof ((input_size >= 3) ? 1 : 0), __extension__ (
{ if (input_size >= 3) ; else __assert_fail ("input_size >= 3"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 18, __extension__ __PRETTY_FUNCTION__); }))
;
1
Assuming 'input_size' is >= 3
2
Taking true branch
19 assert(output_size >= 1)((void) sizeof ((output_size >= 1) ? 1 : 0), __extension__
({ if (output_size >= 1) ; else __assert_fail ("output_size >= 1"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 19, __extension__ __PRETTY_FUNCTION__); }))
;
3
Assuming 'output_size' is >= 1
4
Taking true branch
20 const int is_varlen = cmd.info.scaled_dot_product_attention.is_varlen;
21 ccv_nnc_tensor_view_t* const q = (ccv_nnc_tensor_view_t*)inputs[0];
22 ccv_nnc_tensor_view_t* const k = (ccv_nnc_tensor_view_t*)inputs[1];
23 ccv_nnc_tensor_view_t* const v = (ccv_nnc_tensor_view_t*)inputs[2];
24 ccv_nnc_tensor_view_t* const attn_mask = input_size > 3 ? (ccv_nnc_tensor_view_t*)inputs[3] : 0;
5
Assuming 'input_size' is > 3
6
'?' condition is true
25 ccv_nnc_tensor_view_t* const w = input_size > 4 ? (ccv_nnc_tensor_view_t*)inputs[4] : 0;
7
Assuming 'input_size' is > 4
8
'?' condition is true
26 ccv_nnc_tensor_view_t* const bias = input_size > 5 ? (ccv_nnc_tensor_view_t*)inputs[5] : 0;
9
Assuming 'input_size' is > 5
10
'?' condition is true
27 ccv_nnc_tensor_view_t* const q_seq_offsets = is_varlen && input_size > 6 ? (ccv_nnc_tensor_view_t*)inputs[6] : 0;
11
Assuming 'is_varlen' is 0
28 ccv_nnc_tensor_view_t* const kv_seq_offsets = is_varlen
11.1
'is_varlen' is 0
&& input_size > 7 ? (ccv_nnc_tensor_view_t*)inputs[7] : 0;
29 if (bias) // bias always requires a weight matrix.
12
Assuming 'bias' is null
30 { assert(w)((void) sizeof ((w) ? 1 : 0), __extension__ ({ if (w) ; else __assert_fail
("w", "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 30, __extension__ __PRETTY_FUNCTION__); }))
; }
31 if (is_varlen
12.1
'is_varlen' is 0
&& (attn_mask || w || bias || !q_seq_offsets || !kv_seq_offsets))
32 return CCV_NNC_EXEC_INVALID;
33 ccv_nnc_tensor_view_t* const c = (w) ? (ccv_nnc_tensor_view_t*)outputs[2] : (ccv_nnc_tensor_view_t*)outputs[0];
13
Assuming 'w' is non-null
14
'?' condition is true
34 const int q_nd = ccv_nnc_tensor_nd(q->info.dim);
35 assert(q_nd == 3 || q_nd == 4)((void) sizeof ((q_nd == 3 || q_nd == 4) ? 1 : 0), __extension__
({ if (q_nd == 3 || q_nd == 4) ; else __assert_fail ("q_nd == 3 || q_nd == 4"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 35, __extension__ __PRETTY_FUNCTION__); }))
;
15
Assuming 'q_nd' is not equal to 3
16
Assuming 'q_nd' is equal to 4
17
Taking true branch
36 const int k_nd = ccv_nnc_tensor_nd(k->info.dim);
37 assert(k_nd == 3 || k_nd == 4)((void) sizeof ((k_nd == 3 || k_nd == 4) ? 1 : 0), __extension__
({ if (k_nd == 3 || k_nd == 4) ; else __assert_fail ("k_nd == 3 || k_nd == 4"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 37, __extension__ __PRETTY_FUNCTION__); }))
;
18
Assuming 'k_nd' is not equal to 3
19
Assuming 'k_nd' is equal to 4
20
Taking true branch
38 const int v_nd = ccv_nnc_tensor_nd(v->info.dim);
39 assert(v_nd == 3 || v_nd == 4)((void) sizeof ((v_nd == 3 || v_nd == 4) ? 1 : 0), __extension__
({ if (v_nd == 3 || v_nd == 4) ; else __assert_fail ("v_nd == 3 || v_nd == 4"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 39, __extension__ __PRETTY_FUNCTION__); }))
;
21
Assuming 'v_nd' is not equal to 3
22
Assuming 'v_nd' is equal to 4
23
Taking true branch
40 const int c_nd = ccv_nnc_tensor_nd(c->info.dim);
41 assert(c_nd == 3 || c_nd == 4)((void) sizeof ((c_nd == 3 || c_nd == 4) ? 1 : 0), __extension__
({ if (c_nd == 3 || c_nd == 4) ; else __assert_fail ("c_nd == 3 || c_nd == 4"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 41, __extension__ __PRETTY_FUNCTION__); }))
;
24
Assuming 'c_nd' is not equal to 3
25
Assuming 'c_nd' is equal to 4
26
Taking true branch
42 assert(q_nd == k_nd && k_nd == v_nd && v_nd == c_nd)((void) sizeof ((q_nd == k_nd && k_nd == v_nd &&
v_nd == c_nd) ? 1 : 0), __extension__ ({ if (q_nd == k_nd &&
k_nd == v_nd && v_nd == c_nd) ; else __assert_fail (
"q_nd == k_nd && k_nd == v_nd && v_nd == c_nd"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 42, __extension__ __PRETTY_FUNCTION__); }))
;
27
Taking true branch
43 if (is_varlen
27.1
'is_varlen' is 0
&& q_nd != 4)
44 return CCV_NNC_EXEC_INVALID;
45 // Assuming this is float 32.
46 int qdim[CCV_NNC_MAX_DIM_ALLOC(12)];
47 int kdim[CCV_NNC_MAX_DIM_ALLOC(12)];
48 int vdim[CCV_NNC_MAX_DIM_ALLOC(12)];
49 int cdim[CCV_NNC_MAX_DIM_ALLOC(12)];
50 int amdim[CCV_NNC_MAX_DIM_ALLOC(12)];
51 ccv_nnc_tensor_view_get_dim(q, qdim);
52 ccv_nnc_tensor_view_get_dim(k, kdim);
53 ccv_nnc_tensor_view_get_dim(v, vdim);
54 ccv_nnc_tensor_view_get_dim(c, cdim);
55 if (is_varlen
27.2
'is_varlen' is 0
)
28
Taking false branch
56 {
57 assert(q_seq_offsets->info.datatype == CCV_32S)((void) sizeof ((q_seq_offsets->info.datatype == CCV_32S) ?
1 : 0), __extension__ ({ if (q_seq_offsets->info.datatype
== CCV_32S) ; else __assert_fail ("q_seq_offsets->info.datatype == CCV_32S"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 57, __extension__ __PRETTY_FUNCTION__); }))
;
58 assert(kv_seq_offsets->info.datatype == CCV_32S)((void) sizeof ((kv_seq_offsets->info.datatype == CCV_32S)
? 1 : 0), __extension__ ({ if (kv_seq_offsets->info.datatype
== CCV_32S) ; else __assert_fail ("kv_seq_offsets->info.datatype == CCV_32S"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 58, __extension__ __PRETTY_FUNCTION__); }))
;
59 assert(CCV_IS_TENSOR_CONTIGUOUS(q_seq_offsets))((void) sizeof (((!((*(int*)(q_seq_offsets)) & CCV_TENSOR_VIEW
) || (((ccv_nnc_tensor_view_t*)q_seq_offsets)->contiguous ==
1))) ? 1 : 0), __extension__ ({ if ((!((*(int*)(q_seq_offsets
)) & CCV_TENSOR_VIEW) || (((ccv_nnc_tensor_view_t*)q_seq_offsets
)->contiguous == 1))) ; else __assert_fail ("CCV_IS_TENSOR_CONTIGUOUS(q_seq_offsets)"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 59, __extension__ __PRETTY_FUNCTION__); }))
;
60 assert(CCV_IS_TENSOR_CONTIGUOUS(kv_seq_offsets))((void) sizeof (((!((*(int*)(kv_seq_offsets)) & CCV_TENSOR_VIEW
) || (((ccv_nnc_tensor_view_t*)kv_seq_offsets)->contiguous
== 1))) ? 1 : 0), __extension__ ({ if ((!((*(int*)(kv_seq_offsets
)) & CCV_TENSOR_VIEW) || (((ccv_nnc_tensor_view_t*)kv_seq_offsets
)->contiguous == 1))) ; else __assert_fail ("CCV_IS_TENSOR_CONTIGUOUS(kv_seq_offsets)"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 60, __extension__ __PRETTY_FUNCTION__); }))
;
61 }
62 if (q_nd
28.1
'q_nd' is not equal to 3
== 3)
29
Taking false branch
63 {
64 qdim[0] = qdim[1], qdim[1] = qdim[2], qdim[2] = 1;
65 kdim[0] = kdim[1], kdim[1] = kdim[2], kdim[2] = 1;
66 vdim[0] = vdim[1], vdim[1] = vdim[2], vdim[2] = 1;
67 cdim[0] = cdim[1], cdim[1] = cdim[2], cdim[2] = 1;
68 }
69 assert(qdim[0] == kdim[0] && kdim[0] == vdim[0] && vdim[0] == cdim[0])((void) sizeof ((qdim[0] == kdim[0] && kdim[0] == vdim
[0] && vdim[0] == cdim[0]) ? 1 : 0), __extension__ ({
if (qdim[0] == kdim[0] && kdim[0] == vdim[0] &&
vdim[0] == cdim[0]) ; else __assert_fail ("qdim[0] == kdim[0] && kdim[0] == vdim[0] && vdim[0] == cdim[0]"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 69, __extension__ __PRETTY_FUNCTION__); }))
;
30
Assuming the condition is true
31
Assuming the condition is true
32
Assuming the condition is true
33
Taking true branch
70 assert(qdim[2] == cdim[2])((void) sizeof ((qdim[2] == cdim[2]) ? 1 : 0), __extension__ (
{ if (qdim[2] == cdim[2]) ; else __assert_fail ("qdim[2] == cdim[2]"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 70, __extension__ __PRETTY_FUNCTION__); }))
;
34
Assuming the condition is true
35
Taking true branch
71 assert(kdim[2] == vdim[2])((void) sizeof ((kdim[2] == vdim[2]) ? 1 : 0), __extension__ (
{ if (kdim[2] == vdim[2]) ; else __assert_fail ("kdim[2] == vdim[2]"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 71, __extension__ __PRETTY_FUNCTION__); }))
;
36
Assuming the condition is true
37
Taking true branch
72 assert(qdim[2] % kdim[2] == 0)((void) sizeof ((qdim[2] % kdim[2] == 0) ? 1 : 0), __extension__
({ if (qdim[2] % kdim[2] == 0) ; else __assert_fail ("qdim[2] % kdim[2] == 0"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 72, __extension__ __PRETTY_FUNCTION__); }))
;
38
Assuming the condition is true
39
Taking true branch
73 assert(qdim[2] >= kdim[2])((void) sizeof ((qdim[2] >= kdim[2]) ? 1 : 0), __extension__
({ if (qdim[2] >= kdim[2]) ; else __assert_fail ("qdim[2] >= kdim[2]"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 73, __extension__ __PRETTY_FUNCTION__); }))
;
40
Assuming the condition is true
41
Taking true branch
74 assert(qdim[3] == kdim[3])((void) sizeof ((qdim[3] == kdim[3]) ? 1 : 0), __extension__ (
{ if (qdim[3] == kdim[3]) ; else __assert_fail ("qdim[3] == kdim[3]"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 74, __extension__ __PRETTY_FUNCTION__); }))
;
42
Assuming the condition is true
43
Taking true branch
75 assert(kdim[1] == vdim[1])((void) sizeof ((kdim[1] == vdim[1]) ? 1 : 0), __extension__ (
{ if (kdim[1] == vdim[1]) ; else __assert_fail ("kdim[1] == vdim[1]"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 75, __extension__ __PRETTY_FUNCTION__); }))
;
44
Assuming the condition is true
45
Taking true branch
76 assert(cdim[1] == qdim[1])((void) sizeof ((cdim[1] == qdim[1]) ? 1 : 0), __extension__ (
{ if (cdim[1] == qdim[1]) ; else __assert_fail ("cdim[1] == qdim[1]"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 76, __extension__ __PRETTY_FUNCTION__); }))
;
46
Assuming the condition is true
47
Taking true branch
77 assert(cdim[3] == vdim[3])((void) sizeof ((cdim[3] == vdim[3]) ? 1 : 0), __extension__ (
{ if (cdim[3] == vdim[3]) ; else __assert_fail ("cdim[3] == vdim[3]"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 77, __extension__ __PRETTY_FUNCTION__); }))
;
48
Assuming the condition is true
49
Taking true branch
78 assert(CCV_NNC_MAX_DIM == 2)((void) sizeof (((2) == 2) ? 1 : 0), __extension__ ({ if ((2)
== 2) ; else __assert_fail ("CCV_NNC_MAX_DIM == 2", "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 78, __extension__ __PRETTY_FUNCTION__); }))
; // Need to change this logic for CCV_NNC_MAX_DIM == other number.
50
Taking true branch
79 int qstride[CCV_NNC_MAX_DIM_ALLOC(12)];
80 int kstride[CCV_NNC_MAX_DIM_ALLOC(12)];
81 int vstride[CCV_NNC_MAX_DIM_ALLOC(12)];
82 int cstride[CCV_NNC_MAX_DIM_ALLOC(12)];
83 int amstride[CCV_NNC_MAX_DIM_ALLOC(12)];
84 ccv_nnc_tensor_view_get_stride(q, qstride);
85 ccv_nnc_tensor_view_get_stride(k, kstride);
86 ccv_nnc_tensor_view_get_stride(v, vstride);
87 ccv_nnc_tensor_view_get_stride(c, cstride);
88 if (q_nd
50.1
'q_nd' is not equal to 3
== 3)
51
Taking false branch
89 {
90 qstride[0] = qstride[1], qstride[1] = qstride[2], qstride[2] = qstride[3];
91 kstride[0] = kstride[1], kstride[1] = kstride[2], kstride[2] = kstride[3];
92 vstride[0] = vstride[1], vstride[1] = vstride[2], vstride[2] = vstride[3];
93 cstride[0] = cstride[1], cstride[1] = cstride[2], cstride[2] = cstride[3];
94 }
95 if (attn_mask)
52
Assuming 'attn_mask' is non-null
53
Taking true branch
96 {
97 ccv_nnc_tensor_view_get_dim(attn_mask, amdim);
98 ccv_nnc_tensor_view_get_stride(attn_mask, amstride);
99 assert(amdim[0] == qdim[0] || amdim[0] == 1)((void) sizeof ((amdim[0] == qdim[0] || amdim[0] == 1) ? 1 : 0
), __extension__ ({ if (amdim[0] == qdim[0] || amdim[0] == 1)
; else __assert_fail ("amdim[0] == qdim[0] || amdim[0] == 1"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 99, __extension__ __PRETTY_FUNCTION__); }))
;
54
Assuming the condition is true
100 assert(amdim[1] == qdim[2] || amdim[1] == 1)((void) sizeof ((amdim[1] == qdim[2] || amdim[1] == 1) ? 1 : 0
), __extension__ ({ if (amdim[1] == qdim[2] || amdim[1] == 1)
; else __assert_fail ("amdim[1] == qdim[2] || amdim[1] == 1"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 100, __extension__ __PRETTY_FUNCTION__); }))
;
55
Assuming the condition is true
101 assert(amdim[2] == qdim[1])((void) sizeof ((amdim[2] == qdim[1]) ? 1 : 0), __extension__
({ if (amdim[2] == qdim[1]) ; else __assert_fail ("amdim[2] == qdim[1]"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 101, __extension__ __PRETTY_FUNCTION__); }))
;
56
Assuming the condition is true
57
Taking true branch
102 assert(amdim[3] == kdim[1])((void) sizeof ((amdim[3] == kdim[1]) ? 1 : 0), __extension__
({ if (amdim[3] == kdim[1]) ; else __assert_fail ("amdim[3] == kdim[1]"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 102, __extension__ __PRETTY_FUNCTION__); }))
;
58
Assuming the condition is true
59
Taking true branch
103 }
104 int i[CCV_NNC_MAX_DIM(2) + 2];
105 float* qk = ccv_nnc_stream_context_get_workspace(stream_context, sizeof(float) * qdim[1] * kdim[1], CCV_TENSOR_CPU_MEMORY);
106 const float* const qp = q->data.f32;
107 const float* const kp = k->data.f32;
108 const float* const vp = v->data.f32;
109 const float* const amp = attn_mask
59.1
'attn_mask' is non-null
? attn_mask->data.f32 : 0;
60
'?' condition is true
110 float* const cp = c->data.f32;
111 const float scale = cmd.info.scaled_dot_product_attention.scale;
112 const int is_causal = cmd.info.scaled_dot_product_attention.is_causal;
113 const int h_h_k_ratio = qdim[2] / kdim[2];
114 assert(kdim[2] == vdim[2])((void) sizeof ((kdim[2] == vdim[2]) ? 1 : 0), __extension__ (
{ if (kdim[2] == vdim[2]) ; else __assert_fail ("kdim[2] == vdim[2]"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 114, __extension__ __PRETTY_FUNCTION__); }))
;
61
Taking true branch
115 assert(qdim[2] >= kdim[2])((void) sizeof ((qdim[2] >= kdim[2]) ? 1 : 0), __extension__
({ if (qdim[2] >= kdim[2]) ; else __assert_fail ("qdim[2] >= kdim[2]"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 115, __extension__ __PRETTY_FUNCTION__); }))
;
62
Taking true branch
116 assert(qdim[2] % kdim[2] == 0)((void) sizeof ((qdim[2] % kdim[2] == 0) ? 1 : 0), __extension__
({ if (qdim[2] % kdim[2] == 0) ; else __assert_fail ("qdim[2] % kdim[2] == 0"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 116, __extension__ __PRETTY_FUNCTION__); }))
;
63
Taking true branch
117 if (is_varlen
63.1
'is_varlen' is 0
)
64
Taking false branch
118 {
119 const int batch_size = ccv_nnc_tensor_count(q_seq_offsets->info) - 1;
120 assert(batch_size > 0)((void) sizeof ((batch_size > 0) ? 1 : 0), __extension__ (
{ if (batch_size > 0) ; else __assert_fail ("batch_size > 0"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 120, __extension__ __PRETTY_FUNCTION__); }))
;
121 assert(ccv_nnc_tensor_count(kv_seq_offsets->info) == batch_size + 1)((void) sizeof ((ccv_nnc_tensor_count(kv_seq_offsets->info
) == batch_size + 1) ? 1 : 0), __extension__ ({ if (ccv_nnc_tensor_count
(kv_seq_offsets->info) == batch_size + 1) ; else __assert_fail
("ccv_nnc_tensor_count(kv_seq_offsets->info) == batch_size + 1"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 121, __extension__ __PRETTY_FUNCTION__); }))
;
122 assert(qdim[0] == 1)((void) sizeof ((qdim[0] == 1) ? 1 : 0), __extension__ ({ if (
qdim[0] == 1) ; else __assert_fail ("qdim[0] == 1", "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 122, __extension__ __PRETTY_FUNCTION__); }))
;
123 assert(kdim[0] == 1)((void) sizeof ((kdim[0] == 1) ? 1 : 0), __extension__ ({ if (
kdim[0] == 1) ; else __assert_fail ("kdim[0] == 1", "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 123, __extension__ __PRETTY_FUNCTION__); }))
;
124 assert(vdim[0] == 1)((void) sizeof ((vdim[0] == 1) ? 1 : 0), __extension__ ({ if (
vdim[0] == 1) ; else __assert_fail ("vdim[0] == 1", "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 124, __extension__ __PRETTY_FUNCTION__); }))
;
125 assert(cdim[0] == 1)((void) sizeof ((cdim[0] == 1) ? 1 : 0), __extension__ ({ if (
cdim[0] == 1) ; else __assert_fail ("cdim[0] == 1", "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 125, __extension__ __PRETTY_FUNCTION__); }))
;
126 assert(cdim[1] == qdim[1])((void) sizeof ((cdim[1] == qdim[1]) ? 1 : 0), __extension__ (
{ if (cdim[1] == qdim[1]) ; else __assert_fail ("cdim[1] == qdim[1]"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 126, __extension__ __PRETTY_FUNCTION__); }))
;
127 assert(cdim[2] == qdim[2])((void) sizeof ((cdim[2] == qdim[2]) ? 1 : 0), __extension__ (
{ if (cdim[2] == qdim[2]) ; else __assert_fail ("cdim[2] == qdim[2]"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 127, __extension__ __PRETTY_FUNCTION__); }))
;
128 assert(cdim[3] == vdim[3])((void) sizeof ((cdim[3] == vdim[3]) ? 1 : 0), __extension__ (
{ if (cdim[3] == vdim[3]) ; else __assert_fail ("cdim[3] == vdim[3]"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 128, __extension__ __PRETTY_FUNCTION__); }))
;
129 assert(kdim[1] == vdim[1])((void) sizeof ((kdim[1] == vdim[1]) ? 1 : 0), __extension__ (
{ if (kdim[1] == vdim[1]) ; else __assert_fail ("kdim[1] == vdim[1]"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 129, __extension__ __PRETTY_FUNCTION__); }))
;
130 const int* const q_offset = q_seq_offsets->data.i32;
131 const int* const kv_offset = kv_seq_offsets->data.i32;
132 assert(q_offset[0] == 0)((void) sizeof ((q_offset[0] == 0) ? 1 : 0), __extension__ ({
if (q_offset[0] == 0) ; else __assert_fail ("q_offset[0] == 0"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 132, __extension__ __PRETTY_FUNCTION__); }))
;
133 assert(kv_offset[0] == 0)((void) sizeof ((kv_offset[0] == 0) ? 1 : 0), __extension__ (
{ if (kv_offset[0] == 0) ; else __assert_fail ("kv_offset[0] == 0"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 133, __extension__ __PRETTY_FUNCTION__); }))
;
134 assert(q_offset[batch_size] == qdim[1])((void) sizeof ((q_offset[batch_size] == qdim[1]) ? 1 : 0), __extension__
({ if (q_offset[batch_size] == qdim[1]) ; else __assert_fail
("q_offset[batch_size] == qdim[1]", "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 134, __extension__ __PRETTY_FUNCTION__); }))
;
135 assert(kv_offset[batch_size] == kdim[1])((void) sizeof ((kv_offset[batch_size] == kdim[1]) ? 1 : 0), __extension__
({ if (kv_offset[batch_size] == kdim[1]) ; else __assert_fail
("kv_offset[batch_size] == kdim[1]", "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 135, __extension__ __PRETTY_FUNCTION__); }))
;
136 for (i[0] = 0; i[0] < batch_size; i[0]++)
137 {
138 const int q_start = q_offset[i[0]];
139 const int q_end = q_offset[i[0] + 1];
140 const int k_start = kv_offset[i[0]];
141 const int k_end = kv_offset[i[0] + 1];
142 assert(q_start <= q_end)((void) sizeof ((q_start <= q_end) ? 1 : 0), __extension__
({ if (q_start <= q_end) ; else __assert_fail ("q_start <= q_end"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 142, __extension__ __PRETTY_FUNCTION__); }))
;
143 assert(k_start <= k_end)((void) sizeof ((k_start <= k_end) ? 1 : 0), __extension__
({ if (k_start <= k_end) ; else __assert_fail ("k_start <= k_end"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 143, __extension__ __PRETTY_FUNCTION__); }))
;
144 const int R = q_end - q_start;
145 const int K = k_end - k_start;
146 assert(R > 0)((void) sizeof ((R > 0) ? 1 : 0), __extension__ ({ if (R >
0) ; else __assert_fail ("R > 0", "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 146, __extension__ __PRETTY_FUNCTION__); }))
;
147 assert(K > 0)((void) sizeof ((K > 0) ? 1 : 0), __extension__ ({ if (K >
0) ; else __assert_fail ("K > 0", "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 147, __extension__ __PRETTY_FUNCTION__); }))
;
148 assert(R <= cmd.info.scaled_dot_product_attention.max_seqlen_q)((void) sizeof ((R <= cmd.info.scaled_dot_product_attention
.max_seqlen_q) ? 1 : 0), __extension__ ({ if (R <= cmd.info
.scaled_dot_product_attention.max_seqlen_q) ; else __assert_fail
("R <= cmd.info.scaled_dot_product_attention.max_seqlen_q"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 148, __extension__ __PRETTY_FUNCTION__); }))
;
149 assert(K <= cmd.info.scaled_dot_product_attention.max_seqlen_kv)((void) sizeof ((K <= cmd.info.scaled_dot_product_attention
.max_seqlen_kv) ? 1 : 0), __extension__ ({ if (K <= cmd.info
.scaled_dot_product_attention.max_seqlen_kv) ; else __assert_fail
("K <= cmd.info.scaled_dot_product_attention.max_seqlen_kv"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 149, __extension__ __PRETTY_FUNCTION__); }))
;
150 const float* const qp0 = qp + q_start * qstride[1];
151 const float* const kp0 = kp + k_start * kstride[1];
152 const float* const vp0 = vp + k_start * vstride[1];
153 float* const cp0 = cp + q_start * cstride[1];
154 for (i[1] = 0; i[1] < qdim[2]; i[1]++)
155 {
156 const float* const qp1 = qp0 + i[1] * qstride[2];
157 const float* const kp1 = kp0 + (i[1] / h_h_k_ratio) * kstride[2];
158 const float* const vp1 = vp0 + (i[1] / h_h_k_ratio) * vstride[2];
159 float* const cp1 = cp0 + i[1] * cstride[2];
160 parallel_for(x, R){ int x; for ((x) = 0; (x) < (R); (x)++) { {
161 int y, k;
162 const float* const qp2 = qp1 + x * qstride[1];
163 float* const cp2 = cp1 + x * cstride[1];
164 float* const qk0 = qk + x * K;
165 for (y = 0; y < K; y++)
166 {
167 const float* const kp2 = kp1 + y * kstride[1];
168 float v = 0;
169 for (k = 0; k < qdim[3]; k++)
170 v += qp2[k * qstride[3]] * kp2[k * kstride[3]];
171 qk0[y] = scale * v;
172 }
173 if (is_causal)
174 {
175 const int x_end = ccv_max(x - R + K + 1, 0)({ typeof (x - R + K + 1) _a = (x - R + K + 1); typeof (0) _b
= (0); (_a > _b) ? _a : _b; })
;
176 for (y = x_end; y < K; y++)
177 qk0[y] = 0;
178 double maxval = qk0[0];
179 for (y = 1; y < x_end; y++)
180 if (qk0[y] > maxval)
181 maxval = qk0[y];
182 double sumval = 0;
183 for (y = 0; y < x_end; y++)
184 sumval += (qk0[y] = expf(qk0[y] - maxval));
185 sumval = 1.0 / sumval;
186 for (y = 0; y < x_end; y++)
187 qk0[y] *= sumval;
188 } else {
189 double maxval = qk0[0];
190 for (y = 1; y < K; y++)
191 if (qk0[y] > maxval)
192 maxval = qk0[y];
193 double sumval = 0;
194 for (y = 0; y < K; y++)
195 sumval += (qk0[y] = expf(qk0[y] - maxval));
196 sumval = 1.0 / sumval;
197 for (y = 0; y < K; y++)
198 qk0[y] *= sumval;
199 }
200 for (k = 0; k < vdim[3]; k++)
201 cp2[k * cstride[3]] = 0;
202 for (y = 0; y < K; y++)
203 {
204 const float* const vp2 = vp1 + y * vstride[1];
205 const float v = qk0[y];
206 for (k = 0; k < vdim[3]; k++)
207 cp2[k * cstride[3]] += v * vp2[k * vstride[3]];
208 }
209 } parallel_endfor} }
210 }
211 }
212 return CCV_NNC_EXEC_SUCCESS;
213 }
214 for (i[0] = 0; i[0] < qdim[0]; i[0]++)
65
Assuming the condition is true
66
Loop condition is true. Entering loop body
215 {
216 const float* const qp0 = qp + i[0] * qstride[0];
217 const float* const kp0 = kp + i[0] * kstride[0];
218 const float* const vp0 = vp + i[0] * vstride[0];
219 const float* const amp0 = amp && amdim[0] > 1 ? amp + i[0] * amstride[0] : amp;
67
Assuming 'amp' is null
220 float* const cp0 = cp + i[0] * cstride[0];
221 for (i[1] = 0; i[1] < qdim[2]; i[1]++)
68
Assuming the condition is true
69
Loop condition is true. Entering loop body
222 {
223 const float* const qp1 = qp0 + i[1] * qstride[2];
224 const float* const kp1 = kp0 + (i[1] / h_h_k_ratio) * kstride[2];
225 const float* const vp1 = vp0 + (i[1] / h_h_k_ratio) * vstride[2];
226 const float* const amp1 = amp
69.1
'amp' is null
&& amdim[1] > 1 ? amp0 + i[1] * amstride[1] : amp0;
227 float* const cp1 = cp0 + i[1] * cstride[2];
228 // Compute Q @ K^T
229 parallel_for(x, qdim[1]){ int x; for ((x) = 0; (x) < (qdim[1]); (x)++) { {
70
Assuming the condition is true
71
Loop condition is true. Entering loop body
230 int y, k;
231 const float* const qp2 = qp1 + x * qstride[1];
232 float* const cp2 = cp1 + x * cstride[1];
233 float* const qk0 = qk + x * kdim[1];
234 const float* const amp2 = amp1
71.1
'amp1' is null
? amp1 + x * amstride[2] : 0;
72
'?' condition is false
73
'amp2' initialized to a null pointer value
235 if (attn_mask
73.1
'attn_mask' is non-null
)
74
Taking true branch
236 {
237 for (y = 0; y < kdim[1]; y++)
75
Assuming the condition is true
76
Loop condition is true. Entering loop body
238 {
239 const float* const kp2 = kp1 + y * kstride[1];
240 float v = 0;
241 for (k = 0; k < qdim[3]; k++)
77
Assuming the condition is false
78
Loop condition is false. Execution continues on line 243
242 v += qp2[k * qstride[3]] * kp2[k * kstride[3]];
243 qk0[y] = scale * v + amp2[y * amstride[3]];
79
Array access (from variable 'amp2') results in a null pointer dereference
244 }
245 } else {
246 for (y = 0; y < kdim[1]; y++)
247 {
248 const float* const kp2 = kp1 + y * kstride[1];
249 float v = 0;
250 for (k = 0; k < qdim[3]; k++)
251 v += qp2[k * qstride[3]] * kp2[k * kstride[3]];
252 qk0[y] = scale * v;
253 }
254 }
255 // Compute softmax on qk.
256 if (is_causal)
257 {
258 const int x_end = ccv_max(x - qdim[1] + kdim[1] + 1, 0)({ typeof (x - qdim[1] + kdim[1] + 1) _a = (x - qdim[1] + kdim
[1] + 1); typeof (0) _b = (0); (_a > _b) ? _a : _b; })
;
259 for (y = x_end; y < kdim[1]; y++)
260 qk0[y] = 0;
261 double maxval = qk0[0];
262 for (y = 1; y < x_end; y++)
263 if (qk0[y] > maxval)
264 maxval = qk0[y];
265 double sumval = 0;
266 for (y = 0; y < x_end; y++)
267 sumval += (qk0[y] = expf(qk0[y] - maxval));
268 sumval = 1.0 / sumval;
269 for (y = 0; y < x_end; y++)
270 qk0[y] *= sumval;
271 } else {
272 double maxval = qk0[0];
273 for (y = 1; y < kdim[1]; y++)
274 if (qk0[y] > maxval)
275 maxval = qk0[y];
276 double sumval = 0;
277 for (y = 0; y < kdim[1]; y++)
278 sumval += (qk0[y] = expf(qk0[y] - maxval));
279 sumval = 1.0 / sumval;
280 for (y = 0; y < kdim[1]; y++)
281 qk0[y] *= sumval;
282 }
283 for (k = 0; k < vdim[3]; k++)
284 cp2[k * cstride[3]] = 0;
285 for (y = 0; y < kdim[1]; y++)
286 {
287 const float* const vp2 = vp1 + y * vstride[1];
288 const float v = qk0[y];
289 for (k = 0; k < vdim[3]; k++)
290 cp2[k * cstride[3]] += v * vp2[k * vstride[3]];
291 }
292 } parallel_endfor} }
293 }
294 }
295 if (w)
296 {
297 const int num_heads = cdim[2];
298 ccv_nnc_tensor_view_t* const d = (ccv_nnc_tensor_view_t*)outputs[0];
299 const int w_nd = ccv_nnc_tensor_nd(w->info.dim);
300 assert(w_nd == 2)((void) sizeof ((w_nd == 2) ? 1 : 0), __extension__ ({ if (w_nd
== 2) ; else __assert_fail ("w_nd == 2", "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 300, __extension__ __PRETTY_FUNCTION__); }))
;
301 assert(CCV_IS_TENSOR_CONTIGUOUS(w))((void) sizeof (((!((*(int*)(w)) & CCV_TENSOR_VIEW) || ((
(ccv_nnc_tensor_view_t*)w)->contiguous == 1))) ? 1 : 0), __extension__
({ if ((!((*(int*)(w)) & CCV_TENSOR_VIEW) || (((ccv_nnc_tensor_view_t
*)w)->contiguous == 1))) ; else __assert_fail ("CCV_IS_TENSOR_CONTIGUOUS(w)"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 301, __extension__ __PRETTY_FUNCTION__); }))
;
302 const int d_nd = ccv_nnc_tensor_nd(d->info.dim);
303 assert(d_nd == 3)((void) sizeof ((d_nd == 3) ? 1 : 0), __extension__ ({ if (d_nd
== 3) ; else __assert_fail ("d_nd == 3", "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 303, __extension__ __PRETTY_FUNCTION__); }))
;
304 int ddim[CCV_NNC_MAX_DIM_ALLOC(12)];
305 int dstride[CCV_NNC_MAX_DIM_ALLOC(12)];
306 ccv_nnc_tensor_view_get_dim(d, ddim);
307 ccv_nnc_tensor_view_get_stride(d, dstride);
308 assert(ddim[2] == cdim[1])((void) sizeof ((ddim[2] == cdim[1]) ? 1 : 0), __extension__ (
{ if (ddim[2] == cdim[1]) ; else __assert_fail ("ddim[2] == cdim[1]"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 308, __extension__ __PRETTY_FUNCTION__); }))
;
309 assert(ddim[3] == num_heads * cdim[3])((void) sizeof ((ddim[3] == num_heads * cdim[3]) ? 1 : 0), __extension__
({ if (ddim[3] == num_heads * cdim[3]) ; else __assert_fail (
"ddim[3] == num_heads * cdim[3]", "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 309, __extension__ __PRETTY_FUNCTION__); }))
;
310 assert(w->info.dim[1] == ddim[3])((void) sizeof ((w->info.dim[1] == ddim[3]) ? 1 : 0), __extension__
({ if (w->info.dim[1] == ddim[3]) ; else __assert_fail ("w->info.dim[1] == ddim[3]"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 310, __extension__ __PRETTY_FUNCTION__); }))
;
311 assert(w->info.dim[0] == ddim[3])((void) sizeof ((w->info.dim[0] == ddim[3]) ? 1 : 0), __extension__
({ if (w->info.dim[0] == ddim[3]) ; else __assert_fail ("w->info.dim[0] == ddim[3]"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 311, __extension__ __PRETTY_FUNCTION__); }))
;
312 float* const dp = d->data.f32;
313 const float* const wp = w->data.f32;
314 const float* const cp = c->data.f32;
315 if (bias)
316 {
317 assert(ccv_nnc_tensor_count(bias->info) == ddim[3])((void) sizeof ((ccv_nnc_tensor_count(bias->info) == ddim[
3]) ? 1 : 0), __extension__ ({ if (ccv_nnc_tensor_count(bias->
info) == ddim[3]) ; else __assert_fail ("ccv_nnc_tensor_count(bias->info) == ddim[3]"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 317, __extension__ __PRETTY_FUNCTION__); }))
;
318 assert(CCV_IS_TENSOR_CONTIGUOUS(bias))((void) sizeof (((!((*(int*)(bias)) & CCV_TENSOR_VIEW) ||
(((ccv_nnc_tensor_view_t*)bias)->contiguous == 1))) ? 1 :
0), __extension__ ({ if ((!((*(int*)(bias)) & CCV_TENSOR_VIEW
) || (((ccv_nnc_tensor_view_t*)bias)->contiguous == 1))) ;
else __assert_fail ("CCV_IS_TENSOR_CONTIGUOUS(bias)", "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 318, __extension__ __PRETTY_FUNCTION__); }))
;
319 const float* const biasp = bias->data.f32;
320 for (i[0] = 0; i[0] < ddim[1]; i[0]++)
321 {
322 const float* const cp0 = cp + i[0] * cstride[0];
323 float* const dp0 = dp + i[0] * dstride[1];
324 parallel_for(y, ddim[2]){ int y; for ((y) = 0; (y) < (ddim[2]); (y)++) { {
325 int x, j, k;
326 const float* const cp1 = cp0 + y * cstride[1];
327 float* const dp1 = dp0 + y * dstride[2];
328 for (x = 0; x < ddim[3]; x++)
329 {
330 const float* const wp0 = wp + x * ddim[3];
331 float v = biasp[x];
332 for (j = 0; j < num_heads; j++)
333 {
334 const float* const cp2 = cp1 + j * cstride[2];
335 for (k = 0; k < cdim[3]; k++)
336 v += wp0[j * cdim[3] + k] * cp2[k * cstride[3]];
337 }
338 dp1[x * dstride[3]] = v;
339 }
340 } parallel_endfor} }
341 }
342 } else {
343 for (i[0] = 0; i[0] < ddim[1]; i[0]++)
344 {
345 const float* const cp0 = cp + i[0] * cstride[0];
346 float* const dp0 = dp + i[0] * dstride[1];
347 parallel_for(y, ddim[2]){ int y; for ((y) = 0; (y) < (ddim[2]); (y)++) { {
348 int x, j, k;
349 const float* const cp1 = cp0 + y * cstride[1];
350 float* const dp1 = dp0 + y * dstride[2];
351 for (x = 0; x < ddim[3]; x++)
352 {
353 const float* const wp0 = wp + x * ddim[3];
354 float v = 0;
355 for (j = 0; j < num_heads; j++)
356 {
357 const float* const cp2 = cp1 + j * cstride[2];
358 for (k = 0; k < cdim[3]; k++)
359 v += wp0[j * cdim[3] + k] * cp2[k * cstride[3]];
360 }
361 dp1[x * dstride[3]] = v;
362 }
363 } parallel_endfor} }
364 }
365 }
366 }
367 return CCV_NNC_EXEC_SUCCESS;
368}
369
370static int _ccv_nnc_scaled_dot_product_attention_back(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
371{
372 // Assuming no saved_softmax, we need to recompute from q, k, v.
373 // We cannot do this with masks (yet).
374 assert(input_size >= 6)((void) sizeof ((input_size >= 6) ? 1 : 0), __extension__ (
{ if (input_size >= 6) ; else __assert_fail ("input_size >= 6"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 374, __extension__ __PRETTY_FUNCTION__); }))
;
375 if (cmd.info.scaled_dot_product_attention.is_varlen)
376 return CCV_NNC_EXEC_INVALID;
377 ccv_nnc_tensor_view_t* const g = (ccv_nnc_tensor_view_t*)inputs[0];
378 ccv_nnc_tensor_view_t* const q = (ccv_nnc_tensor_view_t*)inputs[3];
379 ccv_nnc_tensor_view_t* const k = (ccv_nnc_tensor_view_t*)inputs[4];
380 ccv_nnc_tensor_view_t* const v = (ccv_nnc_tensor_view_t*)inputs[5];
381 ccv_nnc_tensor_view_t* const dq = (ccv_nnc_tensor_view_t*)outputs[0];
382 ccv_nnc_tensor_view_t* const dk = (ccv_nnc_tensor_view_t*)outputs[1];
383 ccv_nnc_tensor_view_t* const dv = (ccv_nnc_tensor_view_t*)outputs[2];
384 const int q_nd = ccv_nnc_tensor_nd(q->info.dim);
385 assert(q_nd == 3 || q_nd == 4)((void) sizeof ((q_nd == 3 || q_nd == 4) ? 1 : 0), __extension__
({ if (q_nd == 3 || q_nd == 4) ; else __assert_fail ("q_nd == 3 || q_nd == 4"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 385, __extension__ __PRETTY_FUNCTION__); }))
;
386 const int k_nd = ccv_nnc_tensor_nd(k->info.dim);
387 assert(k_nd == 3 || k_nd == 4)((void) sizeof ((k_nd == 3 || k_nd == 4) ? 1 : 0), __extension__
({ if (k_nd == 3 || k_nd == 4) ; else __assert_fail ("k_nd == 3 || k_nd == 4"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 387, __extension__ __PRETTY_FUNCTION__); }))
;
388 const int v_nd = ccv_nnc_tensor_nd(v->info.dim);
389 assert(v_nd == 3 || v_nd == 4)((void) sizeof ((v_nd == 3 || v_nd == 4) ? 1 : 0), __extension__
({ if (v_nd == 3 || v_nd == 4) ; else __assert_fail ("v_nd == 3 || v_nd == 4"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 389, __extension__ __PRETTY_FUNCTION__); }))
;
390 const int g_nd = ccv_nnc_tensor_nd(g->info.dim);
391 assert(g_nd == 3 || g_nd == 4)((void) sizeof ((g_nd == 3 || g_nd == 4) ? 1 : 0), __extension__
({ if (g_nd == 3 || g_nd == 4) ; else __assert_fail ("g_nd == 3 || g_nd == 4"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 391, __extension__ __PRETTY_FUNCTION__); }))
;
392 const int dq_nd = ccv_nnc_tensor_nd(dq->info.dim);
393 assert(dq_nd == 3 || dq_nd == 4)((void) sizeof ((dq_nd == 3 || dq_nd == 4) ? 1 : 0), __extension__
({ if (dq_nd == 3 || dq_nd == 4) ; else __assert_fail ("dq_nd == 3 || dq_nd == 4"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 393, __extension__ __PRETTY_FUNCTION__); }))
;
394 assert(dq_nd == q_nd)((void) sizeof ((dq_nd == q_nd) ? 1 : 0), __extension__ ({ if
(dq_nd == q_nd) ; else __assert_fail ("dq_nd == q_nd", "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 394, __extension__ __PRETTY_FUNCTION__); }))
;
395 const int dk_nd = ccv_nnc_tensor_nd(dk->info.dim);
396 assert(dk_nd == 3 || dk_nd == 4)((void) sizeof ((dk_nd == 3 || dk_nd == 4) ? 1 : 0), __extension__
({ if (dk_nd == 3 || dk_nd == 4) ; else __assert_fail ("dk_nd == 3 || dk_nd == 4"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 396, __extension__ __PRETTY_FUNCTION__); }))
;
397 assert(dk_nd == k_nd)((void) sizeof ((dk_nd == k_nd) ? 1 : 0), __extension__ ({ if
(dk_nd == k_nd) ; else __assert_fail ("dk_nd == k_nd", "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 397, __extension__ __PRETTY_FUNCTION__); }))
;
398 const int dv_nd = ccv_nnc_tensor_nd(dv->info.dim);
399 assert(dv_nd == 3 || dv_nd == 4)((void) sizeof ((dv_nd == 3 || dv_nd == 4) ? 1 : 0), __extension__
({ if (dv_nd == 3 || dv_nd == 4) ; else __assert_fail ("dv_nd == 3 || dv_nd == 4"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 399, __extension__ __PRETTY_FUNCTION__); }))
;
400 assert(dv_nd == v_nd)((void) sizeof ((dv_nd == v_nd) ? 1 : 0), __extension__ ({ if
(dv_nd == v_nd) ; else __assert_fail ("dv_nd == v_nd", "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 400, __extension__ __PRETTY_FUNCTION__); }))
;
401 assert(q_nd == k_nd && k_nd == v_nd && v_nd == g_nd)((void) sizeof ((q_nd == k_nd && k_nd == v_nd &&
v_nd == g_nd) ? 1 : 0), __extension__ ({ if (q_nd == k_nd &&
k_nd == v_nd && v_nd == g_nd) ; else __assert_fail (
"q_nd == k_nd && k_nd == v_nd && v_nd == g_nd"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 401, __extension__ __PRETTY_FUNCTION__); }))
;
402 // Assuming this is float 32.
403 int qdim[CCV_NNC_MAX_DIM_ALLOC(12)];
404 int kdim[CCV_NNC_MAX_DIM_ALLOC(12)];
405 int vdim[CCV_NNC_MAX_DIM_ALLOC(12)];
406 int gdim[CCV_NNC_MAX_DIM_ALLOC(12)];
407 int dqdim[CCV_NNC_MAX_DIM_ALLOC(12)];
408 int dkdim[CCV_NNC_MAX_DIM_ALLOC(12)];
409 int dvdim[CCV_NNC_MAX_DIM_ALLOC(12)];
410 ccv_nnc_tensor_view_get_dim(q, qdim);
411 ccv_nnc_tensor_view_get_dim(k, kdim);
412 ccv_nnc_tensor_view_get_dim(v, vdim);
413 ccv_nnc_tensor_view_get_dim(g, gdim);
414 ccv_nnc_tensor_view_get_dim(dq, dqdim);
415 ccv_nnc_tensor_view_get_dim(dk, dkdim);
416 ccv_nnc_tensor_view_get_dim(dv, dvdim);
417 if (q_nd == 3)
418 {
419 qdim[0] = qdim[1], qdim[1] = qdim[2], qdim[2] = 1;
420 kdim[0] = kdim[1], kdim[1] = kdim[2], kdim[2] = 1;
421 vdim[0] = vdim[1], vdim[1] = vdim[2], vdim[2] = 1;
422 gdim[0] = gdim[1], gdim[1] = gdim[2], gdim[2] = 1;
423 dqdim[0] = dqdim[1], dqdim[1] = dqdim[2], dqdim[2] = 1;
424 dkdim[0] = dkdim[1], dkdim[1] = dkdim[2], dkdim[2] = 1;
425 dvdim[0] = dvdim[1], dvdim[1] = dvdim[2], dvdim[2] = 1;
426 }
427 assert(qdim[0] == kdim[0] && kdim[0] == vdim[0] && vdim[0] == gdim[0])((void) sizeof ((qdim[0] == kdim[0] && kdim[0] == vdim
[0] && vdim[0] == gdim[0]) ? 1 : 0), __extension__ ({
if (qdim[0] == kdim[0] && kdim[0] == vdim[0] &&
vdim[0] == gdim[0]) ; else __assert_fail ("qdim[0] == kdim[0] && kdim[0] == vdim[0] && vdim[0] == gdim[0]"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 427, __extension__ __PRETTY_FUNCTION__); }))
;
428 assert(qdim[2] == gdim[2])((void) sizeof ((qdim[2] == gdim[2]) ? 1 : 0), __extension__ (
{ if (qdim[2] == gdim[2]) ; else __assert_fail ("qdim[2] == gdim[2]"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 428, __extension__ __PRETTY_FUNCTION__); }))
;
429 assert(kdim[2] == vdim[2])((void) sizeof ((kdim[2] == vdim[2]) ? 1 : 0), __extension__ (
{ if (kdim[2] == vdim[2]) ; else __assert_fail ("kdim[2] == vdim[2]"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 429, __extension__ __PRETTY_FUNCTION__); }))
;
430 assert(qdim[2] % kdim[2] == 0)((void) sizeof ((qdim[2] % kdim[2] == 0) ? 1 : 0), __extension__
({ if (qdim[2] % kdim[2] == 0) ; else __assert_fail ("qdim[2] % kdim[2] == 0"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 430, __extension__ __PRETTY_FUNCTION__); }))
;
431 assert(qdim[2] >= kdim[2])((void) sizeof ((qdim[2] >= kdim[2]) ? 1 : 0), __extension__
({ if (qdim[2] >= kdim[2]) ; else __assert_fail ("qdim[2] >= kdim[2]"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 431, __extension__ __PRETTY_FUNCTION__); }))
;
432 assert(qdim[3] == kdim[3])((void) sizeof ((qdim[3] == kdim[3]) ? 1 : 0), __extension__ (
{ if (qdim[3] == kdim[3]) ; else __assert_fail ("qdim[3] == kdim[3]"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 432, __extension__ __PRETTY_FUNCTION__); }))
;
433 assert(kdim[1] == vdim[1])((void) sizeof ((kdim[1] == vdim[1]) ? 1 : 0), __extension__ (
{ if (kdim[1] == vdim[1]) ; else __assert_fail ("kdim[1] == vdim[1]"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 433, __extension__ __PRETTY_FUNCTION__); }))
;
434 assert(gdim[1] == qdim[1])((void) sizeof ((gdim[1] == qdim[1]) ? 1 : 0), __extension__ (
{ if (gdim[1] == qdim[1]) ; else __assert_fail ("gdim[1] == qdim[1]"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 434, __extension__ __PRETTY_FUNCTION__); }))
;
435 assert(gdim[3] == vdim[3])((void) sizeof ((gdim[3] == vdim[3]) ? 1 : 0), __extension__ (
{ if (gdim[3] == vdim[3]) ; else __assert_fail ("gdim[3] == vdim[3]"
, "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 435, __extension__ __PRETTY_FUNCTION__); }))
;
436 assert(CCV_NNC_MAX_DIM == 2)((void) sizeof (((2) == 2) ? 1 : 0), __extension__ ({ if ((2)
== 2) ; else __assert_fail ("CCV_NNC_MAX_DIM == 2", "scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c"
, 436, __extension__ __PRETTY_FUNCTION__); }))
; // Need to change this logic for CCV_NNC_MAX_DIM == other number.
437 int qstride[CCV_NNC_MAX_DIM_ALLOC(12)];
438 int kstride[CCV_NNC_MAX_DIM_ALLOC(12)];
439 int vstride[CCV_NNC_MAX_DIM_ALLOC(12)];
440 int gstride[CCV_NNC_MAX_DIM_ALLOC(12)];
441 int dqstride[CCV_NNC_MAX_DIM_ALLOC(12)];
442 int dkstride[CCV_NNC_MAX_DIM_ALLOC(12)];
443 int dvstride[CCV_NNC_MAX_DIM_ALLOC(12)];
444 ccv_nnc_tensor_view_get_stride(q, qstride);
445 ccv_nnc_tensor_view_get_stride(k, kstride);
446 ccv_nnc_tensor_view_get_stride(v, vstride);
447 ccv_nnc_tensor_view_get_stride(g, gstride);
448 ccv_nnc_tensor_view_get_stride(dq, dqstride);
449 ccv_nnc_tensor_view_get_stride(dk, dkstride);
450 ccv_nnc_tensor_view_get_stride(dv, dvstride);
451 if (q_nd == 3)
452 {
453 qstride[0] = qstride[1], qstride[1] = qstride[2], qstride[2] = qstride[3];
454 kstride[0] = kstride[1], kstride[1] = kstride[2], kstride[2] = kstride[3];
455 vstride[0] = vstride[1], vstride[1] = vstride[2], vstride[2] = vstride[3];
456 gstride[0] = gstride[1], gstride[1] = gstride[2], gstride[2] = gstride[3];
457 dqstride[0] = dqstride[1], dqstride[1] = dqstride[2], dqstride[2] = dqstride[3];
458 dkstride[0] = dkstride[1], dkstride[1] = dkstride[2], dkstride[2] = dkstride[3];
459 dvstride[0] = dvstride[1], dvstride[1] = dvstride[2], dvstride[2] = dvstride[3];
460 }
461 int i[CCV_NNC_MAX_DIM(2) + 2];
462 float* qk = ccv_nnc_stream_context_get_workspace(stream_context, sizeof(float) * 2 * kdim[1], CCV_TENSOR_CPU_MEMORY);
463 const float* const qp = q->data.f32;
464 const float* const kp = k->data.f32;
465 const float* const vp = v->data.f32;
466 const float* const gp = g->data.f32;
467 float* const dqp = dq->data.f32;
468 float* const dkp = dk->data.f32;
469 float* const dvp = dv->data.f32;
470 const float scale = cmd.info.scaled_dot_product_attention.scale;
471 const int is_causal = cmd.info.scaled_dot_product_attention.is_causal;
472 const int h_h_k_ratio = qdim[2] / kdim[2];
473 for (i[0] = 0; i[0] < qdim[0]; i[0]++)
474 {
475 const float* const qp0 = qp + i[0] * qstride[0];
476 const float* const kp0 = kp + i[0] * kstride[0];
477 const float* const vp0 = vp + i[0] * vstride[0];
478 const float* const gp0 = gp + i[0] * gstride[0];
479 float* const dqp0 = dqp + i[0] * dqstride[0];
480 float* const dkp0 = dkp + i[0] * dkstride[0];
481 float* const dvp0 = dvp + i[0] * dvstride[0];
482 for (i[1] = 0; i[1] < qdim[2]; i[1]++)
483 {
484 const float* const qp1 = qp0 + i[1] * qstride[2];
485 const float* const kp1 = kp0 + (i[1] / h_h_k_ratio) * kstride[2];
486 const float* const vp1 = vp0 + (i[1] / h_h_k_ratio) * vstride[2];
487 const float* const gp1 = gp0 + i[1] * gstride[2];
488 float* const dqp1 = dqp0 + i[1] * dqstride[2];
489 float* const dkp1 = dkp0 + (i[1] / h_h_k_ratio) * dkstride[2];
490 float* const dvp1 = dvp0 + (i[1] / h_h_k_ratio) * dvstride[2];
491 // Compute Q @ K^T
492 int x, y, k;
493 for (x = 0; x < qdim[1]; x++)
494 {
495 float* const dqp2 = dqp1 + x * dqstride[1];
496 for (k = 0; k < qdim[3]; k++)
497 dqp2[k * dqstride[3]] = 0;
498 }
499 // Only zero out when it is at 0-index.
500 if (i[1] % h_h_k_ratio == 0)
501 for (y = 0; y < kdim[1]; y++)
502 {
503 float* const dkp2 = dkp1 + y * dkstride[1];
504 for (k = 0; k < qdim[3]; k++)
505 dkp2[k * dkstride[3]] = 0;
506 }
507 // Only zero out when it is at 0-index.
508 if (i[1] % h_h_k_ratio == 0)
509 for (y = 0; y < kdim[1]; y++)
510 {
511 float* const dvp2 = dvp1 + y * dvstride[1];
512 for (k = 0; k < vdim[3]; k++)
513 dvp2[k * dvstride[3]] = 0;
514 }
515 for (x = 0; x < qdim[1]; x++)
516 {
517 const float* const qp2 = qp1 + x * qstride[1];
518 const float* const gp2 = gp1 + x * gstride[1];
519 float* const qk0 = qk;
520 float* const qks0 = qk + kdim[1];
521 for (y = 0; y < kdim[1]; y++)
522 {
523 const float* const kp2 = kp1 + y * kstride[1];
524 float v = 0;
525 for (k = 0; k < qdim[3]; k++)
526 v += qp2[k * qstride[3]] * kp2[k * kstride[3]];
527 qk0[y] = scale * v;
528 }
529 // Compute softmax on qk.
530 if (is_causal)
531 {
532 const int x_end = ccv_max(x - qdim[1] + kdim[1] + 1, 0)({ typeof (x - qdim[1] + kdim[1] + 1) _a = (x - qdim[1] + kdim
[1] + 1); typeof (0) _b = (0); (_a > _b) ? _a : _b; })
;
533 for (y = x_end; y < kdim[1]; y++)
534 qk0[y] = 0;
535 double maxval = qk0[0];
536 for (y = 1; y < x_end; y++)
537 if (qk0[y] > maxval)
538 maxval = qk0[y];
539 double sumval = 0;
540 for (y = 0; y < x_end; y++)
541 sumval += (qk0[y] = expf(qk0[y] - maxval));
542 sumval = 1.0 / sumval;
543 for (y = 0; y < x_end; y++)
544 qk0[y] *= sumval;
545 } else {
546 double maxval = qk0[0];
547 for (y = 1; y < kdim[1]; y++)
548 if (qk0[y] > maxval)
549 maxval = qk0[y];
550 double sumval = 0;
551 for (y = 0; y < kdim[1]; y++)
552 sumval += (qk0[y] = expf(qk0[y] - maxval));
553 sumval = 1.0 / sumval;
554 for (y = 0; y < kdim[1]; y++)
555 qk0[y] *= sumval;
556 }
557 for (y = 0; y < kdim[1]; y++)
558 {
559 float* const dvp2 = dvp1 + y * dvstride[1];
560 const float v = qk0[y];
561 for (k = 0; k < vdim[3]; k++)
562 dvp2[k * dvstride[3]] += v * gp2[k * gstride[3]];
563 }
564 double sumval = 0;
565 for (y = 0; y < kdim[1]; y++)
566 {
567 const float* const vp2 = vp1 + y * vstride[1];
568 float v = 0;
569 for (k = 0; k < vdim[3]; k++)
570 v += gp2[k * gstride[3]] * vp2[k * vstride[3]];
571 qks0[y] = v;
572 sumval += v * qk0[y];
573 }
574 for (y = 0; y < kdim[1]; y++)
575 qk0[y] = (qks0[y] - sumval) * qk0[y];
576 float* const dqp2 = dqp1 + x * dqstride[1];
577 for (y = 0; y < kdim[1]; y++)
578 {
579 const float* const kp2 = kp1 + y * kstride[1];
580 float* const dkp2 = dkp1 + y * dkstride[1];
581 const float v = scale * qk0[y];
582 for (k = 0; k < qdim[3]; k++)
583 {
584 dqp2[k * dqstride[3]] += v * kp2[k * kstride[3]];
585 dkp2[k * dkstride[3]] += v * qp2[k * qstride[3]];
586 }
587 }
588 }
589 }
590 }
591 return CCV_NNC_EXEC_SUCCESS;
592}
593
// Register the CPU reference implementation of the scaled dot-product attention forward pass.
REGISTER_COMMAND_BACKEND(CCV_NNC_SCALED_DOT_PRODUCT_ATTENTION_FORWARD, CCV_NNC_BACKEND_CPU_REF)(ccv_nnc_cmd_backend_registry_t* const registry)
{
	registry->tensor_formats = CCV_TENSOR_FORMAT_NHWC;
	// CCV_32S in addition to CCV_32F — presumably for an integer mask input; TODO confirm against the forward kernel's attn_mask handling.
	registry->tensor_datatypes = CCV_32F | CCV_32S;
	registry->tensor_memory = CCV_TENSOR_CPU_MEMORY;
	registry->algorithms = 1;
	registry->exec = _ccv_nnc_scaled_dot_product_attention_forw;
}
602
// Register the CPU reference implementation of the scaled dot-product attention backward pass.
REGISTER_COMMAND_BACKEND(CCV_NNC_SCALED_DOT_PRODUCT_ATTENTION_BACKWARD, CCV_NNC_BACKEND_CPU_REF)(ccv_nnc_cmd_backend_registry_t* const registry)
{
	registry->tensor_formats = CCV_TENSOR_FORMAT_NHWC;
	// Backward only supports float32 (no CCV_32S here — the backward kernel takes no mask input).
	registry->tensor_datatypes = CCV_32F;
	registry->tensor_memory = CCV_TENSOR_CPU_MEMORY;
	registry->algorithms = 1;
	registry->exec = _ccv_nnc_scaled_dot_product_attention_back;
}