Bug Summary

File: nnc/cmd/scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c
Warning: line 190, column 15
Array access (from variable 'ssp2') results in a null pointer dereference
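
Path summary: on the reported path, 'saved_softmax' (outputs[1]) is non-null, so the write-back guard at line 188 is taken. At line 109, however, the analyzer assumes saved_softmax->data.f32 is null, which makes 'ssp' null and, in turn, 'ssp0' (line 123), 'ssp1' (line 131), and 'ssp2' (line 137). The store ssp2[y] = qk0[y] at line 190 then dereferences the null 'ssp2': the guard tests the tensor pointer, not the derived data pointer that is actually written through. A fix sketch follows the listing.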

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-unknown-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name ccv_nnc_scaled_dot_product_attention_cpu_ref.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 2 -pic-is-pie -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -target-feature +sse2 -tune-cpu generic -debugger-tuning=gdb -fdebug-compilation-dir=/home/liu/actions-runner/_work/ccv/ccv/lib/nnc/cmd -fcoverage-compilation-dir=/home/liu/actions-runner/_work/ccv/ccv/lib/nnc/cmd -resource-dir /usr/local/lib/clang/18 -I ../../ -I .. -I /usr/local/cuda/include -D HAVE_CBLAS -D HAVE_LIBPNG -D HAVE_LIBJPEG -D HAVE_FFTW3 -D HAVE_PTHREAD -D HAVE_LIBLINEAR -D HAVE_TESSERACT -D HAVE_AVCODEC -D HAVE_AVFORMAT -D HAVE_AVUTIL -D HAVE_SWSCALE -D HAVE_SSE2 -D HAVE_GSL -D HAVE_CUDA -D HAVE_CUDNN -D HAVE_NCCL -D USE_SYSTEM_CUB -D HAVE_CUDA_SM80 -I /usr/local/include -internal-isystem /usr/local/lib/clang/18/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/12/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -ferror-limit 19 -fgnuc-version=4.2.1 -fskip-odr-check-in-gmf -vectorize-loops -vectorize-slp -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/liu/actions-runner/_work/ccv/ccv/_analyze/2024-06-10-094233-222984-1 -x c scaled_dot_product_attention/ccv_nnc_scaled_dot_product_attention_cpu_ref.c
1#include "ccv.h"
2#include "ccv_internal.h"
3#include "nnc/ccv_nnc.h"
4#include "nnc/ccv_nnc_easy.h"
5#include "nnc/ccv_nnc_internal.h"
6#ifdef USE_OPENMP
7#include <omp.h>
8#endif
9#ifdef USE_DISPATCH
10#include <dispatch/dispatch.h>
11#endif
12
13// Shared methods.
14#include "../_ccv_nnc_cpu_ref.h"
15
16static int _ccv_nnc_scaled_dot_product_attention_forw(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
17{
18 assert(input_size >= 3);
[1] Assuming 'input_size' is >= 3
[2] Taking true branch
19 assert(output_size >= 1);
[3] Assuming 'output_size' is >= 1
[4] Taking true branch
20 ccv_nnc_tensor_view_t* const q = (ccv_nnc_tensor_view_t*)inputs[0];
21 ccv_nnc_tensor_view_t* const k = (ccv_nnc_tensor_view_t*)inputs[1];
22 ccv_nnc_tensor_view_t* const v = (ccv_nnc_tensor_view_t*)inputs[2];
23 ccv_nnc_tensor_view_t* const attn_mask = input_size > 3 ? (ccv_nnc_tensor_view_t*)inputs[3] : 0;
[5] Assuming 'input_size' is <= 3
[6] '?' condition is false
24 ccv_nnc_tensor_view_t* const w = input_size > 4 ? (ccv_nnc_tensor_view_t*)inputs[4] : 0;
[6.1] 'input_size' is <= 4
[7] '?' condition is false
25 ccv_nnc_tensor_view_t* const bias = input_size > 5 ? (ccv_nnc_tensor_view_t*)inputs[5] : 0;
[7.1] 'input_size' is <= 5
[8] '?' condition is false
26 if (bias) // bias always requires a weight matrix.
[8.1] 'bias' is null
27 { assert(w); }
28 ccv_nnc_tensor_view_t* const c = (w) ? (ccv_nnc_tensor_view_t*)outputs[2] : (ccv_nnc_tensor_view_t*)outputs[0];
[9.1] 'w' is null
[9] Taking false branch
[10] '?' condition is false
29 ccv_nnc_tensor_view_t* const saved_softmax = output_size > 1 ? (ccv_nnc_tensor_view_t*)outputs[1] : 0;
[11] Assuming 'output_size' is > 1
[12] '?' condition is true
30 const int q_nd = ccv_nnc_tensor_nd(q->info.dim);
31 assert(q_nd == 3 || q_nd == 4);
[13] Assuming 'q_nd' is equal to 3
32 const int k_nd = ccv_nnc_tensor_nd(k->info.dim);
33 assert(k_nd == 3 || k_nd == 4);
[14] Assuming 'k_nd' is equal to 3
34 const int v_nd = ccv_nnc_tensor_nd(v->info.dim);
35 assert(v_nd == 3 || v_nd == 4);
[15] Assuming 'v_nd' is equal to 3
36 const int c_nd = ccv_nnc_tensor_nd(c->info.dim);
37 assert(c_nd == 3 || c_nd == 4);
[16] Assuming 'c_nd' is equal to 3
38 assert(q_nd == k_nd && k_nd == v_nd && v_nd == c_nd);
[17] Taking true branch
39 // Assuming this is float 32.
40 int qdim[CCV_NNC_MAX_DIM_ALLOC];
41 int kdim[CCV_NNC_MAX_DIM_ALLOC];
42 int vdim[CCV_NNC_MAX_DIM_ALLOC];
43 int cdim[CCV_NNC_MAX_DIM_ALLOC];
44 int ssdim[CCV_NNC_MAX_DIM_ALLOC];
45 int amdim[CCV_NNC_MAX_DIM_ALLOC];
46 ccv_nnc_tensor_view_get_dim(q, qdim);
47 ccv_nnc_tensor_view_get_dim(k, kdim);
48 ccv_nnc_tensor_view_get_dim(v, vdim);
49 ccv_nnc_tensor_view_get_dim(c, cdim);
50 if (q_nd == 3)
[17.1] 'q_nd' is equal to 3
[18] Taking true branch
51 {
52 qdim[0] = qdim[1], qdim[1] = qdim[2], qdim[2] = 1;
53 kdim[0] = kdim[1], kdim[1] = kdim[2], kdim[2] = 1;
54 vdim[0] = vdim[1], vdim[1] = vdim[2], vdim[2] = 1;
55 cdim[0] = cdim[1], cdim[1] = cdim[2], cdim[2] = 1;
56 }
57 assert(qdim[0] == kdim[0] && kdim[0] == vdim[0] && vdim[0] == cdim[0]);
[19] Assuming the condition is true
[20] Assuming the condition is true
[21] Assuming the condition is true
[22] Taking true branch
58 assert(qdim[2] == cdim[2]);
[23] Taking true branch
59 assert(kdim[2] == vdim[2]);
[24] Taking true branch
60 assert(qdim[2] % kdim[2] == 0);
[25] Taking true branch
61 assert(qdim[2] >= kdim[2]);
[26] Taking true branch
62 assert(qdim[3] == kdim[3]);
[27] Assuming the condition is true
[28] Taking true branch
63 assert(kdim[1] == vdim[1]);
[29] Assuming the condition is true
[30] Taking true branch
64 assert(cdim[1] == qdim[1]);
[31] Assuming the condition is true
[32] Taking true branch
65 assert(cdim[3] == vdim[3]);
[33] Assuming the condition is true
[34] Taking true branch
66 assert(CCV_NNC_MAX_DIM == 2); // Need to change this logic for CCV_NNC_MAX_DIM == other number.
[35] Taking true branch
67 int qstride[CCV_NNC_MAX_DIM_ALLOC];
68 int kstride[CCV_NNC_MAX_DIM_ALLOC];
69 int vstride[CCV_NNC_MAX_DIM_ALLOC];
70 int cstride[CCV_NNC_MAX_DIM_ALLOC];
71 int ssstride[CCV_NNC_MAX_DIM_ALLOC];
72 int amstride[CCV_NNC_MAX_DIM_ALLOC];
73 ccv_nnc_tensor_view_get_stride(q, qstride);
74 ccv_nnc_tensor_view_get_stride(k, kstride);
75 ccv_nnc_tensor_view_get_stride(v, vstride);
76 ccv_nnc_tensor_view_get_stride(c, cstride);
77 if (q_nd == 3)
[35.1] 'q_nd' is equal to 3
[36] Taking true branch
78 {
79 qstride[0] = qstride[1], qstride[1] = qstride[2], qstride[2] = qstride[3];
80 kstride[0] = kstride[1], kstride[1] = kstride[2], kstride[2] = kstride[3];
81 vstride[0] = vstride[1], vstride[1] = vstride[2], vstride[2] = vstride[3];
82 cstride[0] = cstride[1], cstride[1] = cstride[2], cstride[2] = cstride[3];
83 }
84 if (saved_softmax)
[37] Assuming 'saved_softmax' is non-null
[38] Taking true branch
85 {
86 ccv_nnc_tensor_view_get_dim(saved_softmax, ssdim);
87 ccv_nnc_tensor_view_get_stride(saved_softmax, ssstride);
88 assert(ssdim[0] == qdim[0]);
[39] Assuming the condition is true
[40] Taking true branch
89 assert(ssdim[1] == qdim[2]);
[41] Assuming the condition is true
[42] Taking true branch
90 assert(ssdim[2] == qdim[1]);
[43] Assuming the condition is true
[44] Taking true branch
91 assert(ssdim[3] == kdim[1]);
[45] Assuming the condition is true
[46] Taking true branch
92 }
93 if (attn_mask)
[46.1] 'attn_mask' is null
[47] Taking false branch
94 {
95 ccv_nnc_tensor_view_get_dim(attn_mask, amdim);
96 ccv_nnc_tensor_view_get_stride(attn_mask, amstride);
97 assert(amdim[0] == qdim[0] || amdim[0] == 1);
98 assert(amdim[1] == qdim[2] || amdim[1] == 1);
99 assert(amdim[2] == qdim[1]);
100 assert(amdim[3] == kdim[1]);
101 }
102 int i[CCV_NNC_MAX_DIM + 2];
103 float* qk = ccv_nnc_stream_context_get_workspace(stream_context, sizeof(float) * qdim[1] * kdim[1], CCV_TENSOR_CPU_MEMORY);
104 const float* const qp = q->data.f32;
105 const float* const kp = k->data.f32;
106 const float* const vp = v->data.f32;
107 const float* const amp = attn_mask ? attn_mask->data.f32 : 0;
[47.1] 'attn_mask' is null
[48] '?' condition is false
108 float* const cp = c->data.f32;
109 float* const ssp = saved_softmax ? saved_softmax->data.f32 : 0;
[48.1] 'saved_softmax' is non-null
[49] '?' condition is true
110 const float scale = cmd.info.scaled_dot_product_attention.scale;
111 const int is_causal = cmd.info.scaled_dot_product_attention.is_causal;
112 const int h_h_k_ratio = qdim[2] / kdim[2];
113 assert(kdim[2] == vdim[2]);
[50] Taking true branch
114 assert(qdim[2] >= kdim[2]);
[51] Taking true branch
115 assert(qdim[2] % kdim[2] == 0);
[52] Taking true branch
116 for (i[0] = 0; i[0] < qdim[0]; i[0]++)
[53] Assuming the condition is true
[54] Loop condition is true. Entering loop body
117 {
118 const float* const qp0 = qp + i[0] * qstride[0];
119 const float* const kp0 = kp + i[0] * kstride[0];
120 const float* const vp0 = vp + i[0] * vstride[0];
121 const float* const amp0 = amp && amdim[0] > 1 ? amp + i[0] * amstride[0] : amp;
[54.1] 'amp' is null
122 float* const cp0 = cp + i[0] * cstride[0];
123 float* const ssp0 = ssp ? ssp + i[0] * ssstride[0] : 0;
[55] Assuming 'ssp' is null
[56] '?' condition is false
124 for (i[1] = 0; i[1] < qdim[2]; i[1]++)
[57] Loop condition is true. Entering loop body
125 {
126 const float* const qp1 = qp0 + i[1] * qstride[2];
127 const float* const kp1 = kp0 + (i[1] / h_h_k_ratio) * kstride[2];
128 const float* const vp1 = vp0 + (i[1] / h_h_k_ratio) * vstride[2];
129 const float* const amp1 = amp && amdim[1] > 1 ? amp0 + i[1] * amstride[1] : amp0;
[57.1] 'amp' is null
130 float* const cp1 = cp0 + i[1] * cstride[2];
131 float* const ssp1 = ssp0 ? ssp0 + i[1] * ssstride[1] : 0;
[57.2] 'ssp0' is null
[58] '?' condition is false
132 // Compute Q @ K^T
133 parallel_for(x, qdim[1]) {
[59] Assuming the condition is true
[60] Loop condition is true. Entering loop body
134 int y, k;
135 const float* const qp2 = qp1 + x * qstride[1];
136 float* const cp2 = cp1 + x * cstride[1];
137 float* const ssp2 = ssp0 ? ssp1 + x * ssstride[2] : 0;
[60.1] 'ssp0' is null
[61] '?' condition is false
[62] 'ssp2' initialized to a null pointer value
138 float* const qk0 = qk + x * kdim[1];
139 const float* const amp2 = amp1 ? amp1 + x * amstride[2] : 0;
[62.1] 'amp1' is null
[63] '?' condition is false
140 if (attn_mask)
[63.1] 'attn_mask' is null
[64] Taking false branch
141 {
142 for (y = 0; y < kdim[1]; y++)
143 {
144 const float* const kp2 = kp1 + y * kstride[1];
145 float v = 0;
146 for (k = 0; k < qdim[3]; k++)
147 v += qp2[k * qstride[3]] * kp2[k * kstride[3]];
148 qk0[y] = scale * v + amp2[y * amstride[3]];
149 }
150 } else {
151 for (y = 0; y < kdim[1]; y++)
[65] Assuming the condition is true
[66] Loop condition is true. Entering loop body
[69] Assuming the condition is false
[70] Loop condition is false. Execution continues on line 161
152 {
153 const float* const kp2 = kp1 + y * kstride[1];
154 float v = 0;
155 for (k = 0; k < qdim[3]; k++)
[67] Assuming the condition is false
[68] Loop condition is false. Execution continues on line 157
156 v += qp2[k * qstride[3]] * kp2[k * kstride[3]];
157 qk0[y] = scale * v;
158 }
159 }
160 // Compute softmax on qk.
161 if (is_causal)
[71] Assuming 'is_causal' is 0
[72] Taking false branch
162 {
163 const int x_end = ccv_max(x - qdim[1] + kdim[1] + 1, 0);
164 for (y = x_end; y < kdim[1]; y++)
165 qk0[y] = 0;
166 double maxval = qk0[0];
167 for (y = 1; y < x_end; y++)
168 if (qk0[y] > maxval)
169 maxval = qk0[y];
170 double sumval = 0;
171 for (y = 0; y < x_end; y++)
172 sumval += (qk0[y] = expf(qk0[y] - maxval));
173 sumval = 1.0 / sumval;
174 for (y = 0; y < x_end; y++)
175 qk0[y] *= sumval;
176 } else {
177 double maxval = qk0[0];
178 for (y = 1; y < kdim[1]; y++)
[73] Loop condition is false. Execution continues on line 181
179 if (qk0[y] > maxval)
180 maxval = qk0[y];
181 double sumval = 0;
182 for (y = 0; y < kdim[1]; y++)
[74] Loop condition is true. Entering loop body
[75] Loop condition is false. Execution continues on line 184
183 sumval += (qk0[y] = expf(qk0[y] - maxval));
184 sumval = 1.0 / sumval;
185 for (y = 0; y < kdim[1]; y++)
[76] Loop condition is true. Entering loop body
[77] Loop condition is false. Execution continues on line 188
186 qk0[y] *= sumval;
187 }
188 if (saved_softmax)
[77.1] 'saved_softmax' is non-null
[78] Taking true branch
189 for (y = 0; y < kdim[1]; y++)
[79] Loop condition is true. Entering loop body
190 ssp2[y] = qk0[y];
[80] Array access (from variable 'ssp2') results in a null pointer dereference
191 for (k = 0; k < vdim[3]; k++)
192 cp2[k * cstride[3]] = 0;
193 for (y = 0; y < kdim[1]; y++)
194 {
195 const float* const vp2 = vp1 + y * vstride[1];
196 const float v = qk0[y];
197 for (k = 0; k < vdim[3]; k++)
198 cp2[k * cstride[3]] += v * vp2[k * vstride[3]];
199 }
200 } parallel_endfor
201 }
202 }
203 if (w)
204 {
205 const int num_heads = cdim[2];
206 ccv_nnc_tensor_view_t* const d = (ccv_nnc_tensor_view_t*)outputs[0];
207 const int w_nd = ccv_nnc_tensor_nd(w->info.dim);
208 assert(w_nd == 2);
209 assert(CCV_IS_TENSOR_CONTIGUOUS(w));
210 const int d_nd = ccv_nnc_tensor_nd(d->info.dim);
211 assert(d_nd == 3);
212 int ddim[CCV_NNC_MAX_DIM_ALLOC];
213 int dstride[CCV_NNC_MAX_DIM_ALLOC];
214 ccv_nnc_tensor_view_get_dim(d, ddim);
215 ccv_nnc_tensor_view_get_stride(d, dstride);
216 assert(ddim[2] == cdim[1]);
217 assert(ddim[3] == num_heads * cdim[3]);
218 assert(w->info.dim[1] == ddim[3]);
219 assert(w->info.dim[0] == ddim[3]);
220 float* const dp = d->data.f32;
221 const float* const wp = w->data.f32;
222 const float* const cp = c->data.f32;
223 if (bias)
224 {
225 assert(ccv_nnc_tensor_count(bias->info) == ddim[3]);
226 assert(CCV_IS_TENSOR_CONTIGUOUS(bias));
227 const float* const biasp = bias->data.f32;
228 for (i[0] = 0; i[0] < ddim[1]; i[0]++)
229 {
230 const float* const cp0 = cp + i[0] * cstride[0];
231 float* const dp0 = dp + i[0] * dstride[1];
232 parallel_for(y, ddim[2]) {
233 int x, j, k;
234 const float* const cp1 = cp0 + y * cstride[1];
235 float* const dp1 = dp0 + y * dstride[2];
236 for (x = 0; x < ddim[3]; x++)
237 {
238 const float* const wp0 = wp + x * ddim[3];
239 float v = biasp[x];
240 for (j = 0; j < num_heads; j++)
241 {
242 const float* const cp2 = cp1 + j * cstride[2];
243 for (k = 0; k < cdim[3]; k++)
244 v += wp0[j * cdim[3] + k] * cp2[k * cstride[3]];
245 }
246 dp1[x * dstride[3]] = v;
247 }
248 } parallel_endfor
249 }
250 } else {
251 for (i[0] = 0; i[0] < ddim[1]; i[0]++)
252 {
253 const float* const cp0 = cp + i[0] * cstride[0];
254 float* const dp0 = dp + i[0] * dstride[1];
255 parallel_for(y, ddim[2]) {
256 int x, j, k;
257 const float* const cp1 = cp0 + y * cstride[1];
258 float* const dp1 = dp0 + y * dstride[2];
259 for (x = 0; x < ddim[3]; x++)
260 {
261 const float* const wp0 = wp + x * ddim[3];
262 float v = 0;
263 for (j = 0; j < num_heads; j++)
264 {
265 const float* const cp2 = cp1 + j * cstride[2];
266 for (k = 0; k < cdim[3]; k++)
267 v += wp0[j * cdim[3] + k] * cp2[k * cstride[3]];
268 }
269 dp1[x * dstride[3]] = v;
270 }
271 } parallel_endfor
272 }
273 }
274 }
275 return CCV_NNC_EXEC_SUCCESS;
276}
277
278static int _ccv_nnc_scaled_dot_product_attention_back(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
279{
280 // Assuming no saved_softmax, we need to recompute from q, k, v.
281 // We cannot do this with masks (yet).
282 assert(input_size >= 6);
283 ccv_nnc_tensor_view_t* const g = (ccv_nnc_tensor_view_t*)inputs[0];
284 ccv_nnc_tensor_view_t* const q = (ccv_nnc_tensor_view_t*)inputs[3];
285 ccv_nnc_tensor_view_t* const k = (ccv_nnc_tensor_view_t*)inputs[4];
286 ccv_nnc_tensor_view_t* const v = (ccv_nnc_tensor_view_t*)inputs[5];
287 ccv_nnc_tensor_view_t* const dq = (ccv_nnc_tensor_view_t*)outputs[0];
288 ccv_nnc_tensor_view_t* const dk = (ccv_nnc_tensor_view_t*)outputs[1];
289 ccv_nnc_tensor_view_t* const dv = (ccv_nnc_tensor_view_t*)outputs[2];
290 const int q_nd = ccv_nnc_tensor_nd(q->info.dim);
291 assert(q_nd == 3 || q_nd == 4);
292 const int k_nd = ccv_nnc_tensor_nd(k->info.dim);
293 assert(k_nd == 3 || k_nd == 4);
294 const int v_nd = ccv_nnc_tensor_nd(v->info.dim);
295 assert(v_nd == 3 || v_nd == 4);
296 const int g_nd = ccv_nnc_tensor_nd(g->info.dim);
297 assert(g_nd == 3 || g_nd == 4);
298 const int dq_nd = ccv_nnc_tensor_nd(dq->info.dim);
299 assert(dq_nd == 3 || dq_nd == 4);
300 assert(dq_nd == q_nd);
301 const int dk_nd = ccv_nnc_tensor_nd(dk->info.dim);
302 assert(dk_nd == 3 || dk_nd == 4);
303 assert(dk_nd == k_nd);
304 const int dv_nd = ccv_nnc_tensor_nd(dv->info.dim);
305 assert(dv_nd == 3 || dv_nd == 4);
306 assert(dv_nd == v_nd);
307 assert(q_nd == k_nd && k_nd == v_nd && v_nd == g_nd);
308 // Assuming this is float 32.
309 int qdim[CCV_NNC_MAX_DIM_ALLOC];
310 int kdim[CCV_NNC_MAX_DIM_ALLOC];
311 int vdim[CCV_NNC_MAX_DIM_ALLOC];
312 int gdim[CCV_NNC_MAX_DIM_ALLOC];
313 int dqdim[CCV_NNC_MAX_DIM_ALLOC];
314 int dkdim[CCV_NNC_MAX_DIM_ALLOC];
315 int dvdim[CCV_NNC_MAX_DIM_ALLOC];
316 ccv_nnc_tensor_view_get_dim(q, qdim);
317 ccv_nnc_tensor_view_get_dim(k, kdim);
318 ccv_nnc_tensor_view_get_dim(v, vdim);
319 ccv_nnc_tensor_view_get_dim(g, gdim);
320 ccv_nnc_tensor_view_get_dim(dq, dqdim);
321 ccv_nnc_tensor_view_get_dim(dk, dkdim);
322 ccv_nnc_tensor_view_get_dim(dv, dvdim);
323 if (q_nd == 3)
324 {
325 qdim[0] = qdim[1], qdim[1] = qdim[2], qdim[2] = 1;
326 kdim[0] = kdim[1], kdim[1] = kdim[2], kdim[2] = 1;
327 vdim[0] = vdim[1], vdim[1] = vdim[2], vdim[2] = 1;
328 gdim[0] = gdim[1], gdim[1] = gdim[2], gdim[2] = 1;
329 dqdim[0] = dqdim[1], dqdim[1] = dqdim[2], dqdim[2] = 1;
330 dkdim[0] = dkdim[1], dkdim[1] = dkdim[2], dkdim[2] = 1;
331 dvdim[0] = dvdim[1], dvdim[1] = dvdim[2], dvdim[2] = 1;
332 }
333 assert(qdim[0] == kdim[0] && kdim[0] == vdim[0] && vdim[0] == gdim[0]);
334 assert(qdim[2] == gdim[2]);
335 assert(kdim[2] == vdim[2]);
336 assert(qdim[2] % kdim[2] == 0);
337 assert(qdim[2] >= kdim[2]);
338 assert(qdim[3] == kdim[3]);
339 assert(kdim[1] == vdim[1]);
340 assert(gdim[1] == qdim[1]);
341 assert(gdim[3] == vdim[3]);
342 assert(CCV_NNC_MAX_DIM == 2); // Need to change this logic for CCV_NNC_MAX_DIM == other number.
343 int qstride[CCV_NNC_MAX_DIM_ALLOC];
344 int kstride[CCV_NNC_MAX_DIM_ALLOC];
345 int vstride[CCV_NNC_MAX_DIM_ALLOC];
346 int gstride[CCV_NNC_MAX_DIM_ALLOC];
347 int dqstride[CCV_NNC_MAX_DIM_ALLOC];
348 int dkstride[CCV_NNC_MAX_DIM_ALLOC];
349 int dvstride[CCV_NNC_MAX_DIM_ALLOC];
350 ccv_nnc_tensor_view_get_stride(q, qstride);
351 ccv_nnc_tensor_view_get_stride(k, kstride);
352 ccv_nnc_tensor_view_get_stride(v, vstride);
353 ccv_nnc_tensor_view_get_stride(g, gstride);
354 ccv_nnc_tensor_view_get_stride(dq, dqstride);
355 ccv_nnc_tensor_view_get_stride(dk, dkstride);
356 ccv_nnc_tensor_view_get_stride(dv, dvstride);
357 if (q_nd == 3)
358 {
359 qstride[0] = qstride[1], qstride[1] = qstride[2], qstride[2] = qstride[3];
360 kstride[0] = kstride[1], kstride[1] = kstride[2], kstride[2] = kstride[3];
361 vstride[0] = vstride[1], vstride[1] = vstride[2], vstride[2] = vstride[3];
362 gstride[0] = gstride[1], gstride[1] = gstride[2], gstride[2] = gstride[3];
363 dqstride[0] = dqstride[1], dqstride[1] = dqstride[2], dqstride[2] = dqstride[3];
364 dkstride[0] = dkstride[1], dkstride[1] = dkstride[2], dkstride[2] = dkstride[3];
365 dvstride[0] = dvstride[1], dvstride[1] = dvstride[2], dvstride[2] = dvstride[3];
366 }
367 int i[CCV_NNC_MAX_DIM + 2];
368 float* qk = ccv_nnc_stream_context_get_workspace(stream_context, sizeof(float) * 2 * kdim[1], CCV_TENSOR_CPU_MEMORY);
369 const float* const qp = q->data.f32;
370 const float* const kp = k->data.f32;
371 const float* const vp = v->data.f32;
372 const float* const gp = g->data.f32;
373 float* const dqp = dq->data.f32;
374 float* const dkp = dk->data.f32;
375 float* const dvp = dv->data.f32;
376 const float scale = cmd.info.scaled_dot_product_attention.scale;
377 const int is_causal = cmd.info.scaled_dot_product_attention.is_causal;
378 const int h_h_k_ratio = qdim[2] / kdim[2];
379 for (i[0] = 0; i[0] < qdim[0]; i[0]++)
380 {
381 const float* const qp0 = qp + i[0] * qstride[0];
382 const float* const kp0 = kp + i[0] * kstride[0];
383 const float* const vp0 = vp + i[0] * vstride[0];
384 const float* const gp0 = gp + i[0] * gstride[0];
385 float* const dqp0 = dqp + i[0] * dqstride[0];
386 float* const dkp0 = dkp + i[0] * dkstride[0];
387 float* const dvp0 = dvp + i[0] * dvstride[0];
388 for (i[1] = 0; i[1] < qdim[2]; i[1]++)
389 {
390 const float* const qp1 = qp0 + i[1] * qstride[2];
391 const float* const kp1 = kp0 + (i[1] / h_h_k_ratio) * kstride[2];
392 const float* const vp1 = vp0 + (i[1] / h_h_k_ratio) * vstride[2];
393 const float* const gp1 = gp0 + i[1] * gstride[2];
394 float* const dqp1 = dqp0 + i[1] * dqstride[2];
395 float* const dkp1 = dkp0 + (i[1] / h_h_k_ratio) * dkstride[2];
396 float* const dvp1 = dvp0 + (i[1] / h_h_k_ratio) * dvstride[2];
397 // Compute Q @ K^T
398 int x, y, k;
399 for (x = 0; x < qdim[1]; x++)
400 {
401 float* const dqp2 = dqp1 + x * dqstride[1];
402 for (k = 0; k < qdim[3]; k++)
403 dqp2[k * dqstride[3]] = 0;
404 }
405 // Only zero out when it is at 0-index.
406 if (i[1] % h_h_k_ratio == 0)
407 for (y = 0; y < kdim[1]; y++)
408 {
409 float* const dkp2 = dkp1 + y * dkstride[1];
410 for (k = 0; k < qdim[3]; k++)
411 dkp2[k * dkstride[3]] = 0;
412 }
413 // Only zero out when it is at 0-index.
414 if (i[1] % h_h_k_ratio == 0)
415 for (y = 0; y < kdim[1]; y++)
416 {
417 float* const dvp2 = dvp1 + y * dvstride[1];
418 for (k = 0; k < vdim[3]; k++)
419 dvp2[k * dvstride[3]] = 0;
420 }
421 for (x = 0; x < qdim[1]; x++)
422 {
423 const float* const qp2 = qp1 + x * qstride[1];
424 const float* const gp2 = gp1 + x * gstride[1];
425 float* const qk0 = qk;
426 float* const qks0 = qk + kdim[1];
427 for (y = 0; y < kdim[1]; y++)
428 {
429 const float* const kp2 = kp1 + y * kstride[1];
430 float v = 0;
431 for (k = 0; k < qdim[3]; k++)
432 v += qp2[k * qstride[3]] * kp2[k * kstride[3]];
433 qk0[y] = scale * v;
434 }
435 // Compute softmax on qk.
436 if (is_causal)
437 {
438 const int x_end = ccv_max(x - qdim[1] + kdim[1] + 1, 0);
439 for (y = x_end; y < kdim[1]; y++)
440 qk0[y] = 0;
441 double maxval = qk0[0];
442 for (y = 1; y < x_end; y++)
443 if (qk0[y] > maxval)
444 maxval = qk0[y];
445 double sumval = 0;
446 for (y = 0; y < x_end; y++)
447 sumval += (qk0[y] = expf(qk0[y] - maxval));
448 sumval = 1.0 / sumval;
449 for (y = 0; y < x_end; y++)
450 qk0[y] *= sumval;
451 } else {
452 double maxval = qk0[0];
453 for (y = 1; y < kdim[1]; y++)
454 if (qk0[y] > maxval)
455 maxval = qk0[y];
456 double sumval = 0;
457 for (y = 0; y < kdim[1]; y++)
458 sumval += (qk0[y] = expf(qk0[y] - maxval));
459 sumval = 1.0 / sumval;
460 for (y = 0; y < kdim[1]; y++)
461 qk0[y] *= sumval;
462 }
463 for (y = 0; y < kdim[1]; y++)
464 {
465 float* const dvp2 = dvp1 + y * dvstride[1];
466 const float v = qk0[y];
467 for (k = 0; k < vdim[3]; k++)
468 dvp2[k * dvstride[3]] += v * gp2[k * gstride[3]];
469 }
470 double sumval = 0;
471 for (y = 0; y < kdim[1]; y++)
472 {
473 const float* const vp2 = vp1 + y * vstride[1];
474 float v = 0;
475 for (k = 0; k < vdim[3]; k++)
476 v += gp2[k * gstride[3]] * vp2[k * vstride[3]];
477 qks0[y] = v;
478 sumval += v * qk0[y];
479 }
480 for (y = 0; y < kdim[1]; y++)
481 qk0[y] = (qks0[y] - sumval) * qk0[y];
482 float* const dqp2 = dqp1 + x * dqstride[1];
483 for (y = 0; y < kdim[1]; y++)
484 {
485 const float* const kp2 = kp1 + y * kstride[1];
486 float* const dkp2 = dkp1 + y * dkstride[1];
487 const float v = scale * qk0[y];
488 for (k = 0; k < qdim[3]; k++)
489 {
490 dqp2[k * dqstride[3]] += v * kp2[k * kstride[3]];
491 dkp2[k * dkstride[3]] += v * qp2[k * qstride[3]];
492 }
493 }
494 }
495 }
496 }
497 return CCV_NNC_EXEC_SUCCESS;
498}
499
500 REGISTER_COMMAND_BACKEND(CCV_NNC_SCALED_DOT_PRODUCT_ATTENTION_FORWARD, CCV_NNC_BACKEND_CPU_REF)
501{
502 registry->tensor_formats = CCV_TENSOR_FORMAT_NHWC;
503 registry->tensor_datatypes = CCV_32F;
504 registry->tensor_memory = CCV_TENSOR_CPU_MEMORY;
505 registry->algorithms = 1;
506 registry->exec = _ccv_nnc_scaled_dot_product_attention_forw;
507}
508
509 REGISTER_COMMAND_BACKEND(CCV_NNC_SCALED_DOT_PRODUCT_ATTENTION_BACKWARD, CCV_NNC_BACKEND_CPU_REF)
510{
511 registry->tensor_formats = CCV_TENSOR_FORMAT_NHWC;
512 registry->tensor_datatypes = CCV_32F;
513 registry->tensor_memory = CCV_TENSOR_CPU_MEMORY;
514 registry->algorithms = 1;
515 registry->exec = _ccv_nnc_scaled_dot_product_attention_back;
516}
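
Suggested Fix (Sketch)

A minimal sketch, assuming the intended invariant is that a non-null saved_softmax tensor always carries a non-null data.f32; this is not a confirmed upstream patch. Either state that invariant where 'ssp' is derived, or key the guard on the pointer that is actually dereferenced:

	/* Variant 1 (line 109): make the invariant explicit, so the null path
	 * is pruned where 'ssp' is derived from 'saved_softmax'. */
	float* const ssp = saved_softmax ? saved_softmax->data.f32 : 0;
	assert(!saved_softmax || ssp); // non-null tensor implies non-null data

	/* Variant 2 (line 188): guard the write-back on 'ssp2' itself; the
	 * null path can then no longer reach the store at line 190. */
	if (ssp2)
		for (y = 0; y < kdim[1]; y++)
			ssp2[y] = qk0[y];

Either variant removes the reported path; Variant 2 additionally keeps the write-back safe if a saved_softmax tensor with unallocated data is ever legal.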