Coverage Report

Created: 2024-08-18 16:21

/home/liu/actions-runner/_work/ccv/ccv/test/unit/nnc/roi_align.tests.c
Line   Count  Source
   1       -  #include "case.h"
   2       -  #include "ccv_case.h"
   3       -  #include "ccv_nnc_case.h"
   4       -  #include <ccv.h>
   5       -  #include <nnc/ccv_nnc.h>
   6       -  #include <nnc/ccv_nnc_easy.h>
   7       -
   8       -  TEST_SETUP()
   9       -  {
  10       -    ccv_nnc_init();
  11       -  }
  12       -
  13       -  TEST_CASE("compare ROI align forward")
  14       1  {
  15       1    ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 12, 24, 3), 0);
  16       1    ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4), 0);
  17       1    ccv_nnc_tensor_t* const c = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4, 4, 3), 0);
  18       1    int i, j;
  19     289    for (i = 0; i < 12 * 24; i++)
  20   1.15k      for (j = 0; j < 3; j++)
  21     864        a->data.f32[i * 3 + j] = i;
  22       1    b->data.f32[0] = 0 / 24; // x
  23       1    b->data.f32[1] = 0 / 12; // y
  24       1    b->data.f32[2] = 1; // w
  25       1    b->data.f32[3] = 1; // h
  26       -    // This should look like no bilinear filtering at all.
  27       1    ccv_nnc_cmd_exec(CMD_ROI_ALIGN_FORWARD(4, 4), ccv_nnc_no_hint, 0, TENSOR_LIST(a, b), TENSOR_LIST(c), 0);
  28       1    ccv_nnc_tensor_t* const ct = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4, 4, 3), 0);
  29       5    for (i = 0; i < 4; i++)
  30       4    {
  31      20      for (j = 0; j < 4; j++)
  32      16      {
  33      16        int x, y;
  34      16        float v = 0;
  35      64        for (y = 0; y < 3; y++)
  36     336          for (x = 0; x < 6; x++)
  37     288            v += a->data.f32[(i * 3 + y) * 24 * 3 + (j * 6 + x) * 3];
  38      16        ct->data.f32[(i * 4 + j) * 3] =
  39      16          ct->data.f32[(i * 4 + j) * 3 + 1] =
  40      16          ct->data.f32[(i * 4 + j) * 3 + 2] = v / (3 * 6);
  41      16      }
  42       4    }
  43       1    REQUIRE_TENSOR_EQ(c, ct, "should have no loss of accuracy");
  44       1    ccv_nnc_tensor_free(a);
  45       1    ccv_nnc_tensor_free(b);
  46       1    ccv_nnc_tensor_free(c);
  47       1    ccv_nnc_tensor_free(ct);
  48       1  }
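
Note on the test above: the input value at pixel (y, x) is its linear index y * 24 + x, replicated across all 3 channels, and the box (x = 0, y = 0, w = 1, h = 1) covers the whole 12x24 image (the 0 / 24 and 0 / 12 initializers are integer divisions that evaluate to 0, written to show the pixel-over-width intent). Each of the 4x4 output bins therefore maps onto exactly one disjoint 3x6 pixel block, and ROI align degenerates to plain block averaging, which is why the hand-built ct matches exactly. A minimal standalone sketch of that reference (no ccv dependency; purely illustrative):

  #include <stdio.h>

  int main(void)
  {
    int i, j, x, y;
    for (i = 0; i < 4; i++)
      for (j = 0; j < 4; j++)
      {
        /* Bin (i, j) averages the 3x6 block starting at row i * 3,
         * column j * 6; the input value at (y, x) is y * 24 + x. */
        float v = 0;
        for (y = 0; y < 3; y++)
          for (x = 0; x < 6; x++)
            v += (i * 3 + y) * 24 + (j * 6 + x);
        printf("bin (%d, %d) = %g\n", i, j, v / (3 * 6));
      }
    return 0;
  }
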
  49       -
  50       -  TEST_CASE("compare ROI align forward with average pool")
  51       1  {
  52       1    ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 12, 24, 3), 0);
  53       1    ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4), 0);
  54       1    ccv_nnc_tensor_t* const c = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4, 4, 3), 0);
  55       1    int i, j;
  56     289    for (i = 0; i < 12 * 24; i++)
  57   1.15k      for (j = 0; j < 3; j++)
  58     864        a->data.f32[i * 3 + j] = i;
  59       1    b->data.f32[0] = 0 / 24; // x
  60       1    b->data.f32[1] = 0 / 12; // y
  61       1    b->data.f32[2] = 1; // w
  62       1    b->data.f32[3] = 1; // h
  63       -    // This should look like no bilinear filtering at all.
  64       1    ccv_nnc_cmd_exec(CMD_ROI_ALIGN_FORWARD(4, 4), ccv_nnc_no_hint, 0, TENSOR_LIST(a, b), TENSOR_LIST(c), 0);
  65       1    ccv_nnc_tensor_t* const ct = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4, 4, 3), 0);
  66       1    ccv_nnc_cmd_exec(CMD_AVERAGE_POOL_FORWARD(3, 6), HINT((3, 6)), 0, TENSOR_LIST(a), TENSOR_LIST(ct), 0);
  67       1    REQUIRE_TENSOR_EQ(c, ct, "should have no loss of accuracy");
  68       1    ccv_nnc_tensor_free(a);
  69       1    ccv_nnc_tensor_free(b);
  70       1    ccv_nnc_tensor_free(c);
  71       1    ccv_nnc_tensor_free(ct);
  72       1  }
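
This second test replaces the hand-written reference with the library's own pooling. Assuming HINT((3, 6)) supplies the stride for CMD_AVERAGE_POOL_FORWARD(3, 6) with no padding (how this test appears to use it), the 3x6 kernel tiles the 12x24 input exactly, so both commands compute the same sixteen block means. A quick worked size check under that assumption:

  #include <stdio.h>

  int main(void)
  {
    /* Standard pooling output size with kernel k, stride s, no padding:
     * (in - k) / s + 1. A 3x6 kernel at stride (3, 6) tiles 12x24 exactly. */
    int out_h = (12 - 3) / 3 + 1; /* = 4 */
    int out_w = (24 - 6) / 6 + 1; /* = 4 */
    printf("pooled map: %dx%d\n", out_h, out_w);
    return 0;
  }
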
  73       -
  74       -  TEST_CASE("compare ROI align backward with average pool")
  75       1  {
  76       1    ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 12, 24, 3), 0);
  77       1    ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4), 0);
  78       1    ccv_nnc_tensor_t* const c = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4, 4, 3), 0);
  79       1    int i, j;
  80      17    for (i = 0; i < 4 * 4; i++)
  81      64      for (j = 0; j < 3; j++)
  82      48        c->data.f32[i * 3 + j] = i;
  83       1    b->data.f32[0] = 0 / 24; // x
  84       1    b->data.f32[1] = 0 / 12; // y
  85       1    b->data.f32[2] = 1; // w
  86       1    b->data.f32[3] = 1; // h
  87       -    // This should look like no bilinear filtering at all.
  88       1    ccv_nnc_cmd_exec(CMD_ROI_ALIGN_BACKWARD(4, 4), ccv_nnc_no_hint, 0, TENSOR_LIST(c, 0, b), TENSOR_LIST(a), 0);
  89       1    ccv_nnc_tensor_t* const at = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 12, 24, 3), 0);
  90       1    ccv_nnc_cmd_exec(CMD_AVERAGE_POOL_BACKWARD(3, 6), HINT((3, 6)), 0, TENSOR_LIST(c), TENSOR_LIST(at), 0);
  91       1    REQUIRE_TENSOR_EQ(a, at, "should have no loss of accuracy");
  92       1    ccv_nnc_tensor_free(a);
  93       1    ccv_nnc_tensor_free(b);
  94       1    ccv_nnc_tensor_free(c);
  95       1    ccv_nnc_tensor_free(at);
  96       1  }
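
For the backward direction the identity is just as direct: a non-overlapping 3x6 average pool spreads each output gradient uniformly over the 18 pixels of its bin, so pixel (y, x) receives g_out[y / 3][x / 6] / 18. A standalone sketch of that reference (illustrative names, not a ccv API):

  /* Backward of a non-overlapping 3x6 average pool over a 12x24 map. */
  static void avg_pool_3x6_backward(const float g_out[4][4], float g_in[12][24])
  {
    int x, y;
    for (y = 0; y < 12; y++)
      for (x = 0; x < 24; x++)
        g_in[y][x] = g_out[y / 3][x / 6] / (3 * 6);
  }

Since the ROI again spans the whole image, CMD_ROI_ALIGN_BACKWARD is expected to produce the same uniform redistribution, which is what the REQUIRE_TENSOR_EQ asserts.
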
  97       -
  98       -  TEST_CASE("compare ROI align forward with average pool, batch of 2")
  99       1  {
 100       1    ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 12, 24, 3), 0);
 101       1    ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4), 0);
 102       1    ccv_nnc_tensor_t* const c = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 4, 4, 3), 0);
 103       1    int i, j, k;
 104       3    for (k = 0; k < 2; k++)
 105     578      for (i = 0; i < 12 * 24; i++)
 106   2.30k        for (j = 0; j < 3; j++)
 107   1.72k          a->data.f32[k * 12 * 24 * 3 + i * 3 + j] = i;
 108       1    b->data.f32[0] = 0 / 24; // x
 109       1    b->data.f32[1] = 0 / 12; // y
 110       1    b->data.f32[2] = 1; // w
 111       1    b->data.f32[3] = 1; // h
 112       -    // This should look like no bilinear filtering at all.
 113       1    ccv_nnc_cmd_exec(CMD_ROI_ALIGN_FORWARD(4, 4), ccv_nnc_no_hint, 0, TENSOR_LIST(a, b), TENSOR_LIST(c), 0);
 114       1    ccv_nnc_tensor_t* const at = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 12, 24, 3), 0);
 115       1    ccv_nnc_tensor_t* const ct = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4, 4, 3), 0);
 116     289    for (i = 0; i < 12 * 24; i++)
 117   1.15k      for (j = 0; j < 3; j++)
 118     864        at->data.f32[i * 3 + j] = i;
 119       1    ccv_nnc_cmd_exec(CMD_AVERAGE_POOL_FORWARD(3, 6), HINT((3, 6)), 0, TENSOR_LIST(at), TENSOR_LIST(ct), 0);
 120       1    REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, ct->data.f32, c->data.f32, 4 * 4 * 3, 1e-5, "should be equal");
 121       1    REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, ct->data.f32, c->data.f32 + 4 * 4 * 3, 4 * 4 * 3, 1e-5, "should be equal");
 122       1    ccv_nnc_tensor_free(a);
 123       1    ccv_nnc_tensor_free(at);
 124       1    ccv_nnc_tensor_free(b);
 125       1    ccv_nnc_tensor_free(c);
 126       1    ccv_nnc_tensor_free(ct);
 127       1  }
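
In the batched forward test, both images of a are filled with the same per-image pattern and the single full-image box is applied to each batch item, so both 4 * 4 * 3 slices of c must equal the one single-image reference ct; the two REQUIRE_ARRAY checks read slice 0 at c->data.f32 and slice 1 at c->data.f32 + 4 * 4 * 3. A sketch of that slice comparison (hypothetical helper, not part of the test framework):

  #include <math.h>

  /* Compare every batch slice of a contiguous (batch, per_item) buffer
   * against one reference slice, within a tolerance. */
  static int batch_slices_match(const float* batched, const float* ref,
    int batch, int per_item, float tol)
  {
    int k, i;
    for (k = 0; k < batch; k++)
      for (i = 0; i < per_item; i++)
        if (fabsf(batched[k * per_item + i] - ref[i]) > tol)
          return 0;
    return 1;
  }

Called as batch_slices_match(c->data.f32, ct->data.f32, 2, 4 * 4 * 3, 1e-5), it performs the same pair of checks in one pass.
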
 128       -
 129       -  TEST_CASE("compare ROI align backward with average pool, batch of 2")
 130       1  {
 131       1    ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 12, 24, 3), 0);
 132       1    ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 4), 0);
 133       1    ccv_nnc_tensor_t* const c = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 4, 4, 3), 0);
 134       1    int i, j, k;
 135       3    for (k = 0; k < 2; k++)
 136      34      for (i = 0; i < 4 * 4; i++)
 137     128        for (j = 0; j < 3; j++)
 138      96          c->data.f32[k * 4 * 4 * 3 + i * 3 + j] = i;
 139       1    b->data.f32[0] = 0 / 24; // x
 140       1    b->data.f32[1] = 0 / 12; // y
 141       1    b->data.f32[2] = 1; // w
 142       1    b->data.f32[3] = 1; // h
 143       1    b->data.f32[4] = 0 / 24; // x
 144       1    b->data.f32[5] = 0 / 12; // y
 145       1    b->data.f32[6] = 1; // w
 146       1    b->data.f32[7] = 1; // h
 147       -    // This should look like no bilinear filtering at all.
 148       1    ccv_nnc_cmd_exec(CMD_ROI_ALIGN_BACKWARD(4, 4), ccv_nnc_no_hint, 0, TENSOR_LIST(c, 0, b), TENSOR_LIST(a), 0);
 149       1    ccv_nnc_tensor_t* const at = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 12, 24, 3), 0);
 150       1    ccv_nnc_tensor_t* const ct = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4, 4, 3), 0);
 151      17    for (i = 0; i < 4 * 4; i++)
 152      64      for (j = 0; j < 3; j++)
 153      48        ct->data.f32[i * 3 + j] = i;
 154       1    ccv_nnc_cmd_exec(CMD_AVERAGE_POOL_BACKWARD(3, 6), HINT((3, 6)), 0, TENSOR_LIST(ct), TENSOR_LIST(at), 0);
 155       1    REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, at->data.f32, a->data.f32, 12 * 24 * 3, 1e-5, "should be equal");
 156       1    REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, at->data.f32, a->data.f32 + 12 * 24 * 3, 12 * 24 * 3, 1e-5, "should be equal");
 157       1    ccv_nnc_tensor_free(a);
 158       1    ccv_nnc_tensor_free(b);
 159       1    ccv_nnc_tensor_free(c);
 160       1    ccv_nnc_tensor_free(ct);
 161       1    ccv_nnc_tensor_free(at);
 162       1  }
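
This last test is the only one that passes one box per batch item: b is (2, 4), laid out as consecutive (x, y, w, h) rows, so b->data.f32[4..7] holds the second box. Judging by the 0 / 24 and 0 / 12 initializers, the coordinates are normalized by image width and height, so (0, 0, 1, 1) denotes a full-image box; a hypothetical helper making that layout explicit:

  /* Fill an (n, 4) ROI buffer with full-image boxes, assuming coordinates
   * normalized so (0, 0, 1, 1) covers the whole image. */
  static void fill_full_image_rois(float* roi, int n)
  {
    int k;
    for (k = 0; k < n; k++)
    {
      roi[k * 4 + 0] = 0; /* x */
      roi[k * 4 + 1] = 0; /* y */
      roi[k * 4 + 2] = 1; /* w */
      roi[k * 4 + 3] = 1; /* h */
    }
  }
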
 163       -
 164       -  #include "case_main.h"