Coverage Report

Created: 2019-07-03 22:50

/home/liu/buildslave/linux-x64-runtests/build/test/unit/nnc/backward.tests.c
Line
Count
Source
1
#include "case.h"
2
#include "ccv_case.h"
3
#include "ccv_nnc_case.h"
4
#include <ccv.h>
5
#include <nnc/ccv_nnc.h>
6
#include <nnc/ccv_nnc_easy.h>
7
8
TEST_SETUP()
9
{
10
  ccv_nnc_init();
11
}
12
13
TEST_CASE("convolutional network of 3x5 on 21x31 for error backward propagation")
14
1
{
15
1
  ccv_nnc_tensor_t* a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 31, 21, 3), 0);
16
1
  ccv_nnc_tensor_t* b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 31, 21, 32), 0);
17
1
  ccv_nnc_cmd_t forw_cmd = CMD_CONVOLUTION_FORWARD(1, 32, 5, 3, 3);
18
1
  ccv_nnc_hint_t hint = ccv_nnc_hint_auto(forw_cmd.info, a->info, b->info);
19
1
  ccv_nnc_tensor_t* w = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 32, 5, 3, 3), 0);
20
1
  ccv_nnc_tensor_t* bias = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 32), 0);
21
1
  int i, j, k;
22
1.44k
  for (i = 0; i < 3 * 5 * 3 * 32; i++)   [increment count: 1.44k]
23
1.44k
    w->data.f32[i] = 2;
24
1.95k
  for (i = 0; i < 21 * 31 * 3; i++)   [increment count: 1.95k]
25
1.95k
    a->data.f32[i] = 1;
26
33
  for (i = 0; i < 32; i++)   [increment count: 32]
27
32
    bias->data.f32[i] = 0;
28
1
  ccv_nnc_cmd_exec(forw_cmd, hint, 0, TENSOR_LIST(a, w, bias), TENSOR_LIST(b), 0);
29
1
  ccv_nnc_cmd_t back_cmd = CMD_CONVOLUTION_BACKWARD(1, 32, 5, 3, 3);
30
1
  ccv_nnc_tensor_t* gw = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 32, 5, 3, 3), 0);
31
1
  ccv_nnc_tensor_t* gbias = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 32), 0);
32
1
  ccv_nnc_tensor_t* g = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 31, 21, 32), 0);
33
1
  ccv_nnc_tensor_t* h = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 31, 21, 3), 0);
34
20.8k
  for (i = 0; i < 21 * 31 * 32; i++)   [increment count: 20.8k]
35
20.8k
    g->data.f32[i] = 1;
36
1
  ccv_nnc_cmd_exec(back_cmd, hint, 0, TENSOR_LIST(g, a, w), TENSOR_LIST(h, gw, gbias), 0);
37
1
  ccv_dense_matrix_t* dd = ccv_dense_matrix_new(31, 21, CCV_32F | CCV_C3, 0, 0);
38
32
  for (i = 0; i < 31; i++)   [increment count: 31]
39
682
    for (j = 0; j < 21; j++)   [init count: 31, increment count: 651]
40
651
      dd->data.f32[(i * 21 + j) * 3] =
41
651
      dd->data.f32[(i * 21 + j) * 3 + 1] =
42
651
      dd->data.f32[(i * 21 + j) * 3 + 2] = 32 * 2 * (5 + ccv_min(i - 2, 0) + ccv_min(28 - i, 0)) * (3 + ccv_min(j - 1, 0) + ccv_min(19 - j, 0));
43
1
  REQUIRE_MATRIX_EQ(h, dd, "propagated error doesn't match the expected value");
44
1
  ccv_matrix_free(dd);
45
1
  float* dw = (float*)ccmalloc(sizeof(float) * 5 * 3 * 3 * 32);
46
33
  for (k = 0; k < 32; k++)   [increment count: 32]
47
192
    for (i = 0; i < 5; i++)   [init count: 32, increment count: 160]
48
640
      for (j = 0; j < 3; j++)   [init count: 160, increment count: 480]
49
480
        dw[k * 5 * 3 * 3 + (i * 3 + j) * 3] =
50
480
        dw[k * 5 * 3 * 3 + (i * 3 + j) * 3 + 1] =
51
480
        dw[k * 5 * 3 * 3 + (i * 3 + j) * 3 + 2] = (31 - abs(i - 2)) * (21 - abs(j - 1));
52
1
  REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, dw, gw->data.f32, 5 * 3 * 3 * 32, 1e-4, "weight gradient doesn't match the expected value");
53
1
  ccfree(dw);
54
1
  float* dbias = (float*)ccmalloc(sizeof(float) * 32);
55
33
  for (i = 0; i < 32; i++)   [increment count: 32]
56
32
    dbias[i] = 21 * 31;
57
1
  REQUIRE_ARRAY_EQ_WITH_TOLERANCE(float, dbias, gbias->data.f32, 32, 1e-4, "bias gradient doesn't match the expected value");
58
1
  ccfree(dbias);
59
1
  ccv_nnc_tensor_free(a);
60
1
  ccv_nnc_tensor_free(b);
61
1
  ccv_nnc_tensor_free(g);
62
1
  ccv_nnc_tensor_free(h);
63
1
  ccv_nnc_tensor_free(w);
64
1
  ccv_nnc_tensor_free(bias);
65
1
  ccv_nnc_tensor_free(gw);
66
1
  ccv_nnc_tensor_free(gbias);
67
1
}
68
69
TEST_CASE("full connect back propagation")
70
1
{
71
1
  ccv_nnc_tensor_t* a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 5), 0);
72
1
  ccv_nnc_tensor_t* b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4), 0);
73
1
  ccv_nnc_tensor_t* bias = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4), 0);
74
1
  bias->data.f32[0] = 1;
75
1
  bias->data.f32[1] = 4;
76
1
  bias->data.f32[2] = 2;
77
1
  bias->data.f32[3] = -1;
78
1
  a->data.f32[0] = 5;
79
1
  a->data.f32[1] = -3;
80
1
  a->data.f32[2] = 10;
81
1
  a->data.f32[3] = 11;
82
1
  a->data.f32[4] = -1;
83
1
  float m[] = {
84
1
    0.5, 0.2, -0.3, 2, 4,
85
1
    1, 8, 2, 8, -1,
86
1
    0, 10, -1, -2, 3,
87
1
    4, 7, 8, 10, 0
88
1
  };
89
1
  float ho[] = {
90
1
    0.5 + 4 - 4,
91
1
    0.2 + 4 * 8 + 2 * 10 - 7,
92
1
    -0.3 + 4 * 2 - 2 - 8,
93
1
    2 + 4 * 8 - 2 * 2 - 10,
94
1
    4 - 4 + 2 * 3,
95
1
  };
96
1
  ccv_nnc_tensor_t* w = ccv_nnc_tensor_new(m, CPU_TENSOR_NHWC(32F, 4, 5), 0);
97
1
  ccv_nnc_cmd_t forw_cmd = CMD_GEMM_FORWARD(4);
98
1
  ccv_nnc_cmd_exec(forw_cmd, ccv_nnc_no_hint, 0, TENSOR_LIST(a, w, bias), TENSOR_LIST(b), 0);
99
1
  float bo[] = {
100
1
    0.5 * 5 - 0.2 * 3 - 0.3 * 10 + 2 * 11 - 4 + 1,
101
1
    1 * 5 - 8 * 3 + 2 * 10 + 8 * 11 + 1 + 4,
102
1
    -10 * 3 - 10 - 2 * 11 - 3 + 2,
103
1
    4 * 5 - 7 * 3 + 8 * 10 + 10 * 11 - 1
104
1
  };
105
1
  ccv_nnc_tensor_t bot = ccv_nnc_tensor(bo, CPU_TENSOR_NHWC(32F, 4), 0);
106
1
  REQUIRE_TENSOR_EQ(b, &bot, "forward propagation result should match expected value");
107
1
  ccv_nnc_cmd_t back_cmd = CMD_GEMM_BACKWARD(4);
108
1
  ccv_nnc_tensor_t* gw = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4, 5), 0);
109
1
  ccv_nnc_tensor_t* gbias = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4), 0);
110
1
  ccv_nnc_tensor_t* h = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 5), 0);
111
1
  // Pass in bias as gradient
112
1
  ccv_nnc_cmd_exec(back_cmd, ccv_nnc_no_hint, 0, TENSOR_LIST(bias, a, w), TENSOR_LIST(h, gw, gbias), 0);
113
1
  // Therefore, gradient bias should match bias.
114
1
  REQUIRE_TENSOR_EQ(gbias, bias, "bias gradients should match expected value");
115
1
  float go[] = {
116
1
    5, -3, 10, 11, -1,
117
1
    4 * 5, -4 * 3, 4 * 10, 4 * 11, -4,
118
1
    2 * 5, -2 * 3, 2 * 10, 2 * 11, -2,
119
1
    -5, 3, -10, -11, 1
120
1
  };
121
1
  ccv_nnc_tensor_t got = ccv_nnc_tensor(go, CPU_TENSOR_NHWC(32F, 4, 5), 0);
122
1
  REQUIRE_TENSOR_EQ(gw, &got, "weight gradients should match expected value");
123
1
  ccv_nnc_tensor_t hot = ccv_nnc_tensor(ho, CPU_TENSOR_NHWC(32F, 5), 0);
124
1
  REQUIRE_TENSOR_EQ(h, &hot, "back propagation error should match expected value");
125
1
  ccv_nnc_tensor_free(a);
126
1
  ccv_nnc_tensor_free(b);
127
1
  ccv_nnc_tensor_free(w);
128
1
  ccv_nnc_tensor_free(bias);
129
1
  ccv_nnc_tensor_free(h);
130
1
  ccv_nnc_tensor_free(gw);
131
1
  ccv_nnc_tensor_free(gbias);
132
1
}
133
134
TEST_CASE("full connect back propagation with batch = 2")
135
1
{
136
1
  ccv_nnc_tensor_t* a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 5), 0);
137
1
  ccv_nnc_tensor_t* b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 4), 0);
138
1
  ccv_nnc_tensor_t* bias = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4), 0);
139
1
  bias->data.f32[0] = 1;
140
1
  bias->data.f32[1] = 4;
141
1
  bias->data.f32[2] = 2;
142
1
  bias->data.f32[3] = -1;
143
1
  a->data.f32[0] = 5;
144
1
  a->data.f32[1] = -3;
145
1
  a->data.f32[2] = 10;
146
1
  a->data.f32[3] = 11;
147
1
  a->data.f32[4] = -1;
148
1
  a->data.f32[0 + 5] = -5;
149
1
  a->data.f32[1 + 5] = 3;
150
1
  a->data.f32[2 + 5] = -10;
151
1
  a->data.f32[3 + 5] = -11;
152
1
  a->data.f32[4 + 5] = 1;
153
1
  float m[] = {
154
1
    0.5, 0.2, -0.3, 2, 4,
155
1
    1, 8, 2, 8, -1,
156
1
    0, 10, -1, -2, 3,
157
1
    4, 7, 8, 10, 0
158
1
  };
159
1
  float ho[] = {
160
1
    -(0.5 + 4 - 4),
161
1
    -(0.2 + 4 * 8 + 2 * 10 - 7),
162
1
    -(-0.3 + 4 * 2 - 2 - 8),
163
1
    -(2 + 4 * 8 - 2 * 2 - 10),
164
1
    -(4 - 4 + 2 * 3),
165
1
    0.5 + 4 - 4,
166
1
    0.2 + 4 * 8 + 2 * 10 - 7,
167
1
    -0.3 + 4 * 2 - 2 - 8,
168
1
    2 + 4 * 8 - 2 * 2 - 10,
169
1
    4 - 4 + 2 * 3,
170
1
  };
171
1
  ccv_nnc_tensor_t* w = ccv_nnc_tensor_new(m, CPU_TENSOR_NHWC(32F, 4, 5), 0);
172
1
  ccv_nnc_cmd_t forw_cmd = CMD_GEMM_FORWARD(4);
173
1
  ccv_nnc_cmd_exec(forw_cmd, ccv_nnc_no_hint, 0, TENSOR_LIST(a, w, bias), TENSOR_LIST(b), 0);
174
1
  float bo[] = {
175
1
    0.5 * 5 - 0.2 * 3 - 0.3 * 10 + 2 * 11 - 4 + 1,
176
1
    1 * 5 - 8 * 3 + 2 * 10 + 8 * 11 + 1 + 4,
177
1
    -10 * 3 - 10 - 2 * 11 - 3 + 2,
178
1
    4 * 5 - 7 * 3 + 8 * 10 + 10 * 11 - 1,
179
1
    -(0.5 * 5 - 0.2 * 3 - 0.3 * 10 + 2 * 11 - 4) + 1,
180
1
    -(1 * 5 - 8 * 3 + 2 * 10 + 8 * 11 + 1) + 4,
181
1
    -(-10 * 3 - 10 - 2 * 11 - 3) + 2,
182
1
    -(4 * 5 - 7 * 3 + 8 * 10 + 10 * 11) - 1
183
1
  };
184
1
  ccv_nnc_tensor_t bot = ccv_nnc_tensor(bo, CPU_TENSOR_NHWC(32F, 2, 4), 0);
185
1
  REQUIRE_TENSOR_EQ(b, &bot, "forward propagation result should match expected value");
186
1
  ccv_nnc_cmd_t back_cmd = CMD_GEMM_BACKWARD(4);
187
1
  ccv_nnc_tensor_t* gw = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4, 5), 0);
188
1
  ccv_nnc_tensor_t* gbias = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4), 0);
189
1
  ccv_nnc_tensor_t* h = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 5), 0);
190
1
  ccv_nnc_tensor_t* g = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2, 4), 0);
191
1
  int i;
192
5
  for (i = 0; i < 4; i++)   [increment count: 4]
193
4
  {
194
4
    g->data.f32[i] = -bias->data.f32[i];
195
4
    g->data.f32[i + 4] = bias->data.f32[i];
196
4
  }
197
1
  // Pass in bias as gradient
198
1
  ccv_nnc_cmd_exec(back_cmd, ccv_nnc_no_hint, 0, TENSOR_LIST(g, a, w), TENSOR_LIST(h, gw, gbias), 0);
199
1
  // Therefore, gradient bias should match bias.
200
5
  for (i = 0; i < 4; i++)   [increment count: 4]
201
4
    bias->data.f32[i] = 0;
202
1
  REQUIRE_TENSOR_EQ(gbias, bias, "bias gradients should match expected value");
203
1
  float go[] = {
204
1
    5, -3, 10, 11, -1,
205
1
    4 * 5, -4 * 3, 4 * 10, 4 * 11, -4,
206
1
    2 * 5, -2 * 3, 2 * 10, 2 * 11, -2,
207
1
    -5, 3, -10, -11, 1
208
1
  };
209
21
  for (i = 0; i < 5 * 4; i++)   [increment count: 20]
210
20
    go[i] = -go[i] * 2; // Because the gradient is negative in the first example, and the input is negative in the second example, we basically doubled the weight gradients.
211
1
  ccv_nnc_tensor_t got = ccv_nnc_tensor(go, CPU_TENSOR_NHWC(32F, 4, 5), 0);
212
1
  REQUIRE_TENSOR_EQ(gw, &got, "weight gradients should match expected value");
213
1
  ccv_nnc_tensor_t hot = ccv_nnc_tensor(ho, CPU_TENSOR_NHWC(32F, 2, 5), 0);
214
1
  REQUIRE_TENSOR_EQ(h, &hot, "back propagation error should match expected value");
215
1
  ccv_nnc_tensor_free(a);
216
1
  ccv_nnc_tensor_free(b);
217
1
  ccv_nnc_tensor_free(w);
218
1
  ccv_nnc_tensor_free(bias);
219
1
  ccv_nnc_tensor_free(g);
220
1
  ccv_nnc_tensor_free(h);
221
1
  ccv_nnc_tensor_free(gw);
222
1
  ccv_nnc_tensor_free(gbias);
223
1
}
224
225
#include "case_main.h"