Coverage Report

Created: 2019-07-03 22:50

/home/liu/buildslave/linux-x64-runtests/build/test/unit/nnc/graph.tests.c
Line
Count
Source
1
#include "case.h"
2
#include "ccv_case.h"
3
#include "ccv_nnc_case.h"
4
#include <ccv.h>
5
#include <nnc/ccv_nnc.h>
6
#include <nnc/ccv_nnc_easy.h>
7
#include "3rdparty/dsfmt/dSFMT.h"
8
9
// Per-test setup: initialize the NNC command/backend registry before each case.
TEST_SETUP()
{
	ccv_nnc_init();
}
13
14
// Custom loss used by the graph test: given the softmax output (single input
// tensor), emit its gradient for a one-hot target at class index 24, i.e.
// grad[i] = prob[i] - (i == 24). Operates on the fixed 21 * 31 * 4 element
// tensor this test constructs.
static int _ccv_nnc_custom_24_loss_exec(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
{
	assert(input_size == 1);
	assert(output_size == 1);
	const ccv_nnc_tensor_t* const prob = inputs[0];
	ccv_nnc_tensor_t* const grad = outputs[0];
	int idx;
	for (idx = 0; idx < 21 * 31 * 4; idx++)
		grad->data.f32[idx] = prob->data.f32[idx] - (idx == 24);
	return CCV_NNC_EXEC_SUCCESS;
}
25
26
TEST_CASE("run simple graph network")
{
	// Build a 4-node graph: convolution forward -> softmax -> custom one-hot
	// loss -> convolution backward. Run it end-to-end, then replay every
	// command individually with ccv_nnc_cmd_exec into fresh "v*" tensors and
	// require the graph produced bit-identical results at each stage.
	ccv_nnc_graph_t* graph = ccv_nnc_graph_new();
	ccv_nnc_tensor_t* a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 31, 21, 2), 0);
	ccv_nnc_tensor_t* b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 31, 21, 4), 0);
	ccv_nnc_cmd_t forw_cmd = CMD_CONVOLUTION_FORWARD(1, 4, 5, 3, 2);
	ccv_nnc_hint_t hint = ccv_nnc_hint_auto(forw_cmd.info, a->info, b->info);
	ccv_nnc_tensor_t* w = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4, 5, 3, 2), 0);
	ccv_nnc_tensor_t* bias = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 4), 0);
	// Fixed-seed RNG so weight initialization (and thus the test) is reproducible.
	dsfmt_t dsfmt;
	dsfmt_init_gen_rand(&dsfmt, 1);
	int i;
	for (i = 0; i < 2 * 3 * 5 * 4; i++)
		w->data.f32[i] = (dsfmt_genrand_open_close(&dsfmt) * 2 - 1) * 1.41421356237 / sqrtf(21 * 31 * 2 + 21 * 31 * 4);
	float denom = (21 * 31 * 2 - 1) * 21 * 31 * 2;
	for (i = 0; i < 21 * 31 * 2; i++)
		a->data.f32[i] = (float)(i - 21 * 31) / denom;
	for (i = 0; i < 4; i++)
		bias->data.f32[i] = 0;
	ccv_nnc_graph_exec_t forw_node = ccv_nnc_graph_exec_new(graph, forw_cmd, hint, TENSOR_LIST(a, w, bias), TENSOR_LIST(b));
	ccv_nnc_cmd_t softmax_cmd = CMD_SOFTMAX_FORWARD();
	ccv_nnc_tensor_t* m = ccv_nnc_tensor_new(0, b->info, 0);
	ccv_nnc_graph_exec_t softmax_node = ccv_nnc_graph_exec_new(graph, softmax_cmd, hint, TENSOR_LIST(b), TENSOR_LIST(m));
	ccv_nnc_tensor_t* g = ccv_nnc_tensor_new(0, b->info, 0);
	ccv_nnc_cmd_t loss_cmd = CMD_CUSTOM_FORWARD(_ccv_nnc_custom_24_loss_exec);
	ccv_nnc_graph_exec_t loss_node = ccv_nnc_graph_exec_new(graph, loss_cmd, hint, TENSOR_LIST(m), TENSOR_LIST(g));
	ccv_nnc_cmd_t back_cmd = CMD_CONVOLUTION_BACKWARD(1, 4, 2, 3, 5);
	ccv_nnc_tensor_t* gw = ccv_nnc_tensor_new(0, w->info, 0);
	ccv_nnc_tensor_t* gbias = ccv_nnc_tensor_new(0, bias->info, 0);
	ccv_nnc_tensor_t* h = ccv_nnc_tensor_new(0, a->info, 0);
	ccv_nnc_graph_exec_t back_node = ccv_nnc_graph_exec_new(graph, back_cmd, hint, TENSOR_LIST(g, a, w), TENSOR_LIST(h, gw, gbias));
	// All nodes are created, now to concat the graph.
	ccv_nnc_graph_exec_concat(graph, forw_node, softmax_node);
	ccv_nnc_graph_exec_concat(graph, softmax_node, loss_node);
	ccv_nnc_graph_exec_concat(graph, loss_node, back_node);
	ccv_nnc_graph_exec_t source_nodes[] = {
		forw_node,
	};
	ccv_nnc_graph_exec_t destination_nodes[] = {
		back_node,
	};
	ccv_nnc_graph_run(graph, 0, 0, 0, source_nodes, 1, destination_nodes, 1);
	ccv_nnc_graph_free(graph);
	/* At this point, do the computation with a different set of tensors and then compare */
	ccv_nnc_tensor_t* vb = ccv_nnc_tensor_new(0, b->info, 0);
	ccv_nnc_cmd_exec(forw_cmd, hint, 0, TENSOR_LIST(a, w, bias), TENSOR_LIST(vb), 0);
	REQUIRE_TENSOR_EQ(b, vb, "Graph computed forward pass result should be the same.");
	ccv_nnc_tensor_t* vm = ccv_nnc_tensor_new(0, b->info, 0);
	ccv_nnc_cmd_exec(softmax_cmd, hint, 0, TENSOR_LIST(vb), TENSOR_LIST(vm), 0);
	REQUIRE_TENSOR_EQ(m, vm, "Graph computed softmax pass result should be the same.");
	ccv_nnc_tensor_t* vg = ccv_nnc_tensor_new(0, g->info, 0);
	// Replicates _ccv_nnc_custom_24_loss_exec: gradient of one-hot target at index 24.
	for (i = 0; i < 21 * 31 * 4; i++)
		vg->data.f32[i] = vm->data.f32[i] - (i == 24);
	REQUIRE_TENSOR_EQ(g, vg, "Graph computed custom loss result should be the same.");
	ccv_nnc_tensor_t* vgw = ccv_nnc_tensor_new(0, w->info, 0);
	ccv_nnc_tensor_t* vgbias = ccv_nnc_tensor_new(0, bias->info, 0);
	ccv_nnc_tensor_t* vh = ccv_nnc_tensor_new(0, h->info, 0);
	ccv_nnc_cmd_exec(back_cmd, hint, 0, TENSOR_LIST(vg, a, w), TENSOR_LIST(vh, vgw, vgbias), 0);
	// BUGFIX: the two failure messages below were swapped — gbias is the bias
	// delta and gw is the weight delta; a failure used to report the wrong tensor.
	REQUIRE_TENSOR_EQ(gbias, vgbias, "Graph computed backward pass bias delta should be the same.");
	REQUIRE_TENSOR_EQ(gw, vgw, "Graph computed backward pass weight delta should be the same.");
	REQUIRE_TENSOR_EQ(h, vh, "Graph computed backward pass result should be the same.");
	// free all the tensor data.
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(b);
	ccv_nnc_tensor_free(m);
	ccv_nnc_tensor_free(g);
	ccv_nnc_tensor_free(h);
	ccv_nnc_tensor_free(w);
	ccv_nnc_tensor_free(bias);
	ccv_nnc_tensor_free(gw);
	ccv_nnc_tensor_free(gbias);
	ccv_nnc_tensor_free(vb);
	ccv_nnc_tensor_free(vm);
	ccv_nnc_tensor_free(vg);
	ccv_nnc_tensor_free(vh);
	ccv_nnc_tensor_free(vgw);
	ccv_nnc_tensor_free(vgbias);
}
104
105
#include "case_main.h"