Coverage Report

Created: 2019-07-03 22:50

/home/liu/buildslave/linux-x64-runtests/build/lib/nnc/ccv_nnc_graph_while.c
Line
Count
Source (jump to first uncovered line)
1
#include "ccv_nnc.h"
2
#include "ccv_nnc_easy.h"
3
#include "ccv_nnc_internal.h"
4
#include "ccv_internal.h"
5
#include "_ccv_nnc_graph.h"
6
7
#pragma mark - Level-3.5 API
8
9
void ccv_nnc_tensor_multiview(ccv_nnc_tensor_t* data[], const uint8_t kind, const uint16_t repeat, const ccv_nnc_graph_t* const graph, ccv_nnc_tensor_multiview_t* const tensor_multiview)
{
	// Initialize a multi-view tensor presenting `repeat + kind` underlying views,
	// anchored to `graph` (the while-graph whose iteration selects the active view).
	assert(kind == CCV_NNC_MULTIVIEW_K0N || kind == CCV_NNC_MULTIVIEW_K1N);
	assert(repeat > 0);
	const int view_count = repeat + kind;
	tensor_multiview->type = CCV_TENSOR_MULTIVIEW;
	tensor_multiview->kind = kind;
	tensor_multiview->repeat = repeat;
	tensor_multiview->anchor = (intptr_t)graph;
	tensor_multiview->it = 0;
	tensor_multiview->p = 0;
	tensor_multiview->offset = 0;
	tensor_multiview->sp = 0;
	// Spill the view array to the heap only when it doesn't fit the inline storage.
	if (view_count <= sizeof(tensor_multiview->_inline_data) / sizeof(tensor_multiview->_inline_data[0]))
		tensor_multiview->_heap_data = 0;
	else
		tensor_multiview->_heap_data = ccmalloc(sizeof(ccv_nnc_tensor_t*) * view_count);
	int idx;
	// Currently, only CCV_NNC_MULTIVIEW_K12 uses 3 tensors.
	for (idx = 0; idx < view_count; idx++)
	{
		ccv_nnc_tensor_t* const view = data[idx];
		CCV_NNC_MULTIVIEW_DATA(tensor_multiview)[idx] = view;
		// Wire the parent pointer into a nested multi-view (placeholders excluded).
		if (view != CCV_NNC_TENSOR_PLACEHOLDER && CCV_IS_TENSOR_MULTIVIEW((ccv_nnc_tensor_multiview_t*)view))
			((ccv_nnc_tensor_multiview_t*)view)->p = tensor_multiview;
	}
}
32
33
void ccv_nnc_tensor_multiview_free(const ccv_nnc_tensor_multiview_t tensor_multiview)
34
65
{
35
65
  if (tensor_multiview.sp)
36
36
    ccv_array_free(tensor_multiview.sp);
37
65
  if (tensor_multiview._heap_data)
38
65
    
ccfree0
(tensor_multiview._heap_data)0
;
39
65
}
40
41
void ccv_nnc_tensor_synchronize_to_multiview(ccv_nnc_tensor_multiview_t* const tensor_multiview, ccv_nnc_tensor_t* const tensor)
{
	// Register `tensor` so that ccv_nnc_tensor_multiview_synchronize repoints
	// its data to the multi-view's active view. The list is created lazily.
	if (tensor_multiview->sp == 0)
		tensor_multiview->sp = ccv_array_new(sizeof(ccv_nnc_tensor_t*), 0, 0);
	ccv_nnc_tensor_t* tensor_ptr = tensor;
	// The array stores the pointer value itself.
	ccv_array_push(tensor_multiview->sp, &tensor_ptr);
}
47
48
void ccv_nnc_tensor_multiview_synchronize(ccv_nnc_tensor_multiview_t* const tensor_multiview)
{
	// Propagate the currently-selected view's base pointer to every tensor
	// registered for synchronization on this multi-view and on all of its
	// ancestors up the parent chain.
	assert(tensor_multiview->it && !CCV_IS_TENSOR_MULTIVIEW(tensor_multiview->it));
	// Base address of the active view, with this multi-view's offset backed out.
	unsigned char* const base = tensor_multiview->it->data.u8 - tensor_multiview->offset;
	const ccv_nnc_tensor_multiview_t* node = tensor_multiview;
	// `node` starts non-NULL, so a plain while covers the same iterations as
	// the original do/while.
	while (node)
	{
		if (node->sp)
		{
			int j;
			for (j = 0; j < node->sp->rnum; j++)
			{
				ccv_nnc_tensor_t* const sync_tensor = *(ccv_nnc_tensor_t**)ccv_array_get(node->sp, j);
				if (CCV_IS_TENSOR_VIEW(sync_tensor))
				{
					// A tensor view keeps its own byte offset into the backing data.
					ccv_nnc_tensor_view_t* const view = (ccv_nnc_tensor_view_t*)sync_tensor;
					view->data.u8 = base + view->off;
				} else
					sync_tensor->data.u8 = base;
			}
		}
		node = node->p;
	}
}
70
71
// Create a new exec node in `graph` that runs `while_graph` as a while-loop
// body. Returns the new exec node; the loop condition is installed later via
// ccv_nnc_graph_set_while_expr on `while_graph`.
ccv_nnc_graph_exec_t ccv_nnc_graph_while(ccv_nnc_graph_t* const graph, const uint32_t cmd, ccv_nnc_graph_t* const while_graph)
{
	assert(cmd == CCV_NNC_GRAPH_FORWARD || cmd == CCV_NNC_GRAPH_BACKWARD);
	ccv_nnc_graph_exec_t while_exec = ccv_nnc_graph_exec_new(graph, ccv_nnc_cmd(cmd, 0, CMD_GENERIC(), 0), ccv_nnc_no_hint, 0, 0, 0, 0);
	ccv_nnc_graph_exec_info_t* while_exec_info = (ccv_nnc_graph_exec_info_t*)ccv_array_get(graph->exec_info, while_exec.d);
	// Mark this exec node as a parent-of-while node.
	while_exec_info->flags |= CCV_NNC_GRAPH_EXEC_P_WHILE;
	if (!graph->sub_graphs)
		graph->sub_graphs = ccv_array_new(sizeof(ccv_nnc_graph_t*), 1, 0);
	int i;
	if (while_graph->tensor_wraps_refs)
	{
		// Copy wraps from sub graph to parent graph.
		if (!graph->tensor_wraps_refs)
			graph->tensor_wraps_refs = ccv_array_new(sizeof(ccv_nnc_graph_tensor_wraps_ref_t), while_graph->tensor_wraps_refs->rnum, 0);
		for (i = 0; i < while_graph->tensor_wraps_refs->rnum; i++)
			ccv_array_push(graph->tensor_wraps_refs, ccv_array_get(while_graph->tensor_wraps_refs, i));
	}
	ccv_array_push(graph->sub_graphs, &while_graph);
	while_graph->p = graph;
	// p_idx is the 1-based position of this sub-graph in the parent's
	// sub_graphs array — it must be read after the push above.
	while_graph->p_idx = graph->sub_graphs->rnum;
	// exec_idx is the 1-based back-reference to the exec node in the parent
	// graph (0 means "none"), hence the + 1.
	while_graph->exec_idx = while_exec.d + 1;
	while_exec_info->graph_ref_size = 1;
	// Graph refs are stored 1-based so 0 can mean "no sub-graph attached".
	CCV_NNC_GRAPH_REF(while_exec_info)[0] = graph->sub_graphs->rnum;
	return while_exec;
}
96
97
ccv_nnc_graph_t* ccv_nnc_graph_from_while_exec(const ccv_nnc_graph_t* const graph, ccv_nnc_graph_exec_t exec)
{
	// Resolve the sub-graph (the while-loop body) attached to a while exec node.
	assert(exec.graph == graph);
	assert(exec.d < graph->exec_info->rnum);
	assert(graph->sub_graphs);
	ccv_nnc_graph_exec_info_t* const info = (ccv_nnc_graph_exec_info_t*)ccv_array_get(graph->exec_info, exec.d);
	// Graph references are stored 1-based; 0 would mean no sub-graph attached.
	assert(CCV_NNC_GRAPH_REF(info)[0]);
	const int sub_graph_idx = CCV_NNC_GRAPH_REF(info)[0] - 1;
	assert(sub_graph_idx < graph->sub_graphs->rnum);
	return *(ccv_nnc_graph_t**)ccv_array_get(graph->sub_graphs, sub_graph_idx);
}
109
110
ccv_nnc_tensor_t ccv_nnc_tensor_for_while_count(const ccv_nnc_graph_t* const while_graph)
111
23
{
112
23
  return ccv_nnc_tensor(&while_graph->while_count, CPU_TENSOR_NHWC(64S, 1), 0);
113
23
}
114
115
// Install the loop condition on a while-graph: `while_expr` (with its opaque
// `while_data` and optional `inputs`) decides whether to continue looping, and
// `breakpoints` are the exec nodes at which the condition is evaluated.
// Must be called after ccv_nnc_graph_while has attached `while_graph` to a
// parent; may only be called once per while-graph (the expr must be unset).
void ccv_nnc_graph_set_while_expr(ccv_nnc_graph_t* const while_graph, const ccv_nnc_graph_while_f while_expr, const void* const while_data, ccv_nnc_tensor_t* const* const inputs, const int input_size, const ccv_nnc_graph_exec_t* const breakpoints, const int breakpoint_size)
{
	assert(while_graph->p);
	// exec_idx is stored 1-based by ccv_nnc_graph_while; convert back.
	const int exec_idx = while_graph->exec_idx - 1;
	assert(exec_idx >= 0 && exec_idx < while_graph->p->exec_info->rnum);
	ccv_nnc_graph_exec_info_t* const exec_info = (ccv_nnc_graph_exec_info_t*)ccv_array_get(while_graph->p->exec_info, exec_idx);
	// The condition may be set only once.
	assert(!exec_info->p_while.expr);
	exec_info->p_while.expr = while_expr;
	exec_info->p_while.data = while_data;
	if (input_size > 0)
	{
		// Keep a private copy of the input tensor pointers; the exec info owns it.
		exec_info->p_while.input_size = input_size;
		exec_info->p_while.inputs = (ccv_nnc_tensor_t**)ccmalloc(sizeof(ccv_nnc_tensor_t*) * input_size);
		memcpy(exec_info->p_while.inputs, inputs, sizeof(ccv_nnc_tensor_t*) * input_size);
		// Register for unwrapping.
		if (ccv_nnc_tensors_have_wraps(exec_info->p_while.inputs, input_size))
		{
			ccv_nnc_graph_tensor_wrap_array_t* const tensor_wrap_array = ccv_nnc_get_tensor_wrap_array(while_graph, input_size, &exec_info->p_while.tensor_wraps_ref);
			ccv_nnc_set_tensor_wraps(tensor_wrap_array->tensor_wraps, exec_info->p_while.inputs, input_size);
			// tensor_wraps_ref is 1-based (0 means unset); convert when registering.
			assert(exec_info->p_while.tensor_wraps_ref);
			ccv_nnc_graph_register_tensor_wraps(while_graph, exec_info->p_while.tensor_wraps_ref - 1);
		}
	}
	assert(breakpoint_size > 0);
	while_graph->breakpoint_size = breakpoint_size;
	// Re-use the existing breakpoint buffer when present, otherwise allocate.
	while_graph->breakpoints = (ccv_nnc_graph_exec_t*)((while_graph->breakpoints) ? ccrealloc(while_graph->breakpoints, sizeof(ccv_nnc_graph_exec_t) * breakpoint_size) : ccmalloc(sizeof(ccv_nnc_graph_exec_t) * breakpoint_size));
	memcpy(while_graph->breakpoints, breakpoints, sizeof(ccv_nnc_graph_exec_t) * breakpoint_size);
}