Coverage Report

Created: 2024-08-18 16:21

/home/liu/actions-runner/_work/ccv/ccv/test/unit/nnc/symbolic.graph.tests.c
Line
Count
Source
1
#include "case.h"
2
#include "ccv_case.h"
3
#include "ccv_nnc_case.h"
4
#include <ccv.h>
5
#include <nnc/ccv_nnc.h>
6
#include <nnc/ccv_nnc_easy.h>
7
#include "3rdparty/dsfmt/dSFMT.h"
8
9
TEST_SETUP()
10
{
11
  ccv_nnc_init();
12
}
13
14
TEST_CASE("compile symbolic graph of one node")
15
1
{
16
1
  ccv_nnc_symbolic_graph_t* symbolic_graph = ccv_nnc_symbolic_graph_new();
17
1
  ccv_nnc_tensor_symbol_t a = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 1), "a");
18
1
  ccv_nnc_tensor_symbol_t b = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 1), "b");
19
1
  ccv_nnc_tensor_symbol_t c = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 1), "c");
20
1
  ccv_nnc_graph_exec_symbol_t prod = ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_EWPROD_FORWARD(), TENSOR_SYMBOL_LIST(a, b), TENSOR_SYMBOL_LIST(c), "prod");
21
1
  ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS);
22
1
  ccv_nnc_graph_t* graph = 0;
23
1
  ccv_nnc_tensor_arena_t* tensor_arena = 0;
24
1
  ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0;
25
1
  ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, GRAPH_EXEC_SYMBOL_LIST(prod), GRAPH_EXEC_SYMBOL_LIST(prod), &graph, &tensor_arena, &graph_exec_arena);
26
1
  SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_SHORT_DOT_GRAPH);
27
1
  GRAPH_GEN(graph, CCV_NNC_SHORT_DOT_GRAPH);
28
1
  ccv_nnc_tensor_t* a_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, a);
29
1
  ccv_nnc_tensor_t* b_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, b);
30
1
  ccv_nnc_tensor_t* c_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, c);
31
1
  a_tensor->data.f32[0] = 1.2;
32
1
  b_tensor->data.f32[0] = 2.3;
33
1
  ccv_nnc_graph_exec_t prod_exec = ccv_nnc_graph_exec_from_symbol(graph_exec_arena, prod);
34
1
  ccv_nnc_graph_run(graph, 0, GRAPH_EXEC_LIST(prod_exec), GRAPH_EXEC_LIST(prod_exec), 0, 0);
35
1
  REQUIRE(a_tensor->data.f32 == c_tensor->data.f32, "trivially in-place operation, should point to the same memory region");
36
1
  REQUIRE_EQ_WITH_TOLERANCE(c_tensor->data.f32[0], 1.2 * 2.3, 1e-6, "should be equal to 1.2 * 2.3");
37
1
  ccv_nnc_graph_free(graph);
38
1
  ccv_nnc_tensor_arena_free(tensor_arena);
39
1
  ccv_nnc_graph_exec_arena_free(graph_exec_arena);
40
1
  ccv_nnc_symbolic_graph_free(symbolic_graph);
41
1
}
42
43
TEST_CASE("compile a simple symbolic graph with autogen")
44
1
{
45
1
  ccv_nnc_symbolic_graph_t* symbolic_graph = ccv_nnc_symbolic_graph_new();
46
1
  ccv_nnc_tensor_symbol_t a = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 31, 21, 2), "a");
47
1
  ccv_nnc_tensor_symbol_t b = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 31, 21, 4), "b");
48
1
  ccv_nnc_cmd_t forw_cmd = CMD_CONVOLUTION_FORWARD(1, 4, 5, 3, 2);
49
1
  ccv_nnc_tensor_symbol_t w = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 4, 5, 3, 2), "w");
50
1
  ccv_nnc_tensor_symbol_t bias = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 4), "bias");
51
  // See what we compile to when have unused tensors.
52
1
  ccv_nnc_tensor_symbol_t unused0 = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 1), "unused0");
53
1
  ccv_nnc_tensor_symbol_t unused1 = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 1), "unused1");
54
1
  ccv_nnc_graph_exec_symbol_new(symbolic_graph, forw_cmd, TENSOR_SYMBOL_LIST(a, w, bias), TENSOR_SYMBOL_LIST(b), "forw");
55
1
  ccv_nnc_tensor_symbol_t m = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 31, 21, 4), "m");
56
1
  ccv_nnc_cmd_t softmax_cmd = CMD_SOFTMAX_FORWARD();
57
1
  ccv_nnc_graph_exec_symbol_new(symbolic_graph, softmax_cmd, TENSOR_SYMBOL_LIST(b), TENSOR_SYMBOL_LIST(m), "softmax");
58
1
  ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS);
59
1
  ccv_nnc_graph_t* graph = 0;
60
1
  ccv_nnc_tensor_arena_t* tensor_arena = 0;
61
1
  ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0;
62
1
  ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena);
63
1
  SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH);
64
1
  GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH);
65
1
  ccv_nnc_tensor_t* a_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, a);
66
1
  ccv_nnc_tensor_t* b_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, b);
67
1
  ccv_nnc_tensor_t* m_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, m);
68
1
  REQUIRE(a_tensor->data.u8 != b_tensor->data.u8, "tensor a and b shouldn't share the memory.");
69
1
  REQUIRE(b_tensor->data.u8 == m_tensor->data.u8, "tensor b and m should share the memory because softmax is an inplace op.");
70
1
  REQUIRE(ccv_nnc_tensor_from_symbol(tensor_arena, unused0) == 0, "tensor unused 0 should have not pointed memory.");
71
1
  REQUIRE(ccv_nnc_tensor_from_symbol(tensor_arena, unused1) == 0, "tensor unused 1 should have not pointed memory.");
72
1
  ccv_nnc_symbolic_graph_free(symbolic_graph);
73
1
  ccv_nnc_graph_free(graph);
74
1
  ccv_nnc_tensor_arena_free(tensor_arena);
75
1
  ccv_nnc_graph_exec_arena_free(graph_exec_arena);
76
1
}
77
78
TEST_CASE("use symbolic graph disjoin and free")
79
1
{
80
1
  ccv_nnc_symbolic_graph_t* const symbolic_graph = ccv_nnc_symbolic_graph_new();
81
1
  ccv_nnc_tensor_symbol_t a = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 1), "a");
82
1
  ccv_nnc_tensor_symbol_t b = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 1), "b");
83
1
  ccv_nnc_tensor_symbol_t c = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 1), "c");
84
1
  ccv_nnc_tensor_symbol_t x = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 1), "x");
85
1
  ccv_nnc_tensor_symbol_t y = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 1), "y");
86
1
  ccv_nnc_tensor_symbol_t z = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 1), "z");
87
1
  ccv_nnc_tensor_symbol_t p = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 1), "p");
88
1
  ccv_nnc_tensor_symbol_t q = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 1), "q");
89
1
  ccv_nnc_graph_exec_symbol_t prod = ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_EWPROD_FORWARD(), TENSOR_SYMBOL_LIST(x, y), TENSOR_SYMBOL_LIST(z), "prod");
90
1
  ccv_nnc_graph_exec_symbol_t log0 = ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_EWLOG_FORWARD(), TENSOR_SYMBOL_LIST(p), TENSOR_SYMBOL_LIST(q), "log0");
91
1
  ccv_nnc_graph_exec_symbol_t sum = ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_EWSUM_FORWARD(), TENSOR_SYMBOL_LIST(a, b), TENSOR_SYMBOL_LIST(c), "sum");
92
1
  ccv_nnc_graph_exec_symbol_concat(symbolic_graph, sum, prod);
93
1
  ccv_nnc_graph_exec_symbol_concat(symbolic_graph, sum, log0);
94
1
  ccv_nnc_graph_exec_symbol_disjoin(symbolic_graph, sum, prod);
95
1
  ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS);
96
1
  REQUIRE(ccv_nnc_symbolic_graph_source_size(symbolic_graph) == 2, "sources now should contain both sum and prod");
97
1
  REQUIRE(ccv_nnc_symbolic_graph_destination_size(symbolic_graph) == 2, "destinations now should contain both log0 and prod");
98
1
  ccv_nnc_graph_exec_symbol_free(symbolic_graph, prod);
99
1
  REQUIRE(ccv_nnc_symbolic_graph_source_size(symbolic_graph) == 1, "sources now should contain sum");
100
1
  REQUIRE(ccv_nnc_symbolic_graph_destination_size(symbolic_graph) == 1, "destinations now should contain log0");
101
1
  REQUIRE(ccv_nnc_symbolic_graph_sources(symbolic_graph)->d == sum.d, "source should be sum");
102
1
  REQUIRE(ccv_nnc_symbolic_graph_destinations(symbolic_graph)->d == log0.d, "destination should be log0");
103
1
  SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_SHORT_DOT_GRAPH);
104
1
  ccv_nnc_symbolic_graph_free(symbolic_graph);
105
1
}
106
107
TEST_CASE("set tensor symbol shape after computation specified")
108
1
{
109
1
  ccv_nnc_symbolic_graph_t* const symbolic_graph = ccv_nnc_symbolic_graph_new();
110
1
  ccv_nnc_tensor_symbol_t a = ccv_nnc_tensor_symbol_new(symbolic_graph, ccv_nnc_tensor_auto, "a");
111
1
  ccv_nnc_tensor_symbol_t b = ccv_nnc_tensor_symbol_new(symbolic_graph, ccv_nnc_tensor_auto, "b");
112
1
  ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_EWLOG_FORWARD(), TENSOR_SYMBOL_LIST(a), TENSOR_SYMBOL_LIST(b), "log");
113
1
  ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS);
114
1
  ccv_nnc_tensor_symbol_set(symbolic_graph, a, CPU_TENSOR_NHWC(32F, 1));
115
1
  ccv_nnc_graph_t* graph = 0;
116
1
  ccv_nnc_tensor_arena_t* tensor_arena = 0;
117
1
  ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0;
118
1
  ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena);
119
1
  SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH);
120
1
  ccv_nnc_symbolic_graph_free(symbolic_graph);
121
1
  GRAPH_GEN(graph, CCV_NNC_SHORT_DOT_GRAPH);
122
1
  ccv_nnc_tensor_t* const a_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, a);
123
1
  a_tensor->data.f32[0] = 1.25;
124
1
  ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0);
125
1
  ccv_nnc_tensor_t* const b_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, b);
126
1
  REQUIRE_EQ_WITH_TOLERANCE(b_tensor->data.f32[0], logf(1.25), 1e-5, "should be equal to logf(1.25)");
127
1
  ccv_nnc_graph_free(graph);
128
1
  ccv_nnc_tensor_arena_free(tensor_arena);
129
1
  ccv_nnc_graph_exec_arena_free(graph_exec_arena);
130
1
}
131
132
TEST_CASE("query connectivity from one exec symbol to another")
133
1
{
134
1
  ccv_nnc_symbolic_graph_t* const symbolic_graph = ccv_nnc_symbolic_graph_new();
135
1
  ccv_nnc_tensor_symbol_t a = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 1), "a");
136
1
  ccv_nnc_tensor_symbol_t b = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 1), "b");
137
1
  ccv_nnc_graph_exec_symbol_t log1 = ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_EWLOG_FORWARD(), TENSOR_SYMBOL_LIST(a), TENSOR_SYMBOL_LIST(b), "log1");
138
1
  ccv_nnc_tensor_symbol_t c = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 1), "c");
139
1
  ccv_nnc_tensor_symbol_t d = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 1), "d");
140
1
  ccv_nnc_graph_exec_symbol_t log2 = ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_EWLOG_FORWARD(), TENSOR_SYMBOL_LIST(c), TENSOR_SYMBOL_LIST(d), "log2");
141
1
  ccv_nnc_tensor_symbol_t e = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 1), "e");
142
1
  ccv_nnc_graph_exec_symbol_t sum1 = ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_EWSUM_FORWARD(), TENSOR_SYMBOL_LIST(a, c), TENSOR_SYMBOL_LIST(e), "sum1");
143
1
  ccv_nnc_tensor_symbol_t f = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 1), "f");
144
1
  ccv_nnc_graph_exec_symbol_t log3 = ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_EWLOG_FORWARD(), TENSOR_SYMBOL_LIST(d), TENSOR_SYMBOL_LIST(f), "log3");
145
1
  ccv_nnc_tensor_symbol_t g = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 1), "g");
146
1
  ccv_nnc_graph_exec_symbol_t sum2 = ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_EWSUM_FORWARD(), TENSOR_SYMBOL_LIST(b, f), TENSOR_SYMBOL_LIST(g), "sum2");
147
1
  ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS);
148
1
  SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH);
149
1
  uint64_t bitmask = 1;
150
1
  ccv_nnc_symbolic_graph_sources_to_destinations(symbolic_graph, GRAPH_EXEC_SYMBOL_LIST(sum1, log1, log2, log1), GRAPH_EXEC_SYMBOL_LIST(sum2), &bitmask);
151
1
  REQUIRE(bitmask == 14, "log1 and log2 should be sources for sum2, not sum1");
152
1
  bitmask = 0;
153
1
  ccv_nnc_symbolic_graph_sources_to_destinations(symbolic_graph, GRAPH_EXEC_SYMBOL_LIST(sum1, log1, log2), GRAPH_EXEC_SYMBOL_LIST(sum1), &bitmask);
154
1
  REQUIRE(bitmask == 1, "log1 and log2 are not sources for sum1");
155
1
  bitmask = 0;
156
1
  ccv_nnc_symbolic_graph_sources_to_destinations(symbolic_graph, GRAPH_EXEC_SYMBOL_LIST(log3, log1, log2), GRAPH_EXEC_SYMBOL_LIST(sum2), &bitmask);
157
1
  REQUIRE(bitmask == 7, "log3, log1 and log2 should all be sources for sum2");
158
1
  ccv_nnc_symbolic_graph_free(symbolic_graph);
159
1
}
160
161
typedef struct {
162
  int called;
163
  int softmax_called;
164
  int convolution_called;
165
  int softmax_incomings;
166
  int softmax_outgoings;
167
  int convolution_incomings;
168
  int convolution_outgoings;
169
} format_stats_t;
170
171
static void _format_fn(const ccv_nnc_symbolic_graph_t* const graph, const int node, const char* const name, const ccv_nnc_cmd_t cmd, const int flags, const int* const incomings, const int incoming_size, const int* const outgoings, const int outgoing_size, const int* const inputs, const int input_size, const int* const outputs, const int output_size, void* const context)
172
2
{
173
2
  format_stats_t* const stats = (format_stats_t*)context;
174
2
  ++stats->called;
175
2
  if (cmd.cmd == CCV_NNC_CONVOLUTION_FORWARD)
176
1
  {
177
1
    ++stats->convolution_called;
178
1
    stats->convolution_incomings = incoming_size;
179
1
    stats->convolution_outgoings = outgoing_size;
180
1
  } else if (cmd.cmd == CCV_NNC_SOFTMAX_FORWARD) {
181
1
    ++stats->softmax_called;
182
1
    stats->softmax_incomings = incoming_size;
183
1
    stats->softmax_outgoings = outgoing_size;
184
1
  }
185
2
}
186
187
TEST_CASE("build a simple symbolic graph and check its format")
188
1
{
189
1
  ccv_nnc_symbolic_graph_t* symbolic_graph = ccv_nnc_symbolic_graph_new();
190
1
  ccv_nnc_tensor_symbol_t a = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 31, 21, 2), "a");
191
1
  ccv_nnc_tensor_symbol_t b = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 31, 21, 4), "b");
192
1
  ccv_nnc_cmd_t forw_cmd = CMD_CONVOLUTION_FORWARD(1, 4, 5, 3, 2);
193
1
  ccv_nnc_tensor_symbol_t w = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 4, 5, 3, 2), "w");
194
1
  ccv_nnc_tensor_symbol_t bias = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 4), "bias");
195
1
  ccv_nnc_graph_exec_symbol_new(symbolic_graph, forw_cmd, TENSOR_SYMBOL_LIST(a, w, bias), TENSOR_SYMBOL_LIST(b), "forw");
196
1
  ccv_nnc_tensor_symbol_t m = ccv_nnc_tensor_symbol_new(symbolic_graph, CPU_TENSOR_NHWC(32F, 31, 21, 4), "m");
197
1
  ccv_nnc_cmd_t softmax_cmd = CMD_SOFTMAX_FORWARD();
198
1
  ccv_nnc_graph_exec_symbol_new(symbolic_graph, softmax_cmd, TENSOR_SYMBOL_LIST(b), TENSOR_SYMBOL_LIST(m), "softmax");
199
1
  ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS);
200
1
  SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH);
201
1
  format_stats_t stats = {
202
1
    .called = 0,
203
1
    .softmax_called = 0,
204
1
    .convolution_called = 0,
205
1
    .softmax_incomings = 0,
206
1
    .softmax_outgoings = 0,
207
1
    .convolution_incomings = 0,
208
1
    .convolution_outgoings = 0,
209
1
  };
210
1
  ccv_nnc_symbolic_graph_format(symbolic_graph, 0, 0, 0, 0, _format_fn, &stats);
211
1
  REQUIRE(stats.called == 2, "called twice");
212
1
  REQUIRE(stats.convolution_called == 1, "called convolution");
213
1
  REQUIRE(stats.softmax_called == 1, "called softmax");
214
1
  REQUIRE(stats.convolution_incomings == 0, "convolution has no incomings");
215
1
  REQUIRE(stats.convolution_outgoings == 1, "convolution has 1 outgoing");
216
1
  REQUIRE(stats.softmax_incomings == 1, "softmax has 1 incoming");
217
1
  REQUIRE(stats.softmax_outgoings == 0, "softmax has no outgoings");
218
1
  ccv_nnc_symbolic_graph_free(symbolic_graph);
219
1
}
220
221
#include "case_main.h"