Coverage Report

Created: 2024-08-19 11:27

/home/liu/actions-runner/_work/ccv/ccv/test/int/nnc/concat.tests.c
  Line | Count | Source
     1 |       | #include "case.h"
     2 |       | #include "ccv_case.h"
     3 |       | #include "ccv_nnc_case.h"
     4 |       | #include <ccv.h>
     5 |       | #include <nnc/ccv_nnc.h>
     6 |       | #include <nnc/ccv_nnc_easy.h>
     7 |       | #include "3rdparty/dsfmt/dSFMT.h"
     8 |       |
     9 |       | TEST_SETUP()
    10 |       | {
    11 |       |   ccv_nnc_init();
    12 |       | }
    13 |       |
    14 |       | TEST_CASE("concatenate several tensors together")
    15 |       | {
    16 |       |   GUARD_ELSE_RETURN((ccv_nnc_cmd_ok(CCV_NNC_SET_FORWARD, CCV_NNC_BACKEND_GPU_CUDNN) &&
    17 |       |     ccv_nnc_cmd_ok(CCV_NNC_DATA_TRANSFER_FORWARD, CCV_NNC_BACKEND_GPU_REF) &&
    18 |       |     ccv_nnc_cmd_ok(CCV_NNC_GEMM_FORWARD, CCV_NNC_BACKEND_GPU_CUBLAS) &&
    19 |       |     ccv_nnc_cmd_ok(CCV_NNC_FORMAT_TRANSFORM_FORWARD, CCV_NNC_BACKEND_GPU_CUDNN)) ||
    20 |       |     (ccv_nnc_cmd_ok(CCV_NNC_SET_FORWARD, CCV_NNC_BACKEND_MPS) &&
    21 |       |     ccv_nnc_cmd_ok(CCV_NNC_DATA_TRANSFER_FORWARD, CCV_NNC_BACKEND_MPS) &&
    22 |       |     ccv_nnc_cmd_ok(CCV_NNC_GEMM_FORWARD, CCV_NNC_BACKEND_MPS) &&
    23 |       |     ccv_nnc_cmd_ok(CCV_NNC_FORMAT_TRANSFORM_FORWARD, CCV_NNC_BACKEND_MPS)));
    24 |       |   ccv_cnnp_model_t* const concat = ccv_cnnp_concat(0, "concat");
    25 |       |   ccv_cnnp_model_t* const dense = ccv_cnnp_dense(1, 1, 0, 1, "linear");
    26 |       |   ccv_cnnp_model_t* const full = ccv_cnnp_sequential_new(MODEL_LIST(concat, dense), 1, "full");
    27 |       |   ccv_nnc_tensor_param_t a_params = GPU_TENSOR_NCHW(000, 32F, 1);
    28 |       |   ccv_nnc_tensor_param_t b_params = GPU_TENSOR_NCHW(000, 32F, 2);
    29 |       |   ccv_cnnp_model_compile(full, TENSOR_PARAM_LIST(a_params, b_params), CMD_NOOP(), CMD_NOOP());
    30 |       |   CNNP_MODEL_GEN(full, CCV_NNC_LONG_DOT_GRAPH);
    31 |       |   ccv_nnc_tensor_t* const ha = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 1), 0);
    32 |       |   ha->data.f32[0] = -0.5;
    33 |       |   ccv_nnc_tensor_t* const hb = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 2), 0);
    34 |       |   hb->data.f32[0] = 0.3;
    35 |       |   hb->data.f32[1] = 2;
    36 |       |   ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, a_params, 0);
    37 |       |   ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, b_params, 0);
    38 |       |   ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(ha, hb), TENSOR_LIST(a, b), 0);
    39 |       |   ccv_nnc_tensor_t* const c = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, 1), 0);
    40 |       |   ccv_nnc_tensor_t* const hc = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 1), 0);
    41 |       |   ccv_cnnp_model_evaluate(full, (ccv_cnnp_evaluate_param_t){}, TENSOR_LIST(a, b), TENSOR_LIST(c), 0, 0);
    42 |       |   ccv_cnnp_model_parameters_map(full, ccv_cnnp_model_parameters(full, ALL_PARAMETERS, ALL_PARAMETERS), CMD_SET_FORWARD(1), ccv_nnc_no_hint, 0, 0, 0, 0, 0, 0);
    43 |       |   ccv_cnnp_model_evaluate(full, (ccv_cnnp_evaluate_param_t){}, TENSOR_LIST(a, b), TENSOR_LIST(c), 0, 0);
    44 |       |   ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(c), TENSOR_LIST(hc), 0);
    45 |       |   REQUIRE_EQ_WITH_TOLERANCE(hc->data.f32[0], -0.5 + 0.3 + 2, 1e-5, "should be equal");
    46 |       |   ccv_nnc_tensor_free(a);
    47 |       |   ccv_nnc_tensor_free(b);
    48 |       |   ccv_nnc_tensor_free(c);
    49 |       |   ccv_nnc_tensor_free(ha);
    50 |       |   ccv_nnc_tensor_free(hb);
    51 |       |   ccv_nnc_tensor_free(hc);
    52 |       |   ccv_cnnp_model_free(full);
    53 |       | }
    54 |       |
    55 |       | TEST_CASE("concatenate several tensors together and make sure they are simplified away")
    56 |     1 | {
    57 |     1 |   GUARD_ELSE_RETURN((ccv_nnc_cmd_ok(CCV_NNC_SET_FORWARD, CCV_NNC_BACKEND_GPU_CUDNN) &&
    58 |     1 |     ccv_nnc_cmd_ok(CCV_NNC_DATA_TRANSFER_FORWARD, CCV_NNC_BACKEND_GPU_REF) &&
    59 |     1 |     ccv_nnc_cmd_ok(CCV_NNC_GEMM_FORWARD, CCV_NNC_BACKEND_GPU_CUBLAS) &&
    60 |     1 |     ccv_nnc_cmd_ok(CCV_NNC_FORMAT_TRANSFORM_FORWARD, CCV_NNC_BACKEND_GPU_CUDNN)) ||
    61 |     1 |     (ccv_nnc_cmd_ok(CCV_NNC_SET_FORWARD, CCV_NNC_BACKEND_MPS) &&
    62 |     1 |     ccv_nnc_cmd_ok(CCV_NNC_DATA_TRANSFER_FORWARD, CCV_NNC_BACKEND_MPS) &&
    63 |     1 |     ccv_nnc_cmd_ok(CCV_NNC_GEMM_FORWARD, CCV_NNC_BACKEND_MPS) &&
    64 |     1 |     ccv_nnc_cmd_ok(CCV_NNC_FORMAT_TRANSFORM_FORWARD, CCV_NNC_BACKEND_MPS)));
    65 |     1 |   ccv_cnnp_model_t* const x_dense = ccv_cnnp_dense(1, 1, 0, 1, "linear");
    66 |     1 |   ccv_cnnp_model_t* const y_dense = ccv_cnnp_dense(2, 1, 0, 1, "linear");
    67 |     1 |   ccv_cnnp_model_t* const concat = ccv_cnnp_concat(0, "concat");
    68 |     1 |   ccv_cnnp_model_t* const dense = ccv_cnnp_dense(1, 1, 0, 1, "linear");
    69 |     1 |   ccv_cnnp_model_io_t const x = ccv_cnnp_input();
    70 |     1 |   ccv_cnnp_model_io_t const y = ccv_cnnp_input();
    71 |     1 |   ccv_cnnp_model_io_t xz = ccv_cnnp_model_apply(x_dense, MODEL_IO_LIST(x));
    72 |     1 |   ccv_cnnp_model_io_t yz = ccv_cnnp_model_apply(y_dense, MODEL_IO_LIST(y));
    73 |     1 |   ccv_cnnp_model_io_t z = ccv_cnnp_model_apply(concat, MODEL_IO_LIST(xz, yz));
    74 |     1 |   z = ccv_cnnp_model_apply(dense, MODEL_IO_LIST(z));
    75 |     1 |   ccv_cnnp_model_t* const full = ccv_cnnp_model_new(MODEL_IO_LIST(x, y), MODEL_IO_LIST(z), 1, "full");
    76 |     1 |   ccv_nnc_tensor_param_t a_params = GPU_TENSOR_NCHW(000, 32F, 1);
    77 |     1 |   ccv_nnc_tensor_param_t b_params = GPU_TENSOR_NCHW(000, 32F, 2);
    78 |     1 |   ccv_cnnp_model_compile(full, TENSOR_PARAM_LIST(a_params, b_params), CMD_NOOP(), CMD_NOOP());
    79 |     1 |   CNNP_MODEL_GEN(full, CCV_NNC_LONG_DOT_GRAPH);
    80 |     1 |   ccv_nnc_tensor_t* const ha = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 1), 0);
    81 |     1 |   ha->data.f32[0] = -0.5;
    82 |     1 |   ccv_nnc_tensor_t* const hb = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 2), 0);
    83 |     1 |   hb->data.f32[0] = 0.3;
    84 |     1 |   hb->data.f32[1] = 2;
    85 |     1 |   ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, a_params, 0);
    86 |     1 |   ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, b_params, 0);
    87 |     1 |   ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(ha, hb), TENSOR_LIST(a, b), 0);
    88 |     1 |   ccv_nnc_tensor_t* const c = ccv_nnc_tensor_new(0, GPU_TENSOR_NCHW(000, 32F, 1), 0);
    89 |     1 |   ccv_nnc_tensor_t* const hc = ccv_nnc_tensor_new(0, CPU_TENSOR_NCHW(32F, 1), 0);
    90 |     1 |   ccv_cnnp_model_evaluate(full, (ccv_cnnp_evaluate_param_t){}, TENSOR_LIST(a, b), TENSOR_LIST(c), 0, 0);
    91 |     1 |   ccv_cnnp_model_parameters_map(full, ccv_cnnp_model_parameters(x_dense, ALL_PARAMETERS, ALL_PARAMETERS), CMD_SET_FORWARD(0.5), ccv_nnc_no_hint, 0, 0, 0, 0, 0, 0);
    92 |     1 |   ccv_cnnp_model_parameters_map(full, ccv_cnnp_model_parameters(y_dense, ALL_PARAMETERS, ALL_PARAMETERS), CMD_SET_FORWARD(-0.5), ccv_nnc_no_hint, 0, 0, 0, 0, 0, 0);
    93 |     1 |   ccv_cnnp_model_parameters_map(full, ccv_cnnp_model_parameters(dense, ALL_PARAMETERS, ALL_PARAMETERS), CMD_SET_FORWARD(1), ccv_nnc_no_hint, 0, 0, 0, 0, 0, 0);
    94 |     1 |   ccv_cnnp_model_evaluate(full, (ccv_cnnp_evaluate_param_t){}, TENSOR_LIST(a, b), TENSOR_LIST(c), 0, 0);
    95 |     1 |   ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(c), TENSOR_LIST(hc), 0);
    96 |     1 |   REQUIRE_EQ_WITH_TOLERANCE(hc->data.f32[0], -0.5 * 0.5 + (0.3 + 2) * -0.5 * 2, 1e-5, "should be equal");
    97 |     1 |   ccv_nnc_tensor_free(a);
    98 |     1 |   ccv_nnc_tensor_free(b);
    99 |     1 |   ccv_nnc_tensor_free(c);
   100 |     1 |   ccv_nnc_tensor_free(ha);
   101 |     1 |   ccv_nnc_tensor_free(hb);
   102 |     1 |   ccv_nnc_tensor_free(hc);
   103 |     1 |   ccv_cnnp_model_free(full);
   104 |     1 | }
   105 |       |
   106 |       | #include "case_main.h"