/home/liu/actions-runner/_work/ccv/ccv/test/int/nnc/datatype.tests.c
Line | Count | Source |
1 | | #include "case.h" |
2 | | #include "ccv_case.h" |
3 | | #include "ccv_nnc_case.h" |
4 | | #include <ccv.h> |
5 | | #include <nnc/ccv_nnc.h> |
6 | | #include <nnc/ccv_nnc_easy.h> |
7 | | #include "3rdparty/dsfmt/dSFMT.h" |
8 | | |
TEST_SETUP()
{
	// Runs before each test case: initialize the NNC runtime so commands /
	// backends queried via ccv_nnc_cmd_ok() below are available.
	ccv_nnc_init();
}
13 | | |
14 | | TEST_CASE("datatype conversion model can reference to the last parameter for the type") |
15 | 1 | { |
16 | 1 | GUARD_ELSE_RETURN((ccv_nnc_cmd_ok(CCV_NNC_EWSUM_FORWARD, CCV_NNC_BACKEND_GPU_CUDNN) && |
17 | 1 | ccv_nnc_cmd_ok(CCV_NNC_DATA_TRANSFER_FORWARD, CCV_NNC_BACKEND_GPU_REF)) || |
18 | 1 | (ccv_nnc_cmd_ok(CCV_NNC_EWSUM_FORWARD, CCV_NNC_BACKEND_MPS) && |
19 | 1 | ccv_nnc_cmd_ok(CCV_NNC_DATA_TRANSFER_FORWARD, CCV_NNC_BACKEND_MPS))); |
20 | 1 | const ccv_nnc_tensor_param_t a1_params = GPU_TENSOR_NHWC(000, 32F, 2); |
21 | 1 | ccv_nnc_tensor_t* const ha1 = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2), 0); |
22 | 1 | ha1->data.f32[0] = 0.24; |
23 | 1 | ha1->data.f32[1] = -1.4; |
24 | 1 | const ccv_nnc_tensor_param_t a2_params = GPU_TENSOR_NHWC(000, 32F, 2); |
25 | 1 | ccv_nnc_tensor_t* const ha2 = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2), 0); |
26 | 1 | ha2->data.f32[0] = -3.23; |
27 | 1 | ha2->data.f32[1] = 2.44; |
28 | 1 | const ccv_nnc_tensor_param_t a3_params = GPU_TENSOR_NHWC(000, 16F, 2); |
29 | 1 | ccv_nnc_tensor_t* const ha3 = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(16F, 2), 0); |
30 | 1 | ccv_float_to_half_precision(ha2->data.f32, (uint16_t*)ha3->data.f16, 2); |
31 | 1 | ccv_nnc_tensor_t* const a1 = ccv_nnc_tensor_new(0, a1_params, 0); |
32 | 1 | ccv_nnc_tensor_t* const a2 = ccv_nnc_tensor_new(0, a2_params, 0); |
33 | 1 | ccv_nnc_tensor_t* const a3 = ccv_nnc_tensor_new(0, a3_params, 0); |
34 | 1 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(ha1, ha2, ha3), TENSOR_LIST(a1, a2, a3), 0); |
35 | 1 | const ccv_cnnp_model_io_t input1 = ccv_cnnp_input(); |
36 | 1 | ccv_cnnp_model_io_t output = ccv_cnnp_model_apply(ccv_cnnp_datatype_conversion(CCV_16F, 0, 0), MODEL_IO_LIST(input1)); |
37 | 1 | const ccv_cnnp_model_io_t input3 = ccv_cnnp_input(); |
38 | 1 | output = ccv_cnnp_model_apply(ccv_cnnp_sum(0), MODEL_IO_LIST(output, input3)); |
39 | 1 | const ccv_cnnp_model_io_t input2 = ccv_cnnp_input(); |
40 | 1 | output = ccv_cnnp_model_apply(ccv_cnnp_datatype_conversion(0, 1, 0), MODEL_IO_LIST(output, input2)); |
41 | 1 | output = ccv_cnnp_model_apply(ccv_cnnp_sum(0), MODEL_IO_LIST(output, input2)); |
42 | 1 | ccv_cnnp_model_t* const final = ccv_cnnp_model_new(MODEL_IO_LIST(input1, input2, input3), MODEL_IO_LIST(output), 1, 0); |
43 | 1 | ccv_cnnp_model_compile(final, TENSOR_PARAM_LIST(a1_params, a2_params, a3_params), CMD_NOOP(), CMD_NOOP()); |
44 | 1 | ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, a1_params, 0); |
45 | 1 | ccv_nnc_tensor_t* const hb = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 2), 0); |
46 | 1 | hb->data.f32[0] = 1; |
47 | 1 | hb->data.f32[1] = 2; |
48 | 1 | ccv_cnnp_model_evaluate(final, (ccv_cnnp_evaluate_param_t){}, TENSOR_LIST(a1, a2, a3), TENSOR_LIST(b), 0, 0); |
49 | 1 | ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(b), TENSOR_LIST(hb), 0); |
50 | 1 | REQUIRE_EQ_WITH_TOLERANCE(hb->data.f32[0], 0.24 + (-3.23) * 2, 1e-2, "should match"); |
51 | 1 | REQUIRE_EQ_WITH_TOLERANCE(hb->data.f32[1], -1.4 + 2.44 * 2, 1e-2, "should match"); |
52 | 1 | ccv_nnc_tensor_free(a1); |
53 | 1 | ccv_nnc_tensor_free(a2); |
54 | 1 | ccv_nnc_tensor_free(a3); |
55 | 1 | ccv_nnc_tensor_free(b); |
56 | 1 | ccv_nnc_tensor_free(ha1); |
57 | 1 | ccv_nnc_tensor_free(ha2); |
58 | 1 | ccv_nnc_tensor_free(ha3); |
59 | 1 | ccv_nnc_tensor_free(hb); |
60 | 1 | ccv_cnnp_model_free(final); |
61 | 1 | } |
62 | | |
63 | | #include "case_main.h" |