/home/liu/actions-runner/_work/ccv/ccv/test/int/nnc/random.tests.c
#include "case.h"
#include "ccv_case.h"
#include "ccv_nnc_case.h"
#include <ccv.h>
#include <nnc/ccv_nnc.h>
#include <nnc/ccv_nnc_easy.h>
#include <3rdparty/dsfmt/dSFMT.h>

TEST_SETUP()
{
	ccv_nnc_init();
}

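// Checks CMD_RANDOM_NORMAL_FORWARD on the GPU: draw 100,000 samples, copy them back to the host,
// and verify the sample mean is ~0 and the sample standard deviation is ~1 (tolerance 1e-2).
// The test is skipped when neither a GPU nor an MPS backend provides the command.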
TEST_CASE("random normal distribution")
{
	GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_RANDOM_NORMAL_FORWARD, CCV_NNC_BACKEND_GPU_REF) || ccv_nnc_cmd_ok(CCV_NNC_RANDOM_NORMAL_FORWARD, CCV_NNC_BACKEND_MPS));
	ccv_nnc_symbolic_graph_t* symbolic_graph = ccv_nnc_symbolic_graph_new();
	const ccv_nnc_tensor_symbol_t x = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 100000), "x");
	ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_RANDOM_NORMAL_FORWARD(1, 0), TENSOR_SYMBOL_LIST(), TENSOR_SYMBOL_LIST(x), "random normal");
	ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS);
	SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH);
	ccv_nnc_graph_t* graph = 0;
	ccv_nnc_tensor_arena_t* tensor_arena = 0;
	ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0;
	ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena);
	GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH);
	ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0);
	ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, x);
	ccv_nnc_tensor_t* const xt = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 100000), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor), TENSOR_LIST(xt), 0);
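	// Compute the mean and standard deviation of the 100,000 samples on the host copy.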
	int i;
	double mean = 0;
	for (i = 0; i < 100000; i++)
		mean += xt->data.f32[i];
	mean = mean / 100000.0;
	double std = 0;
	for (i = 0; i < 100000; i++)
		std += (xt->data.f32[i] - mean) * (xt->data.f32[i] - mean);
	std = sqrt(std / 100000.0);
	REQUIRE_EQ_WITH_TOLERANCE(mean, 0, 1e-2, "mean should be 0");
	REQUIRE_EQ_WITH_TOLERANCE(std, 1, 1e-2, "std should be 1");
	ccv_nnc_tensor_free(xt);
	ccv_nnc_graph_free(graph);
	ccv_nnc_tensor_arena_free(tensor_arena);
	ccv_nnc_graph_exec_arena_free(graph_exec_arena);
	ccv_nnc_symbolic_graph_free(symbolic_graph);
}

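// Checks CMD_RANDOM_UNIFORM_FORWARD on the GPU: draw 100,000 samples from [-8, 4], verify every
// value stays within the bounds, and verify a 12-bin histogram of the samples is roughly flat.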
TEST_CASE("random uniform distribution")
{
	GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_RANDOM_UNIFORM_FORWARD, CCV_NNC_BACKEND_GPU_REF) || ccv_nnc_cmd_ok(CCV_NNC_RANDOM_UNIFORM_FORWARD, CCV_NNC_BACKEND_MPS));
	ccv_nnc_symbolic_graph_t* symbolic_graph = ccv_nnc_symbolic_graph_new();
	const ccv_nnc_tensor_symbol_t x = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 32F, 100000), "x");
	ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_RANDOM_UNIFORM_FORWARD(-8, 4), TENSOR_SYMBOL_LIST(), TENSOR_SYMBOL_LIST(x), "random uniform");
	ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS);
	SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH);
	ccv_nnc_graph_t* graph = 0;
	ccv_nnc_tensor_arena_t* tensor_arena = 0;
	ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0;
	ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena);
	GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH);
	ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0);
	ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, x);
	ccv_nnc_tensor_t* const xt = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 100000), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor), TENSOR_LIST(xt), 0);
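	// Bin each sample into one of 12 unit-wide buckets covering [-8, 4]; with 100,000 samples,
	// every bucket should hold roughly 100000 / 12 ≈ 8,333 values (checked below within ±1,000).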
	int i;
	int h[4 + 8] = {0};
	for (i = 0; i < 100000; i++)
	{
		REQUIRE(xt->data.f32[i] > -8 - 1e-5, "it must be bigger than lower bound");
		REQUIRE(xt->data.f32[i] < 4 + 1e-5, "and smaller than upper bound");
		int b = (int)roundf(xt->data.f32[i] - 0.5) + 8;
		b = ccv_max(ccv_min(b, 11), 0);
		++h[b];
	}
	const int count = (int)roundf(100000. / (4 + 8));
	for (i = 0; i < 12; i++)
		{ REQUIRE(h[i] >= count - 1000 && h[i] <= count + 1000, "uniform distribution"); }
	ccv_nnc_tensor_free(xt);
	ccv_nnc_graph_free(graph);
	ccv_nnc_tensor_arena_free(tensor_arena);
	ccv_nnc_graph_exec_arena_free(graph_exec_arena);
	ccv_nnc_symbolic_graph_free(symbolic_graph);
}

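// Same uniform-distribution check, but the samples are generated in half precision (16F) and
// converted to 32-bit floats on the host before the bounds and the histogram are verified.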
TEST_CASE("random uniform distribution in half precision")
{
	GUARD_ELSE_RETURN(ccv_nnc_cmd_ok(CCV_NNC_RANDOM_UNIFORM_FORWARD, CCV_NNC_BACKEND_GPU_REF) || ccv_nnc_cmd_ok(CCV_NNC_RANDOM_UNIFORM_FORWARD, CCV_NNC_BACKEND_MPS));
	ccv_nnc_symbolic_graph_t* symbolic_graph = ccv_nnc_symbolic_graph_new();
	const ccv_nnc_tensor_symbol_t x = ccv_nnc_tensor_symbol_new(symbolic_graph, GPU_TENSOR_NHWC(000, 16F, 100000), "x");
	ccv_nnc_graph_exec_symbol_new(symbolic_graph, CMD_RANDOM_UNIFORM_FORWARD(-8, 4), TENSOR_SYMBOL_LIST(), TENSOR_SYMBOL_LIST(x), "random uniform");
	ccv_nnc_graph_exec_symbol_autogen(symbolic_graph, 0, 0, CCV_NNC_AUTOGEN_ALL_EXECS | CCV_NNC_AUTOGEN_SOURCES_AND_DESTINATIONS);
	SYMBOLIC_GRAPH_GEN(symbolic_graph, CCV_NNC_LONG_DOT_GRAPH);
	ccv_nnc_graph_t* graph = 0;
	ccv_nnc_tensor_arena_t* tensor_arena = 0;
	ccv_nnc_graph_exec_arena_t* graph_exec_arena = 0;
	ccv_nnc_symbolic_graph_compile(symbolic_graph, ccv_nnc_default_compile_params, 0, 0, 0, 0, SYMBOLIC_GRAPH_SOURCES(symbolic_graph), SYMBOLIC_GRAPH_DESTINATIONS(symbolic_graph), &graph, &tensor_arena, &graph_exec_arena);
	GRAPH_GEN(graph, CCV_NNC_LONG_DOT_GRAPH);
	ccv_nnc_graph_run(graph, 0, TRAVERSE_FULL, 0, 0);
	ccv_nnc_tensor_t* const x_tensor = ccv_nnc_tensor_from_symbol(tensor_arena, x);
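	// Copy the half-precision result to the host, then convert it to 32-bit floats for checking.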
	ccv_nnc_tensor_t* const x16t = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(16F, 100000), 0);
	ccv_nnc_tensor_t* const xt = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 100000), 0);
	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x_tensor), TENSOR_LIST(x16t), 0);
	ccv_nnc_cmd_exec(CMD_DATATYPE_CONVERSION_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(x16t), TENSOR_LIST(xt), 0);
	int i;
	int h[4 + 8] = {0};
	for (i = 0; i < 100000; i++)
	{
		REQUIRE(xt->data.f32[i] > -8 - 1e-5, "it must be bigger than lower bound");
		REQUIRE(xt->data.f32[i] < 4 + 1e-5, "and smaller than upper bound");
		int b = (int)roundf(xt->data.f32[i] - 0.5) + 8;
		b = ccv_max(ccv_min(b, 11), 0);
		++h[b];
	}
	const int count = (int)roundf(100000. / (4 + 8));
	for (i = 0; i < 12; i++)
		{ REQUIRE(h[i] >= count - 1000 && h[i] <= count + 1000, "uniform distribution"); }
	ccv_nnc_tensor_free(xt);
	ccv_nnc_tensor_free(x16t);
	ccv_nnc_graph_free(graph);
	ccv_nnc_tensor_arena_free(tensor_arena);
	ccv_nnc_graph_exec_arena_free(graph_exec_arena);
	ccv_nnc_symbolic_graph_free(symbolic_graph);
}

#include "case_main.h"