Coverage Report

Created: 2025-05-09 15:33

/home/liu/actions-runner/_work/ccv/ccv/lib/nnc/cmd/unique_consecutive/ccv_nnc_unique_consecutive_cpu_ref.c
  Line| Count|Source
     1|      |#include "ccv.h"
     2|      |#include "ccv_internal.h"
     3|      |#include "nnc/ccv_nnc.h"
     4|      |#include "nnc/ccv_nnc_easy.h"
     5|      |#include "nnc/ccv_nnc_internal.h"
     6|      |#ifdef USE_OPENMP
     7|      |#include <omp.h>
     8|      |#endif
     9|      |#ifdef USE_DISPATCH
    10|      |#include <dispatch/dispatch.h>
    11|      |#endif
    12|      |
    13|      |static int _ccv_nnc_unique_consecutive_forw(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
    14|     6|{
    15|     6|  assert(input_size == 1);
    16|     6|  assert(output_size == 2);
    17|     6|  const ccv_nnc_tensor_view_t* const a = (ccv_nnc_tensor_view_t*)inputs[0];
    18|     6|  const int a_nd = ccv_nnc_tensor_nd(a->info.dim);
    19|     6|  ccv_nnc_tensor_view_t* const b = (ccv_nnc_tensor_view_t*)outputs[0];
    20|     6|  assert(ccv_nnc_tensor_nd(b->info.dim) == a_nd);
    21|     6|  ccv_nnc_tensor_view_t* const indices = (ccv_nnc_tensor_view_t*)outputs[1];
    22|     6|  assert(ccv_nnc_tensor_nd(indices->info.dim) == a_nd);
    23|     6|  assert(indices->info.datatype == CCV_32S);
    24|     6|  assert(CCV_IS_TENSOR_CONTIGUOUS(a));
    25|     6|  assert(CCV_IS_TENSOR_CONTIGUOUS(b));
    26|     6|  assert(CCV_IS_TENSOR_CONTIGUOUS(indices));
    27|     6|  assert(a->info.datatype == b->info.datatype);
    28|     6|  const int count = ccv_nnc_tensor_count(a->info);
    29|     6|  assert(a_nd == 1); // Can only handle 1d tensor for this.
    30|     6|  const int bincount = b->info.dim[0];
    31|     6|  assert(bincount > 0);
    32|     6|  assert(bincount == indices->info.dim[0]);
    33|     6|  int i;
    34|     6|  if (a->info.datatype == CCV_32F)
    35|     3|  {
    36|     3|    b->data.f32[0] = a->data.f32[0];
    37|     3|    float last_val = b->data.f32[0];
    38|     3|    int j = 1;
    39|     3|    int len = 1;
    40|    32|    for (i = 1; i < count; i++)        [i++ region: 29]
    41|    30|      if (a->data.f32[i] != last_val)
    42|    15|      {
    43|    15|        if (j >= bincount)
    44|     1|          break;
    45|    14|        indices->data.i32[j - 1] = len;
    46|    14|        b->data.f32[j] = a->data.f32[i];
    47|    14|        last_val = b->data.f32[j];
    48|    14|        ++j;
    49|    14|        len = 1;
    50|    14|      } else
    51|    15|        ++len;
    52|     3|    if (j <= count)
    53|     3|      indices->data.i32[j - 1] = len;
    54|     5|    for (i = j; i < bincount; i++)     [i++ region: 2]
    55|     2|      b->data.f32[i] = -1, indices->data.i32[i] = 0;
    56|     3|  } else {
    57|     3|    assert(a->info.datatype == CCV_32S);
    58|     3|    b->data.i32[0] = a->data.i32[0];
    59|     3|    int last_val = b->data.i32[0];
    60|     3|    int j = 1;
    61|     3|    int len = 1;
    62|    32|    for (i = 1; i < count; i++)        [i++ region: 29]
    63|    30|      if (a->data.i32[i] != last_val)
    64|    14|      {
    65|    14|        if (j >= bincount)
    66|     1|          break;
    67|    13|        indices->data.i32[j - 1] = len;
    68|    13|        b->data.i32[j] = a->data.i32[i];
    69|    13|        last_val = b->data.i32[j];
    70|    13|        ++j;
    71|    13|        len = 1;
    72|    13|      } else
    73|    16|        ++len;
    74|     3|    if (j <= count)
    75|     3|      indices->data.i32[j - 1] = len;
    76|     6|    for (i = j; i < bincount; i++)     [i++ region: 3]
    77|     3|      b->data.i32[i] = -1, indices->data.i32[i] = 0;
    78|     3|  }
    79|     6|  return CCV_NNC_EXEC_SUCCESS;
    80|     6|}
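
For readability, the run-length logic of the CCV_32F branch above (source lines 36-55) is restated below as a small standalone sketch: plain arrays instead of ccv_nnc tensor views, and the function name plus the main() driver are illustrative only, not part of ccv. The first output receives the value of each consecutive run; the second output (named "indices" in the kernel) receives that run's length; leftover output slots are padded with -1 and 0.

#include <stdio.h>

/* Illustrative sketch only: mirrors the float branch of
 * _ccv_nnc_unique_consecutive_forw, but on plain arrays. */
static int unique_consecutive_f32(const float* a, int count, float* b, int* lengths, int bincount)
{
	b[0] = a[0];
	float last_val = b[0];
	int j = 1, len = 1, i;
	for (i = 1; i < count; i++)
		if (a[i] != last_val) {
			if (j >= bincount)
				break;            /* output is full, stop early */
			lengths[j - 1] = len;     /* close the previous run */
			b[j] = a[i];              /* open a new run */
			last_val = b[j];
			++j;
			len = 1;
		} else
			++len;
	lengths[j - 1] = len;             /* close the final run */
	for (i = j; i < bincount; i++)    /* pad unused output slots */
		b[i] = -1, lengths[i] = 0;
	return j;                         /* number of runs written */
}

int main(void)
{
	const float a[] = { 1, 1, 2, 3, 3, 3 };
	float b[6];
	int lengths[6], i;
	const int runs = unique_consecutive_f32(a, 6, b, lengths, 6);
	/* Prints: b = 1 2 3 -1 -1 -1, lengths = 2 1 3 0 0 0, runs = 3 */
	for (i = 0; i < 6; i++)
		printf("%g/%d ", b[i], lengths[i]);
	printf("runs = %d\n", runs);
	return 0;
}
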
    81|      |
    82|      |static int _ccv_nnc_unique_consecutive_back(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
    83|     0|{
    84|     0|  return CCV_NNC_EXEC_INVALID;
    85|     0|}
    86|      |
    87|      |REGISTER_COMMAND_BACKEND(CCV_NNC_UNIQUE_CONSECUTIVE_FORWARD, CCV_NNC_BACKEND_CPU_REF)(ccv_nnc_cmd_backend_registry_t* const registry)
    88|     1|{
    89|     1|  registry->tensor_formats = CCV_TENSOR_FORMAT_NHWC | CCV_TENSOR_FORMAT_NCHW;
    90|     1|  registry->tensor_datatypes = CCV_32F | CCV_32S;
    91|     1|  registry->tensor_memory = CCV_TENSOR_CPU_MEMORY;
    92|     1|  registry->algorithms = 1;
    93|     1|  registry->exec = _ccv_nnc_unique_consecutive_forw;
    94|     1|}
    95|      |
    96|      |REGISTER_COMMAND_BACKEND(CCV_NNC_UNIQUE_CONSECUTIVE_BACKWARD, CCV_NNC_BACKEND_CPU_REF)(ccv_nnc_cmd_backend_registry_t* const registry)
    97|     1|{
    98|     1|  registry->tensor_formats = CCV_TENSOR_FORMAT_NHWC | CCV_TENSOR_FORMAT_NCHW;
    99|     1|  registry->tensor_datatypes = CCV_32F | CCV_32S;
   100|     1|  registry->tensor_memory = CCV_TENSOR_CPU_MEMORY;
   101|     1|  registry->algorithms = 1;
   102|     1|  registry->exec = _ccv_nnc_unique_consecutive_back;
   103|     1|}
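
The two REGISTER_COMMAND_BACKEND blocks wire these kernels into the ccv_nnc dispatcher for 1-D CCV_32F/CCV_32S CPU tensors. A rough invocation sketch follows, modeled on how other ccv_nnc commands are typically exercised; the CMD_UNIQUE_CONSECUTIVE_FORWARD() macro, CPU_TENSOR_NHWC, TENSOR_LIST, and the ccv_nnc_init() call are assumptions not confirmed by this report.

#include <string.h>
#include "nnc/ccv_nnc.h"
#include "nnc/ccv_nnc_easy.h"

int main(void)
{
	ccv_nnc_init(); /* assumption: makes the CPU_REF backends registered above available */
	/* A 1-D CCV_32F input and the two required outputs: unique run values + run lengths. */
	ccv_nnc_tensor_t* const a = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 6), 0);
	ccv_nnc_tensor_t* const b = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32F, 6), 0);
	ccv_nnc_tensor_t* const indices = ccv_nnc_tensor_new(0, CPU_TENSOR_NHWC(32S, 6), 0);
	const float values[] = { 1, 1, 2, 3, 3, 3 };
	memcpy(a->data.f32, values, sizeof(values));
	/* CMD_UNIQUE_CONSECUTIVE_FORWARD() is assumed to follow the usual CMD_* convention. */
	ccv_nnc_cmd_exec(CMD_UNIQUE_CONSECUTIVE_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(a), TENSOR_LIST(b, indices), 0);
	/* Per the kernel above: b->data.f32 = {1, 2, 3, -1, -1, -1}, indices->data.i32 = {2, 1, 3, 0, 0, 0}. */
	ccv_nnc_tensor_free(a);
	ccv_nnc_tensor_free(b);
	ccv_nnc_tensor_free(indices);
	return 0;
}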