Coverage Report

Created: 2021-04-06 02:31

/home/liu/buildslave/linux-x64-runtests/build/lib/nnc/cmd/ew/ccv_nnc_ew.c
Line
Count
Source (jump to first uncovered line)
1
#include "ccv.h"
2
#include "nnc/ccv_nnc.h"
3
#include "nnc/ccv_nnc_internal.h"
4
5
// Inplace predicate shared by the element-wise commands: any output may be
// computed in place over (alias) any input, regardless of slot position.
// The index/size parameters are required by the registry callback signature
// and are deliberately ignored here.
static int _ccv_nnc_arbitary_inplace(const int input_idx, const int input_size, const int output_idx, const int output_size)
{
	return 1;
}
9
10
// Bitmask validator for element-wise sum forward.
// Valid iff there is exactly one bound output (slot 0) and the bound inputs
// form one contiguous run starting at slot 0 (bit pattern 111..000 across the
// input bitmask words) whose length equals input_size.
static int _ccv_nnc_ewsum_forw_bitmask(const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size)
{
	// Exactly one output, and it must be bound.
	if (output_size != 1 || output_bitmasks[0] != 1)
		return 0;
	int word;
	int seen_gap = 0; // set once a word's leading run ends before bit 64
	int bound_inputs = 0; // total length of the leading run of 1s
	for (word = 0; word < input_bitmask_size; word++)
	{
		const uint64_t mask = input_bitmasks[word];
		int bit = 0;
		while (bit < 64 && (mask & ((uint64_t)1 << bit)))
		{
			if (seen_gap) // a 1 after an earlier gap -> not of the form 111..000
				return 0;
			++bit;
		}
		bound_inputs += bit;
		if (bit < 64)
			seen_gap = 1;
		// No stray 1s are allowed above the leading run within this word.
		for (; bit < 64; bit++)
			if (mask & ((uint64_t)1 << bit))
				return 0;
	}
	// Every declared input must be bound, and nothing beyond that.
	return input_size == bound_inputs;
}
39
40
// Bitmask validator for element-wise sum backward.
// Requires the incoming gradient in input slot 0 (bit 0); the bound outputs
// must form one contiguous run starting at slot 0 (bit pattern 111..000)
// whose length equals output_size.
static int _ccv_nnc_ewsum_back_bitmask(const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size)
{
	// The gradient must be present; other inputs are not inspected.
	if (input_size < 1 || !(input_bitmasks[0] & 1u))
		return 0;
	int word;
	int seen_gap = 0; // set once a word's leading run ends before bit 64
	int bound_outputs = 0; // total length of the leading run of 1s
	for (word = 0; word < output_bitmask_size; word++)
	{
		const uint64_t mask = output_bitmasks[word];
		int bit = 0;
		while (bit < 64 && (mask & ((uint64_t)1 << bit)))
		{
			if (seen_gap) // a 1 after an earlier gap -> not of the form 111..000
				return 0;
			++bit;
		}
		bound_outputs += bit;
		if (bit < 64)
			seen_gap = 1;
		// No stray 1s past the leading run within this word.
		for (; bit < 64; bit++)
			if (mask & ((uint64_t)1 << bit))
				return 0;
	}
	return output_size == bound_outputs;
}
69
70
// Register the element-wise sum forward command.
// NOTE(review): FIND_BACKEND lists the backend source files and is presumably
// consumed by build-time code generation — confirm before reformatting.
REGISTER_COMMAND(CCV_NNC_EWSUM_FORWARD)(ccv_nnc_cmd_registry_t* const registry)
	FIND_BACKEND(ccv_nnc_ew_cpu_ref.c, gpu/ccv_nnc_ew_gpu_cudnn.cu)
{
	registry->bitmask = _ccv_nnc_ewsum_forw_bitmask; // validates bound tensor slots
	registry->tensor_auto = ccv_nnc_hint_tensor_auto_forward_from_inputs; // infer output params from inputs
	registry->allow_inplace = _ccv_nnc_arbitary_inplace; // any output may alias any input
}
77
78
// Register the element-wise sum backward command.
REGISTER_COMMAND(CCV_NNC_EWSUM_BACKWARD)(ccv_nnc_cmd_registry_t* const registry)
	FIND_BACKEND(ccv_nnc_ew_cpu_ref.c, gpu/ccv_nnc_ew_gpu_cudnn.cu)
{
	// PASSTHROUGH + NULL_IS_ONES (per flag names: gradient passes through;
	// a null tensor is treated as all-ones).
	registry->flags = CCV_NNC_CMD_ATTR_PASSTHROUGH | CCV_NNC_CMD_ATTR_NULL_IS_ONES;
	registry->bitmask = _ccv_nnc_ewsum_back_bitmask; // validates bound tensor slots
	registry->tensor_auto = ccv_nnc_hint_tensor_auto_backward_from_gradient; // infer params from gradient
	registry->allow_inplace = _ccv_nnc_arbitary_inplace; // any output may alias any input
}
86
87
// Convenience constructors: build a ccv_nnc_cmd_t for EWSUM with no flags and
// auto parameters. NOTE(review): the //@REGISTER_EASY_COMMAND_MACRO markers
// are presumably scanned by tooling — keep each directly above its #define.
//@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_EWSUM_FORWARD)
#define CMD_EWSUM_FORWARD() ccv_nnc_cmd(CCV_NNC_EWSUM_FORWARD, 0, ccv_nnc_cmd_auto, 0)
//@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_EWSUM_BACKWARD)
#define CMD_EWSUM_BACKWARD() ccv_nnc_cmd(CCV_NNC_EWSUM_BACKWARD, 0, ccv_nnc_cmd_auto, 0)
91
92
// Bitmask validator for element-wise product forward.
// Valid iff there is exactly one bound output (slot 0) and the bound inputs
// occupy one contiguous run of slots starting at 0 (bit pattern 111..000)
// of length exactly input_size.
static int _ccv_nnc_ewprod_forw_bitmask(const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size)
{
	if (output_size != 1 || output_bitmasks[0] != 1)
		return 0;
	int w, b;
	int gap = 0; // a previous word ended its run before bit 64
	int run_len = 0; // combined length of the leading run of 1s
	for (w = 0; w < input_bitmask_size; w++)
	{
		const uint64_t m = input_bitmasks[w];
		// Walk the leading run of set bits; any set bit after a gap is invalid.
		for (b = 0; b < 64 && (m & ((uint64_t)1 << b)); b++)
			if (gap)
				return 0;
		run_len += b;
		if (b < 64)
			gap = 1;
		// Reject any set bit above the leading run (no 1110010101 shapes).
		for (; b < 64; b++)
			if (m & ((uint64_t)1 << b))
				return 0;
	}
	return input_size == run_len;
}
121
122
// Bitmask validator for element-wise product backward.
// Both input and output bitmasks must be a single leading run of 1s
// (111..000). The outputs must all be bound (run length == output_size) and
// the inputs must number exactly output_size + 2: the incoming gradient and
// the original output, plus one original input per produced gradient.
// Note: input_size itself is not consulted (matches the registry contract
// exercised by callers of this validator).
static int _ccv_nnc_ewprod_back_bitmask(const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size)
{
	int w, b;
	int in_gap = 0, in_run = 0;
	for (w = 0; w < input_bitmask_size; w++)
	{
		const uint64_t m = input_bitmasks[w];
		// Leading run of set bits; a set bit after an earlier gap is invalid.
		for (b = 0; b < 64 && (m & ((uint64_t)1 << b)); b++)
			if (in_gap)
				return 0;
		in_run += b;
		if (b < 64)
			in_gap = 1;
		// Always like 1111100000, never 1110010101.
		for (; b < 64; b++)
			if (m & ((uint64_t)1 << b))
				return 0;
	}
	int out_gap = 0, out_run = 0;
	for (w = 0; w < output_bitmask_size; w++)
	{
		const uint64_t m = output_bitmasks[w];
		for (b = 0; b < 64 && (m & ((uint64_t)1 << b)); b++)
			if (out_gap)
				return 0;
		out_run += b;
		if (b < 64)
			out_gap = 1;
		for (; b < 64; b++)
			if (m & ((uint64_t)1 << b))
				return 0;
	}
	if (out_run != output_size)
		return 0;
	return out_run + 2 /* gradient + original output */ == in_run;
}
166
167
// Register the element-wise product forward command (CPU reference backend only).
REGISTER_COMMAND(CCV_NNC_EWPROD_FORWARD)(ccv_nnc_cmd_registry_t* const registry)
	FIND_BACKEND(ccv_nnc_ew_cpu_ref.c)
{
	registry->bitmask = _ccv_nnc_ewprod_forw_bitmask; // validates bound tensor slots
	registry->tensor_auto = ccv_nnc_hint_tensor_auto_forward_from_inputs; // infer output params from inputs
	registry->allow_inplace = _ccv_nnc_arbitary_inplace; // any output may alias any input
}
174
175
// Register the element-wise product backward command.
// Note: unlike most siblings in this file, no allow_inplace is set here.
REGISTER_COMMAND(CCV_NNC_EWPROD_BACKWARD)(ccv_nnc_cmd_registry_t* const registry)
	FIND_BACKEND(ccv_nnc_ew_cpu_ref.c)
{
	registry->flags = CCV_NNC_CMD_ATTR_NULL_IS_ONES; // null tensors treated as all-ones (per flag name)
	registry->bitmask = _ccv_nnc_ewprod_back_bitmask; // validates bound tensor slots
	registry->tensor_auto = ccv_nnc_hint_tensor_auto_backward_from_gradient; // infer params from gradient
}
182
183
// Convenience constructors for EWPROD commands (no flags, auto parameters).
// NOTE(review): keep each //@ marker directly above its #define — presumably
// scanned by tooling.
//@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_EWPROD_FORWARD)
#define CMD_EWPROD_FORWARD() ccv_nnc_cmd(CCV_NNC_EWPROD_FORWARD, 0, ccv_nnc_cmd_auto, 0)
//@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_EWPROD_BACKWARD)
#define CMD_EWPROD_BACKWARD() ccv_nnc_cmd(CCV_NNC_EWPROD_BACKWARD, 0, ccv_nnc_cmd_auto, 0)
187
188
// Bitmask validator for element-wise division forward.
// Input slots: numerator (bit 0, optional — a null numerator means all-ones)
// and denominator (bit 1, required). Output slot 0 must be bound.
static int _ccv_nnc_ewdiv_forw_bitmask(const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size)
{
	const uint64_t lo2 = input_bitmasks[0] & 3u;
	// Denominator alone (2u) or numerator + denominator (3u) are acceptable.
	if (lo2 != 3u && lo2 != 2u)
		return 0;
	return output_bitmasks[0] == 1u ? 1 : 0;
}
197
198
// Bitmask validator for element-wise division backward.
// Input bits 0..3 are inspected with bit 1 masked out (the forward op's first
// input is never needed): bit 0 = incoming gradient, bit 2 = denominator,
// bit 3 = original forward output.
static int _ccv_nnc_ewdiv_back_bitmask(const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size)
{
	const uint64_t relevant = input_bitmasks[0] & (15u & ~((uint64_t)1u << 1));
	const uint64_t with_output = ((uint64_t)1 << 0) | ((uint64_t)1 << 2) | ((uint64_t)1 << 3);
	const uint64_t without_output = ((uint64_t)1 << 0) | ((uint64_t)1 << 2);
	if (relevant == with_output)
	{
		const uint64_t outs = output_bitmasks[0];
		// Both output gradients (bits 0+1), or only the second (bit 1), are fine
		// when the original output is available.
		return (outs == 3u || outs == 2u) ? 1 : 0;
	}
	if (relevant == without_output)
		// Only the first output gradient: the original output is not required.
		return output_bitmasks[0] == 1u ? 1 : 0;
	return 0;
}
209
210
// Register the element-wise division forward command.
REGISTER_COMMAND(CCV_NNC_EWDIV_FORWARD)(ccv_nnc_cmd_registry_t* const registry)
	FIND_BACKEND(ccv_nnc_ew_cpu_ref.c, gpu/ccv_nnc_ew_gpu_ref.cu)
{
	registry->flags = CCV_NNC_CMD_ATTR_NULL_IS_ONES; // a null numerator counts as all-ones
	registry->bitmask = _ccv_nnc_ewdiv_forw_bitmask; // validates bound tensor slots
	registry->tensor_auto = ccv_nnc_hint_tensor_auto_forward_from_inputs; // infer output params from inputs
	registry->allow_inplace = _ccv_nnc_arbitary_inplace; // any output may alias any input
}
218
219
// Register the element-wise division backward command (no allow_inplace here).
REGISTER_COMMAND(CCV_NNC_EWDIV_BACKWARD)(ccv_nnc_cmd_registry_t* const registry)
	FIND_BACKEND(ccv_nnc_ew_cpu_ref.c, gpu/ccv_nnc_ew_gpu_ref.cu)
{
	registry->flags = CCV_NNC_CMD_ATTR_NULL_IS_ONES; // null tensors treated as all-ones (per flag name)
	registry->bitmask = _ccv_nnc_ewdiv_back_bitmask; // validates bound tensor slots
	registry->tensor_auto = ccv_nnc_hint_tensor_auto_backward_from_gradient; // infer params from gradient
}
226
227
// Convenience constructors for EWDIV commands (no flags, auto parameters).
// NOTE(review): keep each //@ marker directly above its #define — presumably
// scanned by tooling.
//@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_EWDIV_FORWARD)
#define CMD_EWDIV_FORWARD() ccv_nnc_cmd(CCV_NNC_EWDIV_FORWARD, 0, ccv_nnc_cmd_auto, 0)
//@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_EWDIV_BACKWARD)
#define CMD_EWDIV_BACKWARD() ccv_nnc_cmd(CCV_NNC_EWDIV_BACKWARD, 0, ccv_nnc_cmd_auto, 0)
231
232
// Bitmask validator for element-wise exp forward: a unary op — input slot 0
// and output slot 0 must both be bound.
static int _ccv_nnc_ewexp_forw_bitmask(const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size)
{
	if (!(input_bitmasks[0] & 1u))
		return 0;
	return output_bitmasks[0] == 1u ? 1 : 0;
}
238
239
// Bitmask validator for element-wise exp backward.
// Requires the gradient (bit 0) and the forward output (bit 2); the original
// input (bit 1) is masked out — d/dx exp(x) is exp(x) itself, already computed.
static int _ccv_nnc_ewexp_back_bitmask(const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size)
{
	const uint64_t needed = ((uint64_t)1 << 0) | ((uint64_t)1 << 2);
	if ((input_bitmasks[0] & (7u & ~((uint64_t)1u << 1))) != needed)
		return 0;
	return output_bitmasks[0] == 1u ? 1 : 0;
}
246
247
// Register the element-wise exp forward command (CPU reference backend only).
REGISTER_COMMAND(CCV_NNC_EWEXP_FORWARD)(ccv_nnc_cmd_registry_t* const registry)
	FIND_BACKEND(ccv_nnc_ew_cpu_ref.c)
{
	registry->bitmask = _ccv_nnc_ewexp_forw_bitmask; // validates bound tensor slots
	registry->tensor_auto = ccv_nnc_hint_tensor_auto_forward_from_inputs; // infer output params from inputs
	registry->allow_inplace = _ccv_nnc_arbitary_inplace; // any output may alias any input
}
254
255
// Register the element-wise exp backward command.
REGISTER_COMMAND(CCV_NNC_EWEXP_BACKWARD)(ccv_nnc_cmd_registry_t* const registry)
	FIND_BACKEND(ccv_nnc_ew_cpu_ref.c)
{
	registry->flags = CCV_NNC_CMD_ATTR_NULL_IS_ONES; // null tensors treated as all-ones (per flag name)
	registry->bitmask = _ccv_nnc_ewexp_back_bitmask; // validates bound tensor slots
	registry->tensor_auto = ccv_nnc_hint_tensor_auto_backward_from_gradient; // infer params from gradient
	registry->allow_inplace = _ccv_nnc_arbitary_inplace; // any output may alias any input
}
263
264
// Convenience constructors for EWEXP commands (no flags, auto parameters).
// NOTE(review): keep each //@ marker directly above its #define — presumably
// scanned by tooling.
//@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_EWEXP_FORWARD)
#define CMD_EWEXP_FORWARD() ccv_nnc_cmd(CCV_NNC_EWEXP_FORWARD, 0, ccv_nnc_cmd_auto, 0)
//@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_EWEXP_BACKWARD)
#define CMD_EWEXP_BACKWARD() ccv_nnc_cmd(CCV_NNC_EWEXP_BACKWARD, 0, ccv_nnc_cmd_auto, 0)
268
269
// Bitmask validator for element-wise log forward: a unary op — input slot 0
// and output slot 0 must both be bound.
static int _ccv_nnc_ewlog_forw_bitmask(const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size)
{
	if (!(input_bitmasks[0] & 1u))
		return 0;
	return output_bitmasks[0] == 1u ? 1 : 0;
}
275
276
// Bitmask validator for element-wise log backward.
// Requires the gradient (bit 0) and the original input (bit 1); the original
// output is irrelevant since d/dx log(x) = 1/x depends only on the input.
static int _ccv_nnc_ewlog_back_bitmask(const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size)
{
	if ((input_bitmasks[0] & 3u) != 3u)
		return 0;
	return output_bitmasks[0] == 1u ? 1 : 0;
}
283
284
// Register the element-wise log forward command (CPU reference backend only).
REGISTER_COMMAND(CCV_NNC_EWLOG_FORWARD)(ccv_nnc_cmd_registry_t* const registry)
	FIND_BACKEND(ccv_nnc_ew_cpu_ref.c)
{
	registry->bitmask = _ccv_nnc_ewlog_forw_bitmask; // validates bound tensor slots
	registry->tensor_auto = ccv_nnc_hint_tensor_auto_forward_from_inputs; // infer output params from inputs
	registry->allow_inplace = _ccv_nnc_arbitary_inplace; // any output may alias any input
}
291
292
// Register the element-wise log backward command.
REGISTER_COMMAND(CCV_NNC_EWLOG_BACKWARD)(ccv_nnc_cmd_registry_t* const registry)
	FIND_BACKEND(ccv_nnc_ew_cpu_ref.c)
{
	registry->flags = CCV_NNC_CMD_ATTR_NULL_IS_ONES; // null tensors treated as all-ones (per flag name)
	registry->bitmask = _ccv_nnc_ewlog_back_bitmask; // validates bound tensor slots
	registry->tensor_auto = ccv_nnc_hint_tensor_auto_backward_from_gradient; // infer params from gradient
	registry->allow_inplace = _ccv_nnc_arbitary_inplace; // any output may alias any input
}
300
301
// Convenience constructors for EWLOG commands (no flags, auto parameters).
// NOTE(review): keep each //@ marker directly above its #define — presumably
// scanned by tooling.
//@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_EWLOG_FORWARD)
#define CMD_EWLOG_FORWARD() ccv_nnc_cmd(CCV_NNC_EWLOG_FORWARD, 0, ccv_nnc_cmd_auto, 0)
//@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_EWLOG_BACKWARD)
#define CMD_EWLOG_BACKWARD() ccv_nnc_cmd(CCV_NNC_EWLOG_BACKWARD, 0, ccv_nnc_cmd_auto, 0)
305
306
// Bitmask validator for element-wise sqrt forward: a unary op — input slot 0
// and output slot 0 must both be bound.
static int _ccv_nnc_ewsqrt_forw_bitmask(const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size)
{
	if (!(input_bitmasks[0] & 1u))
		return 0;
	return output_bitmasks[0] == 1u ? 1 : 0;
}
312
313
// Bitmask validator for element-wise sqrt backward.
// Requires the gradient (bit 0) and the forward output (bit 2); the original
// input (bit 1) is masked out — d/dx sqrt(x) = 1/(2*sqrt(x)) needs only the output.
static int _ccv_nnc_ewsqrt_back_bitmask(const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size)
{
	const uint64_t needed = ((uint64_t)1 << 0) | ((uint64_t)1 << 2);
	if ((input_bitmasks[0] & (7u & ~((uint64_t)1u << 1))) != needed)
		return 0;
	return output_bitmasks[0] == 1u ? 1 : 0;
}
320
321
// Register the element-wise sqrt forward command (CPU reference backend only).
REGISTER_COMMAND(CCV_NNC_EWSQRT_FORWARD)(ccv_nnc_cmd_registry_t* const registry)
	FIND_BACKEND(ccv_nnc_ew_cpu_ref.c)
{
	registry->bitmask = _ccv_nnc_ewsqrt_forw_bitmask; // validates bound tensor slots
	registry->tensor_auto = ccv_nnc_hint_tensor_auto_forward_from_inputs; // infer output params from inputs
	registry->allow_inplace = _ccv_nnc_arbitary_inplace; // any output may alias any input
}
328
329
// Register the element-wise sqrt backward command.
REGISTER_COMMAND(CCV_NNC_EWSQRT_BACKWARD)(ccv_nnc_cmd_registry_t* const registry)
	FIND_BACKEND(ccv_nnc_ew_cpu_ref.c)
{
	registry->flags = CCV_NNC_CMD_ATTR_NULL_IS_ONES; // null tensors treated as all-ones (per flag name)
	registry->bitmask = _ccv_nnc_ewsqrt_back_bitmask; // validates bound tensor slots
	registry->tensor_auto = ccv_nnc_hint_tensor_auto_backward_from_gradient; // infer params from gradient
	registry->allow_inplace = _ccv_nnc_arbitary_inplace; // any output may alias any input
}
337
338
// Convenience constructors for EWSQRT commands (no flags, auto parameters).
// NOTE(review): keep each //@ marker directly above its #define — presumably
// scanned by tooling.
//@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_EWSQRT_FORWARD)
#define CMD_EWSQRT_FORWARD() ccv_nnc_cmd(CCV_NNC_EWSQRT_FORWARD, 0, ccv_nnc_cmd_auto, 0)
//@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_EWSQRT_BACKWARD)
#define CMD_EWSQRT_BACKWARD() ccv_nnc_cmd(CCV_NNC_EWSQRT_BACKWARD, 0, ccv_nnc_cmd_auto, 0)
342
343
// Bitmask validator for clamp forward: a unary op — input slot 0 and output
// slot 0 must both be bound (min/max come from the command parameters).
static int _ccv_nnc_clamp_forw_bitmask(const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size)
{
	if (!(input_bitmasks[0] & 1u))
		return 0;
	return output_bitmasks[0] == 1u ? 1 : 0;
}
349
350
// Bitmask validator for clamp backward.
// Requires the gradient (bit 0) and the forward output (bit 2); the original
// input (bit 1) is masked out and not required.
static int _ccv_nnc_clamp_back_bitmask(const int input_size, const int output_size, const uint64_t* const input_bitmasks, const int input_bitmask_size, const uint64_t* const output_bitmasks, const int output_bitmask_size)
{
	const uint64_t needed = ((uint64_t)1 << 0) | ((uint64_t)1 << 2);
	if ((input_bitmasks[0] & (7u & ~((uint64_t)1u << 1))) != needed)
		return 0;
	return output_bitmasks[0] == 1u ? 1 : 0;
}
357
358
// Register the clamp forward command.
REGISTER_COMMAND(CCV_NNC_CLAMP_FORWARD)(ccv_nnc_cmd_registry_t* const registry)
	FIND_BACKEND(ccv_nnc_ew_cpu_ref.c, gpu/ccv_nnc_ew_gpu_ref.cu)
{
	registry->bitmask = _ccv_nnc_clamp_forw_bitmask; // validates bound tensor slots
	registry->tensor_auto = ccv_nnc_hint_tensor_auto_forward_from_inputs; // infer output params from inputs
	registry->allow_inplace = _ccv_nnc_arbitary_inplace; // any output may alias any input
}
365
366
// Register the clamp backward command.
REGISTER_COMMAND(CCV_NNC_CLAMP_BACKWARD)(ccv_nnc_cmd_registry_t* const registry)
	FIND_BACKEND(ccv_nnc_ew_cpu_ref.c, gpu/ccv_nnc_ew_gpu_ref.cu)
{
	registry->flags = CCV_NNC_CMD_ATTR_NULL_IS_ONES; // null tensors treated as all-ones (per flag name)
	registry->bitmask = _ccv_nnc_clamp_back_bitmask; // validates bound tensor slots
	registry->tensor_auto = ccv_nnc_hint_tensor_auto_backward_from_gradient; // infer params from gradient
	registry->allow_inplace = _ccv_nnc_arbitary_inplace; // any output may alias any input
}
374
375
// Convenience constructors for CLAMP commands: the min/max bounds are packed
// into the command parameters. NOTE(review): keep each //@ marker directly
// above its #define — presumably scanned by tooling.
//@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_CLAMP_FORWARD)
#define CMD_CLAMP_FORWARD(_min, _max) ccv_nnc_cmd(CCV_NNC_CLAMP_FORWARD, 0, (ccv_nnc_cmd_param_t){.size={.dim={1,1,1}},.clamp={.min=_min,.max=_max}}, 0)
//@REGISTER_EASY_COMMAND_MACRO(CCV_NNC_CLAMP_BACKWARD)
#define CMD_CLAMP_BACKWARD(_min, _max) ccv_nnc_cmd(CCV_NNC_CLAMP_BACKWARD, 0, (ccv_nnc_cmd_param_t){.size={.dim={1,1,1}},.clamp={.min=_min,.max=_max}}, 0)