
Commit 5b2489f

Add detail:: namespace to CUB struct (#708)
1 parent 9070391 commit 5b2489f

File tree

  • include/matx/transforms/cub.h

1 file changed: +8 −8 lines

include/matx/transforms/cub.h

Lines changed: 8 additions & 8 deletions
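All four hunks below make the same change: the EmptyParams_t argument of matxCubPlan_t and of the plan constructor is now written detail::EmptyParams_t. The surrounding calls share a lookup-or-create cache shape (look a plan up by its parameters, build it on a miss, then execute it). The following is a hedged sketch of that general pattern with stand-in names (PlanCache, Plan, make_fn, exec_fn); it is not MatX's actual cache API.

// Hedged sketch of a lookup-or-create plan cache, using generic stand-in types.
// The key type must be ordered (usable with std::map); all names are illustrative.
#include <functional>
#include <map>
#include <memory>

template <typename Key, typename Plan>
class PlanCache {
public:
  // Look up a cached plan by key; create it with make_fn on a miss, then run exec_fn on it.
  void LookupAndExec(const Key &key,
                     const std::function<std::shared_ptr<Plan>()> &make_fn,
                     const std::function<void(std::shared_ptr<Plan>)> &exec_fn) {
    auto it = cache_.find(key);
    if (it == cache_.end()) {
      it = cache_.emplace(key, make_fn()).first;   // cache miss: build and store the plan
    }
    exec_fn(it->second);                           // always execute with the cached plan
  }

private:
  std::map<Key, std::shared_ptr<Plan>> cache_;
};

// Usage with stand-in types:
//   PlanCache<int, MyPlan> cache;
//   cache.LookupAndExec(42, []{ return std::make_shared<MyPlan>(); },
//                       [](std::shared_ptr<MyPlan> p) { p->Exec(); });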
@@ -1225,12 +1225,12 @@ void cub_sum(OutputTensor &a_out, const InputOperator &a,
                              InputOperator,
                              detail::CUB_OP_REDUCE_SUM>::GetCubParams(a_out, a, stream);
 
-  using cache_val_type = detail::matxCubPlan_t<OutputTensor, InputOperator, detail::CUB_OP_REDUCE_SUM, EmptyParams_t>;
+  using cache_val_type = detail::matxCubPlan_t<OutputTensor, InputOperator, detail::CUB_OP_REDUCE_SUM, detail::EmptyParams_t>;
   detail::GetCache().LookupAndExec<detail::cub_cache_t>(
     detail::GetCacheIdFromType<detail::cub_cache_t>(),
     params,
     [&]() {
-      return std::make_shared<cache_val_type>(a_out, a, EmptyParams_t{}, stream);
+      return std::make_shared<cache_val_type>(a_out, a, detail::EmptyParams_t{}, stream);
     },
     [&](std::shared_ptr<cache_val_type> ctype) {
       ctype->ExecSum(a_out, a, stream);
@@ -1270,12 +1270,12 @@ void cub_min(OutputTensor &a_out, const InputOperator &a,
                              InputOperator,
                              detail::CUB_OP_REDUCE_MIN>::GetCubParams(a_out, a, stream);
 
-  using cache_val_type = detail::matxCubPlan_t<OutputTensor, InputOperator, detail::CUB_OP_REDUCE_MIN, EmptyParams_t>;
+  using cache_val_type = detail::matxCubPlan_t<OutputTensor, InputOperator, detail::CUB_OP_REDUCE_MIN, detail::EmptyParams_t>;
   detail::GetCache().LookupAndExec<detail::cub_cache_t>(
     detail::GetCacheIdFromType<detail::cub_cache_t>(),
     params,
     [&]() {
-      return std::make_shared<cache_val_type>(a_out, a, EmptyParams_t{}, stream);
+      return std::make_shared<cache_val_type>(a_out, a, detail::EmptyParams_t{}, stream);
     },
     [&](std::shared_ptr<cache_val_type> ctype) {
       ctype->ExecMin(a_out, a, stream);
@@ -1316,12 +1316,12 @@ void cub_max(OutputTensor &a_out, const InputOperator &a,
                              InputOperator,
                              detail::CUB_OP_REDUCE_MAX>::GetCubParams(a_out, a, stream);
 
-  using cache_val_type = detail::matxCubPlan_t<OutputTensor, InputOperator, detail::CUB_OP_REDUCE_MAX, EmptyParams_t>;
+  using cache_val_type = detail::matxCubPlan_t<OutputTensor, InputOperator, detail::CUB_OP_REDUCE_MAX, detail::EmptyParams_t>;
   detail::GetCache().LookupAndExec<detail::cub_cache_t>(
     detail::GetCacheIdFromType<detail::cub_cache_t>(),
     params,
     [&]() {
-      return std::make_shared<cache_val_type>(a_out, a, EmptyParams_t{}, stream);
+      return std::make_shared<cache_val_type>(a_out, a, detail::EmptyParams_t{}, stream);
     },
     [&](std::shared_ptr<cache_val_type> ctype) {
       ctype->ExecMax(a_out, a, stream);
@@ -1475,12 +1475,12 @@ void cumsum_impl(OutputTensor &a_out, const InputOperator &a,
   auto params =
       detail::matxCubPlan_t<OutputTensor, InputOperator, detail::CUB_OP_INC_SUM>::GetCubParams(a_out, a, stream);
 
-  using cache_val_type = detail::matxCubPlan_t<OutputTensor, InputOperator, detail::CUB_OP_INC_SUM, EmptyParams_t>;
+  using cache_val_type = detail::matxCubPlan_t<OutputTensor, InputOperator, detail::CUB_OP_INC_SUM, detail::EmptyParams_t>;
   detail::GetCache().LookupAndExec<detail::cub_cache_t>(
     detail::GetCacheIdFromType<detail::cub_cache_t>(),
     params,
     [&]() {
-      return std::make_shared<cache_val_type>(a_out, a, EmptyParams_t{}, stream);
+      return std::make_shared<cache_val_type>(a_out, a, detail::EmptyParams_t{}, stream);
     },
     [&](std::shared_ptr<cache_val_type> ctype) {
       ctype->ExecPrefixScanEx(a_out, a, stream);
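
The commit title suggests the EmptyParams_t type referenced here belongs in a nested detail namespace, so unqualified uses only resolve for code that is itself inside that namespace. Below is a minimal, self-contained sketch of that situation with hypothetical names (mylib, Plan_t, make_plan); it is not the MatX source, only an illustration of the name lookup the detail:: prefix makes explicit.

// Minimal sketch, assuming an EmptyParams_t-style tag type defined inside a
// nested detail namespace. All names here (mylib, Plan_t, make_plan) are
// hypothetical stand-ins, not MatX's.
#include <memory>

namespace mylib {
namespace detail {

struct EmptyParams_t {};            // tag type defined inside detail::

template <typename Params>
struct Plan_t {
  explicit Plan_t(Params p) : params_(p) {}
  Params params_;
};

} // namespace detail

// Caller sitting in the enclosing namespace: an unqualified EmptyParams_t is
// not visible here, so the nested namespace has to be spelled out.
inline std::shared_ptr<detail::Plan_t<detail::EmptyParams_t>> make_plan() {
  using cache_val_type = detail::Plan_t<detail::EmptyParams_t>;
  return std::make_shared<cache_val_type>(detail::EmptyParams_t{});
}

} // namespace mylib

int main() {
  return mylib::make_plan() ? 0 : 1;  // compiles only with the detail:: prefix
}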
