Skip to content

Commit b9ce3a5

Browse files
committed
update to correct empty param for cub plan
1 parent 1969207 commit b9ce3a5

File tree

1 file changed

+8
-12
lines changed
  • include/matx/transforms

1 file changed

+8
-12
lines changed

include/matx/transforms/cub.h

Lines changed: 8 additions & 12 deletions
Original file line number | Diff line number | Diff line change
@@ -1225,13 +1225,12 @@ void cub_sum(OutputTensor &a_out, const InputOperator &a,
12251225
InputOperator,
12261226
detail::CUB_OP_REDUCE_SUM>::GetCubParams(a_out, a, stream);
12271227

1228-
using cache_val_type = detail::matxCubPlan_t<OutputTensor, InputOperator, detail::CUB_OP_REDUCE_SUM, int>;
1228+
using cache_val_type = detail::matxCubPlan_t<OutputTensor, InputOperator, detail::CUB_OP_REDUCE_SUM, EmptyParams_t>;
12291229
detail::GetCache().LookupAndExec<detail::cub_cache_t>(
12301230
detail::GetCacheIdFromType<detail::cub_cache_t>(),
12311231
params,
12321232
[&]() {
1233-
int test; ///\todo TYLER_TODO: this should not be needed
1234-
return std::make_shared<cache_val_type>(a_out, a, test, stream);
1233+
return std::make_shared<cache_val_type>(a_out, a, EmptyParams_t{}, stream);
12351234
},
12361235
[&](std::shared_ptr<cache_val_type> ctype) {
12371236
ctype->ExecSum(a_out, a, stream);
@@ -1271,13 +1270,12 @@ void cub_min(OutputTensor &a_out, const InputOperator &a,
12711270
InputOperator,
12721271
detail::CUB_OP_REDUCE_MIN>::GetCubParams(a_out, a, stream);
12731272

1274-
using cache_val_type = detail::matxCubPlan_t<OutputTensor, InputOperator, detail::CUB_OP_REDUCE_MIN, int>;
1273+
using cache_val_type = detail::matxCubPlan_t<OutputTensor, InputOperator, detail::CUB_OP_REDUCE_MIN, EmptyParams_t>;
12751274
detail::GetCache().LookupAndExec<detail::cub_cache_t>(
12761275
detail::GetCacheIdFromType<detail::cub_cache_t>(),
12771276
params,
12781277
[&]() {
1279-
int test; ///\todo TYLER_TODO: this should not be needed
1280-
return std::make_shared<cache_val_type>(a_out, a, test, stream);
1278+
return std::make_shared<cache_val_type>(a_out, a, EmptyParams_t{}, stream);
12811279
},
12821280
[&](std::shared_ptr<cache_val_type> ctype) {
12831281
ctype->ExecMin(a_out, a, stream);
@@ -1318,13 +1316,12 @@ void cub_max(OutputTensor &a_out, const InputOperator &a,
13181316
InputOperator,
13191317
detail::CUB_OP_REDUCE_MAX>::GetCubParams(a_out, a, stream);
13201318

1321-
using cache_val_type = detail::matxCubPlan_t<OutputTensor, InputOperator, detail::CUB_OP_REDUCE_MAX, int>;
1319+
using cache_val_type = detail::matxCubPlan_t<OutputTensor, InputOperator, detail::CUB_OP_REDUCE_MAX, EmptyParams_t>;
13221320
detail::GetCache().LookupAndExec<detail::cub_cache_t>(
13231321
detail::GetCacheIdFromType<detail::cub_cache_t>(),
13241322
params,
13251323
[&]() {
1326-
int test; ///\todo TYLER_TODO: this should not be needed
1327-
return std::make_shared<cache_val_type>(a_out, a, test, stream);
1324+
return std::make_shared<cache_val_type>(a_out, a, EmptyParams_t{}, stream);
13281325
},
13291326
[&](std::shared_ptr<cache_val_type> ctype) {
13301327
ctype->ExecMax(a_out, a, stream);
@@ -1478,13 +1475,12 @@ void cumsum_impl(OutputTensor &a_out, const InputOperator &a,
14781475
auto params =
14791476
detail::matxCubPlan_t<OutputTensor, InputOperator, detail::CUB_OP_INC_SUM>::GetCubParams(a_out, a, stream);
14801477

1481-
using cache_val_type = detail::matxCubPlan_t<OutputTensor, InputOperator, detail::CUB_OP_INC_SUM, int>;
1478+
using cache_val_type = detail::matxCubPlan_t<OutputTensor, InputOperator, detail::CUB_OP_INC_SUM, EmptyParams_t>;
14821479
detail::GetCache().LookupAndExec<detail::cub_cache_t>(
14831480
detail::GetCacheIdFromType<detail::cub_cache_t>(),
14841481
params,
14851482
[&]() {
1486-
int test; ///\todo TYLER_TODO: this should not be needed
1487-
return std::make_shared<cache_val_type>(a_out, a, test, stream);
1483+
return std::make_shared<cache_val_type>(a_out, a, EmptyParams_t{}, stream);
14881484
},
14891485
[&](std::shared_ptr<cache_val_type> ctype) {
14901486
ctype->ExecPrefixScanEx(a_out, a, stream);

0 commit comments

Comments (0)