Skip to content

Commit c8e2bc8

Browse files
committed
Added toeplitz operator
1 parent 4fd3986 commit c8e2bc8

File tree

145 files changed

+930
-575
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

145 files changed

+930
-575
lines changed
Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,33 @@
1+
.. _toeplitz_func:
2+
3+
toeplitz
4+
========
5+
6+
Generate a Toeplitz matrix
7+
8+
`c` represents the first column of the matrix while `r` represents the first row. `c` and `r` must
9+
have the same first value; if they don't match, the first value from `r` will be used.
10+
11+
Passing a single array/operator as input is equivalent to passing the conjugate of the same
12+
input as the second parameter.
13+
14+
.. doxygenfunction:: toeplitz(const T (&c)[D])
15+
.. doxygenfunction:: toeplitz(const Op &c)
16+
.. doxygenfunction:: auto __MATX_INLINE__ toeplitz(const T (&c)[D1], const T (&r)[D2])
17+
.. doxygenfunction:: toeplitz(const COp &cop, const ROp &rop)
18+
19+
Examples
20+
~~~~~~~~
21+
22+
.. literalinclude:: ../../../../test/00_operators/OperatorTests.cu
23+
:language: cpp
24+
:start-after: example-begin toeplitz-test-1
25+
:end-before: example-end toeplitz-test-1
26+
:dedent:
27+
28+
.. literalinclude:: ../../../../test/00_transform/Norm.cu
29+
:language: cpp
30+
:start-after: example-begin toeplitz-test-2
31+
:end-before: example-end toeplitz-test-2
32+
:dedent:
33+

include/matx/core/file_io.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -158,7 +158,7 @@ void read_csv(TensorType &t, const std::string fname,
158158
auto np = pybind11::module_::import("numpy");
159159
auto obj = np.attr("genfromtxt")("fname"_a = fname.c_str(), "delimiter"_a = delimiter,
160160
"skip_header"_a = skip_header,
161-
"dtype"_a = detail::MatXPybind::GetNumpyDtype<typename TensorType::scalar_type>());
161+
"dtype"_a = detail::MatXPybind::GetNumpyDtype<typename TensorType::value_type>());
162162
pb->NumpyToTensorView(t, obj);
163163
}
164164

include/matx/core/iterator.h

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -47,8 +47,7 @@ namespace matx {
4747
template <typename OperatorType, bool ConvertType = true>
4848
struct RandomOperatorIterator {
4949
using self_type = RandomOperatorIterator<OperatorType, ConvertType>;
50-
using value_type = typename std::conditional_t<ConvertType, detail::convert_matx_type_t<typename OperatorType::scalar_type>, typename OperatorType::scalar_type>;
51-
using scalar_type = value_type;
50+
using value_type = typename std::conditional_t<ConvertType, detail::convert_matx_type_t<typename OperatorType::value_type>, typename OperatorType::value_type>;
5251
// using stride_type = std::conditional_t<is_tensor_view_v<OperatorType>, typename OperatorType::desc_type::stride_type,
5352
// index_t>;
5453
using stride_type = index_t;
@@ -174,8 +173,7 @@ __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ index_t operator-(const RandomOper
174173
template <typename OperatorType, bool ConvertType = true>
175174
struct RandomOperatorOutputIterator {
176175
using self_type = RandomOperatorOutputIterator<OperatorType, ConvertType>;
177-
using value_type = typename std::conditional_t<ConvertType, detail::convert_matx_type_t<typename OperatorType::scalar_type>, typename OperatorType::scalar_type>;
178-
using scalar_type = value_type;
176+
using value_type = typename std::conditional_t<ConvertType, detail::convert_matx_type_t<typename OperatorType::value_type>, typename OperatorType::value_type>;
179177
// using stride_type = std::conditional_t<is_tensor_view_v<OperatorType>, typename OperatorType::desc_type::stride_type,
180178
// index_t>;
181179
using stride_type = index_t;

include/matx/core/make_tensor.h

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -78,7 +78,7 @@ void make_tensor( TensorType &tensor,
7878
cudaStream_t stream = 0) {
7979
MATX_NVTX_START("", matx::MATX_NVTX_LOG_API)
8080

81-
auto tmp = make_tensor<typename TensorType::scalar_type, TensorType::Rank()>(shape, space, stream);
81+
auto tmp = make_tensor<typename TensorType::value_type, TensorType::Rank()>(shape, space, stream);
8282
tensor.Shallow(tmp);
8383
}
8484

@@ -166,7 +166,7 @@ auto make_tensor( TensorType &tensor,
166166
cudaStream_t stream = 0) {
167167
MATX_NVTX_START("", matx::MATX_NVTX_LOG_API)
168168

169-
auto tmp = make_tensor<typename TensorType::scalar_type, ShapeType>(std::forward<ShapeType>(shape), space, stream);
169+
auto tmp = make_tensor<typename TensorType::value_type, ShapeType>(std::forward<ShapeType>(shape), space, stream);
170170
tensor.Shallow(tmp);
171171
}
172172

@@ -225,7 +225,7 @@ auto make_tensor_p( TensorType &tensor,
225225
cudaStream_t stream = 0) {
226226
MATX_NVTX_START("", matx::MATX_NVTX_LOG_API)
227227

228-
auto tmp = make_tensor<typename TensorType::scalar_type, typename TensorType::shape_container>(std::forward<typename TensorType::shape_container>(shape), space, stream);
228+
auto tmp = make_tensor<typename TensorType::value_type, typename TensorType::shape_container>(std::forward<typename TensorType::shape_container>(shape), space, stream);
229229
tensor.Shallow(tmp);
230230
}
231231

@@ -261,7 +261,7 @@ template <typename TensorType,
261261
auto make_tensor( TensorType &tensor,
262262
matxMemorySpace_t space = MATX_MANAGED_MEMORY,
263263
cudaStream_t stream = 0) {
264-
auto tmp = make_tensor<typename TensorType::scalar_type>({}, space, stream);
264+
auto tmp = make_tensor<typename TensorType::value_type>({}, space, stream);
265265
tensor.Shallow(tmp);
266266
}
267267

@@ -322,12 +322,12 @@ auto make_tensor( T *data,
322322
template <typename TensorType,
323323
std::enable_if_t<is_tensor_view_v<TensorType>, bool> = true>
324324
auto make_tensor( TensorType &tensor,
325-
typename TensorType::scalar_type *data,
325+
typename TensorType::value_type *data,
326326
const index_t (&shape)[TensorType::Rank()],
327327
bool owning = false) {
328328
MATX_NVTX_START("", matx::MATX_NVTX_LOG_API)
329329

330-
auto tmp = make_tensor<typename TensorType::scalar_type, TensorType::Rank()>(data, shape, owning);
330+
auto tmp = make_tensor<typename TensorType::value_type, TensorType::Rank()>(data, shape, owning);
331331
tensor.Shallow(tmp);
332332
}
333333

@@ -373,12 +373,12 @@ auto make_tensor( T *data,
373373
template <typename TensorType,
374374
std::enable_if_t<is_tensor_view_v<TensorType>, bool> = true>
375375
auto make_tensor( TensorType &tensor,
376-
typename TensorType::scalar_type *data,
376+
typename TensorType::value_type *data,
377377
typename TensorType::shape_container &&shape,
378378
bool owning = false) {
379379
MATX_NVTX_START("", matx::MATX_NVTX_LOG_API)
380380

381-
auto tmp = make_tensor<typename TensorType::scalar_type, typename TensorType::shape_container>(data, std::forward<typename TensorType::shape_container>(shape), owning);
381+
auto tmp = make_tensor<typename TensorType::value_type, typename TensorType::shape_container>(data, std::forward<typename TensorType::shape_container>(shape), owning);
382382
tensor.Shallow(tmp);
383383
}
384384

@@ -414,9 +414,9 @@ auto make_tensor( T *ptr,
414414
template <typename TensorType,
415415
std::enable_if_t<is_tensor_view_v<TensorType>, bool> = true>
416416
auto make_tensor( TensorType &tensor,
417-
typename TensorType::scalar_type *ptr,
417+
typename TensorType::value_type *ptr,
418418
bool owning = false) {
419-
auto tmp = make_tensor<typename TensorType::scalar_type>(ptr, owning);
419+
auto tmp = make_tensor<typename TensorType::value_type>(ptr, owning);
420420
tensor.Shallow(tmp);
421421
}
422422

@@ -534,12 +534,12 @@ auto make_tensor( T* const data,
534534
template <typename TensorType,
535535
std::enable_if_t<is_tensor_view_v<TensorType>, bool> = true>
536536
auto make_tensor( TensorType &tensor,
537-
typename TensorType::scalar_type* const data,
537+
typename TensorType::value_type* const data,
538538
typename TensorType::desc_type &&desc,
539539
bool owning = false) {
540540
MATX_NVTX_START("", matx::MATX_NVTX_LOG_API)
541541

542-
auto tmp = make_tensor<typename TensorType::scalar_type, typename TensorType::desc_type>(data, std::forward<typename TensorType::desc_type>(desc), owning);
542+
auto tmp = make_tensor<typename TensorType::value_type, typename TensorType::desc_type>(data, std::forward<typename TensorType::desc_type>(desc), owning);
543543
tensor.Shallow(tmp);
544544
}
545545

@@ -585,7 +585,7 @@ auto make_tensor( TensorType &&tensor,
585585
cudaStream_t stream = 0) {
586586
MATX_NVTX_START("", matx::MATX_NVTX_LOG_API)
587587

588-
auto tmp = make_tensor<typename TensorType::scalar_type, typename TensorType::desc_type>(std::forward<typename TensorType::desc_type>(desc), space, stream);
588+
auto tmp = make_tensor<typename TensorType::value_type, typename TensorType::desc_type>(std::forward<typename TensorType::desc_type>(desc), space, stream);
589589
tensor.Shallow(tmp);
590590
}
591591

@@ -633,13 +633,13 @@ auto make_tensor( T *const data,
633633
template <typename TensorType,
634634
std::enable_if_t<is_tensor_view_v<TensorType>, bool> = true>
635635
auto make_tensor( TensorType &tensor,
636-
typename TensorType::scalar_type *const data,
636+
typename TensorType::value_type *const data,
637637
const index_t (&shape)[TensorType::Rank()],
638638
const index_t (&strides)[TensorType::Rank()],
639639
bool owning = false) {
640640
MATX_NVTX_START("", matx::MATX_NVTX_LOG_API)
641641

642-
auto tmp = make_tensor<typename TensorType::scalar_type, TensorType::Rank()>(data, shape, strides, owning);
642+
auto tmp = make_tensor<typename TensorType::value_type, TensorType::Rank()>(data, shape, strides, owning);
643643
tensor.Shallow(tmp);
644644
}
645645

include/matx/core/operator_utils.h

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -44,13 +44,13 @@ namespace matx {
4444
if (out.IsContiguous()) {
4545
if constexpr(ConvertType) {
4646
return func( in,
47-
reinterpret_cast<detail::convert_matx_type_t<typename remove_cvref_t<OutputOp>::scalar_type> *>(out.Data()),
47+
reinterpret_cast<detail::convert_matx_type_t<typename remove_cvref_t<OutputOp>::value_type> *>(out.Data()),
4848
bi,
4949
ei);
5050
}
5151
else {
5252
return func( in,
53-
reinterpret_cast<typename remove_cvref_t<OutputOp>::scalar_type *>(out.Data()),
53+
reinterpret_cast<typename remove_cvref_t<OutputOp>::value_type *>(out.Data()),
5454
bi,
5555
ei);
5656
}
@@ -70,14 +70,14 @@ namespace matx {
7070
if constexpr (ConvertType) {
7171
return ReduceOutput<ConvertType>( std::forward<Func>(func),
7272
std::forward<OutputOp>(out),
73-
reinterpret_cast<detail::convert_matx_type_t<typename remove_cvref_t<InputOp>::scalar_type> *>(in_base.Data()),
73+
reinterpret_cast<detail::convert_matx_type_t<typename remove_cvref_t<InputOp>::value_type> *>(in_base.Data()),
7474
BeginOffset{in_base},
7575
EndOffset{in_base});
7676
}
7777
else {
7878
return ReduceOutput<ConvertType>( std::forward<Func>(func),
7979
std::forward<OutputOp>(out),
80-
reinterpret_cast<typename remove_cvref_t<InputOp>::scalar_type *>(in_base.Data()),
80+
reinterpret_cast<typename remove_cvref_t<InputOp>::value_type *>(in_base.Data()),
8181
BeginOffset{in_base},
8282
EndOffset{in_base});
8383
}
@@ -118,9 +118,9 @@ namespace matx {
118118
namespace detail {
119119
// Used inside of transforms to allocate temporary output
120120
template <typename TensorType, typename Executor, typename ShapeType>
121-
__MATX_HOST__ __MATX_INLINE__ void AllocateTempTensor(TensorType &tensor, Executor &&ex, ShapeType &&shape, typename TensorType::scalar_type **ptr) {
121+
__MATX_HOST__ __MATX_INLINE__ void AllocateTempTensor(TensorType &tensor, Executor &&ex, ShapeType &&shape, typename TensorType::value_type **ptr) {
122122
const auto ttl_size = std::accumulate(shape.begin(), shape.end(), static_cast<index_t>(1),
123-
std::multiplies<index_t>()) * sizeof(typename TensorType::scalar_type);
123+
std::multiplies<index_t>()) * sizeof(typename TensorType::value_type);
124124
if constexpr (is_cuda_executor_v<Executor>) {
125125
matxAlloc((void**)ptr, ttl_size, MATX_ASYNC_DEVICE_MEMORY, ex.getStream());
126126
make_tensor(tensor, *ptr, shape);

include/matx/core/pybind.h

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -176,7 +176,7 @@ class MatXPybind {
176176
template <typename TensorType>
177177
static pybind11::object GetEmptyNumpy(const TensorType &ten)
178178
{
179-
using T = typename TensorType::scalar_type;
179+
using T = typename TensorType::value_type;
180180
auto np = pybind11::module_::import("numpy");
181181
pybind11::list dims;
182182

@@ -329,7 +329,7 @@ class MatXPybind {
329329
void NumpyToTensorView(TensorType ten,
330330
const pybind11::object &np_ten)
331331
{
332-
using T = typename TensorType::scalar_type;
332+
using T = typename TensorType::value_type;
333333
constexpr int RANK = TensorType::Rank();
334334
static_assert(RANK <=5, "NumpyToTensorView only supports max(RANK) = 5 at the moment.");
335335

@@ -377,7 +377,7 @@ class MatXPybind {
377377
template <typename TensorType>
378378
auto NumpyToTensorView(const pybind11::object &np_ten)
379379
{
380-
using T = typename TensorType::scalar_type;
380+
using T = typename TensorType::value_type;
381381
constexpr int RANK = TensorType::Rank();
382382
using ntype = matx_convert_complex_type<T>;
383383
auto ften = pybind11::array_t<ntype, pybind11::array::c_style | pybind11::array::forcecast>(np_ten);
@@ -398,7 +398,7 @@ class MatXPybind {
398398

399399
template <typename TensorType>
400400
auto TensorViewToNumpy(const TensorType &ten) {
401-
using tensor_type = typename TensorType::scalar_type;
401+
using tensor_type = typename TensorType::value_type;
402402
using ntype = matx_convert_complex_type<tensor_type>;
403403
constexpr int RANK = TensorType::Rank();
404404

@@ -466,12 +466,12 @@ class MatXPybind {
466466

467467

468468
template <typename TensorType,
469-
typename CT = matx_convert_cuda_complex_type<typename TensorType::scalar_type>>
469+
typename CT = matx_convert_cuda_complex_type<typename TensorType::value_type>>
470470
std::optional<TestFailResult<CT>>
471471
CompareOutput(const TensorType &ten,
472472
const std::string fname, double thresh, bool debug = false)
473473
{
474-
using raw_type = typename TensorType::scalar_type;
474+
using raw_type = typename TensorType::value_type;
475475
using ntype = matx_convert_complex_type<raw_type>;
476476
using ctype = matx_convert_cuda_complex_type<raw_type>;
477477
auto resobj = res_dict[fname.c_str()];

include/matx/core/tensor.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -87,7 +87,7 @@ class tensor_t : public detail::tensor_impl_t<T,RANK,Desc> {
8787
public:
8888
// Type specifier for reflection on class
8989
using type = T; ///< Type of traits
90-
using scalar_type = T; ///< Type of traits
90+
using value_type = T; ///< Type of traits
9191
// Type specifier for signaling this is a matx operation or tensor view
9292
using matxop = bool; ///< Indicate this is a MatX operator
9393
using matxoplvalue = bool; ///< Indicate this is a MatX operator that can be on the lhs of an equation

include/matx/core/tensor_impl.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -71,7 +71,6 @@ class tensor_impl_t {
7171
public:
7272
// Type specifier for reflection on class
7373
using type = T; // TODO is this necessary
74-
using scalar_type = T;
7574
using value_type = T;
7675
using tensor_view = bool;
7776
using desc_type = Desc;

include/matx/core/tensor_utils.h

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -842,7 +842,7 @@ namespace detail {
842842
PrintKernel<<<1, 1>>>(op, dims...);
843843
}
844844
else {
845-
auto tmpv = make_tensor<typename Op::scalar_type>(op.Shape());
845+
auto tmpv = make_tensor<typename Op::value_type>(op.Shape());
846846
(tmpv = op).run();
847847
PrintData(fp, tmpv, dims...);
848848
}
@@ -911,7 +911,7 @@ void PrintData(FILE* fp, const Op &op, Args... dims) {
911911
}
912912
}
913913
else {
914-
auto tmpv = make_tensor<typename Op::scalar_type>(op.Shape());
914+
auto tmpv = make_tensor<typename Op::value_type>(op.Shape());
915915
(tmpv = op).run();
916916
cudaStreamSynchronize(0);
917917
InternalPrint(fp, tmpv, dims...);
@@ -962,7 +962,7 @@ void PrintData(FILE* fp, const Op &op, Args... dims) {
962962
// PrintKernel<<<1, 1>>>(op, dims...);
963963
// }
964964
// else {
965-
// auto tmpv = make_tensor<typename Op::scalar_type>(op.Shape());
965+
// auto tmpv = make_tensor<typename Op::value_type>(op.Shape());
966966
// (tmpv = op).run();
967967
// PrintData(tmpv, dims...);
968968
// }
@@ -1004,7 +1004,7 @@ void fprint(FILE* fp, const Op &op, Args... dims)
10041004
// print tensor size info first
10051005
std::string type = (is_tensor_view_v<Op>) ? "Tensor" : "Operator";
10061006

1007-
fprintf(fp, "%s{%s} Rank: %d, Sizes:[", type.c_str(), detail::GetTensorType<typename Op::scalar_type>().c_str(), op.Rank());
1007+
fprintf(fp, "%s{%s} Rank: %d, Sizes:[", type.c_str(), detail::GetTensorType<typename Op::value_type>().c_str(), op.Rank());
10081008

10091009
for (index_t dimIdx = 0; dimIdx < (op.Rank() ); dimIdx++ )
10101010
{
@@ -1110,7 +1110,7 @@ void print(const Op &op)
11101110
template <typename Op>
11111111
auto OpToTensor(Op &&op, [[maybe_unused]] cudaStream_t stream) {
11121112
if constexpr (!is_tensor_view_v<Op>) {
1113-
return make_tensor<typename remove_cvref<Op>::scalar_type>(op.Shape(), MATX_ASYNC_DEVICE_MEMORY, stream);
1113+
return make_tensor<typename remove_cvref<Op>::value_type>(op.Shape(), MATX_ASYNC_DEVICE_MEMORY, stream);
11141114
} else {
11151115
return op;
11161116
}

include/matx/core/tie.h

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -43,8 +43,7 @@ namespace matx {
4343
template <typename... Ts>
4444
struct mtie : public BaseOp<mtie<Ts...>>{
4545
using mtie_type = bool;
46-
using scalar_type = void; // Doesn't matter since it's not used
47-
using value_type = void; // Doesn't matter since it's not used
46+
using value_type = void; // Doesn't matter since it's not used
4847
using shape_type = index_t;
4948
using matxoplvalue = bool;
5049

@@ -97,7 +96,7 @@ struct mtie : public BaseOp<mtie<Ts...>>{
9796
// Run the PreRun on the inner type to avoid allocation but allow transforms using MatX operators
9897
// to do any setup needed
9998
if constexpr (sizeof...(Ts) == 2) {
100-
cuda::std::get<sizeof...(Ts) - 1>(ts_).InnerPreRun(NoShape{}, std::forward<Executor>(ex));
99+
cuda::std::get<sizeof...(Ts) - 1>(ts_).InnerPreRun(detail::NoShape{}, std::forward<Executor>(ex));
101100
}
102101
cuda::std::get<sizeof...(Ts) - 1>(ts_).Exec(ts_, std::forward<Executor>(ex));
103102
}

0 commit comments

Comments
 (0)