diff --git a/include/redisserver.h b/include/redisserver.h
index a0e74cb3..61d4cb34 100644
--- a/include/redisserver.h
+++ b/include/redisserver.h
@@ -609,7 +609,7 @@ class RedisServer {
         /*!
         *   \brief Default socket timeout (milliseconds)
         */
-        static constexpr int _DEFAULT_SOCKET_TIMEOUT = 250;
+        static constexpr int _DEFAULT_SOCKET_TIMEOUT = 1000;

         /*!
         *   \brief Default value of connection timeout (seconds)
diff --git a/include/sr_enums.h b/include/sr_enums.h
index 87b8d094..49f4fa0f 100644
--- a/include/sr_enums.h
+++ b/include/sr_enums.h
@@ -39,8 +39,6 @@ typedef enum {
     SRMemLayoutInvalid = 0,       // Invalid or uninitialized memory layout
     SRMemLayoutNested = 1,        // Multidimensional row-major array layout with nested arrays of pointers (contiguous at innermost layer)
     SRMemLayoutContiguous = 2,    // Multidimensional row-major array layout in contiguous memory
-    SRMemLayoutFortranNested = 3, // Multidimensional column-major array layout with nested arrays of pointers (contiguous at innermost layer)
-    SRMemLayoutFortranContiguous = 4  // Multidimensional column-major array layout in contiguous memory
 } SRMemoryLayout;

 /*!
diff --git a/include/tensor.h b/include/tensor.h
index 2847288e..7c4439c6 100644
--- a/include/tensor.h
+++ b/include/tensor.h
@@ -217,85 +217,6 @@ class Tensor : public TensorBase
                               const std::vector<size_t>& dims,
                               const SRMemoryLayout mem_layout);

-        /*!
-        *   \brief This function will copy a fortran array
-        *          memory space (column major) to a c-style
-        *          memory space layout (row major)
-        *   \param c_data A pointer to the row major memory space
-        *   \param f_data A pointer to the col major memory space
-        *   \param dims The dimensions of the tensor
-        */
-        void _f_to_c_memcpy(T* c_data,
-                            const T* f_data,
-                            const std::vector<size_t>& dims);
-
-        /*!
-        *   \brief This function will copy a c-style array
-        *          memory space (row major) to a fortran
-        *          memory space layout (col major)
-        *   \param f_data A pointer to the col major memory space
-        *   \param c_data A pointer to the row major memory space
-        *   \param dims The dimensions of the tensor
-        */
-        void _c_to_f_memcpy(T* f_data,
-                            const T* c_data,
-                            const std::vector<size_t>& dims);
-
-        /*!
-        *   \brief This is a recursive function used to copy
-        *          fortran column major memory to c-style row
-        *          major memory
-        *   \param c_data A pointer to the row major memory space
-        *   \param f_data A pointer to the col major memory space
-        *   \param dims The dimensions of the tensor
-        *   \param dim_positions The current position in each
-        *                        dimension
-        *   \param current_dim The index of the current dimension
-        */
-        void _f_to_c(T* c_data,
-                     const T* f_data,
-                     const std::vector<size_t>& dims,
-                     std::vector<size_t> dim_positions,
-                     size_t current_dim);
-
-        /*!
-        *   \brief This is a recursive function used to
-        *          copy c-style row major memory to fortran
-        *          column major memory
-        *   \param f_data A pointer to the col major memory space
-        *   \param c_data A pointer to the row major memory space
-        *   \param dims The dimensions of the tensor
-        *   \param dim_positions The current position in each
-        *                        dimension
-        *   \param current_dim The index of the current dimension
-        */
-        void _c_to_f(T* f_data,
-                     const T* c_data,
-                     const std::vector<size_t>& dims,
-                     std::vector<size_t> dim_positions,
-                     size_t current_dim);
-
-        /*!
-        *   \brief Calculate the contiguous array position
-        *          for a column major position
-        *   \param dims The tensor dimensions
-        *   \param dim_positions The current position for each
-        *                        dimension
-        *   \returns The contiguous memory index position
-        */
-        inline size_t _f_index(const std::vector<size_t>& dims,
-                               const std::vector<size_t>& dim_positions);
-
-        /*!
-        *   \brief Calculate the contiguous array position
-        *          for a row major position
-        *   \param dims The tensor dimensions
-        *   \param dim_positions The current position for each dimension
-        *   \returns The contiguous memory index position
-        */
-        inline size_t _c_index(const std::vector<size_t>& dims,
-                               const std::vector<size_t>& dim_positions);
-
         /*!
         *   \brief Get the total number of bytes of the data
         *   \returns Total number of bytes of the data
@@ -305,12 +226,8 @@ class Tensor : public TensorBase
         /*!
         *   \brief Memory allocated for c nested tensor memory views
         */
-        SharedMemoryList<T*> _c_mem_views;
+        SharedMemoryList<T*> _mem_views;

-        /*!
-        *   \brief Memory allocated for f nested tensor memory views
-        */
-        SharedMemoryList<T> _f_mem_views;
 };

 #include "tensor.tcc"
diff --git a/include/tensor.tcc b/include/tensor.tcc
index ed05488b..bee5e460 100644
--- a/include/tensor.tcc
+++ b/include/tensor.tcc
@@ -52,16 +52,14 @@ Tensor<T>::Tensor(const Tensor<T>& tensor) : TensorBase(tensor)
         return;

     _set_tensor_data(tensor._data, tensor._dims, SRMemLayoutContiguous);
-    _c_mem_views = tensor._c_mem_views;
-    _f_mem_views = tensor._f_mem_views;
+    _mem_views = tensor._mem_views;
 }

 // Tensor move constructor
 template <class T>
 Tensor<T>::Tensor(Tensor<T>&& tensor) : TensorBase(std::move(tensor))
 {
-    _c_mem_views = std::move(tensor._c_mem_views);
-    _f_mem_views = std::move(tensor._f_mem_views);
+    _mem_views = std::move(tensor._mem_views);
 }

 // Tensor copy assignment operator
@@ -75,8 +73,7 @@ Tensor<T>& Tensor<T>::operator=(const Tensor<T>& tensor)
     // Deep copy tensor data
     TensorBase::operator=(tensor);
     _set_tensor_data(tensor._data, tensor._dims, SRMemLayoutContiguous);
-    _c_mem_views = tensor._c_mem_views;
-    _f_mem_views = tensor._f_mem_views;
+    _mem_views = tensor._mem_views;

     // Done
     return *this;
@@ -92,8 +89,7 @@ Tensor<T>& Tensor<T>::operator=(Tensor<T>&& tensor)

     // Move data
     TensorBase::operator=(std::move(tensor));
-    _c_mem_views = std::move(tensor._c_mem_views);
-    _f_mem_views = std::move(tensor._f_mem_views);
+    _mem_views = std::move(tensor._mem_views);

     // Done
     return *this;
@@ -132,11 +128,6 @@ void* Tensor<T>::data_view(const SRMemoryLayout mem_layout)
                  pointers so that the caller can cast
                  to a nested pointer structure and index
                  with multiple [] operators.
-              3) MemoryLayout::fortran_contiguous :
-                 The internal row major format will
-                 be copied into a new allocated memory
-                 space that is the transpose (column major)
-                 of the row major layout.
     */

     void* ptr = NULL;
@@ -145,10 +136,6 @@ void* Tensor<T>::data_view(const SRMemoryLayout mem_layout)
         case SRMemLayoutContiguous:
             ptr = _data;
             break;
-        case SRMemLayoutFortranContiguous:
-            ptr = _f_mem_views.allocate_bytes(_n_data_bytes());
-            _c_to_f_memcpy((T*)ptr, (T*)_data, _dims);
-            break;
         case SRMemLayoutNested:
             _build_nested_memory(&ptr,
                                  _dims.data(),
@@ -196,9 +183,6 @@ void Tensor<T>::fill_mem_space(void* data,

     // Copy over the data
     switch (mem_layout) {
-        case SRMemLayoutFortranContiguous:
-            _c_to_f_memcpy((T*)data, (T*)_data, _dims);
-            break;
         case SRMemLayoutContiguous:
             std::memcpy(data, _data, _n_data_bytes());
             break;
@@ -275,7 +259,7 @@ T* Tensor<T>::_build_nested_memory(void** data,
                                   "_build_nested_memory");
     }
     if (n_dims > 1) {
-        T** new_data = _c_mem_views.allocate(dims[0]);
+        T** new_data = _mem_views.allocate(dims[0]);
         if (new_data == NULL)
             throw SRBadAllocException("nested memory for tensor");
         (*data) = reinterpret_cast<void*>(new_data);
@@ -310,9 +294,6 @@ void Tensor<T>::_set_tensor_data(const void* src_data,
         case SRMemLayoutContiguous:
             std::memcpy(_data, src_data, n_bytes);
             break;
-        case SRMemLayoutFortranContiguous:
-            _f_to_c_memcpy((T*)_data, (const T*)src_data, dims);
-            break;
         case SRMemLayoutNested:
             _copy_nested_to_contiguous(
                 src_data, dims.data(), dims.size(), _data);
@@ -329,123 +310,5 @@ size_t Tensor<T>::_n_data_bytes()
 {
     return num_values() * sizeof(T);
 }

-// Copy a fortran memory space layout (col major) to a
-// c-style array memory space (row major)
-template <class T>
-void Tensor<T>::_f_to_c_memcpy(T* c_data,
-                               const T* f_data,
-                               const std::vector<size_t>& dims)
-{
-    if (c_data == NULL || f_data == NULL) {
-        throw SRRuntimeException("Invalid buffer suppplied to _f_to_c_memcpy");
-    }
-    std::vector<size_t> dim_positions(dims.size(), 0);
-    _f_to_c(c_data, f_data, dims, dim_positions, 0);
-}
-
-// Copy a c-style array memory space (row major) to a
-// fortran memory space layout (col major)
-template <class T>
-void Tensor<T>::_c_to_f_memcpy(T* f_data,
-                               const T* c_data,
-                               const std::vector<size_t>& dims)
-{
-    if (c_data == NULL || f_data == NULL) {
-        throw SRRuntimeException("Invalid buffer suppplied to _c_to_f_memcpy");
-    }
-    std::vector<size_t> dim_positions(dims.size(), 0);
-    _c_to_f(f_data, c_data, dims, dim_positions, 0);
-}
-
-// Copy fortran column major memory to c-style row major memory recursively
-template <class T>
-void Tensor<T>::_f_to_c(T* c_data,
-                        const T* f_data,
-                        const std::vector<size_t>& dims,
-                        std::vector<size_t> dim_positions,
-                        size_t current_dim)
-{
-    if (c_data == NULL || f_data == NULL) {
-        throw SRRuntimeException("Invalid buffer suppplied to _f_to_c");
-    }
-    size_t start = dim_positions[current_dim];
-    size_t end = dims[current_dim];
-    bool more_dims = (current_dim + 1 != dims.size());
-
-    for (size_t i = start; i < end; i++) {
-        if (more_dims)
-            _f_to_c(c_data, f_data, dims, dim_positions,
-                    current_dim + 1);
-        else {
-            size_t f_index = _f_index(dims, dim_positions);
-            size_t c_index = _c_index(dims, dim_positions);
-            c_data[c_index] = f_data[f_index];
-        }
-        dim_positions[current_dim]++;
-    }
-}
-
-// Copy c-style row major memory to fortran column major memory recursively
-template <class T>
-void Tensor<T>::_c_to_f(T* f_data,
-                        const T* c_data,
-                        const std::vector<size_t>& dims,
-                        std::vector<size_t> dim_positions,
-                        size_t current_dim)
-{
-    if (c_data == NULL || f_data == NULL) {
-        throw SRRuntimeException("Invalid buffer suppplied to _f_to_c");
-    }
-    size_t start = dim_positions[current_dim];
-    size_t end = dims[current_dim];
-    bool more_dims = (current_dim + 1 != dims.size());
-
-    for (size_t i = start; i < end; i++) {
-        if (more_dims) {
-            _c_to_f(f_data,
-                    c_data, dims, dim_positions,
-                    current_dim + 1);
-        }
-        else {
-            size_t f_index = _f_index(dims, dim_positions);
-            size_t c_index = _c_index(dims, dim_positions);
-            f_data[f_index] = c_data[c_index];
-        }
-        dim_positions[current_dim]++;
-    }
-}
-
-// Calculate the contiguous array position for a column major position
-template <class T>
-inline size_t Tensor<T>::_f_index(const std::vector<size_t>& dims,
-                                  const std::vector<size_t>& dim_positions)
-{
-    size_t position = 0;
-
-    for (size_t k = 0; k < dims.size(); k++) {
-        size_t sum_product = dim_positions[k];
-        for (size_t m = 0; m < k; m++) {
-            sum_product *= dims[m];
-        }
-        position += sum_product;
-    }
-    return position;
-}
-
-// Calculate the contiguous array position for a row major position
-template <class T>
-inline size_t Tensor<T>::_c_index(const std::vector<size_t>& dims,
-                                  const std::vector<size_t>& dim_positions)
-{
-    size_t position = 0;
-
-    for(size_t k = 0; k < dims.size(); k++) {
-        size_t sum_product = dim_positions[k];
-        for(size_t m = k + 1; m < dims.size(); m++) {
-            sum_product *= dims[m];
-        }
-        position += sum_product;
-    }
-    return position;
-}
-
 #endif // SMARTREDIS_TENSOR_TCC
diff --git a/src/cpp/client.cpp b/src/cpp/client.cpp
index aebc82c5..991f30c5 100644
--- a/src/cpp/client.cpp
+++ b/src/cpp/client.cpp
@@ -425,8 +425,7 @@ void Client::unpack_tensor(const std::string& name,
     std::vector<size_t> reply_dims = GetTensorCommand::get_dims(reply);

     // Make sure we have the right dims to unpack into (Contiguous case)
-    if (mem_layout == SRMemLayoutContiguous ||
-        mem_layout == SRMemLayoutFortranContiguous) {
+    if (mem_layout == SRMemLayoutContiguous) {
         size_t total_dims = 1;
         for (size_t i = 0; i < reply_dims.size(); i++) {
             total_dims *= reply_dims[i];
diff --git a/src/cpp/redis.cpp b/src/cpp/redis.cpp
index 1b23a07c..ffab3596 100644
--- a/src/cpp/redis.cpp
+++ b/src/cpp/redis.cpp
@@ -648,6 +648,7 @@ void Redis::set_model_chunk_size(int chunk_size)
 inline CommandReply Redis::_run(const Command& cmd)
 {
     for (int i = 1; i <= _command_attempts; i++) {
+        std::cout<<"Attempt # "<<i<<std::endl;
         try {
             // Run the command
             CommandReply reply = _redis->command(cmd.cbegin(), cmd.cend());
diff --git a/tests/cpp/CMakeLists.txt b/tests/cpp/CMakeLists.txt
index e649dd8a..e50eef79 100644
--- a/tests/cpp/CMakeLists.txt
+++ b/tests/cpp/CMakeLists.txt
@@ -90,7 +90,6 @@ list(APPEND EXECUTABLES
     client_test_put_get_3D
     client_test_put_get_3D_static_values
     client_test_put_get_contiguous_3D
-    client_test_put_get_transpose_3D
     client_test_put_get_2D
     client_test_put_get_1D
     client_test_mnist
@@ -103,7 +102,7 @@ list(APPEND EXECUTABLES
 foreach(EXECUTABLE ${EXECUTABLES})
     add_executable(${EXECUTABLE}_cpp_test
         ${EXECUTABLE}.cpp
-    )
+    )
     set_target_properties(${EXECUTABLE}_cpp_test PROPERTIES
         OUTPUT_NAME ${EXECUTABLE}
     )
diff --git a/tests/cpp/client_test_put_get_transpose_3D.cpp b/tests/cpp/client_test_put_get_transpose_3D.cpp
deleted file mode 100644
index 83f3d1be..00000000
--- a/tests/cpp/client_test_put_get_transpose_3D.cpp
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- * BSD 2-Clause License
- *
- * Copyright (c) 2021-2024, Hewlett Packard Enterprise
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice, this
- *    list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "client.h"
-#include "client_test_utils.h"
-#include <vector>
-#include <string>
-
-inline size_t _c_index(const std::vector<size_t>& dims,
-                       const std::vector<size_t> dim_positions)
-{
-    /* This function will return the row major
-    index in a contiguous memory array corresponding
-    to the dimensions and dimension positions.
-    */
-    size_t position = 0;
-    size_t sum_product;
-    for(size_t k = 0; k < dims.size(); k++) {
-        sum_product = dim_positions[k];
-        for(size_t m = k+1; m < dims.size(); m++) {
-            sum_product *= dims[m];
-        }
-        position += sum_product;
-    }
-    return position;
-}
-
-
-template <typename T_send, typename T_recv>
-void put_get_3D_array(
-        void (*fill_array)(T_send*, int),
-        std::vector<size_t> dims,
-        SRTensorType type,
-        std::string key_suffix = "",
-        SRMemoryLayout send_direction = SRMemLayoutContiguous,
-        SRMemoryLayout recv_direction = SRMemLayoutContiguous)
-{
-    SmartRedis::Client client("client_test_put_get_transpose_3D");
-
-    //Allocate and fill arrays
-    T_send* array = (T_send*)malloc(dims[0]*dims[1]*dims[2]*sizeof(T_send));
-    T_recv* u_array = (T_recv*)malloc(dims[0]*dims[1]*dims[2]*sizeof(T_recv));
-
-    fill_array(array, dims[0]*dims[1]*dims[2]);
-    std::string key = "3d_tensor_transpose_test" + key_suffix;
-
-    /*
-    size_t c=0;
-    for(size_t i = 0; i < dims[0]; i++) {
-        for(size_t j = 0; j < dims[1]; j++) {
-            for(size_t k = 0; k < dims[2]; k++) {
-                std::cout.precision(17);
-                std::cout<<"Sending value "<<c<<": "<<std::fixed<<array[c]<<std::endl;
-                c++;
-            }
-        }
-    }
-    */
-
-    client.put_tensor(key, (void*)array, dims, type, send_direction);
-    client.unpack_tensor(key, u_array, {dims[0]*dims[1]*dims[2]},
-                         type, recv_direction);
-
-    size_t index;
-    size_t u_index;
-    std::vector<size_t> r_dims(dims.rbegin(), dims.rend());
-    for(size_t i = 0; i < dims[0]; i++) {
-        for(size_t j = 0; j < dims[1]; j++) {
-            for(size_t k = 0; k < dims[2]; k++) {
-                if(send_direction == SRMemLayoutFortranContiguous &&
-                   recv_direction == SRMemLayoutContiguous) {
-                    u_index = _c_index(dims, {i,j,k});
-                    index = _c_index(r_dims, {k,j,i});
-                }
-                else if(send_direction == SRMemLayoutContiguous &&
-                        recv_direction == SRMemLayoutFortranContiguous) {
-                    index = _c_index(dims, {i,j,k});
-                    u_index = _c_index(r_dims, {k,j,i});
-                }
-                else {
-                    throw std::runtime_error("Invalid test configuration.");
-                }
-                if(u_array[u_index]!=array[index]) {
-                    throw std::runtime_error("The returned matrix is not a "\
-                                             "transpose of the original matrix.");
-                }
-            }
-        }
-    }
-
-    SRTensorType g_type_transpose;
-    std::vector<size_t> g_dims_transpose;
-    void* g_array;
-    client.get_tensor(key, g_array,
-                      g_dims_transpose, g_type_transpose,
-                      recv_direction);
-
-    if(g_dims_transpose!=dims)
-        throw std::runtime_error("The tensor dimensions retrieved "\
-                                 "client.get_tensor() do not match "\
-                                 "the known tensor dimensions.");
-
-
-    if(type!=g_type_transpose)
-        throw std::runtime_error("The tensor type "\
-                                 "retrieved with client.get_tensor() "\
-                                 "does not match the known type.");
-
-    size_t g_index;
-    for(size_t i = 0; i < dims[0]; i++) {
-        for(size_t j = 0; j < dims[1]; j++) {
-            for(size_t k = 0; k < dims[2]; k++) {
-                if(send_direction == SRMemLayoutFortranContiguous &&
-                   recv_direction == SRMemLayoutContiguous) {
-                    g_index = _c_index(dims, {i,j,k});
-                    index = _c_index(r_dims, {k,j,i});
-                }
-                else if(send_direction == SRMemLayoutContiguous &&
-                        recv_direction == SRMemLayoutFortranContiguous) {
-                    index = _c_index(dims, {i,j,k});
-                    g_index = _c_index(r_dims, {k,j,i});
-                }
-                else {
-                    throw std::runtime_error("Invalid test configuration.");
-                }
-                if(((T_recv*)g_array)[g_index]!=array[index]) {
-                    throw std::runtime_error("The returned matrix is not a "\
-                                             "transpose of the original matrix.");
-                }
-            }
-        }
-    }
-
-    free(array);
-    free(u_array);
-}
-
-int main(int argc, char* argv[])
-{
-    /* This tests whether the conversion from
-    column major to row major is implemented
-    correctly in the client. To do this,
-    we put a 3D array assuming it is fortran
-    (column major) and retrieve assuming
-    it is c-style (row major). If the
-    two tensors are the transpose of each other
-    it has been implemented correctly.
-    */
-
-    size_t dim1 = 4;
-    size_t dim2 = 3;
-    size_t dim3 = 2;
-
-    std::vector<size_t> dims = {dim1, dim2, dim3};
-
-    /* Test conversion on the put side
-    */
-    put_get_3D_array<double,double>(
-        &set_1D_array_floating_point_values,
-        dims, SRTensorTypeDouble, "_dbl",
-        SRMemLayoutFortranContiguous,
-        SRMemLayoutContiguous);
-
-    put_get_3D_array<float,float>(
-        &set_1D_array_floating_point_values,
-        dims, SRTensorTypeFloat, "_flt",
-        SRMemLayoutFortranContiguous,
-        SRMemLayoutContiguous);
-
-    put_get_3D_array<int64_t,int64_t>(
-        &set_1D_array_integral_values,
-        dims, SRTensorTypeInt64, "_i64",
-        SRMemLayoutFortranContiguous,
-        SRMemLayoutContiguous);
-
-    put_get_3D_array<int32_t,int32_t>(
-        &set_1D_array_integral_values,
-        dims, SRTensorTypeInt32, "_i32",
-        SRMemLayoutFortranContiguous,
-        SRMemLayoutContiguous);
-
-    put_get_3D_array<int16_t,int16_t>(
-        &set_1D_array_integral_values,
-        dims, SRTensorTypeInt16, "_i16",
-        SRMemLayoutFortranContiguous,
-        SRMemLayoutContiguous);
-
-    put_get_3D_array<int8_t,int8_t>(
-        &set_1D_array_integral_values,
-        dims, SRTensorTypeInt8, "_i8",
-        SRMemLayoutFortranContiguous,
-        SRMemLayoutContiguous);
-
-    put_get_3D_array<uint16_t,uint16_t>(
-        &set_1D_array_integral_values,
-        dims, SRTensorTypeUint16, "_ui16",
-        SRMemLayoutFortranContiguous,
-        SRMemLayoutContiguous);
-
-    put_get_3D_array<uint8_t,uint8_t>(
-        &set_1D_array_integral_values,
-        dims, SRTensorTypeUint8, "_ui8",
-        SRMemLayoutFortranContiguous,
-        SRMemLayoutContiguous);
-
-    /* Test conversion on the get side
-    */
-    put_get_3D_array<double,double>(
-        &set_1D_array_floating_point_values,
-        dims, SRTensorTypeDouble, "_dbl",
-        SRMemLayoutContiguous,
-        SRMemLayoutFortranContiguous);
-
-    put_get_3D_array<float,float>(
-        &set_1D_array_floating_point_values,
-        dims, SRTensorTypeFloat, "_flt",
-        SRMemLayoutContiguous,
-        SRMemLayoutFortranContiguous);
-
-    put_get_3D_array<int64_t,int64_t>(
-        &set_1D_array_integral_values,
-        dims, SRTensorTypeInt64, "_i64",
-        SRMemLayoutContiguous,
-        SRMemLayoutFortranContiguous);
-
-    put_get_3D_array<int32_t,int32_t>(
-        &set_1D_array_integral_values,
-        dims, SRTensorTypeInt32, "_i32",
-        SRMemLayoutContiguous,
-        SRMemLayoutFortranContiguous);
-
-    put_get_3D_array<int16_t,int16_t>(
-        &set_1D_array_integral_values,
-        dims, SRTensorTypeInt16, "_i16",
-        SRMemLayoutContiguous,
-        SRMemLayoutFortranContiguous);
-
-    put_get_3D_array<int8_t,int8_t>(
-        &set_1D_array_integral_values,
-        dims, SRTensorTypeInt8, "_i8",
-        SRMemLayoutContiguous,
-        SRMemLayoutFortranContiguous);
-
-    put_get_3D_array<uint16_t,uint16_t>(
-        &set_1D_array_integral_values,
-        dims, SRTensorTypeUint16, "_ui16",
-        SRMemLayoutContiguous,
-        SRMemLayoutFortranContiguous);
-
-    put_get_3D_array<uint8_t,uint8_t>(
-        &set_1D_array_integral_values,
-        dims, SRTensorTypeUint8, "_ui8",
-        SRMemLayoutContiguous,
-        SRMemLayoutFortranContiguous);
-
-    std::cout<<"3D put and get to test matrix "\
-               "transpose complete."<<std::endl;
-
-    return 0;
-}
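Migration note (not part of the patch above): with SRMemLayoutFortranNested and SRMemLayoutFortranContiguous removed, an application that holds column-major (Fortran-ordered) data has to reorder it into row-major storage itself before calling put_tensor, and likewise reorder after get_tensor if it wants column-major output. Below is a minimal caller-side sketch of that conversion, mirroring the index arithmetic of the deleted _f_index()/_c_index() helpers. The names c_index, f_index, f_to_c_copy, c_buf and f_buf are illustrative assumptions introduced here; SmartRedis::Client::put_tensor, SRTensorTypeDouble and SRMemLayoutContiguous are the API already shown in this diff.

// Illustrative sketch only: reorder a column-major buffer into row-major
// storage before handing it to the client. c_index()/f_index() mirror the
// arithmetic of the removed _c_index()/_f_index() members.
#include <cstddef>
#include <vector>

// Row-major (C) offset of a multidimensional position.
inline size_t c_index(const std::vector<size_t>& dims,
                      const std::vector<size_t>& pos)
{
    size_t offset = 0;
    for (size_t k = 0; k < dims.size(); k++) {
        size_t term = pos[k];
        for (size_t m = k + 1; m < dims.size(); m++)
            term *= dims[m];
        offset += term;
    }
    return offset;
}

// Column-major (Fortran) offset of the same position.
inline size_t f_index(const std::vector<size_t>& dims,
                      const std::vector<size_t>& pos)
{
    size_t offset = 0;
    for (size_t k = 0; k < dims.size(); k++) {
        size_t term = pos[k];
        for (size_t m = 0; m < k; m++)
            term *= dims[m];
        offset += term;
    }
    return offset;
}

// Copy a column-major buffer into a row-major buffer of the same shape.
// Hypothetical helper: after this patch, code like this lives in the caller.
template <class T>
void f_to_c_copy(T* c_data, const T* f_data, const std::vector<size_t>& dims)
{
    if (dims.empty())
        return;
    std::vector<size_t> pos(dims.size(), 0);
    while (true) {
        c_data[c_index(dims, pos)] = f_data[f_index(dims, pos)];
        // Odometer-style increment of the position vector.
        size_t d = dims.size();
        while (d > 0) {
            --d;
            if (++pos[d] < dims[d])
                break;      // incremented without overflow: next element
            pos[d] = 0;     // overflow: reset this digit and carry left
            if (d == 0)
                return;     // carried past the most significant digit: done
        }
    }
}

// Usage sketch against the existing API (client, key, dims, f_buf assumed):
//   std::vector<double> c_buf(n_values);
//   f_to_c_copy(c_buf.data(), f_buf, dims);
//   client.put_tensor(key, c_buf.data(), dims,
//                     SRTensorTypeDouble, SRMemLayoutContiguous);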