/***************************************************************************
 * Copyright (c) Johan Mabille, Sylvain Corlay and Wolf Vollprecht          *
 * Copyright (c) QuantStack                                                 *
 *                                                                          *
 * Distributed under the terms of the BSD 3-Clause License.                 *
 *                                                                          *
 * The full license is in the file LICENSE, distributed with this software. *
 ****************************************************************************/

#ifndef XTENSOR_CHUNKED_ARRAY_HPP
#define XTENSOR_CHUNKED_ARRAY_HPP

#include <array>
#include <vector>

#include "xarray.hpp"
#include "xchunked_assign.hpp"

namespace xt
{
    /**
     * @defgroup xt_xchunked_array
     *
     * Chunked array container.
     * Defined in ``xtensor/xchunked_array.hpp``.
     */

    /******************************
     * xchunked_array declaration *
     ******************************/

    template <class chunk_storage>
    class xchunked_array;

    template <class chunk_storage>
    struct xcontainer_inner_types<xchunked_array<chunk_storage>>
    {
        using chunk_type = typename chunk_storage::value_type;
        using const_reference = typename chunk_type::const_reference;
        using reference = typename chunk_type::reference;
        using size_type = std::size_t;
        using storage_type = chunk_type;
        using temporary_type = xchunked_array<chunk_storage>;
    };

    template <class chunk_storage>
    struct xiterable_inner_types<xchunked_array<chunk_storage>>
    {
        using chunk_type = typename chunk_storage::value_type;
        using inner_shape_type = typename chunk_type::shape_type;
        using const_stepper = xindexed_stepper<xchunked_array<chunk_storage>, true>;
        using stepper = xindexed_stepper<xchunked_array<chunk_storage>, false>;
    };
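    // Illustrative usage sketch (editorial addition, not part of the upstream header):
    // an in-memory chunked array is normally obtained through the chunked_array()
    // factory functions declared further below rather than by instantiating
    // xchunked_array directly. The shapes and values are arbitrary example data.
    //
    //     // 10x10 array of double stored as 4x4 chunks (a 3x3 chunk grid)
    //     auto a = xt::chunked_array<double>({10, 10}, {4, 4});
    //     a(3, 9) = 42.0;        // element access resolves the owning chunk transparently
    //     double v = a(3, 9);    // reads back 42.0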
    template <class chunk_storage>
    class xchunked_array : public xaccessible<xchunked_array<chunk_storage>>,
                           public xiterable<xchunked_array<chunk_storage>>,
                           public xchunked_semantic<xchunked_array<chunk_storage>>
    {
    public:

        using chunk_storage_type = chunk_storage;
        using chunk_type = typename chunk_storage::value_type;
        using grid_shape_type = typename chunk_storage::shape_type;
        using const_reference = typename chunk_type::const_reference;
        using reference = typename chunk_type::reference;
        using self_type = xchunked_array<chunk_storage>;
        using semantic_base = xchunked_semantic<self_type>;
        using iterable_base = xconst_iterable<self_type>;
        using const_stepper = typename iterable_base::const_stepper;
        using stepper = typename iterable_base::stepper;
        using inner_types = xcontainer_inner_types<self_type>;
        using size_type = typename inner_types::size_type;
        using storage_type = typename inner_types::storage_type;
        using value_type = typename storage_type::value_type;
        using pointer = value_type*;
        using const_pointer = const value_type*;
        using difference_type = std::ptrdiff_t;
        using shape_type = typename chunk_type::shape_type;
        using temporary_type = typename inner_types::temporary_type;
        using bool_load_type = xt::bool_load_type<value_type>;

        static constexpr layout_type static_layout = layout_type::dynamic;
        static constexpr bool contiguous_layout = false;

        using chunk_iterator = xchunk_iterator<self_type>;
        using const_chunk_iterator = xchunk_iterator<const self_type>;

        template <class S>
        xchunked_array(
            chunk_storage_type&& chunks,
            S&& shape,
            S&& chunk_shape,
            layout_type chunk_memory_layout = XTENSOR_DEFAULT_LAYOUT
        );

        ~xchunked_array() = default;

        xchunked_array(const xchunked_array&) = default;
        xchunked_array& operator=(const xchunked_array&) = default;

        xchunked_array(xchunked_array&&) = default;
        xchunked_array& operator=(xchunked_array&&) = default;

        template <class E>
        xchunked_array(
            const xexpression<E>& e,
            chunk_storage_type&& chunks,
            layout_type chunk_memory_layout = XTENSOR_DEFAULT_LAYOUT
        );

        template <class E, class S>
        xchunked_array(
            const xexpression<E>& e,
            chunk_storage_type&& chunks,
            S&& chunk_shape,
            layout_type chunk_memory_layout = XTENSOR_DEFAULT_LAYOUT
        );

        template <class E>
        xchunked_array& operator=(const xexpression<E>& e);

        size_type dimension() const noexcept;
        const shape_type& shape() const noexcept;
        layout_type layout() const noexcept;
        bool is_contiguous() const noexcept;

        template <class... Idxs>
        reference operator()(Idxs... idxs);

        template <class... Idxs>
        const_reference operator()(Idxs... idxs) const;

        template <class It>
        reference element(It first, It last);

        template <class It>
        const_reference element(It first, It last) const;

        template <class S>
        bool broadcast_shape(S& s, bool reuse_cache = false) const;

        template <class S>
        bool has_linear_assign(const S& strides) const noexcept;

        template <class S>
        stepper stepper_begin(const S& shape) noexcept;
        template <class S>
        stepper stepper_end(const S& shape, layout_type) noexcept;

        template <class S>
        const_stepper stepper_begin(const S& shape) const noexcept;
        template <class S>
        const_stepper stepper_end(const S& shape, layout_type) const noexcept;

        const shape_type& chunk_shape() const noexcept;
        size_type grid_size() const noexcept;
        const grid_shape_type& grid_shape() const noexcept;

        chunk_storage_type& chunks();
        const chunk_storage_type& chunks() const;

        chunk_iterator chunk_begin();
        chunk_iterator chunk_end();

        const_chunk_iterator chunk_begin() const;
        const_chunk_iterator chunk_end() const;

        const_chunk_iterator chunk_cbegin() const;
        const_chunk_iterator chunk_cend() const;

    private:

        template <class... Idxs>
        using indexes_type = std::pair<std::array<std::size_t, sizeof...(Idxs)>, std::array<std::size_t, sizeof...(Idxs)>>;

        template <class... Idxs>
        using chunk_indexes_type = std::array<std::pair<std::size_t, std::size_t>, sizeof...(Idxs)>;

        template <std::size_t N>
        using static_indexes_type = std::pair<std::array<std::size_t, N>, std::array<std::size_t, N>>;

        using dynamic_indexes_type = std::pair<std::vector<std::size_t>, std::vector<std::size_t>>;

        template <class S1, class S2>
        void resize(S1&& shape, S2&& chunk_shape, layout_type chunk_memory_layout = XTENSOR_DEFAULT_LAYOUT);

        template <class... Idxs>
        indexes_type<Idxs...> get_indexes(Idxs... idxs) const;

        template <class Idx>
        std::pair<std::size_t, std::size_t> get_chunk_indexes_in_dimension(std::size_t dim, Idx idx) const;

        template <std::size_t... dims, class... Idxs>
        chunk_indexes_type<Idxs...> get_chunk_indexes(std::index_sequence<dims...>, Idxs... idxs) const;

        template <class T, std::size_t N>
        static_indexes_type<N> unpack(const std::array<T, N>& arr) const;

        template <class It>
        dynamic_indexes_type get_indexes_dynamic(It first, It last) const;

        shape_type m_shape;
        shape_type m_chunk_shape;
        chunk_storage_type m_chunks;
    };

    template <class E>
    constexpr bool is_chunked(const xexpression<E>& e);

    template <class E>
    constexpr bool is_chunked();

    /**
     * Creates an in-memory chunked array.
     *
     * This function returns an uninitialized ``xt::xchunked_array<xt::xarray<T>>``.
     *
     * @ingroup xt_xchunked_array
     *
     * @tparam T The type of the elements (e.g. double)
     * @tparam L The layout_type of the array
     *
     * @param shape The shape of the array
     * @param chunk_shape The shape of a chunk
     * @param chunk_memory_layout The layout of each chunk (default: XTENSOR_DEFAULT_LAYOUT)
     *
     * @return an ``xt::xchunked_array<xt::xarray<T>>`` with the given shape, chunk shape and memory
     * layout.
     */
    template <class T, layout_type L = XTENSOR_DEFAULT_LAYOUT, class S>
    xchunked_array<xarray<T, L>>
    chunked_array(S&& shape, S&& chunk_shape, layout_type chunk_memory_layout = XTENSOR_DEFAULT_LAYOUT);

    template <class T, layout_type L = XTENSOR_DEFAULT_LAYOUT>
    xchunked_array<xarray<T, L>> chunked_array(
        std::initializer_list<std::size_t> shape,
        std::initializer_list<std::size_t> chunk_shape,
        layout_type chunk_memory_layout = XTENSOR_DEFAULT_LAYOUT
    );

    /**
     * Creates an in-memory chunked array.
     *
     * This function returns a ``xt::xchunked_array<xt::xarray<T>>`` (where ``T`` is the expression's
     * value type) initialized from an expression.
     *
     * @ingroup xt_xchunked_array
     *
     * @tparam L The layout_type of the array
     *
     * @param e The expression to initialize the chunked array from
     * @param chunk_shape The shape of a chunk
     * @param chunk_memory_layout The layout of each chunk (default: XTENSOR_DEFAULT_LAYOUT)
     *
     * @return an ``xt::xchunked_array<xt::xarray<T>>`` from the given expression, with the given chunk
     * shape and memory layout.
     */
    template <layout_type L = XTENSOR_DEFAULT_LAYOUT, class E, class S>
    xchunked_array<xarray<typename E::value_type, L>>
    chunked_array(const xexpression<E>& e, S&& chunk_shape, layout_type chunk_memory_layout = XTENSOR_DEFAULT_LAYOUT);
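    // Illustrative usage sketch (editorial addition): building a chunked array from an
    // existing expression with an explicit chunk shape, through the factory overload
    // declared just above. The input array and shapes are arbitrary example data.
    //
    //     xt::xarray<double> src = {{1., 2., 3.}, {4., 5., 6.}};
    //     auto b = xt::chunked_array(src, std::vector<std::size_t>{2, 2});
    //     // b has shape {2, 3}, chunk_shape() {2, 2} and a {1, 2} chunk grid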
    /**
     * Creates an in-memory chunked array.
     *
     * This function returns a ``xt::xchunked_array<xt::xarray<T>>`` (where ``T`` is the expression's
     * value type) initialized from an expression.
     *
     * @ingroup xt_xchunked_array
     *
     * @tparam L The layout_type of the array
     *
     * @param e The expression to initialize the chunked array from
     * @param chunk_memory_layout The layout of each chunk (default: XTENSOR_DEFAULT_LAYOUT)
     *
     * @return an ``xt::xchunked_array<xt::xarray<T>>`` from the given expression, with the
     * expression's chunk shape and the given memory layout.
     */
    template <layout_type L = XTENSOR_DEFAULT_LAYOUT, class E>
    xchunked_array<xarray<typename E::value_type, L>>
    chunked_array(const xexpression<E>& e, layout_type chunk_memory_layout = XTENSOR_DEFAULT_LAYOUT);

    /*******************************
     * chunk_helper implementation *
     *******************************/

    namespace detail
    {
        // Workaround for VS2015
        template <class E>
        using try_chunk_shape = decltype(std::declval<E>().chunk_shape());

        template <class E, template <class> class OP, class = void>
        struct chunk_helper_impl
        {
            using is_chunked = std::false_type;

            static const auto& chunk_shape(const xexpression<E>& e)
            {
                return e.derived_cast().shape();
            }

            template <class S1, class S2>
            static void
            resize(E& chunks, const S1& container_shape, const S2& chunk_shape, layout_type chunk_memory_layout)
            {
                chunks.resize(container_shape);
                for (auto& c : chunks)
                {
                    c.resize(chunk_shape, chunk_memory_layout);
                }
            }
        };

        template <class E, template <class> class OP>
        struct chunk_helper_impl<E, OP, xtl::void_t<OP<E>>>
        {
            using is_chunked = std::true_type;

            static const auto& chunk_shape(const xexpression<E>& e)
            {
                return e.derived_cast().chunk_shape();
            }

            template <class S1, class S2>
            static void
            resize(E& chunks, const S1& container_shape, const S2& /*chunk_shape*/, layout_type /*chunk_memory_layout*/)
            {
                chunks.resize(container_shape);
            }
        };

        template <class E>
        using chunk_helper = chunk_helper_impl<E, try_chunk_shape>;
    }

    template <class E>
    constexpr bool is_chunked(const xexpression<E>&)
    {
        return is_chunked<E>();
    }

    template <class E>
    constexpr bool is_chunked()
    {
        using return_type = typename detail::chunk_helper<E>::is_chunked;
        return return_type::value;
    }

    template <class T, layout_type L, class S>
    inline xchunked_array<xarray<T, L>>
    chunked_array(S&& shape, S&& chunk_shape, layout_type chunk_memory_layout)
    {
        using chunk_storage = xarray<T, L>;
        return xchunked_array<chunk_storage>(
            chunk_storage(),
            std::forward<S>(shape),
            std::forward<S>(chunk_shape),
            chunk_memory_layout
        );
    }

    template <class T, layout_type L>
    xchunked_array<xarray<T, L>> chunked_array(
        std::initializer_list<std::size_t> shape,
        std::initializer_list<std::size_t> chunk_shape,
        layout_type chunk_memory_layout
    )
    {
        using sh_type = std::vector<std::size_t>;
        auto sh = xtl::forward_sequence<sh_type, std::initializer_list<std::size_t>>(shape);
        auto ch_sh = xtl::forward_sequence<sh_type, std::initializer_list<std::size_t>>(chunk_shape);
        return chunked_array<T, L>(std::move(sh), std::move(ch_sh), chunk_memory_layout);
    }

    template <layout_type L, class E, class S>
    inline xchunked_array<xarray<typename E::value_type, L>>
    chunked_array(const xexpression<E>& e, S&& chunk_shape, layout_type chunk_memory_layout)
    {
        using chunk_storage = xarray<typename E::value_type, L>;
        return xchunked_array<chunk_storage>(e, chunk_storage(), std::forward<S>(chunk_shape), chunk_memory_layout);
    }

    template <layout_type L, class E>
    inline xchunked_array<xarray<typename E::value_type, L>>
    chunked_array(const xexpression<E>& e, layout_type chunk_memory_layout)
    {
        using chunk_storage = xarray<typename E::value_type, L>;
        return xchunked_array<chunk_storage>(e, chunk_storage(), chunk_memory_layout);
    }
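    // Illustrative sketch (editorial addition): is_chunked() discriminates chunked from
    // non-chunked expressions at compile time; the chunk_helper machinery above uses the
    // same detection to decide where a chunk shape comes from. Example values are arbitrary.
    //
    //     xt::xarray<double> plain = {{1., 2.}, {3., 4.}};
    //     auto chunked = xt::chunked_array<double>({4, 4}, {2, 2});
    //     static_assert(!xt::is_chunked<decltype(plain)>(), "xarray is not chunked");
    //     static_assert(xt::is_chunked<decltype(chunked)>(), "xchunked_array is chunked");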
    /*********************************
     * xchunked_array implementation *
     *********************************/

    template <class CS>
    template <class S>
    inline xchunked_array<CS>::xchunked_array(CS&& chunks, S&& shape, S&& chunk_shape, layout_type chunk_memory_layout)
        : m_chunks(std::move(chunks))
    {
        resize(std::forward<S>(shape), std::forward<S>(chunk_shape), chunk_memory_layout);
    }

    template <class CS>
    template <class E>
    inline xchunked_array<CS>::xchunked_array(const xexpression<E>& e, CS&& chunks, layout_type chunk_memory_layout)
        : xchunked_array(e, std::move(chunks), detail::chunk_helper<E>::chunk_shape(e), chunk_memory_layout)
    {
    }

    template <class CS>
    template <class E, class S>
    inline xchunked_array<CS>::xchunked_array(
        const xexpression<E>& e,
        CS&& chunks,
        S&& chunk_shape,
        layout_type chunk_memory_layout
    )
        : m_chunks(std::move(chunks))
    {
        resize(e.derived_cast().shape(), std::forward<S>(chunk_shape), chunk_memory_layout);
        semantic_base::assign_xexpression(e);
    }

    template <class CS>
    template <class E>
    inline auto xchunked_array<CS>::operator=(const xexpression<E>& e) -> self_type&
    {
        return semantic_base::operator=(e);
    }

    template <class CS>
    inline auto xchunked_array<CS>::dimension() const noexcept -> size_type
    {
        return m_shape.size();
    }

    template <class CS>
    inline auto xchunked_array<CS>::shape() const noexcept -> const shape_type&
    {
        return m_shape;
    }

    template <class CS>
    inline auto xchunked_array<CS>::layout() const noexcept -> layout_type
    {
        return static_layout;
    }

    template <class CS>
    inline bool xchunked_array<CS>::is_contiguous() const noexcept
    {
        return false;
    }

    template <class CS>
    template <class... Idxs>
    inline auto xchunked_array<CS>::operator()(Idxs... idxs) -> reference
    {
        auto ii = get_indexes(idxs...);
        auto& chunk = m_chunks.element(ii.first.cbegin(), ii.first.cend());
        return chunk.element(ii.second.cbegin(), ii.second.cend());
    }

    template <class CS>
    template <class... Idxs>
    inline auto xchunked_array<CS>::operator()(Idxs... idxs) const -> const_reference
    {
        auto ii = get_indexes(idxs...);
        auto& chunk = m_chunks.element(ii.first.cbegin(), ii.first.cend());
        return chunk.element(ii.second.cbegin(), ii.second.cend());
    }

    template <class CS>
    template <class It>
    inline auto xchunked_array<CS>::element(It first, It last) -> reference
    {
        auto ii = get_indexes_dynamic(first, last);
        auto& chunk = m_chunks.element(ii.first.begin(), ii.first.end());
        return chunk.element(ii.second.begin(), ii.second.end());
    }

    template <class CS>
    template <class It>
    inline auto xchunked_array<CS>::element(It first, It last) const -> const_reference
    {
        auto ii = get_indexes_dynamic(first, last);
        auto& chunk = m_chunks.element(ii.first.begin(), ii.first.end());
        return chunk.element(ii.second.begin(), ii.second.end());
    }

    template <class CS>
    template <class S>
    inline bool xchunked_array<CS>::broadcast_shape(S& s, bool) const
    {
        return xt::broadcast_shape(shape(), s);
    }

    template <class CS>
    template <class S>
    inline bool xchunked_array<CS>::has_linear_assign(const S&) const noexcept
    {
        return false;
    }

    template <class CS>
    template <class S>
    inline auto xchunked_array<CS>::stepper_begin(const S& shape) noexcept -> stepper
    {
        size_type offset = shape.size() - this->dimension();
        return stepper(this, offset);
    }

    template <class CS>
    template <class S>
    inline auto xchunked_array<CS>::stepper_end(const S& shape, layout_type) noexcept -> stepper
    {
        size_type offset = shape.size() - this->dimension();
        return stepper(this, offset, true);
    }

    template <class CS>
    template <class S>
    inline auto xchunked_array<CS>::stepper_begin(const S& shape) const noexcept -> const_stepper
    {
        size_type offset = shape.size() - this->dimension();
        return const_stepper(this, offset);
    }

    template <class CS>
    template <class S>
    inline auto xchunked_array<CS>::stepper_end(const S& shape, layout_type) const noexcept -> const_stepper
    {
        size_type offset = shape.size() - this->dimension();
        return const_stepper(this, offset, true);
    }

    template <class CS>
    inline auto xchunked_array<CS>::chunk_shape() const noexcept -> const shape_type&
    {
        return m_chunk_shape;
    }

    template <class CS>
    inline auto xchunked_array<CS>::grid_size() const noexcept -> size_type
    {
        return m_chunks.size();
    }

    template <class CS>
    inline auto xchunked_array<CS>::grid_shape() const noexcept -> const grid_shape_type&
    {
        return m_chunks.shape();
    }

    template <class CS>
    inline auto xchunked_array<CS>::chunks() -> chunk_storage_type&
    {
        return m_chunks;
    }

    template <class CS>
    inline auto xchunked_array<CS>::chunks() const -> const chunk_storage_type&
    {
        return m_chunks;
    }
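    // Worked example (editorial addition) of the chunk-grid arithmetic used by resize()
    // and the index helpers further below, for a {10, 10} array with a {4, 4} chunk shape:
    //
    //   - grid_shape() is {3, 3}: each extent is 10 / 4 rounded up, i.e. 2 full chunks
    //     plus one edge chunk for the remainder, and grid_size() is 9.
    //   - element (5, 7) lives in chunk (5 / 4, 7 / 4) = (1, 1) at the in-chunk offset
    //     (5 % 4, 7 % 4) = (1, 3), which is what get_chunk_indexes_in_dimension()
    //     computes one dimension at a time.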
    template <class CS>
    inline auto xchunked_array<CS>::chunk_begin() -> chunk_iterator
    {
        shape_type chunk_index(m_shape.size(), size_type(0));
        return chunk_iterator(*this, std::move(chunk_index), 0u);
    }

    template <class CS>
    inline auto xchunked_array<CS>::chunk_end() -> chunk_iterator
    {
        shape_type sh = xtl::forward_sequence<shape_type, const grid_shape_type&>(grid_shape());
        return chunk_iterator(*this, std::move(sh), grid_size());
    }

    template <class CS>
    inline auto xchunked_array<CS>::chunk_begin() const -> const_chunk_iterator
    {
        shape_type chunk_index(m_shape.size(), size_type(0));
        return const_chunk_iterator(*this, std::move(chunk_index), 0u);
    }

    template <class CS>
    inline auto xchunked_array<CS>::chunk_end() const -> const_chunk_iterator
    {
        shape_type sh = xtl::forward_sequence<shape_type, const grid_shape_type&>(grid_shape());
        return const_chunk_iterator(*this, std::move(sh), grid_size());
    }

    template <class CS>
    inline auto xchunked_array<CS>::chunk_cbegin() const -> const_chunk_iterator
    {
        return chunk_begin();
    }

    template <class CS>
    inline auto xchunked_array<CS>::chunk_cend() const -> const_chunk_iterator
    {
        return chunk_end();
    }

    template <class CS>
    template <class S1, class S2>
    inline void xchunked_array<CS>::resize(S1&& shape, S2&& chunk_shape, layout_type chunk_memory_layout)
    {
        // compute chunk number in each dimension (shape_of_chunks)
        std::vector<std::size_t> shape_of_chunks(shape.size());
        std::transform(
            shape.cbegin(),
            shape.cend(),
            chunk_shape.cbegin(),
            shape_of_chunks.begin(),
            [](auto s, auto cs)
            {
                std::size_t cn = s / cs;
                if (s % cs > 0)
                {
                    cn += std::size_t(1);  // edge_chunk
                }
                return cn;
            }
        );

        detail::chunk_helper<chunk_storage_type>::resize(m_chunks, shape_of_chunks, chunk_shape, chunk_memory_layout);

        m_shape = xtl::forward_sequence<shape_type, S1>(shape);
        m_chunk_shape = xtl::forward_sequence<shape_type, S2>(chunk_shape);
    }

    template <class CS>
    template <class... Idxs>
    inline auto xchunked_array<CS>::get_indexes(Idxs... idxs) const -> indexes_type<Idxs...>
    {
        auto chunk_indexes_packed = get_chunk_indexes(std::make_index_sequence<sizeof...(Idxs)>(), idxs...);
        return unpack(chunk_indexes_packed);
    }

    template <class CS>
    template <class Idx>
    inline std::pair<std::size_t, std::size_t>
    xchunked_array<CS>::get_chunk_indexes_in_dimension(std::size_t dim, Idx idx) const
    {
        std::size_t index_of_chunk = static_cast<std::size_t>(idx) / m_chunk_shape[dim];
        std::size_t index_in_chunk = static_cast<std::size_t>(idx) - index_of_chunk * m_chunk_shape[dim];
        return std::make_pair(index_of_chunk, index_in_chunk);
    }

    template <class CS>
    template <std::size_t... dims, class... Idxs>
    inline auto xchunked_array<CS>::get_chunk_indexes(std::index_sequence<dims...>, Idxs... idxs) const
        -> chunk_indexes_type<Idxs...>
    {
        chunk_indexes_type<Idxs...> chunk_indexes = {{get_chunk_indexes_in_dimension(dims, idxs)...}};
        return chunk_indexes;
    }

    template <class CS>
    template <class T, std::size_t N>
    inline auto xchunked_array<CS>::unpack(const std::array<T, N>& arr) const -> static_indexes_type<N>
    {
        std::array<std::size_t, N> arr0;
        std::array<std::size_t, N> arr1;
        for (std::size_t i = 0; i < N; ++i)
        {
            arr0[i] = std::get<0>(arr[i]);
            arr1[i] = std::get<1>(arr[i]);
        }
        return std::make_pair(arr0, arr1);
    }

    template <class CS>
    template <class It>
    inline auto xchunked_array<CS>::get_indexes_dynamic(It first, It last) const -> dynamic_indexes_type
    {
        auto size = static_cast<std::size_t>(std::distance(first, last));
        std::vector<std::size_t> indexes_of_chunk(size);
        std::vector<std::size_t> indexes_in_chunk(size);
        for (std::size_t dim = 0; dim < size; ++dim)
        {
            auto chunk_index = get_chunk_indexes_in_dimension(dim, *first++);
            indexes_of_chunk[dim] = chunk_index.first;
            indexes_in_chunk[dim] = chunk_index.second;
        }
        return std::make_pair(indexes_of_chunk, indexes_in_chunk);
    }
}

#endif