/***************************************************************************
 * Copyright (c) Johan Mabille, Sylvain Corlay and Wolf Vollprecht          *
 * Copyright (c) QuantStack                                                 *
 *                                                                          *
 * Distributed under the terms of the BSD 3-Clause License.                 *
 *                                                                          *
 * The full license is in the file LICENSE, distributed with this software. *
 ****************************************************************************/

#ifndef XTENSOR_SLICE_HPP
#define XTENSOR_SLICE_HPP

#include <cstddef>
#include <map>
#include <type_traits>
#include <utility>

#include <xtl/xtype_traits.hpp>

#include "xstorage.hpp"
#include "xtensor_config.hpp"
#include "xutils.hpp"

#ifndef XTENSOR_CONSTEXPR
#if (defined(_MSC_VER) || __GNUC__ < 8)
#define XTENSOR_CONSTEXPR inline
#define XTENSOR_GLOBAL_CONSTEXPR static const
#else
#define XTENSOR_CONSTEXPR constexpr
#define XTENSOR_GLOBAL_CONSTEXPR constexpr
#endif
#endif

namespace xt
{

    /**********************
     * xslice declaration *
     **********************/

    template <class D>
    class xslice
    {
    public:

        using derived_type = D;

        derived_type& derived_cast() noexcept;
        const derived_type& derived_cast() const noexcept;

    protected:

        xslice() = default;
        ~xslice() = default;

        xslice(const xslice&) = default;
        xslice& operator=(const xslice&) = default;

        xslice(xslice&&) = default;
        xslice& operator=(xslice&&) = default;
    };

    template <class S>
    using is_xslice = std::is_base_of<xslice<S>, S>;

    template <class S, class R = void>
    using disable_xslice = typename std::enable_if<!is_xslice<S>::value, R>::type;

    template <class... S>
    using has_xslice = xtl::disjunction<is_xslice<S>...>;

    /**************
     * slice tags *
     **************/

#define DEFINE_TAG_CONVERSION(NAME)                     \
    template <class T>                                  \
    XTENSOR_CONSTEXPR NAME convert() const noexcept     \
    {                                                   \
        return NAME();                                  \
    }

    struct xall_tag
    {
        DEFINE_TAG_CONVERSION(xall_tag)
    };

    struct xnewaxis_tag
    {
        DEFINE_TAG_CONVERSION(xnewaxis_tag)
    };

    struct xellipsis_tag
    {
        DEFINE_TAG_CONVERSION(xellipsis_tag)
    };

#undef DEFINE_TAG_CONVERSION

    /**********************
     * xrange declaration *
     **********************/

    template <class T>
    class xrange : public xslice<xrange<T>>
    {
    public:

        using size_type = T;
        using self_type = xrange<T>;

        xrange() = default;
        xrange(size_type start_val, size_type stop_val) noexcept;

        template <class S, typename = std::enable_if_t<std::is_convertible<S, T>::value, void>>
        operator xrange<S>() const noexcept;

        // Same as implicit conversion operator but more convenient to call
        // from a variant visitor
        template <class S, typename = std::enable_if_t<std::is_convertible<S, T>::value, void>>
        xrange<S> convert() const noexcept;

        size_type operator()(size_type i) const noexcept;

        size_type size() const noexcept;
        size_type step_size() const noexcept;
        size_type step_size(std::size_t i, std::size_t n = 1) const noexcept;
        size_type revert_index(std::size_t i) const noexcept;

        bool contains(size_type i) const noexcept;

        bool operator==(const self_type& rhs) const noexcept;
        bool operator!=(const self_type& rhs) const noexcept;

    private:

        size_type m_start;
        size_type m_size;

        template <class S>
        friend class xrange;
    };

    /******************************
     * xstepped_range declaration *
     ******************************/

    template <class T>
    class xstepped_range : public xslice<xstepped_range<T>>
    {
    public:

        using size_type = T;
        using self_type = xstepped_range<T>;

        xstepped_range() = default;
        xstepped_range(size_type start_val, size_type stop_val, size_type step) noexcept;

        template <class S, typename = std::enable_if_t<std::is_convertible<S, T>::value, void>>
        operator xstepped_range<S>() const noexcept;

        // Same as implicit conversion operator but more convenient to call
        // from a variant visitor
        template <class S, typename = std::enable_if_t<std::is_convertible<S, T>::value, void>>
        xstepped_range<S> convert() const noexcept;

        size_type operator()(size_type i) const noexcept;

        size_type size() const noexcept;
        size_type step_size() const noexcept;
        size_type step_size(std::size_t i, std::size_t n = 1) const noexcept;
        size_type revert_index(std::size_t i) const noexcept;

        bool contains(size_type i) const noexcept;

        bool operator==(const self_type& rhs) const noexcept;
        bool operator!=(const self_type& rhs) const noexcept;

    private:

        size_type m_start;
        size_type m_size;
        size_type m_step;

        template <class S>
        friend class xstepped_range;
    };

    /********************
     * xall declaration *
     ********************/

    template <class T>
    class xall : public xslice<xall<T>>
    {
    public:

        using size_type = T;
        using self_type = xall<T>;

        xall() = default;
        explicit xall(size_type size) noexcept;

        template <class S, typename = std::enable_if_t<std::is_convertible<S, T>::value, void>>
        operator xall<S>() const noexcept;

        // Same as implicit conversion operator but more convenient to call
        // from a variant visitor
        template <class S, typename = std::enable_if_t<std::is_convertible<S, T>::value, void>>
        xall<S> convert() const noexcept;

        size_type operator()(size_type i) const noexcept;

        size_type size() const noexcept;
        size_type step_size() const noexcept;
        size_type step_size(std::size_t i, std::size_t n = 1) const noexcept;
        size_type revert_index(std::size_t i) const noexcept;

        bool contains(size_type i) const noexcept;

        bool operator==(const self_type& rhs) const noexcept;
        bool operator!=(const self_type& rhs) const noexcept;

    private:

        size_type m_size;
    };

    /**
     * Returns a slice representing a full dimension,
     * to be used as an argument of view function.
     * @sa view, strided_view
     */
    inline auto all() noexcept
    {
        return xall_tag();
    }

    /**
     * Returns a slice representing all remaining dimensions,
     * and selecting all in these dimensions. Ellipsis will expand
     * to a series of `all()` slices, until the number of slices is
     * equal to the number of dimensions of the source array.
     *
     * Note: ellipsis can only be used in strided_view!
     *
     * @code{.cpp}
     * xarray<double> a = xarray<double>::from_shape({5, 5, 1, 1, 5});
     * auto v = xt::strided_view(a, {2, xt::ellipsis(), 2});
     * // equivalent to using {2, xt::all(), xt::all(), xt::all(), 2};
     * @endcode
     *
     * @sa strided_view
     */
    inline auto ellipsis() noexcept
    {
        return xellipsis_tag();
    }

    /************************
     * xnewaxis declaration *
     ************************/

    template <class T>
    class xnewaxis : public xslice<xnewaxis<T>>
    {
    public:

        using size_type = T;
        using self_type = xnewaxis<T>;

        xnewaxis() = default;

        template <class S, typename = std::enable_if_t<std::is_convertible<S, T>::value, void>>
        operator xnewaxis<S>() const noexcept;

        // Same as implicit conversion operator but more convenient to call
        // from a variant visitor
        template <class S, typename = std::enable_if_t<std::is_convertible<S, T>::value, void>>
        xnewaxis<S> convert() const noexcept;

        size_type operator()(size_type i) const noexcept;

        size_type size() const noexcept;
        size_type step_size() const noexcept;
        size_type step_size(std::size_t i, std::size_t n = 1) const noexcept;
        size_type revert_index(std::size_t i) const noexcept;

        bool contains(size_type i) const noexcept;

        bool operator==(const self_type& rhs) const noexcept;
        bool operator!=(const self_type& rhs) const noexcept;
    };

    /**
     * Returns a slice representing a new axis of length one,
     * to be used as an argument of view function.
     * @sa view, strided_view
     */
    inline auto newaxis() noexcept
    {
        return xnewaxis_tag();
    }
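
    // Usage sketch (illustrative only, not part of the public API; assumes the
    // usual <xtensor/xarray.hpp>, <xtensor/xview.hpp> and <xtensor/xstrided_view.hpp>
    // headers):
    //
    //     xt::xarray<double> a = xt::zeros<double>({3, 4});
    //     auto c1 = xt::view(a, xt::all(), 1);                         // shape {3}
    //     auto c2 = xt::view(a, xt::newaxis(), xt::all(), xt::all());  // shape {1, 3, 4}
    //     auto c3 = xt::strided_view(a, {xt::ellipsis(), 0});          // shape {3}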

    /***************************
     * xkeep_slice declaration *
     ***************************/

    template <class T>
    class xkeep_slice;

    namespace detail
    {
        template <class T>
        struct is_xkeep_slice : std::false_type
        {
        };

        template <class T>
        struct is_xkeep_slice<xkeep_slice<T>> : std::true_type
        {
        };

        template <class T>
        using disable_xkeep_slice_t = std::enable_if_t<!is_xkeep_slice<std::decay_t<T>>::value, void>;

        template <class T>
        using enable_xkeep_slice_t = std::enable_if_t<is_xkeep_slice<std::decay_t<T>>::value, void>;
    }

    template <class T>
    class xkeep_slice : public xslice<xkeep_slice<T>>
    {
    public:

        using container_type = svector<T>;
        using size_type = typename container_type::value_type;
        using self_type = xkeep_slice<T>;

        template <class C, typename = detail::disable_xkeep_slice_t<C>>
        explicit xkeep_slice(C& cont);
        explicit xkeep_slice(container_type&& cont);

        template <class S>
        xkeep_slice(std::initializer_list<S> t);

        template <class S, typename = std::enable_if_t<std::is_convertible<S, T>::value, void>>
        operator xkeep_slice<S>() const noexcept;

        // Same as implicit conversion operator but more convenient to call
        // from a variant visitor
        template <class S, typename = std::enable_if_t<std::is_convertible<S, T>::value, void>>
        xkeep_slice<S> convert() const noexcept;

        size_type operator()(size_type i) const noexcept;
        size_type size() const noexcept;

        void normalize(std::size_t s);

        size_type step_size(std::size_t i, std::size_t n = 1) const noexcept;
        size_type revert_index(std::size_t i) const;

        bool contains(size_type i) const noexcept;

        bool operator==(const self_type& rhs) const noexcept;
        bool operator!=(const self_type& rhs) const noexcept;

    private:

        xkeep_slice() = default;

        container_type m_indices;
        container_type m_raw_indices;

        template <class S>
        friend class xkeep_slice;
    };

    namespace detail
    {
        template <class T>
        using disable_integral_keep = std::enable_if_t<
            !xtl::is_integral<std::decay_t<T>>::value,
            xkeep_slice<typename std::decay_t<T>::value_type>>;

        template <class T, class R>
        using enable_integral_keep = std::enable_if_t<xtl::is_integral<T>::value, xkeep_slice<R>>;
    }

    /**
     * Create a non-contiguous slice from a container of indices to keep.
     * Note: this slice cannot be used in the xstrided_view!
     *
     * @code{.cpp}
     * xt::xarray<double> a = xt::arange(9);
     * a.reshape({3, 3});
     * xt::view(a, xt::keep(0, 2)); // => {{0, 1, 2}, {6, 7, 8}}
     * xt::view(a, xt::keep(1, 1, 1)); // => {{3, 4, 5}, {3, 4, 5}, {3, 4, 5}}
     * @endcode
     *
     * @param indices The indices container
     * @return instance of xkeep_slice
     */
    template <class T>
    inline detail::disable_integral_keep<T> keep(T&& indices)
    {
        return xkeep_slice<typename std::decay_t<T>::value_type>(std::forward<T>(indices));
    }

    template <class R = std::ptrdiff_t, class T>
    inline detail::enable_integral_keep<T, R> keep(T i)
    {
        using slice_type = xkeep_slice<R>;
        using container_type = typename slice_type::container_type;
        container_type tmp = {static_cast<R>(i)};
        return slice_type(std::move(tmp));
    }

    template <class R = std::ptrdiff_t, class Arg0, class Arg1, class... Args>
    inline xkeep_slice<R> keep(Arg0 i0, Arg1 i1, Args... args)
    {
        using slice_type = xkeep_slice<R>;
        using container_type = typename slice_type::container_type;
        container_type tmp = {static_cast<R>(i0), static_cast<R>(i1), static_cast<R>(args)...};
        return slice_type(std::move(tmp));
    }
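
    // Usage sketch (illustrative only, not part of the public API): keep() also
    // accepts negative indices, which normalize() later resolves against the
    // axis length. With the 3x3 array from the documentation example above:
    //
    //     xt::view(a, xt::keep(-1));     // last row => {{6, 7, 8}}
    //     xt::view(a, xt::keep(0, -1));  // first and last rows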

    /***************************
     * xdrop_slice declaration *
     ***************************/

    template <class T>
    class xdrop_slice;

    namespace detail
    {
        template <class T>
        struct is_xdrop_slice : std::false_type
        {
        };

        template <class T>
        struct is_xdrop_slice<xdrop_slice<T>> : std::true_type
        {
        };

        template <class T>
        using disable_xdrop_slice_t = std::enable_if_t<!is_xdrop_slice<std::decay_t<T>>::value, void>;

        template <class T>
        using enable_xdrop_slice_t = std::enable_if_t<is_xdrop_slice<std::decay_t<T>>::value, void>;
    }

    template <class T>
    class xdrop_slice : public xslice<xdrop_slice<T>>
    {
    public:

        using container_type = svector<T>;
        using size_type = typename container_type::value_type;
        using self_type = xdrop_slice<T>;

        template <class C, typename = detail::disable_xdrop_slice_t<C>>
        explicit xdrop_slice(C& cont);
        explicit xdrop_slice(container_type&& cont);

        template <class S>
        xdrop_slice(std::initializer_list<S> t);

        template <class S, typename = std::enable_if_t<std::is_convertible<S, T>::value, void>>
        operator xdrop_slice<S>() const noexcept;

        // Same as implicit conversion operator but more convenient to call
        // from a variant visitor
        template <class S, typename = std::enable_if_t<std::is_convertible<S, T>::value, void>>
        xdrop_slice<S> convert() const noexcept;

        size_type operator()(size_type i) const noexcept;
        size_type size() const noexcept;

        void normalize(std::size_t s);

        size_type step_size(std::size_t i, std::size_t n = 1) const noexcept;
        size_type revert_index(std::size_t i) const;

        bool contains(size_type i) const noexcept;

        bool operator==(const self_type& rhs) const noexcept;
        bool operator!=(const self_type& rhs) const noexcept;

    private:

        xdrop_slice() = default;

        container_type m_indices;
        container_type m_raw_indices;
        std::map<size_type, size_type> m_inc;
        size_type m_size;

        template <class S>
        friend class xdrop_slice;
    };

    namespace detail
    {
        template <class T>
        using disable_integral_drop = std::enable_if_t<
            !xtl::is_integral<std::decay_t<T>>::value,
            xdrop_slice<typename std::decay_t<T>::value_type>>;

        template <class T, class R>
        using enable_integral_drop = std::enable_if_t<xtl::is_integral<T>::value, xdrop_slice<R>>;
    }

    /**
     * Create a non-contiguous slice from a container of indices to drop.
     * Note: this slice cannot be used in the xstrided_view!
     *
     * @code{.cpp}
     * xt::xarray<double> a = xt::arange(9);
     * a.reshape({3, 3});
     * xt::view(a, xt::drop(0, 2)); // => {{3, 4, 5}}
     * @endcode
     *
     * @param indices The container of indices to drop
     * @return instance of xdrop_slice
     */
    template <class T>
    inline detail::disable_integral_drop<T> drop(T&& indices)
    {
        return xdrop_slice<typename std::decay_t<T>::value_type>(std::forward<T>(indices));
    }

    template <class R = std::ptrdiff_t, class T>
    inline detail::enable_integral_drop<T, R> drop(T i)
    {
        using slice_type = xdrop_slice<R>;
        using container_type = typename slice_type::container_type;
        container_type tmp = {static_cast<R>(i)};
        return slice_type(std::move(tmp));
    }

    template <class R = std::ptrdiff_t, class Arg0, class Arg1, class... Args>
    inline xdrop_slice<R> drop(Arg0 i0, Arg1 i1, Args... args)
    {
        using slice_type = xdrop_slice<R>;
        using container_type = typename slice_type::container_type;
        container_type tmp = {static_cast<R>(i0), static_cast<R>(i1), static_cast<R>(args)...};
        return slice_type(std::move(tmp));
    }
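
    // Usage sketch (illustrative only, not part of the public API): like keep(),
    // drop() resolves negative indices against the axis length. With the 3x3
    // array from the documentation example above:
    //
    //     xt::view(a, xt::drop(-1));     // drops the last row => {{0, 1, 2}, {3, 4, 5}}
    //     xt::view(a, xt::drop(0, -1));  // keeps only the middle row => {{3, 4, 5}}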

    /******************************
     * xrange_adaptor declaration *
     ******************************/

    template <class A, class B, class C>
    struct xrange_adaptor
    {
        xrange_adaptor(A start_val, B stop_val, C step)
            : m_start(start_val)
            , m_stop(stop_val)
            , m_step(step)
        {
        }

        template <class MI = A, class MA = B, class STEP = C>
        inline std::enable_if_t<
            xtl::is_integral<MI>::value && xtl::is_integral<MA>::value && xtl::is_integral<STEP>::value,
            xstepped_range<std::ptrdiff_t>>
        get(std::size_t size) const
        {
            return get_stepped_range(m_start, m_stop, m_step, size);
        }

        template <class MI = A, class MA = B, class STEP = C>
        inline std::enable_if_t<
            !xtl::is_integral<MI>::value && xtl::is_integral<MA>::value && xtl::is_integral<STEP>::value,
            xstepped_range<std::ptrdiff_t>>
        get(std::size_t size) const
        {
            return get_stepped_range(m_step > 0 ? 0 : static_cast<std::ptrdiff_t>(size) - 1, m_stop, m_step, size);
        }

        template <class MI = A, class MA = B, class STEP = C>
        inline std::enable_if_t<
            xtl::is_integral<MI>::value && !xtl::is_integral<MA>::value && xtl::is_integral<STEP>::value,
            xstepped_range<std::ptrdiff_t>>
        get(std::size_t size) const
        {
            auto sz = static_cast<std::ptrdiff_t>(size);
            return get_stepped_range(m_start, m_step > 0 ? sz : -(sz + 1), m_step, size);
        }

        template <class MI = A, class MA = B, class STEP = C>
        inline std::enable_if_t<
            xtl::is_integral<MI>::value && xtl::is_integral<MA>::value && !xtl::is_integral<STEP>::value,
            xrange<std::ptrdiff_t>>
        get(std::size_t size) const
        {
            return xrange<std::ptrdiff_t>(normalize(m_start, size), normalize(m_stop, size));
        }

        template <class MI = A, class MA = B, class STEP = C>
        inline std::enable_if_t<
            !xtl::is_integral<MI>::value && !xtl::is_integral<MA>::value && xtl::is_integral<STEP>::value,
            xstepped_range<std::ptrdiff_t>>
        get(std::size_t size) const
        {
            std::ptrdiff_t start = m_step >= 0 ? 0 : static_cast<std::ptrdiff_t>(size) - 1;
            std::ptrdiff_t stop = m_step >= 0 ? static_cast<std::ptrdiff_t>(size) : -1;
            return xstepped_range<std::ptrdiff_t>(start, stop, m_step);
        }

        template <class MI = A, class MA = B, class STEP = C>
        inline std::enable_if_t<
            xtl::is_integral<MI>::value && !xtl::is_integral<MA>::value && !xtl::is_integral<STEP>::value,
            xrange<std::ptrdiff_t>>
        get(std::size_t size) const
        {
            return xrange<std::ptrdiff_t>(normalize(m_start, size), static_cast<std::ptrdiff_t>(size));
        }

        template <class MI = A, class MA = B, class STEP = C>
        inline std::enable_if_t<
            !xtl::is_integral<MI>::value && xtl::is_integral<MA>::value && !xtl::is_integral<STEP>::value,
            xrange<std::ptrdiff_t>>
        get(std::size_t size) const
        {
            return xrange<std::ptrdiff_t>(0, normalize(m_stop, size));
        }

        template <class MI = A, class MA = B, class STEP = C>
        inline std::enable_if_t<
            !xtl::is_integral<MI>::value && !xtl::is_integral<MA>::value && !xtl::is_integral<STEP>::value,
            xall<std::ptrdiff_t>>
        get(std::size_t size) const
        {
            return xall<std::ptrdiff_t>(static_cast<std::ptrdiff_t>(size));
        }

        A start() const
        {
            return m_start;
        }

        B stop() const
        {
            return m_stop;
        }

        C step() const
        {
            return m_step;
        }

    private:

        static auto normalize(std::ptrdiff_t val, std::size_t ssize)
        {
            std::ptrdiff_t size = static_cast<std::ptrdiff_t>(ssize);
            val = (val >= 0) ? val : val + size;
            return (std::max)(std::ptrdiff_t(0), (std::min)(size, val));
        }

        static auto get_stepped_range(std::ptrdiff_t start, std::ptrdiff_t stop, std::ptrdiff_t step, std::size_t ssize)
        {
            std::ptrdiff_t size = static_cast<std::ptrdiff_t>(ssize);
            start = (start >= 0) ? start : start + size;
            stop = (stop >= 0) ? stop : stop + size;
            if (step > 0)
            {
                start = (std::max)(std::ptrdiff_t(0), (std::min)(size, start));
                stop = (std::max)(std::ptrdiff_t(0), (std::min)(size, stop));
            }
            else
            {
                start = (std::max)(std::ptrdiff_t(-1), (std::min)(size - 1, start));
                stop = (std::max)(std::ptrdiff_t(-1), (std::min)(size - 1, stop));
            }
            return xstepped_range<std::ptrdiff_t>(start, stop, step);
        }

        A m_start;
        B m_stop;
        C m_step;
    };
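
    // How xrange_adaptor resolves placeholders (illustrative sketch, not part of
    // the public API): each get() overload substitutes the missing bounds with
    // defaults derived from the axis length and the sign of the step. For an
    // axis of length 5:
    //
    //     range(1, _).get(5)      // -> xrange(1, 5)
    //     range(-2, _).get(5)     // -> xrange(3, 5)
    //     range(_, _, -1).get(5)  // -> xstepped_range(4, -1, -1)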

    /*******************************
     * Placeholders and rangemaker *
     *******************************/

    namespace placeholders
    {
        // xtensor universal placeholder
        struct xtuph
        {
        };

        template <class... T>
        struct rangemaker
        {
            std::ptrdiff_t rng[3];  // = { 0, 0, 0 };
        };

        XTENSOR_CONSTEXPR xtuph get_tuph_or_val(std::ptrdiff_t /*val*/, std::true_type)
        {
            return xtuph();
        }

        XTENSOR_CONSTEXPR std::ptrdiff_t get_tuph_or_val(std::ptrdiff_t val, std::false_type)
        {
            return val;
        }

        template <class A, class B, class C>
        struct rangemaker<A, B, C>
        {
            XTENSOR_CONSTEXPR operator xrange_adaptor<A, B, C>()
            {
                return xrange_adaptor<A, B, C>(
                    {get_tuph_or_val(rng[0], std::is_same<A, xtuph>()),
                     get_tuph_or_val(rng[1], std::is_same<B, xtuph>()),
                     get_tuph_or_val(rng[2], std::is_same<C, xtuph>())}
                );
            }

            std::ptrdiff_t rng[3];  // = { 0, 0, 0 };
        };

        template <class A, class B>
        struct rangemaker<A, B>
        {
            XTENSOR_CONSTEXPR operator xrange_adaptor<A, B, xtuph>()
            {
                return xrange_adaptor<A, B, xtuph>(
                    {get_tuph_or_val(rng[0], std::is_same<A, xtuph>()),
                     get_tuph_or_val(rng[1], std::is_same<B, xtuph>()),
                     xtuph()}
                );
            }

            std::ptrdiff_t rng[3];  // = { 0, 0, 0 };
        };

        template <class... OA>
        XTENSOR_CONSTEXPR auto operator|(const rangemaker<OA...>& rng, const std::ptrdiff_t& t)
        {
            auto nrng = rangemaker<OA..., std::ptrdiff_t>({rng.rng[0], rng.rng[1], rng.rng[2]});
            nrng.rng[sizeof...(OA)] = t;
            return nrng;
        }

        template <class... OA>
        XTENSOR_CONSTEXPR auto operator|(const rangemaker<OA...>& rng, const xt::placeholders::xtuph& /*t*/)
        {
            auto nrng = rangemaker<OA..., xt::placeholders::xtuph>({rng.rng[0], rng.rng[1], rng.rng[2]});
            return nrng;
        }

        XTENSOR_GLOBAL_CONSTEXPR xtuph _{};
        XTENSOR_GLOBAL_CONSTEXPR rangemaker<> _r = rangemaker<>({0, 0, 0});
        XTENSOR_GLOBAL_CONSTEXPR xall_tag _a{};
        XTENSOR_GLOBAL_CONSTEXPR xnewaxis_tag _n{};
        XTENSOR_GLOBAL_CONSTEXPR xellipsis_tag _e{};
    }

    inline auto xnone()
    {
        return placeholders::xtuph();
    }

    namespace detail
    {
        template <class T, class E = void>
        struct cast_if_integer
        {
            using type = T;

            type operator()(T t)
            {
                return t;
            }
        };

        template <class T>
        struct cast_if_integer<T, std::enable_if_t<xtl::is_integral<T>::value>>
        {
            using type = std::ptrdiff_t;

            type operator()(T t)
            {
                return static_cast<type>(t);
            }
        };

        template <class T>
        using cast_if_integer_t = typename cast_if_integer<T>::type;
    }
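
    // Usage sketch (illustrative only, not part of the public API): `_` stands
    // for "not specified", and `_r` builds an xrange_adaptor piece by piece
    // through operator|:
    //
    //     using namespace xt::placeholders;
    //     _r | 0 | 5 | 2;  // same xrange_adaptor as range(0, 5, 2)
    //     _r | _ | 5;      // same xrange_adaptor as range(_, 5)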

    /**
     * Select a range from start_val to stop_val (excluded).
     * You can use the shorthand `_` syntax to select from the start or until the end.
     *
     * @code{.cpp}
     * using namespace xt::placeholders;  // to enable _ syntax
     *
     * range(3, _)  // select from index 3 to the end
     * range(_, 5)  // select from index 0 to 5 (excluded)
     * range(_, _)  // equivalent to `all()`
     * @endcode
     *
     * @sa view, strided_view
     */
    template <class A, class B>
    inline auto range(A start_val, B stop_val)
    {
        return xrange_adaptor<detail::cast_if_integer_t<A>, detail::cast_if_integer_t<B>, placeholders::xtuph>(
            detail::cast_if_integer<A>{}(start_val),
            detail::cast_if_integer<B>{}(stop_val),
            placeholders::xtuph()
        );
    }

    /**
     * Select a range from start_val to stop_val (excluded) with a step.
     * You can use the shorthand `_` syntax to select from the start or until the end.
     *
     * @code{.cpp}
     * using namespace xt::placeholders;  // to enable _ syntax
     * range(3, _, 5)  // select from index 3 to the end with stepsize 5
     * @endcode
     *
     * @sa view, strided_view
     */
    template <class A, class B, class C>
    inline auto range(A start_val, B stop_val, C step)
    {
        return xrange_adaptor<detail::cast_if_integer_t<A>, detail::cast_if_integer_t<B>, detail::cast_if_integer_t<C>>(
            detail::cast_if_integer<A>{}(start_val),
            detail::cast_if_integer<B>{}(stop_val),
            detail::cast_if_integer<C>{}(step)
        );
    }
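
    // Usage sketch (illustrative only, not part of the public API; assumes
    // <xtensor/xview.hpp> and a 1-D container `a` with more than 5 elements):
    //
    //     using namespace xt::placeholders;
    //     auto v1 = xt::view(a, xt::range(3, _));      // indices 3, 4, ... up to the end
    //     auto v2 = xt::view(a, xt::range(_, _, 2));   // every other index
    //     auto v3 = xt::view(a, xt::range(5, _, -1));  // indices 5, 4, ..., 0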

    /******************************************************
     * homogeneous get_size for integral types and slices *
     ******************************************************/

    template <class S>
    inline disable_xslice<S, std::size_t> get_size(const S&) noexcept
    {
        return 1;
    }

    template <class S>
    inline auto get_size(const xslice<S>& slice) noexcept
    {
        return slice.derived_cast().size();
    }

    /*******************************************************
     * homogeneous step_size for integral types and slices *
     *******************************************************/

    template <class S>
    inline disable_xslice<S, std::size_t> step_size(const S&, std::size_t) noexcept
    {
        return 0;
    }

    template <class S>
    inline disable_xslice<S, std::size_t> step_size(const S&, std::size_t, std::size_t) noexcept
    {
        return 0;
    }

    template <class S>
    inline auto step_size(const xslice<S>& slice, std::size_t idx) noexcept
    {
        return slice.derived_cast().step_size(idx);
    }

    template <class S>
    inline auto step_size(const xslice<S>& slice, std::size_t idx, std::size_t n) noexcept
    {
        return slice.derived_cast().step_size(idx, n);
    }

    /*********************************************
     * homogeneous value for integral and slices *
     *********************************************/

    template <class S, class I>
    inline disable_xslice<S, std::size_t> value(const S& s, I) noexcept
    {
        return static_cast<std::size_t>(s);
    }

    template <class S, class I>
    inline auto value(const xslice<S>& slice, I i) noexcept
    {
        using ST = typename S::size_type;
        return slice.derived_cast()(static_cast<ST>(i));
    }

    /****************************************
     * homogeneous get_slice_implementation *
     ****************************************/

    namespace detail
    {
        template <class T>
        struct slice_implementation_getter
        {
            template <class E, class SL>
            inline decltype(auto) operator()(E& e, SL&& slice, std::size_t index) const
            {
                return get_slice(e, std::forward<SL>(slice), index, xtl::is_signed<std::decay_t<SL>>());
            }

        private:

            template <class E, class SL>
            inline decltype(auto) get_slice(E&, SL&& slice, std::size_t, std::false_type) const
            {
                return std::forward<SL>(slice);
            }

            template <class E, class SL>
            inline decltype(auto) get_slice(E& e, SL&& slice, std::size_t index, std::true_type) const
            {
                using int_type = std::decay_t<SL>;
                return slice < int_type(0) ? slice + static_cast<int_type>(e.shape(index)) : std::ptrdiff_t(slice);
            }
        };

        struct keep_drop_getter
        {
            template <class E, class SL>
            inline decltype(auto) operator()(E& e, SL&& slice, std::size_t index) const
            {
                slice.normalize(e.shape()[index]);
                return std::forward<SL>(slice);
            }

            template <class E, class SL>
            inline auto operator()(E& e, const SL& slice, std::size_t index) const
            {
                return this->operator()(e, SL(slice), index);
            }
        };

        template <class T>
        struct slice_implementation_getter<xkeep_slice<T>> : keep_drop_getter
        {
        };

        template <class T>
        struct slice_implementation_getter<xdrop_slice<T>> : keep_drop_getter
        {
        };

        template <>
        struct slice_implementation_getter<xall_tag>
        {
            template <class E, class SL>
            inline auto operator()(E& e, SL&&, std::size_t index) const
            {
                return xall<typename E::size_type>(e.shape()[index]);
            }
        };

        template <>
        struct slice_implementation_getter<xnewaxis_tag>
        {
            template <class E, class SL>
            inline auto operator()(E&, SL&&, std::size_t) const
            {
                return xnewaxis<typename E::size_type>();
            }
        };

        template <class A, class B, class C>
        struct slice_implementation_getter<xrange_adaptor<A, B, C>>
        {
            template <class E, class SL>
            inline auto operator()(E& e, SL&& adaptor, std::size_t index) const
            {
                return adaptor.get(e.shape()[index]);
            }
        };
    }

    template <class E, class SL>
    inline auto get_slice_implementation(E& e, SL&& slice, std::size_t index)
    {
        detail::slice_implementation_getter<std::decay_t<SL>> getter;
        return getter(e, std::forward<SL>(slice), index);
    }

    /******************************
     * homogeneous get_slice_type *
     ******************************/

    namespace detail
    {
        template <class E, class SL>
        struct get_slice_type_impl
        {
            using type = SL;
        };

        template <class E>
        struct get_slice_type_impl<E, xall_tag>
        {
            using type = xall<typename E::size_type>;
        };

        template <class E>
        struct get_slice_type_impl<E, xnewaxis_tag>
        {
            using type = xnewaxis<typename E::size_type>;
        };

        template <class E, class A, class B, class C>
        struct get_slice_type_impl<E, xrange_adaptor<A, B, C>>
        {
            using type = decltype(xrange_adaptor<A, B, C>(A(), B(), C()).get(0));
        };
    }

    template <class E, class SL>
    using get_slice_type = typename detail::get_slice_type_impl<E, std::remove_reference_t<SL>>::type;
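
    // Note (illustrative only, not part of the public API): the helpers above
    // give integral indices and slice objects one uniform interface, e.g.
    //
    //     get_size(3)                             // == 1
    //     get_size(xrange<std::ptrdiff_t>(1, 4))  // == 3
    //     value(7, 0)                             // == 7
    //     value(xrange<std::ptrdiff_t>(1, 4), 2)  // == 3  (m_start + i)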

    /*************************
     * xslice implementation *
     *************************/

    template <class D>
    inline auto xslice<D>::derived_cast() noexcept -> derived_type&
    {
        return *static_cast<derived_type*>(this);
    }

    template <class D>
    inline auto xslice<D>::derived_cast() const noexcept -> const derived_type&
    {
        return *static_cast<const derived_type*>(this);
    }

    /*************************
     * xrange implementation *
     *************************/

    template <class T>
    inline xrange<T>::xrange(size_type start_val, size_type stop_val) noexcept
        : m_start(start_val)
        , m_size(stop_val > start_val ? stop_val - start_val : 0)
    {
    }

    template <class T>
    template <class S, typename>
    inline xrange<T>::operator xrange<S>() const noexcept
    {
        xrange<S> ret;
        ret.m_start = static_cast<S>(m_start);
        ret.m_size = static_cast<S>(m_size);
        return ret;
    }

    template <class T>
    template <class S, typename>
    inline xrange<S> xrange<T>::convert() const noexcept
    {
        return xrange<S>(*this);
    }

    template <class T>
    inline auto xrange<T>::operator()(size_type i) const noexcept -> size_type
    {
        return m_start + i;
    }

    template <class T>
    inline auto xrange<T>::size() const noexcept -> size_type
    {
        return m_size;
    }

    template <class T>
    inline auto xrange<T>::step_size() const noexcept -> size_type
    {
        return 1;
    }

    template <class T>
    inline auto xrange<T>::step_size(std::size_t /*i*/, std::size_t n) const noexcept -> size_type
    {
        return static_cast<size_type>(n);
    }

    template <class T>
    inline auto xrange<T>::revert_index(std::size_t i) const noexcept -> size_type
    {
        return i - m_start;
    }

    template <class T>
    inline bool xrange<T>::contains(size_type i) const noexcept
    {
        return i >= m_start && i < m_start + m_size;
    }

    template <class T>
    inline bool xrange<T>::operator==(const self_type& rhs) const noexcept
    {
        return (m_start == rhs.m_start) && (m_size == rhs.m_size);
    }

    template <class T>
    inline bool xrange<T>::operator!=(const self_type& rhs) const noexcept
    {
        return !(*this == rhs);
    }

    /*********************************
     * xstepped_range implementation *
     *********************************/

    template <class T>
    inline xstepped_range<T>::xstepped_range(size_type start_val, size_type stop_val, size_type step) noexcept
        : m_start(start_val)
        , m_size(size_type(0))
        , m_step(step)
    {
        // Number of elements between start and stop with the given step,
        // i.e. n / step rounded away from zero when the division is not
        // exact and the step actually points from start toward stop.
        size_type n = stop_val - start_val;
        m_size = n / step + (((n < 0) ^ (step > 0)) && (n % step));
    }

    template <class T>
    template <class S, typename>
    inline xstepped_range<T>::operator xstepped_range<S>() const noexcept
    {
        xstepped_range<S> ret;
        ret.m_start = static_cast<S>(m_start);
        ret.m_size = static_cast<S>(m_size);
        ret.m_step = static_cast<S>(m_step);
        return ret;
    }

    template <class T>
    template <class S, typename>
    inline xstepped_range<S> xstepped_range<T>::convert() const noexcept
    {
        return xstepped_range<S>(*this);
    }

    template <class T>
    inline auto xstepped_range<T>::operator()(size_type i) const noexcept -> size_type
    {
        return m_start + i * m_step;
    }

    template <class T>
    inline auto xstepped_range<T>::size() const noexcept -> size_type
    {
        return m_size;
    }

    template <class T>
    inline auto xstepped_range<T>::step_size() const noexcept -> size_type
    {
        return m_step;
    }

    template <class T>
    inline auto xstepped_range<T>::step_size(std::size_t /*i*/, std::size_t n) const noexcept -> size_type
    {
        return m_step * static_cast<size_type>(n);
    }

    template <class T>
    inline auto xstepped_range<T>::revert_index(std::size_t i) const noexcept -> size_type
    {
        return (i - m_start) / m_step;
    }

    template <class T>
    inline bool xstepped_range<T>::contains(size_type i) const noexcept
    {
        return i >= m_start && i < m_start + m_size * m_step && ((i - m_start) % m_step == 0);
    }

    template <class T>
    inline bool xstepped_range<T>::operator==(const self_type& rhs) const noexcept
    {
        return (m_start == rhs.m_start) && (m_size == rhs.m_size) && (m_step == rhs.m_step);
    }

    template <class T>
    inline bool xstepped_range<T>::operator!=(const self_type& rhs) const noexcept
    {
        return !(*this == rhs);
    }

    /***********************
     * xall implementation *
     ***********************/

    template <class T>
    inline xall<T>::xall(size_type size) noexcept
        : m_size(size)
    {
    }

    template <class T>
    template <class S, typename>
    inline xall<T>::operator xall<S>() const noexcept
    {
        return xall<S>(static_cast<S>(m_size));
    }

    template <class T>
    template <class S, typename>
    inline xall<S> xall<T>::convert() const noexcept
    {
        return xall<S>(*this);
    }

    template <class T>
    inline auto xall<T>::operator()(size_type i) const noexcept -> size_type
    {
        return i;
    }

    template <class T>
    inline auto xall<T>::size() const noexcept -> size_type
    {
        return m_size;
    }

    template <class T>
    inline auto xall<T>::step_size() const noexcept -> size_type
    {
        return 1;
    }

    template <class T>
    inline auto xall<T>::step_size(std::size_t /*i*/, std::size_t n) const noexcept -> size_type
    {
        return static_cast<size_type>(n);
    }

    template <class T>
    inline auto xall<T>::revert_index(std::size_t i) const noexcept -> size_type
    {
        return i;
    }

    template <class T>
    inline bool xall<T>::contains(size_type i) const noexcept
    {
        return i < m_size;
    }

    template <class T>
    inline bool xall<T>::operator==(const self_type& rhs) const noexcept
    {
        return m_size == rhs.m_size;
    }

    template <class T>
    inline bool xall<T>::operator!=(const self_type& rhs) const noexcept
    {
        return !(*this == rhs);
    }

    /***************************
     * xnewaxis implementation *
     ***************************/

    template <class T>
    template <class S, typename>
    inline xnewaxis<T>::operator xnewaxis<S>() const noexcept
    {
        return xnewaxis<S>();
    }

    template <class T>
    template <class S, typename>
    inline xnewaxis<S> xnewaxis<T>::convert() const noexcept
    {
        return xnewaxis<S>(*this);
    }

    template <class T>
    inline auto xnewaxis<T>::operator()(size_type) const noexcept -> size_type
    {
        return 0;
    }

    template <class T>
    inline auto xnewaxis<T>::size() const noexcept -> size_type
    {
        return 1;
    }

    template <class T>
    inline auto xnewaxis<T>::step_size() const noexcept -> size_type
    {
        return 0;
    }

    template <class T>
    inline auto xnewaxis<T>::step_size(std::size_t /*i*/, std::size_t /*n*/) const noexcept -> size_type
    {
        return 0;
    }

    template <class T>
    inline auto xnewaxis<T>::revert_index(std::size_t i) const noexcept -> size_type
    {
        return i;
    }

    template <class T>
    inline bool xnewaxis<T>::contains(size_type i) const noexcept
    {
        return i == 0;
    }

    template <class T>
    inline bool xnewaxis<T>::operator==(const self_type& /*rhs*/) const noexcept
    {
        return true;
    }

    template <class T>
    inline bool xnewaxis<T>::operator!=(const self_type& /*rhs*/) const noexcept
    {
        return false;
    }

    /******************************
     * xkeep_slice implementation *
     ******************************/

    template <class T>
    template <class C, typename>
    inline xkeep_slice<T>::xkeep_slice(C& cont)
        : m_raw_indices(cont.begin(), cont.end())
    {
    }

    template <class T>
    inline xkeep_slice<T>::xkeep_slice(container_type&& cont)
        : m_raw_indices(std::move(cont))
    {
    }

    template <class T>
    template <class S>
    inline xkeep_slice<T>::xkeep_slice(std::initializer_list<S> t)
        : m_raw_indices(t.size())
    {
        std::transform(
            t.begin(),
            t.end(),
            m_raw_indices.begin(),
            [](auto t)
            {
                return static_cast<size_type>(t);
            }
        );
    }

    template <class T>
    template <class S, typename>
    inline xkeep_slice<T>::operator xkeep_slice<S>() const noexcept
    {
        xkeep_slice<S> ret;
        using us_type = typename container_type::size_type;
        us_type sz = static_cast<us_type>(size());
        ret.m_raw_indices.resize(sz);
        ret.m_indices.resize(sz);
        std::transform(
            m_raw_indices.cbegin(),
            m_raw_indices.cend(),
            ret.m_raw_indices.begin(),
            [](const T& val)
            {
                return static_cast<S>(val);
            }
        );
        std::transform(
            m_indices.cbegin(),
            m_indices.cend(),
            ret.m_indices.begin(),
            [](const T& val)
            {
                return static_cast<S>(val);
            }
        );
        return ret;
    }

    template <class T>
    template <class S, typename>
    inline xkeep_slice<S> xkeep_slice<T>::convert() const noexcept
    {
        return xkeep_slice<S>(*this);
    }

    template <class T>
    inline void xkeep_slice<T>::normalize(std::size_t shape)
    {
        m_indices.resize(m_raw_indices.size());
        std::size_t sz = m_indices.size();
        for (std::size_t i = 0; i < sz; ++i)
        {
            m_indices[i] = m_raw_indices[i] < 0 ? static_cast<size_type>(shape) + m_raw_indices[i] : m_raw_indices[i];
        }
    }

    template <class T>
    inline auto xkeep_slice<T>::operator()(size_type i) const noexcept -> size_type
    {
        return m_indices.size() == size_type(1) ? m_indices.front() : m_indices[static_cast<std::size_t>(i)];
    }

    template <class T>
    inline auto xkeep_slice<T>::size() const noexcept -> size_type
    {
        return static_cast<size_type>(m_raw_indices.size());
    }

    template <class T>
    inline auto xkeep_slice<T>::step_size(std::size_t i, std::size_t n) const noexcept -> size_type
    {
        if (m_indices.size() == 1)
        {
            return 0;
        }
        if (i + n >= m_indices.size())
        {
            return m_indices.back() - m_indices[i] + 1;
        }
        else
        {
            return m_indices[i + n] - m_indices[i];
        }
    }

    template <class T>
    inline auto xkeep_slice<T>::revert_index(std::size_t i) const -> size_type
    {
        auto it = std::find(m_indices.begin(), m_indices.end(), i);
        if (it != m_indices.end())
        {
            return std::distance(m_indices.begin(), it);
        }
        else
        {
            XTENSOR_THROW(std::runtime_error, "Index i (" + std::to_string(i) + ") not in indices of islice.");
        }
    }

    template <class T>
    inline bool xkeep_slice<T>::contains(size_type i) const noexcept
    {
        return std::find(m_indices.begin(), m_indices.end(), i) != m_indices.end();
    }

    template <class T>
    inline bool xkeep_slice<T>::operator==(const self_type& rhs) const noexcept
    {
        return m_indices == rhs.m_indices;
    }

    template <class T>
    inline bool xkeep_slice<T>::operator!=(const self_type& rhs) const noexcept
    {
        return !(*this == rhs);
    }

    /******************************
     * xdrop_slice implementation *
     ******************************/

    template <class T>
    template <class C, typename>
    inline xdrop_slice<T>::xdrop_slice(C& cont)
        : m_raw_indices(cont.begin(), cont.end())
    {
    }

    template <class T>
    inline xdrop_slice<T>::xdrop_slice(container_type&& cont)
        : m_raw_indices(std::move(cont))
    {
    }

    template <class T>
    template <class S>
    inline xdrop_slice<T>::xdrop_slice(std::initializer_list<S> t)
        : m_raw_indices(t.size())
    {
        std::transform(
            t.begin(),
            t.end(),
            m_raw_indices.begin(),
            [](auto t)
            {
                return static_cast<size_type>(t);
            }
        );
    }

    template <class T>
    template <class S, typename>
    inline xdrop_slice<T>::operator xdrop_slice<S>() const noexcept
    {
        xdrop_slice<S> ret;
        ret.m_raw_indices.resize(m_raw_indices.size());
        ret.m_indices.resize(m_indices.size());
        std::transform(
            m_raw_indices.cbegin(),
            m_raw_indices.cend(),
            ret.m_raw_indices.begin(),
            [](const T& val)
            {
                return static_cast<S>(val);
            }
        );
        std::transform(
            m_indices.cbegin(),
            m_indices.cend(),
            ret.m_indices.begin(),
            [](const T& val)
            {
                return static_cast<S>(val);
            }
        );
        std::transform(
            m_inc.cbegin(),
            m_inc.cend(),
            std::inserter(ret.m_inc, ret.m_inc.begin()),
            [](const auto& val)
            {
                return std::make_pair(static_cast<S>(val.first), static_cast<S>(val.second));
            }
        );
        ret.m_size = static_cast<S>(m_size);
        return ret;
    }

    template <class T>
    template <class S, typename>
    inline xdrop_slice<S> xdrop_slice<T>::convert() const noexcept
    {
        return xdrop_slice<S>(*this);
    }

    template <class T>
    inline void xdrop_slice<T>::normalize(std::size_t shape)
    {
        m_size = static_cast<size_type>(shape - m_raw_indices.size());

        m_indices.resize(m_raw_indices.size());
        std::size_t sz = m_indices.size();
        for (std::size_t i = 0; i < sz; ++i)
        {
            m_indices[i] = m_raw_indices[i] < 0 ? static_cast<size_type>(shape) + m_raw_indices[i] : m_raw_indices[i];
        }

        // m_inc maps the first output (post-drop) index affected by each block
        // of consecutive dropped indices to the total number of indices dropped
        // up to and including that block, so operator() can translate an output
        // index into an input index with a single map lookup.
        size_type cum = size_type(0);
        size_type prev_cum = cum;
        for (std::size_t i = 0; i < sz; ++i)
        {
            std::size_t ind = i;
            size_type d = m_indices[i];
            while (i + 1 < sz && m_indices[i + 1] == m_indices[i] + 1)
            {
                ++i;
            }
            cum += (static_cast<size_type>(i) - static_cast<size_type>(ind)) + 1;
            m_inc[d - prev_cum] = cum;
            prev_cum = cum;
        }
    }

    template <class T>
    inline auto xdrop_slice<T>::operator()(size_type i) const noexcept -> size_type
    {
        if (m_inc.empty() || i < m_inc.begin()->first)
        {
            return i;
        }
        else
        {
            auto iter = --m_inc.upper_bound(i);
            return i + iter->second;
        }
    }

    template <class T>
    inline auto xdrop_slice<T>::size() const noexcept -> size_type
    {
        return m_size;
    }

    template <class T>
    inline auto xdrop_slice<T>::step_size(std::size_t i, std::size_t n) const noexcept -> size_type
    {
        if (i + n >= static_cast<std::size_t>(m_size))
        {
            return (*this)(static_cast<size_type>(m_size - 1)) - (*this)(static_cast<size_type>(i)) + 1;
        }
        else
        {
            return (*this)(static_cast<size_type>(i + n)) - (*this)(static_cast<size_type>(i));
        }
    }

    template <class T>
    inline auto xdrop_slice<T>::revert_index(std::size_t i) const -> size_type
    {
        if (i < m_inc.begin()->first)
        {
            return i;
        }
        else
        {
            auto iter = --m_inc.lower_bound(i);
            auto check = iter->first + iter->second;
            if (check > i)
            {
                --iter;
            }
            return i - iter->second;
        }
    }

    template <class T>
    inline bool xdrop_slice<T>::contains(size_type i) const noexcept
    {
        return std::find(m_indices.begin(), m_indices.end(), i) == m_indices.end();
    }

    template <class T>
    inline bool xdrop_slice<T>::operator==(const self_type& rhs) const noexcept
    {
        return m_indices == rhs.m_indices;
    }

    template <class T>
    inline bool xdrop_slice<T>::operator!=(const self_type& rhs) const noexcept
    {
        return !(*this == rhs);
    }
}

#undef XTENSOR_CONSTEXPR

#endif