/***************************************************************************
 * Copyright (c) Johan Mabille, Sylvain Corlay and Wolf Vollprecht          *
 * Copyright (c) QuantStack                                                 *
 *                                                                          *
 * Distributed under the terms of the BSD 3-Clause License.                 *
 *                                                                          *
 * The full license is in the file LICENSE, distributed with this software. *
 ****************************************************************************/

#ifndef XTENSOR_OFFSET_VIEW_HPP
#define XTENSOR_OFFSET_VIEW_HPP

#include <xtl/xcomplex.hpp>

#include "xtensor/xfunctor_view.hpp"

namespace xt
{
    namespace detail
    {
        // Functor forwarding element access to the component of type M located
        // at byte offset I inside each element of the underlying expression
        // (e.g. the real or imaginary part of a std::complex value).
        template <class M, std::size_t I>
        struct offset_forwarder
        {
            using value_type = M;
            using reference = M&;
            using const_reference = const M&;
            using pointer = M*;
            using const_pointer = const M*;

            using proxy = xtl::xproxy_wrapper<M>;

            template <class value_type, class requested_type>
            using simd_return_type = xt_simd::simd_return_type<value_type, requested_type>;

            template <class T>
            decltype(auto) operator()(T&& t) const
            {
                return xtl::forward_offset<M, I>(std::forward<T>(t));
            }

            template <
                class align,
                class requested_type,
                std::size_t N,
                class E,
                class MF = M,
                class = std::enable_if_t<
                    (std::is_same<MF, float>::value || std::is_same<MF, double>::value) && I <= sizeof(MF),
                    int>>
            auto proxy_simd_load(const E& expr, std::size_t n) const
            {
                // TODO refactor using shuffle only
                // Load a complex batch and keep only the requested component.
                auto batch = expr.template load_simd<align, std::complex<MF>>(n);
                if (I == 0)
                {
                    return batch.real();
                }
                else
                {
                    return batch.imag();
                }
            }

            template <
                class align,
                class simd,
                class E,
                class MF = M,
                class = std::enable_if_t<
                    (std::is_same<MF, float>::value || std::is_same<MF, double>::value) && I <= sizeof(MF),
                    int>>
            auto proxy_simd_store(E& expr, std::size_t n, const simd& batch) const
            {
                // Read-modify-write: overwrite only the addressed component of
                // the complex batch, then store the full batch back.
                auto x = expr.template load_simd<align, std::complex<MF>, simd::size>(n);
                if (I == 0)
                {
                    x.real() = batch;
                }
                else
                {
                    x.imag() = batch;
                }
                expr.template store_simd<align>(n, x);
            }
        };
    }

    template <class CT, class M, std::size_t I>
    using xoffset_view = xfunctor_view<detail::offset_forwarder<M, I>, CT>;

    template <class CT, class M, std::size_t I>
    using xoffset_adaptor = xfunctor_adaptor<detail::offset_forwarder<M, I>, CT>;
}

#endif
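
// ---------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of this header). xoffset_view and
// xoffset_adaptor are the offset views used for component-wise access into
// compound element types; in practice they are typically reached through
// xt::real() / xt::imag() on complex expressions rather than instantiated
// directly. A minimal sketch, assuming "xtensor/xcomplex.hpp" provides
// xt::real and xt::imag:
//
//     #include <complex>
//     #include "xtensor/xarray.hpp"
//     #include "xtensor/xcomplex.hpp"
//
//     xt::xarray<std::complex<double>> a = {std::complex<double>(1., 2.),
//                                           std::complex<double>(3., 4.)};
//     auto re = xt::real(a);   // view on the real parts (byte offset 0)
//     auto im = xt::imag(a);   // view on the imaginary parts (byte offset sizeof(double))
//     re(0) = 5.0;             // writes through to the real part of a(0)
// ---------------------------------------------------------------------------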