template <int _Rank> class index
template <typename _Value_type, int _Rank>
template <int _Rank, int _Element_size>
template <int _Rank, int _Element_size>
static const int rank = _Rank;
static_assert(_Rank == 1, "This constructor can only be used to construct an index<1> object.");
static_assert(_Rank == 2, "This constructor can only be used to construct an index<2> object.");
static_assert(_Rank == 3, "This constructor can only be used to construct an index<3> object.");
template <class _Tuple_type>
template <typename _Value_type, int _Rank>
template <int _Rank, int _Element_size>
template <int _Rank, int _Element_size>
static_assert(_Rank == 1, "This constructor can only be used to construct an extent<1> object.");
static_assert(_Rank == 2, "This constructor can only be used to construct an extent<2> object.");
static_assert(_Rank == 3, "This constructor can only be used to construct an extent<3> object.");
static_assert(rank == 1, "One-dimensional tile() method only available on extent<1>");
static_assert(_Dim0 > 0, "All tile dimensions must be positive");
static_assert(rank == 2, "Two-dimensional tile() method only available on extent<2>");
static_assert(_Dim0 > 0 && _Dim1 > 0, "All tile dimensions must be positive");
static_assert(rank == 3, "Three-dimensional tile() method only available on extent<3>");
static_assert(_Dim0 > 0 && _Dim1 > 0 && _Dim2 > 0, "All tile dimensions must be positive");
template <class _Tuple_type>
template <int _Rank, template <int> class _Tuple_type>
template <int _Rank, template <int> class _Tuple_type>

template <int _Rank, template <int> class _Tuple_type>
_Tuple_type<_Rank> operator+(const _Tuple_type<_Rank>& _Lhs, const _Tuple_type<_Rank>& _Rhs) __GPU
    _Tuple_type<_Rank> new_Tuple = details::_Create_uninitialized_tuple<_Tuple_type<_Rank>>();

template <int _Rank, template <int> class _Tuple_type>
_Tuple_type<_Rank> operator-(const _Tuple_type<_Rank>& _Lhs, const _Tuple_type<_Rank>& _Rhs) __GPU
    _Tuple_type<_Rank> new_Tuple = details::_Create_uninitialized_tuple<_Tuple_type<_Rank>>();

template <int _Rank, template <int> class _Tuple_type>
_Tuple_type<_Rank> operator+(const _Tuple_type<_Rank>& _Lhs, typename _Tuple_type<_Rank>::value_type _Rhs) __GPU
    _Tuple_type<_Rank> new_Tuple = details::_Create_uninitialized_tuple<_Tuple_type<_Rank>>();

template <int _Rank, template <int> class _Tuple_type>
_Tuple_type<_Rank> operator+(typename _Tuple_type<_Rank>::value_type _Lhs, const _Tuple_type<_Rank>& _Rhs) __GPU
    _Tuple_type<_Rank> new_Tuple = details::_Create_uninitialized_tuple<_Tuple_type<_Rank>>();

template <int _Rank, template <int> class _Tuple_type>
_Tuple_type<_Rank> operator-(const _Tuple_type<_Rank>& _Lhs, typename _Tuple_type<_Rank>::value_type _Rhs) __GPU
    _Tuple_type<_Rank> new_Tuple = details::_Create_uninitialized_tuple<_Tuple_type<_Rank>>();

template <int _Rank, template <int> class _Tuple_type>
_Tuple_type<_Rank> operator-(typename _Tuple_type<_Rank>::value_type _Lhs, const _Tuple_type<_Rank>& _Rhs) __GPU
    _Tuple_type<_Rank> new_Tuple = details::_Create_uninitialized_tuple<_Tuple_type<_Rank>>();

template <int _Rank, template <int> class _Tuple_type>
_Tuple_type<_Rank> operator*(const _Tuple_type<_Rank>& _Lhs, typename _Tuple_type<_Rank>::value_type _Rhs) __GPU
    _Tuple_type<_Rank> new_Tuple = details::_Create_uninitialized_tuple<_Tuple_type<_Rank>>();

template <int _Rank, template <int> class _Tuple_type>
_Tuple_type<_Rank> operator*(typename _Tuple_type<_Rank>::value_type _Lhs, const _Tuple_type<_Rank>& _Rhs) __GPU
    _Tuple_type<_Rank> new_Tuple = details::_Create_uninitialized_tuple<_Tuple_type<_Rank>>();

template <int _Rank, template <int> class _Tuple_type>
_Tuple_type<_Rank> operator/(const _Tuple_type<_Rank>& _Lhs, typename _Tuple_type<_Rank>::value_type _Rhs) __GPU
    _Tuple_type<_Rank> new_Tuple = details::_Create_uninitialized_tuple<_Tuple_type<_Rank>>();

template <int _Rank, template <int> class _Tuple_type>
_Tuple_type<_Rank> operator/(typename _Tuple_type<_Rank>::value_type _Lhs, const _Tuple_type<_Rank>& _Rhs) __GPU
    _Tuple_type<_Rank> new_Tuple = details::_Create_uninitialized_tuple<_Tuple_type<_Rank>>();

template <int _Rank, template <int> class _Tuple_type>
_Tuple_type<_Rank> operator%(const _Tuple_type<_Rank>& _Lhs, typename _Tuple_type<_Rank>::value_type _Rhs) __GPU
    _Tuple_type<_Rank> new_Tuple = details::_Create_uninitialized_tuple<_Tuple_type<_Rank>>();

template <int _Rank, template <int> class _Tuple_type>
_Tuple_type<_Rank> operator%(typename _Tuple_type<_Rank>::value_type _Lhs, const _Tuple_type<_Rank>& _Rhs) __GPU
    _Tuple_type<_Rank> new_Tuple = details::_Create_uninitialized_tuple<_Tuple_type<_Rank>>();
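// Usage sketch of the element-wise operators reconstructed above; index<N> and
// extent<N> both support them (Concurrency namespace assumed, values illustrative):
index<2>  idx(2, 3);
extent<2> ext(8, 8);
index<2>  shifted = idx + 1;      // scalar added to each element  -> (3, 4)
extent<2> scaled  = ext * 2;      // scalar multiplied per element -> (16, 16)
index<2>  summed  = idx + idx;    // element-wise addition         -> (4, 6)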
#pragma warning( suppress : 4100 ) // unreferenced formal parameter
local(_Other.local),
static const int tile_dim0 = _Dim0;
static const int tile_dim1 = _Dim1;
static const int tile_dim2 = _Dim2;
template <int _Dim0, int _Dim1>
static const int tile_dim0 = _Dim0;
static const int tile_dim1 = _Dim1;
template <int _Dim0>
static const int tile_dim0 = _Dim0;
static_assert(_Dim0 > 0, "_Dim0 must be positive");
static_assert(_Dim1 > 0, "_Dim1 must be positive");
static_assert(_Dim2 > 0, "_Dim2 must be positive");
((static_cast<unsigned int>((*this)[1]) + _Dim1 - 1)/_Dim1) * _Dim1,
((static_cast<unsigned int>((*this)[2]) + _Dim2 - 1)/_Dim2) * _Dim2);
Concurrency::extent<rank> _New_extent(((*this)[0]/_Dim0) * _Dim0, ((*this)[1]/_Dim1) * _Dim1, ((*this)[2]/_Dim2) * _Dim2);
static const int tile_dim0 = _Dim0;
static const int tile_dim1 = _Dim1;
static const int tile_dim2 = _Dim2;
index<rank> _Tile_origin = details::_Create_uninitialized_tuple<index<rank>>();
template <int _Dim0, int _Dim1>
static_assert(_Dim0 > 0, "_Dim0 must be positive");
static_assert(_Dim1 > 0, "_Dim1 must be positive");
((static_cast<unsigned int>((*this)[1]) + _Dim1 - 1)/_Dim1) * _Dim1);
static const int tile_dim0 = _Dim0;
static const int tile_dim1 = _Dim1;
index<rank> _Tile_origin = details::_Create_uninitialized_tuple<index<rank>>();
template <int _Dim0>
static_assert(_Dim0 > 0, "_Dim0 must be positive");
static const int tile_dim0 = _Dim0;
index<rank> _Tile_origin = details::_Create_uninitialized_tuple<index<rank>>();
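// Usage sketch for tiling an extent and adjusting it with pad()/truncate(), whose
// rounding expressions appear above (Concurrency namespace assumed):
extent<2> ext(100, 70);
tiled_extent<16, 16> t_ext     = ext.tile<16, 16>();
tiled_extent<16, 16> padded    = t_ext.pad();       // rounds each dimension up   -> 112 x 80
tiled_extent<16, 16> truncated = t_ext.truncate();  // rounds each dimension down ->  96 x 64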
template <int _Old_element_size, int _New_element_size>
int _Total_size = _Old_element_size * _Old_size;
int _New_size = (_Total_size + _New_element_size - 1) / _New_element_size;
template <int _Old_element_size, int _New_element_size>
int _Total_size = _Old_element_size * _Old_size;
int _New_size = (_Total_size + _New_element_size - 1) / _New_element_size;
if (_New_size * _New_element_size > _Total_size)
    throw runtime_exception("Element type of reinterpret_as does not evenly divide into extent", E_INVALIDARG);
template <int _Rank, int _Element_size>
return _M_view_extent;
return (_M_total_linear_offset - (_Element_size * _Flatten_helper::func(_M_array_multiplier._M_base, _M_view_offset._M_base)));
_M_array_extent(_Other._M_array_extent),
_M_array_multiplier(_Other._M_array_multiplier),
_M_view_offset(_Other._M_view_offset),
_M_total_linear_offset(_Other._M_total_linear_offset),
_M_view_extent(_Other._M_view_extent)
_M_array_extent(_Other._M_array_extent),
_M_array_multiplier(_Other._M_array_multiplier),
_M_view_offset(_Other._M_view_offset + _Section_origin),
_M_view_extent(_Section_extent)
_M_total_linear_offset = _Other._Base_linear_offset() + (_Element_size * _Flatten_helper::func(_M_array_multiplier._M_base, _M_view_offset._M_base));
_M_array_extent(_Array_extent),
_M_total_linear_offset(_Base_linear_offset),
_M_view_extent(_Array_extent)
_Initialize_multiplier();
_M_array_extent(_Array_extent),
_M_view_offset(_Section_origin),
_M_total_linear_offset(_Base_linear_offset),
_M_view_extent(_Section_extent)
_Initialize_multiplier();
_M_total_linear_offset += (_Element_size * _Flatten_helper::func(_M_array_multiplier._M_base, _M_view_offset._M_base));
_M_array_extent = _Other._M_array_extent;
_M_array_multiplier = _Other._M_array_multiplier;
_M_view_offset = _Other._M_view_offset;
_M_total_linear_offset = _Other._M_total_linear_offset;
_M_view_extent = _Other._M_view_extent;
static_assert(_Rank > 1, "Projection is only supported on array_views with a rank of 2 or higher");
_Projected_shape._M_array_extent, this->_M_array_extent,
_Projected_shape._M_array_multiplier, this->_M_array_multiplier,
_Projected_shape._M_view_offset, this->_M_view_offset,
_Projected_shape._M_view_extent, this->_M_view_extent);
_Projected_shape._M_total_linear_offset = _M_total_linear_offset + (_Element_size * _I * _M_array_multiplier[0]);
: _M_array_extent(details::_do_not_initialize), _M_array_multiplier(details::_do_not_initialize),
  _M_view_offset(details::_do_not_initialize), _M_view_extent(details::_do_not_initialize)
unsigned int _Ext = _M_array_extent[_Rank-1];
template <int _Rank, int _Element_size>
template <int _R, int _S>
_M_buffer_descriptor(_Buffer_desc),
_M_buffer_descriptor(_Other._M_buffer_descriptor),
_Register_copy(_Other);
_M_buffer_descriptor(_Other._M_buffer_descriptor),
_M_buffer_descriptor(_Other._M_buffer_descriptor),
_M_buffer_descriptor(_Buffer_desc),
_M_buffer_descriptor(_Buffer_desc),
const _Buffer_descriptor& _Buffer_desc,
int _Base_linear_offset,
_M_buffer_descriptor(_Buffer_desc),
_M_buffer_descriptor(_Buffer_desc),
_Ubiquitous_buffer_ptr _PUBuf = _Ubiquitous_buffer::_Create_ubiquitous_buffer(_Array_extent.size(), _Element_size * sizeof(int));
if (_Data == NULL) {
    throw runtime_exception("Invalid pointer argument (NULL) to array_view constructor", E_INVALIDARG);
if (_Data == NULL) {
    throw runtime_exception("Invalid pointer argument (NULL) to array_view constructor", E_INVALIDARG);
#pragma warning( push )
#pragma warning( disable : 4880 )
#pragma warning( pop )
if (this != &_Other)
_M_buffer_descriptor = _Other._M_buffer_descriptor;
_Register_copy(_Other);
int * _Ptr = reinterpret_cast<int *>(_M_buffer_descriptor._M_data_ptr);
return &_Ptr[_M_total_linear_offset + (_Element_size * _Flatten_helper::func(_M_array_multiplier._M_base, _Index._M_base))];
if ((_M_buffer_descriptor._M_curr_cpu_access_mode & _Requested_mode) != _Requested_mode) {
    _M_buffer_descriptor._Get_CPU_access(_Requested_mode);
return _Access(_Index);
UNREFERENCED_PARAMETER(_Requested_mode);
return _Access(_Index);
_View._Register(_Array_view_base::_Create_section_buffer_shape(this->_M_buffer_descriptor, _Section_origin, _Section_extent));
return _Section(_Idx, this->extent - _Idx);
_Projected_view._M_buffer_descriptor = this->_M_buffer_descriptor;
_Projected_view._Register(_Array_view_base::_Create_projection_buffer_shape(this->_M_buffer_descriptor, 0, _I));
template <int _New_element_size>
static_assert(_Rank == 1, "reinterpret_as is only permissible on array views of rank 1");
int _New_size = _Calculate_reinterpreted_size<_Element_size, _New_element_size>(_M_view_extent.size());
_M_total_linear_offset,
template <int _New_rank>
static_assert(_Rank == 1, "view_as is only permissible on array views of rank 1");
_M_total_linear_offset,
unsigned int bufElemSize = static_cast<unsigned int>(_M_buffer_descriptor._Get_buffer_ptr()->_Get_master_buffer_elem_size());
unsigned int elemSize = _Element_size * sizeof(int);
size_t linearOffsetInBytes = _Base_linear_offset() * sizeof(int);
size_t baseLSDExtentInBytes = _M_array_extent[_Rank - 1];
baseLSDExtentInBytes *= elemSize;
size_t viewLSDOffsetInBytes = _M_view_offset[_Rank - 1];
viewLSDOffsetInBytes *= elemSize;
size_t viewLSDExtentInBytes = _M_view_extent[_Rank - 1];
viewLSDExtentInBytes *= elemSize;
if (((linearOffsetInBytes % bufElemSize) != 0) ||
    ((baseLSDExtentInBytes % bufElemSize) != 0) ||
    ((viewLSDOffsetInBytes % bufElemSize) != 0) ||
    ((viewLSDExtentInBytes % bufElemSize) != 0))
    throw runtime_exception("The array_view base extent, view offset and/or view extent is incompatible with the underlying buffer", E_FAIL);
unsigned int linearOffset = static_cast<unsigned int>(linearOffsetInBytes / bufElemSize);
unsigned int baseExtent[_Rank];
unsigned int viewOffset[_Rank];
unsigned int viewExtent[_Rank];
#pragma warning( push )
#pragma warning( disable : 6294 )
#pragma warning( disable : 6201 ) // Index '-1' is out of valid index range '0' to '0' for possibly stack allocated buffer 'baseExtent'.
for (int i = 0; i < _Rank - 1; ++i) {
    baseExtent[i] = _M_array_extent[i];
    viewOffset[i] = _M_view_offset[i];
    viewExtent[i] = _M_view_extent[i];
#pragma warning( pop )
baseExtent[_Rank - 1] = static_cast<unsigned int>(baseLSDExtentInBytes / bufElemSize);
viewOffset[_Rank - 1] = static_cast<unsigned int>(viewLSDOffsetInBytes / bufElemSize);
viewExtent[_Rank - 1] = static_cast<unsigned int>(viewLSDExtentInBytes / bufElemSize);
return _View_shape::_Create_view_shape(_Rank, linearOffset, baseExtent, viewOffset, viewExtent);
_Create_buffer_view_shape());
_M_buffer_descriptor._M_data_ptr = _PBuf->_Get_host_ptr();
if (_Shape == NULL) {
_M_buffer_descriptor._M_data_ptr = _PBuf->_Get_host_ptr();
_Throw_exception = true;
if (_Throw_exception) {
std::vector<unsigned int> _New_view_extent(_Base_shape->_Get_rank());
std::vector<unsigned int> _New_view_offset(_Base_shape->_Get_rank());
bool *_New_projection_info = new bool[_Base_shape->_Get_rank()];
for (unsigned int _I = 0; _I < _Base_shape->_Get_rank(); ++_I)
unsigned int _UnProjectedDimCount = 0;
for (unsigned int _I = 0; _I < _Base_shape->_Get_rank(); ++_I)
if (_UnProjectedDimCount == _Dim) {
    _New_view_extent[_I] = 1;
    _New_view_offset[_I] += _Dim_offset;
    _New_projection_info[_I] = true;
_UnProjectedDimCount++;
auto _PView_shape = _View_shape::_Create_view_shape(_Base_shape->_Get_rank(),
    _New_view_offset.data(),
    _New_view_extent.data(),
    _New_projection_info);
delete [] _New_projection_info;
return _PView_shape;
if (_Base_shape->_Get_rank() == _Rank) {
std::vector<unsigned int> _New_view_extent(_Base_shape->_Get_rank());
std::vector<unsigned int> _New_view_offset(_Base_shape->_Get_rank());
unsigned int _I = 0, _J = 0;
while (_I < _Base_shape->_Get_rank())
if (_J == (_Rank - 1))
unsigned int bufElemSize = static_cast<unsigned int>(_Descriptor._Get_buffer_ptr()->_Get_master_buffer_elem_size());
unsigned int elemSize = _Element_size * sizeof(int);
size_t sectionLSDOriginInBytes = _Section_origin[_J];
sectionLSDOriginInBytes *= elemSize;
size_t sectionLSDExtentInBytes = _Section_extent[_J];
sectionLSDExtentInBytes *= elemSize;
if (((sectionLSDOriginInBytes % bufElemSize) != 0) ||
    ((sectionLSDExtentInBytes % bufElemSize) != 0))
    throw runtime_exception("The array_view section origin and/or extent is incompatible with the underlying buffer", E_FAIL);
_New_view_offset[_I] = _Base_shape->_Get_view_offset()[_I] + static_cast<unsigned int>(sectionLSDOriginInBytes / bufElemSize);
_New_view_extent[_I] = static_cast<unsigned int>(sectionLSDExtentInBytes / bufElemSize);
_New_view_extent[_I] = _Section_extent[_J];
_New_view_offset[_I] = _Base_shape->_Get_view_offset()[_I] + _Section_origin[_J];
return _View_shape::_Create_view_shape(_Base_shape->_Get_rank(),
    _New_view_offset.data(),
    _New_view_extent.data(),
UNREFERENCED_PARAMETER(_Other);
UNREFERENCED_PARAMETER(_Shape);
UNREFERENCED_PARAMETER(_Throw_exception);
UNREFERENCED_PARAMETER(_Descriptor);
UNREFERENCED_PARAMETER(_Dim);
UNREFERENCED_PARAMETER(_I);
UNREFERENCED_PARAMETER(_Descriptor);
UNREFERENCED_PARAMETER(_Section_origin);
UNREFERENCED_PARAMETER(_Section_extent);
template <typename _Container>
template <class _Uty> static auto _Fn(_Uty _Val, decltype(_Val.size(), _Val.data(), 0)) -> std::true_type;
template <class _Uty> static auto _Fn(_Uty _Val, ...) -> std::false_type;
typedef decltype(_Fn(std::declval<_Container>(), 0)) type;
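// The detection idiom above checks whether a type exposes both size() and data().
// A minimal stand-alone sketch of the same technique; the trait name
// _Has_size_and_data is hypothetical and chosen only for this illustration:
#include <type_traits>
#include <utility>
#include <vector>
template <typename _Container>
struct _Has_size_and_data
{
    // Preferred when _Val.size() and _Val.data() are both well-formed.
    template <class _Uty>
    static auto _Fn(_Uty _Val, decltype(_Val.size(), _Val.data(), 0)) -> std::true_type;
    // Fallback for every other type.
    template <class _Uty>
    static auto _Fn(_Uty _Val, ...) -> std::false_type;
    typedef decltype(_Fn(std::declval<_Container>(), 0)) type;
};
// _Has_size_and_data<std::vector<int>>::type is std::true_type;
// _Has_size_and_data<int>::type is std::false_type.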
static_assert(0 == (sizeof(_Value_type) % sizeof(int)), "only value types whose size is a multiple of the size of an integer are allowed in array views");
template <typename _T, int _R>
friend const _Buffer_descriptor& details::_Get_buffer_descriptor<array_view<_Value_type, _Rank>>(const array_view<_Value_type, _Rank>& _Array) __GPU;
static const int rank = _Rank;
_Initialize(_Extent.size(), true);
: _Base(_Src.data(), _Extent)
static_assert(std::is_same<decltype(_Src.data()), _Value_type*>::value, "container element type and array view element type must match");
_Initialize(_Src.size());
: _Base(_Src, _Extent)
static_assert(_Rank == 1, "rank must be 1");
_Initialize(get_extent().size(), true);
throw runtime_exception("Invalid _Src container argument - _Src size is greater than INT_MAX", E_INVALIDARG);
static_assert(std::is_same<decltype(_Src.data()), _Value_type*>::value, "container element type and array view element type must match");
static_assert(_Rank == 1, "rank must be 1");
_Initialize(_Src.size());
static_assert(std::is_same<decltype(_Src.data()), _Value_type*>::value, "container element type and array view element type must match");
static_assert(_Rank == 1, "rank must be 1");
_Initialize(_Src.size());
static_assert(_Rank == 2, "rank must be 2");
_Initialize(get_extent().size(), true);
static_assert(std::is_same<decltype(_Src.data()), _Value_type*>::value, "container element type and array view element type must match");
static_assert(_Rank == 2, "rank must be 2");
_Initialize(_Src.size());
static_assert(_Rank == 3, "rank must be 3");
_Initialize(get_extent().size(), true);
static_assert(std::is_same<decltype(_Src.data()), _Value_type*>::value, "container element type and array view element type must match");
static_assert(_Rank == 3, "rank must be 3");
_Initialize(_Src.size());
static_assert(_Rank == 1, "rank must be 1");
static_assert(std::is_same<typename std::remove_reference<decltype(*_Src)>::type, _Value_type>::value, "container element type and array view element type must match");
static_assert(_Rank == 1, "rank must be 1");
static_assert(_Rank == 2, "rank must be 2");
static_assert(_Rank == 3, "rank must be 3");
_Base::operator=(_Other);
void *_Ptr = _Access(_Index);
return *reinterpret_cast<value_type*>(_Ptr);
return this->operator()(_Index);
return *reinterpret_cast<value_type*>(_Ptr);
value_type& operator() (int _I0, int _I1) const __GPU
static_assert(_Rank == 2, "value_type& array_view::operator()(int,int) is only permissible on array_view<T, 2>");
return this->operator()(index<2>(_I0, _I1));
value_type& operator() (int _I0, int _I1, int _I2) const __GPU
static_assert(_Rank == 3, "value_type& array_view::operator()(int,int,int) is only permissible on array_view<T, 3>");
return this->operator()(index<3>(_I0, _I1, _I2));
return _Convert<_Value_type>(_Section(_Section_origin, _Section_extent));
return section(_Idx, this->extent - _Idx);
static_assert(_Rank == 1, "rank must be 1");
static_assert(_Rank == 2, "rank must be 2");
static_assert(_Rank == 3, "rank must be 3");
return _Convert<_Value_type2>(this->template _Reinterpret_as<sizeof(_Value_type2)/sizeof(int)>());
return _Convert<_Value_type>(_View_as(_View_extent));
static_assert(_Rank == 1, "array_view::data() is only permissible on array_view<T, 1>");
if (!_M_buffer_descriptor._Get_buffer_ptr()->_Has_data_source()) {
_Get_access_async(_M_buffer_descriptor._Get_view_key(), _M_buffer_descriptor._Get_buffer_ptr()->_Get_master_accelerator_view(), _Write_access, _PBuf)._Get();
if ((_Access_type != access_type_none) && _M_buffer_descriptor._Get_buffer_ptr()->_Has_data_source())
_M_buffer_descriptor._Get_buffer_ptr()->_Get_master_accelerator_view(),
if ((_Access_type != access_type_none) && _M_buffer_descriptor._Get_buffer_ptr()->_Has_data_source())
_M_buffer_descriptor._Get_buffer_ptr()->_Get_master_accelerator_view(),
_M_buffer_descriptor._Get_buffer_ptr()->_Discard(_M_buffer_descriptor._Get_view_key());
if (_M_buffer_descriptor._Get_buffer_ptr()->_Has_data_source()) {
    return _M_buffer_descriptor._Get_buffer_ptr()->_Get_master_accelerator_view();
throw runtime_exception("Cannot query source accelerator_view for an array_view without a data source.", E_INVALIDARG);
template <typename _T, int _R>
static_assert(sizeof(array_view<_T,_R>) == sizeof(_Array_view_base<_R, sizeof(_T)/sizeof(int)>), "ASSERT FAILURE: implementation relies on binary conversion between the two");
_Base::_Project0(_I, _Projected_view);
_Projected_view._Initialize();
: _Base(_Other, _Section_origin, _Section_extent)
: _Base(_Src_buffer, _Extent)
if (_Src_data_size < this->extent.size()) {
    throw runtime_exception("Invalid _Src container argument - _Src size is less than the size of the array_view.", E_INVALIDARG);
if (_Discard_data) {
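// Usage sketch: wrapping host data in an array_view and running a kernel over it.
// Concurrency namespace assumed; synchronize() makes the results visible in 'data'.
std::vector<int> data(1024, 1);
array_view<int, 1> av(static_cast<int>(data.size()), data);
parallel_for_each(av.extent, [=](index<1> idx) restrict(amp)
{
    av[idx] *= 2;   // element-wise update on the accelerator
});
av.synchronize();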
template <typename _Value_type, int _Rank>
static_assert(0 == (sizeof(_Value_type) % sizeof(int)), "only value types whose size is a multiple of the size of an integer are allowed in array views");
static const int rank = _Rank;
: _Base(_Src.data(), _Extent)
static_assert(std::is_same<typename std::remove_const<typename std::remove_reference<decltype(*_Src.data())>::type>::type, _Value_type>::value, "container element type and array view element type must match");
_Initialize(_Src.size());
: _Base(_Src.data(), Concurrency::extent<1>(static_cast<int>(_Src.size())))
throw runtime_exception("Invalid _Src container argument - _Src size is greater than INT_MAX", E_INVALIDARG);
static_assert(std::is_same<decltype(_Src.data()), const _Value_type*>::value, "container element type and array view element type must match");
static_assert(_Rank == 1, "rank must be 1");
_Initialize(_Src.size());
: _Base(_Src.data(), _Extent)
static_assert(std::is_same<typename std::remove_const<typename std::remove_reference<decltype(*_Src.data())>::type>::type, _Value_type>::value, "container element type and array view element type must match");
_Initialize(_Src.size());
: _Base(_Src, _Extent)
: _Base(_Src, _Extent)
static_assert(std::is_same<typename std::remove_const<typename std::remove_reference<decltype(*_Src.data())>::type>::type, _Value_type>::value, "container element type and array view element type must match");
static_assert(_Rank == 1, "rank must be 1");
_Initialize(_Src.size());
static_assert(std::is_same<typename std::remove_const<typename std::remove_reference<decltype(*_Src)>::type>::type, _Value_type>::value, "container element type and array view element type must match");
static_assert(_Rank == 1, "rank must be 1");
static_assert(std::is_same<typename std::remove_const<typename std::remove_reference<decltype(*_Src.data())>::type>::type, _Value_type>::value, "container element type and array view element type must match");
static_assert(_Rank == 2, "rank must be 2");
_Initialize(_Src.size());
static_assert(std::is_same<typename std::remove_const<typename std::remove_reference<decltype(*_Src.data())>::type>::type, _Value_type>::value, "container element type and array view element type must match");
static_assert(_Rank == 3, "rank must be 3");
_Initialize(_Src.size());
static_assert(_Rank == 1, "rank must be 1");
static_assert(_Rank == 2, "rank must be 2");
static_assert(_Rank == 3, "rank must be 3");
static_assert(_Rank == 1, "rank must be 1");
static_assert(_Rank == 2, "rank must be 2");
static_assert(_Rank == 3, "rank must be 3");
_Base::operator=(_Other);
_Base::operator=(_Other);
void *_Ptr = _Access(_Index);
return *reinterpret_cast<value_type*>(_Ptr);
return this->operator()(_Index);
return *reinterpret_cast<value_type*>(_Ptr);
value_type& operator() (int _I0, int _I1) const __GPU
static_assert(_Rank == 2, "value_type& array_view::operator()(int,int) is only permissible on array_view<T, 2>");
return this->operator()(index<2>(_I0, _I1));
value_type& operator() (int _I0, int _I1, int _I2) const __GPU
static_assert(_Rank == 3, "value_type& array_view::operator()(int,int,int) is only permissible on array_view<T, 3>");
return this->operator()(index<3>(_I0, _I1, _I2));
return _Convert<_Value_type>(_Section(_Section_origin, _Section_extent));
return section(_Idx, this->extent - _Idx);
static_assert(_Rank == 1, "rank must be 1");
static_assert(_Rank == 2, "rank must be 2");
static_assert(_Rank == 3, "rank must be 3");
return _Convert<_Value_type2>(this->template _Reinterpret_as<sizeof(_Value_type2)/sizeof(int)>());
return _Convert<_Value_type>(_View_as(_View_extent));
const _Value_type* data() const __GPU
static_assert(_Rank == 1, "array_view::data() is only permissible on array_view<T, 1>");
_Get_access_async(_M_buffer_descriptor._Get_view_key(), _M_buffer_descriptor._Get_buffer_ptr()->_Get_master_accelerator_view(), _Write_access, _PBuf)._Get();
if (_M_buffer_descriptor._Get_buffer_ptr()->_Has_data_source()) {
    _Ev = _Get_access_async(_M_buffer_descriptor._Get_view_key(), _M_buffer_descriptor._Get_buffer_ptr()->_Get_master_accelerator_view(), _Read_access, _PBuf);
if (_M_buffer_descriptor._Get_buffer_ptr()->_Has_data_source()) {
    _Get_access_async(_M_buffer_descriptor._Get_view_key(), _M_buffer_descriptor._Get_buffer_ptr()->_Get_master_accelerator_view(), _Read_access, _PBuf)._Get();
if (_M_buffer_descriptor._Get_buffer_ptr()->_Has_data_source()) {
    return _M_buffer_descriptor._Get_buffer_ptr()->_Get_master_accelerator_view();
throw runtime_exception("Cannot query source accelerator_view for an array_view without a data source.", E_INVALIDARG);
template <typename _T, int _R>
_Base::_Project0(_I, _Projected_view);
_Projected_view._Initialize();
_Base(_Other, _Section_origin, _Section_extent)
_M_buffer_descriptor._M_type_access_mode = _Read_access;
if (_Src_data_size < this->extent.size()) {
    throw runtime_exception("Invalid _Src container argument - _Src size is less than the size of the array_view.", E_INVALIDARG);
template <typename InputIterator, typename _Value_type, int _Rank>
void copy(InputIterator _SrcFirst, InputIterator _SrcLast, array<_Value_type, _Rank> &_Dest);

template <typename _Value_type, int _Rank>
void copy(const array_view<const _Value_type, _Rank>& _Src, const array_view<_Value_type, _Rank>& _Dest);

template <typename _Value_type, int _Rank>
void copy(const array_view<_Value_type, _Rank>& _Src, const array_view<_Value_type, _Rank>& _Dest);

template <typename InputIterator, typename _Value_type, int _Rank>
concurrency::completion_future copy_async(InputIterator _SrcFirst, InputIterator _SrcLast, const array_view<_Value_type, _Rank> &_Dest);

template <typename InputIterator, typename _Value_type, int _Rank>
void copy(InputIterator _SrcFirst, InputIterator _SrcLast, const array_view<_Value_type, _Rank> &_Dest);

template <typename InputIterator, typename _Value_type, int _Rank>
void copy(InputIterator _SrcFirst, const array_view<_Value_type, _Rank> &_Dest);

template <typename OutputIterator, typename _Value_type, int _Rank>
void copy(const array_view<_Value_type, _Rank> &_Src, OutputIterator _DestIter);

template <typename _Value_type, int _Rank>
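// Usage sketch for the copy overloads declared above: moving data between a host
// container, an array and an array_view (Concurrency namespace assumed):
std::vector<float> host(256, 0.0f);
array<float, 1> dev(256);
copy(host.begin(), host.end(), dev);   // host container -> array
array_view<float, 1> av(dev);          // view over the array's data
copy(av, host.begin());                // array_view -> host iterator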
template <typename _Value_type, int _Rank = 1> class array
static_assert(!std::is_const<_Value_type>::value, "array<const _Value_type> is not supported");
static_assert(0 == (sizeof(_Value_type) % sizeof(int)), "only value types whose size is a multiple of the size of an integer are allowed in array");
template <typename _Value_type, int _Rank>
friend const _Buffer_descriptor& details::_Get_buffer_descriptor<array<_Value_type,_Rank>>(const array<_Value_type,_Rank>& _Array) __GPU;
static const int rank = _Rank;
: _M_extent(_Extent)
static_assert(_Rank == 1, "array(int) is only permissible on array<T, 1>");
explicit array(int _E0, int _E1) __CPU_ONLY
static_assert(_Rank == 2, "array(int, int) is only permissible on array<T, 2>");
explicit array(int _E0, int _E1, int _E2) __CPU_ONLY
static_assert(_Rank == 3, "array(int, int, int) is only permissible on array<T, 3>");
: _M_extent(_Extent)
_Initialize(_Av, _Cpu_access_type);
static_assert(_Rank == 1, "array(int, accelerator_view) is only permissible on array<T, 1>");
_Initialize(_Av, _Cpu_access_type);
static_assert(_Rank == 2, "array(int, int, accelerator_view) is only permissible on array<T, 2>");
_Initialize(_Av, _Cpu_access_type);
: _M_extent(Concurrency::extent<_Rank>(_E0, _E1, _E2))
static_assert(_Rank == 3, "array(int, int, int, accelerator_view) is only permissible on array<T, 3>");
_Initialize(_Av, _Cpu_access_type);
: _M_extent(_Extent)
_Initialize(_Av, _Associated_Av);
static_assert(_Rank == 1, "array(int, accelerator_view, accelerator_view) is only permissible on array<T, 1>");
_Initialize(_Av, _Associated_Av);
static_assert(_Rank == 2, "array(int, int, accelerator_view, accelerator_view) is only permissible on array<T, 2>");
_Initialize(_Av, _Associated_Av);
static_assert(_Rank == 3, "array(int, int, int, accelerator_view, accelerator_view) is only permissible on array<T, 3>");
_Initialize(_Av, _Associated_Av);
: _M_extent(_Extent)
: _M_extent(_Extent)
_InputIterator _Src_last = _Src_first;
template <typename _InputIterator> array(int _E0, _InputIterator _Src_first, _InputIterator _Src_last) __CPU_ONLY
static_assert(_Rank == 1, "array(int, iterator, iterator) is only permissible on array<T, 1>");
template <typename _InputIterator> array(int _E0, _InputIterator _Src_first) __CPU_ONLY
static_assert(_Rank == 1, "array(int, iterator) is only permissible on array<T, 1>");
_InputIterator _Src_last = _Src_first;
template <typename _InputIterator> array(int _E0, int _E1, _InputIterator _Src_first, _InputIterator _Src_last) __CPU_ONLY
static_assert(_Rank == 2, "array(int, int, iterator, iterator) is only permissible on array<T, 2>");
template <typename _InputIterator> array(int _E0, int _E1, _InputIterator _Src_first) __CPU_ONLY
static_assert(_Rank == 2, "array(int, int, iterator) is only permissible on array<T, 2>");
_InputIterator _Src_last = _Src_first;
template <typename _InputIterator> array(int _E0, int _E1, int _E2, _InputIterator _Src_first, _InputIterator _Src_last) __CPU_ONLY
static_assert(_Rank == 3, "array(int, int, int, iterator, iterator) is only permissible on array<T, 3>");
template <typename _InputIterator> array(int _E0, int _E1, int _E2, _InputIterator _Src_first) __CPU_ONLY
static_assert(_Rank == 3, "array(int, int, int, iterator) is only permissible on array<T, 3>");
_InputIterator _Src_last = _Src_first;
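// Usage sketch for the iterator-based array constructors above (values illustrative):
std::vector<float> src(100, 1.0f);
array<float, 1> a1(100, src.begin(), src.end());   // full-range initialization
array<float, 2> a2(10, 10, src.begin());           // single-iterator form reads extent.size() elements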
: _M_extent(_Extent)
_Initialize(_Av, _Src_first, _Src_last, _Cpu_access_type);
: _M_extent(_Extent)
_InputIterator _Src_last = _Src_first;
_Initialize(_Av, _Src_first, _Src_last, _Cpu_access_type);
static_assert(_Rank == 1, "array(int, iterator, iterator) is only permissible on array<T, 1>");
_Initialize(_Av, _Src_first, _Src_last, _Cpu_access_type);
static_assert(_Rank == 1, "array(int, iterator) is only permissible on array<T, 1>");
_InputIterator _Src_last = _Src_first;
_Initialize(_Av, _Src_first, _Src_last, _Cpu_access_type);
static_assert(_Rank == 2, "array(int, int, iterator, iterator) is only permissible on array<T, 2>");
_Initialize(_Av, _Src_first, _Src_last, _Cpu_access_type);
static_assert(_Rank == 2, "array(int, int, iterator) is only permissible on array<T, 2>");
_InputIterator _Src_last = _Src_first;
_Initialize(_Av, _Src_first, _Src_last, _Cpu_access_type);
: _M_extent(Concurrency::extent<_Rank>(_E0, _E1, _E2))
static_assert(_Rank == 3, "array(int, int, int, iterator, iterator) is only permissible on array<T, 3>");
_Initialize(_Av, _Src_first, _Src_last, _Cpu_access_type);
: _M_extent(Concurrency::extent<_Rank>(_E0, _E1, _E2))
static_assert(_Rank == 3, "array(int, int, int, iterator) is only permissible on array<T, 3>");
_InputIterator _Src_last = _Src_first;
_Initialize(_Av, _Src_first, _Src_last, _Cpu_access_type);
: _M_extent(_Extent)
_Initialize(_Av, _Associated_Av, _Src_first, _Src_last);
: _M_extent(_Extent)
_InputIterator _Src_last = _Src_first;
_Initialize(_Av, _Associated_Av, _Src_first, _Src_last);
static_assert(_Rank == 1, "array(int, iterator, iterator, accelerator_view, accelerator_view) is only permissible on array<T, 1>");
_Initialize(_Av, _Associated_Av, _Src_first, _Src_last);
static_assert(_Rank == 1, "array(int, iterator, accelerator_view, accelerator_view) is only permissible on array<T, 1>");
_InputIterator _Src_last = _Src_first;
_Initialize(_Av, _Associated_Av, _Src_first, _Src_last);
static_assert(_Rank == 2, "array(int, int, iterator, iterator, accelerator_view, accelerator_view) is only permissible on array<T, 2>");
_Initialize(_Av, _Associated_Av, _Src_first, _Src_last);
static_assert(_Rank == 2, "array(int, int, iterator, accelerator_view, accelerator_view) is only permissible on array<T, 2>");
_InputIterator _Src_last = _Src_first;
_Initialize(_Av, _Associated_Av, _Src_first, _Src_last);
static_assert(_Rank == 3, "array(int, int, int, iterator, iterator, accelerator_view, accelerator_view) is only permissible on array<T, 3>");
_Initialize(_Av, _Associated_Av, _Src_first, _Src_last);
static_assert(_Rank == 3, "array(int, int, int, iterator, accelerator_view, accelerator_view) is only permissible on array<T, 3>");
_InputIterator _Src_last = _Src_first;
_Initialize(_Av, _Associated_Av, _Src_first, _Src_last);
: _M_extent(_Src.extent)
: _M_extent(_Src.extent)
_Initialize(_Av, _Cpu_access_type);
: _M_extent(_Src.extent)
_Initialize(_Av, _Associated_Av);
: _M_extent(_Other._M_extent)
_Initialize(_Other.accelerator_view, _Other.associated_accelerator_view);
: _M_extent(_Other._M_extent), _M_multiplier(_Other._M_multiplier)
, _M_buffer_descriptor(_Other._M_buffer_descriptor)
this->_Register_copy(_Other);
_Other._Unregister();
_Other._M_buffer_descriptor._M_data_ptr = NULL;
_Other._M_buffer_descriptor._Set_buffer_ptr(NULL);
if (this != &_Other)
_M_extent = _Other._M_extent;
_Initialize(_Other.accelerator_view, _Other.associated_accelerator_view);
if (this != &_Other)
_M_extent = _Other._M_extent;
_M_multiplier = _Other._M_multiplier;
_M_buffer_descriptor = _Other._M_buffer_descriptor;
this->_Register_copy(_Other);
_Other._Unregister();
_Other._M_buffer_descriptor._M_data_ptr = NULL;
_Other._M_buffer_descriptor._Set_buffer_ptr(NULL);
_Value_type * _Ptr = reinterpret_cast<_Value_type *>(_M_buffer_descriptor._M_data_ptr);
return _Ptr[_Flatten_helper::func(_M_multiplier._M_base, _Index._M_base)];
#pragma warning( push )
#pragma warning( disable : 4880 )
#pragma warning( pop )
_Value_type * _Ptr = reinterpret_cast<_Value_type *>(_M_buffer_descriptor._M_data_ptr);
return _Ptr[_Flatten_helper::func(_M_multiplier._M_base, _Index._M_base)];
return this->operator[](_Index);
return this->operator[](_Index);
value_type& operator() (int _I0, int _I1) __GPU
static_assert(_Rank == 2, "value_type& array::operator()(int, int) is only permissible on array<T, 2>");
return this->operator[](index<2>(_I0, _I1));
const value_type& operator() (int _I0, int _I1) const __GPU
static_assert(_Rank == 2, "const value_type& array::operator()(int, int) is only permissible on array<T, 2>");
return this->operator[](index<2>(_I0, _I1));
value_type& operator() (int _I0, int _I1, int _I2) __GPU
static_assert(_Rank == 3, "value_type& array::operator()(int, int, int) is only permissible on array<T, 3>");
return this->operator[](index<3>(_I0, _I1, _I2));
const value_type& operator() (int _I0, int _I1, int _I2) const __GPU
static_assert(_Rank == 3, "const value_type& array::operator()(int, int, int) const is only permissible on array<T, 3>");
return this->operator[](index<3>(_I0, _I1, _I2));
return _T1.section(_Section_origin, _Section_extent);
return _T1.section(_Section_origin, _Section_extent);
return _T1.section(_I0, _I1, _E0, _E1);
return _T1.section(_I0, _I1, _E0, _E1);
return _T1.section(_I0, _I1, _I2, _E0, _E1, _E2);
return _T1.section(_I0, _I1, _I2, _E0, _E1, _E2);
#pragma warning( push )
#pragma warning( disable : 4880 )
return const_cast<array*>(this)->reinterpret_as<_Value_type2>();
#pragma warning( pop )
#pragma warning( push )
#pragma warning( disable : 4880 )
return const_cast<array*>(this)->view_as<_New_rank>(_View_extent);
#pragma warning( pop )
operator std::vector<_Value_type>() const __CPU_ONLY
std::vector<_Value_type> _return_vector(extent.size());
return _return_vector;
return reinterpret_cast<_Value_type*>(_M_buffer_descriptor._M_data_ptr);
const _Value_type* data() const __GPU
#pragma warning( push )
#pragma warning( disable : 4880 )
#pragma warning( pop )
return reinterpret_cast<const _Value_type*>(_M_buffer_descriptor._M_data_ptr);
: _M_extent(_Extent), _M_buffer_descriptor(_Buffer_descriptor)
unsigned int totalExtent = _M_extent[_Rank-1];
unsigned int totalExtent = _Initialize();
_M_buffer_descriptor._Set_buffer_ptr(NULL);
_Buffer_ptr _PBuf = _Buffer::_Create_buffer(_Av, _Av, totalExtent, sizeof(_Value_type), false, _Cpu_access_type);
_M_buffer_descriptor._Set_buffer_ptr(_Ubiquitous_buffer::_Create_ubiquitous_buffer(_PBuf));
template <typename _InputIterator>
_Initialize(_Av, _Cpu_access_type);
copy(_Src_first, _Src_last, *this);
unsigned int totalExtent = _Initialize();
_M_buffer_descriptor._Set_buffer_ptr(NULL);
_PBuf = _Buffer::_Create_buffer(_Associated_Av, _Av, totalExtent, sizeof(_Value_type), false, access_type_read_write);
_PBuf = _Buffer::_Create_stage_buffer(_Associated_Av, _Av, totalExtent, sizeof(_Value_type));
_PBuf = _Buffer::_Create_buffer(_Av, _Av, totalExtent, sizeof(_Value_type), false, access_type_auto);
_M_buffer_descriptor._Set_buffer_ptr(_Ubiquitous_buffer::_Create_ubiquitous_buffer(_PBuf));
template <typename _InputIterator>
_Initialize(_Av, _Associated_Av);
copy(_Src_first, _Src_last, *this);
_M_buffer_descriptor._Get_buffer_ptr()->_Register_view(_M_buffer_descriptor._Get_view_key(), cpuAv, _Create_buffer_view_shape());
_M_buffer_descriptor._Get_buffer_ptr()->_Discard(_M_buffer_descriptor._Get_view_key());
_M_buffer_descriptor._Get_buffer_ptr()->_Register_view_copy(_M_buffer_descriptor._Get_view_key(), _Other._M_buffer_descriptor._Get_view_key());
if (_M_buffer_descriptor._Get_buffer_ptr() != NULL) {
    _M_buffer_descriptor._Get_buffer_ptr()->_Unregister_view(_M_buffer_descriptor._Get_view_key());
return _M_buffer_descriptor._Get_buffer_ptr();
const_cast<array*>(this)->_M_buffer_descriptor._M_data_ptr = _PBuf->_Get_host_ptr();
unsigned int _ZeroOffset[_Rank] = {0};
unsigned int _View_extent[_Rank];
for (int i = 0; i < _Rank; ++i)
    _View_extent[i] = static_cast<unsigned int>(this->_M_extent[i]);
return _View_shape::_Create_view_shape(static_cast<unsigned int>(_Rank), 0, &_View_extent[0], &_ZeroOffset[0], &_View_extent[0]);
if ((_Requested_mode == _No_access) || ((_M_buffer_descriptor._M_curr_cpu_access_mode & _Requested_mode) != _Requested_mode))
if (_Has_cpu_access() && (_Requested_mode != _No_access))
if (!_Has_cpu_access()) {
    throw runtime_exception("The array is not accessible for reading on CPU.", E_FAIL);
UNREFERENCED_PARAMETER(_Requested_mode);
UNREFERENCED_PARAMETER(_Exception);
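// Usage sketch for the std::vector conversion operator above: copying an array's
// contents back to the host in one statement (values illustrative).
array<int, 1> dev(64);
// ... fill 'dev' on the accelerator ...
std::vector<int> host = dev;   // invokes operator std::vector<_Value_type>() const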
template <typename _Value_type, int _Rank>
if (_Src.extent.size() > _Dest.extent.size())
    throw runtime_exception("Invalid _Src argument. _Src size exceeds total size of the _Dest.", E_INVALIDARG);
size_t _NumElemsToCopy = (_Src.extent.size() * sizeof(_Value_type)) / _PBufSrc->_Get_elem_size();
template <typename InputIterator, typename _Value_type, int _Rank>
size_t _NumElemsToCopy = std::distance(_SrcFirst, _SrcLast);
return details::_Copy_impl<InputIterator, _Value_type>(_SrcFirst, _SrcLast, _NumElemsToCopy, _PDestBuf, 0);
template <typename OutputIterator, typename _Value_type, int _Rank>
size_t _NumElemsToCopy = (_Src.extent.size() * sizeof(_Value_type)) / _PSrcBuf->_Get_elem_size();
return details::_Copy_impl<OutputIterator, _Value_type>(_PSrcBuf, 0, _NumElemsToCopy, _DestIter);
template <typename _Value_type, int _Rank>
throw runtime_exception("Cannot copy between overlapping regions of the same buffer.", E_INVALIDARG);
template <typename _Value_type, int _Rank>
throw runtime_exception("Cannot copy between overlapping regions of the same buffer.", E_INVALIDARG);
return _Ev._Add_continuation(std::function<_Event()>([_PSrcBuf, _PSrcShape, _PDestBuf, _PDestShape]() mutable -> _Event {
template <typename _Value_type, int _Rank>
throw runtime_exception("Cannot copy between overlapping regions of the same buffer.", E_INVALIDARG);
template <typename InputIterator, typename _Value_type, int _Rank>
static_assert(!std::is_const<_Value_type>::value, "Cannot copy to array_view<const _Value_type, _Rank>.");
if (_Src_size > _Dest.extent.size())
    throw runtime_exception("Number of elements in range between [_SrcFirst, _SrcLast) exceeds total size of the _Dest.", E_INVALIDARG);
#pragma warning( push )
#pragma warning( disable : 4127 ) // Disable warning about constant conditional expression
if ((_Rank > 1) && (_Src_size != _Dest.extent.size()))
    throw runtime_exception("For _Rank > 1 the number of elements in range between [_SrcFirst, _SrcLast) has to be equal to total size of the _Dest.", E_INVALIDARG);
#pragma warning( pop )
unsigned int _Dst_linear_offset, _Dst_linear_size;
if (_Dst_shape->_Is_view_linear(_Dst_linear_offset, _Dst_linear_size))
    return details::_Copy_impl<InputIterator, _Value_type>(_SrcFirst, _SrcLast, _Src_size, _PDestBuf, _Dst_linear_offset);
std::vector<unsigned int> _Src_offset(_Reinterpreted_dst_shape->_Get_rank(), 0);
_Reinterpreted_dst_shape->_Get_view_extent(), _Src_offset.data(),
_Reinterpreted_dst_shape->_Get_view_extent());
return details::_Copy_impl<InputIterator, _Value_type>(_SrcFirst, _Src_shape, _PDestBuf, _Dst_shape);
template <typename OutputIterator, typename _Value_type, int _Rank>
unsigned int _Src_linear_offset, _Src_linear_size;
if (_Src_shape->_Is_view_linear(_Src_linear_offset, _Src_linear_size))
    return details::_Copy_impl<OutputIterator, _Value_type>(_PSrcBuf, _Src_linear_offset, _Src_linear_size, _DestIter);
std::vector<unsigned int> _Dst_offset(_Reinterpreted_src_shape->_Get_rank(), 0);
_Reinterpreted_src_shape->_Get_view_extent(), _Dst_offset.data(),
_Reinterpreted_src_shape->_Get_view_extent());
return details::_Copy_impl<OutputIterator, _Value_type>(_PSrcBuf, _Src_shape, _DestIter, _Dst_shape);
sizeof(_Value_type) * _Src.extent.size());
sizeof(_Value_type) * _Src.extent.size());
InputIterator _SrcLast = _SrcFirst;
return copy_async(_SrcFirst, _SrcLast, _Dest);
InputIterator _SrcLast = _SrcFirst;
copy(_SrcFirst, _SrcLast, _Dest);
sizeof(_Value_type) * _Src.extent.size());
sizeof(_Value_type) * _Src.extent.size());
sizeof(_Value_type) * _Src.extent.size());
sizeof(_Value_type) * _Src.extent.size());
sizeof(_Value_type) * _Src.extent.size());
sizeof(_Value_type) * _Src.extent.size());
sizeof(_Value_type) * _Src.extent.size());
sizeof(_Value_type) * _Src.extent.size());
InputIterator _SrcLast = _SrcFirst;
return copy_async(_SrcFirst, _SrcLast, _Dest);
InputIterator _SrcLast = _SrcFirst;
copy(_SrcFirst, _SrcLast, _Dest);
sizeof(_Value_type) * _Src.extent.size());
sizeof(_Value_type) * _Src.extent.size());
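// Usage sketch for copy_async: overlapping a host-to-device copy with other host work.
std::vector<int> host_src(1 << 20, 7);
array<int, 1> dev_dst(1 << 20);
completion_future f = copy_async(host_src.begin(), host_src.end(), dev_dst);
// ... do unrelated host work here ...
f.get();   // block until the copy has finished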
if (_D3D_buffer == NULL)
    throw runtime_exception("Cannot create D3D buffer on a non-D3D accelerator_view.", E_INVALIDARG);
_Ubiquitous_buffer_ptr _PBuf = _Ubiquitous_buffer::_Create_ubiquitous_buffer(_Buffer::_Create_buffer(_D3D_buffer, _Av, _Extent.size(), sizeof(_Value_type)));
#define AS_UINT_PTR(p) reinterpret_cast<unsigned int *>(p)
#define AS_UINT(v) *(reinterpret_cast<unsigned int *>(&(v)))
#define AS_INT(v) *(reinterpret_cast<int *>(&(v)))
#define AS_FLOAT(v) *(reinterpret_cast<float *>(&(v)))
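// The AS_* helpers above reinterpret a value's bit pattern in place. They are internal
// to this header; the lines below only illustrate the idiom they implement:
float f = 1.0f;
unsigned int bits = AS_UINT(f);    // 0x3F800000, the IEEE-754 encoding of 1.0f
int          as_i = AS_INT(f);     // the same bits viewed as a signed int
float        back = AS_FLOAT(bits); // 1.0f again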
#pragma warning( push )
#pragma warning( disable : 4146 )
#pragma warning( pop )
#pragma warning( push )
#pragma warning( disable : 4146 )
#pragma warning( pop )
#pragma warning( push )
#pragma warning( disable : 4146 )
#pragma warning( pop )
int _Old = *_Expected_value;
*_Expected_value = AS_INT(_Ret);
unsigned int _Old = *_Expected_value;
*_Expected_value = _Ret;
#pragma warning( push )
#pragma warning( disable : 4100 ) // unreferenced formal parameter
#pragma warning( pop )
inline int abs(int _X) __GPU_ONLY
inline float clamp(float _X, float _Min, float _Max) __GPU_ONLY
inline int clamp(int _X, int _Min, int _Max) __GPU_ONLY
inline unsigned int umax(unsigned int _X, unsigned int _Y) __GPU_ONLY
inline unsigned int umin(unsigned int _X, unsigned int _Y) __GPU_ONLY
inline float mad(float _X, float _Y, float _Z) __GPU_ONLY
inline double mad(double _X, double _Y, double _Z) __GPU_ONLY
inline int mad(int _X, int _Y, int _Z) __GPU_ONLY
inline unsigned int mad(unsigned int _X, unsigned int _Y, unsigned int _Z) __GPU_ONLY
inline float rcp(float _X) __GPU_ONLY
inline float smoothstep(float _Min, float _Max, float _X) __GPU_ONLY
inline float step(float _Y, float _X) __GPU_ONLY
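// Usage sketch for the GPU-only intrinsics declared above, inside an amp-restricted
// kernel. Concurrency and Concurrency::direct3d names assumed; values are illustrative.
std::vector<float> values(256, 0.5f);
array_view<float, 1> av(static_cast<int>(values.size()), values);
parallel_for_each(av.extent, [=](index<1> idx) restrict(amp)
{
    float x = direct3d::clamp(av[idx], 0.0f, 1.0f);   // clamp into [0, 1]
    av[idx] = direct3d::mad(x, 2.0f, -1.0f);          // fused multiply-add: x * 2 - 1
});
av.synchronize();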
Gets the location of the first set bit in _X, starting from the lowest order bit and working upward ...
Definition: amp.h:7271
static _AMPIMP _Ret_ _View_shape *__cdecl _Create_view_shape(unsigned int _Rank, unsigned int _Linear_offset, const unsigned int *_Base_extent, const unsigned int *_View_offset, const unsigned int *_View_extent, const bool *_Projection_info=NULL)
array_view< _Value_type2, 1 > reinterpret_as() __GPU
Produces a (possibly unsafe) reinterpretation of this array that is linear and with a different eleme...
Definition: amp.h:5393
The Concurrency namespace provides classes and functions that provide access to the Concurrency Runti...
Definition: agents.h:42
Class represents a future corresponding to a C++ AMP asynchronous operation
Definition: amprt.h:1342
bool operator!=(const _Tuple_type< _Rank > &_Lhs, const _Tuple_type< _Rank > &_Rhs) __GPU
Definition: amp.h:816
array_view< const _Value_type, 3 > section(int _I0, int _I1, int _I2, int _E0, int _E1, int _E2) const __GPU
Produces a three-dimensional subsection of the source array with origin specified by the index compon...
Definition: amp.h:5380
static void _Is_valid_extent(const _T< _Rank > &_Tuple) __CPU_ONLY
Definition: xxamp.h:1203
bool _Is_valid_access_mode(_Access_mode _Mode)
Definition: amprt.h:411
unsigned int size() const __GPU
Returns the total linear size of this extent (in units of elements).
Definition: amp.h:521
unsigned int __dp_d3d_maxu(unsigned int, unsigned int) __GPU_ONLY
float __dp_d3d_smoothstepf(float, float, float) __GPU_ONLY
void _Project0(int _I, array_view< _Value_type, _Rank-1 > &_Projected_view) const __GPU
Definition: amp.h:2953
extent< _Rank > & operator--() __GPU
Pre-decrements each element of this extent.
Definition: amp.h:767
array_view(const Concurrency::extent< _Rank > &_Extent, _Value_type *_Src) __GPU
Construct an array_view which is bound to the data pointed to by _Src.
Definition: amp.h:2291
unsigned int umax(unsigned int _X, unsigned int _Y) __GPU_ONLY
Determine the maximum numeric value of the arguments
Definition: amp.h:7322
concurrency::completion_future synchronize_to_async(const accelerator_view &_Accl_view) const __CPU_ONLY
Asynchronously synchronizes any modifications made to "this" array_view to the specified accelerator_...
Definition: amp.h:3663
A _Tiled_index_base is the base class of all three kinds of tiled_index to share the common code...
Definition: amp.h:977
array< _Value_type, _Rank > make_array(const Concurrency::extent< _Rank > &_Extent, const Concurrency::accelerator_view &_Av, _In_ IUnknown *_D3D_buffer) __CPU_ONLY
Create an array from a D3D buffer interface pointer.
Definition: amp.h:6490
float __dp_d3d_clampf(float, float, float) __GPU_ONLY
array_view(int _E0, int _E1, int _E2) __CPU_ONLY
Construct an array_view which is not bound to a data source.
Definition: amp.h:2393
array_view section(int _I0, int _I1, int _E0, int _E1) const __GPU
Produces a two-dimensional subsection of the source array_view with origin specified by the index com...
Definition: amp.h:3571
array(const Concurrency::extent< _Rank > &_Extent, _InputIterator _Src_first, Concurrency::accelerator_view _Av, access_type _Cpu_access_type=access_type_auto) __CPU_ONLY
Construct an array initialized from an iterator into a container, bound to a specific accelerator_vie...
Definition: amp.h:4317
tiled_extent(const Concurrency::extent< rank > &_Other) __GPU
Constructs a new tiled_extent from the supplied extent.
Definition: amp.h:1434
_AMPIMP void _Register_view(_In_ _View_key _Key, accelerator_view _Cpu_av, _View_shape_ptr _Shape)
double __dp_d3d_madd(double, double, double) __GPU_ONLY
const bool * _Get_projection_info() const
Definition: amprt.h:1691
void __dp_d3d_tile_static_memory_fence_with_tile_barrier() __GPU_ONLY
_Array_flatten_helper< _Rank, typename Concurrency::extent< _Rank >::value_type, typename Concurrency::index< _Rank >::value_type > _Flatten_helper
Definition: amp.h:3835
array(int _E0) __CPU_ONLY
Construct an array with the extent _E0
Definition: amp.h:3870
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:1957
float rcp(float _X) __GPU_ONLY
Calculates a fast, approximate reciprocal of the argument
Definition: amp.h:7461
#define NULL
Definition: crtdbg.h:30
_Ret_ _View_shape * _Create_buffer_view_shape() const __CPU_ONLY
Definition: amp.h:1896
_Ret_ _View_shape * _Create_buffer_view_shape() const
Definition: amp.h:5656
integral_constant< bool, false > false_type
Definition: xtr1common:48
array_view(array< _Value_type, _Rank > &_Src) __GPU
Construct an array_view which is bound to the data contained in the _Src array. The extent of the arr...
Definition: amp.h:2238
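A small sketch, again assuming the earlier includes, of wrapping an existing array in a shallow array_view and writing to it from a kernel:

    void view_over_array_sketch()
    {
        array<float, 2> a(32, 32);                    // storage lives on the default accelerator
        array_view<float, 2> av(a);                   // shallow view; no data is copied
        parallel_for_each(av.extent, [=](index<2> idx) restrict(amp) {
            av[idx] = 1.0f;                           // writes go to the array's storage
        });
    }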
_Array_view_base(const _Array_view_base &_Other, const Concurrency::index< _Rank > &_Section_origin, const Concurrency::extent< _Rank > &_Section_extent) __GPU
Definition: amp.h:1697
array_view(const array_view< _Value_type, _Rank > &_Src) __GPU
Copy constructor. Shallow copy.
Definition: amp.h:3041
array(int _E0, accelerator_view _Av, Concurrency::accelerator_view _Associated_Av) __CPU_ONLY
Construct a staging array between two associated accelerator_views.
Definition: amp.h:4042
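A sketch of the staging-array pattern under the same assumptions, pairing the CPU accelerator with the default (assumed GPU) accelerator; the name staging_sketch is illustrative only.

    void staging_sketch()
    {
        accelerator cpu_accl(accelerator::cpu_accelerator);
        accelerator gpu_accl;                         // default accelerator, assumed present
        // CPU-side storage associated with the GPU accelerator_view,
        // intended as a fast source/target for copies between the two
        array<float, 1> staging(1024, cpu_accl.default_view, gpu_accl.default_view);
    }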
void _Register(_In_ void *_Shape) __GPU_ONLY
Definition: amp.h:2150
_Tiled_index_base & operator=(const _Tiled_index_base &) __GPU
iterator_traits< _InIt >::difference_type distance(_InIt _First, _InIt _Last)
Definition: xutility:755
array(const array_view< const _Value_type, _Rank > &_Src, accelerator_view _Av, access_type _Cpu_access_type=access_type_auto) __CPU_ONLY
Construct an array initialized from an array_view, bound to a specific accelerator_view.
Definition: amp.h:4776
A tiled_index is a set of indices of 1 to 3 dimensions which have been subdivided into 1-...
Definition: amp.h:1071
_AMPIMP bool __cdecl _Is_D3D_accelerator_view(const accelerator_view &_Av)
_AMPIMP void _Get()
Wait until the _Event completes and throw any exceptions that occur.
array(const Concurrency::extent< _Rank > &_Extent, _InputIterator _Src_first, _InputIterator _Src_last, Concurrency::accelerator_view _Av, access_type _Cpu_access_type=access_type_auto) __CPU_ONLY
Construct an array initialized from a pair of iterators into a container, bound to a specific acceler...
Definition: amp.h:4291
extent(const int _Array[_Rank]) __GPU
Constructs an extent with the coordinate values provided by the array of int component values...
Definition: amp.h:476
_AMPIMP _Ret_ _Amp_runtime_trace *__cdecl _Get_amp_trace()
int atomic_fetch_dec(_Inout_ int *_Dest) __GPU_ONLY
Performs an atomic decrement to the memory location pointed to by _Dest
Definition: amp.h:6641
concurrency::completion_future synchronize_async(access_type _Access_type=access_type_read) const __CPU_ONLY
Asynchronously synchronizes any modifications made to "this" array_view to its source data...
Definition: amp.h:2850
array(const Concurrency::extent< _Rank > &_Extent) __CPU_ONLY
Construct an array from extents
Definition: amp.h:3858
_Array_view_base(const _Buffer_descriptor &_Buffer_desc, const Concurrency::extent< _Rank > &_Array_extent) __GPU
Definition: amp.h:1706
tiled_index(const tiled_index &_Other) __GPU
Copy Constructor.
Definition: amp.h:1106
static const int rank
Definition: amp.h:67
unsigned int reversebits(unsigned int _X) __GPU_ONLY
Reverses the order of the bits in _X
Definition: amp.h:7475
void wait_with_all_memory_fence() const __GPU_ONLY
Blocks execution of all threads in a tile until all threads in the tile have reached this call...
Definition: amp.h:949
#define UINT_MAX
Definition: limits.h:41
extent() __GPU
Default constructor. The value at each dimension is initialized to zero.
Definition: amp.h:404
const unsigned int * _Get_view_extent() const
Definition: amprt.h:1686
_Event _Get_access_async(_Access_mode _Mode, _Buffer_ptr &_Buf_ptr, bool _Zero_copy_cpu_access=false) __CPU_ONLY const
Definition: amp.h:5635
static _AMPIMP const wchar_t cpu_accelerator[]
String constant for cpu accelerator
Definition: amprt.h:1111
extent< _Rank > & operator%=(int _Rhs) __GPU
Computes each element of this extent modulo an integer value.
Definition: amp.h:730
_Array_view_shape(int _Base_linear_offset, const Concurrency::extent< _Rank > &_Array_extent) __GPU
Definition: amp.h:1574
array_view & operator=(const array_view< _Value_type, _Rank > &_Other) __GPU
Copy Assignment operator. Shallow copy.
Definition: amp.h:3347
void _Initialize(Concurrency::accelerator_view _Av, access_type _Cpu_access_type) __CPU_ONLY
Definition: amp.h:5538
static void _Is_valid_section(const _T2< _Rank > &_Base_extent, const _T1< _Rank > &_Section_origin, const _T2< _Rank > &_Section_extent) __CPU_ONLY
Definition: xxamp.h:1107
int atomic_fetch_max(_Inout_ int *_Dest, int _Value) __GPU_ONLY
Atomically computes the maximum of _Value and the value of the memory location pointed to by _Dest...
Definition: amp.h:6804
void _Initialize(size_t _Src_data_size, bool _Discard_data=false) __CPU_ONLY
Definition: amp.h:2979
_Array_view_base< _Rank, sizeof(_Value_type)/sizeof(int)> _Base
Definition: amp.h:3001
The tile_barrier class is a capability class that is only creatable by the system, and passed to a tiled parallel_for_each lambda as part of the tiled_index parameter. It provides wait methods whose purpose is to synchronize execution of threads running within the thread group (tile).
Definition: amp.h:923
array(int _E0, int _E1, _InputIterator _Src_first, Concurrency::accelerator_view _Av, Concurrency::accelerator_view _Associated_Av) __CPU_ONLY
Construct a staging array between two associated accelerator_views, initialized from an iterator into ...
Definition: amp.h:4672
_AMPIMP _Event _Add_event(_Event _Ev)
Creates an event which is an ordered collection of this and _Ev
#define AS_UINT(v)
Definition: amp.h:6515
index< _Rank > & operator*=(int _Rhs) __GPU
Multiplies each element of this index with an integer value.
Definition: amp.h:258
_Ret_ _Value_type * data() __GPU
Returns a pointer to the raw data of this array.
Definition: amp.h:5466
index(int _I0, int _I1) __GPU
Constructor for index<2>
Definition: amp.h:110
void __dp_d3d_tile_static_memory_fence() __GPU_ONLY
extent< _Rank > & operator/=(int _Rhs) __GPU
Divides each element of this extent by an integer value.
Definition: amp.h:715
void _Initialize(Concurrency::accelerator_view _Av, Concurrency::accelerator_view _Associated_Av, _InputIterator _Src_first, _InputIterator _Src_last) __CPU_ONLY
Definition: amp.h:5594
array(int _E0, _InputIterator _Src_first, _InputIterator _Src_last) __CPU_ONLY
Construct an array initialized from a pair of iterators into a container.
Definition: amp.h:4145
int i[4]
Definition: dvec.h:70
array_view(const Concurrency::extent< _Rank > &_Extent, _In_ _Value_type *_Src) __GPU
Construct an array_view which is bound to the data pointed to by _Src.
Definition: amp.h:3134
array_view< _Value_type, _Rank > section(const index< _Rank > &_Idx) __GPU
Produces a subsection of the source array with origin specified by an index, with an extent of (this-...
Definition: amp.h:5214
void _Refresh_data_ptr(_Access_mode _Requested_mode, bool _Exception=true) __GPU_ONLY
Definition: amp.h:5708
array(int _E0, int _E1, Concurrency::accelerator_view _Av, access_type _Cpu_access_type=access_type_auto) __CPU_ONLY
Construct an array from two integer extents, bound to a specific accelerator_view.
Definition: amp.h:3976
_Array_view_base() __GPU
Definition: amp.h:1668
index() __GPU
Default constructor, initializes all elements with 0.
Definition: amp.h:73
concurrency::completion_future synchronize_to_async(const accelerator_view &_Accl_view, access_type _Access_type=access_type_read) const __CPU_ONLY
Asynchronously synchronizes any modifications made to "this" array_view to the specified accelerator_...
Definition: amp.h:2826
array_view(const Concurrency::extent< _Rank > &_Extent, _Container &_Src) __CPU_ONLY
Construct an array_view which is bound to the data contained in the _Src container.
Definition: amp.h:2274
#define _ASSERTE(expr)
Definition: crtdbg.h:216
void copy_to(const array_view< _Value_type, _Rank > &_Dest) const __CPU_ONLY
Copies elements from this array_view to the destination array_view.
Definition: amp.h:3364
tiled_extent & operator=(const tiled_extent &_Other) __GPU
copy-assignment operator
Definition: amp.h:1277
int sign(int _X) __GPU_ONLY
Returns the sign of the argument
Definition: amp.h:7503
void direct3d_printf(const char *,...) __GPU_ONLY
_Access_mode _M_curr_cpu_access_mode
Definition: amprt.h:444
_In_ size_t _In_z_ const unsigned char * _Src
Definition: mbstring.h:95
_Array_view_base(const Concurrency::extent< _Rank > &_Array_extent) __CPU_ONLY
Definition: amp.h:1749
#define _In_
Definition: sal.h:314
~array() __CPU_ONLY
Destroys this array and reclaims resources.
Definition: amp.h:5491
index< _Rank > & operator+=(const index< _Rank > &_Rhs) __GPU
Element-wise addition of this index with another index.
Definition: amp.h:198
void _Refresh_data_ptr(_Access_mode _Requested_mode, bool _Exception=true) __CPU_ONLY
Definition: amp.h:5674
index< _Rank > & operator%=(int _Rhs) __GPU
Computes each element of this index modulo an integer value.
Definition: amp.h:288
int _Base_linear_offset() const __GPU
Definition: amp.h:1546
unsigned int __dp_d3d_reversebitsu(unsigned int) __GPU_ONLY
_Ret_ _Ubiquitous_buffer * _Get_buffer() __CPU_ONLY const
Definition: amp.h:5630
array(const array_view< const _Value_type, _Rank > &_Src) __CPU_ONLY
Construct an array initialized from an array_view.
Definition: amp.h:4753
static const int rank
Definition: amp.h:397
Concurrency::extent< _Rank > _M_view_extent
Definition: amp.h:1647
tiled_extent(const Concurrency::extent< rank > &_Other) __GPU
Constructs a new tiled_extent from the supplied extent.
Definition: amp.h:1265
void _Initialize() __GPU
Definition: amp.h:3771
int __dp_d3d_mini(int, int) __GPU_ONLY
float __dp_d3d_rcpf(float) __GPU_ONLY
unsigned int _Initialize() __CPU_ONLY
Definition: amp.h:5524
array_view< const _Value_type, _New_rank > view_as(const Concurrency::extent< _New_rank > &_View_extent) const __GPU
Produces an array_view of a different rank over this array's data.
Definition: amp.h:5440
tiled_extent(const tiled_extent &_Other) __GPU
Copy constructor. Constructs a new tiled_extent from the supplied argument "_Other".
Definition: amp.h:1358
void synchronize(access_type _Access_type=access_type_read) const __CPU_ONLY
Synchronizes any modifications made to "this" array_view to its source data.
Definition: amp.h:2899
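A brief sketch, with the same assumed includes, showing synchronize() copying kernel results back to the bound container:

    void synchronize_sketch()
    {
        std::vector<int> v(256, 0);
        array_view<int, 1> av(256, v);
        parallel_for_each(av.extent, [=](index<1> idx) restrict(amp) {
            av[idx] = idx[0];
        });
        av.synchronize();    // accelerator-side modifications become visible in v
    }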
tiled_extent< _Dim0 > tile() const __GPU
Produces a tiled_extent object with the tile extents given by _Dim0.
Definition: amp.h:537
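A minimal sketch of tiling a one-dimensional extent, assuming the earlier includes; the tile size 256 is an arbitrary choice.

    void tile_sketch()
    {
        extent<1> ext(1024);
        tiled_extent<256> t_ext = ext.tile<256>();    // 1024 threads grouped into tiles of 256
        // t_ext.pad() and t_ext.truncate() adjust extents that are not tile-divisible;
        // the tile_barrier::wait sketch further below launches a kernel over such an extent
        (void)t_ext;
    }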
bool _Has_cpu_access() const __CPU_ONLY
Definition: amp.h:5669
#define __CPU_ONLY
Definition: amprt.h:43
void _Unregister(bool _Throw_exception=true) __CPU_ONLY
Definition: amp.h:2009
void synchronize_to(const accelerator_view &_Accl_view, access_type _Access_type=access_type_read) const __CPU_ONLY
Synchronizes any modifications made to "this" array_view to the specified accelerator_view.
Definition: amp.h:2879
value_type & get_ref(const index< _Rank > &_Index) const __GPU
Get a reference to the element indexed by _Index. Unlike the other indexing operators for accessing t...
Definition: amp.h:3397
void _Initialize_multiplier() __GPU
Definition: amp.h:1635
void _Register() __CPU_ONLY
Definition: amp.h:5600
tiled_extent(const tiled_extent &_Other) __GPU
Copy constructor. Constructs a new tiled_extent from the supplied argument "_Other".
Definition: amp.h:1440
array(int _E0, _InputIterator _Src_first) __CPU_ONLY
Construct an array initialized from an iterator.
Definition: amp.h:4162
int value_type
Definition: amp.h:68
array_view< _Value_type, _Rank > section(const Concurrency::extent< _Rank > &_Ext) __GPU
Produces a subsection of the source array_view with origin of zero, with an extent of _Ext...
Definition: amp.h:5184
tiled_index< _Dim0, _Dim1, _Dim2 > _map_index(const index< rank > &_Local, const index< rank > &_Tile, const index< rank > &_Global, tile_barrier &_Barrier) const __GPU
Definition: amp.h:1326
Exception thrown due to a C++ AMP runtime_exception. This is the base type for all C++ AMP exception ...
Definition: amprt.h:835
_In_ double _Y
Definition: math.h:999
array_view section(int _I0, int _I1, int _I2, int _E0, int _E1, int _E2) const __GPU
Produces a three-dimensional subsection of the source array_view with origin specified by the index c...
Definition: amp.h:2755
_CPP_AMP_VERIFY_RANK(_Rank, extent)
Concurrency::extent< _Rank > _M_extent
Definition: amp.h:5717
_Ty * data() _NOEXCEPT
Definition: array:195
static void _Is_valid_projection(int _I, const _T1< _Rank > &_Base_extent) __CPU_ONLY
Definition: xxamp.h:1135
array(const Concurrency::extent< _Rank > &_Extent, Concurrency::accelerator_view _Av, access_type _Cpu_access_type=access_type_auto) __CPU_ONLY
Construct an array from extents, bound to a specific accelerator_view.
Definition: amp.h:3928
struct Concurrency::details::_Buffer_descriptor _Buffer_descriptor
extent< _Rank > & operator+=(const extent< _Rank > &_Rhs) __GPU
Element-wise addition of this extent with another extent.
Definition: amp.h:610
float saturate(float _X) __GPU_ONLY
Clamps _X within the range of 0 to 1
Definition: amp.h:7489
_AMPIMP accelerator __cdecl _Select_default_accelerator()
void parallel_for_each(const extent< _Rank > &_Compute_domain, const _Kernel_type &_Kernel)
Invokes a parallel computation of a kernel function over a compute domain on an accelerator_view. The accelerator_view is determined from the arrays and/or array_views captured by the kernel function, or if no accelerator_view can be derived, the default is chosen.
Definition: amp.h:6988
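A hedged end-to-end sketch of parallel_for_each over a non-tiled extent, assuming the includes from the earlier section sketch; saxpy_sketch and the assumption that x and y have equal length are illustrative.

    void saxpy_sketch(float a, const std::vector<float>& x, std::vector<float>& y)
    {
        array_view<const float, 1> xv((int)x.size(), x);
        array_view<float, 1> yv((int)y.size(), y);
        parallel_for_each(yv.extent, [=](index<1> idx) restrict(amp) {
            yv[idx] += a * xv[idx];              // y = a*x + y, element-wise
        });
        yv.synchronize();                        // bring the results back into y
    }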
unsigned int _Get_linear_offset() const
Definition: amprt.h:1672
_Ret_ void * _Access(_Access_mode _Requested_mode, const index< _Rank > &_Index) const __CPU_ONLY
Definition: amp.h:1834
array(int _E0, int _E1, int _E2, _InputIterator _Src_first, _InputIterator _Src_last) __CPU_ONLY
Construct an array initialized from an iterator.
Definition: amp.h:4235
_Array_view_base(const _Buffer_descriptor &_Buffer_desc, int _Base_linear_offset, const Concurrency::extent< _Rank > &_Array_extent) __GPU
Definition: amp.h:1715
_Array_flatten_helper< _Rank, typename Concurrency::extent< _Rank >::value_type, typename Concurrency::index< _Rank >::value_type > _Flatten_helper
Definition: amp.h:1530
_Ret_ IUnknown * get_buffer(const array< _Value_type, _Rank > &_Array) __CPU_ONLY
Get the D3D buffer interface underlying an array.
Definition: amp.h:6462
tiled_extent() __GPU
Default constructor.
Definition: amp.h:1347
static void func(_RES_EXT &_ResArrayExtent, const _SRC_EXT &_SrcArrayExtent, _RES_EXT &_ResArrayMultiplier, const _SRC_EXT &_SrcArrayMultiplier, _RES_IDX &_ResViewOffset, const _SRC_IDX &_SrcViewOffset, _RES_EXT &_ResViewExtent, const _SRC_EXT &_SrcViewExtent) __GPU
Definition: xxamp.h:768
array_view< const _Value_type, 1 > section(int _I0, int _E0) const __GPU
Produces a one-dimensional subsection of the source array with origin specified by the index componen...
Definition: amp.h:5268
void synchronize_to(const accelerator_view &_Accl_view) const __CPU_ONLY
Synchronizes any modifications made to "this" array_view to the specified accelerator_view.
Definition: amp.h:3703
_AMPIMP _Buffer_ptr _Get_master_buffer() const
extent< _Rank > & operator+=(const index< _Rank > &_Rhs) __GPU
Element-wise addition of this extent with an index.
Definition: amp.h:640
const _Value_type * data() const __GPU
Returns a pointer to the raw data of this array_view.
Definition: amp.h:3638
array_view< const _Value_type, _Rank > section(const index< _Rank > &_Idx) const __GPU
Produces a subsection of the source array with origin specified by an index, with an extent of (this-...
Definition: amp.h:5230
tiled_index(const tiled_index &_Other) __GPU
Copy Constructor.
Definition: amp.h:1221
int atomic_fetch_xor(_Inout_ int *_Dest, int _Value) __GPU_ONLY
Performs an atomic bitwise xor operation of _Value to the memory location pointed to by _Dest ...
Definition: amp.h:6949
tiled_extent(const Concurrency::extent< rank > &_Other) __GPU
Constructs a new tiled_extent from the supplied extent.
Definition: amp.h:1352
array_view(int _E0, int _E1, int _E2, _In_ _Value_type *_Src) __GPU
Construct an array_view which is bound to the data pointed to by _Src.
Definition: amp.h:2490
array_view section(const Concurrency::extent< _Rank > &_Ext) const __GPU
Produces a subsection of the source array_view with origin of zero, with an extent of _Ext...
Definition: amp.h:2681
_Tiled_index_base(const _Tiled_index_base &_Other) __GPU
Copy Constructor.
Definition: amp.h:1042
array(int _E0, int _E1, int _E2, _InputIterator _Src_first, _InputIterator _Src_last, Concurrency::accelerator_view _Av, Concurrency::accelerator_view _Associated_Av) __CPU_ONLY
Construct a staging array between two associated accelerator_views, initialized from a pair of iterato...
Definition: amp.h:4707
void copy_to(const array_view< _Value_type, _Rank > &_Dest) const __CPU_ONLY
Copies elements from this array to the destination array_view.
Definition: amp.h:4888
_Ret_ _Ubiquitous_buffer * _Get_buffer(const _Array_type &_Array) __CPU_ONLY
Definition: xxamp.h:1070
unsigned int umin(unsigned int _X, unsigned int _Y) __GPU_ONLY
Determine the minimum numeric value of the arguments
Definition: amp.h:7339
array(const Concurrency::extent< _Rank > &_Extent, _InputIterator _Src_first, _InputIterator _Src_last, Concurrency::accelerator_view _Av, Concurrency::accelerator_view _Associated_Av) __CPU_ONLY
Construct a staging array between two associated accelerator_views, initialized from a pair of iterato...
Definition: amp.h:4542
_AMPIMP void _Register_view_copy(_In_ _View_key _New_view_key, _In_ _View_key _Existing_view_key)
_Tiled_index_base(const index< rank > &_Global, const index< rank > &_Local, const index< rank > &_Tile, const index< rank > &_Tile_origin, const tile_barrier &_Barrier) __GPU
A constructor that initializes data members using the given values.
Definition: amp.h:1028
unsigned int countbits(unsigned int _X) __GPU_ONLY
Counts the number of set bits in _X
Definition: amp.h:7243
void copy_to(array< _Value_type, _Rank > &_Dest) const __CPU_ONLY
Copies elements from this array_view to the destination array.
Definition: amp.h:2509
array(const Concurrency::extent< _Rank > &_Extent, _InputIterator _Src_first, _InputIterator _Src_last) __CPU_ONLY
Construct an array initialized from a pair of iterators into a container.
Definition: amp.h:4108
unsigned int mad(unsigned int _X, unsigned int _Y, unsigned int _Z) __GPU_ONLY
Performs an arithmetic multiply/add operation on three arguments: _X * _Y + _Z
Definition: amp.h:7419
tiled_extent< _Dim0, _Dim1, _Dim2 > tile() const __GPU
Produces a tiled_extent object with the tile extents given by _Dim0, _Dim1, _Dim2.
Definition: amp.h:559
~array_view() __GPU
Destroys this array_view and reclaims resources.
Definition: amp.h:3023
int __dp_d3d_absi(int) __GPU_ONLY
array_view< const _Value_type2, _Rank > reinterpret_as() const __GPU
Produces a (possibly unsafe) reinterpretation of this array_view that is linear and with a different ...
Definition: amp.h:3616
void direct3d_abort() __GPU_ONLY
void __dp_d3d_all_memory_fence() __GPU_ONLY
extent(int _I) __GPU
Constructor for extent<1>.
Definition: amp.h:426
unsigned int __dp_d3d_interlocked_and(_Inout_ unsigned int *, unsigned int) __GPU_ONLY
array(int _E0, int _E1, int _E2, _InputIterator _Src_first, _InputIterator _Src_last, Concurrency::accelerator_view _Av, access_type _Cpu_access_type=access_type_auto) __CPU_ONLY
Construct an array initialized from a pair of iterators into a container, bound to a specific acceler...
Definition: amp.h:4480
void _Register_copy(const _Array_view_base &_Other) __GPU_ONLY
Definition: amp.h:2145
array_view(const Concurrency::extent< _Rank > &_Extent, _Container &_Src) __CPU_ONLY
Construct an array_view which is bound to the data contained in the _Src container.
Definition: amp.h:3101
void copy_to(array< _Value_type, _Rank > &_Dest) const __CPU_ONLY
Copies elements from this array to the destination array.
Definition: amp.h:4880
_AMPIMP void _Unregister_view(_In_ _View_key _Key)
void __dp_d3d_all_memory_fence_with_tile_barrier() __GPU_ONLY
array_view(int _E0, int _E1, const _Value_type *_Src) __GPU
Construct an array_view which is bound to the data pointed to by _Src.
Definition: amp.h:3245
const tile_barrier barrier
An object which represents a barrier within the current tile of threads.
Definition: amp.h:1008
_Ret_ _View_key _Get_view_key()
Definition: amprt.h:532
const index< rank > tile_origin
An index that represents the global coordinates of the origin of the current tile within a tiled_exte...
Definition: amp.h:1003
array_view section(const Concurrency::index< _Rank > &_Section_origin, const Concurrency::extent< _Rank > &_Section_extent) const __GPU
Produces a subsection of the source array_view at the given origin and extent.
Definition: amp.h:3498
array_view section(const Concurrency::extent< _Rank > &_Ext) const __GPU
Produces a subsection of the source array_view with origin of zero, with an extent of _Ext...
Definition: amp.h:3513
Concurrency::extent< _Rank > _M_array_extent
Definition: amp.h:1643
accelerator_view get_source_accelerator_view() const
Returns the accelerator_view where the data source of the array_view is located. If the array_view do...
Definition: amp.h:3736
The extent type represents an N-dimensional vector of int which specifies the bounds of an N-dimen...
Definition: amp.h:383
~_Array_view_base() __GPU
Definition: amp.h:1660
_Array_view_base< _Rank, sizeof(_Value_type)/sizeof(int)> _Base
Definition: amp.h:2202
An array is a multi-dimensional data aggregate on an accelerator_view.
Definition: amp.h:3831
_Tuple_type< _Rank > operator+(const _Tuple_type< _Rank > &_Lhs, const _Tuple_type< _Rank > &_Rhs) __GPU
Definition: amp.h:822
void direct3d_errorf(const char *,...) __GPU_ONLY
void discard_data() const __CPU_ONLY
Discards the current data underlying this view. This is an optimization hint to the runtime used to a...
Definition: amp.h:2924
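A short sketch of discard_data() on a write-only view, with the same assumed includes:

    void discard_sketch()
    {
        std::vector<float> out(1024);
        array_view<float, 1> av(1024, out);
        av.discard_data();                       // hint: current contents need not be copied to the accelerator
        parallel_for_each(av.extent, [=](index<1> idx) restrict(amp) {
            av[idx] = 0.0f;                      // the view is fully written before it is read
        });
        av.synchronize();
    }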
#define AS_INT(v)
Definition: amp.h:6516
extent(int _I0, int _I1, int _I2) __GPU
Constructor for extent<3>
Definition: amp.h:460
int abs(int _X) __GPU_ONLY
Returns the absolute value of the argument
Definition: amp.h:7189
index(const int _Array[_Rank]) __GPU
Constructs an index with the coordinate values provided by the array of int component values...
Definition: amp.h:145
array_view(const _Container &_Src, typename std::enable_if< details::_Is_container< _Container >::type::value, void ** >::type=0) __CPU_ONLY
Construct an array_view which is bound to the data contained in the _Src container; ...
Definition: amp.h:3081
int __dp_d3d_madi(int, int, int) __GPU_ONLY
_AMPIMP ULONG _Launch_async_copy_event_helper(const _Buffer_descriptor &_Src, const _Buffer_descriptor &_Dest, ULONGLONG _Num_bytes_for_copy)
void _Register_copy(const array &_Other) __CPU_ONLY
Definition: amp.h:5617
int value_type
Definition: amp.h:398
unsigned int __dp_d3d_interlocked_min_uint(_Inout_ unsigned int *, unsigned int) __GPU_ONLY
index(const index< _Rank > &_Other) __GPU
Copy Constructor.
Definition: amp.h:84
array(int _E0, int _E1, int _E2, _InputIterator _Src_first) __CPU_ONLY
Construct an array initialized from an iterator.
Definition: amp.h:4258
void * _M_data_ptr
Definition: amprt.h:432
array(const Concurrency::extent< _Rank > &_Extent, Concurrency::accelerator_view _Av, Concurrency::accelerator_view _Associated_Av) __CPU_ONLY
Construct a staging array between two associated accelerator_views.
Definition: amp.h:4024
_Array_view_shape(int _Base_linear_offset, const Concurrency::extent< _Rank > &_Array_extent, const Concurrency::index< _Rank > &_Section_origin, const Concurrency::extent< _Rank > &_Section_extent) __GPU
Definition: amp.h:1584
unsigned int __dp_d3d_interlocked_exchange(_Inout_ unsigned int *, unsigned int) __GPU_ONLY
array_view() __GPU
Definition: amp.h:3762
_Ret_ _View_shape * _Get_buffer_view_shape(const _Buffer_descriptor &_Descriptor)
Definition: amprt.h:3463
array_view & operator=(const array_view &_Other) __GPU
Copy Assignment operator. Shallow copy.
Definition: amp.h:3338
int imin(int _X, int _Y) __GPU_ONLY
Determine the minimum numeric value of the arguments
Definition: amp.h:7305
array(int _E0, int _E1, int _E2, Concurrency::accelerator_view _Av, Concurrency::accelerator_view _Associated_Av) __CPU_ONLY
Construct a staging array between two associated accelerator_views.
Definition: amp.h:4089
_Access_mode
Definition: amprt.h:82
array(int _E0, int _E1, _InputIterator _Src_first, _InputIterator _Src_last) __CPU_ONLY
Construct an array initialized from a pair of iterators into a container.
Definition: amp.h:4188
tiled_extent() __GPU
Default constructor.
Definition: amp.h:1429
array(int _E0, _InputIterator _Src_first, Concurrency::accelerator_view _Av, Concurrency::accelerator_view _Associated_Av)
Construct a staging array between two associated accelerator_views, initialized from an iterator into ...
Definition: amp.h:4614
const _Buffer_descriptor & _Get_buffer_descriptor(const _Array_type &_Array) __GPU
Definition: xxamp.h:1064
array_view(const array_view &_Other, const Concurrency::index< _Rank > &_Section_origin, const Concurrency::extent< _Rank > &_Section_extent) __GPU
Definition: amp.h:2961
const index< rank > local
An index that represents the relative index within the current tile of a tiled_extent.
Definition: amp.h:993
array(int _E0, int _E1, _InputIterator _Src_first, _InputIterator _Src_last, Concurrency::accelerator_view _Av, Concurrency::accelerator_view _Associated_Av) __CPU_ONLY
Construct a staging array between two associated accelerator_views, initialized from a pair of iterato...
Definition: amp.h:4646
float radians(float _X) __GPU_ONLY
Converts _X from degrees to radians
Definition: amp.h:7447
_AMPIMP ULONG _Launch_array_view_synchronize_event_helper(const _Buffer_descriptor &_Buff_desc)
extent< _Rank > operator++(int) __GPU
Post-increments each element of this extent.
Definition: amp.h:754
_AMPIMP ULONG _Start_copy_event_helper(const _Buffer_descriptor &_Src, const _Buffer_descriptor &_Dest, ULONGLONG _Num_bytes_for_copy)
float smoothstep(float _Min, float _Max, float _X) __GPU_ONLY
Returns a smooth Hermite interpolation between 0 and 1, if _X is in the range [_Min, _Max].
Definition: amp.h:7523
tiled_extent pad() const __GPU
Returns a new tiled_extent with extents adjusted up to be evenly divisible by the tile dimensions...
Definition: amp.h:1465
_Array_view_base(const _Buffer_descriptor &_Buffer_desc, int _Base_linear_offset, const Concurrency::extent< _Rank > &_Array_extent, const Concurrency::index< _Rank > &_View_offset, const Concurrency::extent< _Rank > &_View_extent) __GPU
Definition: amp.h:1724
_Array_view_base(_In_ void *_Data, const Concurrency::extent< _Rank > &_Array_extent) __CPU_ONLY
Definition: amp.h:1760
tiled_index(const index< rank > &_Global, const index< rank > &_Local, const index< rank > &_Tile, const index< rank > &_Tile_origin, const tile_barrier &_Barrier) __GPU
A constructor that initializes data members using the given values.
Definition: amp.h:1092
int clamp(int _X, int _Min, int _Max) __GPU_ONLY
Clamps _X to the specified _Min and _Max range
Definition: amp.h:7229
array_view(int _E0, _In_ _Value_type *_Src) __GPU
Construct an array_view which is bound to the data pointed to by _Src.
Definition: amp.h:2433
tiled_extent< _Dim0, _Dim1 > tile() const __GPU
Produces a tiled_extent object with the tile extents given by _Dim0, _Dim1
Definition: amp.h:548
_Array_view_shape & operator=(const _Array_view_shape &_Other) __GPU
Definition: amp.h:1598
_Ret_ _Value_type * data() const __GPU
Returns a pointer to the raw data of this array_view.
Definition: amp.h:2791
extent< _Rank > operator-(const index< _Rank > &_Rhs) __GPU
Element-wise subtraction of this extent with an index.
Definition: amp.h:594
extent< _Rank > & operator-=(int _Rhs) __GPU
Subtracts an integer value from each element of this extent.
Definition: amp.h:685
int operator[](unsigned int _Index) const __GPU
Index operator.
Definition: amp.h:499
_Array_view_base(const _Buffer_descriptor &_Buffer_desc, const Concurrency::extent< _Rank > &_Array_extent, const Concurrency::index< _Rank > &_Section_origin, const Concurrency::extent< _Rank > &_Section_extent) __GPU
Definition: amp.h:1739
#define _CPP_AMP_VERIFY_MUTABLE_ITERATOR(_Type_name)
Definition: xxamp.h:27
void copy(const array< _Value_type, _Rank > &_Src, array< _Value_type, _Rank > &_Dest)
Copies the contents of the source array into the destination array.
Definition: amp.h:5976
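A sketch of array-to-array copy under the same assumptions; the comment points at the asynchronous form listed elsewhere in this index.

    void copy_sketch()
    {
        std::vector<float> src(256, 1.0f);
        array<float, 1> a(256, src.begin(), src.end());   // deep copy from the container
        array<float, 1> b(256);
        copy(a, b);                                       // synchronous deep copy into b
        // copy_async(a, b) is the non-blocking form and returns a completion_future
    }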
int _M_total_linear_offset
Definition: amp.h:1646
array_view(int _E0, const _Value_type *_Src) __GPU
Construct an array_view which is bound to the data pointed to by _Src.
Definition: amp.h:3225
array_view(int _E0, const _Container &_Src) __CPU_ONLY
Construct an array_view which is bound to the data contained in the _Src container.
Definition: amp.h:3149
float __dp_d3d_saturatef(float) __GPU_ONLY
A tiled_extent is an extent of 1 to 3 dimensions which also subdivides the extent space into 1-...
Definition: amp.h:1249
int _M_base[_Rank]
Definition: amp.h:359
int atomic_fetch_and(_Inout_ int *_Dest, int _Value) __GPU_ONLY
Performs an atomic bitwise and operation of _Value to the memory location pointed to by _Dest ...
Definition: amp.h:6876
void advance(_InIt &_Where, _Diff _Off)
Definition: xutility:695
array_view< const _Value_type, _Rank > section(const Concurrency::index< _Rank > &_Section_origin, const Concurrency::extent< _Rank > &_Section_extent) const __GPU
Produces a subsection of the source array at the given origin and extent.
Definition: amp.h:5168
array_view(int _E0, int _E1, const _Container &_Src) __CPU_ONLY
Construct an array_view which is bound to the data contained in the _Src container.
Definition: amp.h:3184
array_view(const _In_ _Arr_type(&_Src)[_Size]) __GPU
Construct an array_view which is bound to the data contained in the _Src container; ...
Definition: amp.h:3164
extent(int _I0, int _I1) __GPU
Constructor for extent<2>
Definition: amp.h:441
array(int _E0, int _E1, _InputIterator _Src_first, Concurrency::accelerator_view _Av, access_type _Cpu_access_type=access_type_auto) __CPU_ONLY
Construct an array initialized from an iterator into a container, bound to a specific accelerator_vie...
Definition: amp.h:4441
#define _T(x)
Definition: tchar.h:2498
_AMPIMP ULONG _Start_array_view_synchronize_event_helper(const _Buffer_descriptor &_Buff_desc)
array_view(int _E0, int _E1, int _E2, const _Value_type *_Src) __GPU
Construct an array_view which is bound to the data pointed to by _Src.
Definition: amp.h:3268
array_view< const _Value_type2, 1 > reinterpret_as() const __GPU
Produces a (possibly unsafe) reinterpretation of this array that is linear and with a different eleme...
Definition: amp.h:5405
int _Calculate_reinterpreted_size(int _Old_size) __GPU_ONLY
Definition: amp.h:1502
array_view(_Buffer_descriptor &_Src_buffer, const Concurrency::extent< _Rank > &_Extent) __GPU
Definition: amp.h:2967
void _Unregister() __CPU_ONLY
Definition: amp.h:5622
_Array_view_base(const _Buffer_descriptor &_Buffer_desc, const _Array_view_shape &_Shape) __GPU
Definition: amp.h:1670
array(int _E0, int _E1, int _E2, _InputIterator _Src_first, Concurrency::accelerator_view _Av, access_type _Cpu_access_type=access_type_auto) __CPU_ONLY
Construct an array initialized from an iterator into a container, bound to a specific accelerator_vie...
Definition: amp.h:4513
array_view section(int _I0, int _E0) const __GPU
Produces a one-dimensional subsection of the source array_view with origin specified by the index com...
Definition: amp.h:3546
static _Ret_ void * _Create_projection_buffer_shape(const _Buffer_descriptor &_Descriptor, int _Dim, int _I) __GPU_ONLY
Definition: amp.h:2160
static const int rank
Definition: amp.h:983
int atomic_fetch_sub(_Inout_ int *_Dest, int _Value) __GPU_ONLY
Performs an atomic subtraction of _Value from the memory location pointed to by _Dest ...
Definition: amp.h:6567
array(const Concurrency::extent< _Rank > &_Extent, _InputIterator _Src_first, Concurrency::accelerator_view _Av, Concurrency::accelerator_view _Associated_Av) __CPU_ONLY
Construct a staging array between two associated accelerator_views, initialized from an iterator into ...
Definition: amp.h:4564
details::_Projection_result_type< _Value_type, _Rank >::_Const_result_type operator()(int _I) const __GPU
Projects the most-significant dimension of this array. If the array rank is 1, this produces a single...
Definition: amp.h:5133
_Event _Copy_async_impl(const array_view< _Value_type, _Rank > &_Src, OutputIterator _DestIter)
Definition: amp.h:5901
void copy_to(const array_view< _Value_type, _Rank > &_Dest) const __CPU_ONLY
Copies elements from this array_view to the destination array_view.
Definition: amp.h:2517
int __dp_d3d_firstbithighi(int) __GPU_ONLY
details::_Projection_result_type< _Value_type, _Rank >::_Result_type operator()(int _I) __GPU
Projects the most-significant dimension of this array. If the array rank is 1, this produces a single...
Definition: amp.h:5118
_Ret_ void * _Access(const index< _Rank > &_Index) const __GPU
Definition: amp.h:1828
#define AS_UINT_PTR(p)
Definition: amp.h:6514
bool atomic_compare_exchange(_Inout_ int *_Dest, _Inout_ int *_Expected_value, int _Value) __GPU_ONLY
Atomically compares the value pointed to by _Dest for equality with that pointed to by _Expected_val...
Definition: amp.h:6743
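A hedged sketch of a one-shot flag claimed via atomic_compare_exchange; names and sizes are hypothetical, includes as before.

    void cas_sketch()
    {
        std::vector<int> flag_storage(1, 0);
        array_view<int, 1> flag(1, flag_storage);
        parallel_for_each(extent<1>(1024), [=](index<1>) restrict(amp) {
            int expected = 0;
            // at most one thread sees the exchange succeed (return true)
            atomic_compare_exchange(&flag[0], &expected, 1);
        });
        flag.synchronize();                      // flag_storage[0] is now 1
    }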
array_view(int _E0, int _E1, _In_ _Value_type *_Src) __GPU
Construct an array_view which is bound to the data pointed to by _Src.
Definition: amp.h:3305
_Ret_ _View_shape * _Create_reinterpreted_shape(const _View_shape *_Source_shape, size_t _Curr_elem_size, size_t _New_elem_size)
Definition: amprt.h:1961
extent(const extent< _Rank > &_Other) __GPU
Copy constructor. Constructs a new extent from the supplied argument _Other.
Definition: amp.h:415
unsigned int __dp_d3d_interlocked_compare_exchange(_Inout_ unsigned int *, unsigned int, unsigned int) __GPU_ONLY
int atomic_fetch_inc(_Inout_ int *_Dest) __GPU_ONLY
Performs an atomic increment to the memory location pointed to by _Dest
Definition: amp.h:6611
void global_memory_fence(const tile_barrier &_Barrier) __GPU_ONLY
Ensures that global memory accesses are visible to other threads in the thread tile, and are executed according to program order
Definition: amp.h:7157
static _Ret_ void * _Create_section_buffer_shape(const _Buffer_descriptor &_Descriptor, const Concurrency::index< _Rank > &_Section_origin, const Concurrency::extent< _Rank > &_Section_extent) __GPU_ONLY
Definition: amp.h:2169
void copy_to(array< _Value_type, _Rank > &_Dest) const __CPU_ONLY
Copies elements from this array_view to the destination array.
Definition: amp.h:3356
array_view section(const Concurrency::index< _Rank > &_Section_origin, const Concurrency::extent< _Rank > &_Section_extent) const __GPU
Produces a subsection of the source array_view at the given origin and extent.
Definition: amp.h:2651
array(int _E0, int _E1) __CPU_ONLY
Construct an array from two integer extents.
Definition: amp.h:3886
_Array_view_base _Section(const Concurrency::index< _Rank > &_Section_origin, const Concurrency::extent< _Rank > &_Section_extent) const __GPU
Definition: amp.h:1851
void _Initialize(Concurrency::accelerator_view _Av, _InputIterator _Src_first, _InputIterator _Src_last, access_type _Cpu_access_type) __CPU_ONLY
Definition: amp.h:5552
array(int _E0, int _E1, Concurrency::accelerator_view _Av, Concurrency::accelerator_view _Associated_Av) __CPU_ONLY
Construct a staging array between two associated accelerator_views.
Definition: amp.h:4064
void _Parallel_for_each(_In_ _Host_Scheduling_info *_Sch_info, extent< _Rank > _Compute_domain, const _Kernel_type &_F)
static _Projection_result_type< _T, _R >::_Result_type _Project0(_In_ array< _T, _R > *_Array, int _I) __GPU
Definition: xxamp_inl.h:73
array(int _E0, _InputIterator _Src_first, _InputIterator _Src_last, Concurrency::accelerator_view _Av, Concurrency::accelerator_view _Associated_Av) __CPU_ONLY
Construct a staging array between two associated accelerator_views, initialized from a pair of iterato...
Definition: amp.h:4591
#define AS_FLOAT(v)
Definition: amp.h:6517
array_view(int _E0, _In_ _Value_type *_Src) __GPU
Construct an array_view which is bound to the data pointed to by _Src.
Definition: amp.h:3285
_Tuple_type< _Rank > operator-(const _Tuple_type< _Rank > &_Lhs, const _Tuple_type< _Rank > &_Rhs) __GPU
Definition: amp.h:830
int _M_base[_Rank]
Definition: amp.h:806
_AMPIMP _Event __cdecl _Copy_impl(_In_ _Buffer *_Src, size_t _Src_offset, _Out_ _Buffer *_Dst, size_t _Dest_offset, size_t _Num_elems, size_t _Preferred_copy_chunk_num_elems=0)
integral_constant< bool, true > true_type
Definition: xtr1common:47
index< _Rank > & operator=(const index< _Rank > &_Other) __GPU
copy-assignment operator
Definition: amp.h:153
extent< _Rank > & operator=(const extent< _Rank > &_Other) __GPU
copy-assignment operator
Definition: amp.h:484
index(int _I) __GPU
Constructor for index<1>
Definition: amp.h:95
Defines an N-dimensional index point, which may also be viewed as a vector based at the origin in N-sp...
Definition: amp.h:53
array_view(int _E0, int _E1, int _E2, _In_ _Value_type *_Src) __GPU
Construct an array_view which is bound to the data pointed to by _Src.
Definition: amp.h:3328
#define _Inout_
Definition: sal.h:384
void _Register(_In_ void *_Shape) __CPU_ONLY
Definition: amp.h:1984
_Array_view_base(const _Array_view_base &_Other) __GPU
Definition: amp.h:1679
array(const Concurrency::extent< _Rank > &_Extent, _InputIterator _Src_first) __CPU_ONLY
Construct an array initialized from an iterator.
Definition: amp.h:4124
tiled_index(const index< rank > &_Global, const index< rank > &_Local, const index< rank > &_Tile, const index< rank > &_Tile_origin, const tile_barrier &_Barrier) __GPU
A constructor that initializes data members using the given values.
Definition: amp.h:1207
_Tuple_type< _Rank > operator%(const _Tuple_type< _Rank > &_Lhs, typename _Tuple_type< _Rank >::value_type _Rhs) __GPU
Definition: amp.h:902
array & operator=(const array_view< const _Value_type, _Rank > &_Src) __CPU_ONLY
Assignment operator from an array_view
Definition: amp.h:4871
static _AMPIMP accelerator_view __cdecl get_auto_selection_view()
Returns the auto selection accelerator_view which when specified as the parallel_for_each target resu...
__declspec(deprecated("Concurrency::EnableTracing is a deprecated function.")) _CRTIMP HRESULT __cdecl EnableTracing()
Enables tracing in the Concurrency Runtime. This function is deprecated because ETW tracing is now on...
concurrency::completion_future synchronize_async() const __CPU_ONLY
Asynchronously synchronizes any modifications made to "this" array_view to its source data...
Definition: amp.h:3681
tiled_extent truncate() const __GPU
Returns a new tiled_extent with extents adjusted down to be evenly divisible by the tile dimensions...
Definition: amp.h:1308
array(int _E0, int _E1, _InputIterator _Src_first, _InputIterator _Src_last, Concurrency::accelerator_view _Av, access_type _Cpu_access_type=access_type_auto) __CPU_ONLY
Construct an array initialized from a pair of iterators into a container, bound to a specific acceler...
Definition: amp.h:4411
_Ret_ _Ubiquitous_buffer * _Get_buffer_ptr() const __CPU_ONLY
Definition: amprt.h:497
int operator[](unsigned _Index) const __GPU
Index operator.
Definition: amp.h:168
static _Projection_result_type< _T, _R >::_Const_result_type _Project0(const array_view< const _T, _R > *_Arr_view, int _I) __GPU
Definition: xxamp_inl.h:33
void _Initialize(size_t _Src_data_size) __CPU_ONLY
Definition: amp.h:3777
void __dp_d3d_device_memory_fence_with_tile_barrier() __GPU_ONLY
_Access_mode _Get_synchronize_access_mode(access_type cpu_access_type)
Definition: amprt.h:2003
extent< _Rank > & operator-=(const extent< _Rank > &_Rhs) __GPU
Element-wise subtraction of this extent with another extent.
Definition: amp.h:625
_Array_view_base(const void *_Data, const Concurrency::extent< _Rank > &_Array_extent) __GPU_ONLY
Definition: amp.h:1798
extent< _Rank > & operator-=(const index< _Rank > &_Rhs) __GPU
Element-wise subtraction of this extent with an index.
Definition: amp.h:655
void wait_with_global_memory_fence() const __GPU_ONLY
Blocks execution of all threads in a tile until all threads in the tile have reached this call...
Definition: amp.h:958
_Value_type value_type
Definition: amp.h:3850
Concurrency::index< _Rank > _M_view_offset
Definition: amp.h:1645
Class represents a accelerator abstraction for C++ AMP data-parallel devices
Definition: amprt.h:1089
unsigned int __dp_d3d_interlocked_max_uint(_Inout_ unsigned int *, unsigned int) __GPU_ONLY
const _Value_type value_type
Definition: amp.h:3018
void __dp_d3d_device_memory_fence() __GPU_ONLY
_Array_view_base & operator=(const _Array_view_base &_Other) __GPU
Definition: amp.h:1811
tiled_index< _Dim0 > _map_index(const index< rank > &_Local, const index< rank > &_Tile, const index< rank > &_Global, tile_barrier &_Barrier) const __GPU
Definition: amp.h:1490
#define INT_MAX
Definition: limits.h:40
An array_view is an N-dimensional view over data held in another container (such as array
Definition: amp.h:2200
array_view(int _E0, int _E1) __CPU_ONLY
Construct an array_view which is not bound to a data source.
Definition: amp.h:2354
array_view(_In_ _Arr_type(&_Src)[_Size]) __GPU
Construct an array_view which is bound to the array _Src.
Definition: amp.h:2446
void wait_with_tile_static_memory_fence() const __GPU_ONLY
Blocks execution of all threads in a tile until all threads in the tile have reached this call...
Definition: amp.h:967
array(int _E0, int _E1, _InputIterator _Src_first) __CPU_ONLY
Construct an array initialized from an iterator.
Definition: amp.h:4208
array_view(_Container &_Src, typename std::enable_if< details::_Is_container< _Container >::type::value, void ** >::type=0) __CPU_ONLY
Construct an array_view which is bound to the data contained in the _Src container.
Definition: amp.h:2317
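A one-liner sketch of the container-bound constructor (rank 1, extent deduced from the container's size), with the usual assumed includes:

    void container_view_sketch()
    {
        std::vector<int> v(100, 7);
        array_view<int, 1> av(v);                // shallow view; av.extent[0] == 100
    }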
array_view< _Value_type, 2 > section(int _I0, int _I1, int _E0, int _E1) __GPU
Produces a two-dimensional subsection of the source array with origin specified by the index componen...
Definition: amp.h:5293
#define __GPU_ONLY
Definition: amprt.h:42
_Array_view_base _Section(const index< _Rank > &_Idx) const __GPU
Definition: amp.h:1861
_FwdIt const _Ty _Val
Definition: algorithm:1938
void _Project0(int _I, _Array_view_shape< _Rank-1, _Element_size > &_Projected_shape) const __GPU
Definition: amp.h:1608
array_view< _Value_type, _New_rank > view_as(const Concurrency::extent< _New_rank > &_View_extent) const __GPU
Produces an array_view of a different rank over this array_view's data.
Definition: amp.h:2783
int imax(int _X, int _Y) __GPU_ONLY
Determine the maximum numeric value of the arguments
Definition: amp.h:7288
bool _Is_cpu_accelerator(const accelerator &_Accl)
Definition: amprt.h:3469
array_view(int _E0, int _E1, _In_ _Value_type *_Src) __GPU
Construct an array_view which is bound to the data pointed to by _Src.
Definition: amp.h:2467
int __dp_d3d_signi(int) __GPU_ONLY
_Check_return_ _In_ long _Size
Definition: io.h:325
void wait() const __GPU_ONLY
Blocks execution of all threads in a tile until all threads in the tile have reached this call...
Definition: amp.h:940
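A sketch of wait() coordinating a tile_static buffer inside a tiled kernel (same assumed includes); the per-tile reversal is only an illustration.

    void barrier_sketch()
    {
        std::vector<float> data(1024, 1.0f);
        array_view<float, 1> av(1024, data);
        parallel_for_each(av.extent.tile<256>(), [=](tiled_index<256> tidx) restrict(amp) {
            tile_static float buf[256];          // scratch storage shared by the tile
            buf[tidx.local[0]] = av[tidx.global];
            tidx.barrier.wait();                 // every thread in the tile reaches this point
            av[tidx.global] = buf[255 - tidx.local[0]];   // safe: buf is fully populated
        });
        av.synchronize();
    }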
array_view(int _E0, int _E1, int _E2, _Container &_Src) __CPU_ONLY
Construct an array_view which is bound to the data contained in the _Src container.
Definition: amp.h:2415
unsigned int __dp_d3d_countbitsu(unsigned int) __GPU_ONLY
array_view(const Concurrency::extent< _Rank > &_Extent, const _Container &_Src) __CPU_ONLY
Construct an array_view which is bound to the data contained in the _Src container.
Definition: amp.h:3064
tiled_index(const tiled_index &_Other) __GPU
Copy Constructor.
Definition: amp.h:1164
array_view & operator=(const array_view &_Other) __GPU
Copy Assignment operator. Shallow copy.
Definition: amp.h:2500
int atomic_fetch_add(_Inout_ int *_Dest, int _Value) __GPU_ONLY
Performs an atomic addition of _Value to the memory location pointed to by _Dest
Definition: amp.h:6531
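A sketch of a shared counter updated with atomic_fetch_add, assuming the earlier includes; count_positives_sketch is a hypothetical name.

    void count_positives_sketch(const std::vector<int>& input)
    {
        std::vector<int> count_storage(1, 0);
        array_view<const int, 1> in((int)input.size(), input);
        array_view<int, 1> count(1, count_storage);
        parallel_for_each(in.extent, [=](index<1> idx) restrict(amp) {
            if (in[idx] > 0)
                atomic_fetch_add(&count[0], 1);  // atomic add avoids a data race on the counter
        });
        count.synchronize();                     // count_storage[0] holds the total
    }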
_Array_view_shape() __GPU
Definition: amp.h:1627
array_view section(int _I0, int _I1, int _I2, int _E0, int _E1, int _E2) const __GPU
Produces a three-dimensional subsection of the source array_view with origin specified by the index c...
Definition: amp.h:3602
array_view section(const Concurrency::index< _Rank > &_Idx) const __GPU
Produces a subsection of the source array_view with origin specified by an index, with an extent of (...
Definition: amp.h:2666
extent< _Rank > operator--(int) __GPU
Post-decrements each element of this extent.
Definition: amp.h:779
index< _Rank > _map_index(const index< _Rank > &_Index) const __GPU
Definition: amp.h:789
tile_barrier(const tile_barrier &_Other) __GPU
Copy Constructor. The tile_barrier class does not have a public default constructor or assignment ope...
Definition: amp.h:934
_Array_view_shape(const _Array_view_shape &_Other, const Concurrency::index< _Rank > &_Section_origin, const Concurrency::extent< _Rank > &_Section_extent) __GPU
Definition: amp.h:1562
void _Initialize() __GPU
Definition: amp.h:2973
_Ret_ void * _Access(_Access_mode _Requested_mode, const index< _Rank > &_Index) const __GPU_ONLY
Definition: amp.h:1844
float __dp_d3d_noisef(float) __GPU_ONLY
array_view< const _Value_type, 2 > section(int _I0, int _I1, int _E0, int _E1) const __GPU
Produces a two-dimensional subsection of the source array with origin specified by the index componen...
Definition: amp.h:5318
_CPP_AMP_VERIFY_RANK(_Rank, tiled_index)
details::_Projection_result_type< _Value_type, _Rank >::_Const_result_type operator[](int _I) const __GPU
Projects the most-significant dimension of this array. If the array rank is 1, this produces a single...
Definition: amp.h:4997
bool operator==(const _Tuple_type< _Rank > &_Lhs, const _Tuple_type< _Rank > &_Rhs) __GPU
Definition: amp.h:810
static _Ret_ void * _Create_projection_buffer_shape(const _Buffer_descriptor &_Descriptor, unsigned int _Dim, int _Dim_offset) __CPU_ONLY
Definition: amp.h:2027
unsigned int __dp_d3d_minu(unsigned int, unsigned int) __GPU_ONLY
_AMPIMP void _Write_end_event(ULONG _Span_id)
unsigned int _Get_rank() const
Definition: amprt.h:1667
tiled_extent pad() const __GPU
Returns a new tiled_extent with extents adjusted up to be evenly divisible by the tile dimensions...
Definition: amp.h:1296
#define _Ret_
Definition: sal.h:1005
concurrency::completion_future copy_async(const array< _Value_type, _Rank > &_Src, array< _Value_type, _Rank > &_Dest)
Asynchronously copies the contents of the source array into the destination array.
Definition: amp.h:5956
extent< _Rank > & operator*=(int _Rhs) __GPU
Multiplies each element of this extent by an integer value.
Definition: amp.h:700
concurrency::completion_future _Start_async_op_wait_event_helper(ULONG _Async_op_id, _Event _Ev)
Definition: amprt.h:3912
int __dp_d3d_interlocked_max_int(_Inout_ int *, int) __GPU_ONLY
int firstbithigh(int _X) __GPU_ONLY
Gets the location of the first set bit in _X, starting from the highest order bit and working downwar...
Definition: amp.h:7257
float __dp_d3d_stepf(float, float) __GPU_ONLY
access_type
Enumeration type used to denote the various types of access to data.
Definition: amprt.h:97
_Array_view_base(const _Array_view_base &_Other, const Concurrency::extent< _Rank > &_Array_extent) __GPU
Definition: amp.h:1688
_Pre_maybenull_ _Post_z_ char _Pre_maybenull_ _Post_z_ char _Pre_maybenull_ _Post_z_ char _Pre_maybenull_ _Post_z_ char * _Ext
Definition: stdlib.h:854
bool contains(const index< rank > &_Index) const __GPU
Tests whether the index "_Index" is properly contained within this extent.
Definition: amp.h:529
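A tiny sketch of contains(), which also works in CPU-restricted code (same assumed includes):

    void contains_sketch()
    {
        extent<2> ext(4, 8);
        bool inside  = ext.contains(index<2>(3, 7));   // true: both coordinates are in range
        bool outside = ext.contains(index<2>(4, 0));   // false: the first coordinate equals the bound
        (void)inside; (void)outside;
    }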
array(const array_view< const _Value_type, _Rank > &_Src, accelerator_view _Av, accelerator_view _Associated_Av) __CPU_ONLY
Construct a staging array between two associated accelerator_views, initialized from an array_view...
Definition: amp.h:4795
tiled_extent truncate() const __GPU
Returns a new tiled_extent with extents adjusted down to be evenly divisible by the tile dimensions...
Definition: amp.h:1393
tiled_index(const index< rank > &_Global, const index< rank > &_Local, const index< rank > &_Tile, const index< rank > &_Tile_origin, const tile_barrier &_Barrier) __GPU
A constructor that initializes data members using the given values.
Definition: amp.h:1150
array(int _E0, Concurrency::accelerator_view _Av, access_type _Cpu_access_type=access_type_auto) __CPU_ONLY
Construct an array with the extent _E0, bound to a specific accelerator_view.
Definition: amp.h:3950
float step(float _Y, float _X) __GPU_ONLY
Compares two values, returning 0 or 1 based on which value is greater
Definition: amp.h:7540
array_view< _Value_type, _New_rank > view_as(const Concurrency::extent< _New_rank > &_View_extent) __GPU
Produces an array_view of a different rank over this array's data.
Definition: amp.h:5426
static _Projection_result_type< _T, _R >::_Const_result_type _Project0(const array< _T, _R > *_Array, int _I) __GPU
Definition: xxamp_inl.h:65
index(int _I0, int _I1, int _I2) __GPU
Constructor for index<3>
Definition: amp.h:129
int atomic_fetch_min(_Inout_ int *_Dest, int _Value) __GPU_ONLY
Atomically computes the minimum of _Value and the value of the memory location pointed to by _Dest...
Definition: amp.h:6841
array(int _E0, int _E1, int _E2, _InputIterator _Src_first, Concurrency::accelerator_view _Av, Concurrency::accelerator_view _Associated_Av) __CPU_ONLY
Construct a staging array between two associated accelerator_views, initialized from an iterator into ...
Definition: amp.h:4736
array_view(const array_view< const _Value_type, _Rank > &_Src) __GPU
Copy constructor. Shallow copy.
Definition: amp.h:3050
array_view(int _E0, int _E1, _Container &_Src) __CPU_ONLY
Construct an array_view which is bound to the data contained in the _Src container.
Definition: amp.h:2373
void tile_static_memory_fence(const tile_barrier &_Barrier) __GPU_ONLY
Ensures that tile_static memory accesses are visible to other threads in the thread tile...
Definition: amp.h:7168
array_view(const Concurrency::extent< _Rank > &_Extent, const _Value_type *_Src) __GPU
Construct an array_view which is bound to the data pointed to by _Src.
Definition: amp.h:3118