53 template <int _Rank> class index
58 template <typename _Value_type, int _Rank>
61 template <int _Rank, int _Element_size>
64 template <int _Rank, int _Element_size>
67 static const int rank = _Rank;
97 static_assert(_Rank == 1, "This constructor can only be used to construct an index<1> object.");
112 static_assert(_Rank == 2, "This constructor can only be used to construct an index<2> object.");
131 static_assert(_Rank == 3, "This constructor can only be used to construct an index<3> object.");
345 template<class _Tuple_type>
388 template <typename _Value_type, int _Rank>
391 template <int _Rank, int _Element_size>
394 template <int _Rank, int _Element_size>
428 static_assert(_Rank == 1, "This constructor can only be used to construct an extent<1> object.");
443 static_assert(_Rank == 2, "This constructor can only be used to construct an extent<2> object.");
462 static_assert(_Rank == 3, "This constructor can only be used to construct an extent<3> object.");
539 static_assert(rank == 1, "One-dimensional tile() method only available on extent<1>");
540 static_assert(_Dim0>0, "All tile dimensions must be positive");
550 static_assert(rank == 2, "Two-dimensional tile() method only available on extent<2>");
551 static_assert(_Dim0>0 && _Dim1>0, "All tile dimensions must be positive");
561 static_assert(rank == 3, "Three-dimensional tile() method only available on extent<3>");
562 static_assert(_Dim0>0 && _Dim1>0 && _Dim2>0, "All tile dimensions must be positive");
794 template<class _Tuple_type>
820 template <int _Rank, template <int> class _Tuple_type>
821 typename std::enable_if<details::_Is_extent_or_index<_Tuple_type<_Rank>>::value, bool>::type
827 template <int _Rank, template <int> class _Tuple_type>
828 typename std::enable_if<details::_Is_extent_or_index<_Tuple_type<_Rank>>::value, bool>::type
834 template <int _Rank, template <int> class _Tuple_type>
835 typename std::enable_if<details::_Is_extent_or_index<_Tuple_type<_Rank>>::value, _Tuple_type<_Rank>>::type
838 _Tuple_type<_Rank> new_Tuple = details::_Create_uninitialized_tuple<_Tuple_type<_Rank>>();
843 template <int _Rank, template <int> class _Tuple_type>
844 typename std::enable_if<details::_Is_extent_or_index<_Tuple_type<_Rank>>::value, _Tuple_type<_Rank>>::type
847 _Tuple_type<_Rank> new_Tuple = details::_Create_uninitialized_tuple<_Tuple_type<_Rank>>();
852 template <int _Rank, template <int> class _Tuple_type>
853 typename std::enable_if<details::_Is_extent_or_index<_Tuple_type<_Rank>>::value, _Tuple_type<_Rank>>::type
854 operator+(const _Tuple_type<_Rank>& _Lhs, typename _Tuple_type<_Rank>::value_type _Rhs) __GPU
856 _Tuple_type<_Rank> new_Tuple = details::_Create_uninitialized_tuple<_Tuple_type<_Rank>>();
861 template <int _Rank, template <int> class _Tuple_type>
862 typename std::enable_if<details::_Is_extent_or_index<_Tuple_type<_Rank>>::value, _Tuple_type<_Rank>>::type
863 operator+(typename _Tuple_type<_Rank>::value_type _Lhs, const _Tuple_type<_Rank>& _Rhs) __GPU
865 _Tuple_type<_Rank> new_Tuple = details::_Create_uninitialized_tuple<_Tuple_type<_Rank>>();
870 template <int _Rank, template <int> class _Tuple_type>
871 typename std::enable_if<details::_Is_extent_or_index<_Tuple_type<_Rank>>::value, _Tuple_type<_Rank>>::type
872 operator-(const _Tuple_type<_Rank>& _Lhs, typename _Tuple_type<_Rank>::value_type _Rhs) __GPU
874 _Tuple_type<_Rank> new_Tuple = details::_Create_uninitialized_tuple<_Tuple_type<_Rank>>();
879 template <int _Rank, template <int> class _Tuple_type>
880 typename std::enable_if<details::_Is_extent_or_index<_Tuple_type<_Rank>>::value, _Tuple_type<_Rank>>::type
881 operator-(typename _Tuple_type<_Rank>::value_type _Lhs, const _Tuple_type<_Rank>& _Rhs) __GPU
883 _Tuple_type<_Rank> new_Tuple = details::_Create_uninitialized_tuple<_Tuple_type<_Rank>>();
888 template <int _Rank, template <int> class _Tuple_type>
889 typename std::enable_if<details::_Is_extent_or_index<_Tuple_type<_Rank>>::value, _Tuple_type<_Rank>>::type
890 operator*(const _Tuple_type<_Rank>& _Lhs, typename _Tuple_type<_Rank>::value_type _Rhs) __GPU
892 _Tuple_type<_Rank> new_Tuple = details::_Create_uninitialized_tuple<_Tuple_type<_Rank>>();
897 template <int _Rank, template <int> class _Tuple_type>
898 typename std::enable_if<details::_Is_extent_or_index<_Tuple_type<_Rank>>::value, _Tuple_type<_Rank>>::type
899 operator*(typename _Tuple_type<_Rank>::value_type _Lhs, const _Tuple_type<_Rank>& _Rhs) __GPU
901 _Tuple_type<_Rank> new_Tuple = details::_Create_uninitialized_tuple<_Tuple_type<_Rank>>();
906 template <int _Rank, template <int> class _Tuple_type>
907 typename std::enable_if<details::_Is_extent_or_index<_Tuple_type<_Rank>>::value, _Tuple_type<_Rank>>::type
908 operator/(const _Tuple_type<_Rank>& _Lhs, typename _Tuple_type<_Rank>::value_type _Rhs) __GPU
910 _Tuple_type<_Rank> new_Tuple = details::_Create_uninitialized_tuple<_Tuple_type<_Rank>>();
915 template <int _Rank, template <int> class _Tuple_type>
916 typename std::enable_if<details::_Is_extent_or_index<_Tuple_type<_Rank>>::value, _Tuple_type<_Rank>>::type
917 operator/(typename _Tuple_type<_Rank>::value_type _Lhs, const _Tuple_type<_Rank>& _Rhs) __GPU
919 _Tuple_type<_Rank> new_Tuple = details::_Create_uninitialized_tuple<_Tuple_type<_Rank>>();
924 template <int _Rank, template <int> class _Tuple_type>
925 typename std::enable_if<details::_Is_extent_or_index<_Tuple_type<_Rank>>::value, _Tuple_type<_Rank>>::type
926 operator%(const _Tuple_type<_Rank>& _Lhs, typename _Tuple_type<_Rank>::value_type _Rhs) __GPU
928 _Tuple_type<_Rank> new_Tuple = details::_Create_uninitialized_tuple<_Tuple_type<_Rank>>();
933 template <int _Rank, template <int> class _Tuple_type>
934 typename std::enable_if<details::_Is_extent_or_index<_Tuple_type<_Rank>>::value, _Tuple_type<_Rank>>::type
935 operator%(typename _Tuple_type<_Rank>::value_type _Lhs, const _Tuple_type<_Rank>& _Rhs) __GPU
937 _Tuple_type<_Rank> new_Tuple = details::_Create_uninitialized_tuple<_Tuple_type<_Rank>>();
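For reference, a minimal usage sketch (not part of amp.h) of the scalar operator overloads declared above; it assumes only the public index<N>/extent<N> API:

    #include <amp.h>
    using namespace concurrency;

    int main() {
        extent<2> e(4, 6);
        extent<2> doubled = e * 2;       // enable_if-selected operator*: (8, 12)
        index<2> i(3, 5);
        index<2> shifted = i + 1;        // operator+(tuple, scalar): (4, 6)
        return doubled[0] + shifted[1];  // 8 + 6 = 14
    }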
958 #pragma warning( suppress : 4100 ) // unreferenced formal parameter
1069 local(_Other.local),
1145 static const int tile_dim0 = _Dim0;
1146 static const int tile_dim1 = _Dim1;
1147 static const int tile_dim2 = _Dim2;
1153 template <int _Dim0, int _Dim1>
1203 static const int tile_dim0 = _Dim0;
1204 static const int tile_dim1 = _Dim1;
1210 template <int _Dim0>
1260 static const int tile_dim0 = _Dim0;
1278 static_assert(_Dim0>0, "_Dim0 must be positive");
1279 static_assert(_Dim1>0, "_Dim1 must be positive");
1280 static_assert(_Dim2>0, "_Dim2 must be positive");
1324 ((static_cast<unsigned int>((*this)[1]) + _Dim1 - 1)/_Dim1) * _Dim1,
1325 ((static_cast<unsigned int>((*this)[2]) + _Dim2 - 1)/_Dim2) * _Dim2);
1335 Concurrency::extent<rank> _New_extent(((*this)[0]/_Dim0) * _Dim0, ((*this)[1]/_Dim1) * _Dim1, ((*this)[2]/_Dim2) * _Dim2);
1342 static const int tile_dim0 = _Dim0;
1343 static const int tile_dim1 = _Dim1;
1344 static const int tile_dim2 = _Dim2;
1353 index<rank> _Tile_origin = details::_Create_uninitialized_tuple<index<rank>>();
1361 template <int _Dim0, int _Dim1>
1366 static_assert(_Dim0>0, "_Dim0 must be positive");
1367 static_assert(_Dim1>0, "_Dim1 must be positive");
1411 ((static_cast<unsigned int>((*this)[1]) + _Dim1 - 1)/_Dim1) * _Dim1);
1427 static const int tile_dim0 = _Dim0;
1428 static const int tile_dim1 = _Dim1;
1437 index<rank> _Tile_origin = details::_Create_uninitialized_tuple<index<rank>>();
1444 template <int _Dim0>
1449 static_assert(_Dim0>0, "_Dim0 must be positive");
1508 static const int tile_dim0 = _Dim0;
1517 index<rank> _Tile_origin = details::_Create_uninitialized_tuple<index<rank>>();
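A small sketch (not part of the header) of the round-up/round-down arithmetic that pad() and truncate() implement above, assuming the public extent::tile API:

    #include <amp.h>
    using namespace concurrency;

    int main() {
        extent<2> e(999, 666);
        tiled_extent<16, 16> t = e.tile<16, 16>();
        tiled_extent<16, 16> padded  = t.pad();      // ((999+15)/16)*16 = 1008, ((666+15)/16)*16 = 672
        tiled_extent<16, 16> trimmed = t.truncate(); // (999/16)*16 = 992,  (666/16)*16 = 656
        return padded[0] - trimmed[0];               // 16
    }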
1526 template <int _Old_element_size, int _New_element_size>
1529 int _Total_size = _Old_element_size * _Old_size;
1530 int _New_size = (_Total_size + _New_element_size - 1)/ _New_element_size;
1536 template <int _Old_element_size, int _New_element_size>
1539 int _Total_size = _Old_element_size * _Old_size;
1540 int _New_size = (_Total_size + _New_element_size - 1)/ _New_element_size;
1542 if (_New_size * _New_element_size > _Total_size)
1543 throw runtime_exception("Element type of reinterpret_as does not evenly divide into extent", E_INVALIDARG);
1552 template <int _Rank, int _Element_size>
1565 return _M_view_extent;
1573 return (_M_total_linear_offset - (_Element_size * _Flatten_helper::func(_M_array_multiplier._M_base, _M_view_offset._M_base)));
1578 _M_array_extent(_Other._M_array_extent),
1579 _M_array_multiplier(_Other._M_array_multiplier),
1580 _M_view_offset(_Other._M_view_offset),
1581 _M_total_linear_offset(_Other._M_total_linear_offset),
1582 _M_view_extent(_Other._M_view_extent)
1589 _M_array_extent(_Other._M_array_extent),
1590 _M_array_multiplier(_Other._M_array_multiplier),
1591 _M_view_offset(_Other._M_view_offset + _Section_origin),
1592 _M_view_extent(_Section_extent)
1596 _M_total_linear_offset = _Other._Base_linear_offset() + (_Element_size * _Flatten_helper::func(_M_array_multiplier._M_base, _M_view_offset._M_base));
1601 _M_array_extent(_Array_extent),
1603 _M_total_linear_offset(_Base_linear_offset),
1604 _M_view_extent(_Array_extent)
1606 _Initialize_multiplier();
1612 _M_array_extent(_Array_extent),
1613 _M_view_offset(_Section_origin),
1614 _M_total_linear_offset(_Base_linear_offset),
1615 _M_view_extent(_Section_extent)
1619 _Initialize_multiplier();
1620 _M_total_linear_offset += (_Element_size * _Flatten_helper::func(_M_array_multiplier._M_base, _M_view_offset._M_base));
1625 _M_array_extent = _Other._M_array_extent;
1626 _M_array_multiplier = _Other._M_array_multiplier;
1627 _M_view_offset = _Other._M_view_offset;
1628 _M_total_linear_offset = _Other._M_total_linear_offset;
1629 _M_view_extent = _Other._M_view_extent;
1635 static_assert(_Rank > 1, "Projection is only supported on array_views with a rank of 2 or higher");
1644 _Projected_shape._M_array_extent, this->_M_array_extent,
1645 _Projected_shape._M_array_multiplier, this->_M_array_multiplier,
1646 _Projected_shape._M_view_offset, this->_M_view_offset,
1647 _Projected_shape._M_view_extent, this->_M_view_extent);
1649 _Projected_shape._M_total_linear_offset = _M_total_linear_offset + (_Element_size * _I * _M_array_multiplier[0]);
1653 : _M_array_extent(details::_do_not_initialize), _M_array_multiplier(details::_do_not_initialize),
1654 _M_view_offset(details::_do_not_initialize), _M_view_extent(details::_do_not_initialize)
1663 unsigned int _Ext = _M_array_extent[_Rank-1];
1675 template <int _Rank, int _Element_size>
1680 template <int _R, int _S>
1699 _M_buffer_descriptor(_Buffer_desc),
1708 _M_buffer_descriptor(_Other._M_buffer_descriptor),
1712 _Register_copy(_Other);
1715 _M_buffer_descriptor = _Other._M_buffer_descriptor;
1720 _M_buffer_descriptor(_Other._M_buffer_descriptor),
1729 _M_buffer_descriptor(_Other._M_buffer_descriptor),
1738 _M_buffer_descriptor(_Buffer_desc),
1747 _M_buffer_descriptor(_Buffer_desc),
1755 const _Buffer_descriptor& _Buffer_desc,
1756 int _Base_linear_offset,
1762 _M_buffer_descriptor(_Buffer_desc),
1766 _Register(_Buffer_desc._Get_view_key());
1770 const _Buffer_descriptor& _Buffer_desc,
1771 int _Base_linear_offset,
1777 _M_buffer_descriptor(_Buffer_desc),
1787 _M_buffer_descriptor(_Buffer_desc),
1798 _Ubiquitous_buffer_ptr _PUBuf = _Ubiquitous_buffer::_Create_ubiquitous_buffer(_Array_extent.size(), _Element_size * sizeof(int));
1809 if (_Data == NULL) {
1810 throw runtime_exception("Invalid pointer argument (NULL) to array_view constructor", E_INVALIDARG);
1831 if (_Data == NULL) {
1832 throw runtime_exception("Invalid pointer argument (NULL) to array_view constructor", E_INVALIDARG);
1845 #pragma warning( push )
1846 #pragma warning( disable : 4880 )
1852 #pragma warning( pop )
1858 if (this != &_Other)
1863 _M_buffer_descriptor = _Other._M_buffer_descriptor;
1867 _Register_copy(_Other);
1870 _M_buffer_descriptor = _Other._M_buffer_descriptor;
1878 int * _Ptr = reinterpret_cast<int *>(_M_buffer_descriptor._M_data_ptr);
1879 return &_Ptr[this->_M_total_linear_offset + (_Element_size * _Flatten_helper::func(this->_M_array_multiplier._M_base, _Index._M_base))];
1885 if ((_M_buffer_descriptor._M_curr_cpu_access_mode & _Requested_mode) != _Requested_mode) {
1886 _M_buffer_descriptor._Get_CPU_access(_Requested_mode);
1889 return _Access(_Index);
1894 return _Access(_Index);
1902 _View._Register(_Array_view_base::_Create_section_buffer_shape(this->_M_buffer_descriptor, _Section_origin, _Section_extent));
1909 return _Section(_Idx, this->extent - _Idx);
1914 _Projected_view._M_buffer_descriptor = this->_M_buffer_descriptor;
1918 _Projected_view._Register(_Array_view_base::_Create_projection_buffer_shape(this->_M_buffer_descriptor, 0, _I));
1921 template <int _New_element_size>
1924 static_assert(_Rank==1, "reinterpret_as is only permissible on array views of rank 1");
1925 int _New_size = _Calculate_reinterpreted_size<_Element_size,_New_element_size>(this->_M_view_extent.size());
1927 this->_M_total_linear_offset,
1931 template <int _New_rank>
1934 static_assert(_Rank==1, "view_as is only permissible on array views of rank 1");
1936 this->_M_total_linear_offset,
1944 unsigned int bufElemSize = static_cast<unsigned int>(_M_buffer_descriptor._Get_buffer_ptr()->_Get_master_buffer_elem_size());
1945 unsigned int elemSize = _Element_size * sizeof(int);
1947 size_t linearOffsetInBytes = this->_Base_linear_offset() * sizeof(int);
1949 size_t baseLSDExtentInBytes = this->_M_array_extent[_Rank - 1];
1950 baseLSDExtentInBytes *= elemSize;
1952 size_t viewLSDOffsetInBytes = this->_M_view_offset[_Rank - 1];
1953 viewLSDOffsetInBytes *= elemSize;
1955 size_t viewLSDExtentInBytes = this->_M_view_extent[_Rank - 1];
1956 viewLSDExtentInBytes *= elemSize;
1960 if (((linearOffsetInBytes % bufElemSize) != 0) ||
1961 ((baseLSDExtentInBytes % bufElemSize) != 0) ||
1962 ((viewLSDOffsetInBytes % bufElemSize) != 0) ||
1963 ((viewLSDExtentInBytes % bufElemSize) != 0))
1965 throw runtime_exception("The array_view base extent, view offset and/or view extent is incompatible with the underlying buffer", E_FAIL);
1970 _ASSERTE((linearOffsetInBytes / bufElemSize) <= UINT_MAX);
1971 unsigned int linearOffset = static_cast<unsigned int>(linearOffsetInBytes / bufElemSize);
1973 unsigned int baseExtent[_Rank];
1974 unsigned int viewOffset[_Rank];
1975 unsigned int viewExtent[_Rank];
1976 #pragma warning( push )
1977 #pragma warning( disable : 6294 )
1978 #pragma warning( disable : 6201 ) // Index '-1' is out of valid index range '0' to '0' for possibly stack allocated buffer 'baseExtent'.
1979 for (int i = 0; i < _Rank - 1; ++i) {
1980 baseExtent[i] = this->_M_array_extent[i];
1981 viewOffset[i] = this->_M_view_offset[i];
1982 viewExtent[i] = this->_M_view_extent[i];
1984 #pragma warning( pop )
1988 _ASSERTE((baseLSDExtentInBytes / bufElemSize) <= UINT_MAX);
1989 baseExtent[_Rank - 1] = static_cast<unsigned int>(baseLSDExtentInBytes / bufElemSize);
1991 _ASSERTE((viewLSDOffsetInBytes / bufElemSize) <= UINT_MAX);
1992 viewOffset[_Rank - 1] = static_cast<unsigned int>(viewLSDOffsetInBytes / bufElemSize);
1994 _ASSERTE((viewLSDExtentInBytes / bufElemSize) <= UINT_MAX);
1995 viewExtent[_Rank - 1] = static_cast<unsigned int>(viewLSDExtentInBytes / bufElemSize);
1997 return _View_shape::_Create_view_shape(_Rank, linearOffset, baseExtent, viewOffset, viewExtent);
2011 _Create_buffer_view_shape(),
2022 _M_buffer_descriptor._M_data_ptr = _PBuf->_Get_host_ptr();
2033 if (_Shape == NULL) {
2052 _M_buffer_descriptor._M_data_ptr = _PBuf->_Get_host_ptr();
2059 _Throw_exception = true;
2068 if (_Throw_exception) {
2078 std::vector<unsigned int> _New_view_extent(_Base_shape->_Get_rank());
2079 std::vector<unsigned int> _New_view_offset(_Base_shape->_Get_rank());
2080 bool *_New_projection_info = new bool[_Base_shape->_Get_rank()];
2081 for (unsigned int _I = 0; _I < _Base_shape->_Get_rank(); ++_I)
2089 unsigned int _UnProjectedDimCount = 0;
2090 for (unsigned int _I = 0; _I < _Base_shape->_Get_rank(); ++_I)
2096 if (_UnProjectedDimCount == _Dim) {
2097 _New_view_extent[_I] = 1;
2098 _New_view_offset[_I] += _Dim_offset;
2099 _New_projection_info[_I] = true;
2103 _UnProjectedDimCount++;
2107 auto _PView_shape = _View_shape::_Create_view_shape(_Base_shape->_Get_rank(),
2110 _New_view_offset.data(),
2111 _New_view_extent.data(),
2112 _New_projection_info);
2114 delete [] _New_projection_info;
2116 return _PView_shape;
2123 if (_Base_shape->_Get_rank() == _Rank) {
2127 std::vector<unsigned int> _New_view_extent(_Base_shape->_Get_rank());
2128 std::vector<unsigned int> _New_view_offset(_Base_shape->_Get_rank());
2129 unsigned int _I = 0, _J = 0;
2130 while (_I < _Base_shape->_Get_rank())
2141 if (_J == (_Rank - 1))
2143 unsigned int bufElemSize = static_cast<unsigned int>(_Descriptor._Get_buffer_ptr()->_Get_master_buffer_elem_size());
2144 unsigned int elemSize = _Element_size * sizeof(int);
2146 size_t sectionLSDOriginInBytes = _Section_origin[_J];
2147 sectionLSDOriginInBytes *= elemSize;
2149 size_t sectionLSDExtentInBytes = _Section_extent[_J];
2150 sectionLSDExtentInBytes *= elemSize;
2154 if (((sectionLSDOriginInBytes % bufElemSize) != 0) ||
2155 ((sectionLSDExtentInBytes % bufElemSize) != 0))
2157 throw runtime_exception("The array_view section origin and/or extent is incompatible with the underlying buffer", E_FAIL);
2162 _ASSERTE((sectionLSDOriginInBytes / bufElemSize) <= UINT_MAX);
2163 _New_view_offset[_I] = _Base_shape->_Get_view_offset()[_I] + static_cast<unsigned int>(sectionLSDOriginInBytes / bufElemSize);
2165 _ASSERTE((sectionLSDExtentInBytes / bufElemSize) <= UINT_MAX);
2166 _New_view_extent[_I] = static_cast<unsigned int>(sectionLSDExtentInBytes / bufElemSize);
2170 _New_view_extent[_I] = _Section_extent[_J];
2171 _New_view_offset[_I] = _Base_shape->_Get_view_offset()[_I] + _Section_origin[_J];
2180 _ASSERTE(_J == _Rank);
2182 return _View_shape::_Create_view_shape(_Base_shape->_Get_rank(),
2185 _New_view_offset.data(),
2186 _New_view_extent.data(),
2215 template<typename _Container>
2218 template<class _Uty> static auto _Fn(_Uty _Val, decltype(_Val.size(), _Val.data(), 0)) -> std::true_type;
2219 template<class _Uty> static auto _Fn(_Uty _Val, ...) -> std::false_type;
2220 typedef decltype(_Fn(std::declval<_Container>(),0)) type;
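A stand-alone sketch (hypothetical names, not from amp.h) of the same expression-SFINAE idiom the _Is_container detector above uses: the first overload is viable only when the argument type has both size() and data().

    #include <type_traits>
    #include <vector>

    // First overload wins only when v.size() and v.data() are well-formed.
    template <class T>
    static auto has_size_and_data(T v, decltype(v.size(), v.data(), 0)) -> std::true_type;
    template <class T>
    static auto has_size_and_data(T v, ...) -> std::false_type;

    static_assert(decltype(has_size_and_data(std::declval<std::vector<int>>(), 0))::value,
                  "std::vector has size() and data()");
    static_assert(!decltype(has_size_and_data(std::declval<int>(), 0))::value,
                  "int does not");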
2241 static_assert(0 == (sizeof(_Value_type) % sizeof(int)), "only value types whose size is a multiple of the size of an integer are allowed in array views");
2252 template <typename _T, int _R>
2255 friend const _Buffer_descriptor& details::_Get_buffer_descriptor<array_view<_Value_type, _Rank>>(const array_view<_Value_type, _Rank>& _Array) __GPU;
2258 static const int rank = _Rank;
2298 _Initialize(_Extent.size(), true);
2313 static_assert( std::is_same<decltype(_Src.data()), _Value_type*>::value, "container element type and array view element type must match");
2314 _Initialize(_Src.size());
2342 static_assert(_Rank == 1, "rank must be 1");
2343 _Initialize(this->get_extent().size(), true);
2357 throw runtime_exception("Invalid _Src container argument - _Src size is greater than INT_MAX", E_INVALIDARG);
2359 static_assert( std::is_same<decltype(_Src.data()), _Value_type*>::value, "container element type and array view element type must match");
2360 static_assert(_Rank == 1, "rank must be 1");
2361 _Initialize(_Src.size());
2376 static_assert( std::is_same<decltype(_Src.data()), _Value_type*>::value, "container element type and array view element type must match");
2377 static_assert(_Rank == 1, "rank must be 1");
2378 _Initialize(_Src.size());
2393 static_assert(_Rank == 2, "rank must be 2");
2394 _Initialize(this->get_extent().size(), true);
2412 static_assert( std::is_same<decltype(_Src.data()), _Value_type*>::value, "container element type and array view element type must match");
2413 static_assert(_Rank == 2, "rank must be 2");
2414 _Initialize(_Src.size());
2432 static_assert(_Rank == 3, "rank must be 3");
2433 _Initialize(this->get_extent().size(), true);
2454 static_assert( std::is_same<decltype(_Src.data()), _Value_type*>::value, "container element type and array view element type must match");
2455 static_assert(_Rank == 3, "rank must be 3");
2456 _Initialize(_Src.size());
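A short sketch (not from the header) of the container constructors whose rank and element-type checks appear above; each view wraps the same std::vector storage:

    #include <amp.h>
    #include <vector>
    using namespace concurrency;

    int main() {
        std::vector<int> v(24);
        array_view<int, 1> flat(24, v);        // rank-1 ctor: extent (24)
        array_view<int, 2> grid(4, 6, v);      // rank-2 ctor: extent (4, 6)
        array_view<int, 3> cube(2, 3, 4, v);   // rank-3 ctor: extent (2, 3, 4)
        return flat.extent[0] + grid.extent[1] + cube.extent[2];  // 24 + 6 + 4 = 34
    }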
2472 static_assert(_Rank == 1, "rank must be 1");
2485 static_assert(_Rank == 1, "rank must be 1");
2505 static_assert(_Rank == 2, "rank must be 2");
2528 static_assert(_Rank == 3, "rank must be 3");
2537 _Base::operator=(_Other);
2587 void *_Ptr = _Access(_Index);
2588 return *reinterpret_cast<value_type*>(_Ptr);
2602 return this->operator()(_Index);
2617 return *reinterpret_cast<_Value_type*>(_Ptr);
2647 _Value_type& operator() (int _I0, int _I1) const __GPU
2649 static_assert(_Rank == 2, "value_type& array_view::operator()(int,int) is only permissible on array_view<T, 2>");
2650 return this->operator()(index<2>(_I0,_I1));
2668 _Value_type& operator() (int _I0, int _I1, int _I2) const __GPU
2670 static_assert(_Rank == 3, "value_type& array_view::operator()(int,int,int) is only permissible on array_view<T, 3>");
2671 return this->operator()(index<3>(_I0,_I1,_I2));
2688 return _Convert<_Value_type>(_Section(_Section_origin, _Section_extent));
2703 return section(_Idx, this->extent - _Idx);
2736 static_assert(_Rank == 1, "rank must be 1");
2761 static_assert(_Rank == 2, "rank must be 2");
2792 static_assert(_Rank == 3, "rank must be 3");
2806 return _Convert<_Value_type2>(this->template _Reinterpret_as<sizeof(_Value_type2)/sizeof(int)>());
2820 return _Convert<_Value_type>(_View_as(_View_extent));
2828 static_assert(_Rank == 1, "array_view::data() is only permissible on array_view<T, 1>");
2840 if (!this->_M_buffer_descriptor._Get_buffer_ptr()->_Has_data_source()) {
2845 _Get_access_async(this->_M_buffer_descriptor._Get_view_key(), this->_M_buffer_descriptor._Get_buffer_ptr()->_Get_master_accelerator_view(), _Write_access, _PBuf)._Get();
2893 if ((_Access_type != access_type_none) && this->_M_buffer_descriptor._Get_buffer_ptr()->_Has_data_source())
2896 this->_M_buffer_descriptor._Get_buffer_ptr()->_Get_master_accelerator_view(),
2941 if ((_Access_type != access_type_none) && this->_M_buffer_descriptor._Get_buffer_ptr()->_Has_data_source())
2944 this->_M_buffer_descriptor._Get_buffer_ptr()->_Get_master_accelerator_view(),
2961 this->_M_buffer_descriptor._Get_buffer_ptr()->_Discard(this->_M_buffer_descriptor._Get_view_key());
2970 if (this->_M_buffer_descriptor._Get_buffer_ptr()->_Has_data_source()) {
2971 return this->_M_buffer_descriptor._Get_buffer_ptr()->_Get_master_accelerator_view();
2974 throw runtime_exception("Cannot query source accelerator_view for an array_view without a data source.", E_INVALIDARG);
2981 template <typename _T, int _R>
2984 static_assert(sizeof(array_view<_T,_R>) == sizeof(_Array_view_base<_R, sizeof(_T)/sizeof(int)>), "ASSERT FAILURE: implementation relies on binary conversion between the two");
2990 _Base::_Project0(_I, _Projected_view);
2991 _Projected_view._Initialize();
2997 :_Base(_Other, _Section_origin, _Section_extent)
3018 if (_Src_data_size < this->extent.size()) {
3019 throw runtime_exception("Invalid _Src container argument - _Src size is less than the size of the array_view.", E_INVALIDARG);
3024 if (_Discard_data) {
3032 template <typename _Value_type, int _Rank>
3036 static_assert(0 == (sizeof(_Value_type) % sizeof(int)), "only value types whose size is a multiple of the size of an integer are allowed in array views");
3052 static const int rank = _Rank;
3102 static_assert( std::is_same<typename std::remove_const<typename std::remove_reference<decltype(*_Src.data())>::type>::type, _Value_type>::value, "container element type and array view element type must match");
3103 _Initialize(_Src.size());
3120 throw runtime_exception("Invalid _Src container argument - _Src size is greater than INT_MAX", E_INVALIDARG);
3122 static_assert( std::is_same<decltype(_Src.data()), const _Value_type*>::value, "container element type and array view element type must match");
3123 static_assert(_Rank == 1, "rank must be 1");
3124 _Initialize(_Src.size());
3139 static_assert( std::is_same<typename std::remove_const<typename std::remove_reference<decltype(*_Src.data())>::type>::type, _Value_type>::value, "container element type and array view element type must match");
3140 _Initialize(_Src.size());
3187 static_assert( std::is_same<typename std::remove_const<typename std::remove_reference<decltype(*_Src.data())>::type>::type, _Value_type>::value, "container element type and array view element type must match");
3188 static_assert(_Rank == 1, "rank must be 1");
3189 _Initialize(_Src.size());
3202 static_assert(_Rank == 1, "rank must be 1");
3221 static_assert( std::is_same<typename std::remove_const<typename std::remove_reference<decltype(*_Src.data())>::type>::type, _Value_type>::value, "container element type and array view element type must match");
3222 static_assert(_Rank == 2, "rank must be 2");
3223 _Initialize(_Src.size());
3244 static_assert( std::is_same<typename std::remove_const<typename std::remove_reference<decltype(*_Src.data())>::type>::type, _Value_type>::value, "container element type and array view element type must match");
3245 static_assert(_Rank == 3, "rank must be 3");
3246 _Initialize(_Src.size());
3262 static_assert(_Rank == 1, "rank must be 1");
3282 static_assert(_Rank == 2, "rank must be 2");
3302 array_view(int _E0, int _E1, int _E2, const _Value_type * _Src) __GPU
3305 static_assert(_Rank == 3, "rank must be 3");
3322 static_assert(_Rank == 1, "rank must be 1");
3342 static_assert(_Rank == 2, "rank must be 2");
3365 static_assert(_Rank == 3, "rank must be 3");
3374 _Base::operator=(_Other);
3383 _Base::operator=(_Other);
3433 void *_Ptr = _Access(_Index);
3434 return *reinterpret_cast<value_type*>(_Ptr);
3448 return this->operator()(_Index);
3463 return *reinterpret_cast<value_type*>(_Ptr);
3493 const _Value_type& operator() (int _I0, int _I1) const __GPU
3495 static_assert(_Rank == 2, "value_type& array_view::operator()(int,int) is only permissible on array_view<T, 2>");
3496 return this->operator()(index<2>(_I0,_I1));
3514 const _Value_type& operator() (int _I0, int _I1, int _I2) const __GPU
3516 static_assert(_Rank == 3, "value_type& array_view::operator()(int,int,int) is only permissible on array_view<T, 3>");
3517 return this->operator()(index<3>(_I0,_I1,_I2));
3534 return _Convert<_Value_type>(_Section(_Section_origin, _Section_extent));
3564 return section(_Idx, this->extent - _Idx);
3582 static_assert(_Rank == 1, "rank must be 1");
3607 static_assert(_Rank == 2, "rank must be 2");
3638 static_assert(_Rank == 3, "rank must be 3");
3652 return _Convert<_Value_type2>(this->template _Reinterpret_as<sizeof(_Value_type2)/sizeof(int)>());
3666 return _Convert<_Value_type>(_View_as(_View_extent));
3672 const _Value_type* data() const __GPU
3674 static_assert(_Rank == 1, "array_view::data() is only permissible on array_view<T, 1>");
3685 _Get_access_async(this->_M_buffer_descriptor._Get_view_key(), this->_M_buffer_descriptor._Get_buffer_ptr()->_Get_master_accelerator_view(), _Write_access, _PBuf)._Get();
3724 if (this->_M_buffer_descriptor._Get_buffer_ptr()->_Has_data_source()) {
3725 _Ev = _Get_access_async(this->_M_buffer_descriptor._Get_view_key(), this->_M_buffer_descriptor._Get_buffer_ptr()->_Get_master_accelerator_view(), _Read_access, _PBuf);
3759 if (this->_M_buffer_descriptor._Get_buffer_ptr()->_Has_data_source()) {
3760 _Get_access_async(this->_M_buffer_descriptor._Get_view_key(), this->_M_buffer_descriptor._Get_buffer_ptr()->_Get_master_accelerator_view(), _Read_access, _PBuf)._Get();
3772 if (this->_M_buffer_descriptor._Get_buffer_ptr()->_Has_data_source()) {
3773 return this->_M_buffer_descriptor._Get_buffer_ptr()->_Get_master_accelerator_view();
3776 throw runtime_exception("Cannot query source accelerator_view for an array_view without a data source.", E_INVALIDARG);
3783 template <typename _T, int _R>
3792 _Base::_Project0(_I, _Projected_view);
3793 _Projected_view._Initialize();
3800 _Base(_Other, _Section_origin, _Section_extent)
3808 this->_M_buffer_descriptor._M_type_access_mode = _Read_access;
3815 if (_Src_data_size < this->extent.size()) {
3816 throw runtime_exception("Invalid _Src container argument - _Src size is less than the size of the array_view.", E_INVALIDARG);
3828 template <typename InputIterator, typename _Value_type, int _Rank> void copy(InputIterator _SrcFirst, InputIterator _SrcLast, array<_Value_type, _Rank> &_Dest);
3832 template <typename OutputIterator, typename _Value_type, int _Rank> void copy(const array<_Value_type, _Rank> &_Src, OutputIterator _DestIter);
3840 template <typename _Value_type, int _Rank> void copy(const array_view<const _Value_type, _Rank>& _Src, const array_view<_Value_type, _Rank>& _Dest);
3842 template <typename _Value_type, int _Rank> void copy(const array_view<_Value_type, _Rank>& _Src, const array_view<_Value_type, _Rank>& _Dest);
3843 template <typename InputIterator, typename _Value_type, int _Rank> concurrency::completion_future copy_async(InputIterator _SrcFirst, InputIterator _SrcLast, const array_view<_Value_type, _Rank> &_Dest);
3845 template <typename InputIterator, typename _Value_type, int _Rank> void copy(InputIterator _SrcFirst, InputIterator _SrcLast, const array_view<_Value_type, _Rank> &_Dest);
3846 template <typename InputIterator, typename _Value_type, int _Rank> void copy(InputIterator _SrcFirst, const array_view<_Value_type, _Rank> &_Dest);
3848 template <typename OutputIterator, typename _Value_type, int _Rank> void copy(const array_view<_Value_type, _Rank> &_Src, OutputIterator _DestIter);
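A minimal sketch (not part of amp.h) exercising two of the copy overloads declared above, assuming only the public concurrency::copy API:

    #include <amp.h>
    #include <vector>
    using namespace concurrency;

    int main() {
        std::vector<float> src(256, 1.0f), dst(256, 0.0f);
        array<float, 1> device_data(256);
        copy(src.begin(), src.end(), device_data);  // iterators -> array (line 3828)
        copy(device_data, dst.begin());             // array -> output iterator (line 3832)
        return static_cast<int>(dst[255]);          // 1
    }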
3852 template<typename _Value_type, int _Rank>
3865 template <typename _Value_type, int _Rank = 1> class array
3872 static_assert(!std::is_const<_Value_type>::value, "array<const _Value_type> is not supported");
3873 static_assert(0 == (sizeof(_Value_type) % sizeof(int)), "only value types whose size is a multiple of the size of an integer are allowed in array");
3876 template<typename _Value_type, int _Rank>
3878 friend const _Buffer_descriptor& details::_Get_buffer_descriptor<array<_Value_type,_Rank>>(const array<_Value_type,_Rank>& _Array) __GPU;
3883 static const int rank = _Rank;
3907 static_assert(_Rank == 1, "array(int) is only permissible on array<T, 1>");
3920 explicit array(int _E0, int _E1) __CPU_ONLY
3923 static_assert(_Rank == 2, "array(int, int) is only permissible on array<T, 2>");
3939 explicit array(int _E0, int _E1, int _E2) __CPU_ONLY
3942 static_assert(_Rank == 3, "array(int, int, int) is only permissible on array<T, 3>");
3963 : _M_extent(_Extent)
3965 _Initialize(_Av, _Cpu_access_type);
3987 static_assert(_Rank == 1, "array(int, accelerator_view) is only permissible on array<T, 1>");
3988 _Initialize(_Av, _Cpu_access_type);
4013 static_assert(_Rank == 2, "array(int, int, accelerator_view) is only permissible on array<T, 2>");
4014 _Initialize(_Av, _Cpu_access_type);
4040 : _M_extent(Concurrency::extent<_Rank>(_E0, _E1, _E2))
4042 static_assert(_Rank == 3, "array(int, int, int, accelerator_view) is only permissible on array<T, 3>");
4043 _Initialize(_Av, _Cpu_access_type);
4061 _Initialize(_Av, _Associated_Av);
4079 static_assert(_Rank == 1, "array(int, accelerator_view, accelerator_view) is only permissible on array<T, 1>");
4080 _Initialize(_Av, _Associated_Av);
4101 static_assert(_Rank == 2, "array(int, int, accelerator_view, accelerator_view) is only permissible on array<T, 2>");
4102 _Initialize(_Av, _Associated_Av);
4126 static_assert(_Rank == 3, "array(int, int, int, accelerator_view, accelerator_view) is only permissible on array<T, 3>");
4127 _Initialize(_Av, _Associated_Av);
4161 _InputIterator _Src_last = _Src_first;
4179 template <typename _InputIterator> array(int _E0, _InputIterator _Src_first, _InputIterator _Src_last) __CPU_ONLY
4182 static_assert(_Rank == 1, "array(int, iterator, iterator) is only permissible on array<T, 1>");
4196 template <typename _InputIterator> array(int _E0, _InputIterator _Src_first) __CPU_ONLY
4199 static_assert(_Rank == 1, "array(int, iterator) is only permissible on array<T, 1>");
4201 _InputIterator _Src_last = _Src_first;
4222 template <typename _InputIterator> array(int _E0, int _E1, _InputIterator _Src_first, _InputIterator _Src_last) __CPU_ONLY
4225 static_assert(_Rank == 2, "array(int, int, iterator, iterator) is only permissible on array<T, 2>");
4242 template <typename _InputIterator> array(int _E0, int _E1, _InputIterator _Src_first) __CPU_ONLY
4245 static_assert(_Rank == 2, "array(int, int, iterator) is only permissible on array<T, 2>");
4247 _InputIterator _Src_last = _Src_first;
4269 template <typename _InputIterator> array(int _E0, int _E1, int _E2, _InputIterator _Src_first, _InputIterator _Src_last) __CPU_ONLY
4272 static_assert(_Rank == 3, "array(int, int, int, iterator, iterator) is only permissible on array<T, 3>");
4292 template <typename _InputIterator> array(int _E0, int _E1, int _E2, _InputIterator _Src_first) __CPU_ONLY
4295 static_assert(_Rank == 3, "array(int, int, int, iterator) is only permissible on array<T, 3>");
4297 _InputIterator _Src_last = _Src_first;
4326 : _M_extent(_Extent)
4328 _Initialize(_Av, _Src_first, _Src_last, _Cpu_access_type);
4352 : _M_extent(_Extent)
4354 _InputIterator _Src_last = _Src_first;
4357 _Initialize(_Av, _Src_first, _Src_last, _Cpu_access_type);
4385 static_assert(_Rank == 1, "array(int, iterator, iterator) is only permissible on array<T, 1>");
4386 _Initialize(_Av, _Src_first, _Src_last, _Cpu_access_type);
4412 static_assert(_Rank == 1, "array(int, iterator) is only permissible on array<T, 1>");
4414 _InputIterator _Src_last = _Src_first;
4417 _Initialize(_Av, _Src_first, _Src_last, _Cpu_access_type);
4448 static_assert(_Rank == 2, "array(int, int, iterator, iterator) is only permissible on array<T, 2>");
4449 _Initialize(_Av, _Src_first, _Src_last, _Cpu_access_type);
4478 static_assert(_Rank == 2, "array(int, int, iterator) is only permissible on array<T, 2>");
4480 _InputIterator _Src_last = _Src_first;
4483 _Initialize(_Av, _Src_first, _Src_last, _Cpu_access_type);
4515 : _M_extent(Concurrency::extent<_Rank>(_E0, _E1, _E2))
4517 static_assert(_Rank == 3, "array(int, int, int, iterator, iterator) is only permissible on array<T, 3>");
4518 _Initialize(_Av, _Src_first, _Src_last, _Cpu_access_type);
4548 : _M_extent(Concurrency::extent<_Rank>(_E0, _E1, _E2))
4550 static_assert(_Rank == 3, "array(int, int, int, iterator) is only permissible on array<T, 3>");
4552 _InputIterator _Src_last = _Src_first;
4555 _Initialize(_Av, _Src_first, _Src_last, _Cpu_access_type);
4579 _Initialize(_Av, _Associated_Av, _Src_first, _Src_last);
4601 _InputIterator _Src_last = _Src_first;
4604 _Initialize(_Av, _Associated_Av, _Src_first, _Src_last);
4628 static_assert(_Rank == 1, "array(int, iterator, iterator, accelerator_view, accelerator_view) is only permissible on array<T, 1>");
4629 _Initialize(_Av, _Associated_Av, _Src_first, _Src_last);
4651 static_assert(_Rank == 1, "array(int, iterator, accelerator_view, accelerator_view) is only permissible on array<T, 1>");
4653 _InputIterator _Src_last = _Src_first;
4656 _Initialize(_Av, _Associated_Av, _Src_first, _Src_last);
4683 static_assert(_Rank == 2, "array(int, int, iterator, iterator, accelerator_view, accelerator_view) is only permissible on array<T, 2>");
4684 _Initialize(_Av, _Associated_Av, _Src_first, _Src_last);
4709 static_assert(_Rank == 2, "array(int, int, iterator, accelerator_view, accelerator_view) is only permissible on array<T, 2>");
4711 _InputIterator _Src_last = _Src_first;
4714 _Initialize(_Av, _Associated_Av, _Src_first, _Src_last);
4744 static_assert(_Rank == 3, "array(int, int, int, iterator, iterator, accelerator_view, accelerator_view) is only permissible on array<T, 3>");
4745 _Initialize(_Av, _Associated_Av, _Src_first, _Src_last);
4773 static_assert(_Rank == 3, "array(int, int, int, iterator, accelerator_view, accelerator_view) is only permissible on array<T, 3>");
4775 _InputIterator _Src_last = _Src_first;
4778 _Initialize(_Av, _Associated_Av, _Src_first, _Src_last);
4788 :_M_extent(_Src.extent)
4811 :_M_extent(_Src.extent)
4813 _Initialize(_Av, _Cpu_access_type);
4830 :_M_extent(_Src.extent)
4832 _Initialize(_Av, _Associated_Av);
4840 : _M_extent(_Other._M_extent)
4842 _Initialize(_Other.accelerator_view, _Other.associated_accelerator_view);
4850 : _M_extent(_Other._M_extent), _M_multiplier(_Other._M_multiplier)
4851 , _M_buffer_descriptor(_Other._M_buffer_descriptor)
4854 this->_Register_copy(_Other);
4857 _Other._Unregister();
4858 _Other._M_buffer_descriptor._M_data_ptr = NULL;
4859 _Other._M_buffer_descriptor._Set_buffer_ptr(NULL);
4867 if (this != &_Other)
4872 _M_extent = _Other._M_extent;
4873 _Initialize(_Other.accelerator_view, _Other.associated_accelerator_view);
4884 if (this != &_Other)
4889 _M_extent = _Other._M_extent;
4890 _M_multiplier = _Other._M_multiplier;
4891 _M_buffer_descriptor = _Other._M_buffer_descriptor;
4892 this->_Register_copy(_Other);
4895 _Other._Unregister();
4896 _Other._M_buffer_descriptor._M_data_ptr = NULL;
4897 _Other._M_buffer_descriptor._Set_buffer_ptr(NULL);
4977 _Value_type * _Ptr = reinterpret_cast<_Value_type *>(_M_buffer_descriptor._M_data_ptr);
4978 return _Ptr[_Flatten_helper::func(_M_multiplier._M_base, _Index._M_base)];
4993 #pragma warning( push )
4994 #pragma warning( disable : 4880 )
5000 #pragma warning( pop )
5002 _Value_type * _Ptr = reinterpret_cast<_Value_type *>(_M_buffer_descriptor._M_data_ptr);
5003 return _Ptr[_Flatten_helper::func(_M_multiplier._M_base, _Index._M_base)];
5047 return this->operator[](_Index);
5061 return this->operator[](_Index);
5076 _Value_type& operator() (int _I0, int _I1) __GPU
5078 static_assert(_Rank == 2, "value_type& array::operator()(int, int) is only permissible on array<T, 2>");
5079 return this->operator[](index<2>(_I0, _I1));
5094 const _Value_type& operator() (int _I0, int _I1) const __GPU
5096 static_assert(_Rank == 2, "const value_type& array::operator()(int, int) is only permissible on array<T, 2>");
5097 return this->operator[](index<2>(_I0, _I1));
5115 _Value_type& operator() (int _I0, int _I1, int _I2) __GPU
5117 static_assert(_Rank == 3, "value_type& array::operator()(int, int, int) is only permissible on array<T, 3>");
5118 return this->operator[](index<3>(_I0, _I1, _I2));
5136 const _Value_type& operator() (int _I0, int _I1, int _I2) const __GPU
5138 static_assert(_Rank == 3, "const value_type& array::operator()(int, int, int) const is only permissible on array<T, 3>");
5139 return this->operator[](index<3>(_I0, _I1, _I2));
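A hedged sketch (not from the header) of the rank-2 operator() overloads above: fill an array on the accelerator and copy it back through the std::vector conversion operator:

    #include <amp.h>
    #include <vector>
    using namespace concurrency;

    int main() {
        array<int, 2> data(2, 3);
        parallel_for_each(data.extent, [&data](index<2> idx) restrict(amp) {
            data(idx[0], idx[1]) = idx[0] * 10 + idx[1];   // same element as data[idx]
        });
        std::vector<int> host = data;   // operator std::vector<_Value_type>() copies back
        return host[5];                 // element (1, 2) == 12
    }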
5187 return _T1.section(_Section_origin, _Section_extent);
5205 return _T1.section(_Section_origin, _Section_extent);
5330 return _T1.section(_I0,_I1,_E0,_E1);
5355 return _T1.section(_I0,_I1,_E0,_E1);
5386 return _T1.section(_I0,_I1,_I2,_E0,_E1,_E2);
5417 return _T1.section(_I0,_I1,_I2,_E0,_E1,_E2);
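For reference, a small sketch (not from amp.h) of the section overloads these helpers forward to, assuming the public array_view API:

    #include <amp.h>
    #include <vector>
    using namespace concurrency;

    int main() {
        std::vector<int> v(100);
        array_view<int, 2> av(10, 10, v);
        array_view<int, 2> window = av.section(index<2>(2, 2), extent<2>(4, 4)); // origin (2,2), extent 4x4
        array_view<int, 2> same   = av.section(2, 2, 4, 4);                      // scalar form of the same call
        return window.extent[0] + same.extent[1];                                // 4 + 4 = 8
    }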
5441 #pragma warning( push )
5442 #pragma warning( disable : 4880 )
5447 return const_cast<array*>(this)->reinterpret_as<_Value_type2>();
5448 #pragma warning( pop )
5476 #pragma warning( push )
5477 #pragma warning( disable : 4880 )
5482 return const_cast<array*>(this)->view_as<_New_rank>(_View_extent);
5483 #pragma warning( pop )
5489 operator std::vector<_Value_type>() const __CPU_ONLY
5491 std::vector<_Value_type> _return_vector(extent.size());
5494 return _return_vector;
5503 return reinterpret_cast<_Value_type*>(_M_buffer_descriptor._M_data_ptr);
5509 const _Value_type* data() const __GPU
5511 #pragma warning( push )
5512 #pragma warning( disable : 4880 )
5518 #pragma warning( pop )
5519 return reinterpret_cast<const _Value_type*>(_M_buffer_descriptor._M_data_ptr);
5549 : _M_extent(_Extent), _M_buffer_descriptor(_Buffer_descriptor)
5565 unsigned int totalExtent = _M_extent[_Rank-1];
5574 unsigned int totalExtent = _Initialize();
5576 _M_buffer_descriptor._Set_buffer_ptr(NULL);
5578 _Buffer_ptr _PBuf = _Buffer::_Create_buffer(_Av, _Av, totalExtent, sizeof(_Value_type), false, _Cpu_access_type);
5580 _M_buffer_descriptor._Set_buffer_ptr(_Ubiquitous_buffer::_Create_ubiquitous_buffer(_PBuf));
5585 template <typename _InputIterator>
5588 _Initialize(_Av, _Cpu_access_type);
5589 copy(_Src_first, _Src_last, *this);
5595 unsigned int totalExtent = _Initialize();
5601 _M_buffer_descriptor._Set_buffer_ptr(NULL);
5609 _PBuf = _Buffer::_Create_buffer(_Associated_Av, _Av, totalExtent, sizeof(_Value_type), false, access_type_read_write);
5612 _PBuf = _Buffer::_Create_stage_buffer(_Associated_Av, _Av, totalExtent, sizeof(_Value_type));
5619 _PBuf = _Buffer::_Create_buffer(_Av, _Av, totalExtent, sizeof(_Value_type), false, access_type_auto);
5622 _M_buffer_descriptor._Set_buffer_ptr(_Ubiquitous_buffer::_Create_ubiquitous_buffer(_PBuf));
5627 template <typename _InputIterator>
5630 _Initialize(_Av, _Associated_Av);
5631 copy(_Src_first, _Src_last, *this);
5638 _M_buffer_descriptor._Get_buffer_ptr()->_Register_view(_M_buffer_descriptor._Get_view_key(), cpuAv, _Create_buffer_view_shape());
5640 _M_buffer_descriptor._Get_buffer_ptr()->_Discard(_M_buffer_descriptor._Get_view_key());
5653 _M_buffer_descriptor._Get_buffer_ptr()->_Register_view_copy(_M_buffer_descriptor._Get_view_key(), _Other._M_buffer_descriptor._Get_view_key());
5659 if (_M_buffer_descriptor._Get_buffer_ptr() != NULL) {
5660 _M_buffer_descriptor._Get_buffer_ptr()->_Unregister_view(_M_buffer_descriptor._Get_view_key());
5666 return _M_buffer_descriptor._Get_buffer_ptr();
5671 _ASSERTE(!_Zero_copy_cpu_access || (_Get_buffer()->_Get_master_buffer()->_Get_allowed_host_access_mode() != _No_access));
5682 const_cast<array*>(this)->_M_buffer_descriptor._M_data_ptr = _PBuf->_Get_host_ptr();
5692 _ASSERTE(_Get_buffer()->_Get_master_buffer_elem_size() == sizeof(_Value_type));
5694 unsigned int _ZeroOffset[_Rank] = {0};
5695 unsigned int _View_extent[_Rank];
5696 for(int i=0; i<_Rank; ++i)
5698 _View_extent[i] = static_cast<unsigned int>(this->_M_extent[i]);
5700 return _View_shape::_Create_view_shape(static_cast<unsigned int>(_Rank), 0, &_View_extent[0], &_ZeroOffset[0], &_View_extent[0]);
5717 if ((_Requested_mode == _No_access) || ((_M_buffer_descriptor._M_curr_cpu_access_mode & _Requested_mode) != _Requested_mode))
5719 if (_Has_cpu_access() && (_Requested_mode != _No_access))
5731 if (!_Has_cpu_access()) {
5735 throw runtime_exception("The array is not accessible for reading on CPU.", E_FAIL);
5760 template <typename _Value_type, int _Rank>
5763 if (_Src.extent.size() > _Dest.extent.size())
5765 throw runtime_exception("Invalid _Src argument. _Src size exceeds total size of the _Dest.", E_INVALIDARG);
5774 size_t _NumElemsToCopy = (_Src.extent.size() * sizeof(_Value_type)) / _PBufSrc->_Get_elem_size();
5780 template <typename InputIterator, typename _Value_type, int _Rank>
5783 size_t _NumElemsToCopy = std::distance(_SrcFirst, _SrcLast);
5790 return details::_Copy_impl<InputIterator, _Value_type>(_SrcFirst, _SrcLast, _NumElemsToCopy, _PDestBuf, 0);
5794 template <typename OutputIterator, typename _Value_type, int _Rank>
5799 size_t _NumElemsToCopy = (_Src.extent.size() * sizeof(_Value_type)) / _PSrcBuf->_Get_elem_size();
5801 return details::_Copy_impl<OutputIterator, _Value_type>(_PSrcBuf, 0, _NumElemsToCopy, _DestIter);
5805 template <typename _Value_type, int _Rank>
5811 throw runtime_exception("Cannot copy between overlapping regions of the same buffer.", E_INVALIDARG);
5829 template <typename _Value_type, int _Rank>
5835 throw runtime_exception("Cannot copy between overlapping regions of the same buffer.", E_INVALIDARG);
5845 return _Ev._Add_continuation(std::function<_Event()>([_PSrcBuf, _PSrcShape, _PDestBuf, _PDestShape]() mutable -> _Event {
5850 template <typename _Value_type, int _Rank>
5858 throw runtime_exception("Cannot copy between overlapping regions of the same buffer.", E_INVALIDARG);
5871 template <typename InputIterator, typename _Value_type, int _Rank>
5874 static_assert(!std::is_const<_Value_type>::value, "Cannot copy to array_view<const _Value_type, _Rank>.");
5879 if (_Src_size > _Dest.extent.size())
5881 throw runtime_exception("Number of elements in range between [_SrcFirst, _SrcLast) exceeds total size of the _Dest.", E_INVALIDARG);
5884 #pragma warning( push )
5885 #pragma warning( disable : 4127 ) // Disable warning about constant conditional expression
5887 if ((_Rank > 1) && (_Src_size != _Dest.extent.size()))
5889 throw runtime_exception("For _Rank > 1 the number of elements in range between [_SrcFirst, _SrcLast) has to be equal to total size of the _Dest.", E_INVALIDARG);
5891 #pragma warning( pop )
5907 unsigned int _Dst_linear_offset, _Dst_linear_size;
5908 if (_Dst_shape->_Is_view_linear(_Dst_linear_offset, _Dst_linear_size))
5911 return details::_Copy_impl<InputIterator, _Value_type>(_SrcFirst, _SrcLast, _Src_size, _PDestBuf, _Dst_linear_offset);
5919 std::vector<unsigned int> _Src_offset(_Reinterpreted_dst_shape->_Get_rank(), 0);
5921 _Reinterpreted_dst_shape->_Get_view_extent(), _Src_offset.data(),
5922 _Reinterpreted_dst_shape->_Get_view_extent());
5925 return details::_Copy_impl<InputIterator, _Value_type>(_SrcFirst, _Src_shape, _PDestBuf, _Dst_shape);
5932 template <typename OutputIterator, typename _Value_type, int _Rank>
5949 unsigned int _Src_linear_offset, _Src_linear_size;
5950 if (_Src_shape->_Is_view_linear(_Src_linear_offset, _Src_linear_size))
5953 return details::_Copy_impl<OutputIterator, _Value_type>(_PSrcBuf, _Src_linear_offset, _Src_linear_size, _DestIter);
5961 std::vector<unsigned int> _Dst_offset(_Reinterpreted_src_shape->_Get_rank(), 0);
5963 _Reinterpreted_src_shape->_Get_view_extent(), _Dst_offset.data(),
5964 _Reinterpreted_src_shape->_Get_view_extent());
5967 return details::_Copy_impl<OutputIterator, _Value_type>(_PSrcBuf, _Src_shape, _DestIter, _Dst_shape);
5992 sizeof(_Value_type) * _Src.extent.size());
6012 sizeof(_Value_type) * _Src.extent.size());
6083 InputIterator _SrcLast = _SrcFirst;
6085 return copy_async(_SrcFirst, _SrcLast, _Dest);
6100 InputIterator _SrcLast = _SrcFirst;
6102 copy(_SrcFirst, _SrcLast, _Dest);
6123 sizeof(_Value_type) * _Src.extent.size());
6144 sizeof(_Value_type) * _Src.extent.size());
6167 sizeof(_Value_type) * _Src.extent.size());
6187 sizeof(_Value_type) * _Src.extent.size());
6210 sizeof(_Value_type) * _Src.extent.size());
6230 sizeof(_Value_type) * _Src.extent.size());
6284 sizeof(_Value_type) * _Src.extent.size());
6304 sizeof(_Value_type) * _Src.extent.size());
6383 InputIterator _SrcLast = _SrcFirst;
6385 return copy_async(_SrcFirst, _SrcLast, _Dest);
6423 InputIterator _SrcLast = _SrcFirst;
6425 copy(_SrcFirst, _SrcLast, _Dest);
6447 sizeof(_Value_type) * _Src.extent.size());
6469 sizeof(_Value_type) * _Src.extent.size());
6526 if (_D3D_buffer == NULL)
6533 throw runtime_exception("Cannot create D3D buffer on a non-D3D accelerator_view.", E_INVALIDARG);
6536 _Ubiquitous_buffer_ptr _PBuf = _Ubiquitous_buffer::_Create_ubiquitous_buffer(_Buffer::_Create_buffer(_D3D_buffer, _Av, _Extent.size(), sizeof(_Value_type)));
6546 #define AS_UINT_PTR(p) reinterpret_cast<unsigned int *>(p)
6547 #define AS_UINT(v) *(reinterpret_cast<unsigned int *>(&(v)))
6548 #define AS_INT(v) *(reinterpret_cast<int *>(&(v)))
6549 #define AS_FLOAT(v) *(reinterpret_cast<float *>(&(v)))
6622 #pragma warning( push )
6623 #pragma warning( disable : 4146 )
6630 #pragma warning( pop )
6675 #pragma warning( push )
6676 #pragma warning( disable : 4146 )
6682 #pragma warning( pop )
6696 #pragma warning( push )
6697 #pragma warning( disable : 4146 )
6701 #pragma warning( pop )
6777 int _Old = *_Expected_value;
6785 *_Expected_value = AS_INT(_Ret);
6810 unsigned int _Old = *_Expected_value;
6818 *_Expected_value = _Ret;
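A minimal sketch (not part of the header) using one of the public atomic functions defined in this region of amp.h to accumulate into a shared location from a kernel:

    #include <amp.h>
    #include <vector>
    using namespace concurrency;

    int main() {
        std::vector<int> v(1, 0);
        array_view<int, 1> counter(1, v);
        parallel_for_each(extent<1>(1000), [=](index<1>) restrict(amp) {
            atomic_fetch_add(&counter[0], 1);   // data-race-free increment
        });
        counter.synchronize();
        return v[0] == 1000 ? 0 : 1;
    }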
7169 #pragma warning( push )
7170 #pragma warning( disable : 4100 ) // unreferenced formal parameter
7205 #pragma warning( pop )
7221 inline int abs(int _X) __GPU_ONLY
7241 inline float clamp(float _X, float _Min, float _Max) __GPU_ONLY
7261 inline int clamp(int _X, int _Min, int _Max) __GPU_ONLY
7320 inline int imax(int _X, int _Y) __GPU_ONLY
7337 inline int imin(int _X, int _Y) __GPU_ONLY
7354 inline unsigned int umax(unsigned int _X, unsigned int _Y) __GPU_ONLY
7371 inline unsigned int umin(unsigned int _X, unsigned int _Y) __GPU_ONLY
7391 inline float mad(float _X, float _Y, float _Z) __GPU_ONLY
7411 inline double mad(double _X, double _Y, double _Z) __GPU_ONLY
7431 inline int mad(int _X, int _Y, int _Z) __GPU_ONLY
7451 inline unsigned int mad(unsigned int _X, unsigned int _Y, unsigned int _Z) __GPU_ONLY
7493 inline float rcp(float _X) __GPU_ONLY
7555 inline float smoothstep(float _Min, float _Max, float _X) __GPU_ONLY
7572 inline float step(float _Y, float _X) __GPU_ONLY
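A hedged sketch (not from the header) using a few of the concurrency::direct3d math wrappers declared above inside a restrict(amp) kernel:

    #include <amp.h>
    #include <vector>
    using namespace concurrency;

    int main() {
        std::vector<float> v(16, 2.5f);
        array_view<float, 1> av(16, v);
        parallel_for_each(av.extent, [=](index<1> idx) restrict(amp) {
            float x = direct3d::mad(av[idx], 2.0f, 1.0f);   // 2.5 * 2 + 1 = 6
            av[idx] = direct3d::clamp(x, 0.0f, 5.0f);       // clamped to 5
        });
        av.synchronize();
        return static_cast<int>(v[0]);                       // 5
    }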
int __dp_d3d_interlocked_min_int(_Inout_ int *, int) __GPU_ONLY
array(int _E0, int _E1, int _E2) __CPU_ONLY
Construct an array from three integer extents.
Definition: amp.h:3939
tiled_extent(const tiled_extent &_Other) __GPU
Copy constructor. Constructs a new tiled_extent from the supplied argument "_Other".
Definition: amp.h:1296
_Array_view_base(_In_ void *_Data, const Concurrency::extent< _Rank > &_Array_extent) __GPU_ONLY
Definition: amp.h:1821
void synchronize() const __CPU_ONLY
Synchronizes any modifications made to "this" array_view to its source data.
Definition: amp.h:3751
friend class accelerator
Definition: amprt.h:1444
static _Projection_result_type< _T, _R >::_Result_type _Project0(const array_view< _T, _R > *_Arr_view, int _I) __GPU
Definition: xxamp_inl.h:42
tiled_extent truncate() const __GPU
Returns a new tiled_extent with extents adjusted down to be evenly divisible by the tile dimensions...
Definition: amp.h:1499
array_view(const array< _Value_type, _Rank > &_Src) __GPU
Construct an array_view which is bound to the data contained in the _Src array. The extent of the arr...
Definition: amp.h:3067
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:5752
accelerator_view get_source_accelerator_view() const
Returns the accelerator_view where the data source of the array_view is located. If the array_view do...
Definition: amp.h:2968
int __dp_d3d_firstbitlowi(int) __GPU_ONLY
array_view section(const Concurrency::index< _Rank > &_Idx) const __GPU
Produces a subsection of the source array_view with origin specified by an index, with an extent of (...
Definition: amp.h:3562
int atomic_fetch_or(_Inout_ int *_Dest, int _Value) __GPU_ONLY
Performs an atomic bitwise or operation of _Value to the memory location pointed to by _Dest ...
Definition: amp.h:6945
void refresh() const __CPU_ONLY
Informs the array_view that its bound memory has been modified outside the array_view interface...
Definition: amp.h:2836
array(int _E0, int _E1, int _E2, Concurrency::accelerator_view _Av, access_type _Cpu_access_type=access_type_auto) __CPU_ONLY
Construct an array from three integer extents, bound to a specific accelerator_view.
Definition: amp.h:4039
_CPP_AMP_VERIFY_RANK(_Rank, index)
array_view section(int _I0, int _E0) const __GPU
Produces a one-dimensional subsection of the source array_view with origin specified by the index com...
Definition: amp.h:2734
index< _Rank > & operator-=(int _Rhs) __GPU
Subtracts an integer value from each element of this index.
Definition: amp.h:243
reference operator[](size_type _Pos)
Definition: array:140
Concurrency::extent< _Rank > _M_array_multiplier
Definition: amp.h:1669
void _Initialize(Concurrency::accelerator_view _Av, Concurrency::accelerator_view _Associated_Av) __CPU_ONLY
Definition: amp.h:5593
#define NULL
Definition: vcruntime.h:236
array(int _E0, _InputIterator _Src_first, Concurrency::accelerator_view _Av, access_type _Cpu_access_type=access_type_auto) __CPU_ONLY
Construct an array initialized from an iterator into a container, bound to a specific accelerator_vie...
Definition: amp.h:4409
array_view< _Value_type, 3 > section(int _I0, int _I1, int _I2, int _E0, int _E1, int _E2) __GPU
Produces a three-dimensional subsection of the source array with origin specified by the index compon...
Definition: amp.h:5383
array_view(int _E0, _Container &_Src) __CPU_ONLY
Construct an array_view which is bound to the data contained in the _Src container.
Definition: amp.h:2373
array(array &&_Other) __CPU_ONLY
Move constructor.
Definition: amp.h:4849
details::_Buffer_descriptor _Buffer_descriptor
Definition: amp.h:1685
float __dp_d3d_radiansf(float) __GPU_ONLY
unsigned int __dp_d3d_interlocked_add(_Inout_ unsigned int *, unsigned int) __GPU_ONLY
index operator--(int) __GPU
Post-decrements each element of this index.
Definition: amp.h:337
const unsigned int * _Get_base_extent() const
Definition: amprt.h:1601
int atomic_exchange(_Inout_ int *_Dest, int _Value) __GPU_ONLY
Sets the value of location pointed to by _Dest to _Value as an atomic operation
Definition: amp.h:6716
std::enable_if< details::_Is_extent_or_index< _Tuple_type< _Rank > >::value, bool >::type operator==(const _Tuple_type< _Rank > &_Lhs, const _Tuple_type< _Rank > &_Rhs) __GPU
Definition: amp.h:822
_Tuple_type _Create_uninitialized_tuple() __GPU
Definition: xxamp.h:214
index< _Rank > & operator++() __GPU
Pre-increments each element of this index.
Definition: amp.h:300
array_view() __GPU
Definition: amp.h:2994
details::_Projection_result_type< _Value_type, _Rank >::_Result_type operator[](int _I) __GPU
Projects the most-significant dimension of this array. If the array rank is 1, this produces a single...
Definition: amp.h:5016
void all_memory_fence(const tile_barrier &_Barrier) __GPU_ONLY
Memory fences and tile barriers.
Definition: amp.h:7178
array_view(int _E0, int _E1, int _E2, const _Container &_Src) __CPU_ONLY
Construct an array_view which is bound to the data contained in the _Src container.
Definition: amp.h:3241
const index< rank > global
An index that represents the global index within an extent.
Definition: amp.h:1013
constexpr auto size(const _Container &_Cont) -> decltype(_Cont.size())
Definition: xutility:1478
tiled_index< _Dim0, _Dim1 > _map_index(const index< rank > &_Local, const index< rank > &_Tile, const index< rank > &_Global, tile_barrier &_Barrier) const __GPU
Definition: amp.h:1435
_AMPIMP _Event _Add_continuation(const std::function< _Event __cdecl()> &_Continuation_task)
Creates an event which is an ordered collection of this and a continuation task
_AMPIMP _Access_mode __cdecl _Get_recommended_buffer_host_access_mode(const accelerator_view &_Av)
const _Value_type * data() const __GPU
Returns a pointer to the raw data of this array.
Definition: amp.h:5509
array_view section(int _I0, int _I1, int _E0, int _E1) const __GPU
Produces a two-dimensional subsection of the source array_view with origin specified by the index com...
Definition: amp.h:2759
array_view(const array_view &_Other) __GPU
Copy constructor. Shallow copy.
Definition: amp.h:2283
float __dp_d3d_madf(float, float, float) __GPU_ONLY
~_Array_view_shape() __GPU
Definition: amp.h:1568
const unsigned int * _Get_view_offset() const
Definition: amprt.h:1606
_Event _Get_access_async(const _View_key _Key, accelerator_view _Av, _Access_mode _Mode, _Buffer_ptr &_Buf_ptr)
Definition: amprt.h:3389
array_view< _Value_type, _Rank > section(const Concurrency::index< _Rank > &_Section_origin, const Concurrency::extent< _Rank > &_Section_extent) __GPU
Produces a subsection of the source array at the given origin and extent.
Definition: amp.h:5184
details::_Buffer_descriptor _Buffer_descriptor
Definition: amp.h:3868
void _Register_copy(const _Array_view_base &_Other) __CPU_ONLY
Definition: amp.h:2026
void _Register_copy(const _Array_view_base &) __GPU_ONLY
Definition: amp.h:2192
~array_view() __GPU
Destroys this array_view and reclaims resources.
Definition: amp.h:2265
index< _Rank > & operator+=(int _Rhs) __GPU
Adds an integer value to each element of this index.
Definition: amp.h:228
extent< _Rank > & operator+=(int _Rhs) __GPU
Adds an integer value to each element of this extent.
Definition: amp.h:670
unsigned int __dp_d3d_interlocked_xor(_Inout_ unsigned int *, unsigned int) __GPU_ONLY
array_view(int _E0) __CPU_ONLY
Construct an array_view which is not bound to a data source.
Definition: amp.h:2339
index< _Rank > & operator/=(int _Rhs) __GPU
Divides each element of this index by an integer value.
Definition: amp.h:273
index< _Rank > operator++(int) __GPU
Post-increments each element of this index.
Definition: amp.h:312
tiled_extent & operator=(const tiled_extent &_Other) __GPU
copy-assignment operator
Definition: amp.h:1471
array(int _E0, _InputIterator _Src_first, _InputIterator _Src_last, Concurrency::accelerator_view _Av, access_type _Cpu_access_type=access_type_auto) __CPU_ONLY
Construct an array initialized from a pair of iterators into a container, bound to a specific acceler...
Definition: amp.h:4382
array_view(const _In_ _Value_type(&_Src)[_Size]) __GPU
Construct an array_view which is bound to the data contained in the _Src container; ...
Definition: amp.h:3199
_Array_view_base< _Rank, _New_element_size > _Reinterpret_as() const __GPU
Definition: amp.h:1922
int __dp_d3d_clampi(int, int, int) __GPU_ONLY
array_view< _Value_type2, _Rank > reinterpret_as() const __GPU
Produces a (possibly unsafe) reinterpretation of this array_view that is linear and with a different ...
Definition: amp.h:2804
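A hedged sketch of reinterpret_as on a one-dimensional array_view; the vector v is illustrative, and the element sizes of the two types must match element for element.
std::vector<float> v(16, 1.0f);
array_view<float, 1> fv(16, v);
array_view<unsigned int, 1> bits = fv.reinterpret_as<unsigned int>();
unsigned int first = bits[0];      // the raw bit pattern of 1.0f (0x3F800000)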
unsigned int __dp_d3d_madu(unsigned int, unsigned int, unsigned int) __GPU_ONLY
_Value_type value_type
Definition: amp.h:2259
void _Register() __GPU_ONLY
Definition: amp.h:2190
array_view(const Concurrency::extent< _Rank > &_Extent) __CPU_ONLY
Construct an array_view which is not bound to a data source.
Definition: amp.h:2295
_eInitializeState
Definition: xxamp.h:208
_Array_view_base(const void *_Data, const Concurrency::extent< _Rank > &_Array_extent) __CPU_ONLY
Definition: amp.h:1827
extent< _Rank > operator-(const index< _Rank > &_Rhs) const __GPU
Element-wise subtraction of this extent with an index.
Definition: amp.h:594
static _Ret_ void * _Create_section_buffer_shape(const _Buffer_descriptor &_Descriptor, const Concurrency::index< _Rank > &_Section_origin, const Concurrency::extent< _Rank > &_Section_extent) __CPU_ONLY
Definition: amp.h:2119
const index< rank > tile
An index that represents the coordinates of the current tile of a tiled_extent.
Definition: amp.h:1023
void refresh() const __CPU_ONLY
Informs the array_view that its bound memory has been modified outside the array_view interface...
Definition: amp.h:3682
array_view< const _Value_type, _New_rank > view_as(const Concurrency::extent< _New_rank > &_View_extent) const __GPU
Produces an array_view of a different rank over this array_view's data.
Definition: amp.h:3664
void _Project0(int _I, _Array_view_base< _Rank-1, _Element_size > &_Projected_view) const __GPU
Definition: amp.h:1912
void _Refresh_data_ptr(_Access_mode, bool=true) __GPU_ONLY
Definition: amp.h:5742
array_view(const array_view &_Other, const Concurrency::index< _Rank > &_Section_origin, const Concurrency::extent< _Rank > &_Section_extent) __GPU
Definition: amp.h:3798
tiled_extent & operator=(const tiled_extent &_Other) __GPU
copy-assignment operator
Definition: amp.h:1389
float noise(float _X) __GPU_ONLY
Generates a random value using the Perlin noise algorithm
Definition: amp.h:7465
array_view< _Value_type, 1 > section(int _I0, int _E0) __GPU
Produces a one-dimensional subsection of the source array with origin specified by the index componen...
Definition: amp.h:5283
unsigned int __dp_d3d_interlocked_or(_Inout_ unsigned int *, unsigned int) __GPU_ONLY
#define __GPU
Definition: amprt.h:45
std::enable_if< details::_Is_extent_or_index< _Tuple_type< _Rank > >::value, _Tuple_type< _Rank > >::type operator*(const _Tuple_type< _Rank > &_Lhs, typename _Tuple_type< _Rank >::value_type _Rhs) __GPU
Definition: amp.h:890
_Array_view_base< _New_rank, _Element_size > _View_as(const Concurrency::extent< _New_rank > &_View_extent) const __GPU
Definition: amp.h:1932
Concurrency::extent< _Rank > _M_multiplier
Definition: amp.h:5755
index< _Rank > & operator--() __GPU
Pre-decrements each element of this index.
Definition: amp.h:325
array_view< const _Value_type, _Rank > section(const Concurrency::extent< _Rank > &_Ext) const __GPU
Produces a subsection of the source array_view with origin of zero, with an extent of _Ext...
Definition: amp.h:5233
array(const array &_Other) __CPU_ONLY
Copy constructor. Deep copy.
Definition: amp.h:4839
tiled_extent() __GPU
Default constructor.
Definition: amp.h:1285
tiled_extent pad() const __GPU
Returns a new tiled_extent with extents adjusted up to be evenly divisible by the tile dimensions...
Definition: amp.h:1408
_In_ int _Val
Definition: vcruntime_string.h:62
int __dp_d3d_maxi(int, int) __GPU_ONLY
Class represents a virtual device abstraction on a C++ AMP data-parallel accelerator ...
Definition: amprt.h:1442
index< _Rank > & operator-=(const index< _Rank > &_Rhs) __GPU
Element-wise subtraction of this index with another index.
Definition: amp.h:213
_Array_view_shape(const _Array_view_shape &_Other) __GPU
Definition: amp.h:1576
_AMPIMP std::pair< accelerator_view, accelerator_view > __cdecl _Get_src_dest_accelerator_view(_In_opt_ const _Buffer_descriptor *_SrcBuffDescPtr, _In_opt_ const _Buffer_descriptor *_DestBuffDescPtr)
extent< _Rank > & operator++() __GPU
Pre-increments each element of this extent.
Definition: amp.h:742
bool _Local() const _NOEXCEPT
Definition: functional:419
static _AMPIMP _Ret_ IUnknown *__cdecl _Get_D3D_buffer(_In_ _Buffer *_Buffer_ptr)
void _Project0(int _I, array_view< const _Value_type, _Rank-1 > &_Projected_view) const __GPU
Definition: amp.h:3790
int firstbitlow(int _X) __GPU_ONLY
Gets the location of the first set bit in _X, starting from the lowest order bit and working upward ...
Definition: amp.h:7303
_Array_view_base(const _Buffer_descriptor &_Buffer_desc, int _Base_linear_offset, const Concurrency::extent< _Rank > &_Array_extent, const Concurrency::index< _Rank > &_View_offset, const Concurrency::extent< _Rank > &_View_extent) __GPU_ONLY
Definition: amp.h:1769
static _AMPIMP _Ret_ _View_shape *__cdecl _Create_view_shape(unsigned int _Rank, unsigned int _Linear_offset, const unsigned int *_Base_extent, const unsigned int *_View_offset, const unsigned int *_View_extent, const bool *_Projection_info=NULL)
array_view< _Value_type2, 1 > reinterpret_as() __GPU
Produces a (possibly unsafe) reinterpretation of this array that is linear and with a different eleme...
Definition: amp.h:5427
The Concurrency namespace provides classes and functions that provide access to the Concurrency Runti...
Definition: agents.h:43
Class represents a future corresponding to a C++ AMP asynchronous operation
Definition: amprt.h:1266
array_view< const _Value_type, 3 > section(int _I0, int _I1, int _I2, int _E0, int _E1, int _E2) const __GPU
Produces a three-dimensional subsection of the source array with origin specified by the index compon...
Definition: amp.h:5414
static void _Is_valid_extent(const _T< _Rank > &_Tuple) __CPU_ONLY
Definition: xxamp.h:1195
bool _Is_valid_access_mode(_Access_mode _Mode)
Definition: amprt.h:417
unsigned int size() const __GPU
Returns the total linear size of this extent (in units of elements).
Definition: amp.h:521
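A small sketch (assuming <amp.h> and the Concurrency namespace are in scope): size() is the product of the extent's dimensions.
extent<3> e(2, 3, 4);
unsigned int n = e.size();         // 2 * 3 * 4 == 24 elements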
unsigned int __dp_d3d_maxu(unsigned int, unsigned int) __GPU_ONLY
float __dp_d3d_smoothstepf(float, float, float) __GPU_ONLY
void _Project0(int _I, array_view< _Value_type, _Rank-1 > &_Projected_view) const __GPU
Definition: amp.h:2988
extent< _Rank > & operator--() __GPU
Pre-decrements each element of this extent.
Definition: amp.h:767
array_view(const Concurrency::extent< _Rank > &_Extent, _Value_type *_Src) __GPU
Construct an array_view which is bound to the data pointed to by _Src.
Definition: amp.h:2327
unsigned int umax(unsigned int _X, unsigned int _Y) __GPU_ONLY
Determine the maximum numeric value of the arguments
Definition: amp.h:7354
concurrency::completion_future synchronize_to_async(const accelerator_view &_Accl_view) const __CPU_ONLY
Asynchronously synchronizes any modifications made to "this" array_view to the specified accelerator_...
Definition: amp.h:3697
A _Tiled_index_base is the base class of all three kinds of tiled_index to share the common code...
Definition: amp.h:1002
array< _Value_type, _Rank > make_array(const Concurrency::extent< _Rank > &_Extent, const Concurrency::accelerator_view &_Av, _In_ IUnknown *_D3D_buffer) __CPU_ONLY
Create an array from a D3D buffer interface pointer.
Definition: amp.h:6522
float __dp_d3d_clampf(float, float, float) __GPU_ONLY
array_view(int _E0, int _E1, int _E2) __CPU_ONLY
Construct an array_view which is not bound to a data source.
Definition: amp.h:2429
array_view section(int _I0, int _I1, int _E0, int _E1) const __GPU
Produces a two-dimensional subsection of the source array_view with origin specified by the index com...
Definition: amp.h:3605
static _Ret_ void * _Create_section_buffer_shape(const _Buffer_descriptor &, const Concurrency::index< _Rank > &, const Concurrency::extent< _Rank > &) __GPU_ONLY
Definition: amp.h:2209
array(const Concurrency::extent< _Rank > &_Extent, _InputIterator _Src_first, Concurrency::accelerator_view _Av, access_type _Cpu_access_type=access_type_auto) __CPU_ONLY
Construct an array initialized from an iterator into a container, bound to a specific accelerator_vie...
Definition: amp.h:4351
tiled_extent(const Concurrency::extent< rank > &_Other) __GPU
Constructs a new tiled_extent from the supplied extent.
Definition: amp.h:1459
double __dp_d3d_madd(double, double, double) __GPU_ONLY
const bool * _Get_projection_info() const
Definition: amprt.h:1615
void __dp_d3d_tile_static_memory_fence_with_tile_barrier() __GPU_ONLY
_Array_flatten_helper< _Rank, typename Concurrency::extent< _Rank >::value_type, typename Concurrency::index< _Rank >::value_type > _Flatten_helper
Definition: amp.h:3869
array(int _E0) __CPU_ONLY
Construct an array with the extent _E0
Definition: amp.h:3904
_Buffer_descriptor _M_buffer_descriptor
Definition: amp.h:2003
float rcp(float _X) __GPU_ONLY
Calculates a fast, approximate reciprocal of the argument
Definition: amp.h:7493
_Ret_ _View_shape * _Create_buffer_view_shape() const __CPU_ONLY
Definition: amp.h:1942
_Ret_ _View_shape * _Create_buffer_view_shape() const
Definition: amp.h:5690
integral_constant< bool, false > false_type
Definition: xtr1common:41
array_view(array< _Value_type, _Rank > &_Src) __GPU
Construct an array_view which is bound to the data contained in the _Src array. The extent of the arr...
Definition: amp.h:2274
_Array_view_base(const _Array_view_base &_Other, const Concurrency::index< _Rank > &_Section_origin, const Concurrency::extent< _Rank > &_Section_extent) __GPU
Definition: amp.h:1727
array_view(const array_view< _Value_type, _Rank > &_Src) __GPU
Copy constructor. Shallow copy.
Definition: amp.h:3076
_AMPIMP void _Register_view(_In_ _View_key _Key, accelerator_view _Cpu_av, _View_shape_ptr _Shape, _In_opt_ const _View_key _Source_view_key=nullptr)
array(int _E0, accelerator_view _Av, Concurrency::accelerator_view _Associated_Av) __CPU_ONLY
Construct a staging array between two associated accelerator_view.
Definition: amp.h:4076
_Tiled_index_base & operator=(const _Tiled_index_base &) __GPU
array(const array_view< const _Value_type, _Rank > &_Src, accelerator_view _Av, access_type _Cpu_access_type=access_type_auto) __CPU_ONLY
Construct an array initialized from an array_view, bound to a specific accelerator_view.
Definition: amp.h:4810
A tiled_index is a set of indices of 1 to 3 dimensions which have been subdivided into 1-...
Definition: amp.h:1096
_AMPIMP bool __cdecl _Is_D3D_accelerator_view(const accelerator_view &_Av)
_AMPIMP void _Get()
Wait until the _Event completes and throw any exceptions that occur.
array(const Concurrency::extent< _Rank > &_Extent, _InputIterator _Src_first, _InputIterator _Src_last, Concurrency::accelerator_view _Av, access_type _Cpu_access_type=access_type_auto) __CPU_ONLY
Construct an array initialized from a pair of iterators into a container, bound to a specific acceler...
Definition: amp.h:4325
extent(const int _Array[_Rank]) __GPU
Constructs an extent with the coordinate values provided by the array of int component values...
Definition: amp.h:476
_AMPIMP _Ret_ _Amp_runtime_trace *__cdecl _Get_amp_trace()
int atomic_fetch_dec(_Inout_ int *_Dest) __GPU_ONLY
Performs an atomic decrement to the memory location pointed to by _Dest
Definition: amp.h:6673
concurrency::completion_future synchronize_async(access_type _Access_type=access_type_read) const __CPU_ONLY
Asynchronously synchronizes any modifications made to "this" array_view to its source data...
Definition: amp.h:2885
array(const Concurrency::extent< _Rank > &_Extent) __CPU_ONLY
Construct an array from extents
Definition: amp.h:3892
_Array_view_base(const _Buffer_descriptor &_Buffer_desc, const Concurrency::extent< _Rank > &_Array_extent) __GPU
Definition: amp.h:1736
tiled_index(const tiled_index &_Other) __GPU
Copy Constructor.
Definition: amp.h:1131
unsigned int reversebits(unsigned int _X) __GPU_ONLY
Reverses the order of the bits in _X
Definition: amp.h:7507
void wait_with_all_memory_fence() const __GPU_ONLY
Blocks execution of all threads in a tile until all threads in the tile have reached this call...
Definition: amp.h:974
#define UINT_MAX
Definition: limits.h:36
extent() __GPU
Default constructor. The value at each dimension is initialized to zero.
Definition: amp.h:404
const unsigned int * _Get_view_extent() const
Definition: amprt.h:1610
static _AMPIMP const wchar_t cpu_accelerator[]
String constant for cpu accelerator
Definition: amprt.h:1035
extent< _Rank > & operator%=(int _Rhs) __GPU
Computes each element of this extent modulo an integer value.
Definition: amp.h:730
_Array_view_shape(int _Base_linear_offset, const Concurrency::extent< _Rank > &_Array_extent) __GPU
Definition: amp.h:1599
array_view & operator=(const array_view< _Value_type, _Rank > &_Other) __GPU
Copy Assignment operator. Shallow copy.
Definition: amp.h:3381
void _Initialize(Concurrency::accelerator_view _Av, access_type _Cpu_access_type) __CPU_ONLY
Definition: amp.h:5572
static void _Is_valid_section(const _T2< _Rank > &_Base_extent, const _T1< _Rank > &_Section_origin, const _T2< _Rank > &_Section_extent) __CPU_ONLY
Definition: xxamp.h:1106
int atomic_fetch_max(_Inout_ int *_Dest, int _Value) __GPU_ONLY
Atomically computes the maximum of _Value and the value of the memory location pointed to by _Dest...
Definition: amp.h:6836
void _Initialize(size_t _Src_data_size, bool _Discard_data=false) __CPU_ONLY
Definition: amp.h:3014
_Array_view_base< _Rank, sizeof(_Value_type)/sizeof(int)> _Base
Definition: amp.h:3036
_Iter_diff_t< _InIt > distance(_InIt _First, _InIt _Last)
Definition: xutility:1124
The tile_barrier class is a capability class that is only creatable by the system, and passed to a tiled parallel_for_each lambda as part of the tiled_index parameter. It provides wait methods whose purpose is to synchronize execution of threads running within the thread group (tile).
Definition: amp.h:948
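A hedged sketch of the barrier in use inside a tiled kernel, assuming av is an array_view<int, 1> whose extent is a multiple of 64 and that <amp.h> and the Concurrency namespace are in scope; each tile reverses its own 64 elements through tile_static storage.
parallel_for_each(av.extent.tile<64>(), [=](tiled_index<64> tidx) restrict(amp)
{
    tile_static int shared[64];
    shared[tidx.local[0]] = av[tidx.global];
    tidx.barrier.wait();                          // every thread in the tile reaches this point
    av[tidx.global] = shared[63 - tidx.local[0]]; // the tile_static writes above are now visible
});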
array(int _E0, int _E1, _InputIterator _Src_first, Concurrency::accelerator_view _Av, Concurrency::accelerator_view _Associated_Av) __CPU_ONLY
Construct a staging array between two associated accelerator_view, initialized from an iterator into ...
Definition: amp.h:4706
_AMPIMP _Event _Add_event(_Event _Ev)
Creates an event which is an ordered collection of this and _Ev
#define AS_UINT(v)
Definition: amp.h:6547
index< _Rank > & operator*=(int _Rhs) __GPU
Multiplies each element of this index with an integer value.
Definition: amp.h:258
_Ret_ _Value_type * data() __GPU
Returns a pointer to the raw data of this array.
Definition: amp.h:5500
index(int _I0, int _I1) __GPU
Constructor for index<2>
Definition: amp.h:110
void __dp_d3d_tile_static_memory_fence() __GPU_ONLY
extent< _Rank > & operator/=(int _Rhs) __GPU
Divides each element of this extent by an integer value.
Definition: amp.h:715
void _Initialize(Concurrency::accelerator_view _Av, Concurrency::accelerator_view _Associated_Av, _InputIterator _Src_first, _InputIterator _Src_last) __CPU_ONLY
Definition: amp.h:5628
array(int _E0, _InputIterator _Src_first, _InputIterator _Src_last) __CPU_ONLY
Construct an array initialized from a pair of iterators into a container.
Definition: amp.h:4179
int i[4]
Definition: dvec.h:68
std::enable_if< details::_Is_extent_or_index< _Tuple_type< _Rank > >::value, _Tuple_type< _Rank > >::type operator-(const _Tuple_type< _Rank > &_Lhs, const _Tuple_type< _Rank > &_Rhs) __GPU
Definition: amp.h:845
array_view(const Concurrency::extent< _Rank > &_Extent, _In_ _Value_type *_Src) __GPU
Construct an array_view which is bound to the data pointed to by _Src.
Definition: amp.h:3169
array_view< _Value_type, _Rank > section(const index< _Rank > &_Idx) __GPU
Produces a subsection of the source array with origin specified by an index, with an extent of (this-...
Definition: amp.h:5248
array(int _E0, int _E1, Concurrency::accelerator_view _Av, access_type _Cpu_access_type=access_type_auto) __CPU_ONLY
Construct an array from two integer extents, bound to a specific accelerator_view.
Definition: amp.h:4010
_Array_view_base() __GPU
Definition: amp.h:1695
index() __GPU
Default constructor, initializes all elements with 0.
Definition: amp.h:73
concurrency::completion_future synchronize_to_async(const accelerator_view &_Accl_view, access_type _Access_type=access_type_read) const __CPU_ONLY
Asynchronously synchronizes any modifications made to "this" array_view to the specified accelerator_...
Definition: amp.h:2861
array_view(const Concurrency::extent< _Rank > &_Extent, _Container &_Src) __CPU_ONLY
Construct an array_view which is bound to the data contained in the _Src container.
Definition: amp.h:2310
void copy_to(const array_view< _Value_type, _Rank > &_Dest) const __CPU_ONLY
Copies elements from this array_view to the destination array_view.
Definition: amp.h:3398
tiled_extent & operator=(const tiled_extent &_Other) __GPU
copy-assignment operator
Definition: amp.h:1302
int sign(int _X) __GPU_ONLY
Returns the sign of the argument
Definition: amp.h:7535
void direct3d_printf(const char *,...) __GPU_ONLY
_Access_mode _M_curr_cpu_access_mode
Definition: amprt.h:450
_Array_view_base(const Concurrency::extent< _Rank > &_Array_extent) __CPU_ONLY
Definition: amp.h:1794
#define _In_
Definition: sal.h:305
index< _Rank > & operator+=(const index< _Rank > &_Rhs) __GPU
Element-wise addition of this index with another index.
Definition: amp.h:198
void _Refresh_data_ptr(_Access_mode _Requested_mode, bool _Exception=true) __CPU_ONLY
Definition: amp.h:5708
index< _Rank > & operator%=(int _Rhs) __GPU
Computes each element of this index modulo an integer value.
Definition: amp.h:288
int _Base_linear_offset() const __GPU
Definition: amp.h:1571
std::enable_if< details::_Is_extent_or_index< _Tuple_type< _Rank > >::value, _Tuple_type< _Rank > >::type operator+(const _Tuple_type< _Rank > &_Lhs, const _Tuple_type< _Rank > &_Rhs) __GPU
Definition: amp.h:836
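A short sketch of these non-member operators (assuming <amp.h> and the Concurrency namespace are in scope): index and extent support element-wise arithmetic with each other and with scalars.
index<2>  i(1, 2);
index<2>  j = i + index<2>(3, 4);  // element-wise: (4, 6)
extent<2> e(8, 6);
extent<2> h = e / 2;               // scalar form: (4, 3)
bool same = (j == index<2>(4, 6)); // true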
unsigned int __dp_d3d_reversebitsu(unsigned int) __GPU_ONLY
array(const array_view< const _Value_type, _Rank > &_Src) __CPU_ONLY
Construct an array initialized from an array_view.
Definition: amp.h:4787
Concurrency::extent< _Rank > _M_view_extent
Definition: amp.h:1672
tiled_extent(const Concurrency::extent< rank > &_Other) __GPU
Constructs a new tiled_extent from the supplied extent.
Definition: amp.h:1290
void _Initialize() __GPU
Definition: amp.h:3805
int __dp_d3d_mini(int, int) __GPU_ONLY
float __dp_d3d_rcpf(float) __GPU_ONLY
#define _In_opt_
Definition: sal.h:306
static _Ret_ void * _Create_projection_buffer_shape(const _Buffer_descriptor &, int, int) __GPU_ONLY
Definition: amp.h:2204
unsigned int _Initialize() __CPU_ONLY
Definition: amp.h:5558
array_view< const _Value_type, _New_rank > view_as(const Concurrency::extent< _New_rank > &_View_extent) const __GPU
Produces an array_view of a different rank over this array's data.
Definition: amp.h:5474
tiled_extent(const tiled_extent &_Other) __GPU
Copy constructor. Constructs a new tiled_extent from the supplied argument "_Other".
Definition: amp.h:1383
void synchronize(access_type _Access_type=access_type_read) const __CPU_ONLY
Synchronizes any modifications made to "this" array_view to its source data.
Definition: amp.h:2934
tiled_extent< _Dim0 > tile() const __GPU
Produces a tiled_extent object with the tile extents given by _Dim0.
Definition: amp.h:537
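A hedged sketch of tile() combined with pad(), assuming av is an array_view<float, 1> over 1000 elements; the padded domain is guarded inside the kernel.
tiled_extent<256> t = av.extent.tile<256>().pad();   // pads 1000 up to 1024
parallel_for_each(t, [=](tiled_index<256> tidx) restrict(amp)
{
    if (tidx.global[0] < av.extent[0])               // skip the padded region
        av[tidx.global] *= 2.0f;
});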
bool _Has_cpu_access() const __CPU_ONLY
Definition: amp.h:5703
#define __CPU_ONLY
Definition: amprt.h:47
void _Unregister(bool _Throw_exception=true) __CPU_ONLY
Definition: amp.h:2056
void synchronize_to(const accelerator_view &_Accl_view, access_type _Access_type=access_type_read) const __CPU_ONLY
Synchronizes any modifications made to "this" array_view to the specified accelerator_view.
Definition: amp.h:2914
void _Initialize_multiplier() __GPU
Definition: amp.h:1660
void _Register() __CPU_ONLY
Definition: amp.h:5634
_Ret_ _Ubiquitous_buffer * _Get_buffer() const __CPU_ONLY
Definition: amp.h:5664
tiled_extent(const tiled_extent &_Other) __GPU
Copy constructor. Constructs a new tiled_extent from the supplied argument "_Other".
Definition: amp.h:1465
array(int _E0, _InputIterator _Src_first) __CPU_ONLY
Construct an array initialized from an iterator.
Definition: amp.h:4196
int value_type
Definition: amp.h:68
__declspec(deprecated("Concurrency::EnableTracing is a deprecated function.")) _CONCRTIMP HRESULT __cdecl EnableTracing()
Enables tracing in the Concurrency Runtime. This function is deprecated because ETW tracing is now on...
array_view< _Value_type, _Rank > section(const Concurrency::extent< _Rank > &_Ext) __GPU
Produces a subsection of the source array_view with origin of zero, with an extent of _Ext...
Definition: amp.h:5218
tiled_index< _Dim0, _Dim1, _Dim2 > _map_index(const index< rank > &_Local, const index< rank > &_Tile, const index< rank > &_Global, tile_barrier &_Barrier) const __GPU
Definition: amp.h:1351
Exception thrown due to a C++ AMP runtime_exception. This is the base type for all C++ AMP exception ...
Definition: amprt_exceptions.h:29
array_view section(int _I0, int _I1, int _I2, int _E0, int _E1, int _E2) const __GPU
Produces a three-dimensional subsection of the source array_view with origin specified by the index c...
Definition: amp.h:2790
_CPP_AMP_VERIFY_RANK(_Rank, extent)
Concurrency::extent< _Rank > _M_extent
Definition: amp.h:5749
extent< _Rank > operator+(const index< _Rank > &_Rhs) const __GPU
Element-wise addition of this extent with an index.
Definition: amp.h:578
static void _Is_valid_projection(int _I, const _T1< _Rank > &_Base_extent) __CPU_ONLY
Definition: xxamp.h:1131
array(const Concurrency::extent< _Rank > &_Extent, Concurrency::accelerator_view _Av, access_type _Cpu_access_type=access_type_auto) __CPU_ONLY
Construct an array from extents, bound to a specific accelerator_view.
Definition: amp.h:3962
_Event _Get_access_async(_Access_mode _Mode, _Buffer_ptr &_Buf_ptr, bool _Zero_copy_cpu_access=false) const __CPU_ONLY
Definition: amp.h:5669
struct Concurrency::details::_Buffer_descriptor _Buffer_descriptor
extent< _Rank > & operator+=(const extent< _Rank > &_Rhs) __GPU
Element-wise addition of this extent with another extent.
Definition: amp.h:610
float saturate(float _X) __GPU_ONLY
Clamps _X within the range of 0 to 1
Definition: amp.h:7521
_AMPIMP accelerator __cdecl _Select_default_accelerator()
void parallel_for_each(const extent< _Rank > &_Compute_domain, const _Kernel_type &_Kernel)
Invokes a parallel computation of a kernel function over a compute domain on an accelerator_view. The accelerator_view is determined from the arrays and/or array_views captured by the kernel function, or if no accelerator_view can be derived, the default is chosen.
Definition: amp.h:7020
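A minimal sketch of invoking parallel_for_each over an array_view; the function name and the host vector are assumptions for illustration.
#include <amp.h>
#include <vector>
using namespace concurrency;

void square_all(std::vector<int>& data)
{
    array_view<int, 1> av(static_cast<int>(data.size()), data);
    parallel_for_each(av.extent, [=](index<1> idx) restrict(amp)
    {
        av[idx] *= av[idx];            // one thread per point of the compute domain
    });
    av.synchronize();                   // write the results back to data
}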
unsigned int _Get_linear_offset() const
Definition: amprt.h:1596
_Ret_ void * _Access(_Access_mode _Requested_mode, const index< _Rank > &_Index) const __CPU_ONLY
Definition: amp.h:1882
array(int _E0, int _E1, int _E2, _InputIterator _Src_first, _InputIterator _Src_last) __CPU_ONLY
Construct an array initialized from an iterator.
Definition: amp.h:4269
_Array_view_base(const _Buffer_descriptor &_Buffer_desc, int _Base_linear_offset, const Concurrency::extent< _Rank > &_Array_extent) __GPU
Definition: amp.h:1745
_Array_flatten_helper< _Rank, typename Concurrency::extent< _Rank >::value_type, typename Concurrency::index< _Rank >::value_type > _Flatten_helper
Definition: amp.h:1555
_Ret_ IUnknown * get_buffer(const array< _Value_type, _Rank > &_Array) __CPU_ONLY
Get the D3D buffer interface underlying an array.
Definition: amp.h:6494
tiled_extent() __GPU
Default constructor.
Definition: amp.h:1372
static void func(_RES_EXT &_ResArrayExtent, const _SRC_EXT &_SrcArrayExtent, _RES_EXT &_ResArrayMultiplier, const _SRC_EXT &_SrcArrayMultiplier, _RES_IDX &_ResViewOffset, const _SRC_IDX &_SrcViewOffset, _RES_EXT &_ResViewExtent, const _SRC_EXT &_SrcViewExtent) __GPU
Definition: xxamp.h:768
array_view< const _Value_type, 1 > section(int _I0, int _E0) const __GPU
Produces a one-dimensional subsection of the source array with origin specified by the index componen...
Definition: amp.h:5302
void synchronize_to(const accelerator_view &_Accl_view) const __CPU_ONLY
Synchronizes any modifications made to "this" array_view to the specified accelerator_view.
Definition: amp.h:3737
_AMPIMP _Buffer_ptr _Get_master_buffer() const
extent< _Rank > & operator+=(const index< _Rank > &_Rhs) __GPU
Element-wise addition of this extent with an index.
Definition: amp.h:640
const _Value_type * data() const __GPU
Returns a pointer to the raw data of this array_view.
Definition: amp.h:3672
array_view< const _Value_type, _Rank > section(const index< _Rank > &_Idx) const __GPU
Produces a subsection of the source array with origin specified by an index, with an extent of (this-...
Definition: amp.h:5264
tiled_index(const tiled_index &_Other) __GPU
Copy Constructor.
Definition: amp.h:1246
int atomic_fetch_xor(_Inout_ int *_Dest, int _Value) __GPU_ONLY
Performs an atomic bitwise XOR of _Value with the memory location pointed to by _Dest ...
Definition: amp.h:6981
tiled_extent(const Concurrency::extent< rank > &_Other) __GPU
Constructs a new tiled_extent from the supplied extent.
Definition: amp.h:1377
array_view(int _E0, int _E1, int _E2, _In_ _Value_type *_Src) __GPU
Construct an array_view which is bound to the data pointed to by _Src.
Definition: amp.h:2525
array_view section(const Concurrency::extent< _Rank > &_Ext) const __GPU
Produces a subsection of the source array_view with origin of zero, with an extent of _Ext...
Definition: amp.h:2716
_Tiled_index_base(const _Tiled_index_base &_Other) __GPU
Copy Constructor.
Definition: amp.h:1067
array(int _E0, int _E1, int _E2, _InputIterator _Src_first, _InputIterator _Src_last, Concurrency::accelerator_view _Av, Concurrency::accelerator_view _Associated_Av) __CPU_ONLY
Construct a staging array between two associated accelerator_view, initialized from a pair of iterato...
Definition: amp.h:4741
void copy_to(const array_view< _Value_type, _Rank > &_Dest) const __CPU_ONLY
Copies elements from this array to the destination array_view.
Definition: amp.h:4922
_Ret_ _Ubiquitous_buffer * _Get_buffer(const _Array_type &_Array) __CPU_ONLY
Definition: xxamp.h:1069
unsigned int umin(unsigned int _X, unsigned int _Y) __GPU_ONLY
Determine the minimum numeric value of the arguments
Definition: amp.h:7371
array(const Concurrency::extent< _Rank > &_Extent, _InputIterator _Src_first, _InputIterator _Src_last, Concurrency::accelerator_view _Av, Concurrency::accelerator_view _Associated_Av) __CPU_ONLY
Construct a staging array between two associated accelerator_view, initialized from a pair of iterato...
Definition: amp.h:4576
_AMPIMP void _Register_view_copy(_In_ _View_key _New_view_key, _In_ _View_key _Existing_view_key)
_Tiled_index_base(const index< rank > &_Global, const index< rank > &_Local, const index< rank > &_Tile, const index< rank > &_Tile_origin, const tile_barrier &_Barrier) __GPU
A Constructor that initializes data members using the given values.
Definition: amp.h:1053
unsigned int countbits(unsigned int _X) __GPU_ONLY
Counts the number of set bits in _X
Definition: amp.h:7275
void copy_to(array< _Value_type, _Rank > &_Dest) const __CPU_ONLY
Copies elements from this array_view to the destination array.
Definition: amp.h:2544
array(const Concurrency::extent< _Rank > &_Extent, _InputIterator _Src_first, _InputIterator _Src_last) __CPU_ONLY
Construct an array initialized from a pair of iterators into a container.
Definition: amp.h:4142
_Array_view_base(const _Buffer_descriptor &_Buffer_desc, int _Base_linear_offset, const Concurrency::extent< _Rank > &_Array_extent, const Concurrency::index< _Rank > &_View_offset, const Concurrency::extent< _Rank > &_View_extent) __CPU_ONLY
Definition: amp.h:1754
unsigned int mad(unsigned int _X, unsigned int _Y, unsigned int _Z) __GPU_ONLY
Performs an arithmetic multiply/add operation on three arguments: _X * _Y + _Z
Definition: amp.h:7451
tiled_extent< _Dim0, _Dim1, _Dim2 > tile() const __GPU
Produces a tiled_extent object with the tile extents given by _Dim0, _Dim1, _Dim2.
Definition: amp.h:559
~array_view() __GPU
Destroys this array_view and reclaims resources.
Definition: amp.h:3058
int __dp_d3d_absi(int) __GPU_ONLY
#define false
Definition: stdbool.h:16
array_view< const _Value_type2, _Rank > reinterpret_as() const __GPU
Produces a (possibly unsafe) reinterpretation of this array_view that is linear and with a different ...
Definition: amp.h:3650
void direct3d_abort() __GPU_ONLY
void __dp_d3d_all_memory_fence() __GPU_ONLY
extent(int _I) __GPU
Constructor for extent<1>.
Definition: amp.h:426
unsigned int __dp_d3d_interlocked_and(_Inout_ unsigned int *, unsigned int) __GPU_ONLY
array(int _E0, int _E1, int _E2, _InputIterator _Src_first, _InputIterator _Src_last, Concurrency::accelerator_view _Av, access_type _Cpu_access_type=access_type_auto) __CPU_ONLY
Construct an array initialized from a pair of iterators into a container, bound to a specific acceler...
Definition: amp.h:4514
array_view(_In_ _Value_type(&_Src)[_Size]) __GPU
Construct an array_view which is bound to the array _Src.
Definition: amp.h:2482
array_view(const Concurrency::extent< _Rank > &_Extent, _Container &_Src) __CPU_ONLY
Construct an array_view which is bound to the data contained in the _Src container.
Definition: amp.h:3136
void copy_to(array< _Value_type, _Rank > &_Dest) const __CPU_ONLY
Copies elements from this array to the destination array.
Definition: amp.h:4914
_AMPIMP void _Unregister_view(_In_ _View_key _Key)
void __dp_d3d_all_memory_fence_with_tile_barrier() __GPU_ONLY
array_view(int _E0, int _E1, const _Value_type *_Src) __GPU
Construct an array_view which is bound to the data pointed to by _Src.
Definition: amp.h:3279
const tile_barrier barrier
An object which represents a barrier within the current tile of threads.
Definition: amp.h:1033
_Ret_ _View_key _Get_view_key()
Definition: amprt.h:537
const index< rank > tile_origin
An index that represents the global coordinates of the origin of the current tile within a tiled_exte...
Definition: amp.h:1028
array_view section(const Concurrency::index< _Rank > &_Section_origin, const Concurrency::extent< _Rank > &_Section_extent) const __GPU
Produces a subsection of the source array_view at the given origin and extent.
Definition: amp.h:3532
array_view section(const Concurrency::extent< _Rank > &_Ext) const __GPU
Produces a subsection of the source array_view with origin of zero, with an extent of _Ext...
Definition: amp.h:3547
Concurrency::extent< _Rank > _M_array_extent
Definition: amp.h:1668
accelerator_view get_source_accelerator_view() const
Returns the accelerator_view where the data source of the array_view is located. If the array_view do...
Definition: amp.h:3770
The extent type represents an N-dimensional vector of int which specifies the bounds of an N-dimen...
Definition: amp.h:383
void _Register(_In_opt_ const _View_key _Source_view_key=nullptr) __CPU_ONLY
Definition: amp.h:2007
~_Array_view_base() __GPU
Definition: amp.h:1687
_Array_view_base< _Rank, sizeof(_Value_type)/sizeof(int)> _Base
Definition: amp.h:2238
An array is a multi-dimensional data aggregate on an accelerator_view.
Definition: amp.h:3865
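A hedged sketch of the array container, assuming a host vector as the source: the array owns accelerator storage, so data moves through explicit copies rather than implicit synchronization.
#include <amp.h>
#include <vector>
using namespace concurrency;

void double_on_accelerator(std::vector<float>& host)
{
    array<float, 1> a(static_cast<int>(host.size()), host.begin(), host.end()); // deep copy to the accelerator
    parallel_for_each(a.extent, [&a](index<1> idx) restrict(amp)
    {
        a[idx] *= 2.0f;                 // arrays are captured by reference in the kernel
    });
    copy(a, host.begin());              // explicit copy back to the host
}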
_Array_flatten_helper< _Rank, typename Concurrency::extent< _Rank >::value_type, typename Concurrency::index< _Rank >::value_type > _Flatten_helper
Definition: amp.h:1678
void direct3d_errorf(const char *,...) __GPU_ONLY
void discard_data() const __CPU_ONLY
Discards the current data underlying this view. This is an optimization hint to the runtime used to a...
Definition: amp.h:2959
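A sketch of discard_data on a write-only output view; n and results are assumed host-side names.
array_view<float, 1> out(n, results);
out.discard_data();                    // hint: the current host contents need not be copied in
parallel_for_each(out.extent, [=](index<1> idx) restrict(amp)
{
    out[idx] = 0.0f;
});
out.synchronize();                     // results now holds the kernel's output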
#define AS_INT(v)
Definition: amp.h:6548
extent(int _I0, int _I1, int _I2) __GPU
Constructor for extent<3>
Definition: amp.h:460
int abs(int _X) __GPU_ONLY
Returns the absolute value of the argument
Definition: amp.h:7221
index(const int _Array[_Rank]) __GPU
Constructs an index with the coordinate values provided by the array of int component values...
Definition: amp.h:145
void _Unregister(bool=true) __GPU_ONLY
Definition: amp.h:2200
array_view(const _Container &_Src, typename std::enable_if< details::_Is_container< _Container >::type::value, void ** >::type=0) __CPU_ONLY
Construct an array_view which is bound to the data contained in the _Src container; ...
Definition: amp.h:3116
int __dp_d3d_madi(int, int, int) __GPU_ONLY
_AMPIMP ULONG _Launch_async_copy_event_helper(const _Buffer_descriptor &_Src, const _Buffer_descriptor &_Dest, ULONGLONG _Num_bytes_for_copy)
void _Register_copy(const array &_Other) __CPU_ONLY
Definition: amp.h:5651
int value_type
Definition: amp.h:398
unsigned int __dp_d3d_interlocked_min_uint(_Inout_ unsigned int *, unsigned int) __GPU_ONLY
index(const index< _Rank > &_Other) __GPU
Copy Constructor.
Definition: amp.h:84
array(int _E0, int _E1, int _E2, _InputIterator _Src_first) __CPU_ONLY
Construct an array initialized from an iterator.
Definition: amp.h:4292
void * _M_data_ptr
Definition: amprt.h:438
array(const Concurrency::extent< _Rank > &_Extent, Concurrency::accelerator_view _Av, Concurrency::accelerator_view _Associated_Av) __CPU_ONLY
Construct a staging array between two associated accelerator_view.
Definition: amp.h:4058
_Array_view_shape(int _Base_linear_offset, const Concurrency::extent< _Rank > &_Array_extent, const Concurrency::index< _Rank > &_Section_origin, const Concurrency::extent< _Rank > &_Section_extent) __GPU
Definition: amp.h:1609
unsigned int __dp_d3d_interlocked_exchange(_Inout_ unsigned int *, unsigned int) __GPU_ONLY
array_view() __GPU
Definition: amp.h:3796
_Ret_ _View_shape * _Get_buffer_view_shape(const _Buffer_descriptor &_Descriptor)
Definition: amprt.h:3395
array_view & operator=(const array_view &_Other) __GPU
Copy Assignment operator. Shallow copy.
Definition: amp.h:3372
int imin(int _X, int _Y) __GPU_ONLY
Determine the minimum numeric value of the arguments
Definition: amp.h:7337
array(int _E0, int _E1, int _E2, Concurrency::accelerator_view _Av, Concurrency::accelerator_view _Associated_Av) __CPU_ONLY
Construct a staging array between two associated accelerator_view.
Definition: amp.h:4123
_Access_mode
Definition: amprt.h:88
array(int _E0, int _E1, _InputIterator _Src_first, _InputIterator _Src_last) __CPU_ONLY
Construct an array initialized from a pair of iterators into a container.
Definition: amp.h:4222
tiled_extent() __GPU
Default constructor.
Definition: amp.h:1454
array(int _E0, _InputIterator _Src_first, Concurrency::accelerator_view _Av, Concurrency::accelerator_view _Associated_Av)
Construct a staging array between two associated accelerator_view, initialized from an iterator into ...
Definition: amp.h:4648
const _Buffer_descriptor & _Get_buffer_descriptor(const _Array_type &_Array) __GPU
Definition: xxamp.h:1063
array_view(const array_view &_Other, const Concurrency::index< _Rank > &_Section_origin, const Concurrency::extent< _Rank > &_Section_extent) __GPU
Definition: amp.h:2996
const index< rank > local
An index that represents the relative index within the current tile of a tiled_extent.
Definition: amp.h:1018
array(int _E0, int _E1, _InputIterator _Src_first, _InputIterator _Src_last, Concurrency::accelerator_view _Av, Concurrency::accelerator_view _Associated_Av) __CPU_ONLY
Construct a staging array between two associated accelerator_view, initialized from a pair of iterato...
Definition: amp.h:4680
constexpr auto data(_Container &_Cont) -> decltype(_Cont.data())
Definition: xutility:1513
float radians(float _X) __GPU_ONLY
Converts _X from degrees to radians
Definition: amp.h:7479
_AMPIMP ULONG _Launch_array_view_synchronize_event_helper(const _Buffer_descriptor &_Buff_desc)
extent< _Rank > operator++(int) __GPU
Post-increments each element of this extent.
Definition: amp.h:754
_AMPIMP ULONG _Start_copy_event_helper(const _Buffer_descriptor &_Src, const _Buffer_descriptor &_Dest, ULONGLONG _Num_bytes_for_copy)
float smoothstep(float _Min, float _Max, float _X) __GPU_ONLY
Returns a smooth Hermite interpolation between 0 and 1, if _X is in the range [_Min, _Max].
Definition: amp.h:7555
tiled_extent pad() const __GPU
Returns a new tiled_extent with extents adjusted up to be evenly divisible by the tile dimensions...
Definition: amp.h:1490
_Array_view_base(_In_ void *_Data, const Concurrency::extent< _Rank > &_Array_extent) __CPU_ONLY
Definition: amp.h:1805
tiled_index(const index< rank > &_Global, const index< rank > &_Local, const index< rank > &_Tile, const index< rank > &_Tile_origin, const tile_barrier &_Barrier) __GPU
A Constructor that initializes data members using the given values.
Definition: amp.h:1117
int clamp(int _X, int _Min, int _Max) __GPU_ONLY
Clamps _X to the specified _Min and _Max range
Definition: amp.h:7261
array_view(int _E0, _In_ _Value_type *_Src) __GPU
Construct an array_view which is bound to the data pointed to by _Src.
Definition: amp.h:2469
tiled_extent< _Dim0, _Dim1 > tile() const __GPU
Produces a tiled_extent object with the tile extents given by _Dim0, _Dim1
Definition: amp.h:548
_Array_view_shape & operator=(const _Array_view_shape &_Other) __GPU
Definition: amp.h:1623
_Ret_ _Value_type * data() const __GPU
Returns a pointer to the raw data of this array_view.
Definition: amp.h:2826
extent< _Rank > & operator-=(int _Rhs) __GPU
Subtracts an integer value from each element of this extent.
Definition: amp.h:685
int operator[](unsigned int _Index) const __GPU
Index operator.
Definition: amp.h:499
_Array_view_base(const _Buffer_descriptor &_Buffer_desc, const Concurrency::extent< _Rank > &_Array_extent, const Concurrency::index< _Rank > &_Section_origin, const Concurrency::extent< _Rank > &_Section_extent) __GPU
Definition: amp.h:1784
#define _CPP_AMP_VERIFY_MUTABLE_ITERATOR(_Type_name)
Definition: xxamp.h:27
void copy(const array< _Value_type, _Rank > &_Src, array< _Value_type, _Rank > &_Dest)
Copies the contents of the source array into the destination array.
Definition: amp.h:6008
int _M_total_linear_offset
Definition: amp.h:1671
array_view(int _E0, const _Value_type *_Src) __GPU
Construct an array_view which is bound to the data pointed to by _Src.
Definition: amp.h:3259
array_view(int _E0, const _Container &_Src) __CPU_ONLY
Construct an array_view which is bound to the data contained in the _Src container.
Definition: amp.h:3184
float __dp_d3d_saturatef(float) __GPU_ONLY
~array() __CPU_ONLY noexcept(false)
Destroys this array and reclaims resources.
Definition: amp.h:5525
A tiled_extent is an extent of 1 to 3 dimensions which also subdivides the extent space into 1-...
Definition: amp.h:1274
int _M_base[_Rank]
Definition: amp.h:359
int atomic_fetch_and(_Inout_ int *_Dest, int _Value) __GPU_ONLY
Performs an atomic bitwise AND of _Value with the memory location pointed to by _Dest ...
Definition: amp.h:6908
void advance(_InIt &_Where, _Diff _Off)
Definition: xutility:1089
array_view< const _Value_type, _Rank > section(const Concurrency::index< _Rank > &_Section_origin, const Concurrency::extent< _Rank > &_Section_extent) const __GPU
Produces a subsection of the source array at the given origin and extent.
Definition: amp.h:5202
array_view(int _E0, int _E1, const _Container &_Src) __CPU_ONLY
Construct an array_view which is bound to the data contained in the _Src container.
Definition: amp.h:3218
extent(int _I0, int _I1) __GPU
Constructor for extent<2>
Definition: amp.h:441
array(int _E0, int _E1, _InputIterator _Src_first, Concurrency::accelerator_view _Av, access_type _Cpu_access_type=access_type_auto) __CPU_ONLY
Construct an array initialized from an iterator into a container, bound to a specific accelerator_vie...
Definition: amp.h:4475
_AMPIMP ULONG _Start_array_view_synchronize_event_helper(const _Buffer_descriptor &_Buff_desc)
array_view(int _E0, int _E1, int _E2, const _Value_type *_Src) __GPU
Construct an array_view which is bound to the data pointed to by _Src.
Definition: amp.h:3302
array_view< const _Value_type2, 1 > reinterpret_as() const __GPU
Produces a (possibly unsafe) reinterpretation of this array that is linear and with a different eleme...
Definition: amp.h:5439
int _Calculate_reinterpreted_size(int _Old_size) __GPU_ONLY
Definition: amp.h:1527
array_view(_Buffer_descriptor &_Src_buffer, const Concurrency::extent< _Rank > &_Extent) __GPU
Definition: amp.h:3002
void _Unregister() __CPU_ONLY
Definition: amp.h:5656
array(int _E0, int _E1, int _E2, _InputIterator _Src_first, Concurrency::accelerator_view _Av, access_type _Cpu_access_type=access_type_auto) __CPU_ONLY
Construct an array initialized from an iterator into a container, bound to a specific accelerator_vie...
Definition: amp.h:4547
array_view section(int _I0, int _E0) const __GPU
Produces a one-dimensional subsection of the source array_view with origin specified by the index com...
Definition: amp.h:3580
std::enable_if< details::_Is_extent_or_index< _Tuple_type< _Rank > >::value, bool >::type operator!=(const _Tuple_type< _Rank > &_Lhs, const _Tuple_type< _Rank > &_Rhs) __GPU
Definition: amp.h:829
int atomic_fetch_sub(_Inout_ int *_Dest, int _Value) __GPU_ONLY
Performs an atomic subtraction of _Value from the memory location pointed to by _Dest ...
Definition: amp.h:6599
array(const Concurrency::extent< _Rank > &_Extent, _InputIterator _Src_first, Concurrency::accelerator_view _Av, Concurrency::accelerator_view _Associated_Av) __CPU_ONLY
Construct a staging array between two associated accelerator_view, initialized from an iterator into ...
Definition: amp.h:4598
details::_Projection_result_type< _Value_type, _Rank >::_Const_result_type operator()(int _I) const __GPU
Projects the most-significant dimension of this array. If the array rank is 1, this produces a single...
Definition: amp.h:5167
_Event _Copy_async_impl(const array_view< _Value_type, _Rank > &_Src, OutputIterator _DestIter)
Definition: amp.h:5933
void copy_to(const array_view< _Value_type, _Rank > &_Dest) const __CPU_ONLY
Copies elements from this array_view to the destination array_view.
Definition: amp.h:2552
int __dp_d3d_firstbithighi(int) __GPU_ONLY
details::_Projection_result_type< _Value_type, _Rank >::_Result_type operator()(int _I) __GPU
Projects the most-significant dimension of this array. If the array rank is 1, this produces a single...
Definition: amp.h:5152
_Ret_ void * _Access(const index< _Rank > &_Index) const __GPU
Definition: amp.h:1876
#define AS_UINT_PTR(p)
Definition: amp.h:6546
bool atomic_compare_exchange(_Inout_ int *_Dest, _Inout_ int *_Expected_value, int _Value) __GPU_ONLY
Atomically, compares the value pointed to by _Dest for equality with that pointed to by _Expected_val...
Definition: amp.h:6775
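A hedged sketch of the compare-exchange contract inside restrict(amp) code; dest is assumed to point at an element of an array_view<int> and val is a thread-local int (atomic_fetch_max already exists, this loop only illustrates the retry pattern).
int observed = *dest;
while (observed < val && !atomic_compare_exchange(dest, &observed, val))
{
    // on failure, observed has been refreshed with the current value; re-test and retry
}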
array_view(int _E0, int _E1, _In_ _Value_type *_Src) __GPU
Construct an array_view which is bound to the data pointed to by _Src.
Definition: amp.h:3339
_Ret_ _View_shape * _Create_reinterpreted_shape(const _View_shape *_Source_shape, size_t _Curr_elem_size, size_t _New_elem_size)
Definition: amprt.h:1885
extent(const extent< _Rank > &_Other) __GPU
Copy constructor. Constructs a new extent from the supplied argument _Other.
Definition: amp.h:415
unsigned int __dp_d3d_interlocked_compare_exchange(_Inout_ unsigned int *, unsigned int, unsigned int) __GPU_ONLY
int atomic_fetch_inc(_Inout_ int *_Dest) __GPU_ONLY
Performs an atomic increment to the memory location pointed to by _Dest
Definition: amp.h:6643
void global_memory_fence(const tile_barrier &_Barrier) __GPU_ONLY
Ensures that global memory accesses are visible to other threads in the thread tile, and are executed according to program order
Definition: amp.h:7189
_Array_view_base(const _Buffer_descriptor &_Buffer_desc, const _Array_view_shape< _Rank, _Element_size > &_Shape) __GPU
Definition: amp.h:1697
void copy_to(array< _Value_type, _Rank > &_Dest) const __CPU_ONLY
Copies elements from this array_view to the destination array.
Definition: amp.h:3390
array_view section(const Concurrency::index< _Rank > &_Section_origin, const Concurrency::extent< _Rank > &_Section_extent) const __GPU
Produces a subsection of the source array_view at the given origin and extent.
Definition: amp.h:2686
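A small sketch of a two-dimensional section, assuming v is a std::vector<int> with 64 elements; origin and extent are in element coordinates of the source view.
array_view<int, 2> av(8, 8, v);
array_view<int, 2> inner = av.section(index<2>(2, 2), extent<2>(4, 4));
inner(0, 0) = 1;                       // writes the same element as av(2, 2)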
array(int _E0, int _E1) __CPU_ONLY
Construct an array from two integer extents.
Definition: amp.h:3920
_Array_view_base _Section(const Concurrency::index< _Rank > &_Section_origin, const Concurrency::extent< _Rank > &_Section_extent) const __GPU
Definition: amp.h:1897
void _Initialize(Concurrency::accelerator_view _Av, _InputIterator _Src_first, _InputIterator _Src_last, access_type _Cpu_access_type) __CPU_ONLY
Definition: amp.h:5586
array(int _E0, int _E1, Concurrency::accelerator_view _Av, Concurrency::accelerator_view _Associated_Av) __CPU_ONLY
Construct a staging array between two associated accelerator_view.
Definition: amp.h:4098
void _Parallel_for_each(_In_ _Host_Scheduling_info *_Sch_info, extent< _Rank > _Compute_domain, const _Kernel_type &_F)
static _Projection_result_type< _T, _R >::_Result_type _Project0(_In_ array< _T, _R > *_Array, int _I) __GPU
Definition: xxamp_inl.h:73
array(int _E0, _InputIterator _Src_first, _InputIterator _Src_last, Concurrency::accelerator_view _Av, Concurrency::accelerator_view _Associated_Av) __CPU_ONLY
Construct a staging array between two associated accelerator_view, initialized from a pair of iterato...
Definition: amp.h:4625
_Ret_ void * _Access(_Access_mode, const index< _Rank > &_Index) const __GPU_ONLY
Definition: amp.h:1892
#define AS_FLOAT(v)
Definition: amp.h:6549
array_view(int _E0, _In_ _Value_type *_Src) __GPU
Construct an array_view which is bound to the data pointed to by _Src.
Definition: amp.h:3319
int _M_base[_Rank]
Definition: amp.h:806
_AMPIMP _Event __cdecl _Copy_impl(_In_ _Buffer *_Src, size_t _Src_offset, _Out_ _Buffer *_Dst, size_t _Dest_offset, size_t _Num_elems, size_t _Preferred_copy_chunk_num_elems=0)
integral_constant< bool, true > true_type
Definition: xtr1common:40
index< _Rank > & operator=(const index< _Rank > &_Other) __GPU
copy-assignment operators
Definition: amp.h:153
Definition: type_traits:931
extent< _Rank > & operator=(const extent< _Rank > &_Other) __GPU
copy-assignment operator
Definition: amp.h:484
index(int _I) __GPU
Constructor for index<1>
Definition: amp.h:95
Define an N-dimensional index point; which may also be viewed as a vector based at the origin in N-sp...
Definition: amp.h:53
array_view(int _E0, int _E1, int _E2, _In_ _Value_type *_Src) __GPU
Construct an array_view which is bound to the data pointed to by _Src.
Definition: amp.h:3362
#define _Inout_
Definition: sal.h:375
void _Register(_In_ void *_Shape) __CPU_ONLY
Definition: amp.h:2031
_Array_view_base(const _Array_view_base &_Other) __GPU
Definition: amp.h:1706
exception_ptr current_exception() _NOEXCEPT
Definition: exception:359
array(const Concurrency::extent< _Rank > &_Extent, _InputIterator _Src_first) __CPU_ONLY
Construct an array initialized from an iterator.
Definition: amp.h:4158
tiled_index(const index< rank > &_Global, const index< rank > &_Local, const index< rank > &_Tile, const index< rank > &_Tile_origin, const tile_barrier &_Barrier) __GPU
A Constructor that initializes data members using the given values.
Definition: amp.h:1232
void _Register(_In_ void *) __GPU_ONLY
Definition: amp.h:2196
array & operator=(const array_view< const _Value_type, _Rank > &_Src) __CPU_ONLY
Assignment operator from an array_view
Definition: amp.h:4905
static _AMPIMP accelerator_view __cdecl get_auto_selection_view()
Returns the auto selection accelerator_view which when specified as the parallel_for_each target resu...
concurrency::completion_future synchronize_async() const __CPU_ONLY
Asynchronously synchronizes any modifications made to "this" array_view to its source data...
Definition: amp.h:3715
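A hedged sketch, assuming av is an array_view written by a kernel and that the Concurrency namespace is in scope; the returned completion_future lets host work overlap the copy-back.
completion_future f = av.synchronize_async();    // starts the copy back without blocking
do_other_host_work();                            // hypothetical overlapping host work
f.get();                                         // block only when the data is needed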
tiled_extent truncate() const __GPU
Returns a new tiled_extent with extents adjusted down to be evenly divisible by the tile dimensions...
Definition: amp.h:1333
array(int _E0, int _E1, _InputIterator _Src_first, _InputIterator _Src_last, Concurrency::accelerator_view _Av, access_type _Cpu_access_type=access_type_auto) __CPU_ONLY
Construct an array initialized from a pair of iterators into a container, bound to a specific acceler...
Definition: amp.h:4445
_Ret_ _Ubiquitous_buffer * _Get_buffer_ptr() const __CPU_ONLY
Definition: amprt.h:503
int operator[](unsigned _Index) const __GPU
Index operator.
Definition: amp.h:168
static _Projection_result_type< _T, _R >::_Const_result_type _Project0(const array_view< const _T, _R > *_Arr_view, int _I) __GPU
Definition: xxamp_inl.h:33
void _Initialize(size_t _Src_data_size) __CPU_ONLY
Definition: amp.h:3811
void __dp_d3d_device_memory_fence_with_tile_barrier() __GPU_ONLY
_Access_mode _Get_synchronize_access_mode(access_type cpu_access_type)
Definition: amprt.h:1927
extent< _Rank > & operator-=(const extent< _Rank > &_Rhs) __GPU
Element-wise subtraction of this extent with another extent.
Definition: amp.h:625
_Array_view_base(const void *_Data, const Concurrency::extent< _Rank > &_Array_extent) __GPU_ONLY
Definition: amp.h:1843
extent< _Rank > & operator-=(const index< _Rank > &_Rhs) __GPU
Element-wise subtraction of this extent with an index.
Definition: amp.h:655
void wait_with_global_memory_fence() const __GPU_ONLY
Blocks execution of all threads in a tile until all threads in the tile have reached this call...
Definition: amp.h:983
Definition: type_traits:950
_Value_type value_type
Definition: amp.h:3884
Concurrency::index< _Rank > _M_view_offset
Definition: amp.h:1670
Class represents an accelerator abstraction for C++ AMP data-parallel devices
Definition: amprt.h:1013
std::enable_if< details::_Is_extent_or_index< _Tuple_type< _Rank > >::value, _Tuple_type< _Rank > >::type operator%(const _Tuple_type< _Rank > &_Lhs, typename _Tuple_type< _Rank >::value_type _Rhs) __GPU
Definition: amp.h:926
unsigned int __dp_d3d_interlocked_max_uint(_Inout_ unsigned int *, unsigned int) __GPU_ONLY
const _Value_type value_type
Definition: amp.h:3053
void __dp_d3d_device_memory_fence() __GPU_ONLY
_Array_view_base & operator=(const _Array_view_base &_Other) __GPU
Definition: amp.h:1856
tiled_index< _Dim0 > _map_index(const index< rank > &_Local, const index< rank > &_Tile, const index< rank > &_Global, tile_barrier &_Barrier) const __GPU
Definition: amp.h:1515
#define INT_MAX
Definition: limits.h:35
An array_view is an N-dimensional view over data held in another container (such as array
Definition: amp.h:2236
array_view(int _E0, int _E1) __CPU_ONLY
Construct an array_view which is not bound to a data source.
Definition: amp.h:2390
void wait_with_tile_static_memory_fence() const __GPU_ONLY
Blocks execution of all threads in a tile until all threads in the tile have reached this call...
Definition: amp.h:992
array(int _E0, int _E1, _InputIterator _Src_first) __CPU_ONLY
Construct an array initialized from an iterator.
Definition: amp.h:4242
array_view(_Container &_Src, typename std::enable_if< details::_Is_container< _Container >::type::value, void ** >::type=0) __CPU_ONLY
Construct an array_view which is bound to the data contained in the _Src container.
Definition: amp.h:2353
array_view< _Value_type, 2 > section(int _I0, int _I1, int _E0, int _E1) __GPU
Produces a two-dimensional subsection of the source array with origin specified by the index componen...
Definition: amp.h:5327
#define __GPU_ONLY
Definition: amprt.h:46
_Array_view_base _Section(const index< _Rank > &_Idx) const __GPU
Definition: amp.h:1907
void _Project0(int _I, _Array_view_shape< _Rank-1, _Element_size > &_Projected_shape) const __GPU
Definition: amp.h:1633
array_view< _Value_type, _New_rank > view_as(const Concurrency::extent< _New_rank > &_View_extent) const __GPU
Produces an array_view of a different rank over this array_view's data.
Definition: amp.h:2818
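A sketch of view_as reshaping a one-dimensional view as rank 2 over the same data, assuming v is a std::vector<int> with 12 elements; the total element counts must match.
array_view<int, 1> linear(12, v);
array_view<int, 2> grid = linear.view_as(extent<2>(3, 4)); // same storage, rank 2
grid(1, 2) = 7;                         // element 1 * 4 + 2 == 6 of the linear view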
int imax(int _X, int _Y) __GPU_ONLY
Determine the maximum numeric value of the arguments
Definition: amp.h:7320
bool _Is_cpu_accelerator(const accelerator &_Accl)
Definition: amprt.h:3401
array_view(int _E0, int _E1, _In_ _Value_type *_Src) __GPU
Construct an array_view which is bound to the data pointed to by _Src.
Definition: amp.h:2502
int __dp_d3d_signi(int) __GPU_ONLY
void wait() const __GPU_ONLY
Blocks execution of all threads in a tile until all threads in the tile have reached this call...
Definition: amp.h:965
array_view(int _E0, int _E1, int _E2, _Container &_Src) __CPU_ONLY
Construct an array_view which is bound to the data contained in the _Src container.
Definition: amp.h:2451
unsigned int __dp_d3d_countbitsu(unsigned int) __GPU_ONLY
array_view(const Concurrency::extent< _Rank > &_Extent, const _Container &_Src) __CPU_ONLY
Construct an array_view which is bound to the data contained in the _Src container.
Definition: amp.h:3099
tiled_index(const tiled_index &_Other) __GPU
Copy Constructor.
Definition: amp.h:1189
array_view & operator=(const array_view &_Other) __GPU
Copy Assignment operator. Shallow copy.
Definition: amp.h:2535
_In_ int _Value
Definition: setjmp.h:173
int atomic_fetch_add(_Inout_ int *_Dest, int _Value) __GPU_ONLY
Performs an atomic addition of _Value to the memory location pointed to by _Dest
Definition: amp.h:6563
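A hedged histogram sketch; input and bins are assumed array_view<int, 1> objects captured by the kernel, values are assumed non-negative, and BIN_COUNT is an assumed captured constant.
parallel_for_each(input.extent, [=](index<1> idx) restrict(amp)
{
    int b = input[idx] % BIN_COUNT;
    atomic_fetch_add(&bins[b], 1);      // concurrent increments of a shared bin are safe
});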
_Array_view_shape() __GPU
Definition: amp.h:1652
array_view section(int _I0, int _I1, int _I2, int _E0, int _E1, int _E2) const __GPU
Produces a three-dimensional subsection of the source array_view with origin specified by the index c...
Definition: amp.h:3636
array_view section(const Concurrency::index< _Rank > &_Idx) const __GPU
Produces a subsection of the source array_view with origin specified by an index, with an extent of (...
Definition: amp.h:2701
extent< _Rank > operator--(int) __GPU
Post-decrements each element of this extent.
Definition: amp.h:779
index< _Rank > _map_index(const index< _Rank > &_Index) const __GPU
Definition: amp.h:789
tile_barrier(const tile_barrier &_Other) __GPU
Copy Constructor. The tile_barrier class does not have a public default constructor or assignment ope...
Definition: amp.h:959
_Array_view_shape(const _Array_view_shape &_Other, const Concurrency::index< _Rank > &_Section_origin, const Concurrency::extent< _Rank > &_Section_extent) __GPU
Definition: amp.h:1587
void _Initialize() __GPU
Definition: amp.h:3008
_Value_type & get_ref(const index< _Rank > &_Index) const __GPU
Get a reference to the element indexed by _Index. Unlike the other indexing operators for accessing t...
Definition: amp.h:2585
float __dp_d3d_noisef(float) __GPU_ONLY
array_view< const _Value_type, 2 > section(int _I0, int _I1, int _E0, int _E1) const __GPU
Produces a two-dimensional subsection of the source array with origin specified by the index componen...
Definition: amp.h:5352
_CPP_AMP_VERIFY_RANK(_Rank, tiled_index)
details::_Projection_result_type< _Value_type, _Rank >::_Const_result_type operator[](int _I) const __GPU
Projects the most-significant dimension of this array. If the array rank is 1, this produces a single...
Definition: amp.h:5031
_Size
Definition: vcruntime_string.h:36
static _Ret_ void * _Create_projection_buffer_shape(const _Buffer_descriptor &_Descriptor, unsigned int _Dim, int _Dim_offset) __CPU_ONLY
Definition: amp.h:2074
unsigned int __dp_d3d_minu(unsigned int, unsigned int) __GPU_ONLY
_AMPIMP void _Write_end_event(ULONG _Span_id)
unsigned int _Get_rank() const
Definition: amprt.h:1591
tiled_extent pad() const __GPU
Returns a new tiled_extent with extents adjusted up to be evenly divisible by the tile dimensions...
Definition: amp.h:1321
#define _Ret_
Definition: sal.h:996
concurrency::completion_future copy_async(const array< _Value_type, _Rank > &_Src, array< _Value_type, _Rank > &_Dest)
Asynchronously copies the contents of the source array into the destination array.
Definition: amp.h:5988
extent< _Rank > & operator*=(int _Rhs) __GPU
Multiplies each element of this extent by an integer value.
Definition: amp.h:700
concurrency::completion_future _Start_async_op_wait_event_helper(ULONG _Async_op_id, _Event _Ev)
Definition: amprt.h:3753
int __dp_d3d_interlocked_max_int(_Inout_ int *, int) __GPU_ONLY
int firstbithigh(int _X) __GPU_ONLY
Gets the location of the first set bit in _X, starting from the highest order bit and working downwar...
Definition: amp.h:7289
std::enable_if< details::_Is_extent_or_index< _Tuple_type< _Rank > >::value, _Tuple_type< _Rank > >::type operator/(const _Tuple_type< _Rank > &_Lhs, typename _Tuple_type< _Rank >::value_type _Rhs) __GPU
Definition: amp.h:908
float __dp_d3d_stepf(float, float) __GPU_ONLY
access_type
Enumeration type used to denote the various types of access to data.
Definition: amprt.h:103
_Array_view_base(const _Array_view_base &_Other, const Concurrency::extent< _Rank > &_Array_extent) __GPU
Definition: amp.h:1718
bool contains(const index< rank > &_Index) const __GPU
Tests whether the index "_Index" is properly contained within this extent.
Definition: amp.h:529
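A small sketch of contains() as a bounds check before touching a computed index; av is an assumed array_view<float, 2> and r, c are assumed ints.
index<2> probe(r + 1, c + 1);
if (av.extent.contains(probe))          // only read if the index lies inside the extent
{
    float value = av[probe];
}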
array(const array_view< const _Value_type, _Rank > &_Src, accelerator_view _Av, accelerator_view _Associated_Av) __CPU_ONLY
Construct a staging array between two associated accelerator_views, initialized from an array_view...
Definition: amp.h:4829
tiled_extent truncate() const __GPU
Returns a new tiled_extent with extents adjusted down to be evenly divisible by the tile dimensions...
Definition: amp.h:1418
tiled_index(const index< rank > &_Global, const index< rank > &_Local, const index< rank > &_Tile, const index< rank > &_Tile_origin, const tile_barrier &_Barrier) __GPU
A Constructor that initializes data members using the given values.
Definition: amp.h:1175
array(int _E0, Concurrency::accelerator_view _Av, access_type _Cpu_access_type=access_type_auto) __CPU_ONLY
Construct an array with the extent _E0, bound to a specific accelerator_view.
Definition: amp.h:3984
float step(float _Y, float _X) __GPU_ONLY
Compares two values, returning 0 or 1 based on which value is greater
Definition: amp.h:7572
array_view< _Value_type, _New_rank > view_as(const Concurrency::extent< _New_rank > &_View_extent) __GPU
Produces an array_view of a different rank over this array's data.
Definition: amp.h:5460
static _Projection_result_type< _T, _R >::_Const_result_type _Project0(const array< _T, _R > *_Array, int _I) __GPU
Definition: xxamp_inl.h:65
index(int _I0, int _I1, int _I2) __GPU
Constructor for index<3>
Definition: amp.h:129
int atomic_fetch_min(_Inout_ int *_Dest, int _Value) __GPU_ONLY
Atomically computes the minimum of _Value and the value of the memory location pointed to by _Dest...
Definition: amp.h:6873
array(int _E0, int _E1, int _E2, _InputIterator _Src_first, Concurrency::accelerator_view _Av, Concurrency::accelerator_view _Associated_Av) __CPU_ONLY
Construct a staging array between two associated accelerator_view, initialized from an iterator into ...
Definition: amp.h:4770
array_view(const array_view< const _Value_type, _Rank > &_Src) __GPU
Copy constructor. Shallow copy.
Definition: amp.h:3085
array_view(int _E0, int _E1, _Container &_Src) __CPU_ONLY
Construct an array_view which is bound to the data contained in the _Src container.
Definition: amp.h:2409
void tile_static_memory_fence(const tile_barrier &_Barrier) __GPU_ONLY
Ensures that tile_static memory accesses are visible to other threads in the thread tile...
Definition: amp.h:7200
const _Value_type & get_ref(const index< _Rank > &_Index) const __GPU
Get a reference to the element indexed by _Index. Unlike the other indexing operators for accessing t...
Definition: amp.h:3431
array_view(const Concurrency::extent< _Rank > &_Extent, const _Value_type *_Src) __GPU
Construct an array_view which is bound to the data pointed to by _Src.
Definition: amp.h:3153